changeset 1153:f52844c71454

remerged r809
author Nicolas Saunier <nicolas.saunier@polymtl.ca>
date Mon, 24 Aug 2020 16:02:06 -0400
parents 658f87232536 (diff) bb0fad90cc31 (current diff)
children 2795d0e114c9
files
diffstat 110 files changed, 20809 insertions(+), 12253 deletions(-) [+]
line wrap: on
line diff
--- a/.hgignore	Fri Jun 10 15:43:02 2016 -0400
+++ b/.hgignore	Mon Aug 24 16:02:06 2020 -0400
@@ -29,6 +29,8 @@
 bin
 build
 CMakeFiles
+dist
+trafficintelligence.egg-info
 ipch
 win32-depends/opencv/
 win32-depends/boost/
--- a/.hgtags	Fri Jun 10 15:43:02 2016 -0400
+++ b/.hgtags	Mon Aug 24 16:02:06 2020 -0400
@@ -1,2 +1,7 @@
 ea2a8e8e4e77dbf0374e3ddc935d3a28672e8456 v0.1
 6022350f81736f3881726076987300fb3381cd25 OpenCV 3.1
+6022350f81736f3881726076987300fb3381cd25 OpenCV 3.1
+0000000000000000000000000000000000000000 OpenCV 3.1
+8bcac18c2b554e96c3bd7a5164e0def31d9b9920 OpenCV3 and Python3
+73b1241609111283f3b124f0fcf42e7e7504b2f4 v0.2
+799ef82caa1ab1964ccb54a4a875fbd8732a06c1 v0.2.4
--- a/CMakeLists.txt	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-CMAKE_MINIMUM_REQUIRED( VERSION 2.6 )
-
-FIND_PACKAGE(
-	OpenCV REQUIRED
-)
-
-FIND_LIBRARY(
-	SQLite3_LIBS sqlite3
-)
-
-#FIND_PACKAGE(TrajectoryManagement)
-
-SET(
-	CMAKE_CXX_FLAGS "-g -Wall"
-)
-
-set(CMAKE_RUNTIME_OUTPUT_DIRECTORY bin)
-
-add_executable(feature-based-tracking
-	c/cvutils.cpp
-	c/feature-based-tracking.cpp
-	c/Motion.cpp
-	c/Parameters.cpp
-	c/utils.cpp
-	c/InputFrameListModule.cpp
-	c/InputVideoFileModule.cpp
-	)
-
-find_package(Boost REQUIRED program_options filesystem system)
-find_library(TrajectoryManagement_LIBRARY TrajectoryManagementAndAnalysis)
-find_path(TrajectoryManagement_INCLUDE_DIR src/Trajectory.h)
-
-add_definitions(
-	-DUSE_OPENCV
-	)
-
-include_directories(
-	${PROJECT_SOURCE_DIR}/include
-	${TrajectoryManagement_INCLUDE_DIR}
-	)
-
-target_link_libraries(feature-based-tracking
-	${TrajectoryManagement_LIBRARY}
-	${SQLite3_LIBS}
-	${OpenCV_LIBS}
-	${Boost_LIBRARIES}
-	)
-
-install(TARGETS feature-based-tracking DESTINATION bin)
--- a/Makefile	Fri Jun 10 15:43:02 2016 -0400
+++ b/Makefile	Mon Aug 24 16:02:06 2020 -0400
@@ -1,4 +1,5 @@
 INSTALL_DIR = /usr/local/bin
+PYTHON_LIB_DIR = 
 
 cexe:
 	@cd c && make feature-based-tracking
@@ -10,16 +11,25 @@
 	@cd c && make clean
 	@cd python && rm *.pyc
 
-install: cexe
+installpython:
+	@echo "========================================="
+	@echo "Installing Python modules and scripts"
+	@tar cf /tmp/trafficintelligence.tar setup.py README trafficintelligence
+	@gzip /tmp/trafficintelligence.tar
+	@pip3 install /tmp/trafficintelligence.tar.gz
+	@rm /tmp/trafficintelligence.tar.gz
+	@cp scripts/* $(INSTALL_DIR)
+uploadpython:
+	@python setup.py sdist bdist_wheel
+	@twine upload dist/* --skip-existing
+	#python setup.py sdist upload
+
+install: cexe installpython
 	@echo "========================================="
 	@echo "Installing for Linux"
 	@echo "========================================="
-	@echo "Copying feature-based tracking executable"
-	@cp bin/feature-based-tracking /usr/local/bin
-	@echo "========================================="
-	@echo "Copying Python scripts"
-	@cp scripts/* $(INSTALL_DIR)
-
+	@echo "Installing feature-based tracking executable"
+	@cp bin/feature-based-tracking $(INSTALL_DIR)
 uninstall:
 	@echo "Uninstalling for Linux"
 	rm $(INSTALL_DIR)/feature-based-tracking 
--- a/README-Win32.txt	Fri Jun 10 15:43:02 2016 -0400
+++ b/README-Win32.txt	Mon Aug 24 16:02:06 2020 -0400
@@ -1,22 +1,22 @@
-To be able to use traffic intelligence on windows, you will first have to fetch the 3rd party libraries.
-To do this, go in the folder win32-depends and launch win32-depends-installer.bat. It should fetch the library 
-and unarchive them correctly.
-
-Second you will need is the project TrajectoryManagementAndAnalysis available at https://bitbucket.org/trajectories/trajectorymanagementandanalysis
-
-
-
-If you want the sln to work, the project must be in the same folder where you have cloned trafficintelligence.
-To clone the project, use hg clone https://bitbucket.org/trajectories/trajectorymanagementandanalysis
-
-If you've done everything correctly, you should have
-
-/trafficintelligence
-/trafficintelligence/win32-depends/boost/
-/trafficintelligence/win32-depends/klt/
-/trafficintelligence/win32-depends/opencv/
-/trafficintelligence/win32-depends/sqlite/
-/trajectorymanagementandanalysis
-
-
-
+To be able to use traffic intelligence on windows, you will first have to fetch the 3rd party libraries.
+To do this, go in the folder win32-depends and launch win32-depends-installer.bat. It should fetch the libraries 
+and unarchive them correctly.
+
+Second, you will need the project TrajectoryManagementAndAnalysis, available at https://bitbucket.org/trajectories/trajectorymanagementandanalysis
+
+
+
+If you want the sln to work, the project must be in the same folder where you have cloned trafficintelligence.
+To clone the project, use hg clone https://bitbucket.org/trajectories/trajectorymanagementandanalysis
+
+If you've done everything correctly, you should have
+
+/trafficintelligence
+/trafficintelligence/win32-depends/boost/
+/trafficintelligence/win32-depends/klt/
+/trafficintelligence/win32-depends/opencv/
+/trafficintelligence/win32-depends/sqlite/
+/trajectorymanagementandanalysis
+
+
+
--- a/c/Makefile	Fri Jun 10 15:43:02 2016 -0400
+++ b/c/Makefile	Mon Aug 24 16:02:06 2020 -0400
@@ -1,6 +1,6 @@
 EXE_DIR=../bin
 SCRIPTS_DIR=../scripts
-TRAJECTORYMANAGEMENT_DIR=$(HOME)/Research/Code/trajectorymanagementandanalysis/trunk/src/TrajectoryManagementAndAnalysis
+TRAJECTORYMANAGEMENT_DIR=../../trajectorymanagementandanalysis/trunk/src/TrajectoryManagementAndAnalysis
 
 CXX = g++
 
@@ -11,7 +11,7 @@
 #LDFLAGS = -Wl,-Bstatic -lm
 LDFLAGS = -lm
 LDFLAGS += -lTrajectoryManagementAndAnalysis -lsqlite3
-LDFLAGS += -lboost_program_options
+LDFLAGS += -lboost_program_options -lboost_filesystem -lboost_system
 #LDFLAGS += -lfltk
 
 CFLAGS = -Wall -W -Wextra -std=c++11
@@ -19,12 +19,18 @@
 
 ifneq ($(OPENCV), 0)
 	CFLAGS += -DUSE_OPENCV
-	LDFLAGS += -lopencv_highgui -lopencv_core -lopencv_video -lopencv_features2d -lopencv_imgproc -lopencv_imgcodecs -lopencv_videoio
+	LDFLAGS += -lopencv_highgui -lopencv_core -lopencv_video -lopencv_features2d -lopencv_imgproc -lopencv_imgcodecs -lopencv_videoio -lopencv_calib3d
 endif
 
 #LDFLAGS += -Wl,--as-needed -Wl,-Bdynamic,-lgcc_s,-Bstatic
 
 ifeq ($(UNAME), Linux)
+	INCLUDE+= -I/usr/local/include/opencv4
+	LIBS += -L/usr/local/lib
+	LINUX_BOOST_PREFIX = /usr/local
+	CFLAGS += -DLINUX
+	EXE_EXTENSION=''
+else ifeq ($(UNAME), Darwin)
 	OPENCV_HOME=/usr/local
 	INCLUDE+= -I$(OPENCV_HOME)/include -I$(OPENCV_HOME)/include/opencv
 	LIBS += -L$(OPENCV_HOME)/lib
@@ -40,7 +46,11 @@
 ifeq ($(DEBUG), 1)
 	CFLAGS += -g -gstabs+ -DDEBUG
 else
+    ifeq ($(UNAME), Darwin)
+        CFLAGS += -O3
+    else
 	CFLAGS += -O3 --fast-math
+    endif
 	CFLAGS += -DNDEBUG
 endif
 
@@ -57,9 +67,9 @@
 TESTS_OBJS = test_feature.o test_graph.o
 
 
-default: builddir all
+default: builddir tests feature-based-tracking
 
-all: test-pixels optical-flow track-features
+optional: test-pixels optical-flow track-features
 
 builddir:
 	@$(SCRIPTS_DIR)/createdirectory.sh $(EXE_DIR)
--- a/c/Motion.cpp	Fri Jun 10 15:43:02 2016 -0400
+++ b/c/Motion.cpp	Mon Aug 24 16:02:06 2020 -0400
@@ -4,8 +4,8 @@
 #include "src/TrajectoryDBAccessList.h"
 
 //#include "opencv2/core/core.hpp"
-#include "opencv2/imgproc/imgproc.hpp"
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc.hpp"
+#include "opencv2/highgui.hpp"
 
 #include <boost/graph/connected_components.hpp>
 
@@ -53,7 +53,7 @@
     float disp = 0;
     for (unsigned int i=0; i<nDisplacements; i++)
       disp += displacementDistances[nPositions-2-i];
-    result = disp <= minTotalFeatureDisplacement;
+    result = disp < minTotalFeatureDisplacement;
   }
   return result;
 }
@@ -125,17 +125,21 @@
 
 #ifdef USE_OPENCV
 /// \todo add option for anti-aliased drawing, thickness
-void FeatureTrajectory::draw(Mat& img, const Mat& homography, const Scalar& color) const {
+void FeatureTrajectory::draw(Mat& img, const Mat& homography, const Mat& intrinsicCameraMatrix, const Scalar& color) const {
   Point2f p1, p2;
   if (!homography.empty())
     p1 = project((*positions)[0], homography);
   else
     p1 = (*positions)[0];
+  if (!intrinsicCameraMatrix.empty())
+    p1 = cameraProject(p1, intrinsicCameraMatrix);
   for (unsigned int i=1; i<positions->size(); i++) {
     if (!homography.empty())
       p2 = project((*positions)[i], homography);
     else
       p2 = (*positions)[i];
+    if (!intrinsicCameraMatrix.empty())
+      p2 = cameraProject(p2, intrinsicCameraMatrix);
     line(img, p1, p2, color, 1);
     p1 = p2;
   }
--- a/c/Parameters.cpp	Fri Jun 10 15:43:02 2016 -0400
+++ b/c/Parameters.cpp	Mon Aug 24 16:02:06 2020 -0400
@@ -1,11 +1,14 @@
 #include "Parameters.hpp"
 
 #include <boost/program_options.hpp>
+#include <boost/filesystem.hpp>
 
 #include <iostream>
 #include <fstream>
 
 namespace po = boost::program_options;
+namespace fs = boost::filesystem; // soon std
+
 using namespace std;
 
 KLTFeatureTrackingParameters::KLTFeatureTrackingParameters(const int argc, char* argv[]) {
@@ -19,6 +22,7 @@
     ("tf", "tracks features")
     ("gf", "groups features")
     ("loading-time", "report feature and object loading times")
+    ("quiet", "mute printing of frame numbers")
     ("config-file", po::value<string>(&configurationFilename), "configuration file")
     ;
 
@@ -92,9 +96,10 @@
       cout << cmdLine << endl;
       exit(0);      
     }
-      
+
     cout << "Using configuration file " << configurationFilename << endl;
-
+    parentDirname = fs::path(configurationFilename).parent_path().string();
+    
     ifstream configurationFile(configurationFilename.c_str());
     store(po::parse_config_file(configurationFile, cmdLineAndFile, true), vm);
     notify(vm);
@@ -104,6 +109,7 @@
     trackFeatures = vm.count("tf")>0;
     groupFeatures = vm.count("gf")>0;
     loadingTime = vm.count("loading-time")>0;
+    quiet = vm.count("quiet")>0;
 
     if (vm.count("help")) {
       cout << cmdLine << endl;
--- a/c/cvutils.cpp	Fri Jun 10 15:43:02 2016 -0400
+++ b/c/cvutils.cpp	Mon Aug 24 16:02:06 2020 -0400
@@ -1,9 +1,9 @@
 #include "cvutils.hpp"
 #include "utils.hpp"
 
-#include "opencv2/core/core.hpp"
-#include "opencv2/highgui/highgui.hpp"
-#include "opencv2/features2d/features2d.hpp"
+#include "opencv2/core.hpp"
+#include "opencv2/highgui.hpp"
+#include "opencv2/features2d.hpp"
 
 #include <iostream>
 #include <vector>
@@ -25,6 +25,14 @@
   return Point2f(x, y);
 }
 
+Point2f cameraProject(const Point2f& p, const Mat& cameraMatrix) {
+  //Mat homogeneous(3, 1, CV_32FC1);
+  float x, y;
+  x = cameraMatrix.at<double>(0,0)*p.x+cameraMatrix.at<double>(0,2);
+  y = cameraMatrix.at<double>(1,1)*p.y+cameraMatrix.at<double>(1,2);
+  return Point2f(x, y);
+}
+
 Mat loadMat(const string& filename, const string& separator) {
   vector<vector<float> > numbers = ::loadNumbers(filename, separator);
   
--- a/c/feature-based-tracking.cpp	Fri Jun 10 15:43:02 2016 -0400
+++ b/c/feature-based-tracking.cpp	Mon Aug 24 16:02:06 2020 -0400
@@ -7,14 +7,17 @@
 #include "src/TrajectoryDBAccessList.h"
 #include "src/TrajectoryDBAccessBlob.h"
 
-#include "opencv2/core/core.hpp"
-#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/core.hpp"
+#include "opencv2/imgproc.hpp"
 #include "opencv2/video/tracking.hpp"
-#include "opencv2/features2d/features2d.hpp"
-#include "opencv2/highgui/highgui.hpp"
-#include "opencv2/objdetect/objdetect.hpp"
+#include "opencv2/features2d.hpp"
+#include "opencv2/highgui.hpp"
+#include "opencv2/videoio.hpp"
+#include "opencv2/objdetect.hpp"
+#include "opencv2/calib3d.hpp"
 
 #include <boost/foreach.hpp>
+#include <boost/filesystem.hpp>
 
 #include <iostream>
 #include <vector>
@@ -23,6 +26,8 @@
 #include <memory>
 #include <limits>
 
+namespace fs = boost::filesystem; // soon std
+
 using namespace std;
 using namespace cv;
 
@@ -62,7 +67,8 @@
 }
 
 void trackFeatures(const KLTFeatureTrackingParameters& params) {
-  Mat homography = ::loadMat(params.homographyFilename, " ");
+  Mat homography = ::loadMat(::getRelativeFilename(params.parentDirname, params.homographyFilename), " ");
+    
   Mat invHomography;
   if (params.display && !homography.empty())
     invHomography = homography.inv();
@@ -89,15 +95,15 @@
     cout << "Empty video filename. Exiting." << endl;
     exit(0);
   }
-    
-  VideoCapture capture(params.videoFilename);
+
+  VideoCapture capture(::getRelativeFilename(params.parentDirname, params.videoFilename));
   if(!capture.isOpened()) {
     cout << "Video filename " << params.videoFilename << " could not be opened. Exiting." << endl;
     exit(0);
   }
   
-  Size videoSize = Size(capture.get(CV_CAP_PROP_FRAME_WIDTH), capture.get(CV_CAP_PROP_FRAME_HEIGHT));
-  unsigned int nFrames = capture.get(CV_CAP_PROP_FRAME_COUNT);
+  Size videoSize = Size(capture.get(CAP_PROP_FRAME_WIDTH), capture.get(CAP_PROP_FRAME_HEIGHT));
+  unsigned int nFrames = capture.get(CAP_PROP_FRAME_COUNT);
   if (nFrames <= 0) {
     cout << "Guessing that the number of frames could not be read: " << nFrames << endl;
     nFrames = numeric_limits<int>::max();
@@ -110,33 +116,30 @@
 	  ", nframes=" << nFrames << endl;
 
   Mat map1, map2;
+  Mat intrinsicCameraMatrix, newIntrinsicCameraMatrix;
   if (params.undistort) {
-    Mat intrinsicCameraMatrix = ::loadMat(params.intrinsicCameraFilename, " ");
-    Mat newIntrinsicCameraMatrix = intrinsicCameraMatrix.clone(); 
-    videoSize = Size(static_cast<int>(round(videoSize.width*params.undistortedImageMultiplication)), static_cast<int>(round(videoSize.height*params.undistortedImageMultiplication)));
-    newIntrinsicCameraMatrix.at<float>(0,2) = videoSize.width/2.;
-    newIntrinsicCameraMatrix.at<float>(1,2) = videoSize.height/2.;
-    initUndistortRectifyMap(intrinsicCameraMatrix, params.distortionCoefficients, Mat::eye(3,3, CV_32FC1), newIntrinsicCameraMatrix, videoSize, CV_32FC1, map1, map2);
+    intrinsicCameraMatrix = ::loadMat(::getRelativeFilename(params.parentDirname, params.intrinsicCameraFilename), " ");
+    Size undistortedVideoSize = Size(static_cast<int>(round(videoSize.width*params.undistortedImageMultiplication)), static_cast<int>(round(videoSize.height*params.undistortedImageMultiplication)));
+    newIntrinsicCameraMatrix = getDefaultNewCameraMatrix(intrinsicCameraMatrix, undistortedVideoSize, true);// for some reason, it's double type //getOptimalNewCameraMatrix(intrinsicCameraMatrix, params.distortionCoefficients, videoSize, 1, undistortedVideoSize);//, 0, true);
+    initUndistortRectifyMap(intrinsicCameraMatrix, params.distortionCoefficients, Mat::eye(3,3, CV_32FC1) /* 0 ?*/, newIntrinsicCameraMatrix, undistortedVideoSize, CV_32FC1, map1, map2);
     
-    cout << "Undistorted width=" << videoSize.width <<
-      ", height=" << videoSize.height << endl;
+    cout << "Undistorted width=" << undistortedVideoSize.width <<
+      ", height=" << undistortedVideoSize.height << endl;
   }
   
-  Mat mask = imread(params.maskFilename, 0);
+  Mat mask = imread(::getRelativeFilename(params.parentDirname, params.maskFilename), 0);
   if (mask.empty()) {
     cout << "Mask filename " << params.maskFilename << " could not be opened." << endl;
     mask = Mat::ones(videoSize, CV_8UC1);
   }
 
   std::shared_ptr<TrajectoryDBAccess<Point2f> > trajectoryDB = std::shared_ptr<TrajectoryDBAccess<Point2f> >(new TrajectoryDBAccessList<Point2f>());
-  //TrajectoryDBAccess<Point2f>* trajectoryDB = new TrajectoryDBAccessBlob<Point2f>();
-  trajectoryDB->connect(params.databaseFilename.c_str());
+  trajectoryDB->connect(::getRelativeFilename(params.parentDirname, params.databaseFilename).c_str());
   trajectoryDB->createTable("positions");
   trajectoryDB->createTable("velocities");
   trajectoryDB->beginTransaction();
 
-  std::vector<KeyPoint> prevKpts, currKpts;
-  std::vector<Point2f> prevPts, currPts, newPts;
+  std::vector<Point2f> prevPts, currPts, newPts, undistortedPts; // all points but undistortedPts are in image space
   std::vector<uchar> status;
   std::vector<float> errors;
   Mat prevDesc, currDesc;
@@ -146,45 +149,45 @@
 
   int key = '?';
   unsigned int savedFeatureId=0;
-  Mat frame = Mat::zeros(1, 1, CV_8UC1), currentFrameBW, previousFrameBW, undistortedFrame;
+  Mat frame, currentFrameBW, previousFrameBW, displayFrame; // = Mat::zeros(1, 1, CV_8UC1)
 
   unsigned int lastFrameNum = nFrames;
   if (params.nFrames > 0)
     lastFrameNum = MIN(params.frame1+static_cast<unsigned int>(params.nFrames), nFrames);
 
-  capture.set(CV_CAP_PROP_POS_FRAMES, params.frame1);
+  capture.set(CAP_PROP_POS_FRAMES, params.frame1);
   for (unsigned int frameNum = params.frame1; (frameNum < lastFrameNum) && !::interruptionKey(key); frameNum++) {
     capture >> frame;
     if (frame.empty()) {
       cout << "Empty frame " << frameNum << ", breaking (" << frame.empty() << " [" << frame.size().width << "x" << frame.size().height << "])" << endl;
       break;
-    } else if (frameNum%50 ==0)
+    } else if (!params.quiet && (frameNum%50 ==0))
       cout << "frame " << frameNum << endl;
-
-    if (params.undistort) {
-      remap(frame, undistortedFrame, map1, map2, interpolationMethod, BORDER_CONSTANT, 0.);
-      frame = undistortedFrame;
-
-      if (frame.size() != videoSize) {
-	cout << "Different frame size " << frameNum << ", breaking ([" << frame.size().width << "x" << frame.size().height << "])" << endl;
-	break;
-      }
-    }
     
-    cvtColor(frame, currentFrameBW, CV_RGB2GRAY);
+    cvtColor(frame, currentFrameBW, COLOR_RGB2GRAY);
     
     if (!prevPts.empty()) {
       currPts.clear();
       calcOpticalFlowPyrLK(previousFrameBW, currentFrameBW, prevPts, currPts, status, errors, window, params.pyramidLevel, TermCriteria(static_cast<int>(TermCriteria::COUNT)+static_cast<int>(TermCriteria::EPS) /* = 3 */, params.maxNumberTrackingIterations, params.minTrackingError), /* int flags = */ 0, params.minFeatureEigThreshold);
       /// \todo try calcOpticalFlowFarneback
 
+      if (params.undistort)
+	undistortPoints(currPts, undistortedPts, intrinsicCameraMatrix, params.distortionCoefficients);
+      else
+	undistortedPts =currPts;
+      
       std::vector<Point2f> trackedPts;
       std::vector<FeaturePointMatch>::iterator iter = featurePointMatches.begin();
       while (iter != featurePointMatches.end()) {
 	bool deleteFeature = false;
-	  
-	if (status[iter->pointNum] && (mask.at<uchar>(static_cast<int>(round(currPts[iter->pointNum].y)), static_cast<int>(round(currPts[iter->pointNum].x))) != 0)) {
-	  iter->feature->addPoint(frameNum, currPts[iter->pointNum], homography);
+	
+	int currPtX = static_cast<int>(floor(currPts[iter->pointNum].x));
+	int currPtY = static_cast<int>(floor(currPts[iter->pointNum].y));
+	if ((status[iter->pointNum] =! 0) && 
+	    (currPtX >= 0) && (currPtX < videoSize.width) && 
+	    (currPtY >= 0) && (currPtY < videoSize.height) && 
+	    (mask.at<uchar>(currPtY, currPtX) != 0)) {
+	  iter->feature->addPoint(frameNum, undistortedPts[iter->pointNum], homography);
 	  
 	  deleteFeature = iter->feature->isDisplacementSmall(params.nDisplacements, minTotalFeatureDisplacement)
 	    || !iter->feature->isMotionSmooth(params.accelerationBound, params.deviationBound);
@@ -210,10 +213,14 @@
       currPts = trackedPts;
       assert(currPts.size() == featurePointMatches.size());
       saveFeatures(lostFeatures, *trajectoryDB, "positions", "velocities");
-	
+      
       if (params.display) {
+	if (params.undistort)
+	  remap(frame, displayFrame, map1, map2, interpolationMethod, BORDER_CONSTANT, 0.);
+	 else
+	  displayFrame = frame.clone();
 	BOOST_FOREACH(FeaturePointMatch fp, featurePointMatches)
-	  fp.feature->draw(frame, invHomography, Colors::red());
+	  fp.feature->draw(displayFrame, invHomography, newIntrinsicCameraMatrix, Colors::red());
       }
     }
     
@@ -224,15 +231,20 @@
 	for (int i=MAX(0, currPts[n].y-params.minFeatureDistanceKLT); i<MIN(videoSize.height, currPts[n].y+params.minFeatureDistanceKLT+1); i++)
 	  featureMask.at<uchar>(i,j)=0;
     goodFeaturesToTrack(currentFrameBW, newPts, params.maxNFeatures, params.featureQuality, params.minFeatureDistanceKLT, featureMask, params.blockSize, params.useHarrisDetector, params.k);
-    BOOST_FOREACH(Point2f p, newPts) {
-      FeatureTrajectoryPtr f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, p, homography));
-      featurePointMatches.push_back(FeaturePointMatch(f, currPts.size()));
-      currPts.push_back(p);
+    if (params.undistort && newPts.size() > 0)
+      undistortPoints(newPts, undistortedPts, intrinsicCameraMatrix, params.distortionCoefficients);
+    else
+      undistortedPts = newPts;
+	
+    for (unsigned int i=0; i<newPts.size(); i++) {
+	FeatureTrajectoryPtr f = FeatureTrajectoryPtr(new FeatureTrajectory(frameNum, undistortedPts[i], homography));
+	featurePointMatches.push_back(FeaturePointMatch(f, currPts.size()));
+	currPts.push_back(newPts[i]);
     }
-      
-    if (params.display) {
+  
+    if (params.display && !displayFrame.empty()) {
       imshow("mask", featureMask*256);
-      imshow("frame", frame);
+      imshow("frame", displayFrame);
       key = waitKey(2);
     }
     previousFrameBW = currentFrameBW.clone();
@@ -257,7 +269,7 @@
 
 void groupFeatures(const KLTFeatureTrackingParameters& params) {
   std::shared_ptr<TrajectoryDBAccessList<Point2f> > trajectoryDB = std::shared_ptr<TrajectoryDBAccessList<Point2f> >(new TrajectoryDBAccessList<Point2f>());
-  bool success = trajectoryDB->connect(params.databaseFilename.c_str());
+  bool success = trajectoryDB->connect(::getRelativeFilename(params.parentDirname, params.databaseFilename).c_str());
   trajectoryDB->createObjectTable("objects", "objects_features");
   unsigned int savedObjectId=0;
 
@@ -285,7 +297,7 @@
   for (frameNum = firstFrameNum; frameNum<lastFrameNum; frameNum ++) {
     vector<int> trajectoryIds;
     success  = trajectoryDB->trajectoryIdEndingAt(trajectoryIds, frameNum);
-    if (frameNum%100 ==0)
+    if (!params.quiet && (frameNum%100 ==0))
       cout << "frame " << frameNum << endl;
 #if DEBUG
     cout << trajectoryIds.size() << " trajectories " << endl;
@@ -310,7 +322,7 @@
       }
     }
     
-    if (frameNum%100 ==0)
+    if (!params.quiet && (frameNum%100 ==0))
       cout << featureGraph.informationString() << endl;
   }
 
@@ -332,7 +344,7 @@
 
 void loadingTimes(const KLTFeatureTrackingParameters& params) {
   std::shared_ptr<TrajectoryDBAccessList<Point2f> > trajectoryDB = std::shared_ptr<TrajectoryDBAccessList<Point2f> >(new TrajectoryDBAccessList<Point2f>());
-  bool success = trajectoryDB->connect(params.databaseFilename.c_str());
+  bool success = trajectoryDB->connect(::getRelativeFilename(params.parentDirname, params.databaseFilename).c_str());
   
   vector<std::shared_ptr<Trajectory<Point2f> > > trajectories;
   //cout << trajectories.size() << endl;
--- a/c/test_feature.cpp	Fri Jun 10 15:43:02 2016 -0400
+++ b/c/test_feature.cpp	Mon Aug 24 16:02:06 2020 -0400
@@ -3,7 +3,7 @@
 #include "Motion.hpp"
 #include "testutils.hpp"
 
-#include "opencv2/core/core.hpp"
+#include "opencv2/core.hpp"
 
 #include "catch.hpp"
 
--- a/c/test_graph.cpp	Fri Jun 10 15:43:02 2016 -0400
+++ b/c/test_graph.cpp	Mon Aug 24 16:02:06 2020 -0400
@@ -1,7 +1,7 @@
 #include "Motion.hpp"
 #include "testutils.hpp"
 
-#include "opencv2/core/core.hpp"
+#include "opencv2/core.hpp"
 
 #include "catch.hpp"
 
--- a/c/utils.cpp	Fri Jun 10 15:43:02 2016 -0400
+++ b/c/utils.cpp	Mon Aug 24 16:02:06 2020 -0400
@@ -1,10 +1,13 @@
 #include "utils.hpp"
 
 #include <boost/foreach.hpp>
+#include <boost/filesystem.hpp>
 
 #include <iostream>
 #include <fstream>
 
+namespace fs = boost::filesystem; // soon std
+
 using namespace std;
 
 std::vector<std::vector<float> > loadNumbers(const string& filename, const string& separator /* = " " */) {
@@ -27,6 +30,15 @@
   return result;
 }
 
+std::string getRelativeFilename(const std::string& parentDirname, const std::string& filename) {
+  fs::path parentPath(parentDirname);
+  fs::path filePath(filename);
+  if (filePath.is_absolute())
+    return filename;
+  else
+    return (parentPath/filePath).string();
+}
+
 int toInt(const std::string& s) { int i; fromString(i, s); return i;} //atoi
 
 float toFloat(const std::string& s) { float x; fromString(x, s); return x;}// lexical_cast<float>(s)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/classifier.cfg	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,42 @@
+# filename of the general ped/cyc/veh SVM classifier
+pbv-svm-filename = modelPBV.xml
+# filename of the cyc/veh SVM classifier
+bv-svm-filename = modelBV.xml
+# percent increase of the max of width and height of the bounding box of features extracted for classification
+percent-increase-crop = 0.2
+# min number of pixels in cropped image to classify by SVM
+min-npixels-crop = 800
+# square size to resize image crops for HoG computation
+hog-rescale-size = 64
+# number of HoG orientation
+hog-norientations = 9
+# number of pixels per cell for HoG computation
+hog-npixels-cell = 8
+# number of cells per block for HoG computation
+hog-ncells-block = 2
+# block normalization method (L1, L1-sqrt, L2, L2-Hys)
+hog-block-norm = L1-sqrt
+# method to aggregate road user speed: mean, median or any (per)centile
+speed-aggregation-method = median
+# number of frames to ignore at both ends of a series (noisy)
+nframes-ignore-at-ends = 2
+# centile for the speed aggregation, if centile is chosen
+speed-aggregation-centile = 50
+# speed value below which all classes are equiprobable (distributions give odd values there) (km/h)
+min-speed-equiprobable = 3.33
+# maximum proportion of the instants with unknown appearance classification to use speed information
+max-prop-unknown-appearance = 0.66
+# maximum pedestrian speed (aggregate: mean, median, 85th centile, etc.) 10 km/h
+max-ped-speed = 10.0
+# maximum cyclist speed (aggregate: mean, median, 85th centile, etc.) 30 km/h (3xped)
+max-cyc-speed = 30.0
+# mean pedestrian speed and standard deviation (in a normal distribution) 4.91+-0.88 km/h
+mean-ped-speed = 4.91
+std-ped-speed = 0.88
+# mean cyclist speed and standard deviation (in a log-normal distribution) 11.+-4.83 km/h
+# to multiply the parameters by a, loc = loc+ln(a)
+cyc-speed-loc = 2.31
+cyc-speed-scale = 0.42
+# mean vehicle speed and standard deviation (in a normal distribution) 18.45+-7.6 km/h
+mean-veh-speed = 18.45
+std-veh-speed = 7.6
--- a/include/Motion.hpp	Fri Jun 10 15:43:02 2016 -0400
+++ b/include/Motion.hpp	Mon Aug 24 16:02:06 2020 -0400
@@ -52,7 +52,7 @@
   void write(TrajectoryDBAccess<cv::Point2f>& trajectoryDB, const std::string& positionsTableName, const std::string& velocitiesTableName) const;
 
 #ifdef USE_OPENCV
-  void draw(cv::Mat& img, const cv::Mat& homography, const cv::Scalar& color) const;
+  void draw(cv::Mat& img, const cv::Mat& homography, const cv::Mat& intrinsicCameraMatrix, const cv::Scalar& color) const;
 #endif
 
   friend std::ostream& operator<<(std::ostream& out, const FeatureTrajectory& ft);
--- a/include/Parameters.hpp	Fri Jun 10 15:43:02 2016 -0400
+++ b/include/Parameters.hpp	Mon Aug 24 16:02:06 2020 -0400
@@ -17,7 +17,9 @@
   bool trackFeatures;
   bool groupFeatures;
   bool loadingTime;
+  bool quiet;
 
+  std::string parentDirname;
   std::string videoFilename;
   std::string databaseFilename;
   std::string homographyFilename;
--- a/include/catch.hpp	Fri Jun 10 15:43:02 2016 -0400
+++ b/include/catch.hpp	Mon Aug 24 16:02:06 2020 -0400
@@ -1,5 +1,6 @@
 /*
- *  Generated: 2012-06-06 08:05:56.928287
+ *  Catch v1.5.6
+ *  Generated: 2016-06-09 19:20:41.460328
  *  ----------------------------------------------------------
  *  This file has been merged from multiple headers. Please don't edit it directly
  *  Copyright (c) 2012 Two Blue Cubes Ltd. All rights reserved.
@@ -10,38 +11,334 @@
 #ifndef TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED
 #define TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED
 
-// #included from: internal/catch_context.h
-
-// #included from: catch_interfaces_reporter.h
+#define TWOBLUECUBES_CATCH_HPP_INCLUDED
+
+#ifdef __clang__
+#    pragma clang system_header
+#elif defined __GNUC__
+#    pragma GCC system_header
+#endif
+
+// #included from: internal/catch_suppress_warnings.h
+
+#ifdef __clang__
+#   ifdef __ICC // icpc defines the __clang__ macro
+#       pragma warning(push)
+#       pragma warning(disable: 161 1682)
+#   else // __ICC
+#       pragma clang diagnostic ignored "-Wglobal-constructors"
+#       pragma clang diagnostic ignored "-Wvariadic-macros"
+#       pragma clang diagnostic ignored "-Wc99-extensions"
+#       pragma clang diagnostic ignored "-Wunused-variable"
+#       pragma clang diagnostic push
+#       pragma clang diagnostic ignored "-Wpadded"
+#       pragma clang diagnostic ignored "-Wc++98-compat"
+#       pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
+#       pragma clang diagnostic ignored "-Wswitch-enum"
+#       pragma clang diagnostic ignored "-Wcovered-switch-default"
+#    endif
+#elif defined __GNUC__
+#    pragma GCC diagnostic ignored "-Wvariadic-macros"
+#    pragma GCC diagnostic ignored "-Wunused-variable"
+#    pragma GCC diagnostic push
+#    pragma GCC diagnostic ignored "-Wpadded"
+#endif
+#if defined(CATCH_CONFIG_MAIN) || defined(CATCH_CONFIG_RUNNER)
+#  define CATCH_IMPL
+#endif
+
+#ifdef CATCH_IMPL
+#  ifndef CLARA_CONFIG_MAIN
+#    define CLARA_CONFIG_MAIN_NOT_DEFINED
+#    define CLARA_CONFIG_MAIN
+#  endif
+#endif
+
+// #included from: internal/catch_notimplemented_exception.h
+#define TWOBLUECUBES_CATCH_NOTIMPLEMENTED_EXCEPTION_H_INCLUDED
 
 // #included from: catch_common.h
+#define TWOBLUECUBES_CATCH_COMMON_H_INCLUDED
 
 #define INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line ) name##line
 #define INTERNAL_CATCH_UNIQUE_NAME_LINE( name, line ) INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line )
-#define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __LINE__ )
+#ifdef CATCH_CONFIG_COUNTER
+#  define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __COUNTER__ )
+#else
+#  define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __LINE__ )
+#endif
 
 #define INTERNAL_CATCH_STRINGIFY2( expr ) #expr
 #define INTERNAL_CATCH_STRINGIFY( expr ) INTERNAL_CATCH_STRINGIFY2( expr )
 
-#ifdef __GNUC__
-#define ATTRIBUTE_NORETURN __attribute__ ((noreturn))
-#else
-#define ATTRIBUTE_NORETURN
-#endif
-
 #include <sstream>
 #include <stdexcept>
 #include <algorithm>
 
+// #included from: catch_compiler_capabilities.h
+#define TWOBLUECUBES_CATCH_COMPILER_CAPABILITIES_HPP_INCLUDED
+
+// Detect a number of compiler features - mostly C++11/14 conformance - by compiler
+// The following features are defined:
+//
+// CATCH_CONFIG_CPP11_NULLPTR : is nullptr supported?
+// CATCH_CONFIG_CPP11_NOEXCEPT : is noexcept supported?
+// CATCH_CONFIG_CPP11_GENERATED_METHODS : The delete and default keywords for compiler generated methods
+// CATCH_CONFIG_CPP11_IS_ENUM : std::is_enum is supported?
+// CATCH_CONFIG_CPP11_TUPLE : std::tuple is supported
+// CATCH_CONFIG_CPP11_LONG_LONG : is long long supported?
+// CATCH_CONFIG_CPP11_OVERRIDE : is override supported?
+// CATCH_CONFIG_CPP11_UNIQUE_PTR : is unique_ptr supported (otherwise use auto_ptr)
+
+// CATCH_CONFIG_CPP11_OR_GREATER : Is C++11 supported?
+
+// CATCH_CONFIG_VARIADIC_MACROS : are variadic macros supported?
+// CATCH_CONFIG_COUNTER : is the __COUNTER__ macro supported?
+// ****************
+// Note to maintainers: if new toggles are added please document them
+// in configuration.md, too
+// ****************
+
+// In general each macro has a _NO_<feature name> form
+// (e.g. CATCH_CONFIG_CPP11_NO_NULLPTR) which disables the feature.
+// Many features, at point of detection, define an _INTERNAL_ macro, so they
+// can be combined, en-mass, with the _NO_ forms later.
+
+// All the C++11 features can be disabled with CATCH_CONFIG_NO_CPP11
+
+#ifdef __cplusplus
+
+#  if __cplusplus >= 201103L
+#    define CATCH_CPP11_OR_GREATER
+#  endif
+
+#  if __cplusplus >= 201402L
+#    define CATCH_CPP14_OR_GREATER
+#  endif
+
+#endif
+
+#ifdef __clang__
+
+#  if __has_feature(cxx_nullptr)
+#    define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR
+#  endif
+
+#  if __has_feature(cxx_noexcept)
+#    define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT
+#  endif
+
+#   if defined(CATCH_CPP11_OR_GREATER)
+#       define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS _Pragma( "clang diagnostic ignored \"-Wparentheses\"" )
+#   endif
+
+#endif // __clang__
+
+////////////////////////////////////////////////////////////////////////////////
+// Borland
+#ifdef __BORLANDC__
+
+#endif // __BORLANDC__
+
+////////////////////////////////////////////////////////////////////////////////
+// EDG
+#ifdef __EDG_VERSION__
+
+#endif // __EDG_VERSION__
+
+////////////////////////////////////////////////////////////////////////////////
+// Digital Mars
+#ifdef __DMC__
+
+#endif // __DMC__
+
+////////////////////////////////////////////////////////////////////////////////
+// GCC
+#ifdef __GNUC__
+
+#   if __GNUC__ == 4 && __GNUC_MINOR__ >= 6 && defined(__GXX_EXPERIMENTAL_CXX0X__)
+#       define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR
+#   endif
+
+#   if !defined(CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS) && defined(CATCH_CPP11_OR_GREATER)
+#       define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS _Pragma( "GCC diagnostic ignored \"-Wparentheses\"" )
+#   endif
+
+// - otherwise more recent versions define __cplusplus >= 201103L
+// and will get picked up below
+
+#endif // __GNUC__
+
+////////////////////////////////////////////////////////////////////////////////
+// Visual C++
+#ifdef _MSC_VER
+
+#if (_MSC_VER >= 1600)
+#   define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR
+#   define CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR
+#endif
+
+#if (_MSC_VER >= 1900 ) // (VC++ 13 (VS2015))
+#define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT
+#define CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS
+#endif
+
+#endif // _MSC_VER
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Use variadic macros if the compiler supports them
+#if ( defined _MSC_VER && _MSC_VER > 1400 && !defined __EDGE__) || \
+    ( defined __WAVE__ && __WAVE_HAS_VARIADICS ) || \
+    ( defined __GNUC__ && __GNUC__ >= 3 ) || \
+    ( !defined __cplusplus && __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L )
+
+#define CATCH_INTERNAL_CONFIG_VARIADIC_MACROS
+
+#endif
+
+// Use __COUNTER__ if the compiler supports it
+#if ( defined _MSC_VER && _MSC_VER >= 1300 ) || \
+    ( defined __GNUC__  && __GNUC__ >= 4 && __GNUC_MINOR__ >= 3 ) || \
+    ( defined __clang__ && __clang_major__ >= 3 )
+
+#define CATCH_INTERNAL_CONFIG_COUNTER
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+// C++ language feature support
+
+// catch all support for C++11
+#if defined(CATCH_CPP11_OR_GREATER)
+
+#  if !defined(CATCH_INTERNAL_CONFIG_CPP11_NULLPTR)
+#    define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR
+#  endif
+
+#  ifndef CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT
+#    define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT
+#  endif
+
+#  ifndef CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS
+#    define CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS
+#  endif
+
+#  ifndef CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM
+#    define CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM
+#  endif
+
+#  ifndef CATCH_INTERNAL_CONFIG_CPP11_TUPLE
+#    define CATCH_INTERNAL_CONFIG_CPP11_TUPLE
+#  endif
+
+#  ifndef CATCH_INTERNAL_CONFIG_VARIADIC_MACROS
+#    define CATCH_INTERNAL_CONFIG_VARIADIC_MACROS
+#  endif
+
+#  if !defined(CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG)
+#    define CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG
+#  endif
+
+#  if !defined(CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE)
+#    define CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE
+#  endif
+#  if !defined(CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR)
+#    define CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR
+#  endif
+
+#endif // __cplusplus >= 201103L
+
+// Now set the actual defines based on the above + anything the user has configured
+#if defined(CATCH_INTERNAL_CONFIG_CPP11_NULLPTR) && !defined(CATCH_CONFIG_CPP11_NO_NULLPTR) && !defined(CATCH_CONFIG_CPP11_NULLPTR) && !defined(CATCH_CONFIG_NO_CPP11)
+#   define CATCH_CONFIG_CPP11_NULLPTR
+#endif
+#if defined(CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_CONFIG_CPP11_NO_NOEXCEPT) && !defined(CATCH_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_CONFIG_NO_CPP11)
+#   define CATCH_CONFIG_CPP11_NOEXCEPT
+#endif
+#if defined(CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS) && !defined(CATCH_CONFIG_CPP11_NO_GENERATED_METHODS) && !defined(CATCH_CONFIG_CPP11_GENERATED_METHODS) && !defined(CATCH_CONFIG_NO_CPP11)
+#   define CATCH_CONFIG_CPP11_GENERATED_METHODS
+#endif
+#if defined(CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM) && !defined(CATCH_CONFIG_CPP11_NO_IS_ENUM) && !defined(CATCH_CONFIG_CPP11_IS_ENUM) && !defined(CATCH_CONFIG_NO_CPP11)
+#   define CATCH_CONFIG_CPP11_IS_ENUM
+#endif
+#if defined(CATCH_INTERNAL_CONFIG_CPP11_TUPLE) && !defined(CATCH_CONFIG_CPP11_NO_TUPLE) && !defined(CATCH_CONFIG_CPP11_TUPLE) && !defined(CATCH_CONFIG_NO_CPP11)
+#   define CATCH_CONFIG_CPP11_TUPLE
+#endif
+#if defined(CATCH_INTERNAL_CONFIG_VARIADIC_MACROS) && !defined(CATCH_CONFIG_NO_VARIADIC_MACROS) && !defined(CATCH_CONFIG_VARIADIC_MACROS)
+#   define CATCH_CONFIG_VARIADIC_MACROS
+#endif
+#if defined(CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG) && !defined(CATCH_CONFIG_NO_LONG_LONG) && !defined(CATCH_CONFIG_CPP11_LONG_LONG) && !defined(CATCH_CONFIG_NO_CPP11)
+#   define CATCH_CONFIG_CPP11_LONG_LONG
+#endif
+#if defined(CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE) && !defined(CATCH_CONFIG_NO_OVERRIDE) && !defined(CATCH_CONFIG_CPP11_OVERRIDE) && !defined(CATCH_CONFIG_NO_CPP11)
+#   define CATCH_CONFIG_CPP11_OVERRIDE
+#endif
+#if defined(CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) && !defined(CATCH_CONFIG_NO_UNIQUE_PTR) && !defined(CATCH_CONFIG_CPP11_UNIQUE_PTR) && !defined(CATCH_CONFIG_NO_CPP11)
+#   define CATCH_CONFIG_CPP11_UNIQUE_PTR
+#endif
+#if defined(CATCH_INTERNAL_CONFIG_COUNTER) && !defined(CATCH_CONFIG_NO_COUNTER) && !defined(CATCH_CONFIG_COUNTER)
+#   define CATCH_CONFIG_COUNTER
+#endif
+
+#if !defined(CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS)
+#   define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS
+#endif
+
+// noexcept support:
+#if defined(CATCH_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_NOEXCEPT)
+#  define CATCH_NOEXCEPT noexcept
+#  define CATCH_NOEXCEPT_IS(x) noexcept(x)
+#else
+#  define CATCH_NOEXCEPT throw()
+#  define CATCH_NOEXCEPT_IS(x)
+#endif
+
+// nullptr support
+#ifdef CATCH_CONFIG_CPP11_NULLPTR
+#   define CATCH_NULL nullptr
+#else
+#   define CATCH_NULL NULL
+#endif
+
+// override support
+#ifdef CATCH_CONFIG_CPP11_OVERRIDE
+#   define CATCH_OVERRIDE override
+#else
+#   define CATCH_OVERRIDE
+#endif
+
+// unique_ptr support
+#ifdef CATCH_CONFIG_CPP11_UNIQUE_PTR
+#   define CATCH_AUTO_PTR( T ) std::unique_ptr<T>
+#else
+#   define CATCH_AUTO_PTR( T ) std::auto_ptr<T>
+#endif
+
 namespace Catch {
 
-	class NonCopyable {
-		NonCopyable( const NonCopyable& );
-		void operator = ( const NonCopyable& );
-	protected:
-		NonCopyable() {}
-		virtual ~NonCopyable() {}
-	};
+    struct IConfig;
+
+    struct CaseSensitive { enum Choice {
+        Yes,
+        No
+    }; };
+
+    class NonCopyable {
+#ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS
+        NonCopyable( NonCopyable const& )              = delete;
+        NonCopyable( NonCopyable && )                  = delete;
+        NonCopyable& operator = ( NonCopyable const& ) = delete;
+        NonCopyable& operator = ( NonCopyable && )     = delete;
+#else
+        NonCopyable( NonCopyable const& info );
+        NonCopyable& operator = ( NonCopyable const& );
+#endif
+
+    protected:
+        NonCopyable() {}
+        virtual ~NonCopyable();
+    };
 
     class SafeBool {
     public:
@@ -59,121 +356,139 @@
         typename ContainerT::const_iterator it = container.begin();
         typename ContainerT::const_iterator itEnd = container.end();
         for(; it != itEnd; ++it )
-        {
             delete *it;
-        }
     }
     template<typename AssociativeContainerT>
     inline void deleteAllValues( AssociativeContainerT& container ) {
         typename AssociativeContainerT::const_iterator it = container.begin();
         typename AssociativeContainerT::const_iterator itEnd = container.end();
         for(; it != itEnd; ++it )
-        {
             delete it->second;
-        }
-    }
-
-    template<typename ContainerT, typename Function>
-    inline void forEach( ContainerT& container, Function function ) {
-        std::for_each( container.begin(), container.end(), function );
-    }
-
-    template<typename ContainerT, typename Function>
-    inline void forEach( const ContainerT& container, Function function ) {
-        std::for_each( container.begin(), container.end(), function );
-    }
+    }
+
+    bool startsWith( std::string const& s, std::string const& prefix );
+    bool endsWith( std::string const& s, std::string const& suffix );
+    bool contains( std::string const& s, std::string const& infix );
+    void toLowerInPlace( std::string& s );
+    std::string toLower( std::string const& s );
+    std::string trim( std::string const& str );
+    bool replaceInPlace( std::string& str, std::string const& replaceThis, std::string const& withThis );
+
+    struct pluralise {
+        pluralise( std::size_t count, std::string const& label );
+
+        friend std::ostream& operator << ( std::ostream& os, pluralise const& pluraliser );
+
+        std::size_t m_count;
+        std::string m_label;
+    };
 
     struct SourceLineInfo {
 
-        SourceLineInfo() : line( 0 ){}
-        SourceLineInfo( const std::string& _file, std::size_t _line )
-        :   file( _file ),
-            line( _line )
-        {}
-        SourceLineInfo( const SourceLineInfo& other )
-        :   file( other.file ),
-            line( other.line )
-        {}
-        void swap( SourceLineInfo& other ){
-            file.swap( other.file );
-            std::swap( line, other.line );
-        }
+        SourceLineInfo();
+        SourceLineInfo( char const* _file, std::size_t _line );
+        SourceLineInfo( SourceLineInfo const& other );
+#  ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS
+        SourceLineInfo( SourceLineInfo && )                  = default;
+        SourceLineInfo& operator = ( SourceLineInfo const& ) = default;
+        SourceLineInfo& operator = ( SourceLineInfo && )     = default;
+#  endif
+        bool empty() const;
+        bool operator == ( SourceLineInfo const& other ) const;
+        bool operator < ( SourceLineInfo const& other ) const;
 
         std::string file;
         std::size_t line;
     };
 
-    inline std::ostream& operator << ( std::ostream& os, const SourceLineInfo& info ) {
-#ifndef __GNUG__
-        os << info.file << "(" << info.line << "): ";
-#else
-        os << info.file << ":" << info.line << ": ";
-#endif
-        return os;
-    }
-
-    ATTRIBUTE_NORETURN
-    inline void throwLogicError( const std::string& message, const std::string& file, std::size_t line ) {
-        std::ostringstream oss;
-        oss << "Internal Catch error: '" << message << "' at: " << SourceLineInfo( file, line );
-        throw std::logic_error( oss.str() );
+    std::ostream& operator << ( std::ostream& os, SourceLineInfo const& info );
+
+    // This is just here to avoid compiler warnings with macro constants and boolean literals
+    inline bool isTrue( bool value ){ return value; }
+    inline bool alwaysTrue() { return true; }
+    inline bool alwaysFalse() { return false; }
+
+    void throwLogicError( std::string const& message, SourceLineInfo const& locationInfo );
+
+    void seedRng( IConfig const& config );
+    unsigned int rngSeed();
+
+    // Use this in variadic streaming macros to allow
+    //    >> +StreamEndStop
+    // as well as
+    //    >> stuff +StreamEndStop
+    struct StreamEndStop {
+        std::string operator+() {
+            return std::string();
+        }
+    };
+    template<typename T>
+    T const& operator + ( T const& value, StreamEndStop ) {
+        return value;
     }
 }
 
-#define CATCH_INTERNAL_ERROR( msg ) throwLogicError( msg, __FILE__, __LINE__ );
-#define CATCH_INTERNAL_LINEINFO ::Catch::SourceLineInfo( __FILE__, __LINE__ )
-
-// #included from: catch_totals.hpp
+#define CATCH_INTERNAL_LINEINFO ::Catch::SourceLineInfo( __FILE__, static_cast<std::size_t>( __LINE__ ) )
+#define CATCH_INTERNAL_ERROR( msg ) ::Catch::throwLogicError( msg, CATCH_INTERNAL_LINEINFO );
+
+#include <ostream>
 
 namespace Catch {
 
-    struct Counts {
-        Counts() : passed( 0 ), failed( 0 ) {}
-
-        Counts operator - ( const Counts& other ) const {
-            Counts diff;
-            diff.passed = passed - other.passed;
-            diff.failed = failed - other.failed;
-            return diff;
-        }
-        Counts& operator += ( const Counts& other ) {
-            passed += other.passed;
-            failed += other.failed;
-            return *this;
-        }
-
-        std::size_t total() const {
-            return passed + failed;
-        }
-
-        std::size_t passed;
-        std::size_t failed;
-    };
-
-    struct Totals {
-
-        Totals operator - ( const Totals& other ) const {
-            Totals diff;
-            diff.assertions = assertions - other.assertions;
-            diff.testCases = testCases - other.testCases;
-            return diff;
-        }
-
-        Totals delta( const Totals& prevTotals ) const {
-            Totals diff = *this - prevTotals;
-            if( diff.assertions.failed > 0 )
-                ++diff.testCases.failed;
-            else
-                ++diff.testCases.passed;
-            return diff;
-        }
-
-        Counts assertions;
-        Counts testCases;
-    };
-}
+    class NotImplementedException : public std::exception
+    {
+    public:
+        NotImplementedException( SourceLineInfo const& lineInfo );
+        NotImplementedException( NotImplementedException const& ) {}
+
+        virtual ~NotImplementedException() CATCH_NOEXCEPT {}
+
+        virtual const char* what() const CATCH_NOEXCEPT;
+
+    private:
+        std::string m_what;
+        SourceLineInfo m_lineInfo;
+    };
+
+} // end namespace Catch
+
+///////////////////////////////////////////////////////////////////////////////
+#define CATCH_NOT_IMPLEMENTED throw Catch::NotImplementedException( CATCH_INTERNAL_LINEINFO )
+
+// #included from: internal/catch_context.h
+#define TWOBLUECUBES_CATCH_CONTEXT_H_INCLUDED
+
+// #included from: catch_interfaces_generators.h
+#define TWOBLUECUBES_CATCH_INTERFACES_GENERATORS_H_INCLUDED
+
+#include <string>
+
+namespace Catch {
+
+    struct IGeneratorInfo {
+        virtual ~IGeneratorInfo();
+        virtual bool moveNext() = 0;
+        virtual std::size_t getCurrentIndex() const = 0;
+    };
+
+    struct IGeneratorsForTest {
+        virtual ~IGeneratorsForTest();
+
+        virtual IGeneratorInfo& getGeneratorInfo( std::string const& fileInfo, std::size_t size ) = 0;
+        virtual bool moveNext() = 0;
+    };
+
+    IGeneratorsForTest* createGeneratorsForTest();
+
+} // end namespace Catch
 
 // #included from: catch_ptr.hpp
+#define TWOBLUECUBES_CATCH_PTR_HPP_INCLUDED
+
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wpadded"
+#endif
 
 namespace Catch {
 
@@ -183,145 +498,72 @@
     template<typename T>
     class Ptr {
     public:
-        Ptr() : m_p( NULL ){}
+        Ptr() : m_p( CATCH_NULL ){}
         Ptr( T* p ) : m_p( p ){
-            m_p->addRef();
-        }
-        Ptr( const Ptr& other ) : m_p( other.m_p ){
-            m_p->addRef();
+            if( m_p )
+                m_p->addRef();
+        }
+        Ptr( Ptr const& other ) : m_p( other.m_p ){
+            if( m_p )
+                m_p->addRef();
         }
         ~Ptr(){
             if( m_p )
                 m_p->release();
         }
+        void reset() {
+            if( m_p )
+                m_p->release();
+            m_p = CATCH_NULL;
+        }
         Ptr& operator = ( T* p ){
             Ptr temp( p );
             swap( temp );
             return *this;
         }
-        Ptr& operator = ( Ptr& other ){
+        Ptr& operator = ( Ptr const& other ){
             Ptr temp( other );
             swap( temp );
             return *this;
         }
-        void swap( Ptr& other ){
-            std::swap( m_p, other.m_p );
-        }
-
-        T* get(){
-            return m_p;
-        }
-        const T* get() const{
-            return m_p;
-        }
-
-        T& operator*(){
-            return *m_p;
-        }
-        const T& operator*() const{
-            return *m_p;
-        }
-
-        T* operator->(){
-            return m_p;
-        }
-        const T* operator->() const{
-            return m_p;
-        }
+        void swap( Ptr& other ) { std::swap( m_p, other.m_p ); }
+        T* get() const{ return m_p; }
+        T& operator*() const { return *m_p; }
+        T* operator->() const { return m_p; }
+        bool operator !() const { return m_p == CATCH_NULL; }
+        operator SafeBool::type() const { return SafeBool::makeSafe( m_p != CATCH_NULL ); }
 
     private:
         T* m_p;
     };
 
     struct IShared : NonCopyable {
-        virtual ~IShared(){}
-        virtual void addRef() = 0;
-        virtual void release() = 0;
-    };
-
-    template<typename T>
+        virtual ~IShared();
+        virtual void addRef() const = 0;
+        virtual void release() const = 0;
+    };
+
+    template<typename T = IShared>
     struct SharedImpl : T {
 
         SharedImpl() : m_rc( 0 ){}
 
-        virtual void addRef(){
+        virtual void addRef() const {
             ++m_rc;
         }
-        virtual void release(){
+        virtual void release() const {
             if( --m_rc == 0 )
                 delete this;
         }
 
-        int m_rc;
+        mutable unsigned int m_rc;
     };
 
 } // end namespace Catch
 
-#include <string>
-#include <ostream>
-#include <map>
-
-namespace Catch
-{
-    struct IReporterConfig {
-        virtual ~IReporterConfig() {}
-        virtual std::ostream& stream () const = 0;
-        virtual bool includeSuccessfulResults () const = 0;
-        virtual std::string getName () const = 0;
-    };
-
-    class TestCaseInfo;
-    class ResultInfo;
-
-    struct IReporter : IShared {
-        virtual ~IReporter() {}
-        virtual bool shouldRedirectStdout() const = 0;
-        virtual void StartTesting() = 0;
-        virtual void EndTesting( const Totals& totals ) = 0;
-        virtual void StartGroup( const std::string& groupName ) = 0;
-        virtual void EndGroup( const std::string& groupName, const Totals& totals ) = 0;
-        virtual void StartSection( const std::string& sectionName, const std::string& description ) = 0;
-        virtual void EndSection( const std::string& sectionName, const Counts& assertions ) = 0;
-        virtual void StartTestCase( const TestCaseInfo& testInfo ) = 0;
-        virtual void Aborted() = 0;
-        virtual void EndTestCase( const TestCaseInfo& testInfo, const Totals& totals, const std::string& stdOut, const std::string& stdErr ) = 0;
-        virtual void Result( const ResultInfo& result ) = 0;
-    };
-
-    struct IReporterFactory {
-        virtual ~IReporterFactory() {}
-        virtual IReporter* create( const IReporterConfig& config ) const = 0;
-        virtual std::string getDescription() const = 0;
-    };
-
-    struct IReporterRegistry {
-        typedef std::map<std::string, IReporterFactory*> FactoryMap;
-
-        virtual ~IReporterRegistry() {}
-        virtual IReporter* create( const std::string& name, const IReporterConfig& config ) const = 0;
-        virtual void registerReporter( const std::string& name, IReporterFactory* factory ) = 0;
-        virtual const FactoryMap& getFactories() const = 0;
-    };
-
-    inline std::string trim( const std::string& str ) {
-        std::string::size_type start = str.find_first_not_of( "\n\r\t " );
-        std::string::size_type end = str.find_last_not_of( "\n\r\t " );
-
-        return start != std::string::npos ? str.substr( start, 1+end-start ) : "";
-    }
-}
-
-// #included from: catch_interfaces_config.h
-
-namespace Catch {
-
-    struct IConfig {
-
-        virtual ~IConfig(){}
-
-        virtual bool allowThrows() const = 0;
-    };
-}
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
 
 #include <memory>
 #include <vector>
@@ -329,110 +571,76 @@
 
 namespace Catch {
 
-    class TestCaseInfo;
+    class TestCase;
+    class Stream;
     struct IResultCapture;
-    struct ITestCaseRegistry;
     struct IRunner;
-    struct IExceptionTranslatorRegistry;
-    class GeneratorsForTest;
-
-    class StreamBufBase : public std::streambuf{};
+    struct IGeneratorsForTest;
+    struct IConfig;
 
     struct IContext
     {
-        virtual ~IContext(){}
-
-        virtual IResultCapture& getResultCapture() = 0;
-        virtual IRunner& getRunner() = 0;
-        virtual IReporterRegistry& getReporterRegistry() = 0;
-        virtual ITestCaseRegistry& getTestCaseRegistry() = 0;
-        virtual IExceptionTranslatorRegistry& getExceptionTranslatorRegistry() = 0;
-        virtual size_t getGeneratorIndex( const std::string& fileInfo, size_t totalSize ) = 0;
+        virtual ~IContext();
+
+        virtual IResultCapture* getResultCapture() = 0;
+        virtual IRunner* getRunner() = 0;
+        virtual size_t getGeneratorIndex( std::string const& fileInfo, size_t totalSize ) = 0;
         virtual bool advanceGeneratorsForCurrentTest() = 0;
-        virtual const IConfig* getConfig() const = 0;
+        virtual Ptr<IConfig const> getConfig() const = 0;
     };
 
     struct IMutableContext : IContext
     {
+        virtual ~IMutableContext();
         virtual void setResultCapture( IResultCapture* resultCapture ) = 0;
         virtual void setRunner( IRunner* runner ) = 0;
-        virtual void setConfig( const IConfig* config ) = 0;
+        virtual void setConfig( Ptr<IConfig const> const& config ) = 0;
     };
 
     IContext& getCurrentContext();
     IMutableContext& getCurrentMutableContext();
-
-    class Context : public IMutableContext {
-
-        Context();
-        Context( const Context& );
-        void operator=( const Context& );
-
-    public: // IContext
-        virtual IResultCapture& getResultCapture();
-        virtual IRunner& getRunner();
-        virtual IReporterRegistry& getReporterRegistry();
-        virtual ITestCaseRegistry& getTestCaseRegistry();
-        virtual IExceptionTranslatorRegistry& getExceptionTranslatorRegistry();
-        virtual size_t getGeneratorIndex( const std::string& fileInfo, size_t totalSize );
-        virtual bool advanceGeneratorsForCurrentTest();
-        virtual const IConfig* getConfig() const;
-
-    public: // IMutableContext
-        virtual void setResultCapture( IResultCapture* resultCapture );
-        virtual void setRunner( IRunner* runner );
-        virtual void setConfig( const IConfig* config );
-
-    public: // Statics
-        static std::streambuf* createStreamBuf( const std::string& streamName );
-        static void cleanUp();
-
-        friend IMutableContext& getCurrentMutableContext();
-
-    private:
-        GeneratorsForTest* findGeneratorsForCurrentTest();
-        GeneratorsForTest& getGeneratorsForCurrentTest();
-
-    private:
-        std::auto_ptr<IReporterRegistry> m_reporterRegistry;
-        std::auto_ptr<ITestCaseRegistry> m_testCaseRegistry;
-        std::auto_ptr<IExceptionTranslatorRegistry> m_exceptionTranslatorRegistry;
-        IRunner* m_runner;
-        IResultCapture* m_resultCapture;
-        const IConfig* m_config;
-        std::map<std::string, GeneratorsForTest*> m_generatorsByTestName;
-    };
+    void cleanUpContext();
+    Stream createStream( std::string const& streamName );
+
 }
 
 // #included from: internal/catch_test_registry.hpp
+#define TWOBLUECUBES_CATCH_TEST_REGISTRY_HPP_INCLUDED
 
 // #included from: catch_interfaces_testcase.h
+#define TWOBLUECUBES_CATCH_INTERFACES_TESTCASE_H_INCLUDED
 
 #include <vector>
 
 namespace Catch {
-    struct ITestCase {
-        virtual ~ITestCase(){}
+
+    class TestSpec;
+
+    struct ITestCase : IShared {
         virtual void invoke () const = 0;
-        virtual ITestCase* clone() const = 0;
-        virtual bool operator == ( const ITestCase& other ) const = 0;
-        virtual bool operator < ( const ITestCase& other ) const = 0;
-    };
-
-    class TestCaseInfo;
+    protected:
+        virtual ~ITestCase();
+    };
+
+    class TestCase;
+    struct IConfig;
 
     struct ITestCaseRegistry {
-        virtual ~ITestCaseRegistry(){}
-        virtual void registerTest( const TestCaseInfo& testInfo ) = 0;
-        virtual const std::vector<TestCaseInfo>& getAllTests() const = 0;
-        virtual std::vector<TestCaseInfo> getMatchingTestCases( const std::string& rawTestSpec ) = 0;
-    };
+        virtual ~ITestCaseRegistry();
+        virtual std::vector<TestCase> const& getAllTests() const = 0;
+        virtual std::vector<TestCase> const& getAllTestsSorted( IConfig const& config ) const = 0;
+    };
+
+    bool matchTest( TestCase const& testCase, TestSpec const& testSpec, IConfig const& config );
+    std::vector<TestCase> filterTests( std::vector<TestCase> const& testCases, TestSpec const& testSpec, IConfig const& config );
+    std::vector<TestCase> const& getAllTestCasesSorted( IConfig const& config );
+
 }
 
 namespace Catch {
 
 template<typename C>
-class MethodTestCase : public ITestCase {
+class MethodTestCase : public SharedImpl<ITestCase> {
 
 public:
     MethodTestCase( void (C::*method)() ) : m_method( method ) {}
@@ -442,367 +650,652 @@
         (obj.*m_method)();
     }
 
-    virtual ITestCase* clone() const {
-        return new MethodTestCase<C>( m_method );
-    }
-
-    virtual bool operator == ( const ITestCase& other ) const {
-        const MethodTestCase* mtOther = dynamic_cast<const MethodTestCase*>( &other );
-        return mtOther && m_method == mtOther->m_method;
-    }
-
-    virtual bool operator < ( const ITestCase& other ) const {
-        const MethodTestCase* mtOther = dynamic_cast<const MethodTestCase*>( &other );
-        return mtOther && &m_method < &mtOther->m_method;
-    }
-
 private:
+    virtual ~MethodTestCase() {}
+
     void (C::*m_method)();
 };
 
 typedef void(*TestFunction)();
 
+struct NameAndDesc {
+    NameAndDesc( const char* _name = "", const char* _description= "" )
+    : name( _name ), description( _description )
+    {}
+
+    const char* name;
+    const char* description;
+};
+
+void registerTestCase
+    (   ITestCase* testCase,
+        char const* className,
+        NameAndDesc const& nameAndDesc,
+        SourceLineInfo const& lineInfo );
+
 struct AutoReg {
 
-    AutoReg(    TestFunction function,
-                const char* name,
-                const char* description,
-                const SourceLineInfo& lineInfo );
+    AutoReg
+        (   TestFunction function,
+            SourceLineInfo const& lineInfo,
+            NameAndDesc const& nameAndDesc );
 
     template<typename C>
-    AutoReg(    void (C::*method)(),
-                const char* name,
-                const char* description,
-                const SourceLineInfo& lineInfo ) {
-        registerTestCase( new MethodTestCase<C>( method ), name, description, lineInfo );
-    }
-
-    void registerTestCase(  ITestCase* testCase,
-                            const char* name,
-                            const char* description,
-                            const SourceLineInfo& lineInfo );
+    AutoReg
+        (   void (C::*method)(),
+            char const* className,
+            NameAndDesc const& nameAndDesc,
+            SourceLineInfo const& lineInfo ) {
+
+        registerTestCase
+            (   new MethodTestCase<C>( method ),
+                className,
+                nameAndDesc,
+                lineInfo );
+    }
 
     ~AutoReg();
 
 private:
-    AutoReg( const AutoReg& );
-    void operator= ( const AutoReg& );
+    AutoReg( AutoReg const& );
+    void operator= ( AutoReg const& );
 };
 
+void registerTestCaseFunction
+    (   TestFunction function,
+        SourceLineInfo const& lineInfo,
+        NameAndDesc const& nameAndDesc );
+
 } // end namespace Catch
 
-///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CATCH_TESTCASE( Name, Desc ) \
-    static void INTERNAL_CATCH_UNIQUE_NAME( TestCaseFunction_catch_internal_ )(); \
-    namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &INTERNAL_CATCH_UNIQUE_NAME(  TestCaseFunction_catch_internal_ ), Name, Desc, CATCH_INTERNAL_LINEINFO ); }\
-    static void INTERNAL_CATCH_UNIQUE_NAME(  TestCaseFunction_catch_internal_ )()
-
-///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CATCH_TESTCASE_NORETURN( Name, Desc ) \
-    static void INTERNAL_CATCH_UNIQUE_NAME( TestCaseFunction_catch_internal_ )() ATTRIBUTE_NORETURN; \
-    namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &INTERNAL_CATCH_UNIQUE_NAME(  TestCaseFunction_catch_internal_ ), Name, Desc, CATCH_INTERNAL_LINEINFO ); }\
-    static void INTERNAL_CATCH_UNIQUE_NAME(  TestCaseFunction_catch_internal_ )()
-
-///////////////////////////////////////////////////////////////////////////////
-#define CATCH_METHOD_AS_TEST_CASE( QualifiedMethod, Name, Desc ) \
-    namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &QualifiedMethod, Name, Desc, CATCH_INTERNAL_LINEINFO ); }
-
-///////////////////////////////////////////////////////////////////////////////
-#define TEST_CASE_METHOD( ClassName, TestName, Desc )\
-    namespace{ \
-        struct INTERNAL_CATCH_UNIQUE_NAME( TestCaseMethod_catch_internal_ ) : ClassName{ \
-            void test(); \
-        }; \
-        Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar ) ( &INTERNAL_CATCH_UNIQUE_NAME( TestCaseMethod_catch_internal_ )::test, TestName, Desc, CATCH_INTERNAL_LINEINFO ); \
-    } \
-    void INTERNAL_CATCH_UNIQUE_NAME( TestCaseMethod_catch_internal_ )::test()
+#ifdef CATCH_CONFIG_VARIADIC_MACROS
+    ///////////////////////////////////////////////////////////////////////////////
+    #define INTERNAL_CATCH_TESTCASE2( TestName, ... ) \
+        static void TestName(); \
+        namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &TestName, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( __VA_ARGS__ ) ); }\
+        static void TestName()
+    #define INTERNAL_CATCH_TESTCASE( ... ) \
+        INTERNAL_CATCH_TESTCASE2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), __VA_ARGS__ )
+
+    ///////////////////////////////////////////////////////////////////////////////
+    #define INTERNAL_CATCH_METHOD_AS_TEST_CASE( QualifiedMethod, ... ) \
+        namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &QualifiedMethod, "&" #QualifiedMethod, Catch::NameAndDesc( __VA_ARGS__ ), CATCH_INTERNAL_LINEINFO ); }
+
+    ///////////////////////////////////////////////////////////////////////////////
+    #define INTERNAL_CATCH_TEST_CASE_METHOD2( TestName, ClassName, ... )\
+        namespace{ \
+            struct TestName : ClassName{ \
+                void test(); \
+            }; \
+            Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar ) ( &TestName::test, #ClassName, Catch::NameAndDesc( __VA_ARGS__ ), CATCH_INTERNAL_LINEINFO ); \
+        } \
+        void TestName::test()
+    #define INTERNAL_CATCH_TEST_CASE_METHOD( ClassName, ... ) \
+        INTERNAL_CATCH_TEST_CASE_METHOD2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), ClassName, __VA_ARGS__ )
+
+    ///////////////////////////////////////////////////////////////////////////////
+    #define INTERNAL_CATCH_REGISTER_TESTCASE( Function, ... ) \
+        Catch::AutoReg( Function, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( __VA_ARGS__ ) );
+
+#else
+    ///////////////////////////////////////////////////////////////////////////////
+    #define INTERNAL_CATCH_TESTCASE2( TestName, Name, Desc ) \
+        static void TestName(); \
+        namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &TestName, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( Name, Desc ) ); }\
+        static void TestName()
+    #define INTERNAL_CATCH_TESTCASE( Name, Desc ) \
+        INTERNAL_CATCH_TESTCASE2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), Name, Desc )
+
+    ///////////////////////////////////////////////////////////////////////////////
+    #define INTERNAL_CATCH_METHOD_AS_TEST_CASE( QualifiedMethod, Name, Desc ) \
+        namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &QualifiedMethod, "&" #QualifiedMethod, Catch::NameAndDesc( Name, Desc ), CATCH_INTERNAL_LINEINFO ); }
+
+    ///////////////////////////////////////////////////////////////////////////////
+    #define INTERNAL_CATCH_TEST_CASE_METHOD2( TestCaseName, ClassName, TestName, Desc )\
+        namespace{ \
+            struct TestCaseName : ClassName{ \
+                void test(); \
+            }; \
+            Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar ) ( &TestCaseName::test, #ClassName, Catch::NameAndDesc( TestName, Desc ), CATCH_INTERNAL_LINEINFO ); \
+        } \
+        void TestCaseName::test()
+    #define INTERNAL_CATCH_TEST_CASE_METHOD( ClassName, TestName, Desc )\
+        INTERNAL_CATCH_TEST_CASE_METHOD2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), ClassName, TestName, Desc )
+
+    ///////////////////////////////////////////////////////////////////////////////
+    #define INTERNAL_CATCH_REGISTER_TESTCASE( Function, Name, Desc ) \
+        Catch::AutoReg( Function, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( Name, Desc ) );
+#endif
 
 // #included from: internal/catch_capture.hpp
-
-// #included from: catch_expression_builder.hpp
-
-// #included from: catch_expression.hpp
-
-// #included from: catch_resultinfo_builder.hpp
-
-// #included from: catch_tostring.hpp
-
-#include <sstream>
+#define TWOBLUECUBES_CATCH_CAPTURE_HPP_INCLUDED
+
+// #included from: catch_result_builder.h
+#define TWOBLUECUBES_CATCH_RESULT_BUILDER_H_INCLUDED
+
+// #included from: catch_result_type.h
+#define TWOBLUECUBES_CATCH_RESULT_TYPE_H_INCLUDED
 
 namespace Catch {
-namespace Detail {
-
-    struct NonStreamable {
-        template<typename T> NonStreamable( const T& ){}
-    };
-
-    // If the type does not have its own << overload for ostream then
-    // this one will be used instead
-    inline std::ostream& operator << ( std::ostream& ss, NonStreamable ){
-        return ss << "{?}";
-    }
-
-    template<typename T>
-    inline std::string makeString( const T& value ) {
-        std::ostringstream oss;
-        oss << value;
-        return oss.str();
-    }
-
-    template<typename T>
-    inline std::string makeString( T* p ) {
-        if( !p )
-            return INTERNAL_CATCH_STRINGIFY( NULL );
-        std::ostringstream oss;
-        oss << p;
-        return oss.str();
-    }
-
-    template<typename T>
-    inline std::string makeString( const T* p ) {
-        if( !p )
-            return INTERNAL_CATCH_STRINGIFY( NULL );
-        std::ostringstream oss;
-        oss << p;
-        return oss.str();
-    }
-
-} // end namespace Detail
-
-/// \brief converts any type to a string
-///
-/// The default template forwards on to ostringstream - except when an
-/// ostringstream overload does not exist - in which case it attempts to detect
-/// that and writes {?}.
-/// Overload (not specialise) this template for custom typs that you don't want
-/// to provide an ostream overload for.
-template<typename T>
-std::string toString( const T& value ) {
-    return Detail::makeString( value );
-}
-
-// Built in overloads
-
-inline std::string toString( const std::string& value ) {
-    return "\"" + value + "\"";
-}
-
-inline std::string toString( const std::wstring& value ) {
-    std::ostringstream oss;
-    oss << "\"";
-    for(size_t i = 0; i < value.size(); ++i )
-        oss << static_cast<char>( value[i] <= 0xff ? value[i] : '?');
-    oss << "\"";
-    return oss.str();
-}
-
-inline std::string toString( const char* const value ) {
-    return value ? Catch::toString( std::string( value ) ) : std::string( "{null string}" );
-}
-
-inline std::string toString( char* const value ) {
-    return Catch::toString( static_cast<const char*>( value ) );
-}
-
-inline std::string toString( int value ) {
-    std::ostringstream oss;
-    oss << value;
-    return oss.str();
-}
-
-inline std::string toString( unsigned long value ) {
-    std::ostringstream oss;
-    if( value > 8192 )
-        oss << "0x" << std::hex << value;
-    else
-        oss << value;
-    return oss.str();
-}
-
-inline std::string toString( unsigned int value ) {
-    return toString( static_cast<unsigned long>( value ) );
-}
-
-inline std::string toString( const double value ) {
-    std::ostringstream oss;
-    oss << value;
-    return oss.str();
-}
-
-inline std::string toString( bool value ) {
-    return value ? "true" : "false";
-}
-
-inline std::string toString( char value ) {
-    return value < ' '
-        ? toString( (unsigned int)value )
-        : Detail::makeString( value );
-}
-
-inline std::string toString( signed char value ) {
-    return toString( static_cast<char>( value ) );
-}
-
-#ifdef CATCH_CONFIG_CPP11_NULLPTR
-inline std::string toString( std::nullptr_t ) {
-    return "nullptr";
-}
-#endif
+
+    // ResultWas::OfType enum
+    struct ResultWas { enum OfType {
+        Unknown = -1,
+        Ok = 0,
+        Info = 1,
+        Warning = 2,
+
+        FailureBit = 0x10,
+
+        ExpressionFailed = FailureBit | 1,
+        ExplicitFailure = FailureBit | 2,
+
+        Exception = 0x100 | FailureBit,
+
+        ThrewException = Exception | 1,
+        DidntThrowException = Exception | 2,
+
+        FatalErrorCondition = 0x200 | FailureBit
+
+    }; };
+
+    inline bool isOk( ResultWas::OfType resultType ) {
+        return ( resultType & ResultWas::FailureBit ) == 0;
+    }
+    inline bool isJustInfo( int flags ) {
+        return flags == ResultWas::Info;
+    }
+
+    // ResultDisposition::Flags enum
+    struct ResultDisposition { enum Flags {
+        Normal = 0x01,
+
+        ContinueOnFailure = 0x02,   // Failures fail test, but execution continues
+        FalseTest = 0x04,           // Prefix expression with !
+        SuppressFail = 0x08         // Failures are reported but do not fail the test
+    }; };
+
+    inline ResultDisposition::Flags operator | ( ResultDisposition::Flags lhs, ResultDisposition::Flags rhs ) {
+        return static_cast<ResultDisposition::Flags>( static_cast<int>( lhs ) | static_cast<int>( rhs ) );
+    }
+
+    inline bool shouldContinueOnFailure( int flags )    { return ( flags & ResultDisposition::ContinueOnFailure ) != 0; }
+    inline bool isFalseTest( int flags )                { return ( flags & ResultDisposition::FalseTest ) != 0; }
+    inline bool shouldSuppressFailure( int flags )      { return ( flags & ResultDisposition::SuppressFail ) != 0; }
 
 } // end namespace Catch
 
-// #included from: catch_resultinfo.hpp
+// #included from: catch_assertionresult.h
+#define TWOBLUECUBES_CATCH_ASSERTIONRESULT_H_INCLUDED
 
 #include <string>
-// #included from: catch_result_type.h
 
 namespace Catch {
 
-struct ResultWas { enum OfType {
-    Unknown = -1,
-    Ok = 0,
-    Info = 1,
-    Warning = 2,
-
-    FailureBit = 0x10,
-
-    ExpressionFailed = FailureBit | 1,
-    ExplicitFailure = FailureBit | 2,
-
-    Exception = 0x100 | FailureBit,
-
-    ThrewException = Exception | 1,
-    DidntThrowException = Exception | 2
-
-}; };
-
-struct ResultAction { enum Value {
-    None,
-    Failed = 1, // Failure - but no debug break if Debug bit not set
-    Debug = 2,  // If this bit is set, invoke the debugger
-    Abort = 4   // Test run should abort
-}; };
-
-}
-
-
-namespace Catch {
-
-    class ResultInfo {
+    struct AssertionInfo
+    {
+        AssertionInfo() {}
+        AssertionInfo(  std::string const& _macroName,
+                        SourceLineInfo const& _lineInfo,
+                        std::string const& _capturedExpression,
+                        ResultDisposition::Flags _resultDisposition );
+
+        std::string macroName;
+        SourceLineInfo lineInfo;
+        std::string capturedExpression;
+        ResultDisposition::Flags resultDisposition;
+    };
+
+    struct AssertionResultData
+    {
+        AssertionResultData() : resultType( ResultWas::Unknown ) {}
+
+        std::string reconstructedExpression;
+        std::string message;
+        ResultWas::OfType resultType;
+    };
+
+    class AssertionResult {
     public:
-        ResultInfo()
-        :   m_macroName(),
-            m_expr(),
-            m_lhs(),
-            m_rhs(),
-            m_op(),
-            m_message(),
-            m_result( ResultWas::Unknown ),
-            m_isNot( false )
-        {}
-
-        ResultInfo( const char* expr,
-                    ResultWas::OfType result,
-                    bool isNot,
-                    const SourceLineInfo& lineInfo,
-                    const char* macroName,
-                    const char* message )
-        :   m_macroName( macroName ),
-            m_lineInfo( lineInfo ),
-            m_expr( expr ),
-            m_lhs(),
-            m_rhs(),
-            m_op( isNotExpression( expr ) ? "!" : "" ),
-            m_message( message ),
-            m_result( result ),
-            m_isNot( isNot )
-        {
-            if( isNot )
-                m_expr = "!" + m_expr;
-        }
-
-        virtual ~ResultInfo() {}
-
-        bool ok() const {
-            return ( m_result & ResultWas::FailureBit ) != ResultWas::FailureBit;
-        }
-
-        ResultWas::OfType getResultType() const {
-            return m_result;
-        }
-
-        bool hasExpression() const {
-            return !m_expr.empty();
-        }
-
-        bool hasMessage() const {
-            return !m_message.empty();
-        }
-
-        std::string getExpression() const {
-            return m_expr;
-        }
-
-        bool hasExpandedExpression() const {
-            return hasExpression() && getExpandedExpressionInternal() != m_expr;
-        }
-
-        std::string getExpandedExpression() const {
-            return hasExpression() ? getExpandedExpressionInternal() : "";
-        }
-
-        std::string getMessage() const {
-            return m_message;
-        }
-
-        std::string getFilename() const {
-            return m_lineInfo.file;
-        }
-
-        std::size_t getLine() const {
-            return m_lineInfo.line;
-        }
-
-        std::string getTestMacroName() const {
-            return m_macroName;
-        }
+        AssertionResult();
+        AssertionResult( AssertionInfo const& info, AssertionResultData const& data );
+        ~AssertionResult();
+#  ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS
+         AssertionResult( AssertionResult const& )              = default;
+         AssertionResult( AssertionResult && )                  = default;
+         AssertionResult& operator = ( AssertionResult const& ) = default;
+         AssertionResult& operator = ( AssertionResult && )     = default;
+#  endif
+
+        bool isOk() const;
+        bool succeeded() const;
+        ResultWas::OfType getResultType() const;
+        bool hasExpression() const;
+        bool hasMessage() const;
+        std::string getExpression() const;
+        std::string getExpressionInMacro() const;
+        bool hasExpandedExpression() const;
+        std::string getExpandedExpression() const;
+        std::string getMessage() const;
+        SourceLineInfo getSourceInfo() const;
+        std::string getTestMacroName() const;
 
     protected:
-
-        std::string getExpandedExpressionInternal() const {
-            if( m_op == "" || m_isNot )
-                return m_lhs.empty() ? m_expr : m_op + m_lhs;
-            else if( m_op == "matches" )
-                return m_lhs + " " + m_rhs;
-            else if( m_op != "!" )
-            {
-                if( m_lhs.size() + m_rhs.size() < 30 )
-                    return m_lhs + " " + m_op + " " + m_rhs;
-                else if( m_lhs.size() < 70 && m_rhs.size() < 70 )
-                    return "\n\t" + m_lhs + "\n\t" + m_op + "\n\t" + m_rhs;
-                else
-                    return "\n" + m_lhs + "\n" + m_op + "\n" + m_rhs + "\n\n";
-            }
-            else
-                return "{can't expand - use " + m_macroName + "_FALSE( " + m_expr.substr(1) + " ) instead of " + m_macroName + "( " + m_expr + " ) for better diagnostics}";
-        }
-
-        bool isNotExpression( const char* expr ) {
-            return expr && expr[0] == '!';
-        }
-
-    protected:
-        std::string m_macroName;
-        SourceLineInfo m_lineInfo;
-        std::string m_expr, m_lhs, m_rhs, m_op;
-        std::string m_message;
-        ResultWas::OfType m_result;
-        bool m_isNot;
+        AssertionInfo m_info;
+        AssertionResultData m_resultData;
     };
 
 } // end namespace Catch
 
+// #included from: catch_matchers.hpp
+#define TWOBLUECUBES_CATCH_MATCHERS_HPP_INCLUDED
+
+namespace Catch {
+namespace Matchers {
+    namespace Impl {
+
+    namespace Generic {
+        template<typename ExpressionT> class AllOf;
+        template<typename ExpressionT> class AnyOf;
+        template<typename ExpressionT> class Not;
+    }
+
+    template<typename ExpressionT>
+    struct Matcher : SharedImpl<IShared>
+    {
+        typedef ExpressionT ExpressionType;
+
+        virtual ~Matcher() {}
+        virtual Ptr<Matcher> clone() const = 0;
+        virtual bool match( ExpressionT const& expr ) const = 0;
+        virtual std::string toString() const = 0;
+
+        Generic::AllOf<ExpressionT> operator && ( Matcher<ExpressionT> const& other ) const;
+        Generic::AnyOf<ExpressionT> operator || ( Matcher<ExpressionT> const& other ) const;
+        Generic::Not<ExpressionT> operator ! () const;
+    };
+
+    template<typename DerivedT, typename ExpressionT>
+    struct MatcherImpl : Matcher<ExpressionT> {
+
+        virtual Ptr<Matcher<ExpressionT> > clone() const {
+            return Ptr<Matcher<ExpressionT> >( new DerivedT( static_cast<DerivedT const&>( *this ) ) );
+        }
+    };
+
+    namespace Generic {
+        template<typename ExpressionT>
+        class Not : public MatcherImpl<Not<ExpressionT>, ExpressionT> {
+        public:
+            explicit Not( Matcher<ExpressionT> const& matcher ) : m_matcher(matcher.clone()) {}
+            Not( Not const& other ) : m_matcher( other.m_matcher ) {}
+
+            virtual bool match( ExpressionT const& expr ) const CATCH_OVERRIDE {
+                return !m_matcher->match( expr );
+            }
+
+            virtual std::string toString() const CATCH_OVERRIDE {
+                return "not " + m_matcher->toString();
+            }
+        private:
+            Ptr< Matcher<ExpressionT> > m_matcher;
+        };
+
+        template<typename ExpressionT>
+        class AllOf : public MatcherImpl<AllOf<ExpressionT>, ExpressionT> {
+        public:
+
+            AllOf() {}
+            AllOf( AllOf const& other ) : m_matchers( other.m_matchers ) {}
+
+            AllOf& add( Matcher<ExpressionT> const& matcher ) {
+                m_matchers.push_back( matcher.clone() );
+                return *this;
+            }
+            virtual bool match( ExpressionT const& expr ) const
+            {
+                for( std::size_t i = 0; i < m_matchers.size(); ++i )
+                    if( !m_matchers[i]->match( expr ) )
+                        return false;
+                return true;
+            }
+            virtual std::string toString() const {
+                std::ostringstream oss;
+                oss << "( ";
+                for( std::size_t i = 0; i < m_matchers.size(); ++i ) {
+                    if( i != 0 )
+                        oss << " and ";
+                    oss << m_matchers[i]->toString();
+                }
+                oss << " )";
+                return oss.str();
+            }
+
+            AllOf operator && ( Matcher<ExpressionT> const& other ) const {
+                AllOf allOfExpr( *this );
+                allOfExpr.add( other );
+                return allOfExpr;
+            }
+
+        private:
+            std::vector<Ptr<Matcher<ExpressionT> > > m_matchers;
+        };
+
+        template<typename ExpressionT>
+        class AnyOf : public MatcherImpl<AnyOf<ExpressionT>, ExpressionT> {
+        public:
+
+            AnyOf() {}
+            AnyOf( AnyOf const& other ) : m_matchers( other.m_matchers ) {}
+
+            AnyOf& add( Matcher<ExpressionT> const& matcher ) {
+                m_matchers.push_back( matcher.clone() );
+                return *this;
+            }
+            virtual bool match( ExpressionT const& expr ) const
+            {
+                for( std::size_t i = 0; i < m_matchers.size(); ++i )
+                    if( m_matchers[i]->match( expr ) )
+                        return true;
+                return false;
+            }
+            virtual std::string toString() const {
+                std::ostringstream oss;
+                oss << "( ";
+                for( std::size_t i = 0; i < m_matchers.size(); ++i ) {
+                    if( i != 0 )
+                        oss << " or ";
+                    oss << m_matchers[i]->toString();
+                }
+                oss << " )";
+                return oss.str();
+            }
+
+            AnyOf operator || ( Matcher<ExpressionT> const& other ) const {
+                AnyOf anyOfExpr( *this );
+                anyOfExpr.add( other );
+                return anyOfExpr;
+            }
+
+        private:
+            std::vector<Ptr<Matcher<ExpressionT> > > m_matchers;
+        };
+
+    } // namespace Generic
+
+    template<typename ExpressionT>
+    Generic::AllOf<ExpressionT> Matcher<ExpressionT>::operator && ( Matcher<ExpressionT> const& other ) const {
+        Generic::AllOf<ExpressionT> allOfExpr;
+        allOfExpr.add( *this );
+        allOfExpr.add( other );
+        return allOfExpr;
+    }
+
+    template<typename ExpressionT>
+    Generic::AnyOf<ExpressionT> Matcher<ExpressionT>::operator || ( Matcher<ExpressionT> const& other ) const {
+        Generic::AnyOf<ExpressionT> anyOfExpr;
+        anyOfExpr.add( *this );
+        anyOfExpr.add( other );
+        return anyOfExpr;
+    }
+
+    template<typename ExpressionT>
+    Generic::Not<ExpressionT> Matcher<ExpressionT>::operator ! () const {
+        return Generic::Not<ExpressionT>( *this );
+    }
+
+    namespace StdString {
+
+        inline std::string makeString( std::string const& str ) { return str; }
+        inline std::string makeString( const char* str ) { return str ? std::string( str ) : std::string(); }
+
+        struct CasedString
+        {
+            CasedString( std::string const& str, CaseSensitive::Choice caseSensitivity )
+            :   m_caseSensitivity( caseSensitivity ),
+                m_str( adjustString( str ) )
+            {}
+            std::string adjustString( std::string const& str ) const {
+                return m_caseSensitivity == CaseSensitive::No
+                    ? toLower( str )
+                    : str;
+
+            }
+            std::string toStringSuffix() const
+            {
+                return m_caseSensitivity == CaseSensitive::No
+                    ? " (case insensitive)"
+                    : "";
+            }
+            CaseSensitive::Choice m_caseSensitivity;
+            std::string m_str;
+        };
+
+        struct Equals : MatcherImpl<Equals, std::string> {
+            Equals( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes )
+            :   m_data( str, caseSensitivity )
+            {}
+            Equals( Equals const& other ) : m_data( other.m_data ){}
+
+            virtual ~Equals();
+
+            virtual bool match( std::string const& expr ) const {
+                return m_data.m_str == m_data.adjustString( expr );;
+            }
+            virtual std::string toString() const {
+                return "equals: \"" + m_data.m_str + "\"" + m_data.toStringSuffix();
+            }
+
+            CasedString m_data;
+        };
+
+        struct Contains : MatcherImpl<Contains, std::string> {
+            Contains( std::string const& substr, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes )
+            : m_data( substr, caseSensitivity ){}
+            Contains( Contains const& other ) : m_data( other.m_data ){}
+
+            virtual ~Contains();
+
+            virtual bool match( std::string const& expr ) const {
+                return m_data.adjustString( expr ).find( m_data.m_str ) != std::string::npos;
+            }
+            virtual std::string toString() const {
+                return "contains: \"" + m_data.m_str  + "\"" + m_data.toStringSuffix();
+            }
+
+            CasedString m_data;
+        };
+
+        struct StartsWith : MatcherImpl<StartsWith, std::string> {
+            StartsWith( std::string const& substr, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes )
+            : m_data( substr, caseSensitivity ){}
+
+            StartsWith( StartsWith const& other ) : m_data( other.m_data ){}
+
+            virtual ~StartsWith();
+
+            virtual bool match( std::string const& expr ) const {
+                return startsWith( m_data.adjustString( expr ), m_data.m_str );
+            }
+            virtual std::string toString() const {
+                return "starts with: \"" + m_data.m_str + "\"" + m_data.toStringSuffix();
+            }
+
+            CasedString m_data;
+        };
+
+        struct EndsWith : MatcherImpl<EndsWith, std::string> {
+            EndsWith( std::string const& substr, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes )
+            : m_data( substr, caseSensitivity ){}
+            EndsWith( EndsWith const& other ) : m_data( other.m_data ){}
+
+            virtual ~EndsWith();
+
+            virtual bool match( std::string const& expr ) const {
+                return endsWith( m_data.adjustString( expr ), m_data.m_str );
+            }
+            virtual std::string toString() const {
+                return "ends with: \"" + m_data.m_str + "\"" + m_data.toStringSuffix();
+            }
+
+            CasedString m_data;
+        };
+    } // namespace StdString
+    } // namespace Impl
+
+    // The following functions create the actual matcher objects.
+    // This allows the types to be inferred
+    template<typename ExpressionT>
+    inline Impl::Generic::Not<ExpressionT> Not( Impl::Matcher<ExpressionT> const& m ) {
+        return Impl::Generic::Not<ExpressionT>( m );
+    }
+
+    template<typename ExpressionT>
+    inline Impl::Generic::AllOf<ExpressionT> AllOf( Impl::Matcher<ExpressionT> const& m1,
+                                                    Impl::Matcher<ExpressionT> const& m2 ) {
+        return Impl::Generic::AllOf<ExpressionT>().add( m1 ).add( m2 );
+    }
+    template<typename ExpressionT>
+    inline Impl::Generic::AllOf<ExpressionT> AllOf( Impl::Matcher<ExpressionT> const& m1,
+                                                    Impl::Matcher<ExpressionT> const& m2,
+                                                    Impl::Matcher<ExpressionT> const& m3 ) {
+        return Impl::Generic::AllOf<ExpressionT>().add( m1 ).add( m2 ).add( m3 );
+    }
+    template<typename ExpressionT>
+    inline Impl::Generic::AnyOf<ExpressionT> AnyOf( Impl::Matcher<ExpressionT> const& m1,
+                                                    Impl::Matcher<ExpressionT> const& m2 ) {
+        return Impl::Generic::AnyOf<ExpressionT>().add( m1 ).add( m2 );
+    }
+    template<typename ExpressionT>
+    inline Impl::Generic::AnyOf<ExpressionT> AnyOf( Impl::Matcher<ExpressionT> const& m1,
+                                                    Impl::Matcher<ExpressionT> const& m2,
+                                                    Impl::Matcher<ExpressionT> const& m3 ) {
+        return Impl::Generic::AnyOf<ExpressionT>().add( m1 ).add( m2 ).add( m3 );
+    }
+
+    inline Impl::StdString::Equals      Equals( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes ) {
+        return Impl::StdString::Equals( str, caseSensitivity );
+    }
+    inline Impl::StdString::Equals      Equals( const char* str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes ) {
+        return Impl::StdString::Equals( Impl::StdString::makeString( str ), caseSensitivity );
+    }
+    inline Impl::StdString::Contains    Contains( std::string const& substr, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes ) {
+        return Impl::StdString::Contains( substr, caseSensitivity );
+    }
+    inline Impl::StdString::Contains    Contains( const char* substr, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes ) {
+        return Impl::StdString::Contains( Impl::StdString::makeString( substr ), caseSensitivity );
+    }
+    inline Impl::StdString::StartsWith  StartsWith( std::string const& substr ) {
+        return Impl::StdString::StartsWith( substr );
+    }
+    inline Impl::StdString::StartsWith  StartsWith( const char* substr ) {
+        return Impl::StdString::StartsWith( Impl::StdString::makeString( substr ) );
+    }
+    inline Impl::StdString::EndsWith    EndsWith( std::string const& substr ) {
+        return Impl::StdString::EndsWith( substr );
+    }
+    inline Impl::StdString::EndsWith    EndsWith( const char* substr ) {
+        return Impl::StdString::EndsWith( Impl::StdString::makeString( substr ) );
+    }
+
+} // namespace Matchers
+
+using namespace Matchers;
+
+} // namespace Catch
+
+namespace Catch {
+
+    struct TestFailureException{};
+
+    template<typename T> class ExpressionLhs;
+
+    struct STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison;
+
+    struct CopyableStream {
+        CopyableStream() {}
+        CopyableStream( CopyableStream const& other ) {
+            oss << other.oss.str();
+        }
+        CopyableStream& operator=( CopyableStream const& other ) {
+            oss.str("");
+            oss << other.oss.str();
+            return *this;
+        }
+        std::ostringstream oss;
+    };
+
+    class ResultBuilder {
+    public:
+        ResultBuilder(  char const* macroName,
+                        SourceLineInfo const& lineInfo,
+                        char const* capturedExpression,
+                        ResultDisposition::Flags resultDisposition,
+                        char const* secondArg = "" );
+
+        template<typename T>
+        ExpressionLhs<T const&> operator <= ( T const& operand );
+        ExpressionLhs<bool> operator <= ( bool value );
+
+        template<typename T>
+        ResultBuilder& operator << ( T const& value ) {
+            m_stream.oss << value;
+            return *this;
+        }
+
+        template<typename RhsT> STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator && ( RhsT const& );
+        template<typename RhsT> STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator || ( RhsT const& );
+
+        ResultBuilder& setResultType( ResultWas::OfType result );
+        ResultBuilder& setResultType( bool result );
+        ResultBuilder& setLhs( std::string const& lhs );
+        ResultBuilder& setRhs( std::string const& rhs );
+        ResultBuilder& setOp( std::string const& op );
+
+        void endExpression();
+
+        std::string reconstructExpression() const;
+        AssertionResult build() const;
+
+        void useActiveException( ResultDisposition::Flags resultDisposition = ResultDisposition::Normal );
+        void captureResult( ResultWas::OfType resultType );
+        void captureExpression();
+        void captureExpectedException( std::string const& expectedMessage );
+        void captureExpectedException( Matchers::Impl::Matcher<std::string> const& matcher );
+        void handleResult( AssertionResult const& result );
+        void react();
+        bool shouldDebugBreak() const;
+        bool allowThrows() const;
+
+    private:
+        AssertionInfo m_assertionInfo;
+        AssertionResultData m_data;
+        struct ExprComponents {
+            ExprComponents() : testFalse( false ) {}
+            bool testFalse;
+            std::string lhs, rhs, op;
+        } m_exprComponents;
+        CopyableStream m_stream;
+
+        bool m_shouldDebugBreak;
+        bool m_shouldThrow;
+    };
+
+} // namespace Catch
+
+// Include after due to circular dependency:
+// #included from: catch_expression_lhs.hpp
+#define TWOBLUECUBES_CATCH_EXPRESSION_LHS_HPP_INCLUDED
+
 // #included from: catch_evaluate.hpp
+#define TWOBLUECUBES_CATCH_EVALUATE_HPP_INCLUDED
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4389) // '==' : signed/unsigned mismatch
+#endif
+
+#include <cstddef>
 
 namespace Catch {
 namespace Internal {
@@ -824,6 +1317,14 @@
     template<> struct OperatorTraits<IsLessThanOrEqualTo>   { static const char* getName(){ return "<="; } };
     template<> struct OperatorTraits<IsGreaterThanOrEqualTo>{ static const char* getName(){ return ">="; } };
 
+    template<typename T>
+    inline T& opCast(T const& t) { return const_cast<T&>(t); }
+
+// nullptr_t support based on pull request #154 from Konstantin Baumann
+#ifdef CATCH_CONFIG_CPP11_NULLPTR
+    inline std::nullptr_t opCast(std::nullptr_t) { return nullptr; }
+#endif // CATCH_CONFIG_CPP11_NULLPTR
+
     // So the compare overloads can be operator agnostic we convey the operator as a template
     // enum, which is used to specialise an Evaluator for doing the comparison.
     template<typename T1, typename T2, Operator Op>
@@ -831,49 +1332,52 @@
 
     template<typename T1, typename T2>
     struct Evaluator<T1, T2, IsEqualTo> {
-        static bool evaluate( const T1& lhs, const T2& rhs) {
-            return const_cast<T1&>( lhs ) ==  const_cast<T2&>( rhs );
+        static bool evaluate( T1 const& lhs, T2 const& rhs) {
+            return bool( opCast( lhs ) ==  opCast( rhs ) );
         }
     };
     template<typename T1, typename T2>
     struct Evaluator<T1, T2, IsNotEqualTo> {
-        static bool evaluate( const T1& lhs, const T2& rhs ) {
-            return const_cast<T1&>( lhs ) != const_cast<T2&>( rhs );
+        static bool evaluate( T1 const& lhs, T2 const& rhs ) {
+            return bool( opCast( lhs ) != opCast( rhs ) );
         }
     };
     template<typename T1, typename T2>
     struct Evaluator<T1, T2, IsLessThan> {
-        static bool evaluate( const T1& lhs, const T2& rhs ) {
-            return const_cast<T1&>( lhs ) < const_cast<T2&>( rhs );
+        static bool evaluate( T1 const& lhs, T2 const& rhs ) {
+            return bool( opCast( lhs ) < opCast( rhs ) );
         }
     };
     template<typename T1, typename T2>
     struct Evaluator<T1, T2, IsGreaterThan> {
-        static bool evaluate( const T1& lhs, const T2& rhs ) {
-            return const_cast<T1&>( lhs ) > const_cast<T2&>( rhs );
+        static bool evaluate( T1 const& lhs, T2 const& rhs ) {
+            return bool( opCast( lhs ) > opCast( rhs ) );
         }
     };
     template<typename T1, typename T2>
     struct Evaluator<T1, T2, IsGreaterThanOrEqualTo> {
-        static bool evaluate( const T1& lhs, const T2& rhs ) {
-            return const_cast<T1&>( lhs ) >= const_cast<T2&>( rhs );
+        static bool evaluate( T1 const& lhs, T2 const& rhs ) {
+            return bool( opCast( lhs ) >= opCast( rhs ) );
         }
     };
     template<typename T1, typename T2>
     struct Evaluator<T1, T2, IsLessThanOrEqualTo> {
-        static bool evaluate( const T1& lhs, const T2& rhs ) {
-            return const_cast<T1&>( lhs ) <= const_cast<T2&>( rhs );
+        static bool evaluate( T1 const& lhs, T2 const& rhs ) {
+            return bool( opCast( lhs ) <= opCast( rhs ) );
         }
     };
 
     template<Operator Op, typename T1, typename T2>
-    bool applyEvaluator( const T1& lhs, const T2& rhs ) {
+    bool applyEvaluator( T1 const& lhs, T2 const& rhs ) {
         return Evaluator<T1, T2, Op>::evaluate( lhs, rhs );
     }
 
+    // This level of indirection allows us to specialise for integer types
+    // to avoid signed/ unsigned warnings
+
     // "base" overload
     template<Operator Op, typename T1, typename T2>
-    bool compare( const T1& lhs, const T2& rhs ) {
+    bool compare( T1 const& lhs, T2 const& rhs ) {
         return Evaluator<T1, T2, Op>::evaluate( lhs, rhs );
     }
 
@@ -922,579 +1426,916 @@
     }
 
     // pointer to long (when comparing against NULL)
-    template<Operator Op, typename T>
-    bool compare( long lhs, const T* rhs ) {
-        return Evaluator<const T*, const T*, Op>::evaluate( reinterpret_cast<const T*>( lhs ), rhs );
-    }
-
-    template<Operator Op, typename T>
-    bool compare( long lhs, T* rhs ) {
+    template<Operator Op, typename T> bool compare( long lhs, T* rhs ) {
         return Evaluator<T*, T*, Op>::evaluate( reinterpret_cast<T*>( lhs ), rhs );
     }
-
-    template<Operator Op, typename T>
-    bool compare( const T* lhs, long rhs ) {
-        return Evaluator<const T*, const T*, Op>::evaluate( lhs, reinterpret_cast<const T*>( rhs ) );
-    }
-
-    template<Operator Op, typename T>
-    bool compare( T* lhs, long rhs ) {
+    template<Operator Op, typename T> bool compare( T* lhs, long rhs ) {
         return Evaluator<T*, T*, Op>::evaluate( lhs, reinterpret_cast<T*>( rhs ) );
     }
 
     // pointer to int (when comparing against NULL)
-    template<Operator Op, typename T>
-    bool compare( int lhs, const T* rhs ) {
-        return Evaluator<const T*, const T*, Op>::evaluate( reinterpret_cast<const T*>( lhs ), rhs );
-    }
-
-    template<Operator Op, typename T>
-    bool compare( int lhs, T* rhs ) {
+    template<Operator Op, typename T> bool compare( int lhs, T* rhs ) {
         return Evaluator<T*, T*, Op>::evaluate( reinterpret_cast<T*>( lhs ), rhs );
     }
-
-    template<Operator Op, typename T>
-    bool compare( const T* lhs, int rhs ) {
-        return Evaluator<const T*, const T*, Op>::evaluate( lhs, reinterpret_cast<const T*>( rhs ) );
-    }
-
-    template<Operator Op, typename T>
-    bool compare( T* lhs, int rhs ) {
+    template<Operator Op, typename T> bool compare( T* lhs, int rhs ) {
         return Evaluator<T*, T*, Op>::evaluate( lhs, reinterpret_cast<T*>( rhs ) );
     }
 
+#ifdef CATCH_CONFIG_CPP11_LONG_LONG
+    // long long to unsigned X
+    template<Operator Op> bool compare( long long lhs, unsigned int rhs ) {
+        return applyEvaluator<Op>( static_cast<unsigned long>( lhs ), rhs );
+    }
+    template<Operator Op> bool compare( long long lhs, unsigned long rhs ) {
+        return applyEvaluator<Op>( static_cast<unsigned long>( lhs ), rhs );
+    }
+    template<Operator Op> bool compare( long long lhs, unsigned long long rhs ) {
+        return applyEvaluator<Op>( static_cast<unsigned long>( lhs ), rhs );
+    }
+    template<Operator Op> bool compare( long long lhs, unsigned char rhs ) {
+        return applyEvaluator<Op>( static_cast<unsigned long>( lhs ), rhs );
+    }
+
+    // unsigned long long to X
+    template<Operator Op> bool compare( unsigned long long lhs, int rhs ) {
+        return applyEvaluator<Op>( static_cast<long>( lhs ), rhs );
+    }
+    template<Operator Op> bool compare( unsigned long long lhs, long rhs ) {
+        return applyEvaluator<Op>( static_cast<long>( lhs ), rhs );
+    }
+    template<Operator Op> bool compare( unsigned long long lhs, long long rhs ) {
+        return applyEvaluator<Op>( static_cast<long>( lhs ), rhs );
+    }
+    template<Operator Op> bool compare( unsigned long long lhs, char rhs ) {
+        return applyEvaluator<Op>( static_cast<long>( lhs ), rhs );
+    }
+
+    // pointer to long long (when comparing against NULL)
+    template<Operator Op, typename T> bool compare( long long lhs, T* rhs ) {
+        return Evaluator<T*, T*, Op>::evaluate( reinterpret_cast<T*>( lhs ), rhs );
+    }
+    template<Operator Op, typename T> bool compare( T* lhs, long long rhs ) {
+        return Evaluator<T*, T*, Op>::evaluate( lhs, reinterpret_cast<T*>( rhs ) );
+    }
+#endif // CATCH_CONFIG_CPP11_LONG_LONG
+
+#ifdef CATCH_CONFIG_CPP11_NULLPTR
+    // pointer to nullptr_t (when comparing against nullptr)
+    template<Operator Op, typename T> bool compare( std::nullptr_t, T* rhs ) {
+        return Evaluator<T*, T*, Op>::evaluate( nullptr, rhs );
+    }
+    template<Operator Op, typename T> bool compare( T* lhs, std::nullptr_t ) {
+        return Evaluator<T*, T*, Op>::evaluate( lhs, nullptr );
+    }
+#endif // CATCH_CONFIG_CPP11_NULLPTR
+
 } // end of namespace Internal
 } // end of namespace Catch
 
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+// #included from: catch_tostring.h
+#define TWOBLUECUBES_CATCH_TOSTRING_H_INCLUDED
+
+#include <sstream>
+#include <iomanip>
+#include <limits>
+#include <vector>
+#include <cstddef>
+
+#ifdef __OBJC__
+// #included from: catch_objc_arc.hpp
+#define TWOBLUECUBES_CATCH_OBJC_ARC_HPP_INCLUDED
+
+#import <Foundation/Foundation.h>
+
+#ifdef __has_feature
+#define CATCH_ARC_ENABLED __has_feature(objc_arc)
+#else
+#define CATCH_ARC_ENABLED 0
+#endif
+
+void arcSafeRelease( NSObject* obj );
+id performOptionalSelector( id obj, SEL sel );
+
+#if !CATCH_ARC_ENABLED
+inline void arcSafeRelease( NSObject* obj ) {
+    [obj release];
+}
+inline id performOptionalSelector( id obj, SEL sel ) {
+    if( [obj respondsToSelector: sel] )
+        return [obj performSelector: sel];
+    return nil;
+}
+#define CATCH_UNSAFE_UNRETAINED
+#define CATCH_ARC_STRONG
+#else
+inline void arcSafeRelease( NSObject* ){}
+inline id performOptionalSelector( id obj, SEL sel ) {
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Warc-performSelector-leaks"
+#endif
+    if( [obj respondsToSelector: sel] )
+        return [obj performSelector: sel];
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+    return nil;
+}
+#define CATCH_UNSAFE_UNRETAINED __unsafe_unretained
+#define CATCH_ARC_STRONG __strong
+#endif
+
+#endif
+
+#ifdef CATCH_CONFIG_CPP11_TUPLE
+#include <tuple>
+#endif
+
+#ifdef CATCH_CONFIG_CPP11_IS_ENUM
+#include <type_traits>
+#endif
+
 namespace Catch {
 
-struct STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison;
-
-class ResultInfoBuilder : public ResultInfo {
-
-public:
-
-    ResultInfoBuilder() {}
-
-    ResultInfoBuilder(  const char* expr,
-                        bool isNot,
-                        const SourceLineInfo& lineInfo,
-                        const char* macroName,
-                        const char* message = "" )
-    : ResultInfo( expr, ResultWas::Unknown, isNot, lineInfo, macroName, message )
-    {}
-
-    void setResultType( ResultWas::OfType result ) {
-        // Flip bool results if isNot is set
-        if( m_isNot && result == ResultWas::Ok )
-            m_result = ResultWas::ExpressionFailed;
-        else if( m_isNot && result == ResultWas::ExpressionFailed )
-            m_result = ResultWas::Ok;
+// Why we're here.
+template<typename T>
+std::string toString( T const& value );
+
+// Built in overloads
+
+std::string toString( std::string const& value );
+std::string toString( std::wstring const& value );
+std::string toString( const char* const value );
+std::string toString( char* const value );
+std::string toString( const wchar_t* const value );
+std::string toString( wchar_t* const value );
+std::string toString( int value );
+std::string toString( unsigned long value );
+std::string toString( unsigned int value );
+std::string toString( const double value );
+std::string toString( const float value );
+std::string toString( bool value );
+std::string toString( char value );
+std::string toString( signed char value );
+std::string toString( unsigned char value );
+
+#ifdef CATCH_CONFIG_CPP11_LONG_LONG
+std::string toString( long long value );
+std::string toString( unsigned long long value );
+#endif
+
+#ifdef CATCH_CONFIG_CPP11_NULLPTR
+std::string toString( std::nullptr_t );
+#endif
+
+#ifdef __OBJC__
+    std::string toString( NSString const * const& nsstring );
+    std::string toString( NSString * CATCH_ARC_STRONG const& nsstring );
+    std::string toString( NSObject* const& nsObject );
+#endif
+
+namespace Detail {
+
+    extern const std::string unprintableString;
+
+    struct BorgType {
+        template<typename T> BorgType( T const& );
+    };
+
+    struct TrueType { char sizer[1]; };
+    struct FalseType { char sizer[2]; };
+
+    TrueType& testStreamable( std::ostream& );
+    FalseType testStreamable( FalseType );
+
+    FalseType operator<<( std::ostream const&, BorgType const& );
+
+    template<typename T>
+    struct IsStreamInsertable {
+        static std::ostream &s;
+        static T  const&t;
+        enum { value = sizeof( testStreamable(s << t) ) == sizeof( TrueType ) };
+    };
+
+#if defined(CATCH_CONFIG_CPP11_IS_ENUM)
+    template<typename T,
+             bool IsEnum = std::is_enum<T>::value
+             >
+    struct EnumStringMaker
+    {
+        static std::string convert( T const& ) { return unprintableString; }
+    };
+
+    template<typename T>
+    struct EnumStringMaker<T,true>
+    {
+        static std::string convert( T const& v )
+        {
+            return ::Catch::toString(
+                static_cast<typename std::underlying_type<T>::type>(v)
+                );
+        }
+    };
+#endif
+    template<bool C>
+    struct StringMakerBase {
+#if defined(CATCH_CONFIG_CPP11_IS_ENUM)
+        template<typename T>
+        static std::string convert( T const& v )
+        {
+            return EnumStringMaker<T>::convert( v );
+        }
+#else
+        template<typename T>
+        static std::string convert( T const& ) { return unprintableString; }
+#endif
+    };
+
+    template<>
+    struct StringMakerBase<true> {
+        template<typename T>
+        static std::string convert( T const& _value ) {
+            std::ostringstream oss;
+            oss << _value;
+            return oss.str();
+        }
+    };
+
+    std::string rawMemoryToString( const void *object, std::size_t size );
+
+    template<typename T>
+    inline std::string rawMemoryToString( const T& object ) {
+      return rawMemoryToString( &object, sizeof(object) );
+    }
+
+} // end namespace Detail
+
+template<typename T>
+struct StringMaker :
+    Detail::StringMakerBase<Detail::IsStreamInsertable<T>::value> {};
+
+template<typename T>
+struct StringMaker<T*> {
+    template<typename U>
+    static std::string convert( U* p ) {
+        if( !p )
+            return "NULL";
         else
-            m_result = result;
-    }
-
-    void setMessage( const std::string& message ) {
-        m_message = message;
-    }
-
-    void setLineInfo( const SourceLineInfo& lineInfo ) {
-        m_lineInfo = lineInfo;
-    }
-
-    void setLhs( const std::string& lhs ) {
-        m_lhs = lhs;
-    }
-
-    void setRhs( const std::string& rhs ) {
-        m_rhs = rhs;
-    }
-
-    void setOp( const std::string& op ) {
-        m_op = op;
-    }
-
-    template<typename RhsT>
-    STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator ||
-    (
-        const RhsT&
-    );
-
-    template<typename RhsT>
-    STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator &&
-    (
-        const RhsT&
-    );
-
-private:
-    friend class ExpressionBuilder;
-    template<typename T> friend class Expression;
-
-    template<typename T> friend class PtrExpression;
-
-    ResultInfoBuilder& captureBoolExpression( bool result ) {
-        m_lhs = Catch::toString( result );
-        m_op = m_isNot ? "!" : "";
-        setResultType( result ? ResultWas::Ok : ResultWas::ExpressionFailed );
-        return *this;
-    }
-
-    template<Internal::Operator Op, typename T1, typename T2>
-    ResultInfoBuilder& captureExpression( const T1& lhs, const T2& rhs ) {
-        setResultType( Internal::compare<Op>( lhs, rhs ) ? ResultWas::Ok : ResultWas::ExpressionFailed );
-        m_lhs = Catch::toString( lhs );
-        m_rhs = Catch::toString( rhs );
-        m_op = Internal::OperatorTraits<Op>::getName();
-        return *this;
-    }
-
-    template<Internal::Operator Op, typename T>
-    ResultInfoBuilder& captureExpression( const T* lhs, int rhs ) {
-        return captureExpression<Op>( lhs, reinterpret_cast<const T*>( rhs ) );
+            return Detail::rawMemoryToString( p );
     }
 };
 
+template<typename R, typename C>
+struct StringMaker<R C::*> {
+    static std::string convert( R C::* p ) {
+        if( !p )
+            return "NULL";
+        else
+            return Detail::rawMemoryToString( p );
+    }
+};
+
+namespace Detail {
+    template<typename InputIterator>
+    std::string rangeToString( InputIterator first, InputIterator last );
+}
+
+//template<typename T, typename Allocator>
+//struct StringMaker<std::vector<T, Allocator> > {
+//    static std::string convert( std::vector<T,Allocator> const& v ) {
+//        return Detail::rangeToString( v.begin(), v.end() );
+//    }
+//};
+
+template<typename T, typename Allocator>
+std::string toString( std::vector<T,Allocator> const& v ) {
+    return Detail::rangeToString( v.begin(), v.end() );
+}
+
+#ifdef CATCH_CONFIG_CPP11_TUPLE
+
+// toString for tuples
+namespace TupleDetail {
+  template<
+      typename Tuple,
+      std::size_t N = 0,
+      bool = (N < std::tuple_size<Tuple>::value)
+      >
+  struct ElementPrinter {
+      static void print( const Tuple& tuple, std::ostream& os )
+      {
+          os << ( N ? ", " : " " )
+             << Catch::toString(std::get<N>(tuple));
+          ElementPrinter<Tuple,N+1>::print(tuple,os);
+      }
+  };
+
+  template<
+      typename Tuple,
+      std::size_t N
+      >
+  struct ElementPrinter<Tuple,N,false> {
+      static void print( const Tuple&, std::ostream& ) {}
+  };
+
+}
+
+template<typename ...Types>
+struct StringMaker<std::tuple<Types...>> {
+
+    static std::string convert( const std::tuple<Types...>& tuple )
+    {
+        std::ostringstream os;
+        os << '{';
+        TupleDetail::ElementPrinter<std::tuple<Types...>>::print( tuple, os );
+        os << " }";
+        return os.str();
+    }
+};
+#endif // CATCH_CONFIG_CPP11_TUPLE
+
+namespace Detail {
+    template<typename T>
+    std::string makeString( T const& value ) {
+        return StringMaker<T>::convert( value );
+    }
+} // end namespace Detail
+
+/// \brief converts any type to a string
+///
+/// The default template forwards on to ostringstream - except when an
+/// ostringstream overload does not exist - in which case it attempts to detect
+/// that and writes {?}.
+/// Overload (not specialise) this template for custom typs that you don't want
+/// to provide an ostream overload for.
+template<typename T>
+std::string toString( T const& value ) {
+    return StringMaker<T>::convert( value );
+}
+
+    namespace Detail {
+    template<typename InputIterator>
+    std::string rangeToString( InputIterator first, InputIterator last ) {
+        std::ostringstream oss;
+        oss << "{ ";
+        if( first != last ) {
+            oss << Catch::toString( *first );
+            for( ++first ; first != last ; ++first )
+                oss << ", " << Catch::toString( *first );
+        }
+        oss << " }";
+        return oss.str();
+    }
+}
+
 } // end namespace Catch
 
 namespace Catch {
 
+// Wraps the LHS of an expression and captures the operator and RHS (if any) -
+// wrapping them all in a ResultBuilder object
 template<typename T>
-class Expression {
-	void operator = ( const Expression& );
+class ExpressionLhs {
+    ExpressionLhs& operator = ( ExpressionLhs const& );
+#  ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS
+    ExpressionLhs& operator = ( ExpressionLhs && ) = delete;
+#  endif
 
 public:
-    Expression( ResultInfoBuilder& result, T lhs )
-    :   m_result( result ),
-        m_lhs( lhs )
-    {}
+    ExpressionLhs( ResultBuilder& rb, T lhs ) : m_rb( rb ), m_lhs( lhs ) {}
+#  ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS
+    ExpressionLhs( ExpressionLhs const& ) = default;
+    ExpressionLhs( ExpressionLhs && )     = default;
+#  endif
 
     template<typename RhsT>
-    ResultInfoBuilder& operator == ( const RhsT& rhs ) {
-        return m_result.captureExpression<Internal::IsEqualTo>( m_lhs, rhs );
+    ResultBuilder& operator == ( RhsT const& rhs ) {
+        return captureExpression<Internal::IsEqualTo>( rhs );
     }
 
     template<typename RhsT>
-    ResultInfoBuilder& operator != ( const RhsT& rhs ) {
-        return m_result.captureExpression<Internal::IsNotEqualTo>( m_lhs, rhs );
+    ResultBuilder& operator != ( RhsT const& rhs ) {
+        return captureExpression<Internal::IsNotEqualTo>( rhs );
     }
 
     template<typename RhsT>
-    ResultInfoBuilder& operator < ( const RhsT& rhs ) {
-        return m_result.captureExpression<Internal::IsLessThan>( m_lhs, rhs );
+    ResultBuilder& operator < ( RhsT const& rhs ) {
+        return captureExpression<Internal::IsLessThan>( rhs );
+    }
+
+    template<typename RhsT>
+    ResultBuilder& operator > ( RhsT const& rhs ) {
+        return captureExpression<Internal::IsGreaterThan>( rhs );
+    }
+
+    template<typename RhsT>
+    ResultBuilder& operator <= ( RhsT const& rhs ) {
+        return captureExpression<Internal::IsLessThanOrEqualTo>( rhs );
     }
 
     template<typename RhsT>
-    ResultInfoBuilder& operator > ( const RhsT& rhs ) {
-        return m_result.captureExpression<Internal::IsGreaterThan>( m_lhs, rhs );
-    }
-
-    template<typename RhsT>
-    ResultInfoBuilder& operator <= ( const RhsT& rhs ) {
-        return m_result.captureExpression<Internal::IsLessThanOrEqualTo>( m_lhs, rhs );
-    }
-
-    template<typename RhsT>
-    ResultInfoBuilder& operator >= ( const RhsT& rhs ) {
-        return m_result.captureExpression<Internal::IsGreaterThanOrEqualTo>( m_lhs, rhs );
-    }
-
-    ResultInfoBuilder& operator == ( bool rhs ) {
-        return m_result.captureExpression<Internal::IsEqualTo>( m_lhs, rhs );
-    }
-
-    ResultInfoBuilder& operator != ( bool rhs ) {
-        return m_result.captureExpression<Internal::IsNotEqualTo>( m_lhs, rhs );
-    }
-
-    operator ResultInfoBuilder& () {
-        return m_result.captureBoolExpression( m_lhs );
-    }
-
-    template<typename RhsT>
-    STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator + ( const RhsT& );
-
-    template<typename RhsT>
-    STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator - ( const RhsT& );
+    ResultBuilder& operator >= ( RhsT const& rhs ) {
+        return captureExpression<Internal::IsGreaterThanOrEqualTo>( rhs );
+    }
+
+    ResultBuilder& operator == ( bool rhs ) {
+        return captureExpression<Internal::IsEqualTo>( rhs );
+    }
+
+    ResultBuilder& operator != ( bool rhs ) {
+        return captureExpression<Internal::IsNotEqualTo>( rhs );
+    }
+
+    void endExpression() {
+        bool value = m_lhs ? true : false;
+        m_rb
+            .setLhs( Catch::toString( value ) )
+            .setResultType( value )
+            .endExpression();
+    }
+
+    // Only simple binary expressions are allowed on the LHS.
+    // If more complex compositions are required then place the sub expression in parentheses
+    template<typename RhsT> STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator + ( RhsT const& );
+    template<typename RhsT> STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator - ( RhsT const& );
+    template<typename RhsT> STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator / ( RhsT const& );
+    template<typename RhsT> STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator * ( RhsT const& );
+    template<typename RhsT> STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator && ( RhsT const& );
+    template<typename RhsT> STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator || ( RhsT const& );
 
 private:
-    ResultInfoBuilder& m_result;
+    template<Internal::Operator Op, typename RhsT>
+    ResultBuilder& captureExpression( RhsT const& rhs ) {
+        return m_rb
+            .setResultType( Internal::compare<Op>( m_lhs, rhs ) )
+            .setLhs( Catch::toString( m_lhs ) )
+            .setRhs( Catch::toString( rhs ) )
+            .setOp( Internal::OperatorTraits<Op>::getName() );
+    }
+
+private:
+    ResultBuilder& m_rb;
     T m_lhs;
 };
 
 } // end namespace Catch
 
-#include <sstream>
 
 namespace Catch {
 
-class ExpressionBuilder {
-public:
-
-    ExpressionBuilder(  const SourceLineInfo& lineInfo,
-                        const char* macroName,
-                        const char* expr = "",
-                        bool isNot = false )
-    : m_result( expr, isNot, lineInfo, macroName ),
-      m_messageStream()
-    {}
-
-    template<typename T>
-    Expression<const T&> operator->* ( const T & operand ) {
-        Expression<const T&> expr( m_result, operand );
-        return expr;
-    }
-
-    Expression<bool> operator->* ( bool value ) {
-        Expression<bool> expr( m_result, value );
-        return expr;
-    }
-
     template<typename T>
-    ExpressionBuilder& operator << ( const T & value ) {
-        m_messageStream << Catch::toString( value );
-        return *this;
-    }
-
-    template<typename MatcherT, typename ArgT>
-    ExpressionBuilder& acceptMatcher(   const MatcherT& matcher,
-                                        const ArgT& arg,
-                                        const std::string& matcherCallAsString ) {
-        std::string matcherAsString = Catch::toString( matcher );
-        if( matcherAsString == "{?}" )
-            matcherAsString = matcherCallAsString;
-        m_result.setLhs( Catch::toString( arg ) );
-        m_result.setRhs( matcherAsString );
-        m_result.setOp( "matches" );
-        m_result.setResultType( matcher( arg ) ? ResultWas::Ok : ResultWas::ExpressionFailed );
-        return *this;
-    }
-
-    template<typename MatcherT, typename ArgT>
-    ExpressionBuilder& acceptMatcher(   const MatcherT& matcher,
-                                        ArgT* arg,
-                                        const std::string& matcherCallAsString ) {
-        std::string matcherAsString = Catch::toString( matcher );
-        if( matcherAsString == "{?}" )
-            matcherAsString = matcherCallAsString;
-        m_result.setLhs( Catch::toString( arg ) );
-        m_result.setRhs( matcherAsString );
-        m_result.setOp( "matches" );
-        m_result.setResultType( matcher( arg ) ? ResultWas::Ok : ResultWas::ExpressionFailed );
-        return *this;
-    }
-
-    ExpressionBuilder& setResultType( ResultWas::OfType resultType ) {
-        m_result.setResultType( resultType );
-        return *this;
-    }
-
-    operator ResultInfoBuilder&() {
-        m_result.setMessage( m_messageStream.str() );
-        return m_result;
-    }
-
-private:
-    ResultInfoBuilder m_result;
-    std::ostringstream m_messageStream;
-};
+    inline ExpressionLhs<T const&> ResultBuilder::operator <= ( T const& operand ) {
+        return ExpressionLhs<T const&>( *this, operand );
+    }
+
+    inline ExpressionLhs<bool> ResultBuilder::operator <= ( bool value ) {
+        return ExpressionLhs<bool>( *this, value );
+    }
+
+} // namespace Catch
+
+// #included from: catch_message.h
+#define TWOBLUECUBES_CATCH_MESSAGE_H_INCLUDED
+
+#include <string>
+
+namespace Catch {
+
+    struct MessageInfo {
+        MessageInfo(    std::string const& _macroName,
+                        SourceLineInfo const& _lineInfo,
+                        ResultWas::OfType _type );
+
+        std::string macroName;
+        SourceLineInfo lineInfo;
+        ResultWas::OfType type;
+        std::string message;
+        unsigned int sequence;
+
+        bool operator == ( MessageInfo const& other ) const {
+            return sequence == other.sequence;
+        }
+        bool operator < ( MessageInfo const& other ) const {
+            return sequence < other.sequence;
+        }
+    private:
+        static unsigned int globalCount;
+    };
+
+    struct MessageBuilder {
+        MessageBuilder( std::string const& macroName,
+                        SourceLineInfo const& lineInfo,
+                        ResultWas::OfType type )
+        : m_info( macroName, lineInfo, type )
+        {}
+
+        template<typename T>
+        MessageBuilder& operator << ( T const& value ) {
+            m_stream << value;
+            return *this;
+        }
+
+        MessageInfo m_info;
+        std::ostringstream m_stream;
+    };
+
+    class ScopedMessage {
+    public:
+        ScopedMessage( MessageBuilder const& builder );
+        ScopedMessage( ScopedMessage const& other );
+        ~ScopedMessage();
+
+        MessageInfo m_info;
+    };
 
 } // end namespace Catch
 
 // #included from: catch_interfaces_capture.h
+#define TWOBLUECUBES_CATCH_INTERFACES_CAPTURE_H_INCLUDED
 
 #include <string>
 
 namespace Catch {
 
-    class TestCaseInfo;
-    class ScopedInfo;
-    class ResultInfoBuilder;
-    class ResultInfo;
+    class TestCase;
+    class AssertionResult;
+    struct AssertionInfo;
+    struct SectionInfo;
+    struct SectionEndInfo;
+    struct MessageInfo;
+    class ScopedMessageBuilder;
+    struct Counts;
 
     struct IResultCapture {
 
-        virtual ~IResultCapture(){}
-
-        virtual void testEnded( const ResultInfo& result ) = 0;
-        virtual bool sectionStarted(    const std::string& name,
-                                        const std::string& description,
-                                        const SourceLineInfo& lineInfo,
+        virtual ~IResultCapture();
+
+        virtual void assertionEnded( AssertionResult const& result ) = 0;
+        virtual bool sectionStarted(    SectionInfo const& sectionInfo,
                                         Counts& assertions ) = 0;
-        virtual void sectionEnded( const std::string& name, const Counts& assertions ) = 0;
-        virtual void pushScopedInfo( ScopedInfo* scopedInfo ) = 0;
-        virtual void popScopedInfo( ScopedInfo* scopedInfo ) = 0;
-        virtual bool shouldDebugBreak() const = 0;
-
-        virtual ResultAction::Value acceptResult( bool result ) = 0;
-        virtual ResultAction::Value acceptResult( ResultWas::OfType result ) = 0;
-        virtual ResultAction::Value acceptExpression( const ResultInfoBuilder& resultInfo ) = 0;
-        virtual void acceptMessage( const std::string& msg ) = 0;
+        virtual void sectionEnded( SectionEndInfo const& endInfo ) = 0;
+        virtual void sectionEndedEarly( SectionEndInfo const& endInfo ) = 0;
+        virtual void pushScopedMessage( MessageInfo const& message ) = 0;
+        virtual void popScopedMessage( MessageInfo const& message ) = 0;
 
         virtual std::string getCurrentTestName() const = 0;
-        virtual const ResultInfo* getLastResult() const = 0;
-    };
+        virtual const AssertionResult* getLastResult() const = 0;
+
+        virtual void handleFatalErrorCondition( std::string const& message ) = 0;
+    };
+
+    IResultCapture& getResultCapture();
 }
 
-// #included from: catch_debugger.hpp
-
-#include <iostream>
+// #included from: catch_debugger.h
+#define TWOBLUECUBES_CATCH_DEBUGGER_H_INCLUDED
+
+// #included from: catch_platform.h
+#define TWOBLUECUBES_CATCH_PLATFORM_H_INCLUDED
 
 #if defined(__MAC_OS_X_VERSION_MIN_REQUIRED)
 #define CATCH_PLATFORM_MAC
 #elif  defined(__IPHONE_OS_VERSION_MIN_REQUIRED)
 #define CATCH_PLATFORM_IPHONE
-#elif defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER)
+#elif defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER)
 #define CATCH_PLATFORM_WINDOWS
 #endif
 
+#include <string>
+
+namespace Catch{
+
+    bool isDebuggerActive();
+    void writeToDebugConsole( std::string const& text );
+}
+
 #ifdef CATCH_PLATFORM_MAC
 
-    #include <assert.h>
-    #include <stdbool.h>
-    #include <sys/types.h>
-    #include <unistd.h>
-    #include <sys/sysctl.h>
-
-    namespace Catch{
-
-        // The following function is taken directly from the following technical note:
-        // http://developer.apple.com/library/mac/#qa/qa2004/qa1361.html
-
-        // Returns true if the current process is being debugged (either
-        // running under the debugger or has a debugger attached post facto).
-        inline bool isDebuggerActive(){
-
-            int                 junk;
-            int                 mib[4];
-            struct kinfo_proc   info;
-            size_t              size;
-
-            // Initialize the flags so that, if sysctl fails for some bizarre
-            // reason, we get a predictable result.
-
-            info.kp_proc.p_flag = 0;
-
-            // Initialize mib, which tells sysctl the info we want, in this case
-            // we're looking for information about a specific process ID.
-
-            mib[0] = CTL_KERN;
-            mib[1] = KERN_PROC;
-            mib[2] = KERN_PROC_PID;
-            mib[3] = getpid();
-
-            // Call sysctl.
-
-            size = sizeof(info);
-            junk = sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &size, NULL, 0);
-            assert(junk == 0);
-
-            // We're being debugged if the P_TRACED flag is set.
-
-            return ( (info.kp_proc.p_flag & P_TRACED) != 0 );
-        }
-    }
-
-    // The following code snippet taken from:
+    // The following code snippet based on:
     // http://cocoawithlove.com/2008/03/break-into-debugger.html
     #ifdef DEBUG
         #if defined(__ppc64__) || defined(__ppc__)
-            #define BreakIntoDebugger() \
-            if( Catch::isDebuggerActive() ) { \
-            __asm__("li r0, 20\nsc\nnop\nli r0, 37\nli r4, 2\nsc\nnop\n" \
-            : : : "memory","r0","r3","r4" ); \
-            }
+            #define CATCH_BREAK_INTO_DEBUGGER() \
+                if( Catch::isDebuggerActive() ) { \
+                    __asm__("li r0, 20\nsc\nnop\nli r0, 37\nli r4, 2\nsc\nnop\n" \
+                    : : : "memory","r0","r3","r4" ); \
+                }
         #else
-            #define BreakIntoDebugger() if( Catch::isDebuggerActive() ) {__asm__("int $3\n" : : );}
+            #define CATCH_BREAK_INTO_DEBUGGER() if( Catch::isDebuggerActive() ) {__asm__("int $3\n" : : );}
         #endif
-    #else
-        inline void BreakIntoDebugger(){}
     #endif
 
 #elif defined(_MSC_VER)
-    extern "C" __declspec(dllimport) int __stdcall IsDebuggerPresent();
-    #define BreakIntoDebugger() if (IsDebuggerPresent() ) { __debugbreak(); }
-    inline bool isDebuggerActive() {
-        return IsDebuggerPresent() != 0;
-    }
+    #define CATCH_BREAK_INTO_DEBUGGER() if( Catch::isDebuggerActive() ) { __debugbreak(); }
 #elif defined(__MINGW32__)
-    extern "C" __declspec(dllimport) int __stdcall IsDebuggerPresent();
     extern "C" __declspec(dllimport) void __stdcall DebugBreak();
-    #define BreakIntoDebugger() if (IsDebuggerPresent() ) { DebugBreak(); }
-    inline bool isDebuggerActive() {
-        return IsDebuggerPresent() != 0;
-    }
-#else
-	   inline void BreakIntoDebugger(){}
-	   inline bool isDebuggerActive() { return false; }
-#endif
-
-#ifdef CATCH_PLATFORM_WINDOWS
-extern "C" __declspec(dllimport) void __stdcall OutputDebugStringA( const char* );
-inline void writeToDebugConsole( const std::string& text ) {
-    ::OutputDebugStringA( text.c_str() );
-}
-#else
-inline void writeToDebugConsole( const std::string& text ) {
-    // !TBD: Need a version for Mac/ XCode and other IDEs
-    std::cout << text;
-}
-#endif // CATCH_PLATFORM_WINDOWS
-
-#include <ostream>
+    #define CATCH_BREAK_INTO_DEBUGGER() if( Catch::isDebuggerActive() ) { DebugBreak(); }
+#endif
+
+#ifndef CATCH_BREAK_INTO_DEBUGGER
+#define CATCH_BREAK_INTO_DEBUGGER() Catch::alwaysTrue();
+#endif
+
+// #included from: catch_interfaces_runner.h
+#define TWOBLUECUBES_CATCH_INTERFACES_RUNNER_H_INCLUDED
 
 namespace Catch {
-
-struct TestFailureException{};
-
-class ScopedInfo {
-public:
-    ScopedInfo() : m_oss() {
-        getCurrentContext().getResultCapture().pushScopedInfo( this );
-    }
-
-    ~ScopedInfo() {
-        getCurrentContext().getResultCapture().popScopedInfo( this );
-    }
-
-    template<typename T>
-    ScopedInfo& operator << ( const T& value ) {
-        m_oss << value;
-        return *this;
-    }
-
-    std::string getInfo () const {
-        return m_oss.str();
-    }
-
-private:
-    std::ostringstream m_oss;
-};
-
-// This is just here to avoid compiler warnings with macro constants
-inline bool isTrue( bool value ){ return value; }
-
-} // end namespace Catch
+    class TestCase;
+
+    struct IRunner {
+        virtual ~IRunner();
+        virtual bool aborting() const = 0;
+    };
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// In the event of a failure works out if the debugger needs to be invoked
+// and/or an exception thrown and takes appropriate action.
+// This needs to be done as a macro so the debugger will stop in the user
+// source code rather than in Catch library code
+#define INTERNAL_CATCH_REACT( resultBuilder ) \
+    if( resultBuilder.shouldDebugBreak() ) CATCH_BREAK_INTO_DEBUGGER(); \
+    resultBuilder.react();
+
+///////////////////////////////////////////////////////////////////////////////
+#define INTERNAL_CATCH_TEST( expr, resultDisposition, macroName ) \
+    do { \
+        Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \
+        try { \
+            CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \
+            ( __catchResult <= expr ).endExpression(); \
+        } \
+        catch( ... ) { \
+            __catchResult.useActiveException( Catch::ResultDisposition::Normal ); \
+        } \
+        INTERNAL_CATCH_REACT( __catchResult ) \
+    } while( Catch::isTrue( false && !!(expr) ) ) // expr here is never evaluated at runtime but it forces the compiler to give it a look
+
+///////////////////////////////////////////////////////////////////////////////
+#define INTERNAL_CATCH_IF( expr, resultDisposition, macroName ) \
+    INTERNAL_CATCH_TEST( expr, resultDisposition, macroName ); \
+    if( Catch::getResultCapture().getLastResult()->succeeded() )
+
+///////////////////////////////////////////////////////////////////////////////
+#define INTERNAL_CATCH_ELSE( expr, resultDisposition, macroName ) \
+    INTERNAL_CATCH_TEST( expr, resultDisposition, macroName ); \
+    if( !Catch::getResultCapture().getLastResult()->succeeded() )
+
+///////////////////////////////////////////////////////////////////////////////
+#define INTERNAL_CATCH_NO_THROW( expr, resultDisposition, macroName ) \
+    do { \
+        Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \
+        try { \
+            expr; \
+            __catchResult.captureResult( Catch::ResultWas::Ok ); \
+        } \
+        catch( ... ) { \
+            __catchResult.useActiveException( resultDisposition ); \
+        } \
+        INTERNAL_CATCH_REACT( __catchResult ) \
+    } while( Catch::alwaysFalse() )
 
 ///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CATCH_ACCEPT_EXPR( expr, stopOnFailure, originalExpr ) \
-    if( Catch::ResultAction::Value internal_catch_action = Catch::getCurrentContext().getResultCapture().acceptExpression( expr )  ) { \
-        if( internal_catch_action & Catch::ResultAction::Debug ) BreakIntoDebugger(); \
-        if( internal_catch_action & Catch::ResultAction::Abort ) throw Catch::TestFailureException(); \
-        if( Catch::isTrue( stopOnFailure ) ) throw Catch::TestFailureException(); \
-        if( Catch::isTrue( false ) ){ bool this_is_here_to_invoke_warnings = ( originalExpr ); Catch::isTrue( this_is_here_to_invoke_warnings ); } \
-    }
+#define INTERNAL_CATCH_THROWS( expr, resultDisposition, matcher, macroName ) \
+    do { \
+        Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition, #matcher ); \
+        if( __catchResult.allowThrows() ) \
+            try { \
+                expr; \
+                __catchResult.captureResult( Catch::ResultWas::DidntThrowException ); \
+            } \
+            catch( ... ) { \
+                __catchResult.captureExpectedException( matcher ); \
+            } \
+        else \
+            __catchResult.captureResult( Catch::ResultWas::Ok ); \
+        INTERNAL_CATCH_REACT( __catchResult ) \
+    } while( Catch::alwaysFalse() )
+
+///////////////////////////////////////////////////////////////////////////////
+#define INTERNAL_CATCH_THROWS_AS( expr, exceptionType, resultDisposition, macroName ) \
+    do { \
+        Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \
+        if( __catchResult.allowThrows() ) \
+            try { \
+                expr; \
+                __catchResult.captureResult( Catch::ResultWas::DidntThrowException ); \
+            } \
+            catch( exceptionType ) { \
+                __catchResult.captureResult( Catch::ResultWas::Ok ); \
+            } \
+            catch( ... ) { \
+                __catchResult.useActiveException( resultDisposition ); \
+            } \
+        else \
+            __catchResult.captureResult( Catch::ResultWas::Ok ); \
+        INTERNAL_CATCH_REACT( __catchResult ) \
+    } while( Catch::alwaysFalse() )
+
+///////////////////////////////////////////////////////////////////////////////
+#ifdef CATCH_CONFIG_VARIADIC_MACROS
+    #define INTERNAL_CATCH_MSG( messageType, resultDisposition, macroName, ... ) \
+        do { \
+            Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, "", resultDisposition ); \
+            __catchResult << __VA_ARGS__ + ::Catch::StreamEndStop(); \
+            __catchResult.captureResult( messageType ); \
+            INTERNAL_CATCH_REACT( __catchResult ) \
+        } while( Catch::alwaysFalse() )
+#else
+    #define INTERNAL_CATCH_MSG( messageType, resultDisposition, macroName, log ) \
+        do { \
+            Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, "", resultDisposition ); \
+            __catchResult << log + ::Catch::StreamEndStop(); \
+            __catchResult.captureResult( messageType ); \
+            INTERNAL_CATCH_REACT( __catchResult ) \
+        } while( Catch::alwaysFalse() )
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+#define INTERNAL_CATCH_INFO( log, macroName ) \
+    Catch::ScopedMessage INTERNAL_CATCH_UNIQUE_NAME( scopedMessage ) = Catch::MessageBuilder( macroName, CATCH_INTERNAL_LINEINFO, Catch::ResultWas::Info ) << log;
 
 ///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CATCH_TEST( expr, isNot, stopOnFailure, macroName ) \
-    do { try { \
-        INTERNAL_CATCH_ACCEPT_EXPR( ( Catch::ExpressionBuilder( CATCH_INTERNAL_LINEINFO, macroName, #expr, isNot )->*expr ), stopOnFailure, expr ); \
-    } catch( Catch::TestFailureException& ) { \
-        throw; \
-    } catch( ... ) { \
-        INTERNAL_CATCH_ACCEPT_EXPR( ( Catch::ExpressionBuilder( CATCH_INTERNAL_LINEINFO, macroName, #expr ) << Catch::getCurrentContext().getExceptionTranslatorRegistry().translateActiveException() ).setResultType( Catch::ResultWas::ThrewException ), false, expr ); \
-        throw; \
-    } } while( Catch::isTrue( false ) )
-
-///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CATCH_IF( expr, isNot, stopOnFailure, macroName ) \
-    INTERNAL_CATCH_TEST( expr, isNot, stopOnFailure, macroName ); \
-    if( Catch::getCurrentContext().getResultCapture().getLastResult()->ok() )
-
-///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CATCH_ELSE( expr, isNot, stopOnFailure, macroName ) \
-    INTERNAL_CATCH_TEST( expr, isNot, stopOnFailure, macroName ); \
-    if( !Catch::getCurrentContext().getResultCapture().getLastResult()->ok() )
-
-///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CATCH_NO_THROW( expr, stopOnFailure, macroName ) \
-    try { \
-        expr; \
-        INTERNAL_CATCH_ACCEPT_EXPR( Catch::ExpressionBuilder( CATCH_INTERNAL_LINEINFO, macroName, #expr ).setResultType( Catch::ResultWas::Ok ), stopOnFailure, false ); \
-    } \
-    catch( ... ) { \
-        INTERNAL_CATCH_ACCEPT_EXPR( ( Catch::ExpressionBuilder( CATCH_INTERNAL_LINEINFO, macroName, #expr ) << Catch::getCurrentContext().getExceptionTranslatorRegistry().translateActiveException() ).setResultType( Catch::ResultWas::ThrewException ), stopOnFailure, false ); \
-    }
-
-///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CATCH_THROWS( expr, exceptionType, stopOnFailure, macroName ) \
-    try { \
-        if( Catch::getCurrentContext().getConfig()->allowThrows() ) { \
-            expr; \
-            INTERNAL_CATCH_ACCEPT_EXPR( Catch::ExpressionBuilder( CATCH_INTERNAL_LINEINFO, macroName, #expr ).setResultType( Catch::ResultWas::DidntThrowException ), stopOnFailure, false ); \
+#define INTERNAL_CHECK_THAT( arg, matcher, resultDisposition, macroName ) \
+    do { \
+        Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #arg ", " #matcher, resultDisposition ); \
+        try { \
+            std::string matcherAsString = (matcher).toString(); \
+            __catchResult \
+                .setLhs( Catch::toString( arg ) ) \
+                .setRhs( matcherAsString == Catch::Detail::unprintableString ? #matcher : matcherAsString ) \
+                .setOp( "matches" ) \
+                .setResultType( (matcher).match( arg ) ); \
+            __catchResult.captureExpression(); \
+        } catch( ... ) { \
+            __catchResult.useActiveException( resultDisposition | Catch::ResultDisposition::ContinueOnFailure ); \
         } \
-    } \
-    catch( Catch::TestFailureException& ) { \
-        throw; \
-    } \
-    catch( exceptionType ) { \
-        INTERNAL_CATCH_ACCEPT_EXPR( Catch::ExpressionBuilder( CATCH_INTERNAL_LINEINFO, macroName, #expr ).setResultType( Catch::ResultWas::Ok ), stopOnFailure, false ); \
-    }
-
-///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CATCH_THROWS_AS( expr, exceptionType, stopOnFailure, macroName ) \
-    INTERNAL_CATCH_THROWS( expr, exceptionType, stopOnFailure, macroName ) \
-    catch( ... ) { \
-        INTERNAL_CATCH_ACCEPT_EXPR( ( Catch::ExpressionBuilder( CATCH_INTERNAL_LINEINFO, macroName, #expr ) << Catch::getCurrentContext().getExceptionTranslatorRegistry().translateActiveException() ).setResultType( Catch::ResultWas::ThrewException ), stopOnFailure, false ); \
-    }
-
-///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CATCH_MSG( reason, resultType, stopOnFailure, macroName ) \
-    Catch::getCurrentContext().getResultCapture().acceptExpression( ( Catch::ExpressionBuilder( CATCH_INTERNAL_LINEINFO, macroName ) << reason ).setResultType( resultType ) );
-
-///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CATCH_SCOPED_INFO( log ) \
-    Catch::ScopedInfo INTERNAL_CATCH_UNIQUE_NAME( info ); \
-    INTERNAL_CATCH_UNIQUE_NAME( info ) << log
-
-///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CHECK_THAT( arg, matcher, stopOnFailure, macroName ) \
-    do { try { \
-        INTERNAL_CATCH_ACCEPT_EXPR( ( Catch::ExpressionBuilder( CATCH_INTERNAL_LINEINFO, macroName, #arg " " #matcher, false ).acceptMatcher( ::Catch::Matchers::matcher, arg, #matcher ) ), stopOnFailure, false ); \
-    } catch( Catch::TestFailureException& ) { \
-        throw; \
-    } catch( ... ) { \
-        INTERNAL_CATCH_ACCEPT_EXPR( ( Catch::ExpressionBuilder( CATCH_INTERNAL_LINEINFO, macroName, #arg " " #matcher ) << Catch::getCurrentContext().getExceptionTranslatorRegistry().translateActiveException() ).setResultType( Catch::ResultWas::ThrewException ), false, false ); \
-        throw; \
-    }}while( Catch::isTrue( false ) )
-
-// #included from: internal/catch_section.hpp
+        INTERNAL_CATCH_REACT( __catchResult ) \
+    } while( Catch::alwaysFalse() )
+
+// #included from: internal/catch_section.h
+#define TWOBLUECUBES_CATCH_SECTION_H_INCLUDED
+
+// #included from: catch_section_info.h
+#define TWOBLUECUBES_CATCH_SECTION_INFO_H_INCLUDED
+
+// #included from: catch_totals.hpp
+#define TWOBLUECUBES_CATCH_TOTALS_HPP_INCLUDED
+
+#include <cstddef>
+
+namespace Catch {
+
+    struct Counts {
+        Counts() : passed( 0 ), failed( 0 ), failedButOk( 0 ) {}
+
+        Counts operator - ( Counts const& other ) const {
+            Counts diff;
+            diff.passed = passed - other.passed;
+            diff.failed = failed - other.failed;
+            diff.failedButOk = failedButOk - other.failedButOk;
+            return diff;
+        }
+        Counts& operator += ( Counts const& other ) {
+            passed += other.passed;
+            failed += other.failed;
+            failedButOk += other.failedButOk;
+            return *this;
+        }
+
+        std::size_t total() const {
+            return passed + failed + failedButOk;
+        }
+        bool allPassed() const {
+            return failed == 0 && failedButOk == 0;
+        }
+        bool allOk() const {
+            return failed == 0;
+        }
+
+        std::size_t passed;
+        std::size_t failed;
+        std::size_t failedButOk;
+    };
+
+    struct Totals {
+
+        Totals operator - ( Totals const& other ) const {
+            Totals diff;
+            diff.assertions = assertions - other.assertions;
+            diff.testCases = testCases - other.testCases;
+            return diff;
+        }
+
+        Totals delta( Totals const& prevTotals ) const {
+            Totals diff = *this - prevTotals;
+            if( diff.assertions.failed > 0 )
+                ++diff.testCases.failed;
+            else if( diff.assertions.failedButOk > 0 )
+                ++diff.testCases.failedButOk;
+            else
+                ++diff.testCases.passed;
+            return diff;
+        }
+
+        Totals& operator += ( Totals const& other ) {
+            assertions += other.assertions;
+            testCases += other.testCases;
+            return *this;
+        }
+
+        Counts assertions;
+        Counts testCases;
+    };
+}
+
+namespace Catch {
+
+    struct SectionInfo {
+        SectionInfo
+            (   SourceLineInfo const& _lineInfo,
+                std::string const& _name,
+                std::string const& _description = std::string() );
+
+        std::string name;
+        std::string description;
+        SourceLineInfo lineInfo;
+    };
+
+    struct SectionEndInfo {
+        SectionEndInfo( SectionInfo const& _sectionInfo, Counts const& _prevAssertions, double _durationInSeconds )
+        : sectionInfo( _sectionInfo ), prevAssertions( _prevAssertions ), durationInSeconds( _durationInSeconds )
+        {}
+
+        SectionInfo sectionInfo;
+        Counts prevAssertions;
+        double durationInSeconds;
+    };
+
+} // end namespace Catch
+
+// #included from: catch_timer.h
+#define TWOBLUECUBES_CATCH_TIMER_H_INCLUDED
+
+#ifdef CATCH_PLATFORM_WINDOWS
+typedef unsigned long long uint64_t;
+#else
+#include <stdint.h>
+#endif
+
+namespace Catch {
+
+    class Timer {
+    public:
+        Timer() : m_ticks( 0 ) {}
+        void start();
+        unsigned int getElapsedMicroseconds() const;
+        unsigned int getElapsedMilliseconds() const;
+        double getElapsedSeconds() const;
+
+    private:
+        uint64_t m_ticks;
+    };
+
+} // namespace Catch
 
 #include <string>
 
 namespace Catch {
 
-    class Section {
+    class Section : NonCopyable {
     public:
-        Section(    const std::string& name,
-                    const std::string& description,
-                    const SourceLineInfo& lineInfo )
-        :   m_name( name ),
-            m_sectionIncluded( getCurrentContext().getResultCapture().sectionStarted( name, description, lineInfo, m_assertions ) )
-        {}
-
-        ~Section() {
-            if( m_sectionIncluded )
-                getCurrentContext().getResultCapture().sectionEnded( m_name, m_assertions );
-        }
+        Section( SectionInfo const& info );
+        ~Section();
 
         // This indicates whether the section should be executed or not
-        operator bool() {
-            return m_sectionIncluded;
-        }
+        operator bool() const;
 
     private:
+        SectionInfo m_info;
 
         std::string m_name;
         Counts m_assertions;
         bool m_sectionIncluded;
+        Timer m_timer;
     };
 
 } // end namespace Catch
 
-#define INTERNAL_CATCH_SECTION( name, desc ) \
-    if( Catch::Section INTERNAL_CATCH_UNIQUE_NAME( catch_internal_Section ) = Catch::Section( name, desc, CATCH_INTERNAL_LINEINFO ) )
+#ifdef CATCH_CONFIG_VARIADIC_MACROS
+    #define INTERNAL_CATCH_SECTION( ... ) \
+        if( Catch::Section const& INTERNAL_CATCH_UNIQUE_NAME( catch_internal_Section ) = Catch::SectionInfo( CATCH_INTERNAL_LINEINFO, __VA_ARGS__ ) )
+#else
+    #define INTERNAL_CATCH_SECTION( name, desc ) \
+        if( Catch::Section const& INTERNAL_CATCH_UNIQUE_NAME( catch_internal_Section ) = Catch::SectionInfo( CATCH_INTERNAL_LINEINFO, name, desc ) )
+#endif
 
 // #included from: internal/catch_generators.hpp
+#define TWOBLUECUBES_CATCH_GENERATORS_HPP_INCLUDED
 
 #include <iterator>
 #include <vector>
@@ -1516,7 +2357,7 @@
     BetweenGenerator( T from, T to ) : m_from( from ), m_to( to ){}
 
     virtual T getValue( std::size_t index ) const {
-        return m_from+static_cast<T>( index );
+        return m_from+static_cast<int>( index );
     }
 
     virtual std::size_t size() const {
@@ -1555,12 +2396,12 @@
 public:
     CompositeGenerator() : m_totalSize( 0 ) {}
 
-	// *** Move semantics, similar to auto_ptr ***
+    // *** Move semantics, similar to auto_ptr ***
     CompositeGenerator( CompositeGenerator& other )
     :   m_fileInfo( other.m_fileInfo ),
         m_totalSize( 0 )
     {
-		move( other );
+        move( other );
     }
 
     CompositeGenerator& setFileInfo( const char* fileInfo ) {
@@ -1587,7 +2428,7 @@
             index += generator->size();
         }
         CATCH_INTERNAL_ERROR( "Indexed past end of generated range" );
-		return T(); // Suppress spurious "not all control paths return a value" warning in Visual Studio - if you know how to fix this please do so
+        return T(); // Suppress spurious "not all control paths return a value" warning in Visual Studio - if you know how to fix this please do so
     }
 
     void add( const IGenerator<T>* generator ) {
@@ -1674,22 +2515,63 @@
 #define INTERNAL_CATCH_GENERATE( expr ) expr.setFileInfo( __FILE__ "(" INTERNAL_CATCH_LINESTR( __LINE__ ) ")" )
 
 // #included from: internal/catch_interfaces_exception.h
+#define TWOBLUECUBES_CATCH_INTERFACES_EXCEPTION_H_INCLUDED
+
+#include <string>
+#include <vector>
+
+// #included from: catch_interfaces_registry_hub.h
+#define TWOBLUECUBES_CATCH_INTERFACES_REGISTRY_HUB_H_INCLUDED
 
 #include <string>
 
 namespace Catch {
 
+    class TestCase;
+    struct ITestCaseRegistry;
+    struct IExceptionTranslatorRegistry;
+    struct IExceptionTranslator;
+    struct IReporterRegistry;
+    struct IReporterFactory;
+
+    struct IRegistryHub {
+        virtual ~IRegistryHub();
+
+        virtual IReporterRegistry const& getReporterRegistry() const = 0;
+        virtual ITestCaseRegistry const& getTestCaseRegistry() const = 0;
+        virtual IExceptionTranslatorRegistry& getExceptionTranslatorRegistry() = 0;
+    };
+
+    struct IMutableRegistryHub {
+        virtual ~IMutableRegistryHub();
+        virtual void registerReporter( std::string const& name, Ptr<IReporterFactory> const& factory ) = 0;
+        virtual void registerListener( Ptr<IReporterFactory> const& factory ) = 0;
+        virtual void registerTest( TestCase const& testInfo ) = 0;
+        virtual void registerTranslator( const IExceptionTranslator* translator ) = 0;
+    };
+
+    IRegistryHub& getRegistryHub();
+    IMutableRegistryHub& getMutableRegistryHub();
+    void cleanUp();
+    std::string translateActiveException();
+
+}
+
+namespace Catch {
+
     typedef std::string(*exceptionTranslateFunction)();
 
+    struct IExceptionTranslator;
+    typedef std::vector<const IExceptionTranslator*> ExceptionTranslators;
+
     struct IExceptionTranslator {
-        virtual ~IExceptionTranslator(){}
-        virtual std::string translate() const = 0;
+        virtual ~IExceptionTranslator();
+        virtual std::string translate( ExceptionTranslators::const_iterator it, ExceptionTranslators::const_iterator itEnd ) const = 0;
     };
 
     struct IExceptionTranslatorRegistry {
-        virtual ~IExceptionTranslatorRegistry(){}
-
-        virtual void registerTranslator( IExceptionTranslator* translator ) = 0;
+        virtual ~IExceptionTranslatorRegistry();
+
         virtual std::string translateActiveException() const = 0;
     };
 
@@ -1702,9 +2584,12 @@
             : m_translateFunction( translateFunction )
             {}
 
-            virtual std::string translate() const {
+            virtual std::string translate( ExceptionTranslators::const_iterator it, ExceptionTranslators::const_iterator itEnd ) const CATCH_OVERRIDE {
                 try {
-                    throw;
+                    if( it == itEnd )
+                        throw;
+                    else
+                        return (*it)->translate( it+1, itEnd );
                 }
                 catch( T& ex ) {
                     return m_translateFunction( ex );
@@ -1718,19 +2603,22 @@
     public:
         template<typename T>
         ExceptionTranslatorRegistrar( std::string(*translateFunction)( T& ) ) {
-            getCurrentContext().getExceptionTranslatorRegistry().registerTranslator
+            getMutableRegistryHub().registerTranslator
                 ( new ExceptionTranslator<T>( translateFunction ) );
         }
     };
 }
 
 ///////////////////////////////////////////////////////////////////////////////
-#define INTERNAL_CATCH_TRANSLATE_EXCEPTION( signature ) \
-    static std::string INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionTranslator )( signature ); \
-    namespace{ Catch::ExceptionTranslatorRegistrar INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionRegistrar )( &INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionTranslator ) ); }\
-    static std::string INTERNAL_CATCH_UNIQUE_NAME(  catch_internal_ExceptionTranslator )( signature )
+#define INTERNAL_CATCH_TRANSLATE_EXCEPTION2( translatorName, signature ) \
+    static std::string translatorName( signature ); \
+    namespace{ Catch::ExceptionTranslatorRegistrar INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionRegistrar )( &translatorName ); }\
+    static std::string translatorName( signature )
+
+#define INTERNAL_CATCH_TRANSLATE_EXCEPTION( signature ) INTERNAL_CATCH_TRANSLATE_EXCEPTION2( INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionTranslator ), signature )
 
 // #included from: internal/catch_approx.hpp
+#define TWOBLUECUBES_CATCH_APPROX_HPP_INCLUDED
 
 #include <cmath>
 #include <limits>
@@ -1746,7 +2634,7 @@
             m_value( value )
         {}
 
-        Approx( const Approx& other )
+        Approx( Approx const& other )
         :   m_epsilon( other.m_epsilon ),
             m_scale( other.m_scale ),
             m_value( other.m_value )
@@ -1763,20 +2651,20 @@
             return approx;
         }
 
-        friend bool operator == ( double lhs, const Approx& rhs ) {
+        friend bool operator == ( double lhs, Approx const& rhs ) {
             // Thanks to Richard Harris for his help refining this formula
             return fabs( lhs - rhs.m_value ) < rhs.m_epsilon * (rhs.m_scale + (std::max)( fabs(lhs), fabs(rhs.m_value) ) );
         }
 
-        friend bool operator == ( const Approx& lhs, double rhs ) {
+        friend bool operator == ( Approx const& lhs, double rhs ) {
             return operator==( rhs, lhs );
         }
 
-        friend bool operator != ( double lhs, const Approx& rhs ) {
+        friend bool operator != ( double lhs, Approx const& rhs ) {
             return !operator==( lhs, rhs );
         }
 
-        friend bool operator != ( const Approx& lhs, double rhs ) {
+        friend bool operator != ( Approx const& lhs, double rhs ) {
             return !operator==( rhs, lhs );
         }
 
@@ -1792,7 +2680,7 @@
 
         std::string toString() const {
             std::ostringstream oss;
-            oss << "Approx( " << m_value << ")";
+            oss << "Approx( " << Catch::toString( m_value ) << " )";
             return oss.str();
         }
 
@@ -1804,236 +2692,201 @@
 }
 
 template<>
-inline std::string toString<Detail::Approx>( const Detail::Approx& value ) {
+inline std::string toString<Detail::Approx>( Detail::Approx const& value ) {
     return value.toString();
 }
 
 } // end namespace Catch
 
-// #included from: internal/catch_matchers.hpp
-
-namespace Catch {
-namespace Matchers {
-    namespace Impl {
-    namespace StdString {
-
-        struct Equals {
-            Equals( const std::string& str ) : m_str( str ){}
-
-            bool operator()( const std::string& str ) const
-            {
-                return str == m_str;
-            }
-
-            friend std::ostream& operator<<( std::ostream& os, const Equals& matcher )
-            {
-                os << "equals: \"" << matcher.m_str << "\"";
-                return os;
-            }
-            std::string m_str;
-        };
-
-        struct Contains {
-            Contains( const std::string& substr ) : m_substr( substr ){}
-
-            bool operator()( const std::string& str ) const
-            {
-                return str.find( m_substr ) != std::string::npos;
-            }
-
-            friend std::ostream& operator<<( std::ostream& os, const Contains& matcher )
-            {
-                os << "contains: \"" << matcher.m_substr << "\"";
-                return os;
-            }
-            std::string m_substr;
-        };
-
-        struct StartsWith {
-            StartsWith( const std::string& substr ) : m_substr( substr ){}
-
-            bool operator()( const std::string& str ) const
-            {
-                return str.find( m_substr ) == 0;
-            }
-
-            friend std::ostream& operator<<( std::ostream& os, const StartsWith& matcher )
-            {
-                os << "starts with: \"" << matcher.m_substr << "\"";
-                return os;
-            }
-            std::string m_substr;
-        };
-
-        struct EndsWith {
-            EndsWith( const std::string& substr ) : m_substr( substr ){}
-
-            bool operator()( const std::string& str ) const
-            {
-                return str.find( m_substr ) == str.size() - m_substr.size();
-            }
-
-            friend std::ostream& operator<<( std::ostream& os, const EndsWith& matcher )
-            {
-                os << "ends with: \"" << matcher.m_substr << "\"";
-                return os;
-            }
-            std::string m_substr;
-        };
-    } // namespace StdString
-    } // namespace Impl
-
-    inline Impl::StdString::Equals      Equals( const std::string& str ){ return Impl::StdString::Equals( str ); }
-    inline Impl::StdString::Contains    Contains( const std::string& substr ){ return Impl::StdString::Contains( substr ); }
-    inline Impl::StdString::StartsWith  StartsWith( const std::string& substr ){ return Impl::StdString::StartsWith( substr ); }
-    inline Impl::StdString::EndsWith    EndsWith( const std::string& substr ){ return Impl::StdString::EndsWith( substr ); }
-
-} // namespace Matchers
-
-using namespace Matchers;
-
-} // namespace Catch
-
-// These files are included here so the single_include script doesn't put them
-// in the conditionally compiled sections
-// #included from: internal/catch_test_case_info.hpp
-
-#include <map>
-#include <string>
-
-namespace Catch {
-
-    class TestCaseInfo {
-    public:
-        TestCaseInfo(   ITestCase* testCase,
-                        const char* name,
-                        const char* description,
-                        const SourceLineInfo& lineInfo )
-        :   m_test( testCase ),
-            m_name( name ),
-            m_description( description ),
-            m_lineInfo( lineInfo )
-        {}
-
-        TestCaseInfo()
-        :   m_test( NULL ),
-            m_name(),
-            m_description()
-        {}
-
-        TestCaseInfo( const TestCaseInfo& other )
-        :   m_test( other.m_test->clone() ),
-            m_name( other.m_name ),
-            m_description( other.m_description ),
-            m_lineInfo( other.m_lineInfo )
-        {}
-
-        TestCaseInfo( const TestCaseInfo& other, const std::string& name )
-        :   m_test( other.m_test->clone() ),
-            m_name( name ),
-            m_description( other.m_description ),
-            m_lineInfo( other.m_lineInfo )
-        {}
-
-        TestCaseInfo& operator = ( const TestCaseInfo& other ) {
-            TestCaseInfo temp( other );
-            swap( temp );
-            return *this;
-        }
-
-        ~TestCaseInfo() {
-            delete m_test;
-        }
-
-        void invoke() const {
-            m_test->invoke();
-        }
-
-        const std::string& getName() const {
-            return m_name;
-        }
-
-        const std::string& getDescription() const {
-            return m_description;
-        }
-
-        const SourceLineInfo& getLineInfo() const {
-            return m_lineInfo;
-        }
-
-        bool isHidden() const {
-            return m_name.size() >= 2 && m_name[0] == '.' && m_name[1] == '/';
-        }
-
-        void swap( TestCaseInfo& other ) {
-            std::swap( m_test, other.m_test );
-            m_name.swap( other.m_name );
-            m_description.swap( other.m_description );
-            m_lineInfo.swap( other.m_lineInfo );
-        }
-
-        bool operator == ( const TestCaseInfo& other ) const {
-            return *m_test == *other.m_test && m_name == other.m_name;
-        }
-
-        bool operator < ( const TestCaseInfo& other ) const {
-            return m_name < other.m_name;
-        }
-
-    private:
-        ITestCase* m_test;
-        std::string m_name;
-        std::string m_description;
-        SourceLineInfo m_lineInfo;
-    };
-
-    ///////////////////////////////////////////////////////////////////////////
-
-    class TestSpec {
-    public:
-        TestSpec( const std::string& rawSpec )
-        :   m_rawSpec( rawSpec ),
-            m_isWildcarded( false ) {
-
-            if( m_rawSpec[m_rawSpec.size()-1] == '*' ) {
-                m_rawSpec = m_rawSpec.substr( 0, m_rawSpec.size()-1 );
-                m_isWildcarded = true;
-            }
-        }
-
-        bool matches ( const std::string& testName ) const {
-            if( !m_isWildcarded )
-                return m_rawSpec == testName;
-            else
-                return testName.size() >= m_rawSpec.size() && testName.substr( 0, m_rawSpec.size() ) == m_rawSpec;
-        }
-
-    private:
-        std::string m_rawSpec;
-        bool m_isWildcarded;
-    };
-}
-
-// #included from: internal/catch_interfaces_runner.h
+// #included from: internal/catch_interfaces_tag_alias_registry.h
+#define TWOBLUECUBES_CATCH_INTERFACES_TAG_ALIAS_REGISTRY_H_INCLUDED
+
+// #included from: catch_tag_alias.h
+#define TWOBLUECUBES_CATCH_TAG_ALIAS_H_INCLUDED
 
 #include <string>
 
 namespace Catch {
-    class TestCaseInfo;
-
-    struct IRunner {
-        virtual ~IRunner() {}
-        virtual void runAll( bool runHiddenTests = false ) = 0;
-        virtual std::size_t runMatching( const std::string& rawTestSpec ) = 0;
-        virtual Totals getTotals() const = 0;
-    };
+
+    struct TagAlias {
+        TagAlias( std::string _tag, SourceLineInfo _lineInfo ) : tag( _tag ), lineInfo( _lineInfo ) {}
+
+        std::string tag;
+        SourceLineInfo lineInfo;
+    };
+
+    struct RegistrarForTagAliases {
+        RegistrarForTagAliases( char const* alias, char const* tag, SourceLineInfo const& lineInfo );
+    };
+
+} // end namespace Catch
+
+#define CATCH_REGISTER_TAG_ALIAS( alias, spec ) namespace{ Catch::RegistrarForTagAliases INTERNAL_CATCH_UNIQUE_NAME( AutoRegisterTagAlias )( alias, spec, CATCH_INTERNAL_LINEINFO ); }
+// #included from: catch_option.hpp
+#define TWOBLUECUBES_CATCH_OPTION_HPP_INCLUDED
+
+namespace Catch {
+
+    // An optional type
+    template<typename T>
+    class Option {
+    public:
+        Option() : nullableValue( CATCH_NULL ) {}
+        Option( T const& _value )
+        : nullableValue( new( storage ) T( _value ) )
+        {}
+        Option( Option const& _other )
+        : nullableValue( _other ? new( storage ) T( *_other ) : CATCH_NULL )
+        {}
+
+        ~Option() {
+            reset();
+        }
+
+        Option& operator= ( Option const& _other ) {
+            if( &_other != this ) {
+                reset();
+                if( _other )
+                    nullableValue = new( storage ) T( *_other );
+            }
+            return *this;
+        }
+        Option& operator = ( T const& _value ) {
+            reset();
+            nullableValue = new( storage ) T( _value );
+            return *this;
+        }
+
+        void reset() {
+            if( nullableValue )
+                nullableValue->~T();
+            nullableValue = CATCH_NULL;
+        }
+
+        T& operator*() { return *nullableValue; }
+        T const& operator*() const { return *nullableValue; }
+        T* operator->() { return nullableValue; }
+        const T* operator->() const { return nullableValue; }
+
+        T valueOr( T const& defaultValue ) const {
+            return nullableValue ? *nullableValue : defaultValue;
+        }
+
+        bool some() const { return nullableValue != CATCH_NULL; }
+        bool none() const { return nullableValue == CATCH_NULL; }
+
+        bool operator !() const { return nullableValue == CATCH_NULL; }
+        operator SafeBool::type() const {
+            return SafeBool::makeSafe( some() );
+        }
+
+    private:
+        T* nullableValue;
+        char storage[sizeof(T)];
+    };
+
+} // end namespace Catch
+
+namespace Catch {
+
+    struct ITagAliasRegistry {
+        virtual ~ITagAliasRegistry();
+        virtual Option<TagAlias> find( std::string const& alias ) const = 0;
+        virtual std::string expandAliases( std::string const& unexpandedTestSpec ) const = 0;
+
+        static ITagAliasRegistry const& get();
+    };
+
+} // end namespace Catch
+
+// These files are included here so the single_include script doesn't put them
+// in the conditionally compiled sections
+// #included from: internal/catch_test_case_info.h
+#define TWOBLUECUBES_CATCH_TEST_CASE_INFO_H_INCLUDED
+
+#include <string>
+#include <set>
+
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wpadded"
+#endif
+
+namespace Catch {
+
+    struct ITestCase;
+
+    struct TestCaseInfo {
+        enum SpecialProperties{
+            None = 0,
+            IsHidden = 1 << 1,
+            ShouldFail = 1 << 2,
+            MayFail = 1 << 3,
+            Throws = 1 << 4
+        };
+
+        TestCaseInfo(   std::string const& _name,
+                        std::string const& _className,
+                        std::string const& _description,
+                        std::set<std::string> const& _tags,
+                        SourceLineInfo const& _lineInfo );
+
+        TestCaseInfo( TestCaseInfo const& other );
+
+        friend void setTags( TestCaseInfo& testCaseInfo, std::set<std::string> const& tags );
+
+        bool isHidden() const;
+        bool throws() const;
+        bool okToFail() const;
+        bool expectedToFail() const;
+
+        std::string name;
+        std::string className;
+        std::string description;
+        std::set<std::string> tags;
+        std::set<std::string> lcaseTags;
+        std::string tagsAsString;
+        SourceLineInfo lineInfo;
+        SpecialProperties properties;
+    };
+
+    class TestCase : public TestCaseInfo {
+    public:
+
+        TestCase( ITestCase* testCase, TestCaseInfo const& info );
+        TestCase( TestCase const& other );
+
+        TestCase withName( std::string const& _newName ) const;
+
+        void invoke() const;
+
+        TestCaseInfo const& getTestCaseInfo() const;
+
+        void swap( TestCase& other );
+        bool operator == ( TestCase const& other ) const;
+        bool operator < ( TestCase const& other ) const;
+        TestCase& operator = ( TestCase const& other );
+
+    private:
+        Ptr<ITestCase> test;
+    };
+
+    TestCase makeTestCase(  ITestCase* testCase,
+                            std::string const& className,
+                            std::string const& name,
+                            std::string const& description,
+                            SourceLineInfo const& lineInfo );
 }
 
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
 
 #ifdef __OBJC__
 // #included from: internal/catch_objc.hpp
-
-#import <Foundation/Foundation.h>
+#define TWOBLUECUBES_CATCH_OBJC_HPP_INCLUDED
+
 #import <objc/runtime.h>
 
 #include <string>
@@ -2042,38 +2895,6 @@
 // in catch.hpp first to make sure they are included by the single
 // header for non obj-usage
 
-#ifdef __has_feature
-#define CATCH_ARC_ENABLED __has_feature(objc_arc)
-#else
-#define CATCH_ARC_ENABLED 0
-#endif
-
-void arcSafeRelease( NSObject* obj );
-id performOptionalSelector( id obj, SEL sel );
-
-#if !CATCH_ARC_ENABLED
-    inline void arcSafeRelease( NSObject* obj ) {
-        [obj release];
-    }
-    inline id performOptionalSelector( id obj, SEL sel ) {
-        if( [obj respondsToSelector: sel] )
-            return [obj performSelector: sel];
-        return nil;
-    }
-    #define CATCH_UNSAFE_UNRETAINED
-#else
-    inline void arcSafeRelease( NSObject* ){}
-    inline id performOptionalSelector( id obj, SEL sel ) {
-    #pragma clang diagnostic push
-    #pragma clang diagnostic ignored "-Warc-performSelector-leaks"
-        if( [obj respondsToSelector: sel] )
-            return [obj performSelector: sel];
-    #pragma clang diagnostic pop
-        return nil;
-    }
-    #define CATCH_UNSAFE_UNRETAINED __unsafe_unretained
-#endif
-
 ///////////////////////////////////////////////////////////////////////////////
 // This protocol is really only here for (self) documenting purposes, since
 // all its methods are optional.
@@ -2088,7 +2909,7 @@
 
 namespace Catch {
 
-    class OcMethod : public ITestCase {
+    class OcMethod : public SharedImpl<ITestCase> {
 
     public:
         OcMethod( Class cls, SEL sel ) : m_cls( cls ), m_sel( sel ) {}
@@ -2102,35 +2923,18 @@
 
             arcSafeRelease( obj );
         }
-
-        virtual ITestCase* clone() const {
-            return new OcMethod( m_cls, m_sel );
-        }
-
-        virtual bool operator == ( const ITestCase& other ) const {
-            const OcMethod* ocmOther = dynamic_cast<const OcMethod*> ( &other );
-            return ocmOther && ocmOther->m_sel == m_sel;
-        }
-
-        virtual bool operator < ( const ITestCase& other ) const {
-            const OcMethod* ocmOther = dynamic_cast<const OcMethod*> ( &other );
-            return ocmOther && ocmOther->m_sel < m_sel;
-        }
-
     private:
+        virtual ~OcMethod() {}
+
         Class m_cls;
         SEL m_sel;
     };
 
     namespace Detail{
 
-        inline bool startsWith( const std::string& str, const std::string& sub ) {
-            return str.length() > sub.length() && str.substr( 0, sub.length() ) == sub;
-        }
-
         inline std::string getAnnotation(   Class cls,
-                                            const std::string& annotationName,
-                                            const std::string& testCaseName ) {
+                                            std::string const& annotationName,
+                                            std::string const& testCaseName ) {
             NSString* selStr = [[NSString alloc] initWithFormat:@"Catch_%s_%s", annotationName.c_str(), testCaseName.c_str()];
             SEL sel = NSSelectorFromString( selStr );
             arcSafeRelease( selStr );
@@ -2143,7 +2947,7 @@
 
     inline size_t registerTestMethods() {
         size_t noTestMethods = 0;
-        int noClasses = objc_getClassList( NULL, 0 );
+        int noClasses = objc_getClassList( CATCH_NULL, 0 );
 
         Class* classes = (CATCH_UNSAFE_UNRETAINED Class *)malloc( sizeof(Class) * noClasses);
         objc_getClassList( classes, noClasses );
@@ -2156,12 +2960,13 @@
                 for( u_int m = 0; m < count ; m++ ) {
                     SEL selector = method_getName(methods[m]);
                     std::string methodName = sel_getName(selector);
-                    if( Detail::startsWith( methodName, "Catch_TestCase_" ) ) {
+                    if( startsWith( methodName, "Catch_TestCase_" ) ) {
                         std::string testCaseName = methodName.substr( 15 );
                         std::string name = Detail::getAnnotation( cls, "Name", testCaseName );
                         std::string desc = Detail::getAnnotation( cls, "Description", testCaseName );
-
-                        getCurrentContext().getTestCaseRegistry().registerTest( TestCaseInfo( new OcMethod( cls, selector ), name.c_str(), desc.c_str(), SourceLineInfo() ) );
+                        const char* className = class_getName( cls );
+
+                        getMutableRegistryHub().registerTest( makeTestCase( new OcMethod( cls, selector ), className, name.c_str(), desc.c_str(), SourceLineInfo() ) );
                         noTestMethods++;
                     }
                 }
@@ -2171,16 +2976,14 @@
         return noTestMethods;
     }
 
-    inline std::string toString( NSString* const& nsstring ) {
-        return std::string( "@\"" ) + [nsstring UTF8String] + "\"";
-    }
-
     namespace Matchers {
         namespace Impl {
         namespace NSStringMatchers {
 
-            struct StringHolder {
+            template<typename MatcherT>
+            struct StringHolder : MatcherImpl<MatcherT, NSString*>{
                 StringHolder( NSString* substr ) : m_substr( [substr copy] ){}
+                StringHolder( StringHolder const& other ) : m_substr( [other.m_substr copy] ){}
                 StringHolder() {
                     arcSafeRelease( m_substr );
                 }
@@ -2188,54 +2991,54 @@
                 NSString* m_substr;
             };
 
-            struct Equals : StringHolder {
+            struct Equals : StringHolder<Equals> {
                 Equals( NSString* substr ) : StringHolder( substr ){}
 
-                bool operator()( NSString* str ) const {
-                    return [str isEqualToString:m_substr];
+                virtual bool match( ExpressionType const& str ) const {
+                    return  (str != nil || m_substr == nil ) &&
+                            [str isEqualToString:m_substr];
                 }
 
-                friend std::ostream& operator<<( std::ostream& os, const Equals& matcher ) {
-                    os << "equals string: " << Catch::toString( matcher.m_substr );
-                    return os;
+                virtual std::string toString() const {
+                    return "equals string: " + Catch::toString( m_substr );
                 }
             };
 
-            struct Contains : StringHolder {
+            struct Contains : StringHolder<Contains> {
                 Contains( NSString* substr ) : StringHolder( substr ){}
 
-                bool operator()( NSString* str ) const {
-                    return [str rangeOfString:m_substr].location != NSNotFound;
+                virtual bool match( ExpressionType const& str ) const {
+                    return  (str != nil || m_substr == nil ) &&
+                            [str rangeOfString:m_substr].location != NSNotFound;
                 }
 
-                friend std::ostream& operator<<( std::ostream& os, const Contains& matcher ) {
-                    os << "contains: " << Catch::toString( matcher.m_substr );
-                    return os;
+                virtual std::string toString() const {
+                    return "contains string: " + Catch::toString( m_substr );
                 }
             };
 
-            struct StartsWith : StringHolder {
+            struct StartsWith : StringHolder<StartsWith> {
                 StartsWith( NSString* substr ) : StringHolder( substr ){}
 
-                bool operator()( NSString* str ) const {
-                    return [str rangeOfString:m_substr].location == 0;
+                virtual bool match( ExpressionType const& str ) const {
+                    return  (str != nil || m_substr == nil ) &&
+                            [str rangeOfString:m_substr].location == 0;
                 }
 
-                friend std::ostream& operator<<( std::ostream& os, const StartsWith& matcher ) {
-                    os << "starts with: " << Catch::toString( matcher.m_substr );
-                    return os;
+                virtual std::string toString() const {
+                    return "starts with: " + Catch::toString( m_substr );
                 }
             };
-            struct EndsWith : StringHolder {
+            struct EndsWith : StringHolder<EndsWith> {
                 EndsWith( NSString* substr ) : StringHolder( substr ){}
 
-                bool operator()( NSString* str ) const {
-                    return [str rangeOfString:m_substr].location == [str length] - [m_substr length];
+                virtual bool match( ExpressionType const& str ) const {
+                    return  (str != nil || m_substr == nil ) &&
+                            [str rangeOfString:m_substr].location == [str length] - [m_substr length];
                 }
 
-                friend std::ostream& operator<<( std::ostream& os, const EndsWith& matcher ) {
-                    os << "ends with: " << Catch::toString( matcher.m_substr );
-                    return os;
+                virtual std::string toString() const {
+                    return "ends with: " + Catch::toString( m_substr );
                 }
             };
 
@@ -2274,504 +3077,2796 @@
 
 #endif
 
-#if defined( CATCH_CONFIG_MAIN ) || defined( CATCH_CONFIG_RUNNER )
-// #included from: catch_runner.hpp
-
-// #included from: internal/catch_context_impl.hpp
-// #included from: catch_test_case_registry_impl.hpp
-
+#ifdef CATCH_IMPL
+// #included from: internal/catch_impl.hpp
+#define TWOBLUECUBES_CATCH_IMPL_HPP_INCLUDED
+
+// Collect all the implementation files together here
+// These are the equivalent of what would usually be cpp files
+
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wweak-vtables"
+#endif
+
+// #included from: ../catch_session.hpp
+#define TWOBLUECUBES_CATCH_RUNNER_HPP_INCLUDED
+
+// #included from: internal/catch_commandline.hpp
+#define TWOBLUECUBES_CATCH_COMMANDLINE_HPP_INCLUDED
+
+// #included from: catch_config.hpp
+#define TWOBLUECUBES_CATCH_CONFIG_HPP_INCLUDED
+
+// #included from: catch_test_spec_parser.hpp
+#define TWOBLUECUBES_CATCH_TEST_SPEC_PARSER_HPP_INCLUDED
+
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wpadded"
+#endif
+
+// #included from: catch_test_spec.hpp
+#define TWOBLUECUBES_CATCH_TEST_SPEC_HPP_INCLUDED
+
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wpadded"
+#endif
+
+// #included from: catch_wildcard_pattern.hpp
+#define TWOBLUECUBES_CATCH_WILDCARD_PATTERN_HPP_INCLUDED
+
+namespace Catch
+{
+    class WildcardPattern {
+        enum WildcardPosition {
+            NoWildcard = 0,
+            WildcardAtStart = 1,
+            WildcardAtEnd = 2,
+            WildcardAtBothEnds = WildcardAtStart | WildcardAtEnd
+        };
+
+    public:
+
+        WildcardPattern( std::string const& pattern, CaseSensitive::Choice caseSensitivity )
+        :   m_caseSensitivity( caseSensitivity ),
+            m_wildcard( NoWildcard ),
+            m_pattern( adjustCase( pattern ) )
+        {
+            if( startsWith( m_pattern, "*" ) ) {
+                m_pattern = m_pattern.substr( 1 );
+                m_wildcard = WildcardAtStart;
+            }
+            if( endsWith( m_pattern, "*" ) ) {
+                m_pattern = m_pattern.substr( 0, m_pattern.size()-1 );
+                m_wildcard = static_cast<WildcardPosition>( m_wildcard | WildcardAtEnd );
+            }
+        }
+        virtual ~WildcardPattern();
+        virtual bool matches( std::string const& str ) const {
+            switch( m_wildcard ) {
+                case NoWildcard:
+                    return m_pattern == adjustCase( str );
+                case WildcardAtStart:
+                    return endsWith( adjustCase( str ), m_pattern );
+                case WildcardAtEnd:
+                    return startsWith( adjustCase( str ), m_pattern );
+                case WildcardAtBothEnds:
+                    return contains( adjustCase( str ), m_pattern );
+            }
+
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunreachable-code"
+#endif
+            throw std::logic_error( "Unknown enum" );
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+        }
+    private:
+        std::string adjustCase( std::string const& str ) const {
+            return m_caseSensitivity == CaseSensitive::No ? toLower( str ) : str;
+        }
+        CaseSensitive::Choice m_caseSensitivity;
+        WildcardPosition m_wildcard;
+        std::string m_pattern;
+    };
+}
+
+#include <string>
 #include <vector>
-#include <set>
-#include <sstream>
-#include <iostream>
 
 namespace Catch {
 
-    class TestRegistry : public ITestCaseRegistry {
+    class TestSpec {
+        struct Pattern : SharedImpl<> {
+            virtual ~Pattern();
+            virtual bool matches( TestCaseInfo const& testCase ) const = 0;
+        };
+        class NamePattern : public Pattern {
+        public:
+            NamePattern( std::string const& name )
+            : m_wildcardPattern( toLower( name ), CaseSensitive::No )
+            {}
+            virtual ~NamePattern();
+            virtual bool matches( TestCaseInfo const& testCase ) const {
+                return m_wildcardPattern.matches( toLower( testCase.name ) );
+            }
+        private:
+            WildcardPattern m_wildcardPattern;
+        };
+
+        class TagPattern : public Pattern {
+        public:
+            TagPattern( std::string const& tag ) : m_tag( toLower( tag ) ) {}
+            virtual ~TagPattern();
+            virtual bool matches( TestCaseInfo const& testCase ) const {
+                return testCase.lcaseTags.find( m_tag ) != testCase.lcaseTags.end();
+            }
+        private:
+            std::string m_tag;
+        };
+
+        class ExcludedPattern : public Pattern {
+        public:
+            ExcludedPattern( Ptr<Pattern> const& underlyingPattern ) : m_underlyingPattern( underlyingPattern ) {}
+            virtual ~ExcludedPattern();
+            virtual bool matches( TestCaseInfo const& testCase ) const { return !m_underlyingPattern->matches( testCase ); }
+        private:
+            Ptr<Pattern> m_underlyingPattern;
+        };
+
+        struct Filter {
+            std::vector<Ptr<Pattern> > m_patterns;
+
+            bool matches( TestCaseInfo const& testCase ) const {
+                // All patterns in a filter must match for the filter to be a match
+                for( std::vector<Ptr<Pattern> >::const_iterator it = m_patterns.begin(), itEnd = m_patterns.end(); it != itEnd; ++it )
+                    if( !(*it)->matches( testCase ) )
+                        return false;
+                    return true;
+            }
+        };
+
     public:
-        TestRegistry() : m_unnamedCount( 0 ) {}
-
-        virtual void registerTest( const TestCaseInfo& testInfo ) {
-            if( testInfo.getName() == "" ) {
-                std::ostringstream oss;
-                oss << testInfo.getName() << "unnamed/" << ++m_unnamedCount;
-                return registerTest( TestCaseInfo( testInfo, oss.str() ) );
-            }
-
-            if( m_functions.find( testInfo ) == m_functions.end() ) {
-                m_functions.insert( testInfo );
-                m_functionsInOrder.push_back( testInfo );
-            }
-            else {
-                const TestCaseInfo& prev = *m_functions.find( testInfo );
-                std::cerr   << "error: TEST_CASE( \"" << testInfo.getName() << "\" ) already defined.\n"
-                            << "\tFirst seen at " << SourceLineInfo( prev.getLineInfo() ) << "\n"
-                            << "\tRedefined at " << SourceLineInfo( testInfo.getLineInfo() ) << std::endl;
-                exit(1);
-            }
-        }
-
-        virtual const std::vector<TestCaseInfo>& getAllTests() const {
-            return m_functionsInOrder;
-        }
-
-        virtual std::vector<TestCaseInfo> getMatchingTestCases( const std::string& rawTestSpec ) {
-            TestSpec testSpec( rawTestSpec );
-
-            std::vector<TestCaseInfo> testList;
-            std::vector<TestCaseInfo>::const_iterator it = m_functionsInOrder.begin();
-            std::vector<TestCaseInfo>::const_iterator itEnd = m_functionsInOrder.end();
-            for(; it != itEnd; ++it ) {
-                if( testSpec.matches( it->getName() ) ) {
-                    testList.push_back( *it );
-                }
-            }
-            return testList;
+        bool hasFilters() const {
+            return !m_filters.empty();
+        }
+        bool matches( TestCaseInfo const& testCase ) const {
+            // A TestSpec matches if any filter matches
+            for( std::vector<Filter>::const_iterator it = m_filters.begin(), itEnd = m_filters.end(); it != itEnd; ++it )
+                if( it->matches( testCase ) )
+                    return true;
+            return false;
         }
 
     private:
-
-        std::set<TestCaseInfo> m_functions;
-        std::vector<TestCaseInfo> m_functionsInOrder;
-        size_t m_unnamedCount;
-    };
-
-    ///////////////////////////////////////////////////////////////////////////
-
-    class FreeFunctionTestCase : public ITestCase {
+        std::vector<Filter> m_filters;
+
+        friend class TestSpecParser;
+    };
+}
+
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
+namespace Catch {
+
+    class TestSpecParser {
+        enum Mode{ None, Name, QuotedName, Tag };
+        Mode m_mode;
+        bool m_exclusion;
+        std::size_t m_start, m_pos;
+        std::string m_arg;
+        TestSpec::Filter m_currentFilter;
+        TestSpec m_testSpec;
+        ITagAliasRegistry const* m_tagAliases;
+
     public:
-
-        FreeFunctionTestCase( TestFunction fun ) : m_fun( fun ) {}
-
-        virtual void invoke() const {
-            m_fun();
-        }
-
-        virtual ITestCase* clone() const {
-            return new FreeFunctionTestCase( m_fun );
-        }
-
-        virtual bool operator == ( const ITestCase& other ) const {
-            const FreeFunctionTestCase* ffOther = dynamic_cast<const FreeFunctionTestCase*> ( &other );
-            return ffOther && m_fun == ffOther->m_fun;
-        }
-
-        virtual bool operator < ( const ITestCase& other ) const {
-            const FreeFunctionTestCase* ffOther = dynamic_cast<const FreeFunctionTestCase*> ( &other );
-            return ffOther && m_fun < ffOther->m_fun;
-        }
-
+        TestSpecParser( ITagAliasRegistry const& tagAliases ) : m_tagAliases( &tagAliases ) {}
+
+        TestSpecParser& parse( std::string const& arg ) {
+            m_mode = None;
+            m_exclusion = false;
+            m_start = std::string::npos;
+            m_arg = m_tagAliases->expandAliases( arg );
+            for( m_pos = 0; m_pos < m_arg.size(); ++m_pos )
+                visitChar( m_arg[m_pos] );
+            if( m_mode == Name )
+                addPattern<TestSpec::NamePattern>();
+            return *this;
+        }
+        TestSpec testSpec() {
+            addFilter();
+            return m_testSpec;
+        }
     private:
-        TestFunction m_fun;
-    };
-
-    ///////////////////////////////////////////////////////////////////////////
-
-    AutoReg::AutoReg(   TestFunction function,
-                        const char* name,
-                        const char* description,
-                        const SourceLineInfo& lineInfo ) {
-        registerTestCase( new FreeFunctionTestCase( function ), name, description, lineInfo );
-    }
-
-    AutoReg::~AutoReg() {}
-
-    void AutoReg::registerTestCase( ITestCase* testCase,
-                                    const char* name,
-                                    const char* description,
-                                    const SourceLineInfo& lineInfo ) {
-        getCurrentContext().getTestCaseRegistry().registerTest( TestCaseInfo( testCase, name, description, lineInfo ) );
-    }
-
-} // end namespace Catch
-
-// #included from: catch_runner_impl.hpp
-
-// #included from: catch_config.hpp
+        void visitChar( char c ) {
+            if( m_mode == None ) {
+                switch( c ) {
+                case ' ': return;
+                case '~': m_exclusion = true; return;
+                case '[': return startNewMode( Tag, ++m_pos );
+                case '"': return startNewMode( QuotedName, ++m_pos );
+                default: startNewMode( Name, m_pos ); break;
+                }
+            }
+            if( m_mode == Name ) {
+                if( c == ',' ) {
+                    addPattern<TestSpec::NamePattern>();
+                    addFilter();
+                }
+                else if( c == '[' ) {
+                    if( subString() == "exclude:" )
+                        m_exclusion = true;
+                    else
+                        addPattern<TestSpec::NamePattern>();
+                    startNewMode( Tag, ++m_pos );
+                }
+            }
+            else if( m_mode == QuotedName && c == '"' )
+                addPattern<TestSpec::NamePattern>();
+            else if( m_mode == Tag && c == ']' )
+                addPattern<TestSpec::TagPattern>();
+        }
+        void startNewMode( Mode mode, std::size_t start ) {
+            m_mode = mode;
+            m_start = start;
+        }
+        std::string subString() const { return m_arg.substr( m_start, m_pos - m_start ); }
+        template<typename T>
+        void addPattern() {
+            std::string token = subString();
+            if( startsWith( token, "exclude:" ) ) {
+                m_exclusion = true;
+                token = token.substr( 8 );
+            }
+            if( !token.empty() ) {
+                Ptr<TestSpec::Pattern> pattern = new T( token );
+                if( m_exclusion )
+                    pattern = new TestSpec::ExcludedPattern( pattern );
+                m_currentFilter.m_patterns.push_back( pattern );
+            }
+            m_exclusion = false;
+            m_mode = None;
+        }
+        void addFilter() {
+            if( !m_currentFilter.m_patterns.empty() ) {
+                m_testSpec.m_filters.push_back( m_currentFilter );
+                m_currentFilter = TestSpec::Filter();
+            }
+        }
+    };
+    inline TestSpec parseTestSpec( std::string const& arg ) {
+        return TestSpecParser( ITagAliasRegistry::get() ).parse( arg ).testSpec();
+    }
+
+} // namespace Catch
+
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
+// #included from: catch_interfaces_config.h
+#define TWOBLUECUBES_CATCH_INTERFACES_CONFIG_H_INCLUDED
+
+#include <iostream>
+#include <string>
+#include <vector>
+
+namespace Catch {
+
+    struct Verbosity { enum Level {
+        NoOutput = 0,
+        Quiet,
+        Normal
+    }; };
+
+    struct WarnAbout { enum What {
+        Nothing = 0x00,
+        NoAssertions = 0x01
+    }; };
+
+    struct ShowDurations { enum OrNot {
+        DefaultForReporter,
+        Always,
+        Never
+    }; };
+    struct RunTests { enum InWhatOrder {
+        InDeclarationOrder,
+        InLexicographicalOrder,
+        InRandomOrder
+    }; };
+    struct UseColour { enum YesOrNo {
+        Auto,
+        Yes,
+        No
+    }; };
+
+    class TestSpec;
+
+    struct IConfig : IShared {
+
+        virtual ~IConfig();
+
+        virtual bool allowThrows() const = 0;
+        virtual std::ostream& stream() const = 0;
+        virtual std::string name() const = 0;
+        virtual bool includeSuccessfulResults() const = 0;
+        virtual bool shouldDebugBreak() const = 0;
+        virtual bool warnAboutMissingAssertions() const = 0;
+        virtual int abortAfter() const = 0;
+        virtual bool showInvisibles() const = 0;
+        virtual ShowDurations::OrNot showDurations() const = 0;
+        virtual TestSpec const& testSpec() const = 0;
+        virtual RunTests::InWhatOrder runOrder() const = 0;
+        virtual unsigned int rngSeed() const = 0;
+        virtual UseColour::YesOrNo useColour() const = 0;
+    };
+}
+
+// #included from: catch_stream.h
+#define TWOBLUECUBES_CATCH_STREAM_H_INCLUDED
+
+// #included from: catch_streambuf.h
+#define TWOBLUECUBES_CATCH_STREAMBUF_H_INCLUDED
+
+#include <streambuf>
+
+namespace Catch {
+
+    class StreamBufBase : public std::streambuf {
+    public:
+        virtual ~StreamBufBase() CATCH_NOEXCEPT;
+    };
+}
+
+#include <streambuf>
+#include <ostream>
+#include <fstream>
+
+namespace Catch {
+
+    std::ostream& cout();
+    std::ostream& cerr();
+
+    struct IStream {
+        virtual ~IStream() CATCH_NOEXCEPT;
+        virtual std::ostream& stream() const = 0;
+    };
+
+    class FileStream : public IStream {
+        mutable std::ofstream m_ofs;
+    public:
+        FileStream( std::string const& filename );
+        virtual ~FileStream() CATCH_NOEXCEPT;
+    public: // IStream
+        virtual std::ostream& stream() const CATCH_OVERRIDE;
+    };
+
+    class CoutStream : public IStream {
+        mutable std::ostream m_os;
+    public:
+        CoutStream();
+        virtual ~CoutStream() CATCH_NOEXCEPT;
+
+    public: // IStream
+        virtual std::ostream& stream() const CATCH_OVERRIDE;
+    };
+
+    class DebugOutStream : public IStream {
+        CATCH_AUTO_PTR( StreamBufBase ) m_streamBuf;
+        mutable std::ostream m_os;
+    public:
+        DebugOutStream();
+        virtual ~DebugOutStream() CATCH_NOEXCEPT;
+
+    public: // IStream
+        virtual std::ostream& stream() const CATCH_OVERRIDE;
+    };
+}
 
 #include <memory>
 #include <vector>
 #include <string>
 #include <iostream>
+#include <ctime>
+
+#ifndef CATCH_CONFIG_CONSOLE_WIDTH
+#define CATCH_CONFIG_CONSOLE_WIDTH 80
+#endif
 
 namespace Catch {
 
-    struct Include { enum WhichResults {
-        FailedOnly,
-        SuccessfulResults
-    }; };
-
-    struct List{ enum What {
-        None = 0,
-
-        Reports = 1,
-        Tests = 2,
-        All = 3,
-
-        WhatMask = 0xf,
-
-        AsText = 0x10,
-        AsXml = 0x11,
-
-        AsMask = 0xf0
-    }; };
-
-    class Config : public IReporterConfig, public IConfig {
+    struct ConfigData {
+
+        ConfigData()
+        :   listTests( false ),
+            listTags( false ),
+            listReporters( false ),
+            listTestNamesOnly( false ),
+            showSuccessfulTests( false ),
+            shouldDebugBreak( false ),
+            noThrow( false ),
+            showHelp( false ),
+            showInvisibles( false ),
+            filenamesAsTags( false ),
+            abortAfter( -1 ),
+            rngSeed( 0 ),
+            verbosity( Verbosity::Normal ),
+            warnings( WarnAbout::Nothing ),
+            showDurations( ShowDurations::DefaultForReporter ),
+            runOrder( RunTests::InDeclarationOrder ),
+            useColour( UseColour::Auto )
+        {}
+
+        bool listTests;
+        bool listTags;
+        bool listReporters;
+        bool listTestNamesOnly;
+
+        bool showSuccessfulTests;
+        bool shouldDebugBreak;
+        bool noThrow;
+        bool showHelp;
+        bool showInvisibles;
+        bool filenamesAsTags;
+
+        int abortAfter;
+        unsigned int rngSeed;
+
+        Verbosity::Level verbosity;
+        WarnAbout::What warnings;
+        ShowDurations::OrNot showDurations;
+        RunTests::InWhatOrder runOrder;
+        UseColour::YesOrNo useColour;
+
+        std::string outputFilename;
+        std::string name;
+        std::string processName;
+
+        std::vector<std::string> reporterNames;
+        std::vector<std::string> testsOrTags;
+    };
+
+    class Config : public SharedImpl<IConfig> {
     private:
-        Config( const Config& other );
-        Config& operator = ( const Config& other );
+        Config( Config const& other );
+        Config& operator = ( Config const& other );
+        virtual void dummy();
     public:
 
         Config()
-        :   m_listSpec( List::None ),
-            m_shouldDebugBreak( false ),
-            m_showHelp( false ),
-            m_streambuf( NULL ),
-            m_os( std::cout.rdbuf() ),
-            m_includeWhichResults( Include::FailedOnly ),
-            m_cutoff( -1 ),
-            m_allowThrows( true )
+        {}
+
+        Config( ConfigData const& data )
+        :   m_data( data ),
+            m_stream( openStream() )
+        {
+            if( !data.testsOrTags.empty() ) {
+                TestSpecParser parser( ITagAliasRegistry::get() );
+                for( std::size_t i = 0; i < data.testsOrTags.size(); ++i )
+                    parser.parse( data.testsOrTags[i] );
+                m_testSpec = parser.testSpec();
+            }
+        }
+
+        virtual ~Config() {
+        }
+
+        std::string const& getFilename() const {
+            return m_data.outputFilename ;
+        }
+
+        bool listTests() const { return m_data.listTests; }
+        bool listTestNamesOnly() const { return m_data.listTestNamesOnly; }
+        bool listTags() const { return m_data.listTags; }
+        bool listReporters() const { return m_data.listReporters; }
+
+        std::string getProcessName() const { return m_data.processName; }
+
+        bool shouldDebugBreak() const { return m_data.shouldDebugBreak; }
+
+        std::vector<std::string> getReporterNames() const { return m_data.reporterNames; }
+
+        int abortAfter() const { return m_data.abortAfter; }
+
+        TestSpec const& testSpec() const { return m_testSpec; }
+
+        bool showHelp() const { return m_data.showHelp; }
+        bool showInvisibles() const { return m_data.showInvisibles; }
+
+        // IConfig interface
+        virtual bool allowThrows() const        { return !m_data.noThrow; }
+        virtual std::ostream& stream() const    { return m_stream->stream(); }
+        virtual std::string name() const        { return m_data.name.empty() ? m_data.processName : m_data.name; }
+        virtual bool includeSuccessfulResults() const   { return m_data.showSuccessfulTests; }
+        virtual bool warnAboutMissingAssertions() const { return m_data.warnings & WarnAbout::NoAssertions; }
+        virtual ShowDurations::OrNot showDurations() const { return m_data.showDurations; }
+        virtual RunTests::InWhatOrder runOrder() const  { return m_data.runOrder; }
+        virtual unsigned int rngSeed() const    { return m_data.rngSeed; }
+        virtual UseColour::YesOrNo useColour() const { return m_data.useColour; }
+
+    private:
+
+        IStream const* openStream() {
+            if( m_data.outputFilename.empty() )
+                return new CoutStream();
+            else if( m_data.outputFilename[0] == '%' ) {
+                if( m_data.outputFilename == "%debug" )
+                    return new DebugOutStream();
+                else
+                    throw std::domain_error( "Unrecognised stream: " + m_data.outputFilename );
+            }
+            else
+                return new FileStream( m_data.outputFilename );
+        }
+        ConfigData m_data;
+
+        CATCH_AUTO_PTR( IStream const ) m_stream;
+        TestSpec m_testSpec;
+    };
+
+} // end namespace Catch
+
+// #included from: catch_clara.h
+#define TWOBLUECUBES_CATCH_CLARA_H_INCLUDED
+
+// Use Catch's value for console width (store Clara's off to the side, if present)
+#ifdef CLARA_CONFIG_CONSOLE_WIDTH
+#define CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH CLARA_CONFIG_CONSOLE_WIDTH
+#undef CLARA_CONFIG_CONSOLE_WIDTH
+#endif
+#define CLARA_CONFIG_CONSOLE_WIDTH CATCH_CONFIG_CONSOLE_WIDTH
+
+// Declare Clara inside the Catch namespace
+#define STITCH_CLARA_OPEN_NAMESPACE namespace Catch {
+// #included from: ../external/clara.h
+
+// Version 0.0.2.4
+
+// Only use header guard if we are not using an outer namespace
+#if !defined(TWOBLUECUBES_CLARA_H_INCLUDED) || defined(STITCH_CLARA_OPEN_NAMESPACE)
+
+#ifndef STITCH_CLARA_OPEN_NAMESPACE
+#define TWOBLUECUBES_CLARA_H_INCLUDED
+#define STITCH_CLARA_OPEN_NAMESPACE
+#define STITCH_CLARA_CLOSE_NAMESPACE
+#else
+#define STITCH_CLARA_CLOSE_NAMESPACE }
+#endif
+
+#define STITCH_TBC_TEXT_FORMAT_OPEN_NAMESPACE STITCH_CLARA_OPEN_NAMESPACE
+
+// ----------- #included from tbc_text_format.h -----------
+
+// Only use header guard if we are not using an outer namespace
+#if !defined(TBC_TEXT_FORMAT_H_INCLUDED) || defined(STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE)
+#ifndef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE
+#define TBC_TEXT_FORMAT_H_INCLUDED
+#endif
+
+#include <string>
+#include <vector>
+#include <sstream>
+#include <algorithm>
+
+// Use optional outer namespace
+#ifdef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE
+namespace STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE {
+#endif
+
+namespace Tbc {
+
+#ifdef TBC_TEXT_FORMAT_CONSOLE_WIDTH
+    const unsigned int consoleWidth = TBC_TEXT_FORMAT_CONSOLE_WIDTH;
+#else
+    const unsigned int consoleWidth = 80;
+#endif
+
+    struct TextAttributes {
+        TextAttributes()
+        :   initialIndent( std::string::npos ),
+            indent( 0 ),
+            width( consoleWidth-1 ),
+            tabChar( '\t' )
         {}
 
-        ~Config() {
-            m_os.rdbuf( std::cout.rdbuf() );
-            delete m_streambuf;
-        }
-
-        void setReporter( const std::string& reporterName ) {
-            if( m_reporter.get() )
-                return setError( "Only one reporter may be specified" );
-            setReporter( getCurrentContext().getReporterRegistry().create( reporterName, *this ) );
-        }
-
-        void addTestSpec( const std::string& testSpec ) {
-            m_testSpecs.push_back( testSpec );
-        }
-
-        bool testsSpecified() const {
-            return !m_testSpecs.empty();
-        }
-
-        const std::vector<std::string>& getTestSpecs() const {
-            return m_testSpecs;
-        }
-
-        List::What getListSpec( void ) const {
-            return m_listSpec;
-        }
-
-        void setListSpec( List::What listSpec ) {
-            m_listSpec = listSpec;
-        }
-
-        void setFilename( const std::string& filename ) {
-            m_filename = filename;
-        }
-
-        const std::string& getFilename() const {
-            return m_filename;
-        }
-
-        const std::string& getMessage() const {
-            return m_message;
-        }
-
-        void setError( const std::string& errorMessage ) {
-            m_message = errorMessage;
-        }
-
-        void setReporter( IReporter* reporter ) {
-            m_reporter = reporter;
-        }
-
-        Ptr<IReporter> getReporter() {
-            if( !m_reporter.get() )
-                const_cast<Config*>( this )->setReporter( getCurrentContext().getReporterRegistry().create( "basic", *this ) );
-            return m_reporter;
-        }
-
-        List::What listWhat() const {
-            return static_cast<List::What>( m_listSpec & List::WhatMask );
-        }
-
-        List::What listAs() const {
-            return static_cast<List::What>( m_listSpec & List::AsMask );
-        }
-
-        void setIncludeWhichResults( Include::WhichResults includeWhichResults ) {
-            m_includeWhichResults = includeWhichResults;
-        }
-
-        void setShouldDebugBreak( bool shouldDebugBreakFlag ) {
-            m_shouldDebugBreak = shouldDebugBreakFlag;
-        }
-
-        void setName( const std::string& name ) {
-            m_name = name;
-        }
-
-        std::string getName() const {
-            return m_name;
-        }
-
-        bool shouldDebugBreak() const {
-            return m_shouldDebugBreak;
-        }
-
-        void setShowHelp( bool showHelpFlag ) {
-            m_showHelp = showHelpFlag;
-        }
-
-        bool showHelp() const {
-            return m_showHelp;
-        }
-
-        virtual std::ostream& stream() const {
-            return m_os;
-        }
-
-        void setStreamBuf( std::streambuf* buf ) {
-            m_os.rdbuf( buf ? buf : std::cout.rdbuf() );
-        }
-
-        void useStream( const std::string& streamName ) {
-            std::streambuf* newBuf = Context::createStreamBuf( streamName );
-            setStreamBuf( newBuf );
-            delete m_streambuf;
-            m_streambuf = newBuf;
-        }
-
-        virtual bool includeSuccessfulResults() const {
-            return m_includeWhichResults == Include::SuccessfulResults;
-        }
-
-        int getCutoff() const {
-            return m_cutoff;
-        }
-
-        void setCutoff( int cutoff ) {
-            m_cutoff = cutoff;
-        }
-
-        void setAllowThrows( bool allowThrows ) {
-            m_allowThrows = allowThrows;
-        }
-
-        virtual bool allowThrows() const {
-            return m_allowThrows;
+        TextAttributes& setInitialIndent( std::size_t _value )  { initialIndent = _value; return *this; }
+        TextAttributes& setIndent( std::size_t _value )         { indent = _value; return *this; }
+        TextAttributes& setWidth( std::size_t _value )          { width = _value; return *this; }
+        TextAttributes& setTabChar( char _value )               { tabChar = _value; return *this; }
+
+        std::size_t initialIndent;  // indent of first line, or npos
+        std::size_t indent;         // indent of subsequent lines, or all if initialIndent is npos
+        std::size_t width;          // maximum width of text, including indent. Longer text will wrap
+        char tabChar;               // If this char is seen the indent is changed to current pos
+    };
+
+    class Text {
+    public:
+        Text( std::string const& _str, TextAttributes const& _attr = TextAttributes() )
+        : attr( _attr )
+        {
+            std::string wrappableChars = " [({.,/|\\-";
+            std::size_t indent = _attr.initialIndent != std::string::npos
+                ? _attr.initialIndent
+                : _attr.indent;
+            std::string remainder = _str;
+
+            while( !remainder.empty() ) {
+                if( lines.size() >= 1000 ) {
+                    lines.push_back( "... message truncated due to excessive size" );
+                    return;
+                }
+                std::size_t tabPos = std::string::npos;
+                std::size_t width = (std::min)( remainder.size(), _attr.width - indent );
+                std::size_t pos = remainder.find_first_of( '\n' );
+                if( pos <= width ) {
+                    width = pos;
+                }
+                pos = remainder.find_last_of( _attr.tabChar, width );
+                if( pos != std::string::npos ) {
+                    tabPos = pos;
+                    if( remainder[width] == '\n' )
+                        width--;
+                    remainder = remainder.substr( 0, tabPos ) + remainder.substr( tabPos+1 );
+                }
+
+                if( width == remainder.size() ) {
+                    spliceLine( indent, remainder, width );
+                }
+                else if( remainder[width] == '\n' ) {
+                    spliceLine( indent, remainder, width );
+                    if( width <= 1 || remainder.size() != 1 )
+                        remainder = remainder.substr( 1 );
+                    indent = _attr.indent;
+                }
+                else {
+                    pos = remainder.find_last_of( wrappableChars, width );
+                    if( pos != std::string::npos && pos > 0 ) {
+                        spliceLine( indent, remainder, pos );
+                        if( remainder[0] == ' ' )
+                            remainder = remainder.substr( 1 );
+                    }
+                    else {
+                        spliceLine( indent, remainder, width-1 );
+                        lines.back() += "-";
+                    }
+                    if( lines.size() == 1 )
+                        indent = _attr.indent;
+                    if( tabPos != std::string::npos )
+                        indent += tabPos;
+                }
+            }
+        }
+
+        void spliceLine( std::size_t _indent, std::string& _remainder, std::size_t _pos ) {
+            lines.push_back( std::string( _indent, ' ' ) + _remainder.substr( 0, _pos ) );
+            _remainder = _remainder.substr( _pos );
+        }
+
+        typedef std::vector<std::string>::const_iterator const_iterator;
+
+        const_iterator begin() const { return lines.begin(); }
+        const_iterator end() const { return lines.end(); }
+        std::string const& last() const { return lines.back(); }
+        std::size_t size() const { return lines.size(); }
+        std::string const& operator[]( std::size_t _index ) const { return lines[_index]; }
+        std::string toString() const {
+            std::ostringstream oss;
+            oss << *this;
+            return oss.str();
+        }
+
+        inline friend std::ostream& operator << ( std::ostream& _stream, Text const& _text ) {
+            for( Text::const_iterator it = _text.begin(), itEnd = _text.end();
+                it != itEnd; ++it ) {
+                if( it != _text.begin() )
+                    _stream << "\n";
+                _stream << *it;
+            }
+            return _stream;
+        }
+
+    private:
+        std::string str;
+        TextAttributes attr;
+        std::vector<std::string> lines;
+    };
+
+} // end namespace Tbc
+
+#ifdef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE
+} // end outer namespace
+#endif
+
+#endif // TBC_TEXT_FORMAT_H_INCLUDED
+
+// ----------- end of #include from tbc_text_format.h -----------
+// ........... back in clara.h
+
+#undef STITCH_TBC_TEXT_FORMAT_OPEN_NAMESPACE
+
+// ----------- #included from clara_compilers.h -----------
+
+#ifndef TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED
+#define TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED
+
+// Detect a number of compiler features - mostly C++11/14 conformance - by compiler
+// The following features are defined:
+//
+// CLARA_CONFIG_CPP11_NULLPTR : is nullptr supported?
+// CLARA_CONFIG_CPP11_NOEXCEPT : is noexcept supported?
+// CLARA_CONFIG_CPP11_GENERATED_METHODS : The delete and default keywords for compiler generated methods
+// CLARA_CONFIG_CPP11_OVERRIDE : is override supported?
+// CLARA_CONFIG_CPP11_UNIQUE_PTR : is unique_ptr supported (otherwise use auto_ptr)
+
+// CLARA_CONFIG_CPP11_OR_GREATER : Is C++11 supported?
+
+// CLARA_CONFIG_VARIADIC_MACROS : are variadic macros supported?
+
+// In general each macro has a _NO_<feature name> form
+// (e.g. CLARA_CONFIG_CPP11_NO_NULLPTR) which disables the feature.
+// Many features, at point of detection, define an _INTERNAL_ macro, so they
+// can be combined, en-mass, with the _NO_ forms later.
+
+// All the C++11 features can be disabled with CLARA_CONFIG_NO_CPP11
+
+#ifdef __clang__
+
+#if __has_feature(cxx_nullptr)
+#define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR
+#endif
+
+#if __has_feature(cxx_noexcept)
+#define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT
+#endif
+
+#endif // __clang__
+
+////////////////////////////////////////////////////////////////////////////////
+// GCC
+#ifdef __GNUC__
+
+#if __GNUC__ == 4 && __GNUC_MINOR__ >= 6 && defined(__GXX_EXPERIMENTAL_CXX0X__)
+#define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR
+#endif
+
+// - otherwise more recent versions define __cplusplus >= 201103L
+// and will get picked up below
+
+#endif // __GNUC__
+
+////////////////////////////////////////////////////////////////////////////////
+// Visual C++
+#ifdef _MSC_VER
+
+#if (_MSC_VER >= 1600)
+#define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR
+#define CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR
+#endif
+
+#if (_MSC_VER >= 1900 ) // (VC++ 13 (VS2015))
+#define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT
+#define CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS
+#endif
+
+#endif // _MSC_VER
+
+////////////////////////////////////////////////////////////////////////////////
+// C++ language feature support
+
+// catch all support for C++11
+#if defined(__cplusplus) && __cplusplus >= 201103L
+
+#define CLARA_CPP11_OR_GREATER
+
+#if !defined(CLARA_INTERNAL_CONFIG_CPP11_NULLPTR)
+#define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR
+#endif
+
+#ifndef CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT
+#define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT
+#endif
+
+#ifndef CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS
+#define CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS
+#endif
+
+#if !defined(CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE)
+#define CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE
+#endif
+#if !defined(CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR)
+#define CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR
+#endif
+
+#endif // __cplusplus >= 201103L
+
+// Now set the actual defines based on the above + anything the user has configured
+#if defined(CLARA_INTERNAL_CONFIG_CPP11_NULLPTR) && !defined(CLARA_CONFIG_CPP11_NO_NULLPTR) && !defined(CLARA_CONFIG_CPP11_NULLPTR) && !defined(CLARA_CONFIG_NO_CPP11)
+#define CLARA_CONFIG_CPP11_NULLPTR
+#endif
+#if defined(CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_CONFIG_CPP11_NO_NOEXCEPT) && !defined(CLARA_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_CONFIG_NO_CPP11)
+#define CLARA_CONFIG_CPP11_NOEXCEPT
+#endif
+#if defined(CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS) && !defined(CLARA_CONFIG_CPP11_NO_GENERATED_METHODS) && !defined(CLARA_CONFIG_CPP11_GENERATED_METHODS) && !defined(CLARA_CONFIG_NO_CPP11)
+#define CLARA_CONFIG_CPP11_GENERATED_METHODS
+#endif
+#if defined(CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE) && !defined(CLARA_CONFIG_NO_OVERRIDE) && !defined(CLARA_CONFIG_CPP11_OVERRIDE) && !defined(CLARA_CONFIG_NO_CPP11)
+#define CLARA_CONFIG_CPP11_OVERRIDE
+#endif
+#if defined(CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) && !defined(CLARA_CONFIG_NO_UNIQUE_PTR) && !defined(CLARA_CONFIG_CPP11_UNIQUE_PTR) && !defined(CLARA_CONFIG_NO_CPP11)
+#define CLARA_CONFIG_CPP11_UNIQUE_PTR
+#endif
+
+// noexcept support:
+#if defined(CLARA_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_NOEXCEPT)
+#define CLARA_NOEXCEPT noexcept
+#  define CLARA_NOEXCEPT_IS(x) noexcept(x)
+#else
+#define CLARA_NOEXCEPT throw()
+#  define CLARA_NOEXCEPT_IS(x)
+#endif
+
+// nullptr support
+#ifdef CLARA_CONFIG_CPP11_NULLPTR
+#define CLARA_NULL nullptr
+#else
+#define CLARA_NULL NULL
+#endif
+
+// override support
+#ifdef CLARA_CONFIG_CPP11_OVERRIDE
+#define CLARA_OVERRIDE override
+#else
+#define CLARA_OVERRIDE
+#endif
+
+// unique_ptr support
+#ifdef CLARA_CONFIG_CPP11_UNIQUE_PTR
+#   define CLARA_AUTO_PTR( T ) std::unique_ptr<T>
+#else
+#   define CLARA_AUTO_PTR( T ) std::auto_ptr<T>
+#endif
+
+#endif // TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED
+
+// ----------- end of #include from clara_compilers.h -----------
+// ........... back in clara.h
+
+#include <map>
+#include <stdexcept>
+#include <memory>
+
+#if defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER)
+#define CLARA_PLATFORM_WINDOWS
+#endif
+
+// Use optional outer namespace
+#ifdef STITCH_CLARA_OPEN_NAMESPACE
+STITCH_CLARA_OPEN_NAMESPACE
+#endif
+
+namespace Clara {
+
+    struct UnpositionalTag {};
+
+    extern UnpositionalTag _;
+
+#ifdef CLARA_CONFIG_MAIN
+    UnpositionalTag _;
+#endif
+
+    namespace Detail {
+
+#ifdef CLARA_CONSOLE_WIDTH
+    const unsigned int consoleWidth = CLARA_CONFIG_CONSOLE_WIDTH;
+#else
+    const unsigned int consoleWidth = 80;
+#endif
+
+        using namespace Tbc;
+
+        inline bool startsWith( std::string const& str, std::string const& prefix ) {
+            return str.size() >= prefix.size() && str.substr( 0, prefix.size() ) == prefix;
+        }
+
+        template<typename T> struct RemoveConstRef{ typedef T type; };
+        template<typename T> struct RemoveConstRef<T&>{ typedef T type; };
+        template<typename T> struct RemoveConstRef<T const&>{ typedef T type; };
+        template<typename T> struct RemoveConstRef<T const>{ typedef T type; };
+
+        template<typename T>    struct IsBool       { static const bool value = false; };
+        template<>              struct IsBool<bool> { static const bool value = true; };
+
+        template<typename T>
+        void convertInto( std::string const& _source, T& _dest ) {
+            std::stringstream ss;
+            ss << _source;
+            ss >> _dest;
+            if( ss.fail() )
+                throw std::runtime_error( "Unable to convert " + _source + " to destination type" );
+        }
+        inline void convertInto( std::string const& _source, std::string& _dest ) {
+            _dest = _source;
+        }
+        inline void convertInto( std::string const& _source, bool& _dest ) {
+            std::string sourceLC = _source;
+            std::transform( sourceLC.begin(), sourceLC.end(), sourceLC.begin(), ::tolower );
+            if( sourceLC == "y" || sourceLC == "1" || sourceLC == "true" || sourceLC == "yes" || sourceLC == "on" )
+                _dest = true;
+            else if( sourceLC == "n" || sourceLC == "0" || sourceLC == "false" || sourceLC == "no" || sourceLC == "off" )
+                _dest = false;
+            else
+                throw std::runtime_error( "Expected a boolean value but did not recognise:\n  '" + _source + "'" );
+        }
+
+        template<typename ConfigT>
+        struct IArgFunction {
+            virtual ~IArgFunction() {}
+#ifdef CLARA_CONFIG_CPP11_GENERATED_METHODS
+            IArgFunction()                      = default;
+            IArgFunction( IArgFunction const& ) = default;
+#endif
+            virtual void set( ConfigT& config, std::string const& value ) const = 0;
+            virtual bool takesArg() const = 0;
+            virtual IArgFunction* clone() const = 0;
+        };
+
+        template<typename ConfigT>
+        class BoundArgFunction {
+        public:
+            BoundArgFunction() : functionObj( CLARA_NULL ) {}
+            BoundArgFunction( IArgFunction<ConfigT>* _functionObj ) : functionObj( _functionObj ) {}
+            BoundArgFunction( BoundArgFunction const& other ) : functionObj( other.functionObj ? other.functionObj->clone() : CLARA_NULL ) {}
+            BoundArgFunction& operator = ( BoundArgFunction const& other ) {
+                IArgFunction<ConfigT>* newFunctionObj = other.functionObj ? other.functionObj->clone() : CLARA_NULL;
+                delete functionObj;
+                functionObj = newFunctionObj;
+                return *this;
+            }
+            ~BoundArgFunction() { delete functionObj; }
+
+            void set( ConfigT& config, std::string const& value ) const {
+                functionObj->set( config, value );
+            }
+            bool takesArg() const { return functionObj->takesArg(); }
+
+            bool isSet() const {
+                return functionObj != CLARA_NULL;
+            }
+        private:
+            IArgFunction<ConfigT>* functionObj;
+        };
+
+        template<typename C>
+        struct NullBinder : IArgFunction<C>{
+            virtual void set( C&, std::string const& ) const {}
+            virtual bool takesArg() const { return true; }
+            virtual IArgFunction<C>* clone() const { return new NullBinder( *this ); }
+        };
+
+        template<typename C, typename M>
+        struct BoundDataMember : IArgFunction<C>{
+            BoundDataMember( M C::* _member ) : member( _member ) {}
+            virtual void set( C& p, std::string const& stringValue ) const {
+                convertInto( stringValue, p.*member );
+            }
+            virtual bool takesArg() const { return !IsBool<M>::value; }
+            virtual IArgFunction<C>* clone() const { return new BoundDataMember( *this ); }
+            M C::* member;
+        };
+        template<typename C, typename M>
+        struct BoundUnaryMethod : IArgFunction<C>{
+            BoundUnaryMethod( void (C::*_member)( M ) ) : member( _member ) {}
+            virtual void set( C& p, std::string const& stringValue ) const {
+                typename RemoveConstRef<M>::type value;
+                convertInto( stringValue, value );
+                (p.*member)( value );
+            }
+            virtual bool takesArg() const { return !IsBool<M>::value; }
+            virtual IArgFunction<C>* clone() const { return new BoundUnaryMethod( *this ); }
+            void (C::*member)( M );
+        };
+        template<typename C>
+        struct BoundNullaryMethod : IArgFunction<C>{
+            BoundNullaryMethod( void (C::*_member)() ) : member( _member ) {}
+            virtual void set( C& p, std::string const& stringValue ) const {
+                bool value;
+                convertInto( stringValue, value );
+                if( value )
+                    (p.*member)();
+            }
+            virtual bool takesArg() const { return false; }
+            virtual IArgFunction<C>* clone() const { return new BoundNullaryMethod( *this ); }
+            void (C::*member)();
+        };
+
+        template<typename C>
+        struct BoundUnaryFunction : IArgFunction<C>{
+            BoundUnaryFunction( void (*_function)( C& ) ) : function( _function ) {}
+            virtual void set( C& obj, std::string const& stringValue ) const {
+                bool value;
+                convertInto( stringValue, value );
+                if( value )
+                    function( obj );
+            }
+            virtual bool takesArg() const { return false; }
+            virtual IArgFunction<C>* clone() const { return new BoundUnaryFunction( *this ); }
+            void (*function)( C& );
+        };
+
+        template<typename C, typename T>
+        struct BoundBinaryFunction : IArgFunction<C>{
+            BoundBinaryFunction( void (*_function)( C&, T ) ) : function( _function ) {}
+            virtual void set( C& obj, std::string const& stringValue ) const {
+                typename RemoveConstRef<T>::type value;
+                convertInto( stringValue, value );
+                function( obj, value );
+            }
+            virtual bool takesArg() const { return !IsBool<T>::value; }
+            virtual IArgFunction<C>* clone() const { return new BoundBinaryFunction( *this ); }
+            void (*function)( C&, T );
+        };
+
+    } // namespace Detail
+
+    inline std::vector<std::string> argsToVector( int argc, char const* const* const argv ) {
+        std::vector<std::string> args( static_cast<std::size_t>( argc ) );
+        for( std::size_t i = 0; i < static_cast<std::size_t>( argc ); ++i )
+            args[i] = argv[i];
+
+        return args;
+    }
+
+    class Parser {
+        enum Mode { None, MaybeShortOpt, SlashOpt, ShortOpt, LongOpt, Positional };
+        Mode mode;
+        std::size_t from;
+        bool inQuotes;
+    public:
+
+        struct Token {
+            enum Type { Positional, ShortOpt, LongOpt };
+            Token( Type _type, std::string const& _data ) : type( _type ), data( _data ) {}
+            Type type;
+            std::string data;
+        };
+
+        Parser() : mode( None ), from( 0 ), inQuotes( false ){}
+
+        void parseIntoTokens( std::vector<std::string> const& args, std::vector<Token>& tokens ) {
+            const std::string doubleDash = "--";
+            for( std::size_t i = 1; i < args.size() && args[i] != doubleDash; ++i )
+                parseIntoTokens( args[i], tokens);
+        }
+
+        void parseIntoTokens( std::string const& arg, std::vector<Token>& tokens ) {
+            for( std::size_t i = 0; i <= arg.size(); ++i ) {
+                char c = arg[i];
+                if( c == '"' )
+                    inQuotes = !inQuotes;
+                mode = handleMode( i, c, arg, tokens );
+            }
+        }
+        Mode handleMode( std::size_t i, char c, std::string const& arg, std::vector<Token>& tokens ) {
+            switch( mode ) {
+                case None: return handleNone( i, c );
+                case MaybeShortOpt: return handleMaybeShortOpt( i, c );
+                case ShortOpt:
+                case LongOpt:
+                case SlashOpt: return handleOpt( i, c, arg, tokens );
+                case Positional: return handlePositional( i, c, arg, tokens );
+                default: throw std::logic_error( "Unknown mode" );
+            }
+        }
+
+        Mode handleNone( std::size_t i, char c ) {
+            if( inQuotes ) {
+                from = i;
+                return Positional;
+            }
+            switch( c ) {
+                case '-': return MaybeShortOpt;
+#ifdef CLARA_PLATFORM_WINDOWS
+                case '/': from = i+1; return SlashOpt;
+#endif
+                default: from = i; return Positional;
+            }
+        }
+        Mode handleMaybeShortOpt( std::size_t i, char c ) {
+            switch( c ) {
+                case '-': from = i+1; return LongOpt;
+                default: from = i; return ShortOpt;
+            }
+        }
+        Mode handleOpt( std::size_t i, char c, std::string const& arg, std::vector<Token>& tokens ) {
+            if( std::string( ":=\0", 3 ).find( c ) == std::string::npos )
+                return mode;
+
+            std::string optName = arg.substr( from, i-from );
+            if( mode == ShortOpt )
+                for( std::size_t j = 0; j < optName.size(); ++j )
+                    tokens.push_back( Token( Token::ShortOpt, optName.substr( j, 1 ) ) );
+            else if( mode == SlashOpt && optName.size() == 1 )
+                tokens.push_back( Token( Token::ShortOpt, optName ) );
+            else
+                tokens.push_back( Token( Token::LongOpt, optName ) );
+            return None;
+        }
+        Mode handlePositional( std::size_t i, char c, std::string const& arg, std::vector<Token>& tokens ) {
+            if( inQuotes || std::string( "\0", 1 ).find( c ) == std::string::npos )
+                return mode;
+
+            std::string data = arg.substr( from, i-from );
+            tokens.push_back( Token( Token::Positional, data ) );
+            return None;
+        }
+    };
+
+    template<typename ConfigT>
+    struct CommonArgProperties {
+        CommonArgProperties() {}
+        CommonArgProperties( Detail::BoundArgFunction<ConfigT> const& _boundField ) : boundField( _boundField ) {}
+
+        Detail::BoundArgFunction<ConfigT> boundField;
+        std::string description;
+        std::string detail;
+        std::string placeholder; // Only value if boundField takes an arg
+
+        bool takesArg() const {
+            return !placeholder.empty();
+        }
+        void validate() const {
+            if( !boundField.isSet() )
+                throw std::logic_error( "option not bound" );
+        }
+    };
+    struct OptionArgProperties {
+        std::vector<std::string> shortNames;
+        std::string longName;
+
+        bool hasShortName( std::string const& shortName ) const {
+            return std::find( shortNames.begin(), shortNames.end(), shortName ) != shortNames.end();
+        }
+        bool hasLongName( std::string const& _longName ) const {
+            return _longName == longName;
+        }
+    };
+    struct PositionalArgProperties {
+        PositionalArgProperties() : position( -1 ) {}
+        int position; // -1 means non-positional (floating)
+
+        bool isFixedPositional() const {
+            return position != -1;
+        }
+    };
+
+    template<typename ConfigT>
+    class CommandLine {
+
+        struct Arg : CommonArgProperties<ConfigT>, OptionArgProperties, PositionalArgProperties {
+            Arg() {}
+            Arg( Detail::BoundArgFunction<ConfigT> const& _boundField ) : CommonArgProperties<ConfigT>( _boundField ) {}
+
+            using CommonArgProperties<ConfigT>::placeholder; // !TBD
+
+            std::string dbgName() const {
+                if( !longName.empty() )
+                    return "--" + longName;
+                if( !shortNames.empty() )
+                    return "-" + shortNames[0];
+                return "positional args";
+            }
+            std::string commands() const {
+                std::ostringstream oss;
+                bool first = true;
+                std::vector<std::string>::const_iterator it = shortNames.begin(), itEnd = shortNames.end();
+                for(; it != itEnd; ++it ) {
+                    if( first )
+                        first = false;
+                    else
+                        oss << ", ";
+                    oss << "-" << *it;
+                }
+                if( !longName.empty() ) {
+                    if( !first )
+                        oss << ", ";
+                    oss << "--" << longName;
+                }
+                if( !placeholder.empty() )
+                    oss << " <" << placeholder << ">";
+                return oss.str();
+            }
+        };
+
+        typedef CLARA_AUTO_PTR( Arg ) ArgAutoPtr;
+
+        friend void addOptName( Arg& arg, std::string const& optName )
+        {
+            if( optName.empty() )
+                return;
+            if( Detail::startsWith( optName, "--" ) ) {
+                if( !arg.longName.empty() )
+                    throw std::logic_error( "Only one long opt may be specified. '"
+                        + arg.longName
+                        + "' already specified, now attempting to add '"
+                        + optName + "'" );
+                arg.longName = optName.substr( 2 );
+            }
+            else if( Detail::startsWith( optName, "-" ) )
+                arg.shortNames.push_back( optName.substr( 1 ) );
+            else
+                throw std::logic_error( "option must begin with - or --. Option was: '" + optName + "'" );
+        }
+        friend void setPositionalArg( Arg& arg, int position )
+        {
+            arg.position = position;
+        }
+
+        class ArgBuilder {
+        public:
+            ArgBuilder( Arg* arg ) : m_arg( arg ) {}
+
+            // Bind a non-boolean data member (requires placeholder string)
+            template<typename C, typename M>
+            void bind( M C::* field, std::string const& placeholder ) {
+                m_arg->boundField = new Detail::BoundDataMember<C,M>( field );
+                m_arg->placeholder = placeholder;
+            }
+            // Bind a boolean data member (no placeholder required)
+            template<typename C>
+            void bind( bool C::* field ) {
+                m_arg->boundField = new Detail::BoundDataMember<C,bool>( field );
+            }
+
+            // Bind a method taking a single, non-boolean argument (requires a placeholder string)
+            template<typename C, typename M>
+            void bind( void (C::* unaryMethod)( M ), std::string const& placeholder ) {
+                m_arg->boundField = new Detail::BoundUnaryMethod<C,M>( unaryMethod );
+                m_arg->placeholder = placeholder;
+            }
+
+            // Bind a method taking a single, boolean argument (no placeholder string required)
+            template<typename C>
+            void bind( void (C::* unaryMethod)( bool ) ) {
+                m_arg->boundField = new Detail::BoundUnaryMethod<C,bool>( unaryMethod );
+            }
+
+            // Bind a method that takes no arguments (will be called if opt is present)
+            template<typename C>
+            void bind( void (C::* nullaryMethod)() ) {
+                m_arg->boundField = new Detail::BoundNullaryMethod<C>( nullaryMethod );
+            }
+
+            // Bind a free function taking a single argument - the object to operate on (no placeholder string required)
+            template<typename C>
+            void bind( void (* unaryFunction)( C& ) ) {
+                m_arg->boundField = new Detail::BoundUnaryFunction<C>( unaryFunction );
+            }
+
+            // Bind a free function taking a single argument - the object to operate on (requires a placeholder string)
+            template<typename C, typename T>
+            void bind( void (* binaryFunction)( C&, T ), std::string const& placeholder ) {
+                m_arg->boundField = new Detail::BoundBinaryFunction<C, T>( binaryFunction );
+                m_arg->placeholder = placeholder;
+            }
+
+            ArgBuilder& describe( std::string const& description ) {
+                m_arg->description = description;
+                return *this;
+            }
+            ArgBuilder& detail( std::string const& detail ) {
+                m_arg->detail = detail;
+                return *this;
+            }
+
+        protected:
+            Arg* m_arg;
+        };
+
+        class OptBuilder : public ArgBuilder {
+        public:
+            OptBuilder( Arg* arg ) : ArgBuilder( arg ) {}
+            OptBuilder( OptBuilder& other ) : ArgBuilder( other ) {}
+
+            OptBuilder& operator[]( std::string const& optName ) {
+                addOptName( *ArgBuilder::m_arg, optName );
+                return *this;
+            }
+        };
+
+    public:
+
+        CommandLine()
+        :   m_boundProcessName( new Detail::NullBinder<ConfigT>() ),
+            m_highestSpecifiedArgPosition( 0 ),
+            m_throwOnUnrecognisedTokens( false )
+        {}
+        CommandLine( CommandLine const& other )
+        :   m_boundProcessName( other.m_boundProcessName ),
+            m_options ( other.m_options ),
+            m_positionalArgs( other.m_positionalArgs ),
+            m_highestSpecifiedArgPosition( other.m_highestSpecifiedArgPosition ),
+            m_throwOnUnrecognisedTokens( other.m_throwOnUnrecognisedTokens )
+        {
+            if( other.m_floatingArg.get() )
+                m_floatingArg.reset( new Arg( *other.m_floatingArg ) );
+        }
+
+        CommandLine& setThrowOnUnrecognisedTokens( bool shouldThrow = true ) {
+            m_throwOnUnrecognisedTokens = shouldThrow;
+            return *this;
+        }
+
+        OptBuilder operator[]( std::string const& optName ) {
+            m_options.push_back( Arg() );
+            addOptName( m_options.back(), optName );
+            OptBuilder builder( &m_options.back() );
+            return builder;
+        }
+
+        ArgBuilder operator[]( int position ) {
+            m_positionalArgs.insert( std::make_pair( position, Arg() ) );
+            if( position > m_highestSpecifiedArgPosition )
+                m_highestSpecifiedArgPosition = position;
+            setPositionalArg( m_positionalArgs[position], position );
+            ArgBuilder builder( &m_positionalArgs[position] );
+            return builder;
+        }
+
+        // Invoke this with the _ instance
+        ArgBuilder operator[]( UnpositionalTag ) {
+            if( m_floatingArg.get() )
+                throw std::logic_error( "Only one unpositional argument can be added" );
+            m_floatingArg.reset( new Arg() );
+            ArgBuilder builder( m_floatingArg.get() );
+            return builder;
+        }
+
+        template<typename C, typename M>
+        void bindProcessName( M C::* field ) {
+            m_boundProcessName = new Detail::BoundDataMember<C,M>( field );
+        }
+        template<typename C, typename M>
+        void bindProcessName( void (C::*_unaryMethod)( M ) ) {
+            m_boundProcessName = new Detail::BoundUnaryMethod<C,M>( _unaryMethod );
+        }
+
+        void optUsage( std::ostream& os, std::size_t indent = 0, std::size_t width = Detail::consoleWidth ) const {
+            typename std::vector<Arg>::const_iterator itBegin = m_options.begin(), itEnd = m_options.end(), it;
+            std::size_t maxWidth = 0;
+            for( it = itBegin; it != itEnd; ++it )
+                maxWidth = (std::max)( maxWidth, it->commands().size() );
+
+            for( it = itBegin; it != itEnd; ++it ) {
+                Detail::Text usage( it->commands(), Detail::TextAttributes()
+                                                        .setWidth( maxWidth+indent )
+                                                        .setIndent( indent ) );
+                Detail::Text desc( it->description, Detail::TextAttributes()
+                                                        .setWidth( width - maxWidth - 3 ) );
+
+                for( std::size_t i = 0; i < (std::max)( usage.size(), desc.size() ); ++i ) {
+                    std::string usageCol = i < usage.size() ? usage[i] : "";
+                    os << usageCol;
+
+                    if( i < desc.size() && !desc[i].empty() )
+                        os  << std::string( indent + 2 + maxWidth - usageCol.size(), ' ' )
+                            << desc[i];
+                    os << "\n";
+                }
+            }
+        }
+        std::string optUsage() const {
+            std::ostringstream oss;
+            optUsage( oss );
+            return oss.str();
+        }
+
+        void argSynopsis( std::ostream& os ) const {
+            for( int i = 1; i <= m_highestSpecifiedArgPosition; ++i ) {
+                if( i > 1 )
+                    os << " ";
+                typename std::map<int, Arg>::const_iterator it = m_positionalArgs.find( i );
+                if( it != m_positionalArgs.end() )
+                    os << "<" << it->second.placeholder << ">";
+                else if( m_floatingArg.get() )
+                    os << "<" << m_floatingArg->placeholder << ">";
+                else
+                    throw std::logic_error( "non consecutive positional arguments with no floating args" );
+            }
+            // !TBD No indication of mandatory args
+            if( m_floatingArg.get() ) {
+                if( m_highestSpecifiedArgPosition > 1 )
+                    os << " ";
+                os << "[<" << m_floatingArg->placeholder << "> ...]";
+            }
+        }
+        std::string argSynopsis() const {
+            std::ostringstream oss;
+            argSynopsis( oss );
+            return oss.str();
+        }
+
+        void usage( std::ostream& os, std::string const& procName ) const {
+            validate();
+            os << "usage:\n  " << procName << " ";
+            argSynopsis( os );
+            if( !m_options.empty() ) {
+                os << " [options]\n\nwhere options are: \n";
+                optUsage( os, 2 );
+            }
+            os << "\n";
+        }
+        std::string usage( std::string const& procName ) const {
+            std::ostringstream oss;
+            usage( oss, procName );
+            return oss.str();
+        }
+
+        ConfigT parse( std::vector<std::string> const& args ) const {
+            ConfigT config;
+            parseInto( args, config );
+            return config;
+        }
+
+        std::vector<Parser::Token> parseInto( std::vector<std::string> const& args, ConfigT& config ) const {
+            std::string processName = args[0];
+            std::size_t lastSlash = processName.find_last_of( "/\\" );
+            if( lastSlash != std::string::npos )
+                processName = processName.substr( lastSlash+1 );
+            m_boundProcessName.set( config, processName );
+            std::vector<Parser::Token> tokens;
+            Parser parser;
+            parser.parseIntoTokens( args, tokens );
+            return populate( tokens, config );
+        }
+
+        std::vector<Parser::Token> populate( std::vector<Parser::Token> const& tokens, ConfigT& config ) const {
+            validate();
+            std::vector<Parser::Token> unusedTokens = populateOptions( tokens, config );
+            unusedTokens = populateFixedArgs( unusedTokens, config );
+            unusedTokens = populateFloatingArgs( unusedTokens, config );
+            return unusedTokens;
+        }
+
+        std::vector<Parser::Token> populateOptions( std::vector<Parser::Token> const& tokens, ConfigT& config ) const {
+            std::vector<Parser::Token> unusedTokens;
+            std::vector<std::string> errors;
+            for( std::size_t i = 0; i < tokens.size(); ++i ) {
+                Parser::Token const& token = tokens[i];
+                typename std::vector<Arg>::const_iterator it = m_options.begin(), itEnd = m_options.end();
+                for(; it != itEnd; ++it ) {
+                    Arg const& arg = *it;
+
+                    try {
+                        if( ( token.type == Parser::Token::ShortOpt && arg.hasShortName( token.data ) ) ||
+                            ( token.type == Parser::Token::LongOpt && arg.hasLongName( token.data ) ) ) {
+                            if( arg.takesArg() ) {
+                                if( i == tokens.size()-1 || tokens[i+1].type != Parser::Token::Positional )
+                                    errors.push_back( "Expected argument to option: " + token.data );
+                                else
+                                    arg.boundField.set( config, tokens[++i].data );
+                            }
+                            else {
+                                arg.boundField.set( config, "true" );
+                            }
+                            break;
+                        }
+                    }
+                    catch( std::exception& ex ) {
+                        errors.push_back( std::string( ex.what() ) + "\n- while parsing: (" + arg.commands() + ")" );
+                    }
+                }
+                if( it == itEnd ) {
+                    if( token.type == Parser::Token::Positional || !m_throwOnUnrecognisedTokens )
+                        unusedTokens.push_back( token );
+                    else if( errors.empty() && m_throwOnUnrecognisedTokens )
+                        errors.push_back( "unrecognised option: " + token.data );
+                }
+            }
+            if( !errors.empty() ) {
+                std::ostringstream oss;
+                for( std::vector<std::string>::const_iterator it = errors.begin(), itEnd = errors.end();
+                        it != itEnd;
+                        ++it ) {
+                    if( it != errors.begin() )
+                        oss << "\n";
+                    oss << *it;
+                }
+                throw std::runtime_error( oss.str() );
+            }
+            return unusedTokens;
+        }
+        std::vector<Parser::Token> populateFixedArgs( std::vector<Parser::Token> const& tokens, ConfigT& config ) const {
+            std::vector<Parser::Token> unusedTokens;
+            int position = 1;
+            for( std::size_t i = 0; i < tokens.size(); ++i ) {
+                Parser::Token const& token = tokens[i];
+                typename std::map<int, Arg>::const_iterator it = m_positionalArgs.find( position );
+                if( it != m_positionalArgs.end() )
+                    it->second.boundField.set( config, token.data );
+                else
+                    unusedTokens.push_back( token );
+                if( token.type == Parser::Token::Positional )
+                    position++;
+            }
+            return unusedTokens;
+        }
+        std::vector<Parser::Token> populateFloatingArgs( std::vector<Parser::Token> const& tokens, ConfigT& config ) const {
+            if( !m_floatingArg.get() )
+                return tokens;
+            std::vector<Parser::Token> unusedTokens;
+            for( std::size_t i = 0; i < tokens.size(); ++i ) {
+                Parser::Token const& token = tokens[i];
+                if( token.type == Parser::Token::Positional )
+                    m_floatingArg->boundField.set( config, token.data );
+                else
+                    unusedTokens.push_back( token );
+            }
+            return unusedTokens;
+        }
+
+        void validate() const
+        {
+            if( m_options.empty() && m_positionalArgs.empty() && !m_floatingArg.get() )
+                throw std::logic_error( "No options or arguments specified" );
+
+            for( typename std::vector<Arg>::const_iterator  it = m_options.begin(),
+                                                            itEnd = m_options.end();
+                    it != itEnd; ++it )
+                it->validate();
         }
 
     private:
-        Ptr<IReporter> m_reporter;
-        std::string m_filename;
-        std::string m_message;
-        List::What m_listSpec;
-        std::vector<std::string> m_testSpecs;
-        bool m_shouldDebugBreak;
-        bool m_showHelp;
-        std::streambuf* m_streambuf;
-        mutable std::ostream m_os;
-        Include::WhichResults m_includeWhichResults;
-        std::string m_name;
-        int m_cutoff;
-        bool m_allowThrows;
-    };
-
-    struct NewConfig {
-        std::string reporter;
-        std::string outputFilename;
-        List::What listSpec;
-        std::vector<std::string> testSpecs;
-        bool shouldDebugBreak;
-        bool showHelp;
-        Include::WhichResults includeWhichResults;
-        std::string name;
-    };
+        Detail::BoundArgFunction<ConfigT> m_boundProcessName;
+        std::vector<Arg> m_options;
+        std::map<int, Arg> m_positionalArgs;
+        ArgAutoPtr m_floatingArg;
+        int m_highestSpecifiedArgPosition;
+        bool m_throwOnUnrecognisedTokens;
+    };
+
+} // end namespace Clara
+
+STITCH_CLARA_CLOSE_NAMESPACE
+#undef STITCH_CLARA_OPEN_NAMESPACE
+#undef STITCH_CLARA_CLOSE_NAMESPACE
+
+#endif // TWOBLUECUBES_CLARA_H_INCLUDED
+#undef STITCH_CLARA_OPEN_NAMESPACE
+
+// Restore Clara's value for console width, if present
+#ifdef CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH
+#define CLARA_CONFIG_CONSOLE_WIDTH CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH
+#undef CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH
+#endif
+
+#include <fstream>
+
+namespace Catch {
+
+    inline void abortAfterFirst( ConfigData& config ) { config.abortAfter = 1; }
+    inline void abortAfterX( ConfigData& config, int x ) {
+        if( x < 1 )
+            throw std::runtime_error( "Value after -x or --abortAfter must be greater than zero" );
+        config.abortAfter = x;
+    }
+    inline void addTestOrTags( ConfigData& config, std::string const& _testSpec ) { config.testsOrTags.push_back( _testSpec ); }
+    inline void addReporterName( ConfigData& config, std::string const& _reporterName ) { config.reporterNames.push_back( _reporterName ); }
+
+    inline void addWarning( ConfigData& config, std::string const& _warning ) {
+        if( _warning == "NoAssertions" )
+            config.warnings = static_cast<WarnAbout::What>( config.warnings | WarnAbout::NoAssertions );
+        else
+            throw std::runtime_error( "Unrecognised warning: '" + _warning + "'" );
+    }
+    inline void setOrder( ConfigData& config, std::string const& order ) {
+        if( startsWith( "declared", order ) )
+            config.runOrder = RunTests::InDeclarationOrder;
+        else if( startsWith( "lexical", order ) )
+            config.runOrder = RunTests::InLexicographicalOrder;
+        else if( startsWith( "random", order ) )
+            config.runOrder = RunTests::InRandomOrder;
+        else
+            throw std::runtime_error( "Unrecognised ordering: '" + order + "'" );
+    }
+    inline void setRngSeed( ConfigData& config, std::string const& seed ) {
+        if( seed == "time" ) {
+            config.rngSeed = static_cast<unsigned int>( std::time(0) );
+        }
+        else {
+            std::stringstream ss;
+            ss << seed;
+            ss >> config.rngSeed;
+            if( ss.fail() )
+                throw std::runtime_error( "Argment to --rng-seed should be the word 'time' or a number" );
+        }
+    }
+    inline void setVerbosity( ConfigData& config, int level ) {
+        // !TBD: accept strings?
+        config.verbosity = static_cast<Verbosity::Level>( level );
+    }
+    inline void setShowDurations( ConfigData& config, bool _showDurations ) {
+        config.showDurations = _showDurations
+            ? ShowDurations::Always
+            : ShowDurations::Never;
+    }
+    inline void setUseColour( ConfigData& config, std::string const& value ) {
+        std::string mode = toLower( value );
+
+        if( mode == "yes" )
+            config.useColour = UseColour::Yes;
+        else if( mode == "no" )
+            config.useColour = UseColour::No;
+        else if( mode == "auto" )
+            config.useColour = UseColour::Auto;
+        else
+            throw std::runtime_error( "colour mode must be one of: auto, yes or no" );
+    }
+    inline void forceColour( ConfigData& config ) {
+        config.useColour = UseColour::Yes;
+    }
+    inline void loadTestNamesFromFile( ConfigData& config, std::string const& _filename ) {
+        std::ifstream f( _filename.c_str() );
+        if( !f.is_open() )
+            throw std::domain_error( "Unable to load input file: " + _filename );
+
+        std::string line;
+        while( std::getline( f, line ) ) {
+            line = trim(line);
+            if( !line.empty() && !startsWith( line, "#" ) )
+                addTestOrTags( config, "\"" + line + "\"," );
+        }
+    }
+
+    inline Clara::CommandLine<ConfigData> makeCommandLineParser() {
+
+        using namespace Clara;
+        CommandLine<ConfigData> cli;
+
+        cli.bindProcessName( &ConfigData::processName );
+
+        cli["-?"]["-h"]["--help"]
+            .describe( "display usage information" )
+            .bind( &ConfigData::showHelp );
+
+        cli["-l"]["--list-tests"]
+            .describe( "list all/matching test cases" )
+            .bind( &ConfigData::listTests );
+
+        cli["-t"]["--list-tags"]
+            .describe( "list all/matching tags" )
+            .bind( &ConfigData::listTags );
+
+        cli["-s"]["--success"]
+            .describe( "include successful tests in output" )
+            .bind( &ConfigData::showSuccessfulTests );
+
+        cli["-b"]["--break"]
+            .describe( "break into debugger on failure" )
+            .bind( &ConfigData::shouldDebugBreak );
+
+        cli["-e"]["--nothrow"]
+            .describe( "skip exception tests" )
+            .bind( &ConfigData::noThrow );
+
+        cli["-i"]["--invisibles"]
+            .describe( "show invisibles (tabs, newlines)" )
+            .bind( &ConfigData::showInvisibles );
+
+        cli["-o"]["--out"]
+            .describe( "output filename" )
+            .bind( &ConfigData::outputFilename, "filename" );
+
+        cli["-r"]["--reporter"]
+//            .placeholder( "name[:filename]" )
+            .describe( "reporter to use (defaults to console)" )
+            .bind( &addReporterName, "name" );
+
+        cli["-n"]["--name"]
+            .describe( "suite name" )
+            .bind( &ConfigData::name, "name" );
+
+        cli["-a"]["--abort"]
+            .describe( "abort at first failure" )
+            .bind( &abortAfterFirst );
+
+        cli["-x"]["--abortx"]
+            .describe( "abort after x failures" )
+            .bind( &abortAfterX, "no. failures" );
+
+        cli["-w"]["--warn"]
+            .describe( "enable warnings" )
+            .bind( &addWarning, "warning name" );
+
+// - needs updating if reinstated
+//        cli.into( &setVerbosity )
+//            .describe( "level of verbosity (0=no output)" )
+//            .shortOpt( "v")
+//            .longOpt( "verbosity" )
+//            .placeholder( "level" );
+
+        cli[_]
+            .describe( "which test or tests to use" )
+            .bind( &addTestOrTags, "test name, pattern or tags" );
+
+        cli["-d"]["--durations"]
+            .describe( "show test durations" )
+            .bind( &setShowDurations, "yes|no" );
+
+        cli["-f"]["--input-file"]
+            .describe( "load test names to run from a file" )
+            .bind( &loadTestNamesFromFile, "filename" );
+
+        cli["-#"]["--filenames-as-tags"]
+            .describe( "adds a tag for the filename" )
+            .bind( &ConfigData::filenamesAsTags );
+
+        // Less common commands which don't have a short form
+        cli["--list-test-names-only"]
+            .describe( "list all/matching test cases names only" )
+            .bind( &ConfigData::listTestNamesOnly );
+
+        cli["--list-reporters"]
+            .describe( "list all reporters" )
+            .bind( &ConfigData::listReporters );
+
+        cli["--order"]
+            .describe( "test case order (defaults to decl)" )
+            .bind( &setOrder, "decl|lex|rand" );
+
+        cli["--rng-seed"]
+            .describe( "set a specific seed for random numbers" )
+            .bind( &setRngSeed, "'time'|number" );
+
+        cli["--force-colour"]
+            .describe( "force colourised output (deprecated)" )
+            .bind( &forceColour );
+
+        cli["--use-colour"]
+            .describe( "should output be colourised" )
+            .bind( &setUseColour, "yes|no" );
+
+        return cli;
+    }
 
 } // end namespace Catch
 
-// #included from: catch_running_test.hpp
-
-// #included from: catch_section_info.hpp
+// #included from: internal/catch_list.hpp
+#define TWOBLUECUBES_CATCH_LIST_HPP_INCLUDED
+
+// #included from: catch_text.h
+#define TWOBLUECUBES_CATCH_TEXT_H_INCLUDED
+
+#define TBC_TEXT_FORMAT_CONSOLE_WIDTH CATCH_CONFIG_CONSOLE_WIDTH
+
+#define CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE Catch
+// #included from: ../external/tbc_text_format.h
+// Only use header guard if we are not using an outer namespace
+#ifndef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE
+# ifdef TWOBLUECUBES_TEXT_FORMAT_H_INCLUDED
+#  ifndef TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED
+#   define TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED
+#  endif
+# else
+#  define TWOBLUECUBES_TEXT_FORMAT_H_INCLUDED
+# endif
+#endif
+#ifndef TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED
+#include <string>
+#include <vector>
+#include <sstream>
+
+// Use optional outer namespace
+#ifdef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE
+namespace CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE {
+#endif
+
+namespace Tbc {
+
+#ifdef TBC_TEXT_FORMAT_CONSOLE_WIDTH
+    const unsigned int consoleWidth = TBC_TEXT_FORMAT_CONSOLE_WIDTH;
+#else
+    const unsigned int consoleWidth = 80;
+#endif
+
+    struct TextAttributes {
+        TextAttributes()
+        :   initialIndent( std::string::npos ),
+            indent( 0 ),
+            width( consoleWidth-1 ),
+            tabChar( '\t' )
+        {}
+
+        TextAttributes& setInitialIndent( std::size_t _value )  { initialIndent = _value; return *this; }
+        TextAttributes& setIndent( std::size_t _value )         { indent = _value; return *this; }
+        TextAttributes& setWidth( std::size_t _value )          { width = _value; return *this; }
+        TextAttributes& setTabChar( char _value )               { tabChar = _value; return *this; }
+
+        std::size_t initialIndent;  // indent of first line, or npos
+        std::size_t indent;         // indent of subsequent lines, or all if initialIndent is npos
+        std::size_t width;          // maximum width of text, including indent. Longer text will wrap
+        char tabChar;               // If this char is seen the indent is changed to current pos
+    };
+
+    class Text {
+    public:
+        Text( std::string const& _str, TextAttributes const& _attr = TextAttributes() )
+        : attr( _attr )
+        {
+            std::string wrappableChars = " [({.,/|\\-";
+            std::size_t indent = _attr.initialIndent != std::string::npos
+                ? _attr.initialIndent
+                : _attr.indent;
+            std::string remainder = _str;
+
+            while( !remainder.empty() ) {
+                if( lines.size() >= 1000 ) {
+                    lines.push_back( "... message truncated due to excessive size" );
+                    return;
+                }
+                std::size_t tabPos = std::string::npos;
+                std::size_t width = (std::min)( remainder.size(), _attr.width - indent );
+                std::size_t pos = remainder.find_first_of( '\n' );
+                if( pos <= width ) {
+                    width = pos;
+                }
+                pos = remainder.find_last_of( _attr.tabChar, width );
+                if( pos != std::string::npos ) {
+                    tabPos = pos;
+                    if( remainder[width] == '\n' )
+                        width--;
+                    remainder = remainder.substr( 0, tabPos ) + remainder.substr( tabPos+1 );
+                }
+
+                if( width == remainder.size() ) {
+                    spliceLine( indent, remainder, width );
+                }
+                else if( remainder[width] == '\n' ) {
+                    spliceLine( indent, remainder, width );
+                    if( width <= 1 || remainder.size() != 1 )
+                        remainder = remainder.substr( 1 );
+                    indent = _attr.indent;
+                }
+                else {
+                    pos = remainder.find_last_of( wrappableChars, width );
+                    if( pos != std::string::npos && pos > 0 ) {
+                        spliceLine( indent, remainder, pos );
+                        if( remainder[0] == ' ' )
+                            remainder = remainder.substr( 1 );
+                    }
+                    else {
+                        spliceLine( indent, remainder, width-1 );
+                        lines.back() += "-";
+                    }
+                    if( lines.size() == 1 )
+                        indent = _attr.indent;
+                    if( tabPos != std::string::npos )
+                        indent += tabPos;
+                }
+            }
+        }
+
+        void spliceLine( std::size_t _indent, std::string& _remainder, std::size_t _pos ) {
+            lines.push_back( std::string( _indent, ' ' ) + _remainder.substr( 0, _pos ) );
+            _remainder = _remainder.substr( _pos );
+        }
+
+        typedef std::vector<std::string>::const_iterator const_iterator;
+
+        const_iterator begin() const { return lines.begin(); }
+        const_iterator end() const { return lines.end(); }
+        std::string const& last() const { return lines.back(); }
+        std::size_t size() const { return lines.size(); }
+        std::string const& operator[]( std::size_t _index ) const { return lines[_index]; }
+        std::string toString() const {
+            std::ostringstream oss;
+            oss << *this;
+            return oss.str();
+        }
+
+        inline friend std::ostream& operator << ( std::ostream& _stream, Text const& _text ) {
+            for( Text::const_iterator it = _text.begin(), itEnd = _text.end();
+                it != itEnd; ++it ) {
+                if( it != _text.begin() )
+                    _stream << "\n";
+                _stream << *it;
+            }
+            return _stream;
+        }
+
+    private:
+        std::string str;
+        TextAttributes attr;
+        std::vector<std::string> lines;
+    };
+
+} // end namespace Tbc
+
+#ifdef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE
+} // end outer namespace
+#endif
+
+#endif // TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED
+#undef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE
+
+namespace Catch {
+    using Tbc::Text;
+    using Tbc::TextAttributes;
+}
+
+// #included from: catch_console_colour.hpp
+#define TWOBLUECUBES_CATCH_CONSOLE_COLOUR_HPP_INCLUDED
+
+namespace Catch {
+
+    struct Colour {
+        enum Code {
+            None = 0,
+
+            White,
+            Red,
+            Green,
+            Blue,
+            Cyan,
+            Yellow,
+            Grey,
+
+            Bright = 0x10,
+
+            BrightRed = Bright | Red,
+            BrightGreen = Bright | Green,
+            LightGrey = Bright | Grey,
+            BrightWhite = Bright | White,
+
+            // By intention
+            FileName = LightGrey,
+            Warning = Yellow,
+            ResultError = BrightRed,
+            ResultSuccess = BrightGreen,
+            ResultExpectedFailure = Warning,
+
+            Error = BrightRed,
+            Success = Green,
+
+            OriginalExpression = Cyan,
+            ReconstructedExpression = Yellow,
+
+            SecondaryText = LightGrey,
+            Headers = White
+        };
+
+        // Use constructed object for RAII guard
+        Colour( Code _colourCode );
+        Colour( Colour const& other );
+        ~Colour();
+
+        // Use static method for one-shot changes
+        static void use( Code _colourCode );
+
+    private:
+        bool m_moved;
+    };
+
+    inline std::ostream& operator << ( std::ostream& os, Colour const& ) { return os; }
+
+} // end namespace Catch
+
+// #included from: catch_interfaces_reporter.h
+#define TWOBLUECUBES_CATCH_INTERFACES_REPORTER_H_INCLUDED
+
+#include <string>
+#include <ostream>
+#include <map>
+#include <assert.h>
+
+namespace Catch
+{
+    struct ReporterConfig {
+        explicit ReporterConfig( Ptr<IConfig const> const& _fullConfig )
+        :   m_stream( &_fullConfig->stream() ), m_fullConfig( _fullConfig ) {}
+
+        ReporterConfig( Ptr<IConfig const> const& _fullConfig, std::ostream& _stream )
+        :   m_stream( &_stream ), m_fullConfig( _fullConfig ) {}
+
+        std::ostream& stream() const    { return *m_stream; }
+        Ptr<IConfig const> fullConfig() const { return m_fullConfig; }
+
+    private:
+        std::ostream* m_stream;
+        Ptr<IConfig const> m_fullConfig;
+    };
+
+    struct ReporterPreferences {
+        ReporterPreferences()
+        : shouldRedirectStdOut( false )
+        {}
+
+        bool shouldRedirectStdOut;
+    };
+
+    template<typename T>
+    struct LazyStat : Option<T> {
+        LazyStat() : used( false ) {}
+        LazyStat& operator=( T const& _value ) {
+            Option<T>::operator=( _value );
+            used = false;
+            return *this;
+        }
+        void reset() {
+            Option<T>::reset();
+            used = false;
+        }
+        bool used;
+    };
+
+    struct TestRunInfo {
+        TestRunInfo( std::string const& _name ) : name( _name ) {}
+        std::string name;
+    };
+    struct GroupInfo {
+        GroupInfo(  std::string const& _name,
+                    std::size_t _groupIndex,
+                    std::size_t _groupsCount )
+        :   name( _name ),
+            groupIndex( _groupIndex ),
+            groupsCounts( _groupsCount )
+        {}
+
+        std::string name;
+        std::size_t groupIndex;
+        std::size_t groupsCounts;
+    };
+
+    struct AssertionStats {
+        AssertionStats( AssertionResult const& _assertionResult,
+                        std::vector<MessageInfo> const& _infoMessages,
+                        Totals const& _totals )
+        :   assertionResult( _assertionResult ),
+            infoMessages( _infoMessages ),
+            totals( _totals )
+        {
+            if( assertionResult.hasMessage() ) {
+                // Copy message into messages list.
+                // !TBD This should have been done earlier, somewhere
+                MessageBuilder builder( assertionResult.getTestMacroName(), assertionResult.getSourceInfo(), assertionResult.getResultType() );
+                builder << assertionResult.getMessage();
+                builder.m_info.message = builder.m_stream.str();
+
+                infoMessages.push_back( builder.m_info );
+            }
+        }
+        virtual ~AssertionStats();
+
+#  ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS
+        AssertionStats( AssertionStats const& )              = default;
+        AssertionStats( AssertionStats && )                  = default;
+        AssertionStats& operator = ( AssertionStats const& ) = default;
+        AssertionStats& operator = ( AssertionStats && )     = default;
+#  endif
+
+        AssertionResult assertionResult;
+        std::vector<MessageInfo> infoMessages;
+        Totals totals;
+    };
+
+    struct SectionStats {
+        SectionStats(   SectionInfo const& _sectionInfo,
+                        Counts const& _assertions,
+                        double _durationInSeconds,
+                        bool _missingAssertions )
+        :   sectionInfo( _sectionInfo ),
+            assertions( _assertions ),
+            durationInSeconds( _durationInSeconds ),
+            missingAssertions( _missingAssertions )
+        {}
+        virtual ~SectionStats();
+#  ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS
+        SectionStats( SectionStats const& )              = default;
+        SectionStats( SectionStats && )                  = default;
+        SectionStats& operator = ( SectionStats const& ) = default;
+        SectionStats& operator = ( SectionStats && )     = default;
+#  endif
+
+        SectionInfo sectionInfo;
+        Counts assertions;
+        double durationInSeconds;
+        bool missingAssertions;
+    };
+
+    struct TestCaseStats {
+        TestCaseStats(  TestCaseInfo const& _testInfo,
+                        Totals const& _totals,
+                        std::string const& _stdOut,
+                        std::string const& _stdErr,
+                        bool _aborting )
+        : testInfo( _testInfo ),
+            totals( _totals ),
+            stdOut( _stdOut ),
+            stdErr( _stdErr ),
+            aborting( _aborting )
+        {}
+        virtual ~TestCaseStats();
+
+#  ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS
+        TestCaseStats( TestCaseStats const& )              = default;
+        TestCaseStats( TestCaseStats && )                  = default;
+        TestCaseStats& operator = ( TestCaseStats const& ) = default;
+        TestCaseStats& operator = ( TestCaseStats && )     = default;
+#  endif
+
+        TestCaseInfo testInfo;
+        Totals totals;
+        std::string stdOut;
+        std::string stdErr;
+        bool aborting;
+    };
+
+    struct TestGroupStats {
+        TestGroupStats( GroupInfo const& _groupInfo,
+                        Totals const& _totals,
+                        bool _aborting )
+        :   groupInfo( _groupInfo ),
+            totals( _totals ),
+            aborting( _aborting )
+        {}
+        TestGroupStats( GroupInfo const& _groupInfo )
+        :   groupInfo( _groupInfo ),
+            aborting( false )
+        {}
+        virtual ~TestGroupStats();
+
+#  ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS
+        TestGroupStats( TestGroupStats const& )              = default;
+        TestGroupStats( TestGroupStats && )                  = default;
+        TestGroupStats& operator = ( TestGroupStats const& ) = default;
+        TestGroupStats& operator = ( TestGroupStats && )     = default;
+#  endif
+
+        GroupInfo groupInfo;
+        Totals totals;
+        bool aborting;
+    };
+
+    struct TestRunStats {
+        TestRunStats(   TestRunInfo const& _runInfo,
+                        Totals const& _totals,
+                        bool _aborting )
+        :   runInfo( _runInfo ),
+            totals( _totals ),
+            aborting( _aborting )
+        {}
+        virtual ~TestRunStats();
+
+#  ifndef CATCH_CONFIG_CPP11_GENERATED_METHODS
+        TestRunStats( TestRunStats const& _other )
+        :   runInfo( _other.runInfo ),
+            totals( _other.totals ),
+            aborting( _other.aborting )
+        {}
+#  else
+        TestRunStats( TestRunStats const& )              = default;
+        TestRunStats( TestRunStats && )                  = default;
+        TestRunStats& operator = ( TestRunStats const& ) = default;
+        TestRunStats& operator = ( TestRunStats && )     = default;
+#  endif
+
+        TestRunInfo runInfo;
+        Totals totals;
+        bool aborting;
+    };
+
+    class MultipleReporters;
+
+    struct IStreamingReporter : IShared {
+        virtual ~IStreamingReporter();
+
+        // Implementing class must also provide the following static method:
+        // static std::string getDescription();
+
+        virtual ReporterPreferences getPreferences() const = 0;
+
+        virtual void noMatchingTestCases( std::string const& spec ) = 0;
+
+        virtual void testRunStarting( TestRunInfo const& testRunInfo ) = 0;
+        virtual void testGroupStarting( GroupInfo const& groupInfo ) = 0;
+
+        virtual void testCaseStarting( TestCaseInfo const& testInfo ) = 0;
+        virtual void sectionStarting( SectionInfo const& sectionInfo ) = 0;
+
+        virtual void assertionStarting( AssertionInfo const& assertionInfo ) = 0;
+
+        // The return value indicates if the messages buffer should be cleared:
+        virtual bool assertionEnded( AssertionStats const& assertionStats ) = 0;
+
+        virtual void sectionEnded( SectionStats const& sectionStats ) = 0;
+        virtual void testCaseEnded( TestCaseStats const& testCaseStats ) = 0;
+        virtual void testGroupEnded( TestGroupStats const& testGroupStats ) = 0;
+        virtual void testRunEnded( TestRunStats const& testRunStats ) = 0;
+
+        virtual void skipTest( TestCaseInfo const& testInfo ) = 0;
+
+        virtual MultipleReporters* tryAsMulti() { return CATCH_NULL; }
+    };
+
+    struct IReporterFactory : IShared {
+        virtual ~IReporterFactory();
+        virtual IStreamingReporter* create( ReporterConfig const& config ) const = 0;
+        virtual std::string getDescription() const = 0;
+    };
+
+    struct IReporterRegistry {
+        typedef std::map<std::string, Ptr<IReporterFactory> > FactoryMap;
+        typedef std::vector<Ptr<IReporterFactory> > Listeners;
+
+        virtual ~IReporterRegistry();
+        virtual IStreamingReporter* create( std::string const& name, Ptr<IConfig const> const& config ) const = 0;
+        virtual FactoryMap const& getFactories() const = 0;
+        virtual Listeners const& getListeners() const = 0;
+    };
+
+    Ptr<IStreamingReporter> addReporter( Ptr<IStreamingReporter> const& existingReporter, Ptr<IStreamingReporter> const& additionalReporter );
+
+}
+
+#include <limits>
+#include <algorithm>
+
+namespace Catch {
+
+    inline std::size_t listTests( Config const& config ) {
+
+        TestSpec testSpec = config.testSpec();
+        if( config.testSpec().hasFilters() )
+            Catch::cout() << "Matching test cases:\n";
+        else {
+            Catch::cout() << "All available test cases:\n";
+            testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec();
+        }
+
+        std::size_t matchedTests = 0;
+        TextAttributes nameAttr, tagsAttr;
+        nameAttr.setInitialIndent( 2 ).setIndent( 4 );
+        tagsAttr.setIndent( 6 );
+
+        std::vector<TestCase> matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config );
+        for( std::vector<TestCase>::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end();
+                it != itEnd;
+                ++it ) {
+            matchedTests++;
+            TestCaseInfo const& testCaseInfo = it->getTestCaseInfo();
+            Colour::Code colour = testCaseInfo.isHidden()
+                ? Colour::SecondaryText
+                : Colour::None;
+            Colour colourGuard( colour );
+
+            Catch::cout() << Text( testCaseInfo.name, nameAttr ) << std::endl;
+            if( !testCaseInfo.tags.empty() )
+                Catch::cout() << Text( testCaseInfo.tagsAsString, tagsAttr ) << std::endl;
+        }
+
+        if( !config.testSpec().hasFilters() )
+            Catch::cout() << pluralise( matchedTests, "test case" ) << "\n" << std::endl;
+        else
+            Catch::cout() << pluralise( matchedTests, "matching test case" ) << "\n" << std::endl;
+        return matchedTests;
+    }
+
+    inline std::size_t listTestsNamesOnly( Config const& config ) {
+        TestSpec testSpec = config.testSpec();
+        if( !config.testSpec().hasFilters() )
+            testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec();
+        std::size_t matchedTests = 0;
+        std::vector<TestCase> matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config );
+        for( std::vector<TestCase>::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end();
+                it != itEnd;
+                ++it ) {
+            matchedTests++;
+            TestCaseInfo const& testCaseInfo = it->getTestCaseInfo();
+            Catch::cout() << testCaseInfo.name << std::endl;
+        }
+        return matchedTests;
+    }
+
+    struct TagInfo {
+        TagInfo() : count ( 0 ) {}
+        void add( std::string const& spelling ) {
+            ++count;
+            spellings.insert( spelling );
+        }
+        std::string all() const {
+            std::string out;
+            for( std::set<std::string>::const_iterator it = spellings.begin(), itEnd = spellings.end();
+                        it != itEnd;
+                        ++it )
+                out += "[" + *it + "]";
+            return out;
+        }
+        std::set<std::string> spellings;
+        std::size_t count;
+    };
+
+    inline std::size_t listTags( Config const& config ) {
+        TestSpec testSpec = config.testSpec();
+        if( config.testSpec().hasFilters() )
+            Catch::cout() << "Tags for matching test cases:\n";
+        else {
+            Catch::cout() << "All available tags:\n";
+            testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec();
+        }
+
+        std::map<std::string, TagInfo> tagCounts;
+
+        std::vector<TestCase> matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config );
+        for( std::vector<TestCase>::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end();
+                it != itEnd;
+                ++it ) {
+            for( std::set<std::string>::const_iterator  tagIt = it->getTestCaseInfo().tags.begin(),
+                                                        tagItEnd = it->getTestCaseInfo().tags.end();
+                    tagIt != tagItEnd;
+                    ++tagIt ) {
+                std::string tagName = *tagIt;
+                std::string lcaseTagName = toLower( tagName );
+                std::map<std::string, TagInfo>::iterator countIt = tagCounts.find( lcaseTagName );
+                if( countIt == tagCounts.end() )
+                    countIt = tagCounts.insert( std::make_pair( lcaseTagName, TagInfo() ) ).first;
+                countIt->second.add( tagName );
+            }
+        }
+
+        for( std::map<std::string, TagInfo>::const_iterator countIt = tagCounts.begin(),
+                                                            countItEnd = tagCounts.end();
+                countIt != countItEnd;
+                ++countIt ) {
+            std::ostringstream oss;
+            oss << "  " << std::setw(2) << countIt->second.count << "  ";
+            Text wrapper( countIt->second.all(), TextAttributes()
+                                                    .setInitialIndent( 0 )
+                                                    .setIndent( oss.str().size() )
+                                                    .setWidth( CATCH_CONFIG_CONSOLE_WIDTH-10 ) );
+            Catch::cout() << oss.str() << wrapper << "\n";
+        }
+        Catch::cout() << pluralise( tagCounts.size(), "tag" ) << "\n" << std::endl;
+        return tagCounts.size();
+    }
+
+    inline std::size_t listReporters( Config const& /*config*/ ) {
+        Catch::cout() << "Available reporters:\n";
+        IReporterRegistry::FactoryMap const& factories = getRegistryHub().getReporterRegistry().getFactories();
+        IReporterRegistry::FactoryMap::const_iterator itBegin = factories.begin(), itEnd = factories.end(), it;
+        std::size_t maxNameLen = 0;
+        for(it = itBegin; it != itEnd; ++it )
+            maxNameLen = (std::max)( maxNameLen, it->first.size() );
+
+        for(it = itBegin; it != itEnd; ++it ) {
+            Text wrapper( it->second->getDescription(), TextAttributes()
+                                                        .setInitialIndent( 0 )
+                                                        .setIndent( 7+maxNameLen )
+                                                        .setWidth( CATCH_CONFIG_CONSOLE_WIDTH - maxNameLen-8 ) );
+            Catch::cout() << "  "
+                    << it->first
+                    << ":"
+                    << std::string( maxNameLen - it->first.size() + 2, ' ' )
+                    << wrapper << "\n";
+        }
+        Catch::cout() << std::endl;
+        return factories.size();
+    }
+
+    inline Option<std::size_t> list( Config const& config ) {
+        Option<std::size_t> listedCount;
+        if( config.listTests() )
+            listedCount = listedCount.valueOr(0) + listTests( config );
+        if( config.listTestNamesOnly() )
+            listedCount = listedCount.valueOr(0) + listTestsNamesOnly( config );
+        if( config.listTags() )
+            listedCount = listedCount.valueOr(0) + listTags( config );
+        if( config.listReporters() )
+            listedCount = listedCount.valueOr(0) + listReporters( config );
+        return listedCount;
+    }
+
+} // end namespace Catch
+
+// #included from: internal/catch_run_context.hpp
+#define TWOBLUECUBES_CATCH_RUNNER_IMPL_HPP_INCLUDED
+
+// #included from: catch_test_case_tracker.hpp
+#define TWOBLUECUBES_CATCH_TEST_CASE_TRACKER_HPP_INCLUDED
 
 #include <map>
 #include <string>
-
-namespace Catch {
-
-    class SectionInfo {
-    public:
-
-        enum Status {
-            Root,
-            Unknown,
-            Branch,
-            TestedBranch,
-            TestedLeaf
-        };
-
-        SectionInfo( SectionInfo* parent )
-        :   m_status( Unknown ),
-            m_parent( parent )
-        {}
-
-        SectionInfo()
-        :   m_status( Root ),
-            m_parent( NULL )
-        {}
-
-        ~SectionInfo() {
-            deleteAllValues( m_subSections );
-        }
-
-        bool shouldRun() const {
-            return m_status < TestedBranch;
-        }
-
-        bool ran() {
-            if( m_status < Branch ) {
-                m_status = TestedLeaf;
-                return true;
-            }
-            return false;
-        }
-
-        void ranToCompletion() {
-            if( m_status == Branch && !hasUntestedSections() )
-                m_status = TestedBranch;
-        }
-
-        SectionInfo* findSubSection( const std::string& name ) {
-            std::map<std::string, SectionInfo*>::const_iterator it = m_subSections.find( name );
-            return it != m_subSections.end()
-                        ? it->second
-                        : NULL;
-        }
-
-        SectionInfo* addSubSection( const std::string& name ) {
-            SectionInfo* subSection = new SectionInfo( this );
-            m_subSections.insert( std::make_pair( name, subSection ) );
-            m_status = Branch;
-            return subSection;
-        }
-
-        SectionInfo* getParent() {
-            return m_parent;
-        }
-
-        bool hasUntestedSections() const {
-            if( m_status == Unknown )
-                return true;
-
-            std::map<std::string, SectionInfo*>::const_iterator it = m_subSections.begin();
-            std::map<std::string, SectionInfo*>::const_iterator itEnd = m_subSections.end();
-            for(; it != itEnd; ++it ) {
-                if( it->second->hasUntestedSections() )
-                    return true;
-            }
-            return false;
-        }
-
-    private:
-        Status m_status;
-        std::map<std::string, SectionInfo*> m_subSections;
-        SectionInfo* m_parent;
-    };
-}
+#include <assert.h>
+#include <vector>
 
 namespace Catch {
-
-    class RunningTest {
-
-        enum RunStatus {
-            NothingRun,
-            EncounteredASection,
-            RanAtLeastOneSection,
-            RanToCompletionWithSections,
-            RanToCompletionWithNoSections
+namespace TestCaseTracking {
+
+    struct ITracker : SharedImpl<> {
+        virtual ~ITracker();
+
+        // static queries
+        virtual std::string name() const = 0;
+
+        // dynamic queries
+        virtual bool isComplete() const = 0; // Successfully completed or failed
+        virtual bool isSuccessfullyCompleted() const = 0;
+        virtual bool isOpen() const = 0; // Started but not complete
+        virtual bool hasChildren() const = 0;
+
+        virtual ITracker& parent() = 0;
+
+        // actions
+        virtual void close() = 0; // Successfully complete
+        virtual void fail() = 0;
+        virtual void markAsNeedingAnotherRun() = 0;
+
+        virtual void addChild( Ptr<ITracker> const& child ) = 0;
+        virtual ITracker* findChild( std::string const& name ) = 0;
+        virtual void openChild() = 0;
+
+        // Debug/ checking
+        virtual bool isSectionTracker() const = 0;
+        virtual bool isIndexTracker() const = 0;
+    };
+
+    class TrackerContext {
+
+        enum RunState {
+            NotStarted,
+            Executing,
+            CompletedCycle
         };
 
+        Ptr<ITracker> m_rootTracker;
+        ITracker* m_currentTracker;
+        RunState m_runState;
+
     public:
-        explicit RunningTest( const TestCaseInfo* info = NULL )
-        :   m_info( info ),
-            m_runStatus( RanAtLeastOneSection ),
-            m_currentSection( &m_rootSection ),
-            m_changed( false )
+
+        static TrackerContext& instance() {
+            static TrackerContext s_instance;
+            return s_instance;
+        }
+
+        TrackerContext()
+        :   m_currentTracker( CATCH_NULL ),
+            m_runState( NotStarted )
+        {}
+
+        ITracker& startRun();
+
+        void endRun() {
+            m_rootTracker.reset();
+            m_currentTracker = CATCH_NULL;
+            m_runState = NotStarted;
+        }
+
+        void startCycle() {
+            m_currentTracker = m_rootTracker.get();
+            m_runState = Executing;
+        }
+        void completeCycle() {
+            m_runState = CompletedCycle;
+        }
+
+        bool completedCycle() const {
+            return m_runState == CompletedCycle;
+        }
+        ITracker& currentTracker() {
+            return *m_currentTracker;
+        }
+        void setCurrentTracker( ITracker* tracker ) {
+            m_currentTracker = tracker;
+        }
+    };
+
+    class TrackerBase : public ITracker {
+    protected:
+        enum CycleState {
+            NotStarted,
+            Executing,
+            ExecutingChildren,
+            NeedsAnotherRun,
+            CompletedSuccessfully,
+            Failed
+        };
+        class TrackerHasName {
+            std::string m_name;
+        public:
+            TrackerHasName( std::string const& name ) : m_name( name ) {}
+            bool operator ()( Ptr<ITracker> const& tracker ) {
+                return tracker->name() == m_name;
+            }
+        };
+        typedef std::vector<Ptr<ITracker> > Children;
+        std::string m_name;
+        TrackerContext& m_ctx;
+        ITracker* m_parent;
+        Children m_children;
+        CycleState m_runState;
+    public:
+        TrackerBase( std::string const& name, TrackerContext& ctx, ITracker* parent )
+        :   m_name( name ),
+            m_ctx( ctx ),
+            m_parent( parent ),
+            m_runState( NotStarted )
         {}
-
-        bool wasSectionSeen() const {
-            return  m_runStatus == RanAtLeastOneSection ||
-                    m_runStatus == RanToCompletionWithSections;
-        }
-
-        void reset() {
-            m_runStatus = NothingRun;
-            m_changed = false;
-            m_lastSectionToRun = NULL;
-        }
-
-        void ranToCompletion() {
-            if( m_runStatus == RanAtLeastOneSection ||
-                m_runStatus == EncounteredASection ) {
-                m_runStatus = RanToCompletionWithSections;
-                if( m_lastSectionToRun ) {
-                    m_lastSectionToRun->ranToCompletion();
-                    m_changed = true;
-                }
+        virtual ~TrackerBase();
+
+        virtual std::string name() const CATCH_OVERRIDE {
+            return m_name;
+        }
+        virtual bool isComplete() const CATCH_OVERRIDE {
+            return m_runState == CompletedSuccessfully || m_runState == Failed;
+        }
+        virtual bool isSuccessfullyCompleted() const CATCH_OVERRIDE {
+            return m_runState == CompletedSuccessfully;
+        }
+        virtual bool isOpen() const CATCH_OVERRIDE {
+            return m_runState != NotStarted && !isComplete();
+        }
+        virtual bool hasChildren() const CATCH_OVERRIDE {
+            return !m_children.empty();
+        }
+
+        virtual void addChild( Ptr<ITracker> const& child ) CATCH_OVERRIDE {
+            m_children.push_back( child );
+        }
+
+        virtual ITracker* findChild( std::string const& name ) CATCH_OVERRIDE {
+            Children::const_iterator it = std::find_if( m_children.begin(), m_children.end(), TrackerHasName( name ) );
+            return( it != m_children.end() )
+                ? it->get()
+                : CATCH_NULL;
+        }
+        virtual ITracker& parent() CATCH_OVERRIDE {
+            assert( m_parent ); // Should always be non-null except for root
+            return *m_parent;
+        }
+
+        virtual void openChild() CATCH_OVERRIDE {
+            if( m_runState != ExecutingChildren ) {
+                m_runState = ExecutingChildren;
+                if( m_parent )
+                    m_parent->openChild();
+            }
+        }
+
+        virtual bool isSectionTracker() const CATCH_OVERRIDE { return false; }
+        virtual bool isIndexTracker() const CATCH_OVERRIDE { return false; }
+
+        void open() {
+            m_runState = Executing;
+            moveToThis();
+            if( m_parent )
+                m_parent->openChild();
+        }
+
+        virtual void close() CATCH_OVERRIDE {
+
+            // Close any still open children (e.g. generators)
+            while( &m_ctx.currentTracker() != this )
+                m_ctx.currentTracker().close();
+
+            switch( m_runState ) {
+                case NotStarted:
+                case CompletedSuccessfully:
+                case Failed:
+                    throw std::logic_error( "Illogical state" );
+
+                case NeedsAnotherRun:
+                    break;;
+
+                case Executing:
+                    m_runState = CompletedSuccessfully;
+                    break;
+                case ExecutingChildren:
+                    if( m_children.empty() || m_children.back()->isComplete() )
+                        m_runState = CompletedSuccessfully;
+                    break;
+
+                default:
+                    throw std::logic_error( "Unexpected state" );
+            }
+            moveToParent();
+            m_ctx.completeCycle();
+        }
+        virtual void fail() CATCH_OVERRIDE {
+            m_runState = Failed;
+            if( m_parent )
+                m_parent->markAsNeedingAnotherRun();
+            moveToParent();
+            m_ctx.completeCycle();
+        }
+        virtual void markAsNeedingAnotherRun() CATCH_OVERRIDE {
+            m_runState = NeedsAnotherRun;
+        }
+    private:
+        void moveToParent() {
+            assert( m_parent );
+            m_ctx.setCurrentTracker( m_parent );
+        }
+        void moveToThis() {
+            m_ctx.setCurrentTracker( this );
+        }
+    };
+
+    class SectionTracker : public TrackerBase {
+    public:
+        SectionTracker( std::string const& name, TrackerContext& ctx, ITracker* parent )
+        :   TrackerBase( name, ctx, parent )
+        {}
+        virtual ~SectionTracker();
+
+        virtual bool isSectionTracker() const CATCH_OVERRIDE { return true; }
+
+        static SectionTracker& acquire( TrackerContext& ctx, std::string const& name ) {
+            SectionTracker* section = CATCH_NULL;
+
+            ITracker& currentTracker = ctx.currentTracker();
+            if( ITracker* childTracker = currentTracker.findChild( name ) ) {
+                assert( childTracker );
+                assert( childTracker->isSectionTracker() );
+                section = static_cast<SectionTracker*>( childTracker );
             }
             else {
-                m_runStatus = RanToCompletionWithNoSections;
-            }
-        }
-
-        bool addSection( const std::string& name ) {
-            if( m_runStatus == NothingRun )
-                m_runStatus = EncounteredASection;
-
-            SectionInfo* thisSection = m_currentSection->findSubSection( name );
-            if( !thisSection ) {
-                thisSection = m_currentSection->addSubSection( name );
-                m_changed = true;
-            }
-
-            if( !wasSectionSeen() && thisSection->shouldRun() ) {
-                m_currentSection = thisSection;
-                m_lastSectionToRun = NULL;
-                return true;
-            }
-            return false;
-        }
-
-        void endSection( const std::string& ) {
-            if( m_currentSection->ran() ) {
-                m_runStatus = RanAtLeastOneSection;
-                m_changed = true;
-            }
-            else if( m_runStatus == EncounteredASection ) {
-                m_runStatus = RanAtLeastOneSection;
-                m_lastSectionToRun = m_currentSection;
-            }
-            m_currentSection = m_currentSection->getParent();
-        }
-
-        const TestCaseInfo& getTestCaseInfo() const {
-            return *m_info;
-        }
-
-        bool hasUntestedSections() const {
-            return  m_runStatus == RanAtLeastOneSection ||
-                    ( m_rootSection.hasUntestedSections() && m_changed );
-        }
-
-    private:
-        const TestCaseInfo* m_info;
-        RunStatus m_runStatus;
-        SectionInfo m_rootSection;
-        SectionInfo* m_currentSection;
-        SectionInfo* m_lastSectionToRun;
-        bool m_changed;
-    };
-}
+                section = new SectionTracker( name, ctx, &currentTracker );
+                currentTracker.addChild( section );
+            }
+            if( !ctx.completedCycle() && !section->isComplete() ) {
+
+                section->open();
+            }
+            return *section;
+        }
+    };
+
+    class IndexTracker : public TrackerBase {
+        int m_size;
+        int m_index;
+    public:
+        IndexTracker( std::string const& name, TrackerContext& ctx, ITracker* parent, int size )
+        :   TrackerBase( name, ctx, parent ),
+            m_size( size ),
+            m_index( -1 )
+        {}
+        virtual ~IndexTracker();
+
+        virtual bool isIndexTracker() const CATCH_OVERRIDE { return true; }
+
+        static IndexTracker& acquire( TrackerContext& ctx, std::string const& name, int size ) {
+            IndexTracker* tracker = CATCH_NULL;
+
+            ITracker& currentTracker = ctx.currentTracker();
+            if( ITracker* childTracker = currentTracker.findChild( name ) ) {
+                assert( childTracker );
+                assert( childTracker->isIndexTracker() );
+                tracker = static_cast<IndexTracker*>( childTracker );
+            }
+            else {
+                tracker = new IndexTracker( name, ctx, &currentTracker, size );
+                currentTracker.addChild( tracker );
+            }
+
+            if( !ctx.completedCycle() && !tracker->isComplete() ) {
+                if( tracker->m_runState != ExecutingChildren && tracker->m_runState != NeedsAnotherRun )
+                    tracker->moveNext();
+                tracker->open();
+            }
+
+            return *tracker;
+        }
+
+        int index() const { return m_index; }
+
+        void moveNext() {
+            m_index++;
+            m_children.clear();
+        }
+
+        virtual void close() CATCH_OVERRIDE {
+            TrackerBase::close();
+            if( m_runState == CompletedSuccessfully && m_index < m_size-1 )
+                m_runState = Executing;
+        }
+    };
+
+    inline ITracker& TrackerContext::startRun() {
+        m_rootTracker = new SectionTracker( "{root}", *this, CATCH_NULL );
+        m_currentTracker = CATCH_NULL;
+        m_runState = Executing;
+        return *m_rootTracker;
+    }
+
+} // namespace TestCaseTracking
+
+using TestCaseTracking::ITracker;
+using TestCaseTracking::TrackerContext;
+using TestCaseTracking::SectionTracker;
+using TestCaseTracking::IndexTracker;
+
+} // namespace Catch
+
+// #included from: catch_fatal_condition.hpp
+#define TWOBLUECUBES_CATCH_FATAL_CONDITION_H_INCLUDED
+
+namespace Catch {
+
+    // Report the error condition then exit the process
+    inline void fatal( std::string const& message, int exitCode ) {
+        IContext& context = Catch::getCurrentContext();
+        IResultCapture* resultCapture = context.getResultCapture();
+        resultCapture->handleFatalErrorCondition( message );
+
+		if( Catch::alwaysTrue() ) // avoids "no return" warnings
+            exit( exitCode );
+    }
+
+} // namespace Catch
+
+#if defined ( CATCH_PLATFORM_WINDOWS ) /////////////////////////////////////////
+
+namespace Catch {
+
+    struct FatalConditionHandler {
+		void reset() {}
+	};
+
+} // namespace Catch
+
+#else // Not Windows - assumed to be POSIX compatible //////////////////////////
+
+#include <signal.h>
+
+namespace Catch {
+
+    struct SignalDefs { int id; const char* name; };
+    extern SignalDefs signalDefs[];
+    SignalDefs signalDefs[] = {
+            { SIGINT,  "SIGINT - Terminal interrupt signal" },
+            { SIGILL,  "SIGILL - Illegal instruction signal" },
+            { SIGFPE,  "SIGFPE - Floating point error signal" },
+            { SIGSEGV, "SIGSEGV - Segmentation violation signal" },
+            { SIGTERM, "SIGTERM - Termination request signal" },
+            { SIGABRT, "SIGABRT - Abort (abnormal termination) signal" }
+        };
+
+    struct FatalConditionHandler {
+
+        static void handleSignal( int sig ) {
+            for( std::size_t i = 0; i < sizeof(signalDefs)/sizeof(SignalDefs); ++i )
+                if( sig == signalDefs[i].id )
+                    fatal( signalDefs[i].name, -sig );
+            fatal( "<unknown signal>", -sig );
+        }
+
+        FatalConditionHandler() : m_isSet( true ) {
+            for( std::size_t i = 0; i < sizeof(signalDefs)/sizeof(SignalDefs); ++i )
+                signal( signalDefs[i].id, handleSignal );
+        }
+        ~FatalConditionHandler() {
+            reset();
+        }
+        void reset() {
+            if( m_isSet ) {
+                for( std::size_t i = 0; i < sizeof(signalDefs)/sizeof(SignalDefs); ++i )
+                    signal( signalDefs[i].id, SIG_DFL );
+                m_isSet = false;
+            }
+        }
+
+        bool m_isSet;
+    };
+
+} // namespace Catch
+
+#endif // not Windows
 
 #include <set>
 #include <string>
@@ -2803,256 +5898,1310 @@
 
     ///////////////////////////////////////////////////////////////////////////
 
-    class Runner : public IResultCapture, public IRunner {
-
-        Runner( const Runner& );
-        void operator =( const Runner& );
+    class RunContext : public IResultCapture, public IRunner {
+
+        RunContext( RunContext const& );
+        void operator =( RunContext const& );
 
     public:
 
-        explicit Runner( Config& config )
-        :   m_context( getCurrentMutableContext() ),
-            m_runningTest( NULL ),
-            m_config( config ),
-            m_reporter( config.getReporter() ),
-            m_prevRunner( &m_context.getRunner() ),
-            m_prevResultCapture( &m_context.getResultCapture() )
+        explicit RunContext( Ptr<IConfig const> const& _config, Ptr<IStreamingReporter> const& reporter )
+        :   m_runInfo( _config->name() ),
+            m_context( getCurrentMutableContext() ),
+            m_activeTestCase( CATCH_NULL ),
+            m_config( _config ),
+            m_reporter( reporter )
         {
             m_context.setRunner( this );
-            m_context.setConfig( &m_config );
+            m_context.setConfig( m_config );
             m_context.setResultCapture( this );
-            m_reporter->StartTesting();
-        }
-
-        ~Runner() {
-            m_reporter->EndTesting( m_totals );
-            m_context.setRunner( m_prevRunner );
-            m_context.setConfig( NULL );
-            m_context.setResultCapture( m_prevResultCapture );
-        }
-
-        virtual void runAll( bool runHiddenTests = false ) {
-            const std::vector<TestCaseInfo>& allTests = getCurrentContext().getTestCaseRegistry().getAllTests();
-            for( std::size_t i=0; i < allTests.size(); ++i ) {
-                if( runHiddenTests || !allTests[i].isHidden() )
-                {
-                    if( aborting() ) {
-                        m_reporter->Aborted();
-                        break;
-                    }
-                    runTest( allTests[i] );
-                }
-            }
-        }
-
-        virtual std::size_t runMatching( const std::string& rawTestSpec ) {
-            TestSpec testSpec( rawTestSpec );
-
-            const std::vector<TestCaseInfo>& allTests = getCurrentContext().getTestCaseRegistry().getAllTests();
-            std::size_t testsRun = 0;
-            for( std::size_t i=0; i < allTests.size(); ++i ) {
-                if( testSpec.matches( allTests[i].getName() ) ) {
-                    if( aborting() ) {
-                        m_reporter->Aborted();
-                        break;
-                    }
-                    runTest( allTests[i] );
-                    testsRun++;
-                }
-            }
-            return testsRun;
-        }
-
-        void runTest( const TestCaseInfo& testInfo ) {
+            m_reporter->testRunStarting( m_runInfo );
+        }
+
+        virtual ~RunContext() {
+            m_reporter->testRunEnded( TestRunStats( m_runInfo, m_totals, aborting() ) );
+        }
+
+        void testGroupStarting( std::string const& testSpec, std::size_t groupIndex, std::size_t groupsCount ) {
+            m_reporter->testGroupStarting( GroupInfo( testSpec, groupIndex, groupsCount ) );
+        }
+        void testGroupEnded( std::string const& testSpec, Totals const& totals, std::size_t groupIndex, std::size_t groupsCount ) {
+            m_reporter->testGroupEnded( TestGroupStats( GroupInfo( testSpec, groupIndex, groupsCount ), totals, aborting() ) );
+        }
+
+        Totals runTest( TestCase const& testCase ) {
             Totals prevTotals = m_totals;
 
             std::string redirectedCout;
             std::string redirectedCerr;
 
-            m_reporter->StartTestCase( testInfo );
-
-            m_runningTest = new RunningTest( &testInfo );
+            TestCaseInfo testInfo = testCase.getTestCaseInfo();
+
+            m_reporter->testCaseStarting( testInfo );
+
+            m_activeTestCase = &testCase;
 
             do {
+                m_trackerContext.startRun();
                 do {
-//                    m_reporter->StartGroup( "test case run" );
-                    m_currentResult.setLineInfo( m_runningTest->getTestCaseInfo().getLineInfo() );
+                    m_trackerContext.startCycle();
+                    m_testCaseTracker = &SectionTracker::acquire( m_trackerContext, testInfo.name );
                     runCurrentTest( redirectedCout, redirectedCerr );
-//                    m_reporter->EndGroup( "test case run", m_totals.delta( prevTotals ) );
                 }
-                while( m_runningTest->hasUntestedSections() && !aborting() );
-            }
+                while( !m_testCaseTracker->isSuccessfullyCompleted() && !aborting() );
+            }
+            // !TBD: deprecated - this will be replaced by indexed trackers
             while( getCurrentContext().advanceGeneratorsForCurrentTest() && !aborting() );
 
-            delete m_runningTest;
-            m_runningTest = NULL;
-
             Totals deltaTotals = m_totals.delta( prevTotals );
+            if( testInfo.expectedToFail() && deltaTotals.testCases.passed > 0 ) {
+                deltaTotals.assertions.failed++;
+                deltaTotals.testCases.passed--;
+                deltaTotals.testCases.failed++;
+            }
             m_totals.testCases += deltaTotals.testCases;
-            m_reporter->EndTestCase( testInfo, deltaTotals, redirectedCout, redirectedCerr );
-        }
-
-        virtual Totals getTotals() const {
-            return m_totals;
-        }
-
-        const Config& config() const {
+            m_reporter->testCaseEnded( TestCaseStats(   testInfo,
+                                                        deltaTotals,
+                                                        redirectedCout,
+                                                        redirectedCerr,
+                                                        aborting() ) );
+
+            m_activeTestCase = CATCH_NULL;
+            m_testCaseTracker = CATCH_NULL;
+
+            return deltaTotals;
+        }
+
+        Ptr<IConfig const> config() const {
             return m_config;
         }
 
     private: // IResultCapture
 
-        virtual ResultAction::Value acceptResult( bool result ) {
-            return acceptResult( result ? ResultWas::Ok : ResultWas::ExpressionFailed );
-        }
-
-        virtual ResultAction::Value acceptResult( ResultWas::OfType result ) {
-            m_currentResult.setResultType( result );
-            return actOnCurrentResult();
-        }
-
-        virtual ResultAction::Value acceptExpression( const ResultInfoBuilder& resultInfo ) {
-            m_currentResult = resultInfo;
-            return actOnCurrentResult();
-        }
-
-        virtual void acceptMessage( const std::string& msg ) {
-            m_currentResult.setMessage( msg );
-        }
-
-        virtual void testEnded( const ResultInfo& result ) {
+        virtual void assertionEnded( AssertionResult const& result ) {
             if( result.getResultType() == ResultWas::Ok ) {
                 m_totals.assertions.passed++;
             }
-            else if( !result.ok() ) {
+            else if( !result.isOk() ) {
                 m_totals.assertions.failed++;
-
-                std::vector<ResultInfo>::const_iterator it = m_info.begin();
-                std::vector<ResultInfo>::const_iterator itEnd = m_info.end();
-                for(; it != itEnd; ++it )
-                    m_reporter->Result( *it );
-                m_info.clear();
-            }
-
-            if( result.getResultType() == ResultWas::Info )
-                m_info.push_back( result );
-            else
-                m_reporter->Result( result );
+            }
+
+            if( m_reporter->assertionEnded( AssertionStats( result, m_messages, m_totals ) ) )
+                m_messages.clear();
+
+            // Reset working state
+            m_lastAssertionInfo = AssertionInfo( "", m_lastAssertionInfo.lineInfo, "{Unknown expression after the reported line}" , m_lastAssertionInfo.resultDisposition );
+            m_lastResult = result;
         }
 
         virtual bool sectionStarted (
-            const std::string& name,
-            const std::string& description,
-            const SourceLineInfo& lineInfo,
+            SectionInfo const& sectionInfo,
             Counts& assertions
         )
         {
             std::ostringstream oss;
-            oss << name << "@" << lineInfo;
-
-            if( !m_runningTest->addSection( oss.str() ) )
+            oss << sectionInfo.name << "@" << sectionInfo.lineInfo;
+
+            ITracker& sectionTracker = SectionTracker::acquire( m_trackerContext, oss.str() );
+            if( !sectionTracker.isOpen() )
                 return false;
-
-            m_currentResult.setLineInfo( lineInfo );
-            m_reporter->StartSection( name, description );
+            m_activeSections.push_back( &sectionTracker );
+
+            m_lastAssertionInfo.lineInfo = sectionInfo.lineInfo;
+
+            m_reporter->sectionStarting( sectionInfo );
+
             assertions = m_totals.assertions;
 
             return true;
         }
-
-        virtual void sectionEnded( const std::string& name, const Counts& prevAssertions ) {
-            m_runningTest->endSection( name );
-            m_reporter->EndSection( name, m_totals.assertions - prevAssertions );
-        }
-
-        virtual void pushScopedInfo( ScopedInfo* scopedInfo ) {
-            m_scopedInfos.push_back( scopedInfo );
-        }
-
-        virtual void popScopedInfo( ScopedInfo* scopedInfo ) {
-            if( m_scopedInfos.back() == scopedInfo )
-                m_scopedInfos.pop_back();
-        }
-
-        virtual bool shouldDebugBreak() const {
-            return m_config.shouldDebugBreak();
+        bool testForMissingAssertions( Counts& assertions ) {
+            if( assertions.total() != 0 )
+                return false;
+            if( !m_config->warnAboutMissingAssertions() )
+                return false;
+            if( m_trackerContext.currentTracker().hasChildren() )
+                return false;
+            m_totals.assertions.failed++;
+            assertions.failed++;
+            return true;
+        }
+
+        virtual void sectionEnded( SectionEndInfo const& endInfo ) {
+            Counts assertions = m_totals.assertions - endInfo.prevAssertions;
+            bool missingAssertions = testForMissingAssertions( assertions );
+
+            if( !m_activeSections.empty() ) {
+                m_activeSections.back()->close();
+                m_activeSections.pop_back();
+            }
+
+            m_reporter->sectionEnded( SectionStats( endInfo.sectionInfo, assertions, endInfo.durationInSeconds, missingAssertions ) );
+            m_messages.clear();
+        }
+
+        virtual void sectionEndedEarly( SectionEndInfo const& endInfo ) {
+            if( m_unfinishedSections.empty() )
+                m_activeSections.back()->fail();
+            else
+                m_activeSections.back()->close();
+            m_activeSections.pop_back();
+
+            m_unfinishedSections.push_back( endInfo );
+        }
+
+        virtual void pushScopedMessage( MessageInfo const& message ) {
+            m_messages.push_back( message );
+        }
+
+        virtual void popScopedMessage( MessageInfo const& message ) {
+            m_messages.erase( std::remove( m_messages.begin(), m_messages.end(), message ), m_messages.end() );
         }
 
         virtual std::string getCurrentTestName() const {
-            return m_runningTest
-                ? m_runningTest->getTestCaseInfo().getName()
+            return m_activeTestCase
+                ? m_activeTestCase->getTestCaseInfo().name
                 : "";
         }
 
-        virtual const ResultInfo* getLastResult() const {
+        virtual const AssertionResult* getLastResult() const {
             return &m_lastResult;
         }
 
+        virtual void handleFatalErrorCondition( std::string const& message ) {
+            ResultBuilder resultBuilder = makeUnexpectedResultBuilder();
+            resultBuilder.setResultType( ResultWas::FatalErrorCondition );
+            resultBuilder << message;
+            resultBuilder.captureExpression();
+
+            handleUnfinishedSections();
+
+            // Recreate section for test case (as we will lose the one that was in scope)
+            TestCaseInfo const& testCaseInfo = m_activeTestCase->getTestCaseInfo();
+            SectionInfo testCaseSection( testCaseInfo.lineInfo, testCaseInfo.name, testCaseInfo.description );
+
+            Counts assertions;
+            assertions.failed = 1;
+            SectionStats testCaseSectionStats( testCaseSection, assertions, 0, false );
+            m_reporter->sectionEnded( testCaseSectionStats );
+
+            TestCaseInfo testInfo = m_activeTestCase->getTestCaseInfo();
+
+            Totals deltaTotals;
+            deltaTotals.testCases.failed = 1;
+            m_reporter->testCaseEnded( TestCaseStats(   testInfo,
+                                                        deltaTotals,
+                                                        "",
+                                                        "",
+                                                        false ) );
+            m_totals.testCases.failed++;
+            testGroupEnded( "", m_totals, 1, 1 );
+            m_reporter->testRunEnded( TestRunStats( m_runInfo, m_totals, false ) );
+        }
+
+    public:
+        // !TBD We need to do this another way!
+        bool aborting() const {
+            return m_totals.assertions.failed == static_cast<std::size_t>( m_config->abortAfter() );
+        }
+
     private:
 
-        bool aborting() const {
-            return m_totals.assertions.failed == static_cast<std::size_t>( m_config.getCutoff() );
-        }
-
-        ResultAction::Value actOnCurrentResult() {
-            testEnded( m_currentResult );
-            m_lastResult = m_currentResult;
-
-            m_currentResult = ResultInfoBuilder();
-
-            ResultAction::Value action = ResultAction::None;
-
-            if( !m_lastResult.ok() ) {
-                action = ResultAction::Failed;
-                if( shouldDebugBreak() )
-                    action = (ResultAction::Value)( action | ResultAction::Debug );
-                if( aborting() )
-                    action = (ResultAction::Value)( action | ResultAction::Abort );
-            }
-            return action;
-        }
-
         void runCurrentTest( std::string& redirectedCout, std::string& redirectedCerr ) {
+            TestCaseInfo const& testCaseInfo = m_activeTestCase->getTestCaseInfo();
+            SectionInfo testCaseSection( testCaseInfo.lineInfo, testCaseInfo.name, testCaseInfo.description );
+            m_reporter->sectionStarting( testCaseSection );
+            Counts prevAssertions = m_totals.assertions;
+            double duration = 0;
             try {
-                m_runningTest->reset();
-                if( m_reporter->shouldRedirectStdout() ) {
-                    StreamRedirect coutRedir( std::cout, redirectedCout );
-                    StreamRedirect cerrRedir( std::cerr, redirectedCerr );
-                    m_runningTest->getTestCaseInfo().invoke();
+                m_lastAssertionInfo = AssertionInfo( "TEST_CASE", testCaseInfo.lineInfo, "", ResultDisposition::Normal );
+
+                seedRng( *m_config );
+
+                Timer timer;
+                timer.start();
+                if( m_reporter->getPreferences().shouldRedirectStdOut ) {
+                    StreamRedirect coutRedir( Catch::cout(), redirectedCout );
+                    StreamRedirect cerrRedir( Catch::cerr(), redirectedCerr );
+                    invokeActiveTestCase();
                 }
                 else {
-                    m_runningTest->getTestCaseInfo().invoke();
+                    invokeActiveTestCase();
                 }
-                m_runningTest->ranToCompletion();
+                duration = timer.getElapsedSeconds();
             }
             catch( TestFailureException& ) {
                 // This just means the test was aborted due to failure
             }
             catch(...) {
-                acceptMessage( getCurrentContext().getExceptionTranslatorRegistry().translateActiveException() );
-                acceptResult( ResultWas::ThrewException );
-            }
-            m_info.clear();
+                makeUnexpectedResultBuilder().useActiveException();
+            }
+            m_testCaseTracker->close();
+            handleUnfinishedSections();
+            m_messages.clear();
+
+            Counts assertions = m_totals.assertions - prevAssertions;
+            bool missingAssertions = testForMissingAssertions( assertions );
+
+            if( testCaseInfo.okToFail() ) {
+                std::swap( assertions.failedButOk, assertions.failed );
+                m_totals.assertions.failed -= assertions.failedButOk;
+                m_totals.assertions.failedButOk += assertions.failedButOk;
+            }
+
+            SectionStats testCaseSectionStats( testCaseSection, assertions, duration, missingAssertions );
+            m_reporter->sectionEnded( testCaseSectionStats );
+        }
+
+        void invokeActiveTestCase() {
+            FatalConditionHandler fatalConditionHandler; // Handle signals
+            m_activeTestCase->invoke();
+            fatalConditionHandler.reset();
+        }
+
+    private:
+
+        ResultBuilder makeUnexpectedResultBuilder() const {
+            return ResultBuilder(   m_lastAssertionInfo.macroName.c_str(),
+                                    m_lastAssertionInfo.lineInfo,
+                                    m_lastAssertionInfo.capturedExpression.c_str(),
+                                    m_lastAssertionInfo.resultDisposition );
+        }
+
+        void handleUnfinishedSections() {
+            // If sections ended prematurely due to an exception we stored their
+            // infos here so we can tear them down outside the unwind process.
+            for( std::vector<SectionEndInfo>::const_reverse_iterator it = m_unfinishedSections.rbegin(),
+                        itEnd = m_unfinishedSections.rend();
+                    it != itEnd;
+                    ++it )
+                sectionEnded( *it );
+            m_unfinishedSections.clear();
+        }
+
+        TestRunInfo m_runInfo;
+        IMutableContext& m_context;
+        TestCase const* m_activeTestCase;
+        ITracker* m_testCaseTracker;
+        ITracker* m_currentSectionTracker;
+        AssertionResult m_lastResult;
+
+        Ptr<IConfig const> m_config;
+        Totals m_totals;
+        Ptr<IStreamingReporter> m_reporter;
+        std::vector<MessageInfo> m_messages;
+        AssertionInfo m_lastAssertionInfo;
+        std::vector<SectionEndInfo> m_unfinishedSections;
+        std::vector<ITracker*> m_activeSections;
+        TrackerContext m_trackerContext;
+    };
+
+    IResultCapture& getResultCapture() {
+        if( IResultCapture* capture = getCurrentContext().getResultCapture() )
+            return *capture;
+        else
+            throw std::logic_error( "No result capture instance" );
+    }
+
+} // end namespace Catch
+
+// #included from: internal/catch_version.h
+#define TWOBLUECUBES_CATCH_VERSION_H_INCLUDED
+
+namespace Catch {
+
+    // Versioning information
+    struct Version {
+        Version(    unsigned int _majorVersion,
+                    unsigned int _minorVersion,
+                    unsigned int _patchNumber,
+                    std::string const& _branchName,
+                    unsigned int _buildNumber );
+
+        unsigned int const majorVersion;
+        unsigned int const minorVersion;
+        unsigned int const patchNumber;
+
+        // buildNumber is only used if branchName is not null
+        std::string const branchName;
+        unsigned int const buildNumber;
+
+        friend std::ostream& operator << ( std::ostream& os, Version const& version );
+
+    private:
+        void operator=( Version const& );
+    };
+
+    extern Version libraryVersion;
+}
+
+#include <fstream>
+#include <stdlib.h>
+#include <limits>
+
+namespace Catch {
+
+    Ptr<IStreamingReporter> createReporter( std::string const& reporterName, Ptr<Config> const& config ) {
+        Ptr<IStreamingReporter> reporter = getRegistryHub().getReporterRegistry().create( reporterName, config.get() );
+        if( !reporter ) {
+            std::ostringstream oss;
+            oss << "No reporter registered with name: '" << reporterName << "'";
+            throw std::domain_error( oss.str() );
+        }
+        return reporter;
+    }
+
+    Ptr<IStreamingReporter> makeReporter( Ptr<Config> const& config ) {
+        std::vector<std::string> reporters = config->getReporterNames();
+        if( reporters.empty() )
+            reporters.push_back( "console" );
+
+        Ptr<IStreamingReporter> reporter;
+        for( std::vector<std::string>::const_iterator it = reporters.begin(), itEnd = reporters.end();
+                it != itEnd;
+                ++it )
+            reporter = addReporter( reporter, createReporter( *it, config ) );
+        return reporter;
+    }
+    Ptr<IStreamingReporter> addListeners( Ptr<IConfig const> const& config, Ptr<IStreamingReporter> reporters ) {
+        IReporterRegistry::Listeners listeners = getRegistryHub().getReporterRegistry().getListeners();
+        for( IReporterRegistry::Listeners::const_iterator it = listeners.begin(), itEnd = listeners.end();
+                it != itEnd;
+                ++it )
+            reporters = addReporter(reporters, (*it)->create( ReporterConfig( config ) ) );
+        return reporters;
+    }
+
+    Totals runTests( Ptr<Config> const& config ) {
+
+        Ptr<IConfig const> iconfig = config.get();
+
+        Ptr<IStreamingReporter> reporter = makeReporter( config );
+        reporter = addListeners( iconfig, reporter );
+
+        RunContext context( iconfig, reporter );
+
+        Totals totals;
+
+        context.testGroupStarting( config->name(), 1, 1 );
+
+        TestSpec testSpec = config->testSpec();
+        if( !testSpec.hasFilters() )
+            testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "~[.]" ).testSpec(); // All not hidden tests
+
+        std::vector<TestCase> const& allTestCases = getAllTestCasesSorted( *iconfig );
+        for( std::vector<TestCase>::const_iterator it = allTestCases.begin(), itEnd = allTestCases.end();
+                it != itEnd;
+                ++it ) {
+            if( !context.aborting() && matchTest( *it, testSpec, *iconfig ) )
+                totals += context.runTest( *it );
+            else
+                reporter->skipTest( *it );
+        }
+
+        context.testGroupEnded( iconfig->name(), totals, 1, 1 );
+        return totals;
+    }
+
+    void applyFilenamesAsTags( IConfig const& config ) {
+        std::vector<TestCase> const& tests = getAllTestCasesSorted( config );
+        for(std::size_t i = 0; i < tests.size(); ++i ) {
+            TestCase& test = const_cast<TestCase&>( tests[i] );
+            std::set<std::string> tags = test.tags;
+
+            std::string filename = test.lineInfo.file;
+            std::string::size_type lastSlash = filename.find_last_of( "\\/" );
+            if( lastSlash != std::string::npos )
+                filename = filename.substr( lastSlash+1 );
+
+            std::string::size_type lastDot = filename.find_last_of( "." );
+            if( lastDot != std::string::npos )
+                filename = filename.substr( 0, lastDot );
+
+            tags.insert( "#" + filename );
+            setTags( test, tags );
+        }
+    }
+
+    class Session : NonCopyable {
+        static bool alreadyInstantiated;
+
+    public:
+
+        struct OnUnusedOptions { enum DoWhat { Ignore, Fail }; };
+
+        Session()
+        : m_cli( makeCommandLineParser() ) {
+            if( alreadyInstantiated ) {
+                std::string msg = "Only one instance of Catch::Session can ever be used";
+                Catch::cerr() << msg << std::endl;
+                throw std::logic_error( msg );
+            }
+            alreadyInstantiated = true;
+        }
+        ~Session() {
+            Catch::cleanUp();
+        }
+
+        void showHelp( std::string const& processName ) {
+            Catch::cout() << "\nCatch v" << libraryVersion << "\n";
+
+            m_cli.usage( Catch::cout(), processName );
+            Catch::cout() << "For more detail usage please see the project docs\n" << std::endl;
+        }
+
+        int applyCommandLine( int argc, char const* const* const argv, OnUnusedOptions::DoWhat unusedOptionBehaviour = OnUnusedOptions::Fail ) {
+            try {
+                m_cli.setThrowOnUnrecognisedTokens( unusedOptionBehaviour == OnUnusedOptions::Fail );
+                m_unusedTokens = m_cli.parseInto( Clara::argsToVector( argc, argv ), m_configData );
+                if( m_configData.showHelp )
+                    showHelp( m_configData.processName );
+                m_config.reset();
+            }
+            catch( std::exception& ex ) {
+                {
+                    Colour colourGuard( Colour::Red );
+                    Catch::cerr()
+                        << "\nError(s) in input:\n"
+                        << Text( ex.what(), TextAttributes().setIndent(2) )
+                        << "\n\n";
+                }
+                m_cli.usage( Catch::cout(), m_configData.processName );
+                return (std::numeric_limits<int>::max)();
+            }
+            return 0;
+        }
+
+        void useConfigData( ConfigData const& _configData ) {
+            m_configData = _configData;
+            m_config.reset();
+        }
+
+        int run( int argc, char const* const* const argv ) {
+
+            int returnCode = applyCommandLine( argc, argv );
+            if( returnCode == 0 )
+                returnCode = run();
+            return returnCode;
+        }
+
+        int run() {
+            if( m_configData.showHelp )
+                return 0;
+
+            try
+            {
+                config(); // Force config to be constructed
+
+                seedRng( *m_config );
+
+                if( m_configData.filenamesAsTags )
+                    applyFilenamesAsTags( *m_config );
+
+                // Handle list request
+                if( Option<std::size_t> listed = list( config() ) )
+                    return static_cast<int>( *listed );
+
+                return static_cast<int>( runTests( m_config ).assertions.failed );
+            }
+            catch( std::exception& ex ) {
+                Catch::cerr() << ex.what() << std::endl;
+                return (std::numeric_limits<int>::max)();
+            }
+        }
+
+        Clara::CommandLine<ConfigData> const& cli() const {
+            return m_cli;
+        }
+        std::vector<Clara::Parser::Token> const& unusedTokens() const {
+            return m_unusedTokens;
+        }
+        ConfigData& configData() {
+            return m_configData;
+        }
+        Config& config() {
+            if( !m_config )
+                m_config = new Config( m_configData );
+            return *m_config;
+        }
+    private:
+        Clara::CommandLine<ConfigData> m_cli;
+        std::vector<Clara::Parser::Token> m_unusedTokens;
+        ConfigData m_configData;
+        Ptr<Config> m_config;
+    };
+
+    bool Session::alreadyInstantiated = false;
+
+} // end namespace Catch
+
+// #included from: catch_registry_hub.hpp
+#define TWOBLUECUBES_CATCH_REGISTRY_HUB_HPP_INCLUDED
+
+// #included from: catch_test_case_registry_impl.hpp
+#define TWOBLUECUBES_CATCH_TEST_CASE_REGISTRY_IMPL_HPP_INCLUDED
+
+#include <vector>
+#include <set>
+#include <sstream>
+#include <iostream>
+#include <algorithm>
+
+#ifdef CATCH_CPP14_OR_GREATER
+#include <random>
+#endif
+
+namespace Catch {
+
+    struct RandomNumberGenerator {
+        typedef int result_type;
+
+        result_type operator()( result_type n ) const { return std::rand() % n; }
+
+#ifdef CATCH_CPP14_OR_GREATER
+        static constexpr result_type min() { return 0; }
+        static constexpr result_type max() { return 1000000; }
+        result_type operator()() const { return std::rand() % max(); }
+#endif
+        template<typename V>
+        static void shuffle( V& vector ) {
+            RandomNumberGenerator rng;
+#ifdef CATCH_CPP14_OR_GREATER
+            std::shuffle( vector.begin(), vector.end(), rng );
+#else
+            std::random_shuffle( vector.begin(), vector.end(), rng );
+#endif
+        }
+    };
+
+    inline std::vector<TestCase> sortTests( IConfig const& config, std::vector<TestCase> const& unsortedTestCases ) {
+
+        std::vector<TestCase> sorted = unsortedTestCases;
+
+        switch( config.runOrder() ) {
+            case RunTests::InLexicographicalOrder:
+                std::sort( sorted.begin(), sorted.end() );
+                break;
+            case RunTests::InRandomOrder:
+                {
+                    seedRng( config );
+                    RandomNumberGenerator::shuffle( sorted );
+                }
+                break;
+            case RunTests::InDeclarationOrder:
+                // already in declaration order
+                break;
+        }
+        return sorted;
+    }
+    bool matchTest( TestCase const& testCase, TestSpec const& testSpec, IConfig const& config ) {
+        return testSpec.matches( testCase ) && ( config.allowThrows() || !testCase.throws() );
+    }
+
+    void enforceNoDuplicateTestCases( std::vector<TestCase> const& functions ) {
+        std::set<TestCase> seenFunctions;
+        for( std::vector<TestCase>::const_iterator it = functions.begin(), itEnd = functions.end();
+            it != itEnd;
+            ++it ) {
+            std::pair<std::set<TestCase>::const_iterator, bool> prev = seenFunctions.insert( *it );
+            if( !prev.second ) {
+                std::ostringstream ss;
+
+                ss  << Colour( Colour::Red )
+                    << "error: TEST_CASE( \"" << it->name << "\" ) already defined.\n"
+                    << "\tFirst seen at " << prev.first->getTestCaseInfo().lineInfo << "\n"
+                    << "\tRedefined at " << it->getTestCaseInfo().lineInfo << std::endl;
+
+                throw std::runtime_error(ss.str());
+            }
+        }
+    }
+
+    std::vector<TestCase> filterTests( std::vector<TestCase> const& testCases, TestSpec const& testSpec, IConfig const& config ) {
+        std::vector<TestCase> filtered;
+        filtered.reserve( testCases.size() );
+        for( std::vector<TestCase>::const_iterator it = testCases.begin(), itEnd = testCases.end();
+                it != itEnd;
+                ++it )
+            if( matchTest( *it, testSpec, config ) )
+                filtered.push_back( *it );
+        return filtered;
+    }
+    std::vector<TestCase> const& getAllTestCasesSorted( IConfig const& config ) {
+        return getRegistryHub().getTestCaseRegistry().getAllTestsSorted( config );
+    }
+
+    class TestRegistry : public ITestCaseRegistry {
+    public:
+        TestRegistry()
+        :   m_currentSortOrder( RunTests::InDeclarationOrder ),
+            m_unnamedCount( 0 )
+        {}
+        virtual ~TestRegistry();
+
+        virtual void registerTest( TestCase const& testCase ) {
+            std::string name = testCase.getTestCaseInfo().name;
+            if( name == "" ) {
+                std::ostringstream oss;
+                oss << "Anonymous test case " << ++m_unnamedCount;
+                return registerTest( testCase.withName( oss.str() ) );
+            }
+            m_functions.push_back( testCase );
+        }
+
+        virtual std::vector<TestCase> const& getAllTests() const {
+            return m_functions;
+        }
+        virtual std::vector<TestCase> const& getAllTestsSorted( IConfig const& config ) const {
+            if( m_sortedFunctions.empty() )
+                enforceNoDuplicateTestCases( m_functions );
+
+            if(  m_currentSortOrder != config.runOrder() || m_sortedFunctions.empty() ) {
+                m_sortedFunctions = sortTests( config, m_functions );
+                m_currentSortOrder = config.runOrder();
+            }
+            return m_sortedFunctions;
+        }
+
+    private:
+        std::vector<TestCase> m_functions;
+        mutable RunTests::InWhatOrder m_currentSortOrder;
+        mutable std::vector<TestCase> m_sortedFunctions;
+        size_t m_unnamedCount;
+        std::ios_base::Init m_ostreamInit; // Forces cout/ cerr to be initialised
+    };
+
+    ///////////////////////////////////////////////////////////////////////////
+
+    class FreeFunctionTestCase : public SharedImpl<ITestCase> {
+    public:
+
+        FreeFunctionTestCase( TestFunction fun ) : m_fun( fun ) {}
+
+        virtual void invoke() const {
+            m_fun();
         }
 
     private:
-        IMutableContext& m_context;
-        RunningTest* m_runningTest;
-        ResultInfoBuilder m_currentResult;
-        ResultInfo m_lastResult;
-
-        const Config& m_config;
-        Totals m_totals;
-        Ptr<IReporter> m_reporter;
-        std::vector<ScopedInfo*> m_scopedInfos;
-        std::vector<ResultInfo> m_info;
-        IRunner* m_prevRunner;
-        IResultCapture* m_prevResultCapture;
-    };
+        virtual ~FreeFunctionTestCase();
+
+        TestFunction m_fun;
+    };
+
+    inline std::string extractClassName( std::string const& classOrQualifiedMethodName ) {
+        std::string className = classOrQualifiedMethodName;
+        if( startsWith( className, "&" ) )
+        {
+            std::size_t lastColons = className.rfind( "::" );
+            std::size_t penultimateColons = className.rfind( "::", lastColons-1 );
+            if( penultimateColons == std::string::npos )
+                penultimateColons = 1;
+            className = className.substr( penultimateColons, lastColons-penultimateColons );
+        }
+        return className;
+    }
+
+    void registerTestCase
+        (   ITestCase* testCase,
+            char const* classOrQualifiedMethodName,
+            NameAndDesc const& nameAndDesc,
+            SourceLineInfo const& lineInfo ) {
+
+        getMutableRegistryHub().registerTest
+            ( makeTestCase
+                (   testCase,
+                    extractClassName( classOrQualifiedMethodName ),
+                    nameAndDesc.name,
+                    nameAndDesc.description,
+                    lineInfo ) );
+    }
+    void registerTestCaseFunction
+        (   TestFunction function,
+            SourceLineInfo const& lineInfo,
+            NameAndDesc const& nameAndDesc ) {
+        registerTestCase( new FreeFunctionTestCase( function ), "", nameAndDesc, lineInfo );
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+
+    AutoReg::AutoReg
+        (   TestFunction function,
+            SourceLineInfo const& lineInfo,
+            NameAndDesc const& nameAndDesc ) {
+        registerTestCaseFunction( function, lineInfo, nameAndDesc );
+    }
+
+    AutoReg::~AutoReg() {}
+
+} // end namespace Catch
+
+// #included from: catch_reporter_registry.hpp
+#define TWOBLUECUBES_CATCH_REPORTER_REGISTRY_HPP_INCLUDED
+
+#include <map>
+
+namespace Catch {
+
+    class ReporterRegistry : public IReporterRegistry {
+
+    public:
+
+        virtual ~ReporterRegistry() CATCH_OVERRIDE {}
+
+        virtual IStreamingReporter* create( std::string const& name, Ptr<IConfig const> const& config ) const CATCH_OVERRIDE {
+            FactoryMap::const_iterator it =  m_factories.find( name );
+            if( it == m_factories.end() )
+                return CATCH_NULL;
+            return it->second->create( ReporterConfig( config ) );
+        }
+
+        void registerReporter( std::string const& name, Ptr<IReporterFactory> const& factory ) {
+            m_factories.insert( std::make_pair( name, factory ) );
+        }
+        void registerListener( Ptr<IReporterFactory> const& factory ) {
+            m_listeners.push_back( factory );
+        }
+
+        virtual FactoryMap const& getFactories() const CATCH_OVERRIDE {
+            return m_factories;
+        }
+        virtual Listeners const& getListeners() const CATCH_OVERRIDE {
+            return m_listeners;
+        }
+
+    private:
+        FactoryMap m_factories;
+        Listeners m_listeners;
+    };
+}
+
+// #included from: catch_exception_translator_registry.hpp
+#define TWOBLUECUBES_CATCH_EXCEPTION_TRANSLATOR_REGISTRY_HPP_INCLUDED
+
+#ifdef __OBJC__
+#import "Foundation/Foundation.h"
+#endif
+
+namespace Catch {
+
+    class ExceptionTranslatorRegistry : public IExceptionTranslatorRegistry {
+    public:
+        ~ExceptionTranslatorRegistry() {
+            deleteAll( m_translators );
+        }
+
+        virtual void registerTranslator( const IExceptionTranslator* translator ) {
+            m_translators.push_back( translator );
+        }
+
+        virtual std::string translateActiveException() const {
+            try {
+#ifdef __OBJC__
+                // In Objective-C try objective-c exceptions first
+                @try {
+                    return tryTranslators();
+                }
+                @catch (NSException *exception) {
+                    return Catch::toString( [exception description] );
+                }
+#else
+                return tryTranslators();
+#endif
+            }
+            catch( TestFailureException& ) {
+                throw;
+            }
+            catch( std::exception& ex ) {
+                return ex.what();
+            }
+            catch( std::string& msg ) {
+                return msg;
+            }
+            catch( const char* msg ) {
+                return msg;
+            }
+            catch(...) {
+                return "Unknown exception";
+            }
+        }
+
+        std::string tryTranslators() const {
+            if( m_translators.empty() )
+                throw;
+            else
+                return m_translators[0]->translate( m_translators.begin()+1, m_translators.end() );
+        }
+
+    private:
+        std::vector<const IExceptionTranslator*> m_translators;
+    };
+}
+
+namespace Catch {
+
+    namespace {
+
+        class RegistryHub : public IRegistryHub, public IMutableRegistryHub {
+
+            RegistryHub( RegistryHub const& );
+            void operator=( RegistryHub const& );
+
+        public: // IRegistryHub
+            RegistryHub() {
+            }
+            virtual IReporterRegistry const& getReporterRegistry() const CATCH_OVERRIDE {
+                return m_reporterRegistry;
+            }
+            virtual ITestCaseRegistry const& getTestCaseRegistry() const CATCH_OVERRIDE {
+                return m_testCaseRegistry;
+            }
+            virtual IExceptionTranslatorRegistry& getExceptionTranslatorRegistry() CATCH_OVERRIDE {
+                return m_exceptionTranslatorRegistry;
+            }
+
+        public: // IMutableRegistryHub
+            virtual void registerReporter( std::string const& name, Ptr<IReporterFactory> const& factory ) CATCH_OVERRIDE {
+                m_reporterRegistry.registerReporter( name, factory );
+            }
+            virtual void registerListener( Ptr<IReporterFactory> const& factory ) CATCH_OVERRIDE {
+                m_reporterRegistry.registerListener( factory );
+            }
+            virtual void registerTest( TestCase const& testInfo ) CATCH_OVERRIDE {
+                m_testCaseRegistry.registerTest( testInfo );
+            }
+            virtual void registerTranslator( const IExceptionTranslator* translator ) CATCH_OVERRIDE {
+                m_exceptionTranslatorRegistry.registerTranslator( translator );
+            }
+
+        private:
+            TestRegistry m_testCaseRegistry;
+            ReporterRegistry m_reporterRegistry;
+            ExceptionTranslatorRegistry m_exceptionTranslatorRegistry;
+        };
+
+        // Single, global, instance
+        inline RegistryHub*& getTheRegistryHub() {
+            static RegistryHub* theRegistryHub = CATCH_NULL;
+            if( !theRegistryHub )
+                theRegistryHub = new RegistryHub();
+            return theRegistryHub;
+        }
+    }
+
+    IRegistryHub& getRegistryHub() {
+        return *getTheRegistryHub();
+    }
+    IMutableRegistryHub& getMutableRegistryHub() {
+        return *getTheRegistryHub();
+    }
+    void cleanUp() {
+        delete getTheRegistryHub();
+        getTheRegistryHub() = CATCH_NULL;
+        cleanUpContext();
+    }
+    std::string translateActiveException() {
+        return getRegistryHub().getExceptionTranslatorRegistry().translateActiveException();
+    }
+
+} // end namespace Catch
+
+// #included from: catch_notimplemented_exception.hpp
+#define TWOBLUECUBES_CATCH_NOTIMPLEMENTED_EXCEPTION_HPP_INCLUDED
+
+#include <ostream>
+
+namespace Catch {
+
+    NotImplementedException::NotImplementedException( SourceLineInfo const& lineInfo )
+    :   m_lineInfo( lineInfo ) {
+        std::ostringstream oss;
+        oss << lineInfo << ": function ";
+        oss << "not implemented";
+        m_what = oss.str();
+    }
+
+    const char* NotImplementedException::what() const CATCH_NOEXCEPT {
+        return m_what.c_str();
+    }
+
+} // end namespace Catch
+
+// #included from: catch_context_impl.hpp
+#define TWOBLUECUBES_CATCH_CONTEXT_IMPL_HPP_INCLUDED
+
+// #included from: catch_stream.hpp
+#define TWOBLUECUBES_CATCH_STREAM_HPP_INCLUDED
+
+#include <stdexcept>
+#include <cstdio>
+#include <iostream>
+
+namespace Catch {
+
+    template<typename WriterF, size_t bufferSize=256>
+    class StreamBufImpl : public StreamBufBase {
+        char data[bufferSize];
+        WriterF m_writer;
+
+    public:
+        StreamBufImpl() {
+            setp( data, data + sizeof(data) );
+        }
+
+        ~StreamBufImpl() CATCH_NOEXCEPT {
+            sync();
+        }
+
+    private:
+        int overflow( int c ) {
+            sync();
+
+            if( c != EOF ) {
+                if( pbase() == epptr() )
+                    m_writer( std::string( 1, static_cast<char>( c ) ) );
+                else
+                    sputc( static_cast<char>( c ) );
+            }
+            return 0;
+        }
+
+        int sync() {
+            if( pbase() != pptr() ) {
+                m_writer( std::string( pbase(), static_cast<std::string::size_type>( pptr() - pbase() ) ) );
+                setp( pbase(), epptr() );
+            }
+            return 0;
+        }
+    };
+
+    ///////////////////////////////////////////////////////////////////////////
+
+    FileStream::FileStream( std::string const& filename ) {
+        m_ofs.open( filename.c_str() );
+        if( m_ofs.fail() ) {
+            std::ostringstream oss;
+            oss << "Unable to open file: '" << filename << "'";
+            throw std::domain_error( oss.str() );
+        }
+    }
+
+    std::ostream& FileStream::stream() const {
+        return m_ofs;
+    }
+
+    struct OutputDebugWriter {
+
+        void operator()( std::string const&str ) {
+            writeToDebugConsole( str );
+        }
+    };
+
+    DebugOutStream::DebugOutStream()
+    :   m_streamBuf( new StreamBufImpl<OutputDebugWriter>() ),
+        m_os( m_streamBuf.get() )
+    {}
+
+    std::ostream& DebugOutStream::stream() const {
+        return m_os;
+    }
+
+    // Store the streambuf from cout up-front because
+    // cout may get redirected when running tests
+    CoutStream::CoutStream()
+    :   m_os( Catch::cout().rdbuf() )
+    {}
+
+    std::ostream& CoutStream::stream() const {
+        return m_os;
+    }
+
+#ifndef CATCH_CONFIG_NOSTDOUT // If you #define this you must implement these functions
+    std::ostream& cout() {
+        return std::cout;
+    }
+    std::ostream& cerr() {
+        return std::cerr;
+    }
+#endif
+}
+
+namespace Catch {
+
+    class Context : public IMutableContext {
+
+        Context() : m_config( CATCH_NULL ), m_runner( CATCH_NULL ), m_resultCapture( CATCH_NULL ) {}
+        Context( Context const& );
+        void operator=( Context const& );
+
+    public: // IContext
+        virtual IResultCapture* getResultCapture() {
+            return m_resultCapture;
+        }
+        virtual IRunner* getRunner() {
+            return m_runner;
+        }
+        virtual size_t getGeneratorIndex( std::string const& fileInfo, size_t totalSize ) {
+            return getGeneratorsForCurrentTest()
+            .getGeneratorInfo( fileInfo, totalSize )
+            .getCurrentIndex();
+        }
+        virtual bool advanceGeneratorsForCurrentTest() {
+            IGeneratorsForTest* generators = findGeneratorsForCurrentTest();
+            return generators && generators->moveNext();
+        }
+
+        virtual Ptr<IConfig const> getConfig() const {
+            return m_config;
+        }
+
+    public: // IMutableContext
+        virtual void setResultCapture( IResultCapture* resultCapture ) {
+            m_resultCapture = resultCapture;
+        }
+        virtual void setRunner( IRunner* runner ) {
+            m_runner = runner;
+        }
+        virtual void setConfig( Ptr<IConfig const> const& config ) {
+            m_config = config;
+        }
+
+        friend IMutableContext& getCurrentMutableContext();
+
+    private:
+        IGeneratorsForTest* findGeneratorsForCurrentTest() {
+            std::string testName = getResultCapture()->getCurrentTestName();
+
+            std::map<std::string, IGeneratorsForTest*>::const_iterator it =
+                m_generatorsByTestName.find( testName );
+            return it != m_generatorsByTestName.end()
+                ? it->second
+                : CATCH_NULL;
+        }
+
+        IGeneratorsForTest& getGeneratorsForCurrentTest() {
+            IGeneratorsForTest* generators = findGeneratorsForCurrentTest();
+            if( !generators ) {
+                std::string testName = getResultCapture()->getCurrentTestName();
+                generators = createGeneratorsForTest();
+                m_generatorsByTestName.insert( std::make_pair( testName, generators ) );
+            }
+            return *generators;
+        }
+
+    private:
+        Ptr<IConfig const> m_config;
+        IRunner* m_runner;
+        IResultCapture* m_resultCapture;
+        std::map<std::string, IGeneratorsForTest*> m_generatorsByTestName;
+    };
+
+    namespace {
+        Context* currentContext = CATCH_NULL;
+    }
+    IMutableContext& getCurrentMutableContext() {
+        if( !currentContext )
+            currentContext = new Context();
+        return *currentContext;
+    }
+    IContext& getCurrentContext() {
+        return getCurrentMutableContext();
+    }
+
+    void cleanUpContext() {
+        delete currentContext;
+        currentContext = CATCH_NULL;
+    }
+}
+
+// #included from: catch_console_colour_impl.hpp
+#define TWOBLUECUBES_CATCH_CONSOLE_COLOUR_IMPL_HPP_INCLUDED
+
+namespace Catch {
+    namespace {
+
+        struct IColourImpl {
+            virtual ~IColourImpl() {}
+            virtual void use( Colour::Code _colourCode ) = 0;
+        };
+
+        struct NoColourImpl : IColourImpl {
+            void use( Colour::Code ) {}
+
+            static IColourImpl* instance() {
+                static NoColourImpl s_instance;
+                return &s_instance;
+            }
+        };
+
+    } // anon namespace
+} // namespace Catch
+
+#if !defined( CATCH_CONFIG_COLOUR_NONE ) && !defined( CATCH_CONFIG_COLOUR_WINDOWS ) && !defined( CATCH_CONFIG_COLOUR_ANSI )
+#   ifdef CATCH_PLATFORM_WINDOWS
+#       define CATCH_CONFIG_COLOUR_WINDOWS
+#   else
+#       define CATCH_CONFIG_COLOUR_ANSI
+#   endif
+#endif
+
+#if defined ( CATCH_CONFIG_COLOUR_WINDOWS ) /////////////////////////////////////////
+
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+
+#ifdef __AFXDLL
+#include <AfxWin.h>
+#else
+#include <windows.h>
+#endif
+
+namespace Catch {
+namespace {
+
+    class Win32ColourImpl : public IColourImpl {
+    public:
+        Win32ColourImpl() : stdoutHandle( GetStdHandle(STD_OUTPUT_HANDLE) )
+        {
+            CONSOLE_SCREEN_BUFFER_INFO csbiInfo;
+            GetConsoleScreenBufferInfo( stdoutHandle, &csbiInfo );
+            originalForegroundAttributes = csbiInfo.wAttributes & ~( BACKGROUND_GREEN | BACKGROUND_RED | BACKGROUND_BLUE | BACKGROUND_INTENSITY );
+            originalBackgroundAttributes = csbiInfo.wAttributes & ~( FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE | FOREGROUND_INTENSITY );
+        }
+
+        virtual void use( Colour::Code _colourCode ) {
+            switch( _colourCode ) {
+                case Colour::None:      return setTextAttribute( originalForegroundAttributes );
+                case Colour::White:     return setTextAttribute( FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE );
+                case Colour::Red:       return setTextAttribute( FOREGROUND_RED );
+                case Colour::Green:     return setTextAttribute( FOREGROUND_GREEN );
+                case Colour::Blue:      return setTextAttribute( FOREGROUND_BLUE );
+                case Colour::Cyan:      return setTextAttribute( FOREGROUND_BLUE | FOREGROUND_GREEN );
+                case Colour::Yellow:    return setTextAttribute( FOREGROUND_RED | FOREGROUND_GREEN );
+                case Colour::Grey:      return setTextAttribute( 0 );
+
+                case Colour::LightGrey:     return setTextAttribute( FOREGROUND_INTENSITY );
+                case Colour::BrightRed:     return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_RED );
+                case Colour::BrightGreen:   return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_GREEN );
+                case Colour::BrightWhite:   return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE );
+
+                case Colour::Bright: throw std::logic_error( "not a colour" );
+            }
+        }
+
+    private:
+        void setTextAttribute( WORD _textAttribute ) {
+            SetConsoleTextAttribute( stdoutHandle, _textAttribute | originalBackgroundAttributes );
+        }
+        HANDLE stdoutHandle;
+        WORD originalForegroundAttributes;
+        WORD originalBackgroundAttributes;
+    };
+
+    IColourImpl* platformColourInstance() {
+        static Win32ColourImpl s_instance;
+
+        Ptr<IConfig const> config = getCurrentContext().getConfig();
+        UseColour::YesOrNo colourMode = config
+            ? config->useColour()
+            : UseColour::Auto;
+        if( colourMode == UseColour::Auto )
+            colourMode = !isDebuggerActive()
+                ? UseColour::Yes
+                : UseColour::No;
+        return colourMode == UseColour::Yes
+            ? &s_instance
+            : NoColourImpl::instance();
+    }
+
+} // end anon namespace
+} // end namespace Catch
+
+#elif defined( CATCH_CONFIG_COLOUR_ANSI ) //////////////////////////////////////
+
+#include <unistd.h>
+
+namespace Catch {
+namespace {
+
+    // use POSIX/ ANSI console terminal codes
+    // Thanks to Adam Strzelecki for original contribution
+    // (http://github.com/nanoant)
+    // https://github.com/philsquared/Catch/pull/131
+    class PosixColourImpl : public IColourImpl {
+    public:
+        virtual void use( Colour::Code _colourCode ) {
+            switch( _colourCode ) {
+                case Colour::None:
+                case Colour::White:     return setColour( "[0m" );
+                case Colour::Red:       return setColour( "[0;31m" );
+                case Colour::Green:     return setColour( "[0;32m" );
+                case Colour::Blue:      return setColour( "[0:34m" );
+                case Colour::Cyan:      return setColour( "[0;36m" );
+                case Colour::Yellow:    return setColour( "[0;33m" );
+                case Colour::Grey:      return setColour( "[1;30m" );
+
+                case Colour::LightGrey:     return setColour( "[0;37m" );
+                case Colour::BrightRed:     return setColour( "[1;31m" );
+                case Colour::BrightGreen:   return setColour( "[1;32m" );
+                case Colour::BrightWhite:   return setColour( "[1;37m" );
+
+                case Colour::Bright: throw std::logic_error( "not a colour" );
+            }
+        }
+        static IColourImpl* instance() {
+            static PosixColourImpl s_instance;
+            return &s_instance;
+        }
+
+    private:
+        void setColour( const char* _escapeCode ) {
+            Catch::cout() << '\033' << _escapeCode;
+        }
+    };
+
+    IColourImpl* platformColourInstance() {
+        Ptr<IConfig const> config = getCurrentContext().getConfig();
+        UseColour::YesOrNo colourMode = config
+            ? config->useColour()
+            : UseColour::Auto;
+        if( colourMode == UseColour::Auto )
+            colourMode = (!isDebuggerActive() && isatty(STDOUT_FILENO) )
+                ? UseColour::Yes
+                : UseColour::No;
+        return colourMode == UseColour::Yes
+            ? PosixColourImpl::instance()
+            : NoColourImpl::instance();
+    }
+
+} // end anon namespace
+} // end namespace Catch
+
+#else  // not Windows or ANSI ///////////////////////////////////////////////
+
+namespace Catch {
+
+    static IColourImpl* platformColourInstance() { return NoColourImpl::instance(); }
+
+} // end namespace Catch
+
+#endif // Windows/ ANSI/ None
+
+namespace Catch {
+
+    Colour::Colour( Code _colourCode ) : m_moved( false ) { use( _colourCode ); }
+    Colour::Colour( Colour const& _other ) : m_moved( false ) { const_cast<Colour&>( _other ).m_moved = true; }
+    Colour::~Colour(){ if( !m_moved ) use( None ); }
+
+    void Colour::use( Code _colourCode ) {
+        static IColourImpl* impl = platformColourInstance();
+        impl->use( _colourCode );
+    }
 
 } // end namespace Catch
 
 // #included from: catch_generators_impl.hpp
+#define TWOBLUECUBES_CATCH_GENERATORS_IMPL_HPP_INCLUDED
 
 #include <vector>
 #include <string>
@@ -3060,7 +7209,7 @@
 
 namespace Catch {
 
-    struct GeneratorInfo {
+    struct GeneratorInfo : IGeneratorInfo {
 
         GeneratorInfo( std::size_t size )
         :   m_size( size ),
@@ -3085,17 +7234,17 @@
 
     ///////////////////////////////////////////////////////////////////////////
 
-    class GeneratorsForTest {
+    class GeneratorsForTest : public IGeneratorsForTest {
 
     public:
         ~GeneratorsForTest() {
             deleteAll( m_generatorsInOrder );
         }
 
-        GeneratorInfo& getGeneratorInfo( const std::string& fileInfo, std::size_t size ) {
-            std::map<std::string, GeneratorInfo*>::const_iterator it = m_generatorsByName.find( fileInfo );
+        IGeneratorInfo& getGeneratorInfo( std::string const& fileInfo, std::size_t size ) {
+            std::map<std::string, IGeneratorInfo*>::const_iterator it = m_generatorsByName.find( fileInfo );
             if( it == m_generatorsByName.end() ) {
-                GeneratorInfo* info = new GeneratorInfo( size );
+                IGeneratorInfo* info = new GeneratorInfo( size );
                 m_generatorsByName.insert( std::make_pair( fileInfo, info ) );
                 m_generatorsInOrder.push_back( info );
                 return *info;
@@ -3104,8 +7253,8 @@
         }
 
         bool moveNext() {
-            std::vector<GeneratorInfo*>::const_iterator it = m_generatorsInOrder.begin();
-            std::vector<GeneratorInfo*>::const_iterator itEnd = m_generatorsInOrder.end();
+            std::vector<IGeneratorInfo*>::const_iterator it = m_generatorsInOrder.begin();
+            std::vector<IGeneratorInfo*>::const_iterator itEnd = m_generatorsInOrder.end();
             for(; it != itEnd; ++it ) {
                 if( (*it)->moveNext() )
                     return true;
@@ -3114,623 +7263,1603 @@
         }
 
     private:
-        std::map<std::string, GeneratorInfo*> m_generatorsByName;
-        std::vector<GeneratorInfo*> m_generatorsInOrder;
-    };
+        std::map<std::string, IGeneratorInfo*> m_generatorsByName;
+        std::vector<IGeneratorInfo*> m_generatorsInOrder;
+    };
+
+    IGeneratorsForTest* createGeneratorsForTest()
+    {
+        return new GeneratorsForTest();
+    }
 
 } // end namespace Catch
 
-#define INTERNAL_CATCH_LINESTR2( line ) #line
-#define INTERNAL_CATCH_LINESTR( line ) INTERNAL_CATCH_LINESTR2( line )
-
-#define INTERNAL_CATCH_GENERATE( expr ) expr.setFileInfo( __FILE__ "(" INTERNAL_CATCH_LINESTR( __LINE__ ) ")" )
-
-// #included from: catch_console_colour_impl.hpp
-
-// #included from: catch_console_colour.hpp
-
-namespace Catch {
-
-    struct ConsoleColourImpl;
-
-    class TextColour : NonCopyable {
-    public:
-
-        enum Colours {
-            None,
-
-            FileName,
-            ResultError,
-            ResultSuccess,
-
-            Error,
-            Success,
-
-            OriginalExpression,
-            ReconstructedExpression
-        };
-
-        TextColour( Colours colour = None );
-        void set( Colours colour );
-        ~TextColour();
-
-    private:
-        ConsoleColourImpl* m_impl;
-    };
-
-} // end namespace Catch
-
-#ifdef CATCH_PLATFORM_WINDOWS
-
-#include <windows.h>
+// #included from: catch_assertionresult.hpp
+#define TWOBLUECUBES_CATCH_ASSERTIONRESULT_HPP_INCLUDED
 
 namespace Catch {
 
-    namespace {
-
-        WORD mapConsoleColour( TextColour::Colours colour ) {
-            switch( colour ) {
-                case TextColour::FileName:
-                    return FOREGROUND_INTENSITY;                    // greyed out
-                case TextColour::ResultError:
-                    return FOREGROUND_RED | FOREGROUND_INTENSITY;   // bright red
-                case TextColour::ResultSuccess:
-                    return FOREGROUND_GREEN | FOREGROUND_INTENSITY; // bright green
-                case TextColour::Error:
-                    return FOREGROUND_RED;                          // dark red
-                case TextColour::Success:
-                    return FOREGROUND_GREEN;                        // dark green
-                case TextColour::OriginalExpression:
-                    return FOREGROUND_BLUE | FOREGROUND_GREEN;      // turquoise
-                case TextColour::ReconstructedExpression:
-                    return FOREGROUND_RED | FOREGROUND_GREEN;       // greeny-yellow
-                default: return 0;
-            }
-        }
-    }
-
-    struct ConsoleColourImpl {
-
-        ConsoleColourImpl()
-        :   hStdout( GetStdHandle(STD_OUTPUT_HANDLE) ),
-            wOldColorAttrs( 0 )
-        {
-            GetConsoleScreenBufferInfo( hStdout, &csbiInfo );
-            wOldColorAttrs = csbiInfo.wAttributes;
-        }
-
-        ~ConsoleColourImpl() {
-            SetConsoleTextAttribute( hStdout, wOldColorAttrs );
-        }
-
-        void set( TextColour::Colours colour ) {
-            WORD consoleColour = mapConsoleColour( colour );
-            if( consoleColour > 0 )
-                SetConsoleTextAttribute( hStdout, consoleColour );
-        }
-
-        HANDLE hStdout;
-        CONSOLE_SCREEN_BUFFER_INFO csbiInfo;
-        WORD wOldColorAttrs;
-    };
-
-    TextColour::TextColour( Colours colour )
-    : m_impl( new ConsoleColourImpl() )
-    {
-        if( colour )
-            m_impl->set( colour );
-    }
-
-    TextColour::~TextColour() {
-        delete m_impl;
-    }
-
-    void TextColour::set( Colours colour ) {
-        m_impl->set( colour );
+    AssertionInfo::AssertionInfo(   std::string const& _macroName,
+                                    SourceLineInfo const& _lineInfo,
+                                    std::string const& _capturedExpression,
+                                    ResultDisposition::Flags _resultDisposition )
+    :   macroName( _macroName ),
+        lineInfo( _lineInfo ),
+        capturedExpression( _capturedExpression ),
+        resultDisposition( _resultDisposition )
+    {}
+
+    AssertionResult::AssertionResult() {}
+
+    AssertionResult::AssertionResult( AssertionInfo const& info, AssertionResultData const& data )
+    :   m_info( info ),
+        m_resultData( data )
+    {}
+
+    AssertionResult::~AssertionResult() {}
+
+    // Result was a success
+    bool AssertionResult::succeeded() const {
+        return Catch::isOk( m_resultData.resultType );
+    }
+
+    // Result was a success, or failure is suppressed
+    bool AssertionResult::isOk() const {
+        return Catch::isOk( m_resultData.resultType ) || shouldSuppressFailure( m_info.resultDisposition );
+    }
+
+    ResultWas::OfType AssertionResult::getResultType() const {
+        return m_resultData.resultType;
+    }
+
+    bool AssertionResult::hasExpression() const {
+        return !m_info.capturedExpression.empty();
+    }
+
+    bool AssertionResult::hasMessage() const {
+        return !m_resultData.message.empty();
+    }
+
+    std::string AssertionResult::getExpression() const {
+        if( isFalseTest( m_info.resultDisposition ) )
+            return "!" + m_info.capturedExpression;
+        else
+            return m_info.capturedExpression;
+    }
+    std::string AssertionResult::getExpressionInMacro() const {
+        if( m_info.macroName.empty() )
+            return m_info.capturedExpression;
+        else
+            return m_info.macroName + "( " + m_info.capturedExpression + " )";
+    }
+
+    bool AssertionResult::hasExpandedExpression() const {
+        return hasExpression() && getExpandedExpression() != getExpression();
+    }
+
+    std::string AssertionResult::getExpandedExpression() const {
+        return m_resultData.reconstructedExpression;
+    }
+
+    std::string AssertionResult::getMessage() const {
+        return m_resultData.message;
+    }
+    SourceLineInfo AssertionResult::getSourceInfo() const {
+        return m_info.lineInfo;
+    }
+
+    std::string AssertionResult::getTestMacroName() const {
+        return m_info.macroName;
     }
 
 } // end namespace Catch
 
-#else
-
-namespace Catch {
-    TextColour::TextColour( Colours ){}
-    TextColour::~TextColour(){}
-    void TextColour::set( Colours ){}
-
-} // end namespace Catch
-
-#endif
-
-
-// #included from: catch_exception_translator_registry.hpp
-
-#ifdef __OBJC__
-#import "Foundation/Foundation.h"
-#endif
+// #included from: catch_test_case_info.hpp
+#define TWOBLUECUBES_CATCH_TEST_CASE_INFO_HPP_INCLUDED
 
 namespace Catch {
 
-    class ExceptionTranslatorRegistry : public IExceptionTranslatorRegistry {
-
-        ~ExceptionTranslatorRegistry() {
-            deleteAll( m_translators );
-        }
-
-        virtual void registerTranslator( IExceptionTranslator* translator ) {
-            m_translators.push_back( translator );
-        }
-
-        virtual std::string translateActiveException() const {
-            try {
-#ifdef __OBJC__
-                // In Objective-C try objective-c exceptions first
-                @try {
-                    throw;
-                }
-                @catch (NSException *exception) {
-                    return toString( [exception description] );
+    inline TestCaseInfo::SpecialProperties parseSpecialTag( std::string const& tag ) {
+        if( startsWith( tag, "." ) ||
+            tag == "hide" ||
+            tag == "!hide" )
+            return TestCaseInfo::IsHidden;
+        else if( tag == "!throws" )
+            return TestCaseInfo::Throws;
+        else if( tag == "!shouldfail" )
+            return TestCaseInfo::ShouldFail;
+        else if( tag == "!mayfail" )
+            return TestCaseInfo::MayFail;
+        else
+            return TestCaseInfo::None;
+    }
+    inline bool isReservedTag( std::string const& tag ) {
+        return parseSpecialTag( tag ) == TestCaseInfo::None && tag.size() > 0 && !isalnum( tag[0] );
+    }
+    inline void enforceNotReservedTag( std::string const& tag, SourceLineInfo const& _lineInfo ) {
+        if( isReservedTag( tag ) ) {
+            {
+                Colour colourGuard( Colour::Red );
+                Catch::cerr()
+                    << "Tag name [" << tag << "] not allowed.\n"
+                    << "Tag names starting with non alpha-numeric characters are reserved\n";
+            }
+            {
+                Colour colourGuard( Colour::FileName );
+                Catch::cerr() << _lineInfo << std::endl;
+            }
+            exit(1);
+        }
+    }
+
+    TestCase makeTestCase(  ITestCase* _testCase,
+                            std::string const& _className,
+                            std::string const& _name,
+                            std::string const& _descOrTags,
+                            SourceLineInfo const& _lineInfo )
+    {
+        bool isHidden( startsWith( _name, "./" ) ); // Legacy support
+
+        // Parse out tags
+        std::set<std::string> tags;
+        std::string desc, tag;
+        bool inTag = false;
+        for( std::size_t i = 0; i < _descOrTags.size(); ++i ) {
+            char c = _descOrTags[i];
+            if( !inTag ) {
+                if( c == '[' )
+                    inTag = true;
+                else
+                    desc += c;
+            }
+            else {
+                if( c == ']' ) {
+                    TestCaseInfo::SpecialProperties prop = parseSpecialTag( tag );
+                    if( prop == TestCaseInfo::IsHidden )
+                        isHidden = true;
+                    else if( prop == TestCaseInfo::None )
+                        enforceNotReservedTag( tag, _lineInfo );
+
+                    tags.insert( tag );
+                    tag.clear();
+                    inTag = false;
                 }
-#else
-                throw;
-#endif
-            }
-            catch( std::exception& ex ) {
-                return ex.what();
-            }
-            catch( std::string& msg ) {
-                return msg;
-            }
-            catch( const char* msg ) {
-                return msg;
-            }
-            catch(...) {
-                return tryTranslators( m_translators.begin() );
-            }
-        }
-
-        std::string tryTranslators( std::vector<IExceptionTranslator*>::const_iterator it ) const {
-            if( it == m_translators.end() )
-                return "Unknown exception";
-
-            try {
-                return (*it)->translate();
-            }
-            catch(...) {
-                return tryTranslators( it+1 );
-            }
-        }
-
-    private:
-        std::vector<IExceptionTranslator*> m_translators;
-    };
-}
-
-// #included from: catch_reporter_registry.hpp
-
-#include <map>
+                else
+                    tag += c;
+            }
+        }
+        if( isHidden ) {
+            tags.insert( "hide" );
+            tags.insert( "." );
+        }
+
+        TestCaseInfo info( _name, _className, desc, tags, _lineInfo );
+        return TestCase( _testCase, info );
+    }
+
+    void setTags( TestCaseInfo& testCaseInfo, std::set<std::string> const& tags )
+    {
+        testCaseInfo.tags = tags;
+        testCaseInfo.lcaseTags.clear();
+
+        std::ostringstream oss;
+        for( std::set<std::string>::const_iterator it = tags.begin(), itEnd = tags.end(); it != itEnd; ++it ) {
+            oss << "[" << *it << "]";
+            std::string lcaseTag = toLower( *it );
+            testCaseInfo.properties = static_cast<TestCaseInfo::SpecialProperties>( testCaseInfo.properties | parseSpecialTag( lcaseTag ) );
+            testCaseInfo.lcaseTags.insert( lcaseTag );
+        }
+        testCaseInfo.tagsAsString = oss.str();
+    }
+
+    TestCaseInfo::TestCaseInfo( std::string const& _name,
+                                std::string const& _className,
+                                std::string const& _description,
+                                std::set<std::string> const& _tags,
+                                SourceLineInfo const& _lineInfo )
+    :   name( _name ),
+        className( _className ),
+        description( _description ),
+        lineInfo( _lineInfo ),
+        properties( None )
+    {
+        setTags( *this, _tags );
+    }
+
+    TestCaseInfo::TestCaseInfo( TestCaseInfo const& other )
+    :   name( other.name ),
+        className( other.className ),
+        description( other.description ),
+        tags( other.tags ),
+        lcaseTags( other.lcaseTags ),
+        tagsAsString( other.tagsAsString ),
+        lineInfo( other.lineInfo ),
+        properties( other.properties )
+    {}
+
+    bool TestCaseInfo::isHidden() const {
+        return ( properties & IsHidden ) != 0;
+    }
+    bool TestCaseInfo::throws() const {
+        return ( properties & Throws ) != 0;
+    }
+    bool TestCaseInfo::okToFail() const {
+        return ( properties & (ShouldFail | MayFail ) ) != 0;
+    }
+    bool TestCaseInfo::expectedToFail() const {
+        return ( properties & (ShouldFail ) ) != 0;
+    }
+
+    TestCase::TestCase( ITestCase* testCase, TestCaseInfo const& info ) : TestCaseInfo( info ), test( testCase ) {}
+
+    TestCase::TestCase( TestCase const& other )
+    :   TestCaseInfo( other ),
+        test( other.test )
+    {}
+
+    TestCase TestCase::withName( std::string const& _newName ) const {
+        TestCase other( *this );
+        other.name = _newName;
+        return other;
+    }
+
+    void TestCase::swap( TestCase& other ) {
+        test.swap( other.test );
+        name.swap( other.name );
+        className.swap( other.className );
+        description.swap( other.description );
+        tags.swap( other.tags );
+        lcaseTags.swap( other.lcaseTags );
+        tagsAsString.swap( other.tagsAsString );
+        std::swap( TestCaseInfo::properties, static_cast<TestCaseInfo&>( other ).properties );
+        std::swap( lineInfo, other.lineInfo );
+    }
+
+    void TestCase::invoke() const {
+        test->invoke();
+    }
+
+    bool TestCase::operator == ( TestCase const& other ) const {
+        return  test.get() == other.test.get() &&
+                name == other.name &&
+                className == other.className;
+    }
+
+    bool TestCase::operator < ( TestCase const& other ) const {
+        return name < other.name;
+    }
+    TestCase& TestCase::operator = ( TestCase const& other ) {
+        TestCase temp( other );
+        swap( temp );
+        return *this;
+    }
+
+    TestCaseInfo const& TestCase::getTestCaseInfo() const
+    {
+        return *this;
+    }
+
+} // end namespace Catch
+
+// #included from: catch_version.hpp
+#define TWOBLUECUBES_CATCH_VERSION_HPP_INCLUDED
 
 namespace Catch {
 
-    class ReporterRegistry : public IReporterRegistry {
-
-    public:
-
-        ~ReporterRegistry() {
-            deleteAllValues( m_factories );
-        }
-
-        virtual IReporter* create( const std::string& name, const IReporterConfig& config ) const {
-            FactoryMap::const_iterator it =  m_factories.find( name );
-            if( it == m_factories.end() )
-                return NULL;
-            return it->second->create( config );
-        }
-
-        void registerReporter( const std::string& name, IReporterFactory* factory ) {
-            m_factories.insert( std::make_pair( name, factory ) );
-        }
-
-        const FactoryMap& getFactories() const {
-            return m_factories;
-        }
-
-    private:
-        FactoryMap m_factories;
-    };
+    Version::Version
+        (   unsigned int _majorVersion,
+            unsigned int _minorVersion,
+            unsigned int _patchNumber,
+            std::string const& _branchName,
+            unsigned int _buildNumber )
+    :   majorVersion( _majorVersion ),
+        minorVersion( _minorVersion ),
+        patchNumber( _patchNumber ),
+        branchName( _branchName ),
+        buildNumber( _buildNumber )
+    {}
+
+    std::ostream& operator << ( std::ostream& os, Version const& version ) {
+        os  << version.majorVersion << "."
+            << version.minorVersion << "."
+            << version.patchNumber;
+
+        if( !version.branchName.empty() ) {
+            os  << "-" << version.branchName
+                << "." << version.buildNumber;
+        }
+        return os;
+    }
+
+    Version libraryVersion( 1, 5, 6, "", 0 );
+
 }
 
-// #included from: catch_stream.hpp
-
-#include <stdexcept>
-#include <cstdio>
+// #included from: catch_message.hpp
+#define TWOBLUECUBES_CATCH_MESSAGE_HPP_INCLUDED
 
 namespace Catch {
 
-    template<typename WriterF, size_t bufferSize=256>
-    class StreamBufImpl : public StreamBufBase {
-        char data[bufferSize];
-        WriterF m_writer;
-
+    MessageInfo::MessageInfo(   std::string const& _macroName,
+                                SourceLineInfo const& _lineInfo,
+                                ResultWas::OfType _type )
+    :   macroName( _macroName ),
+        lineInfo( _lineInfo ),
+        type( _type ),
+        sequence( ++globalCount )
+    {}
+
+    // This may need protecting if threading support is added
+    unsigned int MessageInfo::globalCount = 0;
+
+    ////////////////////////////////////////////////////////////////////////////
+
+    ScopedMessage::ScopedMessage( MessageBuilder const& builder )
+    : m_info( builder.m_info )
+    {
+        m_info.message = builder.m_stream.str();
+        getResultCapture().pushScopedMessage( m_info );
+    }
+    ScopedMessage::ScopedMessage( ScopedMessage const& other )
+    : m_info( other.m_info )
+    {}
+
+    ScopedMessage::~ScopedMessage() {
+        getResultCapture().popScopedMessage( m_info );
+    }
+
+} // end namespace Catch
+
+// #included from: catch_legacy_reporter_adapter.hpp
+#define TWOBLUECUBES_CATCH_LEGACY_REPORTER_ADAPTER_HPP_INCLUDED
+
+// #included from: catch_legacy_reporter_adapter.h
+#define TWOBLUECUBES_CATCH_LEGACY_REPORTER_ADAPTER_H_INCLUDED
+
+namespace Catch
+{
+    // Deprecated
+    struct IReporter : IShared {
+        virtual ~IReporter();
+
+        virtual bool shouldRedirectStdout() const = 0;
+
+        virtual void StartTesting() = 0;
+        virtual void EndTesting( Totals const& totals ) = 0;
+        virtual void StartGroup( std::string const& groupName ) = 0;
+        virtual void EndGroup( std::string const& groupName, Totals const& totals ) = 0;
+        virtual void StartTestCase( TestCaseInfo const& testInfo ) = 0;
+        virtual void EndTestCase( TestCaseInfo const& testInfo, Totals const& totals, std::string const& stdOut, std::string const& stdErr ) = 0;
+        virtual void StartSection( std::string const& sectionName, std::string const& description ) = 0;
+        virtual void EndSection( std::string const& sectionName, Counts const& assertions ) = 0;
+        virtual void NoAssertionsInSection( std::string const& sectionName ) = 0;
+        virtual void NoAssertionsInTestCase( std::string const& testName ) = 0;
+        virtual void Aborted() = 0;
+        virtual void Result( AssertionResult const& result ) = 0;
+    };
+
+    class LegacyReporterAdapter : public SharedImpl<IStreamingReporter>
+    {
     public:
-        StreamBufImpl() {
-            setp( data, data + sizeof(data) );
-        }
-
-        ~StreamBufImpl() {
-            sync();
-        }
+        LegacyReporterAdapter( Ptr<IReporter> const& legacyReporter );
+        virtual ~LegacyReporterAdapter();
+
+        virtual ReporterPreferences getPreferences() const;
+        virtual void noMatchingTestCases( std::string const& );
+        virtual void testRunStarting( TestRunInfo const& );
+        virtual void testGroupStarting( GroupInfo const& groupInfo );
+        virtual void testCaseStarting( TestCaseInfo const& testInfo );
+        virtual void sectionStarting( SectionInfo const& sectionInfo );
+        virtual void assertionStarting( AssertionInfo const& );
+        virtual bool assertionEnded( AssertionStats const& assertionStats );
+        virtual void sectionEnded( SectionStats const& sectionStats );
+        virtual void testCaseEnded( TestCaseStats const& testCaseStats );
+        virtual void testGroupEnded( TestGroupStats const& testGroupStats );
+        virtual void testRunEnded( TestRunStats const& testRunStats );
+        virtual void skipTest( TestCaseInfo const& );
 
     private:
-        int	overflow( int c ) {
-            sync();
-
-            if( c != EOF ) {
-                if( pbase() == epptr() )
-                    m_writer( std::string( 1, static_cast<char>( c ) ) );
-                else
-                    sputc( static_cast<char>( c ) );
-            }
-            return 0;
-        }
-
-        int	sync() {
-            if( pbase() != pptr() ) {
-                m_writer( std::string( pbase(), static_cast<std::string::size_type>( pptr() - pbase() ) ) );
-                setp( pbase(), epptr() );
-            }
-            return 0;
-        }
-    };
-
-    ///////////////////////////////////////////////////////////////////////////
-
-    struct OutputDebugWriter {
-
-        void operator()( const std::string &str ) {
-            writeToDebugConsole( str );
-        }
+        Ptr<IReporter> m_legacyReporter;
     };
 }
 
+namespace Catch
+{
+    LegacyReporterAdapter::LegacyReporterAdapter( Ptr<IReporter> const& legacyReporter )
+    :   m_legacyReporter( legacyReporter )
+    {}
+    LegacyReporterAdapter::~LegacyReporterAdapter() {}
+
+    ReporterPreferences LegacyReporterAdapter::getPreferences() const {
+        ReporterPreferences prefs;
+        prefs.shouldRedirectStdOut = m_legacyReporter->shouldRedirectStdout();
+        return prefs;
+    }
+
+    void LegacyReporterAdapter::noMatchingTestCases( std::string const& ) {}
+    void LegacyReporterAdapter::testRunStarting( TestRunInfo const& ) {
+        m_legacyReporter->StartTesting();
+    }
+    void LegacyReporterAdapter::testGroupStarting( GroupInfo const& groupInfo ) {
+        m_legacyReporter->StartGroup( groupInfo.name );
+    }
+    void LegacyReporterAdapter::testCaseStarting( TestCaseInfo const& testInfo ) {
+        m_legacyReporter->StartTestCase( testInfo );
+    }
+    void LegacyReporterAdapter::sectionStarting( SectionInfo const& sectionInfo ) {
+        m_legacyReporter->StartSection( sectionInfo.name, sectionInfo.description );
+    }
+    void LegacyReporterAdapter::assertionStarting( AssertionInfo const& ) {
+        // Not on legacy interface
+    }
+
+    bool LegacyReporterAdapter::assertionEnded( AssertionStats const& assertionStats ) {
+        if( assertionStats.assertionResult.getResultType() != ResultWas::Ok ) {
+            for( std::vector<MessageInfo>::const_iterator it = assertionStats.infoMessages.begin(), itEnd = assertionStats.infoMessages.end();
+                    it != itEnd;
+                    ++it ) {
+                if( it->type == ResultWas::Info ) {
+                    ResultBuilder rb( it->macroName.c_str(), it->lineInfo, "", ResultDisposition::Normal );
+                    rb << it->message;
+                    rb.setResultType( ResultWas::Info );
+                    AssertionResult result = rb.build();
+                    m_legacyReporter->Result( result );
+                }
+            }
+        }
+        m_legacyReporter->Result( assertionStats.assertionResult );
+        return true;
+    }
+    void LegacyReporterAdapter::sectionEnded( SectionStats const& sectionStats ) {
+        if( sectionStats.missingAssertions )
+            m_legacyReporter->NoAssertionsInSection( sectionStats.sectionInfo.name );
+        m_legacyReporter->EndSection( sectionStats.sectionInfo.name, sectionStats.assertions );
+    }
+    void LegacyReporterAdapter::testCaseEnded( TestCaseStats const& testCaseStats ) {
+        m_legacyReporter->EndTestCase
+            (   testCaseStats.testInfo,
+                testCaseStats.totals,
+                testCaseStats.stdOut,
+                testCaseStats.stdErr );
+    }
+    void LegacyReporterAdapter::testGroupEnded( TestGroupStats const& testGroupStats ) {
+        if( testGroupStats.aborting )
+            m_legacyReporter->Aborted();
+        m_legacyReporter->EndGroup( testGroupStats.groupInfo.name, testGroupStats.totals );
+    }
+    void LegacyReporterAdapter::testRunEnded( TestRunStats const& testRunStats ) {
+        m_legacyReporter->EndTesting( testRunStats.totals );
+    }
+    void LegacyReporterAdapter::skipTest( TestCaseInfo const& ) {
+    }
+}
+
+// #included from: catch_timer.hpp
+
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wc++11-long-long"
+#endif
+
+#ifdef CATCH_PLATFORM_WINDOWS
+#include <windows.h>
+#else
+#include <sys/time.h>
+#endif
+
 namespace Catch {
 
     namespace {
-        Context* currentContext = NULL;
-    }
-    IMutableContext& getCurrentMutableContext() {
-        if( !currentContext )
-            currentContext = new Context();
-        return *currentContext;
-    }
-    IContext& getCurrentContext() {
-        return getCurrentMutableContext();
-    }
-
-    Context::Context()
-    :   m_reporterRegistry( new ReporterRegistry ),
-        m_testCaseRegistry( new TestRegistry ),
-        m_exceptionTranslatorRegistry( new ExceptionTranslatorRegistry ),
-        m_config( NULL )
-    {}
-
-    void Context::cleanUp() {
-        delete currentContext;
-        currentContext = NULL;
-    }
-
-    void Context::setRunner( IRunner* runner ) {
-        m_runner = runner;
-    }
-
-    void Context::setResultCapture( IResultCapture* resultCapture ) {
-        m_resultCapture = resultCapture;
-    }
-
-    const IConfig* Context::getConfig() const {
-        return m_config;
-    }
-    void Context::setConfig( const IConfig* config ) {
-        m_config = config;
-    }
-
-    IResultCapture& Context::getResultCapture() {
-        return *m_resultCapture;
-    }
-
-    IRunner& Context::getRunner() {
-        return *m_runner;
-    }
-
-    IReporterRegistry& Context::getReporterRegistry() {
-        return *m_reporterRegistry.get();
-    }
-
-    ITestCaseRegistry& Context::getTestCaseRegistry() {
-        return *m_testCaseRegistry.get();
-    }
-
-    IExceptionTranslatorRegistry& Context::getExceptionTranslatorRegistry() {
-        return *m_exceptionTranslatorRegistry.get();
-    }
-
-    std::streambuf* Context::createStreamBuf( const std::string& streamName ) {
-        if( streamName == "stdout" ) return std::cout.rdbuf();
-        if( streamName == "stderr" ) return std::cerr.rdbuf();
-        if( streamName == "debug" ) return new StreamBufImpl<OutputDebugWriter>;
-
-        throw std::domain_error( "Unknown stream: " + streamName );
-    }
-
-    GeneratorsForTest* Context::findGeneratorsForCurrentTest() {
-        std::string testName = getResultCapture().getCurrentTestName();
-
-        std::map<std::string, GeneratorsForTest*>::const_iterator it =
-            m_generatorsByTestName.find( testName );
-        return it != m_generatorsByTestName.end()
-            ? it->second
-            : NULL;
-    }
-
-    GeneratorsForTest& Context::getGeneratorsForCurrentTest() {
-        GeneratorsForTest* generators = findGeneratorsForCurrentTest();
-        if( !generators ) {
-            std::string testName = getResultCapture().getCurrentTestName();
-            generators = new GeneratorsForTest();
-            m_generatorsByTestName.insert( std::make_pair( testName, generators ) );
-        }
-        return *generators;
-    }
-
-    size_t Context::getGeneratorIndex( const std::string& fileInfo, size_t totalSize ) {
-        return getGeneratorsForCurrentTest()
-            .getGeneratorInfo( fileInfo, totalSize )
-            .getCurrentIndex();
-    }
-
-    bool Context::advanceGeneratorsForCurrentTest() {
-        GeneratorsForTest* generators = findGeneratorsForCurrentTest();
-        return generators && generators->moveNext();
-    }
-}
-// #included from: internal/catch_commandline.hpp
+#ifdef CATCH_PLATFORM_WINDOWS
+        uint64_t getCurrentTicks() {
+            static uint64_t hz=0, hzo=0;
+            if (!hz) {
+                QueryPerformanceFrequency( reinterpret_cast<LARGE_INTEGER*>( &hz ) );
+                QueryPerformanceCounter( reinterpret_cast<LARGE_INTEGER*>( &hzo ) );
+            }
+            uint64_t t;
+            QueryPerformanceCounter( reinterpret_cast<LARGE_INTEGER*>( &t ) );
+            return ((t-hzo)*1000000)/hz;
+        }
+#else
+        uint64_t getCurrentTicks() {
+            timeval t;
+            gettimeofday(&t,CATCH_NULL);
+            return static_cast<uint64_t>( t.tv_sec ) * 1000000ull + static_cast<uint64_t>( t.tv_usec );
+        }
+#endif
+    }
+
+    void Timer::start() {
+        m_ticks = getCurrentTicks();
+    }
+    unsigned int Timer::getElapsedMicroseconds() const {
+        return static_cast<unsigned int>(getCurrentTicks() - m_ticks);
+    }
+    unsigned int Timer::getElapsedMilliseconds() const {
+        return static_cast<unsigned int>(getElapsedMicroseconds()/1000);
+    }
+    double Timer::getElapsedSeconds() const {
+        return getElapsedMicroseconds()/1000000.0;
+    }
+
+} // namespace Catch
+
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+// #included from: catch_common.hpp
+#define TWOBLUECUBES_CATCH_COMMON_HPP_INCLUDED
 
 namespace Catch {
 
-    class Command {
-    public:
-        Command(){}
-
-        explicit Command( const std::string& name ) : m_name( name ) {}
-
-        Command& operator += ( const std::string& arg ) {
-            m_args.push_back( arg );
-            return *this;
-        }
-        Command& operator += ( const Command& other ) {
-            std::copy( other.m_args.begin(), other.m_args.end(), std::back_inserter( m_args ) );
-            if( m_name.empty() )
-                m_name = other.m_name;
-            return *this;
-        }
-        Command operator + ( const Command& other ) {
-            Command newCommand( *this );
-            newCommand += other;
-            return newCommand;
-        }
-
-        operator SafeBool::type() const {
-            return SafeBool::makeSafe( !m_name.empty() );
-        }
-
-        std::string name() const { return m_name; }
-        std::string operator[]( std::size_t i ) const { return m_args[i]; }
-        std::size_t argsCount() const { return m_args.size(); }
-
-        void raiseError( const std::string& message ) const {
-            std::ostringstream oss;
-            oss << "Error while parsing " << m_name << ". " << message << ".";
-            if( m_args.size() > 0 )
-                oss << " Arguments where:";
-            for( std::size_t i = 0; i < m_args.size(); ++i )
-                oss << " " << m_args[i];
-            throw std::domain_error( oss.str() );
-        }
-
-    private:
-
-        std::string m_name;
-        std::vector<std::string> m_args;
-    };
-
-    class CommandParser {
-    public:
-        CommandParser( int argc, char const * const * argv ) : m_argc( static_cast<std::size_t>( argc ) ), m_argv( argv ) {}
-
-        Command find( const std::string& arg1,  const std::string& arg2, const std::string& arg3 ) const {
-            return find( arg1 ) + find( arg2 ) + find( arg3 );
-        }
-
-        Command find( const std::string& shortArg, const std::string& longArg ) const {
-            return find( shortArg ) + find( longArg );
-        }
-        Command find( const std::string& arg ) const {
-            for( std::size_t i = 0; i < m_argc; ++i  )
-                if( m_argv[i] == arg )
-                    return getArgs( i );
-            return Command();
-        }
-
-    private:
-        Command getArgs( std::size_t from ) const {
-            Command command( m_argv[from] );
-            for( std::size_t i = from+1; i < m_argc && m_argv[i][0] != '-'; ++i  )
-                command += m_argv[i];
-            return command;
-        }
-
-        std::size_t m_argc;
-        char const * const * m_argv;
-    };
-
-    inline bool parseIntoConfig( const CommandParser& parser, Config& config ) {
-
-        try {
-            if( Command cmd = parser.find( "-l", "--list" ) ) {
-                if( cmd.argsCount() > 2 )
-                    cmd.raiseError( "Expected upto 2 arguments" );
-
-                List::What listSpec = List::All;
-                if( cmd.argsCount() >= 1 ) {
-                    if( cmd[0] == "tests" )
-                        listSpec = List::Tests;
-                    else if( cmd[0] == "reporters" )
-                        listSpec = List::Reports;
-                    else
-                        cmd.raiseError( "Expected [tests] or [reporters]" );
-                }
-                if( cmd.argsCount() >= 2 ) {
-                    if( cmd[1] == "xml" )
-                        listSpec = static_cast<List::What>( listSpec | List::AsXml );
-                    else if( cmd[1] == "text" )
-                        listSpec = static_cast<List::What>( listSpec | List::AsText );
-                    else
-                        cmd.raiseError( "Expected [xml] or [text]" );
-                }
-                config.setListSpec( static_cast<List::What>( config.getListSpec() | listSpec ) );
-            }
-
-            if( Command cmd = parser.find( "-t", "--test" ) ) {
-                if( cmd.argsCount() == 0 )
-                    cmd.raiseError( "Expected at least one argument" );
-                for( std::size_t i = 0; i < cmd.argsCount(); ++i )
-                    config.addTestSpec( cmd[i] );
-            }
-
-            if( Command cmd = parser.find( "-r", "--reporter" ) ) {
-                if( cmd.argsCount() != 1 )
-                    cmd.raiseError( "Expected one argument" );
-                config.setReporter( cmd[0] );
-            }
-
-            if( Command cmd = parser.find( "-o", "--out" ) ) {
-                if( cmd.argsCount() == 0 )
-                    cmd.raiseError( "Expected filename" );
-                if( cmd[0][0] == '%' )
-                    config.useStream( cmd[0].substr( 1 ) );
-                else
-                    config.setFilename( cmd[0] );
-            }
-
-            if( Command cmd = parser.find( "-s", "--success" ) ) {
-                if( cmd.argsCount() != 0 )
-                    cmd.raiseError( "Does not accept arguments" );
-                config.setIncludeWhichResults( Include::SuccessfulResults );
-            }
-
-            if( Command cmd = parser.find( "-b", "--break" ) ) {
-                if( cmd.argsCount() != 0 )
-                    cmd.raiseError( "Does not accept arguments" );
-                config.setShouldDebugBreak( true );
-            }
-
-            if( Command cmd = parser.find( "-n", "--name" ) ) {
-                if( cmd.argsCount() != 1 )
-                    cmd.raiseError( "Expected a name" );
-                config.setName( cmd[0] );
-            }
-
-            if( Command cmd = parser.find( "-h", "-?", "--help" ) ) {
-                if( cmd.argsCount() != 0 )
-                    cmd.raiseError( "Does not accept arguments" );
-                config.setShowHelp( true );
-            }
-
-            if( Command cmd = parser.find( "-a", "--abort" ) ) {
-                if( cmd.argsCount() > 1 )
-                    cmd.raiseError( "Only accepts 0-1 arguments" );
-                int threshold = 1;
-                if( cmd.argsCount() == 1 )
-                {
-                    std::stringstream ss;
-                    ss << cmd[0];
-                    ss >> threshold;
-                }
-                config.setCutoff( threshold );
-            }
-
-            if( Command cmd = parser.find( "-nt", "--nothrow" ) ) {
-                if( cmd.argsCount() != 0 )
-                    cmd.raiseError( "Does not accept arguments" );
-                config.setAllowThrows( false );
-            }
-
-        }
-        catch( std::exception& ex ) {
-            config.setError( ex.what() );
-            return false;
-        }
-        return true;
+    bool startsWith( std::string const& s, std::string const& prefix ) {
+        return s.size() >= prefix.size() && s.substr( 0, prefix.size() ) == prefix;
+    }
+    bool endsWith( std::string const& s, std::string const& suffix ) {
+        return s.size() >= suffix.size() && s.substr( s.size()-suffix.size(), suffix.size() ) == suffix;
+    }
+    bool contains( std::string const& s, std::string const& infix ) {
+        return s.find( infix ) != std::string::npos;
+    }
+    void toLowerInPlace( std::string& s ) {
+        std::transform( s.begin(), s.end(), s.begin(), ::tolower );
+    }
+    std::string toLower( std::string const& s ) {
+        std::string lc = s;
+        toLowerInPlace( lc );
+        return lc;
+    }
+    std::string trim( std::string const& str ) {
+        static char const* whitespaceChars = "\n\r\t ";
+        std::string::size_type start = str.find_first_not_of( whitespaceChars );
+        std::string::size_type end = str.find_last_not_of( whitespaceChars );
+
+        return start != std::string::npos ? str.substr( start, 1+end-start ) : "";
+    }
+
+    bool replaceInPlace( std::string& str, std::string const& replaceThis, std::string const& withThis ) {
+        bool replaced = false;
+        std::size_t i = str.find( replaceThis );
+        while( i != std::string::npos ) {
+            replaced = true;
+            str = str.substr( 0, i ) + withThis + str.substr( i+replaceThis.size() );
+            if( i < str.size()-withThis.size() )
+                i = str.find( replaceThis, i+withThis.size() );
+            else
+                i = std::string::npos;
+        }
+        return replaced;
+    }
+
+    pluralise::pluralise( std::size_t count, std::string const& label )
+    :   m_count( count ),
+        m_label( label )
+    {}
+
+    std::ostream& operator << ( std::ostream& os, pluralise const& pluraliser ) {
+        os << pluraliser.m_count << " " << pluraliser.m_label;
+        if( pluraliser.m_count != 1 )
+            os << "s";
+        return os;
+    }
+
+    SourceLineInfo::SourceLineInfo() : line( 0 ){}
+    SourceLineInfo::SourceLineInfo( char const* _file, std::size_t _line )
+    :   file( _file ),
+        line( _line )
+    {}
+    SourceLineInfo::SourceLineInfo( SourceLineInfo const& other )
+    :   file( other.file ),
+        line( other.line )
+    {}
+    bool SourceLineInfo::empty() const {
+        return file.empty();
+    }
+    bool SourceLineInfo::operator == ( SourceLineInfo const& other ) const {
+        return line == other.line && file == other.file;
+    }
+    bool SourceLineInfo::operator < ( SourceLineInfo const& other ) const {
+        return line < other.line || ( line == other.line  && file < other.file );
+    }
+
+    void seedRng( IConfig const& config ) {
+        if( config.rngSeed() != 0 )
+            std::srand( config.rngSeed() );
+    }
+    unsigned int rngSeed() {
+        return getCurrentContext().getConfig()->rngSeed();
+    }
+
+    std::ostream& operator << ( std::ostream& os, SourceLineInfo const& info ) {
+#ifndef __GNUG__
+        os << info.file << "(" << info.line << ")";
+#else
+        os << info.file << ":" << info.line;
+#endif
+        return os;
+    }
+
+    void throwLogicError( std::string const& message, SourceLineInfo const& locationInfo ) {
+        std::ostringstream oss;
+        oss << locationInfo << ": Internal Catch error: '" << message << "'";
+        if( alwaysTrue() )
+            throw std::logic_error( oss.str() );
+    }
+}
+
+// #included from: catch_section.hpp
+#define TWOBLUECUBES_CATCH_SECTION_HPP_INCLUDED
+
+namespace Catch {
+
+    SectionInfo::SectionInfo
+        (   SourceLineInfo const& _lineInfo,
+            std::string const& _name,
+            std::string const& _description )
+    :   name( _name ),
+        description( _description ),
+        lineInfo( _lineInfo )
+    {}
+
+    Section::Section( SectionInfo const& info )
+    :   m_info( info ),
+        m_sectionIncluded( getResultCapture().sectionStarted( m_info, m_assertions ) )
+    {
+        m_timer.start();
+    }
+
+    Section::~Section() {
+        if( m_sectionIncluded ) {
+            SectionEndInfo endInfo( m_info, m_assertions, m_timer.getElapsedSeconds() );
+            if( std::uncaught_exception() )
+                getResultCapture().sectionEndedEarly( endInfo );
+            else
+                getResultCapture().sectionEnded( endInfo );
+        }
+    }
+
+    // This indicates whether the section should be executed or not
+    Section::operator bool() const {
+        return m_sectionIncluded;
     }
 
 } // end namespace Catch
 
-// #included from: internal/catch_list.hpp
-
-#include <limits>
+// #included from: catch_debugger.hpp
+#define TWOBLUECUBES_CATCH_DEBUGGER_HPP_INCLUDED
+
+#include <iostream>
+
+#ifdef CATCH_PLATFORM_MAC
+
+    #include <assert.h>
+    #include <stdbool.h>
+    #include <sys/types.h>
+    #include <unistd.h>
+    #include <sys/sysctl.h>
+
+    namespace Catch{
+
+        // The following function is taken directly from the following technical note:
+        // http://developer.apple.com/library/mac/#qa/qa2004/qa1361.html
+
+        // Returns true if the current process is being debugged (either
+        // running under the debugger or has a debugger attached post facto).
+        bool isDebuggerActive(){
+
+            int                 mib[4];
+            struct kinfo_proc   info;
+            size_t              size;
+
+            // Initialize the flags so that, if sysctl fails for some bizarre
+            // reason, we get a predictable result.
+
+            info.kp_proc.p_flag = 0;
+
+            // Initialize mib, which tells sysctl the info we want, in this case
+            // we're looking for information about a specific process ID.
+
+            mib[0] = CTL_KERN;
+            mib[1] = KERN_PROC;
+            mib[2] = KERN_PROC_PID;
+            mib[3] = getpid();
+
+            // Call sysctl.
+
+            size = sizeof(info);
+            if( sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &size, CATCH_NULL, 0) != 0 ) {
+                Catch::cerr() << "\n** Call to sysctl failed - unable to determine if debugger is active **\n" << std::endl;
+                return false;
+            }
+
+            // We're being debugged if the P_TRACED flag is set.
+
+            return ( (info.kp_proc.p_flag & P_TRACED) != 0 );
+        }
+    } // namespace Catch
+
+#elif defined(_MSC_VER)
+    extern "C" __declspec(dllimport) int __stdcall IsDebuggerPresent();
+    namespace Catch {
+        bool isDebuggerActive() {
+            return IsDebuggerPresent() != 0;
+        }
+    }
+#elif defined(__MINGW32__)
+    extern "C" __declspec(dllimport) int __stdcall IsDebuggerPresent();
+    namespace Catch {
+        bool isDebuggerActive() {
+            return IsDebuggerPresent() != 0;
+        }
+    }
+#else
+    namespace Catch {
+       inline bool isDebuggerActive() { return false; }
+    }
+#endif // Platform
+
+#ifdef CATCH_PLATFORM_WINDOWS
+    extern "C" __declspec(dllimport) void __stdcall OutputDebugStringA( const char* );
+    namespace Catch {
+        void writeToDebugConsole( std::string const& text ) {
+            ::OutputDebugStringA( text.c_str() );
+        }
+    }
+#else
+    namespace Catch {
+        void writeToDebugConsole( std::string const& text ) {
+            // !TBD: Need a version for Mac/ XCode and other IDEs
+            Catch::cout() << text;
+        }
+    }
+#endif // Platform
+
+// #included from: catch_tostring.hpp
+#define TWOBLUECUBES_CATCH_TOSTRING_HPP_INCLUDED
 
 namespace Catch {
-    inline int List( Config& config ) {
-
-        IContext& context = getCurrentContext();
-        if( config.listWhat() & List::Reports ) {
-            std::cout << "Available reports:\n";
-            IReporterRegistry::FactoryMap::const_iterator it = context.getReporterRegistry().getFactories().begin();
-            IReporterRegistry::FactoryMap::const_iterator itEnd = context.getReporterRegistry().getFactories().end();
-            for(; it != itEnd; ++it ) {
-                // !TBD: consider listAs()
-                std::cout << "\t" << it->first << "\n\t\t'" << it->second->getDescription() << "'\n";
-            }
-            std::cout << std::endl;
-        }
-
-        if( config.listWhat() & List::Tests ) {
-            std::cout << "Available tests:\n";
-            std::vector<TestCaseInfo>::const_iterator it = context.getTestCaseRegistry().getAllTests().begin();
-            std::vector<TestCaseInfo>::const_iterator itEnd = context.getTestCaseRegistry().getAllTests().end();
-            for(; it != itEnd; ++it ) {
-                // !TBD: consider listAs()
-                std::cout << "\t" << it->getName() << "\n\t\t '" << it->getDescription() << "'\n";
-            }
-            std::cout << std::endl;
-        }
-
-        if( ( config.listWhat() & List::All ) == 0 ) {
-            std::cerr << "Unknown list type" << std::endl;
-            return (std::numeric_limits<int>::max)();
-        }
-
-        if( config.getReporter().get() )
-            std::cerr << "Reporters ignored when listing" << std::endl;
-        if( !config.testsSpecified() )
-            std::cerr << "Test specs ignored when listing" << std::endl;
-        return 0;
+
+namespace Detail {
+
+    const std::string unprintableString = "{?}";
+
+    namespace {
+        const int hexThreshold = 255;
+
+        struct Endianness {
+            enum Arch { Big, Little };
+
+            static Arch which() {
+                union _{
+                    int asInt;
+                    char asChar[sizeof (int)];
+                } u;
+
+                u.asInt = 1;
+                return ( u.asChar[sizeof(int)-1] == 1 ) ? Big : Little;
+            }
+        };
+    }
+
+    std::string rawMemoryToString( const void *object, std::size_t size )
+    {
+        // Reverse order for little endian architectures
+        int i = 0, end = static_cast<int>( size ), inc = 1;
+        if( Endianness::which() == Endianness::Little ) {
+            i = end-1;
+            end = inc = -1;
+        }
+
+        unsigned char const *bytes = static_cast<unsigned char const *>(object);
+        std::ostringstream os;
+        os << "0x" << std::setfill('0') << std::hex;
+        for( ; i != end; i += inc )
+             os << std::setw(2) << static_cast<unsigned>(bytes[i]);
+       return os.str();
+    }
+}
+
+std::string toString( std::string const& value ) {
+    std::string s = value;
+    if( getCurrentContext().getConfig()->showInvisibles() ) {
+        for(size_t i = 0; i < s.size(); ++i ) {
+            std::string subs;
+            switch( s[i] ) {
+            case '\n': subs = "\\n"; break;
+            case '\t': subs = "\\t"; break;
+            default: break;
+            }
+            if( !subs.empty() ) {
+                s = s.substr( 0, i ) + subs + s.substr( i+1 );
+                ++i;
+            }
+        }
+    }
+    return "\"" + s + "\"";
+}
+std::string toString( std::wstring const& value ) {
+
+    std::string s;
+    s.reserve( value.size() );
+    for(size_t i = 0; i < value.size(); ++i )
+        s += value[i] <= 0xff ? static_cast<char>( value[i] ) : '?';
+    return Catch::toString( s );
+}
+
+std::string toString( const char* const value ) {
+    return value ? Catch::toString( std::string( value ) ) : std::string( "{null string}" );
+}
+
+std::string toString( char* const value ) {
+    return Catch::toString( static_cast<const char*>( value ) );
+}
+
+std::string toString( const wchar_t* const value )
+{
+	return value ? Catch::toString( std::wstring(value) ) : std::string( "{null string}" );
+}
+
+std::string toString( wchar_t* const value )
+{
+	return Catch::toString( static_cast<const wchar_t*>( value ) );
+}
+
+std::string toString( int value ) {
+    std::ostringstream oss;
+    oss << value;
+    if( value > Detail::hexThreshold )
+        oss << " (0x" << std::hex << value << ")";
+    return oss.str();
+}
+
+std::string toString( unsigned long value ) {
+    std::ostringstream oss;
+    oss << value;
+    if( value > Detail::hexThreshold )
+        oss << " (0x" << std::hex << value << ")";
+    return oss.str();
+}
+
+std::string toString( unsigned int value ) {
+    return Catch::toString( static_cast<unsigned long>( value ) );
+}
+
+template<typename T>
+std::string fpToString( T value, int precision ) {
+    std::ostringstream oss;
+    oss << std::setprecision( precision )
+        << std::fixed
+        << value;
+    std::string d = oss.str();
+    std::size_t i = d.find_last_not_of( '0' );
+    if( i != std::string::npos && i != d.size()-1 ) {
+        if( d[i] == '.' )
+            i++;
+        d = d.substr( 0, i+1 );
+    }
+    return d;
+}
+
+std::string toString( const double value ) {
+    return fpToString( value, 10 );
+}
+std::string toString( const float value ) {
+    return fpToString( value, 5 ) + "f";
+}
+
+std::string toString( bool value ) {
+    return value ? "true" : "false";
+}
+
+std::string toString( char value ) {
+    return value < ' '
+        ? toString( static_cast<unsigned int>( value ) )
+        : Detail::makeString( value );
+}
+
+std::string toString( signed char value ) {
+    return toString( static_cast<char>( value ) );
+}
+
+std::string toString( unsigned char value ) {
+    return toString( static_cast<char>( value ) );
+}
+
+#ifdef CATCH_CONFIG_CPP11_LONG_LONG
+std::string toString( long long value ) {
+    std::ostringstream oss;
+    oss << value;
+    if( value > Detail::hexThreshold )
+        oss << " (0x" << std::hex << value << ")";
+    return oss.str();
+}
+std::string toString( unsigned long long value ) {
+    std::ostringstream oss;
+    oss << value;
+    if( value > Detail::hexThreshold )
+        oss << " (0x" << std::hex << value << ")";
+    return oss.str();
+}
+#endif
+
+#ifdef CATCH_CONFIG_CPP11_NULLPTR
+std::string toString( std::nullptr_t ) {
+    return "nullptr";
+}
+#endif
+
+#ifdef __OBJC__
+    std::string toString( NSString const * const& nsstring ) {
+        if( !nsstring )
+            return "nil";
+        return "@" + toString([nsstring UTF8String]);
+    }
+    std::string toString( NSString * CATCH_ARC_STRONG const& nsstring ) {
+        if( !nsstring )
+            return "nil";
+        return "@" + toString([nsstring UTF8String]);
+    }
+    std::string toString( NSObject* const& nsObject ) {
+        return toString( [nsObject description] );
+    }
+#endif
+
+} // end namespace Catch
+
+// #included from: catch_result_builder.hpp
+#define TWOBLUECUBES_CATCH_RESULT_BUILDER_HPP_INCLUDED
+
+namespace Catch {
+
+    std::string capturedExpressionWithSecondArgument( std::string const& capturedExpression, std::string const& secondArg ) {
+        return secondArg.empty() || secondArg == "\"\""
+            ? capturedExpression
+            : capturedExpression + ", " + secondArg;
+    }
+    ResultBuilder::ResultBuilder(   char const* macroName,
+                                    SourceLineInfo const& lineInfo,
+                                    char const* capturedExpression,
+                                    ResultDisposition::Flags resultDisposition,
+                                    char const* secondArg )
+    :   m_assertionInfo( macroName, lineInfo, capturedExpressionWithSecondArgument( capturedExpression, secondArg ), resultDisposition ),
+        m_shouldDebugBreak( false ),
+        m_shouldThrow( false )
+    {}
+
+    ResultBuilder& ResultBuilder::setResultType( ResultWas::OfType result ) {
+        m_data.resultType = result;
+        return *this;
+    }
+    ResultBuilder& ResultBuilder::setResultType( bool result ) {
+        m_data.resultType = result ? ResultWas::Ok : ResultWas::ExpressionFailed;
+        return *this;
+    }
+    ResultBuilder& ResultBuilder::setLhs( std::string const& lhs ) {
+        m_exprComponents.lhs = lhs;
+        return *this;
+    }
+    ResultBuilder& ResultBuilder::setRhs( std::string const& rhs ) {
+        m_exprComponents.rhs = rhs;
+        return *this;
+    }
+    ResultBuilder& ResultBuilder::setOp( std::string const& op ) {
+        m_exprComponents.op = op;
+        return *this;
+    }
+
+    void ResultBuilder::endExpression() {
+        m_exprComponents.testFalse = isFalseTest( m_assertionInfo.resultDisposition );
+        captureExpression();
+    }
+
+    void ResultBuilder::useActiveException( ResultDisposition::Flags resultDisposition ) {
+        m_assertionInfo.resultDisposition = resultDisposition;
+        m_stream.oss << Catch::translateActiveException();
+        captureResult( ResultWas::ThrewException );
+    }
+
+    void ResultBuilder::captureResult( ResultWas::OfType resultType ) {
+        setResultType( resultType );
+        captureExpression();
+    }
+    void ResultBuilder::captureExpectedException( std::string const& expectedMessage ) {
+        if( expectedMessage.empty() )
+            captureExpectedException( Matchers::Impl::Generic::AllOf<std::string>() );
+        else
+            captureExpectedException( Matchers::Equals( expectedMessage ) );
+    }
+
+    void ResultBuilder::captureExpectedException( Matchers::Impl::Matcher<std::string> const& matcher ) {
+
+        assert( m_exprComponents.testFalse == false );
+        AssertionResultData data = m_data;
+        data.resultType = ResultWas::Ok;
+        data.reconstructedExpression = m_assertionInfo.capturedExpression;
+
+        std::string actualMessage = Catch::translateActiveException();
+        if( !matcher.match( actualMessage ) ) {
+            data.resultType = ResultWas::ExpressionFailed;
+            data.reconstructedExpression = actualMessage;
+        }
+        AssertionResult result( m_assertionInfo, data );
+        handleResult( result );
+    }
+
+    void ResultBuilder::captureExpression() {
+        AssertionResult result = build();
+        handleResult( result );
+    }
+    void ResultBuilder::handleResult( AssertionResult const& result )
+    {
+        getResultCapture().assertionEnded( result );
+
+        if( !result.isOk() ) {
+            if( getCurrentContext().getConfig()->shouldDebugBreak() )
+                m_shouldDebugBreak = true;
+            if( getCurrentContext().getRunner()->aborting() || (m_assertionInfo.resultDisposition & ResultDisposition::Normal) )
+                m_shouldThrow = true;
+        }
+    }
+    void ResultBuilder::react() {
+        if( m_shouldThrow )
+            throw Catch::TestFailureException();
+    }
+
+    bool ResultBuilder::shouldDebugBreak() const { return m_shouldDebugBreak; }
+    bool ResultBuilder::allowThrows() const { return getCurrentContext().getConfig()->allowThrows(); }
+
+    AssertionResult ResultBuilder::build() const
+    {
+        assert( m_data.resultType != ResultWas::Unknown );
+
+        AssertionResultData data = m_data;
+
+        // Flip bool results if testFalse is set
+        if( m_exprComponents.testFalse ) {
+            if( data.resultType == ResultWas::Ok )
+                data.resultType = ResultWas::ExpressionFailed;
+            else if( data.resultType == ResultWas::ExpressionFailed )
+                data.resultType = ResultWas::Ok;
+        }
+
+        data.message = m_stream.oss.str();
+        data.reconstructedExpression = reconstructExpression();
+        if( m_exprComponents.testFalse ) {
+            if( m_exprComponents.op == "" )
+                data.reconstructedExpression = "!" + data.reconstructedExpression;
+            else
+                data.reconstructedExpression = "!(" + data.reconstructedExpression + ")";
+        }
+        return AssertionResult( m_assertionInfo, data );
+    }
+    std::string ResultBuilder::reconstructExpression() const {
+        if( m_exprComponents.op == "" )
+            return m_exprComponents.lhs.empty() ? m_assertionInfo.capturedExpression : m_exprComponents.op + m_exprComponents.lhs;
+        else if( m_exprComponents.op == "matches" )
+            return m_exprComponents.lhs + " " + m_exprComponents.rhs;
+        else if( m_exprComponents.op != "!" ) {
+            if( m_exprComponents.lhs.size() + m_exprComponents.rhs.size() < 40 &&
+                m_exprComponents.lhs.find("\n") == std::string::npos &&
+                m_exprComponents.rhs.find("\n") == std::string::npos )
+                return m_exprComponents.lhs + " " + m_exprComponents.op + " " + m_exprComponents.rhs;
+            else
+                return m_exprComponents.lhs + "\n" + m_exprComponents.op + "\n" + m_exprComponents.rhs;
+        }
+        else
+            return "{can't expand - use " + m_assertionInfo.macroName + "_FALSE( " + m_assertionInfo.capturedExpression.substr(1) + " ) instead of " + m_assertionInfo.macroName + "( " + m_assertionInfo.capturedExpression + " ) for better diagnostics}";
     }
 
 } // end namespace Catch
 
-// #included from: reporters/catch_reporter_basic.hpp
+// #included from: catch_tag_alias_registry.hpp
+#define TWOBLUECUBES_CATCH_TAG_ALIAS_REGISTRY_HPP_INCLUDED
+
+// #included from: catch_tag_alias_registry.h
+#define TWOBLUECUBES_CATCH_TAG_ALIAS_REGISTRY_H_INCLUDED
+
+#include <map>
+
+namespace Catch {
+
+    class TagAliasRegistry : public ITagAliasRegistry {
+    public:
+        virtual ~TagAliasRegistry();
+        virtual Option<TagAlias> find( std::string const& alias ) const;
+        virtual std::string expandAliases( std::string const& unexpandedTestSpec ) const;
+        void add( char const* alias, char const* tag, SourceLineInfo const& lineInfo );
+        static TagAliasRegistry& get();
+
+    private:
+        std::map<std::string, TagAlias> m_registry;
+    };
+
+} // end namespace Catch
+
+#include <map>
+#include <iostream>
+
+namespace Catch {
+
+    TagAliasRegistry::~TagAliasRegistry() {}
+
+    Option<TagAlias> TagAliasRegistry::find( std::string const& alias ) const {
+        std::map<std::string, TagAlias>::const_iterator it = m_registry.find( alias );
+        if( it != m_registry.end() )
+            return it->second;
+        else
+            return Option<TagAlias>();
+    }
+
+    std::string TagAliasRegistry::expandAliases( std::string const& unexpandedTestSpec ) const {
+        std::string expandedTestSpec = unexpandedTestSpec;
+        for( std::map<std::string, TagAlias>::const_iterator it = m_registry.begin(), itEnd = m_registry.end();
+                it != itEnd;
+                ++it ) {
+            std::size_t pos = expandedTestSpec.find( it->first );
+            if( pos != std::string::npos ) {
+                expandedTestSpec =  expandedTestSpec.substr( 0, pos ) +
+                                    it->second.tag +
+                                    expandedTestSpec.substr( pos + it->first.size() );
+            }
+        }
+        return expandedTestSpec;
+    }
+
+    void TagAliasRegistry::add( char const* alias, char const* tag, SourceLineInfo const& lineInfo ) {
+
+        if( !startsWith( alias, "[@" ) || !endsWith( alias, "]" ) ) {
+            std::ostringstream oss;
+            oss << "error: tag alias, \"" << alias << "\" is not of the form [@alias name].\n" << lineInfo;
+            throw std::domain_error( oss.str().c_str() );
+        }
+        if( !m_registry.insert( std::make_pair( alias, TagAlias( tag, lineInfo ) ) ).second ) {
+            std::ostringstream oss;
+            oss << "error: tag alias, \"" << alias << "\" already registered.\n"
+                << "\tFirst seen at " << find(alias)->lineInfo << "\n"
+                << "\tRedefined at " << lineInfo;
+            throw std::domain_error( oss.str().c_str() );
+        }
+    }
+
+    TagAliasRegistry& TagAliasRegistry::get() {
+        static TagAliasRegistry instance;
+        return instance;
+
+    }
+
+    ITagAliasRegistry::~ITagAliasRegistry() {}
+    ITagAliasRegistry const& ITagAliasRegistry::get() { return TagAliasRegistry::get(); }
+
+    RegistrarForTagAliases::RegistrarForTagAliases( char const* alias, char const* tag, SourceLineInfo const& lineInfo ) {
+        try {
+            TagAliasRegistry::get().add( alias, tag, lineInfo );
+        }
+        catch( std::exception& ex ) {
+            Colour colourGuard( Colour::Red );
+            Catch::cerr() << ex.what() << std::endl;
+            exit(1);
+        }
+    }
+
+} // end namespace Catch
+
+// #included from: ../reporters/catch_reporter_multi.hpp
+#define TWOBLUECUBES_CATCH_REPORTER_MULTI_HPP_INCLUDED
+
+namespace Catch {
+
+class MultipleReporters : public SharedImpl<IStreamingReporter> {
+    typedef std::vector<Ptr<IStreamingReporter> > Reporters;
+    Reporters m_reporters;
+
+public:
+    void add( Ptr<IStreamingReporter> const& reporter ) {
+        m_reporters.push_back( reporter );
+    }
+
+public: // IStreamingReporter
+
+    virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE {
+        return m_reporters[0]->getPreferences();
+    }
+
+    virtual void noMatchingTestCases( std::string const& spec ) CATCH_OVERRIDE {
+        for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end();
+                it != itEnd;
+                ++it )
+            (*it)->noMatchingTestCases( spec );
+    }
+
+    virtual void testRunStarting( TestRunInfo const& testRunInfo ) CATCH_OVERRIDE {
+        for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end();
+                it != itEnd;
+                ++it )
+            (*it)->testRunStarting( testRunInfo );
+    }
+
+    virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE {
+        for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end();
+                it != itEnd;
+                ++it )
+            (*it)->testGroupStarting( groupInfo );
+    }
+
+    virtual void testCaseStarting( TestCaseInfo const& testInfo ) CATCH_OVERRIDE {
+        for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end();
+                it != itEnd;
+                ++it )
+            (*it)->testCaseStarting( testInfo );
+    }
+
+    virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE {
+        for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end();
+                it != itEnd;
+                ++it )
+            (*it)->sectionStarting( sectionInfo );
+    }
+
+    virtual void assertionStarting( AssertionInfo const& assertionInfo ) CATCH_OVERRIDE {
+        for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end();
+                it != itEnd;
+                ++it )
+            (*it)->assertionStarting( assertionInfo );
+    }
+
+    // The return value indicates if the messages buffer should be cleared:
+    virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE {
+        bool clearBuffer = false;
+        for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end();
+                it != itEnd;
+                ++it )
+            clearBuffer |= (*it)->assertionEnded( assertionStats );
+        return clearBuffer;
+    }
+
+    virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE {
+        for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end();
+                it != itEnd;
+                ++it )
+            (*it)->sectionEnded( sectionStats );
+    }
+
+    virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE {
+        for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end();
+                it != itEnd;
+                ++it )
+            (*it)->testCaseEnded( testCaseStats );
+    }
+
+    virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE {
+        for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end();
+                it != itEnd;
+                ++it )
+            (*it)->testGroupEnded( testGroupStats );
+    }
+
+    virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE {
+        for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end();
+                it != itEnd;
+                ++it )
+            (*it)->testRunEnded( testRunStats );
+    }
+
+    virtual void skipTest( TestCaseInfo const& testInfo ) CATCH_OVERRIDE {
+        for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end();
+                it != itEnd;
+                ++it )
+            (*it)->skipTest( testInfo );
+    }
+
+    virtual MultipleReporters* tryAsMulti() CATCH_OVERRIDE {
+        return this;
+    }
+
+};
+
+Ptr<IStreamingReporter> addReporter( Ptr<IStreamingReporter> const& existingReporter, Ptr<IStreamingReporter> const& additionalReporter ) {
+    Ptr<IStreamingReporter> resultingReporter;
+
+    if( existingReporter ) {
+        MultipleReporters* multi = existingReporter->tryAsMulti();
+        if( !multi ) {
+            multi = new MultipleReporters;
+            resultingReporter = Ptr<IStreamingReporter>( multi );
+            if( existingReporter )
+                multi->add( existingReporter );
+        }
+        else
+            resultingReporter = existingReporter;
+        multi->add( additionalReporter );
+    }
+    else
+        resultingReporter = additionalReporter;
+
+    return resultingReporter;
+}
+
+} // end namespace Catch
+
+// #included from: ../reporters/catch_reporter_xml.hpp
+#define TWOBLUECUBES_CATCH_REPORTER_XML_HPP_INCLUDED
+
+// #included from: catch_reporter_bases.hpp
+#define TWOBLUECUBES_CATCH_REPORTER_BASES_HPP_INCLUDED
+
+#include <cstring>
+
+namespace Catch {
+
+    struct StreamingReporterBase : SharedImpl<IStreamingReporter> {
+
+        StreamingReporterBase( ReporterConfig const& _config )
+        :   m_config( _config.fullConfig() ),
+            stream( _config.stream() )
+        {
+            m_reporterPrefs.shouldRedirectStdOut = false;
+        }
+
+        virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE {
+            return m_reporterPrefs;
+        }
+
+        virtual ~StreamingReporterBase() CATCH_OVERRIDE;
+
+        virtual void noMatchingTestCases( std::string const& ) CATCH_OVERRIDE {}
+
+        virtual void testRunStarting( TestRunInfo const& _testRunInfo ) CATCH_OVERRIDE {
+            currentTestRunInfo = _testRunInfo;
+        }
+        virtual void testGroupStarting( GroupInfo const& _groupInfo ) CATCH_OVERRIDE {
+            currentGroupInfo = _groupInfo;
+        }
+
+        virtual void testCaseStarting( TestCaseInfo const& _testInfo ) CATCH_OVERRIDE {
+            currentTestCaseInfo = _testInfo;
+        }
+        virtual void sectionStarting( SectionInfo const& _sectionInfo ) CATCH_OVERRIDE {
+            m_sectionStack.push_back( _sectionInfo );
+        }
+
+        virtual void sectionEnded( SectionStats const& /* _sectionStats */ ) CATCH_OVERRIDE {
+            m_sectionStack.pop_back();
+        }
+        virtual void testCaseEnded( TestCaseStats const& /* _testCaseStats */ ) CATCH_OVERRIDE {
+            currentTestCaseInfo.reset();
+        }
+        virtual void testGroupEnded( TestGroupStats const& /* _testGroupStats */ ) CATCH_OVERRIDE {
+            currentGroupInfo.reset();
+        }
+        virtual void testRunEnded( TestRunStats const& /* _testRunStats */ ) CATCH_OVERRIDE {
+            currentTestCaseInfo.reset();
+            currentGroupInfo.reset();
+            currentTestRunInfo.reset();
+        }
+
+        virtual void skipTest( TestCaseInfo const& ) CATCH_OVERRIDE {
+            // Don't do anything with this by default.
+            // It can optionally be overridden in the derived class.
+        }
+
+        Ptr<IConfig const> m_config;
+        std::ostream& stream;
+
+        LazyStat<TestRunInfo> currentTestRunInfo;
+        LazyStat<GroupInfo> currentGroupInfo;
+        LazyStat<TestCaseInfo> currentTestCaseInfo;
+
+        std::vector<SectionInfo> m_sectionStack;
+        ReporterPreferences m_reporterPrefs;
+    };
+
+    struct CumulativeReporterBase : SharedImpl<IStreamingReporter> {
+        template<typename T, typename ChildNodeT>
+        struct Node : SharedImpl<> {
+            explicit Node( T const& _value ) : value( _value ) {}
+            virtual ~Node() {}
+
+            typedef std::vector<Ptr<ChildNodeT> > ChildNodes;
+            T value;
+            ChildNodes children;
+        };
+        struct SectionNode : SharedImpl<> {
+            explicit SectionNode( SectionStats const& _stats ) : stats( _stats ) {}
+            virtual ~SectionNode();
+
+            bool operator == ( SectionNode const& other ) const {
+                return stats.sectionInfo.lineInfo == other.stats.sectionInfo.lineInfo;
+            }
+            bool operator == ( Ptr<SectionNode> const& other ) const {
+                return operator==( *other );
+            }
+
+            SectionStats stats;
+            typedef std::vector<Ptr<SectionNode> > ChildSections;
+            typedef std::vector<AssertionStats> Assertions;
+            ChildSections childSections;
+            Assertions assertions;
+            std::string stdOut;
+            std::string stdErr;
+        };
+
+        struct BySectionInfo {
+            BySectionInfo( SectionInfo const& other ) : m_other( other ) {}
+			BySectionInfo( BySectionInfo const& other ) : m_other( other.m_other ) {}
+            bool operator() ( Ptr<SectionNode> const& node ) const {
+                return node->stats.sectionInfo.lineInfo == m_other.lineInfo;
+            }
+        private:
+			void operator=( BySectionInfo const& );
+            SectionInfo const& m_other;
+        };
+
+        typedef Node<TestCaseStats, SectionNode> TestCaseNode;
+        typedef Node<TestGroupStats, TestCaseNode> TestGroupNode;
+        typedef Node<TestRunStats, TestGroupNode> TestRunNode;
+
+        CumulativeReporterBase( ReporterConfig const& _config )
+        :   m_config( _config.fullConfig() ),
+            stream( _config.stream() )
+        {
+            m_reporterPrefs.shouldRedirectStdOut = false;
+        }
+        ~CumulativeReporterBase();
+
+        virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE {
+            return m_reporterPrefs;
+        }
+
+        virtual void testRunStarting( TestRunInfo const& ) CATCH_OVERRIDE {}
+        virtual void testGroupStarting( GroupInfo const& ) CATCH_OVERRIDE {}
+
+        virtual void testCaseStarting( TestCaseInfo const& ) CATCH_OVERRIDE {}
+
+        virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE {
+            SectionStats incompleteStats( sectionInfo, Counts(), 0, false );
+            Ptr<SectionNode> node;
+            if( m_sectionStack.empty() ) {
+                if( !m_rootSection )
+                    m_rootSection = new SectionNode( incompleteStats );
+                node = m_rootSection;
+            }
+            else {
+                SectionNode& parentNode = *m_sectionStack.back();
+                SectionNode::ChildSections::const_iterator it =
+                    std::find_if(   parentNode.childSections.begin(),
+                                    parentNode.childSections.end(),
+                                    BySectionInfo( sectionInfo ) );
+                if( it == parentNode.childSections.end() ) {
+                    node = new SectionNode( incompleteStats );
+                    parentNode.childSections.push_back( node );
+                }
+                else
+                    node = *it;
+            }
+            m_sectionStack.push_back( node );
+            m_deepestSection = node;
+        }
+
+        virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE {}
+
+        virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE {
+            assert( !m_sectionStack.empty() );
+            SectionNode& sectionNode = *m_sectionStack.back();
+            sectionNode.assertions.push_back( assertionStats );
+            return true;
+        }
+        virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE {
+            assert( !m_sectionStack.empty() );
+            SectionNode& node = *m_sectionStack.back();
+            node.stats = sectionStats;
+            m_sectionStack.pop_back();
+        }
+        virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE {
+            Ptr<TestCaseNode> node = new TestCaseNode( testCaseStats );
+            assert( m_sectionStack.size() == 0 );
+            node->children.push_back( m_rootSection );
+            m_testCases.push_back( node );
+            m_rootSection.reset();
+
+            assert( m_deepestSection );
+            m_deepestSection->stdOut = testCaseStats.stdOut;
+            m_deepestSection->stdErr = testCaseStats.stdErr;
+        }
+        virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE {
+            Ptr<TestGroupNode> node = new TestGroupNode( testGroupStats );
+            node->children.swap( m_testCases );
+            m_testGroups.push_back( node );
+        }
+        virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE {
+            Ptr<TestRunNode> node = new TestRunNode( testRunStats );
+            node->children.swap( m_testGroups );
+            m_testRuns.push_back( node );
+            testRunEndedCumulative();
+        }
+        virtual void testRunEndedCumulative() = 0;
+
+        virtual void skipTest( TestCaseInfo const& ) CATCH_OVERRIDE {}
+
+        Ptr<IConfig const> m_config;
+        std::ostream& stream;
+        std::vector<AssertionStats> m_assertions;
+        std::vector<std::vector<Ptr<SectionNode> > > m_sections;
+        std::vector<Ptr<TestCaseNode> > m_testCases;
+        std::vector<Ptr<TestGroupNode> > m_testGroups;
+
+        std::vector<Ptr<TestRunNode> > m_testRuns;
+
+        Ptr<SectionNode> m_rootSection;
+        Ptr<SectionNode> m_deepestSection;
+        std::vector<Ptr<SectionNode> > m_sectionStack;
+        ReporterPreferences m_reporterPrefs;
+
+    };
+
+    template<char C>
+    char const* getLineOfChars() {
+        static char line[CATCH_CONFIG_CONSOLE_WIDTH] = {0};
+        if( !*line ) {
+            memset( line, C, CATCH_CONFIG_CONSOLE_WIDTH-1 );
+            line[CATCH_CONFIG_CONSOLE_WIDTH-1] = 0;
+        }
+        return line;
+    }
+
+    struct TestEventListenerBase : StreamingReporterBase {
+        TestEventListenerBase( ReporterConfig const& _config )
+        :   StreamingReporterBase( _config )
+        {}
+
+        virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE {}
+        virtual bool assertionEnded( AssertionStats const& ) CATCH_OVERRIDE {
+            return false;
+        }
+    };
+
+} // end namespace Catch
 
 // #included from: ../internal/catch_reporter_registrars.hpp
+#define TWOBLUECUBES_CATCH_REPORTER_REGISTRARS_HPP_INCLUDED
 
 namespace Catch {
 
     template<typename T>
-    class ReporterRegistrar {
+    class LegacyReporterRegistrar {
 
         class ReporterFactory : public IReporterFactory {
-
-            virtual IReporter* create( const IReporterConfig& config ) const {
+            virtual IStreamingReporter* create( ReporterConfig const& config ) const {
+                return new LegacyReporterAdapter( new T( config ) );
+            }
+
+            virtual std::string getDescription() const {
+                return T::getDescription();
+            }
+        };
+
+    public:
+
+        LegacyReporterRegistrar( std::string const& name ) {
+            getMutableRegistryHub().registerReporter( name, new ReporterFactory() );
+        }
+    };
+
+    template<typename T>
+    class ReporterRegistrar {
+
+        class ReporterFactory : public SharedImpl<IReporterFactory> {
+
+            // *** Please Note ***:
+            // - If you end up here looking at a compiler error because it's trying to register
+            // your custom reporter class be aware that the native reporter interface has changed
+            // to IStreamingReporter. The "legacy" interface, IReporter, is still supported via
+            // an adapter. Just use REGISTER_LEGACY_REPORTER to take advantage of the adapter.
+            // However please consider updating to the new interface as the old one is now
+            // deprecated and will probably be removed quite soon!
+            // Please contact me via github if you have any questions at all about this.
+            // In fact, ideally, please contact me anyway to let me know you've hit this - as I have
+            // no idea who is actually using custom reporters at all (possibly no-one!).
+            // The new interface is designed to minimise exposure to interface changes in the future.
+            virtual IStreamingReporter* create( ReporterConfig const& config ) const {
                 return new T( config );
             }
 
@@ -3741,342 +8870,106 @@
 
     public:
 
-        ReporterRegistrar( const std::string& name ) {
-            getCurrentContext().getReporterRegistry().registerReporter( name, new ReporterFactory() );
-        }
-    };
-}
-
-#define INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType ) \
-    Catch::ReporterRegistrar<reporterType> catch_internal_RegistrarFor##reporterType( name );
-
-namespace Catch {
-
-    struct pluralise {
-        pluralise( std::size_t count, const std::string& label )
-        :   m_count( count ),
-            m_label( label )
-        {}
-
-        friend std::ostream& operator << ( std::ostream& os, const pluralise& pluraliser ) {
-            os << pluraliser.m_count << " " << pluraliser.m_label;
-            if( pluraliser.m_count != 1 )
-                os << "s";
-            return os;
-        }
-
-        std::size_t m_count;
-        std::string m_label;
-    };
-
-    class BasicReporter : public SharedImpl<IReporter> {
-
-        struct SpanInfo {
-
-            SpanInfo()
-            :   emitted( false )
-            {}
-
-            SpanInfo( const std::string& spanName )
-            :   name( spanName ),
-                emitted( false )
-            {}
-
-            SpanInfo( const SpanInfo& other )
-            :   name( other.name ),
-                emitted( other.emitted )
-            {}
-
-            std::string name;
-            bool emitted;
+        ReporterRegistrar( std::string const& name ) {
+            getMutableRegistryHub().registerReporter( name, new ReporterFactory() );
+        }
+    };
+
+    template<typename T>
+    class ListenerRegistrar {
+
+        class ListenerFactory : public SharedImpl<IReporterFactory> {
+
+            virtual IStreamingReporter* create( ReporterConfig const& config ) const {
+                return new T( config );
+            }
+            virtual std::string getDescription() const {
+                return "";
+            }
         };
 
     public:
-        BasicReporter( const IReporterConfig& config )
-        :   m_config( config ),
-            m_firstSectionInTestCase( true ),
-            m_aborted( false )
-        {}
-
-        static std::string getDescription() {
-            return "Reports test results as lines of text";
-        }
-
-    private:
-
-        void ReportCounts( const std::string& label, const Counts& counts, const std::string& allPrefix = "All " ) {
-            if( counts.passed )
-                m_config.stream() << counts.failed << " of " << counts.total() << " " << label << "s failed";
-            else
-                m_config.stream() << ( counts.failed > 1 ? allPrefix : "" ) << pluralise( counts.failed, label ) << " failed";
-        }
-
-        void ReportCounts( const Totals& totals, const std::string& allPrefix = "All " ) {
-            if( totals.assertions.total() == 0 ) {
-                m_config.stream() << "No tests ran";
-            }
-            else if( totals.assertions.failed ) {
-                TextColour colour( TextColour::ResultError );
-                ReportCounts( "test case", totals.testCases, allPrefix );
-                if( totals.testCases.failed > 0 ) {
-                    m_config.stream() << " (";
-                    ReportCounts( "assertion", totals.assertions, allPrefix );
-                    m_config.stream() << ")";
-                }
-            }
-            else {
-                TextColour colour( TextColour::ResultSuccess );
-                m_config.stream()   << allPrefix << "tests passed ("
-                                    << pluralise( totals.assertions.passed, "assertion" ) << " in "
-                                    << pluralise( totals.testCases.passed, "test case" ) << ")";
-            }
-        }
-
-    private: // IReporter
-
-        virtual bool shouldRedirectStdout() const {
-            return false;
-        }
-
-        virtual void StartTesting() {
-            m_testingSpan = SpanInfo();
-        }
-
-        virtual void Aborted() {
-            m_aborted = true;
-        }
-
-        virtual void EndTesting( const Totals& totals ) {
-            // Output the overall test results even if "Started Testing" was not emitted
-            if( m_aborted ) {
-                m_config.stream() << "\n[Testing aborted. ";
-                ReportCounts( totals, "The first " );
-            }
-            else {
-                m_config.stream() << "\n[Testing completed. ";
-                ReportCounts( totals );
-            }
-            m_config.stream() << "]\n" << std::endl;
-        }
-
-        virtual void StartGroup( const std::string& groupName ) {
-            m_groupSpan = groupName;
-        }
-
-        virtual void EndGroup( const std::string& groupName, const Totals& totals ) {
-            if( m_groupSpan.emitted && !groupName.empty() ) {
-                m_config.stream() << "[End of group: '" << groupName << "'. ";
-                ReportCounts( totals );
-                m_config.stream() << "]\n" << std::endl;
-                m_groupSpan = SpanInfo();
-            }
-        }
-
-        virtual void StartTestCase( const TestCaseInfo& testInfo ) {
-            m_testSpan = testInfo.getName();
-        }
-
-        virtual void StartSection( const std::string& sectionName, const std::string& ) {
-            m_sectionSpans.push_back( SpanInfo( sectionName ) );
-        }
-
-        virtual void EndSection( const std::string& sectionName, const Counts& assertions ) {
-            SpanInfo& sectionSpan = m_sectionSpans.back();
-            if( sectionSpan.emitted && !sectionSpan.name.empty() ) {
-                m_config.stream() << "[End of section: '" << sectionName << "' ";
-
-                if( assertions.failed ) {
-                    TextColour colour( TextColour::ResultError );
-                    ReportCounts( "assertion", assertions);
-                }
-                else {
-                    TextColour colour( TextColour::ResultSuccess );
-                    m_config.stream()   << ( assertions.passed > 1 ? "All " : "" )
-                                        << pluralise( assertions.passed, "assertion" ) << "passed" ;
-                }
-                m_config.stream() << "]\n" << std::endl;
-            }
-            m_sectionSpans.pop_back();
-        }
-
-        virtual void Result( const ResultInfo& resultInfo ) {
-            if( !m_config.includeSuccessfulResults() && resultInfo.getResultType() == ResultWas::Ok )
-                return;
-
-            StartSpansLazily();
-
-            if( !resultInfo.getFilename().empty() ) {
-                TextColour colour( TextColour::FileName );
-                m_config.stream() << SourceLineInfo( resultInfo.getFilename(), resultInfo.getLine() );
-            }
-
-            if( resultInfo.hasExpression() ) {
-                TextColour colour( TextColour::OriginalExpression );
-                m_config.stream() << resultInfo.getExpression();
-                if( resultInfo.ok() ) {
-                    TextColour successColour( TextColour::Success );
-                    m_config.stream() << " succeeded";
-                }
-                else {
-                    TextColour errorColour( TextColour::Error );
-                    m_config.stream() << " failed";
-                }
-            }
-            switch( resultInfo.getResultType() ) {
-                case ResultWas::ThrewException:
-                {
-                    TextColour colour( TextColour::Error );
-                    if( resultInfo.hasExpression() )
-                        m_config.stream() << " with unexpected";
-                    else
-                        m_config.stream() << "Unexpected";
-                    m_config.stream() << " exception with message: '" << resultInfo.getMessage() << "'";
-                }
-                    break;
-                case ResultWas::DidntThrowException:
-                {
-                    TextColour colour( TextColour::Error );
-                    if( resultInfo.hasExpression() )
-                        m_config.stream() << " because no exception was thrown where one was expected";
-                    else
-                        m_config.stream() << "No exception thrown where one was expected";
-                }
-                    break;
-                case ResultWas::Info:
-                    streamVariableLengthText( "info", resultInfo.getMessage() );
-                    break;
-                case ResultWas::Warning:
-                    m_config.stream() << "warning:\n'" << resultInfo.getMessage() << "'";
-                    break;
-                case ResultWas::ExplicitFailure:
-                {
-                    TextColour colour( TextColour::Error );
-                    m_config.stream() << "failed with message: '" << resultInfo.getMessage() << "'";
-                }
-                    break;
-                case ResultWas::Unknown: // These cases are here to prevent compiler warnings
-                case ResultWas::Ok:
-                case ResultWas::FailureBit:
-                case ResultWas::ExpressionFailed:
-                case ResultWas::Exception:
-                default:
-                    if( !resultInfo.hasExpression() ) {
-                        if( resultInfo.ok() ) {
-                            TextColour colour( TextColour::Success );
-                            m_config.stream() << " succeeded";
-                        }
-                        else {
-                            TextColour colour( TextColour::Error );
-                            m_config.stream() << " failed";
-                        }
-                    }
-                    break;
-            }
-
-            if( resultInfo.hasExpandedExpression() ) {
-                m_config.stream() << " for: ";
-                TextColour colour( TextColour::ReconstructedExpression );
-                m_config.stream() << resultInfo.getExpandedExpression();
-            }
-            m_config.stream() << std::endl;
-        }
-
-        virtual void EndTestCase(   const TestCaseInfo& testInfo,
-                                    const Totals& totals,
-                                    const std::string& stdOut,
-                                    const std::string& stdErr ) {
-            if( !stdOut.empty() ) {
-                StartSpansLazily();
-                streamVariableLengthText( "stdout", stdOut );
-            }
-
-            if( !stdErr.empty() ) {
-                StartSpansLazily();
-                streamVariableLengthText( "stderr", stdErr );
-            }
-
-            if( m_testSpan.emitted ) {
-                m_config.stream() << "[Finished: '" << testInfo.getName() << "' ";
-                ReportCounts( totals );
-                m_config.stream() << "]" << std::endl;
-            }
-        }
-
-    private: // helpers
-
-        void StartSpansLazily() {
-            if( !m_testingSpan.emitted ) {
-                if( m_config.getName().empty() )
-                    m_config.stream() << "[Started testing]" << std::endl;
-                else
-                    m_config.stream() << "[Started testing: " << m_config.getName() << "]" << std::endl;
-                m_testingSpan.emitted = true;
-            }
-
-            if( !m_groupSpan.emitted && !m_groupSpan.name.empty() ) {
-                m_config.stream() << "[Started group: '" << m_groupSpan.name << "']" << std::endl;
-                m_groupSpan.emitted = true;
-            }
-
-            if( !m_testSpan.emitted ) {
-                m_config.stream() << std::endl << "[Running: " << m_testSpan.name << "]" << std::endl;
-                m_testSpan.emitted = true;
-            }
-
-            if( !m_sectionSpans.empty() ) {
-                SpanInfo& sectionSpan = m_sectionSpans.back();
-                if( !sectionSpan.emitted && !sectionSpan.name.empty() ) {
-                    if( m_firstSectionInTestCase ) {
-                        m_config.stream() << "\n";
-                        m_firstSectionInTestCase = false;
-                    }
-                    std::vector<SpanInfo>::iterator it = m_sectionSpans.begin();
-                    std::vector<SpanInfo>::iterator itEnd = m_sectionSpans.end();
-                    for(; it != itEnd; ++it ) {
-                        SpanInfo& prevSpan = *it;
-                        if( !prevSpan.emitted && !prevSpan.name.empty() ) {
-                            m_config.stream() << "[Started section: '" << prevSpan.name << "']" << std::endl;
-                            prevSpan.emitted = true;
-                        }
-                    }
-                }
-            }
-        }
-
-        void streamVariableLengthText( const std::string& prefix, const std::string& text ) {
-            std::string trimmed = trim( text );
-            if( trimmed.find_first_of( "\r\n" ) == std::string::npos ) {
-                m_config.stream() << "[" << prefix << ": " << trimmed << "]\n";
-            }
-            else {
-                m_config.stream() << "\n[" << prefix << "] >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n" << trimmed
-                << "\n[end of " << prefix << "] <<<<<<<<<<<<<<<<<<<<<<<<\n";
-            }
-        }
-
-    private:
-        const IReporterConfig& m_config;
-        bool m_firstSectionInTestCase;
-
-        SpanInfo m_testingSpan;
-        SpanInfo m_groupSpan;
-        SpanInfo m_testSpan;
-        std::vector<SpanInfo> m_sectionSpans;
-        bool m_aborted;
-    };
-
-} // end namespace Catch
-
-// #included from: reporters/catch_reporter_xml.hpp
+
+        ListenerRegistrar() {
+            getMutableRegistryHub().registerListener( new ListenerFactory() );
+        }
+    };
+}
+
+#define INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) \
+    namespace{ Catch::LegacyReporterRegistrar<reporterType> catch_internal_RegistrarFor##reporterType( name ); }
+
+#define INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType ) \
+    namespace{ Catch::ReporterRegistrar<reporterType> catch_internal_RegistrarFor##reporterType( name ); }
+
+#define INTERNAL_CATCH_REGISTER_LISTENER( listenerType ) \
+    namespace{ Catch::ListenerRegistrar<listenerType> catch_internal_RegistrarFor##listenerType; }
 
 // #included from: ../internal/catch_xmlwriter.hpp
+#define TWOBLUECUBES_CATCH_XMLWRITER_HPP_INCLUDED
 
 #include <sstream>
 #include <string>
 #include <vector>
+#include <iomanip>
 
 namespace Catch {
 
+    class XmlEncode {
+    public:
+        enum ForWhat { ForTextNodes, ForAttributes };
+
+        XmlEncode( std::string const& str, ForWhat forWhat = ForTextNodes )
+        :   m_str( str ),
+            m_forWhat( forWhat )
+        {}
+
+        void encodeTo( std::ostream& os ) const {
+
+            // Apostrophe escaping not necessary if we always use " to write attributes
+            // (see: http://www.w3.org/TR/xml/#syntax)
+
+            for( std::size_t i = 0; i < m_str.size(); ++ i ) {
+                char c = m_str[i];
+                switch( c ) {
+                    case '<':   os << "&lt;"; break;
+                    case '&':   os << "&amp;"; break;
+
+                    case '>':
+                        // See: http://www.w3.org/TR/xml/#syntax
+                        if( i > 2 && m_str[i-1] == ']' && m_str[i-2] == ']' )
+                            os << "&gt;";
+                        else
+                            os << c;
+                        break;
+
+                    case '\"':
+                        if( m_forWhat == ForAttributes )
+                            os << "&quot;";
+                        else
+                            os << c;
+                        break;
+
+                    default:
+                        // Escape control chars - based on contribution by @espenalb in PR #465
+                        if ( ( c < '\x09' ) || ( c > '\x0D' && c < '\x20') || c=='\x7F' )
+                            os << "&#x" << std::uppercase << std::hex << static_cast<int>( c );
+                        else
+                            os << c;
+                }
+            }
+        }
+
+        friend std::ostream& operator << ( std::ostream& os, XmlEncode const& xmlEncode ) {
+            xmlEncode.encodeTo( os );
+            return os;
+        }
+
+    private:
+        std::string m_str;
+        ForWhat m_forWhat;
+    };
+
     class XmlWriter {
     public:
 
@@ -4086,9 +8979,9 @@
             :   m_writer( writer )
             {}
 
-            ScopedElement( const ScopedElement& other )
+            ScopedElement( ScopedElement const& other )
             :   m_writer( other.m_writer ){
-                other.m_writer = NULL;
+                other.m_writer = CATCH_NULL;
             }
 
             ~ScopedElement() {
@@ -4096,13 +8989,13 @@
                     m_writer->endElement();
             }
 
-            ScopedElement& writeText( const std::string& text ) {
-                m_writer->writeText( text );
+            ScopedElement& writeText( std::string const& text, bool indent = true ) {
+                m_writer->writeText( text, indent );
                 return *this;
             }
 
             template<typename T>
-            ScopedElement& writeAttribute( const std::string& name, const T& attribute ) {
+            ScopedElement& writeAttribute( std::string const& name, T const& attribute ) {
                 m_writer->writeAttribute( name, attribute );
                 return *this;
             }
@@ -4114,7 +9007,7 @@
         XmlWriter()
         :   m_tagIsOpen( false ),
             m_needsNewline( false ),
-            m_os( &std::cout )
+            m_os( &Catch::cout() )
         {}
 
         XmlWriter( std::ostream& os )
@@ -4128,21 +9021,7 @@
                 endElement();
         }
 
-        XmlWriter& operator = ( const XmlWriter& other ) {
-            XmlWriter temp( other );
-            swap( temp );
-            return *this;
-        }
-
-        void swap( XmlWriter& other ) {
-            std::swap( m_tagIsOpen, other.m_tagIsOpen );
-            std::swap( m_needsNewline, other.m_needsNewline );
-            std::swap( m_tags, other.m_tags );
-            std::swap( m_indent, other.m_indent );
-            std::swap( m_os, other.m_os );
-        }
-
-        XmlWriter& startElement( const std::string& name ) {
+        XmlWriter& startElement( std::string const& name ) {
             ensureTagClosed();
             newlineIfNecessary();
             stream() << m_indent << "<" << name;
@@ -4152,7 +9031,7 @@
             return *this;
         }
 
-        ScopedElement scopedElement( const std::string& name ) {
+        ScopedElement scopedElement( std::string const& name ) {
             ScopedElement scoped( this );
             startElement( name );
             return scoped;
@@ -4172,40 +9051,37 @@
             return *this;
         }
 
-        XmlWriter& writeAttribute( const std::string& name, const std::string& attribute ) {
-            if( !name.empty() && !attribute.empty() ) {
-                stream() << " " << name << "=\"";
-                writeEncodedText( attribute );
-                stream() << "\"";
-            }
+        XmlWriter& writeAttribute( std::string const& name, std::string const& attribute ) {
+            if( !name.empty() && !attribute.empty() )
+                stream() << " " << name << "=\"" << XmlEncode( attribute, XmlEncode::ForAttributes ) << "\"";
             return *this;
         }
 
-        XmlWriter& writeAttribute( const std::string& name, bool attribute ) {
+        XmlWriter& writeAttribute( std::string const& name, bool attribute ) {
             stream() << " " << name << "=\"" << ( attribute ? "true" : "false" ) << "\"";
             return *this;
         }
 
         template<typename T>
-        XmlWriter& writeAttribute( const std::string& name, const T& attribute ) {
-            if( !name.empty() )
-                stream() << " " << name << "=\"" << attribute << "\"";
-            return *this;
-        }
-
-        XmlWriter& writeText( const std::string& text ) {
+        XmlWriter& writeAttribute( std::string const& name, T const& attribute ) {
+            std::ostringstream oss;
+            oss << attribute;
+            return writeAttribute( name, oss.str() );
+        }
+
+        XmlWriter& writeText( std::string const& text, bool indent = true ) {
             if( !text.empty() ){
                 bool tagWasOpen = m_tagIsOpen;
                 ensureTagClosed();
-                if( tagWasOpen )
+                if( tagWasOpen && indent )
                     stream() << m_indent;
-                writeEncodedText( text );
+                stream() << XmlEncode( text );
                 m_needsNewline = true;
             }
             return *this;
         }
 
-        XmlWriter& writeComment( const std::string& text ) {
+        XmlWriter& writeComment( std::string const& text ) {
             ensureTagClosed();
             stream() << m_indent << "<!--" << text << "-->";
             m_needsNewline = true;
@@ -4218,7 +9094,13 @@
             return *this;
         }
 
+        void setStream( std::ostream& os ) {
+            m_os = &os;
+        }
+
     private:
+        XmlWriter( XmlWriter const& );
+        void operator=( XmlWriter const& );
 
         std::ostream& stream() {
             return *m_os;
@@ -4238,30 +9120,6 @@
             }
         }
 
-        void writeEncodedText( const std::string& text ) {
-            static const char* charsToEncode = "<&\"";
-            std::string mtext = text;
-            std::string::size_type pos = mtext.find_first_of( charsToEncode );
-            while( pos != std::string::npos ) {
-                stream() << mtext.substr( 0, pos );
-
-                switch( mtext[pos] ) {
-                    case '<':
-                        stream() << "&lt;";
-                        break;
-                    case '&':
-                        stream() << "&amp;";
-                        break;
-                    case '\"':
-                        stream() << "&quot;";
-                        break;
-                }
-                mtext = mtext.substr( pos+1 );
-                pos = mtext.find_first_of( charsToEncode );
-            }
-            stream() << mtext;
-        }
-
         bool m_tagIsOpen;
         bool m_needsNewline;
         std::vector<std::string> m_tags;
@@ -4270,466 +9128,1209 @@
     };
 
 }
+// #included from: catch_reenable_warnings.h
+
+#define TWOBLUECUBES_CATCH_REENABLE_WARNINGS_H_INCLUDED
+
+#ifdef __clang__
+#    ifdef __ICC // icpc defines the __clang__ macro
+#        pragma warning(pop)
+#    else
+#        pragma clang diagnostic pop
+#    endif
+#elif defined __GNUC__
+#    pragma GCC diagnostic pop
+#endif
+
+
 namespace Catch {
-    class XmlReporter : public SharedImpl<IReporter> {
+    class XmlReporter : public StreamingReporterBase {
     public:
-        XmlReporter( const IReporterConfig& config ) : m_config( config ) {}
+        XmlReporter( ReporterConfig const& _config )
+        :   StreamingReporterBase( _config ),
+            m_sectionDepth( 0 )
+        {
+            m_reporterPrefs.shouldRedirectStdOut = true;
+        }
+
+        virtual ~XmlReporter() CATCH_OVERRIDE;
 
         static std::string getDescription() {
             return "Reports test results as an XML document";
         }
 
-    private: // IReporter
-
-        virtual bool shouldRedirectStdout() const {
-            return true;
-        }
-
-        virtual void StartTesting() {
-            m_xml = XmlWriter( m_config.stream() );
+    public: // StreamingReporterBase
+
+        virtual void noMatchingTestCases( std::string const& s ) CATCH_OVERRIDE {
+            StreamingReporterBase::noMatchingTestCases( s );
+        }
+
+        virtual void testRunStarting( TestRunInfo const& testInfo ) CATCH_OVERRIDE {
+            StreamingReporterBase::testRunStarting( testInfo );
+            m_xml.setStream( stream );
             m_xml.startElement( "Catch" );
-            if( !m_config.getName().empty() )
-                m_xml.writeAttribute( "name", m_config.getName() );
-        }
-
-        virtual void EndTesting( const Totals& totals ) {
-            m_xml.scopedElement( "OverallResults" )
-                .writeAttribute( "successes", totals.assertions.passed )
-                .writeAttribute( "failures", totals.assertions.failed );
-            m_xml.endElement();
-        }
-
-        virtual void StartGroup( const std::string& groupName ) {
+            if( !m_config->name().empty() )
+                m_xml.writeAttribute( "name", m_config->name() );
+        }
+
+        virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE {
+            StreamingReporterBase::testGroupStarting( groupInfo );
             m_xml.startElement( "Group" )
-                .writeAttribute( "name", groupName );
-        }
-
-        virtual void EndGroup( const std::string&, const Totals& totals ) {
-            m_xml.scopedElement( "OverallResults" )
-                .writeAttribute( "successes", totals.assertions.passed )
-                .writeAttribute( "failures", totals.assertions.failed );
-            m_xml.endElement();
-        }
-
-        virtual void StartSection( const std::string& sectionName, const std::string& description ) {
-            m_xml.startElement( "Section" )
-                .writeAttribute( "name", sectionName )
-                .writeAttribute( "description", description );
-        }
-
-        virtual void EndSection( const std::string& /*sectionName*/, const Counts& assertions ) {
-            m_xml.scopedElement( "OverallResults" )
-                .writeAttribute( "successes", assertions.passed )
-                .writeAttribute( "failures", assertions.failed );
-            m_xml.endElement();
-        }
-
-        virtual void StartTestCase( const Catch::TestCaseInfo& testInfo ) {
-            m_xml.startElement( "TestCase" ).writeAttribute( "name", testInfo.getName() );
-            m_currentTestSuccess = true;
-        }
-
-        virtual void Result( const Catch::ResultInfo& resultInfo ) {
-            if( !m_config.includeSuccessfulResults() && resultInfo.getResultType() == ResultWas::Ok )
-                return;
-
-            if( resultInfo.hasExpression() ) {
+                .writeAttribute( "name", groupInfo.name );
+        }
+
+        virtual void testCaseStarting( TestCaseInfo const& testInfo ) CATCH_OVERRIDE {
+            StreamingReporterBase::testCaseStarting(testInfo);
+            m_xml.startElement( "TestCase" ).writeAttribute( "name", trim( testInfo.name ) );
+
+            if ( m_config->showDurations() == ShowDurations::Always )
+                m_testCaseTimer.start();
+        }
+
+        virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE {
+            StreamingReporterBase::sectionStarting( sectionInfo );
+            if( m_sectionDepth++ > 0 ) {
+                m_xml.startElement( "Section" )
+                    .writeAttribute( "name", trim( sectionInfo.name ) )
+                    .writeAttribute( "description", sectionInfo.description );
+            }
+        }
+
+        virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE { }
+
+        virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE {
+            const AssertionResult& assertionResult = assertionStats.assertionResult;
+
+            // Print any info messages in <Info> tags.
+            if( assertionStats.assertionResult.getResultType() != ResultWas::Ok ) {
+                for( std::vector<MessageInfo>::const_iterator it = assertionStats.infoMessages.begin(), itEnd = assertionStats.infoMessages.end();
+                        it != itEnd;
+                        ++it ) {
+                    if( it->type == ResultWas::Info ) {
+                        m_xml.scopedElement( "Info" )
+                            .writeText( it->message );
+                    } else if ( it->type == ResultWas::Warning ) {
+                        m_xml.scopedElement( "Warning" )
+                            .writeText( it->message );
+                    }
+                }
+            }
+
+            // Drop out if result was successful but we're not printing them.
+            if( !m_config->includeSuccessfulResults() && isOk(assertionResult.getResultType()) )
+                return true;
+
+            // Print the expression if there is one.
+            if( assertionResult.hasExpression() ) {
                 m_xml.startElement( "Expression" )
-                    .writeAttribute( "success", resultInfo.ok() )
-                    .writeAttribute( "filename", resultInfo.getFilename() )
-                    .writeAttribute( "line", resultInfo.getLine() );
+                    .writeAttribute( "success", assertionResult.succeeded() )
+					.writeAttribute( "type", assertionResult.getTestMacroName() )
+                    .writeAttribute( "filename", assertionResult.getSourceInfo().file )
+                    .writeAttribute( "line", assertionResult.getSourceInfo().line );
 
                 m_xml.scopedElement( "Original" )
-                    .writeText( resultInfo.getExpression() );
+                    .writeText( assertionResult.getExpression() );
                 m_xml.scopedElement( "Expanded" )
-                    .writeText( resultInfo.getExpandedExpression() );
-                m_currentTestSuccess &= resultInfo.ok();
-            }
-
-            switch( resultInfo.getResultType() ) {
+                    .writeText( assertionResult.getExpandedExpression() );
+            }
+
+            // And... Print a result applicable to each result type.
+            switch( assertionResult.getResultType() ) {
                 case ResultWas::ThrewException:
                     m_xml.scopedElement( "Exception" )
-                        .writeAttribute( "filename", resultInfo.getFilename() )
-                        .writeAttribute( "line", resultInfo.getLine() )
-                        .writeText( resultInfo.getMessage() );
-                    m_currentTestSuccess = false;
+                        .writeAttribute( "filename", assertionResult.getSourceInfo().file )
+                        .writeAttribute( "line", assertionResult.getSourceInfo().line )
+                        .writeText( assertionResult.getMessage() );
+                    break;
+                case ResultWas::FatalErrorCondition:
+                    m_xml.scopedElement( "Fatal Error Condition" )
+                        .writeAttribute( "filename", assertionResult.getSourceInfo().file )
+                        .writeAttribute( "line", assertionResult.getSourceInfo().line )
+                        .writeText( assertionResult.getMessage() );
                     break;
                 case ResultWas::Info:
                     m_xml.scopedElement( "Info" )
-                        .writeText( resultInfo.getMessage() );
+                        .writeText( assertionResult.getMessage() );
                     break;
                 case ResultWas::Warning:
-                    m_xml.scopedElement( "Warning" )
-                        .writeText( resultInfo.getMessage() );
+                    // Warning will already have been written
                     break;
                 case ResultWas::ExplicitFailure:
                     m_xml.scopedElement( "Failure" )
-                        .writeText( resultInfo.getMessage() );
-                    m_currentTestSuccess = false;
+                        .writeText( assertionResult.getMessage() );
                     break;
-                case ResultWas::Unknown:
-                case ResultWas::Ok:
-                case ResultWas::FailureBit:
-                case ResultWas::ExpressionFailed:
-                case ResultWas::Exception:
-                case ResultWas::DidntThrowException:
                 default:
                     break;
             }
-            if( resultInfo.hasExpression() )
+
+            if( assertionResult.hasExpression() )
+                m_xml.endElement();
+
+            return true;
+        }
+
+        virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE {
+            StreamingReporterBase::sectionEnded( sectionStats );
+            if( --m_sectionDepth > 0 ) {
+                XmlWriter::ScopedElement e = m_xml.scopedElement( "OverallResults" );
+                e.writeAttribute( "successes", sectionStats.assertions.passed );
+                e.writeAttribute( "failures", sectionStats.assertions.failed );
+                e.writeAttribute( "expectedFailures", sectionStats.assertions.failedButOk );
+
+                if ( m_config->showDurations() == ShowDurations::Always )
+                    e.writeAttribute( "durationInSeconds", sectionStats.durationInSeconds );
+
                 m_xml.endElement();
-        }
-
-        virtual void Aborted() {
-            // !TBD
-        }
-
-        virtual void EndTestCase( const Catch::TestCaseInfo&, const Totals&, const std::string&, const std::string& ) {
-            m_xml.scopedElement( "OverallResult" ).writeAttribute( "success", m_currentTestSuccess );
+            }
+        }
+
+        virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE {
+            StreamingReporterBase::testCaseEnded( testCaseStats );
+            XmlWriter::ScopedElement e = m_xml.scopedElement( "OverallResult" );
+            e.writeAttribute( "success", testCaseStats.totals.assertions.allOk() );
+
+            if ( m_config->showDurations() == ShowDurations::Always )
+                e.writeAttribute( "durationInSeconds", m_testCaseTimer.getElapsedSeconds() );
+
+            m_xml.endElement();
+        }
+
+        virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE {
+            StreamingReporterBase::testGroupEnded( testGroupStats );
+            // TODO: Check testGroupStats.aborting and act accordingly.
+            m_xml.scopedElement( "OverallResults" )
+                .writeAttribute( "successes", testGroupStats.totals.assertions.passed )
+                .writeAttribute( "failures", testGroupStats.totals.assertions.failed )
+                .writeAttribute( "expectedFailures", testGroupStats.totals.assertions.failedButOk );
+            m_xml.endElement();
+        }
+
+        virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE {
+            StreamingReporterBase::testRunEnded( testRunStats );
+            m_xml.scopedElement( "OverallResults" )
+                .writeAttribute( "successes", testRunStats.totals.assertions.passed )
+                .writeAttribute( "failures", testRunStats.totals.assertions.failed )
+                .writeAttribute( "expectedFailures", testRunStats.totals.assertions.failedButOk );
             m_xml.endElement();
         }
 
     private:
-        const IReporterConfig& m_config;
-        bool m_currentTestSuccess;
+        Timer m_testCaseTimer;
         XmlWriter m_xml;
-    };
+        int m_sectionDepth;
+    };
+
+     INTERNAL_CATCH_REGISTER_REPORTER( "xml", XmlReporter )
 
 } // end namespace Catch
 
-// #included from: reporters/catch_reporter_junit.hpp
+// #included from: ../reporters/catch_reporter_junit.hpp
+#define TWOBLUECUBES_CATCH_REPORTER_JUNIT_HPP_INCLUDED
+
+#include <assert.h>
 
 namespace Catch {
 
-    class JunitReporter : public SharedImpl<IReporter> {
-
-        struct TestStats {
-            std::string m_element;
-            std::string m_resultType;
-            std::string m_message;
-            std::string m_content;
-        };
-
-        struct TestCaseStats {
-
-            TestCaseStats( const std::string& name = std::string() ) :m_name( name ){}
-
-            double      m_timeInSeconds;
-            std::string m_status;
-            std::string m_className;
-            std::string m_name;
-            std::vector<TestStats> m_testStats;
-        };
-
-        struct Stats {
-
-            Stats( const std::string& name = std::string() )
-            :   m_testsCount( 0 ),
-                m_failuresCount( 0 ),
-                m_disabledCount( 0 ),
-                m_errorsCount( 0 ),
-                m_timeInSeconds( 0 ),
-                m_name( name )
-            {}
-
-            std::size_t m_testsCount;
-            std::size_t m_failuresCount;
-            std::size_t m_disabledCount;
-            std::size_t m_errorsCount;
-            double      m_timeInSeconds;
-            std::string m_name;
-
-            std::vector<TestCaseStats> m_testCaseStats;
-        };
-
+    class JunitReporter : public CumulativeReporterBase {
     public:
-        JunitReporter( const IReporterConfig& config )
-        :   m_config( config ),
-            m_testSuiteStats( "AllTests" ),
-            m_currentStats( &m_testSuiteStats )
-        {}
+        JunitReporter( ReporterConfig const& _config )
+        :   CumulativeReporterBase( _config ),
+            xml( _config.stream() )
+        {
+            m_reporterPrefs.shouldRedirectStdOut = true;
+        }
+
+        virtual ~JunitReporter() CATCH_OVERRIDE;
 
         static std::string getDescription() {
             return "Reports test results in an XML format that looks like Ant's junitreport target";
         }
 
-    private: // IReporter
-
-        virtual bool shouldRedirectStdout() const {
+        virtual void noMatchingTestCases( std::string const& /*spec*/ ) CATCH_OVERRIDE {}
+
+        virtual void testRunStarting( TestRunInfo const& runInfo ) CATCH_OVERRIDE {
+            CumulativeReporterBase::testRunStarting( runInfo );
+            xml.startElement( "testsuites" );
+        }
+
+        virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE {
+            suiteTimer.start();
+            stdOutForSuite.str("");
+            stdErrForSuite.str("");
+            unexpectedExceptions = 0;
+            CumulativeReporterBase::testGroupStarting( groupInfo );
+        }
+
+        virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE {
+            if( assertionStats.assertionResult.getResultType() == ResultWas::ThrewException )
+                unexpectedExceptions++;
+            return CumulativeReporterBase::assertionEnded( assertionStats );
+        }
+
+        virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE {
+            stdOutForSuite << testCaseStats.stdOut;
+            stdErrForSuite << testCaseStats.stdErr;
+            CumulativeReporterBase::testCaseEnded( testCaseStats );
+        }
+
+        virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE {
+            double suiteTime = suiteTimer.getElapsedSeconds();
+            CumulativeReporterBase::testGroupEnded( testGroupStats );
+            writeGroup( *m_testGroups.back(), suiteTime );
+        }
+
+        virtual void testRunEndedCumulative() CATCH_OVERRIDE {
+            xml.endElement();
+        }
+
+        void writeGroup( TestGroupNode const& groupNode, double suiteTime ) {
+            XmlWriter::ScopedElement e = xml.scopedElement( "testsuite" );
+            TestGroupStats const& stats = groupNode.value;
+            xml.writeAttribute( "name", stats.groupInfo.name );
+            xml.writeAttribute( "errors", unexpectedExceptions );
+            xml.writeAttribute( "failures", stats.totals.assertions.failed-unexpectedExceptions );
+            xml.writeAttribute( "tests", stats.totals.assertions.total() );
+            xml.writeAttribute( "hostname", "tbd" ); // !TBD
+            if( m_config->showDurations() == ShowDurations::Never )
+                xml.writeAttribute( "time", "" );
+            else
+                xml.writeAttribute( "time", suiteTime );
+            xml.writeAttribute( "timestamp", "tbd" ); // !TBD
+
+            // Write test cases
+            for( TestGroupNode::ChildNodes::const_iterator
+                    it = groupNode.children.begin(), itEnd = groupNode.children.end();
+                    it != itEnd;
+                    ++it )
+                writeTestCase( **it );
+
+            xml.scopedElement( "system-out" ).writeText( trim( stdOutForSuite.str() ), false );
+            xml.scopedElement( "system-err" ).writeText( trim( stdErrForSuite.str() ), false );
+        }
+
+        void writeTestCase( TestCaseNode const& testCaseNode ) {
+            TestCaseStats const& stats = testCaseNode.value;
+
+            // All test cases have exactly one section - which represents the
+            // test case itself. That section may have 0-n nested sections
+            assert( testCaseNode.children.size() == 1 );
+            SectionNode const& rootSection = *testCaseNode.children.front();
+
+            std::string className = stats.testInfo.className;
+
+            if( className.empty() ) {
+                if( rootSection.childSections.empty() )
+                    className = "global";
+            }
+            writeSection( className, "", rootSection );
+        }
+
+        void writeSection(  std::string const& className,
+                            std::string const& rootName,
+                            SectionNode const& sectionNode ) {
+            std::string name = trim( sectionNode.stats.sectionInfo.name );
+            if( !rootName.empty() )
+                name = rootName + "/" + name;
+
+            if( !sectionNode.assertions.empty() ||
+                !sectionNode.stdOut.empty() ||
+                !sectionNode.stdErr.empty() ) {
+                XmlWriter::ScopedElement e = xml.scopedElement( "testcase" );
+                if( className.empty() ) {
+                    xml.writeAttribute( "classname", name );
+                    xml.writeAttribute( "name", "root" );
+                }
+                else {
+                    xml.writeAttribute( "classname", className );
+                    xml.writeAttribute( "name", name );
+                }
+                xml.writeAttribute( "time", Catch::toString( sectionNode.stats.durationInSeconds ) );
+
+                writeAssertions( sectionNode );
+
+                if( !sectionNode.stdOut.empty() )
+                    xml.scopedElement( "system-out" ).writeText( trim( sectionNode.stdOut ), false );
+                if( !sectionNode.stdErr.empty() )
+                    xml.scopedElement( "system-err" ).writeText( trim( sectionNode.stdErr ), false );
+            }
+            for( SectionNode::ChildSections::const_iterator
+                    it = sectionNode.childSections.begin(),
+                    itEnd = sectionNode.childSections.end();
+                    it != itEnd;
+                    ++it )
+                if( className.empty() )
+                    writeSection( name, "", **it );
+                else
+                    writeSection( className, name, **it );
+        }
+
+        void writeAssertions( SectionNode const& sectionNode ) {
+            for( SectionNode::Assertions::const_iterator
+                    it = sectionNode.assertions.begin(), itEnd = sectionNode.assertions.end();
+                    it != itEnd;
+                    ++it )
+                writeAssertion( *it );
+        }
+        void writeAssertion( AssertionStats const& stats ) {
+            AssertionResult const& result = stats.assertionResult;
+            if( !result.isOk() ) {
+                std::string elementName;
+                switch( result.getResultType() ) {
+                    case ResultWas::ThrewException:
+                    case ResultWas::FatalErrorCondition:
+                        elementName = "error";
+                        break;
+                    case ResultWas::ExplicitFailure:
+                        elementName = "failure";
+                        break;
+                    case ResultWas::ExpressionFailed:
+                        elementName = "failure";
+                        break;
+                    case ResultWas::DidntThrowException:
+                        elementName = "failure";
+                        break;
+
+                    // We should never see these here:
+                    case ResultWas::Info:
+                    case ResultWas::Warning:
+                    case ResultWas::Ok:
+                    case ResultWas::Unknown:
+                    case ResultWas::FailureBit:
+                    case ResultWas::Exception:
+                        elementName = "internalError";
+                        break;
+                }
+
+                XmlWriter::ScopedElement e = xml.scopedElement( elementName );
+
+                xml.writeAttribute( "message", result.getExpandedExpression() );
+                xml.writeAttribute( "type", result.getTestMacroName() );
+
+                std::ostringstream oss;
+                if( !result.getMessage().empty() )
+                    oss << result.getMessage() << "\n";
+                for( std::vector<MessageInfo>::const_iterator
+                        it = stats.infoMessages.begin(),
+                        itEnd = stats.infoMessages.end();
+                            it != itEnd;
+                            ++it )
+                    if( it->type == ResultWas::Info )
+                        oss << it->message << "\n";
+
+                oss << "at " << result.getSourceInfo();
+                xml.writeText( oss.str(), false );
+            }
+        }
+
+        XmlWriter xml;
+        Timer suiteTimer;
+        std::ostringstream stdOutForSuite;
+        std::ostringstream stdErrForSuite;
+        unsigned int unexpectedExceptions;
+    };
+
+    INTERNAL_CATCH_REGISTER_REPORTER( "junit", JunitReporter )
+
+} // end namespace Catch
+
+// #included from: ../reporters/catch_reporter_console.hpp
+#define TWOBLUECUBES_CATCH_REPORTER_CONSOLE_HPP_INCLUDED
+
+namespace Catch {
+
+    struct ConsoleReporter : StreamingReporterBase {
+        ConsoleReporter( ReporterConfig const& _config )
+        :   StreamingReporterBase( _config ),
+            m_headerPrinted( false )
+        {}
+
+        virtual ~ConsoleReporter() CATCH_OVERRIDE;
+        static std::string getDescription() {
+            return "Reports test results as plain lines of text";
+        }
+
+        virtual void noMatchingTestCases( std::string const& spec ) CATCH_OVERRIDE {
+            stream << "No test cases matched '" << spec << "'" << std::endl;
+        }
+
+        virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE {
+        }
+
+        virtual bool assertionEnded( AssertionStats const& _assertionStats ) CATCH_OVERRIDE {
+            AssertionResult const& result = _assertionStats.assertionResult;
+
+            bool printInfoMessages = true;
+
+            // Drop out if result was successful and we're not printing those
+            if( !m_config->includeSuccessfulResults() && result.isOk() ) {
+                if( result.getResultType() != ResultWas::Warning )
+                    return false;
+                printInfoMessages = false;
+            }
+
+            lazyPrint();
+
+            AssertionPrinter printer( stream, _assertionStats, printInfoMessages );
+            printer.print();
+            stream << std::endl;
             return true;
         }
 
-        virtual void StartTesting(){}
-
-        virtual void StartGroup( const std::string& groupName ) {
-            m_statsForSuites.push_back( Stats( groupName ) );
-            m_currentStats = &m_statsForSuites.back();
-        }
-
-        virtual void EndGroup( const std::string&, const Totals& totals ) {
-            m_currentStats->m_testsCount = totals.assertions.total();
-            m_currentStats = &m_testSuiteStats;
-        }
-
-        virtual void StartSection( const std::string&, const std::string& ){}
-
-        virtual void EndSection( const std::string&, const Counts& ){}
-
-        virtual void StartTestCase( const Catch::TestCaseInfo& testInfo ) {
-            m_currentStats->m_testCaseStats.push_back( TestCaseStats( testInfo.getName() ) );
-        }
-
-        virtual void Result( const Catch::ResultInfo& resultInfo ) {
-            if( resultInfo.getResultType() != ResultWas::Ok || m_config.includeSuccessfulResults() ) {
-                TestCaseStats& testCaseStats = m_currentStats->m_testCaseStats.back();
-                TestStats stats;
-                std::ostringstream oss;
-                if( !resultInfo.getMessage().empty() )
-                    oss << resultInfo.getMessage() << " at ";
-                oss << SourceLineInfo( resultInfo.getFilename(), resultInfo.getLine() );
-                stats.m_content = oss.str();
-                stats.m_message = resultInfo.getExpandedExpression();
-                stats.m_resultType = resultInfo.getTestMacroName();
-
-                switch( resultInfo.getResultType() ) {
+        virtual void sectionStarting( SectionInfo const& _sectionInfo ) CATCH_OVERRIDE {
+            m_headerPrinted = false;
+            StreamingReporterBase::sectionStarting( _sectionInfo );
+        }
+        virtual void sectionEnded( SectionStats const& _sectionStats ) CATCH_OVERRIDE {
+            if( _sectionStats.missingAssertions ) {
+                lazyPrint();
+                Colour colour( Colour::ResultError );
+                if( m_sectionStack.size() > 1 )
+                    stream << "\nNo assertions in section";
+                else
+                    stream << "\nNo assertions in test case";
+                stream << " '" << _sectionStats.sectionInfo.name << "'\n" << std::endl;
+            }
+            if( m_headerPrinted ) {
+                if( m_config->showDurations() == ShowDurations::Always )
+                    stream << "Completed in " << _sectionStats.durationInSeconds << "s" << std::endl;
+                m_headerPrinted = false;
+            }
+            else {
+                if( m_config->showDurations() == ShowDurations::Always )
+                    stream << _sectionStats.sectionInfo.name << " completed in " << _sectionStats.durationInSeconds << "s" << std::endl;
+            }
+            StreamingReporterBase::sectionEnded( _sectionStats );
+        }
+
+        virtual void testCaseEnded( TestCaseStats const& _testCaseStats ) CATCH_OVERRIDE {
+            StreamingReporterBase::testCaseEnded( _testCaseStats );
+            m_headerPrinted = false;
+        }
+        virtual void testGroupEnded( TestGroupStats const& _testGroupStats ) CATCH_OVERRIDE {
+            if( currentGroupInfo.used ) {
+                printSummaryDivider();
+                stream << "Summary for group '" << _testGroupStats.groupInfo.name << "':\n";
+                printTotals( _testGroupStats.totals );
+                stream << "\n" << std::endl;
+            }
+            StreamingReporterBase::testGroupEnded( _testGroupStats );
+        }
+        virtual void testRunEnded( TestRunStats const& _testRunStats ) CATCH_OVERRIDE {
+            printTotalsDivider( _testRunStats.totals );
+            printTotals( _testRunStats.totals );
+            stream << std::endl;
+            StreamingReporterBase::testRunEnded( _testRunStats );
+        }
+
+    private:
+
+        class AssertionPrinter {
+            void operator= ( AssertionPrinter const& );
+        public:
+            AssertionPrinter( std::ostream& _stream, AssertionStats const& _stats, bool _printInfoMessages )
+            :   stream( _stream ),
+                stats( _stats ),
+                result( _stats.assertionResult ),
+                colour( Colour::None ),
+                message( result.getMessage() ),
+                messages( _stats.infoMessages ),
+                printInfoMessages( _printInfoMessages )
+            {
+                switch( result.getResultType() ) {
+                    case ResultWas::Ok:
+                        colour = Colour::Success;
+                        passOrFail = "PASSED";
+                        //if( result.hasMessage() )
+                        if( _stats.infoMessages.size() == 1 )
+                            messageLabel = "with message";
+                        if( _stats.infoMessages.size() > 1 )
+                            messageLabel = "with messages";
+                        break;
+                    case ResultWas::ExpressionFailed:
+                        if( result.isOk() ) {
+                            colour = Colour::Success;
+                            passOrFail = "FAILED - but was ok";
+                        }
+                        else {
+                            colour = Colour::Error;
+                            passOrFail = "FAILED";
+                        }
+                        if( _stats.infoMessages.size() == 1 )
+                            messageLabel = "with message";
+                        if( _stats.infoMessages.size() > 1 )
+                            messageLabel = "with messages";
+                        break;
                     case ResultWas::ThrewException:
-                        stats.m_element = "error";
-                        m_currentStats->m_errorsCount++;
+                        colour = Colour::Error;
+                        passOrFail = "FAILED";
+                        messageLabel = "due to unexpected exception with message";
+                        break;
+                    case ResultWas::FatalErrorCondition:
+                        colour = Colour::Error;
+                        passOrFail = "FAILED";
+                        messageLabel = "due to a fatal error condition";
+                        break;
+                    case ResultWas::DidntThrowException:
+                        colour = Colour::Error;
+                        passOrFail = "FAILED";
+                        messageLabel = "because no exception was thrown where one was expected";
                         break;
                     case ResultWas::Info:
-                        stats.m_element = "info"; // !TBD ?
+                        messageLabel = "info";
                         break;
                     case ResultWas::Warning:
-                        stats.m_element = "warning"; // !TBD ?
+                        messageLabel = "warning";
                         break;
                     case ResultWas::ExplicitFailure:
-                        stats.m_element = "failure";
-                        m_currentStats->m_failuresCount++;
+                        passOrFail = "FAILED";
+                        colour = Colour::Error;
+                        if( _stats.infoMessages.size() == 1 )
+                            messageLabel = "explicitly with message";
+                        if( _stats.infoMessages.size() > 1 )
+                            messageLabel = "explicitly with messages";
                         break;
-                    case ResultWas::ExpressionFailed:
-                        stats.m_element = "failure";
-                        m_currentStats->m_failuresCount++;
-                        break;
-                    case ResultWas::Ok:
-                        stats.m_element = "success";
-                        break;
+                    // These cases are here to prevent compiler warnings
                     case ResultWas::Unknown:
                     case ResultWas::FailureBit:
                     case ResultWas::Exception:
-                    case ResultWas::DidntThrowException:
-                    default:
-                        stats.m_element = "unknown";
+                        passOrFail = "** internal error **";
+                        colour = Colour::Error;
                         break;
                 }
-                testCaseStats.m_testStats.push_back( stats );
-            }
-        }
-
-        virtual void EndTestCase( const Catch::TestCaseInfo&, const Totals&, const std::string& stdOut, const std::string& stdErr ) {
-            if( !stdOut.empty() )
-                m_stdOut << stdOut << "\n";
-            if( !stdErr.empty() )
-                m_stdErr << stdErr << "\n";
-        }
-
-        virtual void Aborted() {
-            // !TBD
-        }
-
-        virtual void EndTesting( const Totals& ) {
-            std::ostream& str = m_config.stream();
+            }
+
+            void print() const {
+                printSourceInfo();
+                if( stats.totals.assertions.total() > 0 ) {
+                    if( result.isOk() )
+                        stream << "\n";
+                    printResultType();
+                    printOriginalExpression();
+                    printReconstructedExpression();
+                }
+                else {
+                    stream << "\n";
+                }
+                printMessage();
+            }
+
+        private:
+            void printResultType() const {
+                if( !passOrFail.empty() ) {
+                    Colour colourGuard( colour );
+                    stream << passOrFail << ":\n";
+                }
+            }
+            void printOriginalExpression() const {
+                if( result.hasExpression() ) {
+                    Colour colourGuard( Colour::OriginalExpression );
+                    stream  << "  ";
+                    stream << result.getExpressionInMacro();
+                    stream << "\n";
+                }
+            }
+            void printReconstructedExpression() const {
+                if( result.hasExpandedExpression() ) {
+                    stream << "with expansion:\n";
+                    Colour colourGuard( Colour::ReconstructedExpression );
+                    stream << Text( result.getExpandedExpression(), TextAttributes().setIndent(2) ) << "\n";
+                }
+            }
+            void printMessage() const {
+                if( !messageLabel.empty() )
+                    stream << messageLabel << ":" << "\n";
+                for( std::vector<MessageInfo>::const_iterator it = messages.begin(), itEnd = messages.end();
+                        it != itEnd;
+                        ++it ) {
+                    // If this assertion is a warning ignore any INFO messages
+                    if( printInfoMessages || it->type != ResultWas::Info )
+                        stream << Text( it->message, TextAttributes().setIndent(2) ) << "\n";
+                }
+            }
+            void printSourceInfo() const {
+                Colour colourGuard( Colour::FileName );
+                stream << result.getSourceInfo() << ": ";
+            }
+
+            std::ostream& stream;
+            AssertionStats const& stats;
+            AssertionResult const& result;
+            Colour::Code colour;
+            std::string passOrFail;
+            std::string messageLabel;
+            std::string message;
+            std::vector<MessageInfo> messages;
+            bool printInfoMessages;
+        };
+
+        void lazyPrint() {
+
+            if( !currentTestRunInfo.used )
+                lazyPrintRunInfo();
+            if( !currentGroupInfo.used )
+                lazyPrintGroupInfo();
+
+            if( !m_headerPrinted ) {
+                printTestCaseAndSectionHeader();
+                m_headerPrinted = true;
+            }
+        }
+        void lazyPrintRunInfo() {
+            stream  << "\n" << getLineOfChars<'~'>() << "\n";
+            Colour colour( Colour::SecondaryText );
+            stream  << currentTestRunInfo->name
+                    << " is a Catch v"  << libraryVersion << " host application.\n"
+                    << "Run with -? for options\n\n";
+
+            if( m_config->rngSeed() != 0 )
+                stream << "Randomness seeded to: " << m_config->rngSeed() << "\n\n";
+
+            currentTestRunInfo.used = true;
+        }
+        void lazyPrintGroupInfo() {
+            if( !currentGroupInfo->name.empty() && currentGroupInfo->groupsCounts > 1 ) {
+                printClosedHeader( "Group: " + currentGroupInfo->name );
+                currentGroupInfo.used = true;
+            }
+        }
+        void printTestCaseAndSectionHeader() {
+            assert( !m_sectionStack.empty() );
+            printOpenHeader( currentTestCaseInfo->name );
+
+            if( m_sectionStack.size() > 1 ) {
+                Colour colourGuard( Colour::Headers );
+
+                std::vector<SectionInfo>::const_iterator
+                    it = m_sectionStack.begin()+1, // Skip first section (test case)
+                    itEnd = m_sectionStack.end();
+                for( ; it != itEnd; ++it )
+                    printHeaderString( it->name, 2 );
+            }
+
+            SourceLineInfo lineInfo = m_sectionStack.front().lineInfo;
+
+            if( !lineInfo.empty() ){
+                stream << getLineOfChars<'-'>() << "\n";
+                Colour colourGuard( Colour::FileName );
+                stream << lineInfo << "\n";
+            }
+            stream << getLineOfChars<'.'>() << "\n" << std::endl;
+        }
+
+        void printClosedHeader( std::string const& _name ) {
+            printOpenHeader( _name );
+            stream << getLineOfChars<'.'>() << "\n";
+        }
+        void printOpenHeader( std::string const& _name ) {
+            stream  << getLineOfChars<'-'>() << "\n";
             {
-                XmlWriter xml( str );
-
-                if( m_statsForSuites.size() > 0 )
-                    xml.startElement( "testsuites" );
-
-                std::vector<Stats>::const_iterator it = m_statsForSuites.begin();
-                std::vector<Stats>::const_iterator itEnd = m_statsForSuites.end();
-
-                for(; it != itEnd; ++it ) {
-                    XmlWriter::ScopedElement e = xml.scopedElement( "testsuite" );
-                    xml.writeAttribute( "name", it->m_name );
-                    xml.writeAttribute( "errors", it->m_errorsCount );
-                    xml.writeAttribute( "failures", it->m_failuresCount );
-                    xml.writeAttribute( "tests", it->m_testsCount );
-                    xml.writeAttribute( "hostname", "tbd" );
-                    xml.writeAttribute( "time", "tbd" );
-                    xml.writeAttribute( "timestamp", "tbd" );
-
-                    OutputTestCases( xml, *it );
+                Colour colourGuard( Colour::Headers );
+                printHeaderString( _name );
+            }
+        }
+
+        // if string has a : in first line will set indent to follow it on
+        // subsequent lines
+        void printHeaderString( std::string const& _string, std::size_t indent = 0 ) {
+            std::size_t i = _string.find( ": " );
+            if( i != std::string::npos )
+                i+=2;
+            else
+                i = 0;
+            stream << Text( _string, TextAttributes()
+                                        .setIndent( indent+i)
+                                        .setInitialIndent( indent ) ) << "\n";
+        }
+
+        struct SummaryColumn {
+
+            SummaryColumn( std::string const& _label, Colour::Code _colour )
+            :   label( _label ),
+                colour( _colour )
+            {}
+            SummaryColumn addRow( std::size_t count ) {
+                std::ostringstream oss;
+                oss << count;
+                std::string row = oss.str();
+                for( std::vector<std::string>::iterator it = rows.begin(); it != rows.end(); ++it ) {
+                    while( it->size() < row.size() )
+                        *it = " " + *it;
+                    while( it->size() > row.size() )
+                        row = " " + row;
                 }
-
-                xml.scopedElement( "system-out" ).writeText( trim( m_stdOut.str() ) );
-                xml.scopedElement( "system-err" ).writeText( trim( m_stdErr.str() ) );
-            }
-        }
-
-        void OutputTestCases( XmlWriter& xml, const Stats& stats ) {
-            std::vector<TestCaseStats>::const_iterator it = stats.m_testCaseStats.begin();
-            std::vector<TestCaseStats>::const_iterator itEnd = stats.m_testCaseStats.end();
-            for(; it != itEnd; ++it ) {
-                xml.writeBlankLine();
-                xml.writeComment( "Test case" );
-
-                XmlWriter::ScopedElement e = xml.scopedElement( "testcase" );
-                xml.writeAttribute( "classname", it->m_className );
-                xml.writeAttribute( "name", it->m_name );
-                xml.writeAttribute( "time", "tbd" );
-
-                OutputTestResult( xml, *it );
-            }
-        }
-
-        void OutputTestResult( XmlWriter& xml, const TestCaseStats& stats ) {
-            std::vector<TestStats>::const_iterator it = stats.m_testStats.begin();
-            std::vector<TestStats>::const_iterator itEnd = stats.m_testStats.end();
-            for(; it != itEnd; ++it ) {
-                if( it->m_element != "success" ) {
-                    XmlWriter::ScopedElement e = xml.scopedElement( it->m_element );
-
-                    xml.writeAttribute( "message", it->m_message );
-                    xml.writeAttribute( "type", it->m_resultType );
-                    if( !it->m_content.empty() )
-                        xml.writeText( it->m_content );
+                rows.push_back( row );
+                return *this;
+            }
+
+            std::string label;
+            Colour::Code colour;
+            std::vector<std::string> rows;
+
+        };
+
+        void printTotals( Totals const& totals ) {
+            if( totals.testCases.total() == 0 ) {
+                stream << Colour( Colour::Warning ) << "No tests ran\n";
+            }
+            else if( totals.assertions.total() > 0 && totals.testCases.allPassed() ) {
+                stream << Colour( Colour::ResultSuccess ) << "All tests passed";
+                stream << " ("
+                        << pluralise( totals.assertions.passed, "assertion" ) << " in "
+                        << pluralise( totals.testCases.passed, "test case" ) << ")"
+                        << "\n";
+            }
+            else {
+
+                std::vector<SummaryColumn> columns;
+                columns.push_back( SummaryColumn( "", Colour::None )
+                                        .addRow( totals.testCases.total() )
+                                        .addRow( totals.assertions.total() ) );
+                columns.push_back( SummaryColumn( "passed", Colour::Success )
+                                        .addRow( totals.testCases.passed )
+                                        .addRow( totals.assertions.passed ) );
+                columns.push_back( SummaryColumn( "failed", Colour::ResultError )
+                                        .addRow( totals.testCases.failed )
+                                        .addRow( totals.assertions.failed ) );
+                columns.push_back( SummaryColumn( "failed as expected", Colour::ResultExpectedFailure )
+                                        .addRow( totals.testCases.failedButOk )
+                                        .addRow( totals.assertions.failedButOk ) );
+
+                printSummaryRow( "test cases", columns, 0 );
+                printSummaryRow( "assertions", columns, 1 );
+            }
+        }
+        void printSummaryRow( std::string const& label, std::vector<SummaryColumn> const& cols, std::size_t row ) {
+            for( std::vector<SummaryColumn>::const_iterator it = cols.begin(); it != cols.end(); ++it ) {
+                std::string value = it->rows[row];
+                if( it->label.empty() ) {
+                    stream << label << ": ";
+                    if( value != "0" )
+                        stream << value;
+                    else
+                        stream << Colour( Colour::Warning ) << "- none -";
                 }
-            }
+                else if( value != "0" ) {
+                    stream  << Colour( Colour::LightGrey ) << " | ";
+                    stream  << Colour( it->colour )
+                            << value << " " << it->label;
+                }
+            }
+            stream << "\n";
+        }
+
+        static std::size_t makeRatio( std::size_t number, std::size_t total ) {
+            std::size_t ratio = total > 0 ? CATCH_CONFIG_CONSOLE_WIDTH * number/ total : 0;
+            return ( ratio == 0 && number > 0 ) ? 1 : ratio;
+        }
+        static std::size_t& findMax( std::size_t& i, std::size_t& j, std::size_t& k ) {
+            if( i > j && i > k )
+                return i;
+            else if( j > k )
+                return j;
+            else
+                return k;
+        }
+
+        void printTotalsDivider( Totals const& totals ) {
+            if( totals.testCases.total() > 0 ) {
+                std::size_t failedRatio = makeRatio( totals.testCases.failed, totals.testCases.total() );
+                std::size_t failedButOkRatio = makeRatio( totals.testCases.failedButOk, totals.testCases.total() );
+                std::size_t passedRatio = makeRatio( totals.testCases.passed, totals.testCases.total() );
+                while( failedRatio + failedButOkRatio + passedRatio < CATCH_CONFIG_CONSOLE_WIDTH-1 )
+                    findMax( failedRatio, failedButOkRatio, passedRatio )++;
+                while( failedRatio + failedButOkRatio + passedRatio > CATCH_CONFIG_CONSOLE_WIDTH-1 )
+                    findMax( failedRatio, failedButOkRatio, passedRatio )--;
+
+                stream << Colour( Colour::Error ) << std::string( failedRatio, '=' );
+                stream << Colour( Colour::ResultExpectedFailure ) << std::string( failedButOkRatio, '=' );
+                if( totals.testCases.allPassed() )
+                    stream << Colour( Colour::ResultSuccess ) << std::string( passedRatio, '=' );
+                else
+                    stream << Colour( Colour::Success ) << std::string( passedRatio, '=' );
+            }
+            else {
+                stream << Colour( Colour::Warning ) << std::string( CATCH_CONFIG_CONSOLE_WIDTH-1, '=' );
+            }
+            stream << "\n";
+        }
+        void printSummaryDivider() {
+            stream << getLineOfChars<'-'>() << "\n";
         }
 
     private:
-        const IReporterConfig& m_config;
-        bool m_currentTestSuccess;
-
-        Stats m_testSuiteStats;
-        Stats* m_currentStats;
-        std::vector<Stats> m_statsForSuites;
-        std::ostringstream m_stdOut;
-        std::ostringstream m_stdErr;
-    };
+        bool m_headerPrinted;
+    };
+
+    INTERNAL_CATCH_REGISTER_REPORTER( "console", ConsoleReporter )
 
 } // end namespace Catch
 
-#include <fstream>
-#include <stdlib.h>
-#include <limits>
+// #included from: ../reporters/catch_reporter_compact.hpp
+#define TWOBLUECUBES_CATCH_REPORTER_COMPACT_HPP_INCLUDED
 
 namespace Catch {
 
-    INTERNAL_CATCH_REGISTER_REPORTER( "basic", BasicReporter )
-    INTERNAL_CATCH_REGISTER_REPORTER( "xml", XmlReporter )
-    INTERNAL_CATCH_REGISTER_REPORTER( "junit", JunitReporter )
-
-    inline int Main( Config& config ) {
-
-        // Handle list request
-        if( config.listWhat() != List::None )
-            return List( config );
-
-        // Open output file, if specified
-        std::ofstream ofs;
-        if( !config.getFilename().empty() ) {
-            ofs.open( config.getFilename().c_str() );
-            if( ofs.fail() ) {
-                std::cerr << "Unable to open file: '" << config.getFilename() << "'" << std::endl;
-                return (std::numeric_limits<int>::max)();
-            }
-            config.setStreamBuf( ofs.rdbuf() );
-        }
-
-        int result = 0;
-
-        // Scope here for the Runner so it can use the context before it is cleaned-up
-        {
-            Runner runner( config );
-
-            // Run test specs specified on the command line - or default to all
-            if( !config.testsSpecified() ) {
-                config.getReporter()->StartGroup( "" );
-                runner.runAll();
-                config.getReporter()->EndGroup( "", runner.getTotals() );
+    struct CompactReporter : StreamingReporterBase {
+
+        CompactReporter( ReporterConfig const& _config )
+        : StreamingReporterBase( _config )
+        {}
+
+        virtual ~CompactReporter();
+
+        static std::string getDescription() {
+            return "Reports test results on a single line, suitable for IDEs";
+        }
+
+        virtual ReporterPreferences getPreferences() const {
+            ReporterPreferences prefs;
+            prefs.shouldRedirectStdOut = false;
+            return prefs;
+        }
+
+        virtual void noMatchingTestCases( std::string const& spec ) {
+            stream << "No test cases matched '" << spec << "'" << std::endl;
+        }
+
+        virtual void assertionStarting( AssertionInfo const& ) {
+        }
+
+        virtual bool assertionEnded( AssertionStats const& _assertionStats ) {
+            AssertionResult const& result = _assertionStats.assertionResult;
+
+            bool printInfoMessages = true;
+
+            // Drop out if result was successful and we're not printing those
+            if( !m_config->includeSuccessfulResults() && result.isOk() ) {
+                if( result.getResultType() != ResultWas::Warning )
+                    return false;
+                printInfoMessages = false;
+            }
+
+            AssertionPrinter printer( stream, _assertionStats, printInfoMessages );
+            printer.print();
+
+            stream << std::endl;
+            return true;
+        }
+
+        virtual void testRunEnded( TestRunStats const& _testRunStats ) {
+            printTotals( _testRunStats.totals );
+            stream << "\n" << std::endl;
+            StreamingReporterBase::testRunEnded( _testRunStats );
+        }
+
+    private:
+        class AssertionPrinter {
+            void operator= ( AssertionPrinter const& );
+        public:
+            AssertionPrinter( std::ostream& _stream, AssertionStats const& _stats, bool _printInfoMessages )
+            : stream( _stream )
+            , stats( _stats )
+            , result( _stats.assertionResult )
+            , messages( _stats.infoMessages )
+            , itMessage( _stats.infoMessages.begin() )
+            , printInfoMessages( _printInfoMessages )
+            {}
+
+            void print() {
+                printSourceInfo();
+
+                itMessage = messages.begin();
+
+                switch( result.getResultType() ) {
+                    case ResultWas::Ok:
+                        printResultType( Colour::ResultSuccess, passedString() );
+                        printOriginalExpression();
+                        printReconstructedExpression();
+                        if ( ! result.hasExpression() )
+                            printRemainingMessages( Colour::None );
+                        else
+                            printRemainingMessages();
+                        break;
+                    case ResultWas::ExpressionFailed:
+                        if( result.isOk() )
+                            printResultType( Colour::ResultSuccess, failedString() + std::string( " - but was ok" ) );
+                        else
+                            printResultType( Colour::Error, failedString() );
+                        printOriginalExpression();
+                        printReconstructedExpression();
+                        printRemainingMessages();
+                        break;
+                    case ResultWas::ThrewException:
+                        printResultType( Colour::Error, failedString() );
+                        printIssue( "unexpected exception with message:" );
+                        printMessage();
+                        printExpressionWas();
+                        printRemainingMessages();
+                        break;
+                    case ResultWas::FatalErrorCondition:
+                        printResultType( Colour::Error, failedString() );
+                        printIssue( "fatal error condition with message:" );
+                        printMessage();
+                        printExpressionWas();
+                        printRemainingMessages();
+                        break;
+                    case ResultWas::DidntThrowException:
+                        printResultType( Colour::Error, failedString() );
+                        printIssue( "expected exception, got none" );
+                        printExpressionWas();
+                        printRemainingMessages();
+                        break;
+                    case ResultWas::Info:
+                        printResultType( Colour::None, "info" );
+                        printMessage();
+                        printRemainingMessages();
+                        break;
+                    case ResultWas::Warning:
+                        printResultType( Colour::None, "warning" );
+                        printMessage();
+                        printRemainingMessages();
+                        break;
+                    case ResultWas::ExplicitFailure:
+                        printResultType( Colour::Error, failedString() );
+                        printIssue( "explicitly" );
+                        printRemainingMessages( Colour::None );
+                        break;
+                    // These cases are here to prevent compiler warnings
+                    case ResultWas::Unknown:
+                    case ResultWas::FailureBit:
+                    case ResultWas::Exception:
+                        printResultType( Colour::Error, "** internal error **" );
+                        break;
+                }
+            }
+
+        private:
+            // Colour::LightGrey
+
+            static Colour::Code dimColour() { return Colour::FileName; }
+
+#ifdef CATCH_PLATFORM_MAC
+            static const char* failedString() { return "FAILED"; }
+            static const char* passedString() { return "PASSED"; }
+#else
+            static const char* failedString() { return "failed"; }
+            static const char* passedString() { return "passed"; }
+#endif
+
+            void printSourceInfo() const {
+                Colour colourGuard( Colour::FileName );
+                stream << result.getSourceInfo() << ":";
+            }
+
+            void printResultType( Colour::Code colour, std::string passOrFail ) const {
+                if( !passOrFail.empty() ) {
+                    {
+                        Colour colourGuard( colour );
+                        stream << " " << passOrFail;
+                    }
+                    stream << ":";
+                }
+            }
+
+            void printIssue( std::string issue ) const {
+                stream << " " << issue;
+            }
+
+            void printExpressionWas() {
+                if( result.hasExpression() ) {
+                    stream << ";";
+                    {
+                        Colour colour( dimColour() );
+                        stream << " expression was:";
+                    }
+                    printOriginalExpression();
+                }
+            }
+
+            void printOriginalExpression() const {
+                if( result.hasExpression() ) {
+                    stream << " " << result.getExpression();
+                }
+            }
+
+            void printReconstructedExpression() const {
+                if( result.hasExpandedExpression() ) {
+                    {
+                        Colour colour( dimColour() );
+                        stream << " for: ";
+                    }
+                    stream << result.getExpandedExpression();
+                }
+            }
+
+            void printMessage() {
+                if ( itMessage != messages.end() ) {
+                    stream << " '" << itMessage->message << "'";
+                    ++itMessage;
+                }
+            }
+
+            void printRemainingMessages( Colour::Code colour = dimColour() ) {
+                if ( itMessage == messages.end() )
+                    return;
+
+                // using messages.end() directly yields compilation error:
+                std::vector<MessageInfo>::const_iterator itEnd = messages.end();
+                const std::size_t N = static_cast<std::size_t>( std::distance( itMessage, itEnd ) );
+
+                {
+                    Colour colourGuard( colour );
+                    stream << " with " << pluralise( N, "message" ) << ":";
+                }
+
+                for(; itMessage != itEnd; ) {
+                    // If this assertion is a warning ignore any INFO messages
+                    if( printInfoMessages || itMessage->type != ResultWas::Info ) {
+                        stream << " '" << itMessage->message << "'";
+                        if ( ++itMessage != itEnd ) {
+                            Colour colourGuard( dimColour() );
+                            stream << " and";
+                        }
+                    }
+                }
+            }
+
+        private:
+            std::ostream& stream;
+            AssertionStats const& stats;
+            AssertionResult const& result;
+            std::vector<MessageInfo> messages;
+            std::vector<MessageInfo>::const_iterator itMessage;
+            bool printInfoMessages;
+        };
+
+        // Colour, message variants:
+        // - white: No tests ran.
+        // -   red: Failed [both/all] N test cases, failed [both/all] M assertions.
+        // - white: Passed [both/all] N test cases (no assertions).
+        // -   red: Failed N tests cases, failed M assertions.
+        // - green: Passed [both/all] N tests cases with M assertions.
+
+        std::string bothOrAll( std::size_t count ) const {
+            return count == 1 ? "" : count == 2 ? "both " : "all " ;
+        }
+
+        void printTotals( const Totals& totals ) const {
+            if( totals.testCases.total() == 0 ) {
+                stream << "No tests ran.";
+            }
+            else if( totals.testCases.failed == totals.testCases.total() ) {
+                Colour colour( Colour::ResultError );
+                const std::string qualify_assertions_failed =
+                    totals.assertions.failed == totals.assertions.total() ?
+                        bothOrAll( totals.assertions.failed ) : "";
+                stream <<
+                    "Failed " << bothOrAll( totals.testCases.failed )
+                              << pluralise( totals.testCases.failed, "test case"  ) << ", "
+                    "failed " << qualify_assertions_failed <<
+                                 pluralise( totals.assertions.failed, "assertion" ) << ".";
+            }
+            else if( totals.assertions.total() == 0 ) {
+                stream <<
+                    "Passed " << bothOrAll( totals.testCases.total() )
+                              << pluralise( totals.testCases.total(), "test case" )
+                              << " (no assertions).";
+            }
+            else if( totals.assertions.failed ) {
+                Colour colour( Colour::ResultError );
+                stream <<
+                    "Failed " << pluralise( totals.testCases.failed, "test case"  ) << ", "
+                    "failed " << pluralise( totals.assertions.failed, "assertion" ) << ".";
             }
             else {
-                // !TBD We should get all the testcases upfront, report any missing,
-                // then just run them
-                std::vector<std::string>::const_iterator it = config.getTestSpecs().begin();
-                std::vector<std::string>::const_iterator itEnd = config.getTestSpecs().end();
-                for(; it != itEnd; ++it ) {
-                    Totals prevTotals = runner.getTotals();
-                    config.getReporter()->StartGroup( *it );
-                    if( runner.runMatching( *it ) == 0 ) {
-                        // Use reporter?
-    //                    std::cerr << "\n[Unable to match any test cases with: " << *it << "]" << std::endl;
-                    }
-                    config.getReporter()->EndGroup( *it, runner.getTotals() - prevTotals );
-                }
-            }
-            result = static_cast<int>( runner.getTotals().assertions.failed );
-        }
-        Catch::Context::cleanUp();
-        return result;
-    }
-
-    inline void showUsage( std::ostream& os ) {
-        os  << "\t-l, --list <tests | reporters> [xml]\n"
-            << "\t-t, --test <testspec> [<testspec>...]\n"
-            << "\t-r, --reporter <reporter name>\n"
-            << "\t-o, --out <file name>|<%stream name>\n"
-            << "\t-s, --success\n"
-            << "\t-b, --break\n"
-            << "\t-n, --name <name>\n"
-            << "\t-a, --abort [#]\n\n"
-            << "For more detail usage please see: https://github.com/philsquared/Catch/wiki/Command-line" << std::endl;
-    }
-    inline void showHelp( std::string exeName ) {
-        std::string::size_type pos = exeName.find_last_of( "/\\" );
-        if( pos != std::string::npos ) {
-            exeName = exeName.substr( pos+1 );
-        }
-
-        std::cout << exeName << " is a CATCH host application. Options are as follows:\n\n";
-        showUsage( std::cout );
-    }
-
-    inline int Main( int argc, char* const argv[], Config& config ) {
-
-        parseIntoConfig( CommandParser( argc, argv ), config );
-
-        if( !config.getMessage().empty() ) {
-            std::cerr << config.getMessage() <<  + "\n\nUsage: ...\n\n";
-            showUsage( std::cerr );
-            Catch::Context::cleanUp();
-            return (std::numeric_limits<int>::max)();
-        }
-
-        // Handle help
-        if( config.showHelp() ) {
-            showHelp( argv[0] );
-            Catch::Context::cleanUp();
-            return 0;
-        }
-        return Main( config );
-    }
-
-    inline int Main( int argc, char* const argv[] ) {
-        Config config;
-// !TBD: This doesn't always work, for some reason
-//        if( isDebuggerActive() )
-//            config.useStream( "debug" );
-        return Main( argc, argv, config );
-    }
+                Colour colour( Colour::ResultSuccess );
+                stream <<
+                    "Passed " << bothOrAll( totals.testCases.passed )
+                              << pluralise( totals.testCases.passed, "test case"  ) <<
+                    " with "  << pluralise( totals.assertions.passed, "assertion" ) << ".";
+            }
+        }
+    };
+
+    INTERNAL_CATCH_REGISTER_REPORTER( "compact", CompactReporter )
 
 } // end namespace Catch
 
+namespace Catch {
+    // These are all here to avoid warnings about not having any out of line
+    // virtual methods
+    NonCopyable::~NonCopyable() {}
+    IShared::~IShared() {}
+    IStream::~IStream() CATCH_NOEXCEPT {}
+    FileStream::~FileStream() CATCH_NOEXCEPT {}
+    CoutStream::~CoutStream() CATCH_NOEXCEPT {}
+    DebugOutStream::~DebugOutStream() CATCH_NOEXCEPT {}
+    StreamBufBase::~StreamBufBase() CATCH_NOEXCEPT {}
+    IContext::~IContext() {}
+    IResultCapture::~IResultCapture() {}
+    ITestCase::~ITestCase() {}
+    ITestCaseRegistry::~ITestCaseRegistry() {}
+    IRegistryHub::~IRegistryHub() {}
+    IMutableRegistryHub::~IMutableRegistryHub() {}
+    IExceptionTranslator::~IExceptionTranslator() {}
+    IExceptionTranslatorRegistry::~IExceptionTranslatorRegistry() {}
+    IReporter::~IReporter() {}
+    IReporterFactory::~IReporterFactory() {}
+    IReporterRegistry::~IReporterRegistry() {}
+    IStreamingReporter::~IStreamingReporter() {}
+    AssertionStats::~AssertionStats() {}
+    SectionStats::~SectionStats() {}
+    TestCaseStats::~TestCaseStats() {}
+    TestGroupStats::~TestGroupStats() {}
+    TestRunStats::~TestRunStats() {}
+    CumulativeReporterBase::SectionNode::~SectionNode() {}
+    CumulativeReporterBase::~CumulativeReporterBase() {}
+
+    StreamingReporterBase::~StreamingReporterBase() {}
+    ConsoleReporter::~ConsoleReporter() {}
+    CompactReporter::~CompactReporter() {}
+    IRunner::~IRunner() {}
+    IMutableContext::~IMutableContext() {}
+    IConfig::~IConfig() {}
+    XmlReporter::~XmlReporter() {}
+    JunitReporter::~JunitReporter() {}
+    TestRegistry::~TestRegistry() {}
+    FreeFunctionTestCase::~FreeFunctionTestCase() {}
+    IGeneratorInfo::~IGeneratorInfo() {}
+    IGeneratorsForTest::~IGeneratorsForTest() {}
+    WildcardPattern::~WildcardPattern() {}
+    TestSpec::Pattern::~Pattern() {}
+    TestSpec::NamePattern::~NamePattern() {}
+    TestSpec::TagPattern::~TagPattern() {}
+    TestSpec::ExcludedPattern::~ExcludedPattern() {}
+
+    Matchers::Impl::StdString::Equals::~Equals() {}
+    Matchers::Impl::StdString::Contains::~Contains() {}
+    Matchers::Impl::StdString::StartsWith::~StartsWith() {}
+    Matchers::Impl::StdString::EndsWith::~EndsWith() {}
+
+    void Config::dummy() {}
+
+    namespace TestCaseTracking {
+        ITracker::~ITracker() {}
+        TrackerBase::~TrackerBase() {}
+        SectionTracker::~SectionTracker() {}
+        IndexTracker::~IndexTracker() {}
+    }
+}
+
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
 #endif
 
 #ifdef CATCH_CONFIG_MAIN
 // #included from: internal/catch_default_main.hpp
+#define TWOBLUECUBES_CATCH_DEFAULT_MAIN_HPP_INCLUDED
 
 #ifndef __OBJC__
 
 // Standard C/C++ main entry point
-int main (int argc, char * const argv[]) {
-    return Catch::Main( argc, argv );
+int main (int argc, char * argv[]) {
+    return Catch::Session().run( argc, argv );
 }
 
 #else // __OBJC__
@@ -4741,7 +10342,7 @@
 #endif
 
     Catch::registerTestMethods();
-    int result = Catch::Main( argc, (char* const*)argv );
+    int result = Catch::Session().run( argc, (char* const*)argv );
 
 #if !CATCH_ARC_ENABLED
     [pool drain];
@@ -4754,48 +10355,153 @@
 
 #endif
 
+#ifdef CLARA_CONFIG_MAIN_NOT_DEFINED
+#  undef CLARA_CONFIG_MAIN
+#endif
+
 //////
 
-#define REQUIRE( expr ) INTERNAL_CATCH_TEST( expr, false, true, "REQUIRE" )
-#define REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST( expr, true, true, "REQUIRE_FALSE" )
-
-#define REQUIRE_THROWS( expr ) INTERNAL_CATCH_THROWS( expr, ..., true, "REQUIRE_THROWS" )
-#define REQUIRE_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( expr, exceptionType, true, "REQUIRE_THROWS_AS" )
-#define REQUIRE_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( expr, true, "REQUIRE_NOTHROW" )
-
-#define CHECK( expr ) INTERNAL_CATCH_TEST( expr, false, false, "CHECK" )
-#define CHECK_FALSE( expr ) INTERNAL_CATCH_TEST( expr, true, false, "CHECK_FALSE" )
-#define CHECKED_IF( expr ) INTERNAL_CATCH_IF( expr, false, false, "CHECKED_IF" )
-#define CHECKED_ELSE( expr ) INTERNAL_CATCH_ELSE( expr, false, false, "CHECKED_ELSE" )
-
-#define CHECK_THROWS( expr )  INTERNAL_CATCH_THROWS( expr, ..., false, "CHECK_THROWS" )
-#define CHECK_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( expr, exceptionType, false, "CHECK_THROWS_AS" )
-#define CHECK_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( expr, false, "CHECK_NOTHROW" )
-
-#define CHECK_THAT( arg, matcher ) INTERNAL_CHECK_THAT( arg, matcher, false, "CHECK_THAT" )
-#define REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT( arg, matcher, true, "REQUIRE_THAT" )
-
-#define INFO( msg ) INTERNAL_CATCH_MSG( msg, Catch::ResultWas::Info, false, "INFO" )
-#define WARN( msg ) INTERNAL_CATCH_MSG( msg, Catch::ResultWas::Warning, false, "WARN" )
-#define FAIL( msg ) INTERNAL_CATCH_MSG( msg, Catch::ResultWas::ExplicitFailure, true, "FAIL" )
-#define SCOPED_INFO( msg ) INTERNAL_CATCH_SCOPED_INFO( msg )
-#define CAPTURE( msg ) INTERNAL_CATCH_MSG( #msg " := " << msg, Catch::ResultWas::Info, false, "CAPTURE" )
-
-#define SECTION( name, description ) INTERNAL_CATCH_SECTION( name, description )
-
-#define TEST_CASE( name, description ) INTERNAL_CATCH_TESTCASE( name, description )
-#define TEST_CASE_NORETURN( name, description ) INTERNAL_CATCH_TESTCASE_NORETURN( name, description )
-#define ANON_TEST_CASE() INTERNAL_CATCH_TESTCASE( "", "Anonymous test case" )
-#define METHOD_AS_TEST_CASE( method, name, description ) CATCH_METHOD_AS_TEST_CASE( method, name, description )
+// If this config identifier is defined then all CATCH macros are prefixed with CATCH_
+#ifdef CATCH_CONFIG_PREFIX_ALL
+
+#define CATCH_REQUIRE( expr ) INTERNAL_CATCH_TEST( expr, Catch::ResultDisposition::Normal, "CATCH_REQUIRE" )
+#define CATCH_REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST( expr, Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, "CATCH_REQUIRE_FALSE" )
+
+#define CATCH_REQUIRE_THROWS( expr ) INTERNAL_CATCH_THROWS( expr, Catch::ResultDisposition::Normal, "", "CATCH_REQUIRE_THROWS" )
+#define CATCH_REQUIRE_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( expr, exceptionType, Catch::ResultDisposition::Normal, "CATCH_REQUIRE_THROWS_AS" )
+#define CATCH_REQUIRE_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( expr, Catch::ResultDisposition::Normal, matcher, "CATCH_REQUIRE_THROWS_WITH" )
+#define CATCH_REQUIRE_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( expr, Catch::ResultDisposition::Normal, "CATCH_REQUIRE_NOTHROW" )
+
+#define CATCH_CHECK( expr ) INTERNAL_CATCH_TEST( expr, Catch::ResultDisposition::ContinueOnFailure, "CATCH_CHECK" )
+#define CATCH_CHECK_FALSE( expr ) INTERNAL_CATCH_TEST( expr, Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::FalseTest, "CATCH_CHECK_FALSE" )
+#define CATCH_CHECKED_IF( expr ) INTERNAL_CATCH_IF( expr, Catch::ResultDisposition::ContinueOnFailure, "CATCH_CHECKED_IF" )
+#define CATCH_CHECKED_ELSE( expr ) INTERNAL_CATCH_ELSE( expr, Catch::ResultDisposition::ContinueOnFailure, "CATCH_CHECKED_ELSE" )
+#define CATCH_CHECK_NOFAIL( expr ) INTERNAL_CATCH_TEST( expr, Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::SuppressFail, "CATCH_CHECK_NOFAIL" )
+
+#define CATCH_CHECK_THROWS( expr )  INTERNAL_CATCH_THROWS( expr, Catch::ResultDisposition::ContinueOnFailure, "CATCH_CHECK_THROWS" )
+#define CATCH_CHECK_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( expr, exceptionType, Catch::ResultDisposition::ContinueOnFailure, "CATCH_CHECK_THROWS_AS" )
+#define CATCH_CHECK_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( expr, Catch::ResultDisposition::ContinueOnFailure, matcher, "CATCH_CHECK_THROWS_WITH" )
+#define CATCH_CHECK_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( expr, Catch::ResultDisposition::ContinueOnFailure, "CATCH_CHECK_NOTHROW" )
+
+#define CHECK_THAT( arg, matcher ) INTERNAL_CHECK_THAT( arg, matcher, Catch::ResultDisposition::ContinueOnFailure, "CATCH_CHECK_THAT" )
+#define CATCH_REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT( arg, matcher, Catch::ResultDisposition::Normal, "CATCH_REQUIRE_THAT" )
+
+#define CATCH_INFO( msg ) INTERNAL_CATCH_INFO( msg, "CATCH_INFO" )
+#define CATCH_WARN( msg ) INTERNAL_CATCH_MSG( Catch::ResultWas::Warning, Catch::ResultDisposition::ContinueOnFailure, "CATCH_WARN", msg )
+#define CATCH_SCOPED_INFO( msg ) INTERNAL_CATCH_INFO( msg, "CATCH_INFO" )
+#define CATCH_CAPTURE( msg ) INTERNAL_CATCH_INFO( #msg " := " << msg, "CATCH_CAPTURE" )
+#define CATCH_SCOPED_CAPTURE( msg ) INTERNAL_CATCH_INFO( #msg " := " << msg, "CATCH_CAPTURE" )
+
+#ifdef CATCH_CONFIG_VARIADIC_MACROS
+    #define CATCH_TEST_CASE( ... ) INTERNAL_CATCH_TESTCASE( __VA_ARGS__ )
+    #define CATCH_TEST_CASE_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, __VA_ARGS__ )
+    #define CATCH_METHOD_AS_TEST_CASE( method, ... ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, __VA_ARGS__ )
+    #define CATCH_REGISTER_TEST_CASE( Function, ... ) INTERNAL_CATCH_REGISTER_TESTCASE( Function, __VA_ARGS__ )
+    #define CATCH_SECTION( ... ) INTERNAL_CATCH_SECTION( __VA_ARGS__ )
+    #define CATCH_FAIL( ... ) INTERNAL_CATCH_MSG( Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, "CATCH_FAIL", __VA_ARGS__ )
+    #define CATCH_SUCCEED( ... ) INTERNAL_CATCH_MSG( Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, "CATCH_SUCCEED", __VA_ARGS__ )
+#else
+    #define CATCH_TEST_CASE( name, description ) INTERNAL_CATCH_TESTCASE( name, description )
+    #define CATCH_TEST_CASE_METHOD( className, name, description ) INTERNAL_CATCH_TEST_CASE_METHOD( className, name, description )
+    #define CATCH_METHOD_AS_TEST_CASE( method, name, description ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, name, description )
+    #define CATCH_REGISTER_TEST_CASE( function, name, description ) INTERNAL_CATCH_REGISTER_TESTCASE( function, name, description )
+    #define CATCH_SECTION( name, description ) INTERNAL_CATCH_SECTION( name, description )
+    #define CATCH_FAIL( msg ) INTERNAL_CATCH_MSG( Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, "CATCH_FAIL", msg )
+    #define CATCH_SUCCEED( msg ) INTERNAL_CATCH_MSG( Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, "CATCH_SUCCEED", msg )
+#endif
+#define CATCH_ANON_TEST_CASE() INTERNAL_CATCH_TESTCASE( "", "" )
+
+#define CATCH_REGISTER_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType )
+#define CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType )
+
+#define CATCH_GENERATE( expr) INTERNAL_CATCH_GENERATE( expr )
+
+// "BDD-style" convenience wrappers
+#ifdef CATCH_CONFIG_VARIADIC_MACROS
+#define CATCH_SCENARIO( ... ) CATCH_TEST_CASE( "Scenario: " __VA_ARGS__ )
+#define CATCH_SCENARIO_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " __VA_ARGS__ )
+#else
+#define CATCH_SCENARIO( name, tags ) CATCH_TEST_CASE( "Scenario: " name, tags )
+#define CATCH_SCENARIO_METHOD( className, name, tags ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " name, tags )
+#endif
+#define CATCH_GIVEN( desc )    CATCH_SECTION( std::string( "Given: ") + desc, "" )
+#define CATCH_WHEN( desc )     CATCH_SECTION( std::string( " When: ") + desc, "" )
+#define CATCH_AND_WHEN( desc ) CATCH_SECTION( std::string( "  And: ") + desc, "" )
+#define CATCH_THEN( desc )     CATCH_SECTION( std::string( " Then: ") + desc, "" )
+#define CATCH_AND_THEN( desc ) CATCH_SECTION( std::string( "  And: ") + desc, "" )
+
+// If CATCH_CONFIG_PREFIX_ALL is not defined then the CATCH_ prefix is not required
+#else
+
+#define REQUIRE( expr ) INTERNAL_CATCH_TEST( expr, Catch::ResultDisposition::Normal, "REQUIRE" )
+#define REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST( expr, Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, "REQUIRE_FALSE" )
+
+#define REQUIRE_THROWS( expr ) INTERNAL_CATCH_THROWS( expr, Catch::ResultDisposition::Normal, "", "REQUIRE_THROWS" )
+#define REQUIRE_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( expr, exceptionType, Catch::ResultDisposition::Normal, "REQUIRE_THROWS_AS" )
+#define REQUIRE_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( expr, Catch::ResultDisposition::Normal, matcher, "REQUIRE_THROWS_WITH" )
+#define REQUIRE_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( expr, Catch::ResultDisposition::Normal, "REQUIRE_NOTHROW" )
+
+#define CHECK( expr ) INTERNAL_CATCH_TEST( expr, Catch::ResultDisposition::ContinueOnFailure, "CHECK" )
+#define CHECK_FALSE( expr ) INTERNAL_CATCH_TEST( expr, Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::FalseTest, "CHECK_FALSE" )
+#define CHECKED_IF( expr ) INTERNAL_CATCH_IF( expr, Catch::ResultDisposition::ContinueOnFailure, "CHECKED_IF" )
+#define CHECKED_ELSE( expr ) INTERNAL_CATCH_ELSE( expr, Catch::ResultDisposition::ContinueOnFailure, "CHECKED_ELSE" )
+#define CHECK_NOFAIL( expr ) INTERNAL_CATCH_TEST( expr, Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::SuppressFail, "CHECK_NOFAIL" )
+
+#define CHECK_THROWS( expr )  INTERNAL_CATCH_THROWS( expr, Catch::ResultDisposition::ContinueOnFailure, "", "CHECK_THROWS" )
+#define CHECK_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( expr, exceptionType, Catch::ResultDisposition::ContinueOnFailure, "CHECK_THROWS_AS" )
+#define CHECK_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( expr, Catch::ResultDisposition::ContinueOnFailure, matcher, "CHECK_THROWS_WITH" )
+#define CHECK_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( expr, Catch::ResultDisposition::ContinueOnFailure, "CHECK_NOTHROW" )
+
+#define CHECK_THAT( arg, matcher ) INTERNAL_CHECK_THAT( arg, matcher, Catch::ResultDisposition::ContinueOnFailure, "CHECK_THAT" )
+#define REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT( arg, matcher, Catch::ResultDisposition::Normal, "REQUIRE_THAT" )
+
+#define INFO( msg ) INTERNAL_CATCH_INFO( msg, "INFO" )
+#define WARN( msg ) INTERNAL_CATCH_MSG( Catch::ResultWas::Warning, Catch::ResultDisposition::ContinueOnFailure, "WARN", msg )
+#define SCOPED_INFO( msg ) INTERNAL_CATCH_INFO( msg, "INFO" )
+#define CAPTURE( msg ) INTERNAL_CATCH_INFO( #msg " := " << msg, "CAPTURE" )
+#define SCOPED_CAPTURE( msg ) INTERNAL_CATCH_INFO( #msg " := " << msg, "CAPTURE" )
+
+#ifdef CATCH_CONFIG_VARIADIC_MACROS
+    #define TEST_CASE( ... ) INTERNAL_CATCH_TESTCASE( __VA_ARGS__ )
+    #define TEST_CASE_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, __VA_ARGS__ )
+    #define METHOD_AS_TEST_CASE( method, ... ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, __VA_ARGS__ )
+    #define REGISTER_TEST_CASE( Function, ... ) INTERNAL_CATCH_REGISTER_TESTCASE( Function, __VA_ARGS__ )
+    #define SECTION( ... ) INTERNAL_CATCH_SECTION( __VA_ARGS__ )
+    #define FAIL( ... ) INTERNAL_CATCH_MSG( Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, "FAIL", __VA_ARGS__ )
+    #define SUCCEED( ... ) INTERNAL_CATCH_MSG( Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, "SUCCEED", __VA_ARGS__ )
+#else
+    #define TEST_CASE( name, description ) INTERNAL_CATCH_TESTCASE( name, description )
+    #define TEST_CASE_METHOD( className, name, description ) INTERNAL_CATCH_TEST_CASE_METHOD( className, name, description )
+    #define METHOD_AS_TEST_CASE( method, name, description ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, name, description )
+    #define REGISTER_TEST_CASE( method, name, description ) INTERNAL_CATCH_REGISTER_TESTCASE( method, name, description )
+    #define SECTION( name, description ) INTERNAL_CATCH_SECTION( name, description )
+    #define FAIL( msg ) INTERNAL_CATCH_MSG( Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, "FAIL", msg )
+    #define SUCCEED( msg ) INTERNAL_CATCH_MSG( Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, "SUCCEED", msg )
+#endif
+#define ANON_TEST_CASE() INTERNAL_CATCH_TESTCASE( "", "" )
 
 #define REGISTER_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType )
-#define CATCH_TRANSLATE_EXCEPTION( signature ) INTERNAL_CATCH_TRANSLATE_EXCEPTION( signature )
+#define REGISTER_LEGACY_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType )
 
 #define GENERATE( expr) INTERNAL_CATCH_GENERATE( expr )
 
-///////////////
-// Still to be implemented
-#define CHECK_NOFAIL( expr ) // !TBD - reports violation, but doesn't fail Test
+#endif
+
+#define CATCH_TRANSLATE_EXCEPTION( signature ) INTERNAL_CATCH_TRANSLATE_EXCEPTION( signature )
+
+// "BDD-style" convenience wrappers
+#ifdef CATCH_CONFIG_VARIADIC_MACROS
+#define SCENARIO( ... ) TEST_CASE( "Scenario: " __VA_ARGS__ )
+#define SCENARIO_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " __VA_ARGS__ )
+#else
+#define SCENARIO( name, tags ) TEST_CASE( "Scenario: " name, tags )
+#define SCENARIO_METHOD( className, name, tags ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " name, tags )
+#endif
+#define GIVEN( desc )    SECTION( std::string("   Given: ") + desc, "" )
+#define WHEN( desc )     SECTION( std::string("    When: ") + desc, "" )
+#define AND_WHEN( desc ) SECTION( std::string("And when: ") + desc, "" )
+#define THEN( desc )     SECTION( std::string("    Then: ") + desc, "" )
+#define AND_THEN( desc ) SECTION( std::string("     And: ") + desc, "" )
 
 using Catch::Detail::Approx;
 
--- a/include/cvutils.hpp	Fri Jun 10 15:43:02 2016 -0400
+++ b/include/cvutils.hpp	Mon Aug 24 16:02:06 2020 -0400
@@ -1,8 +1,8 @@
 #ifndef CVUTILS_HPP
 #define CVUTILS_HPP
 
-#include "opencv2/core/core.hpp"
-#include "opencv2/features2d/features2d.hpp"
+#include "opencv2/core.hpp"
+#include "opencv2/features2d.hpp"
 
 class CvCapture;
 //template<typename T> class Point_<T>;
@@ -14,6 +14,9 @@
  use perspectiveTransform for arrays of points. */
 cv::Point2f project(const cv::Point2f& p, const cv::Mat& homography);
 
+/** Projects a point with the camera matrix */
+cv::Point2f cameraProject(const cv::Point2f& p, const cv::Mat& cameraMatrix);
+
 /** Loads a cv mat from a text file where the numbers are saved line by line separated by separator */
 cv::Mat loadMat(const std::string& filename, const std::string& separator);
 
--- a/include/utils.hpp	Fri Jun 10 15:43:02 2016 -0400
+++ b/include/utils.hpp	Mon Aug 24 16:02:06 2020 -0400
@@ -18,6 +18,9 @@
  * Warning: returns an empty string if all the lines to the end of the file are comments. */
 std::string getlineComment(std::ifstream& f);
 
+/** Get relative filename if not absolute */
+std::string getRelativeFilename(const std::string& parentDirname, const std::string& filename);
+
 /** Converts a string to an integer. */
 int toInt(const std::string& s);
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/python-requirements.txt	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,9 @@
+matplotlib
+numpy
+scipy
+scikit-image
+scikit-learn
+shapely
+pandas
+munkres
+sqlalchemy
--- a/python/base.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-'''Module for few base classes to avoid issues of circular import'''
-
-class VideoFilenameAddable(object):
-    'Base class with the capability to attach a video filename'
-
-    def setVideoFilename(self, videoFilename):
-        self.videoFilename = videoFilename
--- a/python/cvutils.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,609 +0,0 @@
-#! /usr/bin/env python
-'''Image/Video utilities'''
-
-import utils, moving
-
-try:
-    import cv2
-    opencvAvailable = True
-except ImportError:
-    print('OpenCV library could not be loaded (video replay functions will not be available)') # TODO change to logging module
-    opencvAvailable = False
-try:
-    import skimage
-    skimageAvailable = True
-except ImportError:
-    print('Scikit-image library could not be loaded (HoG-based classification methods will not be available)')
-    skimageAvailable = False
-    
-from sys import stdout, maxint
-from os import listdir
-from copy import deepcopy
-from math import floor, log10, ceil
-
-from numpy import dot, array, append, float32, loadtxt, savetxt, append, zeros, ones, identity, abs as npabs, logical_and, unravel_index, sum as npsum, isnan, mgrid, median, floor as npfloor, ceil as npceil
-from matplotlib.mlab import find
-from matplotlib.pyplot import imread, imsave
-
-
-
-#import aggdraw # agg on top of PIL (antialiased drawing)
-
-
-cvRed = (0,0,255)
-cvGreen = (0,255,0)
-cvBlue = (255,0,0)
-cvCyan = (255, 255, 0)
-cvYellow = (0, 255, 255)
-cvMagenta = (255, 0, 255)
-cvWhite = (255, 255, 255)
-cvBlack = (0,0,0)
-cvColors3 = utils.PlottingPropertyValues([cvRed,
-                                          cvGreen,
-                                          cvBlue])
-cvColors = utils.PlottingPropertyValues([cvRed,
-                                         cvGreen,
-                                         cvBlue,
-                                         cvCyan,
-                                         cvYellow,
-                                         cvMagenta,
-                                         cvWhite,
-                                         cvBlack])
-
-def quitKey(key):
-    return chr(key&255)== 'q' or chr(key&255) == 'Q'
-
-def saveKey(key):
-    return chr(key&255) == 's'
-
-def int2FOURCC(x):
-    fourcc = ''
-    for i in xrange(4):
-        fourcc += unichr((x >> 8*i)&255)
-    return fourcc
-
-def plotLines(filename, origins, destinations, w = 1, resultFilename='image.png'):
-    '''Draws lines over the image '''
-    import Image, ImageDraw # PIL
-    
-    img = Image.open(filename)
-
-    draw = ImageDraw.Draw(img)
-    #draw = aggdraw.Draw(img)
-    #pen = aggdraw.Pen("red", width)
-    for p1, p2 in zip(origins, destinations):
-        draw.line([p1.x, p1.y, p2.x, p2.y], width = w, fill = (256,0,0))
-        #draw.line([p1.x, p1.y, p2.x, p2.y], pen)
-    del draw
-
-    #out = utils.openCheck(resultFilename)
-    img.save(resultFilename)
-
-def rgb2gray(rgb):
-    return dot(rgb[...,:3], [0.299, 0.587, 0.144])
-
-def matlab2PointCorrespondences(filename):
-    '''Loads and converts the point correspondences saved 
-    by the matlab camera calibration tool'''
-    points = loadtxt(filename, delimiter=',')
-    savetxt(utils.removeExtension(filename)+'-point-correspondences.txt',append(points[:,:2].T, points[:,3:].T, axis=0))
-
-def loadPointCorrespondences(filename):
-    '''Loads and returns the corresponding points in world (first 2 lines) and image spaces (last 2 lines)'''
-    points = loadtxt(filename, dtype=float32)
-    return  (points[:2,:].T, points[2:,:].T) # (world points, image points)
-
-def cvMatToArray(cvmat):
-    '''Converts an OpenCV CvMat to numpy array.'''
-    print('Deprecated, use new interface')
-    a = zeros((cvmat.rows, cvmat.cols))#array([[0.0]*cvmat.width]*cvmat.height)
-    for i in xrange(cvmat.rows):
-        for j in xrange(cvmat.cols):
-            a[i,j] = cvmat[i,j]
-    return a
-
-def createWhiteImage(height, width, filename):
-    img = ones((height, width, 3), uint8)*255
-    imsave(filename, img)
-
-if opencvAvailable:
-    def computeHomography(srcPoints, dstPoints, method=0, ransacReprojThreshold=3.0):
-        '''Returns the homography matrix mapping from srcPoints to dstPoints (dimension Nx2)'''
-        H, mask = cv2.findHomography(srcPoints, dstPoints, method, ransacReprojThreshold)
-        return H
-
-    def arrayToCvMat(a, t = cv2.CV_64FC1):
-        '''Converts a numpy array to an OpenCV CvMat, with default type CV_64FC1.'''
-        print('Deprecated, use new interface')
-        cvmat = cv2.cv.CreateMat(a.shape[0], a.shape[1], t)
-        for i in range(cvmat.rows):
-            for j in range(cvmat.cols):
-                cvmat[i,j] = a[i,j]
-        return cvmat
-
-    def cvPlot(img, positions, color, lastCoordinate = None, **kwargs):
-        if lastCoordinate is None:
-            last = positions.length()-1
-        elif lastCoordinate >=0:
-            last = min(positions.length()-1, lastCoordinate)
-        for i in range(0, last):
-            cv2.line(img, positions[i].asint().astuple(), positions[i+1].asint().astuple(), color, **kwargs)
-
-    def cvImshow(windowName, img, rescale = 1.0):
-        'Rescales the image (in particular if too large)'
-        from cv2 import resize
-        if rescale != 1.:
-            size = (int(round(img.shape[1]*rescale)), int(round(img.shape[0]*rescale)))
-            resizedImg = resize(img, size)
-            cv2.imshow(windowName, resizedImg)
-        else:
-            cv2.imshow(windowName, img)
-
-    def computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients):
-        newImgSize = (int(round(width*undistortedImageMultiplication)), int(round(height*undistortedImageMultiplication)))
-        newCameraMatrix = deepcopy(intrinsicCameraMatrix)
-        newCameraMatrix[0,2] = newImgSize[0]/2.
-        newCameraMatrix[1,2] = newImgSize[1]/2.
-        return cv2.initUndistortRectifyMap(intrinsicCameraMatrix, array(distortionCoefficients), identity(3), newCameraMatrix, newImgSize, cv2.CV_32FC1)
-
-    def playVideo(filename, firstFrameNum = 0, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1., step = 1):
-        '''Plays the video'''
-        windowName = 'frame'
-        if rescale == 1.:
-            cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
-        wait = 5
-        if frameRate > 0:
-            wait = int(round(1000./frameRate))
-        if interactive:
-            wait = 0
-        capture = cv2.VideoCapture(filename)
-        if capture.isOpened():
-            key = -1
-            ret = True
-            frameNum = firstFrameNum
-            capture.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum)
-            while ret and not quitKey(key):
-                #ret, img = capture.read()
-                for i in xrange(step):
-                    ret, img = capture.read()
-                if ret:
-                    if printFrames:
-                        print('frame {0}'.format(frameNum))
-                    frameNum+=step
-                    if text is not None:
-                       cv2.putText(img, text, (10,50), cv2.FONT_HERSHEY_PLAIN, 1, cvRed) 
-                    cvImshow(windowName, img, rescale)
-                    key = cv2.waitKey(wait)
-                    if saveKey(key):
-                        cv2.imwrite('image-{}.png'.format(frameNum), img)
-            cv2.destroyAllWindows()
-        else:
-            print('Video capture for {} failed'.format(filename))
-
-    def infoVideo(filename):
-        '''Provides all available info on video '''
-        cvPropertyNames = {cv2.CAP_PROP_FORMAT: "format",
-                           cv2.CAP_PROP_FOURCC: "codec (fourcc)",
-                           cv2.CAP_PROP_FPS: "fps",
-                           cv2.CAP_PROP_FRAME_COUNT: "number of frames",
-                           cv2.CAP_PROP_FRAME_HEIGHT: "heigh",
-                           cv2.CAP_PROP_FRAME_WIDTH: "width",
-                           cv2.CAP_PROP_RECTIFICATION: "rectification",
-                           cv2.CAP_PROP_SATURATION: "saturation"}
-        capture = cv2.VideoCapture(filename)
-        if capture.isOpened():
-            for cvprop in [#cv2.CAP_PROP_BRIGHTNESS
-                    #cv2.CAP_PROP_CONTRAST
-                    #cv2.CAP_PROP_CONVERT_RGB
-                    #cv2.CAP_PROP_EXPOSURE
-                    cv2.CAP_PROP_FORMAT,
-                    cv2.CAP_PROP_FOURCC,
-                    cv2.CAP_PROP_FPS,
-                    cv2.CAP_PROP_FRAME_COUNT,
-                    cv2.CAP_PROP_FRAME_HEIGHT,
-                    cv2.CAP_PROP_FRAME_WIDTH,
-                    #cv2.CAP_PROP_GAIN,
-                    #cv2.CAP_PROP_HUE
-                    #cv2.CAP_PROP_MODE
-                    #cv2.CAP_PROP_POS_AVI_RATIO
-                    #cv2.CAP_PROP_POS_FRAMES
-                    #cv2.CAP_PROP_POS_MSEC
-                    #cv2.CAP_PROP_RECTIFICATION,
-                    #cv2.CAP_PROP_SATURATION
-            ]:
-                prop = capture.get(cvprop)
-                if cvprop == cv2.CAP_PROP_FOURCC and prop > 0:
-                    prop = int2FOURCC(int(prop))
-                print('Video {}: {}'.format(cvPropertyNames[cvprop], prop))
-        else:
-            print('Video capture for {} failed'.format(filename))
-
-    def getImagesFromVideo(videoFilename, firstFrameNum = 0, lastFrameNum = 1, step = 1, saveImage = False, outputPrefix = 'image'):
-        '''Returns nFrames images from the video sequence'''
-        images = []
-        capture = cv2.VideoCapture(videoFilename)
-        if capture.isOpened():
-            rawCount = capture.get(cv2.CAP_PROP_FRAME_COUNT)
-            if rawCount < 0:
-                rawCount = lastFrameNum+1
-            nDigits = int(floor(log10(rawCount)))+1
-            ret = False
-            capture.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum)
-            frameNum = firstFrameNum
-            while frameNum<=lastFrameNum and frameNum<rawCount:
-                ret, img = capture.read()
-                i = 0
-                while not ret and i<10:
-                    ret, img = capture.read()
-                    i += 1
-                if img is not None and img.size>0:
-                    if saveImage:
-                        frameNumStr = format(frameNum, '0{}d'.format(nDigits))
-                        cv2.imwrite(outputPrefix+frameNumStr+'.png', img)
-                    else:
-                        images.append(img)
-                    frameNum +=step
-                    if step > 1:
-                        capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
-            capture.release()
-        else:
-            print('Video capture for {} failed'.format(videoFilename))
-        return images
-    
-    def getFPS(videoFilename):
-        capture = cv2.VideoCapture(videoFilename)
-        if capture.isOpened():
-            fps = capture.get(cv2.CAP_PROP_FPS)
-            capture.release()
-            return fps
-        else:
-            print('Video capture for {} failed'.format(videoFilename))
-            return None
-
-    def imageBox(img, obj, frameNum, homography, width, height, px = 0.2, py = 0.2, minNPixels = 800):
-        'Computes the bounding box of object at frameNum'
-        x = []
-        y = []
-        if obj.hasFeatures():
-            for f in obj.getFeatures():
-                if f.existsAtInstant(frameNum):
-                    projectedPosition = f.getPositionAtInstant(frameNum).project(homography)
-                    x.append(projectedPosition.x)
-                    y.append(projectedPosition.y)
-        xmin = min(x)
-        xmax = max(x)
-        ymin = min(y)
-        ymax = max(y)
-        xMm = px * (xmax - xmin)
-        yMm = py * (ymax - ymin)
-        a = max(ymax - ymin + (2 * yMm), xmax - (xmin + 2 * xMm))
-        yCropMin = int(max(0, .5 * (ymin + ymax - a)))
-        yCropMax = int(min(height - 1, .5 * (ymin + ymax + a)))
-        xCropMin = int(max(0, .5 * (xmin + xmax - a)))
-        xCropMax = int(min(width - 1, .5 * (xmin + xmax + a)))
-        if yCropMax != yCropMin and xCropMax != xCropMin and (yCropMax - yCropMin) * (xCropMax - xCropMin) > minNPixels:
-            croppedImg = img[yCropMin : yCropMax, xCropMin : xCropMax]
-        else:
-            croppedImg = None
-        return croppedImg, yCropMin, yCropMax, xCropMin, xCropMax
-
-
-    def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., annotations = [], gtMatches = {}, toMatches = {}):
-        '''Displays the objects overlaid frame by frame over the video '''
-        capture = cv2.VideoCapture(videoFilename)
-        width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
-        height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-        windowName = 'frame'
-        if rescale == 1.:
-            cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
-
-        if undistort: # setup undistortion
-            [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
-        if capture.isOpened():
-            key = -1
-            ret = True
-            frameNum = firstFrameNum
-            capture.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum)
-            if lastFrameNumArg is None:
-                lastFrameNum = maxint
-            else:
-                lastFrameNum = lastFrameNumArg
-            nZerosFilename = int(ceil(log10(lastFrameNum)))
-            objectToDeleteIds = []
-            while ret and not quitKey(key) and frameNum <= lastFrameNum:
-                ret, img = capture.read()
-                if ret:
-                    if undistort:
-                        img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
-                    if printFrames:
-                        print('frame {0}'.format(frameNum))
-                    if len(objectToDeleteIds) > 0:
-                        objects = [o for o in objects if o.getNum() not in objectToDeleteIds]
-                        objectToDeleteIds = []
-                    # plot objects
-                    for obj in objects:
-                        if obj.existsAtInstant(frameNum):
-                            if obj.getLastInstant() == frameNum:
-                                objectToDeleteIds.append(obj.getNum())
-                            if not hasattr(obj, 'projectedPositions'):
-                                if homography is not None:
-                                    obj.projectedPositions = obj.positions.project(homography)
-                                else:
-                                    obj.projectedPositions = obj.positions
-                            cvPlot(img, obj.projectedPositions, cvColors[obj.getNum()], frameNum-obj.getFirstInstant())
-                            if frameNum not in boundingBoxes.keys() and obj.hasFeatures():
-                                imgcrop, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, obj, frameNum, homography, width, height)
-                                cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue, 1)
-                            objDescription = '{} '.format(obj.num)
-                            if moving.userTypeNames[obj.userType] != 'unknown':
-                                objDescription += moving.userTypeNames[obj.userType][0].upper()
-                            if len(annotations) > 0: # if we loaded annotations, but there is no match
-                                if frameNum not in toMatches[obj.getNum()]:
-                                    objDescription += " FA"
-                            cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvColors[obj.getNum()])
-                    # plot object bounding boxes
-                    if frameNum in boundingBoxes.keys():
-                        for rect in boundingBoxes[frameNum]:
-                            cv2.rectangle(img, rect[0].asint().astuple(), rect[1].asint().astuple(), cvColors[obj.getNum()])
-                    # plot ground truth
-                    if len(annotations) > 0:
-                        for gt in annotations:
-                            if gt.existsAtInstant(frameNum):
-                                if frameNum in gtMatches[gt.getNum()]:
-                                    color = cvColors[gtMatches[gt.getNum()][frameNum]] # same color as object
-                                else:
-                                    color = cvRed
-                                    cv2.putText(img, 'Miss', gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvRed)
-                                cv2.rectangle(img, gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), gt.bottomRightPositions[frameNum-gt.getFirstInstant()].asint().astuple(), color)
-                    # saving images and going to next
-                    if not saveAllImages:
-                        cvImshow(windowName, img, rescale)
-                        key = cv2.waitKey()
-                    if saveAllImages or saveKey(key):
-                        cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img)
-                    frameNum += nFramesStep
-                    if nFramesStep > 1:
-                        capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
-            cv2.destroyAllWindows()
-        else:
-            print('Cannot load file ' + videoFilename)
-
-    def computeHomographyFromPDTV(camera):
-        '''Returns the homography matrix at ground level from PDTV camera
-        https://bitbucket.org/hakanardo/pdtv'''
-        # camera = pdtv.load(cameraFilename)
-        srcPoints = [[x,y] for x, y in zip([1.,2.,2.,1.],[1.,1.,2.,2.])] # need floats!!
-        dstPoints = []
-        for srcPoint in srcPoints:
-            projected = camera.image_to_world(tuple(srcPoint))
-            dstPoints.append([projected[0], projected[1]])
-        H, mask = cv2.findHomography(array(srcPoints), array(dstPoints), method = 0) # No need for different methods for finding homography
-        return H
-
-    def undistortedCoordinates(map1, map2, x, y, maxDistance = 1.):
-        '''Returns the coordinates of a point in undistorted image
-        map1 and map2 are the mapping functions from undistorted image
-        to distorted (original image)
-        map1(x,y) = originalx, originaly'''
-        distx = npabs(map1-x)
-        disty = npabs(map2-y)
-        indices = logical_and(distx<maxDistance, disty<maxDistance)
-        closeCoordinates = unravel_index(find(indices), distx.shape) # returns i,j, ie y,x
-        xWeights = 1-distx[indices]
-        yWeights = 1-disty[indices]
-        return dot(xWeights, closeCoordinates[1])/npsum(xWeights), dot(yWeights, closeCoordinates[0])/npsum(yWeights)
-
-    def undistortTrajectoryFromCVMapping(map1, map2, t):
-        '''test 'perfect' inversion'''
-        undistortedTrajectory = moving.Trajectory()
-        for i,p in enumerate(t):
-            res = undistortedCoordinates(map1, map2, p.x,p.y)
-            if not isnan(res).any():
-                undistortedTrajectory.addPositionXY(res[0], res[1])
-            else:
-                print('{} {} {}'.format(i,p,res))
-        return undistortedTrajectory
-
-    def computeInverseMapping(originalImageSize, map1, map2):
-        'Computes inverse mapping from maps provided by cv2.initUndistortRectifyMap'
-        invMap1 = -ones(originalImageSize)
-        invMap2 = -ones(originalImageSize)
-        for x in range(0,originalImageSize[1]):
-            for y in range(0,originalImageSize[0]):
-                res = undistortedCoordinates(x,y, map1, map2)
-                if not isnan(res).any():
-                    invMap1[y,x] = res[0]
-                    invMap2[y,x] = res[1]
-        return invMap1, invMap2
-
-    def cameraIntrinsicCalibration(path, checkerBoardSize=[6,7], secondPassSearch=False, display=False):
-        ''' Camera calibration searches through all the images (jpg or png) located
-            in _path_ for matches to a checkerboard pattern of size checkboardSize.
-            These images should all be of the same camera with the same resolution.
-
-            For best results, use an asymetric board and ensure that the image has
-            very high contrast, including the background. Suitable checkerboard:
-            http://ftp.isr.ist.utl.pt/pub/roswiki/attachments/camera_calibration(2f)Tutorials(2f)StereoCalibration/check-108.png
-
-            The code below is based off of:
-            https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
-            Modified by Paul St-Aubin
-            '''
-        import glob, os
-
-        # termination criteria
-        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
-
-        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
-        objp = zeros((checkerBoardSize[0]*checkerBoardSize[1],3), float32)
-        objp[:,:2] = mgrid[0:checkerBoardSize[1],0:checkerBoardSize[0]].T.reshape(-1,2)
-
-        # Arrays to store object points and image points from all the images.
-        objpoints = [] # 3d point in real world space
-        imgpoints = [] # 2d points in image plane.
-
-        ## Loop throuhg all images in _path_
-        images = glob.glob(os.path.join(path,'*.[jJ][pP][gG]'))+glob.glob(os.path.join(path,'*.[jJ][pP][eE][gG]'))+glob.glob(os.path.join(path,'*.[pP][nN][gG]'))
-        for fname in images:
-            img = cv2.imread(fname)
-            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
-            # Find the chess board corners
-            ret, corners = cv2.findChessboardCorners(gray, (checkerBoardSize[1],checkerBoardSize[0]), None)
-
-            # If found, add object points, image points (after refining them)
-            if ret:
-                print('Found pattern in '+fname)
-                
-                if secondPassSearch:
-                    corners = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
-
-                objpoints.append(objp)
-                imgpoints.append(corners)
-
-                # Draw and display the corners
-                if display:
-                    img = cv2.drawChessboardCorners(img, (checkerBoardSize[1],checkerBoardSize[0]), corners, ret)
-                    if img is not None:
-                        cv2.imshow('img',img)
-                        cv2.waitKey(0)
-            else:
-                print('Pattern not found in '+fname)
-        ## Close up image loading and calibrate
-        cv2.destroyAllWindows()
-        if len(objpoints) == 0 or len(imgpoints) == 0: 
-            return False
-        try:
-            ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
-        except NameError:
-            return False
-        savetxt('intrinsic-camera.txt', camera_matrix)
-        return camera_matrix, dist_coeffs
-
-    def undistortImage(img, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., interpolation=cv2.INTER_LINEAR):
-        '''Undistorts the image passed in argument'''
-        width = img.shape[1]
-        height = img.shape[0]
-        [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
-        return cv2.remap(img, map1, map2, interpolation=interpolation)
-
-
-def printCvMat(cvmat, out = stdout):
-    '''Prints the cvmat to out'''
-    print('Deprecated, use new interface')
-    for i in xrange(cvmat.rows):
-        for j in xrange(cvmat.cols):
-            out.write('{0} '.format(cvmat[i,j]))
-        out.write('\n')
-
-def projectArray(homography, points):
-    '''Returns the coordinates of the projected points through homography
-    (format: array 2xN points)'''
-    if points.shape[0] != 2:
-        raise Exception('points of dimension {0} {1}'.format(points.shape[0], points.shape[1]))
-
-    if (homography is not None) and homography.size>0:
-        #alternatively, on could use cv2.convertpointstohomogeneous and other conversion to/from homogeneous coordinates
-        augmentedPoints = append(points,[[1]*points.shape[1]], 0)
-        prod = dot(homography, augmentedPoints)
-        return prod[0:2]/prod[2]
-    else:
-        return points
-
-def project(homography, p):
-    '''Returns the coordinates of the projection of the point p with coordinates p[0], p[1]
-    through homography'''
-    return projectArray(homography, array([[p[0]],[p[1]]]))
-
-def projectTrajectory(homography, trajectory):
-    '''Projects a series of points in the format
-    [[x1, x2, ...],
-    [y1, y2, ...]]'''
-    return projectArray(homography, array(trajectory))
-
-def invertHomography(homography):
-    '''Returns an inverted homography
-    Unnecessary for reprojection over camera image'''
-    from numpy.linalg import inv
-    invH = inv(homography)
-    invH /= invH[2,2]
-    return invH
-
-def undistortTrajectory(invMap1, invMap2, positions):
-    floorPositions = npfloor(positions)
-    #ceilPositions = npceil(positions)
-    undistortedTrajectory = [[],[]]
-    for i in xrange(len(positions[0])):
-        x,y = None, None
-        if positions[0][i]+1 < invMap1.shape[1] and positions[1][i]+1 < invMap1.shape[0]:
-            floorX = invMap1[floorPositions[1][i], floorPositions[0][i]]
-            floorY = invMap2[floorPositions[1][i], floorPositions[0][i]]
-            ceilX = invMap1[floorPositions[1][i]+1, floorPositions[0][i]+1]
-            ceilY = invMap2[floorPositions[1][i]+1, floorPositions[0][i]+1]
-            #ceilX = invMap1[ceilPositions[1][i], ceilPositions[0][i]]
-            #ceilY = invMap2[ceilPositions[1][i], ceilPositions[0][i]]
-            if floorX >=0 and floorY >=0 and ceilX >=0 and ceilY >=0:
-                x = floorX+(positions[0][i]-floorPositions[0][i])*(ceilX-floorX)
-                y = floorY+(positions[1][i]-floorPositions[1][i])*(ceilY-floorY)
-        undistortedTrajectory[0].append(x)
-        undistortedTrajectory[1].append(y)
-    return undistortedTrajectory
-
-def projectGInputPoints(homography, points):
-    return projectTrajectory(homography, array(points+[points[0]]).T)
-
-if opencvAvailable:
-    def computeTranslation(img1, img2, img1Points, maxTranslation2, minNMatches, windowSize = (5,5), level = 5, criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)):
-        '''Computes the translation of img2 with respect to img1
-        (loaded using OpenCV as numpy arrays)
-        img1Points are used to compute the translation
-
-        TODO add diagnostic if data is all over the place, and it most likely is not a translation (eg zoom, other non linear distortion)'''
-
-        nextPoints = array([])
-        (img2Points, status, track_error) = cv2.calcOpticalFlowPyrLK(img1, img2, img1Points, nextPoints, winSize=windowSize, maxLevel=level, criteria=criteria)
-        # calcOpticalFlowPyrLK(prevImg, nextImg, prevPts[, nextPts[, status[, err[, winSize[, maxLevel[, criteria[, derivLambda[, flags]]]]]]]]) -> nextPts, status, err
-        delta = []
-        for (k, (p1,p2)) in enumerate(zip(img1Points, img2Points)):
-            if status[k] == 1:
-                dp = p2-p1
-                d = npsum(dp**2)
-                if d < maxTranslation2:
-                    delta.append(dp)
-        if len(delta) >= minNMatches:
-            return median(delta, axis=0)
-        else:
-            print(dp)
-            return None
-
-if skimageAvailable:
-    from skimage.feature import hog
-    from skimage import color, transform
-    
-    def HOG(image, rescaleSize = (64, 64), orientations=9, pixelsPerCell=(8, 8), cellsPerBlock=(2, 2), visualize=False, normalize=False):
-        bwImg = color.rgb2gray(image)
-        inputImg = transform.resize(bwImg, rescaleSize)
-        features = hog(inputImg, orientations, pixelsPerCell, cellsPerBlock, visualize, normalize)
-        if visualize:
-            from matplotlib.pyplot import imshow, figure, subplot
-            hogViz = features[1]
-            features = features[0]
-            figure()
-            subplot(1,2,1)
-            imshow(inputImg)
-            subplot(1,2,2)
-            imshow(hogViz)
-        return float32(features)
-
-    def createHOGTrainingSet(imageDirectory, classLabel, rescaleSize = (64, 64), orientations=9, pixelsPerCell=(8, 8), cellsPerBlock=(2, 2), visualize=False, normalize=False):
-        inputData = []
-        for filename in listdir(imageDirectory):
-            img = imread(imageDirectory+filename)
-            features = HOG(img, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, visualize, normalize)
-            inputData.append(features)
-
-        nImages = len(inputData)
-        return array(inputData, dtype = float32), array([classLabel]*nImages)
-
-        
--- a/python/events.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,322 +0,0 @@
-#! /usr/bin/env python
-'''Libraries for events
-Interactions, pedestrian crossing...'''
-
-import moving, prediction, indicators, utils, cvutils, ml
-from base import VideoFilenameAddable
-
-import numpy as np
-
-import multiprocessing
-import itertools
-
-
-def findRoute(prototypes,objects,i,j,noiseEntryNums,noiseExitNums,minSimilarity= 0.3, spatialThreshold=1.0, delta=180):
-    if i[0] not in noiseEntryNums: 
-        prototypesRoutes= [ x for x in sorted(prototypes.keys()) if i[0]==x[0]]
-    elif i[1] not in noiseExitNums:
-        prototypesRoutes=[ x for x in sorted(prototypes.keys()) if i[1]==x[1]]
-    else:
-        prototypesRoutes=[x for x in sorted(prototypes.keys())]
-    routeSim={}
-    lcss = utils.LCSS(similarityFunc=lambda x,y: (distanceForLCSS(x,y) <= spatialThreshold),delta=delta)
-    for y in prototypesRoutes: 
-        if y in prototypes.keys():
-            prototypesIDs=prototypes[y]
-            similarity=[]
-            for x in prototypesIDs:
-                s=lcss.computeNormalized(objects[j].positions, objects[x].positions)
-                similarity.append(s)
-            routeSim[y]=max(similarity)
-    route=max(routeSim, key=routeSim.get)
-    if routeSim[route]>=minSimilarity:
-        return route
-    else:
-        return i
-
-def getRoute(obj,prototypes,objects,noiseEntryNums,noiseExitNums,useDestination=True):
-    route=(obj.startRouteID,obj.endRouteID)
-    if useDestination:
-        if route not in prototypes.keys():
-            route= findRoute(prototypes,objects,route,obj.getNum(),noiseEntryNums,noiseExitNums)
-    return route
-
-class Interaction(moving.STObject, VideoFilenameAddable):
-    '''Class for an interaction between two road users 
-    or a road user and an obstacle
-    
-    link to the moving objects
-    contains the indicators in a dictionary with the names as keys
-    '''
-
-    categories = {'Head On': 0,
-                  'rearend': 1,
-                  'side': 2,
-                  'parallel': 3}
-
-    indicatorNames = ['Collision Course Dot Product',
-                      'Collision Course Angle',
-                      'Distance',
-                      'Minimum Distance',
-                      'Velocity Angle',
-                      'Speed Differential',
-                      'Collision Probability',
-                      'Time to Collision', # 7
-                      'Probability of Successful Evasive Action',
-                      'predicted Post Encroachment Time']
-
-    indicatorNameToIndices = utils.inverseEnumeration(indicatorNames)
-
-    indicatorShortNames = ['CCDP',
-                           'CCA',
-                           'Dist',
-                           'MinDist',
-                           'VA',
-                           'SD',
-                           'PoC',
-                           'TTC',
-                           'P(SEA)',
-                           'pPET']
-
-    indicatorUnits = ['',
-                      'rad',
-                      'm',
-                      'm',
-                      'rad',
-                      'm/s',
-                      '',
-                      's',
-                      '',
-                      '']
-
-    timeIndicators = ['Time to Collision', 'predicted Post Encroachment Time']
-
-    def __init__(self, num = None, timeInterval = None, roaduserNum1 = None, roaduserNum2 = None, roadUser1 = None, roadUser2 = None, categoryNum = None):
-        moving.STObject.__init__(self, num, timeInterval)
-        if timeInterval is None and roadUser1 is not None and roadUser2 is not None:
-            self.timeInterval = roadUser1.commonTimeInterval(roadUser2)
-        self.roadUser1 = roadUser1
-        self.roadUser2 = roadUser2
-        if roaduserNum1 is not None and roaduserNum2 is not None:
-            self.roadUserNumbers = set([roaduserNum1, roaduserNum2])
-        elif roadUser1 is not None and roadUser2 is not None:
-            self.roadUserNumbers = set([roadUser1.getNum(), roadUser2.getNum()])
-        else:
-            self.roadUserNumbers = None
-        self.categoryNum = categoryNum
-        self.indicators = {}
-        self.interactionInterval = None
-         # list for collison points and crossing zones
-        self.collisionPoints = None
-        self.crossingZones = None
-
-    def getRoadUserNumbers(self):
-        return self.roadUserNumbers
-
-    def setRoadUsers(self, objects):
-        nums = sorted(list(self.getRoadUserNumbers()))
-        if nums[0]<len(objects) and objects[nums[0]].getNum() == nums[0]:
-            self.roadUser1 = objects[nums[0]]
-        if nums[1]<len(objects) and objects[nums[1]].getNum() == nums[1]:
-            self.roadUser2 = objects[nums[1]]
-
-        if self.roadUser1 is None or self.roadUser2 is None:
-            self.roadUser1 = None
-            self.roadUser2 = None
-            i = 0
-            while i < len(objects) and self.roadUser2 is None:
-                if objects[i].getNum() in nums:
-                    if self.roadUser1 is None:
-                        self.roadUser1 = objects[i]
-                    else:
-                        self.roadUser2 = objects[i]
-                i += 1
-
-    def getIndicator(self, indicatorName):
-        return self.indicators.get(indicatorName, None)
-
-    def addIndicator(self, indicator):
-        if indicator is not None:
-            self.indicators[indicator.name] = indicator
-
-    def getIndicatorValueAtInstant(self, indicatorName, instant):
-        indicator = self.getIndicator(indicatorName)
-        if indicator is not None:
-            return indicator[instant]
-        else:
-            return None
-
-    def getIndicatorValuesAtInstant(self, instant):
-        '''Returns list of indicator values at instant
-        as dict (with keys from indicators dict)'''
-        values = {}
-        for k, indicator in self.indicators.iteritems():
-            values[k] = indicator[instant]
-        return values
-        
-    def plot(self, options = '', withOrigin = False, timeStep = 1, withFeatures = False, **kwargs):
-        self.roadUser1.plot(options, withOrigin, timeStep, withFeatures, **kwargs)
-        self.roadUser2.plot(options, withOrigin, timeStep, withFeatures, **kwargs)
-
-    def plotOnWorldImage(self, nPixelsPerUnitDistance, options = '', withOrigin = False, timeStep = 1, **kwargs):
-        self.roadUser1.plotOnWorldImage(nPixelsPerUnitDistance, options, withOrigin, timeStep, **kwargs)
-        self.roadUser2.plotOnWorldImage(nPixelsPerUnitDistance, options, withOrigin, timeStep, **kwargs)
-
-    def play(self, videoFilename, homography = None, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1.):
-        if self.roadUser1 is not None and self.roadUser2 is not None:
-            cvutils.displayTrajectories(videoFilename, [self.roadUser1, self.roadUser2], homography = homography, firstFrameNum = self.getFirstInstant(), lastFrameNumArg = self.getLastInstant(), undistort = undistort, intrinsicCameraMatrix = intrinsicCameraMatrix, distortionCoefficients = distortionCoefficients, undistortedImageMultiplication = undistortedImageMultiplication)
-        else:
-            print('Please set the interaction road user attributes roadUser1 and roadUser1 through the method setRoadUsers')
-
-    def computeIndicators(self):
-        '''Computes the collision course cosine only if the cosine is positive'''
-        collisionCourseDotProducts = {}#[0]*int(self.timeInterval.length())
-        collisionCourseAngles = {}
-        velocityAngles = {}
-        distances = {}#[0]*int(self.timeInterval.length())
-        speedDifferentials = {}
-        interactionInstants = []
-        for instant in self.timeInterval:
-            deltap = self.roadUser1.getPositionAtInstant(instant)-self.roadUser2.getPositionAtInstant(instant)
-            v1 = self.roadUser1.getVelocityAtInstant(instant)
-            v2 = self.roadUser2.getVelocityAtInstant(instant)
-            deltav = v2-v1
-            velocityAngles[instant] = np.arccos(moving.Point.dot(v1, v2)/(v1.norm2()*v2.norm2()))
-            collisionCourseDotProducts[instant] = moving.Point.dot(deltap, deltav)
-            distances[instant] = deltap.norm2()
-            speedDifferentials[instant] = deltav.norm2()
-            if collisionCourseDotProducts[instant] > 0:
-                interactionInstants.append(instant)
-            if distances[instant] != 0 and speedDifferentials[instant] != 0:
-                collisionCourseAngles[instant] = np.arccos(collisionCourseDotProducts[instant]/(distances[instant]*speedDifferentials[instant]))
-
-        if len(interactionInstants) >= 2:
-            self.interactionInterval = moving.TimeInterval(interactionInstants[0], interactionInstants[-1])
-        else:
-            self.interactionInterval = moving.TimeInterval()
-        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[0], collisionCourseDotProducts))
-        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[1], collisionCourseAngles))
-        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[2], distances, mostSevereIsMax = False))
-        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[4], velocityAngles))
-        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[5], speedDifferentials))
-
-        # if we have features, compute other indicators
-        if self.roadUser1.hasFeatures() and self.roadUser2.hasFeatures():
-            minDistances={}
-            for instant in self.timeInterval:
-                minDistances[instant] = moving.MovingObject.minDistance(self.roadUser1, self.roadUser2, instant)
-            self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[3], minDistances, mostSevereIsMax = False))
-
-    def computeCrossingsCollisions(self, predictionParameters, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, timeInterval = None, nProcesses = 1, usePrototypes=False, route1= (-1,-1), route2=(-1,-1), prototypes={}, secondStepPrototypes={}, nMatching={}, objects=[], noiseEntryNums=[], noiseExitNums=[], minSimilarity=0.1, mostMatched=None, useDestination=True, useSpeedPrototype=True, acceptPartialLength=30, step=1):
-        '''Computes all crossing and collision points at each common instant for two road users. '''
-        TTCs = {}
-        if usePrototypes:
-            route1= getRoute(self.roadUser1,prototypes,objects,noiseEntryNums,noiseExitNums,useDestination)
-            route2= getRoute(self.roadUser2,prototypes,objects,noiseEntryNums,noiseExitNums,useDestination)
-
-        if timeInterval is not None:
-            commonTimeInterval = timeInterval
-        else:
-            commonTimeInterval = self.timeInterval
-        self.collisionPoints, crossingZones = predictionParameters.computeCrossingsCollisions(self.roadUser1, self.roadUser2, collisionDistanceThreshold, timeHorizon, computeCZ, debug, commonTimeInterval, nProcesses,usePrototypes,route1,route2,prototypes,secondStepPrototypes,nMatching,objects,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched,useDestination,useSpeedPrototype,acceptPartialLength, step)
-        for i, cp in self.collisionPoints.iteritems():
-            TTCs[i] = prediction.SafetyPoint.computeExpectedIndicator(cp)
-        if len(TTCs) > 0:
-            self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[7], TTCs, mostSevereIsMax=False))
-        
-        # crossing zones and pPET
-        if computeCZ:
-            self.crossingZones = crossingZones
-            pPETs = {}
-            for i, cz in self.crossingZones.iteritems():
-                pPETs[i] = prediction.SafetyPoint.computeExpectedIndicator(cz)
-            self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[9], pPETs, mostSevereIsMax=False))
-        # TODO add probability of collision, and probability of successful evasive action
-
-    def computePET(self, collisionDistanceThreshold):
-        # TODO add crossing zone
-        self.pet = moving.MovingObject.computePET(self.roadUser1, self.roadUser2, collisionDistanceThreshold)
-
-    def setCollision(self, collision):
-        '''indicates if it is a collision: argument should be boolean'''
-        self.collision = collision
-
-    def isCollision(self):
-        if hasattr(self, 'collision'):
-            return self.collision
-        else:
-            return None
-
-    def getCollisionPoints(self):
-        return self.collisionPoints
-
-    def getCrossingZones(self):
-        return self.crossingZones
-
-def createInteractions(objects, _others = None):
-    '''Create all interactions of two co-existing road users'''
-    if _others is not None:
-        others = _others
-
-    interactions = []
-    num = 0
-    for i in xrange(len(objects)):
-        if _others is None:
-            others = objects[:i]
-        for j in xrange(len(others)):
-            commonTimeInterval = objects[i].commonTimeInterval(others[j])
-            if not commonTimeInterval.empty():
-                interactions.append(Interaction(num, commonTimeInterval, objects[i].num, others[j].num, objects[i], others[j]))
-                num += 1
-    return interactions
-
-def findInteraction(interactions, roadUserNum1, roadUserNum2):
-    'Returns the right interaction in the set'
-    i=0
-    while i<len(interactions) and set([roadUserNum1, roadUserNum2]) != interactions[i].getRoadUserNumbers():
-        i+=1
-    if i<len(interactions):
-        return interactions[i]
-    else:
-        return None
-
-def aggregateSafetyPoints(interactions, pointType = 'collision'):
-    '''Put all collision points or crossing zones in a list for display'''
-    allPoints = []
-    if pointType == 'collision':
-        for i in interactions:
-            for points in i.collisionPoints.values():
-                allPoints += points
-    elif pointType == 'crossing':
-        for i in interactions:
-            for points in i.crossingZones.values():
-                allPoints += points
-    else:
-        print('unknown type of point: '+pointType)
-    return allPoints
-
-def prototypeCluster(interactions, similarities, indicatorName, minSimilarity, similarityFunc = None, minClusterSize = None, randomInitialization = False):
-    return ml.prototypeCluster([inter.getIndicator(indicatorName) for inter in interactions], similarities, minSimilarity, similarityFunc, minClusterSize, randomInitialization)
-
-class Crossing(moving.STObject):
-    '''Class for the event of a street crossing
-
-    TODO: detecter passage sur la chaussee
-    identifier origines et destination (ou uniquement chaussee dans FOV)
-    carac traversee
-    detecter proximite veh (retirer si trop similaire simultanement
-    carac interaction'''
-    
-    def __init__(self, roaduserNum = None, num = None, timeInterval = None):
-        moving.STObject.__init__(self, num, timeInterval)
-        self.roaduserNum = roaduserNum
-
-    
-
-if __name__ == "__main__":
-    import doctest
-    import unittest
-    suite = doctest.DocFileSuite('tests/events.txt')
-    #suite = doctest.DocTestSuite()
-    unittest.TextTestRunner().run(suite)
-    
--- a/python/indicators.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,247 +0,0 @@
-#! /usr/bin/env python
-'''Class for indicators, temporal indicators, and safety indicators'''
-
-import moving
-#import matplotlib.nxutils as nx
-from matplotlib.pyplot import plot, ylim
-from matplotlib.pylab import find
-from numpy import array, arange, mean, floor, mean
-
-
-def multivariateName(indicatorNames):
-    return '_'.join(indicatorNames)
-
-# need for a class representing the indicators, their units, how to print them in graphs...
-class TemporalIndicator(object):
-    '''Class for temporal indicators
-    i.e. indicators that take a value at specific instants
-
-    values should be
-    * a dict, for the values at specific time instants
-    * or a list with a time interval object if continuous measurements
-
-    it should have more information like name, unit'''
-    
-    def __init__(self, name, values, timeInterval = None, maxValue = None):
-        self.name = name
-        if timeInterval is None:
-            self.values = values
-            instants = sorted(self.values.keys())
-            if len(instants) > 0:
-                self.timeInterval = moving.TimeInterval(instants[0], instants[-1])
-            else:
-                self.timeInterval = moving.TimeInterval()
-        else:
-            assert len(values) == timeInterval.length()
-            self.timeInterval = timeInterval
-            self.values = {}
-            for i in xrange(int(round(self.timeInterval.length()))):
-                self.values[self.timeInterval[i]] = values[i]
-        self.maxValue = maxValue
-
-    def __len__(self):
-        return len(self.values)
-
-    def empty(self):
-        return len(self.values) == 0
-
-    def __getitem__(self, t):
-        'Returns the value at time t'
-        return self.values.get(t)
-
-    def getIthValue(self, i):
-        sortedKeys = sorted(self.values.keys())
-        if 0<=i<len(sortedKeys):
-            return self.values[sortedKeys[i]]
-        else:
-            return None
-
-    def __iter__(self):
-        self.iterInstantNum = 0 # index in the interval or keys of the dict
-        return self
-
-    def next(self):
-        if self.iterInstantNum >= len(self.values):#(self.timeInterval and self.iterInstantNum>=self.timeInterval.length())\
-           #     or (self.iterInstantNum >= self.values)
-            raise StopIteration
-        else:
-            self.iterInstantNum += 1
-            return self.getIthValue(self.iterInstantNum-1)
-
-    def getTimeInterval(self):
-        return self.timeInterval
-
-    def getName(self):
-        return self.name
-
-    def getValues(self):
-        return [self.__getitem__(t) for t in self.timeInterval]
-
-    def plot(self, options = '', xfactor = 1., yfactor = 1., timeShift = 0, **kwargs):
-        if self.getTimeInterval().length() == 1:
-            marker = 'o'
-        else:
-            marker = ''
-        time = sorted(self.values.keys())
-        plot([(x+timeShift)/xfactor for x in time], [self.values[i]/yfactor for i in time], options+marker, **kwargs)
-        if self.maxValue:
-            ylim(ymax = self.maxValue)
-
-    @classmethod
-    def createMultivariate(cls, indicators):
-        '''Creates a new temporal indicator where the value at each instant is a list 
-        of the indicator values at the instant, in the same order
-        the time interval will be the union of the time intervals of the indicators
-        name is concatenation of the indicator names'''
-        if len(indicators) < 2:
-            print('Error creating multivariate indicator with only {} indicator'.format(len(indicators)))
-            return None
-
-        timeInterval = moving.TimeInterval.unionIntervals([indic.getTimeInterval() for indic in indicators])
-        values = {}
-        for t in timeInterval:
-            tmpValues = [indic[t] for indic in indicators]
-            uniqueValues = set(tmpValues)
-            if len(uniqueValues) >= 2 or uniqueValues.pop() is not None:
-                values[t] = tmpValues
-        return cls(multivariateName([indic.name for indic in indicators]), values)
-
-# TODO static method avec class en parametre pour faire des indicateurs agrege, list par instant
-
-def l1Distance(x, y): # lambda x,y:abs(x-y)
-    if x is None or y is None:
-        return float('inf')
-    else:
-        return abs(x-y)
-
-def multiL1Matching(x, y, thresholds, proportionMatching=1.):
-    n = 0
-    nDimensions = len(x)
-    for i in range(nDimensions):
-        if l1Distance(x[i], y[i]) <= thresholds[i]:
-            n += 1
-    return n >= nDimensions*proportionMatching
-
-from utils import LCSS as utilsLCSS
-
-class LCSS(utilsLCSS):
-    '''Adapted LCSS class for indicators, same pattern'''
-    def __init__(self, similarityFunc, delta = float('inf'), minLength = 0, aligned = False, lengthFunc = min):
-        utilsLCSS.__init__(self, similarityFunc = similarityFunc, delta = delta, aligned = aligned, lengthFunc = lengthFunc)
-        self.minLength = minLength
-
-    def checkIndicator(self, indicator):
-        return indicator is not None and len(indicator) >= self.minLength
-
-    def compute(self, indicator1, indicator2, computeSubSequence = False):
-        if self.checkIndicator(indicator1) and self.checkIndicator(indicator2):
-            return self._compute(indicator1.getValues(), indicator2.getValues(), computeSubSequence)
-        else:
-            return 0
-
-    def computeNormalized(self, indicator1, indicator2, computeSubSequence = False):
-        if self.checkIndicator(indicator1) and self.checkIndicator(indicator2):
-            return self._computeNormalized(indicator1.getValues(), indicator2.getValues(), computeSubSequence)
-        else:
-            return 0.
-
-    def computeDistance(self, indicator1, indicator2, computeSubSequence = False):
-        if self.checkIndicator(indicator1) and self.checkIndicator(indicator2):
-            return self._computeDistance(indicator1.getValues(), indicator2.getValues(), computeSubSequence)
-        else:
-            return 1.
-        
-class SeverityIndicator(TemporalIndicator):
-    '''Class for severity indicators 
-    field mostSevereIsMax is True 
-    if the most severe value taken by the indicator is the maximum'''
-
-    def __init__(self, name, values, timeInterval=None, mostSevereIsMax=True, maxValue = None): 
-        TemporalIndicator.__init__(self, name, values, timeInterval, maxValue)
-        self.mostSevereIsMax = mostSevereIsMax
-
-    def getMostSevereValue(self, minNInstants=1): # TODO use np.percentile
-        values = array(self.values.values())
-        indices = range(len(values))
-        if len(indices) >= minNInstants:
-            values = sorted(values[indices], reverse = self.mostSevereIsMax) # inverted if most severe is max -> take the first values
-            return mean(values[:minNInstants])
-        else:
-            return None
-
-    def getInstantOfMostSevereValue(self):
-        '''Returns the instant at which the indicator reaches its most severe value'''
-        if self.mostSevereIsMax:
-            return max(self.values, key=self.values.get)
-        else:
-            return min(self.values, key=self.values.get)
-
-# functions to aggregate discretized maps of indicators
-# TODO add values in the cells between the positions (similar to discretizing vector graphics to bitmap)
-
-def indicatorMap(indicatorValues, trajectory, squareSize):
-    '''Returns a dictionary 
-    with keys for the indices of the cells (squares)
-    in which the trajectory positions are located
-    at which the indicator values are attached
-
-    ex: speeds and trajectory'''
-
-    assert len(indicatorValues) == trajectory.length()
-    indicatorMap = {}
-    for k in xrange(trajectory.length()):
-        p = trajectory[k]
-        i = floor(p.x/squareSize)
-        j = floor(p.y/squareSize)
-        if indicatorMap.has_key((i,j)):
-            indicatorMap[(i,j)].append(indicatorValues[k])
-        else:
-            indicatorMap[(i,j)] = [indicatorValues[k]]
-    for k in indicatorMap.keys():
-        indicatorMap[k] = mean(indicatorMap[k])
-    return indicatorMap
-
-# def indicatorMapFromPolygon(value, polygon, squareSize):
-#     '''Fills an indicator map with the value within the polygon
-#     (array of Nx2 coordinates of the polygon vertices)'''
-#     points = []
-#     for x in arange(min(polygon[:,0])+squareSize/2, max(polygon[:,0]), squareSize):
-#         for y in arange(min(polygon[:,1])+squareSize/2, max(polygon[:,1]), squareSize):
-#             points.append([x,y])
-#     inside = nx.points_inside_poly(array(points), polygon)
-#     indicatorMap = {}
-#     for i in xrange(len(inside)):
-#         if inside[i]:
-#             indicatorMap[(floor(points[i][0]/squareSize), floor(points[i][1]/squareSize))] = 0
-#     return indicatorMap
-
-def indicatorMapFromAxis(value, limits, squareSize):
-    '''axis = [xmin, xmax, ymin, ymax] '''
-    indicatorMap = {}
-    for x in arange(limits[0], limits[1], squareSize):
-        for y in arange(limits[2], limits[3], squareSize):
-            indicatorMap[(floor(x/squareSize), floor(y/squareSize))] = value
-    return indicatorMap
-
-def combineIndicatorMaps(maps, squareSize, combinationFunction):
-    '''Puts many indicator maps together 
-    (averaging the values in each cell 
-    if more than one maps has a value)'''
-    indicatorMap = {}
-    for m in maps:
-        for k,v in m.iteritems():
-            if indicatorMap.has_key(k):
-                indicatorMap[k].append(v)
-            else:
-                indicatorMap[k] = [v]
-    for k in indicatorMap.keys():
-        indicatorMap[k] = combinationFunction(indicatorMap[k])
-    return indicatorMap
-
-if __name__ == "__main__":
-    import doctest
-    import unittest
-    suite = doctest.DocFileSuite('tests/indicators.txt')
-    unittest.TextTestRunner().run(suite)
-#     #doctest.testmod()
-#     #doctest.testfile("example.txt")
--- a/python/metadata.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,174 +0,0 @@
-# from moving import Point
-
-from datetime import datetime
-from os import path
-
-from sqlalchemy import create_engine, Column, Integer, Float, DateTime, String, ForeignKey
-from sqlalchemy.orm import relationship, backref, sessionmaker
-from sqlalchemy.ext.declarative import declarative_base
-
-from utils import datetimeFormat
-
-Base = declarative_base()
-
-class Site(Base):
-    __tablename__ = 'sites'
-    idx = Column(Integer, primary_key=True)
-    name = Column(String) # same as path, relative to the database position
-    description = Column(String) # longer names, eg intersection of road1 and road2
-    xcoordinate = Column(Float)  # ideally moving.Point, but needs to be 
-    ycoordinate = Column(Float)
-    mapImageFilename = Column(String) # path to filename, relative to site name, ie sitename/mapImageFilename
-    nUnitsPerPixel = Column(Float) # number of units of distance per pixel in map image
-    
-    def __init__(self, name, description = "", xcoordinate = None, ycoordinate = None, mapImageFilename = None, nUnitsPerPixel = 1.):
-        self.name = name
-        self.description = description
-        self.xcoordinate = xcoordinate
-        self.ycoordinate = ycoordinate
-        self.mapImageFilename = mapImageFilename
-        self.nUnitsPerPixel = nUnitsPerPixel
-
-    def getFilename(self):
-        return self.name
-
-class EnvironementalFactors(Base):
-    '''Represents any environmental factors that may affect the results, in particular
-    * changing weather conditions
-    * changing road configuration, geometry, signalization, etc.
-    ex: sunny, rainy, before counter-measure, after counter-measure'''
-    __tablename__ = 'environmental_factors'
-    idx = Column(Integer, primary_key=True)
-    startTime = Column(DateTime)
-    endTime = Column(DateTime)
-    description = Column(String) # eg sunny, before, after
-    siteIdx = Column(Integer, ForeignKey('sites.idx'))
-
-    site = relationship("Site", backref=backref('environmental_factors', order_by = idx))
-
-    def __init__(self, startTime, endTime, description, site):
-        'startTime is passed as string in utils.datetimeFormat, eg 2011-06-22 10:00:39'
-        self.startTime = datetime.strptime(startTime, datetimeFormat)
-        self.endTime = datetime.strptime(endTime, datetimeFormat)
-        self.description = description
-        self.site = site
-
-class CameraView(Base):
-    __tablename__ = 'camera_views'
-    idx = Column(Integer, primary_key=True)
-    frameRate = Column(Float)
-    homographyFilename = Column(String) # path to homograph filename, relative to the site name
-    cameraCalibrationFilename = Column(String) # path to full camera calibration, relative to the site name
-    siteIdx = Column(Integer, ForeignKey('sites.idx'))
-    homographyDistanceUnit = Column(String, default = 'm') # make sure it is default in the database
-    configurationFilename = Column(String) # path to configuration .cfg file, relative to site name
-
-    site = relationship("Site", backref=backref('camera_views', order_by = idx))
-
-    def __init__(self, frameRate, homographyFilename, cameraCalibrationFilename, site, configurationFilename):
-        self.frameRate = frameRate
-        self.homographyFilename = homographyFilename
-        self.site = site
-        self.configurationFilename = configurationFilename
-
-    def getHomographyFilename(self, relativeToSiteFilename = True):
-        if relativeToSiteFilename:
-            return self.site.getFilename()+path.sep+self.homographyFilename
-        else:
-            return self.homographyFilename
-
-class Alignment(Base):
-    __tablename__ = 'alignments'
-    idx = Column(Integer, primary_key=True)
-    cameraViewIdx = Column(Integer, ForeignKey('camera_views.idx'))
-    
-    cameraView = relationship("CameraView", backref=backref('alignments', order_by = idx))
-
-    def __init__(self, cameraView):
-        self.cameraView = cameraView
-
-class Point(Base):
-    __tablename__ = 'points'
-    alignmentIdx = Column(Integer, ForeignKey('alignments.idx'), primary_key=True)
-    index = Column(Integer, primary_key=True) # order of points in this alignment
-    x = Column(Float)
-    y = Column(Float)
-
-    alignment = relationship("Alignment", backref=backref('points', order_by = index))
-    
-    def __init__(self, alignmentIdx, index, x, y):
-        self.alignmentIdx = alignmentIdx
-        self.index = index
-        self.x = x
-        self.y = y
-
-class VideoSequence(Base):
-    __tablename__ = 'video_sequences'
-    idx = Column(Integer, primary_key=True)
-    name = Column(String) # path relative to the the site name
-    startTime = Column(DateTime)
-    duration = Column(Float) # video sequence duration
-    durationUnit = Column(String, default = 's')
-    siteIdx = Column(Integer, ForeignKey('sites.idx'))
-    cameraViewIdx = Column(Integer, ForeignKey('camera_views.idx'))
-    configurationFilename = Column(String)
-
-    site = relationship("Site", backref=backref('video_sequences', order_by = idx))
-    cameraView = relationship("CameraView", backref=backref('video_sequences', order_by = idx))
-
-    def __init__(self, name, startTime, duration, site, cameraView, configurationFilename = None):
-        'startTime is passed as string in utils.datetimeFormat, eg 2011-06-22 10:00:39'
-        self.name = name
-        self.startTime = datetime.strptime(startTime, datetimeFormat)
-        self.duration = duration
-        self.site = site
-        self.cameraView = cameraView
-        self.configurationFilename = configurationFilename
-
-    def getVideoSequenceFilename(self, relativeToSiteFilename = True):
-        if relativeToSiteFilename:
-            return self.site.getFilename()+path.sep+self.name
-        else:
-            return self.name
-
-        #def getConfigurationFilename(self):
-        #'returns the local configuration filename, or the one of the camera view otherwise'
-
-# add class for Analysis: foreign key VideoSequenceId, dataFilename, configFilename (get the one from camera view by default), mask? (no, can be referenced in the tracking cfg file)
-
-# class SiteDescription(Base): # list of lines and polygons describing the site, eg for sidewalks, center lines
-
-# class Analysis(Base): # parameters necessary for processing the data: free form
-# eg bounding box depends on camera view, tracking configuration depends on camera view 
-# results: sqlite
-
-def createDatabase(filename):
-    'creates a session to query the filename'
-    engine = create_engine('sqlite:///'+filename)
-    Base.metadata.create_all(engine)
-    Session = sessionmaker(bind=engine)
-    return Session()
-
-def connectDatabase(filename):
-    'creates a session to query the filename'
-    engine = create_engine('sqlite:///'+filename)
-    Session = sessionmaker(bind=engine)
-    return Session()
-
-def initializeSites(session, directoryName):
-    '''Initializes default site objects and Camera Views
-    
-    eg somedirectory/montreal/ contains intersection1, intersection2, etc.
-    The site names would be somedirectory/montreal/intersection1, somedirectory/montreal/intersection2, etc.'''
-    from os import listdir, path
-    sites = []
-    cameraViews = []
-    names = listdir(directoryName)
-    for name in names:
-        if path.isdir(directoryName+'/'+name):
-            sites.append(Site(directoryName+'/'+name, None))
-            cameraViews.append(CameraView(-1, None, None, sites[-1], None))
-    session.add_all(sites)
-    session.add_all(cameraViews)
-    session.commit()
-# TODO crawler for video files?
--- a/python/ml.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,235 +0,0 @@
-#! /usr/bin/env python
-'''Libraries for machine learning algorithms'''
-
-from os import path
-from random import shuffle
-from copy import copy, deepcopy
-
-import numpy as np
-from matplotlib.pylab import text
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-from scipy.cluster.vq import kmeans, whiten, vq
-from sklearn import mixture
-import cv2
-
-import utils
-
-#####################
-# OpenCV ML models
-#####################
-
-class StatModel(object):
-    '''Abstract class for loading/saving model
-
-    Issues with OpenCV, does not seem to work'''    
-    def load(self, filename):
-        if path.exists(filename):
-            self.model.load(filename)
-        else:
-            print('Provided filename {} does not exist: model not loaded!'.format(filename))
-
-    def save(self, filename):
-        self.model.save(filename)
-
-class SVM(StatModel):
-    '''wrapper for OpenCV SimpleVectorMachine algorithm'''
-    def __init__(self, svmType = cv2.ml.SVM_C_SVC, kernelType = cv2.ml.SVM_RBF, degree = 0, gamma = 1, coef0 = 0, Cvalue = 1, nu = 0, p = 0):
-        self.model = cv2.ml.SVM_create()
-        self.model.setType(svmType)
-        self.model.setKernel(kernelType)
-        self.model.setDegree(degree)
-        self.model.setGamma(gamma)
-        self.model.setCoef0(coef0)
-        self.model.setC(Cvalue)
-        self.model.setNu(nu)
-        self.model.setP(p)
-
-    def load(self, filename):
-        if path.exists(filename):
-            cv2.ml.SVM_load(filename)
-        else:
-            print('Provided filename {} does not exist: model not loaded!'.format(filename))
-
-    def train(self, samples, layout, responses):
-        self.model.train(samples, layout, responses)
-
-    def predict(self, hog):
-        return self.model.predict(hog)
-
-
-#####################
-# Clustering
-#####################
-
-class Centroid(object):
-    'Wrapper around instances to add a counter'
-
-    def __init__(self, instance, nInstances = 1):
-        self.instance = instance
-        self.nInstances = nInstances
-
-    # def similar(instance2):
-    #     return self.instance.similar(instance2)
-
-    def add(self, instance2):
-        self.instance = self.instance.multiply(self.nInstances)+instance2
-        self.nInstances += 1
-        self.instance = self.instance.multiply(1/float(self.nInstances))
-
-    def average(c):
-        inst = self.instance.multiply(self.nInstances)+c.instance.multiply(instance.nInstances)
-        inst.multiply(1/(self.nInstances+instance.nInstances))
-        return Centroid(inst, self.nInstances+instance.nInstances)
-
-    def plot(self, options = ''):
-        self.instance.plot(options)
-        text(self.instance.position.x+1, self.instance.position.y+1, str(self.nInstances))
-
-def kMedoids(similarityMatrix, initialCentroids = None, k = None):
-    '''Algorithm that clusters any dataset based on a similarity matrix
-    Either the initialCentroids or k are passed'''
-    pass
-
-def assignCluster(data, similarFunc, initialCentroids = None, shuffleData = True):
-    '''k-means algorithm with similarity function
-    Two instances should be in the same cluster if the sameCluster function returns true for two instances. It is supposed that the average centroid of a set of instances can be computed, using the function. 
-    The number of clusters will be determined accordingly
-
-    data: list of instances
-    averageCentroid: '''
-    localdata = copy(data) # shallow copy to avoid modifying data
-    if shuffleData:
-        shuffle(localdata)
-    if initialCentroids is None:
-        centroids = [Centroid(localdata[0])]
-    else:
-        centroids = deepcopy(initialCentroids)
-    for instance in localdata[1:]:
-        i = 0
-        while i<len(centroids) and not similarFunc(centroids[i].instance, instance):
-            i += 1
-        if i == len(centroids):
-            centroids.append(Centroid(instance))
-        else:
-            centroids[i].add(instance)
-
-    return centroids
-
-# TODO recompute centroids for each cluster: instance that minimizes some measure to all other elements
-
-def spectralClustering(similarityMatrix, k, iter=20):
-	'''Spectral Clustering algorithm'''
-	n = len(similarityMatrix)
-	# create Laplacian matrix
-	rowsum = np.sum(similarityMatrix,axis=0)
-	D = np.diag(1 / np.sqrt(rowsum))
-	I = np.identity(n)
-	L = I - np.dot(D,np.dot(similarityMatrix,D))
-	# compute eigenvectors of L
-	U,sigma,V = np.linalg.svd(L)
-	# create feature vector from k first eigenvectors
-	# by stacking eigenvectors as columns
-	features = np.array(V[:k]).T
-	# k-means
-	features = whiten(features)
-	centroids,distortion = kmeans(features,k, iter)
-	code,distance = vq(features,centroids) # code starting from 0 (represent first cluster) to k-1 (last cluster)
-	return code,sigma	
-
-def prototypeCluster(instances, similarities, minSimilarity, similarityFunc = None, minClusterSize = None, randomInitialization = False):
-    '''Finds exemplar (prototype) instance that represent each cluster
-    Returns the prototype indices (in the instances list) and the cluster label of each instance
-
-    the elements in the instances list must have a length (method __len__), or one can use the random initialization
-    the positions in the instances list corresponds to the similarities
-    if similarityFunc is provided, the similarities are calculated as needed (this is faster) if not in similarities (negative if not computed)
-    similarities must still be allocated with the right size
-
-    if an instance is different enough (<minSimilarity), 
-    it will become a new prototype. 
-    Non-prototype instances will be assigned to an existing prototype
-    if minClusterSize is not None, the clusters will be refined by removing iteratively the smallest clusters
-    and reassigning all elements in the cluster until no cluster is smaller than minClusterSize'''
-
-    # sort instances based on length
-    indices = range(len(instances))
-    if randomInitialization:
-        indices = np.random.permutation(indices)
-    else:
-        def compare(i, j):
-            if len(instances[i]) > len(instances[j]):
-                return -1
-            elif len(instances[i]) == len(instances[j]):
-                return 0
-            else:
-                return 1
-        indices.sort(compare)
-    # go through all instances
-    prototypeIndices = [indices[0]]
-    for i in indices[1:]:
-        if similarityFunc is not None:
-            for j in prototypeIndices:
-                if similarities[i][j] < 0:
-                    similarities[i][j] = similarityFunc(instances[i], instances[j])
-                    similarities[j][i] = similarities[i][j]
-        if similarities[i][prototypeIndices].max() < minSimilarity:
-             prototypeIndices.append(i)
-
-    # assignment
-    indices = [i for i in range(similarities.shape[0]) if i not in prototypeIndices]
-    assign = True
-    while assign:
-        labels = [-1]*similarities.shape[0]
-        for i in prototypeIndices:
-            labels[i] = i
-        for i in indices:
-            if similarityFunc is not None:
-                for j in prototypeIndices:
-                    if similarities[i][j] < 0:
-                        similarities[i][j] = similarityFunc(instances[i], instances[j])
-                        similarities[j][i] = similarities[i][j]
-            prototypeIdx = similarities[i][prototypeIndices].argmax()
-            if similarities[i][prototypeIndices[prototypeIdx]] >= minSimilarity:
-                labels[i] = prototypeIndices[prototypeIdx]
-            else:
-                labels[i] = -1 # outlier
-        clusterSizes = {i: sum(np.array(labels) == i) for i in prototypeIndices}
-        smallestClusterIndex = min(clusterSizes, key = clusterSizes.get)
-        assign = (clusterSizes[smallestClusterIndex] < minClusterSize)
-        if assign:
-            prototypeIndices.remove(smallestClusterIndex)
-            indices.append(smallestClusterIndex)
-
-    return prototypeIndices, labels
-
-def computeClusterSizes(labels, prototypeIndices, outlierIndex = -1):
-    clusterSizes = {i: sum(np.array(labels) == i) for i in prototypeIndices}
-    clusterSizes['outlier'] = sum(np.array(labels) == outlierIndex)
-    return clusterSizes
-
-# Gaussian Mixture Models
-def plotGMMClusters(model, dataset = None, fig = None, colors = utils.colors, nUnitsPerPixel = 1., alpha = 0.3):
-    '''plot the ellipse corresponding to the Gaussians
-    and the predicted classes of the instances in the dataset'''
-    if fig is None:
-        fig = plt.figure()
-    labels = model.predict(dataset)
-    tmpDataset = dataset/nUnitsPerPixel
-    for i in xrange(model.n_components):
-        mean = model.means_[i]/nUnitsPerPixel
-        covariance = model.covars_[i]/nUnitsPerPixel
-        if dataset is not None:
-            plt.scatter(tmpDataset[labels == i, 0], tmpDataset[labels == i, 1], .8, color=colors[i])
-        plt.annotate(str(i), xy=(mean[0]+1, mean[1]+1))
-
-        # Plot an ellipse to show the Gaussian component                                                  
-        v, w = np.linalg.eigh(covariance)
-        angle = np.arctan2(w[0][1], w[0][0])
-        angle = 180*angle/np.pi  # convert to degrees                                             
-	v *= 4
-        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180+angle, color=colors[i])
-        ell.set_clip_box(fig.bbox)
-        ell.set_alpha(alpha)
-        fig.axes[0].add_artist(ell)
-    return labels
--- a/python/moving.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1791 +0,0 @@
-#! /usr/bin/env python
-'''Libraries for moving objects, trajectories...'''
-
-import utils, cvutils
-from base import VideoFilenameAddable
-
-from math import sqrt, atan2, cos, sin
-from numpy import median, array, zeros, hypot, NaN, std, floor, float32
-from matplotlib.pyplot import plot
-from scipy.stats import scoreatpercentile
-from scipy.spatial.distance import cdist
-
-try:
-    from shapely.geometry import Polygon, Point as shapelyPoint
-    from shapely.prepared import prep, PreparedGeometry
-    shapelyAvailable = True
-except ImportError:
-    print('Shapely library could not be loaded')
-    shapelyAvailable = False
-
-
-class Interval(object):
-    '''Generic interval: a subset of real numbers (not iterable)'''
-    def __init__(self, first=0, last=-1, revert = False):
-        if revert and last<first:
-            self.first=last
-            self.last=first
-        else:
-            self.first=first
-            self.last=last
-
-    def __str__(self):
-        return '[{0}, {1}]'.format(self.first, self.last)
-
-    def __repr__(self):
-        return self.__str__()
-
-    def __eq__(self, other):
-        return ((self.first == other.first) and (self.last == other.last)) or ((self.first == other.last) and (self.last == other.first))
-
-    def empty(self):
-        return self.first > self.last
-
-    def center(self):
-        return (self.first+self.last)/2.
-
-    def length(self):
-        '''Returns the length of the interval'''
-        return float(max(0,self.last-self.first))
-
-    def equal(self, i2):
-        return self.first==i2.first and self.last == i2.last
-
-    def getList(self):
-        return [self.first, self.last]
-
-    def contains(self, instant):
-        return (self.first<=instant and self.last>=instant)
-
-    def inside(self, interval2):
-        '''Indicates if the temporal interval of self is comprised in interval2'''
-        return (self.first >= interval2.first) and (self.last <= interval2.last)
-
-    def shift(self, offset):
-        self.first += offset
-        self.last += offset
-
-    @classmethod
-    def union(cls, interval1, interval2):
-        '''Smallest interval comprising self and interval2'''
-        return cls(min(interval1.first, interval2.first), max(interval2.last, interval2.last))
-        
-    @classmethod
-    def intersection(cls, interval1, interval2):
-        '''Largest interval comprised in both self and interval2'''
-        return cls(max(interval1.first, interval2.first), min(interval1.last, interval2.last))
-
-    def distance(self, interval2):
-        if not Interval.intersection(self, interval2).empty():
-            return 0
-        elif self.first > interval2.last:
-            return self.first - interval2.last
-        elif self.last < interval2.first:
-            return interval2.first - self.last
-        else:
-            return None
-
-    @classmethod
-    def unionIntervals(cls, intervals):
-        'returns the smallest interval containing all intervals'
-        inter = cls(intervals[0].first, intervals[0].last)
-        for i in intervals[1:]:
-            inter = cls.union(inter, i)
-        return inter
-
-
-class TimeInterval(Interval):
-    '''Temporal interval: set of instants at fixed time step, between first and last, included
-    
-    For example: based on frame numbers (hence the modified length method)
-    It may be modified directly by setting first and last'''
-
-    def __init__(self, first=0, last=-1):
-        super(TimeInterval, self).__init__(first, last, False)
-
-    @staticmethod
-    def fromInterval(inter):
-        return TimeInterval(inter.first, inter.last)
-
-    def __getitem__(self, i):
-        if not self.empty():
-            if isinstance(i, int):
-                return self.first+i
-            else:
-                raise TypeError, "Invalid argument type."
-            #elif isinstance( key, slice ):
-
-    def __iter__(self):
-        self.iterInstantNum = -1
-        return self
-
-    def next(self):
-        if self.iterInstantNum >= self.length()-1:
-            raise StopIteration
-        else:
-            self.iterInstantNum += 1
-            return self[self.iterInstantNum]
-
-    def length(self):
-        '''Returns the length of the interval'''
-        return float(max(0,self.last-self.first+1))
-
-    def __len__(self):
-        return self.length()
-
-# class BoundingPolygon:
-#     '''Class for a polygon bounding a set of points
-#     with methods to create intersection, unions...
-#     '''
-# We will use the polygon class of Shapely
-
-class STObject(object):
-    '''Class for spatio-temporal object, i.e. with temporal and spatial existence 
-    (time interval and bounding polygon for positions (e.g. rectangle)).
-
-    It may not mean that the object is defined 
-    for all time instants within the time interval'''
-
-    def __init__(self, num = None, timeInterval = None, boundingPolygon = None):
-        self.num = num
-        self.timeInterval = timeInterval
-        self.boundingPolygon = boundingPolygon
-
-    def empty(self):
-        return self.timeInterval.empty()# or not self.boudingPolygon
-
-    def getNum(self):
-        return self.num
-
-    def __len__(self):
-        return self.timeInterval.length()
-
-    def length(self):
-        return self.timeInterval.length()
-
-    def getFirstInstant(self):
-        return self.timeInterval.first
-
-    def getLastInstant(self):
-        return self.timeInterval.last
-
-    def getTimeInterval(self):
-        return self.timeInterval
-
-    def existsAtInstant(self, t):
-        return self.timeInterval.contains(t)
-
-    def commonTimeInterval(self, obj2):
-        return TimeInterval.intersection(self.getTimeInterval(), obj2.getTimeInterval())
-
-    def shiftTimeInterval(self, offset):
-        self.timeInterval.shift(offset)
-
-class Point(object):
-    def __init__(self, x, y):
-        self.x = x
-        self.y = y
-
-    def __str__(self):
-        return '({:f},{:f})'.format(self.x,self.y)
-
-    def __repr__(self):
-        return self.__str__()
-
-    def __eq__(self, other):
-        return (self.x == other.x) and (self.y == other.y)
-
-    def __add__(self, other):
-        return Point(self.x+other.x, self.y+other.y)
-
-    def __sub__(self, other):
-        return Point(self.x-other.x, self.y-other.y)
-
-    def __neg__(self):
-        return Point(-self.x, -self.y)
-
-    def __getitem__(self, i):
-        if i == 0:
-            return self.x
-        elif i == 1:
-            return self.y
-        else:
-            raise IndexError()
-    
-    def orthogonal(self, clockwise = True):
-        'Returns the orthogonal vector'
-        if clockwise:
-            return Point(self.y, -self.x)
-        else:
-            return Point(-self.y, self.x)            
-
-    def multiply(self, alpha):
-        'Warning, returns a new Point'
-        return Point(self.x*alpha, self.y*alpha)
-
-    def divide(self, alpha):
-        'Warning, returns a new Point'
-        return Point(self.x/alpha, self.y/alpha)
-
-    def plot(self, options = 'o', **kwargs):
-        plot([self.x], [self.y], options, **kwargs)
-
-    @staticmethod
-    def plotSegment(p1, p2, options = 'o', **kwargs):
-        plot([p1.x, p2.x], [p1.y, p2.y], options, **kwargs)
-
-    def norm2Squared(self):
-        '''2-norm distance (Euclidean distance)'''
-        return self.x**2+self.y**2
-
-    def norm2(self):
-        '''2-norm distance (Euclidean distance)'''
-        return sqrt(self.norm2Squared())
-
-    def norm1(self):
-        return abs(self.x)+abs(self.y)
-    
-    def normMax(self):
-        return max(abs(self.x),abs(self.y))
-
-    def aslist(self):
-        return [self.x, self.y]
-
-    def astuple(self):
-        return (self.x, self.y)
-
-    def asint(self):
-        return Point(int(self.x), int(self.y))
-
-    if shapelyAvailable:
-        def asShapely(self):
-            return shapelyPoint(self.x, self.y)
-
-    def project(self, homography):
-        projected = cvutils.projectArray(homography, array([[self.x], [self.y]]))
-        return Point(projected[0], projected[1])
-
-    def inPolygon(self, polygon):
-        '''Indicates if the point x, y is inside the polygon
-        (array of Nx2 coordinates of the polygon vertices)
-
-        taken from http://www.ariel.com.au/a/python-point-int-poly.html
-
-        Use Polygon.contains if Shapely is installed'''
-
-        n = polygon.shape[0];
-        counter = 0;
-
-        p1 = polygon[0,:];
-        for i in range(n+1):
-            p2 = polygon[i % n,:];
-            if self.y > min(p1[1],p2[1]):
-                if self.y <= max(p1[1],p2[1]):
-                    if self.x <= max(p1[0],p2[0]):
-                        if p1[1] != p2[1]:
-                            xinters = (self.y-p1[1])*(p2[0]-p1[0])/(p2[1]-p1[1])+p1[0];
-                        if p1[0] == p2[0] or self.x <= xinters:
-                            counter+=1;
-            p1=p2
-        return (counter%2 == 1);
-
-    @staticmethod
-    def fromList(p):
-        return Point(p[0], p[1])
-
-    @staticmethod
-    def dot(p1, p2):
-        'Scalar product'
-        return p1.x*p2.x+p1.y*p2.y
-
-    @staticmethod
-    def cross(p1, p2):
-        'Cross product'
-        return p1.x*p2.y-p1.y*p2.x
-
-    @staticmethod
-    def cosine(p1, p2):
-        return Point.dot(p1,p2)/(p1.norm2()*p2.norm2())
-
-    @staticmethod
-    def distanceNorm2(p1, p2):
-        return (p1-p2).norm2()
-
-    @staticmethod
-    def plotAll(points, **kwargs):
-        from matplotlib.pyplot import scatter
-        scatter([p.x for p in points],[p.y for p in points], **kwargs)
-
-    def similarOrientation(self, refDirection, cosineThreshold):
-        'Indicates whether the cosine of the vector and refDirection is smaller than cosineThreshold'
-        return Point.cosine(self, refDirection) >= cosineThreshold
-
-    @staticmethod
-    def timeToCollision(p1, p2, v1, v2, collisionThreshold):
-        '''Computes exact time to collision with a distance threshold
-        The unknown of the equation is the time to reach the intersection
-        between the relative trajectory of one road user
-        and the circle of radius collisionThreshold around the other road user'''
-        dv = v1-v2
-        dp = p1-p2
-        a = dv.norm2Squared()#(v1.x-v2.x)**2 + (v1.y-v2.y)**2
-        b = 2*Point.dot(dv, dp)#2 * ((p1.x-p2.x) * (v1.x-v2.x) + (p1.y-p2.y) * (v1.y-v2.y))
-        c = dp.norm2Squared() - collisionThreshold**2#(p1.x-p2.x)**2 + (p1.y-p2.y)**2 - collisionThreshold**2
-
-        delta = b**2 - 4*a*c
-        if delta >= 0:
-            deltaRoot = sqrt(delta)
-            ttc1 = (-b + deltaRoot)/(2*a)
-            ttc2 = (-b - deltaRoot)/(2*a)
-            if ttc1 >= 0 and ttc2 >= 0:
-                ttc = min(ttc1,ttc2)
-            elif ttc1 >= 0:
-                ttc = ttc1
-            elif ttc2 >= 0:
-                ttc = ttc2
-            else: # ttc1 < 0 and ttc2 < 0:
-                ttc = None
-        else:
-            ttc = None
-        return ttc
-
-    @staticmethod   
-    def midPoint(p1, p2):
-        'Returns the middle of the segment [p1, p2]'
-        return Point(0.5*p1.x+0.5*p2.x, 0.5*p1.y+0.5*p2.y)
-
-if shapelyAvailable:
-    def pointsInPolygon(points, polygon):
-        '''Optimized tests of a series of points within (Shapely) polygon (not prepared)'''
-        if type(polygon) == PreparedGeometry:
-            prepared_polygon = polygon
-        else:
-            prepared_polygon = prep(polygon)
-        return filter(prepared_polygon.contains, points)
-
-# Functions for coordinate transformation
-# From Paul St-Aubin's PVA tools
-def subsec_spline_dist(splines):
-    ''' Prepare list of spline subsegments from a spline list. 
-    
-    Output:
-    =======
-    ss_spline_d[spline #][mode][station]
-    
-    where:
-        mode=0: incremental distance
-        mode=1: cumulative distance
-        mode=2: cumulative distance with trailing distance
-    '''
-    ss_spline_d = []
-    #Prepare subsegment distances
-    for spline in range(len(splines)):
-        ss_spline_d[spline]=[]#.append([[],[],[]])
-        ss_spline_d[spline].append(zeros(len(splines[spline])-1))  #Incremental distance
-        ss_spline_d[spline].append(zeros(len(splines[spline])-1))  #Cumulative distance
-        ss_spline_d[spline].append(zeros(len(splines[spline])))  #Cumulative distance with trailing distance
-        for spline_p in range(len(splines[spline])):
-            if spline_p > (len(splines[spline]) - 2):
-                break
-            ss_spline_d[spline][0][spline_p] = utils.pointDistanceL2(splines[spline][spline_p][0],splines[spline][spline_p][1],splines[spline][(spline_p+1)][0],splines[spline][(spline_p+1)][1])
-            ss_spline_d[spline][1][spline_p] = sum(ss_spline_d[spline][0][0:spline_p])
-            ss_spline_d[spline][2][spline_p] = ss_spline_d[spline][1][spline_p]#sum(ss_spline_d[spline][0][0:spline_p])
-    
-    ss_spline_d[spline][2][-1] = ss_spline_d[spline][2][-2] + ss_spline_d[spline][0][-1]
-
-    return ss_spline_d
-
-def prepareSplines(splines):
-    'Approximates slope singularity by giving some slope roundoff; account for roundoff error'
-    for spline in splines:
-        p1 = spline[0]
-        for i in xrange(len(spline)-1):
-            p2 = spline[i+1]
-            if(round(p1.x, 10) == round(p2.x, 10)):
-                p2.x += 0.0000000001
-            if(round(p1.y, 10) == round(p2.y, 10)):
-                p2.y += 0.0000000001            
-            p1 = p2
-
-def ppldb2p(qx,qy, p0x,p0y, p1x,p1y):
-    ''' Point-projection (Q) on line defined by 2 points (P0,P1). 
-        http://cs.nyu.edu/~yap/classes/visual/03s/hw/h2/math.pdf
-        '''
-    if(p0x == p1x and p0y == p1y):
-        return None
-    try:
-        #Approximate slope singularity by giving some slope roundoff; account for roundoff error
-        # if(round(p0x, 10) == round(p1x, 10)):
-        #     p1x += 0.0000000001
-        # if(round(p0y, 10) == round(p1y, 10)):
-        #     p1y += 0.0000000001            
-        #make the calculation
-        Y = (-(qx)*(p0y-p1y)-(qy*(p0y-p1y)**2)/(p0x-p1x)+p0x**2*(p0y-p1y)/(p0x-p1x)-p0x*p1x*(p0y-p1y)/(p0x-p1x)-p0y*(p0x-p1x))/(p1x-p0x-(p0y-p1y)**2/(p0x-p1x))
-        X = (-Y*(p1y-p0y)+qx*(p1x-p0x)+qy*(p1y-p0y))/(p1x-p0x)
-    except ZeroDivisionError:
-        print('Error: Division by zero in ppldb2p. Please report this error with the full traceback:')
-        print('qx={0}, qy={1}, p0x={2}, p0y={3}, p1x={4}, p1y={5}...'.format(qx, qy, p0x, p0y, p1x, p1y))
-        import pdb; pdb.set_trace()  
-    return Point(X,Y)
-
-def getSYfromXY(p, splines, goodEnoughSplineDistance = 0.5):
-    ''' Snap a point p to it's nearest subsegment of it's nearest spline (from the list splines). 
-    A spline is a list of points (class Point), most likely a trajectory. 
-    
-    Output:
-    =======
-    [spline index, 
-    subsegment leading point index, 
-    snapped point, 
-    subsegment distance, 
-    spline distance,
-    orthogonal point offset]
-
-    or None
-    '''
-    minOffsetY = float('inf')
-    #For each spline
-    for splineIdx in range(len(splines)):
-        #For each spline point index
-        for spline_p in range(len(splines[splineIdx])-1):
-            #Get closest point on spline
-            closestPoint = ppldb2p(p.x,p.y,splines[splineIdx][spline_p][0],splines[splineIdx][spline_p][1],splines[splineIdx][spline_p+1][0],splines[splineIdx][spline_p+1][1])
-            if closestPoint is None:
-                print('Error: Spline {0}, segment {1} has identical bounds and therefore is not a vector. Projection cannot continue.'.format(splineIdx, spline_p))
-                return None
-            # check if the projected point is in between the current segment of the alignment bounds
-            if utils.inBetween(splines[splineIdx][spline_p][0], splines[splineIdx][spline_p+1][0], closestPoint.x) and utils.inBetween(splines[splineIdx][spline_p][1], splines[splineIdx][spline_p+1][1], closestPoint.y): 
-                offsetY = Point.distanceNorm2(closestPoint, p)
-                if offsetY < minOffsetY:
-                    minOffsetY = offsetY
-                    snappedSplineIdx = splineIdx
-                    snappedSplineLeadingPoint = spline_p
-                    snappedPoint = Point(closestPoint.x, closestPoint.y)
-                #Jump loop if significantly close
-                if offsetY < goodEnoughSplineDistance: 
-                    break
-
-    #Get sub-segment distance
-    if minOffsetY != float('inf'):
-        subsegmentDistance = Point.distanceNorm2(snappedPoint, splines[snappedSplineIdx][snappedSplineLeadingPoint])
-        #Get cumulative alignment distance (total segment distance)
-        splineDistanceS = splines[snappedSplineIdx].getCumulativeDistance(snappedSplineLeadingPoint) + subsegmentDistance
-        orthogonalSplineVector = (splines[snappedSplineIdx][snappedSplineLeadingPoint+1]-splines[snappedSplineIdx][snappedSplineLeadingPoint]).orthogonal()
-        offsetVector = p-snappedPoint
-        if Point.dot(orthogonalSplineVector, offsetVector) < 0:
-            minOffsetY = -minOffsetY
-        return [snappedSplineIdx, snappedSplineLeadingPoint, snappedPoint, subsegmentDistance, splineDistanceS, minOffsetY]
-    else:
-        print('Offset for point {} is infinite (check with prepareSplines if some spline segments are aligned with axes)'.format(p))
-        return None
-
-def getXYfromSY(s, y, splineNum, splines, mode = 0):
-    ''' Find X,Y coordinate from S,Y data. 
-    if mode = 0 : return Snapped X,Y
-    if mode !=0 : return Real X,Y
-    ''' 
-    
-    #(buckle in, it gets ugly from here on out)
-    ss_spline_d = subsec_spline_dist(splines)
-    
-    #Find subsegment
-    snapped_x = None
-    snapped_y = None
-    for spline_ss_index in range(len(ss_spline_d[splineNum][1])):
-        if(s < ss_spline_d[splineNum][1][spline_ss_index]):
-            ss_value = s - ss_spline_d[splineNum][1][spline_ss_index-1]
-            #Get normal vector and then snap
-            vector_l_x = (splines[splineNum][spline_ss_index][0] - splines[splineNum][spline_ss_index-1][0])
-            vector_l_y = (splines[splineNum][spline_ss_index][1] - splines[splineNum][spline_ss_index-1][1])
-            magnitude  = sqrt(vector_l_x**2 + vector_l_y**2)
-            n_vector_x = vector_l_x/magnitude
-            n_vector_y = vector_l_y/magnitude
-            snapped_x  = splines[splineNum][spline_ss_index-1][0] + ss_value*n_vector_x
-            snapped_y  = splines[splineNum][spline_ss_index-1][1] + ss_value*n_vector_y
-
-            #Real values (including orthogonal projection of y))
-            real_x = snapped_x - y*n_vector_y 
-            real_y = snapped_y + y*n_vector_x            
-            break
-    
-    if mode == 0 or (not snapped_x):
-        if(not snapped_x):
-            snapped_x = splines[splineNum][-1][0]
-            snapped_y = splines[splineNum][-1][1]                
-        return [snapped_x,snapped_y]
-    else:
-        return [real_x,real_y]
-
-
-class NormAngle(object):
-    '''Alternate encoding of a point, by its norm and orientation'''
-
-    def __init__(self, norm, angle):
-        self.norm = norm
-        self.angle = angle
-    
-    @staticmethod
-    def fromPoint(p):
-        norm = p.norm2()
-        if norm > 0:
-            angle = atan2(p.y, p.x)
-        else:
-            angle = 0.
-        return NormAngle(norm, angle)
-
-    def __add__(self, other):
-        'a norm cannot become negative'
-        return NormAngle(max(self.norm+other.norm, 0), self.angle+other.angle)
-
-    def getPoint(self):
-        return Point(self.norm*cos(self.angle), self.norm*sin(self.angle))
-
-
-def predictPositionNoLimit(nTimeSteps, initialPosition, initialVelocity, initialAcceleration = Point(0,0)):
-    '''Predicts the position in nTimeSteps at constant speed/acceleration'''
-    return initialVelocity + initialAcceleration.multiply(nTimeSteps),initialPosition+initialVelocity.multiply(nTimeSteps) + initialAcceleration.multiply(nTimeSteps**2*0.5)
-
-def predictPosition(position, speedOrientation, control, maxSpeed = None):
-    '''Predicts the position (moving.Point) at the next time step with given control input (deltaSpeed, deltaTheta)
-    speedOrientation is the other encoding of velocity, (speed, orientation)
-    speedOrientation and control are NormAngle'''
-    predictedSpeedTheta = speedOrientation+control
-    if maxSpeed:
-         predictedSpeedTheta.norm = min(predictedSpeedTheta.norm, maxSpeed)
-    predictedPosition = position+predictedSpeedTheta.getPoint()
-    return predictedPosition, predictedSpeedTheta
-
-
-class FlowVector(object):
-    '''Class to represent 4-D flow vectors,
-    ie a position and a velocity'''
-    def __init__(self, position, velocity):
-        'position and velocity should be Point instances'
-        self.position = position
-        self.velocity = velocity
-
-    def __add__(self, other):
-        return FlowVector(self.position+other.position, self.velocity+other.velocity)
-
-    def multiply(self, alpha):
-        return FlowVector(self.position.multiply(alpha), self.velocity.multiply(alpha))
-
-    def plot(self, options = '', **kwargs):
-        plot([self.position.x, self.position.x+self.velocity.x], [self.position.y, self.position.y+self.velocity.y], options, **kwargs)
-        self.position.plot(options+'x', **kwargs)
-    
-    @staticmethod
-    def similar(f1, f2, maxDistance2, maxDeltavelocity2):
-        return (f1.position-f2.position).norm2Squared()<maxDistance2 and (f1.velocity-f2.velocity).norm2Squared()<maxDeltavelocity2
-
-def intersection(p1, p2, p3, p4):
-    ''' Intersection point (x,y) of lines formed by the vectors p1-p2 and p3-p4
-        http://paulbourke.net/geometry/pointlineplane/'''
-    dp12 = p2-p1
-    dp34 = p4-p3
-    #det = (p4.y-p3.y)*(p2.x-p1.x)-(p4.x-p3.x)*(p2.y-p1.y)
-    det = float(dp34.y*dp12.x-dp34.x*dp12.y)
-    if det == 0.:
-        return None
-    else:
-        ua = (dp34.x*(p1.y-p3.y)-dp34.y*(p1.x-p3.x))/det
-        return p1+dp12.multiply(ua)
-
-# def intersection(p1, p2, dp1, dp2):
-#     '''Returns the intersection point between the two lines 
-#     defined by the respective vectors (dp) and origin points (p)'''
-#     from numpy import matrix
-#     from numpy.linalg import linalg
-#     A = matrix([[dp1.y, -dp1.x],
-#                 [dp2.y, -dp2.x]])
-#     B = matrix([[dp1.y*p1.x-dp1.x*p1.y],
-#                 [dp2.y*p2.x-dp2.x*p2.y]])
-    
-#     if linalg.det(A) == 0:
-#         return None
-#     else:
-#         intersection = linalg.solve(A,B)
-#         return Point(intersection[0,0], intersection[1,0])
-
-def segmentIntersection(p1, p2, p3, p4):
-    '''Returns the intersecting point of the segments [p1, p2] and [p3, p4], None otherwise'''
-
-    if (Interval.intersection(Interval(p1.x,p2.x,True), Interval(p3.x,p4.x,True)).empty()) or (Interval.intersection(Interval(p1.y,p2.y,True), Interval(p3.y,p4.y,True)).empty()):
-        return None
-    else:
-        inter = intersection(p1, p2, p3, p4)
-        if (inter is not None 
-            and utils.inBetween(p1.x, p2.x, inter.x)
-            and utils.inBetween(p3.x, p4.x, inter.x)
-            and utils.inBetween(p1.y, p2.y, inter.y)
-            and utils.inBetween(p3.y, p4.y, inter.y)):
-            return inter
-        else:
-            return None
-
-def segmentLineIntersection(p1, p2, p3, p4):
-    '''Indicates if the line going through p1 and p2 intersects inside p3, p4'''
-    inter = intersection(p1, p2, p3, p4)
-    if inter is not None and utils.inBetween(p3.x, p4.x, inter.x) and utils.inBetween(p3.y, p4.y, inter.y):
-        return inter
-    else:
-        return None
-        
-
-class Trajectory(object):
-    '''Class for trajectories: temporal sequence of positions
-
-    The class is iterable'''
-
-    def __init__(self, positions=None):
-        if positions is not None:
-            self.positions = positions
-        else:
-            self.positions = [[],[]]
-
-    @staticmethod
-    def generate(p, v, nPoints):
-        t = Trajectory()
-        p0 = Point(p.x, p.y)
-        t.addPosition(p0)
-        for i in xrange(nPoints-1):
-            p0 += v
-            t.addPosition(p0)
-        return t, Trajectory([[v.x]*nPoints, [v.y]*nPoints])
-
-    @staticmethod
-    def load(line1, line2):
-        return Trajectory([[float(n) for n in line1.split(' ')],
-                           [float(n) for n in line2.split(' ')]])
-
-    @staticmethod
-    def fromPointList(points):
-        t = Trajectory()
-        if isinstance(points[0], list) or isinstance(points[0], tuple):
-            for p in points:
-                t.addPositionXY(p[0],p[1])
-        else:
-            for p in points:
-                t.addPosition(p)
-        return t
-
-    def __len__(self):
-        return len(self.positions[0])
-
-    def length(self):
-        return self.__len__()
-
-    def empty(self):
-        return self.__len__() == 0
-
-    def __getitem__(self, i):
-        if isinstance(i, int):
-            return Point(self.positions[0][i], self.positions[1][i])
-        else:
-            raise TypeError, "Invalid argument type."
-            #elif isinstance( key, slice ):
-
-    def __str__(self):
-        return ' '.join([self.__getitem__(i).__str__() for i in xrange(self.length())])
-
-    def __repr__(self):
-        return self.__str__()
-
-    def __iter__(self):
-        self.iterInstantNum = 0
-        return self
-
-    def next(self):
-        if self.iterInstantNum >= self.length():
-            raise StopIteration
-        else:
-            self.iterInstantNum += 1
-            return self[self.iterInstantNum-1]
-
-    def __eq__(self, other):
-        if self.length() == other.length():
-            result = True
-            for p, po in zip(self, other):
-                result = result and (p == po)
-            return result
-        else:
-            return False
-
-    def setPositionXY(self, i, x, y):
-        if i < self.__len__():
-            self.positions[0][i] = x
-            self.positions[1][i] = y
-
-    def setPosition(self, i, p):
-        self.setPositionXY(i, p.x, p.y)
-
-    def addPositionXY(self, x, y):
-        self.positions[0].append(x)
-        self.positions[1].append(y)
-
-    def addPosition(self, p):
-        self.addPositionXY(p.x, p.y)
-
-    def duplicateLastPosition(self):
-        self.positions[0].append(self.positions[0][-1])
-        self.positions[1].append(self.positions[1][-1])
-
-    @staticmethod
-    def _plot(positions, options = '', withOrigin = False, lastCoordinate = None, timeStep = 1, **kwargs):
-        if lastCoordinate is None:
-            plot(positions[0][::timeStep], positions[1][::timeStep], options, **kwargs)
-        elif 0 <= lastCoordinate <= len(positions[0]):
-            plot(positions[0][:lastCoordinate:timeStep], positions[1][:lastCoordinate:timeStep], options, **kwargs)
-        if withOrigin:
-            plot([positions[0][0]], [positions[1][0]], 'ro', **kwargs)
-
-    def project(self, homography):
-        return Trajectory(cvutils.projectTrajectory(homography, self.positions).tolist())
-
-    def plot(self, options = '', withOrigin = False, timeStep = 1, **kwargs):
-        Trajectory._plot(self.positions, options, withOrigin, None, timeStep, **kwargs)
-
-    def plotAt(self, lastCoordinate, options = '', withOrigin = False, timeStep = 1, **kwargs):
-        Trajectory._plot(self.positions, options, withOrigin, lastCoordinate, timeStep, **kwargs)
-
-    def plotOnWorldImage(self, nPixelsPerUnitDistance, options = '', withOrigin = False, timeStep = 1, **kwargs):
-        imgPositions = [[x*nPixelsPerUnitDistance for x in self.positions[0]],
-                        [x*nPixelsPerUnitDistance for x in self.positions[1]]]
-        Trajectory._plot(imgPositions, options, withOrigin, None, timeStep, **kwargs)
-
-    def getXCoordinates(self):
-        return self.positions[0]
-
-    def getYCoordinates(self):
-        return self.positions[1]
-
-    def asArray(self):
-        return array(self.positions)
-    
-    def xBounds(self):
-        # look for function that does min and max in one pass
-        return Interval(min(self.getXCoordinates()), max(self.getXCoordinates()))
-    
-    def yBounds(self):
-        # look for function that does min and max in one pass
-        return Interval(min(self.getYCoordinates()), max(self.getYCoordinates()))
-    
-    def add(self, traj2):
-        '''Returns a new trajectory of the same length'''
-        if self.length() != traj2.length():
-            print 'Trajectories of different lengths'
-            return None
-        else:
-            return Trajectory([[a+b for a,b in zip(self.getXCoordinates(),traj2.getXCoordinates())],
-                               [a+b for a,b in zip(self.getYCoordinates(),traj2.getYCoordinates())]])
-
-    def subtract(self, traj2):
-        '''Returns a new trajectory of the same length'''
-        if self.length() != traj2.length():
-            print 'Trajectories of different lengths'
-            return None
-        else:
-            return Trajectory([[a-b for a,b in zip(self.getXCoordinates(),traj2.getXCoordinates())],
-                               [a-b for a,b in zip(self.getYCoordinates(),traj2.getYCoordinates())]])
-
-    def multiply(self, alpha):
-        '''Returns a new trajectory of the same length'''
-        return Trajectory([[alpha*x for x in self.getXCoordinates()],
-                           [alpha*y for y in self.getYCoordinates()]])
-
-    def differentiate(self, doubleLastPosition = False):
-        diff = Trajectory()
-        for i in xrange(1, self.length()):
-            diff.addPosition(self[i]-self[i-1])
-        if doubleLastPosition:
-            diff.addPosition(diff[-1])
-        return diff
-
-    def differentiateSG(self, window_length, polyorder, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0, removeBothEnds = 2):
-        '''Differentiates the trajectory using the Savitsky Golay filter
-
-        window_length : The length of the filter window (i.e. the number of coefficients). window_length must be a positive odd integer.
-        polyorder : The order of the polynomial used to fit the samples. polyorder must be less than window_length.
-        deriv : The order of the derivative to compute. This must be a nonnegative integer. The default is 0, which means to filter the data without differentiating.
-        delta : The spacing of the samples to which the filter will be applied. This is only used if deriv > 0. Default is 1.0.
-        axis : The axis of the array x along which the filter is to be applied. Default is -1.
-        mode : Must be mirror, constant, nearest, wrap or interp. This determines the type of extension to use for the padded signal to which the filter is applied. When mode is constant, the padding value is given by cval. See the Notes for more details on mirror, constant, wrap, and nearest. When the interp mode is selected (the default), no extension is used. Instead, a degree polyorder polynomial is fit to the last window_length values of the edges, and this polynomial is used to evaluate the last window_length // 2 output values.
-        cval : Value to fill past the edges of the input if mode is constant. Default is 0.0.'''
-        from scipy.signal import savgol_filter
-
-        if removeBothEnds >=1:
-            pos = [self.positions[0][removeBothEnds:-removeBothEnds],
-                   self.positions[1][removeBothEnds:-removeBothEnds]]
-        else:
-            pos = self.positions
-        filtered = savgol_filter(pos, window_length, polyorder, deriv, delta, axis, mode, cval)
-        return Trajectory(filtered)
-
-    def norm(self):
-        '''Returns the list of the norms at each instant'''
-#        def add(x, y): return x+y
-#        sq = map(add, [x*x for x in self.positions[0]], [y*y for y in self.positions[1]])
-#        return sqrt(sq)
-        return hypot(self.positions[0], self.positions[1])
-
-    # def cumulatedDisplacement(self):
-    #     'Returns the sum of the distances between each successive point'
-    #     displacement = 0
-    #     for i in xrange(self.length()-1):
-    #         displacement += Point.distanceNorm2(self.__getitem__(i),self.__getitem__(i+1))
-    #     return displacement
-
-    def computeCumulativeDistances(self):
-        '''Computes the distance from each point to the next and the cumulative distance up to the point
-        Can be accessed through getDistance(idx) and getCumulativeDistance(idx)'''
-        self.distances = []
-        self.cumulativeDistances = [0.]
-        p1 = self[0]
-        cumulativeDistance = 0.
-        for i in xrange(self.length()-1):
-            p2 = self[i+1]
-            self.distances.append(Point.distanceNorm2(p1,p2))
-            cumulativeDistance += self.distances[-1]
-            self.cumulativeDistances.append(cumulativeDistance)
-            p1 = p2
-
-    def getDistance(self,i):
-        '''Return the distance between points i and i+1'''
-        if i < self.length()-1:
-            return self.distances[i]
-        else:
-            print('Index {} beyond trajectory length {}-1'.format(i, self.length()))
-
-    def getCumulativeDistance(self, i):
-        '''Return the cumulative distance between the beginning and point i'''
-        if i < self.length():
-            return self.cumulativeDistances[i]
-        else:
-            print('Index {} beyond trajectory length {}'.format(i, self.length()))
-
-    def getMaxDistance(self, metric):
-        'Returns the maximum distance between points in the trajectory' 
-        positions = self.getPositions().asArray().T
-        return cdist(positions, positions, metric = metric).max()
-
-    def similarOrientation(self, refDirection, cosineThreshold, minProportion = 0.5):
-        '''Indicates whether the minProportion (<=1.) (eg half) of the trajectory elements (vectors for velocity) 
-        have a cosine with refDirection is smaller than cosineThreshold'''
-        count = 0
-        lengthThreshold = float(self.length())*minProportion
-        for p in self:
-            if p.similarOrientation(refDirection, cosineThreshold):
-                count += 1
-        return count >= lengthThreshold
-
-    def wiggliness(self):
-        straightDistance = Point.distanceNorm2(self.__getitem__(0),self.__getitem__(self.length()-1))
-        if straightDistance > 0:
-            return self.getCumulativeDistance(self.length()-1)/float(straightDistance)
-        else:
-            return None
-
-    def getIntersections(self, p1, p2):
-        '''Returns a list of the indices at which the trajectory 
-        intersects with the segment of extremities p1 and p2 
-        Returns an empty list if there is no crossing'''
-        indices = []
-        intersections = []
-
-        for i in xrange(self.length()-1):
-            q1=self.__getitem__(i)
-            q2=self.__getitem__(i+1)
-            p = segmentIntersection(q1, q2, p1, p2)
-            if p is not None:
-                if q1.x != q2.x:
-                    ratio = (p.x-q1.x)/(q2.x-q1.x)
-                elif q1.y != q2.y:
-                    ratio = (p.y-q1.y)/(q2.y-q1.y)
-                else:
-                    ratio = 0
-                indices.append(i+ratio)
-                intersections.append(p)
-        return indices, intersections
-
-    def getLineIntersections(self, p1, p2):
-        '''Returns a list of the indices at which the trajectory 
-        intersects with the line going through p1 and p2 
-        Returns an empty list if there is no crossing'''
-        indices = []
-        intersections = []
-        
-        for i in xrange(self.length()-1):
-            q1=self.__getitem__(i)
-            q2=self.__getitem__(i+1)
-            p = segmentLineIntersection(p1, p2, q1, q2)
-            if p is not None:
-                if q1.x != q2.x:
-                    ratio = (p.x-q1.x)/(q2.x-q1.x)
-                elif q1.y != q2.y:
-                    ratio = (p.y-q1.y)/(q2.y-q1.y)
-                else:
-                    ratio = 0
-                indices.append(i+ratio)
-                intersections.append(p)
-        return indices, intersections
-
-    def getTrajectoryInInterval(self, inter):
-        'Returns all position between index inter.first and index.last (included)'
-        if inter.first >=0 and inter.last<= self.length():
-            return Trajectory([self.positions[0][inter.first:inter.last+1],
-                               self.positions[1][inter.first:inter.last+1]])
-        else:
-            return None
-
-    def subSample(self, step):
-        'Returns the positions very step'
-        return Trajectory([self.positions[0][::step],
-                           self.positions[1][::step]])
-
-    if shapelyAvailable:
-        def getTrajectoryInPolygon(self, polygon, t2 = None):
-            '''Returns the trajectory built with the set of points inside the (shapely) polygon
-            The polygon could be a prepared polygon (faster) from prepared.prep
-
-            t2 is another trajectory (could be velocities) 
-            which is filtered based on the first (self) trajectory'''
-            traj = Trajectory()
-            inPolygon = []
-            for x, y in zip(self.positions[0], self.positions[1]):
-                inPolygon.append(polygon.contains(shapelyPoint(x, y)))
-                if inPolygon[-1]:
-                    traj.addPositionXY(x, y)
-            traj2 = Trajectory()
-            if t2 is not None:
-                for inp, x, y in zip(inPolygon, t2.positions[0], t2.positions[1]):
-                    if inp:
-                        traj2.addPositionXY(x, y)
-            return traj, traj2
-
-        def proportionInPolygon(self, polygon, minProportion = 0.5):
-            inPolygon = [polygon.contains(shapelyPoint(x, y)) for x, y in zip(self.positions[0], self.positions[1])]
-            lengthThreshold = float(self.length())*minProportion
-            return sum(inPolygon) >= lengthThreshold
-    else:
-        def getTrajectoryInPolygon(self, polygon, t2 = None):
-            '''Returns the trajectory built with the set of points inside the polygon
-            (array of Nx2 coordinates of the polygon vertices)'''
-            traj = Trajectory()
-            inPolygon = []
-            for p in self:
-                inPolygon.append(p.inPolygon(polygon))
-                if inPolygon[-1]:
-                    traj.addPosition(p)
-            traj2 = Trajectory()
-            if t2 is not None:
-                for inp, x, y in zip(inPolygon, t2.positions[0], t2.positions[1]):
-                    if inp:
-                        traj2.addPositionXY(p.x, p.y)
-            return traj, traj2
-
-        def proportionInPolygon(self, polygon, minProportion = 0.5):
-            inPolygon = [p.inPolygon(polygon) for p in self]
-            lengthThreshold = float(self.length())*minProportion
-            return sum(inPolygon) >= lengthThreshold
-
-    @staticmethod
-    def lcss(t1, t2, lcss):
-        return lcss.compute(t1, t2)
-
-class CurvilinearTrajectory(Trajectory):
-    '''Sub class of trajectory for trajectories with curvilinear coordinates and lane assignements
-    longitudinal coordinate is stored as first coordinate (exterior name S)
-    lateral coordiante is stored as second coordinate'''
-
-    def __init__(self, S = None, Y = None, lanes = None):
-        if S is None or Y is None or len(S) != len(Y):
-            self.positions = [[],[]]
-            if S is not None and Y is not None and len(S) != len(Y):
-                print("S and Y coordinates of different lengths\nInitializing to empty lists")
-        else:
-            self.positions = [S,Y]
-        if lanes is None or len(lanes) != self.length():
-            self.lanes = []
-        else:
-            self.lanes = lanes
-        
-    def __getitem__(self,i): 
-        if isinstance(i, int):
-            return [self.positions[0][i], self.positions[1][i], self.lanes[i]]
-        else:
-            raise TypeError, "Invalid argument type."
-            #elif isinstance( key, slice ):
-
-    def getSCoordinates(self):
-        return self.getXCoordinates()
-    
-    def getLanes(self):
-        return self.lanes
-
-    def addPositionSYL(self, s, y, lane):
-        self.addPositionXY(s,y)
-        self.lanes.append(lane)
-
-    def addPosition(self, p):
-        'Adds position in the point format for curvilinear of list with 3 values'
-        self.addPositionSYL(p[0], p[1], p[2])
-
-    def setPosition(self, i, s, y, lane):
-        self.setPositionXY(i, s, y)
-        if i < self.__len__():
-            self.lanes[i] = lane
-
-    def differentiate(self, doubleLastPosition = False):
-        diff = CurvilinearTrajectory()
-        p1 = self[0]
-        for i in xrange(1, self.length()):
-            p2 = self[i]
-            diff.addPositionSYL(p2[0]-p1[0], p2[1]-p1[1], p1[2])
-            p1=p2
-        if doubleLastPosition and self.length() > 1:
-            diff.addPosition(diff[-1])
-        return diff
-
-    def getIntersections(self, S1, lane = None):
-        '''Returns a list of the indices at which the trajectory 
-        goes past the curvilinear coordinate S1
-        (in provided lane if lane is not None)
-        Returns an empty list if there is no crossing'''
-        indices = []
-        for i in xrange(self.length()-1):
-            q1=self.__getitem__(i)
-            q2=self.__getitem__(i+1)
-            if q1[0] <= S1 < q2[0] and (lane is None or (self.lanes[i] == lane and self.lanes[i+1] == lane)):
-                indices.append(i+(S1-q1[0])/(q2[0]-q1[0]))
-        return indices
-
-##################
-# Moving Objects
-##################
-
-userTypeNames = ['unknown',
-                 'car',
-                 'pedestrian',
-                 'motorcycle',
-                 'bicycle',
-                 'bus',
-                 'truck']
-
-userType2Num = utils.inverseEnumeration(userTypeNames)
-
-class MovingObject(STObject, VideoFilenameAddable):
-    '''Class for moving objects: a spatio-temporal object 
-    with a trajectory and a geometry (constant volume over time) 
-    and a usertype (e.g. road user) coded as a number (see userTypeNames)
-    '''
-
-    def __init__(self, num = None, timeInterval = None, positions = None, velocities = None, geometry = None, userType = userType2Num['unknown']):
-        super(MovingObject, self).__init__(num, timeInterval)
-        self.positions = positions
-        self.velocities = velocities
-        self.geometry = geometry
-        self.userType = userType
-        self.features = None
-        # compute bounding polygon from trajectory
-
-    @staticmethod
-    def generate(p, v, timeInterval):
-        positions, velocities = Trajectory.generate(p, v, int(timeInterval.length())) 
-        return MovingObject(timeInterval = timeInterval, positions = positions, velocities = velocities)
-
-    @staticmethod
-    def concatenate(obj1, obj2, num = None):
-        '''Concatenates two objects supposed to overlap temporally '''
-        commonTimeInterval = obj1.commonTimeInterval(obj2)
-        if commonTimeInterval.empty():
-            print('The two objects\' time intervals do not overlap: obj1 {} and obj2 {}'.format(obj1.getTimeInterval(), obj2.getTimeInterval()))
-            return None
-        else:
-            if num is None:
-                newNum = obj1.getNum()
-            else:
-                newNum = num
-            newTimeInterval = TimeInterval.union(obj1.getTimeInterval(), obj2.getTimeInterval())
-            # positions
-            positions = Trajectory()
-            for t in newTimeInterval:
-                nTotal = 0.
-                p = Point(0.,0.)
-                for obj in [obj1, obj2]:
-                    if obj.existsAtInstant(t):
-                        if obj.hasFeatures():
-                            n = len([f for f in obj.getFeatures() if f.existsAtInstant(t)])
-                        else:
-                            n = 1.
-                        p += obj.getPositionAtInstant(t).multiply(n)
-                        nTotal += n
-                assert nTotal>0, 'there should be at least one point for each instant'
-                positions.addPosition(p.divide(nTotal))
-            # velocities: if any
-            if hasattr(obj1, 'velocities') and hasattr(obj2, 'velocities'):
-                velocities = Trajectory()
-                for t in newTimeInterval:
-                    nTotal = 0.
-                    p = Point(0.,0.)
-                    for obj in [obj1, obj2]:
-                        if obj.existsAtInstant(t):
-                            if obj.hasFeatures():
-                                n = len([f for f in obj.getFeatures() if f.existsAtInstant(t)])
-                            else:
-                                n = 1.
-                            p += obj.getVelocityAtInstant(t).multiply(n)
-                            nTotal += n
-                    assert n>0, 'there should be at least one point for each instant'
-                    velocities.addPosition(p.divide(nTotal))
-            else:
-                velocities = None
-            # TODO object envelop (polygon)
-            # user type
-            if obj1.getUserType() != obj2.getUserType():
-                print('The two moving objects have different user types: obj1 {} obj2 {}'.format(userTypeNames[obj1.getUserType()], userTypeNames[obj2.getUserType()]))
-
-            newObject = MovingObject(newNum, newTimeInterval, positions, velocities, userType = obj1.getUserType())
-            if obj1.hasFeatures() and obj2.hasFeatures():
-                newObject.features = obj1.getFeatures()+obj2.getFeatures()
-            return newObject
-
-    def getObjectInTimeInterval(self, inter):
-        '''Returns a new object extracted from self,
-        restricted to time interval inter'''
-        intersection = TimeInterval.intersection(inter, self.getTimeInterval())
-        if not intersection.empty():
-            trajectoryInterval = TimeInterval(intersection.first-self.getFirstInstant(), intersection.last-self.getFirstInstant())
-            obj = MovingObject(self.num, intersection, self.positions.getTrajectoryInInterval(trajectoryInterval), self.geometry, self.userType)
-            if self.velocities:
-                obj.velocities = self.velocities.getTrajectoryInInterval(trajectoryInterval)
-            return obj
-        else:
-            print 'The object does not exist at '+str(inter)
-            return None
-
-    def getObjectsInMask(self, mask, homography = None, minLength = 1):
-        '''Returns new objects made of the positions in the mask
-        mask is in the destination of the homography space'''
-        if homography is not None:
-            self.projectedPositions = self.positions.project(homography)
-        else:
-            self.projectedPositions = self.positions
-        def inMask(positions, i, mask):
-            p = positions[i]
-            return mask[p.y, p.x] != 0.
-
-        #subTimeIntervals self.getFirstInstant()+i
-        filteredIndices = [inMask(self.projectedPositions, i, mask) for i in range(int(self.length()))]
-        # 'connected components' in subTimeIntervals
-        l = 0
-        intervalLabels = []
-        prev = True
-        for i in filteredIndices:
-            if i:
-                if not prev: # new interval
-                    l += 1
-                intervalLabels.append(l)
-            else:
-                intervalLabels.append(-1)
-            prev = i
-        intervalLabels = array(intervalLabels)
-        subObjects = []
-        for l in set(intervalLabels):
-            if l >= 0:
-                if sum(intervalLabels == l) >= minLength:
-                    times = [self.getFirstInstant()+i for i in range(len(intervalLabels)) if intervalLabels[i] == l]
-                    subTimeInterval = TimeInterval(min(times), max(times))
-                    subObjects.append(self.getObjectInTimeInterval(subTimeInterval))
-
-        return subObjects
-
-    def getPositions(self):
-        return self.positions
-
-    def getVelocities(self):
-        return self.velocities
-
-    def getUserType(self):
-        return self.userType
-
-    def getCurvilinearPositions(self):
-        if hasattr(self, 'curvilinearPositions'):
-            return self.curvilinearPositions
-        else:
-            return None
-
-    def plotCurvilinearPositions(self, lane = None, options = '', withOrigin = False, **kwargs):
-        if hasattr(self, 'curvilinearPositions'):
-            if lane is None:
-                plot(list(self.getTimeInterval()), self.curvilinearPositions.positions[0], options, **kwargs)
-                if withOrigin:
-                    plot([self.getFirstInstant()], [self.curvilinearPositions.positions[0][0]], 'ro', **kwargs)
-            else:
-                instants = []
-                coords = []
-                for t, p in zip(self.getTimeInterval(), self.curvilinearPositions):
-                    if p[2] == lane:
-                        instants.append(t)
-                        coords.append(p[0])
-                    else:
-                        instants.append(NaN)
-                        coords.append(NaN)
-                plot(instants, coords, options, **kwargs)
-                if withOrigin and len(instants)>0:
-                    plot([instants[0]], [coords[0]], 'ro', **kwargs)
-        else:
-            print('Object {} has no curvilinear positions'.format(self.getNum()))        
-
-    def setUserType(self, userType):
-        self.userType = userType
-
-    def setFeatures(self, features):
-        self.features = [features[i] for i in self.featureNumbers]
-
-    def getFeatures(self):
-        return self.features
-
-    def hasFeatures(self):
-        return (self.features is not None)
-
-    def getFeature(self, i):
-        if self.hasFeatures() and i<len(self.features):
-            return self.features[i]
-        else:
-            return None
-
-    def getFeatureNumbers(self):
-        '''Returns the number of features at each instant
-        dict instant -> number of features'''
-        if self.hasFeatures():
-            featureNumbers = {}
-            for t in self.getTimeInterval():
-                n = 0
-                for f in self.getFeatures():
-                    if f.existsAtInstant(t):
-                        n += 1
-                featureNumbers[t]=n
-            return featureNumbers
-        else:
-            print('Object {} has no features loaded.'.format(self.getNum()))
-            return None
-
-    def getSpeeds(self, nInstantsIgnoredAtEnds = 0):
-        speeds = self.getVelocities().norm()
-        if nInstantsIgnoredAtEnds > 0:
-            n = min(nInstantsIgnoredAtEnds, int(floor(self.length()/2.)))
-            return speeds[n:-n]
-        else:
-            return speeds
-
-    def getSpeedIndicator(self):
-        from indicators import SeverityIndicator
-        return SeverityIndicator('Speed', {t:self.getVelocityAtInstant(t).norm2() for t in self.getTimeInterval()})
-
-    def getPositionAt(self, i):
-        return self.positions[i]
-
-    def getVelocityAt(self, i):
-        return self.velocities[i]
-
-    def getPositionAtInstant(self, i):
-        return self.positions[i-self.getFirstInstant()]
-
-    def getVelocityAtInstant(self, i):
-        return self.velocities[i-self.getFirstInstant()]
-
-    def getXCoordinates(self):
-        return self.positions.getXCoordinates()
-    
-    def getYCoordinates(self):
-        return self.positions.getYCoordinates()
-    
-    def plot(self, options = '', withOrigin = False, timeStep = 1, withFeatures = False, **kwargs):
-        if withFeatures and self.hasFeatures():
-            for f in self.getFeatures():
-                f.positions.plot('r', True, timeStep, **kwargs)
-            self.positions.plot('bx-', True, timeStep, **kwargs)
-        else:
-            self.positions.plot(options, withOrigin, timeStep, **kwargs)
-
-    def plotOnWorldImage(self, nPixelsPerUnitDistance, options = '', withOrigin = False, timeStep = 1, **kwargs):
-        self.positions.plotOnWorldImage(nPixelsPerUnitDistance, options, withOrigin, timeStep, **kwargs)
-
-    def play(self, videoFilename, homography = None, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1.):
-        cvutils.displayTrajectories(videoFilename, [self], homography = homography, firstFrameNum = self.getFirstInstant(), lastFrameNumArg = self.getLastInstant(), undistort = undistort, intrinsicCameraMatrix = intrinsicCameraMatrix, distortionCoefficients = distortionCoefficients, undistortedImageMultiplication = undistortedImageMultiplication)
-
-    def speedDiagnostics(self, framerate = 1., display = False):
-        speeds = framerate*self.getSpeeds()
-        coef = utils.linearRegression(range(len(speeds)), speeds)
-        print('min/5th perc speed: {} / {}\nspeed diff: {}\nspeed stdev: {}\nregression: {}'.format(min(speeds), scoreatpercentile(speeds, 5), speeds[-2]-speeds[1], std(speeds), coef[0]))
-        if display:
-            from matplotlib.pyplot import figure, axis
-            figure(1)
-            self.plot()
-            axis('equal')
-            figure(2)
-            plot(list(self.getTimeInterval()), speeds)
-
-    @staticmethod
-    def minMaxDistance(obj1, obj2):
-        '''Computes the min max distance used for feature grouping'''
-        commonTimeInterval = obj1.commonTimeInterval(obj2)
-        if not commonTimeInterval.empty():
-            minDistance = (obj1.getPositionAtInstant(commonTimeInterval.first)-obj2.getPositionAtInstant(commonTimeInterval.first)).norm2()
-            maxDistance = minDistance
-            for t in list(commonTimeInterval)[1:]:
-                d = (obj1.getPositionAtInstant(t)-obj2.getPositionAtInstant(t)).norm2()
-                if d<minDistance:
-                    minDistance = d
-                elif d>maxDistance:
-                    maxDistance = d
-            return int(commonTimeInterval.length()), minDistance, maxDistance
-        else:
-            return int(commonTimeInterval.length()), None, None
-
-    @staticmethod
-    def distances(obj1, obj2, instant1, _instant2 = None):
-        '''Returns the distances between all features of the 2 objects 
-        at the same instant instant1
-        or at instant1 and instant2'''
-        if _instant2 is None:
-            instant2 = instant1
-        else:
-            instant2 = _instant2
-        positions1 = [f.getPositionAtInstant(instant1).astuple() for f in obj1.features if f.existsAtInstant(instant1)]
-        positions2 = [f.getPositionAtInstant(instant2).astuple() for f in obj2.features if f.existsAtInstant(instant2)]
-        return cdist(positions1, positions2, metric = 'euclidean')
-        
-    @staticmethod
-    def minDistance(obj1, obj2, instant1, instant2 = None):
-        return MovingObject.distances(obj1, obj2, instant1, instant2).min()
-
-    @staticmethod
-    def maxDistance(obj1, obj2, instant, instant2 = None):
-        return MovingObject.distances(obj1, obj2, instant1, instant2).max()
-
-    def maxSize(self):
-        '''Returns the max distance between features
-        at instant there are the most features'''
-        if hasattr(self, 'features'):
-            nFeatures = -1
-            tMaxFeatures = 0
-            for t in self.getTimeInterval():
-                n = len([f for f in self.features if f.existsAtInstant(t)])
-                if n > nFeatures:
-                    nFeatures = n
-                    tMaxFeatures = t
-            return MovingObject.maxDistance(self, self, tMaxFeatures)
-        else:
-            print('Load features to compute a maximum size')
-            return None
-    
-    def setRoutes(self, startRouteID, endRouteID):
-        self.startRouteID = startRouteID
-        self.endRouteID = endRouteID
-           
-    def getInstantsCrossingLane(self, p1, p2):
-        '''Returns the instant(s)
-        at which the object passes from one side of the segment to the other
-        empty list if there is no crossing'''
-        indices, intersections = self.positions.getIntersections(p1, p2)
-        return [t+self.getFirstInstant() for t in indices]
-
-    @staticmethod
-    def computePET(obj1, obj2, collisionDistanceThreshold):
-        '''Post-encroachment time based on distance threshold
-
-        Returns the smallest time difference when the object positions are within collisionDistanceThreshold'''
-        #for i in xrange(int(obj1.length())-1):
-        #    for j in xrange(int(obj2.length())-1):
-        #        inter = segmentIntersection(obj1.getPositionAt(i), obj1.getPositionAt(i+1), obj2.getPositionAt(i), obj2.getPositionAt(i+1))
-        positions1 = [p.astuple() for p in obj1.getPositions()]
-        positions2 = [p.astuple() for p in obj2.getPositions()]
-        pets = zeros((int(obj1.length()), int(obj2.length())))
-        for i,t1 in enumerate(obj1.getTimeInterval()):
-            for j,t2 in enumerate(obj2.getTimeInterval()):
-                pets[i,j] = abs(t1-t2)
-        distances = cdist(positions1, positions2, metric = 'euclidean')
-        if distances.min() <= collisionDistanceThreshold:
-            return pets[distances <= collisionDistanceThreshold].min()
-        else:
-            return None
-
-    def predictPosition(self, instant, nTimeSteps, externalAcceleration = Point(0,0)):
-        '''Predicts the position of object at instant+deltaT, 
-        at constant speed'''
-        return predictPositionNoLimit(nTimeSteps, self.getPositionAtInstant(instant), self.getVelocityAtInstant(instant), externalAcceleration)
-
-    def projectCurvilinear(self, alignments, ln_mv_av_win=3):
-        ''' Add, for every object position, the class 'moving.CurvilinearTrajectory()'
-            (curvilinearPositions instance) which holds information about the
-            curvilinear coordinates using alignment metadata.
-            From Paul St-Aubin's PVA tools
-            ======
-
-            Input:
-            ======
-            alignments   = a list of alignments, where each alignment is a list of
-                           points (class Point).
-            ln_mv_av_win = moving average window (in points) in which to smooth
-                           lane changes. As per tools_math.cat_mvgavg(), this term
-                           is a search *radius* around the center of the window.
-
-            '''
-
-        self.curvilinearPositions = CurvilinearTrajectory()
-
-        #For each point
-        for i in xrange(int(self.length())):
-            result = getSYfromXY(self.getPositionAt(i), alignments)
-
-            # Error handling
-            if(result is None):
-                print('Warning: trajectory {} at point {} {} has alignment errors (spline snapping)\nCurvilinear trajectory could not be computed'.format(self.getNum(), i, self.getPositionAt(i)))
-            else:
-                [align, alignPoint, snappedPoint, subsegmentDistance, S, Y] = result
-                self.curvilinearPositions.addPositionSYL(S, Y, align)
-
-        ## Go back through points and correct lane  
-        #Run through objects looking for outlier point
-        smoothed_lanes = utils.cat_mvgavg(self.curvilinearPositions.getLanes(),ln_mv_av_win)
-        ## Recalculate projected point to new lane
-        lanes = self.curvilinearPositions.getLanes()
-        if(lanes != smoothed_lanes):
-            for i in xrange(len(lanes)):
-                if(lanes[i] != smoothed_lanes[i]):
-                    result = getSYfromXY(self.getPositionAt(i),[alignments[smoothed_lanes[i]]])
-
-                    # Error handling
-                    if(result is None):
-                        ## This can be triggered by tracking errors when the trajectory jumps around passed another alignment.
-                        print('    Warning: trajectory {} at point {} {} has alignment errors during trajectory smoothing and will not be corrected.'.format(self.getNum(), i, self.getPositionAt(i)))
-                    else:
-                        [align, alignPoint, snappedPoint, subsegmentDistance, S, Y] = result
-                        self.curvilinearPositions.setPosition(i, S, Y, align)
-
-    def computeSmoothTrajectory(self, minCommonIntervalLength):
-        '''Computes the trajectory as the mean of all features
-        if a feature exists, its position is 
-        
-        Warning work in progress
-        TODO? not use the first/last 1-.. positions'''
-        nFeatures = len(self.features)
-        if nFeatures == 0:
-            print('Empty object features\nCannot compute smooth trajectory')
-        else:
-            # compute the relative position vectors
-            relativePositions = {} # relativePositions[(i,j)] is the position of j relative to i
-            for i in xrange(nFeatures):
-                for j in xrange(i):
-                    fi = self.features[i]
-                    fj = self.features[j]
-                    inter = fi.commonTimeInterval(fj)
-                    if inter.length() >= minCommonIntervalLength:
-                        xi = array(fi.getXCoordinates()[inter.first-fi.getFirstInstant():int(fi.length())-(fi.getLastInstant()-inter.last)])
-                        yi = array(fi.getYCoordinates()[inter.first-fi.getFirstInstant():int(fi.length())-(fi.getLastInstant()-inter.last)])
-                        xj = array(fj.getXCoordinates()[inter.first-fj.getFirstInstant():int(fj.length())-(fj.getLastInstant()-inter.last)])
-                        yj = array(fj.getYCoordinates()[inter.first-fj.getFirstInstant():int(fj.length())-(fj.getLastInstant()-inter.last)])
-                        relativePositions[(i,j)] = Point(median(xj-xi), median(yj-yi))
-                        relativePositions[(j,i)] = -relativePositions[(i,j)]
-
-    ###
-    # User Type Classification
-    ###
-    def classifyUserTypeSpeedMotorized(self, threshold, aggregationFunc = median, nInstantsIgnoredAtEnds = 0):
-        '''Classifies slow and fast road users
-        slow: non-motorized -> pedestrians
-        fast: motorized -> cars
-        
-        aggregationFunc can be any function that can be applied to a vector of speeds, including percentile:
-        aggregationFunc = lambda x: percentile(x, percentileFactor) # where percentileFactor is 85 for 85th percentile'''
-        speeds = self.getSpeeds(nInstantsIgnoredAtEnds)
-        if aggregationFunc(speeds) >= threshold:
-            self.setUserType(userType2Num['car'])
-        else:
-            self.setUserType(userType2Num['pedestrian'])
-
-    def classifyUserTypeSpeed(self, speedProbabilities, aggregationFunc = median, nInstantsIgnoredAtEnds = 0):
-        '''Classifies road user per road user type
-        speedProbabilities are functions return P(speed|class)
-        in a dictionary indexed by user type names
-        Returns probabilities for each class
-
-        for simple threshold classification, simply pass non-overlapping indicator functions (membership)
-        e.g. def indic(x):
-        if abs(x-mu) < sigma:
-        return 1
-        else:
-        return x'''
-        if not hasattr(self, 'aggregatedSpeed'):
-            self.aggregatedSpeed = aggregationFunc(self.getSpeeds(nInstantsIgnoredAtEnds))
-        userTypeProbabilities = {}
-        for userTypename in speedProbabilities:
-            userTypeProbabilities[userType2Num[userTypename]] = speedProbabilities[userTypename](self.aggregatedSpeed)
-        self.setUserType(utils.argmaxDict(userTypeProbabilities))
-        return userTypeProbabilities
-
-    def initClassifyUserTypeHoGSVM(self, aggregationFunc, pedBikeCarSVM, bikeCarSVM = None, pedBikeSpeedTreshold = float('Inf'), bikeCarSpeedThreshold = float('Inf'), nInstantsIgnoredAtEnds = 0):
-        '''Initializes the data structures for classification
-
-        TODO? compute speed for longest feature?'''
-        self.aggregatedSpeed = aggregationFunc(self.getSpeeds(nInstantsIgnoredAtEnds))
-        if self.aggregatedSpeed < pedBikeSpeedTreshold or bikeCarSVM is None:
-            self.appearanceClassifier = pedBikeCarSVM
-        elif self.aggregatedSpeed < bikeCarSpeedThreshold:
-            self.appearanceClassifier = bikeCarSVM
-        else:
-            class CarClassifier:
-                def predict(self, hog):
-                    return userType2Num['car']
-            self.appearanceClassifier = CarClassifier()
-        
-        self.userTypes = {}
-
-    def classifyUserTypeHoGSVMAtInstant(self, img, instant, homography, width, height, px = 0.2, py = 0.2, minNPixels = 800):
-        '''Extract the image box around the object and 
-        applies the SVM model on it'''
-        croppedImg, yCropMin, yCropMax, xCropMin, xCropMax = cvutils.imageBox(img, self, instant, homography, width, height, px, py, minNPixels)
-        if croppedImg is not None and len(croppedImg) > 0:
-            hog = cvutils.HOG(croppedImg)#HOG(image, rescaleSize = (64, 64), orientations=9, pixelsPerCell=(8, 8), cellsPerBlock=(2, 2), visualize=False, normalize=False)
-            self.userTypes[instant] = int(self.appearanceClassifier.predict(hog))
-        else:
-            self.userTypes[instant] = userType2Num['unknown']
-
-    def classifyUserTypeHoGSVM(self, pedBikeCarSVM = None, width = 0, height = 0, homography = None, images = None, bikeCarSVM = None, pedBikeSpeedTreshold = float('Inf'), bikeCarSpeedThreshold = float('Inf'), minSpeedEquiprobable = -1, speedProbabilities = None, aggregationFunc = median, nInstantsIgnoredAtEnds = 0, px = 0.2, py = 0.2, minNPixels = 800):
-        '''Agregates SVM detections in each image and returns probability
-        (proportion of instants with classification in each category)
-
-        images is a dictionary of images indexed by instant
-        With default parameters, the general (ped-bike-car) classifier will be used
-        
-        Considered categories are the keys of speedProbabilities'''
-        if not hasattr(self, 'aggregatedSpeed') or not hasattr(self, 'userTypes'):
-            print('Initilize the data structures for classification by HoG-SVM')
-            self.initClassifyUserTypeHoGSVM(aggregationFunc, pedBikeCarSVM, bikeCarSVM, pedBikeSpeedTreshold, bikeCarSpeedThreshold, nInstantsIgnoredAtEnds)
-
-        if len(self.userTypes) != self.length() and images is not None: # if classification has not been done previously
-            for t in self.getTimeInterval():
-                if t not in self.userTypes:
-                    self.classifyUserTypeHoGSVMAtInstant(images[t], t, homography, width, height, px, py, minNPixels)
-        # compute P(Speed|Class)
-        if speedProbabilities is None or self.aggregatedSpeed < minSpeedEquiprobable: # equiprobable information from speed
-            userTypeProbabilities = {userType2Num['car']: 1., userType2Num['pedestrian']: 1., userType2Num['bicycle']: 1.}
-        else:
-            userTypeProbabilities = {userType2Num[userTypename]: speedProbabilities[userTypename](self.aggregatedSpeed) for userTypename in speedProbabilities}
-        # result is P(Class|Appearance) x P(Speed|Class)
-        nInstantsUserType = {userTypeNum: 0 for userTypeNum in userTypeProbabilities}# number of instants the object is classified as userTypename
-        for t in self.userTypes:
-            nInstantsUserType[self.userTypes[t]] = nInstantsUserType.get(self.userTypes[t], 0) + 1
-        for userTypeNum in userTypeProbabilities:
-            userTypeProbabilities[userTypeNum] *= nInstantsUserType[userTypeNum]
-        # class is the user type that maximizes usertype probabilities
-        self.setUserType(utils.argmaxDict(userTypeProbabilities))
-
-    def classifyUserTypeArea(self, areas, homography):
-        '''Classifies the object based on its location (projected to image space)
-        areas is a dictionary of matrix of the size of the image space 
-        for different road users possible locations, indexed by road user type names
-
-        TODO: areas could be a wrapper object with a contains method that would work for polygons and images (with wrapper class)
-        skip frames at beginning/end?'''
-        print('not implemented/tested yet')
-        if not hasattr(self, projectedPositions):
-            if homography is not None:
-                self.projectedPositions = obj.positions.project(homography)
-            else:
-                self.projectedPositions = obj.positions
-        possibleUserTypes = {userType: 0 for userType in range(len(userTypenames))}
-        for p in self.projectedPositions:
-            for userTypename in areas:
-                if areas[userTypename][p.x, p.y] != 0:
-                    possibleUserTypes[userType2Enum[userTypename]] += 1
-        # what to do: threshold for most common type? self.setUserType()
-        return possibleUserTypes
-
-    @staticmethod
-    def collisionCourseDotProduct(movingObject1, movingObject2, instant):
-        'A positive result indicates that the road users are getting closer'
-        deltap = movingObject1.getPositionAtInstant(instant)-movingObject2.getPositionAtInstant(instant)
-        deltav = movingObject2.getVelocityAtInstant(instant)-movingObject1.getVelocityAtInstant(instant)
-        return Point.dot(deltap, deltav)
-
-    @staticmethod
-    def collisionCourseCosine(movingObject1, movingObject2, instant):
-        'A positive result indicates that the road users are getting closer'
-        return Point.cosine(movingObject1.getPositionAtInstant(instant)-movingObject2.getPositionAtInstant(instant), #deltap
-                            movingObject2.getVelocityAtInstant(instant)-movingObject1.getVelocityAtInstant(instant)) #deltav
-
-
-##################
-# Annotations
-##################
-
-class BBMovingObject(MovingObject):
-    '''Class for a moving object represented as a bounding box
-    used for series of ground truth annotations using bounding boxes
-     and for the output of Urban Tracker http://www.jpjodoin.com/urbantracker/
-
-    By default in image space
-
-    Its center is the center of the box (generalize to other shapes?) 
-    (computed after projecting if homography available)
-    '''
-
-    def __init__(self, num = None, timeInterval = None, topLeftPositions = None, bottomRightPositions = None, userType = userType2Num['unknown']):
-        super(BBMovingObject, self).__init__(num, timeInterval, userType = userType)
-        self.topLeftPositions = topLeftPositions.getPositions()
-        self.bottomRightPositions = bottomRightPositions.getPositions()
-
-    def computeCentroidTrajectory(self, homography = None):
-        self.positions = self.topLeftPositions.add(self.bottomRightPositions).multiply(0.5)
-        if homography is not None:
-            self.positions = self.positions.project(homography)
-
-    def matches(self, obj, instant, matchingDistance):
-        '''Indicates if the annotation matches obj (MovingObject)
-        with threshold matchingDistance
-        Returns distance if below matchingDistance, matchingDistance+1 otherwise
-        (returns an actual value, otherwise munkres does not terminate)'''
-        d = Point.distanceNorm2(self.getPositionAtInstant(instant), obj.getPositionAtInstant(instant))
-        if d < matchingDistance:
-            return d
-        else:
-            return matchingDistance + 1
-
-def computeClearMOT(annotations, objects, matchingDistance, firstInstant, lastInstant, returnMatches = False, debug = False):
-    '''Computes the CLEAR MOT metrics 
-
-    Reference:
-    Keni, Bernardin, and Stiefelhagen Rainer. "Evaluating multiple object tracking performance: the CLEAR MOT metrics." EURASIP Journal on Image and Video Processing 2008 (2008)
-
-    objects and annotations are supposed to in the same space
-    current implementation is BBMovingObject (bounding boxes)
-    mathingDistance is threshold on matching between annotation and object
-
-    TO: tracker output (objects)
-    GT: ground truth (annotations)
-
-    Output: returns motp, mota, mt, mme, fpt, gt
-    mt number of missed GT.frames (sum of the number of GT not detected in each frame)
-    mme number of mismatches
-    fpt number of false alarm.frames (tracker objects without match in each frame)
-    gt number of GT.frames
-
-    if returnMatches is True, return as 2 new arguments the GT and TO matches
-    matches is a dict
-    matches[i] is the list of matches for GT/TO i
-    the list of matches is a dict, indexed by time, for the TO/GT id matched at time t 
-    (an instant t not present in matches[i] at which GT/TO exists means a missed detection or false alarm)
-
-    TODO: Should we use the distance as weights or just 1/0 if distance below matchingDistance?
-    (add argument useDistanceForWeights = False)'''
-    from munkres import Munkres
-    
-    munk = Munkres()
-    dist = 0. # total distance between GT and TO
-    ct = 0 # number of associations between GT and tracker output in each frame
-    gt = 0 # number of GT.frames
-    mt = 0 # number of missed GT.frames (sum of the number of GT not detected in each frame)
-    fpt = 0 # number of false alarm.frames (tracker objects without match in each frame)
-    mme = 0 # number of mismatches
-    matches = {} # match[i] is the tracker track associated with GT i (using object references)
-    if returnMatches:
-        gtMatches = {a.getNum():{} for a in annotations}
-        toMatches = {o.getNum():{} for o in objects}
-    for t in xrange(firstInstant, lastInstant+1):
-        previousMatches = matches.copy()
-        # go through currently matched GT-TO and check if they are still matched withing matchingDistance
-        toDelete = []
-        for a in matches:
-            if a.existsAtInstant(t) and matches[a].existsAtInstant(t):
-                d = a.matches(matches[a], t, matchingDistance)
-                if d < matchingDistance:
-                    dist += d
-                else:
-                    toDelete.append(a)
-            else:
-                toDelete.append(a)
-        for a in toDelete:
-            del matches[a]
-
-        # match all unmatched GT-TO
-        matchedGTs = matches.keys()
-        matchedTOs = matches.values()
-        costs = []
-        unmatchedGTs = [a for a in annotations if a.existsAtInstant(t) and a not in matchedGTs]
-        unmatchedTOs = [o for o in objects if o.existsAtInstant(t) and o not in matchedTOs]
-        nGTs = len(matchedGTs)+len(unmatchedGTs)
-        nTOs = len(matchedTOs)+len(unmatchedTOs)
-        if len(unmatchedTOs) > 0:
-            for a in unmatchedGTs:
-                costs.append([a.matches(o, t, matchingDistance) for o in unmatchedTOs])
-        if len(costs) > 0:
-            newMatches = munk.compute(costs)
-            for k,v in newMatches:
-                if costs[k][v] < matchingDistance:
-                    matches[unmatchedGTs[k]]=unmatchedTOs[v]
-                    dist += costs[k][v]
-        if debug:
-            print('{} '.format(t)+', '.join(['{} {}'.format(k.getNum(), v.getNum()) for k,v in matches.iteritems()]))
-        if returnMatches:
-            for a,o in matches.iteritems():
-                gtMatches[a.getNum()][t] = o.getNum()
-                toMatches[o.getNum()][t] = a.getNum()
-        
-        # compute metrics elements
-        ct += len(matches)
-        mt += nGTs-len(matches)
-        fpt += nTOs-len(matches)
-        gt += nGTs
-        # compute mismatches
-        # for gt that do not appear in both frames, check if the corresponding to was matched to another gt in previous/next frame
-        mismatches = []
-        for a in matches:
-            if a in previousMatches:
-                if matches[a] != previousMatches[a]:
-                    mismatches.append(a)
-            elif matches[a] in previousMatches.values():
-                mismatches.append(matches[a])
-        for a in previousMatches:
-            if a not in matches and previousMatches[a] in matches.values():
-                mismatches.append(previousMatches[a])
-        if debug: 
-            for mm in set(mismatches):
-                print type(mm), mm.getNum()
-        # some object mismatches may appear twice
-        mme += len(set(mismatches))
-        
-    if ct > 0:
-        motp = dist/ct
-    else:
-        motp = None
-    if gt > 0:
-        mota = 1.-float(mt+fpt+mme)/gt
-    else:
-        mota = None
-    if returnMatches:
-        return motp, mota, mt, mme, fpt, gt, gtMatches, toMatches
-    else:
-        return motp, mota, mt, mme, fpt, gt
-
-def plotRoadUsers(objects, colors):
-    '''Colors is a PlottingPropertyValues instance'''
-    from matplotlib.pyplot import figure, axis
-    figure()
-    for obj in objects:
-        obj.plot(colors.get(obj.userType))
-    axis('equal')
-
-
-if __name__ == "__main__":
-    import doctest
-    import unittest
-    suite = doctest.DocFileSuite('tests/moving.txt')
-    #suite = doctest.DocTestSuite()
-    unittest.TextTestRunner().run(suite)
-    #doctest.testmod()
-    #doctest.testfile("example.txt")
-    if shapelyAvailable: 
-        suite = doctest.DocFileSuite('tests/moving_shapely.txt')
-        unittest.TextTestRunner().run(suite)
--- a/python/objectsmoothing.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,246 +0,0 @@
-import storage, moving, utils
-
-from math import atan2, degrees, sin, cos, pi
-from numpy import median
-
-import matplotlib.pyplot as plt
-
-def findNearest(feat, featureSet,t,reverse=True):
-    dist={}
-    for f in featureSet:
-        if reverse:
-            dist[f]= moving.Point.distanceNorm2(feat.getPositionAtInstant(t+1),f.getPositionAtInstant(t))
-        else:
-            dist[f]= moving.Point.distanceNorm2(feat.getPositionAtInstant(t-1),f.getPositionAtInstant(t))
-    return min(dist, key=dist.get) # = utils.argmaxDict(dist)
-    
-def getFeatures(obj, featureID):
-    currentFeature = obj.getFeature(featureID)
-    first = currentFeature.getFirstInstant()
-    last = currentFeature.getLastInstant()
-    featureList=[[currentFeature,first,last,moving.Point(0,0)]]
-    # find the features to fill in the beginning of the object existence
-    while first != obj.getFirstInstant():
-        delta=featureList[-1][3]
-        featureSet = [f for f in obj.getFeatures() if f.existsAtInstant(first-1)]
-        feat = findNearest(currentFeature,featureSet,first-1,reverse=True)
-        if feat.existsAtInstant(first):
-            featureList.append([feat,feat.getFirstInstant(),first-1,(currentFeature.getPositionAtInstant(first)-feat.getPositionAtInstant(first))+delta])
-        else:
-            featureList.append([feat,feat.getFirstInstant(),first-1,(currentFeature.getPositionAtInstant(first)-feat.getPositionAtInstant(first-1))+delta])
-        currentFeature = feat
-        first= feat.getFirstInstant()
-    # find the features to fill in the end of the object existence
-    delta=moving.Point(0,0)
-    currentFeature = obj.getFeature(featureID) # need to reinitialize
-    while last!= obj.getLastInstant():
-        featureSet = [f for f in obj.getFeatures() if f.existsAtInstant(last+1)]
-        feat = findNearest(currentFeature,featureSet,last+1,reverse=False)
-        if feat.existsAtInstant(last):
-            featureList.append([feat,last+1,feat.getLastInstant(),(currentFeature.getPositionAtInstant(last)-feat.getPositionAtInstant(last))+delta])
-        else:
-            featureList.append([feat,last+1,feat.getLastInstant(),(currentFeature.getPositionAtInstant(last)-feat.getPositionAtInstant(last+1))+delta])
-        currentFeature = feat
-        last= feat.getLastInstant()
-        delta=featureList[-1][3]
-    return featureList
-    
-def buildFeature(obj, featureID, num = 1):
-    featureList= getFeatures(obj, featureID)
-    tmp={}
-    delta={}
-    for i in featureList:
-        for t in xrange(i[1],i[2]+1):
-            tmp[t]=[i[0],i[3]]
-    newTraj = moving.Trajectory()
-    
-    for instant in obj.getTimeInterval():
-        newTraj.addPosition(tmp[instant][0].getPositionAtInstant(instant)+tmp[instant][1])
-    newFeature= moving.MovingObject(num,timeInterval=obj.getTimeInterval(),positions=newTraj)
-    return newFeature
-
-def getBearing(p1,p2,p3):
-    angle = degrees(atan2(p3.y -p1.y, p3.x -p1.x))
-    bearing1 = (90 - angle) % 360
-    angle2 = degrees(atan2(p2.y -p1.y, p2.x -p1.x))
-    bearing2 = (90 - angle2) % 360    
-    dist= moving.Point.distanceNorm2(p1, p2)
-    return [dist,bearing1,bearing2,bearing2-bearing1]
-
-#Quantitative analysis "CSJ" functions    
-def computeVelocities(obj, smoothing=True, halfWidth=3):  #compute velocities from positions
-    velocities={}
-    for i in list(obj.timeInterval)[:-1]:
-        p1= obj.getPositionAtInstant(i)
-        p2= obj.getPositionAtInstant(i+1)
-        velocities[i]=p2-p1        
-    velocities[obj.getLastInstant()]= velocities[obj.getLastInstant()-1]  # duplicate last point
-    if smoothing:
-        velX= [velocities[y].aslist()[0] for y in sorted(velocities.keys())]
-        velY= [velocities[y].aslist()[1] for y in sorted(velocities.keys())]
-        v1= list(utils.filterMovingWindow(velX, halfWidth))
-        v2= list(utils.filterMovingWindow(velY, halfWidth))
-        smoothedVelocity={}
-        for t,i in enumerate(sorted(velocities.keys())):
-            smoothedVelocity[i]=moving.Point(v1[t], v2[t])
-        velocities=smoothedVelocity
-    return velocities
-    
-def computeAcceleration(obj,fromPosition=True):
-    acceleration={}
-    if fromPosition:
-        velocities=computeVelocities(obj,False,1)
-        for i in sorted(velocities.keys()):
-            if i != sorted(velocities.keys())[-1]:
-                acceleration[i]= velocities[i+1]-velocities[i]
-    else:
-        for i in list(obj.timeInterval)[:-1]:
-            v1= obj.getVelocityAtInstant(i)
-            v2= obj.getVelocityAtInstant(i+1)
-            acceleration[i]= v2-v1
-    return acceleration
-    
-def computeJerk(obj,fromPosition=True):
-    jerk={}
-    acceleration=computeAcceleration(obj,fromPosition=fromPosition)
-    for i in sorted(acceleration.keys()):
-        if i != sorted(acceleration.keys())[-1]:
-            jerk[i] = (acceleration[i+1]-acceleration[i]).norm2()
-    return jerk
-    
-def sumSquaredJerk(obj,fromPosition=True):
-    jerk= computeJerk(obj,fromPosition=fromPosition)
-    t=0
-    for i in sorted(jerk.keys()):
-        t+= jerk[i]* jerk[i]
-    return t
-    
-def smoothObjectTrajectory(obj, featureID,newNum,smoothing=False,halfWidth=3,create=False):
-    results=[]    
-    bearing={}
-    if create:
-        feature = buildFeature(obj, featureID , num=1) # why num=1
-    else:
-        feature = obj.getFeature(featureID)
-    for t in feature.getTimeInterval():
-        p1= feature.getPositionAtInstant(t)
-        p2= obj.getPositionAtInstant(t)
-        if t!=feature.getLastInstant():
-            p3= feature.getPositionAtInstant(t+1)
-        else:
-            p1= feature.getPositionAtInstant(t-1)
-            p3= feature.getPositionAtInstant(t)
-        bearing[t]= getBearing(p1,p2,p3)[1]        
-        results.append(getBearing(p1,p2,p3))
-    
-    medianResults=median(results,0)
-    dist= medianResults[0]
-    angle= medianResults[3]
-    
-    for i in sorted(bearing.keys()):
-        bearing[i]= bearing[i]+angle
-
-    if smoothing:
-        bearingInput=[]
-        for i in sorted(bearing.keys()):
-            bearingInput.append(bearing[i])
-        import utils
-        bearingOut=utils.filterMovingWindow(bearingInput, halfWidth)
-        for t,i in enumerate(sorted(bearing.keys())):
-            bearing[i]=bearingOut[t]
-        
-        #solve a smoothing problem in case of big drop in computing bearing (0,360)    
-        for t,i in enumerate(sorted(bearing.keys())):
-            if i!= max(bearing.keys()) and abs(bearingInput[t] - bearingInput[t+1])>=340:
-                for x in xrange(max(i-halfWidth,min(bearing.keys())),min(i+halfWidth,max(bearing.keys()))+1):
-                    bearing[x]=bearingInput[t-i+x]
-
-    translated = moving.Trajectory()
-    for t in feature.getTimeInterval():
-        p1= feature.getPositionAtInstant(t)
-        p1.x = p1.x + dist*sin(bearing[t]*pi/180)
-        p1.y = p1.y + dist*cos(bearing[t]*pi/180)
-        translated.addPosition(p1)
-        
-    #modify first and last un-smoothed positions (half width)
-    if smoothing:
-        d1= translated[halfWidth]- feature.positions[halfWidth]
-        d2= translated[-halfWidth-1]- feature.positions[-halfWidth-1]
-        for i in xrange(halfWidth):
-            p1= feature.getPositionAt(i)+d1
-            p2= feature.getPositionAt(-i-1)+d2
-            translated.setPosition(i,p1)
-            translated.setPosition(-i-1,p2)
-        
-    newObj= moving.MovingObject(newNum,timeInterval=feature.getTimeInterval(),positions=translated)
-    return newObj
-    
-def smoothObject(obj, newNum, minLengthParam = 0.7, smoothing = False, plotResults = True, halfWidth = 3, _computeVelocities = True, optimize = True, create = False):
-    '''Computes a smoother trajectory for the object
-    and optionnally smoother velocities
-    
-    The object should have its features in obj.features
-    TODO: check whether features are necessary'''
-    if not obj.hasFeatures():
-        print('Object {} has an empty list of features: please load and add them using obj.setFeatures(features)'.format(obj.getNum()))
-        from sys import exit
-        exit()
-
-    featureList=[i for i,f in enumerate(obj.getFeatures()) if f.length() >= minLengthParam*obj.length()]
-    if featureList==[]:
-        featureList.append(utils.argmaxDict({i:f.length() for i,f in enumerate(obj.getFeatures())}))
-        create = True
-    newObjects = []
-    for featureID in featureList: # featureID should be the index in the list of obj.features
-        newObjects.append(smoothObjectTrajectory(obj, featureID, newNum, smoothing = smoothing, halfWidth = halfWidth, create = create))
-
-    newTranslated = moving.Trajectory()
-    newInterval = []
-    for t in obj.getTimeInterval():
-        xCoord=[]
-        yCoord=[]
-        for i in newObjects:
-            if i.existsAtInstant(t):
-                p1= i.getPositionAtInstant(t)
-                xCoord.append(p1.x)
-                yCoord.append(p1.y)
-        if xCoord != []:
-            tmp= moving.Point(median(xCoord), median(yCoord))
-            newInterval.append(t)
-            newTranslated.addPosition(tmp)
-    
-    newObj= moving.MovingObject(newNum, timeInterval = moving.TimeInterval(min(newInterval),max(newInterval)),positions=newTranslated)
-        
-    if _computeVelocities:
-        tmpTraj = moving.Trajectory()
-        velocities= computeVelocities(newObj,True,5)
-        for i in sorted(velocities.keys()):
-            tmpTraj.addPosition(velocities[i])
-        newObj.velocities=tmpTraj
-    else:
-        newObj.velocities=obj.velocities
-    
-    if optimize:
-        csj1= sumSquaredJerk(obj,fromPosition=True)
-        csj2= sumSquaredJerk(newObj,fromPosition=True)
-        if csj1<csj2:
-            newObj=obj
-            newObj.velocities=obj.velocities
-        if _computeVelocities and csj1>=csj2:
-            csj3= sumSquaredJerk(obj,fromPosition=False)
-            csj4= sumSquaredJerk(newObj,fromPosition=False)
-            if csj4<=csj3:
-                newObj.velocities= obj.velocities
-
-    newObj.featureNumbers=obj.featureNumbers
-    newObj.features=obj.getFeatures()
-    newObj.userType=obj.userType
-
-    if plotResults:
-        plt.figure()
-        plt.title('objects_id = {}'.format(obj.num))
-        for i in featureList:
-            obj.getFeature(i).plot('cx-')
-        obj.plot('rx-')
-        newObj.plot('gx-')        
-    return newObj
--- a/python/pavement.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,313 +0,0 @@
-#! /usr/bin/env python
-'''Tools for processing and analyzing pavement marking data'''
-
-import utils
-
-import numpy as np
-
-
-paintTypes = {0: "Non-existant",
-              1: "Eau",
-              2: "Epoxy",
-              3: "Alkyde",
-              4: "Autre"}
-
-durabilities = {1: 98, #96 to 100
-                2: 85, #75 to 96
-                3: 62, #50 to 75
-                4: 32, #15 to 50
-                5: 7 #0 to 15
-                }
-
-roadFunctionalClasses = {40: "Collectrice",
-                         20: "Nationale",
-                         30: "Regionale",
-                         10: "Autoroute",
-                         60: "Acces ressources",
-                         51: "Local 1",
-                         52: "Local 2",
-                         53: "Local 3",
-                         15: "Aut (PRN)",
-                         25: "Nat (PRN)",
-                         70: "Acces isolees",
-                         99: "Autres"}
-
-def caracteristiques(rtss, maintenanceLevel, rtssWeatherStation, fmr, paintType):
-    '''Computes characteristic data for the RTSS (class rtss) 
-    maintenanceLevel = pylab.csv2rec('C:\Users\Alexandre\Desktop\Projet_maitrise_recherche\BDD_access\\analyse_donnees_deneigement\\exigence_circuits.txt', delimiter = ';')
-    rtssWeatherStation = pylab.csv2rec('C:\Users\Alexandre\Desktop\Projet_maitrise_recherche\stations_environnement_canada\\rtssWeatherStation\juste_pour_rtss_avec_donnees_entretien_hiv\\rtssWeatherStation_EC3.txt', delimiter = ',')
-    fmr = pylab.csv2rec('C:\Users\Alexandre\Desktop\Projet_maitrise_recherche\BDD_access\\analyse_donnees_deneigement\\fmr.txt', delimiter = ';')
-    paintType = pylab.csv2rec('C:\Users\Alexandre\Desktop\Projet_maitrise_recherche\BDD_access\\analyse_donnees_deneigement\\type_peinture.txt', delimiter = ';')
-    '''
-    # determination exigence deneigement
-    if rtss.id in maintenanceLevel['rtss_debut']:
-        for i in range(len(maintenanceLevel)):
-            if maintenanceLevel['rtss_debut'][i] == rtss.id:
-                exigence = maintenanceLevel['exigence'][i]
-    else:
-        exigence = ''
-
-    # determination x/y
-    if rtss.id in rtssWeatherStation['rtss']:
-        for i in range(len(rtssWeatherStation)):		
-            if rtssWeatherStation['rtss'][i] == rtss.id:
-                x_moy = rtssWeatherStation['x_moy'][i]
-                y_moy = rtssWeatherStation['y_moy'][i]
-    else:
-        x_moy, y_moy = '',''	
-
-    # determination info fmr
-    age_revtm, classe_fonct, type_revtm, milieu, djma, pourc_camions, vit_max = [], [], [], [], [], [], []
-    if rtss.id in fmr['rtss_debut']:
-        for i in range(len(fmr)):
-            if fmr['rtss_debut'][i] == rtss.id:
-                age_revtm.append(fmr['age_revtm'][i])
-                classe_fonct.append(fmr['des_clasf_fonct'][i])
-                type_revtm.append(fmr['des_type_revtm'][i])
-                milieu.append(fmr['des_cod_mil'][i])
-                djma.append(fmr['val_djma'][i])
-                pourc_camions.append(fmr['val_pourc_camns'][i])
-                vit_max.append(fmr['val_limt_vitss'][i])
-        age_revtm = utils.mostCommon(age_revtm)
-        classe_fonct = utils.mostCommon(classe_fonct)
-        type_revtm = utils.mostCommon(type_revtm)
-        milieu = utils.mostCommon(milieu)
-        djma = utils.mostCommon(djma)
-        vit_max = utils.mostCommon(vit_max)
-        if vit_max < 0:
-            vit_max = ''
-        pourc_camions = utils.mostCommon(pourc_camions)
-        if pourc_camions == "" or pourc_camions < 0:
-            djma_camions = ""
-        else:
-            djma_camions = pourc_camions*djma/100
-    else:
-        age_revtm, classe_fonct, type_revtm, milieu, djma, djma_camions, vit_max  = '','','','','','',''
-
-    # determination type peinture
-    peinture_rd, peinture_rg, peinture_cl = [], [], []
-    peinture_lrd, peinture_lrg, peinture_lc = 0,0,0
-    if rtss.id in paintType['rtss_debut_orig']:
-        for i in range(len(paintType)):
-            if paintType['rtss_debut_orig'][i] == rtss.id:
-                peinture_rd.append((paintType['peinture_rd'][i]))
-                peinture_rg.append((paintType['peinture_rg'][i]))
-                peinture_cl.append((paintType['peinture_cl'][i]))
-        peinture_lrd = utils.mostCommon(peinture_rd)
-        peinture_lrg = utils.mostCommon(peinture_rg)
-        peinture_lc = utils.mostCommon(peinture_cl)
-    else:
-        peinture_lrd, peinture_lrg, peinture_lc = '','',''		
-
-    return (exigence, x_moy, y_moy, age_revtm, classe_fonct, type_revtm, milieu, djma, djma_camions, vit_max, peinture_lrd, peinture_lrg, peinture_lc)
-
-def winterMaintenanceIndicators(data, startDate, endDate, circuitReference, snowThreshold):
-    '''Computes several winter maintenance indicators
-    data = entretien_hivernal = pylab.csv2rec('C:\Users\Alexandre\Documents\Cours\Poly\Projet\mesures_entretien_hivernal\mesures_deneigement.txt', delimiter = ',')'''
-    import datetime
-    somme_eau, somme_neige, somme_abrasif, somme_sel, somme_lc, somme_lrg, somme_lrd, compteur_premiere_neige, compteur_somme_abrasif = 0,0,0,0,0,0,0,0,0
-
-    if circuitReference in data['ref_circuit']:
-        for i in range(len(data)):
-            if data['ref_circuit'][i] == circuitReference and (data['date'][i] + datetime.timedelta(days = 6)) <= endDate and (data['date'][i] + datetime.timedelta(days = 6)) > startDate:
-                compteur_premiere_neige += float(data['premiere_neige'][i])
-                somme_neige += float(data['neige'][i])
-                somme_eau += float(data['eau'][i])
-                somme_abrasif += float(data['abrasif'][i])
-                somme_sel += float(data['sel'][i])
-                somme_lc += float(data['lc'][i])
-                somme_lrg += float(data['lrg'][i])
-                somme_lrd += float(data['lrd'][i])
-                compteur_somme_abrasif += float(data['autre_abrasif_binaire'][i])
-        if compteur_premiere_neige >= 1:
-            premiere_neige = 1
-        else:
-            premiere_neige = 0
-        if compteur_somme_abrasif >= 1:
-            autres_abrasifs = 1
-        else:
-            autres_abrasifs = 0
-        if somme_neige < snowThreshold:
-            neigeMTQ_sup_seuil = 0
-        else:
-            neigeMTQ_sup_seuil = 1
-    else:
-        somme_eau, somme_neige, somme_abrasif, somme_sel, somme_lc, somme_lrg, somme_lrd, premiere_neige, autres_abrasifs, neigeMTQ_sup_seuil = '','','','','','','','','',''
-
-    return (somme_eau, somme_neige, neigeMTQ_sup_seuil, somme_abrasif, somme_sel, somme_lc, somme_lrg, somme_lrd, premiere_neige, autres_abrasifs)
-
-def weatherIndicators(data, startDate, endDate, snowThreshold, weatherDatatype, minProportionMeasures = 0.):
-    '''Computes the indicators from Environment Canada files
-    (loaded as a recarray using csv2rec in data),
-    between start and end dates (datetime.datetime objects)
-
-    weatherDataType is to indicate Environnement Canada data ('ec') or else MTQ
-    minProportionMeasures is proportion of measures necessary to consider the indicators'''
-    from matplotlib.mlab import find
-    nbre_jours_T_negatif,nbre_jours_gel_degel,pluie_tot,neige_tot,ecart_type_T = 0,0,0,0,0
-    compteur,nbre_jours_gel_consecutifs=0,0
-    tmoys = []
-    seuils_T = [20,15,10,5]
-    deltas_T = [0,0,0,0]
-    startIndex = find(data['date'] == startDate)
-    nDays = int((endDate - startDate).days)+1
-    if len(startIndex) > 0 and startIndex+nDays <= len(data):
-        startIndex = startIndex[0]
-        for i in range(startIndex, startIndex+nDays):
-            if not np.isnan(data['tmax'][i]):
-                tmax = data['tmax'][i]
-            else:
-                tmax = None
-            if not np.isnan(data['tmin'][i]):
-                tmin = data['tmin'][i]
-            else:
-                tmin = None
-            if weatherDatatype == 'ec':
-                if data['pluie_tot'][i] is not None and not np.isnan(data['pluie_tot'][i]):
-                    pluie_tot  += data['pluie_tot'][i]
-                if data['neige_tot'][i] is not None and not np.isnan(data['neige_tot'][i]):
-                    neige_tot  += data['neige_tot'][i]
-            if tmax is not None:
-                if tmax < 0:
-                    nbre_jours_T_negatif += 1
-            if tmax is not None and tmin is not None:
-                if tmax > 0 and tmin < 0:
-                    nbre_jours_gel_degel += 1
-                for l in range(len(seuils_T)):
-                    if tmax - tmin >=seuils_T[l]:
-                        deltas_T[l] += 1
-            if not np.isnan(data['tmoy'][i]):
-                tmoys.append(data['tmoy'][i])
-            if tmax is not None:
-                if tmax < 0:
-                    compteur += 1
-                elif tmax >= 0 and compteur >= nbre_jours_gel_consecutifs:
-                    nbre_jours_gel_consecutifs = compteur
-                    compteur = 0
-                else:
-                    compteur = 0
-            nbre_jours_gel_consecutifs = max(nbre_jours_gel_consecutifs,compteur)
-    if len(tmoys) > 0 and float(len(tmoys))/nDays >= minProportionMeasures:
-        if tmoys != []:
-            ecart_type_T = np.std(tmoys)
-        else:
-            ecart_type = None
-        if neige_tot < snowThreshold:
-            neigeEC_sup_seuil = 0
-        else:
-            neigeEC_sup_seuil = 1
-        return (nbre_jours_T_negatif,nbre_jours_gel_degel, deltas_T, nbre_jours_gel_consecutifs, pluie_tot, neige_tot, neigeEC_sup_seuil, ecart_type_T)
-    else:
-        return [None]*2+[[None]*len(seuils_T)]+[None]*5
-
-def mtqWeatherIndicators(data, startDate, endDate,tmax,tmin,tmoy):
-    print("Deprecated, use weatherIndicators")
-    from matplotlib.mlab import find
-    nbre_jours_T_negatif,nbre_jours_gel_degel,ecart_type_T = 0,0,0
-    compteur,nbre_jours_gel_consecutifs=0,0
-    tmoys = []
-    seuils_T = [20,15,10,5]
-    deltas_T = [0,0,0,0]
-    startIndex = find(data['date'] == startDate)
-    nDays = (endDate - startDate).days+1
-    for i in range(startIndex, startIndex+nDays):
-        if tmax[i] < 0:
-            nbre_jours_T_negatif += 1
-        if tmax[i] > 0 and tmin[i] < 0:
-            nbre_jours_gel_degel += 1
-        for l in range(len(seuils_T)):
-            if tmax[i] - tmin[i] >=seuils_T[l]:
-                deltas_T[l] += 1
-        tmoys.append(tmoy[i])
-        if tmax[i] < 0:
-            compteur += 1
-        elif tmax[i] >= 0 and compteur >= nbre_jours_gel_consecutifs:
-            nbre_jours_gel_consecutifs = compteur
-            compteur = 0
-        else:
-            compteur = 0
-        nbre_jours_gel_consecutifs = max(nbre_jours_gel_consecutifs,compteur)
-        if tmoys != []:
-            ecart_type_T = np.std(tmoys)
-        else:
-            ecart_type = None
-
-    return (nbre_jours_T_negatif,nbre_jours_gel_degel, deltas_T, nbre_jours_gel_consecutifs, ecart_type_T)
-
-class RTSS(object):
-    '''class for data related to a RTSS:
-    - agregating pavement marking measurements
-    - RTSS characteristics from FMR: pavement type, age, AADT, truck AADT
-    - winter maintenance level from V155
-
-    If divided highway, the RTSS ends with G or D and are distinct: there is no ambiguity
-    - retroreflectivity types: there are CB, RJ and RB
-    If undivided, ending with C
-    - durability is fine: ETAT_MARQG_RG ETAT_MARQG_CL ETAT_MARQG_RD (+SG/SD, but recent)
-    - retroreflectivity: CJ is center line, RB and SB are left/right if DEBUT-FIN>0 or <0
-    '''
-
-    def __init__(self, _id, name, data):
-        self.id = _id
-        self.name = name
-        self.data = data
-
-class MarkingTest(object):
-    '''class for a test site for a given product
-
-    including the series of measurements over the years'''
-
-    def __init__(self, _id, paintingDate, paintingType, color, data):
-        self.id = _id
-        self.paintingDate = paintingDate
-        self.paintingType = paintingType
-        self.color = color
-        self.data = data
-        self.nMeasures = len(data)
-
-    def getSite(self):
-        return int(self.id[:2])
-
-    def getTestAttributes(self):
-        return [self.paintingType, self.color, self.paintingDate.year]
-
-    def plot(self, measure, options = 'o', dayRatio = 1., **kwargs):
-        from matplotlib.pyplot import plot
-        plot(self.data['jours']/float(dayRatio), 
-             self.data[measure], options, **kwargs)
-
-    def getMarkingMeasures(self, dataLabel):
-        nonZeroIndices = ~np.isnan(self.data[dataLabel])
-        return self.data[nonZeroIndices]['jours'], self.data[nonZeroIndices][dataLabel]
-
-    def plotMarkingMeasures(self, measure, options = 'o', dayRatio = 1., **kwargs):
-        for i in range(1,7):
-            self.plot('{}_{}'.format(measure, i), options, dayRatio, **kwargs)
-
-    def computeMarkingMeasureVariations(self, dataLabel, lanePositions, weatherData, snowThreshold, weatherDataType = 'ec', minProportionMeasures = 0.):
-        '''Computes for each successive measurement
-        lanePositions = None
-        measure variation, initial measure, time duration, weather indicators
-        
-        TODO if measurements per lane, add a variable for lane position (position1 to 6)
-        lanePositions = list of integers (range(1,7))
-        measure variation, initial measure, time duration, lane position1, weather indicators
-        measure variation, initial measure, time duration, lane position2, weather indicators
-        ...'''
-        variationData = []
-        if lanePositions is None:
-            nonZeroIndices = ~np.isnan(self.data[dataLabel])
-            days = self.data[nonZeroIndices]['jours']
-            dates = self.data[nonZeroIndices]['date_mesure']
-            measures = self.data[nonZeroIndices][dataLabel]
-            for i in range(1, len(dates)):
-                nDaysTNegative, nDaysThawFreeze, deltaTemp, nConsecutiveFrozenDays, totalRain, totalSnow, snowAboveThreshold, stdevTemp = weatherIndicators(weatherData, dates[i-1], dates[i], snowThreshold, weatherDataType, minProportionMeasures)
-                if dates[i-1].year+1 == dates[i].year:
-                    winter = 1
-                    if days[i-1]<365:
-                        firstWinter = 1
-                else:
-                    winter = 0
-                    firstWinter = 0
-                variationData.append([measures[i-1]-measures[i], measures[i-1], days[i]-days[i-1], days[i-1], winter, firstWinter, nDaysTNegative, nDaysThawFreeze] + deltaTemp + [nConsecutiveFrozenDays, totalRain, totalSnow, snowAboveThreshold, stdevTemp])
-        return variationData
--- a/python/poly-utils.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,125 +0,0 @@
-#! /usr/bin/env python
-'''Various utilities to load data saved by the POLY new output(s)'''
-
-from moving import  TimeInterval
-from indicators import SeverityIndicator
-
-import sys, utils
-import numpy as np
-
-
-def loadNewInteractions(videoFilename,interactionType,dirname, extension, indicatorsNames, roaduserNum1,roaduserNum2, selectedIndicators=[]):
-    '''Loads interactions from the POLY traffic event format'''
-    from events import Interaction 
-    filename= dirname + videoFilename + extension
-    #filename= dirname + interactionType+ '-' + videoFilename + extension # case of min distance todo: change the saving format to be matched with all outputs
-    file = utils.openCheck(filename)
-    if (not file):
-        return []
-    #interactions = []
-    interactionNum = 0
-    data= np.loadtxt(filename)
-    indicatorFrameNums= data[:,0]
-    inter = Interaction(interactionNum, TimeInterval(indicatorFrameNums[0],indicatorFrameNums[-1]), roaduserNum1, roaduserNum2) 
-    inter.addVideoFilename(videoFilename)
-    inter.addInteractionType(interactionType)
-    for key in indicatorsNames.keys():
-        values= {}
-        for i,t in enumerate(indicatorFrameNums):
-            values[t] = data[i,key]
-        inter.addIndicator(SeverityIndicator(indicatorsNames[key], values))
-    if selectedIndicators !=[]:
-        values= {}
-        for i,t in enumerate(indicatorFrameNums):
-            values[t] = [data[i,index] for index in selectedIndicators]
-        inter.addIndicator(SeverityIndicator('selectedIndicators', values))    
-        
-    #interactions.append(inter)
-    file.close()
-    #return interactions
-    return inter
-
-# Plotting results
-
-frameRate = 15.
-
-# To run in directory that contains the directories that contain the results (Miss-xx and Incident-xx)
-#dirname = '/home/nicolas/Research/Data/kentucky-db/'
-
-interactingRoadUsers = {'Miss/0404052336': [(0,3)] # 0,2 and 1 vs 3
-                        #,
-                        #'Incident/0306022035': [(1,3)]
-                        #,
-                        #'Miss/0208030956': [(4,5),(5,7)]
-                        }
-
-
-def getIndicatorName(filename, withUnit = False):
-    if withUnit:
-        unit = ' (s)'
-    else:
-        unit = ''
-    if 'collision-point' in filename:
-        return 'TTC'+unit
-    elif 'crossing' in filename:
-        return 'pPET'+unit
-    elif 'probability' in filename:
-        return 'P(UEA)'
-
-def getMethodName(fileprefix):
-    if fileprefix == 'constant-velocity':
-        return 'Con. Vel.'
-    elif fileprefix == 'normal-adaptation':
-        return 'Norm. Ad.'
-    elif fileprefix == 'point-set':
-        return 'Pos. Set'
-    elif fileprefix == 'evasive-action':
-        return 'Ev. Act.'
-    elif fileprefix == 'point-set-evasive-action':
-        return 'Pos. Set'
-
-indicator2TimeIdx = {'TTC':2,'pPET':2, 'P(UEA)':3}
-
-def getDataAtInstant(data, i):
-    return data[data[:,2] == i]
-
-def getPointsAtInstant(data, i):
-    return getDataAtInstant(i)[3:5]
-
-def getIndicator(data, roadUserNumbers, indicatorName):
-    if data.ndim ==1:
-        data.shape = (1,data.shape[0])
-
-    # find the order for the roadUserNumbers
-    uniqueObj1 = np.unique(data[:,0])
-    uniqueObj2 = np.unique(data[:,1])
-    found = False
-    if roadUserNumbers[0] in uniqueObj1 and roadUserNumbers[1] in uniqueObj2:
-        objNum1 = roadUserNumbers[0]
-        objNum2 = roadUserNumbers[1]
-        found = True
-    if roadUserNumbers[1] in uniqueObj1 and roadUserNumbers[0] in uniqueObj2:
-        objNum1 = roadUserNumbers[1]
-        objNum2 = roadUserNumbers[0]
-        found = True
-
-    # get subset of data for road user numbers
-    if found:
-        roadUserData = data[np.logical_and(data[:,0] == objNum1, data[:,1] == objNum2),:]
-        if roadUserData.size > 0:
-            time = np.unique(roadUserData[:,indicator2TimeIdx[indicatorName]])
-            values = {}
-            if indicatorName == 'P(UEA)':
-                tmp = roadUserData[:,4]
-                for k,v in zip(time, tmp):
-                    values[k]=v
-                return SeverityIndicator(indicatorName, values, mostSevereIsMax = False, maxValue = 1.), roadUserData
-            else:
-                for i in xrange(time[0],time[-1]+1):
-                    try:
-                        tmp = getDataAtInstant(roadUserData, i)
-                        values[i] = np.sum(tmp[:,5]*tmp[:,6])/np.sum(tmp[:,5])/frameRate
-                    except IOError:
-                        values[i] = np.inf
-                return SeverityIndicator(indicatorName, values, mostSevereIsMax = False), roadUserData
-    return None, None
--- a/python/prediction.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,602 +0,0 @@
-#! /usr/bin/env python
-'''Library for motion prediction methods'''
-
-import moving
-from utils import LCSS
-
-import math, random
-import numpy as np
-from multiprocessing import Pool
-
-
-class PredictedTrajectory(object):
-    '''Class for predicted trajectories with lazy evaluation
-    if the predicted position has not been already computed, compute it
-
-    it should also have a probability'''
-
-    def __init__(self):
-        self.probability = 0.
-        self.predictedPositions = {}
-        self.predictedSpeedOrientations = {}
-        #self.collisionPoints = {}
-        #self.crossingZones = {}
-
-    def predictPosition(self, nTimeSteps):
-        if nTimeSteps > 0 and not nTimeSteps in self.predictedPositions.keys():
-            self.predictPosition(nTimeSteps-1)
-            self.predictedPositions[nTimeSteps], self.predictedSpeedOrientations[nTimeSteps] = moving.predictPosition(self.predictedPositions[nTimeSteps-1], self.predictedSpeedOrientations[nTimeSteps-1], self.getControl(), self.maxSpeed)
-        return self.predictedPositions[nTimeSteps]
-
-    def getPredictedTrajectory(self):
-        return moving.Trajectory.fromPointList(self.predictedPositions.values())
-
-    def getPredictedSpeeds(self):
-        return [so.norm for so in self.predictedSpeedOrientations.values()]
-
-    def plot(self, options = '', withOrigin = False, timeStep = 1, **kwargs):
-        self.getPredictedTrajectory().plot(options, withOrigin, timeStep, **kwargs)
-
-class PredictedTrajectoryConstant(PredictedTrajectory):
-    '''Predicted trajectory at constant speed or acceleration
-    TODO generalize by passing a series of velocities/accelerations'''
-
-    def __init__(self, initialPosition, initialVelocity, control = moving.NormAngle(0,0), probability = 1., maxSpeed = None):
-        self.control = control
-        self.maxSpeed = maxSpeed
-        self.probability = probability
-        self.predictedPositions = {0: initialPosition}
-        self.predictedSpeedOrientations = {0: moving.NormAngle.fromPoint(initialVelocity)}
-
-    def getControl(self):
-        return self.control
-        
-def findNearestParams(initialPosition,prototypeTrajectory):
-    ''' nearest parameters are the index of minDistance and the orientation  '''
-    distances=[]
-    for position in prototypeTrajectory.positions:
-        distances.append(moving.Point.distanceNorm2(initialPosition, position))
-    minDistanceIndex= np.argmin(distances)
-    return minDistanceIndex, moving.NormAngle.fromPoint(prototypeTrajectory.velocities[minDistanceIndex]).angle
-
-class PredictedTrajectoryPrototype(PredictedTrajectory):
-    '''Predicted trajectory that follows a prototype trajectory
-    The prototype is in the format of a moving.Trajectory: it could be
-    1. an observed trajectory (extracted from video)
-    2. a generic polyline (eg the road centerline) that a vehicle is supposed to follow
-
-    Prediction can be done
-    1. at constant speed (the instantaneous user speed)
-    2. following the trajectory path, at the speed of the user
-    (applying a constant ratio equal 
-    to the ratio of the user instantaneous speed and the trajectory closest speed)'''
-
-    def __init__(self, initialPosition, initialVelocity, prototypeTrajectory, constantSpeed = True, probability = 1.):
-        self.prototypeTrajectory = prototypeTrajectory
-        self.constantSpeed = constantSpeed
-        self.probability = probability
-        self.predictedPositions = {0: initialPosition}
-        self.predictedSpeedOrientations = {0: moving.NormAngle(moving.NormAngle.fromPoint(initialVelocity).norm, findNearestParams(initialPosition,prototypeTrajectory)[1])}#moving.NormAngle.fromPoint(initialVelocity)}
-    
-    def predictPosition(self, nTimeSteps):
-        if nTimeSteps > 0 and not nTimeSteps in self.predictedPositions.keys():
-            if self.constantSpeed:
-                # calculate cumulative distance
-                speedNorm= self.predictedSpeedOrientations[0].norm #moving.NormAngle.fromPoint(initialVelocity).norm
-                anglePrototype = findNearestParams(self.predictedPositions[nTimeSteps-1],self.prototypeTrajectory)[1]
-                self.predictedSpeedOrientations[nTimeSteps]= moving.NormAngle(speedNorm, anglePrototype)
-                self.predictedPositions[nTimeSteps],tmp= moving.predictPosition(self.predictedPositions[nTimeSteps-1], self.predictedSpeedOrientations[nTimeSteps-1], moving.NormAngle(0,0), None)
-            
-            else: # see c++ code, calculate ratio
-                speedNorm= self.predictedSpeedOrientations[0].norm
-                instant=findNearestParams(self.predictedPositions[0],self.prototypeTrajectory)[0]
-                prototypeSpeeds= self.prototypeTrajectory.getSpeeds()[instant:]
-                ratio=float(speedNorm)/prototypeSpeeds[0]
-                resampledSpeeds=[sp*ratio for sp in prototypeSpeeds]
-                anglePrototype = findNearestParams(self.predictedPositions[nTimeSteps-1],self.prototypeTrajectory)[1]
-                if nTimeSteps<len(resampledSpeeds):
-                    self.predictedSpeedOrientations[nTimeSteps]= moving.NormAngle(resampledSpeeds[nTimeSteps], anglePrototype)
-                    self.predictedPositions[nTimeSteps],tmp= moving.predictPosition(self.predictedPositions[nTimeSteps-1], self.predictedSpeedOrientations[nTimeSteps-1], moving.NormAngle(0,0), None)                
-                else:
-                    self.predictedSpeedOrientations[nTimeSteps]= moving.NormAngle(resampledSpeeds[-1], anglePrototype)
-                    self.predictedPositions[nTimeSteps],tmp= moving.predictPosition(self.predictedPositions[nTimeSteps-1], self.predictedSpeedOrientations[nTimeSteps-1], moving.NormAngle(0,0), None)
-          
-        return self.predictedPositions[nTimeSteps]
-
-class PredictedTrajectoryRandomControl(PredictedTrajectory):
-    '''Random vehicle control: suitable for normal adaptation'''
-    def __init__(self, initialPosition, initialVelocity, accelerationDistribution, steeringDistribution, probability = 1., maxSpeed = None):
-        '''Constructor
-        accelerationDistribution and steeringDistribution are distributions 
-        that return random numbers drawn from them'''
-        self.accelerationDistribution = accelerationDistribution
-        self.steeringDistribution = steeringDistribution
-        self.maxSpeed = maxSpeed
-        self.probability = probability
-        self.predictedPositions = {0: initialPosition}
-        self.predictedSpeedOrientations = {0: moving.NormAngle.fromPoint(initialVelocity)}
-
-    def getControl(self):
-        return moving.NormAngle(self.accelerationDistribution(),self.steeringDistribution())
-
-class SafetyPoint(moving.Point):
-    '''Can represent a collision point or crossing zone 
-    with respective safety indicator, TTC or pPET'''
-    def __init__(self, p, probability = 1., indicator = -1):
-        self.x = p.x
-        self.y = p.y
-        self.probability = probability
-        self.indicator = indicator
-
-    def __str__(self):
-        return '{0} {1} {2} {3}'.format(self.x, self.y, self.probability, self.indicator)
-
-    @staticmethod
-    def save(out, points, predictionInstant, objNum1, objNum2):
-        for p in points:
-            out.write('{0} {1} {2} {3}\n'.format(objNum1, objNum2, predictionInstant, p))
-
-    @staticmethod
-    def computeExpectedIndicator(points):
-        return np.sum([p.indicator*p.probability for p in points])/sum([p.probability for p in points])
-
-def computeCollisionTime(predictedTrajectory1, predictedTrajectory2, collisionDistanceThreshold, timeHorizon):
-    '''Computes the first instant 
-    at which two predicted trajectories are within some distance threshold
-    Computes all the times including timeHorizon
-    
-    User has to check the first variable collision to know about a collision'''
-    t = 1
-    p1 = predictedTrajectory1.predictPosition(t)
-    p2 = predictedTrajectory2.predictPosition(t)
-    collision = (p1-p2).norm2() <= collisionDistanceThreshold
-    while t < timeHorizon and not collision:
-        t += 1
-        p1 = predictedTrajectory1.predictPosition(t)
-        p2 = predictedTrajectory2.predictPosition(t)
-        collision = (p1-p2).norm2() <= collisionDistanceThreshold
-    return collision, t, p1, p2
-
-def savePredictedTrajectoriesFigure(currentInstant, obj1, obj2, predictedTrajectories1, predictedTrajectories2, timeHorizon):
-    from matplotlib.pyplot import figure, axis, title, close, savefig
-    figure()
-    for et in predictedTrajectories1:
-        et.predictPosition(int(np.round(timeHorizon)))
-        et.plot('rx')
-
-    for et in predictedTrajectories2:
-        et.predictPosition(int(np.round(timeHorizon)))
-        et.plot('bx')
-    obj1.plot('r')
-    obj2.plot('b')
-    title('instant {0}'.format(currentInstant))
-    axis('equal')
-    savefig('predicted-trajectories-t-{0}.png'.format(currentInstant))
-    close()
-
-def calculateProbability(nMatching,similarity,objects):
-    sumFrequencies=sum([nMatching[p] for p in similarity.keys()])
-    prototypeProbability={}
-    for i in similarity.keys():
-        prototypeProbability[i]= similarity[i] * float(nMatching[i])/sumFrequencies
-    sumProbabilities= sum([prototypeProbability[p] for p in prototypeProbability.keys()])
-    probabilities={}
-    for i in prototypeProbability.keys():
-        probabilities[objects[i]]= float(prototypeProbability[i])/sumProbabilities
-    return probabilities
-
-def findPrototypes(prototypes,nMatching,objects,route,partialObjPositions,noiseEntryNums,noiseExitNums,minSimilarity=0.1,mostMatched=None,spatialThreshold=1.0, delta=180):
-    ''' behaviour prediction first step'''
-    if route[0] not in noiseEntryNums: 
-        prototypesRoutes= [ x for x in sorted(prototypes.keys()) if route[0]==x[0]]
-    elif route[1] not in noiseExitNums:
-        prototypesRoutes=[ x for x in sorted(prototypes.keys()) if route[1]==x[1]]
-    else:
-        prototypesRoutes=[x for x in sorted(prototypes.keys())]
-    lcss = LCSS(similarityFunc=lambda x,y: (distanceForLCSS(x,y) <= spatialThreshold),delta=delta)
-    similarity={}
-    for y in prototypesRoutes: 
-        if y in prototypes.keys():
-            prototypesIDs=prototypes[y]            
-            for x in prototypesIDs:
-                s=lcss.computeNormalized(partialObjPositions, objects[x].positions)
-                if s >= minSimilarity:
-                    similarity[x]=s
-    
-    if mostMatched==None:
-        probabilities= calculateProbability(nMatching,similarity,objects)        
-        return probabilities
-    else:
-        mostMatchedValues=sorted(similarity.values(),reverse=True)[:mostMatched]
-        keys=[k for k in similarity.keys() if similarity[k] in mostMatchedValues]
-        newSimilarity={}
-        for i in keys:
-            newSimilarity[i]=similarity[i]
-        probabilities= calculateProbability(nMatching,newSimilarity,objects)        
-        return probabilities        
-        
-def findPrototypesSpeed(prototypes,secondStepPrototypes,nMatching,objects,route,partialObjPositions,noiseEntryNums,noiseExitNums,minSimilarity=0.1,mostMatched=None,useDestination=True,spatialThreshold=1.0, delta=180):
-    if useDestination:
-        prototypesRoutes=[route]
-    else:
-        if route[0] not in noiseEntryNums: 
-            prototypesRoutes= [ x for x in sorted(prototypes.keys()) if route[0]==x[0]]
-        elif route[1] not in noiseExitNums:
-            prototypesRoutes=[ x for x in sorted(prototypes.keys()) if route[1]==x[1]]
-        else:
-            prototypesRoutes=[x for x in sorted(prototypes.keys())]
-    lcss = LCSS(similarityFunc=lambda x,y: (distanceForLCSS(x,y) <= spatialThreshold),delta=delta)
-    similarity={}
-    for y in prototypesRoutes: 
-        if y in prototypes.keys():
-            prototypesIDs=prototypes[y]    
-            for x in prototypesIDs:
-                s=lcss.computeNormalized(partialObjPositions, objects[x].positions)
-                if s >= minSimilarity:
-                    similarity[x]=s
-    
-    newSimilarity={}
-    for i in similarity.keys():
-        if i in secondStepPrototypes.keys():
-            for j in secondStepPrototypes[i]:
-                newSimilarity[j]=similarity[i]
-    probabilities= calculateProbability(nMatching,newSimilarity,objects)        
-    return probabilities
-    
-def getPrototypeTrajectory(obj,route,currentInstant,prototypes,secondStepPrototypes,nMatching,objects,noiseEntryNums,noiseExitNums,minSimilarity=0.1,mostMatched=None,useDestination=True,useSpeedPrototype=True):
-    partialInterval=moving.Interval(obj.getFirstInstant(),currentInstant)
-    partialObjPositions= obj.getObjectInTimeInterval(partialInterval).positions    
-    if useSpeedPrototype:
-        prototypeTrajectories=findPrototypesSpeed(prototypes,secondStepPrototypes,nMatching,objects,route,partialObjPositions,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched,useDestination)
-    else:
-        prototypeTrajectories=findPrototypes(prototypes,nMatching,objects,route,partialObjPositions,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched)
-    return prototypeTrajectories
-
-def computeCrossingsCollisionsAtInstant(predictionParams,currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, usePrototypes = False,route1= (-1,-1),route2=(-1,-1),prototypes={},secondStepPrototypes={},nMatching={},objects=[],noiseEntryNums=[],noiseExitNums=[],minSimilarity=0.1,mostMatched=None,useDestination=True,useSpeedPrototype=True):  
-    '''returns the lists of collision points and crossing zones'''
-    if usePrototypes:
-        prototypeTrajectories1 = getPrototypeTrajectory(obj1,route1,currentInstant,prototypes,secondStepPrototypes,nMatching,objects,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched,useDestination,useSpeedPrototype)
-        prototypeTrajectories2 = getPrototypeTrajectory(obj2,route2,currentInstant,prototypes,secondStepPrototypes,nMatching,objects,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched,useDestination,useSpeedPrototype)
-        predictedTrajectories1 = predictionParams.generatePredictedTrajectories(obj1, currentInstant,prototypeTrajectories1)
-        predictedTrajectories2 = predictionParams.generatePredictedTrajectories(obj2, currentInstant,prototypeTrajectories2)     
-    else:
-        predictedTrajectories1 = predictionParams.generatePredictedTrajectories(obj1, currentInstant)
-        predictedTrajectories2 = predictionParams.generatePredictedTrajectories(obj2, currentInstant)        
-
-    collisionPoints = []
-    crossingZones = []
-    for et1 in predictedTrajectories1:
-        for et2 in predictedTrajectories2:
-            collision, t, p1, p2 = computeCollisionTime(et1, et2, collisionDistanceThreshold, timeHorizon)
-
-            if collision:
-                collisionPoints.append(SafetyPoint((p1+p2).multiply(0.5), et1.probability*et2.probability, t))
-            elif computeCZ: # check if there is a crossing zone
-                # TODO? zone should be around the points at which the traj are the closest
-                # look for CZ at different times, otherwise it would be a collision
-                # an approximation would be to look for close points at different times, ie the complementary of collision points
-                cz = None
-                t1 = 0
-                while not cz and t1 < timeHorizon: # t1 <= timeHorizon-1
-                    t2 = 0
-                    while not cz and t2 < timeHorizon:
-                        #if (et1.predictPosition(t1)-et2.predictPosition(t2)).norm2() < collisionDistanceThreshold:
-                        #    cz = (et1.predictPosition(t1)+et2.predictPosition(t2)).multiply(0.5)
-                        cz = moving.segmentIntersection(et1.predictPosition(t1), et1.predictPosition(t1+1), et2.predictPosition(t2), et2.predictPosition(t2+1))
-                        if cz is not None:
-                            deltaV= (et1.predictPosition(t1)- et1.predictPosition(t1+1) - et2.predictPosition(t2)+ et2.predictPosition(t2+1)).norm2()
-                            crossingZones.append(SafetyPoint(cz, et1.probability*et2.probability, abs(t1-t2)-(float(collisionDistanceThreshold)/deltaV)))
-                        t2 += 1
-                    t1 += 1                        
-
-    if debug:
-        savePredictedTrajectoriesFigure(currentInstant, obj1, obj2, predictedTrajectories1, predictedTrajectories2, timeHorizon)
-
-    return currentInstant, collisionPoints, crossingZones
-
-
-class PredictionParameters(object):
-    def __init__(self, name, maxSpeed):
-        self.name = name
-        self.maxSpeed = maxSpeed
-
-    def __str__(self):
-        return '{0} {1}'.format(self.name, self.maxSpeed)
-
-    def generatePredictedTrajectories(self, obj, instant):
-        return []
-
-    def computeCrossingsCollisionsAtInstant(self, currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False,usePrototypes = False,route1= (-1,-1),route2=(-1,-1),prototypes={},secondStepPrototypes={},nMatching={},objects=[],noiseEntryNums=[],noiseExitNums=[],minSimilarity=0.1,mostMatched=None,useDestination=True,useSpeedPrototype=True):
-        return computeCrossingsCollisionsAtInstant(self, currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ, debug,usePrototypes,route1,route2,prototypes,secondStepPrototypes,nMatching,objects,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched,useDestination,useSpeedPrototype)
-
-    def computeCrossingsCollisions(self, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, timeInterval = None, nProcesses = 1,usePrototypes = False,route1= (-1,-1),route2=(-1,-1),prototypes={},secondStepPrototypes={},nMatching={},objects=[],noiseEntryNums=[],noiseExitNums=[],minSimilarity=0.1,mostMatched=None,useDestination=True,useSpeedPrototype=True,acceptPartialLength=30, step=1):
-        #def computeCrossingsCollisions(predictionParams, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, timeInterval = None,nProcesses = 1, usePrototypes = False,route1= (-1,-1),route2=(-1,-1),prototypes={},secondStepPrototypes={},nMatching={},objects=[],noiseEntryNums=[],noiseExitNums=[],minSimilarity=0.1,mostMatched=None,useDestination=True,useSpeedPrototype=True,acceptPartialLength=30, step=1):
-        '''Computes all crossing and collision points at each common instant for two road users. '''
-        collisionPoints={}
-        crossingZones={}
-        if timeInterval:
-            commonTimeInterval = timeInterval
-        else:
-            commonTimeInterval = obj1.commonTimeInterval(obj2)
-        if nProcesses == 1:
-            if usePrototypes:
-                firstInstant= next( (x for x in xrange(commonTimeInterval.first,commonTimeInterval.last) if x-obj1.getFirstInstant() >= acceptPartialLength and x-obj2.getFirstInstant() >= acceptPartialLength), commonTimeInterval.last)
-                commonTimeIntervalList1= range(firstInstant,commonTimeInterval.last-1) # do not look at the 1 last position/velocities, often with errors
-                commonTimeIntervalList2= range(firstInstant,commonTimeInterval.last-1,step) # do not look at the 1 last position/velocities, often with errors
-                for i in commonTimeIntervalList2: 
-                    i, cp, cz = self.computeCrossingsCollisionsAtInstant(i, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ, debug,usePrototypes,route1,route2,prototypes,secondStepPrototypes,nMatching,objects,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched,useDestination,useSpeedPrototype)
-                    if len(cp) != 0:
-                        collisionPoints[i] = cp
-                    if len(cz) != 0:
-                        crossingZones[i] = cz
-                if collisionPoints!={} or crossingZones!={}:
-                    for i in commonTimeIntervalList1:
-                        if i not in commonTimeIntervalList2:
-                            i, cp, cz = self.computeCrossingsCollisionsAtInstant(i, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ, debug,usePrototypes,route1,route2,prototypes,secondStepPrototypes,nMatching,objects,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched,useDestination,useSpeedPrototype)
-                            if len(cp) != 0:
-                                collisionPoints[i] = cp
-                            if len(cz) != 0:
-                                crossingZones[i] = cz                        
-            else:
-                for i in list(commonTimeInterval)[:-1]: # do not look at the 1 last position/velocities, often with errors
-                    i, cp, cz = self.computeCrossingsCollisionsAtInstant(i, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ, debug,usePrototypes,route1,route2,prototypes,secondStepPrototypes,nMatching,objects,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched,useDestination,useSpeedPrototype)
-                    if len(cp) != 0:
-                        collisionPoints[i] = cp
-                    if len(cz) != 0:
-                        crossingZones[i] = cz
-        else:
-            pool = Pool(processes = nProcesses)
-            jobs = [pool.apply_async(computeCrossingsCollisionsAtInstant, args = (self, i, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ, debug,usePrototypes,route1,route2,prototypes,secondStepPrototypes,nMatching,objects,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched,useDestination,useSpeedPrototype)) for i in list(commonTimeInterval)[:-1]]
-            #results = [j.get() for j in jobs]
-            #results.sort()
-            for j in jobs:
-                i, cp, cz = j.get()
-                #if len(cp) != 0 or len(cz) != 0:
-                if len(cp) != 0:
-                    collisionPoints[i] = cp
-                if len(cz) != 0:
-                    crossingZones[i] = cz
-            pool.close()
-        return collisionPoints, crossingZones
-#return computeCrossingsCollisions(self, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ, debug, timeInterval, nProcesses,usePrototypes,route1,route2,prototypes,secondStepPrototypes,nMatching,objects,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched,useDestination,useSpeedPrototype,acceptPartialLength, step)
-
-    def computeCollisionProbability(self, obj1, obj2, collisionDistanceThreshold, timeHorizon, debug = False, timeInterval = None):
-        '''Computes only collision probabilities
-        Returns for each instant the collision probability and number of samples drawn'''
-        collisionProbabilities = {}
-        if timeInterval:
-            commonTimeInterval = timeInterval
-        else:
-            commonTimeInterval = obj1.commonTimeInterval(obj2)
-        for i in list(commonTimeInterval)[:-1]:
-            nCollisions = 0
-            predictedTrajectories1 = self.generatePredictedTrajectories(obj1, i)
-            predictedTrajectories2 = self.generatePredictedTrajectories(obj2, i)
-            for et1 in predictedTrajectories1:
-                for et2 in predictedTrajectories2:
-                    collision, t, p1, p2 = computeCollisionTime(et1, et2, collisionDistanceThreshold, timeHorizon)
-                    if collision:
-                        nCollisions += 1
-            # take into account probabilities ??
-            nSamples = float(len(predictedTrajectories1)*len(predictedTrajectories2))
-            collisionProbabilities[i] = [nSamples, float(nCollisions)/nSamples]
-
-            if debug:
-                savePredictedTrajectoriesFigure(i, obj1, obj2, predictedTrajectories1, predictedTrajectories2, timeHorizon)
-
-        return collisionProbabilities
-
-class ConstantPredictionParameters(PredictionParameters):
-    def __init__(self, maxSpeed):
-        PredictionParameters.__init__(self, 'constant velocity', maxSpeed)
-
-    def generatePredictedTrajectories(self, obj, instant):
-        return [PredictedTrajectoryConstant(obj.getPositionAtInstant(instant), obj.getVelocityAtInstant(instant), 
-                                               maxSpeed = self.maxSpeed)]
-
-class NormalAdaptationPredictionParameters(PredictionParameters):
-    def __init__(self, maxSpeed, nPredictedTrajectories, accelerationDistribution, steeringDistribution, useFeatures = False):
-        '''An example of acceleration and steering distributions is
-        lambda: random.triangular(-self.maxAcceleration, self.maxAcceleration, 0.)
-        '''
-        if useFeatures:
-            name = 'point set normal adaptation'
-        else:
-            name = 'normal adaptation'
-        PredictionParameters.__init__(self, name, maxSpeed)
-        self.nPredictedTrajectories = nPredictedTrajectories
-        self.useFeatures = useFeatures
-        self.accelerationDistribution = accelerationDistribution
-        self.steeringDistribution = steeringDistribution
-        
-    def __str__(self):
-        return PredictionParameters.__str__(self)+' {0} {1} {2}'.format(self.nPredictedTrajectories, 
-                                                                        self.maxAcceleration, 
-                                                                        self.maxSteering)
-
-    def generatePredictedTrajectories(self, obj, instant):
-        predictedTrajectories = []
-        if self.useFeatures and obj.hadFeatures():
-            features = [f for f in obj.getFeatures() if f.existsAtInstant(instant)]
-            positions = [f.getPositionAtInstant(instant) for f in features]
-            velocities = [f.getVelocityAtInstant(instant) for f in features]
-        else:
-            positions = [obj.getPositionAtInstant(instant)]
-            velocities = [obj.getVelocityAtInstant(instant)]
-        for i in xrange(self.nPredictedTrajectories):
-            for initialPosition,initialVelocity in zip(positions, velocities):
-                predictedTrajectories.append(PredictedTrajectoryRandomControl(initialPosition, 
-                                                                              initialVelocity, 
-                                                                              self.accelerationDistribution, 
-                                                                              self.steeringDistribution, 
-                                                                              maxSpeed = self.maxSpeed))
-        return predictedTrajectories
-
-class PointSetPredictionParameters(PredictionParameters):
-    # todo generate several trajectories with normal adaptatoins from each position (feature)
-    def __init__(self, maxSpeed):
-        PredictionParameters.__init__(self, 'point set', maxSpeed)
-        #self.nPredictedTrajectories = nPredictedTrajectories
-    
-    def generatePredictedTrajectories(self, obj, instant):
-        predictedTrajectories = []
-        if obj.hasFeatures():
-            features = [f for f in obj.getFeatures() if f.existsAtInstant(instant)]
-            positions = [f.getPositionAtInstant(instant) for f in features]
-            velocities = [f.getVelocityAtInstant(instant) for f in features]
-            #for i in xrange(self.nPredictedTrajectories):
-            for initialPosition,initialVelocity in zip(positions, velocities):
-                predictedTrajectories.append(PredictedTrajectoryConstant(initialPosition, initialVelocity, 
-                                                                         maxSpeed = self.maxSpeed))
-            return predictedTrajectories
-        else:
-            print('Object {} has no features'.format(obj.getNum()))
-            return None
-
-class EvasiveActionPredictionParameters(PredictionParameters):
-    def __init__(self, maxSpeed, nPredictedTrajectories, accelerationDistribution, steeringDistribution, useFeatures = False):
-        '''Suggested acceleration distribution may not be symmetric, eg
-        lambda: random.triangular(self.minAcceleration, self.maxAcceleration, 0.)'''
-
-        if useFeatures:
-            name = 'point set evasive action'
-        else:
-            name = 'evasive action'
-        PredictionParameters.__init__(self, name, maxSpeed)
-        self.nPredictedTrajectories = nPredictedTrajectories
-        self.useFeatures = useFeatures
-        self.accelerationDistribution = accelerationDistribution
-        self.steeringDistribution = steeringDistribution
-
-    def __str__(self):
-        return PredictionParameters.__str__(self)+' {0} {1} {2} {3}'.format(self.nPredictedTrajectories, self.minAcceleration, self.maxAcceleration, self.maxSteering)
-
-    def generatePredictedTrajectories(self, obj, instant):
-        predictedTrajectories = []
-        if self.useFeatures and obj.hasFeatures():
-            features = [f for f in obj.getFeatures() if f.existsAtInstant(instant)]
-            positions = [f.getPositionAtInstant(instant) for f in features]
-            velocities = [f.getVelocityAtInstant(instant) for f in features]
-        else:
-            positions = [obj.getPositionAtInstant(instant)]
-            velocities = [obj.getVelocityAtInstant(instant)]
-        for i in xrange(self.nPredictedTrajectories):
-            for initialPosition,initialVelocity in zip(positions, velocities):
-                predictedTrajectories.append(PredictedTrajectoryConstant(initialPosition, 
-                                                                         initialVelocity, 
-                                                                         moving.NormAngle(self.accelerationDistribution(), 
-                                                                                          self.steeringDistribution()), 
-                                                                         maxSpeed = self.maxSpeed))
-        return predictedTrajectories
-
-
-class CVDirectPredictionParameters(PredictionParameters):
-    '''Prediction parameters of prediction at constant velocity
-    using direct computation of the intersecting point
-    Warning: the computed time to collision may be higher than timeHorizon (not used)'''
-    
-    def __init__(self):
-        PredictionParameters.__init__(self, 'constant velocity (direct computation)', None)
-
-    def computeCrossingsCollisionsAtInstant(self, currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, *kwargs):
-        collisionPoints = []
-        crossingZones = []
-
-        p1 = obj1.getPositionAtInstant(currentInstant)
-        p2 = obj2.getPositionAtInstant(currentInstant)
-        if (p1-p2).norm2() <= collisionDistanceThreshold:
-            collisionPoints = [SafetyPoint((p1+p1).multiply(0.5), 1., 0.)]
-        else:
-            v1 = obj1.getVelocityAtInstant(currentInstant)
-            v2 = obj2.getVelocityAtInstant(currentInstant)
-            intersection = moving.intersection(p1, p1+v1, p2, p2+v2)
-
-            if intersection is not None:
-                dp1 = intersection-p1
-                dp2 = intersection-p2
-                dot1 = moving.Point.dot(dp1, v1)
-                dot2 = moving.Point.dot(dp2, v2)
-                #print dot1, dot2
-                # (computeCZ and (dot1 > 0 or dot2 > 0)) or (
-                if (computeCZ and (dot1 > 0 or dot2 > 0)) or (dot1 > 0 and dot2 > 0): # if the road users are moving towards the intersection or if computing pPET
-                    dist1 = dp1.norm2()
-                    dist2 = dp2.norm2()
-                    s1 = math.copysign(v1.norm2(), dot1)
-                    s2 = math.copysign(v2.norm2(), dot2)
-                    halfCollisionDistanceThreshold = collisionDistanceThreshold/2.
-                    timeInterval1 = moving.TimeInterval(max(0,dist1-halfCollisionDistanceThreshold)/s1, (dist1+halfCollisionDistanceThreshold)/s1)
-                    timeInterval2 = moving.TimeInterval(max(0,dist2-halfCollisionDistanceThreshold)/s2, (dist2+halfCollisionDistanceThreshold)/s2)
-                    collisionTimeInterval = moving.TimeInterval.intersection(timeInterval1, timeInterval2)
-                    
-                    if collisionTimeInterval.empty():
-                        if computeCZ:
-                            crossingZones = [SafetyPoint(intersection, 1., timeInterval1.distance(timeInterval2))]
-                    else:
-                        collisionPoints = [SafetyPoint(intersection, 1., collisionTimeInterval.center())]
-    
-        if debug and intersection is not None:
-            from matplotlib.pyplot import plot, figure, axis, title
-            figure()
-            plot([p1.x, intersection.x], [p1.y, intersection.y], 'r')
-            plot([p2.x, intersection.x], [p2.y, intersection.y], 'b')
-            intersection.plot()            
-            obj1.plot('r')
-            obj2.plot('b')
-            title('instant {0}'.format(currentInstant))
-            axis('equal')
-
-        return currentInstant, collisionPoints, crossingZones
-
-class CVExactPredictionParameters(PredictionParameters):
-    '''Prediction parameters of prediction at constant velocity
-    using direct computation of the intersecting point (solving the equation)
-    Warning: the computed time to collision may be higher than timeHorizon (not used)'''
-    
-    def __init__(self):
-        PredictionParameters.__init__(self, 'constant velocity (direct exact computation)', None)
-
-    def computeCrossingsCollisionsAtInstant(self, currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, *kwargs):
-        'TODO add collision point coordinates, compute pPET'
-        #collisionPoints = []
-        #crossingZones = []
-
-        p1 = obj1.getPositionAtInstant(currentInstant)
-        p2 = obj2.getPositionAtInstant(currentInstant)
-        v1 = obj1.getVelocityAtInstant(currentInstant)
-        v2 = obj2.getVelocityAtInstant(currentInstant)
-        intersection = moving.intersection(p1, p1+v1, p2, p2+v2)
-
-        if intersection is not None:
-            ttc = moving.Point.timeToCollision(p1, p2, v1, v2, collisionDistanceThreshold)
-            if ttc:
-                return currentInstant, [SafetyPoint(intersection, 1., ttc)], [] # (p1+v1.multiply(ttc)+p2+v2.multiply(ttc)).multiply(0.5)
-            else:
-                return currentInstant, [],[]
-
-####
-# Other Methods
-####
-class PrototypePredictionParameters(PredictionParameters):
-    def __init__(self, maxSpeed, nPredictedTrajectories, constantSpeed = True):
-        name = 'prototype'
-        PredictionParameters.__init__(self, name, maxSpeed)
-        self.nPredictedTrajectories = nPredictedTrajectories
-        self.constantSpeed = constantSpeed
-        
-    def generatePredictedTrajectories(self, obj, instant,prototypeTrajectories):
-        predictedTrajectories = []
-        initialPosition = obj.getPositionAtInstant(instant)
-        initialVelocity = obj.getVelocityAtInstant(instant)
-        for prototypeTraj in prototypeTrajectories.keys():
-            predictedTrajectories.append(PredictedTrajectoryPrototype(initialPosition, initialVelocity, prototypeTraj, constantSpeed = self.constantSpeed, probability = prototypeTrajectories[prototypeTraj])) 
-        return predictedTrajectories
-
-if __name__ == "__main__":
-    import doctest
-    import unittest
-    suite = doctest.DocFileSuite('tests/prediction.txt')
-    #suite = doctest.DocTestSuite()
-    unittest.TextTestRunner().run(suite)
-    #doctest.testmod()
-    #doctest.testfile("example.txt")
-
--- a/python/processing.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,20 +0,0 @@
-#! /usr/bin/env python
-'''Algorithms to process trajectories and moving objects'''
-
-import moving
-
-import numpy as np
-
-
-def extractSpeeds(objects, zone):
-    speeds = {}
-    objectsNotInZone = []
-    import matplotlib.nxutils as nx        
-    for o in objects:
-        inPolygon = nx.points_inside_poly(o.getPositions().asArray().T, zone.T)
-        if inPolygon.any():
-            objspeeds = [o.getVelocityAt(i).norm2() for i in xrange(int(o.length()-1)) if inPolygon[i]]
-            speeds[o.num] = np.mean(objspeeds) # km/h
-        else:
-            objectsNotInZone.append(o)
-    return speeds.values(), speeds, objectsNotInZone
--- a/python/requirements.txt	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,11 +0,0 @@
-matplotlib
-numpy
-
-The following libraries are necessary for (sometimes very) specific classes/functions.
-
-CV functionalities (cvutils.py): opencv
-Image functionalities (cvutils.py): Python Image Library (new version is called Pillow)
-
-Machine learning (ml.py): scipy
-
-Moving object geometry (currently commented) (moving.py) and plotting shapely polygons (utils.py): shapely
--- a/python/run-tests.sh	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,10 +0,0 @@
-#!/bin/sh
-# for file in tests/*... basename
-for f in ./*.py
-do
-    python $f
-done
-for f in ./tests/*.py
-do
-    python $f
-done
--- a/python/storage.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1257 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-'''Various utilities to save and load data'''
-
-import utils, moving, events, indicators, shutil
-from base import VideoFilenameAddable
-
-import sqlite3, logging
-from numpy import log, min as npmin, max as npmax, round as npround, array, sum as npsum, loadtxt
-from pandas import read_csv, merge
-
-
-commentChar = '#'
-
-delimiterChar = '%';
-
-ngsimUserTypes = {'twowheels':1,
-                  'car':2,
-                  'truck':3}
-
-#########################
-# Sqlite
-#########################
-
-# utils
-def printDBError(error):
-    print('DB Error: {}'.format(error))
-
-def dropTables(connection, tableNames):
-    'deletes the table with names in tableNames'
-    try:
-        cursor = connection.cursor()
-        for tableName in tableNames:
-            cursor.execute('DROP TABLE IF EXISTS '+tableName)
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-
-def tableExists(filename, tableName):
-    'indicates if the table exists in the database'
-    try:
-        connection = sqlite3.connect(filename)
-        cursor = connection.cursor()
-        cursor.execute('SELECT COUNT(*) FROM SQLITE_MASTER WHERE type = \'table\' AND name = \''+tableName+'\'')
-        return cursor.fetchone()[0] == 1
-    except sqlite3.OperationalError as error:
-        printDBError(error)        
-
-def createIndex(connection, tableName, columnName, unique = False):
-    '''Creates an index for the column in the table
-    I will make querying with a condition on this column faster'''
-    try:
-        #connection = sqlite3.connect(filename)
-        cursor = connection.cursor()
-        s = "CREATE "
-        if unique:
-            s += "UNIQUE "
-        cursor.execute(s+"INDEX IF NOT EXISTS "+tableName+"_"+columnName+"_index ON "+tableName+"("+columnName+")")
-        connection.commit()
-        #connection.close()
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-
-def getNumberRowsTable(connection, tableName, columnName = None):
-    '''Returns the number of rows for the table
-    If columnName is not None, means we want the number of distinct values for that column
-    (otherwise, we can just count(*))'''
-    try:
-        cursor = connection.cursor()
-        if columnName is None:
-            cursor.execute("SELECT COUNT(*) from "+tableName)
-        else:
-            cursor.execute("SELECT COUNT(DISTINCT "+columnName+") from "+tableName)
-        return cursor.fetchone()[0]
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-
-def getMinMax(connection, tableName, columnName, minmax):
-    '''Returns max/min or both for given column in table
-    minmax must be string max, min or minmax'''
-    try:
-        cursor = connection.cursor()
-        if minmax == 'min' or minmax == 'max':
-            cursor.execute("SELECT "+minmax+"("+columnName+") from "+tableName)
-        elif minmax == 'minmax':
-            cursor.execute("SELECT MIN("+columnName+"), MAX("+columnName+") from "+tableName)
-        else:
-            print("Argument minmax unknown: {}".format(minmax))
-        return cursor.fetchone()[0]
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-
-def loadPrototypeMatchIndexesFromSqlite(filename):
-    """
-    This function loads the prototypes table in the database of name <filename>.
-    It returns a list of tuples representing matching ids : [(prototype_id, matched_trajectory_id),...]
-    """
-    matched_indexes = []
-
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-
-    try:
-        cursor.execute('SELECT * from prototypes order by prototype_id, trajectory_id_matched')
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-        return []
-
-    for row in cursor:
-        matched_indexes.append((row[0],row[1]))
-
-    connection.close()
-    return matched_indexes
-
-def getObjectCriteria(objectNumbers):
-    if objectNumbers is None:
-        query = ''
-    elif type(objectNumbers) == int:
-        query = 'between 0 and {0}'.format(objectNumbers-1)
-    elif type(objectNumbers) == list:
-        query = 'in ('+', '.join([str(n) for n in objectNumbers])+')'
-    else:
-        print('objectNumbers {} are not a known type ({})'.format(objectNumbers, type(objectNumbers)))
-        query = ''
-    return query
-
-def loadTrajectoriesFromTable(connection, tableName, trajectoryType, objectNumbers = None):
-    '''Loads trajectories (in the general sense) from the given table
-    can be positions or velocities
-
-    returns a moving object'''
-    cursor = connection.cursor()
-
-    try:
-        objectCriteria = getObjectCriteria(objectNumbers)
-        queryStatement = None
-        if trajectoryType == 'feature':
-            queryStatement = 'SELECT * from '+tableName
-            if objectNumbers is not None:
-                queryStatement += ' WHERE trajectory_id '+objectCriteria
-            queryStatement += ' ORDER BY trajectory_id, frame_number'
-        elif trajectoryType == 'object':
-            queryStatement = 'SELECT OF.object_id, P.frame_number, avg(P.x_coordinate), avg(P.y_coordinate) from '+tableName+' P, objects_features OF WHERE P.trajectory_id = OF.trajectory_id'
-            if objectNumbers is not None:
-                queryStatement += ' and OF.object_id '+objectCriteria
-            queryStatement += ' GROUP BY OF.object_id, P.frame_number ORDER BY OF.object_id, P.frame_number'
-        elif trajectoryType in ['bbtop', 'bbbottom']:
-            if trajectoryType == 'bbtop':
-                corner = 'top_left'
-            elif trajectoryType == 'bbbottom':
-                corner = 'bottom_right'
-            queryStatement = 'SELECT object_id, frame_number, x_'+corner+', y_'+corner+' FROM '+tableName
-            if objectNumbers is not None:
-                queryStatement += ' WHERE object_id '+objectCriteria
-            queryStatement += ' ORDER BY object_id, frame_number'
-        else:
-            print('Unknown trajectory type {}'.format(trajectoryType))
-        if queryStatement is not None:
-            cursor.execute(queryStatement)
-            logging.debug(queryStatement)
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-        return []
-
-    objId = -1
-    obj = None
-    objects = []
-    for row in cursor:
-        if row[0] != objId:
-            objId = row[0]
-            if obj is not None and obj.length() == obj.positions.length():
-                objects.append(obj)
-            elif obj is not None:
-                print('Object {} is missing {} positions'.format(obj.getNum(), int(obj.length())-obj.positions.length()))
-            obj = moving.MovingObject(row[0], timeInterval = moving.TimeInterval(row[1], row[1]), positions = moving.Trajectory([[row[2]],[row[3]]]))
-        else:
-            obj.timeInterval.last = row[1]
-            obj.positions.addPositionXY(row[2],row[3])
-
-    if obj is not None and obj.length() == obj.positions.length():
-        objects.append(obj)
-    elif obj is not None:
-        print('Object {} is missing {} positions'.format(obj.getNum(), int(obj.length())-obj.positions.length()))
-
-    return objects
-
-def loadUserTypesFromTable(cursor, trajectoryType, objectNumbers):
-    objectCriteria = getObjectCriteria(objectNumbers)
-    queryStatement = 'SELECT object_id, road_user_type from objects'
-    if objectNumbers is not None:
-        queryStatement += ' WHERE object_id '+objectCriteria
-    cursor.execute(queryStatement)
-    userTypes = {}
-    for row in cursor:
-        userTypes[row[0]] = row[1]
-    return userTypes
-
-def loadTrajectoriesFromSqlite(filename, trajectoryType, objectNumbers = None, withFeatures = False):
-    '''Loads the trajectories (in the general sense, 
-    either features, objects (feature groups) or bounding box series) 
-    The number loaded is either the first objectNumbers objects,
-    or the indices in objectNumbers from the database'''
-    connection = sqlite3.connect(filename)
-
-    objects = loadTrajectoriesFromTable(connection, 'positions', trajectoryType, objectNumbers)
-    objectVelocities = loadTrajectoriesFromTable(connection, 'velocities', trajectoryType, objectNumbers)
-
-    if len(objectVelocities) > 0:
-        for o,v in zip(objects, objectVelocities):
-            if o.getNum() == v.getNum():
-                o.velocities = v.positions
-                o.velocities.duplicateLastPosition() # avoid having velocity shorter by one position than positions
-            else:
-                print('Could not match positions {0} with velocities {1}'.format(o.getNum(), v.getNum()))
-
-    if trajectoryType == 'object':
-        cursor = connection.cursor()
-        try:
-            # attribute feature numbers to objects
-            objectCriteria = getObjectCriteria(objectNumbers)
-            queryStatement = 'SELECT P.trajectory_id, OF.object_id from positions P, objects_features OF WHERE P.trajectory_id = OF.trajectory_id'
-            if objectNumbers is not None:
-                queryStatement += ' and OF.object_id '+objectCriteria
-            queryStatement += ' group by P.trajectory_id order by OF.object_id' # order is important to group all features per object
-            cursor.execute(queryStatement) 
-            logging.debug(queryStatement)
-
-            featureNumbers = {}
-            for row in cursor:
-                objId = row[1]
-                if objId not in featureNumbers:
-                    featureNumbers[objId] = [row[0]]
-                else:
-                    featureNumbers[objId].append(row[0])
-                    
-            for obj in objects:
-                obj.featureNumbers = featureNumbers[obj.getNum()]
-
-            # load userType
-            userTypes = loadUserTypesFromTable(cursor, trajectoryType, objectNumbers)
-            for obj in objects:
-                obj.userType = userTypes[obj.getNum()]
-
-            if withFeatures:
-                nFeatures = 0
-                for obj in objects:
-                    nFeatures = max(nFeatures, max(obj.featureNumbers))
-                features = loadTrajectoriesFromSqlite(filename, 'feature', nFeatures+1)
-                for obj in objects:
-                    obj.setFeatures(features)
-             
-        except sqlite3.OperationalError as error:
-            printDBError(error)
-            objects = []
-
-    connection.close()
-    return objects
-
-def addCurvilinearTrajectoriesFromSqlite(filename, objects):
-    '''Adds curvilinear positions (s_coordinate, y_coordinate, lane)
-    from a database to an existing MovingObject dict (indexed by each objects's num)'''
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-
-    try:
-        cursor.execute('SELECT * from curvilinear_positions order by trajectory_id, frame_number')
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-        return []
-    
-    missingObjectNumbers = []
-    objNum = None
-    for row in cursor:
-        if objNum != row[0]:
-            objNum = row[0]
-            if objNum in objects:
-                objects[objNum].curvilinearPositions = moving.CurvilinearTrajectory()
-            else:
-                missingObjectNumbers.append(objNum)
-        if objNum in objects:
-            objects[objNum].curvilinearPositions.addPositionSYL(row[2],row[3],row[4])
-    if len(missingObjectNumbers) > 0:
-        print('List of missing objects to attach corresponding curvilinear trajectories: {}'.format(missingObjectNumbers))
-
-def saveTrajectoriesToSqlite(outputFilename, objects, trajectoryType, withFeatures = False):
-    '''Writes features, ie the trajectories positions (and velocities if exist)
-    with their instants to a specified sqlite file
-    Either feature positions (and velocities if they exist)
-    or curvilinear positions will be saved at a time
-
-    TODO: Not implemented for trajectoryType MovingObject with features
-    For objects, with features will control whether the features
-    corresponding to the object are also saved'''
-
-    connection = sqlite3.connect(outputFilename)
-    try:
-        cursor = connection.cursor()
-
-        if trajectoryType == 'feature':
-            cursor.execute("CREATE TABLE IF NOT EXISTS positions (trajectory_id INTEGER, frame_number INTEGER, x_coordinate REAL, y_coordinate REAL, PRIMARY KEY(trajectory_id, frame_number))")
-            cursor.execute("CREATE TABLE IF NOT EXISTS velocities (trajectory_id INTEGER, frame_number INTEGER, x_coordinate REAL, y_coordinate REAL, PRIMARY KEY(trajectory_id, frame_number))")
-
-            positionQuery = "insert into positions (trajectory_id, frame_number, x_coordinate, y_coordinate) values (?,?,?,?)"
-            velocityQuery = "insert into velocities (trajectory_id, frame_number, x_coordinate, y_coordinate) values (?,?,?,?)"
-            for obj in objects:
-                num = obj.getNum()
-                frame_number = obj.getFirstInstant()
-                for position in obj.getPositions():
-                    cursor.execute(positionQuery, (num, frame_number, position.x, position.y))
-                    frame_number += 1
-                # velocities
-                velocities = obj.getVelocities()
-                if velocities is not None:
-                    frame_number = obj.getFirstInstant()
-                    for i in xrange(velocities.length()-1):
-                        v = velocities[i]
-                        cursor.execute(velocityQuery, (num, frame_number, v.x, v.y))
-                        frame_number += 1
-        elif trajectoryType == 'curvilinear':
-            cursor.execute("CREATE TABLE IF NOT EXISTS curvilinear_positions (trajectory_id INTEGER, frame_number INTEGER, s_coordinate REAL, y_coordinate REAL, lane TEXT, PRIMARY KEY(trajectory_id, frame_number))")
-            curvilinearQuery = "insert into curvilinear_positions (trajectory_id, frame_number, s_coordinate, y_coordinate, lane) values (?,?,?,?,?)"
-            for obj in objects:
-                num = obj.getNum()
-                frame_number = obj.getFirstInstant()
-                for position in obj.getCurvilinearPositions():
-                    cursor.execute(curvilinearQuery, (num, frame_number, position[0], position[1], position[2]))
-                    frame_number += 1
-        #elif trajectoryType == 'object':
-        else:
-            print('Unknown trajectory type {}'.format(trajectoryType))
-        connection.commit()
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-    connection.close()
-
-def savePrototypesToSqlite(filename, prototypes, trajectoryType = 'feature'):
-    'Work in progress, do not use'
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-    try:
-        cursor.execute('CREATE TABLE IF NOT EXISTS prototypes (id INTEGER PRIMARY KEY, object_id INTEGER, trajectory_id INTEGER, nMatchings INTEGER, FOREIGN KEY(object_id) REFERENCES objects(id), FOREIGN KEY(trajectory_id) REFERENCES positions(trajectory_id))')
-        #for inter in interactions:
-        #    saveInteraction(cursor, inter)
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-    connection.commit()
-    connection.close()
-
-def loadPrototypesFromSqlite(filename):
-    pass
-
-def loadBBMovingObjectsFromSqlite(filename, objectType = 'bb', objectNumbers = None):
-    '''Loads bounding box moving object from an SQLite
-    (format of SQLite output by the ground truth annotation tool
-    or Urban Tracker
-
-    Load descriptions?'''
-    connection = sqlite3.connect(filename)
-    objects = []
-
-    if objectType == 'bb':
-        topCorners = loadTrajectoriesFromTable(connection, 'bounding_boxes', 'bbtop', objectNumbers)
-        bottomCorners = loadTrajectoriesFromTable(connection, 'bounding_boxes', 'bbbottom', objectNumbers)
-        userTypes = loadUserTypesFromTable(connection.cursor(), 'object', objectNumbers) # string format is same as object
-        
-        for t, b in zip(topCorners, bottomCorners):
-            num = t.getNum()
-            if t.getNum() == b.getNum():
-                annotation = moving.BBMovingObject(num, t.getTimeInterval(), t, b, userTypes[num])
-                objects.append(annotation)
-    else:
-        print ('Unknown type of bounding box {}'.format(objectType))
-
-    connection.close()
-    return objects
-
-def deleteFromSqlite(filename, dataType):
-    'Deletes (drops) some tables in the filename depending on type of data'
-    import os
-    if os.path.isfile(filename):
-        connection = sqlite3.connect(filename)
-        if dataType == 'object':
-            dropTables(connection, ['objects', 'objects_features'])
-        elif dataType == 'interaction':
-            dropTables(connection, ['interactions', 'indicators'])
-        elif dataType == 'bb':
-            dropTables(connection, ['bounding_boxes'])
-        elif dataType == 'pois':
-            dropTables(connection, ['gaussians2d'])
-        else:
-            print('Unknown data type {} to delete from database'.format(dataType))
-        connection.close()
-    else:
-        print('{} does not exist'.format(filename))
-
-def createInteractionTable(cursor):
-    cursor.execute('CREATE TABLE IF NOT EXISTS interactions (id INTEGER PRIMARY KEY, object_id1 INTEGER, object_id2 INTEGER, first_frame_number INTEGER, last_frame_number INTEGER, FOREIGN KEY(object_id1) REFERENCES objects(id), FOREIGN KEY(object_id2) REFERENCES objects(id))')
-
-def createIndicatorTables(cursor):
-    # cursor.execute('CREATE TABLE IF NOT EXISTS indicators (id INTEGER PRIMARY KEY, interaction_id INTEGER, indicator_type INTEGER, FOREIGN KEY(interaction_id) REFERENCES interactions(id))')
-    # cursor.execute('CREATE TABLE IF NOT EXISTS indicator_values (indicator_id INTEGER, frame_number INTEGER, value REAL, FOREIGN KEY(indicator_id) REFERENCES indicators(id), PRIMARY KEY(indicator_id, frame_number))')
-    cursor.execute('CREATE TABLE IF NOT EXISTS indicators (interaction_id INTEGER, indicator_type INTEGER, frame_number INTEGER, value REAL, FOREIGN KEY(interaction_id) REFERENCES interactions(id), PRIMARY KEY(interaction_id, indicator_type, frame_number))')
-
-def saveInteraction(cursor, interaction):
-    roadUserNumbers = list(interaction.getRoadUserNumbers())
-    cursor.execute('INSERT INTO interactions VALUES({}, {}, {}, {}, {})'.format(interaction.getNum(), roadUserNumbers[0], roadUserNumbers[1], interaction.getFirstInstant(), interaction.getLastInstant()))
-
-def saveInteractions(filename, interactions):
-    'Saves the interactions in the table'
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-    try:
-        createInteractionTable(cursor)
-        for inter in interactions:
-            saveInteraction(cursor, inter)
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-    connection.commit()
-    connection.close()
-
-def saveIndicator(cursor, interactionNum, indicator):
-    for instant in indicator.getTimeInterval():
-        if indicator[instant]:
-            cursor.execute('INSERT INTO indicators VALUES({}, {}, {}, {})'.format(interactionNum, events.Interaction.indicatorNameToIndices[indicator.getName()], instant, indicator[instant]))
-
-def saveIndicators(filename, interactions, indicatorNames = events.Interaction.indicatorNames):
-    'Saves the indicator values in the table'
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-    try:
-        createInteractionTable(cursor)
-        createIndicatorTables(cursor)
-        for inter in interactions:
-            saveInteraction(cursor, inter)
-            for indicatorName in indicatorNames:
-                indicator = inter.getIndicator(indicatorName)
-                if indicator is not None:
-                    saveIndicator(cursor, inter.getNum(), indicator)
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-    connection.commit()
-    connection.close()
-
-def loadInteractions(filename):
-    '''Loads interaction and their indicators
-    
-    TODO choose the interactions to load'''
-    interactions = []
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-    try:
-        cursor.execute('select INT.id, INT.object_id1, INT.object_id2, INT.first_frame_number, INT.last_frame_number, IND.indicator_type, IND.frame_number, IND.value from interactions INT, indicators IND WHERE INT.id = IND.interaction_id ORDER BY INT.id, IND.indicator_type, IND.frame_number')
-        interactionNum = -1
-        indicatorTypeNum = -1
-        tmpIndicators = {}
-        for row in cursor:
-            if row[0] != interactionNum:
-                interactionNum = row[0]
-                interactions.append(events.Interaction(interactionNum, moving.TimeInterval(row[3],row[4]), row[1], row[2]))
-                interactions[-1].indicators = {}
-            if indicatorTypeNum != row[5] or row[0] != interactionNum:
-                indicatorTypeNum = row[5]
-                indicatorName = events.Interaction.indicatorNames[indicatorTypeNum]
-                indicatorValues = {row[6]:row[7]}
-                interactions[-1].indicators[indicatorName] = indicators.SeverityIndicator(indicatorName, indicatorValues, mostSevereIsMax = not indicatorName in events.Interaction.timeIndicators)
-            else:
-                indicatorValues[row[6]] = row[7]
-                interactions[-1].indicators[indicatorName].timeInterval.last = row[6]
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-        return []
-    connection.close()
-    return interactions
-# load first and last object instants
-# CREATE TEMP TABLE IF NOT EXISTS object_instants AS SELECT OF.object_id, min(frame_number) as first_instant, max(frame_number) as last_instant from positions P, objects_features OF WHERE P.trajectory_id = OF.trajectory_id group by OF.object_id order by OF.object_id
-
-def createBoundingBoxTable(filename, invHomography = None):
-    '''Create the table to store the object bounding boxes in image space
-    '''
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-    try:
-        cursor.execute('CREATE TABLE IF NOT EXISTS bounding_boxes (object_id INTEGER, frame_number INTEGER, x_top_left REAL, y_top_left REAL, x_bottom_right REAL, y_bottom_right REAL,  PRIMARY KEY(object_id, frame_number))')
-        cursor.execute('INSERT INTO bounding_boxes SELECT object_id, frame_number, min(x), min(y), max(x), max(y) from '
-              '(SELECT object_id, frame_number, (x*{}+y*{}+{})/w as x, (x*{}+y*{}+{})/w as y from '
-              '(SELECT OF.object_id, P.frame_number, P.x_coordinate as x, P.y_coordinate as y, P.x_coordinate*{}+P.y_coordinate*{}+{} as w from positions P, objects_features OF WHERE P.trajectory_id = OF.trajectory_id)) '.format(invHomography[0,0], invHomography[0,1], invHomography[0,2], invHomography[1,0], invHomography[1,1], invHomography[1,2], invHomography[2,0], invHomography[2,1], invHomography[2,2])+
-              'GROUP BY object_id, frame_number')
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-    connection.commit()
-    connection.close()
-
-def loadBoundingBoxTableForDisplay(filename):
-    '''Loads bounding boxes from bounding_boxes table for display over trajectories'''
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-    boundingBoxes = {} # list of bounding boxes for each instant
-    try:
-        cursor.execute('SELECT name FROM sqlite_master WHERE type=\'table\' AND name=\'bounding_boxes\'')
-        result = cursor.fetchall()
-        if len(result) > 0:
-            cursor.execute('SELECT * FROM bounding_boxes')
-            for row in cursor:
-                boundingBoxes.setdefault(row[1], []).append([moving.Point(row[2], row[3]), moving.Point(row[4], row[5])])
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-        return boundingBoxes
-    connection.close()
-    return boundingBoxes
-
-#########################
-# saving and loading for scene interpretation
-#########################
-
-def savePOIs(filename, gmm, gmmType, gmmId):
-    '''Saves a Gaussian mixture model (of class sklearn.mixture.GMM)
-    gmmType is a type of GMM, learnt either from beginnings or ends of trajectories'''
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-    if gmmType not in ['beginning', 'end']:
-        print('Unknown POI type {}. Exiting'.format(gmmType))
-        import sys
-        sys.exit()
-    try:
-        cursor.execute('CREATE TABLE IF NOT EXISTS gaussians2d (id INTEGER, type VARCHAR, x_center REAL, y_center REAL, covar00 REAL, covar01 REAL, covar10 REAL, covar11 REAL, covariance_type VARCHAR, weight, mixture_id INTEGER, PRIMARY KEY(id, mixture_id))')
-        for i in xrange(gmm.n_components):
-            cursor.execute('INSERT INTO gaussians2d VALUES({}, \'{}\', {}, {}, {}, {}, {}, {}, \'{}\', {}, {})'.format(i, gmmType, gmm.means_[i][0], gmm.means_[i][1], gmm.covars_[i][0,0], gmm.covars_[i][0,1], gmm.covars_[i][1,0], gmm.covars_[i][1,1], gmm.covariance_type, gmm.weights_[i], gmmId))
-        connection.commit()
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-    connection.close()
-
-def loadPOIs(filename):
-    'Loads all 2D Gaussians in the database'
-    from sklearn import mixture # todo if not avalaible, load data in duck-typed class with same fields
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-    pois = []
-    try:
-        cursor.execute('SELECT * from gaussians2d')
-        gmmId = None
-        gmm = []
-        for row in cursor:
-            if gmmId is None or row[10] != gmmId:
-                if len(gmm) > 0:
-                    tmp = mixture.GMM(len(gmm), covarianceType)
-                    tmp.means_ = array([gaussian['mean'] for gaussian in gmm])
-                    tmp.covars_ = array([gaussian['covar'] for gaussian in gmm])
-                    tmp.weights_ = array([gaussian['weight'] for gaussian in gmm])
-                    tmp.gmmTypes = [gaussian['type'] for gaussian in gmm]
-                    pois.append(tmp)
-                gaussian = {'type': row[1],
-                            'mean': row[2:4],
-                            'covar': array(row[4:8]).reshape(2,2),
-                            'weight': row[9]}
-                gmm = [gaussian]
-                covarianceType = row[8]
-                gmmId = row[10]
-            else:
-                gmm.append({'type': row[1],
-                            'mean': row[2:4],
-                            'covar': array(row[4:8]).reshape(2,2),
-                            'weight': row[9]})
-        if len(gmm) > 0:
-            tmp = mixture.GMM(len(gmm), covarianceType)
-            tmp.means_ = array([gaussian['mean'] for gaussian in gmm])
-            tmp.covars_ = array([gaussian['covar'] for gaussian in gmm])
-            tmp.weights_ = array([gaussian['weight'] for gaussian in gmm])
-            tmp.gmmTypes = [gaussian['type'] for gaussian in gmm]
-            pois.append(tmp)
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-    connection.close()
-    return pois
-    
-#########################
-# saving and loading for scene interpretation (Mohamed Gomaa Mohamed's PhD)
-#########################
-
-def writePrototypesToSqlite(prototypes,nMatching, outputFilename):
-    """ prototype dataset is a dictionary with  keys== routes, values== prototypes Ids """
-    connection = sqlite3.connect(outputFilename)
-    cursor = connection.cursor()
-
-    cursor.execute("CREATE TABLE IF NOT EXISTS prototypes (prototype_id INTEGER,routeIDstart INTEGER,routeIDend INTEGER, nMatching INTEGER, PRIMARY KEY(prototype_id))")
-    
-    for route in prototypes.keys():
-        if prototypes[route]!=[]:
-            for i in prototypes[route]:
-                cursor.execute("insert into prototypes (prototype_id, routeIDstart,routeIDend, nMatching) values (?,?,?,?)",(i,route[0],route[1],nMatching[route][i]))
-                    
-    connection.commit()
-    connection.close()
-    
-def readPrototypesFromSqlite(filename):
-    """
-    This function loads the prototype file in the database 
-    It returns a dictionary for prototypes for each route and nMatching
-    """
-    prototypes = {}
-    nMatching={}
-
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-
-    try:
-        cursor.execute('SELECT * from prototypes order by prototype_id, routeIDstart,routeIDend, nMatching')
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-        return []
-
-    for row in cursor:
-        route=(row[1],row[2])
-        if route not in prototypes.keys():
-            prototypes[route]=[]
-        prototypes[route].append(row[0])
-        nMatching[row[0]]=row[3]
-
-    connection.close()
-    return prototypes,nMatching
-    
-def writeLabelsToSqlite(labels, outputFilename):
-    """ labels is a dictionary with  keys: routes, values: prototypes Ids
-    """
-    connection = sqlite3.connect(outputFilename)
-    cursor = connection.cursor()
-
-    cursor.execute("CREATE TABLE IF NOT EXISTS labels (object_id INTEGER,routeIDstart INTEGER,routeIDend INTEGER, prototype_id INTEGER, PRIMARY KEY(object_id))")
-    
-    for route in labels.keys():
-        if labels[route]!=[]:
-            for i in labels[route]:
-                for j in labels[route][i]:
-                    cursor.execute("insert into labels (object_id, routeIDstart,routeIDend, prototype_id) values (?,?,?,?)",(j,route[0],route[1],i))
-                    
-    connection.commit()
-    connection.close()
-    
-def loadLabelsFromSqlite(filename):
-    labels = {}
-
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-
-    try:
-        cursor.execute('SELECT * from labels order by object_id, routeIDstart,routeIDend, prototype_id')
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-        return []
-
-    for row in cursor:
-        route=(row[1],row[2])
-        p=row[3]
-        if route not in labels.keys():
-            labels[route]={}
-        if p not in labels[route].keys():
-            labels[route][p]=[]
-        labels[route][p].append(row[0])
-
-    connection.close()
-    return labels
-
-def writeSpeedPrototypeToSqlite(prototypes,nmatching, outFilename):
-    """ to match the format of second layer prototypes"""
-    connection = sqlite3.connect(outFilename)
-    cursor = connection.cursor()
-
-    cursor.execute("CREATE TABLE IF NOT EXISTS speedprototypes (spdprototype_id INTEGER,prototype_id INTEGER,routeID_start INTEGER, routeID_end INTEGER, nMatching INTEGER, PRIMARY KEY(spdprototype_id))")
-    
-    for route in prototypes.keys():
-        if prototypes[route]!={}:
-            for i in prototypes[route]:
-                if prototypes[route][i]!= []:
-                    for j in prototypes[route][i]:
-                        cursor.execute("insert into speedprototypes (spdprototype_id,prototype_id, routeID_start, routeID_end, nMatching) values (?,?,?,?,?)",(j,i,route[0],route[1],nmatching[j]))
-                    
-    connection.commit()
-    connection.close()
-    
-def loadSpeedPrototypeFromSqlite(filename):
-    """
-    This function loads the prototypes table in the database of name <filename>.
-    """
-    prototypes = {}
-    nMatching={}
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-
-    try:
-        cursor.execute('SELECT * from speedprototypes order by spdprototype_id,prototype_id, routeID_start, routeID_end, nMatching')
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-        return []
-
-    for row in cursor:
-        route=(row[2],row[3])
-        if route not in prototypes.keys():
-            prototypes[route]={}
-        if row[1] not in prototypes[route].keys():
-            prototypes[route][row[1]]=[]
-        prototypes[route][row[1]].append(row[0])
-        nMatching[row[0]]=row[4]
-
-    connection.close()
-    return prototypes,nMatching
-
-
-def writeRoutesToSqlite(Routes, outputFilename):
-    """ This function writes the activity path define by start and end IDs"""
-    connection = sqlite3.connect(outputFilename)
-    cursor = connection.cursor()
-
-    cursor.execute("CREATE TABLE IF NOT EXISTS routes (object_id INTEGER,routeIDstart INTEGER,routeIDend INTEGER, PRIMARY KEY(object_id))")
-    
-    for route in Routes.keys():
-        if Routes[route]!=[]:
-            for i in Routes[route]:
-                cursor.execute("insert into routes (object_id, routeIDstart,routeIDend) values (?,?,?)",(i,route[0],route[1]))
-                    
-    connection.commit()
-    connection.close()
-    
-def loadRoutesFromSqlite(filename):
-    Routes = {}
-
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-
-    try:
-        cursor.execute('SELECT * from routes order by object_id, routeIDstart,routeIDend')
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-        return []
-
-    for row in cursor:
-        route=(row[1],row[2])
-        if route not in Routes.keys():
-            Routes[route]=[]
-        Routes[route].append(row[0])
-
-    connection.close()
-    return Routes
-
-def setRoutes(filename, objects):
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-    for obj in objects:
-        cursor.execute('update objects set startRouteID = {} WHERE object_id = {}'.format(obj.startRouteID, obj.getNum()))
-        cursor.execute('update objects set endRouteID = {} WHERE object_id = {}'.format(obj.endRouteID, obj.getNum()))        
-    connection.commit()
-    connection.close()
-
-def setRoadUserTypes(filename, objects):
-    '''Saves the user types of the objects in the sqlite database stored in filename
-    The objects should exist in the objects table'''
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-    for obj in objects:
-        cursor.execute('update objects set road_user_type = {} WHERE object_id = {}'.format(obj.getUserType(), obj.getNum()))
-    connection.commit()
-    connection.close()
-
-#########################
-# txt files
-#########################
-
-def openCheck(filename, option = 'r', quitting = False):
-    '''Open file filename in read mode by default
-    and checks it is open'''
-    try:
-        return open(filename, option)
-    except IOError:
-        print 'File %s could not be opened.' % filename
-        if quitting:
-            from sys import exit
-            exit()
-        return None
-
-def readline(f, commentCharacters = commentChar):
-    '''Modified readline function to skip comments
-    Can take a list of characters or a string (in will work in both)'''
-    s = f.readline()
-    while (len(s) > 0) and s[0] in commentCharacters:
-        s = f.readline()
-    return s.strip()
-
-def getLines(f, delimiterChar = delimiterChar, commentCharacters = commentChar):
-    '''Gets a complete entry (all the lines) in between delimiterChar.'''
-    dataStrings = []
-    s = readline(f, commentCharacters)
-    while len(s) > 0 and s[0] != delimiterChar:
-        dataStrings += [s.strip()]
-        s = readline(f, commentCharacters)
-    return dataStrings
-
-def writeList(filename, l):
-    f = openCheck(filename, 'w')
-    for x in l:
-        f.write('{}\n'.format(x))
-    f.close()
-
-def loadListStrings(filename, commentCharacters = commentChar):
-    f = openCheck(filename, 'r')
-    result = getLines(f, commentCharacters)
-    f.close()
-    return result
-
-def getValuesFromINIFile(filename, option, delimiterChar = '=', commentCharacters = commentChar):
-    values = []
-    for l in loadListStrings(filename, commentCharacters):
-        if l.startswith(option):
-            values.append(l.split(delimiterChar)[1].strip())
-    return values
-
-class FakeSecHead(object):
-    '''Add fake section header [asection]
-
-    from http://stackoverflow.com/questions/2819696/parsing-properties-file-in-python/2819788#2819788
-    use read_file in Python 3.2+
-    '''
-    def __init__(self, fp):
-        self.fp = fp
-        self.sechead = '[main]\n'
-
-    def readline(self):
-        if self.sechead:
-            try: return self.sechead
-            finally: self.sechead = None
-        else: return self.fp.readline()
-
-def generatePDLaneColumn(data):
-    data['LANE'] = data['LANE\LINK\NO'].astype(str)+'_'+data['LANE\INDEX'].astype(str)
-
-def convertTrajectoriesVissimToSqlite(filename):
-    '''Relies on a system call to sqlite3
-    sqlite3 [file.sqlite] < import_fzp.sql'''
-    sqlScriptFilename = "import_fzp.sql"
-    # create sql file
-    out = openCheck(sqlScriptFilename, "w")
-    out.write(".separator \";\"\n"+
-              "CREATE TABLE IF NOT EXISTS curvilinear_positions (t REAL, trajectory_id INTEGER, link_id INTEGER, lane_id INTEGER, s_coordinate REAL, y_coordinate REAL, speed REAL, PRIMARY KEY (t, trajectory_id));\n"+
-              ".import "+filename+" curvilinear_positions\n"+
-              "DELETE FROM curvilinear_positions WHERE trajectory_id IS NULL OR trajectory_id = \"NO\";\n")
-    out.close()
-    # system call
-    from subprocess import check_call
-    out = openCheck("err.log", "w")
-    check_call("sqlite3 "+utils.removeExtension(filename)+".sqlite < "+sqlScriptFilename, stderr = out, shell = True)
-    out.close()
-    shutil.os.remove(sqlScriptFilename)
-
-def loadObjectNumbersInLinkFromVissimFile(filename, linkIds):
-    '''Finds the ids of the objects that go through any of the link in the list linkIds'''
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-    queryStatement = 'SELECT DISTINCT trajectory_id FROM curvilinear_positions where link_id IN ('+','.join([str(id) for id in linkIds])+')'
-    try:
-        cursor.execute(queryStatement)
-        return [row[0] for row in cursor]
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-
-def getNObjectsInLinkFromVissimFile(filename, linkIds):
-    '''Returns the number of objects that traveled through the link ids'''
-    connection = sqlite3.connect(filename)
-    cursor = connection.cursor()
-    queryStatement = 'SELECT link_id, COUNT(DISTINCT trajectory_id) FROM curvilinear_positions where link_id IN ('+','.join([str(id) for id in linkIds])+') GROUP BY link_id'
-    try:
-        cursor.execute(queryStatement)
-        return {row[0]:row[1] for row in cursor}
-    except sqlite3.OperationalError as error:
-        printDBError(error)
-
-def loadTrajectoriesFromVissimFile(filename, simulationStepsPerTimeUnit, objectNumbers = None, warmUpLastInstant = None, usePandas = False, nDecimals = 2, lowMemory = True):
-    '''Reads data from VISSIM .fzp trajectory file
-    simulationStepsPerTimeUnit is the number of simulation steps per unit of time used by VISSIM (second)
-    for example, there seems to be 10 simulation steps per simulated second in VISSIM, 
-    so simulationStepsPerTimeUnit should be 10, 
-    so that all times correspond to the number of the simulation step (and can be stored as integers)
-    
-    Objects positions will be considered only after warmUpLastInstant 
-    (if the object has no such position, it won't be loaded)
-
-    Assumed to be sorted over time
-    Warning: if reading from SQLite a limited number of objects, objectNumbers will be the maximum object id'''
-    objects = {} # dictionary of objects index by their id
-
-    if usePandas:
-        data = read_csv(filename, delimiter=';', comment='*', header=0, skiprows = 1, low_memory = lowMemory)
-        generatePDLaneColumn(data)
-        data['TIME'] = data['$VEHICLE:SIMSEC']*simulationStepsPerTimeUnit
-        if warmUpLastInstant is not None:
-            data = data[data['TIME']>=warmUpLastInstant]
-        grouped = data.loc[:,['NO','TIME']].groupby(['NO'], as_index = False)
-        instants = grouped['TIME'].agg({'first': npmin, 'last': npmax})
-        for row_index, row in instants.iterrows():
-            objNum = int(row['NO'])
-            tmp = data[data['NO'] == objNum]
-            objects[objNum] = moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(row['first'], row['last']))
-            # positions should be rounded to nDecimals decimals only
-            objects[objNum].curvilinearPositions = moving.CurvilinearTrajectory(S = npround(tmp['POS'].tolist(), nDecimals), Y = npround(tmp['POSLAT'].tolist(), nDecimals), lanes = tmp['LANE'].tolist())
-            if objectNumbers is not None and objectNumbers > 0 and len(objects) >= objectNumbers:
-                objects.values()
-    else:
-        if filename.endswith(".fzp"):
-            inputfile = openCheck(filename, quitting = True)
-            line = readline(inputfile, '*$')
-            while len(line) > 0:#for line in inputfile:
-                data = line.strip().split(';')
-                objNum = int(data[1])
-                instant = float(data[0])*simulationStepsPerTimeUnit
-                s = float(data[4])
-                y = float(data[5])
-                lane = data[2]+'_'+data[3]
-                if objNum not in objects:
-                    if warmUpLastInstant is None or instant >= warmUpLastInstant:
-                        if objectNumbers is None or len(objects) < objectNumbers:
-                            objects[objNum] = moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(instant, instant))
-                            objects[objNum].curvilinearPositions = moving.CurvilinearTrajectory()
-                if (warmUpLastInstant is None or instant >= warmUpLastInstant) and objNum in objects:
-                    objects[objNum].timeInterval.last = instant
-                    objects[objNum].curvilinearPositions.addPositionSYL(s, y, lane)
-                line = readline(inputfile, '*$')
-        elif filename.endswith(".sqlite"):
-            connection = sqlite3.connect(filename)
-            cursor = connection.cursor()
-            queryStatement = 'SELECT t, trajectory_id, link_id, lane_id, s_coordinate, y_coordinate FROM curvilinear_positions'
-            if objectNumbers is not None:
-                queryStatement += ' WHERE trajectory_id '+getObjectCriteria(objectNumbers)
-            queryStatement += ' ORDER BY trajectory_id, t'
-            try:
-                cursor.execute(queryStatement)
-                for row in cursor:
-                    objNum = row[1]
-                    instant = row[0]*simulationStepsPerTimeUnit
-                    s = row[4]
-                    y = row[5]
-                    lane = '{}_{}'.format(row[2], row[3])
-                    if objNum not in objects:
-                        if warmUpLastInstant is None or instant >= warmUpLastInstant:
-                            if objectNumbers is None or len(objects) < objectNumbers:
-                                objects[objNum] = moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(instant, instant))
-                                objects[objNum].curvilinearPositions = moving.CurvilinearTrajectory()
-                    if (warmUpLastInstant is None or instant >= warmUpLastInstant) and objNum in objects:
-                        objects[objNum].timeInterval.last = instant
-                        objects[objNum].curvilinearPositions.addPositionSYL(s, y, lane)
-            except sqlite3.OperationalError as error:
-                printDBError(error)
-        else:
-            print("File type of "+filename+" not supported (only .sqlite and .fzp files)")
-        return objects.values()
-
-def selectPDLanes(data, lanes = None):
-    '''Selects the subset of data for the right lanes
-
-    Lane format is a string 'x_y' where x is link index and y is lane index'''
-    if lanes is not None:
-        if 'LANE' not in data.columns:
-            generatePDLaneColumn(data)
-        indices = (data['LANE'] == lanes[0])
-        for l in lanes[1:]:
-            indices = indices | (data['LANE'] == l)
-        return data[indices]
-    else:
-        return data
-
-def countStoppedVehiclesVissim(filename, lanes = None, proportionStationaryTime = 0.7):
-    '''Counts the number of vehicles stopped for a long time in a VISSIM trajectory file
-    and the total number of vehicles
-
-    Vehicles are considered finally stationary
-    if more than proportionStationaryTime of their total time
-    If lanes is not None, only the data for the selected lanes will be provided
-    (format as string x_y where x is link index and y is lane index)'''
-    if filename.endswith(".fzp"):
-        columns = ['NO', '$VEHICLE:SIMSEC', 'POS']
-        if lanes is not None:
-            columns += ['LANE\LINK\NO', 'LANE\INDEX']
-        data = read_csv(filename, delimiter=';', comment='*', header=0, skiprows = 1, usecols = columns, low_memory = lowMemory)
-        data = selectPDLanes(data, lanes)
-        data.sort(['$VEHICLE:SIMSEC'], inplace = True)
-
-        nStationary = 0
-        nVehicles = 0
-        for name, group in data.groupby(['NO'], sort = False):
-            nVehicles += 1
-            positions = array(group['POS'])
-            diff = positions[1:]-positions[:-1]
-            if npsum(diff == 0.) >= proportionStationaryTime*(len(positions)-1):
-                nStationary += 1
-    elif filename.endswith(".sqlite"):
-        # select trajectory_id, t, s_coordinate, speed from curvilinear_positions where trajectory_id between 1860 and 1870 and speed < 0.1
-        # pb of the meaning of proportionStationaryTime in arterial network? Why proportion of existence time?
-        pass
-    else:
-        print("File type of "+filename+" not supported (only .sqlite and .fzp files)")
-
-    return nStationary, nVehicles
-
-def countCollisionsVissim(filename, lanes = None, collisionTimeDifference = 0.2, lowMemory = True):
-    '''Counts the number of collisions per lane in a VISSIM trajectory file
-
-    To distinguish between cars passing and collision, 
-    one checks when the sign of the position difference inverts
-    (if the time are closer than collisionTimeDifference)
-    If lanes is not None, only the data for the selected lanes will be provided
-    (format as string x_y where x is link index and y is lane index)'''
-    data = read_csv(filename, delimiter=';', comment='*', header=0, skiprows = 1, usecols = ['LANE\LINK\NO', 'LANE\INDEX', '$VEHICLE:SIMSEC', 'NO', 'POS'], low_memory = lowMemory)
-    data = selectPDLanes(data, lanes)
-    data = data.convert_objects(convert_numeric=True)
-
-    merged = merge(data, data, how='inner', left_on=['LANE\LINK\NO', 'LANE\INDEX', '$VEHICLE:SIMSEC'], right_on=['LANE\LINK\NO', 'LANE\INDEX', '$VEHICLE:SIMSEC'], sort = False)
-    merged = merged[merged['NO_x']>merged['NO_y']]
-
-    nCollisions = 0
-    for name, group in merged.groupby(['LANE\LINK\NO', 'LANE\INDEX', 'NO_x', 'NO_y']):
-        diff = group['POS_x']-group['POS_y']
-        # diff = group['POS_x']-group['POS_y'] # to check the impact of convert_objects and the possibility of using type conversion in read_csv or function to convert strings if any
-        if len(diff) >= 2 and npmin(diff) < 0 and npmax(diff) > 0:
-            xidx = diff[diff < 0].argmax()
-            yidx = diff[diff > 0].argmin()
-            if abs(group.loc[xidx, '$VEHICLE:SIMSEC'] - group.loc[yidx, '$VEHICLE:SIMSEC']) <= collisionTimeDifference:
-                nCollisions += 1
-
-    # select TD1.link_id, TD1.lane_id from temp.diff_positions as TD1, temp.diff_positions as TD2 where TD1.link_id = TD2.link_id and TD1.lane_id = TD2.lane_id and TD1.id1 = TD2.id1 and TD1.id2 = TD2.id2 and TD1.t = TD2.t+0.1 and TD1.diff*TD2.diff < 0; # besoin de faire un group by??
-    # create temp table diff_positions as select CP1.t as t, CP1.link_id as link_id, CP1.lane_id as lane_id, CP1.trajectory_id as id1, CP2.trajectory_id as id2, CP1.s_coordinate - CP2.s_coordinate as diff from curvilinear_positions CP1, curvilinear_positions CP2 where CP1.link_id = CP2.link_id and CP1.lane_id = CP2.lane_id and CP1.t = CP2.t and CP1.trajectory_id > CP2.trajectory_id;
-    # SQL select link_id, lane_id, id1, id2, min(diff), max(diff) from (select CP1.t as t, CP1.link_id as link_id, CP1.lane_id as lane_id, CP1.trajectory_id as id1, CP2.trajectory_id as id2, CP1.s_coordinate - CP2.s_coordinate as diff from curvilinear_positions CP1, curvilinear_positions CP2 where CP1.link_id = CP2.link_id and CP1.lane_id = CP2.lane_id and CP1.t = CP2.t and CP1.trajectory_id > CP2.trajectory_id) group by link_id, lane_id, id1, id2 having min(diff)*max(diff) < 0
-    return nCollisions
-    
-def loadTrajectoriesFromNgsimFile(filename, nObjects = -1, sequenceNum = -1):
-    '''Reads data from the trajectory data provided by NGSIM project 
-    and returns the list of Feature objects'''
-    objects = []
-
-    inputfile = openCheck(filename, quitting = True)
-
-    def createObject(numbers):
-        firstFrameNum = int(numbers[1])
-        # do the geometry and usertype
-
-        firstFrameNum = int(numbers[1])
-        lastFrameNum = firstFrameNum+int(numbers[2])-1
-        #time = moving.TimeInterval(firstFrameNum, firstFrameNum+int(numbers[2])-1)
-        obj = moving.MovingObject(num = int(numbers[0]), 
-                                  timeInterval = moving.TimeInterval(firstFrameNum, lastFrameNum), 
-                                  positions = moving.Trajectory([[float(numbers[6])],[float(numbers[7])]]), 
-                                  userType = int(numbers[10]))
-        obj.userType = int(numbers[10])
-        obj.laneNums = [int(numbers[13])]
-        obj.precedingVehicles = [int(numbers[14])] # lead vehicle (before)
-        obj.followingVehicles = [int(numbers[15])] # following vehicle (after)
-        obj.spaceHeadways = [float(numbers[16])] # feet
-        obj.timeHeadways = [float(numbers[17])] # seconds
-        obj.curvilinearPositions = moving.CurvilinearTrajectory([float(numbers[5])],[float(numbers[4])], obj.laneNums) # X is the longitudinal coordinate
-        obj.speeds = [float(numbers[11])]
-        obj.size = [float(numbers[8]), float(numbers[9])] # 8 lengh, 9 width # TODO: temporary, should use a geometry object
-        return obj
-
-    numbers = readline(inputfile).strip().split()
-    if (len(numbers) > 0):
-        obj = createObject(numbers)
-
-    for line in inputfile:
-        numbers = line.strip().split()
-        if obj.getNum() != int(numbers[0]):
-            # check and adapt the length to deal with issues in NGSIM data
-            if (obj.length() != obj.positions.length()):
-                print 'length pb with object %s (%d,%d)' % (obj.getNum(),obj.length(),obj.positions.length())
-                obj.last = obj.getFirstInstant()+obj.positions.length()-1
-                #obj.velocities = utils.computeVelocities(f.positions) # compare norm to speeds ?
-            objects.append(obj)
-            if (nObjects>0) and (len(objects)>=nObjects):
-                break
-            obj = createObject(numbers)
-        else:
-            obj.laneNums.append(int(numbers[13]))
-            obj.positions.addPositionXY(float(numbers[6]), float(numbers[7]))
-            obj.curvilinearPositions.addPositionSYL(float(numbers[5]), float(numbers[4]), obj.laneNums[-1])
-            obj.speeds.append(float(numbers[11]))
-            obj.precedingVehicles.append(int(numbers[14]))
-            obj.followingVehicles.append(int(numbers[15]))
-            obj.spaceHeadways.append(float(numbers[16]))
-            obj.timeHeadways.append(float(numbers[17]))
-
-            if (obj.size[0] != float(numbers[8])):
-                print 'changed length obj %d' % (obj.getNum())
-            if (obj.size[1] != float(numbers[9])):
-                print 'changed width obj %d' % (obj.getNum())
-    
-    inputfile.close()
-    return objects
-
-def convertNgsimFile(inputfile, outputfile, append = False, nObjects = -1, sequenceNum = 0):
-    '''Reads data from the trajectory data provided by NGSIM project
-    and converts to our current format.'''
-    if append:
-        out = openCheck(outputfile,'a')
-    else:
-        out = openCheck(outputfile,'w')
-    nObjectsPerType = [0,0,0]
-
-    features = loadNgsimFile(inputfile, sequenceNum)
-    for f in features:
-        nObjectsPerType[f.userType-1] += 1
-        f.write(out)
-
-    print nObjectsPerType
-        
-    out.close()
-
-def writePositionsToCsv(f, obj):
-    timeInterval = obj.getTimeInterval()
-    positions = obj.getPositions()
-    curvilinearPositions = obj.getCurvilinearPositions()
-    for i in xrange(int(obj.length())):
-        p1 = positions[i]
-        s = '{},{},{},{}'.format(obj.num,timeInterval[i],p1.x,p1.y)
-        if curvilinearPositions is not None:
-            p2 = curvilinearPositions[i]
-            s += ',{},{}'.format(p2[0],p2[1])
-        f.write(s+'\n')
-
-def writeTrajectoriesToCsv(filename, objects):
-    f = openCheck(filename, 'w')
-    for i,obj in enumerate(objects):
-        writePositionsToCsv(f, obj)
-    f.close()
-
-
-#########################
-# Utils to read .ini type text files for configuration, meta data...
-#########################
-
-class ProcessParameters(VideoFilenameAddable):
-    '''Class for all parameters controlling data processing: input,
-    method parameters, etc. for tracking, classification and safety
-
-    Note: framerate is already taken into account'''
-
-    def loadConfigFile(self, filename):
-        from ConfigParser import ConfigParser
-        from os import path
-
-        config = ConfigParser()
-        config.readfp(FakeSecHead(openCheck(filename)))
-        self.sectionHeader = config.sections()[0]
-        # Tracking/display parameters
-        self.videoFilename = config.get(self.sectionHeader, 'video-filename')
-        self.databaseFilename = config.get(self.sectionHeader, 'database-filename')
-        self.homographyFilename = config.get(self.sectionHeader, 'homography-filename')
-        if (path.exists(self.homographyFilename)):
-            self.homography = loadtxt(self.homographyFilename)
-        else:
-            self.homography = None
-        self.intrinsicCameraFilename = config.get(self.sectionHeader, 'intrinsic-camera-filename')
-        if (path.exists(self.intrinsicCameraFilename)):
-            self.intrinsicCameraMatrix = loadtxt(self.intrinsicCameraFilename)
-        else:
-            self.intrinsicCameraMatrix = None
-        distortionCoefficients = getValuesFromINIFile(filename, 'distortion-coefficients', '=')        
-        self.distortionCoefficients = [float(x) for x in distortionCoefficients]
-        self.undistortedImageMultiplication  = config.getfloat(self.sectionHeader, 'undistorted-size-multiplication')
-        self.undistort = config.getboolean(self.sectionHeader, 'undistort')
-        self.firstFrameNum = config.getint(self.sectionHeader, 'frame1')
-        self.videoFrameRate = config.getfloat(self.sectionHeader, 'video-fps')
-
-        # Classification parameters
-        self.speedAggregationMethod = config.get(self.sectionHeader, 'speed-aggregation-method')
-        self.nFramesIgnoreAtEnds = config.getint(self.sectionHeader, 'nframes-ignore-at-ends')
-        self.speedAggregationQuantile = config.getint(self.sectionHeader, 'speed-aggregation-quantile')
-        self.minSpeedEquiprobable = config.getfloat(self.sectionHeader, 'min-speed-equiprobable')
-        self.minNPixels = config.getint(self.sectionHeader, 'min-npixels-crop')
-        self.pedBikeCarSVMFilename = config.get(self.sectionHeader, 'pbv-svm-filename')
-        self.bikeCarSVMFilename = config.get(self.sectionHeader, 'bv-svm-filename')
-        self.maxPedestrianSpeed = config.getfloat(self.sectionHeader, 'max-ped-speed')
-        self.maxCyclistSpeed = config.getfloat(self.sectionHeader, 'max-cyc-speed')
-        self.meanPedestrianSpeed = config.getfloat(self.sectionHeader, 'mean-ped-speed')
-        self.stdPedestrianSpeed = config.getfloat(self.sectionHeader, 'std-ped-speed')
-        self.locationCyclistSpeed = config.getfloat(self.sectionHeader, 'cyc-speed-loc')
-        self.scaleCyclistSpeed = config.getfloat(self.sectionHeader, 'cyc-speed-scale')
-        self.meanVehicleSpeed = config.getfloat(self.sectionHeader, 'mean-veh-speed')
-        self.stdVehicleSpeed = config.getfloat(self.sectionHeader, 'std-veh-speed')
-
-        # Safety parameters
-        self.maxPredictedSpeed = config.getfloat(self.sectionHeader, 'max-predicted-speed')/3.6/self.videoFrameRate
-        self.predictionTimeHorizon = config.getfloat(self.sectionHeader, 'prediction-time-horizon')*self.videoFrameRate
-        self.collisionDistance = config.getfloat(self.sectionHeader, 'collision-distance')
-        self.crossingZones = config.getboolean(self.sectionHeader, 'crossing-zones')
-        self.predictionMethod = config.get(self.sectionHeader, 'prediction-method')
-        self.nPredictedTrajectories = config.getint(self.sectionHeader, 'npredicted-trajectories')
-        self.maxNormalAcceleration = config.getfloat(self.sectionHeader, 'max-normal-acceleration')/self.videoFrameRate**2
-        self.maxNormalSteering = config.getfloat(self.sectionHeader, 'max-normal-steering')/self.videoFrameRate
-        self.minExtremeAcceleration = config.getfloat(self.sectionHeader, 'min-extreme-acceleration')/self.videoFrameRate**2
-        self.maxExtremeAcceleration = config.getfloat(self.sectionHeader, 'max-extreme-acceleration')/self.videoFrameRate**2
-        self.maxExtremeSteering = config.getfloat(self.sectionHeader, 'max-extreme-steering')/self.videoFrameRate
-        self.useFeaturesForPrediction = config.getboolean(self.sectionHeader, 'use-features-prediction')
-
-    def __init__(self, filename = None):
-        if filename is not None:
-            self.loadConfigFile(filename)
-
-    def convertToFrames(self, speedRatio = 3.6):
-        '''Converts parameters with a relationship to time in 'native' frame time
-        speedRatio is the conversion from the speed unit in the config file
-        to the distance per second
-
-        ie param(config file) = speedRatio x fps x param(used in program)
-        eg km/h = 3.6 (m/s to km/h) x frame/s x m/frame'''
-        denominator = self.videoFrameRate*speedRatio
-        denominator2 = denominator**2
-        self.minSpeedEquiprobable = self.minSpeedEquiprobable/denominator
-        self.maxPedestrianSpeed = self.maxPedestrianSpeed/denominator
-        self.maxCyclistSpeed = self.maxCyclistSpeed/denominator
-        self.meanPedestrianSpeed = self.meanPedestrianSpeed/denominator
-        self.stdPedestrianSpeed = self.stdPedestrianSpeed/denominator
-        self.meanVehicleSpeed = self.meanVehicleSpeed/denominator
-        self.stdVehicleSpeed = self.stdVehicleSpeed/denominator
-        # special case for the lognormal distribution
-        self.locationCyclistSpeed = self.locationCyclistSpeed-log(denominator)
-        #self.scaleCyclistSpeed = self.scaleCyclistSpeed
-
-class SceneParameters(object):
-    def __init__(self, config, sectionName):
-        from ConfigParser import NoOptionError
-        from ast import literal_eval
-        try:
-            self.sitename = config.get(sectionName, 'sitename')
-            self.databaseFilename = config.get(sectionName, 'data-filename')
-            self.homographyFilename = config.get(sectionName, 'homography-filename')
-            self.calibrationFilename = config.get(sectionName, 'calibration-filename') 
-            self.videoFilename = config.get(sectionName, 'video-filename')
-            self.frameRate = config.getfloat(sectionName, 'framerate')
-            self.date = datetime.strptime(config.get(sectionName, 'date'), datetimeFormat) # 2011-06-22 11:00:39
-            self.translation = literal_eval(config.get(sectionName, 'translation')) #         = [0.0, 0.0]
-            self.rotation = config.getfloat(sectionName, 'rotation')
-            self.duration = config.getint(sectionName, 'duration')
-        except NoOptionError as e:
-            print(e)
-            print('Not a section for scene meta-data')
-
-    @staticmethod
-    def loadConfigFile(filename):
-        from ConfigParser import ConfigParser
-        config = ConfigParser()
-        config.readfp(openCheck(filename))
-        configDict = dict()
-        for sectionName in config.sections():
-            configDict[sectionName] = SceneParameters(config, sectionName) 
-        return configDict
-
-
-if __name__ == "__main__":
-    import doctest
-    import unittest
-    suite = doctest.DocFileSuite('tests/storage.txt')
-    unittest.TextTestRunner().run(suite)
-#     #doctest.testmod()
-#     #doctest.testfile("example.txt")
--- a/python/tests/events.txt	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
->>> from events import *
->>> from moving import MovingObject, TimeInterval, Point
->>> from prediction import ConstantPredictionParameters
-
->>> objects = [MovingObject(num = i, timeInterval = TimeInterval(0,10)) for i in range(10)]
->>> interactions = createInteractions(objects)
->>> len([i for i in interactions if len(i.roadUserNumbers) == 1])
-0
->>> objects2 = [MovingObject(num = i, timeInterval = TimeInterval(0,10)) for i in range(100, 110)]
->>> interactions = createInteractions(objects, objects2)
->>> len([i for i in interactions if len(i.roadUserNumbers) == 1])
-0
-
->>> o1 = MovingObject.generate(Point(-5.,0.), Point(1.,0.), TimeInterval(0,10))
->>> o2 = MovingObject.generate(Point(0.,-5.), Point(0.,1.), TimeInterval(0,10))
->>> inter = Interaction(roadUser1 = o1, roadUser2 = o2)
->>> inter.computeIndicators()
->>> predictionParams = ConstantPredictionParameters(10.)
->>> inter.computeCrossingsCollisions(predictionParams, 0.1, 10)
->>> ttc = inter.getIndicator("Time to Collision")
->>> ttc[0]
-5.0
->>> ttc[1]
-4.0
->>> (inter.collisionPoints[0][0] - Point(0.,0.)).norm2() < 0.0001
-True
->>> (inter.collisionPoints[4][0] - Point(0.,0.)).norm2() < 0.0001
-True
->>> inter.getIndicator(Interaction.indicatorNames[1])[4] < 0.000001 # collision angle
-True
->>> inter.getIndicator(Interaction.indicatorNames[1])[5] is None
-True
->>> inter.getIndicator(Interaction.indicatorNames[1])[6] # doctest:+ELLIPSIS
-3.1415...
--- a/python/tests/indicators.txt	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
->>> from indicators import *
->>> from moving import TimeInterval,Trajectory
-
->>> indic1 = TemporalIndicator('bla', [0,3,-4], TimeInterval(4,6))
->>> indic1.empty()
-False
->>> indic1.getIthValue(1)
-3
->>> indic1.getIthValue(3)
->>> indic1[6]
--4
->>> indic1[7]
->>> [v for v in indic1]
-[0, 3, -4]
->>> indic1 = TemporalIndicator('bla', {2:0,4:3,5:-5})
->>> indic1.getIthValue(1)
-3
->>> indic1.getIthValue(3)
->>> indic1[2]
-0
-
->>> t1 = Trajectory([[0.5,1.5,2.5],[0.5,3.5,6.5]])
->>> indicatorMap([1,2,3], t1, 1)
-{(1.0, 3.0): 2.0, (2.0, 6.0): 3.0, (0.0, 0.0): 1.0}
->>> indicatorMap([1,2,3], t1, 4)
-{(0.0, 1.0): 3.0, (0.0, 0.0): 1.5}
--- a/python/tests/moving.txt	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,230 +0,0 @@
->>> from moving import *
->>> import storage
->>> import numpy as np
-
->>> Interval().empty()
-True
->>> Interval(0,1).empty()
-False
->>> Interval(0,1)
-[0, 1]
->>> Interval(0,1).length()
-1.0
->>> Interval(23.2,24.9).length()
-1.6999999999999993
->>> Interval(10,8).length()
-0.0
-
->>> TimeInterval(0,1).length()
-2.0
->>> TimeInterval(10,8).length()
-0.0
->>> TimeInterval(10,8) == TimeInterval(10,8)
-True
->>> TimeInterval(10,8) == TimeInterval(8,10)
-True
->>> TimeInterval(11,8) == TimeInterval(10,8)
-False
-
->>> [i for i in TimeInterval(9,13)]
-[9, 10, 11, 12, 13]
-
->>> TimeInterval(2,5).equal(TimeInterval(2,5))
-True
->>> TimeInterval(2,5).equal(TimeInterval(2,4))
-False
->>> TimeInterval(2,5).equal(TimeInterval(5,2))
-False
-
->>> TimeInterval(3,6).distance(TimeInterval(4,6))
-0
->>> TimeInterval(3,6).distance(TimeInterval(6,10))
-0
->>> TimeInterval(3,6).distance(TimeInterval(8,10))
-2
->>> TimeInterval(20,30).distance(TimeInterval(3,15))
-5
->>> TimeInterval.unionIntervals([TimeInterval(3,6), TimeInterval(8,10),TimeInterval(11,15)])
-[3, 15]
-
->>> Point(0,3) == Point(0,3)
-True
->>> Point(0,3) == Point(0,3.2)
-False
->>> Point(3,4)-Point(1,7)
-(2.000000,-3.000000)
->>> -Point(1,2)
-(-1.000000,-2.000000)
->>> Point(1,2).multiply(0.5)
-(0.500000,1.000000)
-
->>> Point(3,2).norm2Squared()
-13
-
->>> Point.distanceNorm2(Point(3,4),Point(1,7))
-3.605551275463989
-
->>> Point(3,2).inPolygon(np.array([[0,0],[1,0],[1,1],[0,1]]))
-False
->>> Point(3,2).inPolygon(np.array([[0,0],[4,0],[4,3],[0,3]]))
-True
-
->>> predictPositionNoLimit(10, Point(0,0), Point(1,1)) # doctest:+ELLIPSIS
-((1.0...,1.0...), (10.0...,10.0...))
-
->>> segmentIntersection(Point(0,0), Point(0,1), Point(1,1), Point(2,3))
->>> segmentIntersection(Point(0,1), Point(0,3), Point(1,0), Point(3,1))
->>> segmentIntersection(Point(0.,0.), Point(2.,2.), Point(0.,2.), Point(2.,0.))
-(1.000000,1.000000)
->>> segmentIntersection(Point(0,0), Point(4,4), Point(0,4), Point(4,0))
-(2.000000,2.000000)
->>> segmentIntersection(Point(0,1), Point(1,2), Point(2,0), Point(3,2))
-
->>> t1 = Trajectory.fromPointList([(92.2, 102.9), (56.7, 69.6)])
->>> t2 = Trajectory.fromPointList([(92.2, 102.9), (56.7, 69.6)])
->>> t1 == t2
-True
->>> t3 = Trajectory.fromPointList([(92.24, 102.9), (56.7, 69.6)])
->>> t1 == t3
-False
->>> t3 = Trajectory.fromPointList([(92.2, 102.9), (56.7, 69.6), (56.7, 69.6)])
->>> t1 == t3
-False
-
->>> left = Trajectory.fromPointList([(92.291666666666686, 102.99239033124439), (56.774193548387103, 69.688898836168306)])
->>> middle = Trajectory.fromPointList([(87.211021505376351, 93.390778871978512), (59.032258064516128, 67.540286481647257)])
->>> right = Trajectory.fromPointList([(118.82392473118281, 115.68263205013426), (63.172043010752688, 66.600268576544309)])
->>> alignments = [left, middle, right]
->>> for a in alignments: a.computeCumulativeDistances()
->>> getSYfromXY(Point(73, 82), alignments)
-[1, 0, (73.819977,81.106170), 18.172277808821125, 18.172277808821125, 1.2129694042343868]
->>> getSYfromXY(Point(78, 83), alignments, 0.5)
-[1, 0, (77.033188,84.053889), 13.811799123113715, 13.811799123113715, -1.4301775140225983]
-
->>> Trajectory().length()
-0
->>> t1 = Trajectory([[0.5,1.5,2.5],[0.5,3.5,6.5]])
->>> t1.length() == 3.
-True
->>> t1[1]
-(1.500000,3.500000)
-
->>> t1.differentiate()
-(1.000000,3.000000) (1.000000,3.000000)
->>> t1.differentiate(True)
-(1.000000,3.000000) (1.000000,3.000000) (1.000000,3.000000)
->>> t1 = Trajectory([[0.5,1.5,3.5],[0.5,2.5,7.5]])
->>> t1.differentiate()
-(1.000000,2.000000) (2.000000,5.000000)
-
->>> t1.computeCumulativeDistances()
->>> t1.getDistance(0)
-2.23606797749979
->>> t1.getDistance(1)
-5.385164807134504
->>> t1.getDistance(2)
-Index 2 beyond trajectory length 3-1
->>> t1.getCumulativeDistance(0)
-0.0
->>> t1.getCumulativeDistance(1)
-2.23606797749979
->>> t1.getCumulativeDistance(2)
-7.6212327846342935
->>> t1.getCumulativeDistance(3)
-Index 3 beyond trajectory length 3
-
-
->>> from utils import LCSS
->>> lcss = LCSS(lambda x,y: Point.distanceNorm2(x,y) <= 0.1)
->>> Trajectory.lcss(t1, t1, lcss)
-3
->>> lcss = LCSS(lambda p1, p2: (p1-p2).normMax() <= 0.1)
->>> Trajectory.lcss(t1, t1, lcss)
-3
-
->>> p1=Point(0,0)
->>> p2=Point(1,0)
->>> v1 = Point(0.1,0.1)
->>> v2 = Point(-0.1, 0.1)
->>> abs(Point.timeToCollision(p1, p2, v1, v2, 0.)-5.0) < 0.00001
-True
->>> abs(Point.timeToCollision(p1, p2, v1, v2, 0.1)-4.5) < 0.00001
-True
->>> p1=Point(0,1)
->>> p2=Point(1,0)
->>> v1 = Point(0,0.1)
->>> v2 = Point(0.1, 0)
->>> Point.timeToCollision(p1, p2, v1, v2, 0.) == None
-True
->>> Point.timeToCollision(p2, p1, v2, v1, 0.) == None
-True
->>> Point.midPoint(p1, p2)
-(0.500000,0.500000)
-
->>> objects = storage.loadTrajectoriesFromSqlite('../samples/laurier.sqlite', 'object')
->>> len(objects)
-5
->>> objects[0].hasFeatures()
-False
->>> features = storage.loadTrajectoriesFromSqlite('../samples/laurier.sqlite', 'feature')
->>> for o in objects: o.setFeatures(features)
->>> objects[0].hasFeatures()
-True
-
->>> o1 = MovingObject.generate(Point(-5.,0.), Point(1.,0.), TimeInterval(0,10))
->>> o2 = MovingObject.generate(Point(0.,-5.), Point(0.,1.), TimeInterval(0,10))
->>> MovingObject.computePET(o1, o2, 0.1)
-0.0
->>> o2 = MovingObject.generate(Point(0.,-5.), Point(0.,1.), TimeInterval(5,15))
->>> MovingObject.computePET(o1, o2, 0.1)
-5.0
-
->>> t = CurvilinearTrajectory(S = [1., 2., 3., 5.], Y = [0.5, 0.5, 0.6, 0.7], lanes = ['1']*4)
->>> t.differentiate() # doctest:+ELLIPSIS
-[1.0, 0.0, '1'] [1.0, 0.099..., '1'] [2.0, 0.099..., '1']
->>> t.differentiate(True) # doctest:+ELLIPSIS
-[1.0, 0.0, '1'] [1.0, 0.099..., '1'] [2.0, 0.099..., '1'] [2.0, 0.099..., '1']
->>> t = CurvilinearTrajectory(S = [1.], Y = [0.5], lanes = ['1'])
->>> t.differentiate().empty()
-True
-
->>> o1 = MovingObject.generate(Point(0., 2.), Point(0., 1.), TimeInterval(0,2))
->>> o1.classifyUserTypeSpeedMotorized(0.5, np.median)
->>> userTypeNames[o1.getUserType()]
-'car'
->>> o1.classifyUserTypeSpeedMotorized(1.5, np.median)
->>> userTypeNames[o1.getUserType()]
-'pedestrian'
-
->>> o1 = MovingObject.generate(Point(0.,0.), Point(1.,0.), TimeInterval(0,10))
->>> gt1 = BBMovingObject(1, TimeInterval(0,10), MovingObject.generate(Point(0.2,0.6), Point(1.,0.), TimeInterval(0,10)), MovingObject.generate(Point(-0.2,-0.4), Point(1.,0.), TimeInterval(0,10)))
->>> gt1.computeCentroidTrajectory()
->>> computeClearMOT([gt1], [], 0.2, 0, 10)
-(None, 0.0, 11, 0, 0, 11)
->>> computeClearMOT([], [o1], 0.2, 0, 10)
-(None, None, 0, 0, 11, 0)
->>> computeClearMOT([gt1], [o1], 0.2, 0, 10) # doctest:+ELLIPSIS
-(0.0999..., 1.0, 0, 0, 0, 11)
->>> computeClearMOT([gt1], [o1], 0.05, 0, 10)
-(None, -1.0, 11, 0, 11, 11)
-
->>> o1 = MovingObject(1, TimeInterval(0,3), positions = Trajectory([range(4), [0.1, 0.1, 1.1, 1.1]]))
->>> o2 = MovingObject(2, TimeInterval(0,3), positions = Trajectory([range(4), [0.9, 0.9, -0.1, -0.1]]))
->>> gt1 = BBMovingObject(1, TimeInterval(0,3), MovingObject(positions = Trajectory([range(4), [0.]*4])), MovingObject(positions = Trajectory([range(4), [0.]*4])))
->>> gt1.computeCentroidTrajectory()
->>> gt2 = BBMovingObject(2, TimeInterval(0,3), MovingObject(positions = Trajectory([range(4), [1.]*4])), MovingObject(positions = Trajectory([range(4), [1.]*4])))
->>> gt2.computeCentroidTrajectory()
->>> computeClearMOT([gt1, gt2], [o1, o2], 0.2, 0, 3) # doctest:+ELLIPSIS
-(0.1..., 0.75, 0, 2, 0, 8)
->>> computeClearMOT([gt2, gt1], [o2, o1], 0.2, 0, 3) # doctest:+ELLIPSIS
-(0.1..., 0.75, 0, 2, 0, 8)
->>> computeClearMOT([gt1], [o1, o2], 0.2, 0, 3)
-(0.1, -0.25, 0, 1, 4, 4)
->>> computeClearMOT([gt1], [o2, o1], 0.2, 0, 3) # symmetry
-(0.1, -0.25, 0, 1, 4, 4)
->>> computeClearMOT([gt1, gt2], [o1], 0.2, 0, 3) # doctest:+ELLIPSIS
-(0.100..., 0.375, 4, 1, 0, 8)
->>> computeClearMOT([gt2, gt1], [o1], 0.2, 0, 3) # doctest:+ELLIPSIS
-(0.100..., 0.375, 4, 1, 0, 8)
->>> computeClearMOT([gt1, gt2], [o1, o2], 0.08, 0, 3)
-(None, -1.0, 8, 0, 8, 8)
--- a/python/tests/moving_shapely.txt	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,28 +0,0 @@
->>> from moving import *
->>> from shapely.geometry import Polygon
->>> from shapely.prepared import prep
-
->>> t1 = Trajectory([[0.5,1.5,2.5],[0.5,3.5,6.5]])
->>> poly = Polygon([[0,0],[4,0],[4,3],[0,3]])
->>> sub1, sub2 = t1.getTrajectoryInPolygon(poly)
->>> sub1
-(0.500000,0.500000)
->>> sub1, sub2 = t1.getTrajectoryInPolygon(Polygon([[10,10],[14,10],[14,13],[10,13]]))
->>> sub1.length()
-0
->>> sub1, sub2 = t1.getTrajectoryInPolygon(prep(poly))
->>> sub1
-(0.500000,0.500000)
->>> t2 = t1.differentiate(True)
->>> sub1, sub2 = t1.getTrajectoryInPolygon(prep(poly), t2)
->>> sub1.length() == sub2.length()
-True
->>> sub1
-(0.500000,0.500000)
->>> sub2
-(1.000000,3.000000)
-
->>> t1.proportionInPolygon(poly, 0.5)
-False
->>> t1.proportionInPolygon(poly, 0.3)
-True
--- a/python/tests/prediction.txt	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
->>> from prediction import *
->>> import moving
-
->>> et = PredictedTrajectoryConstant(moving.Point(0,0), moving.Point(1,0))
->>> et.predictPosition(4) # doctest:+ELLIPSIS
-(4.0...,0.0...)
->>> et.predictPosition(1) # doctest:+ELLIPSIS
-(1.0...,0.0...)
-
->>> et = PredictedTrajectoryConstant(moving.Point(0,0), moving.Point(1,0), moving.NormAngle(0.1,0), maxSpeed = 2)
->>> et.predictPosition(10) # doctest:+ELLIPSIS
-(15.5...,0.0...)
->>> et.predictPosition(11) # doctest:+ELLIPSIS
-(17.5...,0.0...)
->>> et.predictPosition(12) # doctest:+ELLIPSIS
-(19.5...,0.0...)
-
->>> import random
->>> acceleration = lambda: random.uniform(-0.5,0.5)
->>> steering = lambda: random.uniform(-0.1,0.1)
->>> et = PredictedTrajectoryRandomControl(moving.Point(0,0),moving.Point(1,1), acceleration, steering, maxSpeed = 2)
->>> p = et.predictPosition(500)
->>> from numpy import max
->>> max(et.getPredictedSpeeds()) <= 2.
-True
-
->>> p = moving.Point(3,4)
->>> sp = SafetyPoint(p, 0.1, 0)
->>> print(sp)
-3 4 0.1 0
-
->>> et1 = PredictedTrajectoryConstant(moving.Point(-5.,0.), moving.Point(1.,0.))
->>> et2 = PredictedTrajectoryConstant(moving.Point(0.,-5.), moving.Point(0.,1.))
->>> collision, t, cp1, cp2 = computeCollisionTime(et1, et2, 0.1, 10)
->>> collision
-True
->>> t
-5
->>> collision, t, cp1, cp2 = computeCollisionTime(et1, et2, 0.1, 5)
->>> collision
-True
->>> t
-5
->>> collision, t, cp1, cp2 = computeCollisionTime(et1, et2, 0.1, 4)
->>> collision
-False
--- a/python/tests/storage.txt	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,92 +0,0 @@
->>> from storage import *
->>> from StringIO import StringIO
->>> from moving import MovingObject, Point, TimeInterval, Trajectory, prepareSplines
-
->>> f = openCheck('non_existant_file.txt')
-File non_existant_file.txt could not be opened.
-
->>> nonexistentFilename = "nonexistent"
->>> loadPrototypeMatchIndexesFromSqlite(nonexistentFilename)
-DB Error: no such table: prototypes
-[]
->>> loadTrajectoriesFromSqlite(nonexistentFilename, 'feature')
-DB Error: no such table: positions
-DB Error: no such table: velocities
-[]
->>> from os import remove
->>> remove(nonexistentFilename)
-
->>> o1 = MovingObject.generate(Point(0.,0.), Point(1.,0.), TimeInterval(0,10))
->>> o1.num = 2
->>> o2 = MovingObject.generate(Point(1.,1.), Point(-0.5,-0.2), TimeInterval(0,9))
->>> o2.num = 3
->>> saveTrajectoriesToSqlite('test.sqlite', [o1, o2], 'feature')
->>> objects = loadTrajectoriesFromSqlite('test.sqlite', 'feature')
->>> objects[0].getNum() == o1.num
-True
->>> objects[1].getNum() == o2.num
-True
->>> o1.getTimeInterval() == objects[0].getTimeInterval()
-True
->>> o2.getTimeInterval() == objects[1].getTimeInterval()
-True
->>> o1.getVelocities() == objects[0].getVelocities()
-True
->>> o2.getVelocities() == objects[1].getVelocities()
-True
->>> o1.getPositions() == objects[0].getPositions()
-True
->>> o2.getPositions() == objects[1].getPositions()
-True
->>> align1 = Trajectory.fromPointList([Point(-1, 0), Point(20, 0)])
->>> align2 = Trajectory.fromPointList([Point(-9, -3), Point(6, 3)])
->>> align1.computeCumulativeDistances()
->>> align2.computeCumulativeDistances()
->>> prepareSplines([align1, align2])
->>> o1.projectCurvilinear([align1, align2])
->>> o2.projectCurvilinear([align1, align2])
->>> saveTrajectoriesToSqlite('test.sqlite', [o1, o2], 'curvilinear')
->>> addCurvilinearTrajectoriesFromSqlite('test.sqlite', {o.num: o for o in objects})
->>> o1.curvilinearPositions[3][:2] == objects[0].curvilinearPositions[3][:2]
-True
->>> o1.curvilinearPositions[7][:2] == objects[0].curvilinearPositions[7][:2]
-True
->>> [str(l) for l in o1.curvilinearPositions.getLanes()] == objects[0].curvilinearPositions.getLanes()
-True
->>> o2.curvilinearPositions[2][:2] == objects[1].curvilinearPositions[2][:2]
-True
->>> o2.curvilinearPositions[6][:2] == objects[1].curvilinearPositions[6][:2]
-True
->>> [str(l) for l in o2.curvilinearPositions.getLanes()] == objects[1].curvilinearPositions.getLanes()
-True
->>> remove('test.sqlite')
-
->>> strio = StringIO('# asdlfjasdlkj0\nsadlkfjsdlakjf')
->>> readline(strio)
-'sadlkfjsdlakjf'
->>> strio = StringIO('# asdlfjasdlkj0\nsadlkfjsdlakjf')
->>> readline(strio, ['#'])
-'sadlkfjsdlakjf'
->>> strio = StringIO('# asdlfjasdlkj0\nsadlkfjsdlakjf')
->>> readline(strio, ['%'])
-'# asdlfjasdlkj0'
->>> strio = StringIO('# asdlfjasdlkj0\nsadlkfjsdlakjf')
->>> readline(strio, '%*$')
-'# asdlfjasdlkj0'
->>> readline(strio, '%#')
-'sadlkfjsdlakjf'
-
->>> from sklearn.mixture import GMM
->>> from numpy.random import random_sample
->>> nPoints = 50
->>> points = random_sample(nPoints*2).reshape(nPoints,2)
->>> gmm = GMM(4, covariance_type = 'full')
->>> tmp = gmm.fit(points)
->>> id = 0
->>> savePOIs('pois-tmp.sqlite', gmm, 'end', id)
->>> reloadedGmm = loadPOIs('pois-tmp.sqlite')
->>> sum(gmm.predict(points) == reloadedGmm[id].predict(points)) == nPoints
-True
->>> reloadedGmm[id].gmmTypes[0] == 'end'
-True
->>> remove('pois-tmp.sqlite')
--- a/python/tests/tutorials.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-import unittest
-
-class TestNGSIM(unittest.TestCase):
-    'Tutorial example for NGSIM data'
-
-    def test_ex1(self):
-        import storage
-        objects = storage.loadTrajectoriesFromNgsimFile('../samples/trajectories-0400-0415.txt',100)
-        for o in objects: o.plot()
-
-class TestTrajectoryLoading(unittest.TestCase):
-    'Tutorial example for NGSIM data'
-
-    def test_ex1(self):
-        import storage
-        objects = storage.loadTrajectoriesFromSqlite('../samples/laurier.sqlite', 'object')
-
-        speed = objects[0].getVelocityAtInstant(10).norm2()
-        timeInterval = objects[0].getTimeInterval()
-        speeds = [objects[0].getVelocityAtInstant(t).norm2() for t in range(timeInterval.first, timeInterval.last)]
-        speeds = [v.norm2() for v in objects[0].getVelocities()]
-
-        from matplotlib.pyplot import plot, close, axis
-        plot(range(timeInterval.first, timeInterval.last+1), speeds)
-
-        close('all')
-        objects[0].plot()
-        axis('equal')
-
-        features = storage.loadTrajectoriesFromSqlite('../samples/laurier.sqlite', 'feature')
-        objects[0].setFeatures(features)
-
-        for f in objects[0].features:
-            f.plot()
-        axis('equal')
-
-
-if __name__ == '__main__':
-    unittest.main()
--- a/python/tests/utils.txt	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,143 +0,0 @@
->>> from utils import *
->>> from moving import Point
-
->>> upperCaseFirstLetter('mmmm... donuts')
-'Mmmm... Donuts'
->>> s = upperCaseFirstLetter('much ado about nothing')
->>> s == 'Much Ado About Nothing'
-True
->>> upperCaseFirstLetter(s) == s
-True
-
->>> computeChi2([],[])
-0.0
->>> computeChi2(range(1,10),range(1,10))
-0.0
->>> computeChi2(range(1,9),range(1,10))
-0.0
-
->>> ceilDecimals(1.23, 0)
-2.0
->>> ceilDecimals(1.23, 1)
-1.3
-
->>> inBetween(1,2,1.5)
-True
->>> inBetween(2.1,1,1.5)
-True
->>> inBetween(1,2,0)
-False
-
->>> removeExtension('test-adfasdf.asdfa.txt')
-'test-adfasdf.asdfa'
->>> removeExtension('test-adfasdf')
-'test-adfasdf'
-
->>> values = line2Ints('1 2 3 5 6')
->>> values[0]
-1
->>> values[-1]
-6
->>> values = line2Floats('1.3 2.45 7.158e+01 5 6')
->>> values[0]
-1.3
->>> values[2] #doctest: +ELLIPSIS
-71.5...
->>> values[-1]
-6.0
-
->>> stepPlot([3, 5, 7, 8], 1, 10, 0)
-([1, 3, 3, 5, 5, 7, 7, 8, 8, 10], [0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
-
->>> mostCommon(['a','b','c','b'])
-'b'
->>> mostCommon(['a','b','c','b', 'c'])
-'b'
->>> mostCommon(range(10)+[1])
-1
->>> mostCommon([range(2), range(4), range(2)])
-[0, 1]
-
->>> res = sortByLength([range(3), range(4), range(1)])
->>> [len(r) for r in res]
-[1, 3, 4]
->>> res = sortByLength([range(3), range(4), range(1), range(5)], reverse = True)
->>> [len(r) for r in res]
-[5, 4, 3, 1]
-
->>> lcss = LCSS(similarityFunc = lambda x,y: abs(x-y) <= 0.1)
->>> lcss.compute(range(5), range(5))
-5
->>> lcss.compute(range(1,5), range(5))
-4
->>> lcss.compute(range(5,10), range(5))
-0
->>> lcss.compute(range(5), range(10))
-5
->>> lcss.similarityFunc = lambda x,y: x == y
->>> lcss.compute(['a','b','c'], ['a','b','c', 'd'])
-3
->>> lcss.computeNormalized(['a','b','c'], ['a','b','c', 'd']) #doctest: +ELLIPSIS
-1.0
->>> lcss.computeNormalized(['a','b','c','x'], ['a','b','c', 'd']) #doctest: +ELLIPSIS
-0.75
->>> lcss.compute(['a','b','c'], ['a','b','c', 'd'])
-3
->>> lcss.compute(['a','x','b','c'], ['a','b','c','d','x'])
-3
->>> lcss.compute(['a','b','c','x','d'], ['a','b','c','d','x'])
-4
->>> lcss.delta = 1
->>> lcss.compute(['a','b','c'], ['a','b','x','x','c'])
-2
-
->>> lcss.delta = float('inf')
->>> lcss.compute(['a','b','c'], ['a','b','c', 'd'], computeSubSequence = True)
-3
->>> lcss.subSequenceIndices
-[(0, 0), (1, 1), (2, 2)]
->>> lcss.compute(['a','b','c'], ['x','a','b','c'], computeSubSequence = True)
-3
->>> lcss.subSequenceIndices
-[(0, 1), (1, 2), (2, 3)]
->>> lcss.compute(['a','g','b','c'], ['a','b','c', 'd'], computeSubSequence = True)
-3
->>> lcss.subSequenceIndices
-[(0, 0), (2, 1), (3, 2)]
-
->>> alignedLcss = LCSS(lambda x,y:(abs(x-y) <= 0.1), delta = 2, aligned = True)
->>> alignedLcss.compute(range(5), range(5))
-5
->>> alignedLcss.compute(range(1,5), range(5))
-4
-
->>> alignedLcss.compute(range(5,10), range(10))
-5
-
->>> lcss.delta = 2
->>> lcss.compute(range(5,10), range(10))
-0
->>> alignedLcss.delta = 6
->>> alignedLcss.compute(range(5), range(5))
-5
->>> alignedLcss.compute(range(5), range(6))
-5
->>> lcss.delta = 10
->>> alignedLcss.compute(range(1,7), range(6))
-5
->>> lcss = LCSS(lambda x,y: x == y, delta = 2, aligned = True)
->>> lcss.compute(range(20), [2,4,6,7,8,9,11,13], True)
-8
->>> lcss.subSequenceIndices
-[(2, 0), (4, 1), (6, 2), (7, 3), (8, 4), (9, 5), (11, 6), (13, 7)]
-
->>> lcss = LCSS(metric = 'cityblock', epsilon = 0.1)
->>> lcss.compute([[i] for i in range(5)], [[i] for i in range(5)])
-5
->>> lcss.compute([[i] for i in range(1,5)], [[i] for i in range(5)])
-4
->>> lcss.compute([[i] for i in range(5,10)], [[i] for i in range(5)])
-0
->>> lcss.compute([[i] for i in range(5)], [[i] for i in range(10)])
-5
-
--- a/python/traffic_engineering.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,337 +0,0 @@
-#! /usr/bin/env python
-''' Traffic Engineering Tools and Examples'''
-
-import prediction
-
-from math import ceil
-
-
-#########################
-# Simulation
-#########################
-
-def generateTimeHeadways(meanTimeHeadway, simulationTime):
-    '''Generates the time headways between arrivals 
-    given the meanTimeHeadway and the negative exponential distribution
-    over a time interval of length simulationTime (assumed to be in same time unit as headway'''
-    from random import expovariate
-    headways = []
-    totalTime = 0
-    flow = 1/meanTimeHeadway
-    while totalTime < simulationTime:
-        h = expovariate(flow)
-        headways.append(h)
-        totalTime += h
-    return headways
-
-class RoadUser(object):
-    '''Simple example of inheritance to plot different road users '''
-    def __init__(self, position, velocity):
-        'Both fields are 2D numpy arrays'
-        self.position = position.astype(float)        
-        self.velocity = velocity.astype(float)
-
-    def move(self, deltaT):
-        self.position += deltaT*self.velocity
-
-    def draw(self, init = False):
-        from matplotlib.pyplot import plot
-        if init:
-            self.plotLine = plot(self.position[0], self.position[1], self.getDescriptor())[0]
-        else:
-            self.plotLine.set_data(self.position[0], self.position[1])
-
-
-class PassengerVehicle(RoadUser):
-    def getDescriptor(self):
-        return 'dr'
-
-class Pedestrian(RoadUser):
-    def getDescriptor(self):
-        return 'xb'
-
-class Cyclist(RoadUser):
-    def getDescriptor(self):
-        return 'og'
-
-#########################
-# queueing models
-#########################
-
-class CapacityReduction(object):
-    def __init__(self, beta, reductionDuration, demandCapacityRatio = None, demand = None, capacity = None):
-        '''reduction duration should be positive
-        demandCapacityRatio is demand/capacity (q/s)'''
-        if demandCapacityRatio is None and demand is None and capacity is None:
-            print('Missing too much information (demand, capacity and ratio)')
-            import sys
-            sys.exit()
-        if 0 <= beta < 1:
-            self.beta = beta
-            self.reductionDuration = reductionDuration
-
-            if demandCapacityRatio is not None:
-                self.demandCapacityRatio = demandCapacityRatio
-            if demand is not None:
-                self.demand = demand
-            if capacity is not None:
-                self.capacity = capacity
-            if capacity is not None and demand is not None:
-                self.demandCapacityRatio = float(self.demand)/self.capacity
-                if demand <= beta*capacity:
-                    print('There is no queueing as the demand {} is inferior to the reduced capacity {}'.format(demand, beta*capacity))
-        else:
-            print('reduction coefficient (beta={}) is not in [0, 1['.format(beta))
-
-    def queueingDuration(self):
-        return self.reductionDuration*(1-self.beta)/(1-self.demandCapacityRatio)
-
-    def nArrived(self, t):
-        if self.demand is None:
-            print('Missing demand field')
-            return None
-        return self.demand*t
-
-    def nServed(self, t):
-        if self.capacity is None:
-            print('Missing capacity field')
-            return None
-        if 0<=t<=self.reductionDuration:
-            return self.beta*self.capacity*t
-        elif self.reductionDuration < t <= self.queueingDuration():
-            return self.beta*self.capacity*self.reductionDuration+self.capacity*(t-self.reductionDuration)
-
-    def nQueued(self, t):
-        return self.nArrived(t)-self.nServed(t)
-
-    def maxNQueued(self):
-        return self.nQueued(self.reductionDuration)
-
-    def totalDelay(self):
-        if self.capacity is None:
-            print('Missing capacity field')
-            return None
-        return self.capacity*self.reductionDuration**2*(1-self.beta)*(self.demandCapacityRatio-self.beta)/(2*(1-self.demandCapacityRatio))
-    
-    def averageDelay(self):
-        return self.reductionDuration*(self.demandCapacityRatio-self.beta)/(2*self.demandCapacityRatio)
-
-    def averageNQueued(self):
-        return self.totalDelay()/self.queueingDuration()
-
-
-#########################
-# fundamental diagram
-#########################
-
-class FundamentalDiagram(object):
-    ''' '''
-    def __init__(self, name):
-        self.name = name
-
-    def q(self, k):
-        return k*self.v(k)
-
-    @staticmethod
-    def meanHeadway(k):
-        return 1/k
-    
-    @staticmethod
-    def meanSpacing(q):
-        return 1/q
-
-    def plotVK(self, language='fr', units={}):
-        from numpy import arange
-        from matplotlib.pyplot import figure,plot,xlabel,ylabel
-        densities = [k for k in arange(1, self.kj+1)]
-        figure()
-        plot(densities, [self.v(k) for k in densities])
-        xlabel('Densite (veh/km)') # todo other languages and adapt to units
-        ylabel('Vitesse (km/h)')
-
-    def plotQK(self, language='fr', units={}):
-        from numpy import arange
-        from matplotlib.pyplot import figure,plot,xlabel,ylabel
-        densities = [k for k in arange(1, self.kj+1)]
-        figure()
-        plot(densities, [self.q(k) for k in densities])
-        xlabel('Densite (veh/km)') # todo other languages and adapt to units
-        ylabel('Debit (km/h)')
-
-class GreenbergFD(FundamentalDiagram):
-    '''Speed is the logarithm of density'''
-    def __init__(self, vc, kj):
-        FundamentalDiagram.__init__(self,'Greenberg')
-        self.vc=vc
-        self.kj=kj
-    
-    def v(self,k):
-        from numpy import log
-        return self.vc*log(self.kj/k)
-
-    def criticalDensity(self): 
-        from numpy import e
-        self.kc = self.kj/e
-        return self.kc
-
-    def capacity(self):
-        self.qmax = self.kc*self.vc
-        return self.qmax
-
-#########################
-# intersection
-#########################
-
-class FourWayIntersection(object):
-    '''Simple class for simple intersection outline'''
-    def __init__(self, dimension, coordX, coordY):
-        self.dimension = dimension
-        self.coordX = coordX
-        self.coordY = coordY
-
-    def plot(self, options = 'k'):
-        from matplotlib.pyplot import plot, axis
-    
-        minX = min(self.dimension[0])
-        maxX = max(self.dimension[0])
-        minY = min(self.dimension[1])
-        maxY = max(self.dimension[1])
-        
-        plot([minX, self.coordX[0], self.coordX[0]], [self.coordY[0], self.coordY[0], minY],options)
-        plot([self.coordX[1], self.coordX[1], maxX], [minY, self.coordY[0], self.coordY[0]],options)
-        plot([minX, self.coordX[0], self.coordX[0]], [self.coordY[1], self.coordY[1], maxY],options)
-        plot([self.coordX[1], self.coordX[1], maxX], [maxY, self.coordY[1], self.coordY[1]],options)
-        axis('equal')
-
-#########################
-# traffic signals
-#########################
-
-class Volume(object):
-    '''Class to represent volumes with varied vehicule types '''
-    def __init__(self, volume, types = ['pc'], proportions = [1], equivalents = [1], nLanes = 1):
-        '''mvtEquivalent is the equivalent if the movement is right of left turn'''
-
-        # check the sizes of the lists
-        if sum(proportions) == 1:
-            self.volume = volume
-            self.types = types
-            self.proportions = proportions
-            self.equivalents = equivalents
-            self.nLanes = nLanes
-        else:
-            print('Proportions do not sum to 1')
-            pass
-
-    def checkProtected(self, opposedThroughMvt):
-        '''Checks if this left movement should be protected,
-        ie if one of the main two conditions on left turn is verified'''
-        return self.volume >= 200 or self.volume*opposedThroughMvt.volume/opposedThroughMvt.nLanes > 50000
-
-    def getPCUVolume(self):
-        '''Returns the passenger-car equivalent for the input volume'''
-        v = 0
-        for p, e in zip(self.proportions, self.equivalents):
-            v += p*e
-        return v*self.volume
-
-class IntersectionMovement(object):
-    '''Represents an intersection movement
-    with a volume, a type (through, left or right)
-    and an equivalent for movement type'''
-    def __init__(self, volume, mvtEquivalent = 1):
-        self.volume = volume
-        self.mvtEquivalent = mvtEquivalent
-
-    def getTVUVolume(self):
-        return self.mvtEquivalent*self.volume.getPCUVolume()    
-
-class LaneGroup(object):
-    '''Class that represents a group of mouvements'''
-
-    def __init__(self, movements, nLanes):
-        self.movements = movements
-        self.nLanes = nLanes
-
-    def getTVUVolume(self):
-        return sum([mvt.getTVUVolume() for mvt in self.movements])
-
-    def getCharge(self, saturationVolume):
-        return self.getTVUVolume()/(self.nLanes*saturationVolume)
-
-def optimalCycle(lostTime, criticalCharge):
-    return (1.5*lostTime+5)/(1-criticalCharge)
-
-def minimumCycle(lostTime, criticalCharge, degreeSaturation=1.):
-    'degree of saturation can be used as the peak hour factor too'
-    return lostTime/(1-criticalCharge/degreeSaturation)
-
-class Cycle(object):
-    '''Class to compute optimal cycle and the split of effective green times'''
-    def __init__(self, phases, lostTime, saturationVolume):
-        '''phases is a list of phases
-        a phase is a list of lanegroups'''
-        self.phases = phases
-        self.lostTime = lostTime
-        self.saturationVolume = saturationVolume
-
-    def computeCriticalCharges(self):
-        self.criticalCharges = [max([lg.getCharge(self.saturationVolume) for lg in phase]) for phase in self.phases]
-        self.criticalCharge = sum(self.criticalCharges)
-        
-    def computeOptimalCycle(self):
-        self.computeCriticalCharges()
-        self.C = optimalCycle(self.lostTime, self.criticalCharge)
-        return self.C
-
-    def computeMinimumCycle(self, degreeSaturation=1.):
-        self.computeCriticalCharges()
-        self.C = minimumCycle(self.lostTime, self.criticalCharge, degreeSaturation)
-        return self.C
-
-    def computeEffectiveGreen(self):
-        #from numpy import round
-        #self.computeCycle() # in case it was not done before
-        effectiveGreenTime = self.C-self.lostTime
-        self.effectiveGreens = [round(c*effectiveGreenTime/self.criticalCharge,1) for c in self.criticalCharges]
-        return self.effectiveGreens
-
-
-def computeInterGreen(perceptionReactionTime, initialSpeed, intersectionLength, vehicleAverageLength = 6, deceleration = 3):
-    '''Computes the intergreen time (yellow/amber plus all red time)
-    Deceleration is positive
-    All variables should be in the same units'''
-    if deceleration > 0:
-        return [perceptionReactionTime+float(initialSpeed)/(2*deceleration), float(intersectionLength+vehicleAverageLength)/initialSpeed]
-    else:
-        print 'Issue deceleration should be strictly positive'
-        return None
-
-def uniformDelay(cycleLength, effectiveGreen, saturationDegree):
-    '''Computes the uniform delay'''
-    return 0.5*cycleLength*(1-float(effectiveGreen)/cycleLength)**2/(1-float(effectiveGreen*saturationDegree)/cycleLength)
-
-def randomDelay(volume, saturationDegree):
-    '''Computes the random delay = queueing time for M/D/1'''
-    return saturationDegree**2/(2*volume*(1-saturationDegree))
-
-def incrementalDelay(T, X, c, k=0.5, I=1):
-    '''Computes the incremental delay (HCM)
-    T in hours
-    c capacity of the lane group
-    k default for fixed time signal
-    I=1 for isolated intersection (Poisson arrival)'''
-    from math import sqrt
-    return 900*T*(X - 1 + sqrt((X - 1)**2 + 8*k*I*X/(c*T)))
-
-#########################
-# misc
-#########################
-
-def timeChangingSpeed(v0, vf, a, TPR):
-    'for decelerations, a < 0'
-    return TPR-(vf-v0)/a
-
-def distanceChangingSpeed(v0, vf, a, TPR):
-    'for decelerations, a < 0'
-    return TPR*v0-(vf**2-v0**2)/(2*a)
--- a/python/ubc_utils.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,226 +0,0 @@
-#! /usr/bin/env python
-'''Various utilities to load data saved by the UBC tool(s)'''
-
-import utils, events, storage
-from moving import MovingObject, TimeInterval, Trajectory
-
-
-fileTypeNames = ['feature',
-                 'object',
-                 'prototype',
-                 'contoursequence']
-
-severityIndicatorNames = ['Distance',
-                          'Collision Course Cosine',
-                          'Velocity Cosine',
-                          'Speed Differential',
-                          'Collision Probability',
-                          'Severity Index',
-                          'Time to Collision']
-
-userTypeNames = ['car',
-                 'pedestrian',
-                 'twowheels',
-                 'bus',
-                 'truck']
-
-# severityIndicator = {'Distance': 0,
-#                      'Cosine': 1,
-#                      'Velocity Cosine': 2,
-#                      'Speed Differential': 3,
-#                      'Collision Probability': 4,
-#                      'Severity Index': 5,
-#                      'TTC': 6}
-
-mostSevereIsMax = [False, 
-                   False, 
-                   True, 
-                   True, 
-                   True, 
-                   True, 
-                   False]
-
-ignoredValue = [None, None, None, None, None, None, -1]
-
-def getFileType(s):
-    'Finds the type in fileTypeNames'
-    for fileType in fileTypeNames:
-        if s.find(fileType)>0:
-            return fileType
-    return ''
-
-def isFileType(s, fileType):
-    return (s.find(fileType)>0)
-
-def saveTrajectoryUserTypes(inFilename, outFilename, objects):
-    '''The program saves the objects, 
-    by just copying the corresponding trajectory and velocity data
-    from the inFilename, and saving the characteristics in objects (first line)
-    into outFilename'''
-    infile = storage.openCheck(inFilename)
-    outfile = storage.openCheck(outFilename,'w')
-
-    if (inFilename.find('features') >= 0) or (not infile) or (not outfile):
-        return
-
-    lines = storage.getLines(infile)
-    objNum = 0 # in inFilename
-    while lines != []:
-        # find object in objects (index i)
-        i = 0
-        while (i<len(objects)) and (objects[i].num != objNum):
-            i+=1
-
-        if i<len(objects):
-            l = lines[0].split(' ')
-            l[3] = str(objects[i].userType)
-            outfile.write(' '.join(l)+'\n')
-            for l in lines[1:]:
-                outfile.write(l+'\n')
-            outfile.write(utils.delimiterChar+'\n')
-        # next object
-        objNum += 1
-        lines = storage.getLines(infile)
-
-    print('read {0} objects'.format(objNum))
-
-def modifyTrajectoryFile(modifyLines, filenameIn, filenameOut):
-    '''Reads filenameIn, replaces the lines with the result of modifyLines and writes the result in filenameOut'''
-    fileIn = storage.openCheck(filenameIn, 'r', True)
-    fileOut = storage.openCheck(filenameOut, "w", True)
-
-    lines = storage.getLines(fileIn)
-    trajNum = 0
-    while (lines != []):
-        modifiedLines = modifyLines(trajNum, lines)
-        if modifiedLines:
-            for l in modifiedLines:
-                fileOut.write(l+"\n")
-            fileOut.write(utils.delimiterChar+"\n")
-        lines = storage.getLines(fileIn)
-        trajNum += 1
-         
-    fileIn.close()
-    fileOut.close()
-
-def copyTrajectoryFile(keepTrajectory, filenameIn, filenameOut):
-    '''Reads filenameIn, keeps the trajectories for which the function keepTrajectory(trajNum, lines) is True
-    and writes the result in filenameOut'''
-    fileIn = storage.openCheck(filenameIn, 'r', True)
-    fileOut = storage.openCheck(filenameOut, "w", True)
-
-    lines = storage.getLines(fileIn)
-    trajNum = 0
-    while (lines != []):
-        if keepTrajectory(trajNum, lines):
-            for l in lines:
-                fileOut.write(l+"\n")
-            fileOut.write(utils.delimiterChar+"\n")
-        lines = storage.getLines(fileIn)
-        trajNum += 1
-        
-    fileIn.close()
-    fileOut.close()
-
-def loadTrajectories(filename, nObjects = -1):
-    '''Loads trajectories'''
-
-    file = storage.openCheck(filename)
-    if (not file):
-        return []
-
-    objects = []
-    objNum = 0
-    objectType = getFileType(filename)
-    lines = storage.getLines(file)
-    while (lines != []) and ((nObjects<0) or (objNum<nObjects)):
-        l = lines[0].split(' ')
-        parsedLine = [int(n) for n in l[:4]]
-        obj = MovingObject(num = objNum, timeInterval = TimeInterval(parsedLine[1],parsedLine[2]))
-        #add = True
-        if len(lines) >= 3:
-            obj.positions = Trajectory.load(lines[1], lines[2])
-            if len(lines) >= 5:
-                obj.velocities = Trajectory.load(lines[3], lines[4])
-                if objectType == 'object':
-                    obj.userType = parsedLine[3]
-                    obj.nObjects = float(l[4])
-                    obj.featureNumbers = [int(n) for n in l[5:]]
-                    
-                    # load contour data if available
-                    if len(lines) >= 6:
-                        obj.contourType = utils.line2Floats(lines[6])
-                        obj.contourOrigins = Trajectory.load(lines[7], lines[8])
-                        obj.contourSizes = Trajectory.load(lines[9], lines[10])
-                elif objectType == 'prototype':
-                    obj.userType = parsedLine[3]
-                    obj.nMatchings = int(l[4])
-
-        if len(lines) != 2:
-            objects.append(obj)
-            objNum+=1
-        else:
-            print("Error two lines of data for feature %d"%(f.num))
-
-        lines = storage.getLines(file)
-
-    file.close()
-    return objects
-   
-def getFeatureNumbers(objects):
-    featureNumbers=[]
-    for o in objects:
-        featureNumbers += o.featureNumbers
-    return featureNumbers
-
-def loadInteractions(filename, nInteractions = -1):
-    'Loads interactions from the old UBC traffic event format'
-    from events import Interaction 
-    from indicators import SeverityIndicator
-    file = storage.openCheck(filename)
-    if (not file):
-        return []
-
-    interactions = []
-    interactionNum = 0
-    lines = storage.getLines(file)
-    while (lines != []) and ((nInteractions<0) or (interactionNum<nInteractions)):
-        parsedLine = [int(n) for n in lines[0].split(' ')]
-        inter = Interaction(interactionNum, TimeInterval(parsedLine[1],parsedLine[2]), parsedLine[3], parsedLine[4], categoryNum = parsedLine[5])
-        
-        indicatorFrameNums = [int(n) for n in lines[1].split(' ')]
-        for indicatorNum,line in enumerate(lines[2:]):
-            values = {}
-            for i,v in enumerate([float(n) for n in line.split(' ')]):
-                if not ignoredValue[indicatorNum] or v != ignoredValue[indicatorNum]:
-                    values[indicatorFrameNums[i]] = v
-            inter.addIndicator(SeverityIndicator(severityIndicatorNames[indicatorNum], values, None, mostSevereIsMax[indicatorNum]))
-
-        interactions.append(inter)
-        interactionNum+=1
-        lines = storage.getLines(file)
-
-    file.close()
-    return interactions
-
-def loadCollisionPoints(filename, nPoints = -1):
-    '''Loads collision points and returns a dict
-    with keys as a pair of the numbers of the two interacting objects'''
-    file = storage.openCheck(filename)
-    if (not file):
-        return []
-
-    points = {}
-    num = 0
-    lines = storage.getLines(file)
-    while (lines != []) and ((nPoints<0) or (num<nPoints)):
-        parsedLine = [int(n) for n in lines[0].split(' ')]
-        protagonistNums = (parsedLine[0], parsedLine[1])
-        points[protagonistNums] = [[float(n) for n in lines[1].split(' ')],
-                                   [float(n) for n in lines[2].split(' ')]]
-
-        num+=1
-        lines = storage.getLines(file)
-
-    file.close()
-    return points
--- a/python/utils.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1016 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-''' Generic utilities.'''
-
-import matplotlib.pyplot as plt
-from datetime import time, datetime
-from math import sqrt, ceil, floor
-from scipy.stats import kruskal, shapiro
-from scipy.spatial import distance
-from numpy import zeros, array, exp, sum as npsum, int as npint, arange, cumsum, median, isnan, ones, convolve,  dtype, isnan, NaN, mean, ma, isinf
-
-
-datetimeFormat = "%Y-%m-%d %H:%M:%S"
-
-#########################
-# Strings
-#########################
-
-def upperCaseFirstLetter(s):
-    words = s.split(' ')
-    lowerWords = [w[0].upper()+w[1:].lower() for w in words]
-    return ' '.join(lowerWords)
-
-#########################
-# Enumerations
-#########################
-
-def inverseEnumeration(l):
-    'Returns the dictionary that provides for each element in the input list its index in the input list'
-    result = {}
-    for i,x in enumerate(l):
-        result[x] = i
-    return result
-
-#########################
-# Simple statistics
-#########################
-
-def logNormalMeanVar(loc, scale):
-    '''location and scale are respectively the mean and standard deviation of the normal in the log-normal distribution
-    https://en.wikipedia.org/wiki/Log-normal_distribution'''
-    mean = exp(loc+(scale**2)/2)
-    var = (exp(loc**2)-1)*exp(2*loc+scale**2)
-    return mean, var
-
-def sampleSize(stdev, tolerance, percentConfidence, printLatex = False):
-    from scipy.stats.distributions import norm
-    k = round(norm.ppf(0.5+percentConfidence/200., 0, 1)*100)/100. # 1.-(100-percentConfidence)/200.
-    if printLatex:
-        print('${0}^2\\frac{{{1}^2}}{{{2}^2}}$'.format(k, stdev, tolerance))
-    return (k*stdev/tolerance)**2
-
-def confidenceInterval(mean, stdev, nSamples, percentConfidence, trueStd = True, printLatex = False):
-    '''if trueStd, use normal distribution, otherwise, Student
-
-    Use otherwise t.interval or norm.interval
-    ex: norm.interval(0.95, loc = 0., scale = 2.3/sqrt(11))
-    t.interval(0.95, 10, loc=1.2, scale = 2.3/sqrt(nSamples))
-    loc is mean, scale is sigma/sqrt(n) (for Student, 10 is df)'''
-    from scipy.stats.distributions import norm, t
-    if trueStd:
-        k = round(norm.ppf(0.5+percentConfidence/200., 0, 1)*100)/100. # 1.-(100-percentConfidence)/200.
-    else: # use Student
-         k = round(t.ppf(0.5+percentConfidence/200., nSamples-1)*100)/100.
-    e = k*stdev/sqrt(nSamples)
-    if printLatex:
-        print('${0} \pm {1}\\frac{{{2}}}{{\sqrt{{{3}}}}}$'.format(mean, k, stdev, nSamples))
-    return mean-e, mean+e
-
-def computeChi2(expected, observed):
-    '''Returns the Chi2 statistics'''
-    result = 0.
-    for e, o in zip(expected, observed):
-        result += ((e-o)*(e-o))/e
-    return result
-
-class DistributionSample(object):
-    def nSamples(self):
-        return sum(self.counts)
-
-def cumulativeDensityFunction(sample, normalized = False):
-    '''Returns the cumulative density function of the sample of a random variable'''
-    xaxis = sorted(sample)
-    counts = arange(1,len(sample)+1) # dtype = float
-    if normalized:
-        counts /= float(len(sample))
-    return xaxis, counts
-
-class DiscreteDistributionSample(DistributionSample):
-    '''Class to represent a sample of a distribution for a discrete random variable'''
-    def __init__(self, categories, counts):
-        self.categories = categories
-        self.counts = counts
-
-    def mean(self):
-        result = [float(x*y) for x,y in zip(self.categories, self.counts)]
-        return npsum(result)/self.nSamples()
-
-    def var(self, mean = None):
-        if not mean:
-            m = self.mean()
-        else:
-            m = mean
-        result = 0.
-        squares = [float((x-m)*(x-m)*y) for x,y in zip(self.categories, self.counts)]
-        return npsum(squares)/(self.nSamples()-1)
-
-    def referenceCounts(self, probability):
-        '''probability is a function that returns the probability of the random variable for the category values'''
-        refProba = [probability(c) for c in self.categories]
-        refProba[-1] = 1-npsum(refProba[:-1])
-        refCounts = [r*self.nSamples() for r in refProba]
-        return refCounts, refProba
-
-class ContinuousDistributionSample(DistributionSample):
-    '''Class to represent a sample of a distribution for a continuous random variable
-    with the number of observations for each interval
-    intervals (categories variable) are defined by their left limits, the last one being the right limit
-    categories contain therefore one more element than the counts'''
-    def __init__(self, categories, counts):
-        # todo add samples for initialization and everything to None? (or setSamples?)
-        self.categories = categories
-        self.counts = counts
-
-    @staticmethod
-    def generate(sample, categories):
-        if min(sample) < min(categories):
-            print('Sample has lower min than proposed categories ({}, {})'.format(min(sample), min(categories)))
-        if max(sample) > max(categories):
-            print('Sample has higher max than proposed categories ({}, {})'.format(max(sample), max(categories)))
-        dist = ContinuousDistributionSample(sorted(categories), [0]*(len(categories)-1))
-        for s in sample:
-            i = 0
-            while  i<len(dist.categories) and dist.categories[i] <= s:
-                i += 1
-            if i <= len(dist.counts):
-                dist.counts[i-1] += 1
-                #print('{} in {} {}'.format(s, dist.categories[i-1], dist.categories[i]))
-            else:
-                print('Element {} is not in the categories'.format(s))
-        return dist
-
-    def mean(self):
-        result = 0.
-        for i in range(len(self.counts)-1):
-            result += self.counts[i]*(self.categories[i]+self.categories[i+1])/2
-        return result/self.nSamples()
-
-    def var(self, mean = None):
-        if not mean:
-            m = self.mean()
-        else:
-            m = mean
-        result = 0.
-        for i in range(len(self.counts)-1):
-            mid = (self.categories[i]+self.categories[i+1])/2
-            result += self.counts[i]*(mid - m)*(mid - m)
-        return result/(self.nSamples()-1)
-
-    def referenceCounts(self, cdf):
-        '''cdf is a cumulative distribution function
-        returning the probability of the variable being less that x'''
-        # refCumulativeCounts = [0]#[cdf(self.categories[0][0])]
-#         for inter in self.categories:
-#             refCumulativeCounts.append(cdf(inter[1]))
-        refCumulativeCounts = [cdf(x) for x in self.categories[1:-1]]
-
-        refProba = [refCumulativeCounts[0]]
-        for i in xrange(1,len(refCumulativeCounts)):
-            refProba.append(refCumulativeCounts[i]-refCumulativeCounts[i-1])
-        refProba.append(1-refCumulativeCounts[-1])
-        refCounts = [p*self.nSamples() for p in refProba]
-        
-        return refCounts, refProba
-
-    def printReferenceCounts(self, refCounts=None):
-        if refCounts:
-            ref = refCounts
-        else:
-            ref = self.referenceCounts
-        for i in xrange(len(ref[0])):
-            print('{0}-{1} & {2:0.3} & {3:0.3} \\\\'.format(self.categories[i],self.categories[i+1],ref[1][i], ref[0][i]))
-
-
-#########################
-# maths section
-#########################
-
-# def kernelSmoothing(sampleX, X, Y, weightFunc, halfwidth):
-#     '''Returns a smoothed weighted version of Y at the predefined values of sampleX
-#     Sum_x weight(sample_x,x) * y(x)'''
-#     from numpy import zeros, array
-#     smoothed = zeros(len(sampleX))
-#     for i,x in enumerate(sampleX):
-#         weights = array([weightFunc(x,xx, halfwidth) for xx in X])
-#         if sum(weights)>0:
-#             smoothed[i] = sum(weights*Y)/sum(weights)
-#         else:
-#             smoothed[i] = 0
-#     return smoothed
-
-def kernelSmoothing(x, X, Y, weightFunc, halfwidth):
-    '''Returns the smoothed estimate of (X,Y) at x
-    Sum_x weight(sample_x,x) * y(x)'''
-    weights = array([weightFunc(x,observedx, halfwidth) for observedx in X])
-    if sum(weights)>0:
-        return sum(weights*Y)/sum(weights)
-    else:
-        return 0
-
-def uniform(center, x, halfwidth):
-    if abs(center-x)<halfwidth:
-        return 1.
-    else:
-        return 0.
-
-def gaussian(center, x, halfwidth):
-    return exp(-((center-x)/halfwidth)**2/2)
-
-def epanechnikov(center, x, halfwidth):
-    diff = abs(center-x)
-    if diff<halfwidth:
-        return 1.-(diff/halfwidth)**2
-    else:
-        return 0.
-    
-def triangular(center, x, halfwidth):
-    diff = abs(center-x)
-    if diff<halfwidth:
-        return 1.-abs(diff/halfwidth)
-    else:
-        return 0.
-
-def medianSmoothing(x, X, Y, halfwidth):
-    '''Returns the media of Y's corresponding to X's in the interval [x-halfwidth, x+halfwidth]'''
-    return median([y for observedx, y in zip(X,Y) if abs(x-observedx)<halfwidth])
-
-def argmaxDict(d):
-    return max(d, key=d.get)
-
-def framesToTime(nFrames, frameRate, initialTime = time()):
-    '''returns a datetime.time for the time in hour, minutes and seconds
-    initialTime is a datetime.time'''
-    seconds = int(floor(float(nFrames)/float(frameRate))+initialTime.hour*3600+initialTime.minute*60+initialTime.second)
-    h = int(floor(seconds/3600.))
-    seconds = seconds - h*3600
-    m = int(floor(seconds/60))
-    seconds = seconds - m*60
-    return time(h, m, seconds)
-
-def timeToFrames(t, frameRate):
-    return frameRate*(t.hour*3600+t.minute*60+t.second)
-
-def sortXY(X,Y):
-    'returns the sorted (x, Y(x)) sorted on X'
-    D = {}
-    for x, y in zip(X,Y):
-        D[x]=y
-    xsorted = sorted(D.keys())
-    return xsorted, [D[x] for x in xsorted]
-
-def compareLengthForSort(i, j):
-    if len(i) < len(j):
-        return -1
-    elif len(i) == len(j):
-        return 0
-    else:
-        return 1
-
-def sortByLength(instances, reverse = False):
-    '''Returns a new list with the instances sorted by length (method __len__)
-    reverse is passed to sorted'''
-    return sorted(instances, cmp = compareLengthForSort, reverse = reverse)
-
-def ceilDecimals(v, nDecimals):
-    '''Rounds the number at the nth decimal
-    eg 1.23 at 0 decimal is 2, at 1 decimal is 1.3'''
-    tens = 10**nDecimals
-    return ceil(v*tens)/tens
-
-def inBetween(bound1, bound2, x):
-    'useful if one does not know the order of bound1/bound2'
-    return bound1 <= x <= bound2 or bound2 <= x <= bound1
-
-def pointDistanceL2(x1,y1,x2,y2):
-    ''' Compute point-to-point distance (L2 norm, ie Euclidean distance)'''
-    return sqrt((x2-x1)**2+(y2-y1)**2)
-
-def crossProduct(l1, l2):
-    return l1[0]*l2[1]-l1[1]*l2[0]
-
-def cat_mvgavg(cat_list, halfWidth):
-    ''' Return a list of categories/values smoothed according to a window. 
-        halfWidth is the search radius on either side'''
-    from copy import deepcopy
-    smoothed = deepcopy(cat_list)
-    for point in range(len(cat_list)):
-        lower_bound_check = max(0,point-halfWidth)
-        upper_bound_check = min(len(cat_list)-1,point+halfWidth+1)
-        window_values = cat_list[lower_bound_check:upper_bound_check]
-        smoothed[point] = max(set(window_values), key=window_values.count)
-    return smoothed
-
-def filterMovingWindow(inputSignal, halfWidth):
-    '''Returns an array obtained after the smoothing of the input by a moving average
-    The first and last points are copied from the original.'''
-    width = float(halfWidth*2+1)
-    win = ones(width,'d')
-    result = convolve(win/width,array(inputSignal),'same')
-    result[:halfWidth] = inputSignal[:halfWidth]
-    result[-halfWidth:] = inputSignal[-halfWidth:]
-    return result
-
-def linearRegression(x, y, deg = 1, plotData = False):
-    '''returns the least square estimation of the linear regression of y = ax+b
-    as well as the plot'''
-    from numpy.lib.polynomial import polyfit
-    from numpy.core.multiarray import arange
-    coef = polyfit(x, y, deg)
-    if plotData:
-        def poly(x):
-            result = 0
-            for i in range(len(coef)):
-                result += coef[i]*x**(len(coef)-i-1)
-            return result
-        plt.plot(x, y, 'x')
-        xx = arange(min(x), max(x),(max(x)-min(x))/1000)
-        plt.plot(xx, [poly(z) for z in xx])
-    return coef
-
-def correlation(data, correlationMethod = 'pearson', plotFigure = False, displayNames = None, figureFilename = None):
-    '''Computes (and displays) the correlation matrix for a pandas DataFrame'''
-    columns = data.columns.tolist()
-    for var in data.columns:
-        uniqueValues = data[var].unique()
-        if len(uniqueValues) == 1 or data.dtypes[var] == dtype('O') or (len(uniqueValues) == 2 and len(data.loc[~isnan(data[var]), var].unique()) == 1): # last condition: only one other value than nan
-            columns.remove(var)
-    c=data[columns].corr(correlationMethod)
-    if plotFigure:
-        fig = plt.figure(figsize=(4+0.4*c.shape[0], 0.4*c.shape[0]))
-        fig.add_subplot(1,1,1)
-        #plt.imshow(np.fabs(c), interpolation='none')
-        plt.imshow(c, vmin=-1., vmax = 1., interpolation='none', cmap = 'RdYlBu_r') # coolwarm
-        colnames = [displayNames.get(s.strip(), s.strip()) for s in columns]
-        #correlation.plot_corr(c, xnames = colnames, normcolor=True, title = filename)
-        plt.xticks(range(len(colnames)), colnames, rotation=90)
-        plt.yticks(range(len(colnames)), colnames)
-        plt.tick_params('both', length=0)
-        plt.subplots_adjust(bottom = 0.29)
-        plt.colorbar()
-        plt.title('Correlation ({})'.format(correlationMethod))
-        plt.tight_layout()
-        if len(colnames) > 50:
-            plt.subplots_adjust(left=.06)
-        if figureFilename is not None:
-            plt.savefig(figureFilename, dpi = 150, transparent = True)
-    return c
-
-def addDummies(data, variables, allVariables = True):
-    '''Add binary dummy variables for each value of a nominal variable 
-    in a pandas DataFrame'''
-    newVariables = []
-    for var in variables:
-        if var in data.columns and data.dtypes[var] == dtype('O') and len(data[var].unique()) > 2:
-            values = data[var].unique()
-            if not allVariables:
-                values = values[:-1]
-            for val in values:
-                if val is not NaN:
-                    newVariable = (var+'_{}'.format(val)).replace('.','').replace(' ','').replace('-','')
-                    data[newVariable] = (data[var] == val)
-                    newVariables.append(newVariable)
-    return newVariables
-
-def kruskalWallis(data, dependentVariable, independentVariable, plotFigure = False, filenamePrefix = None, figureFileType = 'pdf', saveLatex = False, renameVariables = lambda s: s, kwCaption = u''):
-    '''Studies the influence of (nominal) independent variable over the dependent variable
-
-    Makes tests if the conditional distributions are normal
-    using the Shapiro-Wilk test (in which case ANOVA could be used)
-    Implements uses the non-parametric Kruskal Wallis test'''
-    tmp = data[data[independentVariable].notnull()]
-    independentVariableValues = sorted(tmp[independentVariable].unique().tolist())
-    if len(independentVariableValues) >= 2:
-        if saveLatex:
-            from storage import openCheck
-            out = openCheck(filenamePrefix+'-{}-{}.tex'.format(dependentVariable, independentVariable), 'w')
-        for x in independentVariableValues:
-            print('Shapiro-Wilk normality test for {} when {}={}: {} obs'.format(dependentVariable,independentVariable, x, len(tmp.loc[tmp[independentVariable] == x, dependentVariable])))
-            if len(tmp.loc[tmp[independentVariable] == x, dependentVariable]) >= 3:
-                print shapiro(tmp.loc[tmp[independentVariable] == x, dependentVariable])
-        if plotFigure:
-            plt.figure()
-            plt.boxplot([tmp.loc[tmp[independentVariable] == x, dependentVariable] for x in independentVariableValues])
-            #q25, q75 = tmp[dependentVariable].quantile([.25, .75])
-            #plt.ylim(ymax = q75+1.5*(q75-q25))
-            plt.xticks(range(1,len(independentVariableValues)+1), independentVariableValues)
-            plt.title('{} vs {}'.format(dependentVariable, independentVariable))
-            if filenamePrefix is not None:
-                plt.savefig(filenamePrefix+'-{}-{}.{}'.format(dependentVariable, independentVariable, figureFileType))
-        table = tmp.groupby([independentVariable])[dependentVariable].describe().unstack().sort(['50%'], ascending = False)
-        table['count'] = table['count'].astype(int)
-        testResult = kruskal(*[tmp.loc[tmp[independentVariable] == x, dependentVariable] for x in independentVariableValues])
-        if saveLatex:
-            out.write('\\begin{minipage}{\\linewidth}\n'
-                      +'\\centering\n'
-                      +'\\captionof{table}{'+(kwCaption.format(dependentVariable, independentVariable, *testResult))+'}\n'
-                      +table.to_latex(float_format = lambda x: '{:.3f}'.format(x)).encode('ascii')+'\n'
-                      +'\\end{minipage}\n'
-                      +'\\ \\vspace{0.5cm}\n')
-        else:
-            print table
-        return testResult
-    else:
-        return None
-
-def prepareRegression(data, dependentVariable, independentVariables, maxCorrelationThreshold, correlations, maxCorrelationP, correlationFunc, stdoutText = ['Removing {} (constant: {})', 'Removing {} (correlation {} with {})', 'Removing {} (no correlation: {}, p={})'], saveFiles = False, filenamePrefix = None, latexHeader = '', latexTable = None, latexFooter=''):
-    '''Removes variables from candidate independent variables if
-    - if two independent variables are correlated (> maxCorrelationThreshold), one is removed
-    - if an independent variable is not correlated with the dependent variable (p>maxCorrelationP)
-    Returns the remaining non-correlated variables, correlated with the dependent variable
-
-    correlationFunc is spearmanr or pearsonr from scipy.stats
-    text is the template to display for the two types of printout (see default): 3 elements if no saving to latex file, 8 otherwise
-
-    TODO: pass the dummies for nominal variables and remove if all dummies are correlated, or none is correlated with the dependentvariable'''    
-    from copy import copy
-    from pandas import DataFrame
-    result = copy(independentVariables)
-    table1 = ''
-    table2 = {}
-    # constant variables
-    for var in independentVariables:
-        uniqueValues = data[var].unique()
-        if (len(uniqueValues) == 1) or (len(uniqueValues) == 2 and uniqueValues.dtype != dtype('O') and len(data.loc[~isnan(data[var]), var].unique()) == 1):
-            print(stdoutText[0].format(var, uniqueValues))
-            if saveFiles:
-                table1 += latexTable[0].format(var, *uniqueValues)
-            result.remove(var)
-    # correlated variables
-    for v1 in copy(result):
-        if v1 in correlations.index:
-            for v2 in copy(result):
-                if v2 != v1 and v2 in correlations.index:
-                    if abs(correlations.loc[v1, v2]) > maxCorrelationThreshold:
-                        if v1 in result and v2 in result:
-                            if saveFiles:
-                                table1 += latexTable[1].format(v2, v1, correlations.loc[v1, v2])
-                            print(stdoutText[1].format(v2, v1, correlations.loc[v1, v2]))
-                            result.remove(v2)
-    # not correlated with dependent variable
-    table2['Correlations'] = []
-    table2['Valeurs p'] = []
-    for var in copy(result):
-        if data.dtypes[var] != dtype('O'):
-            cor, p = correlationFunc(data[dependentVariable], data[var])
-            if p > maxCorrelationP:
-                if saveFiles:
-                    table1 += latexTable[2].format(var, cor, p)
-                print(stdoutText[2].format(var, cor, p))
-                result.remove(var)
-            else:
-                table2['Correlations'].append(cor)
-                table2['Valeurs p'].append(p)
-
-    if saveFiles:
-        from storage import openCheck
-        out = openCheck(filenamePrefix+'-removed-variables.tex', 'w')
-        out.write(latexHeader)
-        out.write(table1)
-        out.write(latexFooter)
-        out.close()
-        out = openCheck(filenamePrefix+'-correlations.html', 'w')
-        table2['Variables'] = [var for var in result if data.dtypes[var] != dtype('O')]
-        out.write(DataFrame(table2)[['Variables', 'Correlations', 'Valeurs p']].to_html(formatters = {'Correlations': lambda x: '{:.2f}'.format(x), 'Valeurs p': lambda x: '{:.3f}'.format(x)}, index = False))
-        out.close()
-    return result
-
-
-#########################
-# regression analysis using statsmodels (and pandas)
-#########################
-
-# TODO make class for experiments?
-# TODO add tests with public dataset downloaded from Internet (IRIS et al)
-def modelString(experiment, dependentVariable, independentVariables):
-    return dependentVariable+' ~ '+' + '.join([independentVariable for independentVariable in independentVariables if experiment[independentVariable]])
-
-def runModel(experiment, data, dependentVariable, independentVariables, regressionType = 'ols'):
-    import statsmodels.formula.api as smf
-    modelStr = modelString(experiment, dependentVariable, independentVariables)
-    if regressionType == 'ols':
-        model = smf.ols(modelStr, data = data)
-    elif regressionType == 'gls':
-        model = smf.gls(modelStr, data = data)
-    elif regressionType == 'rlm':
-        model = smf.rlm(modelStr, data = data)
-    else:
-        print('Unknown regression type {}. Exiting'.format(regressionType))
-        import sys
-        sys.exit()
-    return model.fit()
-
-def runModels(experiments, data, dependentVariable, independentVariables, regressionType = 'ols'):
-    '''Runs several models and stores 3 statistics
-    adjusted R2, condition number (should be small, eg < 1000)
-    and p-value for Shapiro-Wilk test of residual normality'''
-    for i,experiment in experiments.iterrows():
-        if experiment[independentVariables].any():
-            results = runModel(experiment, data, dependentVariable, independentVariables, regressionType = 'ols')
-            experiments.loc[i,'r2adj'] = results.rsquared_adj
-            experiments.loc[i,'condNum'] = results.condition_number
-            experiments.loc[i, 'shapiroP'] = shapiro(results.resid)[1]
-            experiments.loc[i,'nobs'] = int(results.nobs)
-    return experiments
-
-def generateExperiments(independentVariables):
-    '''Generates all possible models for including or not each independent variable'''
-    from pandas import DataFrame
-    experiments = {}
-    nIndependentVariables = len(independentVariables)
-    if nIndependentVariables != len(set(independentVariables)):
-        print("Duplicate variables. Exiting")
-        import sys
-        sys.exit()
-    nModels = 2**nIndependentVariables
-    for i,var in enumerate(independentVariables):
-        pattern = [False]*(2**i)+[True]*(2**i)
-        experiments[var] = pattern*(2**(nIndependentVariables-i-1))
-    experiments = DataFrame(experiments)
-    experiments['r2adj'] = 0.
-    experiments['condNum'] = NaN
-    experiments['shapiroP'] = -1
-    experiments['nobs'] = -1
-    return experiments
-
-def findBestModel(data, dependentVariable, independentVariables, regressionType = 'ols', nProcesses = 1):
-    '''Generates all possible model with the independentVariables
-    and runs them, saving the results in experiments
-    with multiprocess option'''
-    from pandas import concat
-    from multiprocessing import Pool
-    experiments = generateExperiments(independentVariables)
-    nModels = len(experiments)
-    print("Running {} models with {} processes".format(nModels, nProcesses))
-    print("IndependentVariables: {}".format(independentVariables))
-    if nProcesses == 1:
-        return runModels(experiments, data, dependentVariable, independentVariables, regressionType)
-    else:
-        pool = Pool(processes = nProcesses)
-        chunkSize = int(ceil(nModels/nProcesses))
-        jobs = [pool.apply_async(runModels, args = (experiments[i*chunkSize:(i+1)*chunkSize], data, dependentVariable, independentVariables, regressionType)) for i in range(nProcesses)]
-        return concat([job.get() for job in jobs])
-
-def findBestModelFwd(data, dependentVariable, independentVariables, modelFunc, experiments = None):
-    '''Forward search for best model (based on adjusted R2)
-    Randomly starting with one variable and adding randomly variables 
-    if they improve the model
-    
-    The results are added to experiments if provided as argument
-    Storing in experiment relies on the index being the number equal 
-    to the binary code derived from the independent variables'''
-    from numpy.random import permutation as nppermutation
-    if experiments is None:
-        experiments = generateExperiments(independentVariables)
-    nIndependentVariables = len(independentVariables)
-    permutation = nppermutation(range(nIndependentVariables)).tolist()
-    variableMapping = {j: independentVariables[i] for i,j in enumerate(permutation)}
-    print('Tested variables '+', '.join([variableMapping[i] for i in xrange(nIndependentVariables)]))
-    bestModel = [False]*nIndependentVariables
-    currentVarNum = 0
-    currentR2Adj = 0.
-    for currentVarNum in xrange(nIndependentVariables):
-        currentModel = [i for i in bestModel]
-        currentModel[currentVarNum] = True
-        rowIdx = sum([0]+[2**i for i in xrange(nIndependentVariables) if currentModel[permutation[i]]])
-        #print currentVarNum, sum(currentModel), ', '.join([independentVariables[i] for i in xrange(nIndependentVariables) if currentModel[permutation[i]]])
-        if experiments.loc[rowIdx, 'shapiroP'] < 0:
-            modelStr = modelString(experiments.loc[rowIdx], dependentVariable, independentVariables)
-            model = modelFunc(modelStr, data = data)
-            results = model.fit()
-            experiments.loc[rowIdx, 'r2adj'] = results.rsquared_adj
-            experiments.loc[rowIdx, 'condNum'] = results.condition_number
-            experiments.loc[rowIdx, 'shapiroP'] = shapiro(results.resid)[1]
-            experiments.loc[rowIdx, 'nobs'] = int(results.nobs)
-        if currentR2Adj < experiments.loc[rowIdx, 'r2adj']:
-            currentR2Adj = experiments.loc[rowIdx, 'r2adj']
-            bestModel[currentVarNum] = True
-    return experiments
-
-def displayModelResults(results, model = None, plotFigures = True, filenamePrefix = None, figureFileType = 'pdf', text = {'title-shapiro': 'Shapiro-Wilk normality test for residuals: {:.2f} (p={:.3f})', 'true-predicted.xlabel': 'Predicted values', 'true-predicted.ylabel': 'True values', 'residuals-predicted.xlabel': 'Predicted values', 'residuals-predicted.ylabel': 'Residuals'}):
-    import statsmodels.api as sm
-    '''Displays some model results
-
-    3 graphics, true-predicted, residuals-predicted, '''
-    print(results.summary())
-    shapiroResult = shapiro(results.resid)
-    print(shapiroResult)
-    if plotFigures:
-        fig = plt.figure(figsize=(7,6.3*(2+int(model is not None))))
-        if model is not None:
-            ax = fig.add_subplot(3,1,1)
-            plt.plot(results.predict(), model.endog, 'x')
-            x=plt.xlim()
-            y=plt.ylim()
-            plt.plot([max(x[0], y[0]), min(x[1], y[1])], [max(x[0], y[0]), min(x[1], y[1])], 'r')
-            #plt.axis('equal')
-            if text is not None:
-                plt.title(text['title-shapiro'].format(*shapiroResult))
-                #plt.title(text['true-predicted.title'])
-                plt.xlabel(text['true-predicted.xlabel'])
-                plt.ylabel(text['true-predicted.ylabel'])
-            fig.add_subplot(3,1,2, sharex = ax)
-            plt.plot(results.predict(), results.resid, 'x')
-            nextSubplotNum = 3
-        else:
-            fig.add_subplot(2,1,1)
-            plt.plot(results.predict(), results.resid, 'x')
-            nextSubplotNum = 2
-        if text is not None:
-            if model is None:
-                plt.title(text['title-shapiro'].format(*shapiroResult))
-            plt.xlabel(text['residuals-predicted.xlabel'])
-            plt.ylabel(text['residuals-predicted.ylabel'])
-        qqAx = fig.add_subplot(nextSubplotNum,1,nextSubplotNum)
-        sm.qqplot(results.resid, fit = True, line = '45', ax = qqAx)
-        plt.axis('equal')
-        if text is not None and 'qqplot.xlabel' in text:
-            plt.xlabel(text['qqplot.xlabel'])
-            plt.ylabel(text['qqplot.ylabel'])
-        plt.tight_layout()
-        if filenamePrefix is not None:
-            from storage import openCheck
-            out = openCheck(filenamePrefix+'-coefficients.html', 'w')
-            out.write(results.summary().as_html())
-            plt.savefig(filenamePrefix+'-model-results.'+figureFileType)
-
-#########################
-# iterable section
-#########################
-
-def mostCommon(L):
-    '''Returns the most frequent element in a iterable
-
-    taken from http://stackoverflow.com/questions/1518522/python-most-common-element-in-a-list'''
-    from itertools import groupby
-    from operator import itemgetter
-    # get an iterable of (item, iterable) pairs
-    SL = sorted((x, i) for i, x in enumerate(L))
-    # print 'SL:', SL
-    groups = groupby(SL, key=itemgetter(0))
-    # auxiliary function to get "quality" for an item
-    def _auxfun(g):
-        item, iterable = g
-        count = 0
-        min_index = len(L)
-        for _, where in iterable:
-            count += 1
-            min_index = min(min_index, where)
-            # print 'item %r, count %r, minind %r' % (item, count, min_index)
-        return count, -min_index
-    # pick the highest-count/earliest item
-    return max(groups, key=_auxfun)[0]
-
-#########################
-# sequence section
-#########################
-
-class LCSS(object):
-    '''Class that keeps the LCSS parameters
-    and puts together the various computations
-
-    the methods with names starting with _ are not to be shadowed
-    in child classes, who will shadow the other methods, 
-    ie compute and computeXX methods'''
-    def __init__(self, similarityFunc = None, metric = None, epsilon = None, delta = float('inf'), aligned = False, lengthFunc = min):
-        '''One should provide either a similarity function
-        that indicates (return bool) whether elements in the compares lists are similar
-
-        eg distance(p1, p2) < epsilon
-        
-        or a type of metric usable in scipy.spatial.distance.cdist with an epsilon'''
-        if similarityFunc is None and metric is None:
-            print("No way to compute LCSS, similarityFunc and metric are None. Exiting")
-            import sys
-            sys.exit()
-        elif metric is not None and epsilon is None:
-            print("Please provide a value for epsilon if using a cdist metric. Exiting")
-            import sys
-            sys.exit()
-        else:
-            if similarityFunc is None and metric is not None and not isinf(delta):
-                print('Warning: you are using a cdist metric and a finite delta, which will make probably computation slower than using the equivalent similarityFunc (since all pairwise distances will be computed by cdist).')
-            self.similarityFunc = similarityFunc
-            self.metric = metric
-            self.epsilon = epsilon
-            self.aligned = aligned
-            self.delta = delta
-            self.lengthFunc = lengthFunc
-            self.subSequenceIndices = [(0,0)]
-
-    def similarities(self, l1, l2, jshift=0):
-        n1 = len(l1)
-        n2 = len(l2)
-        self.similarityTable = zeros((n1+1,n2+1), dtype = npint)
-        if self.similarityFunc is not None:
-            for i in xrange(1,n1+1):
-                for j in xrange(max(1,i-jshift-self.delta),min(n2,i-jshift+self.delta)+1):
-                    if self.similarityFunc(l1[i-1], l2[j-1]):
-                        self.similarityTable[i,j] = self.similarityTable[i-1,j-1]+1
-                    else:
-                        self.similarityTable[i,j] = max(self.similarityTable[i-1,j], self.similarityTable[i,j-1])
-        elif self.metric is not None:
-            similarElements = distance.cdist(l1, l2, self.metric) <= self.epsilon
-            for i in xrange(1,n1+1):
-                for j in xrange(max(1,i-jshift-self.delta),min(n2,i-jshift+self.delta)+1):
-                    if similarElements[i-1, j-1]:
-                        self.similarityTable[i,j] = self.similarityTable[i-1,j-1]+1
-                    else:
-                        self.similarityTable[i,j] = max(self.similarityTable[i-1,j], self.similarityTable[i,j-1])
-            
-
-    def subSequence(self, i, j):
-        '''Returns the subsequence of two sequences
-        http://en.wikipedia.org/wiki/Longest_common_subsequence_problem'''
-        if i == 0 or j == 0:
-            return []
-        elif self.similarityTable[i][j] == self.similarityTable[i][j-1]:
-            return self.subSequence(i, j-1)
-        elif self.similarityTable[i][j] == self.similarityTable[i-1][j]:
-            return self.subSequence(i-1, j)
-        else:
-            return self.subSequence(i-1, j-1) + [(i-1,j-1)]
-
-    def _compute(self, _l1, _l2, computeSubSequence = False):
-        '''returns the longest common subsequence similarity
-        l1 and l2 should be the right format
-        eg list of tuple points for cdist 
-        or elements that can be compare using similarityFunc
-
-        if aligned, returns the best matching if using a finite delta by shifting the series alignments
-        '''
-        if len(_l2) < len(_l1): # l1 is the shortest
-            l1 = _l2
-            l2 = _l1
-            revertIndices = True
-        else:
-            l1 = _l1
-            l2 = _l2
-            revertIndices = False
-        n1 = len(l1)
-        n2 = len(l2)
-
-        if self.aligned:
-            lcssValues = {}
-            similarityTables = {}
-            for i in xrange(-n2-self.delta+1, n1+self.delta): # interval such that [i-shift-delta, i-shift+delta] is never empty, which happens when i-shift+delta < 1 or when i-shift-delta > n2
-                self.similarities(l1, l2, i)
-                lcssValues[i] = self.similarityTable.max()
-                similarityTables[i] = self.similarityTable
-                #print self.similarityTable
-            alignmentShift = argmaxDict(lcssValues) # ideally get the medium alignment shift, the one that minimizes distance
-            self.similarityTable = similarityTables[alignmentShift]
-        else:
-            alignmentShift = 0
-            self.similarities(l1, l2)
-
-        # threshold values for the useful part of the similarity table are n2-n1-delta and n1-n2-delta
-        self.similarityTable = self.similarityTable[:min(n1, n2+alignmentShift+self.delta)+1, :min(n2, n1-alignmentShift+self.delta)+1]
-
-        if computeSubSequence:
-            self.subSequenceIndices = self.subSequence(self.similarityTable.shape[0]-1, self.similarityTable.shape[1]-1)
-            if revertIndices:
-                self.subSequenceIndices = [(j,i) for i,j in self.subSequenceIndices]
-        return self.similarityTable[-1,-1]
-
-    def compute(self, l1, l2, computeSubSequence = False):
-        '''get methods are to be shadowed in child classes '''
-        return self._compute(l1, l2, computeSubSequence)
-
-    def computeAlignment(self):
-        return mean([j-i for i,j in self.subSequenceIndices])
-
-    def _computeNormalized(self, l1, l2, computeSubSequence = False):
-        ''' compute the normalized LCSS
-        ie, the LCSS divided by the min or mean of the indicator lengths (using lengthFunc)
-        lengthFunc = lambda x,y:float(x,y)/2'''
-        return float(self._compute(l1, l2, computeSubSequence))/self.lengthFunc(len(l1), len(l2))
-
-    def computeNormalized(self, l1, l2, computeSubSequence = False):
-        return self._computeNormalized(l1, l2, computeSubSequence)
-
-    def _computeDistance(self, l1, l2, computeSubSequence = False):
-        ''' compute the LCSS distance'''
-        return 1-self._computeNormalized(l1, l2, computeSubSequence)
-
-    def computeDistance(self, l1, l2, computeSubSequence = False):
-        return self._computeDistance(l1, l2, computeSubSequence)
-    
-#########################
-# plotting section
-#########################
-
-def plotPolygon(poly, options = ''):
-    'Plots shapely polygon poly'
-    from numpy.core.multiarray import array
-    from matplotlib.pyplot import plot
-    from shapely.geometry import Polygon
-
-    tmp = array(poly.exterior)
-    plot(tmp[:,0], tmp[:,1], options)
-
-def stepPlot(X, firstX, lastX, initialCount = 0, increment = 1):
-    '''for each value in X, increment by increment the initial count
-    returns the lists that can be plotted 
-    to obtain a step plot increasing by one for each value in x, from first to last value
-    firstX and lastX should be respectively smaller and larger than all elements in X'''
-    
-    sortedX = []
-    counts = [initialCount]
-    for x in sorted(X):
-        sortedX += [x,x]
-        counts.append(counts[-1])
-        counts.append(counts[-1]+increment)
-    counts.append(counts[-1])
-    return [firstX]+sortedX+[lastX], counts
-
-class PlottingPropertyValues(object):
-    def __init__(self, values):
-        self.values = values
-
-    def __getitem__(self, i):
-        return self.values[i%len(self.values)]
-
-markers = PlottingPropertyValues(['+', '*', ',', '.', 'x', 'D', 's', 'o'])
-scatterMarkers = PlottingPropertyValues(['s','o','^','>','v','<','d','p','h','8','+','x'])
-
-linestyles = PlottingPropertyValues(['-', '--', '-.', ':'])
-
-colors = PlottingPropertyValues('brgmyck') # 'w'
-
-def plotIndicatorMap(indicatorMap, squareSize, masked = True, defaultValue=-1):
-    from matplotlib.pyplot import pcolor
-    coords = array(indicatorMap.keys())
-    minX = min(coords[:,0])
-    minY = min(coords[:,1])
-    X = arange(minX, max(coords[:,0])+1.1)*squareSize
-    Y = arange(minY, max(coords[:,1])+1.1)*squareSize
-    C = defaultValue*ones((len(Y), len(X)))
-    for k,v in indicatorMap.iteritems():
-        C[k[1]-minY,k[0]-minX] = v
-    if masked:
-        pcolor(X, Y, ma.masked_where(C==defaultValue,C))
-    else:
-        pcolor(X, Y, C)
-
-#########################
-# Data download
-#########################
-
-def downloadECWeather(stationID, years, months = [], outputDirectoryname = '.', english = True):
-    '''Downloads monthly weather data from Environment Canada
-    If month is provided (number 1 to 12), it means hourly data for the whole month
-    Otherwise, means the data for each day, for the whole year
-
-    Example: MONTREAL MCTAVISH	10761
-             MONTREALPIERRE ELLIOTT TRUDEAU INTL A	5415
-
-    To get daily data for 2010 and 2011, downloadECWeather(10761, [2010,2011], [], '/tmp')
-    To get hourly data for 2009 and 2012, January, March and October, downloadECWeather(10761, [2009,2012], [1,3,10], '/tmp')'''
-    import urllib2
-    if english:
-        language = 'e'
-    else:
-        language = 'f'
-    if len(months) == 0:
-        timeFrame = 2
-        months = [1]
-    else:
-        timeFrame = 1
-
-    for year in years:
-        for month in months:
-            url = urllib2.urlopen('http://climat.meteo.gc.ca/climateData/bulkdata_{}.html?format=csv&stationID={}&Year={}&Month={}&Day=1&timeframe={}&submit=++T%C3%A9l%C3%A9charger+%0D%0Ades+donn%C3%A9es'.format(language, stationID, year, month, timeFrame))
-            data = url.read()
-            outFilename = '{}/{}-{}'.format(outputDirectoryname, stationID, year)
-            if timeFrame == 1:
-                outFilename += '-{}-hourly'.format(month)
-            else:
-                outFilename += '-daily'
-            outFilename += '.csv'
-            out = open(outFilename, 'w')
-            out.write(data)
-            out.close()
-
-#########################
-# File I/O
-#########################
-
-def removeExtension(filename, delimiter = '.'):
-    '''Returns the filename minus the extension (all characters after last .)'''
-    i = filename.rfind(delimiter)
-    if i>0:
-        return filename[:i]
-    else:
-        return filename
-
-def cleanFilename(s):
-    'cleans filenames obtained when contatenating figure characteristics'
-    return s.replace(' ','-').replace('.','').replace('/','-').replace(',','')
-
-def listfiles(dirname, extension, remove = False):
-    '''Returns the list of files with the extension in the directory dirname
-    If remove is True, the filenames are stripped from the extension'''
-    from os import listdir
-    tmp = [f for f in listdir(dirname) if f.endswith(extension)]
-    tmp.sort()
-    if remove:
-        return [removeExtension(f, extension) for f in tmp]
-    else:
-        return tmp
-
-def mkdir(dirname):
-    'Creates a directory if it does not exist'
-    import os
-    if not os.path.exists(dirname):
-        os.mkdir(dirname)
-    else:
-        print(dirname+' already exists')
-
-def removeFile(filename):
-    '''Deletes the file while avoiding raising an error 
-    if the file does not exist'''
-    import os
-    if (os.path.exists(filename)):
-        os.remove(filename)
-    else:
-        print(filename+' does not exist')
-
-def line2Floats(l, separator=' '):
-    '''Returns the list of floats corresponding to the string'''
-    return [float(x) for x in l.split(separator)]
-
-def line2Ints(l, separator=' '):
-    '''Returns the list of ints corresponding to the string'''
-    return [int(x) for x in l.split(separator)]
-
-#########################
-# CLI utils
-#########################
-
-def parseCLIOptions(helpMessage, options, cliArgs, optionalOptions=[]):
-    ''' Simple function to handle similar argument parsing
-    Returns the dictionary of options and their values
-
-    * cliArgs are most likely directly sys.argv 
-    (only the elements after the first one are considered)
-    
-    * options should be a list of strings for getopt options, 
-    eg ['frame=','correspondences=','video=']
-    A value must be provided for each option, or the program quits'''
-    import sys, getopt
-    from numpy.core.fromnumeric import all
-    optionValues, args = getopt.getopt(cliArgs[1:], 'h', ['help']+options+optionalOptions)
-    optionValues = dict(optionValues)
-
-    if '--help' in optionValues.keys() or '-h' in optionValues.keys():
-        print(helpMessage+
-              '\n - Compulsory options: '+' '.join([opt.replace('=','') for opt in options])+
-              '\n - Non-compulsory options: '+' '.join([opt.replace('=','') for opt in optionalOptions]))
-        sys.exit()
-
-    missingArgument = [('--'+opt.replace('=','') in optionValues.keys()) for opt in options]
-    if not all(missingArgument):
-        print('Missing argument')
-        print(optionValues)
-        sys.exit()
-
-    return optionValues
-
-
-#########################
-# Profiling
-#########################
-
-def analyzeProfile(profileFilename, stripDirs = True):
-    '''Analyze the file produced by cProfile 
-
-    obtained by for example: 
-    - call in script (for main() function in script)
-    import cProfile, os
-    cProfile.run('main()', os.path.join(os.getcwd(),'main.profile'))
-
-    - or on the command line:
-    python -m cProfile [-o profile.bin] [-s sort] scriptfile [arg]'''
-    import pstats, os
-    p = pstats.Stats(os.path.join(os.pardir, profileFilename))
-    if stripDirs:
-        p.strip_dirs()
-    p.sort_stats('time')
-    p.print_stats(.2)
-    #p.sort_stats('time')
-    # p.print_callees(.1, 'int_prediction.py:')
-    return p
-
-#########################
-# running tests
-#########################
-
-if __name__ == "__main__":
-    import doctest
-    import unittest
-    suite = doctest.DocFileSuite('tests/utils.txt')
-    #suite = doctest.DocTestSuite()
-    unittest.TextTestRunner().run(suite)
-    #doctest.testmod()
-    #doctest.testfile("example.txt")
--- a/run-tests.sh	Fri Jun 10 15:43:02 2016 -0400
+++ b/run-tests.sh	Mon Aug 24 16:02:06 2020 -0400
@@ -1,7 +1,7 @@
 #!/bin/sh
 echo "------------"
 echo "Python tests"
-cd python
+cd trafficintelligence
 ./run-tests.sh
 cd ..
 echo "------------"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/samples/CMakeLists-nonfunctional.txt	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,49 @@
+CMAKE_MINIMUM_REQUIRED( VERSION 2.6 )
+
+FIND_PACKAGE(
+	OpenCV REQUIRED
+)
+
+FIND_LIBRARY(
+	SQLite3_LIBS sqlite3
+)
+
+#FIND_PACKAGE(TrajectoryManagement)
+
+SET(
+	CMAKE_CXX_FLAGS "-g -Wall"
+)
+
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY bin)
+
+add_executable(feature-based-tracking
+	c/cvutils.cpp
+	c/feature-based-tracking.cpp
+	c/Motion.cpp
+	c/Parameters.cpp
+	c/utils.cpp
+	c/InputFrameListModule.cpp
+	c/InputVideoFileModule.cpp
+	)
+
+find_package(Boost REQUIRED program_options filesystem system)
+find_library(TrajectoryManagement_LIBRARY TrajectoryManagementAndAnalysis)
+find_path(TrajectoryManagement_INCLUDE_DIR src/Trajectory.h)
+
+add_definitions(
+	-DUSE_OPENCV
+	)
+
+include_directories(
+	${PROJECT_SOURCE_DIR}/include
+	${TrajectoryManagement_INCLUDE_DIR}
+	)
+
+target_link_libraries(feature-based-tracking
+	${TrajectoryManagement_LIBRARY}
+	${SQLite3_LIBS}
+	${OpenCV_LIBS}
+	${Boost_LIBRARIES}
+	)
+
+install(TARGETS feature-based-tracking DESTINATION bin)
Binary file samples/val-dor-117-111.png has changed
--- a/scripts/classify-objects.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/classify-objects.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,94 +1,88 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
-import cvutils, moving, ml, storage
+import sys, argparse
 
 import numpy as np
-import sys, argparse
-from cv2.ml import SVM_RBF, SVM_C_SVC
 import cv2
 from scipy.stats import norm, lognorm
 
+from trafficintelligence import cvutils, moving, ml, storage, utils
+
 # TODO add mode detection live, add choice of kernel and svm type (to be saved in future classifier format)
 
 parser = argparse.ArgumentParser(description='The program processes indicators for all pairs of road users in the scene')
 parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file', required = True)
-parser.add_argument('--kernel', dest = 'kernelType', help = 'kernel type for the support vector machine (SVM)', default = SVM_RBF, type = long)
-parser.add_argument('--svm', dest = 'svmType', help = 'SVM type', default = SVM_C_SVC, type = long)
 parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file (overrides the configuration file)')
 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
 parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to classify', type = int, default = None)
+parser.add_argument('--start-frame0', dest = 'startFrame0', help = 'starts with first frame for videos with index problem where frames cannot be reached', action = 'store_true')
 parser.add_argument('--plot-speed-distributions', dest = 'plotSpeedDistribution', help = 'simply plots the distributions used for each user type', action = 'store_true')
-parser.add_argument('--max-speed-distribution-plot', dest = 'maxSpeedDistributionPlot', help = 'if plotting the user distributions, the maximum speed to display', type = float, default = 50.)
+parser.add_argument('--max-speed-distribution-plot', dest = 'maxSpeedDistributionPlot', help = 'if plotting the user distributions, the maximum speed to display (km/h)', type = float, default = 50.)
+parser.add_argument('--verbose', dest = 'verbose', help = 'verbose information', action = 'store_true')
 
 args = parser.parse_args()
-params = storage.ProcessParameters(args.configFilename)
-
-if args.videoFilename is not None:
-    videoFilename = args.videoFilename
-else:
-    videoFilename = params.videoFilename
-if args.databaseFilename is not None:
-    databaseFilename = args.databaseFilename
-else:
-    databaseFilename = params.databaseFilename
+params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum = storage.processVideoArguments(args)
+classifierParams = storage.ClassifierParameters(params.classifierFilename)
+classifierParams.convertToFrames(params.videoFrameRate, 3.6) # conversion from km/h to m/frame
 
-params.convertToFrames(3.6)
-if params.homography is not None:
-    invHomography = np.linalg.inv(params.homography)
-else:
-    invHomography = None
-
-if params.speedAggregationMethod == 'median':
-    speedAggregationFunc = np.median
-elif params.speedAggregationMethod == 'mean':
-    speedAggregationFunc = np.mean
-elif params.speedAggregationMethod == 'quantile':
-    speedAggregationFunc = lambda speeds: np.percentile(speeds, args.speedAggregationQuantile)
-else:
-    print('Unknown speed aggregation method: {}. Exiting'.format(params.speedAggregationMethod))
+speedAggregationFunc = utils.aggregationFunction(classifierParams.speedAggregationMethod)
+if speedAggregationFunc is None:
     sys.exit()
 
-pedBikeCarSVM = ml.SVM(args.svmType, args.kernelType)
-pedBikeCarSVM.load(params.pedBikeCarSVMFilename)
-bikeCarSVM = ml.SVM(args.svmType, args.kernelType)
-bikeCarSVM.load(params.bikeCarSVMFilename)
+pedBikeCarSVM = ml.SVM_load(classifierParams.pedBikeCarSVMFilename)
+bikeCarSVM = ml.SVM_load(classifierParams.bikeCarSVMFilename)
 
 # log logistic for ped and bik otherwise ((pedBeta/pedAlfa)*((sMean/pedAlfa)**(pedBeta-1)))/((1+(sMean/pedAlfa)**pedBeta)**2.)
-speedProbabilities = {'car': lambda s: norm(params.meanVehicleSpeed, params.stdVehicleSpeed).pdf(s),
-                      'pedestrian': lambda s: norm(params.meanPedestrianSpeed, params.stdPedestrianSpeed).pdf(s), 
-                      'bicycle': lambda s: lognorm(params.scaleCyclistSpeed, loc = 0., scale = np.exp(params.locationCyclistSpeed)).pdf(s)} # numpy lognorm shape, loc, scale: shape for numpy is scale (std of the normal) and scale for numpy is location (mean of the normal)
+carNorm = norm(classifierParams.meanVehicleSpeed, classifierParams.stdVehicleSpeed)
+pedNorm = norm(classifierParams.meanPedestrianSpeed, classifierParams.stdPedestrianSpeed)
+# numpy lognorm shape, loc, scale: shape for numpy is scale (std of the normal) and scale for numpy is exp(location) (loc=mean of the normal)
+bicLogNorm = lognorm(classifierParams.scaleCyclistSpeed, loc = 0., scale = np.exp(classifierParams.locationCyclistSpeed))
+speedProbabilities = {'car': lambda s: carNorm.pdf(s),
+                      'pedestrian': lambda s: pedNorm.pdf(s), 
+                      'bicycle': lambda s: bicLogNorm.pdf(s)}
 
 if args.plotSpeedDistribution:
     import matplotlib.pyplot as plt
     plt.figure()
+    speeds = np.arange(0.1, args.maxSpeedDistributionPlot, 0.1)
     for k in speedProbabilities:
-        plt.plot(np.arange(0.1, args.maxSpeedDistributionPlot, 0.1), [speedProbabilities[k](s/3.6/25) for s in np.arange(0.1, args.maxSpeedDistributionPlot, 0.1)], label = k)
+        plt.plot(speeds, [speedProbabilities[k](s/(3.6*params.videoFrameRate)) for s in speeds], label = k) # the distribution parameters are in video intrinsic units, unit of distance per frame
+    maxProb = -1.
+    for k in speedProbabilities:
+        maxProb = max(maxProb, np.max([speedProbabilities[k](s/(3.6*params.videoFrameRate)) for s in speeds]))
+    plt.plot([classifierParams.minSpeedEquiprobable*3.6*params.videoFrameRate]*2, [0., maxProb], 'k-')
+    plt.text(classifierParams.minSpeedEquiprobable*3.6*params.videoFrameRate, maxProb, 'threshold for equiprobable class')
     plt.xlabel('Speed (km/h)')
-    plt.ylabel('Probability')
+    plt.ylabel('Probability density function')
     plt.legend()
-    plt.title('Probability Density Function')
+    #plt.title('Probability Density Function')
     plt.show()
     sys.exit()
 
 objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', args.nObjects, withFeatures = True)
-#features = storage.loadTrajectoriesFromSqlite(databaseFilename, 'feature')
-intervals = []
-for obj in objects:
-    #obj.setFeatures(features)
-    intervals.append(obj.getTimeInterval())
-timeInterval = moving.TimeInterval.unionIntervals(intervals)
+timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects])
+if args.startFrame0:
+    timeInterval.first = 0
 
 capture = cv2.VideoCapture(videoFilename)
 width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
 height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
+#if undistort: # setup undistortion
+#     [map1, map2] = cvutils.computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
+#     height, width = map1.shape
+#    newImgSize = (int(round(width*undistortedImageMultiplication)), int(round(height*undistortedImageMultiplication)))
+#    newCameraMatrix = cv2.getDefaultNewCameraMatrix(intrinsicCameraMatrix, newImgSize, True)
+#else:
+#    newCameraMatrix = None
+
 pastObjects = []
-if params.undistort: # setup undistortion
-    [map1, map2] = cvutils.computeUndistortMaps(width, height, params.undistortedImageMultiplication, params.intrinsicCameraMatrix, params.distortionCoefficients)
+currentObjects = []
 if capture.isOpened():
     ret = True
     frameNum = timeInterval.first
-    capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
+    if not args.startFrame0:
+        capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
     lastFrameNum = timeInterval.last
 
     while ret and frameNum <= lastFrameNum:
@@ -96,25 +90,27 @@
         if ret:
             if frameNum%50 == 0:
                 print('frame number: {}'.format(frameNum))
-                currentObjects = []
-                for obj in objects:
-                    if obj.getLastInstant() < frameNum:
-                        obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = params.minSpeedEquiprobable, speedProbabilities = speedProbabilities)
-                        pastObjects.append(obj)
-                    else:
-                        currentObjects.append(obj)
-                objects = currentObjects
-            if params.undistort:
-                img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
-            for obj in objects:
-                if obj.existsAtInstant(frameNum):
-                    if obj.getFirstInstant() == frameNum:
-                        obj.initClassifyUserTypeHoGSVM(speedAggregationFunc, pedBikeCarSVM, bikeCarSVM, params.maxPedestrianSpeed, params.maxCyclistSpeed, params.nFramesIgnoreAtEnds)
-                    obj.classifyUserTypeHoGSVMAtInstant(img, frameNum, invHomography, width, height, 0.2, 0.2, 800) # px, py, pixelThreshold
+            #if undistort:
+            #    img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)            
+            for obj in objects[:]:
+                if obj.getFirstInstant() <= frameNum: # if images are skipped
+                    obj.initClassifyUserTypeHoGSVM(speedAggregationFunc, pedBikeCarSVM, bikeCarSVM, classifierParams.maxPedestrianSpeed, classifierParams.maxCyclistSpeed, classifierParams.nFramesIgnoreAtEnds, invHomography, intrinsicCameraMatrix, distortionCoefficients)
+                    currentObjects.append(obj)
+                    objects.remove(obj)
+
+            for obj in currentObjects[:]:
+                if obj.getLastInstant() <= frameNum:  # if images are skipped
+                    obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = classifierParams.minSpeedEquiprobable, speedProbabilities = speedProbabilities, maxPercentUnknown = classifierParams.maxPercentUnknown)
+                    pastObjects.append(obj)
+                    currentObjects.remove(obj)
+                else:
+                    obj.classifyUserTypeHoGSVMAtInstant(img, frameNum, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels, classifierParams.hogRescaleSize, classifierParams.hogNOrientations, classifierParams.hogNPixelsPerCell, classifierParams.hogNCellsPerBlock, classifierParams.hogBlockNorm)
+                    if args.verbose:
+                        print('obj {}@{}: {}'.format(obj.getNum(), frameNum, moving.userTypeNames[obj.userTypes[frameNum]]))
         frameNum += 1
     
-    for obj in objects:
-        obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = params.minSpeedEquiprobable, speedProbabilities = speedProbabilities)
+    for obj in currentObjects:
+        obj.classifyUserTypeHoGSVM(minSpeedEquiprobable = classifierParams.minSpeedEquiprobable, speedProbabilities = speedProbabilities, maxPercentUnknown = classifierParams.maxPercentUnknown)
         pastObjects.append(obj)
     print('Saving user types')
     storage.setRoadUserTypes(databaseFilename, pastObjects)
--- a/scripts/clean-ground-truth.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/clean-ground-truth.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,4 +1,4 @@
-#! /usr/bin/env python                                                                                
+#! /usr/bin/env python3
 import argparse
 import pandas as pd
 import sqlite3
--- a/scripts/compute-clearmot.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/compute-clearmot.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,9 +1,10 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import sys, argparse
 from numpy import loadtxt
 from numpy.linalg import inv
-import moving, storage, cvutils
+
+from trafficintelligence import moving, storage, cvutils
 
 # TODO: need to trim objects to same mask ?
 
@@ -23,13 +24,19 @@
 parser.add_argument('--offset', dest = 'nFramesOffsetAnnotations', help = 'number of frames to offset the ground truth annotations', type = int)
 parser.add_argument('--displayOffset', dest = 'nFramesOffsetDisplay', help = 'number of frames to offset annotations and objects for display', type = int)
 parser.add_argument('--display', dest = 'display', help = 'display the ground truth to object matches (graphically)', action = 'store_true')
+parser.add_argument('--undistort', dest = 'undistort', help = 'undistort the video (because features have been extracted that way)', action = 'store_true')
+parser.add_argument('--intrinsic', dest = 'intrinsicCameraMatrixFilename', help = 'name of the intrinsic camera file')
+parser.add_argument('--distortion-coefficients', dest = 'distortionCoefficients', help = 'distortion coefficients', nargs = '*', type = float)
+parser.add_argument('--undistorted-multiplication', dest = 'undistortedImageMultiplication', help = 'undistorted image multiplication', type = float)
+
 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (for display)')
+parser.add_argument('--csv', dest = 'csvOutput', help = 'output comma-separated metrics', action = 'store_true')
 args = parser.parse_args()
 
 if args.homographyFilename is not None:
-    homography = loadtxt(args.homographyFilename)
+    invHomography = inv(loadtxt(args.homographyFilename))
 else:
-    homography = None
+    invHomography = None
 
 objects = storage.loadTrajectoriesFromSqlite(args.trackerDatabaseFilename, 'object')
 
@@ -40,47 +47,42 @@
     if len(mask) > 1:
         mask = mask[:,:,0]
     for obj in objects:
-        maskObjects += obj.getObjectsInMask(mask, inv(homography), 2) # TODO add option to keep object if at least one feature in mask
+        maskObjects += obj.getObjectsInMask(mask, invHomography, 10) # TODO add option to keep object if at least one feature in mask
     objects = maskObjects    
 
 annotations = storage.loadBBMovingObjectsFromSqlite(args.groundTruthDatabaseFilename)
 for a in annotations:
-    a.computeCentroidTrajectory(homography)
+    a.computeCentroidTrajectory(invHomography)
 
 if args.nFramesOffsetAnnotations is not None:
     for a in annotations:
         a.shiftTimeInterval(args.nFramesOffsetAnnotations)
 
-if args.display:
-    motp, mota, mt, mme, fpt, gt, gtMatches, toMatches = moving.computeClearMOT(annotations, objects, args.matchingDistance, args.firstInstant, args.lastInstant, True)
-else:
-    motp, mota, mt, mme, fpt, gt = moving.computeClearMOT(annotations, objects, args.matchingDistance, args.firstInstant, args.lastInstant)
-
+motp, mota, mt, mme, fpt, gt, gtMatches, toMatches = moving.computeClearMOT(annotations, objects, args.matchingDistance, args.firstInstant, args.lastInstant, args.display)
 
-print 'MOTP: {}'.format(motp)
-print 'MOTA: {}'.format(mota)
-print 'Number of missed objects.frames: {}'.format(mt)
-print 'Number of mismatches: {}'.format(mme)
-print 'Number of false alarms.frames: {}'.format(fpt)
+if args.csvOutput:
+    print('{},{},{},{},{}'.format(motp, mota, mt, mme, fpt))
+else:
+    print('MOTP: {}'.format(motp))
+    print('MOTA: {}'.format(mota))
+    print('Number of missed objects.frames: {}'.format(mt))
+    print('Number of mismatches: {}'.format(mme))
+    print('Number of false alarms.frames: {}'.format(fpt))
 
 def shiftMatches(matches, offset):
     shifted = {}
     for k in matches:
-        shifted[k] = {t+offset:v for t, v in matches[k].iteritems()}
+        shifted[k] = {t+offset:v for t, v in matches[k].items()}
     return shifted
 
 if args.display:
-    if args.nFramesOffsetDisplay is not None:
-        firstInstant = args.firstInstant+args.nFramesOffsetDisplay
-        lastInstant = args.lastInstant+args.nFramesOffsetDisplay
-        for a in annotations:
-            a.shiftTimeInterval(args.nFramesOffsetDisplay)
-        for o in objects:
-            o.shiftTimeInterval(args.nFramesOffsetDisplay)
-        gtMatches = shiftMatches(gtMatches, args.nFramesOffsetDisplay)
-        toMatches = shiftMatches(toMatches, args.nFramesOffsetDisplay)
-    cvutils.displayTrajectories(args.videoFilename, objects, {}, inv(homography), firstInstant, lastInstant, annotations = annotations, gtMatches = gtMatches, toMatches = toMatches)#, rescale = args.rescale, nFramesStep = args.nFramesStep, saveAllImages = args.saveAllImages, undistort = (undistort or args.undistort), intrinsicCameraMatrix = intrinsicCameraMatrix, distortionCoefficients = distortionCoefficients, undistortedImageMultiplication = undistortedImageMultiplication)
-
+    if args.undistort and args.intrinsicCameraMatrixFilename is not None:
+        intrinsicCameraMatrix = loadtxt(args.intrinsicCameraMatrixFilename)
+    else:
+        intrinsicCameraMatrix = None
+    firstInstant = args.firstInstant
+    lastInstant = args.lastInstant
+    cvutils.displayTrajectories(args.videoFilename, objects, {}, invHomography, firstInstant, lastInstant, annotations = annotations, undistort = args.undistort, intrinsicCameraMatrix = intrinsicCameraMatrix, distortionCoefficients = args.distortionCoefficients, undistortedImageMultiplication = args.undistortedImageMultiplication, gtMatches = gtMatches, toMatches = toMatches)
     #print('Ground truth matches')
     #print(gtMatches)
     #print('Object matches')
--- a/scripts/compute-homography.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/compute-homography.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import sys, argparse
 
@@ -6,7 +6,7 @@
 import numpy as np
 import cv2
 
-import cvutils, utils, storage
+from trafficintelligence import cvutils, utils, storage
 
 # TODO add option to use RANSAC or other robust homography estimation method?
 
@@ -30,79 +30,36 @@
 parser.add_argument('--display', dest = 'displayPoints', help = 'display original and projected points on both images', action = 'store_true')
 parser.add_argument('--intrinsic', dest = 'intrinsicCameraMatrixFilename', help = 'name of the intrinsic camera file')
 parser.add_argument('--distortion-coefficients', dest = 'distortionCoefficients', help = 'distortion coefficients', nargs = '*', type = float)
-parser.add_argument('--undistorted-multiplication', dest = 'undistortedImageMultiplication', help = 'undistorted image multiplication', type = float)
+parser.add_argument('--undistorted-multiplication', dest = 'undistortedImageMultiplication', help = 'undistorted image multiplication', type = float, default = 1.)
 parser.add_argument('--undistort', dest = 'undistort', help = 'undistort the video (because features have been extracted that way', action = 'store_true')
 parser.add_argument('--save', dest = 'saveImages', help = 'save the undistorted video frame (display option must be chosen)', action = 'store_true')
 
 args = parser.parse_args()
 
-# TODO process camera intrinsic and extrinsic parameters to obtain image to world homography, taking example from Work/src/python/generate-homography.py script
-# cameraMat = load(videoFilenamePrefix+'-camera.txt');
-# T1 = cameraMat[3:6,:].copy();
-# A = cameraMat[0:3,0:3].copy();
-
-# # pay attention, rotation may be the transpose
-# # R = T1[:,0:3].T;
-# R = T1[:,0:3];
-# rT = dot(R, T1[:,3]/1000);
-# T = zeros((3,4),'f');
-# T[:,0:3] = R[:];
-# T[:,3] = rT;
-
-# AT = dot(A,T);
-
-# nPoints = 4;
-# worldPoints = cvCreateMat(nPoints, 3, CV_64FC1);
-# imagePoints = cvCreateMat(nPoints, 3, CV_64FC1);
-
-# # extract homography from the camera calibration
-# worldPoints = cvCreateMat(4, 3, CV_64FC1);
-# imagePoints = cvCreateMat(4, 3, CV_64FC1);
-
-# worldPoints[0,:] = [[1, 1, 0]];
-# worldPoints[1,:] = [[1, 2, 0]];
-# worldPoints[2,:] = [[2, 1, 0]];
-# worldPoints[3,:] = [[2, 2, 0]];
-
-# wPoints = [[1,1,2,2],
-#            [1,2,1,2],
-#            [0,0,0,0]];
-# iPoints = utils.worldToImage(AT, wPoints);
-
-# for i in range(nPoints):
-#     imagePoints[i,:] = [iPoints[:,i].tolist()];
-
-# H = cvCreateMat(3, 3, CV_64FC1);
-
-# cvFindHomography(imagePoints, worldPoints, H);
-
-
 homography = np.array([])
 if args.pointCorrespondencesFilename is not None:
     worldPts, videoPts = cvutils.loadPointCorrespondences(args.pointCorrespondencesFilename)
     homography, mask = cv2.findHomography(videoPts, worldPts) # method=0, ransacReprojThreshold=3
 elif args.tsaiCameraFilename is not None: # hack using PDTV
     from pdtv import TsaiCamera
-    f = storage.openCheck(args.tsaiCameraFilename, quitting = True)
-    content = storage.getLines(f)
-    cameraData = {}
-    for l in content:
-        tmp = l.split(':')
-        cameraData[tmp[0]] = float(tmp[1].strip().replace(',','.'))
+    cameraData = storage.loadPinholeCameraModel(args.tsaiCameraFilename)
     camera = TsaiCamera(Cx=cameraData['Cx'], Cy=cameraData['Cy'], Sx=cameraData['Sx'], Tx=cameraData['Tx'], Ty=cameraData['Ty'], Tz=cameraData['Tz'], dx=cameraData['dx'], dy=cameraData['dy'], f=cameraData['f'], k=cameraData['k'], r1=cameraData['r1'], r2=cameraData['r2'], r3=cameraData['r3'], r4=cameraData['r4'], r5=cameraData['r5'], r6=cameraData['r6'], r7=cameraData['r7'], r8=cameraData['r8'], r9=cameraData['r9'])
     homography = cvutils.computeHomographyFromPDTV(camera)
 elif args.videoFrameFilename is not None and args.worldFilename is not None:
     worldImg = plt.imread(args.worldFilename)
     videoImg = plt.imread(args.videoFrameFilename)
     if args.undistort:        
-        [map1, map2] = cvutils.computeUndistortMaps(videoImg.shape[1], videoImg.shape[0], args.undistortedImageMultiplication, np.loadtxt(args.intrinsicCameraMatrixFilename), args.distortionCoefficients)
+        [map1, map2], newCameraMatrix = cvutils.computeUndistortMaps(videoImg.shape[1], videoImg.shape[0], args.undistortedImageMultiplication, np.loadtxt(args.intrinsicCameraMatrixFilename), args.distortionCoefficients)
         videoImg = cv2.remap(videoImg, map1, map2, interpolation=cv2.INTER_LINEAR)
-    print('Click on {0} points in the video frame'.format(args.nPoints))
+    print('Click on {} points in the video frame'.format(args.nPoints))
+    plt.ion()
     plt.figure()
     plt.imshow(videoImg)
     plt.tight_layout()
     videoPts = np.array(plt.ginput(args.nPoints, timeout=3000))
-    print('Click on {0} points in the world image'.format(args.nPoints))
+    if args.undistort:
+        videoPts = cvutils.newCameraProject(videoPts.T, np.linalg.inv(newCameraMatrix)).T
+    print('Click on {} points in the world image'.format(args.nPoints))
     plt.figure()
     plt.imshow(worldImg)
     plt.tight_layout()
@@ -122,22 +79,25 @@
     worldImg = cv2.imread(args.worldFilename)
     videoImg = cv2.imread(args.videoFrameFilename)
     if args.undistort:
-        [map1, map2] = cvutils.computeUndistortMaps(videoImg.shape[1], videoImg.shape[0], args.undistortedImageMultiplication, np.loadtxt(args.intrinsicCameraMatrixFilename), args.distortionCoefficients)
+        [map1, map2], newCameraMatrix = cvutils.computeUndistortMaps(videoImg.shape[1], videoImg.shape[0], args.undistortedImageMultiplication, np.loadtxt(args.intrinsicCameraMatrixFilename), args.distortionCoefficients)
         videoImg = cv2.remap(videoImg, map1, map2, interpolation=cv2.INTER_LINEAR)
         if args.saveImages:
             cv2.imwrite(utils.removeExtension(args.videoFrameFilename)+'-undistorted.png', videoImg)
     invHomography = np.linalg.inv(homography)
-    projectedWorldPts = cvutils.projectArray(invHomography, worldPts.T).T
-    projectedVideoPts = cvutils.projectArray(homography, videoPts.T).T
+    projectedWorldPts = cvutils.homographyProject(worldPts.T, invHomography).T
+    projectedVideoPts = cvutils.homographyProject(videoPts.T, homography).T
+    if args.undistort:
+        projectedWorldPts = cvutils.newCameraProject(projectedWorldPts.T, newCameraMatrix).T
+        videoPts = cvutils.newCameraProject(videoPts.T, newCameraMatrix).T
     for i in range(worldPts.shape[0]):
         # world image
-        cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))),2,cvutils.cvBlue)
-        cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/args.unitsPerPixel))),2,cvutils.cvRed)
-        cv2.putText(worldImg, str(i+1), tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))+5), cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue, 2)
+        cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))),2,cvutils.cvBlue['default'])
+        cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/args.unitsPerPixel))),2,cvutils.cvRed['default'])
+        cv2.putText(worldImg, str(i+1), tuple(np.int32(np.round(worldPts[i]/args.unitsPerPixel))+5), cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue['default'], 2)
         # video image
-        cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))),2,cvutils.cvBlue)
-        cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))),2,cvutils.cvRed)
-        cv2.putText(videoImg, str(i+1), tuple(np.int32(np.round(videoPts[i])+5)), cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue, 2)
+        cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))),2,cvutils.cvBlue['default'])
+        cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))),2,cvutils.cvRed['default'])
+        cv2.putText(videoImg, str(i+1), tuple(np.int32(np.round(videoPts[i])+5)), cv2.FONT_HERSHEY_PLAIN, 2., cvutils.cvBlue['default'], 2)
     cv2.imshow('video frame',videoImg)
     cv2.imshow('world image',worldImg)
     cv2.waitKey()
--- a/scripts/create-bounding-boxes.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/create-bounding-boxes.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,12 +1,12 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import argparse
 
-import storage
-
 from numpy.linalg.linalg import inv
 from numpy import loadtxt
 
+from trafficintelligence import storage
+
 parser = argparse.ArgumentParser(description='The program creates bounding boxes in image space around all features (for display and for comparison to ground truth in the form of bouding boxes.')
 parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file', required = True)
 parser.add_argument('-o', dest = 'homography', help = 'name of the image to world homography')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/create-metadata.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,30 @@
+#! /usr/bin/env python3
+
+import argparse
+from datetime import datetime
+
+from trafficintelligence import metadata, utils
+
+timeConverter = utils.TimeConverter()
+
+parser = argparse.ArgumentParser(description='The program add camera views (metadata.CameraView) for a site or video sequences (metadata.VideoSequence) for a site and a view.')
+parser.add_argument('-i', dest = 'databaseFilename', help = 'name of the metadata filename', required = True)
+parser.add_argument('-d', dest = 'dirname', help = 'directory name containing sites or video sequences for a given view')
+#parser.add_argument('-s', dest = 'siteId', help = 'site id (if provided, the program adds video sequences for the camera view)')
+parser.add_argument('-v', dest = 'viewId', help = 'camera view id')
+parser.add_argument('--nviews', dest = 'nViewsPerSite', help = 'default number of camera views', type = int, default = 1)
+parser.add_argument('-s', dest = 'startTime', help = 'starting time of the first video (format %%Y-%%m-%%d %%H:%%M:%%S, eg 2011-06-22 10:00:39)', type = timeConverter.convert)
+parser.add_argument('--timeformat', dest = 'timeFormat', help = 'time format of the video filenames (optional) (eg %%Y_%%m%%d_%%H%%M%%S, eg 2017_0627_163231)')
+args = parser.parse_args()
+
+session = metadata.connectDatabase(args.databaseFilename)
+if args.viewId is not None:
+    # sites = metadata.getSite(session, args.siteId)
+    # if len(sites) > 1:
+    #     print('{} sites found matching {}, using the first {}'.format(len(sites), args.siteId, sites[0].name))
+    # site = sites[0]
+    cameraView = metadata.getCameraView(session, args.viewId)
+    metadata.initializeVideos(session, cameraView, args.dirname, args.startTime, args.timeFormat)
+else:
+    metadata.initializeSites(session, args.dirname, args.nViewsPerSite)
+
--- a/scripts/delete-tables.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/delete-tables.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,14 +1,13 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import sys, argparse
 
-import utils
-import storage
+from trafficintelligence import utils, storage
 
 parser = argparse.ArgumentParser(description='The program deletes (drops) the tables in the database before saving new results (for objects, tables object_features and objects are dropped; for interactions, the tables interactions and indicators are dropped')
 #parser.add_argument('configFilename', help = 'name of the configuration file')
 parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database', required = True)
-parser.add_argument('-t', dest = 'dataType', help = 'type of the data to remove', required = True, choices = ['object','interaction', 'bb', 'pois'])
+parser.add_argument('-t', dest = 'dataType', help = 'type of the data to remove', required = True, choices = ['object','interaction', 'bb', 'pois', 'prototype'])
 args = parser.parse_args()
 
 storage.deleteFromSqlite(args.databaseFilename, args.dataType)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/display-synced-trajectories.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,121 @@
+#! /usr/bin/env python3
+
+import sys, argparse, os.path
+from datetime import datetime, timedelta
+
+import numpy as np
+import cv2
+
+from trafficintelligence import cvutils, utils, storage
+from trafficintelligence.metadata import connectDatabase, Site, CameraView, VideoSequence
+
+parser = argparse.ArgumentParser(description='The program displays several views of the same site synchronously.')
+parser.add_argument('--db', dest = 'metadataFilename', help = 'name of the metadata file', required = True)
+#parser.add_argument('-n', dest = 'siteId', help = 'site id or site name', required = True)
+parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file', required = True)
+parser.add_argument('-f', dest = 'startTime', help = 'time to start playing (format %%Y-%%m-%%d %%H:%%M:%%S, eg 2011-06-22 10:00:39)', required = True)
+parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to display', choices = ['feature', 'object'], default = 'object')
+parser.add_argument('-r', dest = 'rescale', help = 'rescaling factor for the displayed image', default = 1., type = float)
+parser.add_argument('-s', dest = 'step', help = 'display every s image', default = 1, type = int)
+parser.add_argument('-u', dest = 'undistort', help = 'undistort the video (because features have been extracted that way)', action = 'store_true')
+
+args = parser.parse_args()
+
+session = connectDatabase(args.metadataFilename)
+
+mergedSequence = session.query(VideoSequence).filter(VideoSequence.databaseFilename == args.databaseFilename).first()
+if mergedSequence is None:
+    print('Video sequence {} was not found in {}. Exiting'.format(args.databaseFilename, args.metadataFilename))
+    sys.exit()
+
+dirname = os.path.split(args.metadataFilename)[0]
+
+frameRate = mergedSequence.cameraView.cameraType.frameRate
+startTime = datetime.strptime(args.startTime, utils.datetimeFormat)
+mergedFirstFrameNum = utils.deltaFrames(mergedSequence.startTime, startTime, frameRate)
+
+cameraViews = session.query(CameraView).filter(CameraView.site == mergedSequence.cameraView.site).filter(CameraView.virtual == False).all()
+videoSequences = session.query(VideoSequence).filter(VideoSequence.virtual == False).all()
+#videoSequences.remove(mergedSequence)
+videoSequences = [v for v in videoSequences if v.cameraView in cameraViews and (v.containsInstant(startTime) or v.startTime > startTime)]
+filenames = [dirname+os.path.sep+v.getVideoSequenceFilename() for v in videoSequences]
+firstFrameNums = [utils.deltaFrames(v.startTime, startTime, frameRate) for v in videoSequences] # use pos/neg first frame nums
+windowNames = [v.cameraView.description for v in videoSequences]
+
+# homography and undistort
+homographies = [np.linalg.inv(np.loadtxt(dirname+os.path.sep+v.cameraView.getHomographyFilename())) for v in videoSequences]
+if args.undistort:
+    cameraTypes = set([cv.cameraType for cv in cameraViews])
+    for cameraType in cameraTypes:
+        cameraType.computeUndistortMaps()
+
+objects = storage.loadTrajectoriesFromSqlite(dirname+os.path.sep+mergedSequence.getDatabaseFilename(), args.trajectoryType)
+for obj in objects:
+    obj.projectedPositions = {}
+
+#def playVideo(filenames, windowNames = None, firstFrameNums = None, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1., step = 1):
+if len(filenames) == 0:
+    print('Empty filename list')
+    sys.exit()
+
+if windowNames is None:
+    windowNames = ['frame{}'.format(i) for i in range(len(filenames))]
+#wait = 5
+#if rescale == 1.:
+for windowName in windowNames:
+    cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
+#if frameRate > 0:
+#    wait = int(round(1000./frameRate))
+#if interactive:
+wait = 0
+rescale = 1.
+captures = [cv2.VideoCapture(fn) for fn in filenames]
+if np.array([cap.isOpened() for cap in captures]).all():
+    key = -1
+    ret = True
+    nFramesShown = 0
+    for i in range(len(captures)):
+        if firstFrameNums[i] > 0:
+            captures[i].set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNums[i])
+    while ret and not cvutils.quitKey(key):
+        rets = []
+        images = []
+        for i in range(len(captures)):
+            if firstFrameNums[i]+nFramesShown>=0:
+                ret, img = captures[i].read()
+                if ret and args.undistort:
+                    img = cv2.remap(img, videoSequences[i].cameraView.cameraType.map1, videoSequences[i].cameraView.cameraType.map2, interpolation=cv2.INTER_LINEAR)
+                rets.append(ret)
+                images.append(img)
+            else:
+                rets.append(False)
+                images.append(None)                
+        if np.array(rets).any():
+            #if printFrames:
+            print('frame shown {0}'.format(nFramesShown))
+            for i in range(len(filenames)):
+                if rets[i]:#firstFrameNums[i]+nFramesShown>=0:
+                    for obj in objects:
+                        if obj.existsAtInstant(mergedFirstFrameNum+nFramesShown):
+                            #print obj.num, obj.timeInterval, mergedFirstFrameNum, nFramesShown
+                            if i not in obj.projectedPositions:
+                                if homographies[i] is not None:
+                                    obj.projectedPositions[i] = obj.positions.homographyProject(homographies[i])
+                                else:
+                                    obj.projectedPositions[i] = obj.positions
+                            cvutils.cvPlot(images[i], obj.projectedPositions[i], cvutils.cvColors['default'][obj.getNum()], int(mergedFirstFrameNum+nFramesShown)-obj.getFirstInstant())
+
+                    #if text is not None:
+                    #    cv2.putText(images[i], text, (10,50), cv2.FONT_HERSHEY_PLAIN, 1, cvRed['default'])
+                    cvutils.cvImshow(windowNames[i], images[i], rescale) # cv2.imshow('frame', img)
+            key = cv2.waitKey(wait)
+            #if cvutils.saveKey(key):
+            #    cv2.imwrite('image-{}.png'.format(frameNum), img)
+        nFramesShown += args.step
+        if args.step > 1:
+            for i in range(len(captures)):
+                if firstFrameNums[i]+nFramesShown >= 0:
+                    captures[i].set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNums[i]+nFramesShown)
+    cv2.destroyAllWindows()
+else:
+    print('Video captures for {} failed'.format(filenames))
--- a/scripts/display-trajectories.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/display-trajectories.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,16 +1,16 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import sys, argparse
 
-import storage, cvutils, utils
+from numpy.linalg import inv
+from numpy import loadtxt
 
-from numpy.linalg.linalg import inv
-from numpy import loadtxt
+from trafficintelligence import storage, cvutils, utils
 
 parser = argparse.ArgumentParser(description='The program displays feature or object trajectories overlaid over the video frames.', epilog = 'Either the configuration filename or the other parameters (at least video and database filenames) need to be provided.')
 parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file')
-parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file')
-parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file')
+parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file (overrides the configuration file)')
+parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
 parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to display', choices = ['feature', 'object'], default = 'feature')
 parser.add_argument('-o', dest = 'homographyFilename', help = 'name of the image to world homography file')
 parser.add_argument('--intrinsic', dest = 'intrinsicCameraMatrixFilename', help = 'name of the intrinsic camera file')
@@ -18,50 +18,32 @@
 parser.add_argument('--undistorted-multiplication', dest = 'undistortedImageMultiplication', help = 'undistorted image multiplication', type = float)
 parser.add_argument('-u', dest = 'undistort', help = 'undistort the video (because features have been extracted that way)', action = 'store_true')
 parser.add_argument('-f', dest = 'firstFrameNum', help = 'number of first frame number to display', type = int)
+parser.add_argument('-l', dest = 'lastFrameNum', help = 'number of last frame number to save (for image saving, no display is made)', type = int)
 parser.add_argument('-r', dest = 'rescale', help = 'rescaling factor for the displayed image', default = 1., type = float)
 parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each display', default = 1, type = int)
+parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to display', type = int)
 parser.add_argument('--save-images', dest = 'saveAllImages', help = 'save all images', action = 'store_true')
-parser.add_argument('--last-frame', dest = 'lastFrameNum', help = 'number of last frame number to save (for image saving, no display is made)', type = int)
+parser.add_argument('--nzeros', dest = 'nZerosFilenameArg', help = 'number of digits in filenames', type = int)
 
 args = parser.parse_args()
 
-if args.configFilename: # consider there is a configuration file
-    params = storage.ProcessParameters(args.configFilename)
-    videoFilename = params.videoFilename
-    databaseFilename = params.databaseFilename
-    if params.homography is not None:
-        homography = inv(params.homography)
-    else:
-        homography = None
-    intrinsicCameraMatrix = params.intrinsicCameraMatrix
-    distortionCoefficients = params.distortionCoefficients
-    undistortedImageMultiplication = params.undistortedImageMultiplication
-    undistort = params.undistort
-    firstFrameNum = params.firstFrameNum
-else:
-    homography = None
-    undistort = False
-    intrinsicCameraMatrix = None
-    distortionCoefficients = []
-    undistortedImageMultiplication = None
-    firstFrameNum = 0
+params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum = storage.processVideoArguments(args)
 
-if not args.configFilename and args.videoFilename is not None:
-    videoFilename = args.videoFilename
-if not args.configFilename and args.databaseFilename is not None:
-    databaseFilename = args.databaseFilename
-if not args.configFilename and args.homographyFilename is not None:
-    homography = inv(loadtxt(args.homographyFilename))            
-if not args.configFilename and args.intrinsicCameraMatrixFilename is not None:
+if args.homographyFilename is not None:
+    invHomography = inv(loadtxt(args.homographyFilename))            
+if args.intrinsicCameraMatrixFilename is not None:
     intrinsicCameraMatrix = loadtxt(args.intrinsicCameraMatrixFilename)
-if not args.configFilename and args.distortionCoefficients is not None:
+if args.distortionCoefficients is not None:
     distortionCoefficients = args.distortionCoefficients
-if not args.configFilename and args.undistortedImageMultiplication is not None:
+if args.undistortedImageMultiplication is not None:
     undistortedImageMultiplication = args.undistortedImageMultiplication
 if args.firstFrameNum is not None:
     firstFrameNum = args.firstFrameNum
-
+if args.nObjects is not None:
+    nObjects = args.nObjects
+else:
+    nObjects = None
 
-objects = storage.loadTrajectoriesFromSqlite(databaseFilename, args.trajectoryType)
+objects = storage.loadTrajectoriesFromSqlite(databaseFilename, args.trajectoryType, nObjects)
 boundingBoxes = storage.loadBoundingBoxTableForDisplay(databaseFilename)
-cvutils.displayTrajectories(videoFilename, objects, boundingBoxes, homography, firstFrameNum, args.lastFrameNum, rescale = args.rescale, nFramesStep = args.nFramesStep, saveAllImages = args.saveAllImages, undistort = (undistort or args.undistort), intrinsicCameraMatrix = intrinsicCameraMatrix, distortionCoefficients = distortionCoefficients, undistortedImageMultiplication = undistortedImageMultiplication)
+cvutils.displayTrajectories(videoFilename, objects, boundingBoxes, invHomography, firstFrameNum, args.lastFrameNum, rescale = args.rescale, nFramesStep = args.nFramesStep, saveAllImages = args.saveAllImages, nZerosFilenameArg = args.nZerosFilenameArg, undistort = (undistort or args.undistort), intrinsicCameraMatrix = intrinsicCameraMatrix, distortionCoefficients = distortionCoefficients, undistortedImageMultiplication = undistortedImageMultiplication)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/extract-appearance-images.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,88 @@
+#! /usr/bin/env python3
+
+import numpy as np, cv2
+import argparse, os
+from pandas import read_csv
+from matplotlib.pyplot import imshow, figure
+
+from trafficintelligence import cvutils, moving, ml, storage
+
+parser = argparse.ArgumentParser(description='The program extracts labeled image patches to train the HoG-SVM classifier, and optionnally speed information')
+parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file', required = True)
+parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file (overrides the configuration file)')
+parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file (overrides the configuration file)')
+parser.add_argument('--gt', dest = 'classificationAnnotationFilename', help = 'name of the file containing the correct classes (user types)', required = True)
+parser.add_argument('--delimiter', dest = 'classificationAnnotationFilenameDelimiter', help = 'delimiter for the fields in the correct classification file', default= ' ')
+parser.add_argument('-s', dest = 'nFramesStep', help = 'number of frames between each saved patch', default = 50, type = int)
+parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to use to extract patches from', type = int, default = None)
+parser.add_argument('--start-frame0', dest = 'startFrame0', help = 'starts with first frame for videos with index problem where frames cannot be reached', action = 'store_true')
+parser.add_argument('-o', dest = 'overlap', help = 'maximum intersection over union of the features nFramesStep apart to save image', type = float, default = 0.2)
+parser.add_argument('--extract-all', dest = 'extractAllObjectImages', help = 'extracts the images for all objects, well classified or not (otherwise, extracts only for the misclassified)', action = 'store_true')
+parser.add_argument('--prefix', dest = 'imagePrefix', help = 'image prefix', default = 'img')
+parser.add_argument('--ouput', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', default = '.')
+parser.add_argument('--compute-speed-distributions', dest = 'computeSpeedDistribution', help = 'computes the distribution of the road users of each type and fits parameters to each', action = 'store_true')
+
+args = parser.parse_args()
+params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum = storage.processVideoArguments(args)
+classifierParams = storage.ClassifierParameters(params.classifierFilename)
+
+classificationAnnotations = read_csv(args.classificationAnnotationFilename, index_col=0, delimiter = args.classificationAnnotationFilenameDelimiter, names = ["object_num", "road_user_type"])
+annotatedObjectNumbers = classificationAnnotations.index.tolist()
+
+# objects has the objects for which we want to extract labeled images
+if args.extractAllObjectImages:
+    objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', args.nObjects, withFeatures = True)
+else:
+    if len(annotatedObjectNumbers) > args.nObjects:
+        classificationAnnotations = classificationAnnotations[:args.nObjects]
+        annotatedObjectNumbers = classificationAnnotations.index.tolist()
+    objects = storage.loadTrajectoriesFromSqlite(databaseFilename, 'object', annotatedObjectNumbers, withFeatures = True)
+for obj in objects:
+    if obj.getNum() in annotatedObjectNumbers:
+        obj.setUserType(classificationAnnotations.loc[obj.getNum(), 'road_user_type'])
+timeInterval = moving.TimeInterval.unionIntervals([obj.getTimeInterval() for obj in objects])
+if args.startFrame0:
+    timeInterval.first = 0
+
+for userType in classificationAnnotations['road_user_type'].unique():
+    if not os.path.exists(args.directoryName+os.sep+moving.userTypeNames[userType]):
+        os.mkdir(args.directoryName+os.sep+moving.userTypeNames[userType])
+
+capture = cv2.VideoCapture(videoFilename)
+width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
+height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
+
+if undistort: # setup undistortion
+    [map1, map2] = cvutils.computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
+    height, width = map1.shape
+
+if capture.isOpened():
+    ret = True
+    frameNum = timeInterval.first
+    if not args.startFrame0:
+        capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
+    lastFrameNum = timeInterval.last
+    while ret and frameNum <= timeInterval.last:
+        ret, img = capture.read()
+        distorted = True
+        if ret:
+            if frameNum%50 == 0:
+                print('frame number: {}'.format(frameNum))
+            for obj in objects[:]:
+                if obj.existsAtInstant(frameNum):
+                    if (10+frameNum-obj.getFirstInstant())%args.nFramesStep == 0:
+                        currentImageFeatures = set([f.num for f in obj.getFeatures() if f.existsAtInstant(frameNum)])
+                        if not hasattr(obj, 'lastImageFeatures') or len(currentImageFeatures.intersection(obj.lastImageFeatures))/len(currentImageFeatures.union(obj.lastImageFeatures)) < args.overlap:
+                            obj.lastImageFeatures = currentImageFeatures
+                            if undistort and distorted: # undistort only if necessary
+                                img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
+                                distorted = False
+                            croppedImg = cvutils.imageBox(img, obj, frameNum, invHomography, width, height, classifierParams.percentIncreaseCrop, classifierParams.percentIncreaseCrop, classifierParams.minNPixels)
+                            if croppedImg is not None:
+                                cv2.imwrite(args.directoryName+os.sep+moving.userTypeNames[obj.getUserType()]+os.sep+args.imagePrefix+'-{}-{}.png'.format(obj.getNum(), frameNum), croppedImg)
+                    elif obj.getLastInstant() == frameNum:
+                        objects.remove(obj)
+        frameNum += 1
+
+# todo speed info: distributions AND min speed equiprobable
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/extract-camera-parameters.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,19 @@
+#! /usr/bin/env python3
+
+import argparse
+
+from trafficintelligence import storage, cvutils
+
+parser = argparse.ArgumentParser(description='The program extracts the intrinsic camera from the tacal camera calibration file used by T-Analyst (http://www.tft.lth.se/en/research/video-analysis/co-operation/software/t-analyst/).')
+parser.add_argument('-i', dest = 'filename', help = 'filename of the camera calibration (.tacal)', required = True)
+parser.add_argument('-o', dest = 'outputIntrinsicFilename', help = 'filename of the intrinsic camera matrix', default = 'intrinsic-camera.txt')
+
+args = parser.parse_args()
+
+cameraData = storage.loadPinholeCameraModel(args.filename, True)
+if cameraData is not None:
+    from numpy import savetxt
+    intrinsicCameraMatrix = cvutils.getIntrinsicCameraMatrix(cameraData)
+    distortionCoefficients = cvutils.getDistortionCoefficients(cameraData)
+    savetxt(args.outputIntrinsicFilename, intrinsicCameraMatrix)
+    print(distortionCoefficients)
--- a/scripts/info-video.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/info-video.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,12 +1,14 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import sys, argparse
-import cvutils
 
+from trafficintelligence import cvutils
 
 parser = argparse.ArgumentParser(description='The program displays the video.')
 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file', required = True)
 
 args = parser.parse_args()
 
-cvutils.infoVideo(args.videoFilename)
+videoProperties = cvutils.infoVideo(args.videoFilename)
+for k,v in videoProperties.items():
+    print('Video {}: {}'.format(k, v))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/init-tracking.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,67 @@
+#! /usr/bin/env python3
+
+import sys, argparse, os.path
+from shutil import copy
+
+from trafficintelligence.cvutils import getImagesFromVideo
+from matplotlib.pyplot import imsave
+
+from trafficintelligence import storage, utils
+
+# could try to guess the video
+# check if there is already a tracking.cfg file
+
+parser = argparse.ArgumentParser(description='The program initilizes the files for tracking: copy tracking.cfg, sets up with the video filename, generates a frame image (frame.png) and prints the next commands')
+
+parser.add_argument('-i', dest = 'videoFilename', help = 'filename of the video sequence', required = True)
+parser.add_argument('-n', dest = 'nFrames', help = 'number of frames to extract', type = int)
+
+args = parser.parse_args()
+
+# assumes tracking.cfg is in the parent directory to the directory of the traffic intelligence python modules
+matchingPaths = [s for s in sys.path if 'traffic-intelligence' in s]
+#if len(matchingPaths) > 1:
+#    print('Too many matching paths for Traffic Intelligence modules: {}'.format(matchingPaths))
+if len(matchingPaths) == 0:
+    print('No environment path to Traffic Intelligence modules.\nExiting')
+    sys.exit()
+else:
+    directoryName = matchingPaths[0]
+    if directoryName.endswith('/'):
+        directoryName = directoryName[:-1]
+    if os.path.exists(directoryName+'/tracking.cfg') and not os.path.exists('./tracking.cfg'):
+        f = utils.openCheck(directoryName+'/tracking.cfg')
+        out = utils.openCheck('./tracking.cfg', 'w')
+        for l in f:
+            if 'video-filename' in l:
+                tmp = l.split('=')
+                out.write(tmp[0]+'= '+args.videoFilename+'\n')
+            elif 'database-filename' in l:
+                tmp = l.split('=')
+                out.write(tmp[0]+'= '+utils.removeExtension(args.videoFilename)+'.sqlite\n')                
+            else:
+                out.write(l)
+        f.close()
+        out.close()
+        print('Configuration file tracking.cfg successfully copied to the current directory with video and database filename adapted')
+    if os.path.exists(directoryName+'/classifier.cfg') and not os.path.exists('./classifier.cfg'):
+        copy(directoryName+'/classifier.cfg', 'classifier.cfg')
+        print('Configuration file classifier.cfg successfully copied to the current directory')
+        
+# extract image from video
+if args.nFrames is not None:
+    image = getImagesFromVideo(args.videoFilename, 0, args.nFrames, saveImage = True, outputPrefix = 'frame')
+else:
+    image = getImagesFromVideo(args.videoFilename, saveImage = True, outputPrefix = 'frame')
+print('first video frame successfully copied to the current directory')
+
+# next commands
+print('''--------------------------------------
+Here are a sample of the next command to compute the homography,
+track features, group them in objects and display object trajectories
+--------------------------------------''')
+print('compute-homography -i [frame.png] -w [world_image] -n [npoints] -u [unit_per_pixel]')
+print('(beware of camera distortion)')
+print('feature-based-tracking tracking.cfg --tf')
+print('feature-based-tracking tracking.cfg --gf')
+print('display-trajectories --cfg tracking.cfg -t object')
--- a/scripts/init_tracking.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-#! /usr/bin/env python
-
-import sys, argparse, os.path, storage, utils
-from cvutils import getImagesFromVideo
-from matplotlib.pyplot import imsave
-
-# could try to guess the video
-# check if there is already a tracking.cfg file
-
-parser = argparse.ArgumentParser(description='The program initilizes the files for tracking: copy tracking.cfg, sets up with the video filename, generates a frame image (frame.png) and prints the next commands')
-
-parser.add_argument('-i', dest = 'videoFilename', help = 'filename of the video sequence', required = True)
-
-args = parser.parse_args()
-
-# assumes tracking.cfg is in the parent directory to the directory of the traffic intelligence python modules
-matchingPaths = [s for s in sys.path if 'traffic-intelligence' in s]
-#if len(matchingPaths) > 1:
-#    print('Too many matching paths for Traffic Intelligence modules: {}'.format(matchingPaths))
-if len(matchingPaths) == 0:
-    print('No environment path to Traffic Intelligence modules.\nExiting')
-    sys.exit()
-else:
-    directoryName = matchingPaths[0]
-    if directoryName.endswith('/'):
-        directoryName = directoryName[:-1]
-    if os.path.exists(directoryName+'/../tracking.cfg') and not os.path.exists('./tracking.cfg'):
-        f = storage.openCheck(directoryName+'/../tracking.cfg')
-        out = storage.openCheck('./tracking.cfg', 'w')
-        for l in f:
-            if 'video-filename' in l:
-                tmp = l.split('=')
-                out.write(tmp[0]+'= '+args.videoFilename+'\n')
-            elif 'database-filename' in l:
-                tmp = l.split('=')
-                out.write(tmp[0]+'= '+utils.removeExtension(args.videoFilename)+'.sqlite\n')                
-            else:
-                out.write(l)
-        f.close()
-        out.close()
-        print('Configuration file tracking.cfg successfully copied to the current directory with video and database filename adapted')
-
-# extract image from video
-image = getImagesFromVideo(args.videoFilename, saveImage = True, outputPrefix = 'frame')
-print('first video frame successfully copied to the current directory')
-
-# next commands
-print('--------------------------------------\nHere are a sample of the next command to compute the homography,\ntrack features, group them in objects and display object trajectories\n--------------------------------------')
-print('compute_homography -i [frame.png] -w [world_image] -n [npoints] -u [unit_per_pixel]')
-print('feature-based-tracking tracking.cfg --tf')
-print('feature-based-tracking tracking.cfg --gf')
-print('display-trajectories --cfg tracking.cfg -t object')
--- a/scripts/learn-motion-patterns.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/learn-motion-patterns.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,65 +1,142 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import sys, argparse
 
-#import matplotlib.pyplot as plt
 import numpy as np
+import matplotlib.pyplot as plt
 
-import ml, utils, storage
+from trafficintelligence import ml, utils, storage, moving, processing
 
-parser = argparse.ArgumentParser(description='The program learns prototypes for the motion patterns') #, epilog = ''
+parser = argparse.ArgumentParser(description='''The program clusters trajectories, each cluster being represented by a trajectory. It can either work on the same dataset (database) or different ones, but only does learning or assignment at a time to avoid issues''') #, epilog = ''
 #parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file')
 parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file', required = True)
-parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to display', choices = ['objectfeatures', 'feature', 'object'], default = 'objectfeatures')
-parser.add_argument('-n', dest = 'nTrajectories', help = 'number of the object or feature trajectories to load', type = int, default = None)
+parser.add_argument('-o', dest = 'outputPrototypeDatabaseFilename', help = 'name of the Sqlite database file to save prototypes')
+parser.add_argument('-i', dest = 'inputPrototypeDatabaseFilename', help = 'name of the Sqlite database file for prototypes to start the algorithm with')
+parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to process', choices = ['feature', 'object'], default = 'feature')
+parser.add_argument('--nfeatures-per-object', dest = 'nLongestFeaturesPerObject', help = 'maximum number of features per object to load', type = int)
+parser.add_argument('-n', dest = 'nObjects', help = 'number of the object or feature trajectories to load', type = int, default = None)
 parser.add_argument('-e', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float, required = True)
 parser.add_argument('--metric', dest = 'metric', help = 'metric for the similarity of trajectory points', default = 'cityblock') # default is manhattan distance
 parser.add_argument('-s', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float, required = True)
-parser.add_argument('-c', dest = 'minClusterSize', help = 'minimum cluster size', type = int, default = None)
-parser.add_argument('--display', dest = 'display', help = 'display trajectories', action = 'store_true') # default is manhattan distance
+#parser.add_argument('-c', dest = 'minClusterSize', help = 'minimum cluster size', type = int, default = 0)
+parser.add_argument('--learn', dest = 'learn', help = 'learn', action = 'store_true')
+parser.add_argument('--optimize', dest = 'optimizeCentroid', help = 'recompute centroid at each assignment', action = 'store_true')
+parser.add_argument('--random', dest = 'randomInitialization', help = 'random initialization of clustering algorithm', action = 'store_true')
+parser.add_argument('--subsample', dest = 'positionSubsamplingRate', help = 'rate of position subsampling (1 every n positions)', type = int)
+parser.add_argument('--display', dest = 'display', help = 'display trajectories', action = 'store_true')
+parser.add_argument('--similarities-filename', dest = 'similaritiesFilename', help = 'filename of the similarities')
+parser.add_argument('--save-similarities', dest = 'saveSimilarities', help = 'save computed similarities (in addition to prototypes)', action = 'store_true')
+parser.add_argument('--save-assignments', dest = 'saveAssignments', help = 'saves the assignments of the objects to the prototypes', action = 'store_true')
+parser.add_argument('--assign', dest = 'assign', help = 'assigns the objects to the prototypes and saves the assignments', action = 'store_true')
 
 args = parser.parse_args()
 
-# TODO parameters (random init?) and what to learn from: objects, features, longest features from objects
-# TODO add possibility to cluter with velocities
+# use cases
+# 1. learn proto from one file, save in same or another
+# 2. load proto, load objects (from same or other db), update proto matchings, save proto
+# TODO 3. on same dataset, learn and assign trajectories (could be done with min cluster size)
+# TODO? 4. when assigning, allow min cluster size only to avoid assigning to small clusters (but prototypes are not removed even if in small clusters, can be done after assignment with nmatchings)
 
-trajectoryType = args.trajectoryType
-if args.trajectoryType == 'objectfeatures':
-    trajectoryType = 'object'
+# TODO add possibility to cluster with velocities
+# TODO add possibility to load all trajectories and use minclustersize
+
+if args.learn and args.assign:
+    print('Cannot learn and assign simultaneously')
+    sys.exit(0)
 
-#features = storage.loadTrajectoriesFromSqlite(databaseFilename, args.trajectoryType)
-objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, trajectoryType, withFeatures = (args.trajectoryType == 'objectfeatures'), objectNumbers = args.nTrajectories)
+objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, args.trajectoryType, args.nObjects, timeStep = args.positionSubsamplingRate, nLongestFeaturesPerObject = args.nLongestFeaturesPerObject)
+if args.trajectoryType == 'object' and args.nLongestFeaturesPerObject is not None:
+    objectsWithFeatures = objects
+    objects = [f for o in objectsWithFeatures for f in o.getFeatures()]
+    prototypeType = 'feature'
+else:
+    prototypeType = args.trajectoryType
 
-if args.trajectoryType == 'objectfeatures':
-    features = []
-    for o in objects:
-        tmp = utils.sortByLength(o.getFeatures(), reverse = True)
-        features += tmp[:min(len(tmp), 3)]
-    objects = features
-
-trajectories = [o.getPositions().asArray().T for o in objects]
+# load initial prototypes, if any    
+if args.inputPrototypeDatabaseFilename is not None:
+    initialPrototypes = storage.loadPrototypesFromSqlite(args.inputPrototypeDatabaseFilename, True)
+else:
+    initialPrototypes = []
 
 lcss = utils.LCSS(metric = args.metric, epsilon = args.epsilon)
-nTrajectories = len(trajectories)
+similarityFunc = lambda x,y : lcss.computeNormalized(x, y)
+nTrajectories = len(initialPrototypes)+len(objects)
+if args.similaritiesFilename is not None:
+    similarities = np.loadtxt(args.similaritiesFilename)
+if args.similaritiesFilename is None or similarities.shape[0] != nTrajectories or similarities.shape[1] != nTrajectories:
+    similarities = -np.ones((nTrajectories, nTrajectories))
+
+prototypeIndices, labels = processing.learnAssignMotionPatterns(args.learn, args.assign, objects, similarities, args.minSimilarity, similarityFunc, 0, args.optimizeCentroid, args.randomInitialization, False, initialPrototypes)
 
-similarities = -np.ones((nTrajectories, nTrajectories))
-# for i in xrange(nTrajectories):
-#     for j in xrange(i):
-#         similarities[i,j] = lcss.computeNormalized(trajectories[i], trajectories[j])
-#         similarities[j,i] = similarities[i,j]
+if args.learn:# and not args.assign:
+    prototypes = []
+    for i in prototypeIndices:
+        if i<len(initialPrototypes):
+            prototypes.append(initialPrototypes[i])
+        else:
+            prototypes.append(moving.Prototype(args.databaseFilename, objects[i-len(initialPrototypes)].getNum(), prototypeType))
 
-prototypeIndices, labels = ml.prototypeCluster(trajectories, similarities, args.minSimilarity, lambda x,y : lcss.computeNormalized(x, y), args.minClusterSize) # this line can be called again without reinitializing similarities
+    if args.outputPrototypeDatabaseFilename is None:
+        outputPrototypeDatabaseFilename = args.databaseFilename
+    else:
+        outputPrototypeDatabaseFilename = args.outputPrototypeDatabaseFilename
+        if args.inputPrototypeDatabaseFilename == args.outputPrototypeDatabaseFilename:
+            storage.deleteFromSqlite(args.outputPrototypeDatabaseFilename, 'prototype')
+    storage.savePrototypesToSqlite(outputPrototypeDatabaseFilename, prototypes)
+    if args.display:
+        plt.figure()
+        for p in prototypes:
+            p.getMovingObject().plot()
+        plt.axis('equal')
+        plt.show()
 
-if args.display:
-    from matplotlib.pyplot import figure
-    figure()
-    for i,o in enumerate(objects):
-        if i not in prototypeIndices:
-            if labels[i] < 0:
-                o.plot('kx')
+if args.assign: # not args.learn and  no modification to prototypes, can work with initialPrototypes
+    clusterSizes = ml.computeClusterSizes(labels, prototypeIndices, -1)
+    for i in prototypeIndices:
+        nMatchings = clusterSizes[i]-1 # external prototypes
+        if initialPrototypes[i].nMatchings is None:
+            initialPrototypes[i].nMatchings = nMatchings
+        else:
+            initialPrototypes[i].nMatchings += nMatchings
+    if args.outputPrototypeDatabaseFilename is None:
+        outputPrototypeDatabaseFilename = args.databaseFilename
+    else:
+        outputPrototypeDatabaseFilename = args.outputPrototypeDatabaseFilename
+    storage.setPrototypeMatchingsInSqlite(outputPrototypeDatabaseFilename, initialPrototypes)
+    if args.saveAssignments:
+        if args.trajectoryType == 'object' and args.nLongestFeaturesPerObject is not None:
+            # consider that the object is assigned through its longest features
+            # issues are inconsistencies in the number of matchings per prototype and display (will display features, not objects)
+            objectNumbers = []
+            objectLabels = []
+            i = 0
+            for obj in objectsWithFeatures:
+                objLabels = []
+                for f in obj.getFeatures():
+                    if f == objects[i]:
+                        objLabels.append(labels[i+len(initialPrototypes)])
+                        i += 1
+                    else:
+                        print('Issue with obj {} and feature {} (trajectory {})'.format(obj.getNum(), f.getNum(), i))
+                objectLabels.append(utils.mostCommon(objLabels))
+                objectNumbers.append(obj.getNum())
+            storage.savePrototypeAssignmentsToSqlite(args.databaseFilename, objectNumbers, 'object', objectLabels, initialPrototypes)
+        else:
+            storage.savePrototypeAssignmentsToSqlite(args.databaseFilename, [obj.getNum() for obj in objects], args.trajectoryType, labels[len(initialPrototypes):], initialPrototypes)
+    if args.display:
+        plt.figure()
+        for i,o in enumerate(objects):
+            if labels[i+len(initialPrototypes)] < 0:
+                o.plot('kx-')
             else:
-                o.plot(utils.colors[labels[i]])
-    for i in prototypeIndices:
-            objects[i].plot(utils.colors[i]+'o')
+                o.plot(utils.colors[labels[i+len(initialPrototypes)]])
+        for i,p in enumerate(initialPrototypes):
+            p.getMovingObject().plot(utils.colors[i]+'o')
+        plt.axis('equal')
+        plt.show()
 
-# TODO store the prototypes (if features, easy, if objects, info must be stored about the type)
+if (args.learn or args.assign) and args.saveSimilarities:
+    if args.similaritiesFilename is not None:
+        np.savetxt(args.similaritiesFilename, similarities, '%.4f')
+    else:
+        np.savetxt(utils.removeExtension(args.databaseFilename)+'-prototype-similarities.txt.gz', similarities, '%.4f')
--- a/scripts/learn-poi.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/learn-poi.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import argparse
 
@@ -6,26 +6,34 @@
 from sklearn import mixture
 import matplotlib.pyplot as plt
 
-import storage, ml
+from trafficintelligence import storage, ml
 
-parser = argparse.ArgumentParser(description='The program learns and displays Gaussians fit to beginnings and ends of object trajectories (based on Mohamed Gomaa Mohamed 2015 PhD). TODO: save the data')
+parser = argparse.ArgumentParser(description='The program learns and displays Gaussians fit to beginnings and ends of object trajectories (based on Mohamed Gomaa Mohamed 2015 PhD).')
 parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file', required = True)
 parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories to display', choices = ['feature', 'object'], default = 'object')
+parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to display', type = int)
 parser.add_argument('-norigins', dest = 'nOriginClusters', help = 'number of clusters for trajectory origins', required = True, type = int)
 parser.add_argument('-ndestinations', dest = 'nDestinationClusters', help = 'number of clusters for trajectory destinations (=norigins if not provided)', type = int)
 parser.add_argument('--covariance-type', dest = 'covarianceType', help = 'type of covariance of Gaussian model', default = "full")
 parser.add_argument('-w', dest = 'worldImageFilename', help = 'filename of the world image')
 parser.add_argument('-u', dest = 'unitsPerPixel', help = 'number of units of distance per pixel', type = float, default = 1.)
+parser.add_argument('--display', dest = 'display', help = 'displays points of interests', action = 'store_true') # default is manhattan distance
+parser.add_argument('--assign', dest = 'assign', help = 'assigns the trajectories to the POIs and saves the assignments', action = 'store_true')
+parser.add_argument('--display-paths', dest = 'displayPaths', help = 'displays all possible origin destination if assignment is done', action = 'store_true')
+
+# TODO test Variational Bayesian Gaussian Mixture BayesianGaussianMixture
 
 args = parser.parse_args()
 
-objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, args.trajectoryType)
+objects = storage.loadTrajectoriesFromSqlite(args.databaseFilename, args.trajectoryType, args.nObjects)
 
 beginnings = []
 ends = []
 for o in objects:
     beginnings.append(o.getPositionAt(0).aslist())
     ends.append(o.getPositionAt(int(o.length())-1).aslist())
+    if args.assign:
+        o.od = [-1, -1]
 
 beginnings = np.array(beginnings)
 ends = np.array(ends)
@@ -35,32 +43,56 @@
     nDestinationClusters = args.nOriginClusters
 
 gmmId=0
+models = {}
 for nClusters, points, gmmType in zip([args.nOriginClusters, nDestinationClusters],
-                                   [beginnings, ends],
-                                   ['beginning', 'end']):
+                                      [beginnings, ends],
+                                      ['beginning', 'end']):
     # estimation
-    gmm = mixture.GMM(n_components=nClusters, covariance_type = args.covarianceType)
-    model=gmm.fit(beginnings)
-    if not model.converged_:
+    gmm = mixture.GaussianMixture(n_components=nClusters, covariance_type = args.covarianceType)
+    models[gmmType]=gmm.fit(points)
+    if not models[gmmType].converged_:
         print('Warning: model for '+gmmType+' points did not converge')
+    if args.display or args.assign:
+        labels = models[gmmType].predict(points)
     # plot
-    fig = plt.figure()
-    if args.worldImageFilename is not None and args.unitsPerPixel is not None:
-        img = plt.imread(args.worldImageFilename)
-        plt.imshow(img)
-    labels = ml.plotGMMClusters(model, points, fig, nUnitsPerPixel = args.unitsPerPixel)
-    plt.axis('image')
-    plt.title(gmmType)
-    print(gmmType+' Clusters:\n{}'.format(ml.computeClusterSizes(labels, range(model.n_components))))
+    if args.display:
+        fig = plt.figure()
+        if args.worldImageFilename is not None and args.unitsPerPixel is not None:
+            img = plt.imread(args.worldImageFilename)
+            plt.imshow(img)
+        ml.plotGMMClusters(models[gmmType], labels, points, fig, nUnitsPerPixel = args.unitsPerPixel)
+        plt.axis('image')
+        plt.title(gmmType)
+        print(gmmType+' Clusters:\n{}'.format(ml.computeClusterSizes(labels, range(models[gmmType].n_components))))
     # save
-    storage.savePOIs(args.databaseFilename, model, gmmType, gmmId)
+    storage.savePOIsToSqlite(args.databaseFilename, models[gmmType], gmmType, gmmId)
+    # save assignments
+    if args.assign:
+        for o, l in zip(objects, labels):
+            if gmmType == 'beginning':
+                o.od[0] = l
+            elif gmmType == 'end':
+                o.od[1] = l
     gmmId += 1
-                     
-# fig = plt.figure()
-# if args.worldImageFilename is not None and args.pixelsPerUnit is not None:
-#     img = plt.imread(args.worldImageFilename)
-#     plt.imshow(img)
-# ml.plotGMMClusters(, , fig, nPixelsPerUnit = args.pixelsPerUnit)
-# plt.axis('equal')
-# plt.title()
-# print('Destination Clusters:\n{}'.format(ml.computeClusterSizes(endModel.predict(ends), range(args.nClusters))))
+
+if args.assign:
+    storage.savePOIAssignments(args.databaseFilename, objects)
+    if args.displayPaths:
+        for i in range(args.nOriginClusters):
+            for j in range(args.nDestinationClusters):
+                odObjects = [o for o in objects if o.od[0] == i and o.od[1] == j]
+                if len(odObjects) > 0:
+                    fig = plt.figure()
+                    ax = fig.add_subplot(111)
+                    ml.plotGMM(models['beginning'].means_[i], models['beginning'].covariances_[i], i, fig, 'b')
+                    ml.plotGMM(models['end'].means_[j], models['end'].covariances_[j], j, fig, 'r')
+                    for o in odObjects:
+                        o.plot(withOrigin = True)
+                    plt.title('OD {} to {}'.format(i,j))
+                    plt.axis('equal')
+                    plt.show()
+
+
+if args.display:
+    plt.axis('equal')
+    plt.show()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/manual-video-analysis.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,171 @@
+#! /usr/bin/env python3
+
+import sys, argparse, cv2, numpy as np
+
+parser = argparse.ArgumentParser(description='''The program replays the video and allows to manually id vehicles and mark instants, eg when they cross given areas in the scene. Use this program in combination with a screen marker program (For example, Presentation Assistant) to draw multiple lines on the screen.''',
+                                 epilog = '''The output should give you a .csv file with the same name as your video file with columns in this format:
+vehicle number, frame number
+You can easily spot mistakes in the csv file for a line with number, SKIP. If this happens, just delete the previous vehicle observation.''',
+                                 formatter_class=argparse.RawDescriptionHelpFormatter)
+parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file', required = True)
+parser.add_argument('-o', dest = 'outputFilename', help = 'name of the output file (csv file)')
+parser.add_argument('-f', dest = 'firstFrameNum', help = 'number of first frame number to display', default = 0, type = int)
+parser.add_argument('-n', dest = 'nAttributes', help = 'number of attributes characterizing users', default = 0, type = int)
+
+args = parser.parse_args()
+
+print('''Commands:
+Press o when you make a mistake in input
+Press d to skip 100 frames
+Press s to skip 10 frames
+Press c to go back 100 frames
+Press x to go back 10 frames
+Press spacebar to go forward one frame
+Press l to skip to frame number
+Press s to finish inputting user characteristics (if any in pop up window)
+Press q to quit and end program''')
+# configuration of keys and user types (see moving)
+userTypeNames = ['unknown',
+                 'car',
+                 'pedestrian',
+                 'motorcycle',
+                 'bicycle',
+                 'bus',
+                 'truck']
+class UserConfiguration(object):
+    def __init__(self, name, keyNew, keyAddInstant, nAttributes):
+        self.name = name
+        self.keyNew = ord(keyNew)
+        self.keyAddInstant = ord(keyAddInstant)
+        self.userNum = 0
+        self.nAttributes = nAttributes
+        self.resetAttributes()
+
+    def getHelpStr(self):
+        return 'Press {} for new {}, {} for new instant for current {}'.format(chr(self.keyNew), self.name, chr(self.keyAddInstant), self.name)
+        
+    def resetAttributes(self):
+        self.userInstant = 0
+        self.attributes = [-1]*self.nAttributes
+
+    def setAttribute(self, i, value):
+        self.attributes[i%self.nAttributes] = value
+
+    def getAttributeStr(self):
+        if self.nAttributes > 0:
+            return ','.join([str(i) for i in self.attributes])+','
+        else:
+            return ''
+        
+    def isKeyNew(self, k):
+        return (k == self.keyNew)
+
+    def isKeyAddInstant(self, k):
+        return (k == self.keyAddInstant)
+    
+    def isKey(self, k):
+        return self.isKeyNew(k) or self.isKeyAddInstant(k)
+
+    @staticmethod
+    def getConfigurationWithKey(configurations, k):
+        for c in configurations:
+            if c.isKey(k):
+                return c
+        return None
+
+userConfigurations = [UserConfiguration(userTypeNames[1],'u','i', args.nAttributes),
+                      UserConfiguration(userTypeNames[2],'j','k', args.nAttributes)]
+
+print(' ')
+for c in userConfigurations:
+    print(c.getHelpStr())
+
+# start of program
+cap = cv2.VideoCapture(args.videoFilename)
+cap.set(cv2.CAP_PROP_POS_FRAMES, args.firstFrameNum)
+fps = cap.get(cv2.CAP_PROP_FPS)
+print('Video at {} frames/s'.format(fps))
+cv2.namedWindow('Video', cv2.WINDOW_NORMAL)
+
+# output filename
+if args.outputFilename is None:
+    i = args.videoFilename.rfind('.')
+    if i>0:
+        outputFilename = args.videoFilename[:i]+'.csv'
+    else:
+        outputFilename = args.videoFilename+'.csv'
+else:
+    outputFilename = args.outputFilename
+vehNumber = 0
+lineNum = -1
+out = open(outputFilename, 'a')
+
+while(cap.isOpened()):
+    ret, frame = cap.read()
+    frameNum = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
+    cv2.putText(frame, str(frameNum), (1,20), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0,0))
+    cv2.imshow('Video',frame)
+
+    key= cv2.waitKey(0)
+
+    if key == ord('q'):
+        break
+    else:
+        config = UserConfiguration.getConfigurationWithKey(userConfigurations, key)
+        if config is not None:
+            if config.isKeyNew(key):
+                config.userNum += 1
+                config.resetAttributes()
+                print('New {} {}'.format(config.name, config.userNum))
+                if args.nAttributes > 0:
+                    key2 = ord('1')
+                    cv2.namedWindow('Input', cv2.WINDOW_NORMAL)
+                    attributeNum = 0
+                    while key2 != ord('s'):
+                        attrImg = 255*np.ones((20*args.nAttributes, 20, 3))
+                        for i in range(args.nAttributes):
+                            if i == (attributeNum%args.nAttributes):
+                                cv2.putText(attrImg, str(config.attributes[i]), (1,20*(i+1)), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255))
+                            else:
+                                cv2.putText(attrImg, str(config.attributes[i]), (1,20*(i+1)), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
+                        cv2.imshow('Input', attrImg)
+                        key2 = cv2.waitKey(0)
+                        if chr(key2).isdigit():
+                            config.setAttribute(attributeNum, chr(key2))
+                            attributeNum += 1
+                    cv2.destroyWindow('Input')
+            elif config.isKeyAddInstant(key):
+                config.userInstant += 1
+                print('User {} no {} at line {}'.format(config.name, config.userNum, config.userInstant))
+            out.write('{},{},{}{},{}\n'.format(config.userNum, config.name, config.getAttributeStr(), config.userInstant, frameNum))
+            oldUserConfig = config
+        if key == ord('o'):
+            print('SKIPPED')
+            out.write('{},{},SKIP\n'.format(oldUserConfig.userNum, oldUserConfig.name))
+        elif key == ord('d'):
+            cap.set(cv2.CAP_PROP_POS_FRAMES,frameNum+100)
+        elif key == ord('s'):
+            cap.set(cv2.CAP_PROP_POS_FRAMES,frameNum+10)
+        elif key == ord('a'):
+            cap.set(cv2.CAP_PROP_POS_FRAMES,frameNum+1)
+        elif key == ord('x'):
+            cap.set(cv2.CAP_PROP_POS_FRAMES,frameNum-10)
+        elif key == ord('c'):
+            cap.set(cv2.CAP_PROP_POS_FRAMES,frameNum-100)
+        elif key == ord('l'):
+            frameNum = int(input("Please enter the frame number you would like to skip to\n"))
+            cap.set(cv2.CAP_PROP_POS_FRAMES,frameNum)
+    
+out.close()
+cap.release()
+cv2.destroyAllWindows()
+
+#97a
+#115s
+#100d
+#102f
+#103g
+#104h
+#106j
+#107k
+#108l
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/merge-features.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,107 @@
+#! /usr/bin/env python3
+
+import sys, argparse, os.path, sqlite3
+from datetime import datetime, timedelta
+
+from trafficintelligence import cvutils, utils, moving, storage
+from trafficintelligence.metadata import connectDatabase, Site, VideoSequence, CameraView, getSite
+
+timeConverter = utils.TimeConverter()
+
+parser = argparse.ArgumentParser(description='The program merges feature trajectories recorded from the same site synchronously between start and end time.')
+parser.add_argument('--db', dest = 'metadataFilename', help = 'name of the metadata file', required = True)
+parser.add_argument('-n', dest = 'siteId', help = 'site id or site name', required = True)
+parser.add_argument('-f', dest = 'startTime', help = 'time to start merging features (format %%Y-%%m-%%d %%H:%%M:%%S, eg 2011-06-22 10:00:39)', type = timeConverter.convert) # if not provided, take common time interval
+parser.add_argument('-l', dest = 'endTime', help = 'time to stop merging features (format %%Y-%%m-%%d %%H:%%M:%%S, eg 2011-06-22 10:00:39)', type = timeConverter.convert)
+parser.add_argument('-o', dest = 'outputDBFilename', help = 'name of the output SQLite file', required = True)
+
+args = parser.parse_args()
+
+session = connectDatabase(args.metadataFilename)
+
+site = getSite(session, args.siteId)
+if site is None:
+    print('Site {} was not found in {}. Exiting'.format(args.siteId, args.metadataFilename))
+    sys.exit()
+else:
+    site = site[0]
+
+startTime = datetime.strptime(args.startTime, utils.datetimeFormat)
+endTime = datetime.strptime(args.endTime, utils.datetimeFormat)
+processInterval = moving.TimeInterval(startTime, endTime)
+cameraViews = session.query(CameraView).filter(CameraView.site == site).filter(CameraView.virtual == False)
+videoSequences = session.query(VideoSequence).filter(VideoSequence.virtual == False).order_by(VideoSequence.startTime.asc()).all() #.order_by(VideoSequence.cameraViewIdx) .filter(VideoSequence.startTime <= startTime)
+videoSequences = [vs for vs in videoSequences if vs.cameraView in cameraViews]
+#timeIntervals = [v.intersection(startTime, endTime) for v in videoSequences]
+#cameraViews = set([v.cameraView for v in videoSequences])
+
+videoSequences = {cv: [v for v in videoSequences if v.cameraView == cv] for cv in cameraViews}
+timeIntervals = {}
+for cv in videoSequences:
+    timeIntervals[cv] = moving.TimeInterval.unionIntervals([v.getTimeInterval() for v in videoSequences[cv]])
+
+# intersection of the time interval (union) for each camera view
+commonTimeInterval = list(timeIntervals.values())[0]
+for inter in timeIntervals.values()[1:]:
+    commonTimeInterval = moving.TimeInterval.intersection(commonTimeInterval, inter)
+commonTimeInterval = moving.TimeInterval.intersection(commonTimeInterval, processInterval)
+
+if commonTimeInterval.empty():
+    print('Empty time interval. Exiting')
+    sys.exit()
+
+if len(set([cv.cameraType.frameRate for cv in cameraViews])) > 1:
+    print('Different framerates of the cameras ({}) are not handled yet. Exiting'.format([cv.cameraType.frameRate for cv in cameraViews]))
+else:
+    frameRate = cv.cameraType.frameRate
+
+try:
+    outConnection = sqlite3.connect(args.outputDBFilename)
+    outCursor = outConnection.cursor()
+    storage.createTrajectoryTable(outCursor, 'positions')
+    storage.createTrajectoryTable(outCursor, 'velocities')
+    storage.createFeatureCorrespondenceTable(outCursor)
+    outConnection.commit()
+except sqlite3.OperationalError as error:
+    storage.printDBError(error)
+    sys.exit()
+
+dirname = os.path.split(args.metadataFilename)[0]
+if len(dirname) == 0:
+    dirname = '.'
+
+newTrajectoryId = -1
+# first frame num is commonTimeInterval
+for cv, vs in videoSequences.items():
+    print(cv.idx, cv.description)
+    for videoSequence in vs:
+        try:
+            vsConnection = sqlite3.connect(dirname+os.path.sep+videoSequence.getDatabaseFilename())
+            vsCursor = vsConnection.cursor()
+            firstFrameNum = utils.deltaFrames(videoSequence.startTime, commonTimeInterval.first, frameRate)
+            lastFrameNum = (commonTimeInterval.last-videoSequence.startTime).seconds*frameRate
+            # positions table
+            vsCursor.execute('SELECT * FROM positions WHERE frame_number BETWEEN {} AND {} ORDER BY trajectory_id'.format(firstFrameNum, lastFrameNum))
+            featureIdCorrespondences = {}
+            currentTrajectoryId = -1
+            for row in vsCursor:
+                if row[0] != currentTrajectoryId:
+                    currentTrajectoryId = row[0]
+                    newTrajectoryId += 1
+                    featureIdCorrespondences[currentTrajectoryId] = newTrajectoryId
+                outCursor.execute(storage.insertTrajectoryQuery('positions'), (newTrajectoryId, row[1]-firstFrameNum, row[2], row[3]))
+            # velocities table
+            for row in vsCursor:
+                outCursor.execute(storage.insertTrajectoryQuery('velocities'), (featureIdCorrespondences[row[0]], row[1]-firstFrameNum, row[2], row[3]))
+            # saving the id correspondences
+            for oldId, newId in featureIdCorrespondences.items():
+                outCursor.execute("INSERT INTO feature_correspondences (trajectory_id, source_dbname, db_trajectory_id) VALUES ({},\"{}\",{})".format(newId, videoSequence.name, oldId))
+            outConnection.commit()
+        except sqlite3.OperationalError as error:
+            storage.printDBError(error)
+            
+# save the information of the new virtual sequence and camera view in the metadata
+mergedCameraView = CameraView('merged', None, site, cv.cameraType, None, None, virtual = True)
+session.add(mergedCameraView)
+session.add(VideoSequence('merged', commonTimeInterval.first, commonTimeInterval.last-commonTimeInterval.first, mergedCameraView, virtual = True))
+session.commit()
--- a/scripts/performance-db.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/performance-db.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,8 +1,8 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
-import sys, shutil, os, sqlite3, timeit#, argparse
+import sys, shutil, os, sqlite3, timeit
 
-import storage
+from trafficintelligence import storage
 
 if len(sys.argv) >= 2:
     dbFilename = sys.argv[1]
@@ -15,14 +15,14 @@
 connection = sqlite3.connect(dbFilename)
 
 nFeatures=storage.getNumberRowsTable(connection, "positions", "trajectory_id")
-print dbFilename, nFeatures
+print(dbFilename, nFeatures)
 
 ####
 # test query tmp trajectory instant table
 ####
 def selectTrajectoryIdInstant(connection, lastInstant):
     cursor = connection.cursor()
-    for i in xrange(lastInstant):
+    for i in range(lastInstant):
 	cursor.execute("select trajectory_id from trajectory_instants where last_instant = {}".format(lastInstant))
         cursor.fetchall()
 
@@ -44,7 +44,7 @@
 ####
 def selectTrajectories(connection, nFeatures):
     cursor = connection.cursor()
-    for i in xrange(nFeatures):
+    for i in range(nFeatures):
 	cursor.execute("select * from positions where trajectory_id = {} order by frame_number".format(i))
         cursor.fetchall()
 
@@ -66,7 +66,7 @@
     ####
     print("with index on trajectory_id")
     storage.createIndex(connection, "positions", "trajectory_id")#sqlite3 $dbFilename "create index trajectory_id_index on positions(trajectory_id)"
-    print timeit.timeit("selectTrajectories(connection, nFeatures)", setup="from __main__ import selectTrajectories, connection, nFeatures", number = 100)
+    print(timeit.timeit("selectTrajectories(connection, nFeatures)", setup="from __main__ import selectTrajectories, connection, nFeatures", number = 100))
 
 #### Cleanup
 os.remove(dbFilename)
--- a/scripts/performance-lcss.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/performance-lcss.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import timeit
 
@@ -6,7 +6,7 @@
 number = 10
 
 print('Default Python implementation with lambda')
-print timeit.timeit('lcss.compute(random_sample(({},2)), random_sample(({}, 2)))'.format(vectorLength, vectorLength*2), setup = 'from utils import LCSS; from numpy.random import random_sample; lcss = LCSS(similarityFunc = lambda x,y: (abs(x[0]-y[0]) <= 0.1) and (abs(x[1]-y[1]) <= 0.1));', number = number)
+print(timeit.timeit('lcss.compute(random_sample(({},2)), random_sample(({}, 2)))'.format(vectorLength, vectorLength*2), setup = 'from utils import LCSS; from numpy.random import random_sample; lcss = LCSS(similarityFunc = lambda x,y: (abs(x[0]-y[0]) <= 0.1) and (abs(x[1]-y[1]) <= 0.1));', number = number))
 
 print('Using scipy distance.cdist')
-print timeit.timeit('lcss.compute(random_sample(({},2)), random_sample(({}, 2)))'.format(vectorLength, vectorLength*2), setup = 'from utils import LCSS; from numpy.random import random_sample; lcss = LCSS(metric = "cityblock", epsilon = 0.1);', number = number)
+print(timeit.timeit('lcss.compute(random_sample(({},2)), random_sample(({}, 2)))'.format(vectorLength, vectorLength*2), setup = 'from utils import LCSS; from numpy.random import random_sample; lcss = LCSS(metric = "cityblock", epsilon = 0.1);', number = number))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/play-synced-videos.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,40 @@
+#! /usr/bin/env python3
+
+import sys, argparse, os.path
+from datetime import datetime, timedelta
+
+from trafficintelligence import cvutils, utils
+from trafficintelligence.metadata import connectDatabase, Site, CameraView, VideoSequence, getSite
+
+timeConverter = utils.TimeConverter()
+
+parser = argparse.ArgumentParser(description='The program displays several views of the same site synchronously.')
+parser.add_argument('--db', dest = 'metadataFilename', help = 'name of the metadata file', required = True)
+parser.add_argument('-n', dest = 'siteId', help = 'site id or site name', required = True)
+parser.add_argument('-f', dest = 'startTime', help = 'time to start playing (format %%Y-%%m-%%d %%H:%%M:%%S, eg 2011-06-22 10:00:39)', required = True, type = timeConverter.convert)
+parser.add_argument('--fps', dest = 'frameRate', help = 'approximate frame rate to replay', default = -1, type = float)
+parser.add_argument('-r', dest = 'rescale', help = 'rescaling factor for the displayed image', default = 1., type = float)
+parser.add_argument('-s', dest = 'step', help = 'display every s image', default = 1, type = int)
+
+args = parser.parse_args()
+
+session = connectDatabase(args.metadataFilename)
+
+site = getSite(session, args.siteId)
+if site is None:
+    print('Site {} was not found in {}. Exiting'.format(args.siteId, args.metadataFilename))
+    sys.exit()
+else:
+    site = site[0]
+
+dirname = os.path.split(args.metadataFilename)[0]
+
+startTime = datetime.strptime(args.startTime, utils.datetimeFormat)
+cameraViews = session.query(CameraView).filter(CameraView.site == site)
+videoSequences = session.query(VideoSequence).filter(VideoSequence.name != None).filter(VideoSequence.startTime <= startTime).all()
+#videoSequences = session.query(VideoSequence).filter(VideoSequence.site == site).filter(VideoSequence.startTime <= startTime).all()
+videoSequences = [v for v in videoSequences if v.containsInstant(startTime) and v.cameraView in cameraViews]
+filenames = [dirname+os.path.sep+v.getVideoSequenceFilename() for v in videoSequences]
+firstFrameNums = [v.getFrameNum(startTime) for v in videoSequences]
+
+cvutils.playVideo(filenames, [v.cameraView.description for v in videoSequences], firstFrameNums, args.frameRate, rescale = args.rescale, step = args.step)
--- a/scripts/play-video.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/play-video.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,13 +1,13 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import sys, argparse
-import cvutils
 
+from trafficintelligence import cvutils
 
 parser = argparse.ArgumentParser(description='The program displays the video.')
 parser.add_argument('-i', dest = 'videoFilename', help = 'name of the video file', required = True)
 parser.add_argument('-f', dest = 'firstFrameNum', help = 'number of first frame number to display', default = 0, type = int)
-parser.add_argument('--fps', dest = 'frameRate', help = 'approximate frame rate to replay', type = float)
+parser.add_argument('--fps', dest = 'frameRate', help = 'approximate frame rate to replay', default = -1, type = float)
 parser.add_argument('-r', dest = 'rescale', help = 'rescaling factor for the displayed image', default = 1., type = float)
 parser.add_argument('-s', dest = 'step', help = 'display every s image', default = 1, type = int)
 
@@ -17,8 +17,4 @@
 if args.firstFrameNum is not None:
     firstFrameNum = args.firstFrameNum
 
-frameRate = -1
-if args.frameRate is not None:
-    frameRate = args.frameRate
-
-cvutils.playVideo(args.videoFilename, firstFrameNum, frameRate, rescale = args.rescale, step = args.step)
+cvutils.playVideo([args.videoFilename], None, [firstFrameNum], args.frameRate, rescale = args.rescale, step = args.step)
--- a/scripts/polytracktopdtv.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/polytracktopdtv.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,14 +1,12 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
-from pdtv import TsaiCamera, ZipVideo, SyncedVideos, TrackSet, Track, State
 import sys, os, datetime, argparse
-import shutil
-import sqlite3
-import zipfile
-import utils
-import cvutils
+import shutil, sqlite3, zipfile
+
 import cv2
+from pdtv import TsaiCamera, ZipVideo, SyncedVideos, TrackSet, Track, State
 
+from trafficintelligence import utils, cvutils
 
 def zipFolder(inputFolder, outputFile):
     '''Method to compress the content of the inputFolder in the outputFile'''
@@ -68,7 +66,6 @@
     frameList = cvutils.getImagesFromVideo(videoFile, firstFrameNum = currentIdx, nFrames = inc)
     
     while len(frameList) == inc and inc > 0:
-        
         for f in frameList:
             cv2.imwrite(os.path.join(framePath,time.strftime("%Y%m%d-%H%M%S.%f")[:-3]+'.jpg'), f)
             time += datetime.timedelta(microseconds=deltaTimestamp*1000)
@@ -80,7 +77,7 @@
                 inc = delta        
         if inc:
             frameList = cvutils.getImagesFromVideo(videoFile, firstFrameNum = currentIdx, nFrames = inc)
-        print('Extracting frame ' + str(currentIdx))
+        print('Extracting frame {}'.format(currentIdx))
     return len(frameList) > 0
 
     
@@ -115,7 +112,7 @@
     
     if videoFile is not None:
         fps = cvutils.getFPS(videoFile)
-        print('Video should run at ' + str(fps) + ' fps')
+        print('Video should run at {} fps'.format(fps))
         deltaTimestamp = 1000.0/float(fps);
         if videoFolderExist == False:
             if os.path.exists(videoFolderPath):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/process.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,415 @@
+#! /usr/bin/env python3
+
+import sys, argparse
+from pathlib import Path
+from multiprocessing.pool import Pool
+
+#import matplotlib
+#matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+
+from trafficintelligence import storage, events, prediction, cvutils, utils, moving, processing, ml
+from trafficintelligence.metadata import *
+
+parser = argparse.ArgumentParser(description='This program manages the processing of several files based on a description of the sites and video data in an SQLite database following the metadata module.')
+# input
+parser.add_argument('--db', dest = 'metadataFilename', help = 'name of the metadata file', required = True)
+parser.add_argument('--videos', dest = 'videoIds', help = 'indices of the video sequences', nargs = '*')
+parser.add_argument('--sites', dest = 'siteIds', help = 'indices of the sites', nargs = '*')
+
+# main function
+parser.add_argument('--delete', dest = 'delete', help = 'data to delete', choices = ['feature', 'object', 'classification', 'interaction'])
+parser.add_argument('--process', dest = 'process', help = 'data to process', choices = ['feature', 'object', 'classification', 'prototype', 'interaction'])
+parser.add_argument('--display', dest = 'display', help = 'data to display (replay over video)', choices = ['feature', 'object', 'classification', 'interaction'])
+parser.add_argument('--progress', dest = 'progress', help = 'information about the progress of processing', action = 'store_true')
+parser.add_argument('--analyze', dest = 'analyze', help = 'data to analyze (results)', choices = ['feature', 'object', 'classification', 'interaction', 'event-speed', 'event-interaction'])
+
+# common options
+parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file')
+parser.add_argument('-n', dest = 'nObjects', help = 'number of objects/interactions to process', type = int)
+parser.add_argument('-t', dest = 'trajectoryType', help = 'type of trajectories', choices = ['feature', 'object'], default = 'feature')
+parser.add_argument('--dry', dest = 'dryRun', help = 'dry run of processing', action = 'store_true')
+parser.add_argument('--nthreads', dest = 'nProcesses', help = 'number of processes to run in parallel', type = int, default = 1)
+parser.add_argument('--subsample', dest = 'positionSubsamplingRate', help = 'rate of position subsampling (1 every n positions)', type = int)
+
+### process options
+# motion pattern learning and assignment
+parser.add_argument('--prototype-filename', dest = 'outputPrototypeDatabaseFilename', help = 'name of the Sqlite database file to save prototypes', default = 'prototypes.sqlite')
+#parser.add_argument('-i', dest = 'inputPrototypeDatabaseFilename', help = 'name of the Sqlite database file for prototypes to start the algorithm with')
+parser.add_argument('--nobjects-mp', dest = 'nMPObjects', help = 'number of objects to subsample for motion pattern learning', type = int)
+parser.add_argument('--nfeatures-per-object', dest = 'nLongestFeaturesPerObject', help = 'maximum number of features per object to load', type = int)
+parser.add_argument('--epsilon', dest = 'epsilon', help = 'distance for the similarity of trajectory points', type = float)
+parser.add_argument('--metric', dest = 'metric', help = 'metric for the similarity of trajectory points', default = 'cityblock') # default is manhattan distance
+parser.add_argument('--minsimil', dest = 'minSimilarity', help = 'minimum similarity to put a trajectory in a cluster', type = float)
+parser.add_argument('--min-cluster-size', dest = 'minClusterSize', help = 'minimum cluster size', type = int, default = 0)
+#parser.add_argument('--learn', dest = 'learn', help = 'learn', action = 'store_true')
+parser.add_argument('--optimize', dest = 'optimizeCentroid', help = 'recompute centroid at each assignment', action = 'store_true')
+parser.add_argument('--random', dest = 'randomInitialization', help = 'random initialization of clustering algorithm', action = 'store_true')
+#parser.add_argument('--similarities-filename', dest = 'similaritiesFilename', help = 'filename of the similarities')
+parser.add_argument('--save-similarities', dest = 'saveSimilarities', help = 'save computed similarities (in addition to prototypes)', action = 'store_true')
+parser.add_argument('--save-assignments', dest = 'saveAssignments', help = 'saves the assignments of the objects to the prototypes', action = 'store_true')
+parser.add_argument('--assign', dest = 'assign', help = 'assigns the objects to the prototypes and saves the assignments', action = 'store_true')
+
+# safety analysis
+parser.add_argument('--prediction-method', dest = 'predictionMethod', help = 'prediction method (constant velocity (cvd: vector computation (approximate); cve: equation solving; cv: discrete time (approximate)), normal adaptation, point set prediction)', choices = ['cvd', 'cve', 'cv', 'na', 'ps', 'mp'])
+parser.add_argument('--pet', dest = 'computePET', help = 'computes PET', action = 'store_true')
+# override other tracking config, erase sqlite?
+
+
+# analysis options
+parser.add_argument('--output', dest = 'output', help = 'kind of output to produce (interval means)', choices = ['figure', 'interval', 'event'])
+parser.add_argument('--min-duration', dest = 'minDuration', help = 'minimum duration we have to see the user or interaction to take into account in the analysis (s)', type = float)
+parser.add_argument('--interval-duration', dest = 'intervalDuration', help = 'length of time interval to aggregate data (min)', type = int, default = 15)
+parser.add_argument('--aggregation', dest = 'aggMethods', help = 'aggregation method per user/interaction and per interval', choices = ['mean', 'median', 'centile'], nargs = '*', default = ['median'])
+parser.add_argument('--aggregation-centiles', dest = 'aggCentiles', help = 'centile(s) to compute from the observations', nargs = '*', type = int)
+parser.add_argument('--event-thresholds', dest = 'eventThresholds', help = 'threshold to count severe situations', nargs = '*', type = float)
+parser.add_argument('--event-filename', dest = 'eventFilename', help = 'filename of the event data')
+dpi = 150
+# unit of analysis: site - camera-view
+
+# need way of selecting sites as similar as possible to sql alchemy syntax
+# override tracking.cfg from db
+# manage cfg files, overwrite them (or a subset of parameters)
+# delete sqlite files
+# info of metadata
+
+args = parser.parse_args()
+
+#################################
+# Data preparation
+#################################
+session = connectDatabase(args.metadataFilename)
+parentPath = Path(args.metadataFilename).parent # files are relative to metadata location
+videoSequences = []
+sites = []
+if args.videoIds is not None:
+    for videoId in args.videoIds:
+        if '-' in videoId:
+            videoSequences.extend([session.query(VideoSequence).get(i) for i in moving.TimeInterval.parse(videoId)])
+        else:
+            videoSequences.append(session.query(VideoSequence).get(int(videoId)))
+    videoSequences = [vs for vs in videoSequences if vs is not None]
+    sites = set([vs.cameraView.site for vs in videoSequences])
+elif args.siteIds is not None:
+    for siteId in args.siteIds:
+        if '-' in siteId:
+            sites.extend([session.query(Site).get(i) for i in moving.TimeInterval.parse(siteId)])
+        else:
+            sites.append(session.query(Site).get(int(siteId)))
+    sites = [s for s in sites if s is not None]
+    for site in sites:
+        videoSequences.extend(getSiteVideoSequences(site))
+else:
+    print('No video/site to process')
+
+if args.nProcesses > 1:
+    pool = Pool(args.nProcesses)
+
+#################################
+# Report progress in the processing
+#################################
+if args.progress: # TODO find video sequences that have null camera view, to work with them
+    print('Providing information on progress of data processing')
+    headers = ['site', 'vs', 'features', 'objects', 'interactions'] # todo add prototypes and object classification
+    data = []
+    for site in sites:
+        unprocessedVideoSequences = []
+        for vs in getSiteVideoSequences(site):
+            if (parentPath/vs.getDatabaseFilename()).is_file(): # TODO check time of file?
+                tableNames = storage.tableNames(str(parentPath.absolute()/vs.getDatabaseFilename()))
+                data.append([site.name, vs.idx, 'positions' in tableNames, 'objects' in tableNames, 'interactions' in tableNames])
+            else:
+                unprocessedVideoSequences.append(vs)
+                data.append([site.name, vs.idx, False, False, False])
+        #if len(unprocessedVideoSequences):
+        #    print('Site {} ({}) has {} completely unprocessed video sequences'.format (site.name, site.idx, len(unprocessedVideoSequences)))
+    videoSequences = session.query(VideoSequence).filter(VideoSequence.cameraViewIdx.is_(None)).all()
+    data = pd.DataFrame(data, columns = headers)
+    print('-'*80)
+    print('\t'+' '.join(headers[2:]))
+    print('-'*80)
+    for name, group in data.groupby(['site']): #.agg({'vs': 'count'}))
+        n = group.vs.count()
+        print('{}: {} % / {} % / {} % ({})'.format(name, 100*group.features.sum()/float(n), 100*group.objects.sum()/float(n), 100*group.interactions.sum()/float(n), n))
+    print('-'*80)
+    if len(videoSequences) > 0:
+        print('{} video sequences without a camera view:'.format(len(videoSequences)))
+        print([vs.idx for vs in videoSequences])
+        print('-'*80)
+    print(data)
+
+#################################
+# Delete
+#################################
+if args.delete is not None:
+    if args.delete == 'feature':
+        response = input('Are you sure you want to delete the tracking results (SQLite files) of all these sites (y/n)?')
+        if response == 'y':
+            for vs in videoSequences:
+                p = parentPath.absolute()/vs.getDatabaseFilename()
+                p.unlink()
+    elif args.delete in ['object', 'interaction']:
+        #parser.add_argument('-t', dest = 'dataType', help = 'type of the data to remove', required = True, choices = ['object','interaction', 'bb', 'pois', 'prototype'])
+        for vs in videoSequences:
+            storage.deleteFromSqlite(str(parentPath/vs.getDatabaseFilename()), args.delete)
+
+#################################
+# Process
+#################################
+if args.process in ['feature', 'object']: # tracking
+    if args.nProcesses == 1:
+        for vs in videoSequences:
+            if not (parentPath/vs.getDatabaseFilename()).is_file() or args.process == 'object':
+                if args.configFilename is None:
+                    configFilename = str(parentPath/vs.cameraView.getTrackingConfigurationFilename())
+                else:
+                    configFilename = args.configFilename
+                if vs.cameraView.cameraType is None:
+                    cvutils.tracking(configFilename, args.process == 'object', str(parentPath.absolute()/vs.getVideoSequenceFilename()), str(parentPath.absolute()/vs.getDatabaseFilename()), str(parentPath.absolute()/vs.cameraView.getHomographyFilename()), str(parentPath.absolute()/vs.cameraView.getMaskFilename()), False, None, None, args.dryRun)
+                else: #caution: cameratype can be not none, but without parameters for undistortion
+                    cvutils.tracking(configFilename, args.process == 'object', str(parentPath.absolute()/vs.getVideoSequenceFilename()), str(parentPath.absolute()/vs.getDatabaseFilename()), str(parentPath.absolute()/vs.cameraView.getHomographyFilename()), str(parentPath.absolute()/vs.cameraView.getMaskFilename()), True, vs.cameraView.cameraType.intrinsicCameraMatrix, vs.cameraView.cameraType.distortionCoefficients, args.dryRun)
+            else:
+                print('SQLite already exists: {}'.format(parentPath/vs.getDatabaseFilename()))
+    else:
+        for vs in videoSequences:
+            if not (parentPath/vs.getDatabaseFilename()).is_file() or args.process == 'object':
+                if args.configFilename is None:
+                    configFilename = str(parentPath/vs.cameraView.getTrackingConfigurationFilename())
+                else:
+                    configFilename = args.configFilename
+                if vs.cameraView.cameraType is None:
+                    pool.apply_async(cvutils.tracking, args = (configFilename, args.process == 'object', str(parentPath.absolute()/vs.getVideoSequenceFilename()), str(parentPath.absolute()/vs.getDatabaseFilename()), str(parentPath.absolute()/vs.cameraView.getHomographyFilename()), str(parentPath.absolute()/vs.cameraView.getMaskFilename()), False, None, None, args.dryRun))
+                else:
+                    pool.apply_async(cvutils.tracking, args = (configFilename, args.process == 'object', str(parentPath.absolute()/vs.getVideoSequenceFilename()), str(parentPath.absolute()/vs.getDatabaseFilename()), str(parentPath.absolute()/vs.cameraView.getHomographyFilename()), str(parentPath.absolute()/vs.cameraView.getMaskFilename()), True, vs.cameraView.cameraType.intrinsicCameraMatrix, vs.cameraView.cameraType.distortionCoefficients, args.dryRun))
+            else:
+                print('SQLite already exists: {}'.format(parentPath/vs.getDatabaseFilename()))
+        pool.close()
+        pool.join()
+
+elif args.process == 'prototype': # motion pattern learning
+    # learn by site by default -> group videos by camera view TODO
+    # by default, load all objects, learn and then assign (BUT not save the assignments)
+    for site in sites:
+        print('Learning motion patterns for site {} ({})'.format(site.idx, site.name))
+        objects = {}
+        object2VideoSequences = {}
+        for cv in site.cameraViews:
+            for vs in cv.videoSequences:
+                print('Loading '+vs.getDatabaseFilename())
+                objects[vs.idx] = storage.loadTrajectoriesFromSqlite(str(parentPath/vs.getDatabaseFilename()), args.trajectoryType, args.nObjects, timeStep = args.positionSubsamplingRate, nLongestFeaturesPerObject = args.nLongestFeaturesPerObject)
+                if args.trajectoryType == 'object' and args.nLongestFeaturesPerObject is not None:
+                    objectsWithFeatures = objects[vs.idx]
+                    objects[vs.idx] = [f for o in objectsWithFeatures for f in o.getFeatures()]
+                    prototypeType = 'feature'
+                else:
+                    prototypeType = args.trajectoryType
+                for obj in objects[vs.idx]:
+                    object2VideoSequences[obj] = vs
+        lcss = utils.LCSS(metric = args.metric, epsilon = args.epsilon)
+        similarityFunc = lambda x,y : lcss.computeNormalized(x, y)
+        trainingObjects = [o for tmpobjects in objects.values() for o in tmpobjects]
+        if args.nMPObjects is not None and args.nMPObjects < len(trainingObjects):
+            m = int(np.floor(float(len(trainingObjects))/args.nMPObjects))
+            trainingObjects = trainingObjects[::m]
+        similarities = -np.ones((len(trainingObjects), len(trainingObjects)))
+        prototypeIndices, labels = processing.learnAssignMotionPatterns(True, True, trainingObjects, similarities, args.minSimilarity, similarityFunc, args.minClusterSize, args.optimizeCentroid, args.randomInitialization, True, [])
+        if args.outputPrototypeDatabaseFilename is None:
+            outputPrototypeDatabaseFilename = args.databaseFilename # NOTE(review): no 'databaseFilename' argument is defined in this parser; branch is unreachable with the current default but would raise AttributeError — confirm intended fallback
+        else:
+            outputPrototypeDatabaseFilename = args.outputPrototypeDatabaseFilename
+        clusterSizes = ml.computeClusterSizes(labels, prototypeIndices, -1)
+        storage.savePrototypesToSqlite(str(parentPath/site.getPath()/outputPrototypeDatabaseFilename), [moving.Prototype(object2VideoSequences[trainingObjects[i]].getDatabaseFilename(False), trainingObjects[i].getNum(), prototypeType, clusterSizes[i]) for i in prototypeIndices])
+
+elif args.process == 'interaction':
+    # safety analysis TODO make function in safety analysis script
+    if args.predictionMethod == 'cvd':
+        predictionParameters = prediction.CVDirectPredictionParameters()
+    elif args.predictionMethod == 'cve':
+        predictionParameters = prediction.CVExactPredictionParameters()
+    for vs in videoSequences:
+        print('Processing '+vs.getDatabaseFilename())
+        if args.configFilename is None:
+            params = storage.ProcessParameters(str(parentPath/vs.cameraView.getTrackingConfigurationFilename()))
+        else:
+            params = storage.ProcessParameters(args.configFilename)  
+        objects = storage.loadTrajectoriesFromSqlite(str(parentPath/vs.getDatabaseFilename()), 'object')#, args.nObjects, withFeatures = (params.useFeaturesForPrediction or predictionMethod == 'ps' or predictionMethod == 'mp'))
+        interactions = events.createInteractions(objects)
+        if args.nProcesses == 1:
+            #print(len(interactions), args.computePET, predictionParameters, params.collisionDistance, params.predictionTimeHorizon, params.crossingZones)
+            processed = events.computeIndicators(interactions, True, args.computePET, predictionParameters, params.collisionDistance, params.predictionTimeHorizon, False, False, None) # params.crossingZones
+        else:
+            #pool = Pool(processes = args.nProcesses)
+            nInteractionPerProcess = int(np.ceil(len(interactions)/float(args.nProcesses)))
+            jobs = [pool.apply_async(events.computeIndicators, args = (interactions[i*nInteractionPerProcess:(i+1)*nInteractionPerProcess], True, args.computePET, predictionParameters, params.collisionDistance, params.predictionTimeHorizon, False, False, None)) for i in range(args.nProcesses)] # params.crossingZones
+            processed = []
+            for job in jobs:
+                processed += job.get()
+            #pool.close()
+        storage.saveIndicatorsToSqlite(str(parentPath/vs.getDatabaseFilename()), processed)
+            
+#################################
+# Analyze
+#################################
+if args.analyze == 'object':
+    # user speeds, accelerations
+    # aggregation per site
+    if args.eventFilename is None:
+        print('Missing output filename (event-filename). Exiting')
+        sys.exit(0)
+    data = [] # list of observation per site-user with time
+    headers = ['site', 'date', 'time', 'user_type']
+    aggFunctions, tmpheaders = utils.aggregationMethods(args.aggMethods, args.aggCentiles)
+    headers.extend(tmpheaders)
+    if args.nProcesses == 1:
+        for vs in videoSequences:
+            data.extend(processing.extractVideoSequenceSpeeds(str(parentPath/vs.getDatabaseFilename()), vs.cameraView.site.name, args.nObjects, vs.startTime, vs.cameraView.cameraType.frameRate, vs.cameraView.cameraType.frameRate*args.minDuration, args.aggMethods, args.aggCentiles))
+    else:
+        jobs = [pool.apply_async(processing.extractVideoSequenceSpeeds, args = (str(parentPath/vs.getDatabaseFilename()), vs.cameraView.site.name, args.nObjects, vs.startTime, vs.cameraView.cameraType.frameRate, vs.cameraView.cameraType.frameRate*args.minDuration, args.aggMethods, args.aggCentiles)) for vs in videoSequences]
+        for job in jobs:
+            data.extend(job.get())
+        pool.close()
+    data = pd.DataFrame(data, columns = headers)
+    if args.output == 'figure':
+        for name in headers[4:]:
+            plt.ioff()
+            plt.figure()
+            plt.boxplot([data.loc[data['site']==site.name, name] for site in sites], labels = [site.name for site in sites])
+            plt.ylabel(name+' Speeds (km/h)')
+            plt.savefig(name.lower()+'-speeds.png', dpi=dpi)
+            plt.close()
+    elif args.output == 'event':
+        data.to_csv(args.eventFilename, index = False)
+
+if args.analyze == 'interaction': # redo as for object, export in dataframe all interaction data
+    indicatorIds = [2,5,7,10]
+    conversionFactors = {2: 1., 5: 30.*3.6, 7:1./30, 10:1./30}
+    #maxIndicatorValue = {2: float('inf'), 5: float('inf'), 7:10., 10:10.}
+    data = [] # list of observation per site-user with time
+    headers = ['site', 'date', 'time', events.Interaction.indicatorNames[10].replace(' ','-')] # user types?
+    aggFunctions, tmpheaders = utils.aggregationMethods(args.aggMethods, args.aggCentiles)
+    for i in indicatorIds[:3]:
+        for h in tmpheaders:
+            headers.append(events.Interaction.indicatorNames[i].replace(' ','-')+'-'+h)
+    indicators = {}
+    interactions = {}
+    for vs in videoSequences:
+        print('Extracting SMoS from '+vs.getDatabaseFilename())
+        interactions = storage.loadInteractionsFromSqlite(str(parentPath/vs.getDatabaseFilename()))
+        minDuration = vs.cameraView.cameraType.frameRate*args.minDuration
+        for inter in interactions:
+            if inter.length() > minDuration:
+                d = vs.startTime.date()
+                t = vs.startTime.time()
+                row = [vs.cameraView.site.name, d, utils.framesToTime(inter.getFirstInstant(), vs.cameraView.cameraType.frameRate, t)]
+                pet = inter.getIndicator('Post Encroachment Time')
+                if pet is None:
+                    row.append(None)
+                else:
+                    row.append(conversionFactors[10]*pet.getValues()[0])
+                for i in indicatorIds[:3]:
+                    indic = inter.getIndicator(events.Interaction.indicatorNames[i])
+                    if indic is not None:
+                        #v = indic.getMostSevereValue()*
+                        tmp = list(indic.values.values())
+                        for method,func in aggFunctions.items():
+                            agg = conversionFactors[i]*func(tmp)
+                            if method == 'centile':
+                                row.extend(agg.tolist())
+                            else:
+                                row.append(agg)
+                    else:
+                        row.extend([None]*len(aggFunctions))
+                data.append(row)
+    data = pd.DataFrame(data, columns = headers)
+    if args.output == 'figure':
+        for i in indicatorIds:
+            pass # tmp = [indicators[siteId][i] for siteId in indicators]
+            # plt.ioff()
+            # plt.figure()
+            # plt.boxplot(tmp, labels = [session.query(Site).get(siteId).name for siteId in indicators])
+            # plt.ylabel(events.Interaction.indicatorNames[i]+' ('+events.Interaction.indicatorUnits[i]+')')
+            # plt.savefig(events.Interaction.indicatorNames[i]+'.png', dpi=150)
+            # plt.close()
+    elif args.output == 'event':
+        data.to_csv(args.eventFilename, index = False)
+
+if args.analyze == 'event-speed': # aggregate event data by 15 min interval (args.intervalDuration), count events with thresholds
+    data = pd.read_csv(args.eventFilename, parse_dates = [2])
+    #data = pd.read_csv('./speeds.csv', converters = {'time': lambda s: datetime.datetime.strptime(s, "%H:%M:%S").time()}, nrows = 5000)
+    # create time for end of each 15 min, then group by, using the agg method for each data column
+    headers = ['site', 'date', 'intervalend15', 'duration', 'count']
+    aggFunctions, tmpheaders = utils.aggregationMethods(args.aggMethods, args.aggCentiles)
+    dataColumns = list(data.columns[4:])
+    print(dataColumns)
+    for h in dataColumns:
+        for h2 in tmpheaders:
+            headers.append(h+'-'+h2)
+    if args.eventThresholds is not None:
+        for h in dataColumns:
+            for t in args.eventThresholds:
+                headers.append('n-{}-{}'.format(h, t))
+    data['intervalend15'] = data.time.apply(lambda t: (pd.Timestamp(year = t.year, month = t.month, day = t.day,hour = t.hour, minute = (t.minute // args.intervalDuration)*args.intervalDuration)+pd.Timedelta(minutes = 15)).time())
+    outputData = []
+    for name, group in data.groupby(['site', 'date', 'intervalend15']):
+        row = []
+        row.extend(name)
+        groupStartTime = group.time.min()
+        groupEndTime = group.time.max()
+        row.append((groupEndTime.minute+1-groupStartTime.minute) % 60)#(name[2].minute*60+name[2].second-groupStartTime.minute*60+groupStartTime.second) % 3600)
+        row.append(len(group))
+        for h in dataColumns:
+            for method,func in aggFunctions.items():
+                aggregated = func(group[h])
+                if method == 'centile':
+                    row.extend(aggregated)
+                else:
+                    row.append(aggregated)
+        if args.eventThresholds is not None:
+            for h in dataColumns:
+                for t in args.eventThresholds:
+                    row.append((group[h] > t).sum())
+        outputData.append(row)
+    pd.DataFrame(outputData, columns = headers).to_csv(utils.removeExtension(args.eventFilename)+'-aggregated.csv', index = False)
+
+elif args.analyze == 'event-interaction': # aggregate event data by 15 min interval (args.intervalDuration), count events with thresholds
+    data = pd.read_csv(args.eventFilename, parse_dates = [2])
+    headers = ['site', 'date', 'intervalend15', 'duration', 'count']
+    aggFunctions, tmpheaders = utils.aggregationMethods(args.aggMethods, args.aggCentiles)
+    dataColumns = list(data.columns[3:])
+    for h in dataColumns:
+        if not 'speed' in h.lower(): # proximity indicators are reversed, taking 85th centile of this column will yield the 15th centile (which we have to take the opposite again)
+            data[h] = -data[h]
+    for h in dataColumns:
+        for h2 in tmpheaders:
+            headers.append(h+'-'+h2)
+    for h,t in zip(dataColumns, args.eventThresholds): # each threshold in this case applies to one indicator
+        headers.append('n-{}-{}'.format(h, t))
+    data['intervalend15'] = data.time.apply(lambda t: (pd.Timestamp(year = t.year, month = t.month, day = t.day,hour = t.hour, minute = (t.minute // args.intervalDuration)*args.intervalDuration)+pd.Timedelta(minutes = 15)).time())
+    outputData = []
+    for name, group in data.groupby(['site', 'date', 'intervalend15']):
+        row = []
+        row.extend(name)
+        groupStartTime = group.time.min()
+        groupEndTime = group.time.max()
+        row.append((groupEndTime.minute+1-groupStartTime.minute) % 60)#(name[2].minute*60+name[2].second-groupStartTime.minute*60+groupStartTime.second) % 3600)
+        row.append(len(group))
+        for h in dataColumns:
+            for method,func in aggFunctions.items():
+                tmp = group.loc[~group[h].isna(), h]
+                if len(tmp)>0:
+                    aggregated = func(tmp) # todo invert if the resulting stat is negative
+                    if method == 'centile':
+                        row.extend(np.abs(aggregated))
+                    else:
+                        row.append(np.abs(aggregated))
+                else:
+                    row.extend([None]*len(aggFunctions))
+        for h,t in zip(dataColumns, args.eventThresholds): # each threshold in this case applies to one indicator
+            if 'speed' in h.lower():
+                row.append((group[h] > t).sum())
+            else:
+                row.append((group[h] > -t).sum()) # take larger than negative threshold for proximity indicators
+        outputData.append(row)
+    pd.DataFrame(outputData, columns = headers).to_csv(utils.removeExtension(args.eventFilename)+'-aggregated.csv', index = False)
--- a/scripts/replay-event-annotation.py	Fri Jun 10 15:43:02 2016 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-#! /usr/bin/env python
-
-import sys, argparse, datetime
-
-import storage, cvutils, utils
-
-import matplotlib.pylab as pylab
-import matplotlib.pyplot as plt
-import numpy as np
-
-
-annotations = pylab.csv2rec(sys.argv[1])
-
-frameRate = 30
-dirname = "/home/nicolas/Research/Data/montreal/infractions-pietons/"
-videoDirnames = {'amherst': '2011-06-22-sherbrooke-amherst/',
-                 'iberville': '2011-06-28-sherbrooke-iberville/'}
-
-# for amherst, subtract 40 seconds: add a delta
-
-for annotation in annotations:
-    video = annotation['video_name'].lower()
-    print('{} {}'.format(annotation['conflict_start_time'], annotation['conflict_end_time']))
-    print(annotation['road_user_1']+' '+annotation['road_user_2']+' '+annotation['conflict_quality'])
-    print(annotation['comments'])
-    cvutils.playVideo(dirname+videoDirnames[video]+video+'-{}.avi'.format(annotation['video_start_time']), utils.timeToFrames(annotation['conflict_start_time']+datetime.timedelta(seconds=-40), frameRate), frameRate, True, False, annotation['road_user_1']+' '+annotation['road_user_2']+' '+annotation['conflict_quality'])
--- a/scripts/rescale-homography.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/rescale-homography.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import sys
 
@@ -6,8 +6,7 @@
 import numpy as np
 import cv2
 
-import cvutils
-import utils
+from trafficintelligence import cvutils, utils
 
 if len(sys.argv) < 4:
    print('Usage: {} homography_filename original_size new_size (size can be width or height)'.format(sys.argv[0]))
@@ -20,7 +19,7 @@
                       [20,20],
                       [20,10]])
 
-wldPoints = cvutils.projectArray(homography, imgPoints.T).T
+wldPoints = cvutils.homographyProject(imgPoints.T, homography).T
 
 newSize = float(sys.argv[3])
 originalSize = float(sys.argv[2])
--- a/scripts/safety-analysis.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/safety-analysis.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,26 +1,39 @@
-#! /usr/bin/env python
-
-import storage, prediction, events, moving
+#! /usr/bin/env python3
 
 import sys, argparse, random
+from multiprocessing import Pool
 
 import matplotlib.pyplot as plt
 import numpy as np
 
+from trafficintelligence import storage, prediction, events, moving
+
 # todo: very slow if too many predicted trajectories
 # add computation of probality of unsucessful evasive action
 
 parser = argparse.ArgumentParser(description='The program processes indicators for all pairs of road users in the scene')
 parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the configuration file', required = True)
-parser.add_argument('--prediction-method', dest = 'predictionMethod', help = 'prediction method (constant velocity (vector computation), constant velocity, normal adaptation, point set prediction)', choices = ['cvd', 'cv', 'na', 'ps'])
+parser.add_argument('-d', dest = 'databaseFilename', help = 'name of the Sqlite database file (overrides the configuration file)')
+parser.add_argument('-n', dest = 'nObjects', help = 'number of objects to analyse', type = int)
+# TODO analyze only 
+parser.add_argument('--prediction-method', dest = 'predictionMethod', help = 'prediction method (constant velocity (cvd: vector computation (approximate); cve: equation solving; cv: discrete time (approximate)), normal adaptation, point set prediction)', choices = ['cvd', 'cve', 'cv', 'na', 'ps', 'mp'])
+parser.add_argument('-p', dest = 'prototypeDatabaseFilename', help = 'name of the database containing the prototypes')
+parser.add_argument('-c', dest = 'minPrototypeNMatchings', help = 'minimum number of matchings per prototype', type = int, default = 1)
+# parser.add_argument('--categorize', dest = 'categorize', help = 'computes interaction categories', action = 'store_true') TODO, add angle parameters in tracking.cfg - the safety analysis parameters should probably be spun off tracking.cfg
+parser.add_argument('--no-motion-prediction', dest = 'noMotionPrediction', help = 'does not compute indicators like TTC depending on motion prediction', action = 'store_true')
+parser.add_argument('--pet', dest = 'computePET', help = 'computes PET', action = 'store_true')
 parser.add_argument('--display-cp', dest = 'displayCollisionPoints', help = 'display collision points', action = 'store_true')
-parser.add_argument('-n', dest = 'nProcesses', help = 'number of processes to run in parallel', type = int, default = 1)
+parser.add_argument('--nthreads', dest = 'nProcesses', help = 'number of processes to run in parallel', type = int, default = 1)
 args = parser.parse_args()
 
 params = storage.ProcessParameters(args.configFilename)
 
+# selected database to override the configuration file
+if args.databaseFilename is not None:
+    params.databaseFilename = args.databaseFilename
+
 # parameters for prediction methods
-if args.predictionMethod:
+if args.predictionMethod is not None:
     predictionMethod = args.predictionMethod
 else:
     predictionMethod = params.predictionMethod
@@ -30,8 +43,10 @@
 def steeringDistribution():
     return random.triangular(-params.maxNormalSteering, params.maxNormalSteering, 0.)
 
-if predictionMethod == 'cvd': # TODO add cve: constant velocity exact (Sohail's)
+if predictionMethod == 'cvd':
     predictionParameters = prediction.CVDirectPredictionParameters()
+if predictionMethod == 'cve':
+    predictionParameters = prediction.CVExactPredictionParameters()
 elif predictionMethod == 'cv':
     predictionParameters = prediction.ConstantPredictionParameters(params.maxPredictedSpeed)
 elif predictionMethod == 'na':
@@ -42,6 +57,26 @@
                                                                            params.useFeaturesForPrediction)
 elif predictionMethod == 'ps':
     predictionParameters = prediction.PointSetPredictionParameters(params.maxPredictedSpeed)
+elif predictionMethod == 'mp':
+    if args.prototypeDatabaseFilename is None:
+        prototypes = storage.loadPrototypesFromSqlite(params.databaseFilename)
+    else:
+        prototypes = storage.loadPrototypesFromSqlite(args.prototypeDatabaseFilename)
+    if args.minPrototypeNMatchings > 0:
+        prototypes = [p for p in prototypes if p.getNMatchings() >= args.minPrototypeNMatchings]
+    else:
+        nProto0Matching = 0
+        for p in prototypes:
+            if p.getNMatchings() == 0:
+                nProto0Matching += 1
+                print("Prototype {} has 0 matchings".format(p))
+        if len(prototypes) == 0 or nProto0Matching > 0:
+            print('Database has {} prototypes without any matching. Exiting'.format(nProto0Matching))
+            sys.exit()
+    for p in prototypes:
+        p.getMovingObject().computeCumulativeDistances()
+    predictionParameters = prediction.PrototypePredictionParameters(prototypes, params.nPredictedTrajectories, params.maxLcssDistance, params.minLcssSimilarity, params.lcssMetric, params.minFeatureTime, params.constantSpeedPrototypePrediction, params.useFeaturesForPrediction)
+# else:
 # no else required, since parameters is required as argument
 
 # evasiveActionPredictionParameters = prediction.EvasiveActionPredictionParameters(params.maxPredictedSpeed, 
@@ -51,23 +86,25 @@
 #                                                                                  params.maxExtremeSteering,
 #                                                                                  params.useFeaturesForPrediction)
 
-objects = storage.loadTrajectoriesFromSqlite(params.databaseFilename,'object')
-if params.useFeaturesForPrediction:
-    features = storage.loadTrajectoriesFromSqlite(params.databaseFilename,'feature') # needed if normal adaptation
-    for obj in objects:
-        obj.setFeatures(features)
+objects = storage.loadTrajectoriesFromSqlite(params.databaseFilename, 'object', args.nObjects, withFeatures = (params.useFeaturesForPrediction or predictionMethod == 'ps' or predictionMethod == 'mp'))
 
 interactions = events.createInteractions(objects)
-for inter in interactions:
-    inter.computeIndicators()
-    inter.computeCrossingsCollisions(predictionParameters, params.collisionDistance, params.predictionTimeHorizon, params.crossingZones, nProcesses = args.nProcesses)
-
-storage.saveIndicators(params.databaseFilename, interactions)
+if args.nProcesses == 1:
+    processed = events.computeIndicators(interactions, not args.noMotionPrediction, args.computePET, predictionParameters, params.collisionDistance, params.predictionTimeHorizon, params.crossingZones, False, None)
+else:
+    pool = Pool(processes = args.nProcesses)
+    nInteractionPerProcess = int(np.ceil(len(interactions)/float(args.nProcesses)))
+    jobs = [pool.apply_async(events.computeIndicators, args = (interactions[i*nInteractionPerProcess:(i+1)*nInteractionPerProcess], not args.noMotionPrediction, args.computePET, predictionParameters, params.collisionDistance, params.predictionTimeHorizon, params.crossingZones, False, None)) for i in range(args.nProcesses)]
+    processed = []
+    for job in jobs:
+        processed += job.get()
+    pool.close()
+storage.saveIndicatorsToSqlite(params.databaseFilename, processed)
 
 if args.displayCollisionPoints:
     plt.figure()
     allCollisionPoints = []
-    for inter in interactions:
+    for inter in processed:
         for collisionPoints in inter.collisionPoints.values():
             allCollisionPoints += collisionPoints
     moving.Point.plotAll(allCollisionPoints)
--- a/scripts/setup-tracking.sh	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/setup-tracking.sh	Mon Aug 24 16:02:06 2020 -0400
@@ -1,24 +1,26 @@
-version="$(wget -q -O - http://sourceforge.net/projects/opencvlibrary/files/opencv-unix | egrep -m1 -o '\"[0-9](\.[0-9])+' | cut -c2-)"
+version="$(wget -q -O - http://sourceforge.net/projects/opencvlibrary/files/opencv-unix | egrep -m1 -o '\"[0-2](\.[0-9]+)+' | cut -c2-)"
+#'\"[0-9](\.[0-9])+'
 echo "Removing any pre-installed ffmpeg and x264"
-sudo apt-get -qq remove ffmpeg x264 libx264-dev
+sudo apt -qq remove ffmpeg x264 libx264-dev
 echo "Installing Dependencies"
-sudo apt-get -qq install libopencv-dev build-essential checkinstall cmake pkg-config yasm libtiff4-dev libjpeg-dev libjasper-dev libavcodec-dev libavformat-dev libswscale-dev libdc1394-22-dev libxine-dev libgstreamer0.10-dev libgstreamer-plugins-base0.10-dev libv4l-dev python-dev python-numpy libtbb-dev libqt4-dev libgtk2.0-dev libfaac-dev libmp3lame-dev libopencore-amrnb-dev libopencore-amrwb-dev libtheora-dev libvorbis-dev libxvidcore-dev x264 v4l-utils ffmpeg
-sudo apt-get -qq install libavfilter-dev libboost-dev libboost-program-options-dev libboost-graph-dev python-setuptools python-dev libcppunit-dev sqlite3 libsqlite3-dev cmake-qt-gui libboost-all-dev
-sudo easy_install -U mercurial
+sudo apt -qq install build-essential checkinstall cmake pkg-config yasm libtiff5-dev libjpeg-dev libjasper-dev libavcodec-dev libavformat-dev libswscale-dev libgstreamer0.10-dev libgstreamer-plugins-base0.10-dev libv4l-dev python-dev libtbb-dev libgtk2.0-dev libfaac-dev libmp3lame-dev libtheora-dev libvorbis-dev libxvidcore-dev x264
+#  libdc1394-22-dev libxine-dev python-numpy libqt4-dev libopencore-amrnb-dev libopencore-amrwb-dev v4l-utils ffmpeg libboost-all-dev
+sudo apt -qq install libavfilter-dev libboost-dev libboost-program-options-dev libboost-graph-dev python-pip sqlite3 libsqlite3-dev cmake-qt-gui libgeos-dev
 echo "Installing OpenCV" $version
 cd
 mkdir OpenCV
 cd OpenCV
 echo "Downloading OpenCV" $version
-wget -O OpenCV-$version.tar.gz http://sourceforge.net/projects/opencvlibrary/files/opencv-unix/$version/opencv-"$version".tar.gz/download
+wget -O OpenCV-$version.zip http://sourceforge.net/projects/opencvlibrary/files/opencv-unix/$version/opencv-"$version".zip/download
 echo "Installing OpenCV" $version
-tar -xvf OpenCV-$version.tar.gz
+unzip OpenCV-$version.zip
+#tar -xvf
 cd opencv-$version
 mkdir release
 cd release
 cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local ..
 make
-sudo make install
+sudo make -j4 install
 echo "OpenCV" $version "ready to be used"
 
 echo "Installing Traffic Intelligence..."
@@ -33,6 +35,9 @@
 cmake .
 make TrajectoryManagementAndAnalysis
 cd
+wget https://bootstrap.pypa.io/get-pip.py
+sudo -H python3 get-pip.py
+sudo -H pip3 install -r trafficintelligence/python-requirements.txt --upgrade
 cd trafficintelligence/c/
 make feature-based-tracking
 cd
--- a/scripts/test-compute-object-position-from-features.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/test-compute-object-position-from-features.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 import sys
 
@@ -41,7 +41,7 @@
     yCoordinates = -np.ones((len(features),int(timeInterval.length())))
     for i,f in enumerate(features):
         traj = f.getPositions().asArray()
-        imgTraj = cvutils.projectArray(homography, traj)
+        imgTraj = cvutils.homographyProject(traj, homography)
         yCoordinates[i,f.getFirstInstant()-timeInterval.first:f.getLastInstant()+1-timeInterval.first] = imgTraj[1,:]
 
     indices = np.argmax(yCoordinates,0)
@@ -70,7 +70,7 @@
 # TODO version median: conversion to large matrix will not work, have to do it frame by frame
 
 def kalmanFilter(positions, velocities, processNoiseCov, measurementNoiseCov):
-    kalman=cv.CreateKalman(6, 4)
+    kalman=cv2.CreateKalman(6, 4)
     kalman.transition_matrix[0,2]=1
     kalman.transition_matrix[0,4]=1./2
     kalman.transition_matrix[1,3]=1
@@ -97,15 +97,15 @@
     filteredPositions = moving.Trajectory()
     filteredVelocities = moving.Trajectory()
     measurement = cv.CreateMat(4,1,cv.CV_32FC1)
-    for i in xrange(positions.length()):
-        cv.KalmanPredict(kalman) # no control
+    for i in range(positions.length()):
+        kalman.predict() # no control
         p = positions[i]
         v = velocities[i]
         measurement[0,0] = p.x
         measurement[1,0] = p.y
         measurement[2,0] = v.x
         measurement[3,0] = v.y
-        cv.KalmanCorrect(kalman, measurement)
+        kalman.correct(measurement)
         filteredPositions.addPositionXY(kalman.state_post[0,0], kalman.state_post[1,0])
         filteredVelocities.addPositionXY(kalman.state_post[2,0], kalman.state_post[3,0])
 
--- a/scripts/train-object-classification.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/train-object-classification.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,25 +1,26 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import numpy as np
-import sys, argparse
-from cv2.ml import SVM_RBF, SVM_C_SVC, ROW_SAMPLE
+import argparse
+from cv2.ml import SVM_RBF, SVM_C_SVC, ROW_SAMPLE # row_sample for layout in cv2.ml.SVM_load
 
-import cvutils, moving, ml
+import cvutils, moving, ml, storage
 
 parser = argparse.ArgumentParser(description='The program processes indicators for all pairs of road users in the scene')
 parser.add_argument('-d', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', required = True)
 parser.add_argument('--kernel', dest = 'kernelType', help = 'kernel type for the support vector machine (SVM)', default = SVM_RBF, type = long)
 parser.add_argument('--svm', dest = 'svmType', help = 'SVM type', default = SVM_C_SVC, type = long)
-# TODO make other SVM parameters apparent: C, C0, Nu, etc.
-parser.add_argument('-s', dest = 'rescaleSize', help = 'rescale size of image samples', default = 64, type = int)
-parser.add_argument('-o', dest = 'nOrientations', help = 'number of orientations in HoG', default = 9, type = int)
-parser.add_argument('-p', dest = 'nPixelsPerCell', help = 'number of pixels per cell', default = 8, type = int)
-parser.add_argument('-c', dest = 'nCellsPerBlock', help = 'number of cells per block', default = 2, type = int)
+parser.add_argument('--deg', dest = 'degree', help = 'SVM degree', default = 0, type = int)
+parser.add_argument('--gamma', dest = 'gamma', help = 'SVM gamma', default = 1, type = int)
+parser.add_argument('--coef0', dest = 'coef0', help = 'SVM coef0', default = 0, type = int)
+parser.add_argument('--cvalue', dest = 'cvalue', help = 'SVM Cvalue', default = 1, type = int)
+parser.add_argument('--nu', dest = 'nu', help = 'SVM nu', default = 0, type = int)
+parser.add_argument('--svmp', dest = 'svmP', help = 'SVM p', default = 0, type = int)
+parser.add_argument('--cfg', dest = 'configFilename', help = 'name of the classifier configuration file', required = True)
+parser.add_argument('--confusion-matrix', dest = 'computeConfusionMatrix', help = 'compute the confusion matrix on the training data', action = 'store_true')
+
 args = parser.parse_args()
-
-rescaleSize = (args.rescaleSize, args.rescaleSize)
-nPixelsPerCell = (args.nPixelsPerCell, args.nPixelsPerCell)
-nCellsPerBlock = (args.nCellsPerBlock, args.nCellsPerBlock)
+classifierParams = storage.ClassifierParameters(args.configFilename)
 
 imageDirectories = {'pedestrian': args.directoryName + "/Pedestrians/",
                     'bicycle': args.directoryName + "/Cyclists/",
@@ -34,9 +35,9 @@
 trainingSamplesPV = {}
 trainingLabelsPV = {}
 
-for k, v in imageDirectories.iteritems():
+for k, v in imageDirectories.items():
     print('Loading {} samples'.format(k))
-    trainingSamples, trainingLabels = cvutils.createHOGTrainingSet(v, moving.userType2Num[k], rescaleSize, args.nOrientations, nPixelsPerCell, nCellsPerBlock)
+    trainingSamples, trainingLabels = cvutils.createHOGTrainingSet(v, moving.userType2Num[k], classifierParams.hogRescaleSize, classifierParams.hogNOrientations, classifierParams.hogNPixelsPerCell, classifierParams.hogBlockNorm, classifierParams.hogNCellsPerBlock)
     trainingSamplesPBV[k], trainingLabelsPBV[k] = trainingSamples, trainingLabels
     if k != 'pedestrian':
 	trainingSamplesBV[k], trainingLabelsBV[k] = trainingSamples, trainingLabels
@@ -46,22 +47,30 @@
 	trainingSamplesPV[k], trainingLabelsPV[k] = trainingSamples, trainingLabels
 
 # Training the Support Vector Machine
-print "Training Pedestrian-Cyclist-Vehicle Model"
-model = ml.SVM(args.svmType, args.kernelType)
-model.train(np.concatenate(trainingSamplesPBV.values()), ROW_SAMPLE, np.concatenate(trainingLabelsPBV.values()))
+print("Training Pedestrian-Cyclist-Vehicle Model")
+model = ml.SVM(args.svmType, args.kernelType, args.degree, args.gamma, args.coef0, args.cvalue, args.nu, args.svmP)
+classifications = model.train(np.concatenate(list(trainingSamplesPBV.values())), ROW_SAMPLE, np.concatenate(list(trainingLabelsPBV.values())), True)
+if args.computeConfusionMatrix:
+    print(classifications)
 model.save(args.directoryName + "/modelPBV.xml")
 
-print "Training Cyclist-Vehicle Model"
-model = ml.SVM(args.svmType, args.kernelType)
-model.train(np.concatenate(trainingSamplesBV.values()), ROW_SAMPLE, np.concatenate(trainingLabelsBV.values()))
+print("Training Cyclist-Vehicle Model")
+model = ml.SVM(args.svmType, args.kernelType, args.degree, args.gamma, args.coef0, args.cvalue, args.nu, args.svmP)
+classifications = model.train(np.concatenate(list(trainingSamplesBV.values())), ROW_SAMPLE, np.concatenate(list(trainingLabelsBV.values())), True)
+if args.computeConfusionMatrix:
+    print(classifications)
 model.save(args.directoryName + "/modelBV.xml")
 
-print "Training Pedestrian-Cyclist Model"
-model = ml.SVM(args.svmType, args.kernelType)
-model.train(np.concatenate(trainingSamplesPB.values()), ROW_SAMPLE, np.concatenate(trainingLabelsPB.values()))
+print("Training Pedestrian-Cyclist Model")
+model = ml.SVM(args.svmType, args.kernelType, args.degree, args.gamma, args.coef0, args.cvalue, args.nu, args.svmP)
+classifications = model.train(np.concatenate(list(trainingSamplesPB.values())), ROW_SAMPLE, np.concatenate(list(trainingLabelsPB.values())), True)
+if args.computeConfusionMatrix:
+    print(classifications)
 model.save(args.directoryName + "/modelPB.xml")
 
-print "Training Pedestrian-Vehicle Model"
-model = ml.SVM(args.svmType, args.kernelType)
-model.train(np.concatenate(trainingSamplesPV.values()), ROW_SAMPLE, np.concatenate(trainingLabelsPV.values()))
+print("Training Pedestrian-Vehicle Model")
+model = ml.SVM(args.svmType, args.kernelType, args.degree, args.gamma, args.coef0, args.cvalue, args.nu, args.svmP)
+classifications = model.train(np.concatenate(list(trainingSamplesPV.values())), ROW_SAMPLE, np.concatenate(list(trainingLabelsPV.values())), True)
+if args.computeConfusionMatrix:
+    print(classifications)
 model.save(args.directoryName + "/modelPV.xml")
--- a/scripts/undistort-video.py	Fri Jun 10 15:43:02 2016 -0400
+++ b/scripts/undistort-video.py	Mon Aug 24 16:02:06 2020 -0400
@@ -1,13 +1,13 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 import sys, argparse
 
 import numpy as np
 import cv2
 
-import cvutils
+from trafficintelligence import cvutils
 from math import ceil, log10
-from os import path, mkdir
+from pathlib import Path
 
 parser = argparse.ArgumentParser(description='''The program converts a video into a series of images corrected for distortion. One can then use mencoder to generate a movie, eg
 $ mencoder 'mf://./*.png' -mf fps=[framerate]:type=png -ovc xvid -xvidencopts bitrate=[bitrate] -nosound -o [output.avi]''')
@@ -15,8 +15,9 @@
 parser.add_argument('-i', dest = 'videoFilename', help = 'filename of the video sequence')
 parser.add_argument('--intrinsic', dest = 'intrinsicCameraMatrixFilename', help = 'name of the intrinsic camera file')
 parser.add_argument('--distortion-coefficients', dest = 'distortionCoefficients', help = 'distortion coefficients', nargs = '*', type = float)
-parser.add_argument('--undistorted-multiplication', dest = 'undistortedImageMultiplication', help = 'undistorted image multiplication', type = float)
-parser.add_argument('-f', dest = 'firstFrameNum', help = 'number of first frame number to display', type = int)
+parser.add_argument('--undistorted-multiplication', dest = 'undistortedImageMultiplication', help = 'undistorted image multiplication', type = float, default = 1.)
+parser.add_argument('--mask', dest = 'maskFilename', help = 'name of the mask file, to undistort to see how it covers the undistortion errors')
+parser.add_argument('-f', dest = 'firstFrameNum', help = 'number of first frame number to display', type = int, default = 0)
 parser.add_argument('-l', dest = 'lastFrameNum', help = 'number of last frame number to save', type = int)
 parser.add_argument('-d', dest = 'destinationDirname', help = 'name of the directory where the undistorted frames are saved')
 parser.add_argument('--encode', dest = 'encodeVideo', help = 'indicate if video is generated at the end (default Xvid)', action = 'store_true')
@@ -26,44 +27,43 @@
 args = parser.parse_args()
 
 intrinsicCameraMatrix = np.loadtxt(args.intrinsicCameraMatrixFilename)
-#distortionCoefficients = args.distortionCoefficients
-#undistortedImageMultiplication = args.undistortedImageMultiplication
-#firstFrameNum = params.firstFrameNum
 if args.destinationDirname is None:
-    destinationDirname = ''
+    destinationPath = Path('.')
 else:
-    if not args.destinationDirname.endswith('/'):
-        destinationDirname = args.destinationDirname+'/'
-    else:
-        destinationDirname = args.destinationDirname
-    if not path.exists(destinationDirname):
-        mkdir(destinationDirname)
+    destinationPath = Path(args.destinationDirname)
+    if not destinationPath.exists():
+        destinationPath.mkdir()
 
 capture = cv2.VideoCapture(args.videoFilename)
 width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
 height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
-[map1, map2] = cvutils.computeUndistortMaps(width, height, args.undistortedImageMultiplication, intrinsicCameraMatrix, args.distortionCoefficients)
+[map1, map2], newCameraMatrix = cvutils.computeUndistortMaps(width, height, args.undistortedImageMultiplication, intrinsicCameraMatrix, args.distortionCoefficients)
+if args.maskFilename is not None:
+    mask = cv2.imread(args.maskFilename)
+    undistortedMask = cv2.remap(mask, map1, map2, interpolation=cv2.INTER_LINEAR)/255
+
 if capture.isOpened():
     ret = True
     frameNum = args.firstFrameNum
     capture.set(cv2.CAP_PROP_POS_FRAMES, args.firstFrameNum)
     if args.lastFrameNum is None:
-        from sys import maxint
-        lastFrameNum = maxint
+        lastFrameNum = float('inf')
     else:
         lastFrameNum = args.lastFrameNum
-        nZerosFilename = int(ceil(log10(lastFrameNum)))
-        while ret and frameNum < lastFrameNum:
-            ret, img = capture.read()
-            if ret:
-                img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
-                cv2.imwrite(destinationDirname+'undistorted-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img)
-            frameNum += 1
+    nZerosFilename = int(ceil(log10(lastFrameNum)))
+    while ret and frameNum < lastFrameNum:
+        ret, img = capture.read()
+        if ret:
+            img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
+            cv2.imwrite(str(destinationPath/Path('undistorted-{{:0{}}}.png'.format(nZerosFilename).format(frameNum))), img)
+            if args.maskFilename is not None:
+                cv2.imwrite(str(destinationPath/Path('undistorted+mask-{{:0{}}}.png'.format(nZerosFilename).format(frameNum))), cv2.multiply(img, undistortedMask, dtype = 16))
+        frameNum += 1
 
 if args.encodeVideo:
     print('Encoding the images files in video')
     from subprocess import check_call
     from storage import openCheck
     out = openCheck("err.log", "w")
-    check_call("mencoder \'mf://"+destinationDirname+"*.png\' -mf fps={}:type=png -ovc xvid -xvidencopts bitrate={} -nosound -o ".format(args.fps, args.bitrate)+destinationDirname+"undistort.avi", stderr = out, shell = True)
+    check_call("mencoder \'mf://"+str(destinationPath)+"/*.png\' -mf fps={}:type=png -ovc xvid -xvidencopts bitrate={} -nosound -o ".format(args.fps, args.bitrate)+str(destinationPath/"undistort.avi"), stderr = out, shell = True)
     out.close()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/setup.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,21 @@
+import setuptools
+
+with open('README') as file:
+    long_description = file.read()
+    
+setuptools.setup(
+    name='trafficintelligence',
+    version='0.2.5',
+    author='Nicolas Saunier',
+    author_email='nicolas.saunier@polymtl.ca',
+    url='https://bitbucket.org/Nicolas/trafficintelligence',
+    packages=setuptools.find_packages(),
+    description='Python modules of the Traffic Intelligence project',
+    long_description=long_description,
+    license = 'MIT License',
+    classifiers=(
+        "Programming Language :: Python :: 3",
+        "License :: OSI Approved :: MIT License",
+        "Operating System :: OS Independent",
+    ),
+)
--- a/tracking.cfg	Fri Jun 10 15:43:02 2016 -0400
+++ b/tracking.cfg	Mon Aug 24 16:02:06 2020 -0400
@@ -1,4 +1,4 @@
-# filename of the video to process
+# filename of the video to process (can be images, eg image%04d.png)
 video-filename = laurier.avi
 # filename of the database where results are saved
 database-filename = laurier.sqlite
@@ -51,7 +51,7 @@
 pyramid-level = 5
 # number of displacement to test minimum feature motion
 ndisplacements = 3
-# minimum displacement to keep features (px)
+# minimum displacement to keep features (world distance unit or px)
 min-feature-displacement = 0.05
 # maximum feature acceleration
 acceleration-bound = 3
@@ -68,7 +68,7 @@
 # minimum eigen value of a 2x2 normal matrix of optical flow equations
 min-feature-eig-threshold = 1e-4
 # minimum length of a feature (number of frames) to consider a feature for grouping
-min-feature-time = 9
+min-feature-time = 15
 # Min Max similarity parameters (Beymer et al. method)
 # connection distance in feature grouping (world distance unit or px)
 mm-connection-distance = 2.68813545522
@@ -80,44 +80,18 @@
 min-velocity-cosine = 0.8
 # minimum average number of features per frame to create a vehicle hypothesis
 min-nfeatures-group = 3.16747690802
-# Road user classification
-# min number of pixels in cropped image to classify by SVM
-min-npixels-crop = 400
-# method to aggregate road user speed
-speed-aggregation-method = median
-# number of frames to ignore at both ends of a series (noisy)
-nframes-ignore-at-ends = 2
-# quantile for the speed aggregation, if quantile is chosen
-speed-aggregation-quantile = 50
-# speed value below which all classes are equiprobable (distributions give odd values there) (km/h)
-min-speed-equiprobable = 3.33
-# filename of the general ped/cyc/veh SVM classifier
-pbv-svm-filename = modelPBV.xml
-# filename of the cyc/veh SVM classifier
-bv-svm-filename = modelBV.xml
-# maximum pedestrian speed (agregate: mean, median, 85th centile, etc.) 10 km/h
-max-ped-speed = 10.0
-# maximum cyclist speed (agregate: mean, median, 85th centile, etc.) 30 km/h (3xped)
-max-cyc-speed = 30.0
-# mean pedestrian speed and standard deviation (in a normal distribution) 4.91+-0.88 km/h
-mean-ped-speed = 4.91
-std-ped-speed = 0.88
-# mean cyclist speed and standard deviation (in a log-normal distribution) 11.+-4.83 km/h
-cyc-speed-loc = 2.31
-cyc-speed-scale = 0.42
-# mean vehicle speed and standard deviation (in a normal distribution) 18.45+-7.6 km/h
-mean-veh-speed = 18.45
-std-veh-speed = 7.6
+# name of the configuration file for all classifier information
+classifier-filename = classifier.cfg
 # Safety analysis
 # maximum speed when predicting future motion (km/h)
 max-predicted-speed = 50
 # time horizon for collision prediction (s)
-prediction-time-horizon = 5
+prediction-time-horizon = 5.0
 # collision distance threshold (m)
 collision-distance = 1.8
 # option to compute crossing zones and predicted PET
 crossing-zones = false
-# prediction method: cv, na, ps
+# prediction method: cv, cvd, na, ps, mp
 prediction-method = na
 # number of predicted trajectories (use depends on prediction method)
 npredicted-trajectories = 10
@@ -133,3 +107,12 @@
 max-extreme-steering = 0.5
 # use feature positions and velocities for prediction
 use-features-prediction = true
+# use constant speed (motion pattern based prediction)
+constant-speed = false
+# point distance threshold, for the chosen metric for trajectory matching using LCSS
+max-lcss-distance = 2.
+# distance metric for trajectory matching using LCSS
+lcss-metric = cityblock
+# similarity threshold for trajectory matching on normalized LCSS
+min-lcss-similarity = 0.4
+# minimum past feature length for past trajectory matching for motion prediction -> using min-feature-time
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/base.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,7 @@
+'''Module for few base classes to avoid issues of circular import'''
+
+class VideoFilenameAddable(object):
+    'Base class with the capability to attach a video filename'
+
+    def setVideoFilename(self, videoFilename):
+        self.videoFilename = videoFilename
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/cvutils.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,660 @@
+#! /usr/bin/env python
+'''Image/Video utilities'''
+
+from sys import stdout
+from os import listdir
+from subprocess import run
+from math import floor, log10, ceil
+from time import time
+
+from numpy import dot, array, append, float32, loadtxt, savetxt, append, zeros, ones, identity, abs as npabs, logical_and, unravel_index, sum as npsum, isnan, mgrid, median, floor as npfloor, ceil as npceil, nonzero
+from numpy.linalg import inv
+from matplotlib.pyplot import imread, imsave, imshow, figure, subplot
+
+try:
+    import cv2
+    opencvAvailable = True
+except ImportError:
+    print('OpenCV library could not be loaded (video replay functions will not be available)') # TODO change to logging module
+    opencvAvailable = False
+try:
+    import skimage
+    skimageAvailable = True
+except ImportError:
+    print('Scikit-image library could not be loaded (HoG-based classification methods will not be available)')
+    skimageAvailable = False
+    
+from trafficintelligence import utils, moving
+
+videoFilenameExtensions = ['mov', 'avi', 'mp4', 'MOV', 'AVI', 'MP4']
+trackerExe = 'feature-based-tracking'
+#importaggdraw # agg on top of PIL (antialiased drawing)
+
+cvRed = {'default': (0,0,255),
+         'colorblind': (0,114,178)}
+cvGreen = {'default': (0,255,0),
+           'colorblind': (0,158,115)}
+cvBlue = {'default': (255,0,0),
+          'colorblind': (213,94,0)}
+cvCyan = {'default': (255, 255, 0),
+          'colorblind': (240,228,66)}
+cvYellow = {'default': (0, 255, 255),
+            'colorblind': (86,180,233)}
+cvMagenta = {'default': (255, 0, 255),
+             'colorblind': (204,121,167)}
+cvWhite = {k: (255, 255, 255) for k in ['default', 'colorblind']}
+cvBlack = {k: (0,0,0) for k in ['default', 'colorblind']}
+
+cvColors3 = {k: utils.PlottingPropertyValues([cvRed[k], cvGreen[k], cvBlue[k]]) for k in ['default', 'colorblind']}
+cvColors = {k: utils.PlottingPropertyValues([cvRed[k], cvGreen[k], cvBlue[k], cvCyan[k], cvYellow[k], cvMagenta[k], cvWhite[k], cvBlack[k]]) for k in ['default', 'colorblind']}
+
def quitKey(key):
    'Returns True if the pressed key (cv2.waitKey code) means quit (q or Q)'
    c = chr(key & 255)
    return c == 'q' or c == 'Q'
+
def saveKey(key):
    'Returns True if the pressed key (cv2.waitKey code) means save (lowercase s)'
    return 's' == chr(key & 255)
+
def int2FOURCC(x):
    'Decodes an integer FOURCC codec code into its 4-character string'
    return ''.join(chr((x >> (8 * i)) & 255) for i in range(4))
+
def rgb2gray(rgb):
    '''Converts an RGB image (..., 3) to grayscale using the ITU-R BT.601
    luma weights 0.299 R + 0.587 G + 0.114 B'''
    # fix: the blue coefficient was 0.144 (digit transposition); the standard
    # BT.601 value is 0.114, so that the weights sum to 1
    return dot(rgb[...,:3], [0.299, 0.587, 0.114])
+
def matlab2PointCorrespondences(filename):
    '''Loads the point correspondences saved by the matlab camera calibration
    tool and rewrites them in the -point-correspondences.txt format
    (world coordinate rows stacked on top of image coordinate rows)'''
    data = loadtxt(filename, delimiter=',')
    correspondences = append(data[:, :2].T, data[:, 3:].T, axis=0)
    savetxt(utils.removeExtension(filename)+'-point-correspondences.txt', correspondences)
+
def loadPointCorrespondences(filename):
    '''Loads the corresponding points from filename and returns them as
    (world points, image points), each an Nx2 float32 array
    (file layout: first 2 rows are world coordinates, last 2 rows image coordinates)'''
    points = loadtxt(filename, dtype=float32)
    worldPoints = points[:2, :].T
    imagePoints = points[2:, :].T
    return (worldPoints, imagePoints)
+
def cvMatToArray(cvmat):
    '''Converts an old-style OpenCV CvMat to a numpy array (deprecated API)'''
    print('Deprecated, use new interface')
    result = zeros((cvmat.rows, cvmat.cols))
    for row in range(cvmat.rows):
        for col in range(cvmat.cols):
            result[row, col] = cvmat[row, col]
    return result
+
def createWhiteImage(height, width, filename):
    '''Creates a height x width white RGB image and saves it to filename'''
    # fix: uint8 is not imported from numpy anywhere in this module, so the
    # original raised a NameError; pass the dtype by name instead
    img = ones((height, width, 3), dtype='uint8')*255
    imsave(filename, img)
+
+if opencvAvailable:
+    def computeHomography(srcPoints, dstPoints, method=0, ransacReprojThreshold=3.0):
+        '''Returns the homography matrix mapping from srcPoints to dstPoints (dimension Nx2)'''
+        H, mask = cv2.findHomography(srcPoints, dstPoints, method, ransacReprojThreshold)
+        return H
+
+    def cvPlot(img, positions, color, lastCoordinate = None, **kwargs):
+        if lastCoordinate is None:
+            last = positions.length()-1
+        elif lastCoordinate >=0:
+            last = min(positions.length()-1, lastCoordinate)
+        for i in range(0, last):
+            cv2.line(img, positions[i].asint().astuple(), positions[i+1].asint().astuple(), color, **kwargs)
+
+    def cvImshow(windowName, img, rescale = 1.0):
+        'Rescales the image (in particular if too large)'
+        if rescale != 1.:
+            size = (int(round(img.shape[1]*rescale)), int(round(img.shape[0]*rescale)))
+            resizedImg = cv2.resize(img, size)
+            cv2.imshow(windowName, resizedImg)
+        else:
+            cv2.imshow(windowName, img)
+
    def computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients):
        '''Computes the pixel maps to undistort images of size width x height
        Returns ((map1, map2), newCameraMatrix): map1/map2 are the cv2.remap
        maps for an undistorted image enlarged by undistortedImageMultiplication,
        newCameraMatrix is the camera matrix for the enlarged undistorted image'''
        newImgSize = (int(round(width*undistortedImageMultiplication)), int(round(height*undistortedImageMultiplication)))
        newCameraMatrix = cv2.getDefaultNewCameraMatrix(intrinsicCameraMatrix, newImgSize, True)
        return cv2.initUndistortRectifyMap(intrinsicCameraMatrix, array(distortionCoefficients), None, newCameraMatrix, newImgSize, cv2.CV_32FC1), newCameraMatrix
+
+    def playVideo(filenames, windowNames = None, firstFrameNums = None, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1., step = 1, colorBlind = False):
+        '''Plays the video(s)'''
+        if colorBlind:
+            colorType = 'colorblind'
+        else:
+            colorType = 'default'
+        if len(filenames) == 0:
+            print('Empty filename list')
+            return
+        if windowNames is None:
+            windowNames = ['frame{}'.format(i) for i in range(len(filenames))]
+        wait = 5
+        if rescale == 1.:
+            for windowName in windowNames:
+                cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
+        if frameRate > 0:
+            wait = int(round(1000./frameRate))
+        if interactive:
+            wait = 0
+        captures = [cv2.VideoCapture(fn) for fn in filenames]
+        if array([cap.isOpened() for cap in captures]).all():
+            key = -1
+            ret = True
+            nFramesShown = 0
+            if firstFrameNums is not None:
+                for i in range(len(captures)):
+                    captures[i].set(cv2.CAP_PROP_POS_FRAMES, firstFrameNums[i])
+            while ret and not quitKey(key):
+                rets = []
+                images = []
+                for cap in captures:
+                    ret, img = cap.read()
+                    rets.append(ret)
+                    images.append(img)
+                ret = array(rets).all()
+                if ret:
+                    if printFrames:
+                        print('frame shown {0}'.format(nFramesShown))
+                    for i in range(len(filenames)):
+                        if text is not None:
+                            cv2.putText(images[i], text, (10,50), cv2.FONT_HERSHEY_PLAIN, 1, cvRed[colorType])
+                        cvImshow(windowNames[i], images[i], rescale) # cv2.imshow('frame', img)
+                    key = cv2.waitKey(wait)
+                    if saveKey(key):
+                        cv2.imwrite('image-{}.png'.format(frameNum), img)
+                    nFramesShown += step
+                    if step > 1:
+                        for i in range(len(captures)):
+                            captures[i].set(cv2.CAP_PROP_POS_FRAMES, firstFrameNums[i]+nFramesShown)
+            cv2.destroyAllWindows()
+        else:
+            print('Video captures for {} failed'.format(filenames))
+
+    def infoVideo(filename):
+        '''Provides all available info on video '''
+        cvPropertyNames = {cv2.CAP_PROP_FORMAT: "format",
+                           cv2.CAP_PROP_FOURCC: "codec (fourcc)",
+                           cv2.CAP_PROP_FPS: "fps",
+                           cv2.CAP_PROP_FRAME_COUNT: "number of frames",
+                           cv2.CAP_PROP_FRAME_HEIGHT: "heigh",
+                           cv2.CAP_PROP_FRAME_WIDTH: "width",
+                           cv2.CAP_PROP_RECTIFICATION: "rectification",
+                           cv2.CAP_PROP_SATURATION: "saturation"}
+        capture = cv2.VideoCapture(filename)
+        videoProperties = {}
+        if capture.isOpened():
+            for cvprop in [#cv2.CAP_PROP_BRIGHTNESS
+                    #cv2.CAP_PROP_CONTRAST
+                    #cv2.CAP_PROP_CONVERT_RGB
+                    #cv2.CAP_PROP_EXPOSURE
+                    cv2.CAP_PROP_FORMAT,
+                    cv2.CAP_PROP_FOURCC,
+                    cv2.CAP_PROP_FPS,
+                    cv2.CAP_PROP_FRAME_COUNT,
+                    cv2.CAP_PROP_FRAME_HEIGHT,
+                    cv2.CAP_PROP_FRAME_WIDTH,
+                    #cv2.CAP_PROP_GAIN,
+                    #cv2.CAP_PROP_HUE
+                    #cv2.CAP_PROP_MODE
+                    #cv2.CAP_PROP_POS_AVI_RATIO
+                    #cv2.CAP_PROP_POS_FRAMES
+                    #cv2.CAP_PROP_POS_MSEC
+                    #cv2.CAP_PROP_RECTIFICATION,
+                    #cv2.CAP_PROP_SATURATION
+            ]:
+                prop = capture.get(cvprop)
+                if cvprop == cv2.CAP_PROP_FOURCC and prop > 0:
+                    prop = int2FOURCC(int(prop))
+                videoProperties[cvPropertyNames[cvprop]] = prop
+        else:
+            print('Video capture for {} failed'.format(filename))
+        return videoProperties
+
    def getImagesFromVideo(videoFilename, firstFrameNum = 0, lastFrameNum = 1, step = 1, saveImage = False, outputPrefix = 'image'):
        '''Returns nFrames images from the video sequence
        If saveImage is True, the images are written to disk as
        outputPrefix + zero-padded frame number + .png instead of being returned'''
        images = []
        capture = cv2.VideoCapture(videoFilename)
        if capture.isOpened():
            rawCount = capture.get(cv2.CAP_PROP_FRAME_COUNT)
            if rawCount < 0: # the frame count is not always available
                rawCount = lastFrameNum+1
            # number of digits for zero-padded output filenames
            nDigits = int(floor(log10(rawCount)))+1
            ret = False
            capture.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum)
            frameNum = firstFrameNum
            while frameNum<lastFrameNum and frameNum<rawCount:
                ret, img = capture.read()
                i = 0
                while not ret and i<10: # retry reading a few times
                    ret, img = capture.read()
                    i += 1
                # NOTE(review): if reading keeps failing and img stays None,
                # frameNum is never incremented and this loop does not
                # terminate — confirm reads eventually succeed or fail the outer condition
                if img is not None and img.size>0:
                    if saveImage:
                        frameNumStr = format(frameNum, '0{}d'.format(nDigits))
                        cv2.imwrite(outputPrefix+frameNumStr+'.png', img)
                    else:
                        images.append(img)
                    frameNum +=step
                    if step > 1: # jump to the next frame to read
                        capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
            capture.release()
        else:
            print('Video capture for {} failed'.format(videoFilename))
        return images
+    
+    def getFPS(videoFilename):
+        capture = cv2.VideoCapture(videoFilename)
+        if capture.isOpened():
+            fps = capture.get(cv2.CAP_PROP_FPS)
+            capture.release()
+            return fps
+        else:
+            print('Video capture for {} failed'.format(videoFilename))
+            return None
+        
+    def imageBoxSize(obj, frameNum, width, height, px = 0.2, py = 0.2):
+        'Computes the bounding box size of object at frameNum'
+        x = []
+        y = []
+        if obj.hasFeatures():
+            for f in obj.getFeatures():
+                if f.existsAtInstant(frameNum):
+                    p = f.getPositionAtInstant(frameNum)
+                    x.append(p.x)
+                    y.append(p.y)
+        xmin = min(x)
+        xmax = max(x)
+        ymin = min(y)
+        ymax = max(y)
+        xMm = px * (xmax - xmin)
+        yMm = py * (ymax - ymin)
+        a = max(ymax - ymin + (2 * yMm), xmax - xmin + (2 * xMm))
+        yCropMin = int(max(0, .5 * (ymin + ymax - a)))
+        yCropMax = int(min(height - 1, .5 * (ymin + ymax + a)))
+        xCropMin = int(max(0, .5 * (xmin + xmax - a)))
+        xCropMax = int(min(width - 1, .5 * (xmin + xmax + a)))
+        return yCropMin, yCropMax, xCropMin, xCropMax
+        
+    def imageBox(img, obj, frameNum, width, height, px = 0.2, py = 0.2, minNPixels = 800):
+        'Computes the bounding box of object at frameNum'
+        yCropMin, yCropMax, xCropMin, xCropMax = imageBoxSize(obj, frameNum, width, height, px, py)
+        if yCropMax > yCropMin and xCropMax > xCropMin and (yCropMax - yCropMin) * (xCropMax - xCropMin) > minNPixels:
+            return img[yCropMin : yCropMax, xCropMin : xCropMax]
+        else:
+            return None
+
+    def tracking(configFilename, grouping, videoFilename = None, dbFilename = None, homographyFilename = None, maskFilename = None, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, dryRun = False):
+        '''Runs the tracker in a subprocess
+        if grouping is True, it is feature grouping
+        otherwise it is feature tracking'''
+        if grouping:
+            trackingMode = '--gf'
+        else:
+            trackingMode = '--tf'
+        cmd = [trackerExe, configFilename, trackingMode, '--quiet']
+        
+        if videoFilename is not None:
+            cmd += ['--video-filename', videoFilename]
+        if dbFilename is not None:
+            cmd += ['--database-filename', dbFilename]
+        if homographyFilename is not None:
+            cmd += ['--homography-filename', homographyFilename]
+        if maskFilename is not None:
+            cmd += ['--mask-filename', maskFilename]
+        if undistort:
+            cmd += ['--undistort', 'true']
+            if intrinsicCameraMatrix is not None: # we currently have to save a file
+                intrinsicCameraFilename = '/tmp/intrinsic-{}.txt'.format(time())
+                savetxt(intrinsicCameraFilename, intrinsicCameraMatrix)
+                cmd += ['--intrinsic-camera-filename', intrinsicCameraFilename]
+            if distortionCoefficients is not None:
+                cmd += ['--distortion-coefficients '+' '.join([str(x) for x in distortionCoefficients])]
+        if dryRun:
+            print(cmd)
+        else:
+            run(cmd)
+        
+    def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False, nZerosFilenameArg = None, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., annotations = [], gtMatches = {}, toMatches = {}, colorBlind = False):
+        '''Displays the objects overlaid frame by frame over the video '''
+        if colorBlind:
+            colorType = 'colorblind'
+        else:
+            colorType = 'default'
+
+        capture = cv2.VideoCapture(videoFilename)
+        width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
+        height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+        windowName = 'frame'
+        if rescale == 1.:
+            cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
+
+        if undistort: # setup undistortion
+            [map1, map2], newCameraMatrix = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
+        if capture.isOpened():
+            key = -1
+            ret = True
+            frameNum = firstFrameNum
+            capture.set(cv2.CAP_PROP_POS_FRAMES, firstFrameNum)
+            if lastFrameNumArg is None:
+                lastFrameNum = float("inf")
+            else:
+                lastFrameNum = lastFrameNumArg
+            if nZerosFilenameArg is None:
+                if lastFrameNumArg is None:
+                    nZerosFilename = int(ceil(log10(objects[-1].getLastInstant())))
+                else:
+                    nZerosFilename = int(ceil(log10(lastFrameNum)))
+            else:
+                nZerosFilename = nZerosFilenameArg
+            while ret and not quitKey(key) and frameNum <= lastFrameNum:
+                ret, img = capture.read()
+                if ret:
+                    if undistort:
+                        img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
+                    if printFrames:
+                        print('frame {0}'.format(frameNum))
+                    # plot objects
+                    for obj in objects[:]:
+                        if obj.existsAtInstant(frameNum):
+                            if not hasattr(obj, 'projectedPositions'):
+                                obj.projectedPositions = obj.getPositions().homographyProject(homography)
+                                if undistort:
+                                    obj.projectedPositions = obj.projectedPositions.newCameraProject(newCameraMatrix)
+                            cvPlot(img, obj.projectedPositions, cvColors[colorType][obj.getNum()], frameNum-obj.getFirstInstant())
+                            if frameNum not in boundingBoxes and obj.hasFeatures():
+                                yCropMin, yCropMax, xCropMin, xCropMax = imageBoxSize(obj, frameNum, homography, width, height)
+                                cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue[colorType], 1)
+                            objDescription = '{} '.format(obj.num)
+                            if moving.userTypeNames[obj.userType] != 'unknown':
+                                objDescription += moving.userTypeNames[obj.userType][0].upper()
+                            if len(annotations) > 0: # if we loaded annotations, but there is no match
+                                if frameNum not in toMatches[obj.getNum()]:
+                                    objDescription += " FA"
+                            cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, cvColors[colorType][obj.getNum()])
+                        if obj.getLastInstant() == frameNum:
+                            objects.remove(obj)
+                    # plot object bounding boxes
+                    if frameNum in boundingBoxes:
+                        for rect in boundingBoxes[frameNum]:
+                            cv2.rectangle(img, rect[0].asint().astuple(), rect[1].asint().astuple(), cvColors[colorType][obj.getNum()])
+                    # plot ground truth
+                    if len(annotations) > 0:
+                        for gt in annotations:
+                            if gt.existsAtInstant(frameNum):
+                                if frameNum in gtMatches[gt.getNum()]:
+                                    color = cvColors[colorType][gtMatches[gt.getNum()][frameNum]] # same color as object
+                                else:
+                                    color = cvRed[colorType]
+                                    cv2.putText(img, 'Miss', gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), cv2.FONT_HERSHEY_PLAIN, 1, color)
+                                cv2.rectangle(img, gt.topLeftPositions[frameNum-gt.getFirstInstant()].asint().astuple(), gt.bottomRightPositions[frameNum-gt.getFirstInstant()].asint().astuple(), color)
+                    # saving images and going to next
+                    if not saveAllImages:
+                        cvImshow(windowName, img, rescale)
+                        key = cv2.waitKey()
+                    if saveAllImages or saveKey(key):
+                        cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img)
+                    frameNum += nFramesStep
+                    if nFramesStep > 1:
+                        capture.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
+            cv2.destroyAllWindows()
+        else:
+            print('Cannot load file ' + videoFilename)
+
    def computeHomographyFromPDTV(camera):
        '''Returns the homography matrix at ground level from PDTV camera
        https://bitbucket.org/hakanardo/pdtv'''
        # camera = pdtv.load(cameraFilename)
        # project 4 arbitrary image points to the ground plane and fit the
        # homography to these correspondences (4 point pairs determine it)
        srcPoints = [[x,y] for x, y in zip([1.,2.,2.,1.],[1.,1.,2.,2.])] # need floats!!
        dstPoints = []
        for srcPoint in srcPoints:
            projected = camera.image_to_world(tuple(srcPoint))
            dstPoints.append([projected[0], projected[1]])
        H, mask = cv2.findHomography(array(srcPoints), array(dstPoints), method = 0) # No need for different methods for finding homography
        return H
+
+    def getIntrinsicCameraMatrix(cameraData):
+        return array([[cameraData['f']*cameraData['Sx']/cameraData['dx'], 0, cameraData['Cx']],
+                      [0, cameraData['f']/cameraData['dy'], cameraData['Cy']],
+                      [0, 0, 1.]])
+
+    def getDistortionCoefficients(cameraData):
+        return array([cameraData['k']]+4*[0])
+    
    def undistortedCoordinates(map1, map2, x, y, maxDistance = 1.):
        '''Returns the coordinates of a point in undistorted image
        map1 and map2 are the mapping functions from undistorted image
        to distorted (original image)
        map1(x,y) = originalx, originaly

        Result: weighted average of the undistorted pixel coordinates whose
        mapped coordinates fall within maxDistance of (x, y); NaN if no pixel
        is close enough (the weight sums are then 0)'''
        distx = npabs(map1-x)
        disty = npabs(map2-y)
        indices = logical_and(distx<maxDistance, disty<maxDistance)
        # NOTE(review): unravel_index is applied to the tuple returned by
        # nonzero, not to flat indices — confirm this yields the intended
        # (y, x) coordinate arrays of the matching pixels
        closeCoordinates = unravel_index(nonzero(indices), distx.shape) # returns i,j, ie y,x
        # weights decrease linearly with distance from (x, y)
        xWeights = 1-distx[indices]
        yWeights = 1-disty[indices]
        return dot(xWeights, closeCoordinates[1])/npsum(xWeights), dot(yWeights, closeCoordinates[0])/npsum(yWeights)
+
    def undistortTrajectoryFromCVMapping(map1, map2, t):
        '''test 'perfect' inversion
        Undistorts trajectory t point by point through undistortedCoordinates,
        skipping (and printing) the points for which the inversion yields NaN'''
        undistortedTrajectory = moving.Trajectory()
        for i,p in enumerate(t):
            res = undistortedCoordinates(map1, map2, p.x,p.y)
            if not isnan(res).any():
                undistortedTrajectory.addPositionXY(res[0], res[1])
            else:
                # inversion failed for this point: report index, point and result
                print('{} {} {}'.format(i,p,res))
        return undistortedTrajectory
+
+    def computeInverseMapping(originalImageSize, map1, map2):
+        'Computes inverse mapping from maps provided by cv2.initUndistortRectifyMap'
+        invMap1 = -ones(originalImageSize)
+        invMap2 = -ones(originalImageSize)
+        for x in range(0,originalImageSize[1]):
+            for y in range(0,originalImageSize[0]):
+                res = undistortedCoordinates(x,y, map1, map2)
+                if not isnan(res).any():
+                    invMap1[y,x] = res[0]
+                    invMap2[y,x] = res[1]
+        return invMap1, invMap2
+
    def intrinsicCameraCalibration(path, checkerBoardSize=[6,7], secondPassSearch=False, display=False, fixK2 = True, fixK3 = True, zeroTangent = True):
        ''' Camera calibration searches through all the images (jpg or png) located
        in _path_ for matches to a checkerboard pattern of size checkerBoardSize.
        These images should all be of the same camera with the same resolution.
        
        For best results, use an asymmetric board and ensure that the image has
        very high contrast, including the background. 

        checkerBoardSize is the number of internal corners (7x10 squares have 6x9 internal corners) 
        
        Returns (camera matrix, distortion coefficients), or None if no pattern
        was found or calibration failed.

        The code below is based on:
        https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
        Modified by Paul St-Aubin
        '''
        import glob, os

        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = zeros((checkerBoardSize[0]*checkerBoardSize[1],3), float32)
        objp[:,:2] = mgrid[0:checkerBoardSize[1],0:checkerBoardSize[0]].T.reshape(-1,2)

        # Arrays to store object points and image points from all the images.
        objpoints = [] # 3d point in real world space
        imgpoints = [] # 2d points in image plane.

        ## Loop through all images in _path_
        images = glob.glob(os.path.join(path,'*.[jJ][pP][gG]'))+glob.glob(os.path.join(path,'*.[jJ][pP][eE][gG]'))+glob.glob(os.path.join(path,'*.[pP][nN][gG]'))
        for fname in images:
            img = cv2.imread(fname)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # Find the chess board corners
            ret, corners = cv2.findChessboardCorners(gray, (checkerBoardSize[1],checkerBoardSize[0]), None)

            # If found, add object points, image points (after refining them)
            if ret:
                print('Found pattern in '+fname)
                
                # optionally refine the corner locations to sub-pixel accuracy
                if secondPassSearch:
                    corners = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)

                objpoints.append(objp)
                imgpoints.append(corners)

                # Draw and display the corners
                if display:
                    cv2.drawChessboardCorners(img, (checkerBoardSize[1],checkerBoardSize[0]), corners, ret)
                    if img is not None:
                        cv2.imshow('img',img)
                        cv2.waitKey(0)
            else:
                print('Pattern not found in '+fname)
        ## Close up image loading and calibrate
        cv2.destroyAllWindows()
        if len(objpoints) == 0 or len(imgpoints) == 0: 
            return None
        try:
            # select the distortion model through the calibration flags
            flags = 0
            if fixK2:
                flags += cv2.CALIB_FIX_K2
            if fixK3:
                flags += cv2.CALIB_FIX_K3
            if zeroTangent:
                flags += cv2.CALIB_ZERO_TANGENT_DIST
            ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None, flags = flags)
        except NameError:
            return None
        savetxt('intrinsic-camera.txt', camera_matrix)
        # ret is the RMS reprojection error of the calibration
        print('error: {}'.format(ret))
        return camera_matrix, dist_coeffs
+
+    def undistortImage(img, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., interpolation=cv2.INTER_LINEAR):
+        '''Undistorts the image passed in argument'''
+        width = img.shape[1]
+        height = img.shape[0]
+        [map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
+        return cv2.remap(img, map1, map2, interpolation=interpolation)
+
def homographyProject(points, homography, output3D = False):
    '''Returns the coordinates of the points (2xN array) projected through homography
    If output3D is True, the homogeneous 3xN coordinates are returned'''
    if points.shape[0] != 2:
        raise Exception('points of dimension {}'.format(points.shape))

    if homography is None or homography.size == 0:
        # no homography: return the points unchanged (homogeneous if requested)
        if output3D:
            return append(points,[[1]*points.shape[1]], 0) # 3xN
        return points
    outputDim = 3 if output3D else 2
    augmentedPoints = append(points,[[1]*points.shape[1]], 0) # 3xN
    projected = dot(homography, augmentedPoints)
    return projected[:outputDim,:]/projected[2]
+
def imageToWorldProject(points, intrinsicCameraMatrix = None, distortionCoefficients = None, homography = None):
    '''Projects points (2xN array) from image (video) space to world space
    1. through undistorting if provided by intrinsic camera matrix and distortion coefficients
    2. through homograph projection (from ideal point (no camera) to world)'''
    if points.shape[0] != 2:
        raise Exception('points of dimension {}'.format(points.shape))

    if intrinsicCameraMatrix is None or distortionCoefficients is None:
        return homographyProject(points, homography)
    undistorted = cv2.undistortPoints(points.T.reshape(1,points.shape[1], 2), intrinsicCameraMatrix, distortionCoefficients).reshape(-1,2)
    return homographyProject(undistorted.T, homography)
+
def worldToImageProject(points, intrinsicCameraMatrix = None, distortionCoefficients = None, homography = None):
    '''Projects points (2xN array) from world space to image (video) space
    1. through homography projection (world to ideal, no-camera, points)
    2. through camera projection and distortion if the intrinsic camera matrix
    and distortion coefficients are provided
    (the original docstring described the inverse, image-to-world, direction)'''
    if points.shape[0] != 2:
        raise Exception('points of dimension {}'.format(points.shape))

    if intrinsicCameraMatrix is not None and distortionCoefficients is not None:
        projected3D = homographyProject(points, homography, True)
        projected, jacobian = cv2.projectPoints(projected3D.T, (0.,0.,0.), (0.,0.,0.), intrinsicCameraMatrix, distortionCoefficients) # in: 3xN, out: 2x1xN
        return projected.reshape(-1,2).T
    else:
        return homographyProject(points, homography)
+    
def newCameraProject(points, newCameraMatrix):
    '''Projects points (2xN array) as if seen by camera
    (or reverse by inverting the camera matrix)
    Returns the points unchanged if newCameraMatrix is None'''
    if points.shape[0] != 2:
        raise Exception('points of dimension {}'.format(points.shape))

    if newCameraMatrix is None:
        return points
    # augment to homogeneous 3xN coordinates before applying the camera matrix
    augmentedPoints = append(points,[[1]*points.shape[1]], 0) # 3xN
    return dot(newCameraMatrix, augmentedPoints)[:2,:]
+
+if opencvAvailable:
+    def computeTranslation(img1, img2, img1Points, maxTranslation2, minNMatches, windowSize = (5,5), level = 5, criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)):
+        '''Computes the translation of img2 with respect to img1
+        (loaded using OpenCV as numpy arrays)
+        img1Points are used to compute the translation
+
+        TODO add diagnostic if data is all over the place, and it most likely is not a translation (eg zoom, other non linear distortion)'''
+
+        nextPoints = array([])
+        (img2Points, status, track_error) = cv2.calcOpticalFlowPyrLK(img1, img2, img1Points, nextPoints, winSize=windowSize, maxLevel=level, criteria=criteria)
+        # calcOpticalFlowPyrLK(prevImg, nextImg, prevPts[, nextPts[, status[, err[, winSize[, maxLevel[, criteria[, derivLambda[, flags]]]]]]]]) -> nextPts, status, err
+        delta = []
+        for (k, (p1,p2)) in enumerate(zip(img1Points, img2Points)):
+            if status[k] == 1:
+                dp = p2-p1
+                d = npsum(dp**2)
+                if d < maxTranslation2:
+                    delta.append(dp)
+        if len(delta) >= minNMatches:
+            return median(delta, axis=0)
+        else:
+            print(dp)
+            return None
+
+if skimageAvailable:
+    from skimage.feature import hog
+    from skimage import color, transform
+    
    def HOG(image, rescaleSize = (64, 64), orientations = 9, pixelsPerCell = (8,8), cellsPerBlock = (2,2), blockNorm = 'L1', visualize = False, transformSqrt = False):
        '''Computes the Histogram of Oriented Gradients of image, converted to
        grayscale and resized to rescaleSize, returned as a float32 feature vector
        If visualize is True, also plots the input image and the HoG image'''
        bwImg = color.rgb2gray(image)
        inputImg = transform.resize(bwImg, rescaleSize)
        features = hog(inputImg, orientations, pixelsPerCell, cellsPerBlock, blockNorm, visualize, transformSqrt, True)
        if visualize:
            # when visualizing, hog returns (features, visualization image)
            hogViz = features[1]
            features = features[0]
            figure()
            subplot(1,2,1)
            imshow(inputImg)
            subplot(1,2,2)
            imshow(hogViz)
        return float32(features)
+
+    def createHOGTrainingSet(imageDirectory, classLabel, rescaleSize = (64,64), orientations = 9, pixelsPerCell = (8,8), blockNorm = 'L1', cellsPerBlock = (2, 2), visualize = False, transformSqrt = False):
+        inputData = []
+        for filename in listdir(imageDirectory):
+            img = imread(imageDirectory+filename)
+            features = HOG(img, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, blockNorm, visualize, transformSqrt)
+            inputData.append(features)
+
+        nImages = len(inputData)
+        return array(inputData, dtype = float32), array([classLabel]*nImages)
+
+        
+#########################
+# running tests
+#########################
+
if __name__ == "__main__":
    import doctest
    import unittest
    # run the doctests stored in the external test file
    suite = doctest.DocFileSuite('tests/cvutils.txt')
    #suite = doctest.DocTestSuite()
    unittest.TextTestRunner().run(suite)
    #doctest.testmod()
    #doctest.testfile("example.txt")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/events.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,374 @@
+#! /usr/bin/env python
+'''Libraries for events
+Interactions, pedestrian crossing...'''
+
+from trafficintelligence import moving, prediction, indicators, utils, cvutils, ml
+from trafficintelligence.base import VideoFilenameAddable
+
+import numpy as np
+
+import multiprocessing
+import itertools, logging
+
+
+def findRoute(prototypes,objects,i,j,noiseEntryNums,noiseExitNums,minSimilarity= 0.3, spatialThreshold=1.0, delta=180):
+    '''Finds the route (origin, destination) of object objects[j]
+    by LCSS similarity of its positions to the prototype trajectories
+
+    i is the object's current (origin, destination) pair; candidate routes share
+    its origin (if not a noise entry), else its destination (if not a noise exit),
+    else all routes are considered
+    Returns the best-matching route if its similarity reaches minSimilarity, else i
+
+    NOTE(review): distanceForLCSS is not defined in this module view - presumably a
+    module-level point-distance function; verify. If no candidate route is in
+    prototypes, max() on the empty routeSim dict raises ValueError - confirm callers
+    guarantee at least one match'''
+    if i[0] not in noiseEntryNums: 
+        prototypesRoutes= [ x for x in sorted(prototypes.keys()) if i[0]==x[0]]
+    elif i[1] not in noiseExitNums:
+        prototypesRoutes=[ x for x in sorted(prototypes.keys()) if i[1]==x[1]]
+    else:
+        prototypesRoutes=[x for x in sorted(prototypes.keys())]
+    routeSim={}
+    lcss = utils.LCSS(similarityFunc=lambda x,y: (distanceForLCSS(x,y) <= spatialThreshold),delta=delta)
+    for y in prototypesRoutes: 
+        if y in prototypes:
+            prototypesIDs=prototypes[y]
+            similarity=[]
+            for x in prototypesIDs:
+                s=lcss.computeNormalized(objects[j].positions, objects[x].positions)
+                similarity.append(s)
+            # keep the best similarity over all prototypes of route y
+            routeSim[y]=max(similarity)
+    route=max(routeSim, key=routeSim.get)
+    if routeSim[route]>=minSimilarity:
+        return route
+    else:
+        return i
+
+def getRoute(obj,prototypes,objects,noiseEntryNums,noiseExitNums,useDestination=True):
+    '''Returns the route (obj.startRouteID, obj.endRouteID) of obj,
+    replacing it by the closest prototype route (findRoute) when useDestination
+    is True and the route is not a key of prototypes'''
+    route=(obj.startRouteID,obj.endRouteID)
+    if useDestination:
+        if route not in prototypes:
+            route= findRoute(prototypes,objects,route,obj.getNum(),noiseEntryNums,noiseExitNums)
+    return route
+
+class Interaction(moving.STObject, VideoFilenameAddable):
+    '''Class for an interaction between two road users 
+    or a road user and an obstacle
+    
+    link to the moving objects
+    contains the indicators in a dictionary with the names as keys
+    '''
+
+    categories = {'headon': 0,
+                  'rearend': 1,
+                  'side': 2,
+                  'parallel': 3}
+
+    indicatorNames = ['Collision Course Dot Product',
+                      'Collision Course Angle',
+                      'Distance',
+                      'Minimum Distance',
+                      'Velocity Angle',
+                      'Speed Differential',
+                      'Collision Probability',
+                      'Time to Collision', # 7
+                      'Probability of Successful Evasive Action',
+                      'predicted Post Encroachment Time',
+                      'Post Encroachment Time']
+
+    indicatorNameToIndices = utils.inverseEnumeration(indicatorNames)
+
+    indicatorShortNames = ['CCDP',
+                           'CCA',
+                           'Dist',
+                           'MinDist',
+                           'VA',
+                           'SD',
+                           'PoC',
+                           'TTC',
+                           'P(SEA)',
+                           'pPET',
+                           'PET']
+
+    indicatorUnits = ['',
+                      'rad',
+                      'm',
+                      'm',
+                      'rad',
+                      'm/s',
+                      '',
+                      's',
+                      '',
+                      's',
+                      's']
+
+    timeIndicators = ['Time to Collision', 'predicted Post Encroachment Time']
+
+    def __init__(self, num = None, timeInterval = None, roaduserNum1 = None, roaduserNum2 = None, roadUser1 = None, roadUser2 = None):
+        moving.STObject.__init__(self, num, timeInterval)
+        if timeInterval is None and roadUser1 is not None and roadUser2 is not None:
+            self.timeInterval = roadUser1.commonTimeInterval(roadUser2)
+        self.roadUser1 = roadUser1
+        self.roadUser2 = roadUser2
+        if roaduserNum1 is not None and roaduserNum2 is not None:
+            self.roadUserNumbers = set([roaduserNum1, roaduserNum2])
+        elif roadUser1 is not None and roadUser2 is not None:
+            self.roadUserNumbers = set([roadUser1.getNum(), roadUser2.getNum()])
+        else:
+            self.roadUserNumbers = None
+        self.indicators = {}
+        self.interactionInterval = None
+         # list for collison points and crossing zones
+        self.collisionPoints = None
+        self.crossingZones = None
+
+    def getRoadUserNumbers(self):
+        return self.roadUserNumbers
+
+    def setRoadUsers(self, objects):
+        nums = sorted(list(self.getRoadUserNumbers()))
+        if nums[0]<len(objects) and objects[nums[0]].getNum() == nums[0]:
+            self.roadUser1 = objects[nums[0]]
+        if nums[1]<len(objects) and objects[nums[1]].getNum() == nums[1]:
+            self.roadUser2 = objects[nums[1]]
+
+        if self.roadUser1 is None or self.roadUser2 is None:
+            self.roadUser1 = None
+            self.roadUser2 = None
+            i = 0
+            while i < len(objects) and self.roadUser2 is None:
+                if objects[i].getNum() in nums:
+                    if self.roadUser1 is None:
+                        self.roadUser1 = objects[i]
+                    else:
+                        self.roadUser2 = objects[i]
+                i += 1
+
+    def getIndicator(self, indicatorName):
+        return self.indicators.get(indicatorName, None)
+
+    def addIndicator(self, indicator):
+        if indicator is not None:
+            self.indicators[indicator.name] = indicator
+
+    def getIndicatorValueAtInstant(self, indicatorName, instant):
+        indicator = self.getIndicator(indicatorName)
+        if indicator is not None:
+            return indicator[instant]
+        else:
+            return None
+
+    def getIndicatorValuesAtInstant(self, instant):
+        '''Returns list of indicator values at instant
+        as dict (with keys from indicators dict)'''
+        values = {}
+        for k, indicator in self.indicators.items():
+            values[k] = indicator[instant]
+        return values
+        
+    def plot(self, options = '', withOrigin = False, timeStep = 1, withFeatures = False, restricted = True, **kwargs):
+        if restricted:
+            self.roadUser1.getObjectInTimeInterval(self.timeInterval).plot(options, withOrigin, timeStep, withFeatures, **kwargs)
+            self.roadUser2.getObjectInTimeInterval(self.timeInterval).plot(options, withOrigin, timeStep, withFeatures, **kwargs)
+        else:
+            self.roadUser1.plot(options, withOrigin, timeStep, withFeatures, **kwargs)
+            self.roadUser2.plot(options, withOrigin, timeStep, withFeatures, **kwargs)
+
+    def plotOnWorldImage(self, nPixelsPerUnitDistance, options = '', withOrigin = False, timeStep = 1, **kwargs):
+        self.roadUser1.plotOnWorldImage(nPixelsPerUnitDistance, options, withOrigin, timeStep, **kwargs)
+        self.roadUser2.plotOnWorldImage(nPixelsPerUnitDistance, options, withOrigin, timeStep, **kwargs)
+
+    def play(self, videoFilename, homography = None, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., allUserInstants = False):
+        if self.roadUser1 is not None and self.roadUser2 is not None:
+            if allUserInstants:
+                firstFrameNum = min(self.roadUser1.getFirstInstant(), self.roadUser2.getFirstInstant())
+                lastFrameNum = max(self.roadUser1.getLastInstant(), self.roadUser2.getLastInstant())
+            else:
+                firstFrameNum = self.getFirstInstant()
+                lastFrameNum = self.getLastInstant()
+            cvutils.displayTrajectories(videoFilename, [self.roadUser1, self.roadUser2], homography = homography, firstFrameNum = firstFrameNum, lastFrameNumArg = lastFrameNum, undistort = undistort, intrinsicCameraMatrix = intrinsicCameraMatrix, distortionCoefficients = distortionCoefficients, undistortedImageMultiplication = undistortedImageMultiplication)
+        else:
+            print('Please set the interaction road user attributes roadUser1 and roadUser1 through the method setRoadUsers')
+
+    def computeIndicators(self):
+        '''Computes the collision course cosine only if the cosine is positive'''
+        collisionCourseDotProducts = {}
+        collisionCourseAngles = {}
+        velocityAngles = {}
+        distances = {}
+        speedDifferentials = {}
+        interactionInstants = []
+        for instant in self.timeInterval:
+            deltap = self.roadUser1.getPositionAtInstant(instant)-self.roadUser2.getPositionAtInstant(instant)
+            v1 = self.roadUser1.getVelocityAtInstant(instant)
+            v2 = self.roadUser2.getVelocityAtInstant(instant)
+            deltav = v2-v1
+            v1Norm = v1.norm2()
+            v2Norm = v2.norm2()
+            if v1Norm != 0. and v2Norm != 0.:
+                velocityAngles[instant] = np.arccos(moving.Point.dot(v1, v2)/(v1Norm*v2Norm))
+            collisionCourseDotProducts[instant] = moving.Point.dot(deltap, deltav)
+            distances[instant] = deltap.norm2()
+            speedDifferentials[instant] = deltav.norm2()
+            if collisionCourseDotProducts[instant] > 0:
+                interactionInstants.append(instant)
+            if distances[instant] != 0 and speedDifferentials[instant] != 0:
+                collisionCourseAngles[instant] = np.arccos(collisionCourseDotProducts[instant]/(distances[instant]*speedDifferentials[instant]))
+
+        if len(interactionInstants) >= 2:
+            self.interactionInterval = moving.TimeInterval(interactionInstants[0], interactionInstants[-1])
+        else:
+            self.interactionInterval = moving.TimeInterval()
+        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[0], collisionCourseDotProducts))
+        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[1], collisionCourseAngles))
+        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[2], distances, mostSevereIsMax = False))
+        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[4], velocityAngles))
+        self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[5], speedDifferentials))
+
+        # if we have features, compute other indicators
+        if self.roadUser1.hasFeatures() and self.roadUser2.hasFeatures():
+            minDistances={}
+            for instant in self.timeInterval:
+                minDistances[instant] = moving.MovingObject.minDistance(self.roadUser1, self.roadUser2, instant)
+            self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[3], minDistances, mostSevereIsMax = False))
+
+    def categorize(self, velocityAngleTolerance, parallelAngleTolerance, headonCollisionCourseAngleTolerance = None):
+        '''Computes the interaction category by instant
+        velocityAngleTolerance and parallelAngleTolerance in radian
+        velocityAngleTolerance: indicates the angle threshold for rear and head on (180-velocityAngleTolerance), as well as the maximum collision course angle for head on
+        velocityAngleTolerance: indicates the angle between velocity vector (average for parallel) and position vector'''
+        parallelAngleToleranceCosine = np.cos(parallelAngleTolerance)
+        if headonCollisionCourseAngleTolerance is None:
+            headonCollisionCourseAngleTolerance = velocityAngleTolerance
+            
+        self.categories = {}
+        collisionCourseDotProducts = self.getIndicator(Interaction.indicatorNames[0])
+        collisionCourseAngles = self.getIndicator(Interaction.indicatorNames[1])
+        distances = self.getIndicator(Interaction.indicatorNames[2])
+        velocityAngles = self.getIndicator(Interaction.indicatorNames[4])
+        for instant in self.timeInterval:
+            if velocityAngles[instant] < velocityAngleTolerance: # parallel or rear end
+                midVelocity = self.roadUser1.getVelocityAtInstant(instant) + self.roadUser2.getVelocityAtInstant(instant)
+                deltap = self.roadUser1.getPositionAtInstant(instant)-self.roadUser2.getPositionAtInstant(instant)
+                if abs(moving.Point.dot(midVelocity, deltap)/(midVelocity.norm2()*distances[instant])) < parallelAngleToleranceCosine:
+                    self.categories[instant] = Interaction.categories["parallel"]
+                else:
+                    self.categories[instant] = Interaction.categories["rearend"]
+            elif velocityAngles[instant] > np.pi - velocityAngleTolerance and collisionCourseAngles[instant] < headonCollisionCourseAngleTolerance: # head on
+                self.categories[instant] = Interaction.categories["headon"]
+            elif collisionCourseDotProducts[instant] > 0:
+                self.categories[instant] = Interaction.categories["side"]
+
+    def computeCrossingsCollisions(self, predictionParameters, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, timeInterval = None):
+        '''Computes all crossing and collision points at each common instant for two road users. '''
+        TTCs = {}
+        collisionProbabilities = {}
+        if timeInterval is not None:
+            commonTimeInterval = timeInterval
+        else:
+            commonTimeInterval = self.timeInterval
+        self.collisionPoints, crossingZones = predictionParameters.computeCrossingsCollisions(self.roadUser1, self.roadUser2, collisionDistanceThreshold, timeHorizon, computeCZ, debug, commonTimeInterval)
+        for i, cps in self.collisionPoints.items():
+            TTCs[i] = prediction.SafetyPoint.computeExpectedIndicator(cps)
+            collisionProbabilities[i] = sum([p.probability for p in cps])
+        if len(TTCs) > 0:
+            self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[7], TTCs, mostSevereIsMax=False))
+            self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[6], collisionProbabilities))
+        
+        # crossing zones and pPET
+        if computeCZ:
+            self.crossingZones = crossingZones
+            pPETs = {}
+            for i, cz in self.crossingZones.items():
+                pPETs[i] = prediction.SafetyPoint.computeExpectedIndicator(cz)
+            self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[9], pPETs, mostSevereIsMax=False))
+        # TODO add probability of collision, and probability of successful evasive action
+
+    def computePET(self, collisionDistanceThreshold):
+        pet, t1, t2=  moving.MovingObject.computePET(self.roadUser1, self.roadUser2, collisionDistanceThreshold)
+        if pet is not None:
+            self.addIndicator(indicators.SeverityIndicator(Interaction.indicatorNames[10], {min(t1, t2): pet}, mostSevereIsMax = False))
+
+    def setCollision(self, collision):
+        '''indicates if it is a collision: argument should be boolean'''
+        self.collision = collision
+
+    def isCollision(self):
+        if hasattr(self, 'collision'):
+            return self.collision
+        else:
+            return None
+
+    def getCollisionPoints(self):
+        return self.collisionPoints
+
+    def getCrossingZones(self):
+        return self.crossingZones
+
+def createInteractions(objects, _others = None, maxDurationApart = 0):
+    '''Create all interactions of two co-existing road users'''
+    if _others is not None:
+        others = _others
+
+    interactions = []
+    num = 0
+    for i in range(len(objects)):
+        if _others is None:
+            others = objects[:i]
+        for j in range(len(others)):
+            commonTimeInterval = objects[i].commonTimeInterval(others[j])
+            if not commonTimeInterval.empty() or (maxDurationApart > 0 and objects[i].getTimeInterval().distance(objects[j].getTimeInterval()) < maxDurationApart):
+                interactions.append(Interaction(num, commonTimeInterval, objects[i].num, others[j].num, objects[i], others[j]))
+                num += 1
+    return interactions
+
+def findInteraction(interactions, roadUserNum1, roadUserNum2):
+    'Returns the interaction between the road users numbered roadUserNum1 and roadUserNum2 (None if not found)'
+    i=0
+    while i<len(interactions) and set([roadUserNum1, roadUserNum2]) != interactions[i].getRoadUserNumbers():
+        i+=1
+    if i<len(interactions):
+        return interactions[i]
+    else:
+        return None
+
+def computeIndicators(interactions, computeMotionPrediction, computePET, predictionParameters, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, timeInterval = None):
+    '''Computes the safety indicators for each interaction in place:
+    the basic indicators (computeIndicators), optionally the motion-prediction-based
+    indicators (TTC, collision probability, pPET) and optionally the PET
+    Returns the same list of interactions'''
+    for inter in interactions:
+        print('processing interaction {}'.format(inter.getNum())) # logging.debug('processing interaction {}'.format(inter.getNum()))
+        inter.computeIndicators()
+        if computeMotionPrediction:
+            inter.computeCrossingsCollisions(predictionParameters, collisionDistanceThreshold, timeHorizon, computeCZ, debug, timeInterval)
+        if computePET:
+            inter.computePET(collisionDistanceThreshold)
+    return interactions
+    
+def aggregateSafetyPoints(interactions, pointType = 'collision'):
+    '''Put all collision points or crossing zones in a list for display
+
+    pointType is 'collision' or 'crossing'; any other value prints a warning
+    and returns an empty list'''
+    allPoints = []
+    if pointType == 'collision':
+        for i in interactions:
+            for points in i.collisionPoints.values():
+                allPoints += points
+    elif pointType == 'crossing':
+        for i in interactions:
+            # TODO? zone should be modified to a point
+            for points in i.crossingZones.values():
+                allPoints += points
+    else:
+        print('unknown type of point: '+pointType)
+    return allPoints
+
+def prototypeCluster(interactions, similarities, indicatorName, minSimilarity, similarityFunc = None, minClusterSize = None, randomInitialization = False):
+    'Clusters the indicators named indicatorName of the interactions using ml.prototypeCluster'
+    return ml.prototypeCluster([inter.getIndicator(indicatorName) for inter in interactions], similarities, minSimilarity, similarityFunc, minClusterSize, randomInitialization)
+
+class Crossing(moving.STObject):
+    '''Class for the event of a street crossing
+
+    TODO: detect crossing the roadway
+    identify origins and destinations (or only the roadway within the field of view)
+    characterize the crossing
+    detect proximity to vehicles (remove if too similar simultaneously)
+    characterize the interaction'''
+    
+    def __init__(self, roaduserNum = None, num = None, timeInterval = None):
+        moving.STObject.__init__(self, num, timeInterval)
+        self.roaduserNum = roaduserNum # number (id) of the crossing road user
+
+    
+
+if __name__ == "__main__":
+    # runs the doctests in tests/events.txt when the module is executed directly
+    import doctest
+    import unittest
+    suite = doctest.DocFileSuite('tests/events.txt')
+    #suite = doctest.DocTestSuite()
+    unittest.TextTestRunner().run(suite)
+    
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/indicators.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,262 @@
+#! /usr/bin/env python
+'''Class for indicators, temporal indicators, and safety indicators'''
+
+from matplotlib.pyplot import plot, ylim
+from numpy import array, arange, mean, floor, mean
+from scipy import percentile
+
+from trafficintelligence import moving
+from trafficintelligence.utils import LCSS as utilsLCSS
+
+def multivariateName(indicatorNames):
+    'Returns the name of a multivariate indicator: the individual indicator names joined by underscores'
+    return '_'.join(indicatorNames)
+
+# need for a class representing the indicators, their units, how to print them in graphs...
+class TemporalIndicator(object):
+    '''Class for temporal indicators
+    i.e. indicators that take a value at specific instants
+
+    values should be
+    * a dict, for the values at specific time instants
+    * or a list with a time interval object if continuous measurements
+
+    it should have more information like name, unit'''
+    
+    def __init__(self, name, values, timeInterval = None, maxValue = None):
+        self.name = name
+        if timeInterval is None:
+            self.values = values
+            instants = sorted(self.values.keys())
+            if len(instants) > 0:
+                self.timeInterval = moving.TimeInterval(instants[0], instants[-1])
+            else:
+                self.timeInterval = moving.TimeInterval()
+        else:
+            assert len(values) == timeInterval.length()
+            self.timeInterval = timeInterval
+            self.values = {}
+            for i in range(int(round(self.timeInterval.length()))):
+                self.values[self.timeInterval[i]] = values[i]
+        self.maxValue = maxValue
+
+    def __len__(self):
+        return len(self.values)
+
+    def empty(self):
+        return len(self.values) == 0
+
+    def __getitem__(self, t):
+        'Returns the value at time t'
+        return self.values.get(t)
+
+    def getIthValue(self, i):
+        sortedKeys = sorted(self.values.keys())
+        if 0<=i<len(sortedKeys):
+            return self.values[sortedKeys[i]]
+        else:
+            return None
+
+    def __iter__(self):
+        self.iterInstantNum = 0 # index in the interval or keys of the dict
+        return self
+
+    def __next__(self):
+        if self.iterInstantNum >= len(self.values):#(self.timeInterval and self.iterInstantNum>=self.timeInterval.length())\
+           #     or (self.iterInstantNum >= self.values)
+            raise StopIteration
+        else:
+            self.iterInstantNum += 1
+            return self.getIthValue(self.iterInstantNum-1)
+
+    def getTimeInterval(self):
+        return self.timeInterval
+
+    def getName(self):
+        return self.name
+
+    def getValues(self, withNone = True):
+        result = [self.__getitem__(t) for t in self.timeInterval]
+        if withNone:
+            return result
+        else:
+            return [x for x in result if x is not None]
+
+    def getInstants(self):
+        return list(self.values.keys())
+
+    def plot(self, options = '', xfactor = 1., yfactor = 1., timeShift = 0, **kwargs):
+        if self.getTimeInterval().length() == 1:
+            marker = 'o'
+        else:
+            marker = ''
+        time = sorted(self.values.keys())
+        plot([(x+timeShift)/xfactor for x in time], [self.values[i]/yfactor for i in time], options+marker, **kwargs)
+        if self.maxValue:
+            ylim(ymax = self.maxValue)
+
+    @classmethod
+    def createMultivariate(cls, indicators):
+        '''Creates a new temporal indicator where the value at each instant is a list 
+        of the indicator values at the instant, in the same order
+        the time interval will be the union of the time intervals of the indicators
+        name is concatenation of the indicator names'''
+        if len(indicators) < 2:
+            print('Error creating multivariate indicator with only {} indicator'.format(len(indicators)))
+            return None
+
+        timeInterval = moving.TimeInterval.unionIntervals([indic.getTimeInterval() for indic in indicators])
+        values = {}
+        for t in timeInterval:
+            tmpValues = [indic[t] for indic in indicators]
+            uniqueValues = set(tmpValues)
+            if len(uniqueValues) >= 2 or uniqueValues.pop() is not None:
+                values[t] = tmpValues
+        return cls(multivariateName([indic.name for indic in indicators]), values)
+
+# TODO static method avec class en parametre pour faire des indicateurs agrege, list par instant
+
+def l1Distance(x, y): # lambda x,y:abs(x-y)
+    'L1 (absolute) distance between x and y; infinite if either is None (so missing values never match)'
+    if x is None or y is None:
+        return float('inf')
+    else:
+        return abs(x-y)
+
+def multiL1Matching(x, y, thresholds, proportionMatching=1.):
+    '''Returns True if the same-length vectors x and y match,
+    i.e. if at least proportionMatching of the component-wise L1 distances
+    are within the corresponding thresholds'''
+    n = 0
+    nDimensions = len(x)
+    for i in range(nDimensions):
+        if l1Distance(x[i], y[i]) <= thresholds[i]:
+            n += 1
+    return n >= nDimensions*proportionMatching
+
+class LCSS(utilsLCSS):
+    '''Adapted LCSS class for indicators, same pattern
+
+    Indicators shorter than minLength are rejected: compute/computeNormalized/
+    computeDistance then return their neutral value (0, 0. and 1. respectively)'''
+    def __init__(self, similarityFunc, delta = float('inf'), minLength = 0, aligned = False, lengthFunc = min):
+        utilsLCSS.__init__(self, similarityFunc = similarityFunc, delta = delta, aligned = aligned, lengthFunc = lengthFunc)
+        self.minLength = minLength # minimum number of values for an indicator to be compared
+
+    def checkIndicator(self, indicator):
+        'Returns True if the indicator exists and has at least minLength values'
+        return indicator is not None and len(indicator) >= self.minLength
+
+    def compute(self, indicator1, indicator2, computeSubSequence = False):
+        'Raw LCSS between the two indicators (0 if either is missing or too short)'
+        if self.checkIndicator(indicator1) and self.checkIndicator(indicator2):
+            return self._compute(indicator1.getValues(), indicator2.getValues(), computeSubSequence)
+        else:
+            return 0
+
+    def computeNormalized(self, indicator1, indicator2, computeSubSequence = False):
+        'Normalized LCSS similarity (0. if either indicator is missing or too short)'
+        if self.checkIndicator(indicator1) and self.checkIndicator(indicator2):
+            return self._computeNormalized(indicator1.getValues(), indicator2.getValues(), computeSubSequence)
+        else:
+            return 0.
+
+    def computeDistance(self, indicator1, indicator2, computeSubSequence = False):
+        'LCSS distance (1. if either indicator is missing or too short)'
+        if self.checkIndicator(indicator1) and self.checkIndicator(indicator2):
+            return self._computeDistance(indicator1.getValues(), indicator2.getValues(), computeSubSequence)
+        else:
+            return 1.
+        
+class SeverityIndicator(TemporalIndicator):
+    '''Class for severity indicators 
+    field mostSevereIsMax is True 
+    if the most severe value taken by the indicator is the maximum'''
+
+    def __init__(self, name, values, timeInterval=None, mostSevereIsMax=True, maxValue = None): 
+        TemporalIndicator.__init__(self, name, values, timeInterval, maxValue)
+        self.mostSevereIsMax = mostSevereIsMax
+
+    def getMostSevereValue(self, minNInstants=None, centile=None):
+        '''if there are more than minNInstants observations, 
+        returns either the average of these maximum values 
+        or if centile is not None the n% centile from the most severe value
+
+        eg for TTC, centile = 15 returns the 15th centile (value such that 15% of observations are lower)
+
+        Returns None when centile is None and minNInstants is None or larger than
+        the number of observations
+        NOTE(review): relies on the module-level "from scipy import percentile",
+        which was removed in recent SciPy versions (use numpy.percentile) - verify pinned version'''
+        values = list(self.values.values())
+        if centile is not None:
+            if self.mostSevereIsMax:
+                c = 100-centile
+            else:
+                c = centile
+            return percentile(values, c)
+        elif minNInstants is not None and minNInstants <= self.__len__():
+            values = sorted(values, reverse = self.mostSevereIsMax) # inverted if most severe is max -> take the first values
+            return mean(values[:minNInstants])
+        else:
+            return None
+
+    def getInstantOfMostSevereValue(self):
+        '''Returns the instant at which the indicator reaches its most severe value'''
+        if self.mostSevereIsMax:
+            return max(self.values, key=self.values.get)
+        else:
+            return min(self.values, key=self.values.get)
+
+# functions to aggregate discretized maps of indicators
+# TODO add values in the cells between the positions (similar to discretizing vector graphics to bitmap)
+
+def indicatorMap(indicatorValues, trajectory, squareSize):
+    '''Returns a dictionary 
+    with keys for the indices of the cells (squares)
+    in which the trajectory positions are located
+    at which the indicator values are attached
+
+    (cell values are the mean of the indicator values of the positions
+    falling in that cell; keys are (i,j) pairs of numpy floor() results)
+
+    ex: speeds and trajectory'''
+
+    assert len(indicatorValues) == trajectory.length()
+    indicatorMap = {}
+    for k in range(trajectory.length()):
+        p = trajectory[k]
+        i = floor(p.x/squareSize)
+        j = floor(p.y/squareSize)
+        if (i,j) in indicatorMap:
+            indicatorMap[(i,j)].append(indicatorValues[k])
+        else:
+            indicatorMap[(i,j)] = [indicatorValues[k]]
+    # average the values accumulated in each cell
+    for k in indicatorMap:
+        indicatorMap[k] = mean(indicatorMap[k])
+    return indicatorMap
+
+# def indicatorMapFromPolygon(value, polygon, squareSize):
+#     '''Fills an indicator map with the value within the polygon
+#     (array of Nx2 coordinates of the polygon vertices)'''
+#     points = []
+#     for x in arange(min(polygon[:,0])+squareSize/2, max(polygon[:,0]), squareSize):
+#         for y in arange(min(polygon[:,1])+squareSize/2, max(polygon[:,1]), squareSize):
+#             points.append([x,y])
+#     inside = nx.points_inside_poly(array(points), polygon)
+#     indicatorMap = {}
+#     for i in range(len(inside)):
+#         if inside[i]:
+#             indicatorMap[(floor(points[i][0]/squareSize), floor(points[i][1]/squareSize))] = 0
+#     return indicatorMap
+
+def indicatorMapFromAxis(value, limits, squareSize):
+    '''axis = [xmin, xmax, ymin, ymax]
+
+    Returns an indicator map assigning value to every cell of size squareSize
+    in the rectangle defined by limits'''
+    indicatorMap = {}
+    for x in arange(limits[0], limits[1], squareSize):
+        for y in arange(limits[2], limits[3], squareSize):
+            indicatorMap[(floor(x/squareSize), floor(y/squareSize))] = value
+    return indicatorMap
+
+def combineIndicatorMaps(maps, squareSize, combinationFunction):
+    '''Puts many indicator maps together 
+    (applying combinationFunction, eg mean, to the list of values
+    collected in each cell when more than one map has a value there)
+
+    NOTE(review): squareSize is unused; all maps are assumed to be discretized
+    with the same square size - verify callers'''
+    indicatorMap = {}
+    for m in maps:
+        for k,v in m.items():
+            if k in indicatorMap:
+                indicatorMap[k].append(v)
+            else:
+                indicatorMap[k] = [v]
+    for k in indicatorMap:
+        indicatorMap[k] = combinationFunction(indicatorMap[k])
+    return indicatorMap
+
+if __name__ == "__main__":
+    # runs the doctests in tests/indicators.txt when the module is executed directly
+    import doctest
+    import unittest
+    suite = doctest.DocFileSuite('tests/indicators.txt')
+    unittest.TextTestRunner().run(suite)
+#     #doctest.testmod()
+#     #doctest.testfile("example.txt")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/metadata.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,447 @@
+from datetime import datetime, timedelta
+from pathlib import Path
+from os import path, listdir, sep
+from math import floor
+
+from numpy import zeros, loadtxt, array
+
+from sqlalchemy import orm, create_engine, Column, Integer, Float, DateTime, String, ForeignKey, Boolean, Interval
+from sqlalchemy.orm import relationship, backref, sessionmaker
+from sqlalchemy.ext.declarative import declarative_base
+
+from trafficintelligence.utils import datetimeFormat, removeExtension, getExtension, TimeConverter
+from trafficintelligence.cvutils import computeUndistortMaps, videoFilenameExtensions, infoVideo
+from trafficintelligence.moving import TimeInterval, Trajectory
+
+"""
+Metadata to describe how video data and configuration files for video analysis are stored
+
+Typical example is 
+
+site1/view1/2012-06-01/video.avi
+           /2012-06-02/video.avi
+                       ...
+     /view2/2012-06-01/video.avi
+           /2012-06-02/video.avi
+     ...
+
+- where site1 is the path to the directory containing all information pertaining to the site, 
+relative to directory of the SQLite file storing the metadata
+represented by Site class
+(can contain for example the aerial or map image of the site, used for projection)
+
+- view1 is the directory for the first camera field of view (camera fixed position) at site site1
+represented by CameraView class
+(can contain for example the homography file, mask file and tracking configuration file)
+
+- YYYY-MM-DD is the directory containing all the video files for that day
+with camera view view1 at site site1
+
+
+"""
+
+Base = declarative_base()
+
+class Site(Base):
+    '''Observation site, mapped to the sites table
+    name is the path to the directory containing all site information,
+    relative to the directory of the SQLite metadata file'''
+    __tablename__ = 'sites'
+    idx = Column(Integer, primary_key=True)
+    name = Column(String) # path to directory containing all site information (in subdirectories), relative to the database position
+    description = Column(String) # longer names, eg intersection of road1 and road2
+    xcoordinate = Column(Float)  # ideally moving.Point, but needs to be stored as two scalar columns
+    ycoordinate = Column(Float)
+    mapImageFilename = Column(String) # path to map image file, relative to site name, ie sitename/mapImageFilename
+    nUnitsPerPixel = Column(Float) # number of units of distance per pixel in map image
+    worldDistanceUnit = Column(String, default = 'm') # make sure it is default in the database
+    
+    def __init__(self, name, description = "", xcoordinate = None, ycoordinate = None, mapImageFilename = None, nUnitsPerPixel = 1., worldDistanceUnit = 'm'):
+        self.name = name
+        self.description = description
+        self.xcoordinate = xcoordinate
+        self.ycoordinate = ycoordinate
+        self.mapImageFilename = mapImageFilename
+        self.nUnitsPerPixel = nUnitsPerPixel
+        self.worldDistanceUnit = worldDistanceUnit
+
+    def getPath(self):
+        'Returns the site directory path (the site name)'
+        return self.name
+
+    def getMapImageFilename(self, relativeToSiteFilename = True):
+        'Returns the map image filename, by default joined to the site path'
+        if relativeToSiteFilename:
+            return path.join(self.getPath(), self.mapImageFilename)
+        else:
+            return self.mapImageFilename
+
+
+class EnvironementalFactors(Base):
+    '''Represents any environmental factors that may affect the results, in particular
+    * changing weather conditions
+    * changing road configuration, geometry, signalization, etc.
+    ex: sunny, rainy, before counter-measure, after counter-measure
+    NOTE(review): class name is misspelled (Environemental) but renaming would change the public interface'''
+    __tablename__ = 'environmental_factors'
+    idx = Column(Integer, primary_key=True)
+    startTime = Column(DateTime)
+    endTime = Column(DateTime)
+    description = Column(String) # eg sunny, before, after
+    siteIdx = Column(Integer, ForeignKey('sites.idx'))
+
+    site = relationship("Site", backref = backref('environmentalFactors'))
+
+    def __init__(self, startTime, endTime, description, site):
+        'startTime and endTime are passed as strings in utils.datetimeFormat, eg 2011-06-22 10:00:39'
+        self.startTime = datetime.strptime(startTime, datetimeFormat)
+        self.endTime = datetime.strptime(endTime, datetimeFormat)
+        self.description = description
+        self.site = site
+
class CameraType(Base):
    '''Represents the parameters of the specific camera used:
    resolution, frame rate and intrinsic parameters (camera matrix and
    distortion coefficients, serialized as strings in the database).

    Taken and adapted from tvalib'''
    __tablename__ = 'camera_types'
    idx = Column(Integer, primary_key=True)
    name = Column(String)
    resX = Column(Integer)
    resY = Column(Integer)
    frameRate = Column(Float)
    frameRateTimeUnit = Column(String, default = 's')
    intrinsicCameraMatrixStr = Column(String)
    distortionCoefficientsStr = Column(String)

    def __init__(self, name, resX, resY, frameRate, frameRateTimeUnit = 's', trackingConfigurationFilename = None, intrinsicCameraFilename = None, intrinsicCameraMatrix = None, distortionCoefficients = None):
        self.name = name
        self.resX = resX
        self.resY = resY
        self.frameRate = frameRate
        self.frameRateTimeUnit = frameRateTimeUnit
        self.intrinsicCameraMatrix = None # should be np.array
        self.distortionCoefficients = None # list
        # intrinsic parameters come from a tracking configuration file,
        # a plain text matrix file, or are passed directly
        if trackingConfigurationFilename is not None:
            # fixed: absolute import; the implicit 'from storage import' fails under Python 3 packaging
            from trafficintelligence.storage import ProcessParameters
            params = ProcessParameters(trackingConfigurationFilename)
            self.intrinsicCameraMatrix = params.intrinsicCameraMatrix
            self.distortionCoefficients = params.distortionCoefficients
        elif intrinsicCameraFilename is not None:
            self.intrinsicCameraMatrix = loadtxt(intrinsicCameraFilename)
            self.distortionCoefficients = distortionCoefficients
        else:
            self.intrinsicCameraMatrix = intrinsicCameraMatrix
            self.distortionCoefficients = distortionCoefficients
        # serialize for storage in the database
        if self.intrinsicCameraMatrix is not None:
            self.intrinsicCameraMatrixStr = str(self.intrinsicCameraMatrix.tolist())
        if self.distortionCoefficients is not None and len(self.distortionCoefficients) == 5:
            self.distortionCoefficientsStr = str(self.distortionCoefficients)

    @orm.reconstructor
    def initOnLoad(self):
        'Deserializes the intrinsic camera parameters when the object is loaded from the database'
        # fixed: literal_eval was imported only in the first branch, raising a NameError
        # when only the distortion coefficients are stored
        from ast import literal_eval
        if self.intrinsicCameraMatrixStr is not None:
            self.intrinsicCameraMatrix = array(literal_eval(self.intrinsicCameraMatrixStr))
        else:
            self.intrinsicCameraMatrix = None
        if self.distortionCoefficientsStr is not None:
            self.distortionCoefficients = literal_eval(self.distortionCoefficientsStr)
        else:
            self.distortionCoefficients = None

    def computeUndistortMaps(self, undistortedImageMultiplication = None):
        'Computes and caches the undistortion maps if all intrinsic parameters are available (otherwise sets them to None)'
        if undistortedImageMultiplication is not None and self.intrinsicCameraMatrix is not None and self.distortionCoefficients is not None:
            [self.map1, self.map2], newCameraMatrix = computeUndistortMaps(self.resX, self.resY, undistortedImageMultiplication, self.intrinsicCameraMatrix, self.distortionCoefficients)
        else:
            self.map1 = None
            self.map2 = None

    @staticmethod
    def getCameraType(session, cameraTypeId, resX = None):
        'Returns the camera type(s) matching the index or the name (optionally filtered by horizontal resolution)'
        if str.isdigit(cameraTypeId):
            return session.query(CameraType).filter(CameraType.idx == int(cameraTypeId)).all()
        else:
            if resX is not None:
                return session.query(CameraType).filter(CameraType.name.like('%'+cameraTypeId+'%')).filter(CameraType.resX == resX).all()
            else:
                return session.query(CameraType).filter(CameraType.name.like('%'+cameraTypeId+'%')).all()
+
+# class SiteDescription(Base): # list of lines and polygons describing the site, eg for sidewalks, center lines
+            
class CameraView(Base):
    '''A fixed camera field of view at a site, with the associated homography,
    tracking configuration and mask files (paths relative to the site name)'''
    __tablename__ = 'camera_views'
    idx = Column(Integer, primary_key=True)
    description = Column(String)
    homographyFilename = Column(String) # path to homography file, relative to the site name
    siteIdx = Column(Integer, ForeignKey('sites.idx'))
    cameraTypeIdx = Column(Integer, ForeignKey('camera_types.idx'))
    trackingConfigurationFilename = Column(String) # path to configuration .cfg file, relative to site name
    maskFilename = Column(String) # path to mask file, relative to site name
    virtual = Column(Boolean) # indicates it is not a real camera view, eg merged

    site = relationship("Site", backref = backref('cameraViews'))
    cameraType = relationship('CameraType', backref = backref('cameraViews'))

    def __init__(self, description, homographyFilename, site, cameraType, trackingConfigurationFilename, maskFilename, virtual = False):
        self.description = description
        self.homographyFilename = homographyFilename
        self.site = site
        self.cameraType = cameraType
        self.trackingConfigurationFilename = trackingConfigurationFilename
        self.maskFilename = maskFilename
        self.virtual = virtual

    def getHomographyFilename(self, relativeToSiteFilename = True):
        'Returns the homography filename, by default joined to the site path'
        if relativeToSiteFilename:
            return path.join(self.site.getPath(), self.homographyFilename)
        else:
            return self.homographyFilename

    def getTrackingConfigurationFilename(self, relativeToSiteFilename = True):
        'Returns the tracking configuration filename, by default joined to the site path'
        if relativeToSiteFilename:
            return path.join(self.site.getPath(), self.trackingConfigurationFilename)
        else:
            return self.trackingConfigurationFilename

    def getMaskFilename(self, relativeToSiteFilename = True):
        'Returns the mask filename, by default joined to the site path'
        if relativeToSiteFilename:
            return path.join(self.site.getPath(), self.maskFilename)
        else:
            return self.maskFilename

    def getTrackingParameters(self):
        'Loads and returns the tracking parameters from the configuration file'
        # fixed: ProcessParameters was never imported at module level, so this method raised a NameError
        from trafficintelligence.storage import ProcessParameters
        return ProcessParameters(self.getTrackingConfigurationFilename())

    def getHomographyDistanceUnit(self):
        'Returns the world distance unit of the site'
        return self.site.worldDistanceUnit
+    
+class Alignment(Base):
+    'Reference trajectory (polyline of Point rows) associated with a site'
+    __tablename__ = 'alignments'
+    idx = Column(Integer, primary_key=True)
+    siteIdx = Column(Integer, ForeignKey('sites.idx'))
+    
+    site = relationship("Site", backref = backref('alignments'))
+
+    def __init__(self, site):
+        self.site = site
+
+    def getTrajectory(self):
+        'Returns the alignment points as a moving.Trajectory'
+        t = Trajectory()
+        for p in self.points:
+            t.addPositionXY(p.x_coordinate, p.y_coordinate)
+        return t
+
+class Point(Base):
+    'A point of an alignment, identified by (trajectory_id, frame_number)'
+    __tablename__ = 'positions'
+    trajectory_id = Column(Integer, ForeignKey('alignments.idx'), primary_key=True)
+    frame_number = Column(Integer, primary_key=True) # order of points in this alignment, as index
+    x_coordinate = Column(Float)
+    y_coordinate = Column(Float)
+
+    alignment = relationship("Alignment", backref = backref('points', order_by = trajectory_id)) # NOTE(review): order_by trajectory_id is constant within an alignment; frame_number looks like the intended ordering - verify
+    
+    def __init__(self, alignment, index, x, y):
+        self.alignment = alignment
+        self.frame_number = index
+        self.x_coordinate = x
+        self.y_coordinate = y
+
class VideoSequence(Base):
    '''A video file recorded by a camera view, with its start time, duration,
    and the database file containing the tracking results
    (file paths are relative to the site name)'''
    __tablename__ = 'video_sequences'
    idx = Column(Integer, primary_key=True)
    name = Column(String) # path to the video file relative to the site name
    startTime = Column(DateTime)
    duration = Column(Interval) # video sequence duration
    databaseFilename = Column(String) # path to the database file relative to the site name
    virtual = Column(Boolean) # indicates it is not a real video sequence (no video file), eg merged
    cameraViewIdx = Column(Integer, ForeignKey('camera_views.idx'))

    cameraView = relationship("CameraView", backref = backref('videoSequences', order_by = idx))

    def __init__(self, name, startTime, duration, cameraView, databaseFilename = None, virtual = False):
        '''startTime is passed as a datetime or a string in utils.datetimeFormat, eg 2011-06-22 10:00:39
        duration is a timedelta object'''
        self.name = name
        if isinstance(startTime, str):
            self.startTime = datetime.strptime(startTime, datetimeFormat)
        else:
            self.startTime = startTime
        self.duration = duration
        self.cameraView = cameraView
        # default database filename: video filename with .sqlite extension
        if databaseFilename is None and len(self.name) > 0:
            self.databaseFilename = removeExtension(self.name)+'.sqlite'
        else:
            self.databaseFilename = databaseFilename
        self.virtual = virtual

    def getVideoSequenceFilename(self, relativeToSiteFilename = True):
        'Returns the video filename, by default joined to the site path'
        if relativeToSiteFilename:
            return path.join(self.cameraView.site.getPath(), self.name)
        else:
            return self.name

    def getDatabaseFilename(self, relativeToSiteFilename = True):
        'Returns the tracking database filename, by default joined to the site path'
        if relativeToSiteFilename:
            return path.join(self.cameraView.site.getPath(), self.databaseFilename)
        else:
            return self.databaseFilename

    def getTimeInterval(self):
        'Returns the [startTime, startTime+duration] moving.TimeInterval of the sequence'
        return TimeInterval(self.startTime, self.startTime+self.duration)

    def containsInstant(self, instant):
        'Indicates whether the datetime instant falls within the video sequence'
        # fixed: the upper bound was never compared to instant
        # ('and self.startTime+self.duration' was always truthy)
        return self.startTime <= instant <= self.startTime+self.duration

    def intersection(self, startTime, endTime):
        'returns the moving.TimeInterval intersection with [startTime, endTime]'
        return TimeInterval.intersection(self.getTimeInterval(), TimeInterval(startTime, endTime))

    def getFrameNum(self, instant):
        '''Returns the frame number corresponding to instant in this sequence (None if outside)
        Warning, there is no check of correct time units'''
        if self.containsInstant(instant):
            # fixed: timedelta.seconds ignores full days; total_seconds() is the elapsed time
            return int(floor((instant-self.startTime).total_seconds()*self.cameraView.cameraType.frameRate))
        else:
            return None
+
+class TrackingAnnotation(Base):
+    'Ground truth annotations for (part of) a video sequence'
+    __tablename__ = 'tracking_annotations'
+    idx = Column(Integer, primary_key=True)
+    description = Column(String) # description
+    groundTruthFilename = Column(String)
+    firstFrameNum = Column(Integer) # first frame num of annotated data (could be computed on less data)
+    lastFrameNum = Column(Integer)
+    videoSequenceIdx = Column(Integer, ForeignKey('video_sequences.idx'))
+    maskFilename = Column(String) # path to mask file (can be different from camera view, for annotations), relative to site name
+    undistorted = Column(Boolean) # indicates whether the annotations were done in undistorted video space
+
+    videoSequence = relationship("VideoSequence", backref = backref('trackingAnnotations'))
+    
+    def __init__(self, description, groundTruthFilename, firstFrameNum, lastFrameNum, videoSequence, maskFilename, undistorted = True):
+        self.description = description
+        self.groundTruthFilename = groundTruthFilename
+        self.firstFrameNum = firstFrameNum
+        self.lastFrameNum = lastFrameNum
+        self.videoSequence = videoSequence
+        self.undistorted = undistorted
+        self.maskFilename = maskFilename
+
+    def getGroundTruthFilename(self, relativeToSiteFilename = True):
+        'Returns the ground truth filename, by default joined to the site path'
+        if relativeToSiteFilename:
+            return path.join(self.videoSequence.cameraView.site.getPath(), self.groundTruthFilename)
+        else:
+            return self.groundTruthFilename
+
+    def getMaskFilename(self, relativeToSiteFilename = True):
+        'Returns the annotation mask filename, by default joined to the site path'
+        if relativeToSiteFilename:
+            return path.join(self.videoSequence.cameraView.site.getPath(), self.maskFilename)
+        else:
+            return self.maskFilename
+
+    def getTimeInterval(self):
+        'Returns the annotated interval, in frame numbers (not datetimes)'
+        return TimeInterval(self.firstFrameNum, self.lastFrameNum)
+        
+# add class for Analysis: foreign key VideoSequenceId, dataFilename, configFilename (get the one from camera view by default), mask? (no, can be referenced in the tracking cfg file)
+
+# class Analysis(Base): # parameters necessary for processing the data: free form
+# eg bounding box depends on camera view, tracking configuration depends on camera view 
+# results: sqlite
+
def createDatabase(filename):
    'Creates the metadata tables in filename and returns a session to query it (None if the file already exists)'
    if Path(filename).is_file():
        print('The file '+filename+' exists')
        return None
    engine = create_engine('sqlite:///'+filename)
    Base.metadata.create_all(engine)
    return sessionmaker(bind=engine)()
+
def connectDatabase(filename):
    'Returns a session to query the existing metadata database filename (None if it does not exist)'
    if not Path(filename).is_file():
        print('The file '+filename+' does not exist')
        return None
    engine = create_engine('sqlite:///'+filename)
    return sessionmaker(bind=engine)()
+
def getSite(session, siteId = None, name = None, description = None):
    '''Returns the site(s) matching the index, the name or the description
    (checked in that order; prints a warning and returns [] if none is provided)'''
    if siteId is not None:
        return session.query(Site).filter(Site.idx == int(siteId)).all()
    if name is not None:
        return session.query(Site).filter(Site.name.like('%'+name+'%')).all()
    if description is not None:
        return session.query(Site).filter(Site.description.like('%'+description+'%')).all()
    print('No siteId, name or description have been provided to the function')
    return []
+
+def getCameraView(session, viewId):
+    'Returns the camera view matching the index (first match, or None)'
+    return session.query(CameraView).filter(CameraView.idx == int(viewId)).first()
+
def getSiteVideoSequences(site):
    'Returns all video sequences of all camera views of the site, as a flat list'
    videoSequences = []
    for cameraView in site.cameraViews:
        videoSequences.extend(cameraView.videoSequences)
    return videoSequences
+
+def initializeSites(session, directoryName, nViewsPerSite = 1):
+    '''Initializes default site objects and n camera views per site
+    
+    eg somedirectory/montreal/ contains intersection1, intersection2, etc.
+    The site names would be somedirectory/montreal/intersection1, somedirectory/montreal/intersection2, etc.
+    The views should be directories in somedirectory/montreal/intersection1'''
+    sites = []
+    cameraViews = []
+    names = sorted(listdir(directoryName))
+    for name in names:
+        if path.isdir(directoryName+sep+name): # only subdirectories are considered sites
+            sites.append(Site(directoryName+sep+name, None))
+            for cameraViewIdx in range(1, nViewsPerSite+1):
+                # views get default names view1, view2, ... and no other metadata
+                cameraViews.append(CameraView('view{}'.format(cameraViewIdx), None, sites[-1], None, None, None))
+    session.add_all(sites)
+    session.add_all(cameraViews)
+    session.commit()
+
+def initializeVideos(session, cameraView, directoryName, startTime = None, datetimeFormat = None):
+    '''Initializes videos with time or tries to guess it from filename
+    directoryName should contain the videos to find and be the relative path from the site location
+    NOTE(review): startTime must not be None when video files are found,
+    since it is incremented by each video duration below'''
+    names = sorted(listdir(directoryName))
+    videoSequences = []
+    if datetimeFormat is not None:
+        timeConverter = TimeConverter(datetimeFormat)
+    for name in names:
+        prefix = removeExtension(name)
+        extension = getExtension(name)
+        if extension in videoFilenameExtensions:
+            if datetimeFormat is not None:
+                from argparse import ArgumentTypeError
+                try:
+                    # the timestamp is taken as the part of the filename before the last underscore
+                    t1 = timeConverter.convert(name[:name.rfind('_')])
+                    print('DB time {} / Time from filename {}'.format(startTime, t1))
+                except ArgumentTypeError as e:
+                    print('File format error for time {} (prefix {})'.format(name, prefix))
+            vidinfo = infoVideo(directoryName+sep+name)
+            duration = vidinfo['number of frames']#timedelta(minutes = 27, seconds = 33)
+            fps = vidinfo['fps']
+            duration = timedelta(seconds=duration/fps)
+            videoSequences.append(VideoSequence(directoryName+sep+name, startTime, duration, cameraView, directoryName+sep+prefix+'.sqlite'))
+            startTime += duration # videos are assumed to follow each other without gaps
+    session.add_all(videoSequences)
+    session.commit()
+
+def generateTimeIntervals(videoSequences, maxTimeGap):
+    'TODO not implemented (no-op returning None); presumably should generate time intervals covering the video sequences, merging those separated by less than maxTimeGap'
+
+def addAlignment(session, site, t):
+    'Adds trajectory (moving.Trajectory) t to metadata of site'
+    al = Alignment(site)
+    session.add(al)
+    session.commit() # commit first so the alignment row gets its index before points reference it
+    points = []
+    for i,p in enumerate(t):
+        points.append(Point(al, i, p.x, p.y))
+    session.add_all(points)
+    session.commit()
+    
+# management
+# TODO need to be able to copy everything from a site from one sqlite to another, and delete everything attached to a site
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/ml.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,313 @@
+#! /usr/bin/env python
+'''Libraries for machine learning algorithms'''
+
+from os import path
+from random import shuffle
+from copy import copy, deepcopy
+
+import numpy as np
+from matplotlib.pylab import text
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+from scipy.cluster.vq import kmeans, whiten, vq
+from sklearn import mixture
+try:
+    import cv2
+    opencvAvailable = True
+except ImportError:
+    print('OpenCV library could not be loaded (video replay functions will not be available)') # TODO change to logging module
+    opencvAvailable = False
+
+from trafficintelligence import utils
+
+#####################
+# OpenCV ML models
+#####################
+
def computeConfusionMatrix(model, samples, responses):
    '''Computes the confusion matrix of the classifier (model)
    as a dict mapping (true response, predicted) pairs to counts

    samples should be n samples by m variables'''
    predictions = model.predict(samples)
    confusion = {}
    for predicted, actual in zip(predictions, responses):
        key = (actual, predicted)
        confusion[key] = confusion.get(key, 0) + 1
    return confusion
+
if opencvAvailable:
    class SVM(object):
        '''Wrapper for the OpenCV Support Vector Machine algorithm'''
        def __init__(self, svmType = cv2.ml.SVM_C_SVC, kernelType = cv2.ml.SVM_RBF, degree = 0, gamma = 1, coef0 = 0, Cvalue = 1, nu = 0, p = 0):
            self.model = cv2.ml.SVM_create()
            self.model.setType(svmType)
            self.model.setKernel(kernelType)
            self.model.setDegree(degree)
            self.model.setGamma(gamma)
            self.model.setCoef0(coef0)
            self.model.setC(Cvalue)
            self.model.setNu(nu)
            self.model.setP(p)

        def save(self, filename):
            'Saves the trained model to filename'
            self.model.save(filename)

        def train(self, samples, layout, responses, computePerformance = False):
            'Trains the model; optionally returns the confusion matrix on the training samples'
            self.model.train(samples, layout, responses)
            if computePerformance:
                return computeConfusionMatrix(self, samples, responses)

        def predict(self, hog):
            'Returns the predicted class: a scalar for a single sample, a list otherwise'
            retval, predictions = self.model.predict(hog)
            if hog.shape[0] == 1:
                return predictions[0][0]
            else:
                # fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin int is the documented replacement
                return np.asarray(predictions, dtype = int).ravel().tolist()

    def SVM_load(filename):
        'Loads a saved SVM model from filename (returns None and warns if the file does not exist)'
        if path.exists(filename):
            svm = SVM()
            svm.model = cv2.ml.SVM_load(filename)
            return svm
        else:
            print('Provided filename {} does not exist: model not loaded!'.format(filename))
+        
+#####################
+# Clustering
+#####################
+
class Centroid(object):
    '''Wrapper around instances to add a counter

    The wrapped instance must support multiply(scalar) and addition,
    so that a running average of the added instances can be maintained'''

    def __init__(self, instance, nInstances = 1):
        self.instance = instance
        self.nInstances = nInstances

    def add(self, instance2):
        'Updates the centroid instance to the running average including instance2'
        self.instance = self.instance.multiply(self.nInstances)+instance2
        self.nInstances += 1
        self.instance = self.instance.multiply(1/float(self.nInstances))

    def average(self, c):
        '''Returns a new Centroid averaging self and c, weighted by their instance counts
        (fixed: the method was missing the self parameter, referenced the undefined
        name "instance" and discarded the result of the final multiply)'''
        nTotal = self.nInstances+c.nInstances
        inst = self.instance.multiply(self.nInstances)+c.instance.multiply(c.nInstances)
        return Centroid(inst.multiply(1/float(nTotal)), nTotal)

    def plot(self, options = ''):
        'Plots the centroid instance, annotated with its instance count'
        self.instance.plot(options)
        text(self.instance.position.x+1, self.instance.position.y+1, str(self.nInstances))
+
+def kMedoids(similarityMatrix, initialCentroids = None, k = None):
+    '''Algorithm that clusters any dataset based on a similarity matrix
+    Either the initialCentroids or k are passed
+    TODO not implemented: currently a no-op returning None'''
+    pass
+
def assignCluster(data, similarFunc, initialCentroids = None, shuffleData = True):
    '''Greedy k-means-like clustering driven by a boolean similarity function

    Each instance is assigned to the first existing centroid whose instance it is
    similar to (similarFunc returns True); otherwise it seeds a new centroid, so
    the number of clusters is determined by the data.

    data: list of instances (must support the Centroid averaging operations)
    Returns the list of Centroid objects.
    Note: the first element of the (possibly shuffled) data is never reassigned,
    even when initialCentroids is provided (preserved from the original behavior).'''
    workingData = copy(data) # shallow copy to avoid modifying data
    if shuffleData:
        shuffle(workingData)
    if initialCentroids is None:
        centroids = [Centroid(workingData[0])]
    else:
        centroids = deepcopy(initialCentroids)
    for instance in workingData[1:]:
        matched = None
        for centroid in centroids:
            if similarFunc(centroid.instance, instance):
                matched = centroid
                break
        if matched is None:
            centroids.append(Centroid(instance))
        else:
            matched.add(instance)
    return centroids
+
+# TODO recompute centroids for each cluster: instance that minimizes some measure to all other elements
+
+def spectralClustering(similarityMatrix, k, iter=20):
+    '''Spectral Clustering algorithm
+    Returns (code, sigma): the cluster code of each element (0 to k-1) and the singular values
+    NOTE(review): results are not deterministic since scipy kmeans uses random initialization;
+    the iter parameter shadows the Python builtin'''
+    n = len(similarityMatrix)
+    # create Laplacian matrix
+    rowsum = np.sum(similarityMatrix,axis=0)
+    D = np.diag(1 / np.sqrt(rowsum))
+    I = np.identity(n)
+    L = I - np.dot(D,np.dot(similarityMatrix,D))
+    # compute eigenvectors of L
+    U,sigma,V = np.linalg.svd(L)
+    # create feature vector from k first eigenvectors
+    # by stacking eigenvectors as columns
+    features = np.array(V[:k]).T
+    # k-means
+    features = whiten(features)
+    centroids,distortion = kmeans(features,k, iter)
+    code,distance = vq(features,centroids) # code starting from 0 (represent first cluster) to k-1 (last cluster)
+    return code,sigma
+
+def assignToPrototypeClusters(instances, initialPrototypeIndices, similarities, minSimilarity, similarityFunc, minClusterSize = 0):
+    '''Assigns instances to prototypes 
+    if minClusterSize is not 0, the clusters will be refined by removing iteratively the smallest clusters
+    and reassigning all elements in the cluster until no cluster is smaller than minClusterSize
+
+    labels are indices in the prototypeIndices
+    similarities is assumed to be a numpy 2D array; negative entries mean not yet computed
+    and are filled lazily with similarityFunc'''
+    prototypeIndices = copy(initialPrototypeIndices)
+    indices = [i for i in range(len(instances)) if i not in prototypeIndices]
+    labels = [-1]*len(instances)
+    assign = True
+    while assign:
+        for i in prototypeIndices:
+            labels[i] = i # each prototype belongs to its own cluster
+        for i in indices:
+            for j in prototypeIndices:
+                if similarities[i][j] < 0:
+                    similarities[i][j] = similarityFunc(instances[i], instances[j])
+                    similarities[j][i] = similarities[i][j]
+            label = similarities[i][prototypeIndices].argmax()
+            if similarities[i][prototypeIndices[label]] >= minSimilarity:
+                labels[i] = prototypeIndices[label]
+            else:
+                labels[i] = -1 # outlier
+        clusterSizes = {i: sum(np.array(labels) == i) for i in prototypeIndices}
+        smallestClusterIndex = min(clusterSizes, key = clusterSizes.get)
+        assign = (clusterSizes[smallestClusterIndex] < minClusterSize)
+        if assign: # drop the smallest cluster and reassign only its elements in the next pass
+            prototypeIndices.remove(smallestClusterIndex)
+            indices = [i for i in range(similarities.shape[0]) if labels[i] == smallestClusterIndex]
+    return prototypeIndices, labels
+
+def prototypeCluster(instances, similarities, minSimilarity, similarityFunc, optimizeCentroid = False, randomInitialization = False, initialPrototypeIndices = None):
+    '''Finds exemplar (prototype) instance that represent each cluster
+    Returns the prototype indices (in the instances list)
+
+    the elements in the instances list must have a length (method __len__), or one can use the optimizeCentroid
+    the positions in the instances list corresponds to the similarities
+    if similarityFunc is provided, the similarities are calculated as needed (this is faster) if not in similarities (negative if not computed)
+    similarities must still be allocated with the right size
+
+    if an instance is different enough (<minSimilarity), 
+    it will become a new prototype. 
+    Non-prototype instances will be assigned to an existing prototype
+
+    if optimizeCentroid is True, each time an element is added, we recompute the centroid trajectory as the most similar to all in the cluster
+
+    initialPrototypeIndices are indices in instances
+
+    TODO: check how similarity evolves in clusters'''
+    if len(instances) == 0:
+        print('no instances to cluster (empty list)')
+        return None
+
+    # sort instances based on length
+    indices = list(range(len(instances)))
+    if randomInitialization or optimizeCentroid:
+        indices = np.random.permutation(indices).tolist() # NOTE: makes the result non-deterministic
+    else:
+        indices.sort(key=lambda i: len(instances[i]))
+    # initialize clusters
+    clusters = []
+    if initialPrototypeIndices is None:
+        prototypeIndices = [indices[0]]
+    else:
+        prototypeIndices = initialPrototypeIndices # think of the format: if indices, have to be in instances
+    for i in prototypeIndices:
+        clusters.append([i])
+        indices.remove(i)
+    # go through all instances
+    for i in indices:
+        # lazily compute the similarity of i to each current prototype
+        for j in prototypeIndices:
+            if similarities[i][j] < 0:
+                similarities[i][j] = similarityFunc(instances[i], instances[j])
+                similarities[j][i] = similarities[i][j]
+        label = similarities[i][prototypeIndices].argmax() # index in prototypeIndices
+        if similarities[i][prototypeIndices[label]] < minSimilarity:
+            prototypeIndices.append(i) # different enough: i becomes a new prototype
+            clusters.append([])
+        else:
+            clusters[label].append(i)
+            if optimizeCentroid:
+                if len(clusters[label]) >= 2: # no point if only one element in cluster
+                    for j in clusters[label][:-1]:
+                        if similarities[i][j] < 0:
+                            similarities[i][j] = similarityFunc(instances[i], instances[j])
+                            similarities[j][i] = similarities[i][j]
+                    clusterIndices = clusters[label]
+                    clusterSimilarities = similarities[clusterIndices][:,clusterIndices]
+                    newCentroidIdx = clusterIndices[clusterSimilarities.sum(0).argmax()] # most similar to all cluster members
+                    if prototypeIndices[label] != newCentroidIdx:
+                        prototypeIndices[label] = newCentroidIdx
+            elif len(instances[prototypeIndices[label]]) < len(instances[i]): # replace prototype by current instance i if longer # otherwise, possible to test if randomInitialization or initialPrototypes is not None
+                prototypeIndices[label] = i
+    return prototypeIndices
+
def computeClusterSizes(labels, prototypeIndices, outlierIndex = -1):
    '''Returns a dict of cluster sizes keyed by prototype index,
    plus the number of outliers under the key 'outlier' '''
    labelArray = np.array(labels)
    sizes = {proto: sum(labelArray == proto) for proto in prototypeIndices}
    sizes['outlier'] = sum(labelArray == outlierIndex)
    return sizes
+
+def computeClusterStatistics(labels, prototypeIndices, instances, similarities, similarityFunc, clusters = None, outlierIndex = -1):
+    '''Prints the size and mean similarities of each cluster,
+    computing missing similarities lazily (negative entries mean not yet computed)'''
+    if clusters is None:
+        clusters = {protoId:[] for protoId in prototypeIndices+[-1]} # -1 collects the outliers
+        for i,l in enumerate(labels):
+            clusters[l].append(i)
+        clusters = [clusters[protoId] for protoId in prototypeIndices]
+    for i, cluster in enumerate(clusters):
+        n = len(cluster)
+        print('cluster {}: {} elements'.format(prototypeIndices[i], n))
+        if n >=2:
+            for j,k in enumerate(cluster):
+                for l in cluster[:j]:
+                    if similarities[k][l] < 0:
+                        similarities[k][l] = similarityFunc(instances[k], instances[l])
+                        similarities[l][k] = similarities[k][l]
+            print('Mean similarity to prototype: {}'.format((similarities[prototypeIndices[i]][cluster].sum()+1)/(n-1))) # NOTE(review): the +1 presumably compensates the self-similarity term - verify
+            print('Mean overall similarity: {}'.format((similarities[cluster][:,cluster].sum()+n)/(n*(n-1))))
+
+# Gaussian Mixture Models
+def plotGMM(mean, covariance, gmmId, fig, color, alpha = 0.3):
+    'Plots one Gaussian component as an ellipse (axes scaled from the covariance eigenvalues), with its mean and id'
+    v, w = np.linalg.eigh(covariance)
+    angle = 180*np.arctan2(w[0][1], w[0][0])/np.pi
+    v *= 4 # scale the eigenvalues for a visible ellipse
+    ell = mpl.patches.Ellipse(mean, v[0], v[1], 180+angle, color=color)
+    ell.set_clip_box(fig.bbox)
+    ell.set_alpha(alpha)
+    fig.axes[0].add_artist(ell)
+    plt.plot([mean[0]], [mean[1]], 'x'+color)
+    plt.annotate(str(gmmId), xy=(mean[0]+1, mean[1]+1))
+
+def plotGMMClusters(model, labels = None, dataset = None, fig = None, colors = utils.colors, nUnitsPerPixel = 1., alpha = 0.3):
+    '''plot the ellipse corresponding to the Gaussians
+    and the predicted classes of the instances in the dataset
+    (means, covariances and points are rescaled by nUnitsPerPixel for display in image space)'''
+    if fig is None:
+        fig = plt.figure()
+    if len(fig.get_axes()) == 0:
+        fig.add_subplot(111)
+    for i in range(model.n_components):
+        mean = model.means_[i]/nUnitsPerPixel
+        covariance = model.covariances_[i]/nUnitsPerPixel
+        # plot points
+        if dataset is not None:
+            tmpDataset = dataset/nUnitsPerPixel
+            plt.scatter(tmpDataset[labels == i, 0], tmpDataset[labels == i, 1], .8, color=colors[i])
+        # plot an ellipse to show the Gaussian component
+        plotGMM(mean, covariance, i, fig, colors[i], alpha)
+    if dataset is None: # to address issues without points, the axes limits are not redrawn
+        minima = model.means_.min(0)
+        maxima = model.means_.max(0)
+        xwidth = 0.5*(maxima[0]-minima[0])
+        ywidth = 0.5*(maxima[1]-minima[1])
+        plt.xlim(minima[0]-xwidth,maxima[0]+xwidth)
+        plt.ylim(minima[1]-ywidth,maxima[1]+ywidth)
+
+if __name__ == "__main__":
+    # run the doctest-based unit tests for this module when executed directly
+    import doctest
+    import unittest
+    suite = doctest.DocFileSuite('tests/ml.txt')
+    unittest.TextTestRunner().run(suite)
+#     #doctest.testmod()
+#     #doctest.testfile("example.txt")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/moving.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,2110 @@
+#! /usr/bin/env python
+'''Libraries for moving objects, trajectories...'''
+
+import copy
+from math import sqrt, atan2, cos, sin
+
+from numpy import median, mean, array, arange, zeros, ones, hypot, NaN, std, floor, ceil, float32, argwhere, minimum
+from matplotlib.pyplot import plot, text
+from scipy.stats import scoreatpercentile
+from scipy.spatial.distance import cdist
+from scipy.signal import savgol_filter
+
+try:
+    from shapely.geometry import Polygon, Point as shapelyPoint
+    from shapely.prepared import prep, PreparedGeometry
+    shapelyAvailable = True
+except ImportError:
+    print('Shapely library could not be loaded')
+    shapelyAvailable = False
+
+from trafficintelligence import utils, cvutils
+from trafficintelligence.base import VideoFilenameAddable
+
+
class Interval(object):
    '''Generic interval: a subset of real numbers (not iterable)'''
    def __init__(self, first=0, last=-1, revert = False):
        # with revert, a reversed pair of bounds is reordered
        if revert and last < first:
            self.first, self.last = last, first
        else:
            self.first, self.last = first, last

    def __str__(self):
        return '{}-{}'.format(self.first, self.last)

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        'Intervals with swapped bounds are considered equal'
        sameOrder = (self.first == other.first) and (self.last == other.last)
        swappedOrder = (self.first == other.last) and (self.last == other.first)
        return sameOrder or swappedOrder

    def empty(self):
        return self.last < self.first

    def center(self):
        return (self.first+self.last)/2.

    def length(self):
        '''Returns the length of the interval (0. if empty)'''
        return float(max(0, self.last-self.first))

    def equal(self, i2):
        'Strict equality of bounds (unlike __eq__)'
        return (self.first, self.last) == (i2.first, i2.last)

    def getList(self):
        return [self.first, self.last]

    def contains(self, instant):
        return self.first <= instant <= self.last

    def inside(self, interval2):
        '''Indicates if the temporal interval of self is comprised in interval2'''
        return interval2.first <= self.first and self.last <= interval2.last

    def shift(self, offset):
        'Translates both bounds by offset (in place)'
        self.first = self.first + offset
        self.last = self.last + offset

    @classmethod
    def parse(cls, s):
        'Parses a string such as "first-last" into an interval (None on failure)'
        if '-' in s:
            bounds = s.split('-')
            if len(bounds) == 2:
                return cls(int(bounds[0]), int(bounds[1])) # TODO with floats?
        print(s+' is not a valid representation of an interval')
        return None

    @classmethod
    def union(cls, interval1, interval2):
        '''Smallest interval comprising self and interval2'''
        return cls(min(interval1.first, interval2.first), max(interval1.last, interval2.last))

    @classmethod
    def intersection(cls, interval1, interval2):
        '''Largest interval comprised in both self and interval2'''
        return cls(max(interval1.first, interval2.first), min(interval1.last, interval2.last))

    def distance(self, interval2):
        'Gap between the two intervals (0 if they overlap)'
        if not Interval.intersection(self, interval2).empty():
            return 0
        if self.first > interval2.last:
            return self.first - interval2.last
        if self.last < interval2.first:
            return interval2.first - self.last
        return None

    @classmethod
    def unionIntervals(cls, intervals):
        'returns the smallest interval containing all intervals'
        result = cls(intervals[0].first, intervals[0].last)
        for interval in intervals[1:]:
            result = cls.union(result, interval)
        return result
+
+
class TimeInterval(Interval):
    '''Temporal interval: set of instants at fixed time step, between first and last, included
    
    For example: based on frame numbers (hence the modified length method)
    It may be modified directly by setting first and last
    It also (mostly) works with datetime.datetime'''

    def __init__(self, first=0, last=-1, revert = False):
        super(TimeInterval, self).__init__(first, last, revert)

    @staticmethod
    def fromInterval(inter):
        'Copies a generic Interval into a TimeInterval'
        return TimeInterval(inter.first, inter.last)

    def __getitem__(self, i):
        '''Returns the i-th instant of the interval (integer indices only);
        returns None implicitly if the interval is empty'''
        if not self.empty():
            if isinstance(i, int):
                return self.first+i
            else:
                raise TypeError("Invalid argument type.")
            #elif isinstance( key, slice ):

    def __iter__(self):
        self.iterInstantNum = -1
        return self

    def __next__(self):
        if self.iterInstantNum >= self.length()-1:
            raise StopIteration
        else:
            self.iterInstantNum += 1
            return self[self.iterInstantNum]

    def length(self):
        '''Returns the length of the interval (number of instants, bounds included)'''
        return float(max(0,self.last-self.first+1))

    def __len__(self):
        # bug fix: __len__ must return an int; length() returns a float,
        # which made len(timeInterval) raise TypeError
        return int(self.length())
+
+# class BoundingPolygon:
+#     '''Class for a polygon bounding a set of points
+#     with methods to create intersection, unions...
+#     '''
+# We will use the polygon class of Shapely
+
class STObject(object):
    '''Class for spatio-temporal object, i.e. with temporal and spatial existence
    (time interval and bounding polygon for positions (e.g. rectangle)).

    It may not mean that the object is defined
    for all time instants within the time interval'''

    def __init__(self, num = None, timeInterval = None, boundingPolygon = None):
        self.num = num
        self.timeInterval = timeInterval
        self.boundingPolygon = boundingPolygon

    def empty(self):
        'True if the time interval is empty'
        return self.timeInterval.empty()# or not self.boudingPolygon

    def getNum(self):
        return self.num

    def __len__(self):
        # bug fix: __len__ must return an int; timeInterval.length() returns a
        # float, which made len(stObject) raise TypeError
        return int(self.timeInterval.length())

    def length(self):
        'Number of instants of the time interval (float, see TimeInterval.length)'
        return self.timeInterval.length()

    def getFirstInstant(self):
        return self.timeInterval.first

    def getLastInstant(self):
        return self.timeInterval.last

    def setFirstInstant(self, t):
        'Sets the first instant, refusing to move it past the last instant'
        if t <= self.timeInterval.last:
            self.timeInterval.first = t
        else:
            print('new first instant is after last, not changing')

    def setLastInstant(self, t):
        'Sets the last instant, refusing to move it before the first instant'
        if t >= self.timeInterval.first:
            self.timeInterval.last = t
        else:
            print('new last instant is before first, not changing')

    def getTimeInterval(self):
        return self.timeInterval

    def existsAtInstant(self, t):
        return self.timeInterval.contains(t)

    def commonTimeInterval(self, obj2):
        'Returns the intersection of the time intervals of self and obj2'
        return TimeInterval.intersection(self.getTimeInterval(), obj2.getTimeInterval())

    def shiftTimeInterval(self, offset):
        self.timeInterval.shift(offset)
+
class Point(object):
    '''2D point/vector with arithmetic, norms, projections and plotting helpers'''
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        return '({:f},{:f})'.format(self.x,self.y)

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        return (self.x == other.x) and (self.y == other.y)

    def __add__(self, other):
        return Point(self.x+other.x, self.y+other.y)

    def __sub__(self, other):
        return Point(self.x-other.x, self.y-other.y)

    def __neg__(self):
        return Point(-self.x, -self.y)

    def __mul__(self, alpha):
        'Warning, returns a new Point'
        return Point(self.x*alpha, self.y*alpha)

    def divide(self, alpha):
        'Warning, returns a new Point'
        return Point(self.x/alpha, self.y/alpha)

    def __getitem__(self, i):
        # index access: 0 -> x, 1 -> y
        if i == 0:
            return self.x
        elif i == 1:
            return self.y
        else:
            raise IndexError()
    
    def orthogonal(self, clockwise = True):
        'Returns the orthogonal vector'
        # clockwise rotation by 90 degrees (in image coordinates, y down)
        if clockwise:
            return Point(self.y, -self.x)
        else:
            return Point(-self.y, self.x)

    def normalize(self):
        # raises ZeroDivisionError for the null vector (norm2() == 0)
        return self.divide(self.norm2())

    def projectLocal(self, v, clockwise = True):
        'Projects point projected on v, v.orthogonal()'
        e1 = v.normalize()
        e2 = e1.orthogonal(clockwise)
        return Point(Point.dot(self, e1), Point.dot(self, e2))

    def rotate(self, theta):
        # rotation by angle theta (radians)
        return Point(self.x*cos(theta)-self.y*sin(theta), self.x*sin(theta)+self.y*cos(theta))

    def plot(self, options = 'o', **kwargs):
        plot([self.x], [self.y], options, **kwargs)

    @staticmethod
    def plotSegment(p1, p2, options = 'o', **kwargs):
        plot([p1.x, p2.x], [p1.y, p2.y], options, **kwargs)

    def angle(self):
        # orientation of the vector in radians, in (-pi, pi]
        return atan2(self.y, self.x)

    def norm2Squared(self):
        '''2-norm distance (Euclidean distance)'''
        return self.x**2+self.y**2

    def norm2(self):
        '''2-norm distance (Euclidean distance)'''
        return sqrt(self.norm2Squared())

    def norm1(self):
        # 1-norm (Manhattan)
        return abs(self.x)+abs(self.y)

    def normMax(self):
        # infinity norm
        return max(abs(self.x),abs(self.y))

    def aslist(self):
        return [self.x, self.y]

    def astuple(self):
        return (self.x, self.y)

    def asint(self):
        # truncates (does not round) each coordinate
        return Point(int(self.x), int(self.y))

    if shapelyAvailable:
        # only defined when the shapely import at module level succeeded
        def asShapely(self):
            return shapelyPoint(self.x, self.y)

    def homographyProject(self, homography):
        '''Returns the point projected through the homography matrix'''
        projected = cvutils.homographyProject(array([[self.x], [self.y]]), homography)
        return Point(projected[0], projected[1])

    def inPolygon(self, polygon):
        '''Indicates if the point x, y is inside the polygon
        (array of Nx2 coordinates of the polygon vertices)

        taken from http://www.ariel.com.au/a/python-point-int-poly.html

        Use Polygon.contains if Shapely is installed'''

        # ray casting: count crossings of a horizontal ray with the polygon edges
        n = polygon.shape[0];
        counter = 0;

        p1 = polygon[0,:];
        for i in range(n+1):
            p2 = polygon[i % n,:];
            if self.y > min(p1[1],p2[1]):
                if self.y <= max(p1[1],p2[1]):
                    if self.x <= max(p1[0],p2[0]):
                        if p1[1] != p2[1]:
                            xinters = (self.y-p1[1])*(p2[0]-p1[0])/(p2[1]-p1[1])+p1[0];
                        # NOTE(review): when p1[1] == p2[1] and p1[0] != p2[0], xinters
                        # may be stale from a previous edge (or unbound on the first) - verify
                        if p1[0] == p2[0] or self.x <= xinters:
                            counter+=1;
            p1=p2
        # odd number of crossings means the point is inside
        return (counter%2 == 1);

    @staticmethod
    def fromList(p):
        return Point(p[0], p[1])

    @staticmethod
    def dot(p1, p2):
        'Scalar product'
        return p1.x*p2.x+p1.y*p2.y

    @staticmethod
    def cross(p1, p2):
        'Cross product'
        return p1.x*p2.y-p1.y*p2.x

    @staticmethod
    def parallel(p1, p2):
        # exact zero test: subject to floating point error for near-parallel vectors
        return Point.cross(p1, p2) == 0.

    @staticmethod
    def cosine(p1, p2):
        # cosine of the angle between the two vectors
        return Point.dot(p1,p2)/(p1.norm2()*p2.norm2())

    @staticmethod
    def distanceNorm2(p1, p2):
        return (p1-p2).norm2()

    @staticmethod
    def plotAll(points, options = '', **kwargs):
        plot([p.x for p in points], [p.y for p in points], options, **kwargs)

    def similarOrientation(self, refDirection, cosineThreshold):
        'Indicates whether the cosine of the vector and refDirection is at least cosineThreshold'
        return Point.cosine(self, refDirection) >= cosineThreshold

    @staticmethod
    def timeToCollision(p1, p2, v1, v2, collisionThreshold):
        '''Computes exact time to collision with a distance threshold
        The unknown of the equation is the time to reach the intersection
        between the relative trajectory of one road user
        and the circle of radius collisionThreshold around the other road user

        Returns the smallest non-negative root of the quadratic, None if
        the users never get within collisionThreshold of each other'''
        dv = v1-v2
        dp = p1-p2
        a = dv.norm2Squared()#(v1.x-v2.x)**2 + (v1.y-v2.y)**2
        b = 2*Point.dot(dv, dp)#2 * ((p1.x-p2.x) * (v1.x-v2.x) + (p1.y-p2.y) * (v1.y-v2.y))
        c = dp.norm2Squared() - collisionThreshold**2#(p1.x-p2.x)**2 + (p1.y-p2.y)**2 - collisionThreshold**2

        delta = b**2 - 4*a*c
        if delta >= 0:
            deltaRoot = sqrt(delta)
            ttc1 = (-b + deltaRoot)/(2*a)
            ttc2 = (-b - deltaRoot)/(2*a)
            if ttc1 >= 0 and ttc2 >= 0:
                return min(ttc1,ttc2)
            elif ttc1 >= 0:
                return ttc1
            elif ttc2 >= 0:
                return ttc2
            else: # ttc1 < 0 and ttc2 < 0:
                return None
        else:
            return None

    @staticmethod
    def midPoint(p1, p2):
        'Returns the middle of the segment [p1, p2]'
        return Point(0.5*p1.x+0.5*p2.x, 0.5*p1.y+0.5*p2.y)

    @staticmethod
    def agg(points, aggFunc = mean):
        # aggregates the coordinates component-wise (mean by default)
        return Point(aggFunc([p.x for p in points]), aggFunc([p.y for p in points]))

    @staticmethod
    def boundingRectangle(points, v):
        '''Returns the bounding rectangle of the points, aligned on the vector v
        A list of points is returned: front left, front right, rear right, rear left'''
        e1 = v.normalize()
        e2 = e1.orthogonal()
        # coordinates of the points in the (e1, e2) basis
        xCoords = []
        yCoords = []
        for p in points:
            xCoords.append(Point.dot(e1, p))
            yCoords.append(Point.dot(e2, p))
        xmin = min(xCoords)
        xmax = max(xCoords)
        ymin = min(yCoords)
        ymax = max(yCoords)
        frontLeft = Point(xmax, ymin)
        frontRight = Point(xmax, ymax)
        rearLeft = Point(xmin, ymin)
        rearRight = Point(xmin, ymax)
        # the matrix with rows (e1, e2) is symmetric orthogonal (an involution),
        # so applying the same projection again maps the corners back to the
        # original coordinate frame
        return [Point(Point.dot(e1, p), Point.dot(e2, p)) for p in [frontLeft, frontRight, rearRight, rearLeft]]
+
if shapelyAvailable:
    def pointsInPolygon(points, polygon):
        '''Optimized tests of a series of points within (Shapely) polygon (not prepared)
        Returns the list of points contained in polygon'''
        # isinstance (instead of type(...) ==) also accepts subclasses of PreparedGeometry
        if isinstance(polygon, PreparedGeometry):
            preparedPolygon = polygon
        else:
            preparedPolygon = prep(polygon)
        return list(filter(preparedPolygon.contains, points))
+
# Functions for coordinate transformation
# From Paul St-Aubin's PVA tools
def prepareAlignments(alignments):
    '''Prepares alignments (list of splines, each typically represented as a Trajectory)
    - computes cumulative distances
    - approximates slope singularity by giving some slope roundoff (account for roundoff error)'''
    for alignment in alignments:
        alignment.computeCumulativeDistances()
        previous = alignment[0]
        for i in range(len(alignment)-1):
            current = alignment[i+1]
            # nudge coordinates that are equal (to 10 decimals) to avoid
            # vertical/horizontal segments with infinite or zero slope
            if round(previous.x, 10) == round(current.x, 10):
                current.x += 0.0000000001
            if round(previous.y, 10) == round(current.y, 10):
                current.y += 0.0000000001
            previous = current
+
def ppldb2p(qx,qy, p0x,p0y, p1x,p1y):
    ''' Point-projection (Q) on line defined by 2 points (P0,P1).
        http://cs.nyu.edu/~yap/classes/visual/03s/hw/h2/math.pdf

        Returns None if P0 == P1 or if the computation degenerates
        (division by zero); callers must handle a None result.
        '''
    if(p0x == p1x and p0y == p1y):
        return None
    try:
        #Approximate slope singularity by giving some slope roundoff; account for roundoff error
        # if(round(p0x, 10) == round(p1x, 10)):
        #     p1x += 0.0000000001
        # if(round(p0y, 10) == round(p1y, 10)):
        #     p1y += 0.0000000001
        #make the calculation
        Y = (-(qx)*(p0y-p1y)-(qy*(p0y-p1y)**2)/(p0x-p1x)+p0x**2*(p0y-p1y)/(p0x-p1x)-p0x*p1x*(p0y-p1y)/(p0x-p1x)-p0y*(p0x-p1x))/(p1x-p0x-(p0y-p1y)**2/(p0x-p1x))
        X = (-Y*(p1y-p0y)+qx*(p1x-p0x)+qy*(p1y-p0y))/(p1x-p0x)
    except ZeroDivisionError:
        # bug fix: the original dropped into pdb.set_trace() here (debugger left in
        # production code) and would then hit a NameError on X, Y; report the inputs
        # and return None instead, which callers (e.g. getSYfromXY) already handle
        print('Error: Division by zero in ppldb2p. Please report this error with the full traceback:')
        print('qx={0}, qy={1}, p0x={2}, p0y={3}, p1x={4}, p1y={5}...'.format(qx, qy, p0x, p0y, p1x, p1y))
        return None
    return Point(X, Y)
+
def getSYfromXY(p, alignments, goodEnoughAlignmentDistance = 0.5):
    ''' Snap a point p to its nearest subsegment of it's nearest alignment (from the list alignments).
    A alignment is a list of points (class Point), most likely a trajectory.

    Output:
    =======
    [alignment index,
    subsegment leading point index,
    snapped point,
    subsegment distance,
    alignment distance,
    orthogonal point offset]

    or None
    '''
    minOffsetY = float('inf')
    #For each alignment
    for alignmentIdx in range(len(alignments)):
        #For each alignment point index
        for alignment_p in range(len(alignments[alignmentIdx])-1):
            #Get closest point on alignment
            closestPoint = ppldb2p(p.x,p.y,alignments[alignmentIdx][alignment_p][0],alignments[alignmentIdx][alignment_p][1],alignments[alignmentIdx][alignment_p+1][0],alignments[alignmentIdx][alignment_p+1][1])
            if closestPoint is None:
                print('Error: Alignment {0}, segment {1} has identical bounds and therefore is not a vector. Projection cannot continue.'.format(alignmentIdx, alignment_p))
                return None
            # check if the projected point is in between the current segment of the alignment bounds
            if utils.inBetween(alignments[alignmentIdx][alignment_p][0], alignments[alignmentIdx][alignment_p+1][0], closestPoint.x) and utils.inBetween(alignments[alignmentIdx][alignment_p][1], alignments[alignmentIdx][alignment_p+1][1], closestPoint.y):
                offsetY = Point.distanceNorm2(closestPoint, p)
                if offsetY < minOffsetY:
                    minOffsetY = offsetY
                    snappedAlignmentIdx = alignmentIdx
                    snappedAlignmentLeadingPoint = alignment_p
                    snappedPoint = Point(closestPoint.x, closestPoint.y)
                #Jump loop if significantly close
                # NOTE(review): this break exits only the inner (segment) loop;
                # the remaining alignments are still scanned - confirm intent
                if offsetY < goodEnoughAlignmentDistance:
                    break

    #Get sub-segment distance
    if minOffsetY != float('inf'):
        # snapped* variables are guaranteed to be bound here, since minOffsetY is
        # only updated when they are assigned
        subsegmentDistance = Point.distanceNorm2(snappedPoint, alignments[snappedAlignmentIdx][snappedAlignmentLeadingPoint])
        #Get cumulative alignment distance (total segment distance)
        alignmentDistanceS = alignments[snappedAlignmentIdx].getCumulativeDistance(snappedAlignmentLeadingPoint) + subsegmentDistance
        orthogonalAlignmentVector = (alignments[snappedAlignmentIdx][snappedAlignmentLeadingPoint+1]-alignments[snappedAlignmentIdx][snappedAlignmentLeadingPoint]).orthogonal()
        offsetVector = p-snappedPoint
        # sign the offset: negative if p lies opposite the (clockwise) orthogonal of the segment
        if Point.dot(orthogonalAlignmentVector, offsetVector) < 0:
            minOffsetY = -minOffsetY
        return [snappedAlignmentIdx, snappedAlignmentLeadingPoint, snappedPoint, subsegmentDistance, alignmentDistanceS, minOffsetY]
    else:
        print('Offset for point {} is infinite (check with prepareAlignments if some alignment segments are aligned with axes)'.format(p))
        return None
+
def getXYfromSY(s, y, alignmentNum, alignments):
    ''' Find the (X,Y) coordinates from the curvilinear coordinates (s, y)
    on alignment number alignmentNum:
    s is the distance along the alignment, y the signed orthogonal offset

    Returns None if s is past the end of the alignment
    (requires cumulative distances, see prepareAlignments)
    '''
    alignment = alignments[alignmentNum]
    i = 1
    # find the first alignment point whose cumulative distance exceeds s
    while s > alignment.getCumulativeDistance(i) and i < len(alignment):
        i += 1
    if i < len(alignment):
        d = s - alignment.getCumulativeDistance(i-1) # distance on subsegment
        #Get difference vector and then snap
        dv = alignment[i] - alignment[i-1]
        normalizedV = dv.normalize()
        #snapped = alignment[i-1] + normalizedV*d # snapped point coordinate along alignment
        # add offset finally
        orthoNormalizedV = normalizedV.orthogonal()
        return alignment[i-1] + normalizedV*d + orthoNormalizedV*y
    else:
        print('Curvilinear point {} is past the end of the alignement'.format((s, y, alignmentNum)))
        return None
+
+
class NormAngle(object):
    '''Alternate encoding of a point, by its norm and orientation'''

    def __init__(self, norm, angle):
        self.norm = norm
        self.angle = angle

    @staticmethod
    def fromPoint(p):
        'Builds the norm-angle representation of p (angle 0. for the null vector)'
        magnitude = p.norm2()
        if magnitude > 0:
            orientation = p.angle()
        else:
            orientation = 0.
        return NormAngle(magnitude, orientation)

    def __add__(self, other):
        'a norm cannot become negative'
        summedNorm = self.norm + other.norm
        if summedNorm < 0:
            summedNorm = 0
        return NormAngle(summedNorm, self.angle + other.angle)

    def getPoint(self):
        'Converts back to cartesian coordinates'
        return Point(self.norm*cos(self.angle), self.norm*sin(self.angle))
+
+
def predictPositionNoLimit(nTimeSteps, initialPosition, initialVelocity, initialAcceleration = Point(0,0)):
    '''Predicts the position in nTimeSteps at constant speed/acceleration
    Returns (predicted velocity, predicted position)'''
    predictedVelocity = initialVelocity + initialAcceleration*nTimeSteps
    predictedPosition = initialPosition + initialVelocity*nTimeSteps + initialAcceleration*(nTimeSteps**2*0.5)
    return predictedVelocity, predictedPosition
+
def predictPosition(position, speedOrientation, control, maxSpeed = None):
    '''Predicts the position (moving.Point) at the next time step with given control input (deltaSpeed, deltaTheta)
    speedOrientation is the other encoding of velocity, (speed, orientation)
    speedOrientation and control are NormAngle'''
    newSpeedTheta = speedOrientation + control
    # cap the speed if a maximum is provided
    if maxSpeed is not None and newSpeedTheta.norm > maxSpeed:
        newSpeedTheta.norm = maxSpeed
    return position + newSpeedTheta.getPoint(), newSpeedTheta
+
+
class FlowVector(object):
    '''Class to represent 4-D flow vectors,
    ie a position and a velocity'''
    def __init__(self, position, velocity):
        'position and velocity should be Point instances'
        self.position = position
        self.velocity = velocity

    def __add__(self, other):
        'Component-wise sum of positions and velocities'
        return FlowVector(self.position + other.position, self.velocity + other.velocity)

    def __mul__(self, alpha):
        'Scales both position and velocity by alpha'
        return FlowVector(self.position*alpha, self.velocity*alpha)

    def plot(self, options = '', **kwargs):
        'Plots the velocity as a segment from the position, and the position as a cross'
        xs = [self.position.x, self.position.x + self.velocity.x]
        ys = [self.position.y, self.position.y + self.velocity.y]
        plot(xs, ys, options, **kwargs)
        self.position.plot(options+'x', **kwargs)

    @staticmethod
    def similar(f1, f2, maxDistance2, maxDeltavelocity2):
        'True if positions and velocities are both within the given squared thresholds'
        if (f1.position-f2.position).norm2Squared() >= maxDistance2:
            return False
        return (f1.velocity-f2.velocity).norm2Squared() < maxDeltavelocity2
+
def intersection(p1, p2, p3, p4):
    ''' Intersection point (x,y) of lines formed by the vectors p1-p2 and p3-p4
        http://paulbourke.net/geometry/pointlineplane/'''
    d12 = p2 - p1
    d34 = p4 - p3
    determinant = float(d34.y*d12.x - d34.x*d12.y)
    if determinant == 0.:
        # parallel (or degenerate) lines
        return None
    ua = (d34.x*(p1.y-p3.y) - d34.y*(p1.x-p3.x))/determinant
    return p1 + d12*ua
+
+# def intersection(p1, p2, dp1, dp2):
+#     '''Returns the intersection point between the two lines
+#     defined by the respective vectors (dp) and origin points (p)'''
+#     from numpy import matrix
+#     from numpy.linalg import linalg
+#     A = matrix([[dp1.y, -dp1.x],
+#                 [dp2.y, -dp2.x]])
+#     B = matrix([[dp1.y*p1.x-dp1.x*p1.y],
+#                 [dp2.y*p2.x-dp2.x*p2.y]])
+
+#     if linalg.det(A) == 0:
+#         return None
+#     else:
+#         intersection = linalg.solve(A,B)
+#         return Point(intersection[0,0], intersection[1,0])
+
def segmentIntersection(p1, p2, p3, p4):
    '''Returns the intersecting point of the segments [p1, p2] and [p3, p4], None otherwise'''
    # quick rejection: the bounding intervals of the segments must overlap on both axes
    xOverlap = Interval.intersection(Interval(p1.x, p2.x, True), Interval(p3.x, p4.x, True))
    yOverlap = Interval.intersection(Interval(p1.y, p2.y, True), Interval(p3.y, p4.y, True))
    if xOverlap.empty() or yOverlap.empty():
        return None
    inter = intersection(p1, p2, p3, p4)
    if inter is None:
        return None
    # the line intersection must lie within both segments
    if utils.inBetween(p1.x, p2.x, inter.x) and utils.inBetween(p3.x, p4.x, inter.x) and utils.inBetween(p1.y, p2.y, inter.y) and utils.inBetween(p3.y, p4.y, inter.y):
        return inter
    return None
+
def segmentLineIntersection(p1, p2, p3, p4):
    '''Returns the intersection between the line through p1 and p2
    and the segment [p3, p4] (None if the lines are parallel
    or if the intersection falls outside the segment [p3, p4])'''
    inter = intersection(p1, p2, p3, p4)
    if inter is not None and utils.inBetween(p3.x, p4.x, inter.x) and utils.inBetween(p3.y, p4.y, inter.y):
        return inter
    else:
        return None
+
+
+class Trajectory(object):
+    '''Class for trajectories: temporal sequence of positions
+
+    The class is iterable'''
+
    def __init__(self, positions=None):
        # positions: list [xCoordinates, yCoordinates]; a fresh [[],[]] is
        # created on each call (avoids the shared mutable default pitfall)
        if positions is not None:
            self.positions = positions
        else:
            self.positions = [[],[]]

    @staticmethod
    def generate(p, v, nPoints):
        '''Returns a trajectory of nPoints positions starting at p,
        each subsequent position translated by v from the previous one'''
        t = Trajectory()
        p0 = Point(p.x, p.y)
        t.addPosition(p0)
        for i in range(nPoints-1):
            p0 += v
            t.addPosition(p0)
        return t

    @staticmethod
    def load(line1, line2):
        '''Parses two space-separated strings of coordinates
        (line1: x coordinates, line2: y coordinates)'''
        return Trajectory([[float(n) for n in line1.split(' ')],
                           [float(n) for n in line2.split(' ')]])

    @staticmethod
    def fromPointList(points):
        '''Builds a trajectory from a list of Point,
        or from a list of (x,y) lists/tuples'''
        t = Trajectory()
        if isinstance(points[0], list) or isinstance(points[0], tuple):
            for p in points:
                t.addPositionXY(p[0],p[1])
        else:
            for p in points:
                t.addPosition(p)
        return t
+
    def __len__(self):
        # number of positions
        return len(self.positions[0])

    def length(self):
        return self.__len__()

    def empty(self):
        return self.__len__() == 0

    def __getitem__(self, i):
        '''Returns the Point at index i, or a sub-Trajectory for a slice'''
        if isinstance(i, int):
            return Point(self.positions[0][i], self.positions[1][i])
        elif isinstance(i, slice):
            return Trajectory([self.positions[0][i],self.positions[1][i]])
        else:
            raise TypeError("Invalid argument type.")

    def __str__(self):
        return ' '.join([self.__getitem__(i).__str__() for i in range(self.length())])

    def __repr__(self):
        return self.__str__()

    def __iter__(self):
        # iteration state stored on the object itself (not reentrant)
        self.iterInstantNum = 0
        return self

    def __next__(self):
        if self.iterInstantNum >= self.length():
            raise StopIteration
        else:
            self.iterInstantNum += 1
            return self[self.iterInstantNum-1]

    def __eq__(self, other):
        # point-wise equality, only for trajectories of the same length
        if self.length() == other.length():
            result = True
            for p, po in zip(self, other):
                result = result and (p == po)
            return result
        else:
            return False
+
    def append(self,other):
        '''adds positions of other to the trajectory (in-place modification)'''
        for p in other:
            self.addPosition(p)

    def setPositionXY(self, i, x, y):
        # silently ignores out-of-range indices
        if i < self.__len__():
            self.positions[0][i] = x
            self.positions[1][i] = y

    def setPosition(self, i, p):
        self.setPositionXY(i, p.x, p.y)

    def addPositionXY(self, x, y):
        # appends a new position at the end of the trajectory
        self.positions[0].append(x)
        self.positions[1].append(y)

    def addPosition(self, p):
        self.addPositionXY(p.x, p.y)

    def duplicateLastPosition(self):
        # repeats the last position (e.g. to extend to a given length)
        self.positions[0].append(self.positions[0][-1])
        self.positions[1].append(self.positions[1][-1])
+
    @staticmethod
    def _plot(positions, options = '', withOrigin = False, lastCoordinate = None, timeStep = 1, objNum = None, **kwargs):
        '''Plots the positions (every timeStep-th), optionally up to lastCoordinate only,
        with a red dot at the origin and the object number as text if requested'''
        if lastCoordinate is None:
            plot(positions[0][::timeStep], positions[1][::timeStep], options, **kwargs)
        elif 0 <= lastCoordinate <= len(positions[0]):
            # NOTE(review): if lastCoordinate is outside [0, len], nothing is plotted (silent no-op)
            plot(positions[0][:lastCoordinate:timeStep], positions[1][:lastCoordinate:timeStep], options, **kwargs)
        if withOrigin:
            plot([positions[0][0]], [positions[1][0]], 'ro', **kwargs)
        if objNum is not None:
            text(positions[0][0], positions[1][0], '{}'.format(objNum))

    def homographyProject(self, homography):
        '''Returns a new trajectory projected through the homography matrix'''
        return Trajectory(cvutils.homographyProject(array(self.positions), homography).tolist())

    def newCameraProject(self, newCameraMatrix):
        '''Returns a new trajectory projected with the new camera matrix'''
        return Trajectory(cvutils.newCameraProject(array(self.positions), newCameraMatrix).tolist())

    def plot(self, options = '', withOrigin = False, timeStep = 1, objNum = None, **kwargs):
        Trajectory._plot(self.positions, options, withOrigin, None, timeStep, objNum, **kwargs)

    def plotAt(self, lastCoordinate, options = '', withOrigin = False, timeStep = 1, objNum = None, **kwargs):
        # plots only the positions up to index lastCoordinate
        Trajectory._plot(self.positions, options, withOrigin, lastCoordinate, timeStep, objNum, **kwargs)

    def plotOnWorldImage(self, nPixelsPerUnitDistance, options = '', withOrigin = False, timeStep = 1, objNum = None, **kwargs):
        # scales world coordinates to pixels before plotting
        imgPositions = [[x*nPixelsPerUnitDistance for x in self.positions[0]],
                        [x*nPixelsPerUnitDistance for x in self.positions[1]]]
        Trajectory._plot(imgPositions, options, withOrigin, None, timeStep, objNum, **kwargs)
+
    def getXCoordinates(self):
        return self.positions[0]

    def getYCoordinates(self):
        return self.positions[1]

    def asArray(self):
        # 2xN numpy array of the positions
        return array(self.positions)

    def xBounds(self):
        # look for function that does min and max in one pass
        return Interval(min(self.getXCoordinates()), max(self.getXCoordinates()))

    def yBounds(self):
        # look for function that does min and max in one pass
        return Interval(min(self.getYCoordinates()), max(self.getYCoordinates()))
+
+    def add(self, traj2):
+        '''Returns a new trajectory of the same length'''
+        if self.length() != traj2.length():
+            print('Trajectories of different lengths')
+            return None
+        else:
+            return Trajectory([[a+b for a,b in zip(self.getXCoordinates(),traj2.getXCoordinates())],
+                               [a+b for a,b in zip(self.getYCoordinates(),traj2.getYCoordinates())]])
+
+    def subtract(self, traj2):
+        '''Returns a new trajectory of the same length'''
+        if self.length() != traj2.length():
+            print('Trajectories of different lengths')
+            return None
+        else:
+            return Trajectory([[a-b for a,b in zip(self.getXCoordinates(),traj2.getXCoordinates())],
+                               [a-b for a,b in zip(self.getYCoordinates(),traj2.getYCoordinates())]])
+
+    def __mul__(self, alpha):
+        '''Returns a new trajectory of the same length'''
+        return Trajectory([[alpha*x for x in self.getXCoordinates()],
+                           [alpha*y for y in self.getYCoordinates()]])
+
+    def differentiate(self, doubleLastPosition = False):
+        diff = Trajectory()
+        for i in range(1, self.length()):
+            diff.addPosition(self[i]-self[i-1])
+        if doubleLastPosition:
+            diff.addPosition(diff[-1])
+        return diff
+
def differentiateSG(self, window_length, polyorder, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0, nInstantsIgnoredAtEnds = 2):
    '''Differentiates the trajectory using the Savitsky Golay filter

    window_length : The length of the filter window (i.e. the number of coefficients). window_length must be a positive odd integer.
    polyorder : The order of the polynomial used to fit the samples. polyorder must be less than window_length.
    deriv : The order of the derivative to compute. This must be a nonnegative integer. The default is 0, which means to filter the data without differentiating.
    delta : The spacing of the samples to which the filter will be applied. This is only used if deriv > 0. Default is 1.0.
    axis : The axis of the array x along which the filter is to be applied. Default is -1.
    mode : Must be mirror, constant, nearest, wrap or interp. This determines the type of extension to use for the padded signal to which the filter is applied. When mode is constant, the padding value is given by cval. See the Notes for more details on mirror, constant, wrap, and nearest. When the interp mode is selected (the default), no extension is used. Instead, a degree polyorder polynomial is fit to the last window_length values of the edges, and this polynomial is used to evaluate the last window_length // 2 output values.
    cval : Value to fill past the edges of the input if mode is constant. Default is 0.0.
    nInstantsIgnoredAtEnds : number of positions dropped at both ends before filtering (0 keeps all positions).

    https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.savgol_filter.html#scipy.signal.savgol_filter'''
    # fix: original tested the undefined name 'removeBothEnds' (NameError);
    # the parameter controlling end trimming is nInstantsIgnoredAtEnds
    if nInstantsIgnoredAtEnds >= 1:
        pos = [self.positions[0][nInstantsIgnoredAtEnds:-nInstantsIgnoredAtEnds],
               self.positions[1][nInstantsIgnoredAtEnds:-nInstantsIgnoredAtEnds]]
    else:
        pos = self.positions
    filtered = savgol_filter(pos, window_length, polyorder, deriv, delta, axis, mode, cval)
    return Trajectory(filtered)
+
def norm(self):
    '''Returns the list of the norms at each instant'''
    xCoords, yCoords = self.positions[0], self.positions[1]
    return hypot(xCoords, yCoords)
+
def computeCumulativeDistances(self):
    '''Computes the distance from each point to the next and the cumulative distance up to the point
    Can be accessed through getDistance(idx) and getCumulativeDistance(idx)'''
    self.distances = []
    self.cumulativeDistances = [0.]
    total = 0.
    for i in range(self.length()-1):
        d = Point.distanceNorm2(self[i], self[i+1])
        self.distances.append(d)
        total += d
        self.cumulativeDistances.append(total)
+
def getDistance(self, i):
    '''Return the distance between points i and i+1
    (requires computeCumulativeDistances to have been called)'''
    if i >= self.length()-1:
        # out of range: warn and fall through to an implicit None
        print('Index {} beyond trajectory length {}-1'.format(i, self.length()))
    else:
        return self.distances[i]
+
def getCumulativeDistance(self, i):
    '''Returns the cumulative distance between the beginning and point i
    (requires computeCumulativeDistances to have been called)'''
    if i >= self.length():
        # out of range: warn and fall through to an implicit None
        print('Index {} beyond trajectory length {}'.format(i, self.length()))
    else:
        return self.cumulativeDistances[i]
+
def getTotalDistance(self):
    '''Returns the total distance (shorthand for cumulative distance [-1])'''
    return self.getCumulativeDistance(-1)
+            
def getMaxDistance(self, metric):
    'Returns the maximum distance between points in the trajectory'
    pts = self.getPositions().asArray().T
    # pairwise distance matrix between all positions; keep only the largest
    return cdist(pts, pts, metric = metric).max()
+
def getClosestPoint(self, p1, maxDist2 = None):
    '''Returns the instant (index) of the closest position in trajectory to p1
    If maxDist2 is not None, returns the instant only when the minimum squared
    distance is no larger than maxDist2, and None otherwise
    TODO: could use cdist for different metrics'''
    minDist2 = float('inf')
    i = -1
    for j, p2 in enumerate(self):
        d2 = Point.distanceNorm2(p1, p2)
        if d2 < minDist2:
            minDist2 = d2
            i = j
    # fix: the original condition was inverted, returning None exactly when the
    # closest point WAS within maxDist2, contrary to the documented behavior
    if maxDist2 is not None and minDist2 > maxDist2:
        return None
    return i
+
def similarOrientation(self, refDirection, cosineThreshold, minProportion = 0.5):
    '''Indicates whether the minProportion (<=1.) (eg half) of the trajectory elements (vectors for velocity)
    have a cosine with refDirection is smaller than cosineThreshold'''
    nSimilar = sum(1 for p in self if p.similarOrientation(refDirection, cosineThreshold))
    return nSimilar >= float(self.length())*minProportion
+
def wiggliness(self):
    '''Ratio of the travelled (cumulative) distance to the straight-line
    distance between the endpoints; None when the endpoints coincide'''
    endToEnd = Point.distanceNorm2(self[0], self[self.length()-1])
    if endToEnd > 0:
        return self.getCumulativeDistance(self.length()-1)/float(endToEnd)
    return None
+
def getIntersections(self, p1, p2):
    '''Returns a list of the indices at which the trajectory
    intersects with the segment of extremities p1 and p2
    Returns an empty list if there is no crossing'''
    indices = []
    intersections = []
    for i in range(self.length()-1):
        q1, q2 = self[i], self[i+1]
        p = segmentIntersection(q1, q2, p1, p2)
        if p is None:
            continue
        # fractional index: interpolate along the segment q1-q2
        if q1.x != q2.x:
            ratio = (p.x-q1.x)/(q2.x-q1.x)
        elif q1.y != q2.y:
            ratio = (p.y-q1.y)/(q2.y-q1.y)
        else:
            ratio = 0
        indices.append(i+ratio)
        intersections.append(p)
    return indices, intersections
+
def getLineIntersections(self, p1, p2):
    '''Returns a list of the indices at which the trajectory
    intersects with the line going through p1 and p2
    Returns an empty list if there is no crossing'''
    indices = []
    intersections = []
    for i in range(self.length()-1):
        q1, q2 = self[i], self[i+1]
        p = segmentLineIntersection(p1, p2, q1, q2)
        if p is None:
            continue
        # fractional index: interpolate along the segment q1-q2
        if q1.x != q2.x:
            ratio = (p.x-q1.x)/(q2.x-q1.x)
        elif q1.y != q2.y:
            ratio = (p.y-q1.y)/(q2.y-q1.y)
        else:
            ratio = 0
        indices.append(i+ratio)
        intersections.append(p)
    return indices, intersections
+
def subTrajectoryInInterval(self, inter):
    '''Returns all positions between index inter.first and index inter.last (included)
    None if the interval is out of range'''
    if inter.first < 0 or inter.last > self.length():
        return None
    sl = slice(inter.first, inter.last+1)
    return Trajectory([self.positions[0][sl], self.positions[1][sl]])
+
def subSample(self, step):
    'Returns one position every step positions'
    return Trajectory([coords[::step] for coords in self.positions])
+
if shapelyAvailable:
    def getInstantsInPolygon(self, polygon):
        '''Returns the list of instants at which the trajectory is in the polygon'''
        instants = []
        n = self.length()
        for t, x, y in zip(range(n), self.positions[0], self.positions[1]):
            if polygon.contains(shapelyPoint(x, y)):
                instants.append(t)
        return instants

    def getTrajectoryInPolygon(self, polygon, t2 = None):
        '''Returns the trajectory built with the set of points inside the (shapely) polygon
        The polygon could be a prepared polygon (faster) from prepared.prep

        t2 is another trajectory (could be velocities)
        which is filtered based on the first (self) trajectory'''
        traj = Trajectory()
        inPolygon = []
        for x, y in zip(self.positions[0], self.positions[1]):
            inPolygon.append(polygon.contains(shapelyPoint(x, y)))
            if inPolygon[-1]:
                traj.addPositionXY(x, y)
        traj2 = Trajectory()
        if t2 is not None:
            for inp, x, y in zip(inPolygon, t2.positions[0], t2.positions[1]):
                if inp:
                    traj2.addPositionXY(x, y)
        return traj, traj2

    def proportionInPolygon(self, polygon, minProportion = 0.5):
        '''Indicates whether at least minProportion of the positions are inside the polygon'''
        instants = self.getInstantsInPolygon(polygon)
        lengthThreshold = float(self.length())*minProportion
        return len(instants) >= lengthThreshold
else:
    def getTrajectoryInPolygon(self, polygon, t2 = None):
        '''Returns the trajectory built with the set of points inside the polygon
        (array of Nx2 coordinates of the polygon vertices)
        t2 is another trajectory (could be velocities) filtered on the same instants'''
        traj = Trajectory()
        inPolygon = []
        for p in self:
            inPolygon.append(p.inPolygon(polygon))
            if inPolygon[-1]:
                traj.addPosition(p)
        traj2 = Trajectory()
        if t2 is not None:
            for inp, x, y in zip(inPolygon, t2.positions[0], t2.positions[1]):
                if inp:
                    # fix: the original appended p.x, p.y where p was the stale
                    # loop variable from the FIRST loop (always the last position
                    # of self); the intended values are t2's coordinates x, y
                    traj2.addPositionXY(x, y)
        return traj, traj2

    def proportionInPolygon(self, polygon, minProportion = 0.5):
        '''Indicates whether at least minProportion of the positions are inside the polygon'''
        inPolygon = [p.inPolygon(polygon) for p in self]
        lengthThreshold = float(self.length())*minProportion
        return sum(inPolygon) >= lengthThreshold
+
@staticmethod
def lcss(t1, t2, lcss):
    '''Returns the longest common subsequence similarity between the two
    trajectories, as computed by the provided lcss instance'''
    return lcss.compute(t1, t2)
+
class CurvilinearTrajectory(Trajectory):
    '''Sub class of trajectory for trajectories with curvilinear coordinates and lane assignements
    longitudinal coordinate is stored as first coordinate (exterior name S)
    lateral coordinate is stored as second coordinate
    the third "lane" coordinate is for an alignment id,
    whether explicit for a list/dict of alignments,
    or implicit for a road with lane numbers'''

    def __init__(self, S = None, Y = None, lanes = None):
        '''Initializes from parallel lists of longitudinal (S) and lateral (Y)
        coordinates and lane assignments; inconsistent input falls back to
        empty coordinates (with a warning when S and Y lengths differ)'''
        if S is None or Y is None or len(S) != len(Y):
            self.positions = [[],[]]
            if S is not None and Y is not None and len(S) != len(Y):
                print("S and Y coordinates of different lengths\nInitializing to empty lists")
        else:
            self.positions = [S,Y]
        # lanes must parallel the positions, otherwise filled with None
        if lanes is None or len(lanes) != self.length():
            self.lanes = [None]*int(self.length())
        else:
            self.lanes = lanes

    @staticmethod
    def generate(s, v, nPoints, lane, y = 0):
        '''s is initial position, v is velocity
        0 in lateral coordinate by default
        TODO 2D velocity for lane change?'''
        S = [s]
        for i in range(nPoints-1):
            S.append(S[-1]+v)
        Y = [y]*nPoints
        lanes = [lane]*nPoints
        return CurvilinearTrajectory(S, Y, lanes)

    @staticmethod
    def fromTrajectoryProjection(t, alignments, halfWidth = 3):
        '''Projects a Cartesian trajectory t on the alignments and returns the
        resulting CurvilinearTrajectory (curvilinear coordinates and lane/alignment ids)
        From Paul St-Aubin's PVA tools

        Input:
        ======
        alignments   = a list of alignments, where each alignment is a list of
                       points (class Point).
        halfWidth = moving average window (in points) in which to smooth
                       lane changes. As per tools_math.cat_mvgavg(), this term
                       is a search *radius* around the center of the window.

        '''
        curvilinearPositions = CurvilinearTrajectory()

        #For each point
        for i in range(int(t.length())):
            result = getSYfromXY(t[i], alignments)

            # Error handling
            if(result is None):
                print('Warning: trajectory at point {} {} has alignment errors (alignment snapping)\nCurvilinear trajectory could not be computed'.format(i, t[i]))
            else:
                [align, alignPoint, snappedPoint, subsegmentDistance, S, Y] = result
                curvilinearPositions.addPositionSYL(S, Y, align)

        ## Go back through points and correct lane
        #Run through objects looking for outlier point
        smoothed_lanes = utils.filterCategoricalMovingWindow(curvilinearPositions.getLanes(), halfWidth)
        ## Recalculate projected point to new lane
        lanes = curvilinearPositions.getLanes()
        if(lanes != smoothed_lanes):
            for i in range(len(lanes)):
                if(lanes[i] != smoothed_lanes[i]):
                    # re-project the point on the smoothed (corrected) alignment only
                    result = getSYfromXY(t[i],[alignments[smoothed_lanes[i]]])

                    # Error handling
                    if(result is None):
                        ## This can be triggered by tracking errors when the trajectory jumps around passed another alignment.
                        print('    Warning: trajectory at point {} {} has alignment errors during trajectory smoothing and will not be corrected.'.format(i, t[i]))
                    else:
                        [align, alignPoint, snappedPoint, subsegmentDistance, S, Y] = result
                        curvilinearPositions.setPosition(i, S, Y, align)
        return curvilinearPositions

    def __getitem__(self,i):
        # returns [S, Y, lane] for integer indices; slices are not supported
        if isinstance(i, int):
            return [self.positions[0][i], self.positions[1][i], self.lanes[i]]
        else:
            raise TypeError("Invalid argument type.")
            #elif isinstance( key, slice ):

    def getSCoordinates(self):
        # longitudinal coordinates are stored as the first (X) coordinate
        return self.getXCoordinates()

    def getLanes(self):
        return self.lanes

    def getSCoordAt(self, i):
        return self.positions[0][i]

    def getYCoordAt(self, i):
        return self.positions[1][i]

    def getLaneAt(self, i):
        return self.lanes[i]

    def subTrajectoryInInterval(self, inter):
        'Returns all curvilinear positions between index inter.first and index.last (included)'
        if inter.first >=0 and inter.last<= self.length():
            return CurvilinearTrajectory(self.positions[0][inter.first:inter.last+1],
                                         self.positions[1][inter.first:inter.last+1],
                                         self.lanes[inter.first:inter.last+1])
        else:
            return None

    def addPositionSYL(self, s, y, lane = None):
        # appends a curvilinear position (and its lane) at the end
        self.addPositionXY(s,y)
        self.lanes.append(lane)

    def addPosition(self, p):
        'Adds position in the point format for curvilinear of list with 3 values'
        self.addPositionSYL(p[0], p[1], p[2])

    def duplicateLastPosition(self):
        # also duplicate the lane to keep positions and lanes parallel
        super(CurvilinearTrajectory, self).duplicateLastPosition()
        self.lanes.append(self.lanes[-1])

    def setPosition(self, i, s, y, lane):
        '''Sets the curvilinear position and lane at index i
        (lane is only set if i is within the current length)'''
        self.setPositionXY(i, s, y)
        if i < self.__len__():
            self.lanes[i] = lane

    def differentiate(self, doubleLastPosition = False):
        '''Returns the trajectory of differences between successive curvilinear positions
        the third component is (previous lane, new lane) on a lane change, None otherwise'''
        diff = CurvilinearTrajectory()
        p1 = self[0]
        for i in range(1, self.length()):
            p2 = self[i]
            if p2[2] == p1[2]:
                laneChange = None
            else:
                laneChange = (p1[2], p2[2])
            diff.addPositionSYL(p2[0]-p1[0], p2[1]-p1[1], laneChange)
            p1=p2
        if doubleLastPosition and self.length() > 1:
            diff.addPosition(diff[-1])
        return diff

    def getIntersections(self, S1, lane = None):
        '''Returns a list of the indices at which the trajectory
        goes past the curvilinear coordinate S1
        (in provided lane if lane is not None)
        Returns an empty list if there is no crossing'''
        indices = []
        for i in range(self.length()-1):
            q1=self.__getitem__(i)
            q2=self.__getitem__(i+1)
            if q1[0] <= S1 < q2[0] and (lane is None or (self.lanes[i] == lane and self.lanes[i+1] == lane)):
                # fractional index interpolated between the two surrounding positions
                indices.append(i+(S1-q1[0])/(q2[0]-q1[0]))
        return indices
+
+##################
+# Moving Objects
+##################
+
# Names of the supported road user types; the index of a name in this list is
# the numerical code stored in MovingObject.userType
userTypeNames = ['unknown',
                 'car',
                 'pedestrian',
                 'motorcycle',
                 'bicycle',
                 'bus',
                 'truck',
                 'automated']

# inverse mapping: user type name -> numerical code
userType2Num = utils.inverseEnumeration(userTypeNames)
+
class CarClassifier:
    '''Trivial classifier that labels every observation as a car'''
    def predict(self, hog):
        # the HOG descriptor argument is ignored entirely
        return userType2Num['car']
carClassifier = CarClassifier()
+
+class MovingObject(STObject, VideoFilenameAddable):
+    '''Class for moving objects: a spatio-temporal object
+    with a trajectory and a geometry (constant volume over time)
+    and a usertype (e.g. road user) coded as a number (see userTypeNames)
+    '''
+
def __init__(self, num = None, timeInterval = None, positions = None, velocities = None, geometry = None, userType = userType2Num['unknown'], nObjects = None, initCurvilinear = False):
    '''Initializes a moving object

    num: object identifier
    timeInterval: TimeInterval of existence
    positions, velocities: trajectories, stored as curvilinear coordinates
    if initCurvilinear is True, Cartesian otherwise
    geometry: geometry, constant over time
    userType: road user type code (see userTypeNames)
    nObjects: number of objects represented (None for a feature)'''
    super(MovingObject, self).__init__(num, timeInterval)
    if initCurvilinear:
        self.curvilinearPositions = positions
        self.curvilinearVelocities = velocities # third component is (previousAlignmentIdx, newAlignmentIdx) or None if no change
    else:
        self.positions = positions
        self.velocities = velocities
    self.geometry = geometry
    self.userType = userType
    self.setNObjects(nObjects) # a feature has None for nObjects
    self.features = None
    # compute bounding polygon from trajectory
+
@staticmethod
def croppedTimeInterval(obj, value, after = True):
    '''Returns a new MovingObject cropped at instant value:
    keeping instants <= value if after is True, instants >= value otherwise
    Features, when loaded, are cropped recursively'''
    newTimeInterval = TimeInterval(obj.getFirstInstant(), min(value, obj.getLastInstant())) if after else TimeInterval(max(obj.getFirstInstant(), value), obj.getLastInstant())
    # fix: trajectory indices are relative to the FIRST instant (see
    # getPositionAtInstant); the original subtracted getLastInstant(),
    # yielding wrong (negative) slice bounds
    if obj.positions is not None :
        newPositions = obj.positions[slice(newTimeInterval.first - obj.getFirstInstant(), newTimeInterval.last + 1 - obj.getFirstInstant())]
    else:
        newPositions = None
    if obj.velocities is not None :
        newVelocities = obj.velocities[slice(newTimeInterval.first - obj.getFirstInstant(), newTimeInterval.last + 1 - obj.getFirstInstant())]
    else:
        newVelocities = None
    if obj.hasFeatures():
        newFeatures = [f.croppedTimeInterval(value, after) for f in obj.features]
    else:
        newFeatures = None
    res = MovingObject(obj.getNum(), newTimeInterval, newPositions, newVelocities, obj.geometry, obj.userType, obj.nObjects)
    res.features = newFeatures
    res.featureNumbers = obj.featureNumbers
    #if hasattr(obj, 'projectedPositions'):
    #    res.projectedPositions = obj.projectedPositions[slice(newTimeInterval.first, newTimeInterval.last+1)]
    return res
+
+
@staticmethod
def aggregateTrajectories(features, interval = None, aggFunc = mean):
    '''Computes the aggregate trajectory from list of MovingObject features
    (aggregating positions and velocities with aggFunc over the features
    existing at each instant of interval, or the union of feature intervals)'''
    positions = Trajectory()
    velocities = Trajectory()
    inter = TimeInterval.unionIntervals([f.getTimeInterval() for f in features]) if interval is None else interval
    for t in inter:
        existing = [f for f in features if f.existsAtInstant(t)]
        positions.addPosition(Point.agg([f.getPositionAtInstant(t) for f in existing], aggFunc))
        velocities.addPosition(Point.agg([f.getVelocityAtInstant(t) for f in existing], aggFunc))
    return inter, positions, velocities
+
@staticmethod
def generate(num, p, v, timeInterval):
    '''Creates a MovingObject moving from position p at constant velocity v
    over timeInterval'''
    n = int(timeInterval.length())
    return MovingObject(num = num, timeInterval = timeInterval,
                        positions = Trajectory.generate(p, v, n),
                        velocities = Trajectory([[v.x]*n, [v.y]*n]))
+
def updatePositions(self):
    'Recomputes the object positions and velocities as the aggregate of its features'
    _, self.positions, self.velocities = MovingObject.aggregateTrajectories(self.features, self.getTimeInterval())
+
@staticmethod
def concatenate(obj1, obj2, num = None, newFeatureNum = None, minFeatureLength = 5):
    '''Concatenates two objects, whether overlapping temporally or not

    If the time intervals do not overlap, the gap is filled by linear
    interpolation and a new feature (num newFeatureNum) is created for it
    Positions will be recomputed if features are merged
    Otherwise, only featureNumbers and/or features will be merged
    minFeatureLength enforces a minimum length to avoid small features
    (and smaller velocities that are not saved)'''
    if num is None:
        newNum = obj1.getNum()
    else:
        newNum = num
    commonTimeInterval = obj1.commonTimeInterval(obj2)
    if commonTimeInterval.empty():
        #print('The two objects\' time intervals do not overlap: obj1 {} and obj2 {}'.format(obj1.getTimeInterval(), obj2.getTimeInterval()))
        emptyInterval = TimeInterval(min(obj1.getLastInstant(),obj2.getLastInstant()), max(obj1.getFirstInstant(),obj2.getFirstInstant()))
        # order the objects temporally
        if obj1.existsAtInstant(emptyInterval.last):
            firstObject = obj2
            secondObject = obj1
        else:
            firstObject = obj1
            secondObject = obj2
        # constant velocity over the gap
        v = (secondObject.getPositionAtInstant(emptyInterval.last)-firstObject.getPositionAtInstant(emptyInterval.first)).divide(emptyInterval.length()-1)
        positions = copy.deepcopy(firstObject.getPositions())
        # fix: the original deep-copied getPositions() into velocities
        velocities = copy.deepcopy(firstObject.getVelocities())
        featurePositions = Trajectory()
        featureVelocities = Trajectory()
        p = firstObject.getPositionAtInstant(emptyInterval.first)+v
        for t in range(emptyInterval.first+1, emptyInterval.last+1):
            positions.addPosition(p)
            velocities.addPosition(v)
            featurePositions.addPosition(p)
            featureVelocities.addPosition(v)
            p = p+v
        for t in secondObject.getTimeInterval():
            p = secondObject.getPositionAtInstant(t)
            v = secondObject.getVelocityAtInstant(t)
            positions.addPosition(p)
            velocities.addPosition(v)
            if featurePositions.length() < minFeatureLength:
                # pad the interpolated feature up to the minimum length
                featurePositions.addPosition(p)
                featureVelocities.addPosition(v)
        newObject = MovingObject(newNum, TimeInterval(firstObject.getFirstInstant(), secondObject.getLastInstant()), positions, velocities, nObjects = 1)
        if hasattr(obj1, 'featureNumbers') and hasattr(obj2, 'featureNumbers'):
            if newFeatureNum is not None:
                newObject.featureNumbers = obj1.featureNumbers+obj2.featureNumbers+[newFeatureNum]
            else:
                print('Issue, new created feature has no num id')
        if obj1.hasFeatures() and obj2.hasFeatures():
            newObject.features = obj1.getFeatures()+obj2.getFeatures()+[MovingObject(newFeatureNum, TimeInterval(emptyInterval.first+1, emptyInterval.first+featurePositions.length()), featurePositions, featureVelocities)]
            newObject.updatePositions()
    else: # time intervals overlap
        newTimeInterval = TimeInterval.union(obj1.getTimeInterval(), obj2.getTimeInterval())
        newObject = MovingObject(newNum, newTimeInterval, nObjects = 1) # hypothesis is that it's the same object being reunited
        if hasattr(obj1, 'featureNumbers') and hasattr(obj2, 'featureNumbers'):
            newObject.featureNumbers = obj1.featureNumbers+obj2.featureNumbers
        if obj1.hasFeatures() and obj2.hasFeatures():
            newObject.features = obj1.getFeatures()+obj2.getFeatures()
            newObject.updatePositions()
        else:
            print('Cannot update object positions without features')
    # user type: warn on disagreement, keep obj1's
    if obj1.getUserType() != obj2.getUserType():
        print('The two moving objects have different user types: obj1 {} obj2 {}'.format(userTypeNames[obj1.getUserType()], userTypeNames[obj2.getUserType()]))
    newObject.setUserType(obj1.getUserType())
    return newObject
+
def getObjectInTimeInterval(self, inter):
    '''Returns a new object extracted from self,
    restricted to time interval inter
    (None, with a message, if inter does not intersect the object interval)'''
    intersection = TimeInterval.intersection(inter, self.getTimeInterval())
    if intersection.empty():
        print('The object does not exist at {}'.format(inter))
        return None
    trajectoryInterval = TimeInterval(intersection.first-self.getFirstInstant(), intersection.last-self.getFirstInstant())
    # fix: the original passed self.geometry, self.userType, self.nObjects as
    # POSITIONAL arguments, binding geometry to the velocities parameter and
    # shifting the following arguments; use keywords to bind them correctly
    obj = MovingObject(self.num, intersection, self.positions.subTrajectoryInInterval(trajectoryInterval),
                       geometry = self.geometry, userType = self.userType, nObjects = self.nObjects)
    if self.velocities is not None:
        obj.velocities = self.velocities.subTrajectoryInInterval(trajectoryInterval)
    return obj
+
def getObjectsInMask(self, mask, homography = None, minLength = 1):
    '''Returns new objects made of the positions in the mask
    mask is in the destination of the homography space
    minLength: minimum number of consecutive in-mask positions for a sub-object'''
    if homography is not None:
        self.projectedPositions = self.positions.homographyProject(homography)
    else:
        self.projectedPositions = self.positions
    def inMask(positions, i, mask):
        # a position is kept if the mask is non-zero at its (rounded-down) pixel
        p = positions[i]
        return mask[int(p.y), int(p.x)] != 0.

    #subTimeIntervals self.getFirstInstant()+i
    filteredIndices = [inMask(self.projectedPositions, i, mask) for i in range(int(self.length()))]
    # 'connected components' in subTimeIntervals: label each maximal run of
    # in-mask positions with an integer, out-of-mask positions with -1
    l = 0
    intervalLabels = []
    prev = True
    for i in filteredIndices:
        if i:
            if not prev: # new interval
                l += 1
            intervalLabels.append(l)
        else:
            intervalLabels.append(-1)
        prev = i
    intervalLabels = array(intervalLabels)
    subObjects = []
    for l in set(intervalLabels):
        if l >= 0:
            # keep only runs of at least minLength positions
            if sum(intervalLabels == l) >= minLength:
                times = [self.getFirstInstant()+i for i in range(len(intervalLabels)) if intervalLabels[i] == l]
                subTimeInterval = TimeInterval(min(times), max(times))
                subObjects.append(self.getObjectInTimeInterval(subTimeInterval))

    return subObjects
+
def getPositions(self):
    return self.positions

def getVelocities(self):
    return self.velocities

def getUserType(self):
    return self.userType

def computeCumulativeDistances(self):
    'Delegates to the positions trajectory'
    self.positions.computeCumulativeDistances()

def getCurvilinearPositions(self):
    # None when the object has no curvilinear coordinates
    return getattr(self, 'curvilinearPositions', None)

def getCurvilinearVelocities(self):
    # None when the object has no curvilinear velocities
    return getattr(self, 'curvilinearVelocities', None)
+
def plotCurvilinearPositions(self, lane = None, options = '', withOrigin = False, **kwargs):
    '''Plots the longitudinal curvilinear coordinate over time
    If lane is not None, only positions on that lane are plotted
    withOrigin marks the first plotted position with a red dot'''
    if hasattr(self, 'curvilinearPositions'):
        if lane is None:
            plot(list(self.getTimeInterval()), self.curvilinearPositions.positions[0], options, **kwargs)
            if withOrigin:
                plot([self.getFirstInstant()], [self.curvilinearPositions.positions[0][0]], 'ro', **kwargs)
        else:
            instants = []
            coords = []
            for t, p in zip(self.getTimeInterval(), self.curvilinearPositions):
                if p[2] == lane:
                    instants.append(t)
                    coords.append(p[0])
                else:
                    # NaN breaks the plotted line outside the requested lane
                    instants.append(NaN)
                    coords.append(NaN)
            plot(instants, coords, options, **kwargs)
            if withOrigin and len(instants)>0:
                plot([instants[0]], [coords[0]], 'ro', **kwargs)
    else:
        print('Object {} has no curvilinear positions'.format(self.getNum()))
+
def interpolateCurvilinearPositions(self, t, alignments = None):
    '''Linear interpolation of curvilinear positions, t being a float
    Returns [S, Y, lane] when the two surrounding positions are on the
    same lane; lane changes are not handled yet (TODO branch below)'''
    if hasattr(self, 'curvilinearPositions'):
        if self.existsAtInstant(t):
            i = int(floor(t))
            p1 = self.getCurvilinearPositionAtInstant(i)
            p2 = self.getCurvilinearPositionAtInstant(i+1)
            if p1[2] == p2[2]:
                # same lane: interpolate S and Y linearly between i and i+1
                alpha = t-float(i)
                return [(1-alpha)*p1[0]+alpha*p2[0], (1-alpha)*p1[1]+alpha*p2[1], p1[2]]
            elif alignments is not None: # can be done only if assuming there is no missing alignmentn where the object has no coordinate
                pass # TODO
            else:
                print('Object {} changes lane at {} and alignments are not provided'.format(self.getNum(), t))
        else:
            print('Object {} does not exist at {}'.format(self.getNum(), t))
    else:
        print('Object {} has no curvilinear positions'.format(self.getNum()))
+
def setUserType(self, userType):
    self.userType = userType

def getNObjects(self):
    return self.nObjects

def setNObjects(self, nObjects):
    '''Sets the number of objects represented by this object
    (None for a feature; otherwise must be >= 1, invalid values reset to None)'''
    if nObjects is not None and nObjects < 1:
        print('Number of objects represented by object {} must be greater or equal to 1 ({})'.format(self.getNum(), nObjects))
        self.nObjects = None
    else:
        self.nObjects = nObjects
+
def setFeatures(self, features, featuresOrdered = False):
    '''Sets the features in the features field based on featureNumbers
    if not all features are loaded from 0, one needs to renumber in a dict'''
    if featuresOrdered:
        lookup = features
    else:
        lookup = {f.getNum(): f for f in features}
    self.features = [lookup[n] for n in self.featureNumbers]
+
def getFeatures(self):
    return self.features

def hasFeatures(self):
    '''True when features have been loaded'''
    return self.features is not None

def getFeature(self, i):
    '''Returns feature i, or None when unavailable'''
    if not self.hasFeatures() or i >= len(self.features):
        return None
    return self.features[i]
+
def getNLongestFeatures(self, nFeatures = 1):
    '''Returns the (at most) nFeatures longest features
    (empty list when no features are loaded)'''
    if self.features is None:
        return []
    byLength = utils.sortByLength(self.getFeatures(), reverse = True)
    return byLength[:min(len(byLength), nFeatures)]
+
def getFeatureNumbersOverTime(self):
    '''Returns the number of features at each instant
    dict instant -> number of features
    (None, with a message, when no features are loaded)'''
    if not self.hasFeatures():
        print('Object {} has no features loaded.'.format(self.getNum()))
        return None
    featureNumbers = {}
    for t in self.getTimeInterval():
        featureNumbers[t] = sum(1 for f in self.getFeatures() if f.existsAtInstant(t))
    return featureNumbers
+
def getSpeeds(self, nInstantsIgnoredAtEnds = 0):
    '''Returns the speeds (velocity norms) at each instant,
    optionally ignoring nInstantsIgnoredAtEnds instants at both ends
    (capped at half the trajectory length)'''
    speeds = self.getVelocities().norm()
    if nInstantsIgnoredAtEnds <= 0:
        return speeds
    n = min(nInstantsIgnoredAtEnds, int(floor(self.length()/2.)))
    return speeds[n:-n]
+
def getAccelerations(self, window_length, polyorder, delta=1.0, axis=-1, mode='interp', cval=0.0, speeds = None, nInstantsIgnoredAtEnds = 0):
    '''Returns the 1-D acceleration from the 1-D speeds
    (Savitzky-Golay first derivative of the speeds)
    Caution about previously filtered data'''
    spd = self.getSpeeds(nInstantsIgnoredAtEnds) if speeds is None else speeds
    return savgol_filter(spd, window_length, polyorder, 1, delta, axis, mode, cval)
+
def getSpeedIndicator(self):
    'Returns the speed at each instant of the time interval as a SeverityIndicator'
    from indicators import SeverityIndicator
    speeds = {t: self.getVelocityAtInstant(t).norm2() for t in self.getTimeInterval()}
    return SeverityIndicator('Speed', speeds)
+
def getPositionAt(self, i):
    'Position at index i of the trajectory'
    return self.positions[i]

def getVelocityAt(self, i):
    'Velocity at index i of the trajectory'
    return self.velocities[i]

def getPositionAtInstant(self, i):
    'Position at (absolute) instant i, offset by the first instant'
    return self.positions[i-self.getFirstInstant()]

def getVelocityAtInstant(self, i):
    'Velocity at (absolute) instant i, offset by the first instant'
    return self.velocities[i-self.getFirstInstant()]

def getCurvilinearPositionAt(self, i):
    'Curvilinear position at index i'
    return self.curvilinearPositions[i]

def getCurvilinearVelocityAt(self, i):
    'Curvilinear velocity at index i'
    return self.curvilinearVelocities[i]

def getCurvilinearPositionAtInstant(self, i):
    'Curvilinear position at (absolute) instant i'
    return self.curvilinearPositions[i-self.getFirstInstant()]

def getCurvilinearVelocityAtInstant(self, i):
    'Curvilinear velocity at (absolute) instant i'
    return self.curvilinearVelocities[i-self.getFirstInstant()]

def getXCoordinates(self):
    'X coordinates of all positions'
    return self.positions.getXCoordinates()

def getYCoordinates(self):
    'Y coordinates of all positions'
    return self.positions.getYCoordinates()
+
def plot(self, options = '', withOrigin = False, timeStep = 1, withFeatures = False, withIds = False, **kwargs):
    '''Plots the object trajectory (delegating to positions.plot)
    options: matplotlib format string; withOrigin marks the first position;
    if withFeatures and features are loaded, features are drawn in red
    and the object trajectory in blue;
    if withIds, the object number is passed for annotation'''
    objNum = self.getNum() if withIds else None
    if withFeatures and self.hasFeatures():
        for f in self.getFeatures():
            f.positions.plot('r', True, timeStep, **kwargs)
        self.positions.plot('bx-', True, timeStep, objNum, **kwargs)
    else:
        self.positions.plot(options, withOrigin, timeStep, objNum, **kwargs)
+
def plotOnWorldImage(self, nPixelsPerUnitDistance, options = '', withOrigin = False, timeStep = 1, withIds = False, **kwargs):
    '''Plots the trajectory on a world image scaled by nPixelsPerUnitDistance
    if withIds, the object number is passed for annotation'''
    objNum = self.getNum() if withIds else None
    self.positions.plotOnWorldImage(nPixelsPerUnitDistance, options, withOrigin, timeStep, objNum, **kwargs)
+
def play(self, videoFilename, homography = None, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1.):
    '''Displays this object's trajectory overlaid on the video, over the frames
    where the object exists (delegates to cvutils.displayTrajectories);
    pass the camera parameters to undistort the frames first'''
    cvutils.displayTrajectories(videoFilename, [self], homography = homography, firstFrameNum = self.getFirstInstant(), lastFrameNumArg = self.getLastInstant(), undistort = undistort, intrinsicCameraMatrix = intrinsicCameraMatrix, distortionCoefficients = distortionCoefficients, undistortedImageMultiplication = undistortedImageMultiplication)
+
def speedDiagnostics(self, framerate = 1., display = False, nInstantsIgnoredAtEnds=0):
    '''Prints summary speed statistics (min, 5th percentile, near-end difference,
    standard deviation and linear regression slope), speeds scaled by framerate
    If display is True, also plots the trajectory, the speed profile
    and the accelerations over time'''
    speeds = framerate*self.getSpeeds(nInstantsIgnoredAtEnds)
    coef = utils.linearRegression(list(range(len(speeds))), speeds)
    print('min/5th perc speed: {} / {}\nspeed diff: {}\nspeed stdev: {}\nregression: {}'.format(min(speeds), scoreatpercentile(speeds, 5), speeds[-2]-speeds[1], std(speeds), coef[0]))
    if display:
        # fix: plot was used below but not imported here, raising a NameError
        from matplotlib.pyplot import figure, axis, plot
        figure(1)
        self.plot()
        axis('equal')
        figure(2)
        plot(list(self.getTimeInterval()), speeds)
        figure(3)
        plot(list(self.getTimeInterval()), self.getAccelerations(9, 3, speeds = speeds)) # arbitrary parameter
+
@staticmethod
def minMaxDistance(obj1, obj2):
    '''Returns (length of the common time interval, min distance, max distance)
    between the two objects over their common existence (used for feature grouping)
    min and max are None if the objects do not coexist'''
    commonTimeInterval = obj1.commonTimeInterval(obj2)
    if commonTimeInterval.empty():
        return int(commonTimeInterval.length()), None, None
    distances = [(obj1.getPositionAtInstant(t)-obj2.getPositionAtInstant(t)).norm2() for t in commonTimeInterval]
    return int(commonTimeInterval.length()), min(distances), max(distances)
+
@staticmethod
def distances(obj1, obj2, instant1, _instant2 = None):
    '''Returns the matrix of Euclidean distances between all features of obj1
    (at instant1) and all features of obj2 (at the same instant,
    or at _instant2 if provided)'''
    instant2 = instant1 if _instant2 is None else _instant2
    positions1 = [f.getPositionAtInstant(instant1).astuple() for f in obj1.features if f.existsAtInstant(instant1)]
    positions2 = [f.getPositionAtInstant(instant2).astuple() for f in obj2.features if f.existsAtInstant(instant2)]
    return cdist(positions1, positions2, metric = 'euclidean')
+
@staticmethod
def minDistance(obj1, obj2, instant1, instant2 = None):
    'Smallest distance between any feature of obj1 and any feature of obj2 at instant1 (obj2 at instant2 if given)'
    distanceMatrix = MovingObject.distances(obj1, obj2, instant1, instant2)
    return distanceMatrix.min()
+
@staticmethod
def maxDistance(obj1, obj2, instant1, instant2 = None):
    '''Largest distance between any feature of obj1 and any feature of obj2
    at instant1 (obj2 at instant2 if given)

    Fix: the first instant parameter was named instant while the body used
    instant1, raising a NameError on every call (e.g. from maxSize)'''
    return MovingObject.distances(obj1, obj2, instant1, instant2).max()
+
def maxSize(self):
    '''Returns the largest distance between the object's features,
    measured at the instant when the most features exist
    (None, with a message, if no features are loaded)'''
    if hasattr(self, 'features'):
        nFeatures = -1
        tMaxFeatures = 0
        for t in self.getTimeInterval():
            n = sum(1 for f in self.features if f.existsAtInstant(t))
            if n > nFeatures:
                nFeatures, tMaxFeatures = n, t
        return MovingObject.maxDistance(self, self, tMaxFeatures)
    else:
        print('Load features to compute a maximum size')
        return None
+
def setRoutes(self, startRouteID, endRouteID):
    'Sets the origin and destination route ids of the object'
    self.startRouteID, self.endRouteID = startRouteID, endRouteID
+
def getInstantsCrossingLane(self, p1, p2):
    '''Returns the instant(s)
    at which the object passes from one side of the segment p1-p2 to the other
    (empty list if there is no crossing)'''
    indices, intersections = self.positions.getIntersections(p1, p2)
    firstInstant = self.getFirstInstant()
    return [firstInstant + i for i in indices]
+
def computeTrajectorySimilarities(self, prototypes, lcss):
    '''Computes the similarities to the prototypes using the LCSS
    and caches them in self.prototypeSimilarities (computed only once)

    For each prototype, lcss.similarities fills lcss.similarityTable;
    its last row (excluding the last column) is then normalized by the
    smaller of the partial trajectory length and the prototype length'''
    if not hasattr(self, 'prototypeSimilarities'):
        self.prototypeSimilarities = []
        for proto in prototypes:
            lcss.similarities(proto.getMovingObject().getPositions().asArray().T, self.getPositions().asArray().T)
            similarities = lcss.similarityTable[-1, :-1].astype(float)
            # normalize elementwise by min(1..len(similarities), prototype length)
            self.prototypeSimilarities.append(similarities/minimum(arange(1., len(similarities)+1), proto.getMovingObject().length()*ones(len(similarities))))
+
@staticmethod
def computePET(obj1, obj2, collisionDistanceThreshold):
    '''Post-encroachment time based on distance threshold

    Returns the smallest time difference when the object positions are within collisionDistanceThreshold
    and the instants at which each object is passing through its corresponding position'''
    positions1 = [p.astuple() for p in obj1.getPositions()]
    positions2 = [p.astuple() for p in obj2.getPositions()]
    n1 = len(positions1)
    n2 = len(positions2)
    # pets[i, j] is the time difference between obj1 at its i-th instant and obj2 at its j-th instant
    pets = zeros((n1, n2))
    for i,t1 in enumerate(obj1.getTimeInterval()):
        for j,t2 in enumerate(obj2.getTimeInterval()):
            pets[i,j] = abs(t1-t2)
    # pairwise distances between every position of one trajectory and every position of the other
    distances = cdist(positions1, positions2, metric = 'euclidean')
    smallDistances = (distances <= collisionDistanceThreshold)
    if smallDistances.any():
        # among the position pairs close enough, keep the smallest time difference
        smallPets = pets[smallDistances]
        petIdx = smallPets.argmin()
        distanceIndices = argwhere(smallDistances)[petIdx]
        return smallPets[petIdx], obj1.getFirstInstant()+distanceIndices[0], obj2.getFirstInstant()+distanceIndices[1]
    else:
        # the trajectories never come within the distance threshold
        return None, None, None
+
def predictPosition(self, instant, nTimeSteps, externalAcceleration = Point(0,0)):
    '''Predicts the position of object at instant+deltaT,
    at constant speed
    (delegates to predictPositionNoLimit with the position and velocity at instant)'''
    return predictPositionNoLimit(nTimeSteps, self.getPositionAtInstant(instant), self.getVelocityAtInstant(instant), externalAcceleration)
+
def projectCurvilinear(self, alignments, halfWidth = 3):
    '''Sets self.curvilinearPositions from the projection of the positions
    on the alignments (see CurvilinearTrajectory.fromTrajectoryProjection)'''
    self.curvilinearPositions = CurvilinearTrajectory.fromTrajectoryProjection(self.getPositions(), alignments, halfWidth)
+
def computeSmoothTrajectory(self, minCommonIntervalLength):
    '''Computes the trajectory as the mean of all features
    if a feature exists, its position is

    Warning work in progress
    TODO? not use the first/last 1-.. positions'''
    nFeatures = len(self.features)
    if nFeatures == 0:
        print('Empty object features\nCannot compute smooth trajectory')
    else:
        # compute the relative position vectors
        relativePositions = {} # relativePositions[(i,j)] is the position of j relative to i
        for i in range(nFeatures):
            for j in range(i):
                fi = self.features[i]
                fj = self.features[j]
                inter = fi.commonTimeInterval(fj)
                if inter.length() >= minCommonIntervalLength:
                    # slice each feature's coordinate lists down to the common interval
                    xi = array(fi.getXCoordinates()[inter.first-fi.getFirstInstant():int(fi.length())-(fi.getLastInstant()-inter.last)])
                    yi = array(fi.getYCoordinates()[inter.first-fi.getFirstInstant():int(fi.length())-(fi.getLastInstant()-inter.last)])
                    xj = array(fj.getXCoordinates()[inter.first-fj.getFirstInstant():int(fj.length())-(fj.getLastInstant()-inter.last)])
                    yj = array(fj.getYCoordinates()[inter.first-fj.getFirstInstant():int(fj.length())-(fj.getLastInstant()-inter.last)])
                    # robust (median) offset of feature j relative to feature i
                    relativePositions[(i,j)] = Point(median(xj-xi), median(yj-yi))
                    relativePositions[(j,i)] = -relativePositions[(i,j)]
        # NOTE(review): relativePositions is computed but not used or returned yet (work in progress per docstring)
+
def computeBoundingPolygon(self, instant):
    '''Returns a bounding polygon (list of points, 4 for a rectangle)
    around the feature positions at instant, oriented along the velocity at instant
    None (with a message) if no features are loaded

    TODO add method argument if using different methods/shapes'''
    if not self.hasFeatures():
        print('Object {} has no features'.format(self.getNum()))
        return None
    positions = [f.getPositionAtInstant(instant) for f in self.getFeatures() if f.existsAtInstant(instant)]
    return Point.boundingRectangle(positions, self.getVelocityAtInstant(instant))
+
def motDistanceAtInstant(self, obj, instant):
    '''Returns distance for computing CLEAR MOT metrics
    (returns an actual value, otherwise munkres does not terminate)
    distance is the norm-2 distance between the two object positions at instant'''
    return Point.distanceNorm2(self.getPositionAtInstant(instant), obj.getPositionAtInstant(instant))
+
+    ###
+    # User Type Classification
+    ###
def classifyUserTypeSpeedMotorized(self, threshold, aggregationFunc = median, nInstantsIgnoredAtEnds = 0):
    '''Classifies slow and fast road users
    slow: non-motorized -> pedestrians
    fast: motorized -> cars

    aggregationFunc can be any function that can be applied to a vector of speeds, including percentile:
    aggregationFunc = lambda x: percentile(x, percentileFactor) # where percentileFactor is 85 for 85th percentile'''
    aggregated = aggregationFunc(self.getSpeeds(nInstantsIgnoredAtEnds))
    if aggregated >= threshold:
        self.setUserType(userType2Num['car'])
    else:
        self.setUserType(userType2Num['pedestrian'])
+
def classifyUserTypeSpeed(self, speedProbabilities, aggregationFunc = median, nInstantsIgnoredAtEnds = 0):
    '''Classifies road user per road user type
    speedProbabilities is a dictionary, indexed by user type names,
    of functions returning P(speed|class); the user type maximizing the
    probability of the (cached) aggregated speed is set
    Returns the probabilities for each class

    for simple threshold classification, simply pass non-overlapping indicator functions (membership)
    e.g. def indic(x):
    if abs(x-mu) < sigma:
    return 1
    else:
    return x'''
    if not hasattr(self, 'aggregatedSpeed'):
        self.aggregatedSpeed = aggregationFunc(self.getSpeeds(nInstantsIgnoredAtEnds))
    userTypeProbabilities = {userType2Num[name]: proba(self.aggregatedSpeed)
                             for name, proba in speedProbabilities.items()}
    self.setUserType(utils.argmaxDict(userTypeProbabilities))
    return userTypeProbabilities
+
def initClassifyUserTypeHoGSVM(self, aggregationFunc, pedBikeCarSVM, bikeCarSVM = None, pedBikeSpeedTreshold = float('Inf'), bikeCarSpeedThreshold = float('Inf'), nInstantsIgnoredAtEnds = 0, homography = None, intrinsicCameraMatrix = None, distortionCoefficients = None, carClassifier = None):
    '''Initializes the data structures for classification:
    the aggregated speed, the appearance classifier chosen from the speed
    thresholds, the feature positions projected to image space,
    and an empty dict of per-instant user types

    Fix: carClassifier was referenced but never defined, raising a NameError
    whenever the aggregated speed reached bikeCarSpeedThreshold; it is now
    an explicit optional parameter (backward compatible)

    TODO? compute speed for longest feature?'''
    self.aggregatedSpeed = aggregationFunc(self.getSpeeds(nInstantsIgnoredAtEnds))
    # pick the appearance classifier matching the speed regime
    if self.aggregatedSpeed < pedBikeSpeedTreshold or bikeCarSVM is None:
        self.appearanceClassifier = pedBikeCarSVM
    elif self.aggregatedSpeed < bikeCarSpeedThreshold:
        self.appearanceClassifier = bikeCarSVM
    else:
        self.appearanceClassifier = carClassifier
    # project feature positions to image space for appearance classification
    if self.hasFeatures():
        for f in self.getFeatures():
            pp = cvutils.worldToImageProject(f.getPositions().asArray(), intrinsicCameraMatrix, distortionCoefficients, homography).tolist()
            f.positions = Trajectory(pp)
    self.userTypes = {}
+
def classifyUserTypeHoGSVMAtInstant(self, img, instant, width, height, px, py, minNPixels, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, blockNorm):
    '''Extracts the image box around the object
    (of square size max(width, height) of the box around the features,
    with an added px or py for width and height (around the box))
    computes HOG on this cropped image (with parameters rescaleSize, orientations, pixelsPerCell, cellsPerBlock)
    and applies the SVM model on it

    Stores the predicted user type for instant in self.userTypes
    ('unknown' if no usable crop could be extracted)'''
    croppedImg = cvutils.imageBox(img, self, instant, width, height, px, py, minNPixels)
    if croppedImg is not None and len(croppedImg) > 0:
        # flatten the HOG descriptor into a single sample for the classifier
        hog = cvutils.HOG(croppedImg, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, blockNorm)
        self.userTypes[instant] = self.appearanceClassifier.predict(hog.reshape(1,hog.size))
    else:
        self.userTypes[instant] = userType2Num['unknown']
+
def classifyUserTypeHoGSVM(self, pedBikeCarSVM = None, width = 0, height = 0, homography = None, images = None, bikeCarSVM = None, pedBikeSpeedTreshold = float('Inf'), bikeCarSpeedThreshold = float('Inf'), minSpeedEquiprobable = -1, speedProbabilities = None, aggregationFunc = median, maxPercentUnknown = 0.5, nInstantsIgnoredAtEnds = 0, px = 0.2, py = 0.2, minNPixels = 800, rescaleSize = (64, 64), orientations = 9, pixelsPerCell = (8,8), cellsPerBlock = (2,2), blockNorm = 'L2-Hys'):
    '''Agregates SVM detections in each image and returns probability
    (proportion of instants with classification in each category)

    images is a dictionary of images indexed by instant
    With default parameters, the general (ped-bike-car) classifier will be used
    Considered categories are the keys of speedProbabilities

    Fix: the per-instant call passed homography in place of width, shifting
    every following argument, and omitted blockNorm; blockNorm is now a
    parameter (default 'L2-Hys', presumably the block normalization used
    when training the HOG-SVM models -- to confirm) and the call is corrected
    (homography itself is no longer used here)'''
    if not hasattr(self, 'aggregatedSpeed') or not hasattr(self, 'userTypes'):
        print('Initializing the data structures for classification by HoG-SVM')
        self.initClassifyUserTypeHoGSVM(aggregationFunc, pedBikeCarSVM, bikeCarSVM, pedBikeSpeedTreshold, bikeCarSpeedThreshold, nInstantsIgnoredAtEnds)

    if len(self.userTypes) != self.length() and images is not None: # if classification has not been done previously
        for t in self.getTimeInterval():
            if t not in self.userTypes:
                self.classifyUserTypeHoGSVMAtInstant(images[t], t, width, height, px, py, minNPixels, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, blockNorm)
    # compute P(Speed|Class)
    if speedProbabilities is None or self.aggregatedSpeed < minSpeedEquiprobable: # equiprobable information from speed
        userTypeProbabilities = {userType2Num['car']: 1., userType2Num['pedestrian']: 1., userType2Num['bicycle']: 1.}
    else:
        userTypeProbabilities = {userType2Num[userTypename]: speedProbabilities[userTypename](self.aggregatedSpeed) for userTypename in speedProbabilities}
    # compute P(Class|Appearance): count the instants classified as each user type
    nInstantsUserType = {userTypeNum: 0 for userTypeNum in userTypeProbabilities}
    nInstantsUserType[userType2Num['unknown']] = 0
    for t in self.userTypes:
        nInstantsUserType[self.userTypes[t]] += 1
    # result is P(Class|Appearance) x P(Speed|Class)
    if nInstantsUserType[userType2Num['unknown']] < maxPercentUnknown*self.length(): # if not too many unknowns
        for userTypeNum in userTypeProbabilities:
            userTypeProbabilities[userTypeNum] *= nInstantsUserType[userTypeNum]
    # class is the user type that maximizes user type probabilities
    if nInstantsUserType[userType2Num['unknown']] >= maxPercentUnknown*self.length() and (speedProbabilities is None or self.aggregatedSpeed < minSpeedEquiprobable): # if no speed information and too many unknowns
        self.setUserType(userType2Num['unknown'])
    else:
        self.setUserType(utils.argmaxDict(userTypeProbabilities))
+
def classifyUserTypeArea(self, areas, homography):
    '''Classifies the object based on its location (projected to image space)
    areas is a dictionary of matrix of the size of the image space
    for different road users possible locations, indexed by road user type names
    Returns a dict user type -> number of trajectory positions falling in that type's area

    Fixes: hasattr was called with the undefined name projectedPositions instead
    of the string 'projectedPositions'; positions were read from an undefined obj
    instead of self; the counter was indexed with undefined userType2Enum instead
    of userType2Num

    TODO: areas could be a wrapper object with a contains method that would work for polygons and images (with wrapper class)
    skip frames at beginning/end?'''
    print('not implemented/tested yet')
    if not hasattr(self, 'projectedPositions'):
        # project (and cache) the positions to image space if a homography is provided
        if homography is not None:
            self.projectedPositions = self.positions.homographyProject(homography)
        else:
            self.projectedPositions = self.positions
    possibleUserTypes = {userType: 0 for userType in range(len(userTypenames))}
    for p in self.projectedPositions:
        for userTypename in areas:
            if areas[userTypename][p.x, p.y] != 0:
                possibleUserTypes[userType2Num[userTypename]] += 1
    # what to do: threshold for most common type? self.setUserType()
    return possibleUserTypes
+
@staticmethod
def collisionCourseDotProduct(movingObject1, movingObject2, instant):
    '''Dot product of the relative position and relative velocity at instant
    A positive result indicates that the road users are getting closer'''
    return Point.dot(movingObject1.getPositionAtInstant(instant)-movingObject2.getPositionAtInstant(instant),
                     movingObject2.getVelocityAtInstant(instant)-movingObject1.getVelocityAtInstant(instant))
+
@staticmethod
def collisionCourseCosine(movingObject1, movingObject2, instant):
    '''Cosine of the angle between relative position and relative velocity at instant
    A positive result indicates that the road users are getting closer'''
    deltap = movingObject1.getPositionAtInstant(instant)-movingObject2.getPositionAtInstant(instant)
    deltav = movingObject2.getVelocityAtInstant(instant)-movingObject1.getVelocityAtInstant(instant)
    return Point.cosine(deltap, deltav)
+
+
class Prototype(object):
    '''Lightweight reference to a prototype trajectory:
    the file it is stored in, its number there, its trajectory type,
    and optionally the number of trajectories it matched
    The actual moving object is loaded separately (setMovingObject)'''

    def __init__(self, filename, num, trajectoryType, nMatchings = None):
        self.filename = filename
        self.num = num
        self.trajectoryType = trajectoryType
        self.nMatchings = nMatchings
        self.movingObject = None

    def getFilename(self):
        return self.filename

    def getNum(self):
        return self.num

    def getTrajectoryType(self):
        return self.trajectoryType

    def getNMatchings(self):
        return self.nMatchings

    def getMovingObject(self):
        return self.movingObject

    def setMovingObject(self, o):
        self.movingObject = o

    def __str__(self):
        return '{} {} {}'.format(self.filename, self.num, self.trajectoryType)

    def __eq__(self, p2):
        # identity is (filename, num, trajectoryType); nMatchings is ignored
        return self.filename == p2.filename and self.num == p2.num and self.trajectoryType == p2.trajectoryType

    def __hash__(self):
        return hash((self.filename, self.num, self.trajectoryType))
+
+##################
+# Annotations
+##################
+
class BBMovingObject(MovingObject):
    '''Class for a moving object represented as a bounding box
    used for series of ground truth annotations using bounding boxes
     and for the output of Urban Tracker http://www.jpjodoin.com/urbantracker/

    By default in image space

    Its center is the center of the box (generalize to other shapes?)
    (computed after projecting if homography available)
    '''

    def __init__(self, topLeftPositions, bottomRightPositions, num = None, timeInterval = None, userType = userType2Num['unknown']):
        super(BBMovingObject, self).__init__(num, timeInterval, userType = userType)
        # keep the two corner trajectories of the bounding boxes
        self.topLeftPositions = topLeftPositions.getPositions()
        self.bottomRightPositions = bottomRightPositions.getPositions()

    def computeCentroidTrajectory(self, homography = None):
        'Sets positions to the box centers, projected by homography if given'
        centers = self.topLeftPositions.add(self.bottomRightPositions).__mul__(0.5)
        if homography is None:
            self.positions = centers
        else:
            self.positions = centers.homographyProject(homography)
+
def matchObjects(obj1, obj2, instant, matchingDistance):
    '''Indicates if obj1 matches obj2 at instant with threshold matchingDistance
    Returns the distance if below matchingDistance, matchingDistance+1 otherwise
    (returns an actual value, otherwise munkres does not terminate)'''
    d = Point.distanceNorm2(obj1.getPositionAtInstant(instant), obj2.getPositionAtInstant(instant))
    return d if d < matchingDistance else matchingDistance + 1
+
# TODO class to have different matching methods, eg with something like matchObjects
def computeClearMOT(annotations, objects, matchingDistance, firstInstant, lastInstant, returnMatches = False, debug = False):
    '''Computes the CLEAR MOT metrics

    Reference:
    Keni, Bernardin, and Stiefelhagen Rainer. "Evaluating multiple object tracking performance: the CLEAR MOT metrics." EURASIP Journal on Image and Video Processing 2008 (2008)

    objects and annotations are supposed to be in the same space
    current implementation is BBMovingObject (bounding boxes)
    matchingDistance is threshold on matching between annotation and object

    TO: tracker output (objects)
    GT: ground truth (annotations)

    Output: returns motp, mota, mt, mme, fpt, gt
    mt number of missed GT.frames (sum of the number of GT not detected in each frame)
    mme number of mismatches
    fpt number of false alarm.frames (tracker objects without match in each frame)
    gt number of GT.frames

    if returnMatches is True, return as 2 new arguments the GT and TO matches
    matches is a dict
    matches[i] is the list of matches for GT/TO i
    the list of matches is a dict, indexed by time, for the TO/GT id matched at time t
    (an instant t not present in matches[i] at which GT/TO exists means a missed detection or false alarm)

    TODO: Should we use the distance as weights or just 1/0 if distance below matchingDistance?
    (add argument useDistanceForWeights = False)'''
    from munkres import Munkres

    munk = Munkres()
    dist = 0. # total distance between GT and TO
    ct = 0 # number of associations between GT and tracker output in each frame
    gt = 0 # number of GT.frames
    mt = 0 # number of missed GT.frames (sum of the number of GT not detected in each frame)
    fpt = 0 # number of false alarm.frames (tracker objects without match in each frame)
    mme = 0 # number of mismatches
    matches = {} # match[i] is the tracker track associated with GT i (using object references)
    if returnMatches:
        gtMatches = {a.getNum():{} for a in annotations}
        toMatches = {o.getNum():{} for o in objects}
    else:
        gtMatches = None
        toMatches = None
    for t in range(firstInstant, lastInstant+1):
        # keep last frame's matches to detect identity switches (mismatches) below
        previousMatches = matches.copy()
        # go through currently matched GT-TO and check if they are still matched withing matchingDistance
        toDelete = []
        for a in matches:
            if a.existsAtInstant(t) and matches[a].existsAtInstant(t):
                d = a.motDistanceAtInstant(matches[a], t)
                if d < matchingDistance:
                    dist += d
                else:
                    toDelete.append(a)
            else:
                toDelete.append(a)
        for a in toDelete:
            del matches[a]

        # match all unmatched GT-TO
        matchedGTs = list(matches.keys())
        matchedTOs = list(matches.values())
        costs = []
        unmatchedGTs = [a for a in annotations if a.existsAtInstant(t) and a not in matchedGTs]
        unmatchedTOs = [o for o in objects if o.existsAtInstant(t) and o not in matchedTOs]
        nGTs = len(matchedGTs)+len(unmatchedGTs)
        nTOs = len(matchedTOs)+len(unmatchedTOs)
        if len(unmatchedTOs) > 0:
            for a in unmatchedGTs:
                costs.append([a.motDistanceAtInstant(o, t) for o in unmatchedTOs])
        if len(costs) > 0:
            # Hungarian algorithm on the GT x TO distance matrix; keep only pairs below threshold
            newMatches = munk.compute(costs)
            for k,v in newMatches:
                if costs[k][v] < matchingDistance:
                    matches[unmatchedGTs[k]]=unmatchedTOs[v]
                    dist += costs[k][v]
        if debug:
            print('{} '.format(t)+', '.join(['{} {}'.format(k.getNum(), v.getNum()) for k,v in matches.items()]))
        if returnMatches:
            for a,o in matches.items():
                gtMatches[a.getNum()][t] = o.getNum()
                toMatches[o.getNum()][t] = a.getNum()

        # compute metrics elements
        ct += len(matches)
        mt += nGTs-len(matches)
        fpt += nTOs-len(matches)
        gt += nGTs
        # compute mismatches
        # for gt that do not appear in both frames, check if the corresponding to was matched to another gt in previous/next frame
        mismatches = []
        for a in matches:
            if a in previousMatches:
                if matches[a] != previousMatches[a]:
                    mismatches.append(a)
            elif matches[a] in list(previousMatches.values()):
                mismatches.append(matches[a])
        for a in previousMatches:
            if a not in matches and previousMatches[a] in list(matches.values()):
                mismatches.append(previousMatches[a])
        if debug:
            for mm in set(mismatches):
                print('{} {}'.format(type(mm), mm.getNum()))
        # some object mismatches may appear twice
        mme += len(set(mismatches))

    # motp: average matching distance; mota: 1 - (misses + false alarms + mismatches) / GT frames
    if ct > 0:
        motp = dist/ct
    else:
        motp = None
    if gt > 0:
        mota = 1.-float(mt+fpt+mme)/gt
    else:
        mota = None
    return motp, mota, mt, mme, fpt, gt, gtMatches, toMatches
+
def plotRoadUsers(objects, colors):
    '''Plots the trajectories of objects with a color per user type
    colors is a PlottingPropertyValues instance'''
    from matplotlib.pyplot import figure, axis
    figure()
    for o in objects:
        o.plot(colors.get(o.userType))
    axis('equal')
+
+
+if __name__ == "__main__":
+    import doctest
+    import unittest
+    suite = doctest.DocFileSuite('tests/moving.txt')
+    #suite = doctest.DocTestSuite()
+    unittest.TextTestRunner().run(suite)
+    #doctest.testmod()
+    #doctest.testfile("example.txt")
+    if shapelyAvailable:
+        suite = doctest.DocFileSuite('tests/moving_shapely.txt')
+        unittest.TextTestRunner().run(suite)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/objectsmoothing.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,246 @@
+from trafficintelligence import storage, moving, utils
+
+from math import atan2, degrees, sin, cos, pi
+from numpy import median
+
+import matplotlib.pyplot as plt
+
+def findNearest(feat, featureSet,t,reverse=True):
+    '''Returns the feature of featureSet whose position at instant t is
+    closest (by moving.Point.distanceNorm2) to feat's position at the
+    adjacent instant: t+1 if reverse (searching backwards in time),
+    t-1 otherwise'''
+    dist={}
+    for f in featureSet:
+        if reverse:
+            dist[f]= moving.Point.distanceNorm2(feat.getPositionAtInstant(t+1),f.getPositionAtInstant(t))
+        else:
+            dist[f]= moving.Point.distanceNorm2(feat.getPositionAtInstant(t-1),f.getPositionAtInstant(t))
+    return min(dist, key=dist.get) # key with the smallest distance (argmin over the dict)
+    
+def getFeatures(obj, featureID):
+    '''Returns a list of [feature, first instant, last instant, offset]
+    entries covering the whole time interval of obj, starting from the
+    feature with index featureID and chaining to the nearest neighboring
+    features before and after it; the offset (a moving.Point) accumulates
+    the position jumps between successive features'''
+    currentFeature = obj.getFeature(featureID)
+    first = currentFeature.getFirstInstant()
+    last = currentFeature.getLastInstant()
+    featureList=[[currentFeature,first,last,moving.Point(0,0)]]
+    # find the features to fill in the beginning of the object existence
+    while first != obj.getFirstInstant():
+        delta=featureList[-1][3]
+        featureSet = [f for f in obj.getFeatures() if f.existsAtInstant(first-1)]
+        feat = findNearest(currentFeature,featureSet,first-1,reverse=True)
+        # offset computed at the common instant if the features overlap,
+        # otherwise across the one-frame gap
+        if feat.existsAtInstant(first):
+            featureList.append([feat,feat.getFirstInstant(),first-1,(currentFeature.getPositionAtInstant(first)-feat.getPositionAtInstant(first))+delta])
+        else:
+            featureList.append([feat,feat.getFirstInstant(),first-1,(currentFeature.getPositionAtInstant(first)-feat.getPositionAtInstant(first-1))+delta])
+        currentFeature = feat
+        first= feat.getFirstInstant()
+    # find the features to fill in the end of the object existence
+    delta=moving.Point(0,0)
+    currentFeature = obj.getFeature(featureID) # need to reinitialize
+    while last!= obj.getLastInstant():
+        featureSet = [f for f in obj.getFeatures() if f.existsAtInstant(last+1)]
+        feat = findNearest(currentFeature,featureSet,last+1,reverse=False)
+        if feat.existsAtInstant(last):
+            featureList.append([feat,last+1,feat.getLastInstant(),(currentFeature.getPositionAtInstant(last)-feat.getPositionAtInstant(last))+delta])
+        else:
+            featureList.append([feat,last+1,feat.getLastInstant(),(currentFeature.getPositionAtInstant(last)-feat.getPositionAtInstant(last+1))+delta])
+        currentFeature = feat
+        last= feat.getLastInstant()
+        delta=featureList[-1][3]
+    return featureList
+    
+def buildFeature(obj, featureID, num = 1):
+    '''Builds a continuous feature covering the whole existence of obj by
+    stitching together the features found by getFeatures, each position
+    shifted by the accumulated offset stored with it; returns it as a
+    moving.MovingObject numbered num'''
+    featureList= getFeatures(obj, featureID)
+    tmp={}
+    delta={}  # NOTE(review): unused variable
+    for i in featureList:
+        for t in range(i[1],i[2]+1):
+            tmp[t]=[i[0],i[3]]  # feature and offset applicable at instant t
+    newTraj = moving.Trajectory()
+    
+    for instant in obj.getTimeInterval():
+        newTraj.addPosition(tmp[instant][0].getPositionAtInstant(instant)+tmp[instant][1])
+    newFeature= moving.MovingObject(num,timeInterval=obj.getTimeInterval(),positions=newTraj)
+    return newFeature
+
+def getBearing(p1,p2,p3):
+    '''Returns [distanceNorm2(p1, p2), bearing of p1->p3, bearing of p1->p2,
+    difference of the two bearings]; bearings are in degrees, measured
+    clockwise from the y axis (90 - mathematical angle, modulo 360)'''
+    angle = degrees(atan2(p3.y -p1.y, p3.x -p1.x))
+    bearing1 = (90 - angle) % 360
+    angle2 = degrees(atan2(p2.y -p1.y, p2.x -p1.x))
+    bearing2 = (90 - angle2) % 360    
+    dist= moving.Point.distanceNorm2(p1, p2)
+    return [dist,bearing1,bearing2,bearing2-bearing1]
+
+#Quantitative analysis "CSJ" functions    
+def computeVelocities(obj, smoothing=True, halfWidth=3):  #compute velocities from positions
+    '''Computes velocities as finite differences of successive positions
+    (the last instant duplicates the previous velocity), optionally
+    smoothed component-wise with a moving window of half-width halfWidth;
+    returns a dict mapping each instant to a moving.Point'''
+    velocities={}
+    for i in list(obj.timeInterval)[:-1]:
+        p1= obj.getPositionAtInstant(i)
+        p2= obj.getPositionAtInstant(i+1)
+        velocities[i]=p2-p1        
+    velocities[obj.getLastInstant()]= velocities[obj.getLastInstant()-1]  # duplicate last point
+    if smoothing:
+        # smooth x and y components separately, then rebuild the points
+        velX= [velocities[y].aslist()[0] for y in sorted(velocities.keys())]
+        velY= [velocities[y].aslist()[1] for y in sorted(velocities.keys())]
+        v1= list(utils.filterMovingWindow(velX, halfWidth))
+        v2= list(utils.filterMovingWindow(velY, halfWidth))
+        smoothedVelocity={}
+        for t,i in enumerate(sorted(velocities.keys())):
+            smoothedVelocity[i]=moving.Point(v1[t], v2[t])
+        velocities=smoothedVelocity
+    return velocities
+    
+def computeAcceleration(obj,fromPosition=True):
+    '''Computes accelerations as finite differences of velocities, either
+    recomputed from positions without smoothing (fromPosition=True) or
+    taken from the object's stored velocities; returns a dict mapping
+    instants to moving.Point accelerations'''
+    acceleration={}
+    if fromPosition:
+        velocities=computeVelocities(obj,False,1)
+        for i in sorted(velocities.keys()):
+            if i != sorted(velocities.keys())[-1]:  # skip the last instant (no successor)
+                acceleration[i]= velocities[i+1]-velocities[i]
+    else:
+        for i in list(obj.timeInterval)[:-1]:
+            v1= obj.getVelocityAtInstant(i)
+            v2= obj.getVelocityAtInstant(i+1)
+            acceleration[i]= v2-v1
+    return acceleration
+    
+def computeJerk(obj,fromPosition=True):
+    '''Computes the norm of the jerk (finite difference of accelerations)
+    at each instant; returns a dict mapping instants to scalar norms'''
+    jerk={}
+    acceleration=computeAcceleration(obj,fromPosition=fromPosition)
+    for i in sorted(acceleration.keys()):
+        if i != sorted(acceleration.keys())[-1]:  # skip the last instant (no successor)
+            jerk[i] = (acceleration[i+1]-acceleration[i]).norm2()
+    return jerk
+    
+def sumSquaredJerk(obj,fromPosition=True):
+    '''Returns the sum of squared jerk norms over the trajectory
+    (the "CSJ" smoothness criterion used by smoothObject)'''
+    jerk= computeJerk(obj,fromPosition=fromPosition)
+    t=0
+    for i in sorted(jerk.keys()):
+        t+= jerk[i]* jerk[i]
+    return t
+    
+def smoothObjectTrajectory(obj, featureID,newNum,smoothing=False,halfWidth=3,create=False):
+    results=[]    
+    bearing={}
+    if create:
+        feature = buildFeature(obj, featureID , num=1) # why num=1
+    else:
+        feature = obj.getFeature(featureID)
+    for t in feature.getTimeInterval():
+        p1= feature.getPositionAtInstant(t)
+        p2= obj.getPositionAtInstant(t)
+        if t!=feature.getLastInstant():
+            p3= feature.getPositionAtInstant(t+1)
+        else:
+            p1= feature.getPositionAtInstant(t-1)
+            p3= feature.getPositionAtInstant(t)
+        bearing[t]= getBearing(p1,p2,p3)[1]        
+        results.append(getBearing(p1,p2,p3))
+    
+    medianResults=median(results,0)
+    dist= medianResults[0]
+    angle= medianResults[3]
+    
+    for i in sorted(bearing.keys()):
+        bearing[i]= bearing[i]+angle
+
+    if smoothing:
+        bearingInput=[]
+        for i in sorted(bearing.keys()):
+            bearingInput.append(bearing[i])
+        import utils
+        bearingOut=utils.filterMovingWindow(bearingInput, halfWidth)
+        for t,i in enumerate(sorted(bearing.keys())):
+            bearing[i]=bearingOut[t]
+        
+        #solve a smoothing problem in case of big drop in computing bearing (0,360)    
+        for t,i in enumerate(sorted(bearing.keys())):
+            if i!= max(bearing.keys()) and abs(bearingInput[t] - bearingInput[t+1])>=340:
+                for x in range(max(i-halfWidth,min(bearing.keys())),min(i+halfWidth,max(bearing.keys()))+1):
+                    bearing[x]=bearingInput[t-i+x]
+
+    translated = moving.Trajectory()
+    for t in feature.getTimeInterval():
+        p1= feature.getPositionAtInstant(t)
+        p1.x = p1.x + dist*sin(bearing[t]*pi/180)
+        p1.y = p1.y + dist*cos(bearing[t]*pi/180)
+        translated.addPosition(p1)
+        
+    #modify first and last un-smoothed positions (half width)
+    if smoothing:
+        d1= translated[halfWidth]- feature.positions[halfWidth]
+        d2= translated[-halfWidth-1]- feature.positions[-halfWidth-1]
+        for i in range(halfWidth):
+            p1= feature.getPositionAt(i)+d1
+            p2= feature.getPositionAt(-i-1)+d2
+            translated.setPosition(i,p1)
+            translated.setPosition(-i-1,p2)
+        
+    newObj= moving.MovingObject(newNum,timeInterval=feature.getTimeInterval(),positions=translated)
+    return newObj
+    
+def smoothObject(obj, newNum, minLengthParam = 0.7, smoothing = False, plotResults = True, halfWidth = 3, _computeVelocities = True, optimize = True, create = False):
+    '''Computes a smoother trajectory for the object
+    and optionnally smoother velocities
+    
+    The trajectory is the median of the trajectories computed from each
+    feature at least minLengthParam times as long as the object; if
+    optimize is True, the result is kept only if it has a lower sum of
+    squared jerk than the original object.
+
+    The object should have its features in obj.features
+    TODO: check whether features are necessary'''
+    if not obj.hasFeatures():
+        print('Object {} has an empty list of features: please load and add them using obj.setFeatures(features)'.format(obj.getNum()))
+        from sys import exit
+        exit()
+
+    # keep only long enough features; fall back to the longest one
+    featureList=[i for i,f in enumerate(obj.getFeatures()) if f.length() >= minLengthParam*obj.length()]
+    if featureList==[]:
+        featureList.append(utils.argmaxDict({i:f.length() for i,f in enumerate(obj.getFeatures())}))
+        create = True
+    newObjects = []
+    for featureID in featureList: # featureID should be the index in the list of obj.features
+        newObjects.append(smoothObjectTrajectory(obj, featureID, newNum, smoothing = smoothing, halfWidth = halfWidth, create = create))
+
+    # median position over all candidate trajectories at each instant
+    newTranslated = moving.Trajectory()
+    newInterval = []
+    for t in obj.getTimeInterval():
+        xCoord=[]
+        yCoord=[]
+        for i in newObjects:
+            if i.existsAtInstant(t):
+                p1= i.getPositionAtInstant(t)
+                xCoord.append(p1.x)
+                yCoord.append(p1.y)
+        if xCoord != []:
+            tmp= moving.Point(median(xCoord), median(yCoord))
+            newInterval.append(t)
+            newTranslated.addPosition(tmp)
+    
+    newObj= moving.MovingObject(newNum, timeInterval = moving.TimeInterval(min(newInterval),max(newInterval)),positions=newTranslated)
+        
+    if _computeVelocities:
+        tmpTraj = moving.Trajectory()
+        velocities= computeVelocities(newObj,True,5)
+        for i in sorted(velocities.keys()):
+            tmpTraj.addPosition(velocities[i])
+        newObj.velocities=tmpTraj
+    else:
+        newObj.velocities=obj.velocities
+    
+    if optimize:
+        # keep the new trajectory only if it reduces the sum of squared jerk
+        csj1= sumSquaredJerk(obj,fromPosition=True)
+        csj2= sumSquaredJerk(newObj,fromPosition=True)
+        if csj1<csj2:
+            newObj=obj
+            newObj.velocities=obj.velocities
+        if _computeVelocities and csj1>=csj2:
+            # same comparison for the velocity-based jerk
+            csj3= sumSquaredJerk(obj,fromPosition=False)
+            csj4= sumSquaredJerk(newObj,fromPosition=False)
+            if csj4<=csj3:
+                newObj.velocities= obj.velocities
+
+    # carry over the metadata of the original object
+    newObj.featureNumbers=obj.featureNumbers
+    newObj.features=obj.getFeatures()
+    newObj.userType=obj.userType
+
+    if plotResults:
+        plt.figure()
+        plt.title('objects_id = {}'.format(obj.num))
+        for i in featureList:
+            obj.getFeature(i).plot('cx-')
+        obj.plot('rx-')
+        newObj.plot('gx-')        
+    return newObj
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/pavement.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,311 @@
+#! /usr/bin/env python
+'''Tools for processing and analyzing pavement marking data'''
+
+import datetime
+
+import numpy as np
+
+from trafficintelligence import utils
+
+# paint type codes (French labels: water-based, epoxy, alkyd, other)
+paintTypes = {0: "Non-existant",
+              1: "Eau",
+              2: "Epoxy",
+              3: "Alkyde",
+              4: "Autre"}
+
+# durability classes mapped to a representative retained-marking percentage
+# (the class ranges are given in the trailing comments)
+durabilities = {1: 98, #96 to 100
+                2: 85, #75 to 96
+                3: 62, #50 to 75
+                4: 32, #15 to 50
+                5: 7 #0 to 15
+                }
+
+# MTQ road functional class codes (French labels)
+roadFunctionalClasses = {40: "Collectrice",
+                         20: "Nationale",
+                         30: "Regionale",
+                         10: "Autoroute",
+                         60: "Acces ressources",
+                         51: "Local 1",
+                         52: "Local 2",
+                         53: "Local 3",
+                         15: "Aut (PRN)",
+                         25: "Nat (PRN)",
+                         70: "Acces isolees",
+                         99: "Autres"}
+
+def caracteristiques(rtss, maintenanceLevel, rtssWeatherStation, fmr, paintType):
+    '''Computes characteristic data for the RTSS (class rtss) 
+
+    Returns a tuple (winter maintenance requirement, weather station x/y,
+    pavement age, functional class, pavement type, milieu, AADT, truck AADT,
+    speed limit, most common paint type right/left/center); fields that
+    cannot be determined are returned as empty strings
+
+    maintenanceLevel = pylab.csv2rec('C:\\Users\Alexandre\Desktop\Projet_maitrise_recherche\BDD_access\\analyse_donnees_deneigement\\exigence_circuits.txt', delimiter = ';')
+    rtssWeatherStation = pylab.csv2rec('C:\\Users\Alexandre\Desktop\Projet_maitrise_recherche\stations_environnement_canada\\rtssWeatherStation\juste_pour_rtss_avec_donnees_entretien_hiv\\rtssWeatherStation_EC3.txt', delimiter = ',')
+    fmr = pylab.csv2rec('C:\\Users\Alexandre\Desktop\Projet_maitrise_recherche\BDD_access\\analyse_donnees_deneigement\\fmr.txt', delimiter = ';')
+    paintType = pylab.csv2rec('C:\\Users\Alexandre\Desktop\Projet_maitrise_recherche\BDD_access\\analyse_donnees_deneigement\\type_peinture.txt', delimiter = ';')
+    '''
+    # determine the winter maintenance (snow removal) requirement
+    if rtss.id in maintenanceLevel['rtss_debut']:
+        for i in range(len(maintenanceLevel)):
+            if maintenanceLevel['rtss_debut'][i] == rtss.id:
+                exigence = maintenanceLevel['exigence'][i]
+    else:
+        exigence = ''
+
+    # determine the average weather station coordinates x/y
+    if rtss.id in rtssWeatherStation['rtss']:
+        for i in range(len(rtssWeatherStation)):		
+            if rtssWeatherStation['rtss'][i] == rtss.id:
+                x_moy = rtssWeatherStation['x_moy'][i]
+                y_moy = rtssWeatherStation['y_moy'][i]
+    else:
+        x_moy, y_moy = '',''	
+
+    # determine the FMR information (most common value over matching records)
+    age_revtm, classe_fonct, type_revtm, milieu, djma, pourc_camions, vit_max = [], [], [], [], [], [], []
+    if rtss.id in fmr['rtss_debut']:
+        for i in range(len(fmr)):
+            if fmr['rtss_debut'][i] == rtss.id:
+                age_revtm.append(fmr['age_revtm'][i])
+                classe_fonct.append(fmr['des_clasf_fonct'][i])
+                type_revtm.append(fmr['des_type_revtm'][i])
+                milieu.append(fmr['des_cod_mil'][i])
+                djma.append(fmr['val_djma'][i])
+                pourc_camions.append(fmr['val_pourc_camns'][i])
+                vit_max.append(fmr['val_limt_vitss'][i])
+        age_revtm = utils.mostCommon(age_revtm)
+        classe_fonct = utils.mostCommon(classe_fonct)
+        type_revtm = utils.mostCommon(type_revtm)
+        milieu = utils.mostCommon(milieu)
+        djma = utils.mostCommon(djma)
+        vit_max = utils.mostCommon(vit_max)
+        if vit_max < 0:
+            vit_max = ''
+        pourc_camions = utils.mostCommon(pourc_camions)
+        if pourc_camions == "" or pourc_camions < 0:
+            djma_camions = ""
+        else:
+            # truck AADT derived from the truck percentage of the AADT
+            djma_camions = pourc_camions*djma/100
+    else:
+        age_revtm, classe_fonct, type_revtm, milieu, djma, djma_camions, vit_max  = '','','','','','',''
+
+    # determine the paint type (right/left/center lines)
+    peinture_rd, peinture_rg, peinture_cl = [], [], []
+    peinture_lrd, peinture_lrg, peinture_lc = 0,0,0
+    if rtss.id in paintType['rtss_debut_orig']:
+        for i in range(len(paintType)):
+            if paintType['rtss_debut_orig'][i] == rtss.id:
+                peinture_rd.append((paintType['peinture_rd'][i]))
+                peinture_rg.append((paintType['peinture_rg'][i]))
+                peinture_cl.append((paintType['peinture_cl'][i]))
+        peinture_lrd = utils.mostCommon(peinture_rd)
+        peinture_lrg = utils.mostCommon(peinture_rg)
+        peinture_lc = utils.mostCommon(peinture_cl)
+    else:
+        peinture_lrd, peinture_lrg, peinture_lc = '','',''		
+
+    return (exigence, x_moy, y_moy, age_revtm, classe_fonct, type_revtm, milieu, djma, djma_camions, vit_max, peinture_lrd, peinture_lrg, peinture_lc)
+
+def winterMaintenanceIndicators(data, startDate, endDate, circuitReference, snowThreshold):
+    '''Computes several winter maintenance indicators
+
+    Sums the water/snow/abrasive/salt and line quantities over the records
+    of circuitReference whose date shifted by 6 days falls in
+    (startDate, endDate]; empty strings are returned if the circuit is absent
+
+    data = entretien_hivernal = pylab.csv2rec('C:\\Users\Alexandre\Documents\Cours\Poly\Projet\mesures_entretien_hivernal\mesures_deneigement.txt', delimiter = ',')'''
+    somme_eau, somme_neige, somme_abrasif, somme_sel, somme_lc, somme_lrg, somme_lrd, compteur_premiere_neige, compteur_somme_abrasif = 0,0,0,0,0,0,0,0,0
+
+    if circuitReference in data['ref_circuit']:
+        for i in range(len(data)):
+            # the 6-day shift aligns the measurement week with the period of interest
+            if data['ref_circuit'][i] == circuitReference and (data['date'][i] + datetime.timedelta(days = 6)) <= endDate and (data['date'][i] + datetime.timedelta(days = 6)) > startDate:
+                compteur_premiere_neige += float(data['premiere_neige'][i])
+                somme_neige += float(data['neige'][i])
+                somme_eau += float(data['eau'][i])
+                somme_abrasif += float(data['abrasif'][i])
+                somme_sel += float(data['sel'][i])
+                somme_lc += float(data['lc'][i])
+                somme_lrg += float(data['lrg'][i])
+                somme_lrd += float(data['lrd'][i])
+                compteur_somme_abrasif += float(data['autre_abrasif_binaire'][i])
+        # binary indicators derived from the counters
+        if compteur_premiere_neige >= 1:
+            premiere_neige = 1
+        else:
+            premiere_neige = 0
+        if compteur_somme_abrasif >= 1:
+            autres_abrasifs = 1
+        else:
+            autres_abrasifs = 0
+        if somme_neige < snowThreshold:
+            neigeMTQ_sup_seuil = 0
+        else:
+            neigeMTQ_sup_seuil = 1
+    else:
+        somme_eau, somme_neige, somme_abrasif, somme_sel, somme_lc, somme_lrg, somme_lrd, premiere_neige, autres_abrasifs, neigeMTQ_sup_seuil = '','','','','','','','','',''
+
+    return (somme_eau, somme_neige, neigeMTQ_sup_seuil, somme_abrasif, somme_sel, somme_lc, somme_lrg, somme_lrd, premiere_neige, autres_abrasifs)
+
+def weatherIndicators(data, startDate, endDate, snowThreshold, weatherDatatype, minProportionMeasures = 0.):
+    '''Computes the indicators from Environment Canada files
+    (loaded as a recarray using csv2rec in data),
+    between start and end dates (datetime.datetime objects)
+
+    weatherDataType is to indicate Environnement Canada data ('ec') or else MTQ
+    minProportionMeasures is proportion of measures necessary to consider the indicators'''
+    nbre_jours_T_negatif,nbre_jours_gel_degel,pluie_tot,neige_tot,ecart_type_T = 0,0,0,0,0
+    compteur,nbre_jours_gel_consecutifs=0,0
+    tmoys = []
+    seuils_T = [20,15,10,5]
+    deltas_T = [0,0,0,0]
+    startIndex = np.nonzero(data['date'] == startDate)
+    nDays = int((endDate - startDate).days)+1
+    if len(startIndex) > 0 and startIndex+nDays <= len(data):
+        startIndex = startIndex[0]
+        for i in range(startIndex, startIndex+nDays):
+            if not np.isnan(data['tmax'][i]):
+                tmax = data['tmax'][i]
+            else:
+                tmax = None
+            if not np.isnan(data['tmin'][i]):
+                tmin = data['tmin'][i]
+            else:
+                tmin = None
+            if weatherDatatype == 'ec':
+                if data['pluie_tot'][i] is not None and not np.isnan(data['pluie_tot'][i]):
+                    pluie_tot  += data['pluie_tot'][i]
+                if data['neige_tot'][i] is not None and not np.isnan(data['neige_tot'][i]):
+                    neige_tot  += data['neige_tot'][i]
+            if tmax is not None:
+                if tmax < 0:
+                    nbre_jours_T_negatif += 1
+            if tmax is not None and tmin is not None:
+                if tmax > 0 and tmin < 0:
+                    nbre_jours_gel_degel += 1
+                for l in range(len(seuils_T)):
+                    if tmax - tmin >=seuils_T[l]:
+                        deltas_T[l] += 1
+            if not np.isnan(data['tmoy'][i]):
+                tmoys.append(data['tmoy'][i])
+            if tmax is not None:
+                if tmax < 0:
+                    compteur += 1
+                elif tmax >= 0 and compteur >= nbre_jours_gel_consecutifs:
+                    nbre_jours_gel_consecutifs = compteur
+                    compteur = 0
+                else:
+                    compteur = 0
+            nbre_jours_gel_consecutifs = max(nbre_jours_gel_consecutifs,compteur)
+    if len(tmoys) > 0 and float(len(tmoys))/nDays >= minProportionMeasures:
+        if tmoys != []:
+            ecart_type_T = np.std(tmoys)
+        else:
+            ecart_type = None
+        if neige_tot < snowThreshold:
+            neigeEC_sup_seuil = 0
+        else:
+            neigeEC_sup_seuil = 1
+        return (nbre_jours_T_negatif,nbre_jours_gel_degel, deltas_T, nbre_jours_gel_consecutifs, pluie_tot, neige_tot, neigeEC_sup_seuil, ecart_type_T)
+    else:
+        return [None]*2+[[None]*len(seuils_T)]+[None]*5
+
+def mtqWeatherIndicators(data, startDate, endDate,tmax,tmin,tmoy):
+    print("Deprecated, use weatherIndicators")
+    nbre_jours_T_negatif,nbre_jours_gel_degel,ecart_type_T = 0,0,0
+    compteur,nbre_jours_gel_consecutifs=0,0
+    tmoys = []
+    seuils_T = [20,15,10,5]
+    deltas_T = [0,0,0,0]
+    startIndex = np.nonzero(data['date'] == startDate)
+    nDays = (endDate - startDate).days+1
+    for i in range(startIndex, startIndex+nDays):
+        if tmax[i] < 0:
+            nbre_jours_T_negatif += 1
+        if tmax[i] > 0 and tmin[i] < 0:
+            nbre_jours_gel_degel += 1
+        for l in range(len(seuils_T)):
+            if tmax[i] - tmin[i] >=seuils_T[l]:
+                deltas_T[l] += 1
+        tmoys.append(tmoy[i])
+        if tmax[i] < 0:
+            compteur += 1
+        elif tmax[i] >= 0 and compteur >= nbre_jours_gel_consecutifs:
+            nbre_jours_gel_consecutifs = compteur
+            compteur = 0
+        else:
+            compteur = 0
+        nbre_jours_gel_consecutifs = max(nbre_jours_gel_consecutifs,compteur)
+        if tmoys != []:
+            ecart_type_T = np.std(tmoys)
+        else:
+            ecart_type = None
+
+    return (nbre_jours_T_negatif,nbre_jours_gel_degel, deltas_T, nbre_jours_gel_consecutifs, ecart_type_T)
+
+class RTSS(object):
+    '''class for data related to a RTSS:
+    - agregating pavement marking measurements
+    - RTSS characteristics from FMR: pavement type, age, AADT, truck AADT
+    - winter maintenance level from V155
+
+    If divided highway, the RTSS ends with G or D and are distinct: there is no ambiguity
+    - retroreflectivity types: there are CB, RJ and RB
+    If undivided, ending with C
+    - durability is fine: ETAT_MARQG_RG ETAT_MARQG_CL ETAT_MARQG_RD (+SG/SD, but recent)
+    - retroreflectivity: CJ is center line, RB and SB are left/right if DEBUT-FIN>0 or <0
+    '''
+
+    def __init__(self, _id, name, data):
+        # _id: RTSS identifier; name: display name; data: associated records
+        self.id = _id
+        self.name = name
+        self.data = data
+
+class MarkingTest(object):
+    '''class for a test site for a given product
+
+    including the series of measurements over the years'''
+
+    def __init__(self, _id, paintingDate, paintingType, color, data):
+        self.id = _id
+        self.paintingDate = paintingDate
+        self.paintingType = paintingType
+        self.color = color
+        self.data = data
+        self.nMeasures = len(data)
+
+    def getSite(self):
+        return int(self.id[:2])
+
+    def getTestAttributes(self):
+        return [self.paintingType, self.color, self.paintingDate.year]
+
+    def plot(self, measure, options = 'o', dayRatio = 1., **kwargs):
+        from matplotlib.pyplot import plot
+        plot(self.data['jours']/float(dayRatio), 
+             self.data[measure], options, **kwargs)
+
+    def getMarkingMeasures(self, dataLabel):
+        nonZeroIndices = ~np.isnan(self.data[dataLabel])
+        return self.data[nonZeroIndices]['jours'], self.data[nonZeroIndices][dataLabel]
+
+    def plotMarkingMeasures(self, measure, options = 'o', dayRatio = 1., **kwargs):
+        for i in range(1,7):
+            self.plot('{}_{}'.format(measure, i), options, dayRatio, **kwargs)
+
+    def computeMarkingMeasureVariations(self, dataLabel, lanePositions, weatherData, snowThreshold, weatherDataType = 'ec', minProportionMeasures = 0.):
+        '''Computes for each successive measurement
+        lanePositions = None
+        measure variation, initial measure, time duration, weather indicators
+        
+        TODO if measurements per lane, add a variable for lane position (position1 to 6)
+        lanePositions = list of integers (range(1,7))
+        measure variation, initial measure, time duration, lane position1, weather indicators
+        measure variation, initial measure, time duration, lane position2, weather indicators
+        ...'''
+        variationData = []
+        if lanePositions is None:
+            nonZeroIndices = ~np.isnan(self.data[dataLabel])
+            days = self.data[nonZeroIndices]['jours']
+            dates = self.data[nonZeroIndices]['date_mesure']
+            measures = self.data[nonZeroIndices][dataLabel]
+            for i in range(1, len(dates)):
+                nDaysTNegative, nDaysThawFreeze, deltaTemp, nConsecutiveFrozenDays, totalRain, totalSnow, snowAboveThreshold, stdevTemp = weatherIndicators(weatherData, dates[i-1], dates[i], snowThreshold, weatherDataType, minProportionMeasures)
+                if dates[i-1].year+1 == dates[i].year:
+                    winter = 1
+                    if days[i-1]<365:
+                        firstWinter = 1
+                else:
+                    winter = 0
+                    firstWinter = 0
+                variationData.append([measures[i-1]-measures[i], measures[i-1], days[i]-days[i-1], days[i-1], winter, firstWinter, nDaysTNegative, nDaysThawFreeze] + deltaTemp + [nConsecutiveFrozenDays, totalRain, totalSnow, snowAboveThreshold, stdevTemp])
+        return variationData
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/poly_utils.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,125 @@
+#! /usr/bin/env python
+'''Various utilities to load data saved by the POLY new output(s)'''
+
+from moving import  TimeInterval
+from indicators import SeverityIndicator
+
+import sys, utils
+import numpy as np
+
+
+def loadNewInteractions(videoFilename,interactionType,dirname, extension, indicatorsNames, roaduserNum1,roaduserNum2, selectedIndicators=[]):
+    '''Loads interactions from the POLY traffic event format
+
+    Builds one Interaction between roaduserNum1 and roaduserNum2 over the
+    frame interval found in the file, attaching a SeverityIndicator per
+    entry of indicatorsNames (dict: column index -> indicator name) and,
+    if selectedIndicators is not empty, a combined 'selectedIndicators' one.
+    Returns [] if the file cannot be opened.'''
+    from events import Interaction 
+    filename= dirname + videoFilename + extension
+    #filename= dirname + interactionType+ '-' + videoFilename + extension # case of min distance todo: change the saving format to be matched with all outputs
+    # the file handle is only used to check that the file exists and is readable;
+    # the data itself is loaded below with np.loadtxt
+    file = utils.openCheck(filename)
+    if (not file):
+        return []
+    #interactions = []
+    interactionNum = 0
+    data= np.loadtxt(filename)
+    indicatorFrameNums= data[:,0]  # first column holds the frame numbers
+    inter = Interaction(interactionNum, TimeInterval(indicatorFrameNums[0],indicatorFrameNums[-1]), roaduserNum1, roaduserNum2) 
+    inter.addVideoFilename(videoFilename)
+    inter.addInteractionType(interactionType)
+    for key in indicatorsNames:
+        values= {}
+        for i,t in enumerate(indicatorFrameNums):
+            values[t] = data[i,key]
+        inter.addIndicator(SeverityIndicator(indicatorsNames[key], values))
+    if selectedIndicators !=[]:
+        values= {}
+        for i,t in enumerate(indicatorFrameNums):
+            values[t] = [data[i,index] for index in selectedIndicators]
+        inter.addIndicator(SeverityIndicator('selectedIndicators', values))    
+        
+    #interactions.append(inter)
+    file.close()
+    #return interactions
+    return inter
+
+# Plotting results
+
+frameRate = 15.  # video frame rate used to convert frame counts to seconds
+
+# To run in directory that contains the directories that contain the results (Miss-xx and Incident-xx)
+#dirname = '/home/nicolas/Research/Data/kentucky-db/'
+
+# pairs of interacting road user numbers per video sequence
+interactingRoadUsers = {'Miss/0404052336': [(0,3)] # 0,2 and 1 vs 3
+                        #,
+                        #'Incident/0306022035': [(1,3)]
+                        #,
+                        #'Miss/0208030956': [(4,5),(5,7)]
+                        }
+
+
+def getIndicatorName(filename, withUnit = False):
+    '''Returns the display name of the indicator stored in filename
+    (TTC, pPET or P(UEA)), optionally with the time unit appended;
+    returns None (implicitly) for an unrecognized filename'''
+    if withUnit:
+        unit = ' (s)'
+    else:
+        unit = ''
+    if 'collision-point' in filename:
+        return 'TTC'+unit
+    elif 'crossing' in filename:
+        return 'pPET'+unit
+    elif 'probability' in filename:
+        return 'P(UEA)'
+
+def getMethodName(fileprefix):
+    '''Returns the display name of the prediction method identified by
+    fileprefix; returns None (implicitly) for an unrecognized prefix'''
+    if fileprefix == 'constant-velocity':
+        return 'Con. Vel.'
+    elif fileprefix == 'normal-adaptation':
+        return 'Norm. Ad.'
+    elif fileprefix == 'point-set':
+        return 'Pos. Set'
+    elif fileprefix == 'evasive-action':
+        return 'Ev. Act.'
+    elif fileprefix == 'point-set-evasive-action':
+        return 'Pos. Set'
+
+# column index of the time stamp in the data array, per indicator name
+indicator2TimeIdx = {'TTC':2,'pPET':2, 'P(UEA)':3}
+
+def getDataAtInstant(data, i):
+    '''Returns the rows of data whose time stamp (column 2) equals i'''
+    return data[data[:,2] == i]
+
+def getPointsAtInstant(data, i):
+    return getDataAtInstant(i)[3:5]
+
+def getIndicator(data, roadUserNumbers, indicatorName):
+    '''Extracts the severity indicator indicatorName for the given pair of
+    road users from data; the pair is matched in either column order.
+    Returns (SeverityIndicator, matching rows) or (None, None) if absent'''
+    if data.ndim ==1:
+        # promote a single row to a 2-D array so indexing below works
+        data.shape = (1,data.shape[0])
+
+    # find the order for the roadUserNumbers
+    uniqueObj1 = np.unique(data[:,0])
+    uniqueObj2 = np.unique(data[:,1])
+    found = False
+    if roadUserNumbers[0] in uniqueObj1 and roadUserNumbers[1] in uniqueObj2:
+        objNum1 = roadUserNumbers[0]
+        objNum2 = roadUserNumbers[1]
+        found = True
+    if roadUserNumbers[1] in uniqueObj1 and roadUserNumbers[0] in uniqueObj2:
+        objNum1 = roadUserNumbers[1]
+        objNum2 = roadUserNumbers[0]
+        found = True
+
+    # get subset of data for road user numbers
+    if found:
+        roadUserData = data[np.logical_and(data[:,0] == objNum1, data[:,1] == objNum2),:]
+        if roadUserData.size > 0:
+            time = np.unique(roadUserData[:,indicator2TimeIdx[indicatorName]])
+            values = {}
+            if indicatorName == 'P(UEA)':
+                tmp = roadUserData[:,4]
+                for k,v in zip(time, tmp):
+                    values[k]=v
+                return SeverityIndicator(indicatorName, values, mostSevereIsMax = False, maxValue = 1.), roadUserData
+            else:
+                # NOTE(review): range() requires integers; time comes from a float
+                # array, so this likely relies on integer-valued time stamps -- confirm
+                for i in range(time[0],time[-1]+1):
+                    try:
+                        # weighted average of column 6 by column 5, converted to seconds
+                        tmp = getDataAtInstant(roadUserData, i)
+                        values[i] = np.sum(tmp[:,5]*tmp[:,6])/np.sum(tmp[:,5])/frameRate
+                    except IOError:
+                        values[i] = np.inf
+                return SeverityIndicator(indicatorName, values, mostSevereIsMax = False), roadUserData
+    return None, None
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/prediction.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,601 @@
+#! /usr/bin/env python
+'''Library for motion prediction methods'''
+
+import math, random
+from copy import copy
+
+import numpy as np
+
+from trafficintelligence import moving
+from trafficintelligence.utils import LCSS
+
+class PredictedTrajectory(object):
+    '''Class for predicted trajectories with lazy evaluation
+    if the predicted position has not been already computed, compute it
+
+    it should also have a probability'''
+
+    def __init__(self):
+        self.probability = 0.
+        self.predictedPositions = {}   # nTimeSteps -> moving.Point
+        self.predictedSpeedOrientations = {}   # nTimeSteps -> moving.NormAngle
+        #self.collisionPoints = {}
+        #self.crossingZones = {}
+
+    def predictPosition(self, nTimeSteps):
+        '''Returns the position nTimeSteps ahead, recursively computing and
+        caching all intermediate positions
+        Subclasses provide getControl() and the maxSpeed attribute'''
+        if nTimeSteps > 0 and not nTimeSteps in self.predictedPositions:
+            self.predictPosition(nTimeSteps-1)
+            self.predictedPositions[nTimeSteps], self.predictedSpeedOrientations[nTimeSteps] = moving.predictPosition(self.predictedPositions[nTimeSteps-1], self.predictedSpeedOrientations[nTimeSteps-1], self.getControl(), self.maxSpeed)
+        return self.predictedPositions[nTimeSteps]
+
+    def getPredictedTrajectory(self):
+        # NOTE(review): relies on dict insertion order matching increasing time steps
+        return moving.Trajectory.fromPointList(list(self.predictedPositions.values()))
+
+    def getPredictedSpeeds(self):
+        # speeds are the norms of the cached (norm, angle) pairs
+        return [so.norm for so in self.predictedSpeedOrientations.values()]
+
+    def plot(self, options = '', withOrigin = False, timeStep = 1, **kwargs):
+        '''Plots the predicted positions computed so far'''
+        self.getPredictedTrajectory().plot(options, withOrigin, timeStep, **kwargs)
+
+class PredictedTrajectoryConstant(PredictedTrajectory):
+    '''Predicted trajectory at constant speed or acceleration
+    TODO generalize by passing a series of velocities/accelerations'''
+
+    def __init__(self, initialPosition, initialVelocity, control = moving.NormAngle(0,0), probability = 1., maxSpeed = None):
+        # control is a constant (acceleration, steering) NormAngle applied at each step
+        # NOTE(review): the default NormAngle(0,0) is created once at class definition
+        # and shared across calls -- harmless only if never mutated
+        self.control = control
+        self.maxSpeed = maxSpeed
+        self.probability = probability
+        self.predictedPositions = {0: initialPosition}
+        self.predictedSpeedOrientations = {0: moving.NormAngle.fromPoint(initialVelocity)}
+
+    def getControl(self):
+        # constant control consumed by PredictedTrajectory.predictPosition
+        return self.control
+
+class PredictedTrajectoryPrototype(PredictedTrajectory):
+    '''Predicted trajectory that follows a prototype trajectory
+    The prototype is in the format of a moving.Trajectory: it could be
+    1. an observed trajectory (extracted from video)
+    2. a generic polyline (eg the road centerline) that a vehicle is supposed to follow
+
+    Prediction can be done
+    1. at constant speed (the instantaneous user speed)
+    2. following the trajectory path, at the speed of the user
+    (applying a constant ratio equal 
+    to the ratio of the user instantaneous speed and the trajectory closest speed)'''
+
+    def __init__(self, initialPosition, initialVelocity, prototype, constantSpeed = False, nFramesIgnore = 3, probability = 1.):
+        ''' prototype is a MovingObject
+
+        Prediction at constant speed will not work for unrealistic trajectories 
+        that do not follow a slowly changing velocity (eg moving object trajectories, 
+        but is good for realistic motion (eg features)'''
+        self.prototype = prototype
+        self.constantSpeed = constantSpeed
+        # nFramesIgnore: number of frames at the prototype's end whose (noisy)
+        # velocities are skipped when extrapolating
+        self.nFramesIgnore = nFramesIgnore
+        self.probability = probability
+        self.predictedPositions = {0: initialPosition}
+        # index of the prototype point closest to the initial position
+        self.closestPointIdx = prototype.getPositions().getClosestPoint(initialPosition)
+        self.deltaPosition = initialPosition-prototype.getPositionAt(self.closestPointIdx) #should be computed in relative coordinates to position
+        # reference orientation of the prototype at the closest point
+        self.theta = prototype.getVelocityAt(self.closestPointIdx).angle()
+        self.initialSpeed = initialVelocity.norm2()
+        if not constantSpeed:
+            # speed ratio between the user and the prototype at the closest point
+            self.ratio = self.initialSpeed/prototype.getVelocityAt(self.closestPointIdx).norm2()
+    
+    def predictPosition(self, nTimeSteps):
+        '''Returns the position nTimeSteps ahead along the prototype,
+        offset by the (rotated) initial offset from the prototype'''
+        if nTimeSteps > 0 and not nTimeSteps in self.predictedPositions:
+            deltaPosition = copy(self.deltaPosition)
+            if self.constantSpeed:
+                # travel a fixed distance per step along the prototype polyline
+                traj = self.prototype.getPositions()
+                trajLength = traj.length()
+                traveledDistance = nTimeSteps*self.initialSpeed + traj.getCumulativeDistance(self.closestPointIdx)
+                i = self.closestPointIdx
+                while i < trajLength and traj.getCumulativeDistance(i) < traveledDistance:
+                    i += 1
+                if i == trajLength:
+                    # past the prototype's end: extrapolate along the last reliable velocity
+                    v = self.prototype.getVelocityAt(-1-self.nFramesIgnore)
+                    self.predictedPositions[nTimeSteps] = deltaPosition.rotate(v.angle()-self.theta)+traj[i-1]+v*((traveledDistance-traj.getCumulativeDistance(i-1))/v.norm2())
+                else:
+                    # interpolate between prototype points i-1 and i
+                    v = self.prototype.getVelocityAt(min(i-1, int(self.prototype.length())-1-self.nFramesIgnore))
+                    self.predictedPositions[nTimeSteps] = deltaPosition.rotate(v.angle()-self.theta)+traj[i-1]+(traj[i]-traj[i-1])*((traveledDistance-traj.getCumulativeDistance(i-1))/traj.getDistance(i-1))
+            else:
+                # advance a fixed (fractional) number of prototype indices per step
+                traj = self.prototype.getPositions()
+                trajLength = traj.length()
+                nSteps = self.ratio*nTimeSteps+self.closestPointIdx
+                i = int(np.floor(nSteps))
+                if nSteps < trajLength-1:
+                    # interpolate between prototype points i and i+1
+                    v = self.prototype.getVelocityAt(min(i, int(self.prototype.length())-1-self.nFramesIgnore))
+                    self.predictedPositions[nTimeSteps] = deltaPosition.rotate(v.angle()-self.theta)+traj[i]+(traj[i+1]-traj[i])*(nSteps-i)
+                else:
+                    # past the prototype's end: extrapolate along the last reliable velocity
+                    v = self.prototype.getVelocityAt(-1-self.nFramesIgnore)
+                    self.predictedPositions[nTimeSteps] = deltaPosition.rotate(v.angle()-self.theta)+traj[-1]+v*(nSteps-trajLength+1)
+        return self.predictedPositions[nTimeSteps]
+
+class PredictedTrajectoryRandomControl(PredictedTrajectory):
+    '''Random vehicle control: suitable for normal adaptation'''
+    def __init__(self, initialPosition, initialVelocity, accelerationDistribution, steeringDistribution, probability = 1., maxSpeed = None):
+        '''Constructor
+        accelerationDistribution and steeringDistribution are distributions 
+        that return random numbers drawn from them'''
+        self.accelerationDistribution = accelerationDistribution
+        self.steeringDistribution = steeringDistribution
+        self.maxSpeed = maxSpeed
+        self.probability = probability
+        self.predictedPositions = {0: initialPosition}
+        self.predictedSpeedOrientations = {0: moving.NormAngle.fromPoint(initialVelocity)}
+
+    def getControl(self):
+        # a fresh random (acceleration, steering) pair at every prediction step
+        return moving.NormAngle(self.accelerationDistribution(),self.steeringDistribution())
+
+class SafetyPoint(moving.Point):
+    '''Can represent a collision point or crossing zone 
+    with respective safety indicator, TTC or pPET'''
+    def __init__(self, p, probability = 1., indicator = -1):
+        # p is a moving.Point; probability weights this point among sampled trajectory pairs
+        self.x = p.x
+        self.y = p.y
+        self.probability = probability
+        self.indicator = indicator
+
+    def __str__(self):
+        return '{0} {1} {2} {3}'.format(self.x, self.y, self.probability, self.indicator)
+
+    @staticmethod
+    def save(out, points, predictionInstant, objNum1, objNum2):
+        '''Writes one line per point to the file-like object out:
+        objNum1 objNum2 predictionInstant x y probability indicator'''
+        for p in points:
+            out.write('{0} {1} {2} {3}\n'.format(objNum1, objNum2, predictionInstant, p))
+
+    @staticmethod
+    def computeExpectedIndicator(points):
+        '''Returns the probability-weighted average of the indicator over points'''
+        return np.sum([p.indicator*p.probability for p in points])/sum([p.probability for p in points])
+
+def computeCollisionTime(predictedTrajectory1, predictedTrajectory2, collisionDistanceThreshold, timeHorizon):
+    '''Computes the first instant 
+    at which two predicted trajectories are within some distance threshold
+    Computes all the times including timeHorizon
+    
+    User has to check the first variable collision to know about a collision'''
+    # start at t=1: position 0 is the current (observed) position
+    t = 1
+    p1 = predictedTrajectory1.predictPosition(t)
+    p2 = predictedTrajectory2.predictPosition(t)
+    collision = (p1-p2).norm2() <= collisionDistanceThreshold
+    while t < timeHorizon and not collision:
+        t += 1
+        p1 = predictedTrajectory1.predictPosition(t)
+        p2 = predictedTrajectory2.predictPosition(t)
+        collision = (p1-p2).norm2() <= collisionDistanceThreshold
+    # returns the last computed t and positions even if no collision was found
+    return collision, t, p1, p2
+
+def savePredictedTrajectoriesFigure(currentInstant, obj1, obj2, predictedTrajectories1, predictedTrajectories2, timeHorizon, printFigure = True):
+    '''Plots both objects and their predicted trajectories up to timeHorizon
+    If printFigure, clears the current figure and saves it to
+    predicted-trajectories-t-<instant>.png; otherwise opens a new on-screen figure'''
+    from matplotlib.pyplot import figure, axis, title, clf, savefig
+    if printFigure:
+        clf()
+    else:
+        figure()
+    # force computation of every position up to the horizon before plotting
+    for et in predictedTrajectories1:
+        for t in range(int(np.round(timeHorizon))):
+            et.predictPosition(t)
+            et.plot('rx')
+    for et in predictedTrajectories2:
+        for t in range(int(np.round(timeHorizon))):
+            et.predictPosition(t)
+            et.plot('bx')
+    obj1.plot('r', withOrigin = True)
+    obj2.plot('b', withOrigin = True)
+    title('instant {0}'.format(currentInstant))
+    axis('equal')
+    if printFigure:
+        savefig('predicted-trajectories-t-{0}.png'.format(currentInstant))
+
+def calculateProbability(nMatching,similarity,objects):
+    '''Converts similarity scores, weighted by prototype matching frequencies
+    (nMatching), into a normalized probability distribution keyed by the
+    matched objects'''
+    sumFrequencies=sum([nMatching[p] for p in similarity])
+    prototypeProbability={}
+    for i in similarity:
+        # weight each similarity by the prototype's relative matching frequency
+        prototypeProbability[i]= similarity[i] * float(nMatching[i])/sumFrequencies
+    sumProbabilities= sum([prototypeProbability[p] for p in prototypeProbability])
+    probabilities={}
+    for i in prototypeProbability:
+        # normalize so the probabilities sum to 1
+        probabilities[objects[i]]= float(prototypeProbability[i])/sumProbabilities
+    return probabilities
+
+def findPrototypes(prototypes,nMatching,objects,route,partialObjPositions,noiseEntryNums,noiseExitNums,minSimilarity=0.1,mostMatched=None,spatialThreshold=1.0, delta=180):
+    ''' behaviour prediction first step'''
+    if route[0] not in noiseEntryNums: 
+        prototypesRoutes= [ x for x in sorted(prototypes.keys()) if route[0]==x[0]]
+    elif route[1] not in noiseExitNums:
+        prototypesRoutes=[ x for x in sorted(prototypes.keys()) if route[1]==x[1]]
+    else:
+        prototypesRoutes=[x for x in sorted(prototypes.keys())]
+    lcss = LCSS(similarityFunc=lambda x,y: (distanceForLCSS(x,y) <= spatialThreshold),delta=delta)
+    similarity={}
+    for y in prototypesRoutes: 
+        if y in prototypes:
+            prototypesIDs=prototypes[y]            
+            for x in prototypesIDs:
+                s=lcss.computeNormalized(partialObjPositions, objects[x].positions)
+                if s >= minSimilarity:
+                    similarity[x]=s
+    
+    if mostMatched==None:
+        probabilities= calculateProbability(nMatching,similarity,objects)        
+        return probabilities
+    else:
+        mostMatchedValues=sorted(similarity.values(),reverse=True)[:mostMatched]
+        keys=[k for k in similarity if similarity[k] in mostMatchedValues]
+        newSimilarity={}
+        for i in keys:
+            newSimilarity[i]=similarity[i]
+        probabilities= calculateProbability(nMatching,newSimilarity,objects)        
+        return probabilities        
+        
+def findPrototypesSpeed(prototypes,secondStepPrototypes,nMatching,objects,route,partialObjPositions,noiseEntryNums,noiseExitNums,minSimilarity=0.1,mostMatched=None,useDestination=True,spatialThreshold=1.0, delta=180):
+    '''Behaviour prediction second step: like findPrototypes, but maps each
+    matched first-step prototype to its speed prototypes (secondStepPrototypes)
+    before computing the probability distribution
+    NOTE(review): mostMatched is accepted but unused, and relies on the
+    module-level distanceForLCSS, not visible here'''
+    if useDestination:
+        # destination known: restrict to the exact route
+        prototypesRoutes=[route]
+    else:
+        if route[0] not in noiseEntryNums: 
+            prototypesRoutes= [ x for x in sorted(prototypes.keys()) if route[0]==x[0]]
+        elif route[1] not in noiseExitNums:
+            prototypesRoutes=[ x for x in sorted(prototypes.keys()) if route[1]==x[1]]
+        else:
+            prototypesRoutes=[x for x in sorted(prototypes.keys())]
+    lcss = LCSS(similarityFunc=lambda x,y: (distanceForLCSS(x,y) <= spatialThreshold),delta=delta)
+    similarity={}
+    for y in prototypesRoutes: 
+        if y in prototypes:
+            prototypesIDs=prototypes[y]    
+            for x in prototypesIDs:
+                s=lcss.computeNormalized(partialObjPositions, objects[x].positions)
+                if s >= minSimilarity:
+                    similarity[x]=s
+    
+    # propagate each prototype's similarity to its speed prototypes
+    newSimilarity={}
+    for i in similarity:
+        if i in secondStepPrototypes:
+            for j in secondStepPrototypes[i]:
+                newSimilarity[j]=similarity[i]
+    probabilities= calculateProbability(nMatching,newSimilarity,objects)        
+    return probabilities
+    
+    
+def getPrototypeTrajectory(obj,route,currentInstant,prototypes,secondStepPrototypes,nMatching,objects,noiseEntryNums,noiseExitNums,minSimilarity=0.1,mostMatched=None,useDestination=True,useSpeedPrototype=True):
+    '''Matches the object's trajectory observed up to currentInstant against the
+    prototypes and returns the resulting probability distribution,
+    using the speed prototypes (second step) if useSpeedPrototype'''
+    partialInterval=moving.Interval(obj.getFirstInstant(),currentInstant)
+    partialObjPositions= obj.getObjectInTimeInterval(partialInterval).positions    
+    if useSpeedPrototype:
+        prototypeTrajectories=findPrototypesSpeed(prototypes,secondStepPrototypes,nMatching,objects,route,partialObjPositions,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched,useDestination)
+    else:
+        prototypeTrajectories=findPrototypes(prototypes,nMatching,objects,route,partialObjPositions,noiseEntryNums,noiseExitNums,minSimilarity,mostMatched)
+    return prototypeTrajectories
+
+
+class PredictionParameters(object):
+    '''Base class for motion prediction methods
+    Subclasses implement generatePredictedTrajectories and may override
+    computeCrossingsCollisionsAtInstant'''
+    def __init__(self, name, maxSpeed, useCurvilinear = False):
+        self.name = name
+        self.maxSpeed = maxSpeed
+        self.useCurvilinear = useCurvilinear
+
+    def __str__(self):
+        return '{0} {1}'.format(self.name, self.maxSpeed)
+
+    def generatePredictedTrajectories(self, obj, instant):
+        '''Returns the list of PredictedTrajectory for obj at instant
+        (None in this base class; implemented by subclasses)'''
+        return None
+
+    def computeCrossingsCollisionsAtInstant(self, currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False):
+        '''returns the lists of collision points and crossing zones'''
+        predictedTrajectories1 = self.generatePredictedTrajectories(obj1, currentInstant)
+        predictedTrajectories2 = self.generatePredictedTrajectories(obj2, currentInstant)
+
+        collisionPoints = []
+        if computeCZ:
+            crossingZones = []
+        else:
+            crossingZones = None
+        # test every pair of predicted trajectories
+        for et1 in predictedTrajectories1:
+            for et2 in predictedTrajectories2:
+                collision, t, p1, p2 = computeCollisionTime(et1, et2, collisionDistanceThreshold, timeHorizon)
+                if collision:
+                    # collision point is the midpoint, indicator is the TTC t
+                    collisionPoints.append(SafetyPoint((p1+p2)*0.5, et1.probability*et2.probability, t))
+                elif computeCZ: # check if there is a crossing zone
+                    # TODO same computation as PET with metric + concatenate past trajectory with future trajectory
+                    cz = None
+                    t1 = 0
+                    while not cz and t1 < timeHorizon: # t1 <= timeHorizon-1
+                        t2 = 0
+                        while not cz and t2 < timeHorizon:
+                            cz = moving.segmentIntersection(et1.predictPosition(t1), et1.predictPosition(t1+1), et2.predictPosition(t2), et2.predictPosition(t2+1))
+                            if cz is not None:
+                                # indicator is a pPET estimate corrected by the crossing time of the collision zone
+                                deltaV= (et1.predictPosition(t1)- et1.predictPosition(t1+1) - et2.predictPosition(t2)+ et2.predictPosition(t2+1)).norm2()
+                                crossingZones.append(SafetyPoint(cz, et1.probability*et2.probability, abs(t1-t2)-(float(collisionDistanceThreshold)/deltaV)))
+                            t2 += 1
+                        t1 += 1                        
+
+        if debug:
+            savePredictedTrajectoriesFigure(currentInstant, obj1, obj2, predictedTrajectories1, predictedTrajectories2, timeHorizon)
+
+        return collisionPoints, crossingZones
+
+    def computeCrossingsCollisions(self, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, timeInterval = None):#, nProcesses = 1):
+        '''Computes all crossing and collision points at each common instant for two road users. '''
+        collisionPoints = {}
+        if computeCZ:
+            crossingZones = {}
+        else:
+            crossingZones = None
+        if timeInterval is not None:
+            commonTimeInterval = timeInterval
+        else:
+            commonTimeInterval = obj1.commonTimeInterval(obj2)
+        #if nProcesses == 1:
+        for i in list(commonTimeInterval)[:-1]: # do not look at the 1 last position/velocities, often with errors
+            cp, cz = self.computeCrossingsCollisionsAtInstant(i, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ, debug)
+            if len(cp) != 0:
+                collisionPoints[i] = cp
+            if computeCZ and len(cz) != 0:
+                crossingZones[i] = cz
+        return collisionPoints, crossingZones
+
+    def computeCollisionProbability(self, obj1, obj2, collisionDistanceThreshold, timeHorizon, debug = False, timeInterval = None):
+        '''Computes only collision probabilities
+        Returns for each instant the collision probability and number of samples drawn'''
+        collisionProbabilities = {}
+        if timeInterval is not None:
+            commonTimeInterval = timeInterval
+        else:
+            commonTimeInterval = obj1.commonTimeInterval(obj2)
+        for i in list(commonTimeInterval)[:-1]:
+            nCollisions = 0
+            predictedTrajectories1 = self.generatePredictedTrajectories(obj1, i)
+            predictedTrajectories2 = self.generatePredictedTrajectories(obj2, i)
+            for et1 in predictedTrajectories1:
+                for et2 in predictedTrajectories2:
+                    collision, t, p1, p2 = computeCollisionTime(et1, et2, collisionDistanceThreshold, timeHorizon)
+                    if collision:
+                        nCollisions += 1
+            # take into account probabilities ??
+            nSamples = float(len(predictedTrajectories1)*len(predictedTrajectories2))
+            collisionProbabilities[i] = [nSamples, float(nCollisions)/nSamples]
+
+            if debug:
+                savePredictedTrajectoriesFigure(i, obj1, obj2, predictedTrajectories1, predictedTrajectories2, timeHorizon)
+
+        return collisionProbabilities
+
+class ConstantPredictionParameters(PredictionParameters):
+    '''Prediction at constant velocity: a single trajectory per object'''
+    def __init__(self, maxSpeed):
+        PredictionParameters.__init__(self, 'constant velocity', maxSpeed)
+
+    def generatePredictedTrajectories(self, obj, instant):
+        # one constant-velocity trajectory from the object's current position and velocity
+        return [PredictedTrajectoryConstant(obj.getPositionAtInstant(instant), obj.getVelocityAtInstant(instant), maxSpeed = self.maxSpeed)]
+
+class NormalAdaptationPredictionParameters(PredictionParameters):
+    def __init__(self, maxSpeed, nPredictedTrajectories, accelerationDistribution, steeringDistribution, useFeatures = False):
+        '''An example of acceleration and steering distributions is
+        lambda: random.triangular(-self.maxAcceleration, self.maxAcceleration, 0.)
+        '''
+        if useFeatures:
+            name = 'point set normal adaptation'
+        else:
+            name = 'normal adaptation'
+        PredictionParameters.__init__(self, name, maxSpeed)
+        self.nPredictedTrajectories = nPredictedTrajectories
+        self.useFeatures = useFeatures
+        self.accelerationDistribution = accelerationDistribution
+        self.steeringDistribution = steeringDistribution
+        
+    def __str__(self):
+        return PredictionParameters.__str__(self)+' {0} {1} {2}'.format(self.nPredictedTrajectories, 
+                                                                        self.maxAcceleration, 
+                                                                        self.maxSteering)
+
+    def generatePredictedTrajectories(self, obj, instant):
+        predictedTrajectories = []
+        if self.useFeatures and obj.hasFeatures():
+            features = [f for f in obj.getFeatures() if f.existsAtInstant(instant)]
+            positions = [f.getPositionAtInstant(instant) for f in features]
+            velocities = [f.getVelocityAtInstant(instant) for f in features]
+        else:
+            positions = [obj.getPositionAtInstant(instant)]
+            velocities = [obj.getVelocityAtInstant(instant)]
+        probability = 1./float(len(positions)*self.nPredictedTrajectories)
+        for i in range(self.nPredictedTrajectories):
+            for initialPosition,initialVelocity in zip(positions, velocities):
+                predictedTrajectories.append(PredictedTrajectoryRandomControl(initialPosition, 
+                                                                              initialVelocity, 
+                                                                              self.accelerationDistribution, 
+                                                                              self.steeringDistribution, 
+                                                                              probability, 
+                                                                              maxSpeed = self.maxSpeed))
+        return predictedTrajectories
+
+class PointSetPredictionParameters(PredictionParameters):
+    '''Constant-velocity prediction from each feature of the object
+    (one predicted trajectory per feature existing at the instant)'''
+    def __init__(self, maxSpeed):
+        PredictionParameters.__init__(self, 'point set', maxSpeed)
+    
+    def generatePredictedTrajectories(self, obj, instant):
+        '''Returns one constant-velocity trajectory per feature, with uniform
+        probability; returns None (with a message) if obj has no features'''
+        predictedTrajectories = []
+        if obj.hasFeatures():
+            features = [f for f in obj.getFeatures() if f.existsAtInstant(instant)]
+            positions = [f.getPositionAtInstant(instant) for f in features]
+            velocities = [f.getVelocityAtInstant(instant) for f in features]
+            probability = 1./float(len(positions))
+            for initialPosition,initialVelocity in zip(positions, velocities):
+                predictedTrajectories.append(PredictedTrajectoryConstant(initialPosition, initialVelocity, probability = probability, maxSpeed = self.maxSpeed))
+            return predictedTrajectories
+        else:
+            print('Object {} has no features'.format(obj.getNum()))
+            return None
+
+        
+
+        
+class EvasiveActionPredictionParameters(PredictionParameters):
+    def __init__(self, maxSpeed, nPredictedTrajectories, accelerationDistribution, steeringDistribution, useFeatures = False):
+        '''Suggested acceleration distribution may not be symmetric, eg
+        lambda: random.triangular(self.minAcceleration, self.maxAcceleration, 0.)'''
+
+        if useFeatures:
+            name = 'point set evasive action'
+        else:
+            name = 'evasive action'
+        PredictionParameters.__init__(self, name, maxSpeed)
+        self.nPredictedTrajectories = nPredictedTrajectories
+        self.useFeatures = useFeatures
+        self.accelerationDistribution = accelerationDistribution
+        self.steeringDistribution = steeringDistribution
+
+    def __str__(self):
+        return PredictionParameters.__str__(self)+' {0} {1} {2} {3}'.format(self.nPredictedTrajectories, self.minAcceleration, self.maxAcceleration, self.maxSteering)
+
+    def generatePredictedTrajectories(self, obj, instant):
+        predictedTrajectories = []
+        if self.useFeatures and obj.hasFeatures():
+            features = [f for f in obj.getFeatures() if f.existsAtInstant(instant)]
+            positions = [f.getPositionAtInstant(instant) for f in features]
+            velocities = [f.getVelocityAtInstant(instant) for f in features]
+        else:
+            positions = [obj.getPositionAtInstant(instant)]
+            velocities = [obj.getVelocityAtInstant(instant)]
+        probability = 1./float(self.nPredictedTrajectories)
+        for i in range(self.nPredictedTrajectories):
+            for initialPosition,initialVelocity in zip(positions, velocities):
+                predictedTrajectories.append(PredictedTrajectoryConstant(initialPosition, 
+                                                                         initialVelocity, 
+                                                                         moving.NormAngle(self.accelerationDistribution(), 
+                                                                                          self.steeringDistribution()), 
+                                                                         probability, 
+                                                                         self.maxSpeed))
+        return predictedTrajectories
+
+
+class CVDirectPredictionParameters(PredictionParameters):
+    '''Prediction parameters of prediction at constant velocity
+    using direct computation of the intersecting point
+    Warning: the computed time to collision may be higher than timeHorizon (not used)'''
+    
+    def __init__(self):
+        PredictionParameters.__init__(self, 'constant velocity (direct computation)', None)
+
+    def computeCrossingsCollisionsAtInstant(self, currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, *kwargs):
+        collisionPoints = []
+        if computeCZ:
+            crossingZones = []
+        else:
+            crossingZones = None
+
+        p1 = obj1.getPositionAtInstant(currentInstant)
+        p2 = obj2.getPositionAtInstant(currentInstant)
+        if (p1-p2).norm2() <= collisionDistanceThreshold:
+            collisionPoints = [SafetyPoint((p1+p2)*0.5, 1., 0.)]
+        else:
+            v1 = obj1.getVelocityAtInstant(currentInstant)
+            v2 = obj2.getVelocityAtInstant(currentInstant)
+            intersection = moving.intersection(p1, p1+v1, p2, p2+v2)
+
+            if intersection is not None:
+                dp1 = intersection-p1
+                dp2 = intersection-p2
+                dot1 = moving.Point.dot(dp1, v1)
+                dot2 = moving.Point.dot(dp2, v2)
+                if (computeCZ and (dot1 > 0 or dot2 > 0)) or (dot1 > 0 and dot2 > 0): # if the road users are moving towards the intersection or if computing pPET
+                    dist1 = dp1.norm2()
+                    dist2 = dp2.norm2()
+                    s1 = math.copysign(v1.norm2(), dot1)
+                    s2 = math.copysign(v2.norm2(), dot2)
+                    halfCollisionDistanceThreshold = collisionDistanceThreshold/2.
+                    timeInterval1 = moving.TimeInterval(max(0,dist1-halfCollisionDistanceThreshold)/s1, (dist1+halfCollisionDistanceThreshold)/s1)
+                    timeInterval2 = moving.TimeInterval(max(0,dist2-halfCollisionDistanceThreshold)/s2, (dist2+halfCollisionDistanceThreshold)/s2)
+                    collisionTimeInterval = moving.TimeInterval.intersection(timeInterval1, timeInterval2)
+                    
+                    if collisionTimeInterval.empty():
+                        if computeCZ:
+                            crossingZones = [SafetyPoint(intersection, 1., timeInterval1.distance(timeInterval2))]
+                    else:
+                        collisionPoints = [SafetyPoint(intersection, 1., collisionTimeInterval.center())]
+    
+        if debug and intersection is not None:
+            from matplotlib.pyplot import plot, figure, axis, title
+            figure()
+            plot([p1.x, intersection.x], [p1.y, intersection.y], 'r')
+            plot([p2.x, intersection.x], [p2.y, intersection.y], 'b')
+            intersection.plot()            
+            obj1.plot('r')
+            obj2.plot('b')
+            title('instant {0}'.format(currentInstant))
+            axis('equal')
+
+        return collisionPoints, crossingZones
+
+class CVExactPredictionParameters(PredictionParameters):
+    '''Prediction parameters of prediction at constant velocity
+    using direct computation of the intersecting point (solving the equation)
+    Warning: the computed time to collision may be higher than timeHorizon (not used)'''
+    
+    def __init__(self, useCurvilinear = False):
+        PredictionParameters.__init__(self, 'constant velocity (direct exact computation)', None, useCurvilinear)
+
+    def computeCrossingsCollisionsAtInstant(self, currentInstant, obj1, obj2, collisionDistanceThreshold, timeHorizon, computeCZ = False, debug = False, *kwargs):
+        'TODO compute pPET'
+        collisionPoints = []
+        crossingZones = []
+
+        if self.useCurvilinear:
+            pass # Lionel
+        else:
+            p1 = obj1.getPositionAtInstant(currentInstant)
+            p2 = obj2.getPositionAtInstant(currentInstant)
+            v1 = obj1.getVelocityAtInstant(currentInstant)
+            v2 = obj2.getVelocityAtInstant(currentInstant)
+            #intersection = moving.intersection(p1, p1+v1, p2, p2+v2)
+
+            # parallel velocities have no single collision solution; skipped
+            if not moving.Point.parallel(v1, v2):
+                ttc = moving.Point.timeToCollision(p1, p2, v1, v2, collisionDistanceThreshold)
+                if ttc is not None:
+                    # collision point: midpoint of both extrapolated positions at ttc
+                    collisionPoints = [SafetyPoint((p1+(v1*ttc)+p2+(v2*ttc))*0.5, 1., ttc)]
+                else:
+                    pass # compute pPET
+
+        return collisionPoints, crossingZones
+
+class PrototypePredictionParameters(PredictionParameters):
+    '''Prediction by following prototype trajectories matched to the object
+    (or its features) with LCSS similarity'''
+    def __init__(self, prototypes, nPredictedTrajectories, pointSimilarityDistance, minSimilarity, lcssMetric = 'cityblock', minFeatureTime = 10, constantSpeed = False, useFeatures = True):
+        PredictionParameters.__init__(self, 'prototypes', None)
+        self.prototypes = prototypes
+        self.nPredictedTrajectories = nPredictedTrajectories
+        self.lcss = LCSS(metric = lcssMetric, epsilon = pointSimilarityDistance)
+        self.minSimilarity = minSimilarity
+        # minimum number of observed frames before attempting prediction
+        self.minFeatureTime = minFeatureTime
+        self.constantSpeed = constantSpeed
+        self.useFeatures = useFeatures
+
+    def getLcss(self):
+        return self.lcss
+        
+    def addPredictedTrajectories(self, predictedTrajectories, obj, instant):
+        '''Appends one prototype-following trajectory per prototype whose
+        similarity with obj at instant reaches minSimilarity
+        NOTE(review): probability is set to the prototype's number of matchings,
+        not a normalized probability -- verify downstream usage'''
+        obj.computeTrajectorySimilarities(self.prototypes, self.lcss)
+        for proto, similarities in zip(self.prototypes, obj.prototypeSimilarities):
+            if similarities[instant-obj.getFirstInstant()] >= self.minSimilarity:
+                initialPosition = obj.getPositionAtInstant(instant)
+                initialVelocity = obj.getVelocityAtInstant(instant)
+                predictedTrajectories.append(PredictedTrajectoryPrototype(initialPosition, initialVelocity, proto.getMovingObject(), constantSpeed = self.constantSpeed, probability = proto.getNMatchings()))
+        
+    def generatePredictedTrajectories(self, obj, instant):
+        '''Returns prototype-based trajectories once obj has been observed for at
+        least minFeatureTime frames; if useFeatures, maintains on obj a rolling set
+        (currentPredictionFeatures) of up to nPredictedTrajectories features,
+        preferring the oldest ones existing at instant'''
+        predictedTrajectories = []
+        if instant-obj.getFirstInstant()+1 >= self.minFeatureTime:
+            if self.useFeatures and obj.hasFeatures():
+                if not hasattr(obj, 'currentPredictionFeatures'):
+                    obj.currentPredictionFeatures = []
+                else:
+                    # drop features that no longer exist at this instant
+                    obj.currentPredictionFeatures[:] = [f for f in obj.currentPredictionFeatures if f.existsAtInstant(instant)]
+                firstInstants = [(f,f.getFirstInstant()) for f in obj.getFeatures() if f.existsAtInstant(instant) and f not in obj.currentPredictionFeatures]
+                firstInstants.sort(key = lambda t: t[1])
+                # top up the feature set with the longest-observed candidates
+                for f,t1 in firstInstants[:min(self.nPredictedTrajectories, len(firstInstants), self.nPredictedTrajectories-len(obj.currentPredictionFeatures))]:
+                    obj.currentPredictionFeatures.append(f)
+                for f in obj.currentPredictionFeatures:
+                    self.addPredictedTrajectories(predictedTrajectories, f, instant)
+            else:
+                self.addPredictedTrajectories(predictedTrajectories, obj, instant)
+        return predictedTrajectories
+
+if __name__ == "__main__":
+    # run the module's doctest file when executed directly
+    import doctest
+    import unittest
+    suite = doctest.DocFileSuite('tests/prediction.txt')
+    #suite = doctest.DocTestSuite()
+    unittest.TextTestRunner().run(suite)
+    #doctest.testmod()
+    #doctest.testfile("example.txt")
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/processing.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,67 @@
+#! /usr/bin/env python
+'''Algorithms to process trajectories and moving objects'''
+
+import numpy as np
+
+from trafficintelligence import ml, storage, utils
+
def extractSpeeds(objects, zone):
    '''Computes the mean speed of each object while inside the polygonal zone.

    objects: iterable of moving objects (positions as 2xN arrays via getPositions().asArray())
    zone: polygon vertices as a 2xN array (transposed to Nx2 for the point-in-polygon test)
    Returns (speeds, objectsNotInZone) where speeds maps o.num to the mean of the
    per-frame velocity norms at the instants inside the zone (units: distance per frame;
    the original '# km/h' comment is unverified — conversion appears to happen elsewhere),
    and objectsNotInZone lists objects never entering the zone.'''
    # matplotlib.nxutils was removed from matplotlib (>= 1.3); use matplotlib.path instead
    from matplotlib.path import Path as PolygonPath
    speeds = {}
    objectsNotInZone = []
    zonePath = PolygonPath(zone.T) # built once, reused for every object
    for o in objects:
        inPolygon = zonePath.contains_points(o.getPositions().asArray().T)
        if inPolygon.any():
            objspeeds = [o.getVelocityAt(i).norm2() for i in range(int(o.length()-1)) if inPolygon[i]]
            speeds[o.num] = np.mean(objspeeds)
        else:
            objectsNotInZone.append(o)
    return speeds, objectsNotInZone
+
def extractVideoSequenceSpeeds(dbFilename, siteName, nObjects, startTime, frameRate, minDuration, aggMethods, aggCentiles):
    '''Extracts aggregated speeds for the objects of one video sequence.

    Loads up to nObjects objects from dbFilename and, for each object longer than
    minDuration frames, builds a row [siteName, date, time of first instant, user type]
    followed by the aggregated speeds (converted to km/h via frameRate*3.6) for each
    aggregation method; 'centile' methods contribute one value per centile.
    Returns the list of rows.'''
    data = []
    d = startTime.date()
    t1 = startTime.time()
    print('Extracting speed from '+dbFilename)
    aggFunctions, tmpheaders = utils.aggregationMethods(aggMethods, aggCentiles)
    objects = storage.loadTrajectoriesFromSqlite(dbFilename, 'object', nObjects)
    for o in objects:
        if o.length() > minDuration:
            row = [siteName, d, utils.framesToTime(o.getFirstInstant(), frameRate, t1), o.getUserType()]
            tmp = o.getSpeeds()
            for method,func in aggFunctions.items():
                aggSpeeds = frameRate*3.6*func(tmp)
                if method == 'centile':
                    row.extend(aggSpeeds.tolist())
                else:
                    row.append(aggSpeeds)
            # bug fix: append must be inside the if; previously a too-short first
            # object raised NameError and short objects duplicated the previous row
            data.append(row)
    return data
+
def learnAssignMotionPatterns(learn, assign, objects, similarities, minSimilarity, similarityFunc, minClusterSize = 0, optimizeCentroid = False, randomInitialization = False, removePrototypesAfterAssignment = False, initialPrototypes = []):
    '''Learns motion patterns

    During assignments, if using minClusterSize > 0, prototypes can change (be removed)
    The argument removePrototypesAfterAssignment indicates whether the prototypes are removed or not'''
    # prototypes provided by the caller come first in the trajectory list
    trajectories = [p.getMovingObject().getPositions().asArray().T for p in initialPrototypes]
    if len(initialPrototypes) > 0:
        initialPrototypeIndices = list(range(len(initialPrototypes)))
    else:
        initialPrototypeIndices = None
    trajectories.extend(o.getPositions().asArray().T for o in objects)

    if learn:
        prototypeIndices = ml.prototypeCluster(trajectories, similarities, minSimilarity, similarityFunc, optimizeCentroid, randomInitialization, initialPrototypeIndices)
    else:
        prototypeIndices = initialPrototypeIndices

    labels = None
    if assign:
        assignedPrototypeIndices, labels = ml.assignToPrototypeClusters(trajectories, prototypeIndices, similarities, minSimilarity, similarityFunc, minClusterSize)
        # use prototypeIndices anyway unless the caller asked for pruned prototypes
        if minClusterSize > 0 and removePrototypesAfterAssignment:
            prototypeIndices = assignedPrototypeIndices

    return prototypeIndices, labels
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/run-tests.sh	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,6 @@
#!/bin/sh
# Runs every Python file in the current directory (the test scripts) with python3.
# for file in tests/*... basename
for f in ./*.py
do
    # quote to survive filenames with spaces or glob characters
    python3 "$f"
done
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/sensors.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,65 @@
+#! /usr/bin/env python
+'''Libraries for detecting, counting, etc., road users'''
+
+from numpy import mean, isnan
+
+from trafficintelligence import moving
+
+# TODO graphical user interface for creation
+
class Sensor:
    '''Abstract base class for road user sensors.

    Subclasses are expected to override detect (boolean detection of one object)
    and detectInstants (the instants at which the object is detected).'''

    def detect(self, o):
        # default stub: warn and report no detection
        print("Detect method not implemented")
        return False

    def detectInstants(self, o):
        # default stub: warn and report no detection instants
        print("DetectInstants method not implemented")
        return []
+
class BoxSensor(Sensor):
    '''Sensor defined by a polygonal zone: an object is detected if enough of
    its positions fall inside the polygon.'''

    def __init__(self, polygon, minNPointsInBox = 1):
        self.polygon = polygon # check 2xN?
        self.minNPointsInBox = minNPointsInBox

    def detectInstants(self, obj):
        '''Returns the instants at which obj's positions are inside the polygon'''
        offset = obj.getFirstInstant()
        return [idx+offset for idx in obj.getPositions().getInstantsInPolygon(self.polygon)]

    def detect(self, obj):
        '''True if at least minNPointsInBox positions are inside the polygon'''
        return len(self.detectInstants(obj)) >= self.minNPointsInBox
+
def detectAnd(sensors, obj):
    'Returns True if all sensors detect the object'
    outcome = True
    for sensor in sensors:
        outcome = outcome and sensor.detect(obj)
        if not outcome:
            break # short-circuit on the first non-detection
    return outcome
+
def detectOr(sensors, obj):
    'Returns True if any sensor detects the object'
    outcome = False
    for sensor in sensors:
        outcome = outcome or sensor.detect(obj)
        if outcome:
            break # short-circuit on the first detection
    return outcome
+
def detectAndOrder(sensors, obj):
    '''Returns True if every sensor detects the object and the mean detection
    instants occur in the order the sensors are listed (non-decreasing)'''
    meanInstants = []
    for sensor in sensors:
        instants = sensor.detectInstants(obj)
        if len(instants) == 0:
            return False # one sensor missed the object entirely
        meanInstants.append(mean(instants))
    # verify the mean instants are in sensor order
    for earlier, later in zip(meanInstants[:-1], meanInstants[1:]):
        if not earlier <= later:
            return False
    return True
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/storage.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,1555 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+'''Various utilities to save and load data'''
+
+from pathlib import Path
+import shutil
+from copy import copy
+import sqlite3, logging
+
+from numpy import log, min as npmin, max as npmax, round as npround, array, sum as npsum, loadtxt, floor as npfloor, ceil as npceil, linalg, int32, int64
+from pandas import read_csv, merge
+
+from trafficintelligence import utils, moving, events, indicators
+from trafficintelligence.base import VideoFilenameAddable
+
+
# NGSIM road user type names mapped to their numeric codes
ngsimUserTypes = {'twowheels':1,
                  'car':2,
                  'truck':3}

# trajectory type (as used by the load/save functions) -> SQLite table name
# NOTE(review): this name is shadowed by the function tableNames(filename)
# defined later in this module, so the dict is unreachable after import — confirm
tableNames = {'feature':'positions',
              'object': 'objects',
              'objectfeatures': 'positions'}

# store numpy 64/32-bit integers as plain Python ints in SQLite
sqlite3.register_adapter(int64, lambda val: int(val))
sqlite3.register_adapter(int32, lambda val: int(val))
+
+#########################
+# Sqlite
+#########################
+
+# utils
def printDBError(error):
    'Prints a database error in the standard format used by this module'
    message = 'DB Error: {}'.format(error)
    print(message)
+
def dropTables(connection, tableNames):
    '''Drops every table whose name is in tableNames (if it exists);
    prints the error on database failure'''
    try:
        cursor = connection.cursor()
        for name in tableNames:
            cursor.execute('DROP TABLE IF EXISTS '+name)
    except sqlite3.OperationalError as error:
        printDBError(error)
+
def deleteFromSqlite(filename, dataType):
    'Deletes (drops) some tables in the filename depending on type of data'
    # tables associated with each deletable data type
    tablesByType = {'object': ['objects', 'objects_features'],
                    'interaction': ['interactions', 'indicators'],
                    'bb': ['bounding_boxes'],
                    'pois': ['gaussians2d', 'objects_pois'],
                    'prototype': ['prototypes', 'objects_prototypes', 'features_prototypes']}
    if Path(filename).is_file():
        with sqlite3.connect(filename) as connection:
            if dataType in tablesByType:
                dropTables(connection, tablesByType[dataType])
            else:
                print('Unknown data type {} to delete from database'.format(dataType))
    else:
        print('{} does not exist'.format(filename))
+
def tableExists(connection, tableName):
    '''Returns True if tableName exists in the connected database,
    False otherwise (None on database error)'''
    try:
        cursor = connection.cursor()
        cursor.execute('SELECT COUNT(*) FROM SQLITE_MASTER WHERE type = \'table\' AND name = \''+tableName+'\'')
        count = cursor.fetchone()[0]
        return count == 1
    except sqlite3.OperationalError as error:
        printDBError(error)
+
def tableNames(filename):
    # NOTE(review): this function shadows the module-level dict 'tableNames'
    # defined above; after import only the function is reachable under this name
    'Lists the names of the tables in the SQLite file'
    if Path(filename).is_file():
        with sqlite3.connect(filename) as connection:
            try:
                cursor = connection.cursor()
                cursor.execute('SELECT name FROM sqlite_master WHERE type = \'table\'')
                return [row[0] for row in cursor]
            except sqlite3.OperationalError as error:
                printDBError(error)
    # missing file or database error: no tables
    return []
+        
def createTrajectoryTable(cursor, tableName):
    '''Creates a trajectory table (one row per trajectory point);
    only table names ending in positions or velocities are allowed'''
    if not tableName.endswith(('positions', 'velocities')):
        print('Unallowed name {} for trajectory table'.format(tableName))
        return
    cursor.execute("CREATE TABLE IF NOT EXISTS "+tableName+" (trajectory_id INTEGER, frame_number INTEGER, x_coordinate REAL, y_coordinate REAL, PRIMARY KEY(trajectory_id, frame_number))")
+
def createObjectsTable(cursor):
    'Creates the objects table (object id, road user type, number of objects)'
    query = "CREATE TABLE IF NOT EXISTS objects (object_id INTEGER, road_user_type INTEGER, n_objects INTEGER, PRIMARY KEY(object_id))"
    cursor.execute(query)
+
def createAssignmentTable(cursor, objectType1, objectType2, objectIdColumnName1, objectIdColumnName2):
    '''Creates a many-to-many assignment table named <type1>s_<type2>s
    with the two given id columns as composite primary key'''
    tableName = objectType1+"s_"+objectType2+"s"
    cursor.execute("CREATE TABLE IF NOT EXISTS "+tableName+" ("+objectIdColumnName1+" INTEGER, "+objectIdColumnName2+" INTEGER, PRIMARY KEY("+objectIdColumnName1+","+objectIdColumnName2+"))")
+
def createObjectsFeaturesTable(cursor):
    'Creates the table mapping objects to their feature trajectories'
    query = "CREATE TABLE IF NOT EXISTS objects_features (object_id INTEGER, trajectory_id INTEGER, PRIMARY KEY(object_id, trajectory_id))"
    cursor.execute(query)
+
+
def createCurvilinearTrajectoryTable(cursor):
    'Creates the curvilinear positions table (s, y coordinates and lane per instant)'
    query = "CREATE TABLE IF NOT EXISTS curvilinear_positions (trajectory_id INTEGER, frame_number INTEGER, s_coordinate REAL, y_coordinate REAL, lane TEXT, PRIMARY KEY(trajectory_id, frame_number))"
    cursor.execute(query)
+
def createFeatureCorrespondenceTable(cursor):
    'Creates the table mapping local feature ids to their ids in a source database'
    query = 'CREATE TABLE IF NOT EXISTS feature_correspondences (trajectory_id INTEGER, source_dbname VARCHAR, db_trajectory_id INTEGER, PRIMARY KEY(trajectory_id))'
    cursor.execute(query)
+
def createInteractionTable(cursor):
    'Creates the interactions table (pair of object ids and frame interval)'
    query = 'CREATE TABLE IF NOT EXISTS interactions (id INTEGER PRIMARY KEY, object_id1 INTEGER, object_id2 INTEGER, first_frame_number INTEGER, last_frame_number INTEGER, FOREIGN KEY(object_id1) REFERENCES objects(id), FOREIGN KEY(object_id2) REFERENCES objects(id))'
    cursor.execute(query)
+
def createIndicatorTable(cursor):
    'Creates the indicators table (one value per interaction, indicator type and instant)'
    query = 'CREATE TABLE IF NOT EXISTS indicators (interaction_id INTEGER, indicator_type INTEGER, frame_number INTEGER, value REAL, FOREIGN KEY(interaction_id) REFERENCES interactions(id), PRIMARY KEY(interaction_id, indicator_type, frame_number))'
    cursor.execute(query)
+
def insertTrajectoryQuery(tableName):
    'Returns the parameterized INSERT statement for a 4-column trajectory table'
    return "INSERT INTO "+tableName+" VALUES (?,?,?,?)"
+
def insertObjectQuery():
    'Returns the parameterized INSERT statement for the objects table'
    return "INSERT INTO objects VALUES (?,?,?)"
+
def insertObjectFeatureQuery():
    'Returns the parameterized INSERT statement for the objects_features table'
    return "INSERT INTO objects_features VALUES (?,?)"
+
def createIndex(connection, tableName, columnName, unique = False):
    '''Creates an index named <table>_<column>_index for the column in the table
    It will make querying with a condition on this column faster'''
    try:
        cursor = connection.cursor()
        prefix = "CREATE UNIQUE " if unique else "CREATE "
        cursor.execute(prefix+"INDEX IF NOT EXISTS "+tableName+"_"+columnName+"_index ON "+tableName+"("+columnName+")")
        connection.commit()
    except sqlite3.OperationalError as error:
        printDBError(error)
+
def getNumberRowsTable(connection, tableName, columnName = None):
    '''Returns the number of rows for the table
    If columnName is not None, means we want the number of distinct values for that column
    (otherwise, we can just count(*))'''
    try:
        cursor = connection.cursor()
        if columnName is None:
            query = "SELECT COUNT(*) from "+tableName
        else:
            query = "SELECT COUNT(DISTINCT "+columnName+") from "+tableName
        cursor.execute(query)
        return cursor.fetchone()[0]
    except sqlite3.OperationalError as error:
        printDBError(error)
+
def getMinMax(connection, tableName, columnName, minmax):
    '''Returns the min or max (scalar) or both (as a (min, max) tuple)
    of the given column in the table
    minmax must be the string 'min', 'max' or 'minmax'
    Returns None for an unknown minmax argument or on database error'''
    try:
        cursor = connection.cursor()
        if minmax == 'min' or minmax == 'max':
            cursor.execute("SELECT "+minmax+"("+columnName+") from "+tableName)
            return cursor.fetchone()[0]
        elif minmax == 'minmax':
            cursor.execute("SELECT MIN("+columnName+"), MAX("+columnName+") from "+tableName)
            # bug fix: return both values; previously returned fetchone()[0] (the min only)
            return cursor.fetchone()
        else:
            # bug fix: previously fell through to fetchone() on an unexecuted cursor
            print("Argument minmax unknown: {}".format(minmax))
            return None
    except sqlite3.OperationalError as error:
        printDBError(error)
+
def getObjectCriteria(objectNumbers):
    '''Builds the SQL condition fragment selecting object/trajectory ids:
    - None: no condition (empty string)
    - int n: the first n ids ('<= n-1')
    - list: membership test ('in (...)')
    Any other type prints a warning and returns the empty string'''
    if objectNumbers is None:
        query = ''
    elif isinstance(objectNumbers, int): # idiom: isinstance instead of type ==
        query = '<= {0}'.format(objectNumbers-1)
    elif isinstance(objectNumbers, list):
        query = 'in ('+', '.join([str(n) for n in objectNumbers])+')'
    else:
        print('objectNumbers {} are not a known type ({})'.format(objectNumbers, type(objectNumbers)))
        query = ''
    return query
+
def loadTrajectoriesFromTable(connection, tableName, trajectoryType, objectNumbers = None, timeStep = None):
    '''Loads trajectories (in the general sense) from the given table
    can be positions or velocities

    trajectoryType is 'feature' (raw trajectories), 'object' (per-frame average of
    the object's features) or 'bbtop'/'bbbottom' (bounding box corner series);
    objectNumbers restricts the ids loaded (see getObjectCriteria),
    timeStep subsamples to frames where frame_number % timeStep == 0

    returns a list of moving.MovingObject'''
    cursor = connection.cursor()

    try:
        objectCriteria = getObjectCriteria(objectNumbers)
        queryStatement = None
        if trajectoryType == 'feature':
            queryStatement = 'SELECT * from '+tableName
            if objectNumbers is not None and timeStep is not None:
                queryStatement += ' WHERE trajectory_id '+objectCriteria+' AND frame_number%{} = 0'.format(timeStep)
            elif objectNumbers is not None:
                queryStatement += ' WHERE trajectory_id '+objectCriteria
            elif timeStep is not None:
                queryStatement += ' WHERE frame_number%{} = 0'.format(timeStep)
            # ordering is required by the row-grouping loop below
            queryStatement += ' ORDER BY trajectory_id, frame_number'
        elif trajectoryType == 'object':
            # object positions are the per-frame average of their feature positions
            queryStatement = 'SELECT OF.object_id, P.frame_number, avg(P.x_coordinate), avg(P.y_coordinate) from '+tableName+' P, objects_features OF WHERE P.trajectory_id = OF.trajectory_id'
            if objectNumbers is not None:
                queryStatement += ' AND OF.object_id '+objectCriteria
            if timeStep is not None:
                queryStatement += ' AND P.frame_number%{} = 0'.format(timeStep)
            queryStatement += ' GROUP BY OF.object_id, P.frame_number ORDER BY OF.object_id, P.frame_number'
        elif trajectoryType in ['bbtop', 'bbbottom']:
            # select the requested bounding box corner columns
            if trajectoryType == 'bbtop':
                corner = 'top_left'
            elif trajectoryType == 'bbbottom':
                corner = 'bottom_right'
            queryStatement = 'SELECT object_id, frame_number, x_'+corner+', y_'+corner+' FROM '+tableName
            if objectNumbers is not None and timeStep is not None:
                queryStatement += ' WHERE object_id '+objectCriteria+' AND frame_number%{} = 0'.format(timeStep)
            elif objectNumbers is not None:
                queryStatement += ' WHERE object_id '+objectCriteria
            elif timeStep is not None:
                queryStatement += ' WHERE frame_number%{} = 0'.format(timeStep)
            queryStatement += ' ORDER BY object_id, frame_number'
        else:
            print('Unknown trajectory type {}'.format(trajectoryType))
        if queryStatement is not None:
            cursor.execute(queryStatement)
            logging.debug(queryStatement)
    except sqlite3.OperationalError as error:
        printDBError(error)
        return []

    # group consecutive rows (sorted by id, frame) into one MovingObject per id;
    # an object is kept only if its positions cover its whole time interval
    # (or the subsampled equivalent when timeStep is used)
    objId = -1
    obj = None
    objects = []
    for row in cursor:
        if row[0] != objId:
            # new trajectory id: flush the previous object if complete
            objId = row[0]
            if obj is not None and (obj.length() == obj.positions.length() or (timeStep is not None and npceil(obj.length()/timeStep) == obj.positions.length())):
                objects.append(obj)
            elif obj is not None:
                print('Object {} is missing {} positions'.format(obj.getNum(), int(obj.length())-obj.positions.length()))
            obj = moving.MovingObject(row[0], timeInterval = moving.TimeInterval(row[1], row[1]), positions = moving.Trajectory([[row[2]],[row[3]]]))
        else:
            # same trajectory: extend the time interval and append the position
            obj.timeInterval.last = row[1]
            obj.positions.addPositionXY(row[2],row[3])

    # flush the last object (same completeness check as in the loop)
    if obj is not None and (obj.length() == obj.positions.length() or (timeStep is not None and npceil(obj.length()/timeStep) == obj.positions.length())):
        objects.append(obj)
    elif obj is not None:
        print('Object {} is missing {} positions'.format(obj.getNum(), int(obj.length())-obj.positions.length()))

    return objects
+
def loadObjectAttributesFromTable(cursor, objectNumbers, loadNObjects = False):
    '''Loads the road user type (and the number of objects if loadNObjects)
    for the selected objects, as a dict indexed by object id:
    id -> road_user_type, or id -> (road_user_type, n_objects)'''
    queryStatement = 'SELECT object_id, road_user_type'
    if loadNObjects:
        queryStatement += ', n_objects'
    queryStatement += ' FROM objects'
    if objectNumbers is not None:
        queryStatement += ' WHERE object_id '+getObjectCriteria(objectNumbers)
    cursor.execute(queryStatement)
    attributes = {}
    for row in cursor:
        attributes[row[0]] = row[1:] if loadNObjects else row[1]
    return attributes
+
def loadTrajectoriesFromSqlite(filename, trajectoryType, objectNumbers = None, withFeatures = False, timeStep = None, nLongestFeaturesPerObject = None):
    '''Loads the trajectories (in the general sense,
    either features, objects (feature groups), longest features per object, or bounding box series)
    types are only feature or object
    if object, features can be loaded with withFeatures, or nLongestFeaturesPerObject
    can be used to select only the n longest features

    The number loaded is either the first objectNumbers objects,
    or the indices in objectNumbers from the database
    Returns [] if the file does not exist or on database error'''
    objects = []
    if Path(filename).is_file():
        with sqlite3.connect(filename) as connection:
            objects = loadTrajectoriesFromTable(connection, 'positions', trajectoryType, objectNumbers, timeStep)
            objectVelocities = loadTrajectoriesFromTable(connection, 'velocities', trajectoryType, objectNumbers, timeStep)

            # attach velocities to the matching positions object (paired by order)
            if len(objectVelocities) > 0:
                for o,v in zip(objects, objectVelocities):
                    if o.getNum() == v.getNum():
                        o.velocities = v.positions
                        o.velocities.duplicateLastPosition() # avoid having velocity shorter by one position than positions
                    else:
                        print('Could not match positions {0} with velocities {1}'.format(o.getNum(), v.getNum()))

            if trajectoryType == 'object':
                cursor = connection.cursor()
                try:
                    # attribute feature numbers to objects
                    queryStatement = 'SELECT trajectory_id, object_id FROM objects_features'
                    if objectNumbers is not None:
                        queryStatement += ' WHERE object_id '+getObjectCriteria(objectNumbers)
                    queryStatement += ' ORDER BY object_id' # order is important to group all features per object
                    logging.debug(queryStatement)
                    cursor.execute(queryStatement)

                    featureNumbers = {}
                    for row in cursor:
                        objId = row[1]
                        if objId not in featureNumbers:
                            featureNumbers[objId] = [row[0]]
                        else:
                            featureNumbers[objId].append(row[0])

                    for obj in objects:
                        obj.featureNumbers = featureNumbers[obj.getNum()]

                    # load userType and number of objects
                    attributes = loadObjectAttributesFromTable(cursor, objectNumbers, True)
                    for obj in objects:
                        userType, nObjects = attributes[obj.getNum()]
                        obj.setUserType(userType)
                        obj.setNObjects(nObjects)

                    # add features: either all of them, or only the n longest
                    if withFeatures:
                        for obj in objects:
                            obj.features = loadTrajectoriesFromSqlite(filename, 'feature', obj.featureNumbers, timeStep = timeStep)
                    elif nLongestFeaturesPerObject is not None:
                        for obj in objects:
                            # feature lengths (frame span) sorted longest first
                            queryStatement = 'SELECT trajectory_id, max(frame_number)-min(frame_number) AS length FROM positions WHERE trajectory_id '+getObjectCriteria(obj.featureNumbers)+' GROUP BY trajectory_id ORDER BY length DESC'
                            logging.debug(queryStatement)
                            cursor.execute(queryStatement)
                            obj.features = loadTrajectoriesFromSqlite(filename, 'feature', [row[0] for i,row in enumerate(cursor) if i<nLongestFeaturesPerObject], timeStep = timeStep)

                except sqlite3.OperationalError as error:
                    printDBError(error)
    return objects
+
def loadObjectFeatureFrameNumbers(filename, objectNumbers = None):
    '''Loads, for each object, the list of its feature trajectory ids
    ordered by decreasing feature length (frame span);
    returns a dict object_id -> [trajectory ids] or None on database error'''
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        try:
            queryStatement = 'SELECT OF.object_id, TL.trajectory_id, TL.length FROM (SELECT trajectory_id, max(frame_number)-min(frame_number) AS length FROM positions GROUP BY trajectory_id) TL, objects_features OF WHERE TL.trajectory_id = OF.trajectory_id'
            if objectNumbers is not None:
                queryStatement += ' AND object_id '+getObjectCriteria(objectNumbers)
            queryStatement += ' ORDER BY OF.object_id, TL.length DESC'
            logging.debug(queryStatement)
            cursor.execute(queryStatement)
            objectFeatureNumbers = {}
            for row in cursor:
                # rows arrive grouped by object, longest feature first
                objectFeatureNumbers.setdefault(row[0], []).append(row[1])
            return objectFeatureNumbers
        except sqlite3.OperationalError as error:
            printDBError(error)
            return None
+
def addCurvilinearTrajectoriesFromSqlite(filename, objects):
    '''Adds curvilinear positions (s_coordinate, y_coordinate, lane)
    from a database to an existing MovingObject dict (indexed by each objects's num)
    Object ids present in the database but missing from the dict are reported'''
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()

        try:
            cursor.execute('SELECT * from curvilinear_positions order by trajectory_id, frame_number')
        except sqlite3.OperationalError as error:
            printDBError(error)
            return []

        missingObjectNumbers = []
        currentNum = None
        for row in cursor:
            if currentNum != row[0]:
                # new trajectory id: start a fresh curvilinear trajectory if the object is known
                currentNum = row[0]
                if currentNum in objects:
                    objects[currentNum].curvilinearPositions = moving.CurvilinearTrajectory()
                else:
                    missingObjectNumbers.append(currentNum)
            if currentNum in objects:
                objects[currentNum].curvilinearPositions.addPositionSYL(row[2],row[3],row[4])
        if len(missingObjectNumbers) > 0:
            print('List of missing objects to attach corresponding curvilinear trajectories: {}'.format(missingObjectNumbers))
+
def saveTrajectoriesToTable(connection, objects, trajectoryType):
    '''Saves trajectories to the connected database
    trajectoryType 'feature' saves positions/velocities of the objects themselves,
    'object' saves the features of each object plus the objects and
    objects_features mapping tables, 'curvilinear' saves curvilinear positions'''
    cursor = connection.cursor()
    # Parse feature and/or object structure and commit to DB
    if(trajectoryType == 'feature' or trajectoryType == 'object'):
        # Extract features from objects
        if trajectoryType == 'object':
            features = []
            for obj in objects:
                if obj.hasFeatures():
                    features += obj.getFeatures()
            if len(features) == 0:
                print('Warning, objects have no features') # todo save centroid trajectories?
        elif trajectoryType == 'feature':
            features = objects
        # Setup feature queries
        createTrajectoryTable(cursor, "positions")
        createTrajectoryTable(cursor, "velocities")
        positionQuery = insertTrajectoryQuery("positions")
        velocityQuery = insertTrajectoryQuery("velocities")
        # Setup object queries
        if trajectoryType == 'object':    
            createObjectsTable(cursor)
            createObjectsFeaturesTable(cursor)
            objectQuery = insertObjectQuery()
            objectFeatureQuery = insertObjectFeatureQuery()
        # one row per feature position/velocity, frame numbers counted from the first instant
        for feature in features:
            num = feature.getNum()
            frameNum = feature.getFirstInstant()
            for p in feature.getPositions():
                cursor.execute(positionQuery, (num, frameNum, p.x, p.y))
                frameNum += 1
            velocities = feature.getVelocities()
            if velocities is not None:
                frameNum = feature.getFirstInstant()
                # last velocity is dropped (duplicated on load, see loadTrajectoriesFromSqlite)
                for v in velocities[:-1]:
                    cursor.execute(velocityQuery, (num, frameNum, v.x, v.y))
                    frameNum += 1
        # save the objects and the object -> feature mapping
        if trajectoryType == 'object':
            for obj in objects:
                if obj.hasFeatures():
                    for feature in obj.getFeatures():
                        featureNum = feature.getNum()
                        cursor.execute(objectFeatureQuery, (obj.getNum(), featureNum))
                cursor.execute(objectQuery, (obj.getNum(), obj.getUserType(), obj.nObjects if hasattr(obj, 'nObjects') and obj.nObjects is not None else 1))   
    # Parse curvilinear position structure
    elif(trajectoryType == 'curvilinear'):
        createCurvilinearTrajectoryTable(cursor)
        curvilinearQuery = "INSERT INTO curvilinear_positions VALUES (?,?,?,?,?)"
        for obj in objects:
            num = obj.getNum()
            frameNum = obj.getFirstInstant()
            for p in obj.getCurvilinearPositions():
                cursor.execute(curvilinearQuery, (num, frameNum, p[0], p[1], p[2]))
                frameNum += 1
    else:
        print('Unknown trajectory type {}'.format(trajectoryType))
    connection.commit()
+
def saveTrajectoriesToSqlite(outputFilename, objects, trajectoryType):
    '''Writes features, ie the trajectory positions (and velocities if exist)
    with their instants to a specified sqlite file
    Either feature positions (and velocities if they exist)
    or curvilinear positions will be saved at a time
    Database errors are printed, not raised'''
    with sqlite3.connect(outputFilename) as connection:
        try:
            saveTrajectoriesToTable(connection, objects, trajectoryType)
        except sqlite3.OperationalError as error:
            printDBError(error)
+
def setRoadUserTypes(filename, objects):
    '''Saves the user types of the objects in the sqlite database stored in filename
    The objects should exist in the objects table'''
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        for obj in objects:
            # parameterized query instead of string formatting (safer, avoids quoting issues)
            cursor.execute('UPDATE objects SET road_user_type = ? WHERE object_id = ?', (obj.getUserType(), obj.getNum()))
        connection.commit()
+
def loadBBMovingObjectsFromSqlite(filename, objectType = 'bb', objectNumbers = None, timeStep = None):
    '''Loads bounding box moving object from an SQLite
    (format of SQLite output by the ground truth annotation tool
    or Urban Tracker

    Load descriptions?'''
    objects = []
    if Path(filename).is_file():
        with sqlite3.connect(filename) as connection:
            if objectType == 'bb':
                topCorners = loadTrajectoriesFromTable(connection, 'bounding_boxes', 'bbtop', objectNumbers, timeStep)
                bottomCorners = loadTrajectoriesFromTable(connection, 'bounding_boxes', 'bbbottom', objectNumbers, timeStep)
                userTypes = loadObjectAttributesFromTable(connection.cursor(), objectNumbers) # string format is same as object
                # pair each top corner series with the matching bottom corner series
                for top, bottom in zip(topCorners, bottomCorners):
                    num = top.getNum()
                    if num == bottom.getNum():
                        objects.append(moving.BBMovingObject(num, top, bottom, top.getTimeInterval(), userTypes[num]))
            else:
                print ('Unknown type of bounding box {}'.format(objectType))
    return objects
+
def saveInteraction(cursor, interaction):
    '''Inserts one row in the interactions table: id, the two road user ids
    and the first/last frame numbers of the interaction
    Uses a parameterized query instead of string formatting'''
    roadUserNumbers = list(interaction.getRoadUserNumbers())
    cursor.execute('INSERT INTO interactions VALUES(?, ?, ?, ?, ?)', (interaction.getNum(), roadUserNumbers[0], roadUserNumbers[1], interaction.getFirstInstant(), interaction.getLastInstant()))
+
def saveInteractionsToSqlite(filename, interactions):
    '''Saves the interactions in the interactions table of the database file,
    creating the table if needed; database errors are printed, not raised'''
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        try:
            createInteractionTable(cursor)
            for interaction in interactions:
                saveInteraction(cursor, interaction)
        except sqlite3.OperationalError as error:
            printDBError(error)
        connection.commit()
+
def saveIndicator(cursor, interactionNum, indicator):
    '''Inserts one indicators row per instant of the indicator's time interval
    (interaction id, indicator type index, frame number, value)
    Uses a parameterized query instead of string formatting
    NOTE(review): instants with falsy values (0, None) are skipped — presumably
    sparse storage of non-zero values; confirm zeros are never meaningful'''
    for instant in indicator.getTimeInterval():
        if indicator[instant]:
            cursor.execute('INSERT INTO indicators VALUES(?, ?, ?, ?)', (interactionNum, events.Interaction.indicatorNameToIndices[indicator.getName()], instant, indicator[instant]))
+
def saveIndicatorsToSqlite(filename, interactions, indicatorNames = events.Interaction.indicatorNames):
    '''Saves each interaction and the values of its available indicators
    (restricted to indicatorNames) in the database file, creating the
    tables if needed; database errors are printed, not raised'''
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        try:
            createInteractionTable(cursor)
            createIndicatorTable(cursor)
            for interaction in interactions:
                saveInteraction(cursor, interaction)
                for name in indicatorNames:
                    indicator = interaction.getIndicator(name)
                    if indicator is not None:
                        saveIndicator(cursor, interaction.getNum(), indicator)
        except sqlite3.OperationalError as error:
            printDBError(error)
        connection.commit()
+
def loadInteractionsFromSqlite(filename):
    '''Loads interaction and their indicators
    Rows are grouped by interaction id then indicator type; each group of
    consecutive rows for the same (interaction, indicator type) becomes one
    SeverityIndicator with per-instant values
    Returns [] if the file does not exist or on database error

    TODO choose the interactions to load'''
    interactions = []
    if Path(filename).is_file():
        with sqlite3.connect(filename) as connection:
            cursor = connection.cursor()
            try:
                cursor.execute('SELECT INT.id, INT.object_id1, INT.object_id2, INT.first_frame_number, INT.last_frame_number, IND.indicator_type, IND.frame_number, IND.value from interactions INT, indicators IND WHERE INT.id = IND.interaction_id ORDER BY INT.id, IND.indicator_type, IND.frame_number')
                interactionNum = -1
                indicatorTypeNum = -1
                tmpIndicators = {}
                for row in cursor:
                    if row[0] != interactionNum:
                        # new interaction id: create the Interaction object
                        interactionNum = row[0]
                        interactions.append(events.Interaction(interactionNum, moving.TimeInterval(row[3],row[4]), row[1], row[2]))
                        interactions[-1].indicators = {}
                    if indicatorTypeNum != row[5] or row[0] != interactionNum:
                        # new indicator type (or new interaction): start a new indicator
                        indicatorTypeNum = row[5]
                        indicatorName = events.Interaction.indicatorNames[indicatorTypeNum]
                        indicatorValues = {row[6]:row[7]}
                        # time indicators are most severe when minimal, hence mostSevereIsMax is inverted for them
                        interactions[-1].indicators[indicatorName] = indicators.SeverityIndicator(indicatorName, indicatorValues, mostSevereIsMax = not indicatorName in events.Interaction.timeIndicators)
                    else:
                        # same indicator: accumulate the value and extend its time interval
                        indicatorValues[row[6]] = row[7]
                        interactions[-1].indicators[indicatorName].timeInterval.last = row[6]
            except sqlite3.OperationalError as error:
                printDBError(error)
                return []
    return interactions
+# load first and last object instants
+# CREATE TEMP TABLE IF NOT EXISTS object_instants AS SELECT OF.object_id, min(frame_number) as first_instant, max(frame_number) as last_instant from positions P, objects_features OF WHERE P.trajectory_id = OF.trajectory_id group by OF.object_id order by OF.object_id
+
def createBoundingBoxTable(filename, invHomography = None):
    '''Creates and fills the table of object bounding boxes in image space

    invHomography is the 3x3 inverse homography (indexable as [i,j], e.g. a numpy array)
    used to project the world coordinates of table positions back to image space'''
    if invHomography is None:
        # bug fix: invHomography was dereferenced unconditionally, crashing on the default
        print('Cannot compute image-space bounding boxes without an inverse homography. Exiting')
        return
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        try:
            cursor.execute('CREATE TABLE IF NOT EXISTS bounding_boxes (object_id INTEGER, frame_number INTEGER, x_top_left REAL, y_top_left REAL, x_bottom_right REAL, y_bottom_right REAL,  PRIMARY KEY(object_id, frame_number))')
            # project each feature position to image space, then take the min/max per object and frame
            cursor.execute('INSERT INTO bounding_boxes SELECT object_id, frame_number, min(x), min(y), max(x), max(y) from '
                  '(SELECT object_id, frame_number, (x*{}+y*{}+{})/w as x, (x*{}+y*{}+{})/w as y from '
                  '(SELECT OF.object_id, P.frame_number, P.x_coordinate as x, P.y_coordinate as y, P.x_coordinate*{}+P.y_coordinate*{}+{} as w from positions P, objects_features OF WHERE P.trajectory_id = OF.trajectory_id)) '.format(invHomography[0,0], invHomography[0,1], invHomography[0,2], invHomography[1,0], invHomography[1,1], invHomography[1,2], invHomography[2,0], invHomography[2,1], invHomography[2,2])+
                  'GROUP BY object_id, frame_number')
        except sqlite3.OperationalError as error:
            printDBError(error)
        connection.commit()
+
def loadBoundingBoxTableForDisplay(filename):
    '''Returns a dict mapping each frame number to the list of
    [top left point, bottom right point] bounding boxes at that instant'''
    boundingBoxes = {}
    if Path(filename).is_file():
        with sqlite3.connect(filename) as connection:
            cursor = connection.cursor()
            try:
                # check the table exists before querying it
                cursor.execute('SELECT name FROM sqlite_master WHERE type=\'table\' AND name=\'bounding_boxes\'')
                if len(cursor.fetchall()) > 0:
                    cursor.execute('SELECT * FROM bounding_boxes')
                    for objectId, frameNumber, xTopLeft, yTopLeft, xBottomRight, yBottomRight in cursor:
                        boundingBoxes.setdefault(frameNumber, []).append([moving.Point(xTopLeft, yTopLeft), moving.Point(xBottomRight, yBottomRight)])
            except sqlite3.OperationalError as error:
                printDBError(error)
    return boundingBoxes
+
+#########################
+# saving and loading for scene interpretation: POIs and Prototypes
+#########################
+
def savePrototypesToSqlite(filename, prototypes):
    '''Saves the prototypes to the prototypes table
    (a prototype is defined by a filename, a number (id) and a trajectory type)'''
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        try:
            cursor.execute('CREATE TABLE IF NOT EXISTS prototypes (prototype_filename VARCHAR, prototype_id INTEGER, trajectory_type VARCHAR CHECK (trajectory_type IN (\"feature\", \"object\")), nmatchings INTEGER, PRIMARY KEY (prototype_filename, prototype_id, trajectory_type))')
            rows = [(proto.getFilename(), proto.getNum(), proto.getTrajectoryType(), proto.getNMatchings()) for proto in prototypes]
            cursor.executemany('INSERT INTO prototypes VALUES(?,?,?,?)', rows)
        except sqlite3.OperationalError as error:
            printDBError(error)
        connection.commit()
+
def setPrototypeMatchingsInSqlite(filename, prototypes):
    '''Updates the number of matchings of the prototypes already stored in the prototypes table

    A prototype whose getNMatchings() is None is stored as SQL NULL'''
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        try:
            for p in prototypes:
                # parameterized query: None is bound as NULL automatically,
                # and filenames cannot break the statement (the old version formatted strings in)
                cursor.execute('UPDATE prototypes SET nmatchings = ? WHERE prototype_filename = ? AND prototype_id = ? AND trajectory_type = ?',
                               (p.getNMatchings(), p.getFilename(), p.getNum(), p.getTrajectoryType()))
        except sqlite3.OperationalError as error:
            printDBError(error)
        connection.commit()
+
def prototypeAssignmentNames(objectType):
    '''Returns the table name and the object id column name used to store
    prototype assignments for the given object type ('feature' or 'object')

    Raises ValueError for any other object type (the old version raised
    a confusing UnboundLocalError)'''
    tableName = objectType+'s_prototypes'
    if objectType == 'feature':
        objectIdColumnName = 'trajectory_id'
    elif objectType == 'object':
        objectIdColumnName = 'object_id'
    else:
        raise ValueError('unknown object type {} (expected \'feature\' or \'object\')'.format(objectType))
    return tableName, objectIdColumnName
+        
def savePrototypeAssignmentsToSqlite(filename, objectNumbers, objectType, labels, prototypes):
    '''Saves, for each object number with a non-negative cluster label,
    the prototype it was assigned to (objects with label < 0 are skipped)'''
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        try:
            tableName, objectIdColumnName = prototypeAssignmentNames(objectType)
            cursor.execute('CREATE TABLE IF NOT EXISTS '+tableName+' ('+objectIdColumnName+' INTEGER, prototype_filename VARCHAR, prototype_id INTEGER, trajectory_type VARCHAR CHECK (trajectory_type IN (\"feature\", \"object\")), PRIMARY KEY('+objectIdColumnName+', prototype_filename, prototype_id, trajectory_type))')
            for objectNum, clusterLabel in zip(objectNumbers, labels):
                if clusterLabel >= 0:
                    assignedPrototype = prototypes[clusterLabel]
                    cursor.execute('INSERT INTO '+tableName+' VALUES(?,?,?,?)', (objectNum, assignedPrototype.getFilename(), assignedPrototype.getNum(), assignedPrototype.getTrajectoryType()))
        except sqlite3.OperationalError as error:
            printDBError(error)
        connection.commit()
+
def loadPrototypeAssignmentsFromSqlite(filename, objectType):
    '''Loads the prototype assignments as a dict mapping each prototype
    to the list of object ids assigned to it'''
    prototypeAssignments = {}
    if Path(filename).is_file():
        with sqlite3.connect(filename) as connection:
            cursor = connection.cursor()
            try:
                tableName, objectIdColumnName = prototypeAssignmentNames(objectType)
                cursor.execute('SELECT * FROM '+tableName)
                for row in cursor:
                    proto = moving.Prototype(row[1], row[2], row[3])
                    prototypeAssignments.setdefault(proto, []).append(row[0])
                return prototypeAssignments
            except sqlite3.OperationalError as error:
                printDBError(error)
    return prototypeAssignments
+
def loadPrototypesFromSqlite(filename, withTrajectories = True):
    '''Loads prototype ids and matchings (if stored)

    If withTrajectories is True, each prototype trajectory is loaded from its own
    database file, resolved relative to the parent directory of filename'''
    prototypes = []
    if Path(filename).is_file():
        parentPath = Path(filename).resolve().parent
        with sqlite3.connect(filename) as connection:
            cursor = connection.cursor()
            try:
                cursor.execute('SELECT * FROM prototypes')
                for row in cursor:
                    prototypes.append(moving.Prototype(row[0], row[1], row[2], row[3]))
                if withTrajectories:
                    for p in prototypes:
                        p.setMovingObject(loadTrajectoriesFromSqlite(str(parentPath/p.getFilename()), p.getTrajectoryType(), [p.getNum()])[0])
            except sqlite3.OperationalError as error:
                printDBError(error)
        # warn when the database mixes feature and object prototypes
        trajectoryTypes = set([p.getTrajectoryType() for p in prototypes])
        if len(trajectoryTypes) > 1:
            print('Different types of prototypes in database ({}).'.format(trajectoryTypes))
    return prototypes
+
def savePOIsToSqlite(filename, gmm, gmmType, gmmId):
    '''Saves a Gaussian mixture model (of class sklearn.mixture.GaussianMixture)
    gmmType is a type of GaussianMixture, learnt either from beginnings or ends of trajectories'''
    # validate before opening the connection: avoids creating/locking the file for nothing
    if gmmType not in ['beginning', 'end']:
        print('Unknown POI type {}. Exiting'.format(gmmType))
        import sys
        sys.exit()
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        try:
            # bug fix: the weight column had no declared type
            cursor.execute('CREATE TABLE IF NOT EXISTS gaussians2d (poi_id INTEGER, id INTEGER, type VARCHAR, x_center REAL, y_center REAL, covariance VARCHAR, covariance_type VARCHAR, weight REAL, precisions_cholesky VARCHAR, PRIMARY KEY(poi_id, id))')
            for i in range(gmm.n_components):
                # covariance and precision matrices are stored as the str() of nested lists
                cursor.execute('INSERT INTO gaussians2d VALUES(?,?,?,?,?,?,?,?,?)', (gmmId, i, gmmType, gmm.means_[i][0], gmm.means_[i][1], str(gmm.covariances_[i].tolist()), gmm.covariance_type, gmm.weights_[i], str(gmm.precisions_cholesky_[i].tolist())))
            connection.commit()
        except sqlite3.OperationalError as error:
            printDBError(error)
+
def savePOIAssignmentsToSqlite(filename, objects):
    'Saves the origin and destination POI ids (the od field) of the objects'
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        try:
            cursor.execute('CREATE TABLE IF NOT EXISTS objects_pois (object_id INTEGER, origin_poi_id INTEGER, destination_poi_id INTEGER, PRIMARY KEY(object_id))')
            cursor.executemany('INSERT INTO objects_pois VALUES(?,?,?)', [(obj.getNum(), obj.od[0], obj.od[1]) for obj in objects])
            connection.commit()
        except sqlite3.OperationalError as error:
            printDBError(error)
+    
def loadPOIsFromSqlite(filename):
    '''Loads all 2D Gaussians in the database

    Returns one sklearn.mixture.GaussianMixture per poi_id,
    rebuilt from the rows of the gaussians2d table (ordered by poi_id then id)'''
    from sklearn import mixture # todo if not avalaible, load data in duck-typed class with same fields
    from ast import literal_eval

    def buildMixture(gaussians, covarianceType):
        'Rebuilds a GaussianMixture from a list of per-component dicts (dedupes the old copy-pasted code)'
        tmp = mixture.GaussianMixture(len(gaussians), covarianceType)
        tmp.means_ = array([gaussian['mean'] for gaussian in gaussians])
        tmp.covariances_ = array([gaussian['covar'] for gaussian in gaussians])
        tmp.weights_ = array([gaussian['weight'] for gaussian in gaussians])
        tmp.gmmTypes = [gaussian['type'] for gaussian in gaussians]
        tmp.precisions_cholesky_ = array([gaussian['precisions'] for gaussian in gaussians])
        return tmp

    pois = []
    if Path(filename).is_file():
        with sqlite3.connect(filename) as connection:
            cursor = connection.cursor()
            try:
                cursor.execute('SELECT * from gaussians2d')
                gmmId = None
                gmm = []
                covarianceType = None
                for row in cursor:
                    gaussian = {'type': row[2],
                                'mean': row[3:5],
                                'covar': array(literal_eval(row[5])),
                                'weight': row[7],
                                'precisions': array(literal_eval(row[8]))}
                    if gmmId is None or row[0] != gmmId:
                        # new poi_id: flush the components of the previous mixture
                        if len(gmm) > 0:
                            pois.append(buildMixture(gmm, covarianceType))
                        gmm = [gaussian]
                        covarianceType = row[6]
                        gmmId = row[0]
                    else:
                        gmm.append(gaussian)
                # flush the last mixture
                if len(gmm) > 0:
                    pois.append(buildMixture(gmm, covarianceType))
            except sqlite3.OperationalError as error:
                printDBError(error)
    return pois
+    
+#########################
+# saving and loading for scene interpretation (Mohamed Gomaa Mohamed's PhD)
+#########################
+
def writePrototypesToSqlite(prototypes,nMatching, outputFilename):
    ''' prototype dataset is a dictionary with  keys== routes, values== prototypes Ids '''
    connection = sqlite3.connect(outputFilename)
    cursor = connection.cursor()
    cursor.execute('CREATE TABLE IF NOT EXISTS prototypes (prototype_id INTEGER,routeIDstart INTEGER,routeIDend INTEGER, nMatching INTEGER, PRIMARY KEY(prototype_id))')
    for route, prototypeIds in prototypes.items():
        for prototypeId in prototypeIds:
            cursor.execute('insert into prototypes (prototype_id, routeIDstart,routeIDend, nMatching) values (?,?,?,?)', (prototypeId, route[0], route[1], nMatching[route][prototypeId]))
    connection.commit()
    connection.close()
+    
def readPrototypesFromSqlite(filename):
    '''Loads the prototype table from the database

    Returns a dictionary of prototype ids for each route
    and a dictionary of the number of matchings for each prototype id'''
    prototypes = {}
    nMatching = {}
    connection = sqlite3.connect(filename)
    cursor = connection.cursor()
    try:
        cursor.execute('SELECT * from prototypes order by prototype_id, routeIDstart,routeIDend, nMatching')
    except sqlite3.OperationalError as error:
        printDBError(error)
        connection.close() # bug fix: the connection was leaked on error
        return prototypes, nMatching # bug fix: was [], but callers unpack a 2-tuple
    for row in cursor:
        route = (row[1], row[2])
        prototypes.setdefault(route, []).append(row[0])
        nMatching[row[0]] = row[3]
    connection.close()
    return prototypes, nMatching
+    
def writeLabelsToSqlite(labels, outputFilename):
    """ labels is a dictionary with  keys: routes, values: prototypes Ids
    """
    connection = sqlite3.connect(outputFilename)
    cursor = connection.cursor()
    cursor.execute("CREATE TABLE IF NOT EXISTS labels (object_id INTEGER,routeIDstart INTEGER,routeIDend INTEGER, prototype_id INTEGER, PRIMARY KEY(object_id))")
    for route, prototypesForRoute in labels.items():
        for prototypeId in prototypesForRoute:
            for objectId in prototypesForRoute[prototypeId]:
                cursor.execute("insert into labels (object_id, routeIDstart,routeIDend, prototype_id) values (?,?,?,?)", (objectId, route[0], route[1], prototypeId))
    connection.commit()
    connection.close()
+    
def loadLabelsFromSqlite(filename):
    '''Loads the labels table as a dictionary mapping each route
    to a dictionary mapping each prototype id to the list of assigned object ids'''
    labels = {}
    connection = sqlite3.connect(filename)
    cursor = connection.cursor()
    try:
        cursor.execute('SELECT * from labels order by object_id, routeIDstart,routeIDend, prototype_id')
    except sqlite3.OperationalError as error:
        printDBError(error)
        connection.close() # bug fix: the connection was leaked on error
        return labels # bug fix: was [], inconsistent with the normal dict return type
    for row in cursor:
        route = (row[1], row[2])
        labels.setdefault(route, {}).setdefault(row[3], []).append(row[0])
    connection.close()
    return labels
+
def writeSpeedPrototypeToSqlite(prototypes,nmatching, outFilename):
    """ to match the format of second layer prototypes"""
    connection = sqlite3.connect(outFilename)
    cursor = connection.cursor()
    cursor.execute("CREATE TABLE IF NOT EXISTS speedprototypes (spdprototype_id INTEGER,prototype_id INTEGER,routeID_start INTEGER, routeID_end INTEGER, nMatching INTEGER, PRIMARY KEY(spdprototype_id))")
    for route, prototypesForRoute in prototypes.items():
        for prototypeId, speedPrototypeIds in prototypesForRoute.items():
            for speedPrototypeId in speedPrototypeIds:
                cursor.execute("insert into speedprototypes (spdprototype_id,prototype_id, routeID_start, routeID_end, nMatching) values (?,?,?,?,?)", (speedPrototypeId, prototypeId, route[0], route[1], nmatching[speedPrototypeId]))
    connection.commit()
    connection.close()
+    
def loadSpeedPrototypeFromSqlite(filename):
    '''Loads the speedprototypes table in the database of name <filename>

    Returns a dictionary mapping each route to a dictionary mapping each prototype id
    to the list of speed prototype ids, and a dictionary of the number of matchings
    for each speed prototype id'''
    prototypes = {}
    nMatching = {}
    connection = sqlite3.connect(filename)
    cursor = connection.cursor()
    try:
        cursor.execute('SELECT * from speedprototypes order by spdprototype_id,prototype_id, routeID_start, routeID_end, nMatching')
    except sqlite3.OperationalError as error:
        printDBError(error)
        connection.close() # bug fix: the connection was leaked on error
        return prototypes, nMatching # bug fix: was [], but callers unpack a 2-tuple
    for row in cursor:
        route = (row[2], row[3])
        prototypes.setdefault(route, {}).setdefault(row[1], []).append(row[0])
        nMatching[row[0]] = row[4]
    connection.close()
    return prototypes, nMatching
+
+
def writeRoutesToSqlite(Routes, outputFilename):
    """ This function writes the activity path define by start and end IDs"""
    connection = sqlite3.connect(outputFilename)
    cursor = connection.cursor()
    cursor.execute("CREATE TABLE IF NOT EXISTS routes (object_id INTEGER,routeIDstart INTEGER,routeIDend INTEGER, PRIMARY KEY(object_id))")
    for route, objectIds in Routes.items():
        for objectId in objectIds:
            cursor.execute("insert into routes (object_id, routeIDstart,routeIDend) values (?,?,?)", (objectId, route[0], route[1]))
    connection.commit()
    connection.close()
+    
def loadRoutesFromSqlite(filename):
    '''Loads the routes table as a dictionary mapping each route
    (start id, end id) to the list of object ids following it'''
    Routes = {}
    connection = sqlite3.connect(filename)
    cursor = connection.cursor()
    try:
        cursor.execute('SELECT * from routes order by object_id, routeIDstart,routeIDend')
    except sqlite3.OperationalError as error:
        printDBError(error)
        connection.close() # bug fix: the connection was leaked on error
        return Routes # bug fix: was [], inconsistent with the normal dict return type
    for row in cursor:
        route = (row[1], row[2])
        Routes.setdefault(route, []).append(row[0])
    connection.close()
    return Routes
+
def setRoutes(filename, objects):
    '''Stores the start and end route ids of the objects in the objects table'''
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        try:
            for obj in objects:
                # single parameterized UPDATE instead of two string-formatted statements per object
                cursor.execute('update objects set startRouteID = ?, endRouteID = ? WHERE object_id = ?', (obj.startRouteID, obj.endRouteID, obj.getNum()))
        except sqlite3.OperationalError as error:
            printDBError(error)
        connection.commit()
+
+#########################
+# txt files
+#########################
+
def loadCSVs(filenames, **kwargs):
    '''Loads all the data from the filenames (eg from glob) and returns a concatenated dataframe'''
    from pandas import concat
    # bug fix: each file is now read (the original re-read filenames[0] for every file);
    # concat replaces DataFrame.append, which was removed from pandas
    return concat([read_csv(f, **kwargs) for f in filenames])
+
def saveList(filename, l):
    'Writes the elements of l to filename, one per line'
    out = utils.openCheck(filename, 'w')
    for element in l:
        out.write('{}\n'.format(element))
    out.close()
+
def loadListStrings(filename, commentCharacters = utils.commentChar):
    'Returns the lines of filename read through utils.getLines (filtered on commentCharacters)'
    inputFile = utils.openCheck(filename, 'r')
    lines = utils.getLines(inputFile, commentCharacters)
    inputFile.close()
    return lines
+
def getValuesFromINIFile(filename, option, delimiterChar = '=', commentCharacters = utils.commentChar):
    '''Returns the values (text after the first delimiterChar, stripped)
    of all lines of filename starting with option'''
    return [line.split(delimiterChar)[1].strip()
            for line in loadListStrings(filename, commentCharacters)
            if line.startswith(option)]
+
def addSectionHeader(propertiesFile, headerName = 'main'):
    '''Generator prepending a fake INI section header to a properties file,
    so it can be parsed by configparser

    from http://stackoverflow.com/questions/2819696/parsing-properties-file-in-python/2819788#2819788
    use read_file in Python 3.2+
    '''
    yield '[{}]\n'.format(headerName)
    yield from propertiesFile
+
def loadPemsTraffic(filename):
    '''Loads traffic data downloaded from the http://pems.dot.ca.gov clearinghouse 
    into pandas dataframe'''
    # first pass over a few rows only, to count the columns and deduce the number of lanes
    sample = read_csv(filename, nrows = 3)
    headerNames = ['time', 'station', 'district', 'freeway', 'direction', 'lanetype', 'length', 'nsamples', 'pctobserved', 'totalflow', 'occupancy', 'speed'] # default for 5 min
    nLanes = int((len(sample.columns)-len(headerNames))/5) # five extra columns per lane
    for laneIndex in range(1, nLanes+1):
        headerNames += ['nsamples{}'.format(laneIndex), 'flow{}'.format(laneIndex), 'occupancy{}'.format(laneIndex), 'speed{}'.format(laneIndex), 'pctobserved{}'.format(laneIndex)]
    return read_csv(filename, names = headerNames)
+
def generatePDLaneColumn(data):
    'Adds in place a LANE column combining link number and lane index as "<link>_<index>"'
    linkNumbers = data['LANE\\LINK\\NO'].astype(str)
    laneIndices = data['LANE\\INDEX'].astype(str)
    data['LANE'] = linkNumbers + '_' + laneIndices
+
def convertTrajectoriesVissimToSqlite(filename):
    '''Relies on a system call to sqlite3
    sqlite3 [file.sqlite] < import_fzp.sql'''
    sqlScriptFilename = "import_fzp.sql"
    # create sql file
    out = utils.openCheck(sqlScriptFilename, "w")
    out.write(".separator \";\"\n"+
              "CREATE TABLE IF NOT EXISTS curvilinear_positions (t REAL, trajectory_id INTEGER, link_id INTEGER, lane_id INTEGER, s_coordinate REAL, y_coordinate REAL, speed REAL, PRIMARY KEY (t, trajectory_id));\n"+
              ".import "+filename+" curvilinear_positions\n"+
              "DELETE FROM curvilinear_positions WHERE trajectory_id IS NULL OR trajectory_id = \"NO\";\n")
    out.close()
    # system call
    from subprocess import run
    out = utils.openCheck("err.log", "w")
    # bug fix: shell=True is required, the command is one string using '<' input redirection
    # (without a shell, run() would look for an executable named with the whole string)
    run("sqlite3 "+utils.removeExtension(filename)+".sqlite < "+sqlScriptFilename, stderr = out, shell = True)
    out.close()
    shutil.os.remove(sqlScriptFilename) # note: shutil.os is just os; kept for consistency with file imports
+
def loadObjectNumbersInLinkFromVissimFile(filename, linkIds):
    '''Finds the ids of the objects that go through any of the link in the list linkIds'''
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        linkList = ','.join([str(linkId) for linkId in linkIds])
        try:
            cursor.execute('SELECT DISTINCT trajectory_id FROM curvilinear_positions where link_id IN ('+linkList+')')
            return [row[0] for row in cursor]
        except sqlite3.OperationalError as error:
            printDBError(error)
+
def getNObjectsInLinkFromVissimFile(filename, linkIds):
    '''Returns the number of objects that traveled through the link ids'''
    with sqlite3.connect(filename) as connection:
        cursor = connection.cursor()
        linkList = ','.join([str(linkId) for linkId in linkIds])
        try:
            cursor.execute('SELECT link_id, COUNT(DISTINCT trajectory_id) FROM curvilinear_positions where link_id IN ('+linkList+') GROUP BY link_id')
            return dict(cursor.fetchall())
        except sqlite3.OperationalError as error:
            printDBError(error)
+
def loadTrajectoriesFromVissimFile(filename, simulationStepsPerTimeUnit, objectNumbers = None, warmUpLastInstant = None, usePandas = False, nDecimals = 2, lowMemory = True):
    '''Reads data from VISSIM .fzp trajectory file
    simulationStepsPerTimeUnit is the number of simulation steps per unit of time used by VISSIM (second)
    for example, there seems to be 10 simulation steps per simulated second in VISSIM, 
    so simulationStepsPerTimeUnit should be 10, 
    so that all times correspond to the number of the simulation step (and can be stored as integers)
    
    Objects positions will be considered only after warmUpLastInstant 
    (if the object has no such position, it won't be loaded)

    Assumed to be sorted over time
    Warning: if reading from SQLite a limited number of objects, objectNumbers will be the maximum object id'''
    objects = {} # dictionary of objects index by their id

    def addPosition(objNum, instant, s, y, lane):
        'Creates the object at its first instant after the warm-up, then extends it with the position (dedupes the fzp/sqlite branches)'
        if objNum not in objects:
            if warmUpLastInstant is None or instant >= warmUpLastInstant:
                if objectNumbers is None or len(objects) < objectNumbers:
                    objects[objNum] = moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(instant, instant), nObjects = 1)
                    objects[objNum].curvilinearPositions = moving.CurvilinearTrajectory()
        if (warmUpLastInstant is None or instant >= warmUpLastInstant) and objNum in objects:
            objects[objNum].timeInterval.last = instant
            objects[objNum].curvilinearPositions.addPositionSYL(s, y, lane)

    if usePandas:
        data = read_csv(filename, delimiter=';', comment='*', header=0, skiprows = 1, low_memory = lowMemory)
        generatePDLaneColumn(data)
        data['TIME'] = data['$VEHICLE:SIMSEC']*simulationStepsPerTimeUnit
        if warmUpLastInstant is not None:
            data = data[data['TIME']>=warmUpLastInstant]
        grouped = data.loc[:,['NO','TIME']].groupby(['NO'], as_index = False)
        instants = grouped['TIME'].agg({'first': npmin, 'last': npmax})
        for row_index, row in instants.iterrows():
            objNum = int(row['NO'])
            tmp = data[data['NO'] == objNum]
            objects[objNum] = moving.MovingObject(num = objNum, timeInterval = moving.TimeInterval(row['first'], row['last']), nObjects = 1)
            # positions should be rounded to nDecimals decimals only
            objects[objNum].curvilinearPositions = moving.CurvilinearTrajectory(S = npround(tmp['POS'].tolist(), nDecimals), Y = npround(tmp['POSLAT'].tolist(), nDecimals), lanes = tmp['LANE'].tolist())
            if objectNumbers is not None and objectNumbers > 0 and len(objects) >= objectNumbers:
                return list(objects.values())
    elif filename.endswith(".fzp"):
        inputfile = utils.openCheck(filename, quitting = True)
        line = utils.readline(inputfile, '*$')
        while len(line) > 0:
            data = line.strip().split(';')
            addPosition(int(data[1]), float(data[0])*simulationStepsPerTimeUnit, float(data[4]), float(data[5]), data[2]+'_'+data[3])
            line = utils.readline(inputfile, '*$')
    elif filename.endswith(".sqlite"):
        with sqlite3.connect(filename) as connection:
            cursor = connection.cursor()
            queryStatement = 'SELECT t, trajectory_id, link_id, lane_id, s_coordinate, y_coordinate FROM curvilinear_positions'
            if objectNumbers is not None:
                queryStatement += ' WHERE trajectory_id '+getObjectCriteria(objectNumbers)
            queryStatement += ' ORDER BY trajectory_id, t'
            try:
                cursor.execute(queryStatement)
                for row in cursor:
                    addPosition(row[1], row[0]*simulationStepsPerTimeUnit, row[4], row[5], '{}_{}'.format(row[2], row[3]))
            except sqlite3.OperationalError as error:
                printDBError(error)
    else:
        print("File type of "+filename+" not supported (only .sqlite and .fzp files)")
    # bug fix: the final return was indented inside the non-pandas branch,
    # so the pandas branch returned None unless the objectNumbers cutoff was reached
    return list(objects.values())
+
def selectPDLanes(data, lanes = None):
    '''Selects the subset of data for the right lanes

    Lane format is a string 'x_y' where x is link index and y is lane index'''
    if lanes is None:
        return data
    if 'LANE' not in data.columns:
        generatePDLaneColumn(data)
    # isin replaces the hand-built chain of ORed equality masks
    return data[data['LANE'].isin(lanes)]
+
def countStoppedVehiclesVissim(filename, lanes = None, proportionStationaryTime = 0.7, lowMemory = True):
    '''Counts the number of vehicles stopped for a long time in a VISSIM trajectory file
    and the total number of vehicles

    Vehicles are considered finally stationary
    if more than proportionStationaryTime of their total time
    If lanes is not None, only the data for the selected lanes will be provided
    (format as string x_y where x is link index and y is lane index)'''
    nStationary = 0 # bug fix: previously undefined at the return for .sqlite and unknown file types
    nVehicles = 0
    if filename.endswith(".fzp"):
        columns = ['NO', '$VEHICLE:SIMSEC', 'POS']
        if lanes is not None:
            columns += ['LANE\\LINK\\NO', 'LANE\\INDEX']
        # bug fix: lowMemory is now a parameter (it was an undefined name, raising NameError)
        data = read_csv(filename, delimiter=';', comment='*', header=0, skiprows = 1, usecols = columns, low_memory = lowMemory)
        data = selectPDLanes(data, lanes)
        data.sort_values(['$VEHICLE:SIMSEC'], inplace = True) # bug fix: DataFrame.sort was removed from pandas

        for name, group in data.groupby(['NO'], sort = False):
            nVehicles += 1
            positions = array(group['POS'])
            diff = positions[1:]-positions[:-1]
            # stationary when the position does not change for enough of the observed steps
            if npsum(diff == 0.) >= proportionStationaryTime*(len(positions)-1):
                nStationary += 1
    elif filename.endswith(".sqlite"):
        # select trajectory_id, t, s_coordinate, speed from curvilinear_positions where trajectory_id between 1860 and 1870 and speed < 0.1
        # pb of the meaning of proportionStationaryTime in arterial network? Why proportion of existence time?
        pass
    else:
        print("File type of "+filename+" not supported (only .sqlite and .fzp files)")

    return nStationary, nVehicles
+
+def countCollisionsVissim(filename, lanes = None, collisionTimeDifference = 0.2, lowMemory = True):
+    '''Counts the number of collisions per lane in a VISSIM trajectory file
+
+    To distinguish between cars passing and collision, 
+    one checks when the sign of the position difference inverts
+    (if the time are closer than collisionTimeDifference)
+    If lanes is not None, only the data for the selected lanes will be provided
+    (format as string x_y where x is link index and y is lane index)'''
+    data = read_csv(filename, delimiter=';', comment='*', header=0, skiprows = 1, usecols = ['LANE\\LINK\\NO', 'LANE\\INDEX', '$VEHICLE:SIMSEC', 'NO', 'POS'], low_memory = lowMemory)
+    data = selectPDLanes(data, lanes)
+    # NOTE(review): DataFrame.convert_objects was removed in pandas 0.19+; to_numeric would be the modern replacement -- confirm pandas version before changing
+    data = data.convert_objects(convert_numeric=True)
+
+    # self-join: pair every two vehicles present on the same link/lane at the same instant
+    merged = merge(data, data, how='inner', left_on=['LANE\\LINK\\NO', 'LANE\\INDEX', '$VEHICLE:SIMSEC'], right_on=['LANE\\LINK\\NO', 'LANE\\INDEX', '$VEHICLE:SIMSEC'], sort = False)
+    # keep each (ordered) pair only once
+    merged = merged[merged['NO_x']>merged['NO_y']]
+
+    nCollisions = 0
+    for name, group in merged.groupby(['LANE\\LINK\\NO', 'LANE\\INDEX', 'NO_x', 'NO_y']):
+        diff = group['POS_x']-group['POS_y']
+        # diff = group['POS_x']-group['POS_y'] # to check the impact of convert_objects and the possibility of using type conversion in read_csv or function to convert strings if any
+        # a sign inversion of the position difference means the two vehicles swapped order on the lane;
+        # it is counted as a collision only if the instants closest to the crossing are within collisionTimeDifference
+        if len(diff) >= 2 and npmin(diff) < 0 and npmax(diff) > 0:
+            xidx = diff[diff < 0].argmax()
+            yidx = diff[diff > 0].argmin()
+            if abs(group.loc[xidx, '$VEHICLE:SIMSEC'] - group.loc[yidx, '$VEHICLE:SIMSEC']) <= collisionTimeDifference:
+                nCollisions += 1
+
+    # select TD1.link_id, TD1.lane_id from temp.diff_positions as TD1, temp.diff_positions as TD2 where TD1.link_id = TD2.link_id and TD1.lane_id = TD2.lane_id and TD1.id1 = TD2.id1 and TD1.id2 = TD2.id2 and TD1.t = TD2.t+0.1 and TD1.diff*TD2.diff < 0; # besoin de faire un group by??
+    # create temp table diff_positions as select CP1.t as t, CP1.link_id as link_id, CP1.lane_id as lane_id, CP1.trajectory_id as id1, CP2.trajectory_id as id2, CP1.s_coordinate - CP2.s_coordinate as diff from curvilinear_positions CP1, curvilinear_positions CP2 where CP1.link_id = CP2.link_id and CP1.lane_id = CP2.lane_id and CP1.t = CP2.t and CP1.trajectory_id > CP2.trajectory_id;
+    # SQL select link_id, lane_id, id1, id2, min(diff), max(diff) from (select CP1.t as t, CP1.link_id as link_id, CP1.lane_id as lane_id, CP1.trajectory_id as id1, CP2.trajectory_id as id2, CP1.s_coordinate - CP2.s_coordinate as diff from curvilinear_positions CP1, curvilinear_positions CP2 where CP1.link_id = CP2.link_id and CP1.lane_id = CP2.lane_id and CP1.t = CP2.t and CP1.trajectory_id > CP2.trajectory_id) group by link_id, lane_id, id1, id2 having min(diff)*max(diff) < 0
+    return nCollisions
+    
+def loadTrajectoriesFromNgsimFile(filename, nObjects = -1, sequenceNum = -1):
+    '''Reads data from the trajectory data provided by NGSIM project 
+    and returns the list of Feature objects'''
+    objects = []
+
+    inputfile = utils.openCheck(filename, quitting = True)
+
+    def createObject(numbers):
+        firstFrameNum = int(numbers[1])
+        # do the geometry and usertype
+
+        firstFrameNum = int(numbers[1])
+        lastFrameNum = firstFrameNum+int(numbers[2])-1
+        #time = moving.TimeInterval(firstFrameNum, firstFrameNum+int(numbers[2])-1)
+        obj = moving.MovingObject(num = int(numbers[0]), 
+                                  timeInterval = moving.TimeInterval(firstFrameNum, lastFrameNum), 
+                                  positions = moving.Trajectory([[float(numbers[6])],[float(numbers[7])]]), 
+                                  userType = int(numbers[10]), nObjects = 1)
+        obj.userType = int(numbers[10])
+        obj.laneNums = [int(numbers[13])]
+        obj.precedingVehicles = [int(numbers[14])] # lead vehicle (before)
+        obj.followingVehicles = [int(numbers[15])] # following vehicle (after)
+        obj.spaceHeadways = [float(numbers[16])] # feet
+        obj.timeHeadways = [float(numbers[17])] # seconds
+        obj.curvilinearPositions = moving.CurvilinearTrajectory([float(numbers[5])],[float(numbers[4])], obj.laneNums) # X is the longitudinal coordinate
+        obj.speeds = [float(numbers[11])]
+        obj.size = [float(numbers[8]), float(numbers[9])] # 8 lengh, 9 width # TODO: temporary, should use a geometry object
+        return obj
+
+    numbers = utils.readline(inputfile).strip().split()
+    if (len(numbers) > 0):
+        obj = createObject(numbers)
+
+    for line in inputfile:
+        numbers = line.strip().split()
+        if obj.getNum() != int(numbers[0]):
+            # check and adapt the length to deal with issues in NGSIM data
+            if (obj.length() != obj.positions.length()):
+                print('length pb with object {} ({},{})'.format(obj.getNum(),obj.length(),obj.positions.length()))
+                obj.last = obj.getFirstInstant()+obj.positions.length()-1
+                #obj.velocities = utils.computeVelocities(f.positions) # compare norm to speeds ?
+            objects.append(obj)
+            if (nObjects>0) and (len(objects)>=nObjects):
+                break
+            obj = createObject(numbers)
+        else:
+            obj.laneNums.append(int(numbers[13]))
+            obj.positions.addPositionXY(float(numbers[6]), float(numbers[7]))
+            obj.curvilinearPositions.addPositionSYL(float(numbers[5]), float(numbers[4]), obj.laneNums[-1])
+            obj.speeds.append(float(numbers[11]))
+            obj.precedingVehicles.append(int(numbers[14]))
+            obj.followingVehicles.append(int(numbers[15]))
+            obj.spaceHeadways.append(float(numbers[16]))
+            obj.timeHeadways.append(float(numbers[17]))
+
+            if (obj.size[0] != float(numbers[8])):
+                print('changed length obj {}'.format(obj.getNum()))
+            if (obj.size[1] != float(numbers[9])):
+                print('changed width obj {}'.format(obj.getNum()))
+    
+    inputfile.close()
+    return objects
+
+def convertNgsimFile(inputfile, outputfile, append = False, nObjects = -1, sequenceNum = 0):
+    '''Reads data from the trajectory data provided by NGSIM project
+    and converts to our current format.'''
+    if append:
+        out = utils.openCheck(outputfile,'a')
+    else:
+        out = utils.openCheck(outputfile,'w')
+    nObjectsPerType = [0,0,0]
+
+    features = loadNgsimFile(inputfile, sequenceNum)
+    for f in features:
+        nObjectsPerType[f.userType-1] += 1
+        f.write(out)
+
+    print(nObjectsPerType)
+        
+    out.close()
+
+def loadPinholeCameraModel(filename, tanalystFormat = True):
+    '''Loads the data from a file containing the camera parameters
+    (pinhole camera model, http://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html)
+    and returns a dictionary'''
+    if tanalystFormat:
+        f = utils.openCheck(filename, quitting = True)
+        content = utils.getLines(f)
+        cameraData = {}
+        for l in content:
+            tmp = l.split(':')
+            cameraData[tmp[0]] = float(tmp[1].strip().replace(',','.'))
+        return cameraData
+    else:
+        print('Unknown camera model (not tanalyst format')
+        return None
+
+def savePositionsToCsv(f, obj):
+    timeInterval = obj.getTimeInterval()
+    positions = obj.getPositions()
+    curvilinearPositions = obj.getCurvilinearPositions()
+    for i in range(int(obj.length())):
+        p1 = positions[i]
+        s = '{},{},{},{}'.format(obj.num,timeInterval[i],p1.x,p1.y)
+        if curvilinearPositions is not None:
+            p2 = curvilinearPositions[i]
+            s += ',{},{}'.format(p2[0],p2[1])
+        f.write(s+'\n')
+
+def saveTrajectoriesToCsv(filename, objects):
+    f = utils.openCheck(filename, 'w')
+    for i,obj in enumerate(objects):
+        savePositionsToCsv(f, obj)
+    f.close()
+
+
+#########################
+# Utils to read .ini type text files for configuration, meta data...
+#########################
+
+class ClassifierParameters(VideoFilenameAddable):
+    'Class for the parameters of object classifiers'
+    def loadConfigFile(self, filename):
+        # Loads the parameters from an INI-style file
+        # (addSectionHeader presumably wraps the file object to prepend a section header -- defined elsewhere in this module)
+        from configparser import ConfigParser
+
+        config = ConfigParser()
+        config.read_file(addSectionHeader(utils.openCheck(filename)))
+
+        parentPath = Path(filename).parent
+        self.sectionHeader = config.sections()[0]
+
+        # SVM classifier model files, resolved relative to the configuration file location
+        self.pedBikeCarSVMFilename = utils.getRelativeFilename(parentPath, config.get(self.sectionHeader, 'pbv-svm-filename'))
+        self.bikeCarSVMFilename = utils.getRelativeFilename(parentPath, config.get(self.sectionHeader, 'bv-svm-filename'))
+        self.percentIncreaseCrop = config.getfloat(self.sectionHeader, 'percent-increase-crop')
+        self.minNPixels = config.getint(self.sectionHeader, 'min-npixels-crop')
+        # HOG descriptor parameters (square sizes stored as (x, x) tuples)
+        x  = config.getint(self.sectionHeader, 'hog-rescale-size')
+        self.hogRescaleSize = (x, x)
+        self.hogNOrientations = config.getint(self.sectionHeader, 'hog-norientations')
+        x = config.getint(self.sectionHeader, 'hog-npixels-cell')
+        self.hogNPixelsPerCell = (x, x)
+        x = config.getint(self.sectionHeader, 'hog-ncells-block')
+        self.hogNCellsPerBlock = (x, x)
+        self.hogBlockNorm = config.get(self.sectionHeader, 'hog-block-norm')
+        
+        # speed classification parameters, in the units of the config file
+        # (converted to frame units by convertToFrames below)
+        self.speedAggregationMethod = config.get(self.sectionHeader, 'speed-aggregation-method')
+        self.nFramesIgnoreAtEnds = config.getint(self.sectionHeader, 'nframes-ignore-at-ends')
+        self.speedAggregationCentile = config.getint(self.sectionHeader, 'speed-aggregation-centile')
+        self.minSpeedEquiprobable = config.getfloat(self.sectionHeader, 'min-speed-equiprobable')
+        self.maxPercentUnknown = config.getfloat(self.sectionHeader, 'max-prop-unknown-appearance')
+        self.maxPedestrianSpeed = config.getfloat(self.sectionHeader, 'max-ped-speed')
+        self.maxCyclistSpeed = config.getfloat(self.sectionHeader, 'max-cyc-speed')
+        self.meanPedestrianSpeed = config.getfloat(self.sectionHeader, 'mean-ped-speed')
+        self.stdPedestrianSpeed = config.getfloat(self.sectionHeader, 'std-ped-speed')
+        self.locationCyclistSpeed = config.getfloat(self.sectionHeader, 'cyc-speed-loc')
+        self.scaleCyclistSpeed = config.getfloat(self.sectionHeader, 'cyc-speed-scale')
+        self.meanVehicleSpeed = config.getfloat(self.sectionHeader, 'mean-veh-speed')
+        self.stdVehicleSpeed = config.getfloat(self.sectionHeader, 'std-veh-speed')
+
+    def __init__(self, filename = None):
+        self.configFilename = filename
+        if filename is not None and Path(filename).is_file():
+            self.loadConfigFile(filename)
+        else:
+            # no valid configuration: the parameter attributes are left unset
+            print('Configuration filename {} could not be loaded.'.format(filename))
+
+    def convertToFrames(self, frameRate, speedRatio = 3.6):
+        '''Converts parameters with a relationship to time in 'native' frame time
+        speedRatio is the conversion from the speed unit in the config file
+        to the distance per second
+
+        ie param(config file) = speedRatio x fps x param(used in program)
+        eg km/h = 3.6 (m/s to km/h) x frame/s x m/frame'''
+        denominator = frameRate*speedRatio
+        #denominator2 = denominator**2
+        self.minSpeedEquiprobable = self.minSpeedEquiprobable/denominator
+        self.maxPedestrianSpeed = self.maxPedestrianSpeed/denominator
+        self.maxCyclistSpeed = self.maxCyclistSpeed/denominator
+        self.meanPedestrianSpeed = self.meanPedestrianSpeed/denominator
+        self.stdPedestrianSpeed = self.stdPedestrianSpeed/denominator
+        self.meanVehicleSpeed = self.meanVehicleSpeed/denominator
+        self.stdVehicleSpeed = self.stdVehicleSpeed/denominator
+        # special case for the lognormal distribution
+        self.locationCyclistSpeed = self.locationCyclistSpeed-log(denominator)
+        #self.scaleCyclistSpeed = self.scaleCyclistSpeed # no modification of scale
+
+        
+class ProcessParameters(VideoFilenameAddable):
+    '''Class for all parameters controlling data processing: input,
+    method parameters, etc. for tracking and safety
+
+    Note: framerate is already taken into account'''
+
+    def loadConfigFile(self, filename):
+        from configparser import ConfigParser
+
+        # defaults for the feature tracking parameters (strings, as configparser requires)
+        config = ConfigParser({ 'acceleration-bound' : '3', 
+                                'min-velocity-cosine' : '0.8', 
+                                'ndisplacements' : '3', 
+                                'max-nfeatures' : '1000',
+                                'feature-quality' : '0.0812219538558',
+                                'min-feature-distanceklt' : '3.54964337411',
+                                'block-size' : '7',
+                                'use-harris-detector' : '0',
+                                'k' : '0.04',
+                                'window-size' : '6',
+                                'pyramid-level' : '5',
+                                'min-tracking-error' : '0.183328975142',
+                                'max-number-iterations' : '20',
+                                'feature-flag' : '0',
+                                'min-feature-eig-threshold' : '1e-4',
+                                'min-feature-time' : '15',
+                                'min-feature-displacement' : '0.05',
+                                'tracker-reload-time' : '10'}, strict=False)
+        config.read_file(addSectionHeader(utils.openCheck(filename)))
+        parentPath = Path(filename).parent
+        self.sectionHeader = config.sections()[0]
+        # Tracking/display parameters
+        # filenames are resolved relative to the configuration file location
+        self.videoFilename = utils.getRelativeFilename(parentPath, config.get(self.sectionHeader, 'video-filename'))
+        self.databaseFilename = utils.getRelativeFilename(parentPath, config.get(self.sectionHeader, 'database-filename'))
+        self.homographyFilename = utils.getRelativeFilename(parentPath, config.get(self.sectionHeader, 'homography-filename'))
+        if Path(self.homographyFilename).is_file():
+            self.homography = loadtxt(self.homographyFilename)
+        else:
+            self.homography = None
+        self.intrinsicCameraFilename = utils.getRelativeFilename(parentPath, config.get(self.sectionHeader, 'intrinsic-camera-filename'))
+        if Path(self.intrinsicCameraFilename).is_file():
+            self.intrinsicCameraMatrix = loadtxt(self.intrinsicCameraFilename)
+        else:
+            self.intrinsicCameraMatrix = None
+        # read separately from the configparser (getValuesFromINIFile is defined elsewhere in this module)
+        distortionCoefficients = getValuesFromINIFile(filename, 'distortion-coefficients', '=')        
+        self.distortionCoefficients = [float(x) for x in distortionCoefficients]
+        self.undistortedImageMultiplication  = config.getfloat(self.sectionHeader, 'undistorted-size-multiplication')
+        self.undistort = config.getboolean(self.sectionHeader, 'undistort')
+        self.firstFrameNum = config.getint(self.sectionHeader, 'frame1')
+        self.videoFrameRate = config.getfloat(self.sectionHeader, 'video-fps')
+        
+        self.classifierFilename = utils.getRelativeFilename(parentPath, config.get(self.sectionHeader, 'classifier-filename'))
+        
+        # Safety parameters
+        # speeds and accelerations are converted to frame units
+        # (/3.6 km/h to m/s, then /videoFrameRate per frame; times x videoFrameRate to frames)
+        self.maxPredictedSpeed = config.getfloat(self.sectionHeader, 'max-predicted-speed')/3.6/self.videoFrameRate
+        self.predictionTimeHorizon = config.getfloat(self.sectionHeader, 'prediction-time-horizon')*self.videoFrameRate
+        self.collisionDistance = config.getfloat(self.sectionHeader, 'collision-distance')
+        self.crossingZones = config.getboolean(self.sectionHeader, 'crossing-zones')
+        self.predictionMethod = config.get(self.sectionHeader, 'prediction-method')
+        self.nPredictedTrajectories = config.getint(self.sectionHeader, 'npredicted-trajectories')
+        self.maxNormalAcceleration = config.getfloat(self.sectionHeader, 'max-normal-acceleration')/self.videoFrameRate**2
+        self.maxNormalSteering = config.getfloat(self.sectionHeader, 'max-normal-steering')/self.videoFrameRate
+        self.minExtremeAcceleration = config.getfloat(self.sectionHeader, 'min-extreme-acceleration')/self.videoFrameRate**2
+        self.maxExtremeAcceleration = config.getfloat(self.sectionHeader, 'max-extreme-acceleration')/self.videoFrameRate**2
+        self.maxExtremeSteering = config.getfloat(self.sectionHeader, 'max-extreme-steering')/self.videoFrameRate
+        self.useFeaturesForPrediction = config.getboolean(self.sectionHeader, 'use-features-prediction')
+        self.constantSpeedPrototypePrediction = config.getboolean(self.sectionHeader, 'constant-speed')
+        self.maxLcssDistance = config.getfloat(self.sectionHeader, 'max-lcss-distance')
+        self.lcssMetric = config.get(self.sectionHeader, 'lcss-metric')
+        self.minLcssSimilarity = config.getfloat(self.sectionHeader, 'min-lcss-similarity')
+        
+        # Tracking parameters
+        self.accelerationBound = config.getint(self.sectionHeader, 'acceleration-bound')
+        self.minVelocityCosine = config.getfloat(self.sectionHeader, 'min-velocity-cosine')
+        self.ndisplacements = config.getint(self.sectionHeader, 'ndisplacements')
+        self.maxNFeatures = config.getint(self.sectionHeader, 'max-nfeatures')
+        self.minFeatureDistanceKLT = config.getfloat(self.sectionHeader, 'min-feature-distanceklt')
+        self.featureQuality = config.getfloat(self.sectionHeader, 'feature-quality')
+        self.blockSize = config.getint(self.sectionHeader, 'block-size')
+        self.useHarrisDetector = config.getboolean(self.sectionHeader, 'use-harris-detector')
+        self.k = config.getfloat(self.sectionHeader, 'k')
+        self.winSize = config.getint(self.sectionHeader, 'window-size')
+        self.pyramidLevel = config.getint(self.sectionHeader, 'pyramid-level')
+        self.maxNumberTrackingIterations = config.getint(self.sectionHeader, 'max-number-iterations')
+        self.minTrackingError = config.getfloat(self.sectionHeader, 'min-tracking-error')
+        self.featureFlags = config.getboolean(self.sectionHeader, 'feature-flag')
+        self.minFeatureEigThreshold = config.getfloat(self.sectionHeader, 'min-feature-eig-threshold')
+        self.minFeatureTime = config.getint(self.sectionHeader, 'min-feature-time')
+        self.minFeatureDisplacement = config.getfloat(self.sectionHeader, 'min-feature-displacement')
+        self.updateTimer = config.getint(self.sectionHeader, 'tracker-reload-time')
+        
+
+    def __init__(self, filename = None):
+        self.configFilename = filename
+        if filename is not None and Path(filename).is_file():
+            self.loadConfigFile(filename)
+        else:
+            # no valid configuration: the parameter attributes are left unset
+            print('Configuration filename {} could not be loaded.'.format(filename))
+
+def processVideoArguments(args):
+    '''Loads information from configuration file
+    then checks what was passed on the command line
+    for override (eg video filename and database filename'''
+    parentPath = Path(args.configFilename).parent
+    if args.configFilename is not None: # consider there is a configuration file
+        params = ProcessParameters(args.configFilename)
+        videoFilename = params.videoFilename
+        databaseFilename = params.databaseFilename
+        if params.homography is not None:
+            invHomography = linalg.inv(params.homography)
+        else:
+            invHomography = None
+        intrinsicCameraMatrix = params.intrinsicCameraMatrix
+        distortionCoefficients = array(params.distortionCoefficients)
+        undistortedImageMultiplication = params.undistortedImageMultiplication
+        undistort = params.undistort
+        firstFrameNum = params.firstFrameNum
+    else:
+        invHomography = None
+        undistort = False
+        intrinsicCameraMatrix = None
+        distortionCoefficients = []
+        undistortedImageMultiplication = None
+        undistort = False
+        firstFrameNum = 0
+
+    # override video and database filenames if present on command line
+    # if not absolute, make all filenames relative to the location of the configuration filename
+    if args.videoFilename is not None:
+        videoFilename = args.videoFilename
+    else:
+        videoFilename = params.videoFilename
+    if args.databaseFilename is not None:
+        databaseFilename = args.databaseFilename
+    else:
+        databaseFilename = params.databaseFilename
+
+    return params, videoFilename, databaseFilename, invHomography, intrinsicCameraMatrix, distortionCoefficients, undistortedImageMultiplication, undistort, firstFrameNum
+    
+# deprecated
+class SceneParameters(object):
+    def __init__(self, config, sectionName):
+        from configparser import NoOptionError
+        from ast import literal_eval
+        try:
+            self.sitename = config.get(sectionName, 'sitename')
+            self.databaseFilename = config.get(sectionName, 'data-filename')
+            self.homographyFilename = config.get(sectionName, 'homography-filename')
+            self.calibrationFilename = config.get(sectionName, 'calibration-filename') 
+            self.videoFilename = config.get(sectionName, 'video-filename')
+            self.frameRate = config.getfloat(sectionName, 'framerate')
+            self.date = datetime.strptime(config.get(sectionName, 'date'), datetimeFormat) # 2011-06-22 11:00:39
+            self.translation = literal_eval(config.get(sectionName, 'translation')) #         = [0.0, 0.0]
+            self.rotation = config.getfloat(sectionName, 'rotation')
+            self.duration = config.getint(sectionName, 'duration')
+        except NoOptionError as e:
+            print(e)
+            print('Not a section for scene meta-data')
+
+    @staticmethod
+    def loadConfigFile(filename):
+        from configparser import ConfigParser
+        config = ConfigParser()
+        config.readfp(utils.openCheck(filename))
+        configDict = dict()
+        for sectionName in config.sections():
+            configDict[sectionName] = SceneParameters(config, sectionName) 
+        return configDict
+
+
+if __name__ == "__main__":
+    # runs the module's doctests (tests/storage.txt) when executed directly
+    import doctest
+    import unittest
+    suite = doctest.DocFileSuite('tests/storage.txt')
+    unittest.TextTestRunner().run(suite)
+#     #doctest.testmod()
+#     #doctest.testfile("example.txt")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/sumo.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,54 @@
+#! /usr/bin/env python
+'''Libraries for the SUMO traffic simulation software
+http://sumo.dlr.de
+'''
+import pandas as pd
+
+    
+
+def loadTazEdges(inFilename):
+    '''Converts list of OSM edges per OSM edge and groups per TAZ
+    format is csv with first two columns the OSM id and TAZ id, then the list of SUMO edge id
+
+    Returns the list of SUMO edge per TAZ'''
+    data = []
+    tazs = {}
+    with open(inFilename,'r') as f:
+        f.readline() # skip the headers
+        for r in f:
+            tmp = r.strip().split(',')
+            tazID = tmp[1]
+            for edge in tmp[2:]:                
+                if len(edge) > 0:
+                    if tazID in tazs:
+                        if edge not in tazs[tazID]:
+                            tazs[tazID].append(edge)
+                    else:
+                        tazs[tazID] = [edge]
+    return tazs
+
+def edge2Taz(tazs):
+    '''Returns the associative array of the TAZ of each SUMO edge
+    tazs is a dict mapping a TAZ id to its list of edges (see loadTazEdges)'''
+    edge2Tazs = {}
+    for taz, edges in tazs.items():
+        for edge in edges:
+            if edge in edge2Tazs:
+                # an edge should belong to a single TAZ: duplicates are reported
+                # (note the previous assignment is then overwritten below)
+                print('error for edge: {} (taz {}/{})'.format(edge, edge2Tazs[edge], taz))
+            edge2Tazs[edge] = taz
+    return edge2Tazs
+
+def saveTazEdges(outFilename, tazs):
+    '''Writes the TAZ to SUMO edges mapping (see loadTazEdges)
+    to outFilename in the SUMO taz XML format'''
+    with open(outFilename,'w') as out:
+        out.write('<tazs>\n')
+        for tazID in tazs:
+            out.write('<taz id="{}" edges="'.format(tazID)+' '.join(tazs[tazID])+'"/>\n')
+        out.write('</tazs>\n')
+
+# TODO add utils from process-cyber.py?
+        
+# if __name__ == "__main__":
+#     import doctest
+#     import unittest
+#     suite = doctest.DocFileSuite('tests/sumo.txt')
+#     #suite = doctest.DocTestSuite()
+#     unittest.TextTestRunner().run(suite)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/tests/cvutils.txt	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,47 @@
+>>> from numpy import array, round, ones, dot, linalg, absolute
+>>> import cv2
+>>> from trafficintelligence import cvutils
+>>> img = cv2.imread("../samples/val-dor-117-111.png")
+>>> width = img.shape[1]
+>>> height = img.shape[0]
+>>> intrinsicCameraMatrix = array([[ 377.42,    0.  ,  639.12], [   0.  ,  378.43,  490.2 ], [   0.  ,    0.  ,    1.  ]])
+>>> distortionCoefficients = array([-0.11759321, 0.0148536, 0.00030756, -0.00020578, -0.00091816])# distortionCoefficients = array([-0.11759321, 0., 0., 0., 0.])
+>>> multiplicationFactor = 1.31
+>>> [map1, map2], tmp = cvutils.computeUndistortMaps(width, height, multiplicationFactor, intrinsicCameraMatrix, distortionCoefficients)
+>>> undistorted = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
+>>> (undistorted.shape == array([int(round(height*multiplicationFactor)), int(round(width*multiplicationFactor)), 3])).all()
+True
+>>> imgPoints = array([[[150.,170.],[220.,340.],[340.,440.],[401.,521.]]])
+>>> newCameraMatrix = cv2.getDefaultNewCameraMatrix(intrinsicCameraMatrix, (int(round(width*multiplicationFactor)), int(round(height*multiplicationFactor))), True)
+>>> undistortedPoints = cv2.undistortPoints(imgPoints, intrinsicCameraMatrix, distortionCoefficients, P = newCameraMatrix).reshape(-1, 2) # undistort and project as if seen by new camera
+>>> invNewCameraMatrix = linalg.inv(newCameraMatrix)
+>>> tmp = ones((imgPoints[0].shape[0], 3))
+>>> tmp[:,:2] = undistortedPoints
+>>> reducedPoints = dot(invNewCameraMatrix, tmp.T).T
+>>> origPoints = cv2.projectPoints(reducedPoints, (0.,0.,0.), (0.,0.,0.), intrinsicCameraMatrix, distortionCoefficients)[0].reshape(-1,2)
+>>> (round(origPoints[1:,:]) == imgPoints[0][1:,:]).all()
+True
+>>> (absolute(origPoints[0,:]-imgPoints[0][0,:])).max() < 6.
+True
+>>> reducedPoints2 = cvutils.newCameraProject(undistortedPoints.T, invNewCameraMatrix)
+>>> (reducedPoints == reducedPoints2).all()
+True
+
+>>> undistortedPoints2 = cv2.undistortPoints(imgPoints, intrinsicCameraMatrix, distortionCoefficients).reshape(-1, 2) # undistort and project as if seen by new camera
+>>> undistortedPoints2 = cvutils.newCameraProject(undistortedPoints2.T, newCameraMatrix)
+>>> (undistortedPoints == undistortedPoints2.T).all()
+True
+
+>>> undistortedPoints = cv2.undistortPoints(imgPoints, intrinsicCameraMatrix, distortionCoefficients).reshape(-1, 2) # undistort to ideal points
+>>> origPoints = cvutils.worldToImageProject(undistortedPoints.T, intrinsicCameraMatrix, distortionCoefficients).T
+>>> (round(origPoints[1:,:]) == imgPoints[0][1:,:]).all()
+True
+>>> (absolute(origPoints[0,:]-imgPoints[0][0,:])).max() < 6.
+True
+
+>>> undistortedPoints = cvutils.imageToWorldProject(imgPoints[0].T, intrinsicCameraMatrix, distortionCoefficients)
+>>> origPoints = cvutils.worldToImageProject(undistortedPoints, intrinsicCameraMatrix, distortionCoefficients).T
+>>> (round(origPoints[1:,:]) == imgPoints[0][1:,:]).all()
+True
+>>> (absolute(origPoints[0,:]-imgPoints[0][0,:])).max() < 6.
+True
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/tests/events.txt	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,92 @@
+>>> from trafficintelligence.events import *
+>>> from trafficintelligence.moving import MovingObject, TimeInterval, Point
+>>> from trafficintelligence.prediction import ConstantPredictionParameters
+
+>>> objects = [MovingObject(num = i, timeInterval = TimeInterval(0,10)) for i in range(10)]
+>>> interactions = createInteractions(objects)
+>>> len([i for i in interactions if len(i.roadUserNumbers) == 1])
+0
+>>> len(interactions)
+45
+>>> objects2 = [MovingObject(num = i, timeInterval = TimeInterval(0,10)) for i in range(100, 110)]
+>>> interactions = createInteractions(objects, objects2)
+>>> len([i for i in interactions if len(i.roadUserNumbers) == 1])
+0
+>>> objects3 = [MovingObject(num = i, timeInterval = TimeInterval(12,22)) for i in range(100, 110)]
+>>> interactions = createInteractions(objects, objects3)
+>>> len(interactions)
+0
+>>> interactions = createInteractions(objects, objects3, 3)
+>>> len(interactions)
+100
+>>> interactions[0].getTimeInterval().empty()
+True
+
+>>> o1 = MovingObject.generate(1, Point(-5.,0.), Point(0.,0.), TimeInterval(0,10))
+>>> o2 = MovingObject.generate(2, Point(0.,-5.), Point(0.,1.), TimeInterval(0,10))
+>>> inter = Interaction(roadUser1 = o1, roadUser2 = o2)
+>>> inter.computeIndicators() # should not crash with 0 speed
+>>> va = inter.getIndicator("Velocity Angle")
+>>> va.empty()
+True
+
+>>> o1 = MovingObject.generate(1, Point(-5.,0.), Point(1.,0.), TimeInterval(0,10))
+>>> o2 = MovingObject.generate(2, Point(0.,-5.), Point(0.,1.), TimeInterval(0,10))
+>>> inter = Interaction(roadUser1 = o1, roadUser2 = o2)
+>>> inter.computeIndicators()
+>>> predictionParams = ConstantPredictionParameters(10.)
+>>> inter.computeCrossingsCollisions(predictionParams, 0.1, 10)
+>>> ttc = inter.getIndicator("Time to Collision")
+>>> ttc[0]
+5.0
+>>> ttc[1]
+4.0
+>>> (inter.collisionPoints[0][0] - Point(0.,0.)).norm2() < 0.0001
+True
+>>> (inter.collisionPoints[4][0] - Point(0.,0.)).norm2() < 0.0001
+True
+>>> inter.getIndicator(Interaction.indicatorNames[1])[4] < 0.000001 # collision angle
+True
+>>> inter.getIndicator(Interaction.indicatorNames[1])[5] is None
+True
+>>> inter.getIndicator(Interaction.indicatorNames[1])[6] # doctest:+ELLIPSIS
+3.1415...
+
+# test categorize
+>>> from collections import Counter
+>>> from numpy import pi
+>>> o1 = MovingObject.generate(0, Point(0,0), Point(1,0), TimeInterval(0,100))
+>>> o2 = MovingObject.generate(0, Point(100,1), Point(-1,0), TimeInterval(0,100))
+>>> inter12 = Interaction(roadUser1 = o1, roadUser2 = o2)
+>>> inter12.computeIndicators()
+>>> inter12.categorize(pi*20/180, pi*60/180)
+>>> Counter(inter12.categories.values()).most_common()[0][0] # head on
+0
+>>> inter12.categories[max(inter12.categories.keys())] # then side
+2
+>>> o3 = MovingObject.generate(0, Point(0,2), Point(1,0), TimeInterval(0,100))
+>>> inter13 = Interaction(roadUser1 = o1, roadUser2 = o3)
+>>> inter13.computeIndicators()
+>>> inter13.categorize(pi*20/180, pi*60/180)
+>>> Counter(inter13.categories.values()).most_common()[0][0] # parallel
+3
+>>> len(Counter(inter13.categories.values()))
+1
+>>> o4 = MovingObject.generate(0, Point(100,20), Point(-1,0), TimeInterval(0,100))
+>>> inter14 = Interaction(roadUser1 = o1, roadUser2 = o4)
+>>> inter14.computeIndicators()
+>>> inter14.categorize(pi*20/180, pi*60/180)
+>>> Counter(inter14.categories.values()).most_common()[0][0] # side
+2
+>>> inter12.categories[0] # first head on
+0
+>>> inter12.categories[max(inter12.categories.keys())] # then side
+2
+>>> o5 = MovingObject.generate(0, Point(50,50), Point(0,-1), TimeInterval(0,100))
+>>> inter15 = Interaction(roadUser1 = o1, roadUser2 = o5)
+>>> inter15.computeIndicators()
+>>> inter15.categorize(pi*20/180, pi*60/180)
+>>> Counter(inter15.categories.values()).most_common()[0][0] # side
+2
+>>> len(Counter(inter15.categories.values()))
+1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/tests/indicators.txt	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,49 @@
+>>> from trafficintelligence.indicators import *
+>>> from trafficintelligence.moving import TimeInterval,Trajectory
+
+>>> indic1 = TemporalIndicator('bla', [0,3,-4], TimeInterval(4,6))
+>>> indic1.empty()
+False
+>>> indic1.getIthValue(1)
+3
+>>> indic1.getIthValue(3)
+>>> indic1[6]
+-4
+>>> indic1[7]
+>>> [v for v in indic1]
+[0, 3, -4]
+>>> indic1 = TemporalIndicator('bla', {2:0,4:3,5:-5})
+>>> indic1.getIthValue(1)
+3
+>>> indic1.getIthValue(3)
+>>> indic1[2]
+0
+
+>>> ttc = SeverityIndicator('TTC', list(range(11)), TimeInterval(1,11), mostSevereIsMax = False)
+>>> ttc.getMostSevereValue(1)
+0.0
+>>> ttc.getMostSevereValue(2)
+0.5
+>>> ttc.getMostSevereValue(centile = 10.)
+1.0
+>>> ttc.mostSevereIsMax = True
+>>> ttc.getMostSevereValue(1)
+10.0
+>>> ttc.getMostSevereValue(2)
+9.5
+>>> ttc.getMostSevereValue(centile = 10.)
+9.0
+
+>>> t1 = Trajectory([[0.5,1.5,2.5],[0.5,3.5,6.5]])
+>>> m = indicatorMap([1,2,3], t1, 1)
+>>> m[(1.0, 3.0)]
+2.0
+>>> m[(2.0, 6.0)]
+3.0
+>>> m[(0.0, 0.0)]
+1.0
+>>> m = indicatorMap([1,2,3], t1, 4)
+>>> m[(0.0, 1.0)]
+3.0
+>>> m[(0.0, 0.0)]
+1.5
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/tests/ml.txt	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,11 @@
+>>> from math import fabs
+>>> from numpy import ones
+>>> from trafficintelligence.ml import prototypeCluster
+
+>>> nTrajectories = 7
+>>> similarityFunc = lambda x, y: 1.-fabs(x-y)/(nTrajectories-1)
+>>> similarities = -ones((nTrajectories, nTrajectories))
+>>> prototypeIndices = prototypeCluster(range(nTrajectories), similarities, 1., similarityFunc, optimizeCentroid = True) # too large to be similar
+>>> len(prototypeIndices) == nTrajectories
+True
+>>> # could use lists to have a length
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/tests/moving.txt	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,335 @@
+>>> from trafficintelligence.moving import *
+>>> from trafficintelligence import storage
+>>> import numpy as np
+
+>>> Interval().empty()
+True
+>>> Interval(0,1).empty()
+False
+>>> Interval(0,1)
+0-1
+>>> Interval(0,1).length()
+1.0
+>>> Interval(23.2,24.9).length()
+1.6999999999999993
+>>> Interval(10,8).length()
+0.0
+
+>>> i = Interval.parse('3-5')
+>>> i.first == 3 and i.last == 5
+True
+>>> type(i)
+<class 'trafficintelligence.moving.Interval'>
+>>> i = TimeInterval.parse('3-5')
+>>> type(i)
+<class 'trafficintelligence.moving.TimeInterval'>
+>>> list(i)
+[3, 4, 5]
+
+>>> TimeInterval(0,1).length()
+2.0
+>>> TimeInterval(10,8).length()
+0.0
+>>> TimeInterval(10,8) == TimeInterval(10,8)
+True
+>>> TimeInterval(10,8) == TimeInterval(8,10)
+True
+>>> TimeInterval(11,8) == TimeInterval(10,8)
+False
+
+>>> [i for i in TimeInterval(9,13)]
+[9, 10, 11, 12, 13]
+
+>>> TimeInterval(2,5).equal(TimeInterval(2,5))
+True
+>>> TimeInterval(2,5).equal(TimeInterval(2,4))
+False
+>>> TimeInterval(2,5).equal(TimeInterval(5,2))
+False
+
+>>> TimeInterval(3,6).distance(TimeInterval(4,6))
+0
+>>> TimeInterval(3,6).distance(TimeInterval(6,10))
+0
+>>> TimeInterval(3,6).distance(TimeInterval(8,10))
+2
+>>> TimeInterval(20,30).distance(TimeInterval(3,15))
+5
+>>> TimeInterval.unionIntervals([TimeInterval(3,6), TimeInterval(8,10),TimeInterval(11,15)])
+3-15
+
+>>> Point(0,3) == Point(0,3)
+True
+>>> Point(0,3) == Point(0,3.2)
+False
+>>> Point(3,4)-Point(1,7)
+(2.000000,-3.000000)
+>>> -Point(1,2)
+(-1.000000,-2.000000)
+>>> Point(1,2)*0.5
+(0.500000,1.000000)
+
+>>> Point(3,2).norm2Squared()
+13
+
+>>> Point.distanceNorm2(Point(3,4),Point(1,7))
+3.605551275463989
+
+>>> Point.boundingRectangle([Point(0,0), Point(1,0), Point(0,1), Point(1,1)], Point(1, 1))
+[(0.500000,1.500000), (1.500000,0.500000), (0.500000,-0.500000), (-0.500000,0.500000)]
+>>> Point.boundingRectangle([Point(0,0), Point(1,0), Point(0,1), Point(1,1)], Point(-1, -1))
+[(0.500000,-0.500000), (-0.500000,0.500000), (0.500000,1.500000), (1.500000,0.500000)]
+
+
+>>> Point(3,2).inPolygon(np.array([[0,0],[1,0],[1,1],[0,1]]))
+False
+>>> Point(3,2).inPolygon(np.array([[0,0],[4,0],[4,3],[0,3]]))
+True
+
+>>> predictPositionNoLimit(10, Point(0,0), Point(1,1)) # doctest:+ELLIPSIS
+((1.0...,1.0...), (10.0...,10.0...))
+
+>>> segmentIntersection(Point(0,0), Point(0,1), Point(1,1), Point(2,3))
+>>> segmentIntersection(Point(0,1), Point(0,3), Point(1,0), Point(3,1))
+>>> segmentIntersection(Point(0.,0.), Point(2.,2.), Point(0.,2.), Point(2.,0.))
+(1.000000,1.000000)
+>>> segmentIntersection(Point(0,0), Point(4,4), Point(0,4), Point(4,0))
+(2.000000,2.000000)
+>>> segmentIntersection(Point(0,1), Point(1,2), Point(2,0), Point(3,2))
+
+>>> t1 = Trajectory.fromPointList([(92.2, 102.9), (56.7, 69.6)])
+>>> t2 = Trajectory.fromPointList([(92.2, 102.9), (56.7, 69.6)])
+>>> t1 == t2
+True
+>>> t3 = Trajectory.fromPointList([(92.24, 102.9), (56.7, 69.6)])
+>>> t1 == t3
+False
+>>> t3 = Trajectory.fromPointList([(92.2, 102.9), (56.7, 69.6), (56.7, 69.6)])
+>>> t1 == t3
+False
+
+>>> left = Trajectory.fromPointList([(92.291666666666686, 102.99239033124439), (56.774193548387103, 69.688898836168306)])
+>>> middle = Trajectory.fromPointList([(87.211021505376351, 93.390778871978512), (59.032258064516128, 67.540286481647257)])
+>>> right = Trajectory.fromPointList([(118.82392473118281, 115.68263205013426), (63.172043010752688, 66.600268576544309)])
+>>> alignments = [left, middle, right]
+>>> for a in alignments: a.computeCumulativeDistances()
+>>> getSYfromXY(Point(73, 82), alignments)
+[1, 0, (73.819977,81.106170), 18.172277808821125, 18.172277808821125, 1.2129694042343868]
+>>> getSYfromXY(Point(78, 83), alignments, 0.5)
+[1, 0, (77.033188,84.053889), 13.811799123113715, 13.811799123113715, -1.4301775140225983]
+
+>>> Trajectory().length()
+0
+>>> t1 = Trajectory([[0.5,1.5,2.5],[0.5,3.5,6.5]])
+>>> t1.length() == 3.
+True
+>>> t1[1]
+(1.500000,3.500000)
+
+>>> t1.differentiate()
+(1.000000,3.000000) (1.000000,3.000000)
+>>> t1.differentiate(True)
+(1.000000,3.000000) (1.000000,3.000000) (1.000000,3.000000)
+>>> t1 = Trajectory([[0.5,1.5,3.5],[0.5,2.5,7.5]])
+>>> t1.differentiate()
+(1.000000,2.000000) (2.000000,5.000000)
+
+>>> t1.computeCumulativeDistances()
+>>> t1.getDistance(0)
+2.23606797749979
+>>> t1.getDistance(1)
+5.385164807134504
+>>> t1.getDistance(2)
+Index 2 beyond trajectory length 3-1
+>>> t1.getCumulativeDistance(0)
+0.0
+>>> t1.getCumulativeDistance(1)
+2.23606797749979
+>>> t1.getCumulativeDistance(2)
+7.6212327846342935
+>>> t1.getCumulativeDistance(3)
+Index 3 beyond trajectory length 3
+
+
+>>> from trafficintelligence.utils import LCSS
+>>> lcss = LCSS(lambda x,y: Point.distanceNorm2(x,y) <= 0.1)
+>>> Trajectory.lcss(t1, t1, lcss)
+3
+>>> lcss = LCSS(lambda p1, p2: (p1-p2).normMax() <= 0.1)
+>>> Trajectory.lcss(t1, t1, lcss)
+3
+
+>>> p1=Point(0,0)
+>>> p2=Point(1,0)
+>>> v1 = Point(0.1,0.1)
+>>> v2 = Point(-0.1, 0.1)
+>>> abs(Point.timeToCollision(p1, p2, v1, v2, 0.)-5.0) < 0.00001
+True
+>>> abs(Point.timeToCollision(p1, p2, v1, v2, 0.1)-4.5) < 0.00001
+True
+>>> p1=Point(0,1)
+>>> p2=Point(1,0)
+>>> v1 = Point(0,0.1)
+>>> v2 = Point(0.1, 0)
+>>> Point.timeToCollision(p1, p2, v1, v2, 0.) == None
+True
+>>> Point.timeToCollision(p2, p1, v2, v1, 0.) == None
+True
+>>> Point.midPoint(p1, p2)
+(0.500000,0.500000)
+>>> p1=Point(0.,0.)
+>>> p2=Point(5.,0.)
+>>> v1 = Point(2.,0.)
+>>> v2 = Point(1.,0.)
+>>> Point.timeToCollision(p1, p2, v1, v2, 0.)
+5.0
+>>> Point.timeToCollision(p1, p2, v1, v2, 1.)
+4.0
+
+>>> objects = storage.loadTrajectoriesFromSqlite('../samples/laurier.sqlite', 'object')
+>>> len(objects)
+5
+>>> objects[0].hasFeatures()
+False
+>>> features = storage.loadTrajectoriesFromSqlite('../samples/laurier.sqlite', 'feature')
+>>> for o in objects: o.setFeatures(features)
+>>> objects[0].hasFeatures()
+True
+
+>>> o1 = MovingObject.generate(1, Point(-5.,0.), Point(1.,0.), TimeInterval(0,10))
+>>> o1.getNObjects() is None
+True
+>>> o1.setNObjects(1.1)
+>>> o1.setNObjects(0.5)
+Number of objects represented by object 1 must be greater or equal to 1 (0.5)
+>>> o2 = MovingObject.generate(2, Point(0.,-5.), Point(0.,1.), TimeInterval(0,10))
+>>> MovingObject.computePET(o1, o2, 0.1)
+(0.0, 5, 5)
+>>> o2 = MovingObject.generate(2, Point(0.,-5.), Point(0.,1.), TimeInterval(5,15))
+>>> MovingObject.computePET(o1, o2, 0.1)
+(5.0, 5, 10)
+>>> o2 = MovingObject.generate(2, Point(0.,-5.), Point(0.,1.), TimeInterval(15,30))
+>>> MovingObject.computePET(o1, o2, 0.1)
+(15.0, 5, 20)
+
+>>> t1 = CurvilinearTrajectory.generate(3, 1., 10, 'b')
+>>> t1.length()
+10
+>>> t1[3]
+[6.0, 0, 'b']
+>>> t2 = CurvilinearTrajectory.generate(15, 1., 10, 'a', 1.)
+>>> t2[4]
+[19.0, 1.0, 'a']
+>>> t1.append(t2)
+>>> t1.length()
+20
+>>> t1[9]
+[12.0, 0, 'b']
+>>> o = MovingObject(0, TimeInterval(1,21))
+>>> o.curvilinearPositions = t1
+>>> o.interpolateCurvilinearPositions(2.3)
+[4.3, 0.0, 'b']
+>>> o.interpolateCurvilinearPositions(9.7) # doctest:+ELLIPSIS
+[11.7..., 0.0..., 'b']
+>>> o.interpolateCurvilinearPositions(10.7)
+Object 0 changes lane at 10.7 and alignments are not provided
+>>> t2 = CurvilinearTrajectory.generate(0, 1., 10, 'a', 1.)
+
+>>> t1 = CurvilinearTrajectory.generate(3, 1., 10, 'b')
+>>> t1.duplicateLastPosition()
+>>> t1[-1] == t1[-2]
+True
+
+>>> a = Trajectory.generate(Point(0.,0.), Point(10.,0.), 4)
+>>> t = Trajectory.generate(Point(0.1,-1.), Point(1.,0.), 22)
+>>> prepareAlignments([a])
+>>> ct = CurvilinearTrajectory.fromTrajectoryProjection(t, [a])
+>>> ct[3]
+[3.1, 1.0, 0]
+>>> p = getXYfromSY(ct[3][0], ct[3][1], ct[3][2], [a])
+>>> (Point(p[0], p[1])-t[3]).norm2() < 1e-10
+True
+>>> p = getXYfromSY(ct[21][0], ct[21][1], ct[21][2], [a])
+>>> (Point(p[0], p[1])-t[21]).norm2() < 1e-10
+True
+
+>>> t = CurvilinearTrajectory(S = [1., 2., 3., 5.], Y = [0.5, 0.5, 0.6, 0.7], lanes = ['1']*4)
+>>> t.differentiate() # doctest:+ELLIPSIS
+[1.0, 0.0, None] [1.0, 0.099..., None] [2.0, 0.099..., None]
+>>> t.differentiate(True) # doctest:+ELLIPSIS
+[1.0, 0.0, None] [1.0, 0.099..., None] [2.0, 0.099..., None] [2.0, 0.099..., None]
+>>> t = CurvilinearTrajectory(S = [1.], Y = [0.5], lanes = ['1'])
+>>> t.differentiate().empty()
+True
+
+>>> o1 = MovingObject.generate(1, Point(1., 2.), Point(1., 1.), TimeInterval(0,10))
+>>> o1.features = [o1]
+>>> o2 = MovingObject.generate(2, Point(14., 14.), Point(1., 0.), TimeInterval(14,20))
+>>> o2.features = [o2]
+>>> o3 = MovingObject.generate(3, Point(2., 2.), Point(1., 1.), TimeInterval(2,12))
+>>> o3.features = [o3]
+>>> o13 = MovingObject.concatenate(o1, o3, 4)
+>>> o13.getNum()
+4
+>>> o13.getTimeInterval() == TimeInterval(0,12)
+True
+>>> t=5
+>>> o13.getPositionAtInstant(t) == (o1.getPositionAtInstant(t)+o3.getPositionAtInstant(t)).divide(2)
+True
+>>> len(o13.getFeatures())
+2
+
+>>> o12 = MovingObject.concatenate(o1, o2, 5, minFeatureLength = 6)
+>>> o12.getTimeInterval() == TimeInterval(o1.getFirstInstant(), o2.getLastInstant())
+True
+>>> v = o12.getVelocityAtInstant(12)
+>>> v == Point(3./4, 2./4)
+True
+>>> o12.getPositionAtInstant(11) == o1.getPositionAtInstant(10)+v
+True
+>>> len(o12.getFeatures())
+3
+>>> f = o12.getFeatures()[-1]
+>>> f.length()
+6.0
+
+>>> o1 = MovingObject.generate(1, Point(0., 2.), Point(0., 1.), TimeInterval(0,2))
+>>> o1.classifyUserTypeSpeedMotorized(0.5, np.median)
+>>> userTypeNames[o1.getUserType()]
+'car'
+>>> o1.classifyUserTypeSpeedMotorized(1.5, np.median)
+>>> userTypeNames[o1.getUserType()]
+'pedestrian'
+
+>>> o1 = MovingObject.generate(1, Point(0.,0.), Point(1.,0.), TimeInterval(0,10))
+>>> gt1 = BBMovingObject(MovingObject.generate(1, Point(0.2,0.6), Point(1.,0.), TimeInterval(0,10)), MovingObject.generate(2, Point(-0.2,-0.4), Point(1.,0.), TimeInterval(0,10)), 1, TimeInterval(0,10), )
+>>> gt1.computeCentroidTrajectory()
+>>> computeClearMOT([gt1], [], 0.2, 0, 10)
+(None, 0.0, 11, 0, 0, 11, None, None)
+>>> computeClearMOT([], [o1], 0.2, 0, 10)
+(None, None, 0, 0, 11, 0, None, None)
+>>> computeClearMOT([gt1], [o1], 0.2, 0, 10) # doctest:+ELLIPSIS
+(0.0999..., 1.0, 0, 0, 0, 11, None, None)
+>>> computeClearMOT([gt1], [o1], 0.05, 0, 10)
+(None, -1.0, 11, 0, 11, 11, None, None)
+
+>>> o1 = MovingObject(1, TimeInterval(0,3), positions = Trajectory([list(range(4)), [0.1, 0.1, 1.1, 1.1]]))
+>>> o2 = MovingObject(2, TimeInterval(0,3), positions = Trajectory([list(range(4)), [0.9, 0.9, -0.1, -0.1]]))
+>>> gt1 = BBMovingObject(MovingObject(positions = Trajectory([list(range(4)), [0.]*4])), MovingObject(positions = Trajectory([list(range(4)), [0.]*4])), 1, TimeInterval(0,3))
+>>> gt1.computeCentroidTrajectory()
+>>> gt2 = BBMovingObject(MovingObject(positions = Trajectory([list(range(4)), [1.]*4])), MovingObject(positions = Trajectory([list(range(4)), [1.]*4])), 2, TimeInterval(0,3))
+>>> gt2.computeCentroidTrajectory()
+>>> computeClearMOT([gt1, gt2], [o1, o2], 0.2, 0, 3) # doctest:+ELLIPSIS
+(0.1..., 0.75, 0, 2, 0, 8, None, None)
+>>> computeClearMOT([gt2, gt1], [o2, o1], 0.2, 0, 3) # doctest:+ELLIPSIS
+(0.1..., 0.75, 0, 2, 0, 8, None, None)
+>>> computeClearMOT([gt1], [o1, o2], 0.2, 0, 3)
+(0.1, -0.25, 0, 1, 4, 4, None, None)
+>>> computeClearMOT([gt1], [o2, o1], 0.2, 0, 3) # symmetry
+(0.1, -0.25, 0, 1, 4, 4, None, None)
+>>> computeClearMOT([gt1, gt2], [o1], 0.2, 0, 3) # doctest:+ELLIPSIS
+(0.100..., 0.375, 4, 1, 0, 8, None, None)
+>>> computeClearMOT([gt2, gt1], [o1], 0.2, 0, 3) # doctest:+ELLIPSIS
+(0.100..., 0.375, 4, 1, 0, 8, None, None)
+>>> computeClearMOT([gt1, gt2], [o1, o2], 0.08, 0, 3)
+(None, -1.0, 8, 0, 8, 8, None, None)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/tests/moving_shapely.txt	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,28 @@
+>>> from trafficintelligence.moving import *
+>>> from shapely.geometry import Polygon
+>>> from shapely.prepared import prep
+
+>>> t1 = Trajectory([[0.5,1.5,2.5],[0.5,3.5,6.5]])
+>>> poly = Polygon([[0,0],[4,0],[4,3],[0,3]])
+>>> sub1, sub2 = t1.getTrajectoryInPolygon(poly)
+>>> sub1
+(0.500000,0.500000)
+>>> sub1, sub2 = t1.getTrajectoryInPolygon(Polygon([[10,10],[14,10],[14,13],[10,13]]))
+>>> sub1.length()
+0
+>>> sub1, sub2 = t1.getTrajectoryInPolygon(prep(poly))
+>>> sub1
+(0.500000,0.500000)
+>>> t2 = t1.differentiate(True)
+>>> sub1, sub2 = t1.getTrajectoryInPolygon(prep(poly), t2)
+>>> sub1.length() == sub2.length()
+True
+>>> sub1
+(0.500000,0.500000)
+>>> sub2
+(1.000000,3.000000)
+
+>>> t1.proportionInPolygon(poly, 0.5)
+False
+>>> t1.proportionInPolygon(poly, 0.3)
+True
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/tests/prediction.txt	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,70 @@
+>>> from trafficintelligence.prediction import *
+>>> from trafficintelligence import moving, storage, utils
+>>> from numpy import absolute, array, max
+
+>>> et = PredictedTrajectoryConstant(moving.Point(0,0), moving.Point(1,0))
+>>> et.predictPosition(4) # doctest:+ELLIPSIS
+(4.0...,0.0...)
+>>> et.predictPosition(1) # doctest:+ELLIPSIS
+(1.0...,0.0...)
+
+>>> et = PredictedTrajectoryConstant(moving.Point(0,0), moving.Point(1,0), moving.NormAngle(0.1,0), maxSpeed = 2)
+>>> et.predictPosition(10) # doctest:+ELLIPSIS
+(15.5...,0.0...)
+>>> et.predictPosition(11) # doctest:+ELLIPSIS
+(17.5...,0.0...)
+>>> et.predictPosition(12) # doctest:+ELLIPSIS
+(19.5...,0.0...)
+
+>>> import random
+>>> acceleration = lambda: random.uniform(-0.5,0.5)
+>>> steering = lambda: random.uniform(-0.1,0.1)
+>>> et = PredictedTrajectoryRandomControl(moving.Point(0,0),moving.Point(1,1), acceleration, steering, maxSpeed = 2)
+>>> p = et.predictPosition(500)
+>>> max(et.getPredictedSpeeds()) <= 2.
+True
+
+>>> p = moving.Point(3,4)
+>>> sp = SafetyPoint(p, 0.1, 0)
+>>> print(sp)
+3 4 0.1 0
+
+>>> et1 = PredictedTrajectoryConstant(moving.Point(-5.,0.), moving.Point(1.,0.))
+>>> et2 = PredictedTrajectoryConstant(moving.Point(0.,-5.), moving.Point(0.,1.))
+>>> collision, t, cp1, cp2 = computeCollisionTime(et1, et2, 0.1, 10)
+>>> collision
+True
+>>> t
+5
+>>> collision, t, cp1, cp2 = computeCollisionTime(et1, et2, 0.1, 5)
+>>> collision
+True
+>>> t
+5
+>>> collision, t, cp1, cp2 = computeCollisionTime(et1, et2, 0.1, 4)
+>>> collision
+False
+
+>>> proto = storage.loadTrajectoriesFromSqlite('../samples/laurier.sqlite', 'feature', [1204])[0]
+>>> proto.getPositions().computeCumulativeDistances()
+>>> et = PredictedTrajectoryPrototype(proto.getPositionAt(10)+moving.Point(0.5, 0.5), proto.getVelocityAt(10)*0.9, proto, True)
+>>> absolute(et.initialSpeed - proto.getVelocityAt(10).norm2()*0.9) < 1e-5
+True
+>>> for t in range(int(proto.length())): x=et.predictPosition(t)
+>>> traj = et.getPredictedTrajectory()
+>>> traj.computeCumulativeDistances()
+>>> absolute(array(traj.distances).mean() - et.initialSpeed < 1e-3)
+True
+
+>>> et = PredictedTrajectoryPrototype(proto.getPositionAt(10)+moving.Point(0.6, 0.6), proto.getVelocityAt(10)*0.7, proto, False)
+>>> absolute(et.initialSpeed - proto.getVelocityAt(10).norm2()*0.7) < 1e-5
+True
+>>> proto = moving.MovingObject.generate(1, moving.Point(-5.,0.), moving.Point(1.,0.), moving.TimeInterval(0,10))
+>>> et = PredictedTrajectoryPrototype(proto.getPositionAt(0)+moving.Point(0., 1.), proto.getVelocityAt(0)*0.5, proto, False)
+>>> for t in range(int(proto.length()/0.5)): x=et.predictPosition(t)
+>>> et.predictPosition(10) # doctest:+ELLIPSIS
+(0.0...,1.0...)
+>>> et.predictPosition(12) # doctest:+ELLIPSIS
+(1.0...,1.0...)
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/tests/storage.txt	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,128 @@
+>>> from io import StringIO
+>>> from os import remove
+>>> from trafficintelligence.storage import *
+>>> from trafficintelligence.utils import openCheck, readline
+>>> from trafficintelligence.moving import MovingObject, Point, TimeInterval, Trajectory, prepareAlignments
+
+>>> f = openCheck('non_existant_file.txt')
+File non_existant_file.txt could not be opened.
+
+>>> nonexistentFilename = "nonexistent"
+>>> loadTrajectoriesFromSqlite(nonexistentFilename, 'feature')
+[]
+
+>>> o1 = MovingObject.generate(2, Point(0.,0.), Point(1.,0.), TimeInterval(0,10))
+>>> o2 = MovingObject.generate(3, Point(1.,1.), Point(-0.5,-0.2), TimeInterval(0,9))
+>>> saveTrajectoriesToSqlite('test.sqlite', [o1, o2], 'feature')
+>>> objects = loadTrajectoriesFromSqlite('test.sqlite', 'feature')
+>>> objects[0].getNum() == o1.num
+True
+>>> objects[1].getNum() == o2.num
+True
+>>> o1.getTimeInterval() == objects[0].getTimeInterval()
+True
+>>> o2.getTimeInterval() == objects[1].getTimeInterval()
+True
+>>> o1.getVelocities().length() == objects[0].getVelocities().length()
+True
+>>> o2.getVelocities().length() == objects[1].getVelocities().length()
+True
+>>> o1.getVelocities() == objects[0].getVelocities()
+True
+>>> o2.getVelocities() == objects[1].getVelocities()
+True
+>>> o1.getPositions() == objects[0].getPositions()
+True
+>>> o2.getPositions() == objects[1].getPositions()
+True
+>>> objects = loadTrajectoriesFromSqlite('test.sqlite', 'feature', timeStep = 2)
+>>> objects[0].positions.length()
+6
+>>> objects[1].positions.length()
+5
+>>> objects = loadTrajectoriesFromSqlite('test.sqlite', 'feature', timeStep = 3)
+>>> objects[0].positions.length()
+4
+>>> objects[1].positions.length()
+4
+>>> remove('test.sqlite')
+
+>>> align1 = Trajectory.fromPointList([Point(-1, 0), Point(20, 0)])
+>>> align2 = Trajectory.fromPointList([Point(-9, -3), Point(6, 3)])
+>>> align1.computeCumulativeDistances()
+>>> align2.computeCumulativeDistances()
+>>> prepareAlignments([align1, align2])
+>>> o1.projectCurvilinear([align1, align2])
+>>> o2.projectCurvilinear([align1, align2])
+>>> saveTrajectoriesToSqlite('test.sqlite', [o1, o2], 'curvilinear')
+>>> addCurvilinearTrajectoriesFromSqlite('test.sqlite', {o.num: o for o in objects})
+>>> o1.curvilinearPositions[3][:2] == objects[0].curvilinearPositions[3][:2]
+True
+>>> o1.curvilinearPositions[7][:2] == objects[0].curvilinearPositions[7][:2]
+True
+>>> [str(l) for l in o1.curvilinearPositions.getLanes()] == objects[0].curvilinearPositions.getLanes()
+True
+>>> o2.curvilinearPositions[2][:2] == objects[1].curvilinearPositions[2][:2]
+True
+>>> o2.curvilinearPositions[6][:2] == objects[1].curvilinearPositions[6][:2]
+True
+>>> [str(l) for l in o2.curvilinearPositions.getLanes()] == objects[1].curvilinearPositions.getLanes()
+True
+>>> remove('test.sqlite')
+
+>>> f1 = MovingObject.generate(3, Point(0.,0.), Point(1.,0.), TimeInterval(0,10))
+>>> f2 = MovingObject.generate(4, Point(1.,1.), Point(-0.5,-0.2), TimeInterval(0,9))
+>>> o1 = MovingObject(num = 1, userType = 1)
+>>> o1.features = [f1, f2]
+>>> saveTrajectoriesToSqlite('test.sqlite', [o1], 'object')
+>>> objects = loadTrajectoriesFromSqlite('test.sqlite', 'object', withFeatures = True)
+>>> len(objects)
+1
+>>> reloaded1 = objects[0]
+>>> reloaded1.getNum() == o1.getNum()
+True
+>>> reloaded1.getUserType() == o1.getUserType()
+True
+>>> len(reloaded1.featureNumbers)
+2
+>>> len(reloaded1.features)
+2
+>>> reloaded1.getPositionAt(0) == Point.midPoint(f1.getPositionAt(0), f2.getPositionAt(0))
+True
+>>> reloaded1.getPositionAt(5) == Point.midPoint(f1.getPositionAt(5), f2.getPositionAt(5))
+True
+>>> reloaded1.getPositionAt(10) == f1.getPositionAt(10)
+True
+>>> set(reloaded1.featureNumbers) == set([f1.num, f2.num])
+True
+>>> remove('test.sqlite')
+
+>>> strio = StringIO('# asdlfjasdlkj0\nsadlkfjsdlakjf')
+>>> readline(strio)
+'sadlkfjsdlakjf'
+>>> strio = StringIO('# asdlfjasdlkj0\nsadlkfjsdlakjf')
+>>> readline(strio, ['#'])
+'sadlkfjsdlakjf'
+>>> strio = StringIO('# asdlfjasdlkj0\nsadlkfjsdlakjf')
+>>> readline(strio, ['%'])
+'# asdlfjasdlkj0'
+>>> strio = StringIO('# asdlfjasdlkj0\nsadlkfjsdlakjf')
+>>> readline(strio, '%*$')
+'# asdlfjasdlkj0'
+>>> readline(strio, '%#')
+'sadlkfjsdlakjf'
+
+>>> from sklearn.mixture import GaussianMixture
+>>> from numpy.random import random_sample
+>>> nPoints = 50
+>>> points = random_sample(nPoints*2).reshape(nPoints,2)
+>>> gmm = GaussianMixture(4, covariance_type = 'full')
+>>> tmp = gmm.fit(points)
+>>> gmmId = 0
+>>> savePOIsToSqlite('pois-tmp.sqlite', gmm, 'end', gmmId)
+>>> reloadedGmm = loadPOIsFromSqlite('pois-tmp.sqlite')
+>>> sum(gmm.predict(points) == reloadedGmm[gmmId].predict(points)) == nPoints
+True
+>>> reloadedGmm[gmmId].gmmTypes[0] == 'end'
+True
+>>> remove('pois-tmp.sqlite')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/tests/utils.txt	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,144 @@
+>>> from trafficintelligence.utils import *
+>>> from trafficintelligence.moving import Point
+
+>>> upperCaseFirstLetter('mmmm... donuts')
+'Mmmm... Donuts'
+>>> s = upperCaseFirstLetter('much ado about nothing')
+>>> s == 'Much Ado About Nothing'
+True
+>>> upperCaseFirstLetter(s) == s
+True
+
+>>> computeChi2([],[])
+0
+>>> computeChi2(list(range(1,10)),list(range(1,10)))
+0.0
+>>> computeChi2(list(range(1,9)),list(range(1,10)))
+0.0
+
+>>> ceilDecimals(1.23, 0)
+2.0
+>>> ceilDecimals(1.23, 1)
+1.3
+
+>>> inBetween(1,2,1.5)
+True
+>>> inBetween(2.1,1,1.5)
+True
+>>> inBetween(1,2,0)
+False
+
+>>> removeExtension('test-adfasdf.asdfa.txt')
+'test-adfasdf.asdfa'
+>>> removeExtension('test-adfasdf')
+'test-adfasdf'
+
+>>> values = line2Ints('1 2 3 5 6')
+>>> values[0]
+1
+>>> values[-1]
+6
+>>> values = line2Floats('1.3 2.45 7.158e+01 5 6')
+>>> values[0]
+1.3
+>>> values[2] #doctest: +ELLIPSIS
+71.5...
+>>> values[-1]
+6.0
+
+>>> stepPlot([3, 5, 7, 8], 1, 10, 0)
+([1, 3, 3, 5, 5, 7, 7, 8, 8, 10], [0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
+
+>>> mostCommon(['a','b','c','b'])
+'b'
+>>> mostCommon(['a','b','c','b', 'c'])
+'b'
+>>> mostCommon(list(range(10))+[1])
+1
+>>> mostCommon([(1,2), (2,3), (1,2)])
+(1, 2)
+
+>>> res = sortByLength([list(range(3)), list(range(4)), list(range(1))])
+>>> [len(r) for r in res]
+[1, 3, 4]
+>>> res = sortByLength([list(range(3)), list(range(4)), list(range(1)), list(range(5))], reverse = True)
+>>> [len(r) for r in res]
+[5, 4, 3, 1]
+
+>>> lcss = LCSS(similarityFunc = lambda x,y: abs(x-y) <= 0.1)
+>>> lcss.compute(list(range(5)), list(range(5)))
+5
+>>> lcss.compute(list(range(1,5)), list(range(5)))
+4
+>>> lcss.compute(list(range(5,10)), list(range(5)))
+0
+>>> lcss.compute(list(range(5)), list(range(10)))
+5
+>>> lcss.similarityFunc = lambda x,y: x == y
+>>> lcss.compute(['a','b','c'], ['a','b','c', 'd'])
+3
+>>> lcss.computeNormalized(['a','b','c'], ['a','b','c', 'd']) #doctest: +ELLIPSIS
+1.0
+>>> lcss.computeNormalized(['a','b','c','x'], ['a','b','c', 'd']) #doctest: +ELLIPSIS
+0.75
+>>> lcss.compute(['a','b','c'], ['a','b','c', 'd'])
+3
+>>> lcss.compute(['a','x','b','c'], ['a','b','c','d','x'])
+3
+>>> lcss.compute(['a','b','c','x','d'], ['a','b','c','d','x'])
+4
+>>> lcss.delta = 1
+>>> lcss.compute(['a','b','c'], ['a','b','x','x','c'])
+2
+
+>>> lcss.delta = float('inf')
+>>> lcss.compute(['a','b','c'], ['a','b','c', 'd'], computeSubSequence = True)
+3
+>>> lcss.subSequenceIndices
+[(0, 0), (1, 1), (2, 2)]
+>>> lcss.compute(['a','b','c'], ['x','a','b','c'], computeSubSequence = True)
+3
+>>> lcss.subSequenceIndices
+[(0, 1), (1, 2), (2, 3)]
+>>> lcss.compute(['a','g','b','c'], ['a','b','c', 'd'], computeSubSequence = True)
+3
+>>> lcss.subSequenceIndices
+[(0, 0), (2, 1), (3, 2)]
+
+>>> alignedLcss = LCSS(lambda x,y:(abs(x-y) <= 0.1), delta = 2, aligned = True)
+>>> alignedLcss.compute(list(range(5)), list(range(5)))
+5
+>>> alignedLcss.compute(list(range(1,5)), list(range(5)))
+4
+
+>>> alignedLcss.compute(list(range(5,10)), list(range(10)))
+5
+
+>>> lcss.delta = 2
+>>> lcss.compute(list(range(5,10)), list(range(10)))
+0
+>>> alignedLcss.delta = 6
+>>> alignedLcss.compute(list(range(5)), list(range(5)))
+5
+>>> alignedLcss.compute(list(range(5)), list(range(6)))
+5
+>>> lcss.delta = 10
+>>> alignedLcss.compute(list(range(1,7)), list(range(6)))
+5
+>>> lcss = LCSS(lambda x,y: x == y, delta = 2, aligned = True)
+>>> lcss.compute(list(range(20)), [2,4,6,7,8,9,11,13], True)
+8
+>>> lcss.subSequenceIndices
+[(2, 0), (4, 1), (6, 2), (7, 3), (8, 4), (9, 5), (11, 6), (13, 7)]
+
+>>> lcss = LCSS(metric = 'cityblock', epsilon = 0.1)
+>>> lcss.compute([[i] for i in range(5)], [[i] for i in range(5)])
+5
+>>> lcss.compute([[i] for i in range(1,5)], [[i] for i in range(5)])
+4
+>>> lcss.compute([[i] for i in range(5,10)], [[i] for i in range(5)])
+0
+>>> lcss.compute([[i] for i in range(5)], [[i] for i in range(10)])
+5
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/traffic_engineering.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,336 @@
+#! /usr/bin/env python
+''' Traffic Engineering Tools and Examples'''
+
+from math import ceil
+
+from trafficintelligence import prediction
+
+#########################
+# Simulation
+#########################
+
+def generateTimeHeadways(meanTimeHeadway, simulationTime):
+    '''Generates the time headways between arrivals 
+    given the meanTimeHeadway and the negative exponential distribution
+    over a time interval of length simulationTime (assumed to be in same time unit as headway)'''
+    from random import expovariate
+    headways = []
+    totalTime = 0
+    flow = 1/meanTimeHeadway
+    while totalTime < simulationTime:
+        h = expovariate(flow)
+        headways.append(h)
+        totalTime += h
+    return headways
+
+class RoadUser(object):
+    '''Simple example of inheritance to plot different road users '''
+    def __init__(self, position, velocity):
+        'Both fields are 2D numpy arrays'
+        self.position = position.astype(float)        
+        self.velocity = velocity.astype(float)
+
+    def move(self, deltaT):
+        self.position += deltaT*self.velocity
+
+    def draw(self, init = False):
+        from matplotlib.pyplot import plot
+        if init:
+            self.plotLine = plot(self.position[0], self.position[1], self.getDescriptor())[0]
+        else:
+            self.plotLine.set_data(self.position[0], self.position[1])
+
+
+class PassengerVehicle(RoadUser):
+    def getDescriptor(self):
+        return 'dr'
+
+class Pedestrian(RoadUser):
+    def getDescriptor(self):
+        return 'xb'
+
+class Cyclist(RoadUser):
+    def getDescriptor(self):
+        return 'og'
+
+#########################
+# queueing models
+#########################
+
+class CapacityReduction(object):
+    def __init__(self, beta, reductionDuration, demandCapacityRatio = None, demand = None, capacity = None):
+        '''reduction duration should be positive
+        demandCapacityRatio is demand/capacity (q/s)'''
+        if demandCapacityRatio is None and demand is None and capacity is None:
+            print('Missing too much information (demand, capacity and ratio)')
+            import sys
+            sys.exit()
+        if 0 <= beta < 1:
+            self.beta = beta
+            self.reductionDuration = reductionDuration
+
+            if demandCapacityRatio is not None:
+                self.demandCapacityRatio = demandCapacityRatio
+            if demand is not None:
+                self.demand = demand
+            if capacity is not None:
+                self.capacity = capacity
+            if capacity is not None and demand is not None:
+                self.demandCapacityRatio = float(self.demand)/self.capacity
+                if demand <= beta*capacity:
+                    print('There is no queueing as the demand {} is inferior to the reduced capacity {}'.format(demand, beta*capacity))
+        else:
+            print('reduction coefficient (beta={}) is not in [0, 1['.format(beta))
+
+    def queueingDuration(self):
+        return self.reductionDuration*(1-self.beta)/(1-self.demandCapacityRatio)
+
+    def nArrived(self, t):
+        if self.demand is None:
+            print('Missing demand field')
+            return None
+        return self.demand*t
+
+    def nServed(self, t):
+        if self.capacity is None:
+            print('Missing capacity field')
+            return None
+        if 0<=t<=self.reductionDuration:
+            return self.beta*self.capacity*t
+        elif self.reductionDuration < t <= self.queueingDuration():
+            return self.beta*self.capacity*self.reductionDuration+self.capacity*(t-self.reductionDuration)
+
+    def nQueued(self, t):
+        return self.nArrived(t)-self.nServed(t)
+
+    def maxNQueued(self):
+        return self.nQueued(self.reductionDuration)
+
+    def totalDelay(self):
+        if self.capacity is None:
+            print('Missing capacity field')
+            return None
+        return self.capacity*self.reductionDuration**2*(1-self.beta)*(self.demandCapacityRatio-self.beta)/(2*(1-self.demandCapacityRatio))
+    
+    def averageDelay(self):
+        return self.reductionDuration*(self.demandCapacityRatio-self.beta)/(2*self.demandCapacityRatio)
+
+    def averageNQueued(self):
+        return self.totalDelay()/self.queueingDuration()
+
+
+#########################
+# fundamental diagram
+#########################
+
+class FundamentalDiagram(object):
+    ''' '''
+    def __init__(self, name):
+        self.name = name
+
+    def q(self, k):
+        return k*self.v(k)
+
+    @staticmethod
+    def meanHeadway(k):
+        return 1/k
+    
+    @staticmethod
+    def meanSpacing(q):
+        return 1/q
+
+    def plotVK(self, language='fr', units={}):
+        from numpy import arange
+        from matplotlib.pyplot import figure,plot,xlabel,ylabel
+        densities = [k for k in arange(1, self.kj+1)]
+        figure()
+        plot(densities, [self.v(k) for k in densities])
+        xlabel('Densite (veh/km)') # todo other languages and adapt to units
+        ylabel('Vitesse (km/h)')
+
+    def plotQK(self, language='fr', units={}):
+        from numpy import arange
+        from matplotlib.pyplot import figure,plot,xlabel,ylabel
+        densities = [k for k in arange(1, self.kj+1)]
+        figure()
+        plot(densities, [self.q(k) for k in densities])
+        xlabel('Densite (veh/km)') # todo other languages and adapt to units
+        ylabel('Debit (km/h)')
+
+class GreenbergFD(FundamentalDiagram):
+    '''Speed is the logarithm of density'''
+    def __init__(self, vc, kj):
+        FundamentalDiagram.__init__(self,'Greenberg')
+        self.vc=vc
+        self.kj=kj
+    
+    def v(self,k):
+        from numpy import log
+        return self.vc*log(self.kj/k)
+
+    def criticalDensity(self): 
+        from numpy import e
+        self.kc = self.kj/e
+        return self.kc
+
+    def capacity(self):
+        self.qmax = self.kc*self.vc
+        return self.qmax
+
+#########################
+# intersection
+#########################
+
+class FourWayIntersection(object):
+    '''Simple class for simple intersection outline'''
+    def __init__(self, dimension, coordX, coordY):
+        self.dimension = dimension
+        self.coordX = coordX
+        self.coordY = coordY
+
+    def plot(self, options = 'k'):
+        from matplotlib.pyplot import plot, axis
+    
+        minX = min(self.dimension[0])
+        maxX = max(self.dimension[0])
+        minY = min(self.dimension[1])
+        maxY = max(self.dimension[1])
+        
+        plot([minX, self.coordX[0], self.coordX[0]], [self.coordY[0], self.coordY[0], minY],options)
+        plot([self.coordX[1], self.coordX[1], maxX], [minY, self.coordY[0], self.coordY[0]],options)
+        plot([minX, self.coordX[0], self.coordX[0]], [self.coordY[1], self.coordY[1], maxY],options)
+        plot([self.coordX[1], self.coordX[1], maxX], [maxY, self.coordY[1], self.coordY[1]],options)
+        axis('equal')
+
+#########################
+# traffic signals
+#########################
+
+class Volume(object):
+    '''Class to represent volumes with varied vehicle types '''
+    def __init__(self, volume, types = ['pc'], proportions = [1], equivalents = [1], nLanes = 1):
+        '''mvtEquivalent is the equivalent if the movement is a right or left turn'''
+
+        # check the sizes of the lists
+        if sum(proportions) == 1:
+            self.volume = volume
+            self.types = types
+            self.proportions = proportions
+            self.equivalents = equivalents
+            self.nLanes = nLanes
+        else:
+            print('Proportions do not sum to 1')
+            pass
+
+    def checkProtected(self, opposedThroughMvt):
+        '''Checks if this left movement should be protected,
+        ie if one of the main two conditions on left turn is verified'''
+        return self.volume >= 200 or self.volume*opposedThroughMvt.volume/opposedThroughMvt.nLanes > 50000
+
+    def getPCUVolume(self):
+        '''Returns the passenger-car equivalent for the input volume'''
+        v = 0
+        for p, e in zip(self.proportions, self.equivalents):
+            v += p*e
+        return v*self.volume
+
+class IntersectionMovement(object):
+    '''Represents an intersection movement
+    with a volume, a type (through, left or right)
+    and an equivalent for movement type'''
+    def __init__(self, volume, mvtEquivalent = 1):
+        self.volume = volume
+        self.mvtEquivalent = mvtEquivalent
+
+    def getTVUVolume(self):
+        return self.mvtEquivalent*self.volume.getPCUVolume()    
+
+class LaneGroup(object):
+    '''Class that represents a group of movements'''
+
+    def __init__(self, movements, nLanes):
+        self.movements = movements
+        self.nLanes = nLanes
+
+    def getTVUVolume(self):
+        return sum([mvt.getTVUVolume() for mvt in self.movements])
+
+    def getCharge(self, saturationVolume):
+        return self.getTVUVolume()/(self.nLanes*saturationVolume)
+
+def optimalCycle(lostTime, criticalCharge):
+    return (1.5*lostTime+5)/(1-criticalCharge)
+
+def minimumCycle(lostTime, criticalCharge, degreeSaturation=1.):
+    'degree of saturation can be used as the peak hour factor too'
+    return lostTime/(1-criticalCharge/degreeSaturation)
+
+class Cycle(object):
+    '''Class to compute optimal cycle and the split of effective green times'''
+    def __init__(self, phases, lostTime, saturationVolume):
+        '''phases is a list of phases
+        a phase is a list of lanegroups'''
+        self.phases = phases
+        self.lostTime = lostTime
+        self.saturationVolume = saturationVolume
+
+    def computeCriticalCharges(self):
+        self.criticalCharges = [max([lg.getCharge(self.saturationVolume) for lg in phase]) for phase in self.phases]
+        self.criticalCharge = sum(self.criticalCharges)
+        
+    def computeOptimalCycle(self):
+        self.computeCriticalCharges()
+        self.C = optimalCycle(self.lostTime, self.criticalCharge)
+        return self.C
+
+    def computeMinimumCycle(self, degreeSaturation=1.):
+        self.computeCriticalCharges()
+        self.C = minimumCycle(self.lostTime, self.criticalCharge, degreeSaturation)
+        return self.C
+
+    def computeEffectiveGreen(self):
+        #from numpy import round
+        #self.computeCycle() # in case it was not done before
+        effectiveGreenTime = self.C-self.lostTime
+        self.effectiveGreens = [round(c*effectiveGreenTime/self.criticalCharge,1) for c in self.criticalCharges]
+        return self.effectiveGreens
+
+
+def computeInterGreen(perceptionReactionTime, initialSpeed, intersectionLength, vehicleAverageLength = 6, deceleration = 3):
+    '''Computes the intergreen time (yellow/amber plus all red time)
+    Deceleration is positive
+    All variables should be in the same units'''
+    if deceleration > 0:
+        return [perceptionReactionTime+float(initialSpeed)/(2*deceleration), float(intersectionLength+vehicleAverageLength)/initialSpeed]
+    else:
+        print('Issue deceleration should be strictly positive')
+        return None
+
+def uniformDelay(cycleLength, effectiveGreen, saturationDegree):
+    '''Computes the uniform delay'''
+    return 0.5*cycleLength*(1-float(effectiveGreen)/cycleLength)**2/(1-float(effectiveGreen*saturationDegree)/cycleLength)
+
+def randomDelay(volume, saturationDegree):
+    '''Computes the random delay = queueing time for M/D/1'''
+    return saturationDegree**2/(2*volume*(1-saturationDegree))
+
+def incrementalDelay(T, X, c, k=0.5, I=1):
+    '''Computes the incremental delay (HCM)
+    T in hours
+    c capacity of the lane group
+    k default for fixed time signal
+    I=1 for isolated intersection (Poisson arrival)'''
+    from math import sqrt
+    return 900*T*(X - 1 + sqrt((X - 1)**2 + 8*k*I*X/(c*T)))
+
+#########################
+# misc
+#########################
+
+def timeChangingSpeed(v0, vf, a, TPR):
+    'for decelerations, a < 0'
+    return TPR-(vf-v0)/a
+
+def distanceChangingSpeed(v0, vf, a, TPR):
+    'for decelerations, a < 0'
+    return TPR*v0-(vf**2-v0**2)/(2*a)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/ubc_utils.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,224 @@
+#! /usr/bin/env python
+'''Various utilities to load data saved by the UBC tool(s)'''
+
+from trafficintelligence import utils, events, storage, indicators
+from trafficintelligence.moving import MovingObject, TimeInterval, Trajectory
+
+
+fileTypeNames = ['feature',
+                 'object',
+                 'prototype',
+                 'contoursequence']
+
+severityIndicatorNames = ['Distance',
+                          'Collision Course Cosine',
+                          'Velocity Cosine',
+                          'Speed Differential',
+                          'Collision Probability',
+                          'Severity Index',
+                          'Time to Collision']
+
+userTypeNames = ['car',
+                 'pedestrian',
+                 'twowheels',
+                 'bus',
+                 'truck']
+
+# severityIndicator = {'Distance': 0,
+#                      'Cosine': 1,
+#                      'Velocity Cosine': 2,
+#                      'Speed Differential': 3,
+#                      'Collision Probability': 4,
+#                      'Severity Index': 5,
+#                      'TTC': 6}
+
+mostSevereIsMax = [False, 
+                   False, 
+                   True, 
+                   True, 
+                   True, 
+                   True, 
+                   False]
+
+ignoredValue = [None, None, None, None, None, None, -1]
+
+def getFileType(s):
+    'Finds the type in fileTypeNames'
+    for fileType in fileTypeNames:
+        if s.find(fileType)>0:
+            return fileType
+    return ''
+
+def isFileType(s, fileType):
+    return (s.find(fileType)>0)
+
+def saveTrajectoryUserTypes(inFilename, outFilename, objects):
+    '''The program saves the objects, 
+    by just copying the corresponding trajectory and velocity data
+    from the inFilename, and saving the characteristics in objects (first line)
+    into outFilename'''
+    infile = utils.openCheck(inFilename)
+    outfile = utils.openCheck(outFilename,'w')
+
+    if (inFilename.find('features') >= 0) or infile is None or outfile is None:
+        return
+
+    lines = utils.getLines(infile)
+    objNum = 0 # in inFilename
+    while lines != []:
+        # find object in objects (index i)
+        i = 0
+        while (i<len(objects)) and (objects[i].num != objNum):
+            i+=1
+
+        if i<len(objects):
+            l = lines[0].split(' ')
+            l[3] = str(objects[i].userType)
+            outfile.write(' '.join(l)+'\n')
+            for l in lines[1:]:
+                outfile.write(l+'\n')
+            outfile.write(utils.delimiterChar+'\n')
+        # next object
+        objNum += 1
+        lines = utils.getLines(infile)
+
+    print('read {0} objects'.format(objNum))
+
+def modifyTrajectoryFile(modifyLines, filenameIn, filenameOut):
+    '''Reads filenameIn, replaces the lines with the result of modifyLines and writes the result in filenameOut'''
+    fileIn = utils.openCheck(filenameIn, 'r', True)
+    fileOut = utils.openCheck(filenameOut, "w", True)
+
+    lines = utils.getLines(fileIn)
+    trajNum = 0
+    while (lines != []):
+        modifiedLines = modifyLines(trajNum, lines)
+        if modifiedLines:
+            for l in modifiedLines:
+                fileOut.write(l+"\n")
+            fileOut.write(utils.delimiterChar+"\n")
+        lines = utils.getLines(fileIn)
+        trajNum += 1
+         
+    fileIn.close()
+    fileOut.close()
+
+def copyTrajectoryFile(keepTrajectory, filenameIn, filenameOut):
+    '''Reads filenameIn, keeps the trajectories for which the function keepTrajectory(trajNum, lines) is True
+    and writes the result in filenameOut'''
+    fileIn = utils.openCheck(filenameIn, 'r', True)
+    fileOut = utils.openCheck(filenameOut, "w", True)
+
+    lines = utils.getLines(fileIn)
+    trajNum = 0
+    while (lines != []):
+        if keepTrajectory(trajNum, lines):
+            for l in lines:
+                fileOut.write(l+"\n")
+            fileOut.write(utils.delimiterChar+"\n")
+        lines = utils.getLines(fileIn)
+        trajNum += 1
+        
+    fileIn.close()
+    fileOut.close()
+
+def loadTrajectories(filename, nObjects = -1):
+    '''Loads trajectories'''
+
+    f = utils.openCheck(filename)
+    if f is None:
+        return []
+
+    objects = []
+    objNum = 0
+    objectType = getFileType(filename)
+    lines = utils.getLines(f)
+    while (lines != []) and ((nObjects<0) or (objNum<nObjects)):
+        l = lines[0].split(' ')
+        parsedLine = [int(n) for n in l[:4]]
+        obj = MovingObject(num = objNum, timeInterval = TimeInterval(parsedLine[1],parsedLine[2]))
+        #add = True
+        if len(lines) >= 3:
+            obj.positions = Trajectory.load(lines[1], lines[2])
+            if len(lines) >= 5:
+                obj.velocities = Trajectory.load(lines[3], lines[4])
+                if objectType == 'object':
+                    obj.userType = parsedLine[3]
+                    obj.nObjects = float(l[4])
+                    obj.featureNumbers = [int(n) for n in l[5:]]
+                    
+                    # load contour data if available
+                    if len(lines) >= 6:
+                        obj.contourType = utils.line2Floats(lines[6])
+                        obj.contourOrigins = Trajectory.load(lines[7], lines[8])
+                        obj.contourSizes = Trajectory.load(lines[9], lines[10])
+                elif objectType == 'prototype':
+                    obj.userType = parsedLine[3]
+                    obj.nMatchings = int(l[4])
+
+        if len(lines) != 2:
+            objects.append(obj)
+            objNum+=1
+        else:
+            print("Error two lines of data for feature {}".format(f.num))
+
+        lines = utils.getLines(f)
+
+    f.close()
+    return objects
+   
+def getFeatureNumbers(objects):
+    featureNumbers=[]
+    for o in objects:
+        featureNumbers += o.featureNumbers
+    return featureNumbers
+
+def loadInteractions(filename, nInteractions = -1):
+    'Loads interactions from the old UBC traffic event format'
+    f = utils.openCheck(filename)
+    if f is None:
+        return []
+
+    interactions = []
+    interactionNum = 0
+    lines = utils.getLines(f)
+    while (lines != []) and ((nInteractions<0) or (interactionNum<nInteractions)):
+        parsedLine = [int(n) for n in lines[0].split(' ')]
+        inter = events.Interaction(interactionNum, TimeInterval(parsedLine[1],parsedLine[2]), parsedLine[3], parsedLine[4], categoryNum = parsedLine[5])
+        
+        indicatorFrameNums = [int(n) for n in lines[1].split(' ')]
+        for indicatorNum,line in enumerate(lines[2:]):
+            values = {}
+            for i,v in enumerate([float(n) for n in line.split(' ')]):
+                if not ignoredValue[indicatorNum] or v != ignoredValue[indicatorNum]:
+                    values[indicatorFrameNums[i]] = v
+            inter.addIndicator(indicators.SeverityIndicator(severityIndicatorNames[indicatorNum], values, None, mostSevereIsMax[indicatorNum]))
+
+        interactions.append(inter)
+        interactionNum+=1
+        lines = utils.getLines(f)
+
+    f.close()
+    return interactions
+
+def loadCollisionPoints(filename, nPoints = -1):
+    '''Loads collision points and returns a dict
+    with keys as a pair of the numbers of the two interacting objects'''
+    f = utils.openCheck(filename)
+    if f is None:
+        return []
+
+    points = {}
+    num = 0
+    lines = utils.getLines(f)
+    while (lines != []) and ((nPoints<0) or (num<nPoints)):
+        parsedLine = [int(n) for n in lines[0].split(' ')]
+        protagonistNums = (parsedLine[0], parsedLine[1])
+        points[protagonistNums] = [[float(n) for n in lines[1].split(' ')],
+                                   [float(n) for n in lines[2].split(' ')]]
+
+        num+=1
+        lines = utils.getLines(f)
+
+    f.close()
+    return points
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trafficintelligence/utils.py	Mon Aug 24 16:02:06 2020 -0400
@@ -0,0 +1,1164 @@
+#! /usr/bin/env python
+''' Generic utilities.'''
+
+from datetime import time, datetime
+from argparse import ArgumentTypeError
+from pathlib import Path
+from math import sqrt, ceil, floor
+from copy import deepcopy, copy
+from collections import Counter
+
+from scipy.stats import rv_continuous, kruskal, shapiro, lognorm, norm, t
+from scipy.spatial import distance
+from scipy.sparse import dok_matrix
+from numpy import zeros, array, exp, sum as npsum, int as npint, arange, cumsum, mean, median, percentile, isnan, ones, convolve,  dtype, isnan, NaN, ma, isinf, savez, load as npload, log, polyfit, float as npfloat
+from numpy.random import random_sample, permutation as nppermutation
+from pandas import DataFrame, concat
+import matplotlib.pyplot as plt
+
+datetimeFormat = "%Y-%m-%d %H:%M:%S"
+
+sjcamDatetimeFormat = "%Y_%m%d_%H%M%S"#2017_0626_143720
+
+#########################
+# txt files
+#########################
+
+commentChar = '#'
+
+delimiterChar = '%';
+
+def openCheck(filename, option = 'r', quitting = False):
+    '''Open file filename in read mode by default
+    and checks it is open'''
+    try:
+        return open(filename, option)
+    except IOError:
+        print('File {} could not be opened.'.format(filename))
+        if quitting:
+            from sys import exit
+            exit()
+        return None
+
+def readline(f, commentCharacters = commentChar):
+    '''Modified readline function to skip comments
+    Can take a list of characters or a string (in will work in both)'''
+    s = f.readline()
+    while (len(s) > 0) and s[0] in commentCharacters:
+        s = f.readline()
+    return s.strip()
+
+def getLines(f, delimiterChar = delimiterChar, commentCharacters = commentChar):
+    '''Gets a complete entry (all the lines) in between delimiterChar.'''
+    dataStrings = []
+    s = readline(f, commentCharacters)
+    while len(s) > 0 and s[0] != delimiterChar:
+        dataStrings += [s.strip()]
+        s = readline(f, commentCharacters)
+    return dataStrings
+
+#########################
+# Strings
+#########################
+
+def upperCaseFirstLetter(s):
+    words = s.split(' ')
+    lowerWords = [w[0].upper()+w[1:].lower() for w in words]
+    return ' '.join(lowerWords)
+
+class TimeConverter:
+    def __init__(self, datetimeFormat = datetimeFormat):
+        self.datetimeFormat = datetimeFormat
+    
+    def convert(self, s):
+        try:
+            return datetime.strptime(s, self.datetimeFormat)
+        except ValueError:
+            msg = "Not a valid date: '{0}'.".format(s)
+            raise ArgumentTypeError(msg)
+
+#########################
+# Enumerations
+#########################
+
+def inverseEnumeration(l):
+    'Returns the dictionary that provides for each element in the input list its index in the input list'
+    result = {}
+    for i,x in enumerate(l):
+        result[x] = i
+    return result
+
+def findElement(l, num):
+    i = 0
+    while l[i].getNum() != num:
+        i += 1
+    if i < len(l):
+        return l[i]
+    else:
+        return None
+
+#########################
+# Simple statistics
+#########################
+
+def logNormalMeanVar(loc, scale):
+    '''location and scale are respectively the mean and standard deviation of the normal in the log-normal distribution
+    https://en.wikipedia.org/wiki/Log-normal_distribution
+
+    same as lognorm.stats(scale, 0, exp(loc))'''
+    mean = exp(loc+(scale**2)/2)
+    var = (exp(scale**2)-1)*exp(2*loc+scale**2)
+    return mean, var
+
+def fitLogNormal(x):
+    'returns the fitted location and scale of the lognormal (general definition)'
+    shape, loc, scale = lognorm.fit(x, floc=0.)
+    return log(scale), shape
+
+def sampleSize(stdev, tolerance, percentConfidence, nRoundingDigits = None, printLatex = False):
+    if nRoundingDigits is None:
+        k = round(norm.ppf(0.5+percentConfidence/200., 0, 1), 2) # 1.-(100-percentConfidence)/200.
+    else:
+        k = round(norm.ppf(0.5+percentConfidence/200., 0, 1), nRoundingDigits)
+        stdev = round(stdev, nRoundingDigits)
+        tolerance = round(tolerance, nRoundingDigits)
+    if printLatex:
+        print('$z_{{{}}}^2\\frac{{s^2}}{{e^2}}={}^2\\frac{{{}^2}}{{{}^2}}$'.format(0.5+percentConfidence/200.,k, stdev, tolerance))
+    return (k*stdev/tolerance)**2
+
+def confidenceInterval(mean, stdev, nSamples, percentConfidence, trueStd = True, printLatex = False):
+    '''if trueStd, use normal distribution, otherwise, Student
+
+    Use otherwise t.interval or norm.interval for the boundaries
+    ex: norm.interval(0.95)
+    t.interval(0.95, nSamples-1)'''
+    if trueStd:
+        k = round(norm.ppf(0.5+percentConfidence/200., 0, 1), 2)
+    else: # use Student
+        k = round(t.ppf(0.5+percentConfidence/200., nSamples-1), 2)
+    e = k*stdev/sqrt(nSamples)
+    if printLatex:
+        print('${0} \pm {1}\\frac{{{2}}}{{\sqrt{{{3}}}}}$'.format(mean, k, stdev, nSamples))
+    return mean-e, mean+e
+
+def computeChi2(expected, observed):
+    '''Returns the Chi2 statistics'''
+    return sum([((e-o)*(e-o))/float(e) for e, o in zip(expected, observed)])
+
+class ConstantDistribution(object):
+    '''Distribution returning always the same value for the random variable '''
+    def __init__(self, value):
+        self.value = value
+
+    def rvs(self, size = 1):
+        if size == 1:
+            return self.value
+        else:
+            return array([self.value]*size)
+    
+class EmpiricalContinuousDistribution(rv_continuous):
+    def __init__(self, values, probabilities, **kwargs):
+        '''The values (and corresponding probabilities) are supposed to be sorted by value
+        for v, p in zip(values, probabilities): P(X<=v)=p'''
+        assert probabilities[0]==0
+        super(EmpiricalContinuousDistribution, self).__init__(**kwargs)
+        self.values = values
+        self.probabilities = probabilities
+
+    def save(self, filename):
+        import yaml
+        with open(filename, 'w') as out:
+            yaml.dump([self.values, self.probabilities], out)
+
+    @staticmethod
+    def load(filename):
+        import yaml
+        with open(filename) as f:
+            values, probabilities = yaml.load(f)
+            return EmpiricalContinuousDistribution(values, probabilities)
+
+    def _cdf(self, x):
+        if x < self.values[0]:
+            return self.probabilities[0]
+        else:
+            i=0
+            while i+1<len(self.values) and self.values[i+1] < x:
+                i += 1
+            if i == len(self.values)-1:
+                return self.probabilities[-1]
+            else:
+                return self.probabilities[i]+(x-self.values[i])*float(self.probabilities[i+1]-self.probabilities[i])/float(self.values[i+1]-self.values[i])
+
+class DistributionSample(object):
+    def nSamples(self):
+        return sum(self.counts)
+
+def cumulativeDensityFunction(sample, normalized = False):
+    '''Returns the cumulative density function of the sample of a random variable'''
+    xaxis = sorted(sample)
+    counts = arange(1,len(sample)+1) # dtype = float
+    if normalized:
+        counts = counts.astype(float)/float(len(sample))
+    return xaxis, counts
+
+class DiscreteDistributionSample(DistributionSample):
+    '''Class to represent a sample of a distribution for a discrete random variable'''
+    def __init__(self, categories, counts):
+        self.categories = categories
+        self.counts = counts
+
+    def mean(self):
+        result = [float(x*y) for x,y in zip(self.categories, self.counts)]
+        return npsum(result)/self.nSamples()
+
+    def var(self, mean = None):
+        if not mean:
+            m = self.mean()
+        else:
+            m = mean
+        result = 0.
+        squares = [float((x-m)*(x-m)*y) for x,y in zip(self.categories, self.counts)]
+        return npsum(squares)/(self.nSamples()-1)
+
+    def referenceCounts(self, probability):
+        '''probability is a function that returns the probability of the random variable for the category values'''
+        refProba = [probability(c) for c in self.categories]
+        refProba[-1] = 1-npsum(refProba[:-1])
+        refCounts = [r*self.nSamples() for r in refProba]
+        return refCounts, refProba
+
+class ContinuousDistributionSample(DistributionSample):
+    '''Class to represent a sample of a distribution for a continuous random variable
+    with the number of observations for each interval
+    intervals (categories variable) are defined by their left limits, the last one being the right limit
+    categories contain therefore one more element than the counts'''
+    def __init__(self, categories, counts):
+        # todo add samples for initialization and everything to None? (or setSamples?)
+        self.categories = categories
+        self.counts = counts
+
+    @staticmethod
+    def generate(sample, categories):
+        if min(sample) < min(categories):
+            print('Sample has lower min than proposed categories ({}, {})'.format(min(sample), min(categories)))
+        if max(sample) > max(categories):
+            print('Sample has higher max than proposed categories ({}, {})'.format(max(sample), max(categories)))
+        dist = ContinuousDistributionSample(sorted(categories), [0]*(len(categories)-1))
+        for s in sample:
+            i = 0
+            while  i<len(dist.categories) and dist.categories[i] <= s:
+                i += 1
+            if i <= len(dist.counts):
+                dist.counts[i-1] += 1
+                #print('{} in {} {}'.format(s, dist.categories[i-1], dist.categories[i]))
+            else:
+                print('Element {} is not in the categories'.format(s))
+        return dist
+
+    def mean(self):
+        result = 0.
+        for i in range(len(self.counts)-1):
+            result += self.counts[i]*(self.categories[i]+self.categories[i+1])/2
+        return result/self.nSamples()
+
+    def var(self, mean = None):
+        if not mean:
+            m = self.mean()
+        else:
+            m = mean
+        result = 0.
+        for i in range(len(self.counts)-1):
+            mid = (self.categories[i]+self.categories[i+1])/2
+            result += self.counts[i]*(mid - m)*(mid - m)
+        return result/(self.nSamples()-1)
+
+    def referenceCounts(self, cdf):
+        '''cdf is a cumulative distribution function
+        returning the probability of the variable being less that x'''
+        # refCumulativeCounts = [0]#[cdf(self.categories[0][0])]
+#         for inter in self.categories:
+#             refCumulativeCounts.append(cdf(inter[1]))
+        refCumulativeCounts = [cdf(x) for x in self.categories[1:-1]]
+
+        refProba = [refCumulativeCounts[0]]
+        for i in xrange(1,len(refCumulativeCounts)):
+            refProba.append(refCumulativeCounts[i]-refCumulativeCounts[i-1])
+        refProba.append(1-refCumulativeCounts[-1])
+        refCounts = [p*self.nSamples() for p in refProba]
+        
+        return refCounts, refProba
+
+    def printReferenceCounts(self, refCounts=None):
+        if refCounts:
+            ref = refCounts
+        else:
+            ref = self.referenceCounts
+        for i in xrange(len(ref[0])):
+            print('{0}-{1} & {2:0.3} & {3:0.3} \\\\'.format(self.categories[i],self.categories[i+1],ref[1][i], ref[0][i]))
+
+
+#########################
+# maths section
+#########################
+
+# def kernelSmoothing(sampleX, X, Y, weightFunc, halfwidth):
+#     '''Returns a smoothed weighted version of Y at the predefined values of sampleX
+#     Sum_x weight(sample_x,x) * y(x)'''
+#     from numpy import zeros, array
+#     smoothed = zeros(len(sampleX))
+#     for i,x in enumerate(sampleX):
+#         weights = array([weightFunc(x,xx, halfwidth) for xx in X])
+#         if sum(weights)>0:
+#             smoothed[i] = sum(weights*Y)/sum(weights)
+#         else:
+#             smoothed[i] = 0
+#     return smoothed
+
+def generateData(nrows, nvariables, scale):
+    x = random_sample(nrows*nvariables).reshape(nrows,nvariables)*scale
+    return DataFrame(x, columns=['x{}'.format(i+1) for i in range(nvariables)])
+
+def kernelSmoothing(x, X, Y, weightFunc, halfwidth):
+    '''Returns the smoothed estimate of (X,Y) at x
+    Sum_x weight(sample_x,x) * y(x)'''
+    weights = array([weightFunc(x,observedx, halfwidth) for observedx in X])
+    if sum(weights)>0:
+        return sum(weights*Y)/sum(weights)
+    else:
+        return 0
+
+def uniform(center, x, halfwidth):
+    if abs(center-x)<halfwidth:
+        return 1.
+    else:
+        return 0.
+
+def gaussian(center, x, halfwidth):
+    return exp(-((center-x)/halfwidth)**2/2)
+
+def epanechnikov(center, x, halfwidth):
+    diff = abs(center-x)
+    if diff<halfwidth:
+        return 1.-(diff/halfwidth)**2
+    else:
+        return 0.
+    
+def triangular(center, x, halfwidth):
+    diff = abs(center-x)
+    if diff<halfwidth:
+        return 1.-abs(diff/halfwidth)
+    else:
+        return 0.
+
+def medianSmoothing(x, X, Y, halfwidth):
+    '''Returns the median of Y's corresponding to X's in the interval [x-halfwidth, x+halfwidth]'''
+    return median([y for observedx, y in zip(X,Y) if abs(x-observedx)<halfwidth])
+
+def argmaxDict(d):
+    return max(d, key=d.get)
+
+def deltaFrames(t1, t2, frameRate):
+    '''Returns the number of frames between t1 and t2
+    positive if t1<=t2, negative otherwise'''
+    if t1 > t2:
+        return -(t1-t2).seconds*frameRate
+    else:
+        return (t2-t1).seconds*frameRate
+
+def framesToTime(nFrames, frameRate, initialTime = time()):
+    '''returns a datetime.time for the time in hour, minutes and seconds
+    initialTime is a datetime.time'''
+    seconds = int(floor(float(nFrames)/float(frameRate))+initialTime.hour*3600+initialTime.minute*60+initialTime.second)
+    h = int(floor(seconds/3600.))
+    seconds = seconds - h*3600
+    m = int(floor(seconds/60))
+    seconds = seconds - m*60
+    return time(h, m, seconds)
+
+def timeToFrames(t, frameRate):
+    return frameRate*(t.hour*3600+t.minute*60+t.second)
+
+def timeModulo(t, duration):
+    'returns the time modulo the duration in min'
+    return time(t.hour, t.minute//duration, t.second)
+
+def sortXY(X,Y):
+    'returns the (x, Y(x)) pairs sorted on X'
+    D = {}
+    for x, y in zip(X,Y):
+        D[x]=y
+    xsorted = sorted(D.keys())
+    return xsorted, [D[x] for x in xsorted]
+
+def compareLengthForSort(i, j):
+    if len(i) < len(j):
+        return -1
+    elif len(i) == len(j):
+        return 0
+    else:
+        return 1
+
+def sortByLength(instances, reverse = False):
+    '''Returns a new list with the instances sorted by length (method __len__)
+    reverse is passed to sorted'''
+    return sorted(instances, key = len, reverse = reverse)
+
+def ceilDecimals(v, nDecimals):
+    '''Rounds the number at the nth decimal
+    eg 1.23 at 0 decimal is 2, at 1 decimal is 1.3'''
+    tens = 10**nDecimals
+    return ceil(v*tens)/tens
+
+def inBetween(bound1, bound2, x):
+    'useful if one does not know the order of bound1/bound2'
+    return bound1 <= x <= bound2 or bound2 <= x <= bound1
+
+def pointDistanceL2(x1,y1,x2,y2):
+    ''' Compute point-to-point distance (L2 norm, ie Euclidean distance)'''
+    return sqrt((x2-x1)**2+(y2-y1)**2)
+
+def crossProduct(l1, l2):
+    return l1[0]*l2[1]-l1[1]*l2[0]
+
+def filterCategoricalMovingWindow(cat_list, halfWidth):
+    ''' Return a list of categories/values smoothed according to a window. 
+        halfWidth is the search radius on either side'''
+    smoothed = deepcopy(cat_list)
+    for point in range(len(cat_list)):
+        lower_bound_check = max(0,point-halfWidth)
+        upper_bound_check = min(len(cat_list)-1,point+halfWidth+1)
+        window_values = cat_list[lower_bound_check:upper_bound_check]
+        smoothed[point] = max(set(window_values), key=window_values.count)
+    return smoothed
+
+def filterMovingWindow(inputSignal, halfWidth):
+    '''Returns an array obtained after the smoothing of the input by a moving average
+    The first and last points are copied from the original.'''
+    width = float(halfWidth*2+1)
+    win = ones(width,'d')
+    result = convolve(win/width,array(inputSignal),'same')
+    result[:halfWidth] = inputSignal[:halfWidth]
+    result[-halfWidth:] = inputSignal[-halfWidth:]
+    return result
+
+def linearRegression(x, y, deg = 1, plotData = False):
+    '''returns the least square estimation of the linear regression of y = ax+b
+    as well as the plot'''
+    coef = polyfit(x, y, deg)
+    if plotData:
+        def poly(x):
+            result = 0
+            for i in range(len(coef)):
+                result += coef[i]*x**(len(coef)-i-1)
+            return result
+        plt.plot(x, y, 'x')
+        xx = arange(min(x), max(x),(max(x)-min(x))/1000)
+        plt.plot(xx, [poly(z) for z in xx])
+    return coef
+
def correlation(data, correlationMethod = 'pearson', plotFigure = False, displayNames = None, figureFilename = None):
    '''Computes (and displays) the correlation matrix for a pandas DataFrame

    - correlationMethod is passed to DataFrame.corr ('pearson', 'kendall' or 'spearman')
    - displayNames: optional dict mapping column names to names shown on the plot
    - figureFilename: if not None, the figure is saved to this file

    Returns the correlation matrix (DataFrame) of the retained columns'''
    columns = data.columns.tolist()
    # drop columns that cannot be correlated: constant, object dtype (non numeric),
    # or with a single value other than NaN
    for var in data.columns:
        uniqueValues = data[var].unique()
        if len(uniqueValues) == 1 or data.dtypes[var] == dtype('O') or (len(uniqueValues) == 2 and len(data.loc[~isnan(data[var]), var].unique()) == 1): # last condition: only one other value than nan
            columns.remove(var)
    c=data[columns].corr(correlationMethod)
    if plotFigure:
        # figure size grows with the number of retained columns
        fig = plt.figure(figsize=(4+0.4*c.shape[0], 0.4*c.shape[0]))
        fig.add_subplot(1,1,1)
        #plt.imshow(np.fabs(c), interpolation='none')
        plt.imshow(c, vmin=-1., vmax = 1., interpolation='none', cmap = 'RdYlBu_r') # coolwarm
        if displayNames is not None:
            colnames = [displayNames.get(s.strip(), s.strip()) for s in columns]
        else:
            colnames = columns
        #correlation.plot_corr(c, xnames = colnames, normcolor=True, title = filename)
        plt.xticks(range(len(colnames)), colnames, rotation=90)
        plt.yticks(range(len(colnames)), colnames)
        plt.tick_params('both', length=0)
        plt.subplots_adjust(bottom = 0.29)
        plt.colorbar()
        plt.title('Correlation ({})'.format(correlationMethod))
        plt.tight_layout()
        if len(colnames) > 50: # make room for many tick labels
            plt.subplots_adjust(left=.06)
        if figureFilename is not None:
            plt.savefig(figureFilename, dpi = 150, transparent = True)
    return c
+
def addDummies(data, variables, allVariables = True):
    '''Add binary dummy variables for each value of a nominal variable 
    in a pandas DataFrame

    Only object-dtype variables with more than 2 unique values are expanded;
    one boolean column is created per value (all but the last if not allVariables).
    Returns the list of the new column names (data is modified in place).

    Bug fix: missing values were tested with "val is not NaN", an identity
    test that fails for NaN objects created elsewhere (eg by pandas itself);
    NaN is the only value that differs from itself, so "val == val" reliably
    filters it out.'''
    newVariables = []
    for var in variables:
        if var in data.columns and data.dtypes[var] == dtype('O') and len(data[var].unique()) > 2:
            values = data[var].unique()
            if not allVariables:
                values = values[:-1]
            for val in values:
                if val == val: # excludes NaN, which never compares equal to itself
                    newVariable = (var+'_{}'.format(val)).replace('.','').replace(' ','').replace('-','')
                    data[newVariable] = (data[var] == val)
                    newVariables.append(newVariable)
    return newVariables
+
def kruskalWallis(data, dependentVariable, independentVariable, plotFigure = False, filenamePrefix = None, figureFileType = 'pdf', saveLatex = False, renameVariables = lambda s: s, kwCaption = ''):
    '''Studies the influence of (nominal) independent variable over the dependent variable

    Makes tests if the conditional distributions are normal
    using the Shapiro-Wilk test (in which case ANOVA could be used)
    Implements uses the non-parametric Kruskal Wallis test

    Returns the Kruskal-Wallis test result (statistic, p-value),
    or None if the independent variable has fewer than 2 distinct values.
    renameVariables is currently unused.

    Fixes: DataFrame.sort was removed from pandas (replaced by sort_values);
    groupby().describe() now returns a DataFrame directly (no unstack needed);
    to_latex() returns a str that cannot be concatenated with bytes
    (removed the .encode('ascii') call); the latex output file is now closed.'''
    tmp = data[data[independentVariable].notnull()]
    independentVariableValues = sorted(tmp[independentVariable].unique().tolist())
    if len(independentVariableValues) >= 2:
        if saveLatex:
            out = openCheck(filenamePrefix+'-{}-{}.tex'.format(dependentVariable, independentVariable), 'w')
        for x in independentVariableValues:
            print('Shapiro-Wilk normality test for {} when {}={}: {} obs'.format(dependentVariable,independentVariable, x, len(tmp.loc[tmp[independentVariable] == x, dependentVariable])))
            if len(tmp.loc[tmp[independentVariable] == x, dependentVariable]) >= 3:
                print(shapiro(tmp.loc[tmp[independentVariable] == x, dependentVariable]))
        if plotFigure:
            plt.figure()
            plt.boxplot([tmp.loc[tmp[independentVariable] == x, dependentVariable] for x in independentVariableValues])
            plt.xticks(range(1,len(independentVariableValues)+1), independentVariableValues)
            plt.title('{} vs {}'.format(dependentVariable, independentVariable))
            if filenamePrefix is not None:
                plt.savefig(filenamePrefix+'-{}-{}.{}'.format(dependentVariable, independentVariable, figureFileType))
        # descriptive statistics per category, sorted by decreasing median
        table = tmp.groupby([independentVariable])[dependentVariable].describe().sort_values(by = '50%', ascending = False)
        table['count'] = table['count'].astype(int)
        testResult = kruskal(*[tmp.loc[tmp[independentVariable] == x, dependentVariable] for x in independentVariableValues])
        if saveLatex:
            out.write('\\begin{minipage}{\\linewidth}\n'
                      +'\\centering\n'
                      +'\\captionof{table}{'+(kwCaption.format(dependentVariable, independentVariable, *testResult))+'}\n'
                      +table.to_latex(float_format = lambda x: '{:.3f}'.format(x))+'\n'
                      +'\\end{minipage}\n'
                      +'\\ \\vspace{0.5cm}\n')
            out.close()
        else:
            print(table)
        return testResult
    else:
        return None
+
def prepareRegression(data, dependentVariable, independentVariables, maxCorrelationThreshold, correlations, maxCorrelationP, correlationFunc, stdoutText = ['Removing {} (constant: {})', 'Removing {} (correlation {} with {})', 'Removing {} (no correlation: {}, p={})'], saveFiles = False, filenamePrefix = None, latexHeader = '', latexTable = None, latexFooter=''):
    '''Removes variables from candidate independent variables if
    - if two independent variables are correlated (> maxCorrelationThreshold), one is removed
    - if an independent variable is not correlated with the dependent variable (p>maxCorrelationP)
    Returns the remaining non-correlated variables, correlated with the dependent variable

    correlations is the precomputed correlation matrix (pandas DataFrame) of the candidate variables
    correlationFunc is spearmanr or pearsonr from scipy.stats
    text is the template to display for the two types of printout (see default): 3 elements if no saving to latex file, 8 otherwise
    If saveFiles, the removed variables are written to filenamePrefix-removed-variables.tex
    (latexTable must then provide one format template per removal reason) and the retained
    correlations to filenamePrefix-correlations.html

    TODO: pass the dummies for nominal variables and remove if all dummies are correlated, or none is correlated with the dependentvariable'''    
    result = copy(independentVariables)
    table1 = '' # latex rows describing the removed variables
    table2 = {} # correlation and p-value of each retained variable with the dependent variable
    # 1. remove constant variables
    # (a single unique value, or only one value besides NaN for numeric variables)
    for var in independentVariables:
        uniqueValues = data[var].unique()
        if (len(uniqueValues) == 1) or (len(uniqueValues) == 2 and uniqueValues.dtype != dtype('O') and len(data.loc[~isnan(data[var]), var].unique()) == 1):
            print(stdoutText[0].format(var, uniqueValues))
            if saveFiles:
                table1 += latexTable[0].format(var, *uniqueValues)
            result.remove(var)
    # 2. remove one variable of each highly correlated pair
    # NOTE(review): which variable of a pair is kept depends on iteration order
    for v1 in copy(result):
        if v1 in correlations.index:
            for v2 in copy(result):
                if v2 != v1 and v2 in correlations.index:
                    if abs(correlations.loc[v1, v2]) > maxCorrelationThreshold:
                        if v1 in result and v2 in result:
                            if saveFiles:
                                table1 += latexTable[1].format(v2, v1, correlations.loc[v1, v2])
                            print(stdoutText[1].format(v2, v1, correlations.loc[v1, v2]))
                            result.remove(v2)
    # 3. remove numeric variables not correlated with the dependent variable (p > maxCorrelationP)
    table2['Correlations'] = []
    table2['Valeurs p'] = []
    for var in copy(result):
        if data.dtypes[var] != dtype('O'): # only numeric variables can be tested
            cor, p = correlationFunc(data[dependentVariable], data[var])
            if p > maxCorrelationP:
                if saveFiles:
                    table1 += latexTable[2].format(var, cor, p)
                print(stdoutText[2].format(var, cor, p))
                result.remove(var)
            else:
                table2['Correlations'].append(cor)
                table2['Valeurs p'].append(p)

    if saveFiles:
        out = openCheck(filenamePrefix+'-removed-variables.tex', 'w')
        out.write(latexHeader)
        out.write(table1)
        out.write(latexFooter)
        out.close()
        out = openCheck(filenamePrefix+'-correlations.html', 'w')
        table2['Variables'] = [var for var in result if data.dtypes[var] != dtype('O')]
        out.write(DataFrame(table2)[['Variables', 'Correlations', 'Valeurs p']].to_html(formatters = {'Correlations': lambda x: '{:.2f}'.format(x), 'Valeurs p': lambda x: '{:.3f}'.format(x)}, index = False))
        out.close()
    return result
+
def saveDokMatrix(filename, m, lowerTriangle = False):
    '''Saves a dok_matrix using savez
    (stores the shape and the parallel arrays of keys and values;
    if lowerTriangle, only the entries strictly below the diagonal are saved)'''
    if lowerTriangle:
        selected = [k for k in m if k[0] > k[1]]
        storedValues = [m[k[0], k[1]] for k in selected]
    else:
        selected = list(m.keys())
        storedValues = list(m.values())
    savez(filename, shape = m.shape, keys = selected, values = storedValues)
+
def loadDokMatrix(filename):
    '''Loads a dok_matrix saved using the above saveDokMatrix
    (reads back the shape, keys and values arrays)'''
    content = npload(filename)
    m = dok_matrix(tuple(content['shape']))
    for key, value in zip(content['keys'], content['values']):
        m[tuple(key)] = value
    return m
+
def aggregationFunction(funcStr, centile = 50):
    '''return the numpy function corresponding to funcStr
    centile can be a list of centiles to compute at once, eg [25, 50, 75] for the 3 quartiles'''
    functions = {'median': median,
                 'mean': mean,
                 'centile': lambda x: percentile(x, centile),
                 '85centile': lambda x: percentile(x, 85)}
    if funcStr in functions:
        return functions[funcStr]
    print('Unknown aggregation method: {}'.format(funcStr))
    return None
+
def aggregationMethods(methods, centiles = None):
    '''Returns a dict of aggregation functions (one per method name, see
    aggregationFunction) and the list of output headers, the centile
    method contributing one header per requested centile'''
    aggFunctions = {}
    headers = []
    for method in methods:
        if method == 'centile':
            aggFunctions[method] = aggregationFunction(method, centiles)
            headers.extend('{}{}'.format(method, c) for c in centiles)
        else:
            aggFunctions[method] = aggregationFunction(method)
            headers.append(method)
    return aggFunctions, headers
+
def maxSumSample(d, maxSum):
    '''Generates a sample from distribution d (type scipy.stats, using rvs method)
    until the sum of all elements is larger than maxSum'''
    sample = []
    total = 0
    while total < maxSum:
        draw = d.rvs()
        sample.append(draw)
        total += draw
    return sample
+    
+#########################
+# regression analysis using statsmodels (and pandas)
+#########################
+
+# TODO make class for experiments?
+# TODO add tests with public dataset downloaded from Internet (IRIS et al)
def modelString(experiment, dependentVariable, independentVariables):
    '''Returns the statsmodels formula string "dep ~ var1 + var2 + ..."
    built from the independent variables selected (truthy) in experiment'''
    selected = [v for v in independentVariables if experiment[v]]
    return dependentVariable+' ~ '+' + '.join(selected)
+
def runModel(experiment, data, dependentVariable, independentVariables, regressionType = 'ols'):
    '''Fits the regression model defined by experiment (the selected variables)
    on data, using the requested statsmodels regression type
    ('ols', 'gls' or 'rlm'); exits on an unknown type'''
    import statsmodels.formula.api as smf
    modelStr = modelString(experiment, dependentVariable, independentVariables)
    regressions = {'ols': smf.ols,
                   'gls': smf.gls,
                   'rlm': smf.rlm}
    if regressionType not in regressions:
        print('Unknown regression type {}. Exiting'.format(regressionType))
        import sys
        sys.exit()
    return regressions[regressionType](modelStr, data = data).fit()
+
def runModels(experiments, data, dependentVariable, independentVariables, regressionType = 'ols'):
    '''Runs several models and stores 3 statistics
    adjusted R2, condition number (should be small, eg < 1000)
    and p-value for Shapiro-Wilk test of residual normality
    (results are written into the experiments DataFrame, which is returned)

    Bug fix: the regressionType argument was previously ignored,
    runModel was always called with 'ols'.'''
    for i, experiment in experiments.iterrows():
        if experiment[independentVariables].any(): # skip the empty model (no variable selected)
            results = runModel(experiment, data, dependentVariable, independentVariables, regressionType = regressionType)
            experiments.loc[i,'r2adj'] = results.rsquared_adj
            experiments.loc[i,'condNum'] = results.condition_number
            experiments.loc[i, 'shapiroP'] = shapiro(results.resid)[1]
            experiments.loc[i,'nobs'] = int(results.nobs)
    return experiments
+
def generateExperiments(independentVariables):
    '''Generates all possible models for including or not each independent variable

    Returns a DataFrame with one row per subset of the variables (2^n rows,
    one boolean column per variable, the row index being the binary encoding
    of the subset) and the result columns (r2adj, condNum, shapiroP, nobs)
    initialized to sentinel values (shapiroP < 0 means not computed yet).

    Cleanup: removed the unused local variable nModels.'''
    experiments = {}
    nIndependentVariables = len(independentVariables)
    if nIndependentVariables != len(set(independentVariables)):
        print("Duplicate variables. Exiting")
        import sys
        sys.exit()
    for i, var in enumerate(independentVariables):
        # the i-th column alternates blocks of 2^i False then 2^i True
        pattern = [False]*(2**i)+[True]*(2**i)
        experiments[var] = pattern*(2**(nIndependentVariables-i-1))
    experiments = DataFrame(experiments)
    experiments['r2adj'] = 0.
    experiments['condNum'] = NaN
    experiments['shapiroP'] = -1
    experiments['nobs'] = -1
    return experiments
+
def findBestModel(data, dependentVariable, independentVariables, regressionType = 'ols', nProcesses = 1):
    '''Generates all possible model with the independentVariables
    and runs them, saving the results in experiments
    with multiprocess option

    Returns the experiments DataFrame with the fit statistics filled in
    (see runModels); with nProcesses > 1, the experiments are split in
    contiguous chunks, one per process, and the results concatenated'''
    experiments = generateExperiments(independentVariables)
    nModels = len(experiments)
    print("Running {} models with {} processes".format(nModels, nProcesses))
    print("IndependentVariables: {}".format(independentVariables))
    if nProcesses == 1:
        return runModels(experiments, data, dependentVariable, independentVariables, regressionType)
    else:
        pool = Pool(processes = nProcesses)
        # ceil so the last (possibly shorter) chunk still covers the remaining models
        chunkSize = int(ceil(nModels/nProcesses))
        jobs = [pool.apply_async(runModels, args = (experiments[i*chunkSize:(i+1)*chunkSize], data, dependentVariable, independentVariables, regressionType)) for i in range(nProcesses)]
        return concat([job.get() for job in jobs])
+
def findBestModelFwd(data, dependentVariable, independentVariables, modelFunc, experiments = None):
    '''Forward search for best model (based on adjusted R2)
    Randomly starting with one variable and adding randomly variables 
    if they improve the model
    
    The results are added to experiments if provided as argument
    Storing in experiment relies on the index being the number equal 
    to the binary code derived from the independent variables

    modelFunc is a statsmodels model factory (eg smf.ols) called with
    a formula string and the data'''
    if experiments is None:
        experiments = generateExperiments(independentVariables)
    nIndependentVariables = len(independentVariables)
    # random order in which the variables are considered
    permutation = nppermutation(list(range(nIndependentVariables)))
    variableMapping = {j: independentVariables[i] for i,j in enumerate(permutation)}
    print('Tested variables '+', '.join([variableMapping[i] for i in range(nIndependentVariables)]))
    bestModel = [False]*nIndependentVariables
    currentVarNum = 0
    currentR2Adj = 0.
    for currentVarNum in range(nIndependentVariables):
        # tentatively add the current variable to the best model so far
        currentModel = [i for i in bestModel]
        currentModel[currentVarNum] = True
        # row index in experiments is the binary encoding of the selected variables
        # NOTE(review): currentModel is indexed both directly (above) and through
        # permutation (below) — confirm the intended position/variable mapping
        rowIdx = sum([0]+[2**i for i in range(nIndependentVariables) if currentModel[permutation[i]]])
        #print currentVarNum, sum(currentModel), ', '.join([independentVariables[i] for i in range(nIndependentVariables) if currentModel[permutation[i]]])
        if experiments.loc[rowIdx, 'shapiroP'] < 0: # not fitted yet (sentinel from generateExperiments)
            modelStr = modelString(experiments.loc[rowIdx], dependentVariable, independentVariables)
            model = modelFunc(modelStr, data = data)
            results = model.fit()
            experiments.loc[rowIdx, 'r2adj'] = results.rsquared_adj
            experiments.loc[rowIdx, 'condNum'] = results.condition_number
            experiments.loc[rowIdx, 'shapiroP'] = shapiro(results.resid)[1]
            experiments.loc[rowIdx, 'nobs'] = int(results.nobs)
        # keep the variable only if it improves adjusted R2
        if currentR2Adj < experiments.loc[rowIdx, 'r2adj']:
            currentR2Adj = experiments.loc[rowIdx, 'r2adj']
            bestModel[currentVarNum] = True
    return experiments
+
def displayModelResults(results, model = None, plotFigures = True, filenamePrefix = None, figureFileType = 'pdf', text = {'title-shapiro': 'Shapiro-Wilk normality test for residuals: {:.2f} (p={:.3f})', 'true-predicted.xlabel': 'Predicted values', 'true-predicted.ylabel': 'True values', 'residuals-predicted.xlabel': 'Predicted values', 'residuals-predicted.ylabel': 'Residuals'}):
    import statsmodels.api as sm
    # NOTE(review): the string below follows the import statement,
    # so it is not the actual __doc__ of the function
    '''Displays some model results: summary, Shapiro-Wilk test of
    residual normality, and up to 3 graphics: true vs predicted
    (only if the fitted model is given), residuals vs predicted,
    and a qq-plot of the residuals.
    If filenamePrefix is given, the summary (html) and the figure are saved.'''
    print(results.summary())
    shapiroResult = shapiro(results.resid)
    print(shapiroResult)
    if plotFigures:
        fig = plt.figure(figsize=(7,6.3*(2+int(model is not None))))
        if model is not None:
            # true vs predicted, with the y=x diagonal drawn for reference
            ax = fig.add_subplot(3,1,1)
            plt.plot(results.predict(), model.endog, 'x')
            x=plt.xlim()
            y=plt.ylim()
            plt.plot([max(x[0], y[0]), min(x[1], y[1])], [max(x[0], y[0]), min(x[1], y[1])], 'r')
            #plt.axis('equal')
            if text is not None:
                plt.title(text['title-shapiro'].format(*shapiroResult))
                #plt.title(text['true-predicted.title'])
                plt.xlabel(text['true-predicted.xlabel'])
                plt.ylabel(text['true-predicted.ylabel'])
            # residuals vs predicted
            fig.add_subplot(3,1,2, sharex = ax)
            plt.plot(results.predict(), results.resid, 'x')
            nextSubplotNum = 3
        else:
            fig.add_subplot(2,1,1)
            plt.plot(results.predict(), results.resid, 'x')
            nextSubplotNum = 2
        if text is not None:
            if model is None:
                plt.title(text['title-shapiro'].format(*shapiroResult))
            plt.xlabel(text['residuals-predicted.xlabel'])
            plt.ylabel(text['residuals-predicted.ylabel'])
        # qq-plot of the residuals against the normal distribution
        qqAx = fig.add_subplot(nextSubplotNum,1,nextSubplotNum)
        sm.qqplot(results.resid, fit = True, line = '45', ax = qqAx)
        plt.axis('equal')
        if text is not None and 'qqplot.xlabel' in text:
            plt.xlabel(text['qqplot.xlabel'])
            plt.ylabel(text['qqplot.ylabel'])
        plt.tight_layout()
        if filenamePrefix is not None:
            out = openCheck(filenamePrefix+'-coefficients.html', 'w')
            out.write(results.summary().as_html())
            plt.savefig(filenamePrefix+'-model-results.'+figureFileType)
+
+#########################
+# iterable section
+#########################
+
def mostCommon(l):
    '''Returns the most frequent element in a iterable
    The element must be hashable

    new version from https://stackoverflow.com/questions/41612368/find-most-common-element
    previous version from from http://stackoverflow.com/questions/1518522/python-most-common-element-in-a-list'''
    (value, count), = Counter(l).most_common(1)
    return value
+    
+#########################
+# sequence section
+#########################
+
class LCSS(object):
    '''Class that keeps the LCSS (longest common subsequence) parameters
    and puts together the various computations

    the methods with names starting with _ are not to be shadowed
    in child classes, who will shadow the other methods, 
    ie compute and computeXX methods'''
    def __init__(self, similarityFunc = None, metric = None, epsilon = None, delta = float('inf'), aligned = False, lengthFunc = min):
        '''One should provide either a similarity function
        that indicates (return bool) whether elements in the compares lists are similar

        eg distance(p1, p2) < epsilon
        
        or a type of metric usable in scipy.spatial.distance.cdist with an epsilon

        delta bounds the index distance between matched elements (see similarities);
        if aligned, the best alignment shift between the two sequences is searched for;
        lengthFunc (eg min or mean) is used to normalize the LCSS length'''
        if similarityFunc is None and metric is None:
            print("No way to compute LCSS, similarityFunc and metric are None. Exiting")
            import sys
            sys.exit()
        elif metric is not None and epsilon is None:
            print("Please provide a value for epsilon if using a cdist metric. Exiting")
            import sys
            sys.exit()
        else:
            if similarityFunc is None and metric is not None and not isinf(delta):
                print('Warning: you are using a cdist metric and a finite delta, which will make probably computation slower than using the equivalent similarityFunc (since all pairwise distances will be computed by cdist).')
            self.similarityFunc = similarityFunc
            self.metric = metric
            self.epsilon = epsilon
            self.aligned = aligned
            self.delta = delta
            self.lengthFunc = lengthFunc
            self.subSequenceIndices = [(0,0)] # indices of the last computed common subsequence

    def similarities(self, l1, l2, jshift=0):
        '''Fills self.similarityTable, the dynamic programming table:
        similarityTable[i,j] is the LCSS length of l1[:i] and l2[:j];
        only a band of half-width delta around the diagonal shifted by
        jshift is computed'''
        n1 = len(l1)
        n2 = len(l2)
        self.similarityTable = zeros((n1+1,n2+1), dtype = npint)
        if self.similarityFunc is not None:
            for i in range(1,n1+1):
                for j in range(max(1,i-jshift-self.delta),min(n2,i-jshift+self.delta)+1):
                    if self.similarityFunc(l1[i-1], l2[j-1]):
                        self.similarityTable[i,j] = self.similarityTable[i-1,j-1]+1
                    else:
                        self.similarityTable[i,j] = max(self.similarityTable[i-1,j], self.similarityTable[i,j-1])
        elif self.metric is not None:
            # all pairwise distances are computed at once by cdist
            similarElements = distance.cdist(l1, l2, self.metric) <= self.epsilon
            for i in range(1,n1+1):
                for j in range(max(1,i-jshift-self.delta),min(n2,i-jshift+self.delta)+1):
                    if similarElements[i-1, j-1]:
                        self.similarityTable[i,j] = self.similarityTable[i-1,j-1]+1
                    else:
                        self.similarityTable[i,j] = max(self.similarityTable[i-1,j], self.similarityTable[i,j-1])
            

    def subSequence(self, i, j):
        '''Returns the subsequence of two sequences
        http://en.wikipedia.org/wiki/Longest_common_subsequence_problem

        (recursive backtracking in the similarity table;
        NOTE(review): may hit the recursion limit for very long sequences)'''
        if i == 0 or j == 0:
            return []
        elif self.similarityTable[i][j] == self.similarityTable[i][j-1]:
            return self.subSequence(i, j-1)
        elif self.similarityTable[i][j] == self.similarityTable[i-1][j]:
            return self.subSequence(i-1, j)
        else:
            return self.subSequence(i-1, j-1) + [(i-1,j-1)]

    def _compute(self, _l1, _l2, computeSubSequence = False):
        '''returns the longest common subsequence similarity
        l1 and l2 should be the right format
        eg list of tuple points for cdist 
        or elements that can be compare using similarityFunc

        if aligned, returns the best matching if using a finite delta by shifting the series alignments
        '''
        if len(_l2) < len(_l1): # l1 is the shortest
            l1 = _l2
            l2 = _l1
            revertIndices = True
        else:
            l1 = _l1
            l2 = _l2
            revertIndices = False
        n1 = len(l1)
        n2 = len(l2)

        if self.aligned:
            # try all alignment shifts and keep the one with the largest LCSS
            lcssValues = {}
            similarityTables = {}
            for i in range(-n2-self.delta+1, n1+self.delta): # interval such that [i-shift-delta, i-shift+delta] is never empty, which happens when i-shift+delta < 1 or when i-shift-delta > n2
                self.similarities(l1, l2, i)
                lcssValues[i] = self.similarityTable.max()
                similarityTables[i] = self.similarityTable
                #print self.similarityTable
            alignmentShift = argmaxDict(lcssValues) # ideally get the medium alignment shift, the one that minimizes distance
            self.similarityTable = similarityTables[alignmentShift]
        else:
            alignmentShift = 0
            self.similarities(l1, l2)

        # threshold values for the useful part of the similarity table are n2-n1-delta and n1-n2-delta
        self.similarityTable = self.similarityTable[:min(n1, n2+alignmentShift+self.delta)+1, :min(n2, n1-alignmentShift+self.delta)+1]

        if computeSubSequence:
            self.subSequenceIndices = self.subSequence(self.similarityTable.shape[0]-1, self.similarityTable.shape[1]-1)
            if revertIndices:
                # swap back so the indices refer to the original argument order
                self.subSequenceIndices = [(j,i) for i,j in self.subSequenceIndices]
        return self.similarityTable[-1,-1]

    def compute(self, l1, l2, computeSubSequence = False):
        '''get methods are to be shadowed in child classes '''
        return self._compute(l1, l2, computeSubSequence)

    def computeAlignment(self):
        '''Returns the mean index shift between the matched elements
        of the last computed common subsequence'''
        return mean([j-i for i,j in self.subSequenceIndices])

    def _computeNormalized(self, l1, l2, computeSubSequence = False):
        ''' compute the normalized LCSS
        ie, the LCSS divided by the min or mean of the indicator lengths (using lengthFunc)
        lengthFunc = lambda x,y:float(x,y)/2'''
        return float(self._compute(l1, l2, computeSubSequence))/self.lengthFunc(len(l1), len(l2))

    def computeNormalized(self, l1, l2, computeSubSequence = False):
        return self._computeNormalized(l1, l2, computeSubSequence)

    def _computeDistance(self, l1, l2, computeSubSequence = False):
        ''' compute the LCSS distance (1 - normalized LCSS)'''
        return 1-self._computeNormalized(l1, l2, computeSubSequence)

    def computeDistance(self, l1, l2, computeSubSequence = False):
        return self._computeDistance(l1, l2, computeSubSequence)
    
+    
+#########################
+# plotting section
+#########################
+
def plotPolygon(poly, options = '', **kwargs):
    'Plots the exterior ring of shapely polygon poly'
    xCoords, yCoords = poly.exterior.xy
    plt.plot(xCoords, yCoords, options, **kwargs)
+
def stepPlot(X, firstX, lastX, initialCount = 0, increment = 1):
    '''for each value in X, increment by increment the initial count
    returns the lists that can be plotted 
    to obtain a step plot increasing by increment for each value in X, from first to last value
    firstX and lastX should be respectively smaller and larger than all elements in X'''
    
    xValues = [firstX]
    counts = [initialCount]
    for x in sorted(X):
        # duplicate each x so the count jumps vertically at x
        xValues.extend([x, x])
        counts.extend([counts[-1], counts[-1]+increment])
    xValues.append(lastX)
    counts.append(counts[-1])
    return xValues, counts
+
class PlottingPropertyValues(object):
    '''Cyclic sequence of plotting property values (markers, colors, etc):
    indexing wraps around the stored values'''
    def __init__(self, values):
        self.values = values

    def __getitem__(self, i):
        # modulo makes any integer index valid
        return self.values[i % len(self.values)]
+
# predefined cyclic sequences of matplotlib plotting properties
markers = PlottingPropertyValues(['+', '*', ',', '.', 'x', 'D', 's', 'o'])
scatterMarkers = PlottingPropertyValues(['s','o','^','>','v','<','d','p','h','8','+','x'])

linestyles = PlottingPropertyValues(['-', '--', '-.', ':'])

colors = PlottingPropertyValues('brgmyck') # 'w' (white) left out
+
def monochromeCycler(withMarker = False):
    '''Sets the matplotlib axes property cycle to black lines
    distinguished by linestyle (and optionally by marker)'''
    from cycler import cycler
    styleCycle = cycler('color', ['k']) * cycler('linestyle', ['-', '--', ':', '-.'])
    if withMarker:
        styleCycle = styleCycle * cycler('marker', ['^',',', '.'])
    plt.rc('axes', prop_cycle=styleCycle)
+
def plotIndicatorMap(indicatorMap, squareSize, masked = True, defaultValue=-1):
    '''Plots a color map of the indicator values
    indicatorMap maps (x,y) integer cell coordinates to values;
    cells without a value are filled with defaultValue
    (and hidden if masked is True)'''
    coords = array(list(indicatorMap.keys()))
    minX = min(coords[:,0])
    minY = min(coords[:,1])
    X = arange(minX, max(coords[:,0])+1.1)*squareSize
    Y = arange(minY, max(coords[:,1])+1.1)*squareSize
    C = defaultValue*ones((len(Y), len(X)))
    for k, v in indicatorMap.items():
        C[k[1]-minY, k[0]-minX] = v
    gridValues = ma.masked_where(C == defaultValue, C) if masked else C
    plt.pcolor(X, Y, gridValues)
+
+#########################
+# Data download
+#########################
+
def downloadECWeather(stationID, years, months = [], outputDirectoryname = '.', english = True):
    '''Downloads monthly weather data from Environment Canada
    If month is provided (number 1 to 12), it means hourly data for the whole month
    Otherwise, means the data for each day, for the whole year

    Example: MONTREAL MCTAVISH	10761
             MONTREALPIERRE ELLIOTT TRUDEAU INTL A	5415
    see ftp://client_climate@ftp.tor.ec.gc.ca/Pub/Get_More_Data_Plus_de_donnees/Station%20Inventory%20EN.csv

    To get daily data for 2010 and 2011, downloadECWeather(10761, [2010,2011], [], '/tmp')
    To get hourly data for 2009 and 2012, January, March and October, downloadECWeather(10761, [2009,2012], [1,3,10], '/tmp')

    for annee in `seq 2016 2017`;do wget --content-disposition "http://climat.meteo.gc.ca/climate_data/bulk_data_f.html?format=csv&stationID=10761&Year=${annee}&timeframe=2&submit=++T%C3%A9l%C3%A9charger+%0D%0Ades+donn%C3%A9es" ;done
    for annee in `seq 2016 2017`;do for mois in `seq 1 12`;do wget --content-disposition "http://climat.meteo.gc.ca/climate_data/bulk_data_f.html?format=csv&stationID=10761&Year=${annee}&Month=${mois}&timeframe=1&submit=++T%C3%A9l%C3%A9charger+%0D%0Ades+donn%C3%A9es" ;done;done
    '''
    import urllib.request
    language = 'e' if english else 'f'
    if len(months) == 0:
        # daily data for the whole year: one download per year
        timeFrame = 2
        months = [1]
    else:
        # hourly data: one download per (year, month) pair
        timeFrame = 1

    for year in years:
        for month in months:
            suffix = '-{}-hourly'.format(month) if timeFrame == 1 else '-daily'
            outFilename = '{}/{}-{}'.format(outputDirectoryname, stationID, year)+suffix+'.csv'
            url = urllib.request.urlretrieve('http://climate.weather.gc.ca/climate_data/bulk_data_{}.html?format=csv&stationID={}&Year={}&Month={}&Day=1&timeframe={}&submit=Download+Data'.format(language, stationID, year, month, timeFrame), outFilename)
+
+#########################
+# File I/O
+#########################
+
def removeExtension(filename, delimiter = '.'):
    '''Returns the filename minus the extension (all characters after last delimiter)'''
    i = filename.rfind(delimiter)
    return filename[:i] if i > 0 else filename
+
def getExtension(filename, delimiter = '.'):
    '''Returns the extension of the filename (all characters after last delimiter),
    or the empty string if there is none'''
    i = filename.rfind(delimiter)
    return filename[i+1:] if i > 0 else ''
+
def cleanFilename(s):
    'cleans filenames obtained when concatenating figure characteristics'
    replacements = ((' ', '-'), ('.', ''), ('/', '-'), (',', ''))
    for old, new in replacements:
        s = s.replace(old, new)
    return s
+
def getRelativeFilename(parentPath, filename):
    'Returns filename if absolute, otherwise parentPath/filename as string'
    filePath = Path(filename)
    return filename if filePath.is_absolute() else str(parentPath/filePath)
+
def listfiles(dirname, extension, remove = False):
    '''Returns the list of files with the extension in the directory dirname
    If remove is True, the filenames are stripped from the extension
    Returns an empty list (with a message) if dirname is not a directory

    Bug fix: the extension used to be passed to removeExtension as its
    delimiter argument, which cut the name at the last occurrence of the
    extension string and left the trailing dot (eg 'a.txt' -> 'a.').'''
    d = Path(dirname)
    if d.is_dir():
        matches = list(d.glob('*.'+extension))
        if remove:
            # parent/stem drops exactly the final '.extension' suffix
            return [str(f.parent/f.stem) for f in matches]
        else:
            return [str(f) for f in matches]
    else:
        print(dirname+' is not a directory')
        return []
+
def mkdir(dirname):
    'Creates a directory if it does not exist'
    path = Path(dirname)
    if path.exists():
        print(dirname+' already exists')
    else:
        path.mkdir()
+
def removeFile(filename):
    '''Deletes the file while avoiding raising an error 
    if the file does not exist'''
    path = Path(filename)
    if not path.exists():
        print(filename+' does not exist')
    else:
        path.unlink()
+
def line2Floats(l, separator=' '):
    '''Returns the list of floats corresponding to the string'''
    return list(map(float, l.split(separator)))
+
def line2Ints(l, separator=' '):
    '''Returns the list of ints corresponding to the string'''
    return list(map(int, l.split(separator)))
+
+#########################
+# Profiling
+#########################
+
def analyzeProfile(profileFilename, stripDirs = True):
    '''Analyze the file produced by cProfile 

    obtained by for example: 
    - call in script (for main() function in script)
    import cProfile, os
    cProfile.run('main()', os.path.join(os.getcwd(),'main.profile'))

    - or on the command line:
    python -m cProfile [-o profile.bin] [-s sort] scriptfile [arg]

    Prints the top 20% of the entries sorted by internal time
    and returns the pstats.Stats object for further queries'''
    import pstats, os
    # NOTE(review): the profile file is looked up in the parent directory
    # (os.pardir) — confirm this is intended rather than the current directory
    p = pstats.Stats(os.path.join(os.pardir, profileFilename))
    if stripDirs:
        p.strip_dirs()
    p.sort_stats('time')
    p.print_stats(.2)
    #p.sort_stats('time')
    # p.print_callees(.1, 'int_prediction.py:')
    return p
+
+#########################
+# running tests
+#########################
+
+if __name__ == "__main__":
+    import doctest
+    import unittest
+    suite = doctest.DocFileSuite('tests/utils.txt')
+    #suite = doctest.DocTestSuite()
+    unittest.TextTestRunner().run(suite)
+    #doctest.testmod()
+    #doctest.testfile("example.txt")