Commits

Nicolas Saunier committed ce40a89

Added functions for classification, refactored from Sohail's work for TRB/TRC (to be tested)

Comments (0)

Files changed (3)

         at constant speed'''
         return predictPositionNoLimit(nTimeSteps, self.getPositionAtInstant(instant), self.getVelocityAtInstant(instant), externalAcceleration)
 
-    def classifyUserTypeSpeed(self, threshold, statisticsFunc = median, ignoreNInstantsAtEnds = 0):
+    ###
+    # User Type Classification
+    ###
+    def classifyUserTypeSpeedPedstrianCar(self, threshold, aggregationFunc = median, ignoreNInstantsAtEnds = 0):
         '''Classifies slow and fast road users
         slow: non-motorized -> pedestrians
         fast: motorized -> cars'''
         if ignoreNInstantsAtEnds > 0:
-            speeds = self.velocities.norm()[ignoreNInstantsAtEnds:-ignoreNInstantsAtEnds]
+            speeds = self.getSpeeds()[ignoreNInstantsAtEnds:-ignoreNInstantsAtEnds]
         else:
-            speeds = self.velocities.norm()
-        if statisticsFunc(speeds) >= threshold:
+            speeds = self.getSpeeds()
+        if aggregationFunc(speeds) >= threshold:
             self.setUserType(userType2Num['car'])
         else:
             self.setUserType(userType2Num['pedestrian'])
 
-    def classifyUserTypeHoGSVMAtInstant(self, img, svm, instant, homography, width, height, px = 0.2, py = 0.2, pixelThreshold = 800):
+    def classifyUserTypeSpeed(self, aggregationFunc = median, speedProbabilities):
+        '''Classifies road user per road user type
+        speedProbabilities are functions return P(speed|class)
+        in a dictionary indexed by user type names
+        Returns probabilities for each class
+
+        for simple threshold classification, simply pass non-overlapping indicator functions (membership)
+        e.g. def indic(x):
+        if abs(x-mu) < sigma:
+        return 1
+        else:
+        return x'''
+        if not hasattr(self, aggregatedSpeed):
+            self.aggregatedSpeed = aggregationFunc(self.getSpeeds())
+        userTypeProbabilities = {userType2Num[userTypename]: speedProbabilities[userTypename](self.aggregatedSpeed) for userTypename in speedProbabilities}
+        self.setUserType(utils.argmaxDict(userTypeProbabilities))
+        return userTypeProbabilities
+
+    def initClassifyUserTypeHoGSVM(self, aggregationFunc = median):
+        '''Initializes the data structures for classification
+
+        TODO? compute speed for longest feature?
+        Skip beginning and end of feature for speed? Offer options instead of median'''
+        self.aggregatedSpeed = aggregationFunc(self.getSpeeds())
+        self.userTypes = {}
+
+    def classifyUserTypeHoGSVMAtInstant(self, img, pedBikeCarSVM, instant, homography, width, height, bikeCarSVM = None, pedBikeSpeedTreshold = float('Inf'), bikeCarSpeedThreshold = float('Inf'), px = 0.2, py = 0.2, pixelThreshold = 800):
         '''Extract the image box around the object and 
         applies the SVM model on it'''
         from numpy import array
         croppedImg, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, self, instant, homography, width, height, px, py, pixelThreshold)
         if len(croppedImg) > 0: # != []
             hog = array([cvutils.HOG(croppedImg)], dtype = np.float32)
-            return int(svm.predict(hog))
+            if self.aggregatedSpeed < pedBikeSpeedTreshold or bikeCarSVM == None:
+                self.userTypes[instant] = int(pedBikeCarSVM.predict(hog))
+            elif self.aggregatedSpeed < bikeCarSpeedTreshold:
+                self.userTypes[instant] = int(bikeCarSVM.predict(hog))
+            else:
+                self.userTypes[instant] = userType2Num['car']
         else:
-            return userType2Num['unknown']
+            self.userTypes[instant] = userType2Num['unknown']
+
+    def classifyUserTypeHoGSVM(self, images, pedBikeCarSVM, homography, width, height, bikeCarSVM = None, pedBikeSpeedTreshold = float('Inf'), bikeCarSpeedThreshold = float('Inf'), aggregationFunc = median, speedProbabilities = None, px = 0.2, py = 0.2, pixelThreshold = 800):
+        '''Agregates SVM detections in each image and returns probability
+        (proportion of instants with classification in each category)
+
+        iamges is a dictionary of images indexed by instant
+        With default parameters, the general (ped-bike-car) classifier will be used
+        TODO? consider all categories?'''
+        if not hasattr(self, aggregatedSpeed) or not hasattr(self, userTypes):
+            print('Initilize the data structures for classification by HoG-SVM')
+            self.initClassifyUserTypeHoGSVM(aggregationFunc)
+
+        if len(self.userTypes) != self.length(): # if classification has not been done previously
+            for t in self.getTimeInterval():
+                if t not in self.userTypes:
+                    self.classifyUserTypeHoGSVMAtInstant(images[t], pedBikeCarSVM, t, homography, width, height, bikeCarSVM, pedBikeSpeedTreshold, bikeCarSpeedThreshold, px, py, pixelThreshold)
+        # compute P(Speed|Class)
+        if speedProbabilities = None: # equiprobable information from speed
+            userTypeProbabilities = {userType2Num['car']: 1., userType2Num['pedestrian']: 1., userType2Num['bicycle']: 1.}
+        else:
+            userTypeProbabilities = {userType2Num[userTypename]: speedProbabilities[userTypename](self.aggregatedSpeed) for userTypename in speedProbabilities}
+        # result is P(Class|Appearance) x P(Speed|Class)
+        nInstantsUserType = {userType2Num[userTypename]: 0 for userTypename in userTypeProbabilities}# number of instants the object is classified as userTypename
+        for t in self.userTypes:
+            nInstantsUserType[self.userTypes[t]] += 1
+        for userTypename in userTypeProbabilities:
+            userTypeProbabilities[userTypename] *= nInstantsUserType[userTypename]
+        # class is the user type that maximizes usertype probabilities
+        self.setUserType(utils.argmaxDict(userTypeProbabilities))
+
+    def classifyUserTypeArea(self, areas, homography):
+        '''Classifies the object based on its location (projected to image space)
+        areas is a dictionary of matrix of the size of the image space 
+        for different road users possible locations, indexed by road user type names
+
+        TODO: areas could be a wrapper object with a contains method that would work for polygons and images (with wrapper class)
+        skip frames at beginning/end?'''
+        print('not implemented/tested yet')
+        if not hasattr(self, projectedPositions):
+            if homography != None:
+                self.projectedPositions = obj.positions.project(homography)
+            else:
+                self.projectedPositions = obj.positions
+        possibleUserTypes = {}
+        for userTypename in userTypenames:
+            possibleUserTypes[userTypename] = 0
+        for p in self.projectedPositions:
+            for userTypename in areas:
+                if areas[userTypename][p.x, p.y] != 0:
+                    possibleUserTypes[userTypename] += 1
+        # what to do: threshold for most common type? self.setUserType()
+        return possibleUserTypes
 
     @staticmethod
     def collisionCourseDotProduct(movingObject1, movingObject2, instant):

scripts/classify-objects.py

#! /usr/bin/env python

import numpy as np
import sys, argparse
from cv2 import SVM_RBF, SVM_C_SVC

import cvutils, moving, ml

# description fixed: the previous text was copy-pasted from the
# indicator-processing script and did not describe this program
parser = argparse.ArgumentParser(description='The program classifies the road users of the video sequence')
parser.add_argument('-d', dest = 'directoryName', help = 'name of the parent directory containing the videos and extracted trajectories to process', required = True)
# SVM/HoG parameters, to be enabled when the classification pipeline is wired up
#parser.add_argument('--kernel', dest = 'kernelType', help = 'kernel type for the support vector machine (SVM)', default = SVM_RBF, type = long)
#parser.add_argument('--svm', dest = 'svmType', help = 'SVM type', default = SVM_C_SVC, type = long)
#parser.add_argument('-s', dest = 'rescaleSize', help = 'rescale size of image samples', default = 64, type = int)
#parser.add_argument('-o', dest = 'nOrientations', help = 'number of orientations in HoG', default = 9, type = int)
#parser.add_argument('-p', dest = 'nPixelsPerCell', help = 'number of pixels per cell', default = 8, type = int)
#parser.add_argument('-c', dest = 'nCellsPerBlock', help = 'number of cells per block', default = 2, type = int)
args = parser.parse_args()

print('TODO') # add all parameters for classification (distribution parameters)

scripts/train-object-classification.py

 
 import cvutils, moving, ml
 
-
-# todo update with argparse
 parser = argparse.ArgumentParser(description='The program processes indicators for all pairs of road users in the scene')
 parser.add_argument('-d', dest = 'directoryName', help = 'parent directory name for the directories containing the samples for the different road users', required = True)
 parser.add_argument('--kernel', dest = 'kernelType', help = 'kernel type for the support vector machine (SVM)', default = SVM_RBF, type = long)