Commits

Bryan O'Sullivan committed bfaf794 Merge

Files changed (4)

Criterion.hs

     , runAndAnalyse
     ) where
 
-import Control.Monad ((<=<), replicateM_, when)
+import Control.Monad (replicateM_, when, mplus)
 import Control.Monad.Trans (liftIO)
 import Criterion.Analysis (Outliers(..), OutlierEffect(..), OutlierVariance(..),
                            SampleAnalysis(..), analyseSample,
 import Criterion.Types (Benchmarkable(..), Benchmark(..), Pure,
                         bench, bgroup, nf, nfIO, whnf, whnfIO)
 import qualified Data.Vector.Unboxed as U
+import Data.Monoid (getLast)
 import Statistics.Resampling.Bootstrap (Estimate(..))
 import Statistics.Types (Sample)
 import System.Mem (performGC)
                       (secs $ estPoint e)
                       (secs $ estLowerBound e) (secs $ estUpperBound e)
                       (estConfidenceLevel e)
-                    summary $ printf "%g,%g,%g" 
+                    summary $ printf "%g,%g,%g"
                       (estPoint e)
                       (estLowerBound e) (estUpperBound e)
 
-plotAll :: [(String, Sample, SampleAnalysis, Outliers)] -> Criterion ()
+
+plotAll :: [Result] -> Criterion ()
 plotAll descTimes = do
-  report (zipWith (\n (d,t,a,o) -> Report n d t a o) [0..] descTimes)
+  report (zipWith (\n (Result d t a o) -> Report n d t a o) [0..] descTimes)
+
+data Result = Result { description    :: String
+                     , _sample        :: Sample
+                     , sampleAnalysis :: SampleAnalysis
+                     , _outliers      :: Outliers
+                     }
+
+type ResultForest = [ResultTree]
+data ResultTree = Single Result | Compare ResultForest
 
 -- | Run, and analyse, one or more benchmarks.
 runAndAnalyse :: (String -> Bool) -- ^ A predicate that chooses
               -> Environment
               -> Benchmark
               -> Criterion ()
-runAndAnalyse p env = plotAll <=< go ""
-  where go pfx (Benchmark desc b)
+runAndAnalyse p env bs' = do
+  rts <- go "" bs'
+
+  mbCompareFile <- getConfigItem $ getLast . cfgCompareFile
+  case mbCompareFile of
+    Nothing -> return ()
+    Just compareFile -> do
+      liftIO $ writeFile compareFile $ resultForestToCSV rts
+
+  plotAll $ flatten rts
+
+  where go :: String -> Benchmark -> Criterion ResultForest
+        go pfx (Benchmark desc b)
             | p desc'   = do _ <- note "\nbenchmarking %s\n" desc'
                              summary (show desc' ++ ",") -- String will be quoted
                              (x,an,out) <- runAndAnalyseOne env desc' b
-                             return [(desc',x,an,out)]
+                             let result = Result desc' x an out
+                             return [Single result]
             | otherwise = return []
             where desc' = prefix pfx desc
         go pfx (BenchGroup desc bs) =
             concat `fmap` mapM (go (prefix pfx desc)) bs
+        go pfx (BenchCompare bs) = ((:[]) . Compare . concat) `fmap` mapM (go pfx) bs
+
         prefix ""  desc = desc
         prefix pfx desc = pfx ++ '/' : desc
+
+        flatten :: ResultForest -> [Result]
+        flatten [] = []
+        flatten (Single r    : rs) = r : flatten rs
+        flatten (Compare crs : rs) = flatten crs ++ flatten rs
+
+resultForestToCSV :: ResultForest -> String
+resultForestToCSV = unlines
+                  . ("Reference,Name,% faster than reference" :)
+                  . map (\(ref, n, p) -> printf "%s,%s,%.0f" ref n p)
+                  . top
+        where
+          top :: ResultForest -> [(String, String, Double)]
+          top [] = []
+          top (Single _     : rts) = top rts
+          top (Compare rts' : rts) = cmpRT rts' ++ top rts
+
+          cmpRT :: ResultForest -> [(String, String, Double)]
+          cmpRT [] = []
+          cmpRT (Single r     : rts) = cmpWith r rts
+          cmpRT (Compare rts' : rts) = case getReference rts' of
+                                         Nothing -> cmpRT rts
+                                         Just r  -> cmpRT rts' ++ cmpWith r rts
+
+          cmpWith :: Result -> ResultForest -> [(String, String, Double)]
+          cmpWith _   [] = []
+          cmpWith ref (Single r     : rts) = cmp ref r : cmpWith ref rts
+          cmpWith ref (Compare rts' : rts) = cmpRT rts'       ++
+                                             cmpWith ref rts' ++
+                                             cmpWith ref rts
+
+          getReference :: ResultForest -> Maybe Result
+          getReference []                   = Nothing
+          getReference (Single r     : _)   = Just r
+          getReference (Compare rts' : rts) = getReference rts' `mplus`
+                                              getReference rts
+
+cmp :: Result -> Result -> (String, String, Double)
+cmp ref r = (description ref, description r, percentFaster)
+    where
+      percentFaster = (meanRef - meanR) / meanRef * 100
+
+      meanRef = mean ref
+      meanR   = mean r
+
+      mean = estPoint . anMean . sampleAnalysis

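For intuition, the figure computed by cmp is (meanRef - meanR) / meanRef * 100, so a reference whose mean estimate is 10 ms, compared against a result whose mean is 8 ms, is reported as (10 - 8) / 10 * 100 = 20% faster than reference; a slower result yields a negative percentage. A minimal runnable sketch of that arithmetic, with invented timings:

    -- Worked example of the percent-faster formula from 'cmp' above.
    -- Both timings are hypothetical, expressed in seconds.
    percentFasterExample :: Double
    percentFasterExample = (meanRef - meanR) / meanRef * 100
      where
        meanRef = 10.0e-3  -- mean estimate of the reference benchmark
        meanR   =  8.0e-3  -- mean estimate of the compared benchmark
    -- evaluates to 20.0
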
Criterion/Config.hs

     , cfgReport       :: Last FilePath -- ^ Filename of report.
     , cfgSamples      :: Last Int    -- ^ Number of samples to collect.
     , cfgSummaryFile  :: Last FilePath -- ^ Filename of summary CSV.
+    , cfgCompareFile  :: Last FilePath -- ^ Filename of the comparison CSV.
     , cfgTemplate     :: Last FilePath -- ^ Filename of report template.
     , cfgVerbosity    :: Last Verbosity -- ^ Whether to run verbosely.
     } deriving (Eq, Read, Show, Typeable)
                 , cfgReport       = mempty
                 , cfgSamples      = ljust 100
                 , cfgSummaryFile  = mempty
+                , cfgCompareFile  = mempty
                 , cfgTemplate     = ljust "report.tpl"
                 , cfgVerbosity    = ljust Normal
                 }
               , cfgResamples    = mempty
               , cfgSamples      = mempty
               , cfgSummaryFile  = mempty
+              , cfgCompareFile  = mempty
               , cfgTemplate     = mempty
               , cfgVerbosity    = mempty
               }
     , cfgResamples    = app cfgResamples a b
     , cfgSamples      = app cfgSamples a b
     , cfgSummaryFile  = app cfgSummaryFile a b
+    , cfgCompareFile  = app cfgCompareFile a b
     , cfgTemplate     = app cfgTemplate a b
     , cfgVerbosity    = app cfgVerbosity a b
     }
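
The new cfgCompareFile field follows the same Last-monoid convention as the neighbouring fields: when two configs are combined with mappend, the rightmost Just wins, which is what lets a command-line flag override a default. A small GHCi sketch of that behaviour:

    >>> getLast (Last (Just "a.csv") `mappend` Last (Just "b.csv"))
    Just "b.csv"
    >>> getLast (Last (Just "a.csv") `mappend` Last Nothing)
    Just "a.csv"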

Criterion/Main.hs

     -- * Constructing benchmarks
     , bench
     , bgroup
+    , bcompare
     , nf
     , whnf
     , nfIO
 import Criterion.IO (note, printError)
 import Criterion.Monad (Criterion, withConfig)
 import Criterion.Types (Benchmarkable(..), Benchmark(..), Pure, bench,
-                        benchNames, bgroup, nf, nfIO, whnf, whnfIO)
+                        benchNames, bgroup, bcompare, nf, nfIO, whnf, whnfIO)
 import Data.List (isPrefixOf, sort)
 import Data.Monoid (Monoid(..), Last(..))
 import System.Console.GetOpt
           "template file to use"
  , Option ['u'] ["summary"] (ReqArg (\s -> return $ mempty { cfgSummaryFile = ljust s }) "FILENAME")
           "produce a summary CSV file of all results"
+ , Option ['r'] ["compare"] (ReqArg (\s -> return $ mempty { cfgCompareFile = ljust s }) "FILENAME")
+          "produce a CSV file of comparisons\nagainst reference benchmarks.\nSee the bcompare combinator"
  , Option ['V'] ["version"] (noArg mempty { cfgPrintExit = Version })
           "display version, then exit"
  , Option ['v'] ["verbose"] (noArg mempty { cfgVerbosity = ljust Verbose })
  ]
 
 printBanner :: Config -> IO ()
-printBanner cfg = withConfig cfg $ 
+printBanner cfg = withConfig cfg $
     case cfgBanner cfg of
       Last (Just b) -> note "%s\n" b
       _             -> note "Hey, nobody told me what version I am!\n"
 -- >              -- Always GC between runs.
 -- >              cfgPerformGC = ljust True
 -- >            }
--- > 
+-- >
 -- > main = defaultMainWith myConfig (return ()) [
 -- >          bench "fib 30" $ whnf fib 30
 -- >        ]
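
Following the pattern of the documentation example above, the comparison file can presumably also be requested from code instead of the -r flag (a sketch, assuming cfgCompareFile mirrors how cfgSummaryFile is used):

    -- Hypothetical: request the comparison CSV via the config record.
    myConfig = defaultConfig {
                 cfgCompareFile = ljust "comparisons.csv"
               }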

Criterion/Types.hs

     , whnfIO
     , bench
     , bgroup
+    , bcompare
     , benchNames
     ) where
 
 -- with a name, created with 'bench', or a (possibly nested) group of
 -- 'Benchmark's, created with 'bgroup'.
 data Benchmark where
-    Benchmark  :: Benchmarkable b => String -> b -> Benchmark
-    BenchGroup :: String -> [Benchmark] -> Benchmark
+    Benchmark    :: Benchmarkable b => String -> b -> Benchmark
+    BenchGroup   :: String -> [Benchmark] -> Benchmark
+    BenchCompare :: [Benchmark] -> Benchmark
 
 -- | Create a single benchmark.
 bench :: Benchmarkable b =>
        -> Benchmark
 bgroup = BenchGroup
 
+-- | Compare benchmarks against a reference benchmark
+-- (the first 'bench' in the given list).
+--
+-- The results of the comparisons are written to a CSV file specified with the
+-- @-r@ command-line flag. The file has the following header row:
+--
+-- @Reference,Name,% faster than reference@
+bcompare :: [Benchmark] -> Benchmark
+bcompare = BenchCompare
+
 -- | Retrieve the names of all benchmarks.  Grouped benchmarks are
 -- prefixed with the name of the group they're in.
 benchNames :: Benchmark -> [String]
 benchNames (Benchmark d _)   = [d]
 benchNames (BenchGroup d bs) = map ((d ++ "/") ++) . concatMap benchNames $ bs
+benchNames (BenchCompare bs) =                       concatMap benchNames $ bs
 
 instance Show Benchmark where
     show (Benchmark d _)  = ("Benchmark " ++ show d)
     show (BenchGroup d _) = ("BenchGroup " ++ show d)
+    show (BenchCompare _) = ("BenchCompare")
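
To close the loop, a minimal sketch of the new combinator in use; the fib helper is hypothetical, echoing the example in Criterion.Main's documentation:

    import Criterion.Main

    fib :: Int -> Integer
    fib n | n < 2     = fromIntegral n
          | otherwise = fib (n - 1) + fib (n - 2)

    -- The first bench in the list is the reference; running the
    -- executable with "-r comparisons.csv" writes one CSV row per
    -- non-reference entry in the group.
    main :: IO ()
    main = defaultMain
      [ bcompare
          [ bench "fib 20" $ whnf fib 20  -- reference
          , bench "fib 25" $ whnf fib 25
          , bench "fib 30" $ whnf fib 30
          ]
      ]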