Bryan O'Sullivan committed eef9cc6

Kill off benchmark comparison as a feature

I never really reviewed this, never use it, and find the code very
obtuse and difficult to understand. This makes it a huge pain to
refactor when I want to add new features.

It seems more correct to get rid of this feature and find a better
way to implement it, one that doesn't make the core of criterion
less maintainable.
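
For context, here is roughly how the removed combinator was used: the
first bench in a bcompare group served as the reference, and the
comparison CSV was requested with the -r flag. A minimal sketch
against the pre-commit API (fib is a stand-in workload, not from this
commit):

    import Criterion.Main

    fib :: Int -> Integer
    fib n = if n < 2 then fromIntegral n else fib (n - 1) + fib (n - 2)

    main :: IO ()
    main = defaultMain
      [ bcompare                            -- removed by this commit
          [ bench "fib 20" (whnf fib 20)    -- first entry: the reference
          , bench "fib 25" (whnf fib 25)    -- reported as % faster than it
          ]
      ]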

  • Parent commit: c592709


Files changed (6)

Criterion.hs

     , nfIO
     , whnfIO
     , bench
-    , bcompare
     , bgroup
     , runBenchmark
     , runAndAnalyse

Criterion/Config.hs

     , cfgReport       :: Last FilePath -- ^ Filename of report.
     , cfgSamples      :: Last Int    -- ^ Number of samples to collect.
     , cfgSummaryFile  :: Last FilePath -- ^ Filename of summary CSV.
-    , cfgCompareFile  :: Last FilePath -- ^ Filename of the comparison CSV.
     , cfgTemplate     :: Last FilePath -- ^ Filename of report template.
     , cfgVerbosity    :: Last Verbosity -- ^ Whether to run verbosely.
     , cfgJUnitFile    :: Last FilePath -- ^ Filename of JUnit report.
                 , cfgReport       = mempty
                 , cfgSamples      = ljust 100
                 , cfgSummaryFile  = mempty
-                , cfgCompareFile  = mempty
                 , cfgTemplate     = ljust "report.tpl"
                 , cfgVerbosity    = ljust Normal
                 , cfgJUnitFile    = mempty
               , cfgResults      = mempty
               , cfgSamples      = mempty
               , cfgSummaryFile  = mempty
-              , cfgCompareFile  = mempty
               , cfgTemplate     = mempty
               , cfgVerbosity    = mempty
               , cfgJUnitFile    = mempty
     , cfgResults      = app cfgResults a b
     , cfgSamples      = app cfgSamples a b
     , cfgSummaryFile  = app cfgSummaryFile a b
-    , cfgCompareFile  = app cfgCompareFile a b
     , cfgTemplate     = app cfgTemplate a b
     , cfgVerbosity    = app cfgVerbosity a b
     , cfgJUnitFile    = app cfgJUnitFile a b

Criterion/IO.hs

     , writeResults
     ) where
 
-import Criterion.Types (ResultForest, ResultTree(..))
+import Criterion.Types (Result(..))
 import Data.Binary (Binary(..), encode)
 import Data.Binary.Get (runGetOrFail)
 import Data.Binary.Put (putByteString, putWord16be, runPut)
   putByteString "criterio"
   mapM_ (putWord16be . fromIntegral) (versionBranch version)
 
-hGetResults :: Handle -> IO (Either String ResultForest)
+hGetResults :: Handle -> IO (Either String [Result])
 hGetResults handle = do
-  let fixup = reverse . nukem . reverse
-      nukem (Compare k _ : rs) = let (cs, rs') = splitAt k rs
-                                 in Compare k (fixup (reverse cs)) : nukem rs'
-      nukem (r : rs)           = r : nukem rs
-      nukem _                  = []
   bs <- L.hGet handle (fromIntegral (L.length header))
   if bs == header
-    then (Right . fixup) `fmap` readAll handle
+    then Right `fmap` readAll handle
     else return $ Left "unexpected header"
 
-hPutResults :: Handle -> ResultForest -> IO ()
+hPutResults :: Handle -> [Result] -> IO ()
 hPutResults handle rs = do
   L.hPut handle header
   mapM_ (L.hPut handle . encode) rs
 
-readResults :: FilePath -> IO (Either String ResultForest)
+readResults :: FilePath -> IO (Either String [Result])
 readResults path = withFile path ReadMode hGetResults
 
-writeResults :: FilePath -> ResultForest -> IO ()
+writeResults :: FilePath -> [Result] -> IO ()
 writeResults path rs = withFile path WriteMode (flip hPutResults rs)
 
 readAll :: Binary a => Handle -> IO [a]
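
With ResultForest gone, a results file is just the header followed by a
flat list of encoded Result values. A minimal sketch, assuming the
post-commit API, of a round trip through such a file (saveAndReload is
a hypothetical helper, not part of this commit):

    import Criterion.IO (readResults, writeResults)
    import Criterion.Types (Result)

    -- Write a flat result list, then read it back, failing loudly if
    -- the header check or decoding fails.
    saveAndReload :: FilePath -> [Result] -> IO [Result]
    saveAndReload path rs = do
      writeResults path rs
      either fail return =<< readResults path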

Criterion/Internal.hs

     , prefix
     ) where
 
-import Control.Monad (foldM, replicateM_, when, mplus)
+import Control.Monad (foldM, replicateM_, when)
 import Control.Monad.Trans (liftIO)
 import Data.Binary (encode)
 import qualified Data.ByteString.Lazy as L
 import Criterion.Measurement (getTime, runForAtLeast, secs, time_)
 import Criterion.Monad (Criterion, getConfig, getConfigItem)
 import Criterion.Report (Report(..), report)
-import Criterion.Types (Benchmark(..), Benchmarkable(..),
-                        Result(..), ResultForest, ResultTree(..))
+import Criterion.Types (Benchmark(..), Benchmarkable(..), Payload(..),
+                        Result(..))
 import qualified Data.Vector.Unboxed as U
 import Data.Monoid (getLast)
 import Statistics.Resampling.Bootstrap (Estimate(..))
 
 plotAll :: [Result] -> Criterion ()
 plotAll descTimes = do
-  report (zipWith (\n (Result d t a o) -> Report n d t a o) [0..] descTimes)
+  report (zipWith (\n (Single d (Payload t a o)) -> Report n d t a o) [0..] descTimes)
 
 -- | Run, and analyse, one or more benchmarks.
 runAndAnalyse :: (String -> Bool) -- ^ A predicate that chooses
           | p desc'   = do _ <- note "\nbenchmarking %s\n" desc'
                            summary (show desc' ++ ",") -- String will be quoted
                            (x,an,out) <- runAndAnalyseOne env desc' b
-                           let result = Single $ Result desc' x an out
+                           let result = Single desc' $ Payload x an out
                            liftIO $ L.hPut handle (encode result)
                            return $! k + 1
           | otherwise = return (k :: Int)
           where desc' = prefix pfx desc
       go !k (pfx, BenchGroup desc bs) =
           foldM go k [(prefix pfx desc, b) | b <- bs]
-      go !k (pfx, BenchCompare bs) = do
-                          l <- foldM go 0 [(pfx, b) | b <- bs]
-                          let result = Compare l []
-                          liftIO $ L.hPut handle (encode result)
-                          return $! l + k
   _ <- go 0 ("", bs')
 
   rts <- (either fail return =<<) . liftIO $ do
       Just _ -> return rs
       _      -> removeFile resultFile >> return rs
 
-  mbCompareFile <- getConfigItem $ getLast . cfgCompareFile
-  case mbCompareFile of
-    Nothing -> return ()
-    Just compareFile -> do
-      liftIO $ writeFile compareFile $ resultForestToCSV rts
-
-  let rs = flatten rts
-  plotAll rs
-  junit rs
+  plotAll rts
+  junit rts
 
 runNotAnalyse :: (String -> Bool) -- ^ A predicate that chooses
                                   -- whether to run a benchmark by its
             where desc' = prefix pfx desc
         goQuickly pfx (BenchGroup desc bs) =
             mapM_ (goQuickly (prefix pfx desc)) bs
-        goQuickly pfx (BenchCompare bs) = mapM_ (goQuickly pfx) bs
 
         runOne (Benchmarkable run) = do
             samples <- getConfigItem $ fromLJ cfgSamples
 prefix ""  desc = desc
 prefix pfx desc = pfx ++ '/' : desc
 
-flatten :: ResultForest -> [Result]
-flatten [] = []
-flatten (Single r    : rs) = r : flatten rs
-flatten (Compare _ crs : rs) = flatten crs ++ flatten rs
-
-resultForestToCSV :: ResultForest -> String
-resultForestToCSV = unlines
-                  . ("Reference,Name,% faster than reference" :)
-                  . map (\(ref, n, p) -> printf "%s,%s,%.0f" ref n p)
-                  . top
-        where
-          top :: ResultForest -> [(String, String, Double)]
-          top [] = []
-          top (Single _     : rts) = top rts
-          top (Compare _ rts' : rts) = cmpRT rts' ++ top rts
-
-          cmpRT :: ResultForest -> [(String, String, Double)]
-          cmpRT [] = []
-          cmpRT (Single r     : rts) = cmpWith r rts
-          cmpRT (Compare _ rts' : rts) = case getReference rts' of
-                                         Nothing -> cmpRT rts
-                                         Just r  -> cmpRT rts' ++ cmpWith r rts
-
-          cmpWith :: Result -> ResultForest -> [(String, String, Double)]
-          cmpWith _   [] = []
-          cmpWith ref (Single r     : rts) = cmp ref r : cmpWith ref rts
-          cmpWith ref (Compare _ rts' : rts) = cmpRT rts'       ++
-                                             cmpWith ref rts' ++
-                                             cmpWith ref rts
-
-          getReference :: ResultForest -> Maybe Result
-          getReference []                   = Nothing
-          getReference (Single r     : _)   = Just r
-          getReference (Compare _ rts' : rts) = getReference rts' `mplus`
-                                              getReference rts
-
-cmp :: Result -> Result -> (String, String, Double)
-cmp ref r = (description ref, description r, percentFaster)
-    where
-      percentFaster = (meanRef - meanR) / meanRef * 100
-
-      meanRef = mean ref
-      meanR   = mean r
-
-      mean = estPoint . anMean . sampleAnalysis
-
 -- | Write summary JUnit file (if applicable)
 junit :: [Result] -> Criterion ()
 junit rs
           (length rs) ++
           concatMap single rs ++
           "</testsuite>\n"
-    single r = printf "  <testcase name=\"%s\" time=\"%f\" />\n"
-               (attrEsc $ description r) (estPoint $ anMean $ sampleAnalysis r)
+    single (Single d r) = printf "  <testcase name=\"%s\" time=\"%f\" />\n"
+               (attrEsc d) (estPoint $ anMean $ sampleAnalysis r)
     attrEsc = concatMap esc
       where
         esc '\'' = "&apos;"

Criterion/Main.hs

     -- * Constructing benchmarks
     , bench
     , bgroup
-    , bcompare
     , nf
     , whnf
     , nfIO
 import Criterion.IO.Printf (note, printError)
 import Criterion.Monad (Criterion, withConfig)
 import Criterion.Types (Benchmarkable(..), Benchmark(..), bench,
-                        benchNames, bgroup, bcompare, nf, nfIO, whnf, whnfIO)
+                        benchNames, bgroup, nf, nfIO, whnf, whnfIO)
 import Data.Char (toLower)
 import Data.List (isPrefixOf, sort, stripPrefix)
 import Data.Maybe (fromMaybe)
           "template file to use"
  , Option ['u'] ["summary"] (ReqArg (\s -> return $ mempty { cfgSummaryFile = ljust s }) "FILENAME")
           "produce a summary CSV file of all results"
- , Option ['r'] ["compare"] (ReqArg (\s -> return $ mempty { cfgCompareFile = ljust s }) "FILENAME")
-          "produce a CSV file of comparisons\nagainst reference benchmarks\n\
-          \(see the bcompare combinator)"
  , Option ['n'] ["no-measurements"] (noArg mempty { cfgMeasure = ljust False })
           "don't do any measurements"
  , Option ['V'] ["version"] (noArg mempty { cfgPrintExit = Version })
   names = go ""
     where go pfx (BenchGroup pfx' bms) = concatMap (go (prefix pfx pfx')) bms
           go pfx (Benchmark desc _)    = [prefix pfx desc]
-          go _   (BenchCompare _)      = []
 
 -- | Display an error message from a command line parsing failure, and
 -- exit.

Criterion/Types.hs

     , whnfIO
     , bench
     , bgroup
-    , bcompare
     , benchNames
     -- * Result types
     , Result(..)
-    , ResultForest
-    , ResultTree(..)
+    , Payload(..)
     ) where
 
 import Control.DeepSeq (NFData, rnf)
 data Benchmark where
     Benchmark    :: String -> Benchmarkable -> Benchmark
     BenchGroup   :: String -> [Benchmark] -> Benchmark
-    BenchCompare :: [Benchmark] -> Benchmark
 
 -- | Create a single benchmark.
 bench :: String                 -- ^ A name to identify the benchmark.
        -> Benchmark
 bgroup = BenchGroup
 
--- | Compare benchmarks against a reference benchmark
--- (The first 'bench' in the given list).
---
--- The results of the comparisons are written to a CSV file specified using the
--- @-r@ command line flag. The CSV file uses the following format:
---
--- @Reference,Name,% faster than the reference@
-bcompare :: [Benchmark] -> Benchmark
-bcompare = BenchCompare
-
 -- | Retrieve the names of all benchmarks.  Grouped benchmarks are
 -- prefixed with the name of the group they're in.
 benchNames :: Benchmark -> [String]
 benchNames (Benchmark d _)   = [d]
 benchNames (BenchGroup d bs) = map ((d ++ "/") ++) . concatMap benchNames $ bs
-benchNames (BenchCompare bs) =                       concatMap benchNames $ bs
 
 instance Show Benchmark where
     show (Benchmark d _)  = ("Benchmark " ++ show d)
     show (BenchGroup d _) = ("BenchGroup " ++ show d)
-    show (BenchCompare _) = ("BenchCompare")
 
-data Result = Result {
-      description    :: String
-    , sample         :: Sample
+data Payload = Payload {
+      sample         :: Sample
     , sampleAnalysis :: SampleAnalysis
     , outliers       :: Outliers
     } deriving (Eq, Read, Show, Typeable, Data, Generic)
 
+instance Binary Payload
+
+data Result = Single String Payload
+              deriving (Eq, Read, Show, Typeable, Data, Generic)
+
 instance Binary Result
-
-type ResultForest = [ResultTree]
-data ResultTree = Single Result
-                | Compare !Int ResultForest
-                  deriving (Eq, Read, Show, Typeable, Data, Generic)
-
-instance Binary ResultTree
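
The description that used to live in the old Result record now sits in
the Single constructor. A minimal sketch (a hypothetical helper, not
part of this commit) of recovering it:

    import Criterion.Types (Result(..))

    -- Pull the benchmark description out of the new flat Result type.
    description :: Result -> String
    description (Single d _) = d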