Bryan O'Sullivan committed c28ac80

Allow benchmarks to be specified via globs
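
For context, a minimal sketch of how the new matching is meant to be used (the suite, its names, and the executable name are made up for illustration):

import Criterion.Main

-- Hypothetical suite.  With this commit, an invocation such as
--
--   ./bench --match glob 'fib/*'
--
-- selects the "fib" group with a zsh-style pattern; the old prefix
-- matching remains the default.
main :: IO ()
main = defaultMain
  [ bgroup "fib"  [ bench "10" (whnf fib 10), bench "20" (whnf fib 20) ]
  , bgroup "fold" [ bench "sum" (whnf sum [1..1000 :: Int]) ]
  ]
  where
    fib :: Int -> Int
    fib n = if n < 2 then n else fib (n - 1) + fib (n - 2)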

Files changed (6)

Criterion.hs

     , runNotAnalyse
     ) where
 
-import Control.Monad (replicateM_, when, mplus)
-import Control.Monad.Trans (liftIO)
-import Criterion.Analysis (Outliers(..), OutlierEffect(..), OutlierVariance(..),
-                           SampleAnalysis(..), analyseSample,
-                           classifyOutliers, noteOutliers)
-import Criterion.Config (Config(..), Verbosity(..), fromLJ)
-import Criterion.Environment (Environment(..))
-import Criterion.IO (note, prolix, summary)
-import Criterion.Measurement (getTime, runForAtLeast, secs, time_)
-import Criterion.Monad (Criterion, getConfig, getConfigItem)
-import Criterion.Report (Report(..), report)
-import Criterion.Types (Benchmarkable(..), Benchmark(..), Pure,
-                        bench, bgroup, nf, nfIO, whnf, whnfIO)
-import qualified Data.Vector.Unboxed as U
-import Data.Monoid (getLast)
-import Statistics.Resampling.Bootstrap (Estimate(..))
-import Statistics.Types (Sample)
-import System.Mem (performGC)
-import Text.Printf (printf)
-
--- | Run a single benchmark, and return timings measured when
--- executing it.
-runBenchmark :: Benchmarkable b => Environment -> b -> Criterion Sample
-runBenchmark env b = do
-  _ <- liftIO $ runForAtLeast 0.1 10000 (`replicateM_` getTime)
-  let minTime = envClockResolution env * 1000
-  (testTime, testIters, _) <- liftIO $ runForAtLeast (min minTime 0.1) 1 (run b)
-  _ <- prolix "ran %d iterations in %s\n" testIters (secs testTime)
-  cfg <- getConfig
-  let newIters    = ceiling $ minTime * testItersD / testTime
-      sampleCount = fromLJ cfgSamples cfg
-      newItersD   = fromIntegral newIters
-      testItersD  = fromIntegral testIters
-      estTime     = (fromIntegral sampleCount * newItersD *
-                     testTime / testItersD)
-  when (fromLJ cfgVerbosity cfg > Normal || estTime > 5) $
-    note "collecting %d samples, %d iterations each, in estimated %s\n"
-       sampleCount newIters (secs estTime)
-  -- Run the GC to make sure garabage created by previous benchmarks
-  -- don't affect this benchmark.
-  liftIO performGC
-  times <- liftIO . fmap (U.map ((/ newItersD) . subtract (envClockCost env))) .
-           U.replicateM sampleCount $ do
-             when (fromLJ cfgPerformGC cfg) $ performGC
-             time_ (run b newIters)
-  return times
-
--- | Run a single benchmark and analyse its performance.
-runAndAnalyseOne :: Benchmarkable b => Environment -> String -> b
-                 -> Criterion (Sample,SampleAnalysis,Outliers)
-runAndAnalyseOne env _desc b = do
-  times <- runBenchmark env b
-  ci <- getConfigItem $ fromLJ cfgConfInterval
-  numResamples <- getConfigItem $ fromLJ cfgResamples
-  _ <- prolix "analysing with %d resamples\n" numResamples
-  an@SampleAnalysis{..} <- liftIO $ analyseSample ci times numResamples
-  let OutlierVariance{..} = anOutlierVar
-  let wibble = case ovEffect of
-                 Unaffected -> "unaffected" :: String
-                 Slight -> "slightly inflated"
-                 Moderate -> "moderately inflated"
-                 Severe -> "severely inflated"
-  bs "mean" anMean
-  summary ","
-  bs "std dev" anStdDev
-  summary "\n"
-  vrb <- getConfigItem $ fromLJ cfgVerbosity
-  let out = classifyOutliers times
-  when (vrb == Verbose || (ovEffect > Unaffected && vrb > Quiet)) $ do
-    noteOutliers out
-    _ <- note "variance introduced by outliers: %.3f%%\n" (ovFraction * 100)
-    _ <- note "variance is %s by outliers\n" wibble
-    return ()
-  return (times,an,out)
-  where bs :: String -> Estimate -> Criterion ()
-        bs d e = do _ <- note "%s: %s, lb %s, ub %s, ci %.3f\n" d
-                      (secs $ estPoint e)
-                      (secs $ estLowerBound e) (secs $ estUpperBound e)
-                      (estConfidenceLevel e)
-                    summary $ printf "%g,%g,%g"
-                      (estPoint e)
-                      (estLowerBound e) (estUpperBound e)
-
-
-plotAll :: [Result] -> Criterion ()
-plotAll descTimes = do
-  report (zipWith (\n (Result d t a o) -> Report n d t a o) [0..] descTimes)
-
-data Result = Result { description    :: String
-                     , _sample        :: Sample
-                     , sampleAnalysis :: SampleAnalysis
-                     , _outliers      :: Outliers
-                     }
-
-type ResultForest = [ResultTree]
-data ResultTree = Single Result | Compare ResultForest
-
--- | Run, and analyse, one or more benchmarks.
-runAndAnalyse :: (String -> Bool) -- ^ A predicate that chooses
-                                  -- whether to run a benchmark by its
-                                  -- name.
-              -> Environment
-              -> Benchmark
-              -> Criterion ()
-runAndAnalyse p env bs' = do
-  rts <- go "" bs'
-
-  mbCompareFile <- getConfigItem $ getLast . cfgCompareFile
-  case mbCompareFile of
-    Nothing -> return ()
-    Just compareFile -> do
-      liftIO $ writeFile compareFile $ resultForestToCSV rts
-
-  let rs = flatten rts
-  plotAll rs
-  junit rs
-
-  where go :: String -> Benchmark -> Criterion ResultForest
-        go pfx (Benchmark desc b)
-            | p desc'   = do _ <- note "\nbenchmarking %s\n" desc'
-                             summary (show desc' ++ ",") -- String will be quoted
-                             (x,an,out) <- runAndAnalyseOne env desc' b
-                             let result = Result desc' x an out
-                             return [Single result]
-            | otherwise = return []
-            where desc' = prefix pfx desc
-        go pfx (BenchGroup desc bs) =
-            concat `fmap` mapM (go (prefix pfx desc)) bs
-        go pfx (BenchCompare bs) = ((:[]) . Compare . concat) `fmap` mapM (go pfx) bs
-
-runNotAnalyse :: (String -> Bool) -- ^ A predicate that chooses
-                                  -- whether to run a benchmark by its
-                                  -- name.
-              -> Benchmark
-              -> Criterion ()
-runNotAnalyse p bs' = goQuickly "" bs'
-  where goQuickly :: String -> Benchmark -> Criterion ()
-        goQuickly pfx (Benchmark desc b)
-            | p desc'   = do _ <- note "benchmarking %s\n" desc'
-                             runOne b
-            | otherwise = return ()
-            where desc' = prefix pfx desc
-        goQuickly pfx (BenchGroup desc bs) =
-            mapM_ (goQuickly (prefix pfx desc)) bs
-        goQuickly pfx (BenchCompare bs) = mapM_ (goQuickly pfx) bs
-
-        runOne b = do
-            samples <- getConfigItem $ fromLJ cfgSamples
-            liftIO $ run b samples
-
-prefix :: String -> String -> String
-prefix ""  desc = desc
-prefix pfx desc = pfx ++ '/' : desc
-
-flatten :: ResultForest -> [Result]
-flatten [] = []
-flatten (Single r    : rs) = r : flatten rs
-flatten (Compare crs : rs) = flatten crs ++ flatten rs
-
-resultForestToCSV :: ResultForest -> String
-resultForestToCSV = unlines
-                  . ("Reference,Name,% faster than reference" :)
-                  . map (\(ref, n, p) -> printf "%s,%s,%.0f" ref n p)
-                  . top
-        where
-          top :: ResultForest -> [(String, String, Double)]
-          top [] = []
-          top (Single _     : rts) = top rts
-          top (Compare rts' : rts) = cmpRT rts' ++ top rts
-
-          cmpRT :: ResultForest -> [(String, String, Double)]
-          cmpRT [] = []
-          cmpRT (Single r     : rts) = cmpWith r rts
-          cmpRT (Compare rts' : rts) = case getReference rts' of
-                                         Nothing -> cmpRT rts
-                                         Just r  -> cmpRT rts' ++ cmpWith r rts
-
-          cmpWith :: Result -> ResultForest -> [(String, String, Double)]
-          cmpWith _   [] = []
-          cmpWith ref (Single r     : rts) = cmp ref r : cmpWith ref rts
-          cmpWith ref (Compare rts' : rts) = cmpRT rts'       ++
-                                             cmpWith ref rts' ++
-                                             cmpWith ref rts
-
-          getReference :: ResultForest -> Maybe Result
-          getReference []                   = Nothing
-          getReference (Single r     : _)   = Just r
-          getReference (Compare rts' : rts) = getReference rts' `mplus`
-                                              getReference rts
-
-cmp :: Result -> Result -> (String, String, Double)
-cmp ref r = (description ref, description r, percentFaster)
-    where
-      percentFaster = (meanRef - meanR) / meanRef * 100
-
-      meanRef = mean ref
-      meanR   = mean r
-
-      mean = estPoint . anMean . sampleAnalysis
-
--- | Write summary JUnit file (if applicable)
-junit :: [Result] -> Criterion ()
-junit rs
-  = do junitOpt <- getConfigItem (getLast . cfgJUnitFile)
-       case junitOpt of
-         Just fn -> liftIO $ writeFile fn msg
-         Nothing -> return ()
-  where
-    msg = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" ++
-          printf "<testsuite name=\"Criterion benchmarks\" tests=\"%d\">\n"
-          (length rs) ++
-          concatMap single rs ++
-          "</testsuite>\n"
-    single r = printf "  <testcase name=\"%s\" time=\"%f\" />\n"
-               (attrEsc $ description r) (estPoint $ anMean $ sampleAnalysis r)
-    attrEsc = concatMap esc
-      where
-        esc '\'' = "&apos;"
-        esc '"'  = "&quot;"
-        esc '<'  = "&lt;"
-        esc '>'  = "&gt;"
-        esc '&'  = "&amp;"
-        esc c    = [c]
+import Criterion.Internal

Criterion/Config.hs

     (
       Config(..)
     , PrintExit(..)
+    , MatchType(..)
     , Verbosity(..)
     , defaultConfig
     , fromLJ
 import Data.Monoid (Monoid(..), Last(..))
 import Data.Typeable (Typeable)
 
+data MatchType = Prefix | Glob
+               deriving (Eq, Ord, Bounded, Enum, Read, Show, Typeable)
+
 -- | Control the amount of information displayed.
 data Verbosity = Quiet
                | Normal
 data Config = Config {
       cfgBanner       :: Last String -- ^ The \"version\" banner to print.
     , cfgConfInterval :: Last Double -- ^ Confidence interval to use.
+    , cfgMatchType    :: Last MatchType -- ^ Kind of matching to use for benchmark names.
     , cfgPerformGC    :: Last Bool   -- ^ Whether to run the GC between passes.
     , cfgPrintExit    :: PrintExit   -- ^ Whether to print information and exit.
     , cfgResamples    :: Last Int    -- ^ Number of resamples to perform.
 defaultConfig = Config {
                   cfgBanner       = ljust "I don't know what version I am."
                 , cfgConfInterval = ljust 0.95
+                , cfgMatchType    = ljust Prefix
                 , cfgPerformGC    = ljust False
                 , cfgPrintExit    = Nada
                 , cfgResamples    = ljust (100 * 1000)
 emptyConfig = Config {
                 cfgBanner       = mempty
               , cfgConfInterval = mempty
+              , cfgMatchType    = mempty
               , cfgPerformGC    = mempty
               , cfgPrintExit    = mempty
               , cfgReport       = mempty
     Config {
       cfgBanner       = app cfgBanner a b
     , cfgConfInterval = app cfgConfInterval a b
+    , cfgMatchType    = app cfgMatchType a b
     , cfgPerformGC    = app cfgPerformGC a b
     , cfgPrintExit    = app cfgPrintExit a b
     , cfgReport       = app cfgReport a b
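
Each new field is wrapped in Last because Config's Monoid instance merges field-wise and Last keeps the rightmost Just: settings parsed later (e.g. from the command line) override defaults, while unset fields fall through. A minimal sketch, not part of the commit:

import Criterion.Config
import Data.Monoid (Monoid(..))

-- Merging as if the user had passed "--match glob"; with fromArgs = mempty,
-- fromLJ would instead fall back to the Prefix default.
chosen :: MatchType
chosen = fromLJ cfgMatchType (defaultConfig `mappend` fromArgs)
  where fromArgs = mempty { cfgMatchType = ljust Glob }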

Criterion/Internal.hs

+{-# LANGUAGE RecordWildCards #-}
+-- |
+-- Module      : Criterion.Internal
+-- Copyright   : (c) 2009, 2010, 2011 Bryan O'Sullivan
+--
+-- License     : BSD-style
+-- Maintainer  : bos@serpentine.com
+-- Stability   : experimental
+-- Portability : GHC
+--
+-- Core benchmarking code.
+
+module Criterion.Internal
+    (
+      Benchmarkable(..)
+    , Benchmark
+    , Pure
+    , nf
+    , whnf
+    , nfIO
+    , whnfIO
+    , bench
+    , bgroup
+    , runBenchmark
+    , runAndAnalyse
+    , runNotAnalyse
+    , prefix
+    ) where
+
+import Control.Monad (replicateM_, when, mplus)
+import Control.Monad.Trans (liftIO)
+import Criterion.Analysis (Outliers(..), OutlierEffect(..), OutlierVariance(..),
+                           SampleAnalysis(..), analyseSample,
+                           classifyOutliers, noteOutliers)
+import Criterion.Config (Config(..), Verbosity(..), fromLJ)
+import Criterion.Environment (Environment(..))
+import Criterion.IO (note, prolix, summary)
+import Criterion.Measurement (getTime, runForAtLeast, secs, time_)
+import Criterion.Monad (Criterion, getConfig, getConfigItem)
+import Criterion.Report (Report(..), report)
+import Criterion.Types (Benchmarkable(..), Benchmark(..), Pure,
+                        bench, bgroup, nf, nfIO, whnf, whnfIO)
+import qualified Data.Vector.Unboxed as U
+import Data.Monoid (getLast)
+import Statistics.Resampling.Bootstrap (Estimate(..))
+import Statistics.Types (Sample)
+import System.Mem (performGC)
+import Text.Printf (printf)
+
+-- | Run a single benchmark, and return timings measured when
+-- executing it.
+runBenchmark :: Benchmarkable b => Environment -> b -> Criterion Sample
+runBenchmark env b = do
+  _ <- liftIO $ runForAtLeast 0.1 10000 (`replicateM_` getTime)
+  let minTime = envClockResolution env * 1000
+  (testTime, testIters, _) <- liftIO $ runForAtLeast (min minTime 0.1) 1 (run b)
+  _ <- prolix "ran %d iterations in %s\n" testIters (secs testTime)
+  cfg <- getConfig
+  let newIters    = ceiling $ minTime * testItersD / testTime
+      sampleCount = fromLJ cfgSamples cfg
+      newItersD   = fromIntegral newIters
+      testItersD  = fromIntegral testIters
+      estTime     = (fromIntegral sampleCount * newItersD *
+                     testTime / testItersD)
+  when (fromLJ cfgVerbosity cfg > Normal || estTime > 5) $
+    note "collecting %d samples, %d iterations each, in estimated %s\n"
+       sampleCount newIters (secs estTime)
+  -- Run the GC to make sure garbage created by previous benchmarks
+  -- doesn't affect this benchmark.
+  liftIO performGC
+  times <- liftIO . fmap (U.map ((/ newItersD) . subtract (envClockCost env))) .
+           U.replicateM sampleCount $ do
+             when (fromLJ cfgPerformGC cfg) $ performGC
+             time_ (run b newIters)
+  return times
+
+-- | Run a single benchmark and analyse its performance.
+runAndAnalyseOne :: Benchmarkable b => Environment -> String -> b
+                 -> Criterion (Sample,SampleAnalysis,Outliers)
+runAndAnalyseOne env _desc b = do
+  times <- runBenchmark env b
+  ci <- getConfigItem $ fromLJ cfgConfInterval
+  numResamples <- getConfigItem $ fromLJ cfgResamples
+  _ <- prolix "analysing with %d resamples\n" numResamples
+  an@SampleAnalysis{..} <- liftIO $ analyseSample ci times numResamples
+  let OutlierVariance{..} = anOutlierVar
+  let wibble = case ovEffect of
+                 Unaffected -> "unaffected" :: String
+                 Slight -> "slightly inflated"
+                 Moderate -> "moderately inflated"
+                 Severe -> "severely inflated"
+  bs "mean" anMean
+  summary ","
+  bs "std dev" anStdDev
+  summary "\n"
+  vrb <- getConfigItem $ fromLJ cfgVerbosity
+  let out = classifyOutliers times
+  when (vrb == Verbose || (ovEffect > Unaffected && vrb > Quiet)) $ do
+    noteOutliers out
+    _ <- note "variance introduced by outliers: %.3f%%\n" (ovFraction * 100)
+    _ <- note "variance is %s by outliers\n" wibble
+    return ()
+  return (times,an,out)
+  where bs :: String -> Estimate -> Criterion ()
+        bs d e = do _ <- note "%s: %s, lb %s, ub %s, ci %.3f\n" d
+                      (secs $ estPoint e)
+                      (secs $ estLowerBound e) (secs $ estUpperBound e)
+                      (estConfidenceLevel e)
+                    summary $ printf "%g,%g,%g"
+                      (estPoint e)
+                      (estLowerBound e) (estUpperBound e)
+
+
+plotAll :: [Result] -> Criterion ()
+plotAll descTimes = do
+  report (zipWith (\n (Result d t a o) -> Report n d t a o) [0..] descTimes)
+
+data Result = Result { description    :: String
+                     , _sample        :: Sample
+                     , sampleAnalysis :: SampleAnalysis
+                     , _outliers      :: Outliers
+                     }
+
+type ResultForest = [ResultTree]
+data ResultTree = Single Result | Compare ResultForest
+
+-- | Run, and analyse, one or more benchmarks.
+runAndAnalyse :: (String -> Bool) -- ^ A predicate that chooses
+                                  -- whether to run a benchmark by its
+                                  -- name.
+              -> Environment
+              -> Benchmark
+              -> Criterion ()
+runAndAnalyse p env bs' = do
+  rts <- go "" bs'
+
+  mbCompareFile <- getConfigItem $ getLast . cfgCompareFile
+  case mbCompareFile of
+    Nothing -> return ()
+    Just compareFile -> do
+      liftIO $ writeFile compareFile $ resultForestToCSV rts
+
+  let rs = flatten rts
+  plotAll rs
+  junit rs
+
+  where go :: String -> Benchmark -> Criterion ResultForest
+        go pfx (Benchmark desc b)
+            | p desc'   = do _ <- note "\nbenchmarking %s\n" desc'
+                             summary (show desc' ++ ",") -- String will be quoted
+                             (x,an,out) <- runAndAnalyseOne env desc' b
+                             let result = Result desc' x an out
+                             return [Single result]
+            | otherwise = return []
+            where desc' = prefix pfx desc
+        go pfx (BenchGroup desc bs) =
+            concat `fmap` mapM (go (prefix pfx desc)) bs
+        go pfx (BenchCompare bs) = ((:[]) . Compare . concat) `fmap` mapM (go pfx) bs
+
+runNotAnalyse :: (String -> Bool) -- ^ A predicate that chooses
+                                  -- whether to run a benchmark by its
+                                  -- name.
+              -> Benchmark
+              -> Criterion ()
+runNotAnalyse p bs' = goQuickly "" bs'
+  where goQuickly :: String -> Benchmark -> Criterion ()
+        goQuickly pfx (Benchmark desc b)
+            | p desc'   = do _ <- note "benchmarking %s\n" desc'
+                             runOne b
+            | otherwise = return ()
+            where desc' = prefix pfx desc
+        goQuickly pfx (BenchGroup desc bs) =
+            mapM_ (goQuickly (prefix pfx desc)) bs
+        goQuickly pfx (BenchCompare bs) = mapM_ (goQuickly pfx) bs
+
+        runOne b = do
+            samples <- getConfigItem $ fromLJ cfgSamples
+            liftIO $ run b samples
+
+prefix :: String -> String -> String
+prefix ""  desc = desc
+prefix pfx desc = pfx ++ '/' : desc
+
+flatten :: ResultForest -> [Result]
+flatten [] = []
+flatten (Single r    : rs) = r : flatten rs
+flatten (Compare crs : rs) = flatten crs ++ flatten rs
+
+resultForestToCSV :: ResultForest -> String
+resultForestToCSV = unlines
+                  . ("Reference,Name,% faster than reference" :)
+                  . map (\(ref, n, p) -> printf "%s,%s,%.0f" ref n p)
+                  . top
+        where
+          top :: ResultForest -> [(String, String, Double)]
+          top [] = []
+          top (Single _     : rts) = top rts
+          top (Compare rts' : rts) = cmpRT rts' ++ top rts
+
+          cmpRT :: ResultForest -> [(String, String, Double)]
+          cmpRT [] = []
+          cmpRT (Single r     : rts) = cmpWith r rts
+          cmpRT (Compare rts' : rts) = case getReference rts' of
+                                         Nothing -> cmpRT rts
+                                         Just r  -> cmpRT rts' ++ cmpWith r rts
+
+          cmpWith :: Result -> ResultForest -> [(String, String, Double)]
+          cmpWith _   [] = []
+          cmpWith ref (Single r     : rts) = cmp ref r : cmpWith ref rts
+          cmpWith ref (Compare rts' : rts) = cmpRT rts'       ++
+                                             cmpWith ref rts' ++
+                                             cmpWith ref rts
+
+          getReference :: ResultForest -> Maybe Result
+          getReference []                   = Nothing
+          getReference (Single r     : _)   = Just r
+          getReference (Compare rts' : rts) = getReference rts' `mplus`
+                                              getReference rts
+
+cmp :: Result -> Result -> (String, String, Double)
+cmp ref r = (description ref, description r, percentFaster)
+    where
+      percentFaster = (meanRef - meanR) / meanRef * 100
+
+      meanRef = mean ref
+      meanR   = mean r
+
+      mean = estPoint . anMean . sampleAnalysis
+
+-- | Write summary JUnit file (if applicable)
+junit :: [Result] -> Criterion ()
+junit rs
+  = do junitOpt <- getConfigItem (getLast . cfgJUnitFile)
+       case junitOpt of
+         Just fn -> liftIO $ writeFile fn msg
+         Nothing -> return ()
+  where
+    msg = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" ++
+          printf "<testsuite name=\"Criterion benchmarks\" tests=\"%d\">\n"
+          (length rs) ++
+          concatMap single rs ++
+          "</testsuite>\n"
+    single r = printf "  <testcase name=\"%s\" time=\"%f\" />\n"
+               (attrEsc $ description r) (estPoint $ anMean $ sampleAnalysis r)
+    attrEsc = concatMap esc
+      where
+        esc '\'' = "&apos;"
+        esc '"'  = "&quot;"
+        esc '<'  = "&lt;"
+        esc '>'  = "&gt;"
+        esc '&'  = "&amp;"
+        esc c    = [c]
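
To make the sizing arithmetic in runBenchmark above concrete, a worked example with illustrative numbers (none of these values come from the commit):

-- Suppose envClockResolution = 1e-6 s, so minTime = 1e-6 * 1000 = 1e-3 s,
-- and the trial run did testIters = 20 iterations in testTime = 5e-4 s.
-- Then, following the definitions above:
--
--   newIters = ceiling (1e-3 * 20 / 5e-4)   -- = 40 iterations per sample
--   estTime  = 100 * 40 * 5e-4 / 20         -- = 0.1 s, assuming 100 samples
--
-- The "collecting %d samples ..." note stays quiet here: it is printed
-- only when estTime exceeds 5 seconds or verbosity is above Normal.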

Criterion/Main.hs

     , defaultMain
     , defaultMainWith
     -- * Other useful code
+    , makeMatcher
     , defaultOptions
     , parseArgs
     ) where
 
+import Control.Monad (unless)
 import Control.Monad.Trans (liftIO)
-import Criterion (runAndAnalyse, runNotAnalyse)
+import Criterion.Internal (runAndAnalyse, runNotAnalyse, prefix)
 import Criterion.Config
 import Criterion.Environment (measureEnvironment)
 import Criterion.IO (note, printError)
 import Criterion.Monad (Criterion, withConfig)
 import Criterion.Types (Benchmarkable(..), Benchmark(..), Pure, bench,
                         benchNames, bgroup, bcompare, nf, nfIO, whnf, whnfIO)
-import Data.List (isPrefixOf, sort)
+import Data.Char (toLower)
+import Data.List (isPrefixOf, sort, stripPrefix)
+import Data.Maybe (fromMaybe)
 import Data.Monoid (Monoid(..), Last(..))
 import System.Console.GetOpt
 import System.Environment (getArgs, getProgName)
 import System.Exit (ExitCode(..), exitWith)
+import System.FilePath.Glob
 
 -- | Parse a confidence interval.
 ci :: String -> IO Config
                 | d >= 1 = parseError "confidence interval is greater than 1"
                 | otherwise = return mempty { cfgConfInterval = ljust d }
 
+matchType :: String -> IO Config
+matchType s = case map toLower s of
+                "prefix" -> return mempty { cfgMatchType = ljust Prefix }
+                "glob" -> return mempty { cfgMatchType = ljust Glob }
+                _ -> parseError "match type is not 'glob' or 'prefix'"
+
 -- | Parse a positive number.
 pos :: (Num a, Ord a, Read a) =>
        String -> (Last a -> Config) -> String -> IO Config
           "bootstrap confidence interval"
  , Option ['l'] ["list"] (noArg mempty { cfgPrintExit = List })
           "print only a list of benchmark names"
+ , Option ['m'] ["match"] (ReqArg matchType "MATCH")
+          "how to match benchmark names (prefix|glob)"
  , Option ['o'] ["output"]
           (ReqArg (\t -> return $ mempty { cfgReport = ljust t }) "FILENAME")
           "report file to write to"
  , Option ['u'] ["summary"] (ReqArg (\s -> return $ mempty { cfgSummaryFile = ljust s }) "FILENAME")
           "produce a summary CSV file of all results"
  , Option ['r'] ["compare"] (ReqArg (\s -> return $ mempty { cfgCompareFile = ljust s }) "FILENAME")
-          "produce a CSV file of comparisons\nagainst reference benchmarks.\nSee the bcompare combinator"
+          "produce a CSV file of comparisons\nagainst reference benchmarks\n\
+          \(see the bcompare combinator)"
  , Option ['n'] ["no-measurements"] (noArg mempty { cfgMeasure = ljust False })
-          "Don't do any measurements"
+          "don't do any measurements"
  , Option ['V'] ["version"] (noArg mempty { cfgPrintExit = Version })
           "display version, then exit"
  , Option ['v'] ["verbose"] (noArg mempty { cfgVerbosity = ljust Verbose })
   p <- getProgName
   putStr (usageInfo ("Usage: " ++ p ++ " [OPTIONS] [BENCHMARKS]") options)
   putStrLn "If no benchmark names are given, all are run\n\
-           \Otherwise, benchmarks are run by prefix match"
+           \Otherwise, benchmarks are chosen by prefix or zsh-style pattern \
+           \match\n\
+           \(use --match to specify how to match the benchmarks to run)"
   exitWith exitCode
 
 -- | Parse command line options.
 defaultMain :: [Benchmark] -> IO ()
 defaultMain = defaultMainWith defaultConfig (return ())
 
+makeMatcher :: MatchType -> [String] -> Either String (String -> Bool)
+makeMatcher matchKind args =
+  case matchKind of
+    Prefix -> Right $ \b -> null args || any (`isPrefixOf` b) args
+    Glob ->
+      let compOptions = compDefault { errorRecovery = False }
+      in case mapM (tryCompileWith compOptions) args of
+           Left errMsg -> Left . fromMaybe errMsg . stripPrefix "compile :: " $
+                          errMsg
+           Right ps -> Right $ \b -> null ps || any (`match` b) ps
+
 -- | An entry point that can be used as a @main@ function, with
 -- configurable defaults.
 --
                 -> IO ()
 defaultMainWith defCfg prep bs = do
   (cfg, args) <- parseArgs defCfg defaultOptions =<< getArgs
-  let shouldRun b = null args || any (`isPrefixOf` b) args
+  shouldRun <- either parseError return .
+               makeMatcher (fromMaybe Prefix . getLast . cfgMatchType $ cfg) $
+               args
+  unless (null args || any shouldRun (names bsgroup)) $
+    parseError "none of the specified names matches a benchmark"
   withConfig cfg $
    if not $ fromLJ cfgMeasure cfg
      then runNotAnalyse shouldRun bsgroup
           runAndAnalyse shouldRun env bsgroup
   where
   bsgroup = BenchGroup "" bs
+  names = go ""
+    where go pfx (BenchGroup pfx' bms) = concatMap (go (prefix pfx pfx')) bms
+          go pfx (Benchmark desc _)    = [prefix pfx desc]
+          go _   (BenchCompare _)      = []
 
 -- | Display an error message from a command line parsing failure, and
 -- exit.
 parseError :: String -> IO a
 parseError msg = do
-  _ <- printError "Error: %s" msg
+  _ <- printError "Error: %s\n" msg
   _ <- printError "Run \"%s --help\" for usage information\n" =<< getProgName
   exitWith (ExitFailure 64)
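
A small usage sketch for the makeMatcher added above (benchmark names are hypothetical). An empty pattern list matches everything, and a malformed glob is reported as a Left:

-- GHCi session (illustrative):
--
-- >>> let Right f = makeMatcher Glob ["fold/*"]
-- >>> filter f ["fold/sum", "fold/product", "map/id"]
-- ["fold/sum","fold/product"]
--
-- >>> let Right g = makeMatcher Prefix []   -- no names given,
-- >>> g "anything"                          -- so everything runs
-- True
--
-- >>> let Left e = makeMatcher Glob ["[oops"]
-- (e is Glob's compile error, with its "compile :: " prefix stripped)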
 

Criterion/Measurement.hs

     , time
     , time_
     ) where
-    
+
 import Control.Monad (when)
 import Data.Time.Clock.POSIX (getPOSIXTime)
 import Text.Printf (printf)
-        
+
 time :: IO a -> IO (Double, a)
 time act = do
   start <- getTime

criterion.cabal

     Criterion.Types
 
   other-modules:
+    Criterion.Internal
     Paths_criterion
 
   build-depends:
     deepseq >= 1.1.0.0,
     directory,
     filepath,
+    Glob >= 0.7.2,
     hastache >= 0.5.0,
     mtl >= 2,
     mwc-random >= 0.8.0.3,