Commits

Ruben Martinez-Cantin committed 45ce3a9

Adding parameters to criteria. Adding Lizotte's EI.

  • Parent commits 377c0f3

Files changed (24)

include/criteria_atomic.hpp

       return 0;
     };
 
-    inline void setExponent(size_t exp) {mExp = exp;};
+    int setParameters(const vectord &params)
+    {
+      mExp = static_cast<size_t>(params(0));
+      return 0;
+    };
+
+    size_t nParameters() {return 1;};
+
     double operator()(const vectord &x)
     { 
       const double min = mProc->getValueAtMinimum();
       return mProc->prediction(x)->negativeExpectedImprovement(min,mExp); 
     };
+
     std::string name() {return "cEI";};
+
   private:
     size_t mExp;
   };
 
+  /// Biased expected improvement criterion (Lizotte's modification of EI).
+  class BiasedExpectedImprovement: public AtomicCriteria
+  {
+  public:
+    virtual ~BiasedExpectedImprovement(){};
+    int init(NonParametricProcess* proc)
+    { 
+      mProc = proc;
+      mBias = 0.01;
+      mExp = 1;
+      return 0;
+    };
+    int setParameters(const vectord &params)
+    {
+      mExp = static_cast<size_t>(params(0));
+      mBias = params(1);
+      return 0;
+    };
 
-  /// Lower (upper) confidence bound criterion.
+    size_t nParameters() {return 2;};
+
+    double operator()(const vectord &x)
+    { 
+      const double sigma = mProc->getSignalVariance();
+      const double min = mProc->getValueAtMinimum() - mBias/sigma;
+      return mProc->prediction(x)->negativeExpectedImprovement(min,mExp); 
+    };
+    std::string name() {return "cBEI";};
+  private:
+    double mBias;
+    size_t mExp;
+  };
+
+
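One way to read the class above, assuming getSignalVariance() returns the signal variance \sigma_s^2 and negativeExpectedImprovement(m, g) returns the negative of the generalized (Schonlau-style) expected improvement with incumbent m and exponent g: the bias simply shifts the incumbent before the standard EI computation,

    \tilde{y}_{\min} = y_{\min} - \frac{\epsilon}{\sigma_s^{2}}, \qquad
    \mathrm{EI}_g(\mathbf{x}) = \mathbb{E}\!\left[\max\!\left(0,\, \tilde{y}_{\min} - y(\mathbf{x})\right)^{g}\right]

where \epsilon is mBias (default 0.01) and g is mExp (default 1); the functor returns the negative value, presumably because the library minimizes its criteria.
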
+  /// Lower (upper) confidence bound criterion by [Cox and John, 1992].
   class LowerConfidenceBound: public AtomicCriteria
   {
   public:
       mBeta = 1.0;
       return 0;
     };
+    int setParameters(const vectord &params)
+    {
+      mBeta = params(0);
+      return 0;
+    };
 
-    inline void setBeta(double beta) { mBeta = beta; };
+    size_t nParameters() {return 1;};
+
     double operator()( const vectord &x)
     { 
       return mProc->prediction(x)->lowerConfidenceBound(mBeta); 
       mEpsilon = 0.01;
       return 0;
     };
+    int setParameters(const vectord &params)
+    {
+      mEpsilon = params(0);
+      return 0;
+    };
+
+    size_t nParameters() {return 1;};
 
     inline void setEpsilon(double eps) { mEpsilon = eps; };
     double operator()( const vectord &x)
   {
   public:
     virtual ~GreedyAOptimality(){};
+    int setParameters(const vectord &params) { return 0; };
+    size_t nParameters() {return 0;};
     double operator()( const vectord &x)
     { return mProc->prediction(x)->getStd(); };
     std::string name() {return "cAopt";};
   };
 
+
   /// Expected return criterion.
   class ExpectedReturn: public AtomicCriteria
   {
   public:
     virtual ~ExpectedReturn(){};
+    int setParameters(const vectord &params) { return 0; };
+    size_t nParameters() {return 0;};
     double operator()( const vectord &x)
     { return mProc->prediction(x)->getMean(); };
     std::string name() {return "cExpReturn";};
   };
 
+
   /**
    * \brief Optimistic sampling. A simple variation of Thompson sampling
    * that picks only samples that are better than the best outcome so
   public:
     OptimisticSampling(): mtRandom(100u) {};
     virtual ~OptimisticSampling(){};
+    int setParameters(const vectord &params) { return 0; };
+    size_t nParameters() {return 0;};
     double operator()( const vectord &x)
     {
       ProbabilityDistribution* d_ = mProc->prediction(x);
     randEngine mtRandom;
   };
 
+
   /**
    * \brief Thompson sampling. 
    * Picks a random realization of the surrogate model.
   public:
     ThompsonSampling(): mtRandom(100u) {};
     virtual ~ThompsonSampling(){};
+    int setParameters(const vectord &params) { return 0; };
+    size_t nParameters() {return 0;};
     double operator()( const vectord &x)
     {
       ProbabilityDistribution* d_ = mProc->prediction(x);
   };
 
 
+
   /// Expected improvement criterion using Schonlau annealing. \cite Schonlau98
   class AnnealedExpectedImprovement: public AtomicCriteria
   {
       return 0;
     };
 
-    inline void setExponent(size_t exp) {mExp = exp;};
+    int setParameters(const vectord &params)
+    {
+      mExp = static_cast<size_t>(params(0));
+      return 0;
+    };
+
+    size_t nParameters() {return 1;};
     void reset() { nCalls = 0; mExp = 10;};
     double operator()( const vectord &x)
     {
       reset();
       return 0;
     };
-    inline void setBetaCoef(double betac) { mCoef = betac; };
+
+    int setParameters(const vectord &params)
+    {
+      mCoef = params(0);
+      return 0;
+    };
+    size_t nParameters() {return 1;};
     void reset() { nCalls = 0; mCoef = 5.0;};
     double operator()( const vectord &x)
     {
       return 0;
     };
     virtual ~InputDistance(){};
-    inline void setWeight(double w) { mW = w; };
+    int setParameters(const vectord &params)
+    {
+      mW = params(0);
+      return 0;
+    };
+    size_t nParameters() {return 1;};
+ 
     double operator()(const vectord &x)
     { 
       vectord x2(x.size());
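
The hunks above replace the per-criterion setters (setExponent, setBeta, setBetaCoef, setWeight) with a uniform setParameters/nParameters vector interface. A minimal usage sketch, assuming the atomic criteria live in the bayesopt namespace and that a fitted surrogate proc is already available (all surrounding setup is omitted):

    #include "criteria_atomic.hpp"
    #include "specialtypes.hpp"

    double evalBiasedEI(bayesopt::NonParametricProcess* proc, const vectord& query)
    {
      bayesopt::BiasedExpectedImprovement crit;
      crit.init(proc);                     // defaults: bias = 0.01, exponent = 1

      vectord params(crit.nParameters());  // nParameters() == 2 for cBEI
      params(0) = 2.0;                     // exponent, cast to size_t internally
      params(1) = 0.05;                    // bias term
      crit.setParameters(params);          // returns 0 on success

      return crit(query);                  // negative biased EI at the query point
    }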

include/criteria_combined.hpp

 #ifndef  _CRITERIA_COMBINED_HPP_
 #define  _CRITERIA_COMBINED_HPP_
 
+#include <boost/numeric/ublas/vector_proxy.hpp>
 #include "criteria_functors.hpp"
 
+
 namespace bayesopt
 {
   /**\addtogroup CriteriaFunctions
       mCriteriaList = list;
       return 0; 
     };
+
+    int setParameters(const vectord &theta) 
+    {
+      using boost::numeric::ublas::subrange;
+      vectori sizes(mCriteriaList.size());
+
+      for (size_t i = 0; i < mCriteriaList.size(); ++i)
+	{
+	  sizes(i) = mCriteriaList[i]->nParameters();
+	}
+
+      if (theta.size() != norm_1(sizes))
+	{
+	  FILE_LOG(logERROR) << "Wrong number of criteria parameters"; 
+	  return -1; 
+	}
+
+      size_t start = 0;
+      for (size_t i = 0; i < mCriteriaList.size(); ++i)
+	{
+	  mCriteriaList[i]->setParameters(subrange(theta,start,start+sizes(i)));
+	  start += sizes(i);
+	}
+      return 0;
+    };
+
+    size_t nParameters() 
+    {
+      size_t sum = 0;
+      for (size_t i = 0; i < mCriteriaList.size(); ++i)
+	{
+	  sum += mCriteriaList[i]->nParameters();
+	}
+      return sum;
+    };
+
+
   protected:
     NonParametricProcess* mProc;
     std::vector<Criteria*> mCriteriaList;
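
Since setParameters above walks mCriteriaList in order and hands each child a subrange sized by its own nParameters(), callers concatenate the per-criterion parameter vectors in list order. A hedged sketch with a hypothetical combined criterion whose list holds cEI (one parameter) followed by cLCB (one parameter); the object name and its construction are assumptions for illustration only:

    // Hypothetical: `combined` holds the criteria list {cEI, cLCB}.
    vectord theta(combined->nParameters());   // 1 + 1 == 2
    theta(0) = 1.0;                           // cEI exponent -> first list entry
    theta(1) = 2.5;                           // cLCB beta    -> second list entry
    if (combined->setParameters(theta) != 0)  // -1 signals a size mismatch
      FILE_LOG(logERROR) << "criteria parameter vector has the wrong size";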

include/criteria_functors.hpp

     virtual bool requireComparison() = 0;
     virtual double operator()(const vectord &x) = 0;
     virtual std::string name() = 0;
+    virtual int setParameters(const vectord &params) = 0;
+    virtual size_t nParameters() = 0;
 
     //Dummy functions.
     virtual void reset() { assert(false); };

include/gaussian_process.hpp

 
   private:
     vectord mAlphaV;              ///< Precomputed L\y
-    double mSigma;                ///< Signal variance
     GaussianDistribution* d_;     ///< Pointer to distribution function
   };
 

include/gaussian_process_ml.hpp

 
   private:
     vectord mWML;           //!< GP ML parameters
-    double mSigML;           //!< GP ML parameters
     
     /// Precomputed GP prediction operations
     vectord mAlphaF;

include/gaussian_process_normal.hpp

 
   private:
     vectord mWML;           //!< GP ML parameters
-    double mSigML;           //!< GP ML parameters
     
     /// Precomputed GP prediction operations
     vectord mAlphaF;

include/kernel_atomic.hpp

       n_inputs = input_dim;
       return 0;
     };
-    void setHyperParameters(const vectord &theta) 
+    int setHyperParameters(const vectord &theta) 
     {
-      assert(theta.size() == n_params);
+      if(theta.size() != n_params)
+	{
+	  FILE_LOG(logERROR) << "Wrong number of hyperparameters"; 
+	  return -1; 
+	}
       params = theta;
+      return 0;
     };
     vectord getHyperParameters() {return params;};
     size_t nHyperParameters() {return n_params;};

include/kernel_combined.hpp

       this->right = right;
       return 0;
     };
-    void setHyperParameters(const vectord &theta) 
+    int setHyperParameters(const vectord &theta) 
     {
       using boost::numeric::ublas::subrange;
 
       size_t n_lhs = left->nHyperParameters();
       size_t n_rhs = right->nHyperParameters();
-      assert(theta.size() == n_lhs + n_rhs);
+      if (theta.size() != n_lhs + n_rhs)
+	{
+	  FILE_LOG(logERROR) << "Wrong number of hyperparameters"; 
+	  return -1; 
+	}
       left->setHyperParameters(subrange(theta,0,n_lhs));
       right->setHyperParameters(subrange(theta,n_lhs,n_lhs+n_rhs));
+      return 0;
     };
 
     vectord getHyperParameters() 

include/kernel_functors.hpp

     virtual int init(size_t input_dim) {return 0;};
     virtual int init(size_t input_dim, Kernel* left, Kernel* right) {return 0;};
 
-    virtual void setHyperParameters(const vectord &theta) = 0;
+    virtual int setHyperParameters(const vectord &theta) = 0;
     virtual vectord getHyperParameters() = 0;
     virtual size_t nHyperParameters() = 0;
 

include/nonparametricprocess.hpp

     inline vectord getPointAtMinimum() { return mGPXX[mMinIndex]; };
     inline double getValueAtMinimum() { return mGPY(mMinIndex); };
     inline size_t getNSamples() { return mGPY.size(); };
+    inline double getSignalVariance() { return mSigma; };
   
     // Kernel function
     /** 
 
   protected:
     const double mRegularizer;   ///< Std of the obs. model (also used as nugget)
+    double mSigma;                                           ///< Signal variance
     vecOfvec mGPXX;                                              ///< Data inputs
     vectord mGPY;                                                ///< Data values
     

include/specialtypes.hpp

 #include <boost/numeric/ublas/vector.hpp>
 #include <boost/numeric/ublas/io.hpp>
 
+typedef boost::numeric::ublas::vector<size_t>                   vectori;
 typedef boost::numeric::ublas::vector<double>                   vectord;
 typedef boost::numeric::ublas::zero_vector<double>             zvectord;
 typedef boost::numeric::ublas::scalar_vector<double>           svectord;

include/student_t_process_jef.hpp

 
   private:
     vectord mWML;           //!< GP ML parameters
-    double mSigML;           //!< GP ML parameters
     
     /// Precomputed GP prediction operations
     vectord mAlphaF;

python/demo_cam.py

+#!/usr/bin/env python
+# -------------------------------------------------------------------------
+#    This file is part of BayesOpt, an efficient C++ library for 
+#    Bayesian optimization.
+#
+#    Copyright (C) 2011-2013 Ruben Martinez-Cantin <rmcantin@unizar.es>
+# 
+#    BayesOpt is free software: you can redistribute it and/or modify it 
+#    under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    BayesOpt is distributed in the hope that it will be useful, but 
+#    WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with BayesOpt.  If not, see <http://www.gnu.org/licenses/>.
+# ------------------------------------------------------------------------
+
 from SimpleCV import Camera
 import numpy as np
 import bayesopt

python/demo_dimscaling.py

+#!/usr/bin/env python
+# -------------------------------------------------------------------------
+#    This file is part of BayesOpt, an efficient C++ library for 
+#    Bayesian optimization.
+#
+#    Copyright (C) 2011-2013 Ruben Martinez-Cantin <rmcantin@unizar.es>
+# 
+#    BayesOpt is free software: you can redistribute it and/or modify it 
+#    under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    BayesOpt is distributed in the hope that it will be useful, but 
+#    WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with BayesOpt.  If not, see <http://www.gnu.org/licenses/>.
+# ------------------------------------------------------------------------
+
+# This example was provided by Janto Dreijer <jantod@gmail.com>
+
+import sys
+sys.path.append('/usr/local/lib')
+
+import numpy as np
+import bayesopt
+
+def func(x):
+	#print "x", x
+	#~ target = np.ones(len(x))*0.3
+	target = np.arange(1,1+len(x))
+	#print "target", target
+	e = ((np.asarray(x) - target)**2).mean()
+	#print "e", e
+	return e
+
+# Initialize the parameters by default
+params = bayesopt.initialize_params()
+
+# We decided to change some of them
+params['n_init_samples'] = 300
+params['noise'] = 1
+params['kernel_name'] = "kMaternISO3"
+#params['surr_name'] = "GAUSSIAN_PROCESS_INV_GAMMA_NORMAL"
+
+dim = 20
+lb = np.ones((dim,))*0
+ub = np.ones((dim,))*20
+
+mvalue, x_out, error = bayesopt.optimize(func, dim, lb, ub, params)
+
+print mvalue, x_out, error

python/demo_multiprocess.py

+#!/usr/bin/env python
+# -------------------------------------------------------------------------
+#    This file is part of BayesOpt, an efficient C++ library for 
+#    Bayesian optimization.
+#
+#    Copyright (C) 2011-2013 Ruben Martinez-Cantin <rmcantin@unizar.es>
+# 
+#    BayesOpt is free software: you can redistribute it and/or modify it 
+#    under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    BayesOpt is distributed in the hope that it will be useful, but 
+#    WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with BayesOpt.  If not, see <http://www.gnu.org/licenses/>.
+# ------------------------------------------------------------------------
+
+import numpy as np
+from bayesoptmodule import BayesOptContinuous
+from multiprocessing import Process, Pipe
+
+# Function for testing.
+def testfunc(Xin):
+    total = 5.0
+    for value in Xin:
+        total = total + (value -0.33)*(value-0.33)
+    return total
+
+
+def worker(pipe):
+    x = None
+    while True:
+        x = pipe.recv()
+        if x == 'STOP':
+            break
+
+        result = testfunc(x)
+        pipe.send(result)
+
+
+class BayesOptProcess(Process,BayesOptContinuous):
+
+    def __init__ (self, pipe):
+        Process.__init__(self)
+        BayesOptContinuous.__init__(self)
+        self.pipe = pipe
+
+    def run(self):
+        mvalue, x_out, error = self.optimize()
+        self.pipe.send('STOP')
+        self.mvalue = mvalue
+        self.x_out = x_out
+        self.error = error
+
+        return
+
+    def evalfunc(self, x):
+        self.pipe.send(x)
+        result = self.pipe.recv()
+        return result
+
+
+if __name__ == '__main__':
+    pipe_par, pipe_child = Pipe()
+
+    bo = BayesOptProcess(pipe_child)
+    bo.params['n_iterations'] = 50
+    bo.params['n_init_samples'] = 20
+    bo.params['s_name'] = "GAUSSIAN_PROCESS_INV_GAMMA_NORMAL"
+    bo.params['c_name'] = "GP_HEDGE"
+
+    p = Process(target=worker, args=(pipe_par,))
+
+    bo.start()
+    p.start()
+    
+    bo.join()
+    p.join()

python/demo_quad.py

+#!/usr/bin/env python
+# -------------------------------------------------------------------------
+#    This file is part of BayesOpt, an efficient C++ library for 
+#    Bayesian optimization.
+#
+#    Copyright (C) 2011-2013 Ruben Martinez-Cantin <rmcantin@unizar.es>
+# 
+#    BayesOpt is free software: you can redistribute it and/or modify it 
+#    under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    BayesOpt is distributed in the hope that it will be useful, but 
+#    WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with BayesOpt.  If not, see <http://www.gnu.org/licenses/>.
+# ------------------------------------------------------------------------
+
+import bayesopt
+import bayesoptmodule
+import numpy as np
+
+from time import clock
+
+# Function for testing.
+def testfunc(Xin):
+    total = 5.0
+    for value in Xin:
+        total = total + (value -0.33)*(value-0.33)
+
+    return total
+
+# Class for OO testing.
+class BayesOptTest(bayesoptmodule.BayesOptContinuous):
+    def evalfunc(self,Xin):
+        return testfunc(Xin)
+
+
+# Let's define the parameters
+# For different options: see parameters.h and cpp
+# If a parameter is not defined, it will be automatically set
+# to a default value.
+params = bayesopt.initialize_params()
+params['n_iterations'] = 50
+params['n_init_samples'] = 20
+#params['surr_name'] = "GAUSSIAN_PROCESS_INV_GAMMA_NORMAL"
+params['crit_name'] = "cEI"
+params['kernel_name'] = "kMaternISO3"
+print params['mean_name']
+print "Callback implementation"
+
+n = 5                     # n dimensions
+lb = np.zeros((n,))
+ub = np.ones((n,))
+
+start = clock()
+
+mvalue, x_out, error = bayesopt.optimize(testfunc, n, lb, ub, params)
+
+print "Result", x_out
+print "Seconds", clock() - start
+
+
+print "OO implementation"
+bo_test = BayesOptTest()
+bo_test.params = params
+bo_test.n = n
+bo_test.lb = lb
+bo_test.ub = ub
+
+start = clock()
+mvalue, x_out, error = bo_test.optimize()
+
+print "Result", x_out
+print "Seconds", clock() - start
+
+
+print "Callback discrete implementation"
+x_set = np.random.rand(100,n)
+start = clock()
+
+mvalue, x_out, error = bayesopt.optimize_discrete(testfunc, x_set, params)
+
+print "Result", x_out
+print "Seconds", clock() - start
+
+value = np.array([testfunc(i) for i in x_set])
+print "Optimun", x_set[value.argmin()]

python/test.py

-#!/usr/bin/env python
-# -------------------------------------------------------------------------
-#    This file is part of BayesOpt, an efficient C++ library for 
-#    Bayesian optimization.
-#
-#    Copyright (C) 2011-2013 Ruben Martinez-Cantin <rmcantin@unizar.es>
-# 
-#    BayesOpt is free software: you can redistribute it and/or modify it 
-#    under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    BayesOpt is distributed in the hope that it will be useful, but 
-#    WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with BayesOpt.  If not, see <http://www.gnu.org/licenses/>.
-# ------------------------------------------------------------------------
-
-import bayesopt
-import bayesoptmodule
-import numpy as np
-
-from time import clock
-
-# Function for testing.
-def testfunc(Xin):
-    total = 5.0
-    for value in Xin:
-        total = total + (value -0.33)*(value-0.33)
-
-    return total
-
-# Class for OO testing.
-class BayesOptTest(bayesoptmodule.BayesOptContinuous):
-    def evalfunc(self,Xin):
-        return testfunc(Xin)
-
-
-# Let's define the parameters
-# For different options: see parameters.h and cpp
-# If a parameter is not define, it will be automatically set
-# to a default value.
-params = bayesopt.initialize_params()
-params['n_iterations'] = 50
-params['n_init_samples'] = 20
-#params['surr_name'] = "GAUSSIAN_PROCESS_INV_GAMMA_NORMAL"
-params['crit_name'] = "cEI"
-params['kernel_name'] = "kMaternISO3"
-print params['mean_name']
-print "Callback implementation"
-
-n = 5                     # n dimensions
-lb = np.zeros((n,))
-ub = np.ones((n,))
-
-start = clock()
-
-mvalue, x_out, error = bayesopt.optimize(testfunc, n, lb, ub, params)
-
-print "Result", x_out
-print "Seconds", clock() - start
-
-
-print "OO implementation"
-bo_test = BayesOptTest()
-bo_test.params = params
-bo_test.n = n
-bo_test.lb = lb
-bo_test.ub = ub
-
-start = clock()
-mvalue, x_out, error = bo_test.optimize()
-
-print "Result", x_out
-print "Seconds", clock() - start
-
-
-print "Callback discrete implementation"
-x_set = np.random.rand(100,n)
-start = clock()
-
-mvalue, x_out, error = bayesopt.optimize_discrete(testfunc, x_set, params)
-
-print "Result", x_out
-print "Seconds", clock() - start
-
-value = np.array([testfunc(i) for i in x_set])
-print "Optimun", x_set[value.argmin()]

python/test2.py

-#!/usr/bin/env python
-# -------------------------------------------------------------------------
-#    This file is part of BayesOpt, an efficient C++ library for 
-#    Bayesian optimization.
-#
-#    Copyright (C) 2011-2013 Ruben Martinez-Cantin <rmcantin@unizar.es>
-# 
-#    BayesOpt is free software: you can redistribute it and/or modify it 
-#    under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    BayesOpt is distributed in the hope that it will be useful, but 
-#    WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with BayesOpt.  If not, see <http://www.gnu.org/licenses/>.
-# ------------------------------------------------------------------------
-
-# This example was provided by Janto Dreijer <jantod@gmail.com>
-
-import sys
-sys.path.append('/usr/local/lib')
-
-import numpy as np
-import bayesopt
-
-def func(x):
-	#print "x", x
-	#~ target = np.ones(len(x))*0.3
-	target = np.arange(1,1+len(x))
-	#print "target", target
-	e = ((np.asarray(x) - target)**2).mean()
-	#print "e", e
-	return e
-
-# Initialize the parameters by default
-params = bayesopt.initialize_params()
-
-# We decided to change some of them
-params['n_init_samples'] = 300
-params['noise'] = 1
-params['kernel_name'] = "kMaternISO3"
-#params['surr_name'] = "GAUSSIAN_PROCESS_INV_GAMMA_NORMAL"
-
-dim = 20
-lb = np.ones((dim,))*0
-ub = np.ones((dim,))*20
-
-mvalue, x_out, error = bayesopt.optimize(func, dim, lb, ub, params)
-
-print mvalue, x_out, error

python/testmultiproc.py

-#!/usr/bin/env python
-import numpy as np
-from bayesoptmodule import BayesOptContinuous
-from multiprocessing import Process, Pipe
-
-# Function for testing.
-def testfunc(Xin):
-    total = 5.0
-    for value in Xin:
-        total = total + (value -0.33)*(value-0.33)
-    return total
-
-
-def worker(pipe):
-    x = None
-    while True:
-        x = pipe.recv()
-        if x == 'STOP':
-            break
-
-        result = testfunc(x)
-        pipe.send(result)
-
-
-class BayesOptProcess(Process,BayesOptContinuous):
-
-    def __init__ (self, pipe):
-        Process.__init__(self)
-        BayesOptContinuous.__init__(self)
-        self.pipe = pipe
-
-    def run(self):
-        mvalue, x_out, error = self.optimize()
-        self.pipe.send('STOP')
-        self.mvalue = mvalue
-        self.x_out = x_out
-        self.error = error
-
-        return
-
-    def evalfunc(self, x):
-        self.pipe.send(x)
-        result = self.pipe.recv()
-        return result
-
-
-if __name__ == '__main__':
-    pipe_par, pipe_child = Pipe()
-
-    bo = BayesOptProcess(pipe_child)
-    bo.params['n_iterations'] = 50
-    bo.params['n_init_samples'] = 20
-    bo.params['s_name'] = "GAUSSIAN_PROCESS_INV_GAMMA_NORMAL"
-    bo.params['c_name'] = "GP_HEDGE"
-
-    p = Process(target=worker, args=(pipe_par,))
-
-    bo.start()
-    p.start()
-    
-    bo.join()
-    p.join()

src/criteria_functors.cpp

   CriteriaFactory::CriteriaFactory()
   {
     registry["cEI"] = & create_func<ExpectedImprovement>;
+    registry["cBEI"] = & create_func<BiasedExpectedImprovement>;
     registry["cEIa"] = & create_func<AnnealedExpectedImprovement>;
     registry["cLCB"] = & create_func<LowerConfidenceBound>;
     registry["cLCBa"] = & create_func<AnnealedLowerConfindenceBound>;

src/gaussian_process.cpp

   namespace ublas = boost::numeric::ublas;
 
   GaussianProcess::GaussianProcess(size_t dim, bopt_params params):
-    NonParametricProcess(dim, params), mSigma(params.sigma_s)
+    NonParametricProcess(dim, params)
   {
     d_ = new GaussianDistribution();
   }  // Constructor

src/gaussian_process_ml.cpp

     inplace_solve(mL2,rho,ublas::lower_tag());
     
     double yPred = inner_prod(phi,mWML) + inner_prod(v,mAlphaF);
-    double sPred = sqrt( mSigML * (kq - inner_prod(v,v) 
+    double sPred = sqrt( mSigma * (kq - inner_prod(v,v) 
 				   + inner_prod(rho,rho)));
 
     d_->setMeanAndStd(yPred,sPred);
 
     mAlphaF = mGPY - prod(mWML,mFeatM);
     inplace_solve(mL,mAlphaF,ublas::lower_tag());
-    mSigML = inner_prod(mAlphaF,mAlphaF)/(n-p);
+    mSigma = inner_prod(mAlphaF,mAlphaF)/(n-p);
   
     return 1;
   }

src/nonparametricprocess.cpp

 {
   
   NonParametricProcess::NonParametricProcess(size_t dim, bopt_params parameters):
-    InnerOptimization(), mRegularizer(parameters.noise), dim_(dim)
+    InnerOptimization(), mRegularizer(parameters.noise), 
+    mSigma(parameters.sigma_s), dim_(dim)
   { 
     mMinIndex = 0;     mMaxIndex = 0;   
     setAlgorithm(BOBYQA);

src/student_t_process_jef.cpp

     inplace_solve(mL2,rho,ublas::lower_tag());
     
     double yPred = inner_prod(phi,mWML) + inner_prod(v,mAlphaF);
-    double sPred = sqrt( mSigML * (kq - inner_prod(v,v) 
+    double sPred = sqrt( mSigma * (kq - inner_prod(v,v) 
 				   + inner_prod(rho,rho)));
 
     d_->setMeanAndStd(yPred,sPred);
 
     mAlphaF = mGPY - prod(mWML,mFeatM);
     inplace_solve(mL,mAlphaF,ublas::lower_tag());
-    mSigML = inner_prod(mAlphaF,mAlphaF)/(n-p);
+    mSigma = inner_prod(mAlphaF,mAlphaF)/(n-p);
     
     d_->setDof(n-p);  
     return 1;
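
After the rename, the ML Gaussian process and the Jeffreys Student-t process both store their estimated signal variance in the shared mSigma member. In standard notation, and assuming kq denotes the prior kernel value k(x, x) at the query point (an interpretation of the code, not taken verbatim from it), the two hunks compute

    \hat{\sigma}^{2} = \frac{\boldsymbol{\alpha}^{\top}\boldsymbol{\alpha}}{n - p}, \qquad
    s(\mathbf{x}) = \sqrt{\hat{\sigma}^{2}\left(k(\mathbf{x},\mathbf{x}) - \mathbf{v}^{\top}\mathbf{v} + \boldsymbol{\rho}^{\top}\boldsymbol{\rho}\right)}

with n training samples and p explicit mean basis functions; the Student-t version additionally sets n - p degrees of freedom.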