Commits

Ruben Martinez-Cantin committed 950bb8e Merge

Merging

  • Parent commits da124ac, 364c068


Files changed (41)

File CMakeLists.txt

 
 INCLUDE(UseDoxygen)
 
+# Sobol sequences are hardcoded tables, so they may take a long time
+# to compile.
 IF(BAYESOPT_BUILD_SOBOL)
   ADD_DEFINITIONS(-DUSE_SOBOL)	
   SET(SOBOL_SRC
   ./src/bayesoptbase.cpp
   ./src/inneroptimization.cpp
   ./src/nonparametricprocess.cpp
+  ./src/kernelregressor.cpp
   ./src/empiricalbayesprocess.cpp
   ./src/hierarchical_gaussian_process.cpp
   ./src/gaussian_process.cpp
 LINK_DIRECTORIES( ${CMAKE_SOURCE_DIR}/lib )
 
 IF(NLOPT_BUILD)
-  ADD_SUBDIRECTORY(nlopt2)
-  include_directories(${CMAKE_SOURCE_DIR}/nlopt2/api)
+  ADD_SUBDIRECTORY(nlopt)
+  include_directories(${CMAKE_SOURCE_DIR}/nlopt/api)
   SET(EXT_LIBS nlopt)
 ELSE(NLOPT_BUILD)
   SET(EXT_LIBS ${NLOPT})

File include/bayesoptbase.hpp

 #ifndef  _BAYESOPTBASE_HPP_
 #define  _BAYESOPTBASE_HPP_
 
+#include <boost/scoped_ptr.hpp>
 #include "criteria_functors.hpp"
 
 /**
     virtual ~BayesOptBase();
 
     /** 
-     * \brief Execute the optimization process of the function defined
-     * in evaluateSample.
-     * 
-     * @see evaluateSample
-     * @see checkReachability
-     *
-     * @param bestPoint returns point with the optimum value in a ublas::vector.
-     * @return 0 if terminate successfully, any other value otherwise
-     */
-    int optimize(vectord &bestPoint);
-
-    /** 
-     * \brief Execute ONE step the optimization process of the function defined
-     * in evaluateSample.
-     * 
-     * @see evaluateSample
-     * @see checkReachability
-     *
-     * @param ii iteration number.
-     * @return 0 if terminate successfully, any other value otherwise
-     */  
-    int stepOptimization(size_t ii);
-
-    /** 
-     * Initialize the optimization process.
-     * @return error_code
-     */
-    virtual int initializeOptimization() = 0;
-
-    /** 
-     * Once the optimization has been perfomed, return the optimal point.
-     */
-    virtual vectord getFinalResult() = 0;
-
-    /** 
-     * Once the optimization has been perfomed, return the value of
-     * the optimal point.
-     */
-    double getMinimumValue()
-    { return mGP->getValueAtMinimum(); };
-
-
-    /** 
      * \brief Function that defines the actual function to be optimized.
      * This function must be modified (overriden) according to the
      * specific problem.
 
 
     /** 
+     * \brief Execute the optimization process of the function defined
+     * in evaluateSample.
+     * 
+     * @see evaluateSample
+     * @see checkReachability
+     *
+     * @param bestPoint returns point with the optimum value in a ublas::vector.
+     * @return 0 if it terminates successfully, any other value otherwise
+     */
+    int optimize(vectord &bestPoint);
+
+    /** 
+     * \brief Execute ONE step of the optimization process of the
+     * function defined in evaluateSample.
+     * @param ii iteration number.
+     */  
+    void stepOptimization(size_t ii);
+
+    /** Initialize the optimization process.  */
+    virtual void initializeOptimization() = 0;
+
+    /** Once the optimization has been performed, return the optimal point. */
+    virtual vectord getFinalResult() = 0;
+
+    /** 
      * \brief Evaluate the criteria considering if the query is
      * reachable or not.  This is a way to include non-linear
      * restrictions.
      * @param query query point
      * @return value of the criteria, 0 otherwise.
      */
-    inline double evaluateCriteria( const vectord &query )
-    {
-      bool reachable = checkReachability(query);
-      if (!reachable)  return 0.0;
-      return (*mCrit)(query);
-    };
+    double evaluateCriteria( const vectord &query );
 
-    
-    inline NonParametricProcess* getSurrogateModel()
-    { return mGP.get(); };
+    void setSamples(const matrixd &x, const vectord &y);
+    void addSample(const vectord &x, double y);
+    vectord getPointAtMinimum();
+    double getValueAtMinimum();
 
-    int setSurrogateModel();    
-    int setCriteria();
-    bopt_params* getParameters() {return &mParameters;};
+    NonParametricProcess* getSurrogateModel();
+    void setSurrogateModel();    
+    void  setCriteria();
+    bopt_params* getParameters();
 
   protected:
     /** 
      * @param iteration 
      * @param xNext 
      * @param yNext 
-     * 
-     * @return error code
      */
-    virtual int plotStepData(size_t iteration, const vectord& xNext,
-		     double yNext) = 0;
+    virtual void plotStepData(size_t iteration, const vectord& xNext,
+			      double yNext) = 0;
 
 
     /** 
      * criteria or metacriteria
      * 
      * @param Xnext next point to evaluate
-     * @return error code
      */
-    int nextPoint( vectord &Xnext );  
+    void nextPoint( vectord &Xnext );  
 
 
 
   protected:
-    boost::scoped_ptr<NonParametricProcess> mGP;  ///< Pointer to surrogate model
+    Dataset mData;
+    boost::scoped_ptr<NonParametricProcess> mGP;     ///< Pointer to surrogate model
     boost::scoped_ptr<Criteria> mCrit;                    ///< Metacriteria model
     bopt_params mParameters;                        ///< Configuration parameters
     size_t mDims;                                       ///< Number of dimensions
      * \brief Checks the parameters and setups the elements (criteria, 
      * surrogate, etc.)
      */
-    int __init__();
+    void __init__();
 
     CriteriaFactory mCFactory;
     randEngine mEngine;
 
   /**@}*/
 
+  inline void BayesOptBase::setSamples(const matrixd &x, const vectord &y)
+  { mData.setSamples(x,y);  mGP->setSamples(x,y);
+  }
+
+  inline void BayesOptBase::addSample(const vectord &x, double y)
+  {  mData.addSample(x,y); mGP->addSample(x,y);  };
+
+  inline vectord BayesOptBase::getPointAtMinimum() 
+  { return mData.getPointAtMinimum(); };
+  
+  inline double BayesOptBase::getValueAtMinimum() 
+  { return mData.getValueAtMinimum(); };
+
+  inline double BayesOptBase::evaluateCriteria( const vectord &query )
+  {
+    if (checkReachability(query))  return (*mCrit)(query);
+    return 0.0;
+  };
+
+  inline NonParametricProcess* BayesOptBase::getSurrogateModel()
+  { return mGP.get(); };
+
+  inline bopt_params* BayesOptBase::getParameters() 
+  {return &mParameters;};
+
 } //namespace bayesopt
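
With this change, the per-step methods report failure through exceptions instead of integer error codes. A minimal sketch of the driver loop implied by the new interface (the wrapper function and nIterations are hypothetical; optimize() already performs these steps internally):

    void runOptimization(bayesopt::BayesOptBase& opt, vectord& bestPoint,
                         size_t nIterations)
    {
      opt.initializeOptimization();         // void: errors surface as exceptions
      for (size_t ii = 0; ii < nIterations; ++ii)
        {
          opt.stepOptimization(ii);         // find next point, evaluate, update model
        }
      bestPoint = opt.getFinalResult();
    }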
 
 

File include/bayesoptcont.hpp

   {
   public:
    
-    /** 
-     * Default constructor
-     */
+    /** Default constructor */
     ContinuousModel();
 
     /** 
      */
     ContinuousModel(size_t dim, bopt_params params);
 
-    /** 
-     * Default destructor
-     * 
-     * @return 
-     */
+    /**  Default destructor  */
     virtual ~ContinuousModel();
   
-    /** 
-     * Initialize the optimization process.
-     * @return error_code
-     */
-    int initializeOptimization();
+    /** Initialize the optimization process.  */
+    void initializeOptimization();
 
     /** 
-     * Once the optimization has been perfomed, return the optimal point.
+     * Once the optimization has been performed, return the optimal
+     * point.
      */
     vectord getFinalResult();
 
     /** 
      * \brief Sets the bounding box. 
      *
-     * @param lowerBound vector with the lower bounds of the hypercube 
-     * @param upperBound vector with the upper bounds of the hypercube 
+     * @param lowerBound vector with the lower bounds of the hypercube
+     * @param upperBound vector with the upper bounds of the hypercube
      * 
      * @return 0 if terminate successfully, nonzero otherwise
      */
      * @param iteration iteration number 
      * @param xNext next point
      * @param yNext function value at next point
-     * 
-     * @return error code
      */
-    int plotStepData(size_t iteration, const vectord& xNext,
-		     double yNext);
+    void plotStepData(size_t iteration, const vectord& xNext,
+		      double yNext);
 
     /** \brief Sample a set of points to initialize the surrogate function.
      * It uses pure random sampling or uniform Latin Hypercube sampling.
 
   inline double ContinuousModel::evaluateSampleInternal( const vectord &query )
   { 
-    vectord unnormalizedQuery = mBB->unnormalizeVector(query);
-    return evaluateSample(unnormalizedQuery);
+    return evaluateSample(mBB->unnormalizeVector(query));
   }; // evaluateSampleInternal
 
   inline int ContinuousModel::findOptimal(vectord &xOpt)
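
A hypothetical use of the continuous model under the new void-returning interface; the evaluateSample override is the user-supplied objective declared in bayesoptbase.hpp, and the setBoundingBox name is assumed from the "Sets the bounding box" documentation above:

    class MyQuadratic : public bayesopt::ContinuousModel
    {
    public:
      MyQuadratic(size_t dim, bopt_params params)
        : ContinuousModel(dim, params) {};

      // Objective to be minimized (pure virtual in BayesOptBase)
      double evaluateSample(const vectord& query)
      { return boost::numeric::ublas::inner_prod(query, query); };
    };

    // ...
    MyQuadratic opt(2, params);
    opt.setBoundingBox(lowerBound, upperBound);   // assumed signature
    vectord bestPoint(2);
    opt.optimize(bestPoint);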

File include/bayesoptdisc.hpp

     DiscreteModel( const vecOfvec &validSet, 
 		 bopt_params params);
     
-    /** 
-     * Default destructor
-     */
+    /** Default destructor  */
     virtual ~DiscreteModel();
 
-    /** 
-     * Initialize the optimization process.
-     * @return error_code
-     */
-    int initializeOptimization();
+    /** Initialize the optimization process. */
+    void initializeOptimization();
 
     /** 
-     * Once the optimization has been perfomed, return the optimal point.
+     * Once the optimization has been performed, return the optimal
+     * point.
      */
     vectord getFinalResult();
 
      * @param iteration 
      * @param xNext 
      * @param yNext 
-     * 
-     * @return error code
      */
-    int plotStepData(size_t iteration, const vectord& xNext,
+    void plotStepData(size_t iteration, const vectord& xNext,
 		     double yNext);
 
     /** 

File include/criteria_atomic.hpp

   public:
     virtual ~AtomicCriteria(){};
     virtual int init(NonParametricProcess* proc)
-    { 
-      mProc = proc;
-      return 0;
-    };
+    { mProc = proc;  return 0;   };
+    // This criterion does not support comparisons!
     bool requireComparison(){ return false; };
+    bool checkIfBest(vectord& xNext,std::string& name)
+    { assert(false); return false; };
+
     virtual double operator()(const vectord &x)  = 0;
   };
 

File include/criteria_combined.hpp

   public:
     virtual ~SumCriteria() {};
   
-    bool requireComparison(){ return false; };
     double operator()(const vectord &x)  
     {
       double sum = 0.0;
   public:
     virtual ~ProdCriteria() {};
   
-    bool requireComparison(){ return false; };
     double operator()(const vectord &x)  
     {
       double prod = 1.0;
     double operator()(const vectord &x) { return (*mCurrentCriterium)(x); };
 
     void reset();
-    bool checkIfBest(vectord& best, std::string& name,int& error_code);
+    bool checkIfBest(vectord& best, std::string& name);
     std::string name() {return "cHedge";};
   protected:
     int update_hedge();

File include/criteria_functors.hpp

 #ifndef  _CRITERIA_FUNCTORS_HPP_
 #define  _CRITERIA_FUNCTORS_HPP_
 
+#include <map>
 #include <algorithm>
 #include "optimizable.hpp"
 #include "nonparametricprocess.hpp"
     virtual int init(NonParametricProcess *proc, 
 		     const std::vector<Criteria*>& list) { return 0; };
 
-    virtual bool requireComparison() = 0;
     double evaluate(const vectord &x) {return (*this)(x);}
     virtual double operator()(const vectord &x) = 0;
     virtual std::string name() = 0;
 
     //Dummy functions.
     virtual void reset() { assert(false); };
-    virtual bool checkIfBest(vectord& xNext,
-			     std::string& name,
-			     int& error_code) { assert(false); return false; };
+
+    // In general, most criteria do not support comparisons!
+    virtual bool requireComparison(){ return false; };
+    virtual bool checkIfBest(vectord& xNext,std::string& name)
+    { assert(false); return false; };
+
   protected:
     NonParametricProcess *mProc;
   };

File include/empiricalbayesprocess.hpp

 #ifndef  _EMPIRICAL_BAYES_PROCESS_HPP_
 #define  _EMPIRICAL_BAYES_PROCESS_HPP_
 
-#include "nonparametricprocess.hpp"
+#include "kernelregressor.hpp"
 #include "inneroptimization.hpp"
 
 namespace bayesopt
   /**
    * \brief Empirical Bayesian NonParametric process.
    */
-  class EmpiricalBayesProcess: public NonParametricProcess, RBOptimizable
+  class EmpiricalBayesProcess: public KernelRegressor, RBOptimizable
   {
   public:
-    EmpiricalBayesProcess(size_t dim, bopt_params parameters);
+    EmpiricalBayesProcess(size_t dim, bopt_params parameters,
+			  Dataset& data);
     virtual ~EmpiricalBayesProcess();
 
     /** 
     /** 
      * \brief Updates the kernel parameters acording with a point
      * estimate (ML, MAP, etc.)
-     * @return error code
      */
-    int updateKernelParameters();
+    void updateKernelParameters();
 
     /** 
-     * \brief Updates and computes the score (eg:likelihood) of the kernel
-     * parameters.
-     * @param query set of parameters.
+     * \brief Computes the score (eg:likelihood) of the kernel
+     * parameters.  
+     * Warning: to evaluate the score, it is necessary to change the kernel parameters.
+     * @param x set of parameters.  
      * @return score
      */
-    double evaluateKernelParams(const vectord& query);
-    double evaluate(const vectord &x) {return evaluateKernelParams(x);}
+    double evaluate(const vectord &x);
 
     /** 
      * \brief Computes the score (eg:likelihood) of the current kernel
     NLOPT_Optimization* kOptimizer;
   };
 
+
+
+  inline double EmpiricalBayesProcess::evaluate(const vectord& x)
+  { 
+    mKernel.setHyperParameters(x);
+    return evaluateKernelParams();
+  };
+
+
   /**@}*/
   
 } //namespace bayesopt

File include/fullbayesprocess.hpp

   /**
    * \brief Full Bayesian NonParametric process.
    */
-  class FullBayesProcess: public NonParametricProcess
+  class FullBayesProcess: public KernelRegressor
   {
   public:
     static const size_t N_PROC = 10;
 
-    FullBayesProcess(size_t dim, bopt_params params);
+    FullBayesProcess(size_t dim, bopt_params params, Dataset& data);
     virtual ~FullBayesProcess();
 
     /** 
     ProbabilityDistribution* prediction(const vectord &query);
 
     /** 
-     * \brief Updates the kernel parameters acording with a point
-     * estimate (ML, MAP, etc.)
-     * @return error code
+     * \brief Updates the kernel parameters according to a Bayesian
+     * estimate (grid sampling, MCMC, etc.)
      */
-    int updateKernelParameters();
+    void updateKernelParameters();
 
   private:
-    std::vector<NonParametricProcess*>   mVProc;
+    std::vector<KernelRegressor*>   mVProc;
     vectord                            mWeights;
     
     MixtureDistribution* d_;      //!< Predictive distributions

File include/gaussian_process.hpp

   class GaussianProcess: public EmpiricalBayesProcess
   {
   public:
-    GaussianProcess(size_t dim, bopt_params params);
+    GaussianProcess(size_t dim, bopt_params params, Dataset& data);
     virtual ~GaussianProcess();
 
     /** 
      */
     double negativeLogLikelihood();
 
-    /** 
-     * \brief Precompute some values of the prediction that do not depends on
-     * the query
-     * @return error code
+    /** Precompute some values of the prediction that do not depend
+     *	on the query
      */
-    int precomputePrediction();
+    void precomputePrediction();
 
   private:
     vectord mAlphaV;              ///< Precomputed L\y

File include/gaussian_process_ml.hpp

   class GaussianProcessML: public HierarchicalGaussianProcess
   {
   public:
-    GaussianProcessML(size_t dim, bopt_params params);
+    GaussianProcessML(size_t dim, bopt_params params, Dataset& data);
     virtual ~GaussianProcessML();
 
     /** 
      */
     double negativeLogLikelihood();
 
-    /** 
-     * \brief Precompute some values of the prediction that do not depends on
+    /** Precompute some values of the prediction that do not depend on
      * the query
-     * @return error code
      */
-    int precomputePrediction();
+    void precomputePrediction();
 
   private:
     vectord mWML;           //!< GP ML parameters

File include/gaussian_process_normal.hpp

   class GaussianProcessNormal: public HierarchicalGaussianProcess
   {
   public:
-    GaussianProcessNormal(size_t dim, bopt_params params);
+    GaussianProcessNormal(size_t dim, bopt_params params, Dataset& data);
     virtual ~GaussianProcessNormal();
 
     /** 
      */
     double negativeLogLikelihood();
 
-    /** 
-     * \brief Precompute some values of the prediction that do not depends on
-     * the query
-     * @return error code
+    /** Precompute some values of the prediction that do not depend
+     *	on the query
      */
-    int precomputePrediction();
+    void precomputePrediction();
 
   private:
     vectord mWMap;                      //!< GP posterior parameters

File include/hierarchical_gaussian_process.hpp

   class HierarchicalGaussianProcess: public EmpiricalBayesProcess
   {
   public:
-    HierarchicalGaussianProcess(size_t dim, bopt_params params);
+    HierarchicalGaussianProcess(size_t dim, bopt_params params, Dataset& data);
     virtual ~HierarchicalGaussianProcess() {};
 
   protected:

File include/kernel_atomic.hpp

   class AtomicKernel : public Kernel
   {
   public:
-    virtual int init(size_t input_dim)
-    {
-      n_inputs = input_dim;
-      return 0;
-    };
-    int setHyperParameters(const vectord &theta) 
+    virtual void init(size_t input_dim)
+    { n_inputs = input_dim; };
+
+    void setHyperParameters(const vectord &theta) 
     {
       if(theta.size() != n_params)
 	{
 	  FILE_LOG(logERROR) << "Wrong number of kernel hyperparameters"; 
-	  return -1; 
+	  throw std::invalid_argument("Wrong number of kernel hyperparameters");
 	}
       params = theta;
-      return 0;
     };
+
     vectord getHyperParameters() {return params;};
     size_t nHyperParameters() {return n_params;};
 
   class ConstKernel: public AtomicKernel
   {
   public:
-    int init(size_t input_dim)
-    {
-      n_params = 1;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = 1; n_inputs = input_dim;  };
 
     double operator()(const vectord &x1, const vectord &x2)
-    {
-      return params(0);
-    };
+    { return params(0); };
 
     double gradient(const vectord &x1, const vectord &x2,
 		    size_t component)
-    {
-      return 0.0;
-    };
+    { return 0.0; };
   };
 
+
   /** \brief Linear kernel. */
   class LinKernel: public AtomicKernel
   {
   public:
-    int init(size_t input_dim)
-    {
-      n_params = 0;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = 0;  n_inputs = input_dim; };
 
     double operator()(const vectord &x1, const vectord &x2)
     {
       return boost::numeric::ublas::inner_prod(x1,x2); 
     };
 
+    // TODO:
     double gradient(const vectord &x1, const vectord &x2,
 		    size_t component)
-    {
-      assert(false);
-      return 0.0;
-    };
+    { assert(false);  return 0.0;   };
   };
 
   /** \brief Linear kernel. */
   class LinKernelARD: public AtomicKernel
   {
   public:
-    int init(size_t input_dim)
-    {
-      n_params = input_dim;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = input_dim;  n_inputs = input_dim;  };
 
     double operator()(const vectord &x1, const vectord &x2)
     {
       return boost::numeric::ublas::inner_prod(v1,v2); 
     };
 
+    // TODO:
     double gradient(const vectord &x1, const vectord &x2,
 		    size_t component)
-    {
-      assert(false); return 0.0;
-    };
+    { assert(false); return 0.0;  };
   };
 
 
   class MaternIso1: public ISOkernel
   {
   public:
-    int init(size_t input_dim)
-    {
-      n_params = 1;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = 1;  n_inputs = input_dim;  };
 
     double operator()(const vectord &x1, const vectord &x2)
     {
   class MaternARD1: public ARDkernel
   {
   public:
-    int init(size_t input_dim)
-    {
-      n_params = input_dim;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = input_dim; n_inputs = input_dim;  };
 
     double operator()(const vectord &x1, const vectord &x2)
     {
       return exp(-r);
     };
 
+    //TODO: 
     double gradient(const vectord &x1, const vectord &x2,
 		    size_t component)
-    {
-      assert(false); //TODO: 
-      return 0.0;
-    };
+    { assert(false);  return 0.0;  };
   };
 
 
   class MaternIso3: public ISOkernel
   {
   public:
-    int init(size_t input_dim)
-    {
-      n_params = 1;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = 1; n_inputs = input_dim;  };
 
     double operator()( const vectord &x1, const vectord &x2)
     {
   class MaternARD3: public ARDkernel
   {
   public:
-    int init(size_t input_dim)
-    {
-      n_params = input_dim;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = input_dim;  n_inputs = input_dim; };
 
     double operator()( const vectord &x1, const vectord &x2)
     {
   class MaternIso5: public ISOkernel
   {
   public:
-    int init(size_t input_dim)
-    {
-      n_params = 1;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = 1; n_inputs = input_dim;  };
 
     double operator()( const vectord &x1, const vectord &x2)
     {
   class MaternARD5: public ARDkernel
   {
   public:
-    int init(size_t input_dim)
-    {
-      n_params = input_dim;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = input_dim;  n_inputs = input_dim; };
 
     double operator()( const vectord &x1, const vectord &x2)
     {
       double er = exp(-r);
       return (1+r*(1+r/3))*er;
     };
+
+    //TODO:
     double gradient( const vectord &x1, const vectord &x2,
 		     size_t component)
-    {    
-      assert(false); return 0.0;
-    };
+    { assert(false); return 0.0; };
   };
 
   /** Polynomial covariance function*/
   public:
     Polynomial(){ mExp = 1; };
 
-    int init(size_t input_dim)
-    {
-      n_params = 2;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = 2;  n_inputs = input_dim;  };
 
     double operator()( const vectord &x1, const vectord &x2)
     {
       double xx = boost::numeric::ublas::inner_prod(x1,x2); 
       return params(0)*params(0) * (params(1)+xx)*mExp;
     };
+
+    //TODO:
     double gradient( const vectord &x1, const vectord &x2,
 		     size_t component)
-    {    
-      assert(false); return 0.0;
-    };
+    { assert(false); return 0.0; };
   protected:
     size_t mExp;
   };
   class Polynomial2: public Polynomial { public: Polynomial2(){ mExp = 2;};};
   class Polynomial3: public Polynomial { public: Polynomial3(){ mExp = 3;};};
   class Polynomial4: public Polynomial { public: Polynomial4(){ mExp = 4;};};
-  class Polynomial5: public Polynomial { public: Polynomial5(){mExp = 5;};};
-  class Polynomial6: public Polynomial { public: Polynomial6(){mExp = 6;};};
+  class Polynomial5: public Polynomial { public: Polynomial5(){ mExp = 5;};};
+  class Polynomial6: public Polynomial { public: Polynomial6(){ mExp = 6;};};
+  class Polynomial7: public Polynomial { public: Polynomial7(){ mExp = 7;};};
 
   /** \brief Square exponential (Gaussian) kernel. Isotropic version. */
   class SEIso: public ISOkernel
   {
   public:
-    int init(size_t input_dim)
-    {
-      n_params = 1;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = 1; n_inputs = input_dim;  };
 
     double operator()( const vectord &x1, const vectord &x2)
     {
       double k = rl*rl;
       return exp(-k/2);
     };
+
     double gradient(const vectord &x1, const vectord &x2,
 		    size_t component)
     {
   class SEArd: public ARDkernel
   {
   public:
-    int init(size_t input_dim)
-    {
-      n_params = input_dim;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = input_dim;  n_inputs = input_dim; };
 
     double operator()( const vectord &x1, const vectord &x2 )
     {
   class RQIso: public ISOkernel
   {
   public:
-    int init(size_t input_dim)
-    {
-      n_params = 2;
-      n_inputs = input_dim;
-      return 0;
-    };
+    void init(size_t input_dim)
+    { n_params = 2; n_inputs = input_dim; };
 
     double operator()( const vectord &x1, const vectord &x2)
     {
       double k = rl*rl/(2*params(1));
       return pow(1+k,-params(1));
     };
+
+    // TODO:
     double gradient(const vectord &x1, const vectord &x2,
 		    size_t component)
-    {
-      assert(false); return 0.0;
-    };
+    { assert(false); return 0.0;  };
   };
 
   //@}
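
Under the new interface a kernel's init() is void, and size errors in setHyperParameters() are reported with std::invalid_argument (handled once in AtomicKernel above). A sketch of what an additional atomic kernel would look like; WhiteNoiseKernel is hypothetical and not part of this commit:

    class WhiteNoiseKernel : public AtomicKernel
    {
    public:
      void init(size_t input_dim)
      { n_params = 1; n_inputs = input_dim; };

      double operator()(const vectord &x1, const vectord &x2)
      {
        // params(0)^2 on (numerically) identical inputs, zero otherwise
        double d = boost::numeric::ublas::norm_2(x1-x2);
        return (d < 1e-10) ? params(0)*params(0) : 0.0;
      };

      //TODO:
      double gradient(const vectord &x1, const vectord &x2,
                      size_t component)
      { assert(false); return 0.0; };
    };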

File include/kernel_combined.hpp

   class CombinedKernel : public Kernel
   {
   public:
-    virtual int init(size_t input_dim, Kernel* left, Kernel* right)
+    virtual void init(size_t input_dim, Kernel* left, Kernel* right)
     {
       n_inputs = input_dim;
       this->left = left;
       this->right = right;
-      return 0;
     };
-    int setHyperParameters(const vectord &theta) 
+
+    void setHyperParameters(const vectord &theta) 
     {
       using boost::numeric::ublas::subrange;
-      int error = 0;
 
       size_t n_lhs = left->nHyperParameters();
       size_t n_rhs = right->nHyperParameters();
       if (theta.size() != n_lhs + n_rhs)
 	{
 	  FILE_LOG(logERROR) << "Wrong number of kernel hyperparameters"; 
-	  return -1; 
+	  throw std::invalid_argument("Wrong number of kernel hyperparameters");
 	}
-      error += left->setHyperParameters(subrange(theta,0,n_lhs));
-      error += right->setHyperParameters(subrange(theta,n_lhs,n_lhs+n_rhs));
-      return error;
+      left->setHyperParameters(subrange(theta,0,n_lhs));
+      right->setHyperParameters(subrange(theta,n_lhs,n_lhs+n_rhs));
     };
 
     vectord getHyperParameters() 
   {
   public:
     double operator()(const vectord &x1, const vectord &x2)
-    {
-      return (*left)(x1,x2) + (*right)(x1,x2);
-    };
+    { return (*left)(x1,x2) + (*right)(x1,x2); };
 
     double gradient(const vectord &x1, const vectord &x2,
 		    size_t component)
-    {
-      return left->gradient(x1,x2,component) + right->gradient(x1,x2,component);
-    };
+    { return left->gradient(x1,x2,component) + right->gradient(x1,x2,component); };
   };
 
 
   {
   public:
     double operator()(const vectord &x1, const vectord &x2)
-    {
-      return (*left)(x1,x2) * (*right)(x1,x2);
-    };
+    { return (*left)(x1,x2) * (*right)(x1,x2); };
 
+    //TODO: Not implemented
     double gradient(const vectord &x1, const vectord &x2,
 		    size_t component)
-    {
-      return 0.0; //TODO: Not implemented
-    };
+    { return 0.0; };
   };
 
   //@}

File include/kernel_functors.hpp

   class Kernel
   {
   public:
-    virtual int init(size_t input_dim) {return 0;};
-    virtual int init(size_t input_dim, Kernel* left, Kernel* right) {return 0;};
+    virtual ~Kernel(){};
+    virtual void init(size_t input_dim) {};
+    virtual void init(size_t input_dim, Kernel* left, Kernel* right) {};
 
-    virtual int setHyperParameters(const vectord &theta) = 0;
+    virtual void setHyperParameters(const vectord &theta) = 0;
     virtual vectord getHyperParameters() = 0;
     virtual size_t nHyperParameters() = 0;
 
     virtual double operator()( const vectord &x1, const vectord &x2 ) = 0;
     virtual double gradient( const vectord &x1, const vectord &x2,
 			     size_t component ) = 0;
-    virtual ~Kernel(){};
 
   protected:
     size_t n_inputs;
 
     Kernel* getKernel();
     
-    int setHyperParameters(const vectord &theta);
+    void setHyperParameters(const vectord &theta);
     vectord getHyperParameters();
     size_t nHyperParameters();
 
      * @param thetav kernel parameters (mean)
      * @param stheta kernel parameters (std)
      * @param k_name kernel name
-     * @return error_code
      */
-    int setKernel (const vectord &thetav, const vectord &stheta, 
+    void setKernel (const vectord &thetav, const vectord &stheta, 
 		   std::string k_name, size_t dim);
 
     /** Wrapper of setKernel for C kernel structure */
-    int setKernel (kernel_parameters kernel, size_t dim);
+    void setKernel (kernel_parameters kernel, size_t dim);
 
-    int computeCorrMatrix(const vecOfvec& XX, matrixd& corrMatrix, double nugget);
-    int computeDerivativeCorrMatrix(const vecOfvec& XX, matrixd& corrMatrix, int dth_index);
+    void computeCorrMatrix(const vecOfvec& XX, matrixd& corrMatrix, double nugget);
+    void computeDerivativeCorrMatrix(const vecOfvec& XX, matrixd& corrMatrix, 
+				    int dth_index);
     vectord computeCrossCorrelation(const vecOfvec& XX, const vectord &query);
-    void computeCrossCorrelation(const vecOfvec& XX, 
-				 const vectord &query,
+    void computeCrossCorrelation(const vecOfvec& XX, const vectord &query,
 				 vectord& knx);
     double computeSelfCorrelation(const vectord& query);
     double kernelLogPrior();
 
   private:
     /** Set prior (Gaussian) for kernel hyperparameters */
-    int setKernelPrior (const vectord &theta, const vectord &s_theta);
+    void setKernelPrior (const vectord &theta, const vectord &s_theta);
 
     boost::scoped_ptr<Kernel> mKernel;            ///< Pointer to kernel function
     std::vector<boost::math::normal> priorKernel; ///< Prior of kernel parameters
   inline Kernel* KernelModel::getKernel()
   { return mKernel.get();  }
 
-  inline int KernelModel::setHyperParameters(const vectord &theta)
-  { return mKernel->setHyperParameters(theta); };
+  inline void KernelModel::setHyperParameters(const vectord &theta)
+  { mKernel->setHyperParameters(theta); };
     
   inline vectord KernelModel::getHyperParameters()
   {return mKernel->getHyperParameters();};
   inline double KernelModel::computeSelfCorrelation(const vectord& query)
   { return (*mKernel)(query,query); }
 
+  inline void KernelModel::setKernelPrior (const vectord &theta, 
+					   const vectord &s_theta)
+  {
+    for (size_t i = 0; i<theta.size(); ++i)
+      {
+	boost::math::normal n(theta(i),s_theta(i));
+	priorKernel.push_back(n);
+      }
+  };
+
+
   //@}
 
 } //namespace bayesopt
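
Because setHyperParameters() is now void, callers that previously branched on the returned error code would instead catch the exception thrown on a size mismatch; a hypothetical caller sketch (kernelModel and theta are placeholders):

    try
      {
        kernelModel.setHyperParameters(theta);   // throws std::invalid_argument on bad size
      }
    catch (std::invalid_argument& e)
      {
        FILE_LOG(logERROR) << e.what();
        // recover or rethrow
      }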

File include/kernelregressor.hpp

+/** \file kernelregressor.hpp 
+    \brief Nonparametric process abstract module */
+/*
+-------------------------------------------------------------------------
+   This file is part of BayesOpt, an efficient C++ library for 
+   Bayesian optimization.
+
+   Copyright (C) 2011-2013 Ruben Martinez-Cantin <rmcantin@unizar.es>
+ 
+   BayesOpt is free software: you can redistribute it and/or modify it 
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation, either version 3 of the License, or
+   (at your option) any later version.
+
+   BayesOpt is distributed in the hope that it will be useful, but 
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with BayesOpt.  If not, see <http://www.gnu.org/licenses/>.
+------------------------------------------------------------------------
+*/
+
+
+#ifndef __NONPARAMETRICPROCESS_HPP__
+#define __NONPARAMETRICPROCESS_HPP__
+
+#include "cholesky.hpp"
+#include "nonparametricprocess.hpp"
+#include "kernel_functors.hpp"
+
+namespace bayesopt
+{
+
+  /** \addtogroup NonParametricProcesses 
+   *  \brief Set of nonparametric processes (Gaussian process, Student's
+   *  t process, etc.) for surrogate modelling
+   */
+  /**@{*/
+
+
+  /**
+   * \brief Abstract class to implement non-parametric processes
+   */
+  class KernelRegressor: public NonParametricProcess
+  {
+  public:
+    KernelRegressor(size_t dim, bopt_params parameters, Dataset& data);
+    virtual ~KernelRegressor();
+
+    /** 
+     * \brief Updates the kernel parameters with a point estimate
+     * (empirical Bayes) or a full Bayesian distribution
+     */
+    virtual void updateKernelParameters() = 0;
+
+    /** 
+     * \brief Computes the initial surrogate model and updates the
+     * kernel parameter estimates.
+     *
+     * This function requires recomputing all covariance matrices,
+     * inverses, etc.  Use it with caution.
+     */
+    void fitSurrogateModel();
+
+    /** 
+     * \brief Sequential update of the surrogate model by adding a new point.
+     *  The new point is added efficiently using the Matrix Decomposition Lemma
+     *  for the inversion of the correlation matrix, or by adding new rows to the
+     *  Cholesky decomposition.  If retrain is true, it recomputes the
+     *  kernel hyperparameters and computes a full covariance matrix.
+     */   
+    void updateSurrogateModel(const vectord &Xnew, double Ynew, bool retrain);
+
+
+    // Getters and setters
+    double getSignalVariance();
+
+    /** Sets the kind of learning methodology for kernel hyperparameters */
+    void setLearnType(learning_type l_type);
+
+  protected:
+
+    /** Precompute some values of the prediction that do not depend
+     * on the query. */
+    virtual void precomputePrediction() = 0;
+
+    /** Computes the Correlation (Kernel or Gram) matrix */
+    void computeCorrMatrix(matrixd& corrMatrix);
+
+    /** Computes the Correlation (Kernel or Gram) matrix */
+    matrixd computeCorrMatrix();
+
+    /** Computes the derivative of the correlation matrix with respect
+     *	to the dth hyperparameter */
+    matrixd computeDerivativeCorrMatrix(int dth_index);
+    vectord computeCrossCorrelation(const vectord &query);
+    double computeSelfCorrelation(const vectord& query);
+
+    /** Computes the Cholesky decomposition of the Correlation matrix */
+    void computeCholeskyCorrelation();
+
+
+  protected:
+    matrixd mL;             ///< Cholesky decomposition of the Correlation matrix
+    learning_type mLearnType;
+    KernelModel mKernel;
+
+  private:
+    /** Adds a new point to the Cholesky decomposition of the
+     * Correlation matrix. */
+    void addNewPointToCholesky(const vectord& correlation,
+			      double selfcorrelation);
+
+
+    const double mRegularizer;   ///< Std of the obs. model (also used as nugget)
+  };
+
+  //// Inline methods
+  inline void KernelRegressor::fitSurrogateModel()
+  {
+    updateKernelParameters();
+    computeCholeskyCorrelation();
+    precomputePrediction(); 
+  };
+
+  inline void KernelRegressor::setLearnType(learning_type l_type) 
+  { mLearnType = l_type; };
+
+  inline void KernelRegressor::computeCorrMatrix(matrixd& corrMatrix)
+  { mKernel.computeCorrMatrix(mData.mX,corrMatrix,mRegularizer); }
+
+  inline  matrixd KernelRegressor::computeCorrMatrix()
+  {
+    const size_t nSamples = mData.getNSamples();
+    matrixd corrMatrix(nSamples,nSamples);
+    mKernel.computeCorrMatrix(mData.mX,corrMatrix,mRegularizer);
+    return corrMatrix;
+  }
+
+  inline vectord KernelRegressor::computeCrossCorrelation(const vectord &query)
+  { return mKernel.computeCrossCorrelation(mData.mX,query); }
+
+  inline double KernelRegressor::computeSelfCorrelation(const vectord& query)
+  { return mKernel.computeSelfCorrelation(query); }
+
+  inline void KernelRegressor::addNewPointToCholesky(const vectord& correlation,
+							  double selfcorrelation)
+  {
+    vectord newK(correlation);
+    utils::append(newK, selfcorrelation);
+    utils::cholesky_add_row(mL,newK);
+  }
+
+  /**@}*/
+  
+} //namespace bayesopt
+
+#endif
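
The split between Dataset and KernelRegressor leads to the call pattern used later in this commit (sampleInitialPoints() and stepOptimization()): one full fit on the initial design, then cheap sequential updates. A sketch with hypothetical instances data and regressor:

    data.setSamples(xInit, yInit);                       // matrixd of inputs, vectord of outcomes
    regressor->fitSurrogateModel();                      // recompute kernel params + full Cholesky

    data.addSample(xNew, yNew);
    regressor->updateSurrogateModel(xNew, yNew, false);  // rank-one Cholesky update, no retraining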

File include/mcmc.hpp

+/**  \file mcmc.hpp \brief Markov Chain Monte Carlo module */
+/*
+-------------------------------------------------------------------------
+   This file is part of BayesOpt, an efficient C++ library for 
+   Bayesian optimization.
+
+   Copyright (C) 2011-2013 Ruben Martinez-Cantin <rmcantin@unizar.es>
+ 
+   BayesOpt is free software: you can redistribute it and/or modify it 
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation, either version 3 of the License, or
+   (at your option) any later version.
+
+   BayesOpt is distributed in the hope that it will be useful, but 
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with BayesOpt.  If not, see <http://www.gnu.org/licenses/>.
+------------------------------------------------------------------------
+*/
+
+
+#ifndef  _MCMC_HPP_
+#define  _MCMC_HPP_
+
+
+namespace bayesopt {
+
+  class MCMCSampler
+  {
+  public:
+    MCMCSampler(size_t n_samples = 500);
+    virtual ~MCMCSampler();
+
+    void newParticle(const vectord& past);
+
+  private:
+    vecOfvec particles;
+  };
+
+} //namespace bayesopt
+
+
+#endif

File include/nonparametricprocess.hpp

 /** \file nonparametricprocess.hpp 
-    \brief Nonparametric process abstract module */
+    \brief Abstract module for a Bayesian regressor */
 /*
 -------------------------------------------------------------------------
    This file is part of BayesOpt, an efficient C++ library for 
 */
 
 
-#ifndef __NONPARAMETRICPROCESS_HPP__
-#define __NONPARAMETRICPROCESS_HPP__
+#ifndef __BAYESIANREGRESSOR_HPP__
+#define __BAYESIANREGRESSOR_HPP__
 
+#include <boost/scoped_ptr.hpp>
+#include "dll_stuff.h"
 #include "ublas_extra.hpp"
-#include "kernel_functors.hpp"
+#include "prob_distribution.hpp"
 #include "mean_functors.hpp"
-#include "prob_distribution.hpp"
 
 namespace bayesopt
 {
 
-  /** \addtogroup NonParametricProcesses 
-   *  \brief Set of nonparametric processes (Gaussian process, Student's
-   *  t process, etc.) for surrogate modelling
-   */
+  /** \addtogroup NonParametricProcesses */
   /**@{*/
 
   /** \brief Dataset model to deal with the vector (real) based datasets */
 
 
   //// Inline methods
+  inline void Dataset::setSamples(const matrixd &x, const vectord &y)
+  {
+    mY = y;
+    for (size_t i=0; i<x.size1(); ++i)
+      {
+	mX.push_back(row(x,i));
+	checkBoundsY(i);
+      } 
+  };
+  
   inline double Dataset::getSample(size_t index, vectord &x)
-  { 
-    x = mX[index];  
-    return mY(index);  
+  { x = mX[index];  return mY(index);  }
+
+  inline void Dataset::addSample(const vectord &x, double y)
+  {
+    mX.push_back(x); utils::append(mY,y);
+    checkBoundsY(mY.size()-1);
   }
 
   inline double Dataset::getLastSample(vectord &x)
-  { 
-    size_t last = mY.size()-1; 
-    return getSample(last, x); 
-  }
+  { return getSample(mY.size()-1, x); }
 
   inline vectord Dataset::getPointAtMinimum() { return mX[mMinIndex]; };
   inline double Dataset::getValueAtMinimum() { return mY(mMinIndex); };
 
 
   /**
-   * \brief Abstract class to implement non-parametric processes
+   * \brief Abstract class to implement Bayesian regressors
    */
   class BAYESOPT_API NonParametricProcess
   {
   public:
-    NonParametricProcess(size_t dim, bopt_params parameters);
+    NonParametricProcess(size_t dim, bopt_params parameters, Dataset& data);
     virtual ~NonParametricProcess();
 
     /** 
      * @param parameters (process name, noise, priors, etc.)
      * @return pointer to the corresponding derivate class (surrogate model)
      */
-    static NonParametricProcess* create(size_t dim, bopt_params parameters);
+    static NonParametricProcess* create(size_t dim, bopt_params parameters,
+					Dataset& data);
 
     /** 
      * \brief Function that returns the prediction of the GP for a query point
     virtual ProbabilityDistribution* prediction(const vectord &query) = 0;
 		 		 
     /** 
-     * \brief Computes the initial surrogate model. 
-     * It also updates the kernel parameters estimation. This
-     * function is hightly inefficient.  Use it only at the begining.
-     * @return error code
+     * \brief Computes the initial surrogate model and updates the
+     * kernel parameter estimates.
+     *
+     * This function requires recomputing all covariance matrices,
+     * inverses, etc.  Use it with caution.
      */
-    int fitSurrogateModel();
-    virtual int updateKernelParameters() = 0;
-    int precomputeSurrogate();
+    virtual void fitSurrogateModel() = 0;
 
     /** 
      * \brief Sequential update of the surrogate model by adding a new point.
      *  inversion of the correlation matrix or adding new rows to the
      *  Cholesky decomposition.  If retrain is true, it recomputes the
      *  kernel hyperparameters and computes a full covariance matrix.
-     * @return error code
      */   
-    int updateSurrogateModel(const vectord &Xnew, double Ynew, bool retrain);
+    virtual void updateSurrogateModel(const vectord &Xnew, 
+				      double Ynew, bool retrain) = 0;
 
 
     // Getters and setters
     void setSamples(const matrixd &x, const vectord &y);
     void addSample(const vectord &x, double y);
-    Dataset* getData() {return &mData;}
-
-    vectord getPointAtMinimum() { return mData.getPointAtMinimum(); };
-    double getValueAtMinimum() { return mData.getValueAtMinimum(); };
-    double getSignalVariance() { return mSigma; };
-
-    /** Sets the kind of learning methodology for kernel hyperparameters */
-    void setLearnType(learning_type l_type) { mLearnType = l_type; };
+    //    void setData(Dataset* data);
+    double getValueAtMinimum();
+    Dataset* getData();
+    double getSignalVariance();
 
   protected:
-    /** 
-     * \brief Precompute some values of the prediction that do not
-     * depends on the query.
-     * @return error code
-     */
-    virtual int precomputePrediction() = 0;
-
-    //TODO: Refactorize from KernelModel
-    int computeCorrMatrix(matrixd& corrMatrix);
-    matrixd computeCorrMatrix();
-    matrixd computeDerivativeCorrMatrix(int dth_index);
-    vectord computeCrossCorrelation(const vectord &query);
-    double computeSelfCorrelation(const vectord& query);
-
-
-  protected:
-    Dataset mData;
-    
-    double mSigma;                                   //!< GP posterior parameters
-    matrixd mL;             ///< Cholesky decomposition of the Correlation matrix
+    Dataset& mData;  
+    double mSigma;                                   //!< Signal variance
     size_t dim_;
-    learning_type mLearnType;
-    KernelModel mKernel;
     MeanModel mMean;
-
-  private:
-    /** 
-     * Computes the Cholesky decomposition of the Correlation (Kernel
-     * or Gram) matrix 
-     * @return error code
-     */
-    int computeCholeskyCorrelation();
-
-    /** 
-     * Adds a new point to the Cholesky decomposition of the Correlation 
-     * matrix.
-     * @return error code
-     */
-    int addNewPointToCholesky(const vectord& correlation,
-			      double selfcorrelation);
-
-
-    const double mRegularizer;   ///< Std of the obs. model (also used as nugget)
   };
 
-  //// Inline methods
-  inline int NonParametricProcess::computeCorrMatrix(matrixd& corrMatrix)
+  //////////////////////////////////////////////////////////////////////////////
+  //// Inlines
+  inline void NonParametricProcess::setSamples(const matrixd &x, const vectord &y)
   {
-    return mKernel.computeCorrMatrix(mData.mX,corrMatrix,mRegularizer);
+    mMean.setPoints(mData.mX);  //Because it expects a vecOfvec instead of a matrixd
   }
 
-  inline  matrixd NonParametricProcess::computeCorrMatrix()
-  {
-    const size_t nSamples = mData.getNSamples();
-    matrixd corrMatrix(nSamples,nSamples);
-    int error = mKernel.computeCorrMatrix(mData.mX,corrMatrix,mRegularizer);
-    return corrMatrix;
-  }
+  inline void NonParametricProcess::addSample(const vectord &x, double y)
+  {  mMean.addNewPoint(x);  };
 
-  inline vectord NonParametricProcess::computeCrossCorrelation(const vectord &query)
-  {
-    return mKernel.computeCrossCorrelation(mData.mX,query);
-  }
+  // inline void NonParametricProcess::setData(Dataset* data)
+  // {mData = data;}
 
-  inline  double NonParametricProcess::computeSelfCorrelation(const vectord& query)
-  {
-    return mKernel.computeSelfCorrelation(query);
-  }
+  inline double NonParametricProcess::getValueAtMinimum() 
+  { return mData.getValueAtMinimum(); };
+
+
+  inline Dataset* NonParametricProcess::getData() 
+  {return &mData;}
+
+  inline double NonParametricProcess::getSignalVariance() 
+  { return mSigma; };
 
   /**@}*/
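
Since the surrogate now only holds a reference to its data, the Dataset object can be used on its own; a short sketch restricted to the inline methods defined above (xPoints, yPoints, xNew and yNew are placeholders, and a default constructor is assumed):

    bayesopt::Dataset data;
    data.setSamples(xPoints, yPoints);      // matrixd of inputs, vectord of outcomes
    data.addSample(xNew, yNew);             // also updates the running minimum
    vectord xMin = data.getPointAtMinimum();
    double  yMin = data.getValueAtMinimum();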
   

File include/student_t_process_jef.hpp

   class StudentTProcessJeffreys: public HierarchicalGaussianProcess
   {
   public:
-    StudentTProcessJeffreys(size_t dim, bopt_params params);
+    StudentTProcessJeffreys(size_t dim, bopt_params params, Dataset& data);
     virtual ~StudentTProcessJeffreys();
 
     /** 
      */
     double negativeLogLikelihood();
 
-    /** 
-     * \brief Precompute some values of the prediction that do not depends on
-     * the query
-     * @return error code
+    /** Precompute some values of the prediction that do not depend
+     *	on the query
      */
-    int precomputePrediction();
+    void precomputePrediction();
 
   private:
     vectord mWML;           //!< GP ML parameters

File include/student_t_process_nig.hpp

   class StudentTProcessNIG: public HierarchicalGaussianProcess
   {
   public:
-    StudentTProcessNIG(size_t dim, bopt_params params);
+    StudentTProcessNIG(size_t dim, bopt_params params, Dataset& data);
     virtual ~StudentTProcessNIG();
 
     /** 
      */
     double negativeLogLikelihood();
 
-    /** 
-     * \brief Precompute some values of the prediction that do not depends on
-     * the query
-     * @return error code
+    /** Precompute some values of the prediction that do not depend
+     *	on the query
      */
-    int precomputePrediction();
+    void precomputePrediction();
 
   private:
     vectord mWMap;                      //!< GP posterior parameters

File src/bayesoptbase.cpp

     __init__();
   }
 
-  int BayesOptBase::__init__()
+  void BayesOptBase::__init__()
   { 
     // Configure logging
     size_t verbose = mParameters.verbose_level;
 	static_cast<size_t>(ceil(0.1*mParameters.n_iterations));
 
     // Configure Surrogate and Criteria Functions
+    
     setSurrogateModel();
     setCriteria();
 
-    return 0;
+    // mData.reset(new Dataset());
+    // mGP->setData(mData.get());
   } // __init__
 
   BayesOptBase::~BayesOptBase()
   {} // Default destructor
 
-  int BayesOptBase::setSurrogateModel()
+  void BayesOptBase::setSurrogateModel()
   {
-    // Configure Surrogate and Criteria Functions
-    mGP.reset(NonParametricProcess::create(mDims,mParameters));
-    if (mGP == NULL) 
-      {
-	FILE_LOG(logERROR) << "Error setting the surrogate function"; 
-	exit(EXIT_FAILURE);
-      } 
-    return 0;
+    mGP.reset(NonParametricProcess::create(mDims,mParameters,mData));
   } // setSurrogateModel
 
-  int BayesOptBase::setCriteria()
+  void BayesOptBase::setCriteria()
   {
     mCrit.reset(mCFactory.create(mParameters.crit_name,mGP.get()));
-    if (mCrit == NULL)
+    
+    if (mCrit->nParameters() == mParameters.n_crit_params)
       {
-	FILE_LOG(logERROR) << "Error in criterium"; 
-	exit(EXIT_FAILURE);
+	mCrit->setParameters(utils::array2vector(mParameters.crit_params,
+					       mParameters.n_crit_params));
       }
-    
-    if (mCrit->nParameters() != mParameters.n_crit_params)
+    else // If the number of parameters is different, use default.
       {
 	if (mParameters.n_crit_params != 0)
 	  {
 			       << mParameters.n_crit_params << " instead.";
 	  }
 	FILE_LOG(logINFO) << "Usign default parameters for criteria.";
-	return 0;
       }
-      
-    // If we have the correct number of parameters.
-    vectord critParams = utils::array2vector(mParameters.crit_params,
-					       mParameters.n_crit_params);
-    mCrit->setParameters(critParams);
-    return 0;
   } // setCriteria
 
-  int BayesOptBase::stepOptimization(size_t ii)
+  void BayesOptBase::stepOptimization(size_t ii)
   {
     vectord xNext(mDims);
     nextPoint(xNext); // Find what is the next point.
     bool retrain = ((mParameters.n_iter_relearn > 0) && 
 		    ((ii + 1) % mParameters.n_iter_relearn == 0));
     
+    addSample(xNext,yNext);
     mGP->updateSurrogateModel(xNext,yNext,retrain); 
     plotStepData(ii,xNext,yNext);
-    return 0;
   }
 
   int BayesOptBase::optimize(vectord &bestPoint)
   } // optimize
   
 
-  int BayesOptBase::nextPoint(vectord &Xnext)
+  void BayesOptBase::nextPoint(vectord &Xnext)
   {
-    int error = 0;
-    
     //Epsilon-Greedy exploration (see Bull 2011)
     if ((mParameters.epsilon > 0.0) && (mParameters.epsilon < 1.0))
       {
 	FILE_LOG(logINFO) << "Trying random jump with prob:" << result;
 	if (mParameters.epsilon > result)
 	  {
-	    for (size_t i = 0; i <Xnext.size(); ++i)
+	    for (size_t i = 0; i<Xnext.size(); ++i)
 	      {
 		 Xnext(i) = drawSample();
 	      } 
 	    FILE_LOG(logINFO) << "Epsilon-greedy random query!";
-	    return 0;
 	  }
       }
-
-    if (mCrit->requireComparison())
-      {
-	bool check = false;
-	std::string name;
-
-	mCrit->reset();
-	while (!check)
-	  {
-	    findOptimal(Xnext);
-	    check = mCrit->checkIfBest(Xnext,name,error);
-	  }
-	FILE_LOG(logINFO) << name << " was selected.";
-      }
     else
       {
-	findOptimal(Xnext);
+	// GP-Hedge and related algorithms
+	if (mCrit->requireComparison())
+	  {
+	    bool check = false;
+	    std::string name;
+	    
+	    mCrit->reset();
+	    while (!check)
+	      {
+		findOptimal(Xnext);
+		check = mCrit->checkIfBest(Xnext,name);
+	      }
+	    FILE_LOG(logINFO) << name << " was selected.";
+	  }
+	else  // Standard "Bayesian optimization"
+	  {
+	    findOptimal(Xnext);
+	  }
       }
-    return error;
   }
 
 

File src/bayesoptcont.cpp

       delete mBB;
   } // Default destructor
 
-  int ContinuousModel::initializeOptimization()
+  void ContinuousModel::initializeOptimization()
   {
     if (mBB == NULL)
       {
 	mBB = new utils::BoundingBox<vectord>(lowerBound,upperBound);
       }
     sampleInitialPoints();
-    return 0;
   }
 
   vectord ContinuousModel::getFinalResult()
   {
-    return mBB->unnormalizeVector(mGP->getPointAtMinimum());
+    return mBB->unnormalizeVector(getPointAtMinimum());
   }
 
 
 
 
 
-  int ContinuousModel::plotStepData(size_t iteration, const vectord& xNext,
+  void ContinuousModel::plotStepData(size_t iteration, const vectord& xNext,
 			    double yNext)
   {
-    FILE_LOG(logINFO) << "Iteration: " << iteration+1 << " of " 
-		      << mParameters.n_iterations << " | Total samples: " 
-		      << iteration+1+mParameters.n_init_samples ;
-    FILE_LOG(logINFO) << "Query: " << mBB->unnormalizeVector(xNext); ;
-    FILE_LOG(logINFO) << "Query outcome: " << yNext ;
-    FILE_LOG(logINFO) << "Best query: " 
-		      << mBB->unnormalizeVector(mGP->getPointAtMinimum()); 
-    FILE_LOG(logINFO) << "Best outcome: " <<  mGP->getValueAtMinimum();
-    
-    return 0;
+    if(mParameters.verbose_level >0)
+      { 
+	FILE_LOG(logINFO) << "Iteration: " << iteration+1 << " of " 
+			  << mParameters.n_iterations << " | Total samples: " 
+			  << iteration+1+mParameters.n_init_samples ;
+	FILE_LOG(logINFO) << "Query: " << mBB->unnormalizeVector(xNext); ;
+	FILE_LOG(logINFO) << "Query outcome: " << yNext ;
+	FILE_LOG(logINFO) << "Best query: " 
+			  << mBB->unnormalizeVector(getPointAtMinimum()); 
+	FILE_LOG(logINFO) << "Best outcome: " <<  getValueAtMinimum();
+      }
   } //plotStepData
 
   int ContinuousModel::sampleInitialPoints()
 	yPoints(i) = evaluateSampleInternal(sample);
       }
     
-    mGP->setSamples(xPoints,yPoints);
+    setSamples(xPoints,yPoints);
     mGP->fitSurrogateModel();
     
     // For logging purpose

File src/bayesoptdisc.cpp

   DiscreteModel::~DiscreteModel()
   {} // Default destructor
 
-  int DiscreteModel::initializeOptimization()
+  void DiscreteModel::initializeOptimization()
   {
     mDims = mInputSet[0].size();    
     sampleInitialPoints();
-    return 0;
   }
 
   vectord DiscreteModel::getFinalResult()
   {
-    return mGP->getPointAtMinimum();
+    return getPointAtMinimum();
   }
   
-  int DiscreteModel::plotStepData(size_t iteration, const vectord& xNext,
-				double yNext)
+  void DiscreteModel::plotStepData(size_t iteration, const vectord& xNext,
+				   double yNext)
   {
     if(mParameters.verbose_level >0)
       { 
 			  << iteration+1+mParameters.n_init_samples ;
 	FILE_LOG(logINFO) << "Trying point at: " << xNext ;
 	FILE_LOG(logINFO) << "Current outcome: " << yNext ;
-	FILE_LOG(logINFO) << "Best found at: " << mGP->getPointAtMinimum() ; 
-	FILE_LOG(logINFO) << "Best outcome: " <<  mGP->getValueAtMinimum() ;    
+	FILE_LOG(logINFO) << "Best found at: " << getPointAtMinimum() ; 
+	FILE_LOG(logINFO) << "Best outcome: " <<  getValueAtMinimum() ;    
       }
-    return 0;
   }
 
 
       {
 	xPoint = perms[i];
 	yPoint = evaluateSample(xPoint);
-	mGP->addSample(xPoint,yPoint);
+	addSample(xPoint,yPoint);
       }
 
     mGP->fitSurrogateModel();

File src/criteria_combined.cpp

   };
 
   bool GP_Hedge::checkIfBest(vectord& best, 
-			     std::string& name,
-			     int& error_code)
+			     std::string& name)
   { 
     if (mIndex < mCriteriaList.size())
       {
 	loss_(mIndex) = computeLoss(best);
 	mBestLists.push_back(best);
-	error_code = 0;
 	++mIndex;
 	if (mIndex < mCriteriaList.size())
 	  mCurrentCriterium = mCriteriaList[mIndex];
     else
       {
 	int optIndex = update_hedge();
-	if (optIndex >= 0)
-	  {
-	    name = mCriteriaList[optIndex]->name();
-      	    best = mBestLists[optIndex];
-	    error_code = 0;
-	  }
-	else
-	  {
-	    name = mCriteriaList[0]->name();
-      	    best = mBestLists[0];
-	    FILE_LOG(logERROR) << "Error updating Hedge algorithm. Selecting " << name;
-	    error_code = optIndex; 
-	  }
+	name = mCriteriaList[optIndex]->name();
+	best = mBestLists[optIndex];
 	return true;	
       }
 
 	if (u < cumprob_(i))
 	  return i;
       }
-    return -1;
+    FILE_LOG(logERROR) << "Error updating Hedge algorithm. " 
+		       << "Selecting first criteria by default.";
+    return 0;
   };
 
 

File src/criteria_functors.cpp

 
   /// Factory method for criterion functions.
   // Criteria* CriteriaFactory::create(criterium_name name,
-  // 				    NonParametricProcess* proc)
+  // 				    KernelRegressor* proc)
   // {
   //   Criteria* c_ptr;
   //   std::vector<Criteria*> list;

File src/empiricalbayesprocess.cpp

 
 namespace bayesopt
 {
-  EmpiricalBayesProcess::EmpiricalBayesProcess(size_t dim, bopt_params parameters):
-    NonParametricProcess(dim,parameters)
+  EmpiricalBayesProcess::EmpiricalBayesProcess(size_t dim, bopt_params parameters, 
+					       Dataset& data):
+    KernelRegressor(dim,parameters,data)
   { 
     if (mLearnType == L_BAYES)
       {
   }
 
 
-  int EmpiricalBayesProcess::updateKernelParameters()
+  void EmpiricalBayesProcess::updateKernelParameters()
   {
     if (mLearnType == L_FIXED)
       {
 	FILE_LOG(logDEBUG) << "Fixed hyperparameters. Not learning";
-	return 0;
       }
     else
       {
-	int error = -1;
 	vectord optimalTheta = mKernel.getHyperParameters();
 	
-	FILE_LOG(logDEBUG) << "Computing kernel parameters. Initial: " 
-			   << optimalTheta;
-
+	FILE_LOG(logDEBUG) << "Initial kernel parameters: " << optimalTheta;
 	kOptimizer->run(optimalTheta);
-	error = mKernel.setHyperParameters(optimalTheta);
-
-	if (error)
-	  {
-	    FILE_LOG(logERROR) << "Error updating kernel parameters.";
-	    exit(EXIT_FAILURE);
-	  }   
-
+	mKernel.setHyperParameters(optimalTheta);
 	FILE_LOG(logDEBUG) << "Final kernel parameters: " << optimalTheta;	
-	return error;
       }
   };
 
-  double EmpiricalBayesProcess::evaluateKernelParams(const vectord& query)
-  { 
-    if (mLearnType == L_FIXED)
-      {
-	FILE_LOG(logERROR) << "Fixed hyperparameters should not change.";
-	return -1;
-      }
-    else
-      {
-	int error = mKernel.setHyperParameters(query);
-	if (error) 
-	  {
-	    FILE_LOG(logERROR) << "Problem optimizing kernel parameters."; 
-	    exit(EXIT_FAILURE);	
-	  }
-	return evaluateKernelParams();
-      }
-  };