Commits

Ruben Martinez-Cantin committed 071cbb7

Refactoring namespaces. Improve criteria factory.

  • Parent commits aa506e6


Files changed (15)

 # comments will behave just like regular Qt-style comments
 # (thus requiring an explicit @brief command for a brief description.)
 
-JAVADOC_AUTOBRIEF      = NO
+JAVADOC_AUTOBRIEF      = YES
 
 # If the QT_AUTOBRIEF tag is set to YES then Doxygen will
 # interpret the first line (until the first dot) of a Qt-style

File app/bo_cont.cpp

   bopt_params par = initialize_parameters_to_default();
 
   par.theta[0] = KERNEL_THETA;
-  par.n_theta = 1;
+  par.theta[1] = KERNEL_THETA;
+  par.s_theta[0] = 1;
+  par.s_theta[1] = 1;
+  par.n_theta = 2;
   par.alpha = PRIOR_ALPHA;
   par.beta = PRIOR_BETA;
   par.delta = PRIOR_DELTA_SQ;
   par.c_name = C_EI;
   par.s_name = S_STUDENT_T_PROCESS_JEFFREYS;
   par.k_name = K_MATERN_ISO3;
+  par.k_s_name = "kSum(kSEISO,kConst)";
   par.m_name = M_ZERO;
+  par.m_s_name = "mConst";
   par.n_iterations = 200;       // Number of iterations
   par.n_init_samples = 50;
   par.verbose_level = 2;
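
For context, here is how the new composed kernel lines up with the two hyperparameter priors set above. This is a minimal sketch in the same C API as the hunk; the left-to-right ordering follows how CombinedKernel::setHyperParameters (see kernel_combined.hpp below) splits the parameter vector.

    /* kSum(kSEISO,kConst): kSEISO contributes one length-scale,
       kConst one constant value, hence n_theta = 2.              */
    bopt_params par = initialize_parameters_to_default();
    par.k_s_name   = "kSum(kSEISO,kConst)";
    par.theta[0]   = KERNEL_THETA;  /* prior mean, kSEISO length-scale */
    par.s_theta[0] = 1;             /* and its prior std               */
    par.theta[1]   = KERNEL_THETA;  /* prior mean, kConst value        */
    par.s_theta[1] = 1;             /* and its prior std               */
    par.n_theta    = 2;             /* total over both atomic kernels  */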

File include/gaussian_process.hpp

 
   private:
     vectord mAlphaV;
-    GaussianDistribution* d_;
+    GaussianDistribution* d_;     ///< Pointer to distribution function
   };
 
   /**@}*/

File include/kernel_atomic.hpp

 #include "kernel_functors.hpp"
 #include "elementwise_ublas.hpp"
 
-/**\addtogroup KernelFunctions */
-//@{
+namespace bayesopt
+{
+  
+  /**\addtogroup KernelFunctions */
+  //@{
 
 
-/** \brief Abstract class for an atomic kernel */
-class AtomicKernel : public Kernel
-{
-public:
-  virtual int init(size_t input_dim)
+  /** \brief Abstract class for an atomic kernel */
+  class AtomicKernel : public Kernel
   {
-    n_inputs = input_dim;
-    return 0;
-  };
-  void setHyperParameters(const vectord &theta) 
-  {
-    assert(theta.size() == n_params);
-    params = theta;
-  };
-  vectord getHyperParameters() {return params;};
-  size_t nHyperParameters() {return n_params;};
+  public:
+    virtual int init(size_t input_dim)
+    {
+      n_inputs = input_dim;
+      return 0;
+    };
+    void setHyperParameters(const vectord &theta) 
+    {
+      assert(theta.size() == n_params);
+      params = theta;
+    };
+    vectord getHyperParameters() {return params;};
+    size_t nHyperParameters() {return n_params;};
 
-  virtual ~AtomicKernel(){};
+    virtual ~AtomicKernel(){};
 
-protected:
-  size_t n_params;
-  vectord params;
-};
-
-//////////////////////////////////////////////////////////////////////////////
-/** \brief Constant kernel.  
- * Combined with sum and product kernel can be used to scale a kernel
- * or add noise.
- */
-
-class ConstKernel: public AtomicKernel
-{
-public:
-  int init(size_t input_dim)
-  {
-    n_params = 1;
-    n_inputs = input_dim;
-    return 0;
+  protected:
+    size_t n_params;
+    vectord params;
   };
 
-  double operator()(const vectord &x1, const vectord &x2)
+  //////////////////////////////////////////////////////////////////////////
+  /** \brief Constant kernel.  
+   * Combined with sum and product kernel can be used to scale a kernel
+   * or add noise.
+   */
+
+  class ConstKernel: public AtomicKernel
   {
-    return params(0);
+  public:
+    int init(size_t input_dim)
+    {
+      n_params = 1;
+      n_inputs = input_dim;
+      return 0;
+    };
+
+    double operator()(const vectord &x1, const vectord &x2)
+    {
+      return params(0);
+    };
+
+    double gradient(const vectord &x1, const vectord &x2,
+		    size_t component)
+    {
+      return 0.0;
+    };
   };
 
-  double gradient(const vectord &x1, const vectord &x2,
-		  size_t component)
+  /** \brief Linear kernel. */
+  class LinKernel: public AtomicKernel
   {
-    return 0.0;
-  };
-};
+  public:
+    int init(size_t input_dim)
+    {
+      n_params = 0;
+      n_inputs = input_dim;
+      return 0;
+    };
 
-/** \brief Linear kernel. */
-class LinKernel: public AtomicKernel
-{
-public:
-  int init(size_t input_dim)
-  {
-    n_params = 0;
-    n_inputs = input_dim;
-    return 0;
+    double operator()(const vectord &x1, const vectord &x2)
+    {
+      assert(x1.size() == x2.size());
+      return boost::numeric::ublas::inner_prod(x1,x2); 
+    };
+
+    double gradient(const vectord &x1, const vectord &x2,
+		    size_t component)
+    {
+      assert(false);
+    };
   };
 
-  double operator()(const vectord &x1, const vectord &x2)
+  /** \brief Linear kernel with automatic relevance determination (ARD). */
+  class LinKernelARD: public AtomicKernel
   {
-    assert(x1.size() == x2.size());
-    return boost::numeric::ublas::inner_prod(x1,x2); 
+  public:
+    int init(size_t input_dim)
+    {
+      n_params = input_dim;
+      n_inputs = input_dim;
+      return 0;
+    };
+
+    double operator()(const vectord &x1, const vectord &x2)
+    {
+      assert(x1.size() == x2.size());
+      vectord v1 = utils::ublas_elementwise_div(x1, params);
+      vectord v2 = utils::ublas_elementwise_div(x2, params);
+      return boost::numeric::ublas::inner_prod(v1,v2); 
+    };
+
+    double gradient(const vectord &x1, const vectord &x2,
+		    size_t component)
+    {
+      assert(false);
+    };
   };
 
-  double gradient(const vectord &x1, const vectord &x2,
-		  size_t component)
+
+  ///////////////////////////////////////////////////////////////////////////
+  /** \brief Abstract class for isotropic kernel functors */
+  class ISOkernel : public AtomicKernel
   {
-    assert(false);
-  };
-};
+  public:
+    virtual ~ISOkernel(){};
 
-/** \brief Linear kernel. */
-class LinKernelARD: public AtomicKernel
-{
-public:
-  int init(size_t input_dim)
-  {
-    n_params = input_dim;
-    n_inputs = input_dim;
-    return 0;
+  protected:
+    inline double computeWeightedNorm2(const vectord &x1, const vectord &x2)
+    {  
+      assert(n_inputs == x1.size());
+      assert(x1.size() == x2.size());
+      return norm_2(x1-x2)/params(0); 
+    };
   };
 
-  double operator()(const vectord &x1, const vectord &x2)
+
+
+  /** \brief Abstract class for anisotropic kernel functors. 
+   * Typically ARD (Automatic Relevance Determination)
+   */
+  class ARDkernel : public AtomicKernel
   {
-    assert(x1.size() == x2.size());
-    vectord v1 = ublas_elementwise_div(x1, params);
-    vectord v2 = ublas_elementwise_div(x2, params);
-    return boost::numeric::ublas::inner_prod(v1,v2); 
+  public:
+    virtual ~ARDkernel(){};
+
+  protected:
+    inline double computeWeightedNorm2(const vectord &x1, const vectord &x2)
+    {
+      assert(n_inputs == x1.size());
+      assert(x1.size() == x2.size());
+      assert(x1.size() == params.size());
+
+      vectord xd = x1-x2;
+      vectord r = utils::ublas_elementwise_div(xd, params);
+      return norm_2(r);
+    };
   };
 
-  double gradient(const vectord &x1, const vectord &x2,
-		  size_t component)
+
+
+  /** \brief Matern isotropic kernel of 1st order */
+  class MaternIso1: public ISOkernel
   {
-    assert(false);
-  };
-};
+  public:
+    int init(size_t input_dim)
+    {
+      n_params = 1;
+      n_inputs = input_dim;
+      return 0;
+    };
 
+    double operator()(const vectord &x1, const vectord &x2)
+    {
+      double r = computeWeightedNorm2(x1,x2);
+      return exp(-r);
+    };
 
-/////////////////////////////////////////////////////////////////////////////
-/** \brief Abstract class for isotropic kernel functors */
-class ISOkernel : public AtomicKernel
-{
-public:
-  virtual ~ISOkernel(){};
-
-protected:
-  inline double computeWeightedNorm2(const vectord &x1, const vectord &x2)
-  {  
-    assert(n_inputs == x1.size());
-    assert(x1.size() == x2.size());
-    return norm_2(x1-x2)/params(0); 
-  };
-};
-
-
-
-/** \brief Abstract class for anisotropic kernel functors. 
- * Typically ARD (Automatic Relevance Determination)
- */
-class ARDkernel : public AtomicKernel
-{
-public:
-  virtual ~ARDkernel(){};
-
-protected:
-  inline double computeWeightedNorm2(const vectord &x1, const vectord &x2)
-  {
-    assert(n_inputs == x1.size());
-    assert(x1.size() == x2.size());
-    assert(x1.size() == params.size());
-
-    vectord xd = x1-x2;
-    vectord r = ublas_elementwise_div(xd, params);
-    return norm_2(r);
-  };
-};
-
-
-
-/** \brief Matern isotropic kernel of 1st order */
-class MaternIso1: public ISOkernel
-{
-public:
-  int init(size_t input_dim)
-  {
-    n_params = 1;
-    n_inputs = input_dim;
-    return 0;
+    double gradient(const vectord &x1, const vectord &x2,
+		    size_t component)
+    {
+      double r = computeWeightedNorm2(x1,x2);
+      return r*exp(-r);
+    };
   };
 
-  double operator()(const vectord &x1, const vectord &x2)
+
+  /** \brief Matern ARD kernel of 1st order */
+  class MaternARD1: public ARDkernel
   {
-    double r = computeWeightedNorm2(x1,x2);
-    return exp(-r);
+  public:
+    int init(size_t input_dim)
+    {
+      n_params = input_dim;
+      n_inputs = input_dim;
+      return 0;
+    };
+
+    double operator()(const vectord &x1, const vectord &x2)
+    {
+      double r = computeWeightedNorm2(x1,x2);
+      return exp(-r);
+    };
+
+    double gradient(const vectord &x1, const vectord &x2,
+		    size_t component)
+    {
+      assert(false); //TODO: gradient not yet implemented
+    };
   };
 
-  double gradient(const vectord &x1, const vectord &x2,
-                  size_t component)
+
+  /** \brief Matern isotropic kernel of 3rd order */
+  class MaternIso3: public ISOkernel
   {
-    double r = computeWeightedNorm2(x1,x2);
-    return r*exp(-r);
-  };
-};
+  public:
+    int init(size_t input_dim)
+    {
+      n_params = 1;
+      n_inputs = input_dim;
+      return 0;
+    };
 
+    double operator()( const vectord &x1, const vectord &x2)
+    {
+      double r = sqrt(3.0) * computeWeightedNorm2(x1,x2);
+      double er = exp(-r);
+      return (1+r)*er;
+    };
 
-/** \brief Matern ARD kernel of 1st order */
-class MaternARD1: public ARDkernel
-{
-public:
-  int init(size_t input_dim)
-  {
-    n_params = input_dim;
-    n_inputs = input_dim;
-    return 0;
+    double gradient( const vectord &x1, const vectord &x2,
+		     size_t component)
+    {
+      double r = sqrt(3.0) * computeWeightedNorm2(x1,x2);
+      double er = exp(-r);
+      return r*r*er; 
+    };
   };
 
-  double operator()(const vectord &x1, const vectord &x2)
+  /** \brief Matern ARD kernel of 3rd order */
+  class MaternARD3: public ARDkernel
   {
-    double r = computeWeightedNorm2(x1,x2);
-    return exp(-r);
+  public:
+    int init(size_t input_dim)
+    {
+      n_params = input_dim;
+      n_inputs = input_dim;
+      return 0;
+    };
+
+    double operator()( const vectord &x1, const vectord &x2)
+    {
+      double r = sqrt(3.0) * computeWeightedNorm2(x1,x2);
+      double er = exp(-r);
+      return (1+r)*er;
+    };
+
+    double gradient( const vectord &x1, const vectord &x2,
+		     size_t component)
+    {
+      assert(false);
+    };
   };
 
-  double gradient(const vectord &x1, const vectord &x2,
-                  size_t component)
+
+  /** \brief Matern isotropic kernel of 5th order */
+  class MaternIso5: public ISOkernel
   {
-    assert(false); //TODO:
-  };
-};
+  public:
+    int init(size_t input_dim)
+    {
+      n_params = 1;
+      n_inputs = input_dim;
+      return 0;
+    };
 
-
-/** \brief Matern kernel of 3rd order */
-class MaternIso3: public ISOkernel
-{
-public:
-  int init(size_t input_dim)
-  {
-    n_params = 1;
-    n_inputs = input_dim;
-    return 0;
+    double operator()( const vectord &x1, const vectord &x2)
+    {
+      double r = sqrt(5.0) * computeWeightedNorm2(x1,x2);
+      double er = exp(-r);
+      return (1+r*(1+r/3))*er;
+    };
+    double gradient( const vectord &x1, const vectord &x2,
+		     size_t component)
+    {    
+      double r = sqrt(5.0) * computeWeightedNorm2(x1,x2);
+      double er = exp(-r);
+      return r*(1+r)/3*r*er; 
+    };
   };
 
-  double operator()( const vectord &x1, const vectord &x2)
+
+  /** \brief Matern ARD kernel of 5th order */
+  class MaternARD5: public ARDkernel
   {
-    double r = sqrt(3.0) * computeWeightedNorm2(x1,x2);
-    double er = exp(-r);
-    return (1+r)*er;
+  public:
+    int init(size_t input_dim)
+    {
+      n_params = input_dim;
+      n_inputs = input_dim;
+      return 0;
+    };
+
+    double operator()( const vectord &x1, const vectord &x2)
+    {
+      double r = sqrt(5.0) * computeWeightedNorm2(x1,x2);
+      double er = exp(-r);
+      return (1+r*(1+r/3))*er;
+    };
+    double gradient( const vectord &x1, const vectord &x2,
+		     size_t component)
+    {    
+      assert(false);
+    };
   };
 
-  double gradient( const vectord &x1, const vectord &x2,
-                   size_t component)
+
+
+
+  /** \brief Square exponential (Gaussian) kernel. Isotropic version. */
+  class SEIso: public ISOkernel
   {
-    double r = sqrt(3.0) * computeWeightedNorm2(x1,x2);
-    double er = exp(-r);
-    return r*r*er; 
-  };
-};
+  public:
+    int init(size_t input_dim)
+    {
+      n_params = 1;
+      n_inputs = input_dim;
+      return 0;
+    };
 
-/** \brief Matern ARD kernel of 3rd order */
-class MaternARD3: public ARDkernel
-{
-public:
-  int init(size_t input_dim)
-  {
-    n_params = input_dim;
-    n_inputs = input_dim;
-    return 0;
+    double operator()( const vectord &x1, const vectord &x2)
+    {
+      double rl = computeWeightedNorm2(x1,x2);
+      double k = rl*rl;
+      return exp(-k/2);
+    };
+    double gradient(const vectord &x1, const vectord &x2,
+		    size_t component)
+    {
+      double rl = computeWeightedNorm2(x1,x2);
+      double k = rl*rl;
+      return exp(-k/2)*k;
+    };
   };
 
-  double operator()( const vectord &x1, const vectord &x2)
+
+  /** \brief Square exponential (Gaussian) kernel. ARD version. */
+  class SEArd: public ARDkernel
   {
-    double r = sqrt(3.0) * computeWeightedNorm2(x1,x2);
-    double er = exp(-r);
-    return (1+r)*er;
+  public:
+    int init(size_t input_dim)
+    {
+      n_params = input_dim;
+      n_inputs = input_dim;
+      return 0;
+    };
+
+    double operator()( const vectord &x1, const vectord &x2 )
+    {
+      double rl = computeWeightedNorm2(x1,x2);
+      double k = rl*rl;
+      return exp(-k/2);
+    };
+  
+    double gradient(const vectord &x1, const vectord &x2,
+		    size_t component)
+    {
+      double rl = computeWeightedNorm2(x1,x2);
+      double k = rl*rl;
+      double r = (x1(component) - x2(component))/params(component);
+      return exp(-k/2)*r*r;
+    };
   };
 
-  double gradient( const vectord &x1, const vectord &x2,
-                   size_t component)
-  {
-    assert(false);
-  };
-};
+  //@}
 
-
-/** \brief Matern isotropic kernel of 5th order */
-class MaternIso5: public ISOkernel
-{
-public:
-  int init(size_t input_dim)
-  {
-    n_params = 1;
-    n_inputs = input_dim;
-    return 0;
-  };
-
-  double operator()( const vectord &x1, const vectord &x2)
-  {
-    double r = sqrt(5.0) * computeWeightedNorm2(x1,x2);
-    double er = exp(-r);
-    return (1+r*(1+r/3))*er;
-  };
-  double gradient( const vectord &x1, const vectord &x2,
-                   size_t component)
-  {    
-    double r = sqrt(5.0) * computeWeightedNorm2(x1,x2);
-    double er = exp(-r);
-    return r*(1+r)/3*r*er; 
-  };
-};
-
-
-/** \brief Matern ARD kernel of 5th order */
-class MaternARD5: public ARDkernel
-{
-public:
-  int init(size_t input_dim)
-  {
-    n_params = input_dim;
-    n_inputs = input_dim;
-    return 0;
-  };
-
-  double operator()( const vectord &x1, const vectord &x2)
-  {
-    double r = sqrt(5.0) * computeWeightedNorm2(x1,x2);
-    double er = exp(-r);
-    return (1+r*(1+r/3))*er;
-  };
-  double gradient( const vectord &x1, const vectord &x2,
-                   size_t component)
-  {    
-    assert(false);
-  };
-};
-
-
-
-
-/** \brief Square exponential (Gaussian) kernel. Isotropic version. */
-class SEIso: public ISOkernel
-{
-public:
-  int init(size_t input_dim)
-  {
-    n_params = 1;
-    n_inputs = input_dim;
-    return 0;
-  };
-
-  double operator()( const vectord &x1, const vectord &x2)
-  {
-    double rl = computeWeightedNorm2(x1,x2);
-    double k = rl*rl;
-    return exp(-k/2);
-  };
-  double gradient(const vectord &x1, const vectord &x2,
-                  size_t component)
-  {
-    double rl = computeWeightedNorm2(x1,x2);
-    double k = rl*rl;
-    return exp(-k/2)*k;
-  };
-};
-
-
-/** \brief Square exponential (Gaussian) kernel. ARD version. */
-class SEArd: public ARDkernel
-{
-public:
-  int init(size_t input_dim)
-  {
-    n_params = input_dim;
-    n_inputs = input_dim;
-    return 0;
-  };
-
-  double operator()( const vectord &x1, const vectord &x2 )
-  {
-    double rl = computeWeightedNorm2(x1,x2);
-    double k = rl*rl;
-    return exp(-k/2);
-  };
-  
-  double gradient(const vectord &x1, const vectord &x2,
-                  size_t component)
-  {
-    double rl = computeWeightedNorm2(x1,x2);
-    double k = rl*rl;
-    double r = (x1(component) - x2(component))/params(component);
-    return exp(-k/2)*r*r;
-  };
-};
-
-
-//@}
+} //namespace bayesopt
 
 #endif
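
For reference, a minimal standalone usage sketch of one of the atomic kernels above (assuming vectord is the uBLAS double vector typedef from specialtypes.hpp):

    #include "kernel_atomic.hpp"

    using namespace bayesopt;

    MaternIso3 k;
    k.init(2);                      // two input dimensions
    vectord theta(1);
    theta(0) = 1.5;                 // single isotropic length-scale
    k.setHyperParameters(theta);    // asserts theta.size() == n_params

    vectord x1(2), x2(2);
    x1(0) = 0.0; x1(1) = 0.0;
    x2(0) = 1.0; x2(1) = 1.0;
    double cov = k(x1, x2);         // (1+r)*exp(-r), r = sqrt(3)*|x1-x2|/1.5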

File include/kernel_combined.hpp

 #include <boost/numeric/ublas/vector_proxy.hpp>
 #include "kernel_functors.hpp"
 
-/**\addtogroup KernelFunctions */
-//@{
+namespace bayesopt
+{
+  
+  /**\addtogroup KernelFunctions */
+  //@{
 
+  /** \brief Abstract class for combined kernel.
+   *  It allows combinations of other kernels (addition, product, etc.)
+   */
+  class CombinedKernel : public Kernel
+  {
+  public:
+    virtual int init(size_t input_dim, Kernel* left, Kernel* right)
+    {
+      n_inputs = input_dim;
+      this->left = left;
+      this->right = right;
+      return 0;
+    };
+    void setHyperParameters(const vectord &theta) 
+    {
+      using boost::numeric::ublas::subrange;
 
-/** \brief Abstract class for combined kernel.
- *  It allows combinations of other kernels (addition, product, etc.)
- */
-class CombinedKernel : public Kernel
-{
-public:
-  virtual int init(size_t input_dim, Kernel* left, Kernel* right)
-  {
-    n_inputs = input_dim;
-    this->left = left;
-    this->right = right;
-    return 0;
-  };
-  void setHyperParameters(const vectord &theta) 
-  {
-    using boost::numeric::ublas::subrange;
+      size_t n_lhs = left->nHyperParameters();
+      size_t n_rhs = right->nHyperParameters();
+      assert(theta.size() == n_lhs + n_rhs);
+      left->setHyperParameters(subrange(theta,0,n_lhs));
+      right->setHyperParameters(subrange(theta,n_lhs,n_lhs+n_rhs));
+    };
 
-    size_t n_lhs = left->nHyperParameters();
-    size_t n_rhs = right->nHyperParameters();
-    assert(theta.size() == n_lhs + n_rhs);
-    left->setHyperParameters(subrange(theta,0,n_lhs));
-    right->setHyperParameters(subrange(theta,n_lhs,n_lhs+n_rhs));
+    vectord getHyperParameters() 
+    {
+      using boost::numeric::ublas::subrange;
+
+      size_t n_lhs = left->nHyperParameters();
+      size_t n_rhs = right->nHyperParameters();
+      vectord par(n_lhs + n_rhs);
+      subrange(par,0,n_lhs) = left->getHyperParameters();
+      subrange(par,n_lhs,n_lhs+n_rhs) = right->getHyperParameters();
+      return par;
+    };
+
+    size_t nHyperParameters() 
+    {
+      size_t n_lhs = left->nHyperParameters();
+      size_t n_rhs = right->nHyperParameters();
+      return n_lhs + n_rhs;
+    };
+
+    virtual ~CombinedKernel()
+    {
+      delete left;
+      delete right;
+    };
+
+  protected:
+    Kernel* left;
+    Kernel* right;
   };
 
-  vectord getHyperParameters() 
+
+  /** \brief Sum of two kernels */
+  class KernelSum: public CombinedKernel
   {
-    using boost::numeric::ublas::subrange;
+  public:
+    double operator()(const vectord &x1, const vectord &x2)
+    {
+      return (*left)(x1,x2) + (*right)(x1,x2);
+    };
 
-    size_t n_lhs = left->nHyperParameters();
-    size_t n_rhs = right->nHyperParameters();
-    vectord par(n_lhs + n_rhs);
-    subrange(par,0,n_lhs) = left->getHyperParameters();
-    subrange(par,n_lhs,n_lhs+n_rhs) = right->getHyperParameters();
-    return par;
+    double gradient(const vectord &x1, const vectord &x2,
+		    size_t component)
+    {
+      return left->gradient(x1,x2,component) + right->gradient(x1,x2,component);
+    };
   };
 
-  size_t nHyperParameters() 
+
+  /** \brief Product of two kernels */
+  class KernelProd: public CombinedKernel
   {
-    size_t n_lhs = left->nHyperParameters();
-    size_t n_rhs = right->nHyperParameters();
-    return n_lhs + n_rhs;
+  public:
+    double operator()(const vectord &x1, const vectord &x2)
+    {
+      return (*left)(x1,x2) * (*right)(x1,x2);
+    };
+
+    double gradient(const vectord &x1, const vectord &x2,
+		    size_t component)
+    {
+      return 0.0; //TODO: Not implemented
+    };
   };
 
-  virtual ~CombinedKernel()
-  {
-    delete left;
-    delete right;
-  };
+  //@}
 
-protected:
-  Kernel* left;
-  Kernel* right;
-};
-
-
-/** \brief Sum of two kernels */
-class KernelSum: public CombinedKernel
-{
-public:
-  double operator()(const vectord &x1, const vectord &x2)
-  {
-    return (*left)(x1,x2) + (*right)(x1,x2);
-  };
-
-  double gradient(const vectord &x1, const vectord &x2,
-		  size_t component)
-  {
-    return left->gradient(x1,x2,component) + right->gradient(x1,x2,component);
-  };
-};
-
-
-/** \brief Product of two kernels */
-class KernelProd: public CombinedKernel
-{
-public:
-  double operator()(const vectord &x1, const vectord &x2)
-  {
-    return (*left)(x1,x2) * (*right)(x1,x2);
-  };
-
-  double gradient(const vectord &x1, const vectord &x2,
-		  size_t component)
-  {
-    return 0.0; //TODO: Not implemented
-  };
-};
-
-
-
-//@}
+} //namespace bayesopt
 
 #endif
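
A short sketch of composing kernels by hand with the classes above. Note that CombinedKernel deletes both children in its destructor, so they must be heap-allocated and initialized before being handed over (SEIso and ConstKernel come from kernel_atomic.hpp):

    #include "kernel_atomic.hpp"
    #include "kernel_combined.hpp"

    using namespace bayesopt;

    Kernel* lhs = new SEIso();       lhs->init(2);
    Kernel* rhs = new ConstKernel(); rhs->init(2);
    Kernel* sum = new KernelSum();
    sum->init(2, lhs, rhs);          // sum takes ownership of lhs and rhs

    vectord theta(2);
    theta(0) = 1.5;                  // SEIso length-scale (left block)
    theta(1) = 0.1;                  // ConstKernel value (right block)
    sum->setHyperParameters(theta);  // split internally via subrange

    delete sum;                      // also deletes lhs and rhs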

File include/kernel_functors.hpp

 #include "parameters.h"
 #include "specialtypes.hpp"
 
-/**\addtogroup KernelFunctions
- * \brief Set of kernel or covariance functions for the nonparametric
- * processes.
- */
-//@{
+namespace bayesopt
+{
+  
+  /**\addtogroup KernelFunctions
+   * \brief Set of kernel or covariance functions for the nonparametric
+   * processes.
+   */
+  //@{
 
-/** \brief Interface for kernel functors */
-class Kernel
-{
-public:
-  virtual int init(size_t input_dim) {return 0;};
-  virtual int init(size_t input_dim, Kernel* left, Kernel* right) {return 0;};
+  /** \brief Interface for kernel functors */
+  class Kernel
+  {
+  public:
+    virtual int init(size_t input_dim) {return 0;};
+    virtual int init(size_t input_dim, Kernel* left, Kernel* right) {return 0;};
 
-  virtual void setHyperParameters(const vectord &theta) = 0;
-  virtual vectord getHyperParameters() = 0;
-  virtual size_t nHyperParameters() = 0;
+    virtual void setHyperParameters(const vectord &theta) = 0;
+    virtual vectord getHyperParameters() = 0;
+    virtual size_t nHyperParameters() = 0;
 
-  virtual double operator()( const vectord &x1, const vectord &x2 ) = 0;
-  virtual double gradient( const vectord &x1, const vectord &x2,
-			   size_t component ) = 0;
-  virtual ~Kernel(){};
+    virtual double operator()( const vectord &x1, const vectord &x2 ) = 0;
+    virtual double gradient( const vectord &x1, const vectord &x2,
+			     size_t component ) = 0;
+    virtual ~Kernel(){};
 
-protected:
-  size_t n_inputs;
-};
+  protected:
+    size_t n_inputs;
+  };
 
-template <typename KernelType> Kernel * create_func()
-{
-  return new KernelType();
-}
+  template <typename KernelType> Kernel * create_func()
+  {
+    return new KernelType();
+  }
 
 
-/** 
- * \brief Factory model for kernel functions
- * This factory is based on the libgp library by Manuel Blum
- *      https://bitbucket.org/mblum/libgp
- * which follows the squeme of GPML by Rasmussen and Nickisch
- *     http://www.gaussianprocess.org/gpml/code/matlab/doc/
- */
-class KernelFactory
-{
-public:
-  KernelFactory ();
-  virtual ~KernelFactory () {};
+  /** 
+   * \brief Factory pattern for kernel functions.
+   * This factory is based on the libgp library by Manuel Blum
+   *      https://bitbucket.org/mblum/libgp
+   * which follows the scheme of GPML by Rasmussen and Nickisch
+   *     http://www.gaussianprocess.org/gpml/code/matlab/doc/
+   */
+  class KernelFactory
+  {
+  public:
+    KernelFactory ();
+    virtual ~KernelFactory () {};
   
-  Kernel* create(kernel_name name, size_t input_dim);
-  Kernel* create(std::string name, size_t input_dim);
+    Kernel* create(kernel_name name, size_t input_dim);
+    Kernel* create(std::string name, size_t input_dim);
     
-private:
-  typedef Kernel* (*create_func_definition)();
-  std::map<std::string , KernelFactory::create_func_definition> registry;
-};
+  private:
+    typedef Kernel* (*create_func_definition)();
+    std::map<std::string , KernelFactory::create_func_definition> registry;
+  };
 
+  //@}
 
-//@}
+} //namespace bayesopt
+
 
 #endif
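
A minimal usage sketch of the string-based factory declared above (the NULL check mirrors the error path implemented in kernel_functors.cpp below):

    #include "kernel_functors.hpp"

    using namespace bayesopt;

    KernelFactory factory;
    Kernel* k = factory.create("kSum(kSEISO,kConst)", 2);
    if (k == NULL)
      {
        // name not found in the registry; error already logged
      }
    else
      {
        // ... evaluate with (*k)(x1,x2) ...
        delete k;
      }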

File include/parameters.h

 
   /** \brief Configuration parameters */
   typedef struct {
+
     size_t n_iterations;         /**< Maximum BayesOpt evaluations (budget) */
     size_t n_inner_iterations;   /**< Maximum inner optimizer evaluations */
     size_t n_init_samples;       /**< Number of samples before optimization */
-    size_t verbose_level;        /**< Verbose level if <3 stderr, else log file*/
-    char* log_filename;          /**< Filename of the log file (if applicable) */
-    double theta[128];           /**< Kernel hyperparameters (mean) */
-    double s_theta[128];         /**< Kernel hyperparameters (std) */
+
+    size_t verbose_level;        /**< 1=Error, 2=Warning, 3=Info; 4-6 same, to log file */
+    char* log_filename;          /**< Log file path (if applicable) */
+
+    double theta[128];           /**< Kernel hyperparameters prior (mean) */
+    double s_theta[128];         /**< Kernel hyperparameters prior (std) */
     size_t n_theta;              /**< Number of kernel hyperparameters */
+
     double mu[128];              /**< Mean function hyperparameters */
-    size_t n_mu;                 /**< Number of mean function hyperparameters */
-    double alpha, beta, delta;   /**< Inv-Gamma-Normal hyperparameters */
-    double noise;                /**< Observation noise */
+    size_t n_mu;                 /**< Number of mean funct. hyperparameters */
+
+    double alpha;                /**< Inverse Gamma prior for signal var */
+    double beta;                 /**< Inverse Gamma prior for signal var */
+    double delta;                /**< Normal prior for mean hyperparameters */
+    double noise;                /**< Observation noise (and nugget) */
+
     surrogate_name s_name;       /**< Name of the surrogate function */
-    kernel_name k_name;          /**< Name of the kernel function */
+    kernel_name k_name;          /**< Name of the kernel funct. -DEPRECATED- */
     char* k_s_name;              /**< Name of the kernel function */
     criterium_name c_name;       /**< Name of the criterion */
-    mean_name m_name;            /**< Name of the mean function */
+    mean_name m_name;            /**< Name of the mean funct. -DEPRECATED- */
     char* m_s_name;              /**< Name of the mean function */
+
   } bopt_params;
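
A small configuration sketch using the fields documented above. The level-to-destination mapping is taken from the verbose_level comment; treating levels 4-6 as the file-logged counterparts of 1-3 is an assumption drawn from that comment.

    bopt_params par = initialize_parameters_to_default();
    par.verbose_level = 2;               /* warnings and errors to stderr      */
    /* or, to send the same output to a file instead (assumed mapping): */
    par.verbose_level = 5;               /* warning level, written to the file */
    par.log_filename  = "bayesopt.log";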
 
 

File src/criteria_functors.cpp

       }
   };
 
+
+  // Criteria* CriteriaFactory::create(std::string name,
+  // 				    NonParametricProcess* proc)
+  // {
+  //   Criteria *cFunc;
+  //   size_t pos = name.find("(");         // position of "(" in str
+  //   if (pos == std::string::npos)           // Atomic criterion
+  //     {
+  // 	std::map<std::string,CriteriaFactory::create_func_definition>::iterator it = registry.find(name);
+  //     }
+
+  //   if (it == registry.end()) 
+  //     {
+  // 	FILE_LOG(logERROR) << "Error: Fatal error while parsing "
+  // 			   << "criteria function: " << os.str() 
+  // 			   << " not found" << std::endl;
+  // 	return NULL;
+  //     } 
+  //   cFunc = it->second();
+    
+  // std::string str3 = name.substr (pos);     // get from pos to the end
+  // }
+
 } //namespace bayesopt

File src/gaussian_process.cpp

 
     vectord alpha(mGPY-mMeanV);
     inplace_solve(L,alpha,ublas::lower_tag());
-    double loglik = .5*ublas::inner_prod(alpha,alpha) + log_trace(L);
+    double loglik = .5*ublas::inner_prod(alpha,alpha) + utils::log_trace(L);
     return loglik;
   }
 

File src/gaussian_process_ign.cpp

     inplace_solve(L,alphY,lower_tag());
 
     double lik1 = inner_prod(alphY,alphY) / (2*sigma); 
-    double lik2 = log_trace(L) + 0.5*n*log(sigma) + n*0.91893853320467; //log(2*pi)/2
+    double lik2 = utils::log_trace(L) + 0.5*n*log(sigma) + n*0.91893853320467; //log(2*pi)/2
   
     //TODO: This must be wrong.
     size_t index = 0;

File src/kernel_functors.cpp

 #include "kernel_atomic.hpp"
 #include "kernel_combined.hpp"
 
+namespace bayesopt
+{
 
-KernelFactory::KernelFactory()
-{
-  registry["kConst"] = & create_func<ConstKernel>;
-  registry["kLinear"] = & create_func<LinKernel>;
-  registry["kLinearARD"] = & create_func<LinKernelARD>;
+  KernelFactory::KernelFactory()
+  {
+    registry["kConst"] = & create_func<ConstKernel>;
+    registry["kLinear"] = & create_func<LinKernel>;
+    registry["kLinearARD"] = & create_func<LinKernelARD>;
 
-  registry["kMaternISO1"] = & create_func<MaternIso1>;
-  registry["kMaternISO3"] = & create_func<MaternIso3>;
-  registry["kMaternISO5"] = & create_func<MaternIso5>;
-  registry["kMaternARD1"] = & create_func<MaternARD1>;
-  registry["kMaternARD3"] = & create_func<MaternARD3>;
-  registry["kMaternARD5"] = & create_func<MaternARD5>;
+    registry["kMaternISO1"] = & create_func<MaternIso1>;
+    registry["kMaternISO3"] = & create_func<MaternIso3>;
+    registry["kMaternISO5"] = & create_func<MaternIso5>;
+    registry["kMaternARD1"] = & create_func<MaternARD1>;
+    registry["kMaternARD3"] = & create_func<MaternARD3>;
+    registry["kMaternARD5"] = & create_func<MaternARD5>;
   
-  registry["kSEARD"] = & create_func<SEArd>;
-  registry["kSEISO"] = & create_func<SEIso>;
+    registry["kSEARD"] = & create_func<SEArd>;
+    registry["kSEISO"] = & create_func<SEIso>;
 
-  registry["kSum"] = & create_func<KernelSum>;
-  registry["kProd"] = & create_func<KernelProd>;
-}
+    registry["kSum"] = & create_func<KernelSum>;
+    registry["kProd"] = & create_func<KernelProd>;
+  }
 
 
-/** \brief Factory method for kernels. */
-Kernel* KernelFactory::create(kernel_name name, size_t input_dim)
-{
-  Kernel* k_ptr;
+  /** \brief Factory method for kernels. */
+  Kernel* KernelFactory::create(kernel_name name, size_t input_dim)
+  {
+    Kernel* k_ptr;
 
-  switch(name)
-    {
-    case K_MATERN_ISO1: k_ptr = new MaternIso1(); break;
-    case K_MATERN_ISO3: k_ptr = new MaternIso3(); break;
-    case K_MATERN_ISO5: k_ptr = new MaternIso5(); break;
-    case K_SE_ISO: k_ptr = new SEIso(); break;
-    case K_SE_ARD: k_ptr = new SEArd(); break;
-    default:
-      FILE_LOG(logERROR) << "Error: kernel function not supported.";
-      return NULL;
-    }
+    switch(name)
+      {
+      case K_MATERN_ISO1: k_ptr = new MaternIso1(); break;
+      case K_MATERN_ISO3: k_ptr = new MaternIso3(); break;
+      case K_MATERN_ISO5: k_ptr = new MaternIso5(); break;
+      case K_SE_ISO: k_ptr = new SEIso(); break;
+      case K_SE_ARD: k_ptr = new SEArd(); break;
+      default:
+	FILE_LOG(logERROR) << "Error: kernel function not supported.";
+	return NULL;
+      }
 
-  k_ptr->init(input_dim);
-  return k_ptr;
-};
+    k_ptr->init(input_dim);
+    return k_ptr;
+  };
 
-/** 
- * \brief Factory model for kernel functions
- * This function is based on the libgp library by Manuel Blum
- *      https://bitbucket.org/mblum/libgp
- * which follows the squeme of GPML by Rasmussen and Nickisch
- *     http://www.gaussianprocess.org/gpml/code/matlab/doc/
- * @param name string with the kernel structure
- * @param imput_dim number of input dimensions
- * @return kernel pointer
- */
-Kernel* KernelFactory::create(std::string name, size_t input_dim)
-{
-  Kernel *kFunc;
-  std::stringstream is(name);
-  std::stringstream os(std::stringstream::out);
-  std::stringstream os1(std::stringstream::out);
-  std::stringstream os2(std::stringstream::out);
-  char c;
-  int i = 0, j = 0;
-  while (is >> c) 
-    {
-      if (c == '(') i++;
-      else if (c == ')') i--;
-      else if (c == ',') j++;
-      else 
-	{
-	  if (i == 0) os << c;
-	  else if (j == 0) os1 << c;
-	  else os2 << c;
-	}
-    }
-  std::map<std::string,KernelFactory::create_func_definition>::iterator it = registry.find(os.str());
-  if (it == registry.end()) 
-    {
-      FILE_LOG(logERROR) << "Error: Fatal error while parsing kernel function: " << os.str() << " not found" << std::endl;
-      return NULL;
-    } 
-  kFunc = registry.find(os.str())->second();
-  if (os1.str().length() == 0 && os2.str().length() == 0) 
-    {
-      kFunc->init(input_dim);
-    } 
-  else 
-    {
-      kFunc->init(input_dim, create(os1.str(),input_dim), create(os2.str(),input_dim));
-    }
-  return kFunc;
+  /** 
+   * \brief Factory method for kernel functions.
+   * This function is based on the libgp library by Manuel Blum
+   *      https://bitbucket.org/mblum/libgp
+   * which follows the scheme of GPML by Rasmussen and Nickisch
+   *     http://www.gaussianprocess.org/gpml/code/matlab/doc/
+   * @param name string with the kernel structure
+   * @param input_dim number of input dimensions
+   * @return kernel pointer
+   */
+  Kernel* KernelFactory::create(std::string name, size_t input_dim)
+  {
+    Kernel *kFunc;
+    std::stringstream is(name);
+    std::stringstream os(std::stringstream::out);
+    std::stringstream os1(std::stringstream::out);
+    std::stringstream os2(std::stringstream::out);
+    char c;
+    int i = 0, j = 0;
+    while (is >> c) 
+      {
+	if (c == '(') i++;
+	else if (c == ')') i--;
+	else if ((i == 1) && (c == ',')) j++;
+	else 
+	  {
+	    if (i == 0) os << c;
+	    else if (j == 0) os1 << c;
+	    else os2 << c;
+	  }
+      }
 
-};
+    std::map<std::string,KernelFactory::create_func_definition>::iterator it = registry.find(os.str());
+    if (it == registry.end()) 
+      {
+	FILE_LOG(logERROR) << "Error: Fatal error while parsing "
+			   << "kernel function: " << os.str() 
+			   << " not found" << std::endl;
+	return NULL;
+      } 
+    kFunc = it->second();
+    if (os1.str().length() == 0 && os2.str().length() == 0) 
+      {
+	kFunc->init(input_dim);
+      } 
+    else 
+      {
+	kFunc->init(input_dim, create(os1.str(),input_dim), create(os2.str(),input_dim));
+      }
+    return kFunc;
 
+  };
+
+} //namespace bayesopt
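
To make the new (i == 1) guard concrete, a worked trace of the splitter above on the expression used in app/bo_cont.cpp:

    // Parsing "kSum(kSEISO,kConst)" with the character loop above:
    //   'k''S''u''m'   i == 0          ->  os  = "kSum"
    //   '('            i: 0 -> 1       (consumed, not stored)
    //   'k'...'O'      i == 1, j == 0  ->  os1 = "kSEISO"
    //   ','            i == 1          ->  j = 1  (the split point)
    //   'k'...'t'      i == 1, j == 1  ->  os2 = "kConst"
    //   ')'            i: 1 -> 0       (consumed, not stored)
    // Only a comma at depth 1 now advances j, so a comma nested inside an
    // argument stays part of os1/os2 instead of ending the first argument
    // early, as the old unconditional (c == ',') test did.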

File src/student_t_process.cpp

     // Obtained from SEQUENTIAL DESIGN OF COMPUTER EXPERIMENTS
     // TO MINIMIZE INTEGRATED RESPONSE FUNCTIONS
     // http://www3.stat.sinica.edu.tw/statistica/oldpdf/A10n46.pdf
-    double negloglik = log_trace(L) + 0.5*( (n-1)*log(sigma) + log(eta) );
+    double negloglik = utils::log_trace(L) + 0.5*( (n-1)*log(sigma) + log(eta) );
 
     return negloglik;
   }

File utils/elementwise_ublas.hpp

 #define  _ELEMENTWISE_UBLAS_HPP_
 
 // BOOST Libraries
+#include <algorithm>
 #include <boost/numeric/ublas/vector.hpp>
-#include <algorithm>
 
-/** 
- * Computes the elementwise product of two vectors or matrices.
- * 
- * c_i = a_i * b_i
- *
- */
-template <class v1, class v2>
-v1 ublas_elementwise_prod(const v1& a, const v2& b)
+namespace bayesopt
 {
-  typedef typename v1::value_type D;
-  v1 c(a.size());
-  std::transform(a.begin(),a.end(),b.begin(),c.begin(),std::multiplies<D>());
-  return c;
-}
+  namespace utils
+  {
 
-/** 
- * Computes the elementwise division of two vectors or matrices.
- * 
- * c_i = a_i / b_i
- *
- */
-template <class v1, class v2>
-v1 ublas_elementwise_div(const v1& a, const v2& b)
-{
-  typedef typename v1::value_type D;
-  v1 c(a.size());
-  std::transform(a.begin(),a.end(),b.begin(),c.begin(),std::divides<D>());
-  return c;
-}
+    /** 
+     * Computes the elementwise product of two vectors or matrices.
+     *             c_i = a_i * b_i
+     */
+    template <class v1, class v2>
+    v1 ublas_elementwise_prod(const v1& a, const v2& b)
+    {
+      typedef typename v1::value_type D;
+      v1 c(a.size());
+      std::transform(a.begin(),a.end(),b.begin(),c.begin(),std::multiplies<D>());
+      return c;
+    }
+
+    /** 
+     * Computes the elementwise division of two vectors or matrices.
+     *            c_i = a_i / b_i
+     */
+    template <class v1, class v2>
+    v1 ublas_elementwise_div(const v1& a, const v2& b)
+    {
+      typedef typename v1::value_type D;
+      v1 c(a.size());
+      std::transform(a.begin(),a.end(),b.begin(),c.begin(),std::divides<D>());
+      return c;
+    }
+
+  } //namespace utils
+} //namespace bayesopt
 
 #endif
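
A short usage sketch, matching how ARDkernel::computeWeightedNorm2 in kernel_atomic.hpp scales a difference vector by per-dimension length-scales:

    vectord x1(2), x2(2), ls(2);
    x1(0) = 0.0; x1(1) = 0.0;
    x2(0) = 1.0; x2(1) = 1.0;
    ls(0) = 1.5; ls(1) = 0.5;            // per-dimension length-scales
    vectord xd = x1 - x2;
    vectord r  = bayesopt::utils::ublas_elementwise_div(xd, ls);
    double  nr = boost::numeric::ublas::norm_2(r);   // weighted norm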

File utils/log.hpp

 #include <string>
 #include <cstdio>
 
+
 inline std::string NowTime();
 
 enum TLogLevel {logERROR, logWARNING, logINFO, logDEBUG, 

File utils/trace_ublas.hpp

 #include <boost/numeric/ublas/matrix.hpp>
 #include <boost/numeric/ublas/matrix_proxy.hpp>
 
-template<class E>
-typename E::value_type trace(const E &A)
+namespace bayesopt
 {
-  const size_t n = std::min(A.size1(),A.size2());
-  typename E::value_type sum = 0;
-  for (size_t i=0; i<n; ++i)
-    sum += A(i,i);
+  namespace utils
+  {
 
-  return sum; 
+    template<class E>
+    typename E::value_type trace(const E &A)
+    {
+      const size_t n = std::min(A.size1(),A.size2());
+      typename E::value_type sum = 0;
+      for (size_t i=0; i<n; ++i)
+	sum += A(i,i);
+
+      return sum; 
+    }
+
+    template<class E>
+    typename E::value_type log_trace(const E &A)
+    {
+      const size_t n = std::min(A.size1(),A.size2());
+      typename E::value_type sum = 0;
+      for (size_t i=0; i<n; ++i)
+	sum += log(A(i,i));
+
+      return sum; 
+    }
+
+
+    template<class E1, class E2>
+    typename E1::value_type trace_prod(const E1 & A, const E2 & B )
+    {
+      namespace ublas = boost::numeric::ublas;
+
+      const size_t n = std::min(A.size1(),B.size2());
+      typename E1::value_type sum = 0;
+      for (size_t i=0; i<n; ++i)
+	sum += ublas::inner_prod(ublas::row(A,i),ublas::column(B,i));
+
+      return sum; 
+    }
+    
+  }
 }
 
-template<class E>
-typename E::value_type log_trace(const E &A)
-{
-  const size_t n = std::min(A.size1(),A.size2());
-  typename E::value_type sum = 0;
-  for (size_t i=0; i<n; ++i)
-    sum += log(A(i,i));
-
-  return sum; 
-}
-
-
-template<class E1, class E2>
-typename E1::value_type trace_prod(const E1 & A, const E2 & B )
-{
-  namespace ublas = boost::numeric::ublas;
-
-  const size_t n = std::min(A.size1(),B.size2());
-  typename E1::value_type sum = 0;
-  for (size_t i=0; i<n; ++i)
-    sum += ublas::inner_prod(ublas::row(A,i),ublas::column(B,i));
-
-  return sum; 
-}
-
 
 
 #endif
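
For context on why log_trace appears in the likelihood code of gaussian_process.cpp and student_t_process.cpp above: given a Cholesky factorization K = L L^T,

    // log det(K) = log( det(L) * det(L^T) )
    //            = 2 * sum_i log( L(i,i) )
    //            = 2 * log_trace(L)
    // so the (1/2)*log det(K) term of the Gaussian log-likelihood is
    // exactly utils::log_trace(L), which is the quantity added in those
    // negative log-likelihood expressions.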