Commits

Ruben Martinez-Cantin committed 53d686d

Student T Process and polymorphic interface working smoothly

  • Parent commits d0e8f98

Files changed (18)

   ./src/inneroptimization.cpp
   ./src/gaussprocess.cpp
   ./src/basicgaussprocess.cpp
+  ./src/studenttprocess.cpp
   ./wrappers/krigwpr.cpp
   )
 
 #include "krigwpr.h"
 #include "krigging.hpp"
 
+#include "gaussprocess.hpp"
+#include "basicgaussprocess.hpp"
+#include "studenttprocess.hpp"
 
 double testFunction(unsigned int n, double *x,
 		    double *gradient, /* NULL if not needed */
 {
  public:
 
-  TestEGO( gp_params params, size_t nIter):
-    SKO(params,nIter) {}
+  TestEGO( gp_params params, size_t nIter, 
+	   NonParametricProcess* gp):
+    SKO(params,nIter, gp) {}
 
   double evaluateSample( const vectord &Xi ) 
   {
 
   int nIterations = 600;
 
+  criterium_name c_name = c_ei;
   gp_params par;
 
   par.theta = KERNEL_THETA;
   par.noise = DEF_REGULARIZER;
   
   std::cout << "Running C++ interface" << std::endl;
-  TestEGO gp(par,nIterations);
+  NonParametricProcess* gp = new StudentTProcess(par.theta,par.noise);
+  TestEGO gp_opt(par,nIterations,gp);
+
   start = clock();
-  gp.optimize(result);
+  gp_opt.setCriteria(c_name);
+  gp_opt.optimize(result);
   end = clock();
   diff = (double)(end-start) / (double)CLOCKS_PER_SEC;
 
   }
   start = clock();
 
-  krigging_optimization(n,&testFunction,NULL,l,u,x,fmin,nIterations,par,1,0);
+  krigging_optimization(n,&testFunction,NULL,l,u,x,fmin,
+			nIterations,par,c_name,s_studentTProcess);
 
   end = clock();
   diff2 = (double)(end-start) / (double)CLOCKS_PER_SEC;
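
To summarize the new C++ calling convention, here is a minimal standalone sketch (assuming the headers and constants shown above; TestEGO is the test subclass of SKO defined earlier, timing and error handling are omitted, and the query dimension is illustrative):

    #include "krigging.hpp"          // SKO
    #include "studenttprocess.hpp"   // StudentTProcess

    gp_params par;
    par.theta = KERNEL_THETA;
    par.noise = DEF_REGULARIZER;

    // Any NonParametricProcess subclass can be plugged into the optimizer.
    NonParametricProcess* gp = new StudentTProcess(par.theta, par.noise);
    TestEGO opt(par, 600, gp);
    opt.setCriteria(c_ei);           // criterium_name from ctypes.h

    vectord result(2);               // query dimension (illustrative)
    opt.optimize(result);
    delete gp;                       // assuming SKO does not take ownership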
     return total
 
 params = {"theta": 0.11, "p": 1.6, "alpha": 1.0, "beta": 0.1, "delta": 10.0, "noise": 0.001}
+crit = "ei"
+surr = "gp"
 
 n = 6
 lb = np.zeros((n,))
 out = testfunc(x)
 print out
 
-mvalue, x_out, error = kp.optimize(testfunc, n, lb, ub, x, 300, params, 1, 0)
+mvalue, x_out, error = kp.optimize(testfunc, n, lb, ub, x, 300, params, crit, surr)
 print x_out
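
Note that the last two Python arguments, previously the integer flags useEI and use_cooling_scheme, are now the strings crit and surr; the wrapper maps them to the criterium_name and surrogate_name enums via find_criterium and find_surrogate (see python/bayesopt.cpp below).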

include/basicgaussprocess.hpp

 #include "nonparametricprocess.hpp"
 #include "kernels.hpp"
 #include "meanfuncs.hpp"
-#include "inneroptimization.hpp"
  
 /** \addtogroup BayesOptimization */
 /*@{*/
 
 
-class BasicGaussianProcess: public NonParametricProcess, 
-			    public InnerOptimization 
+class BasicGaussianProcess: public NonParametricProcess
 {
 public:
   BasicGaussianProcess( double theta = KERNEL_THETA, 
 			       size_t index = 1);			 
 			 
 
-  /** Computes the GP based on mGPXX
-   *  This function is hightly inefficient O(N^3). Use it only at 
-   *  the begining.
-   *
-   *  It also computes the kernel hyperparameters.
-   */
-  int fitGP();
-
-
-  /** Add new point efficiently using Matrix Decomposition Lemma
-   *  for the inversion of the correlation matrix. Maybe it is faster
-   *  to just construct and invert a new matrix each time.
-   */   
-  int addNewPointToGP( const vectord &Xnew,
-		       double Ynew);
-
-  inline double getTheta()
-  { return mTheta; }
-
-  inline void setTheta( double theta )
-  { mTheta = theta; }
-
-  virtual double innerEvaluate(const vectord& query, 
-			      vectord& grad)
-  { 
-    setTheta(query(0));
-    return negativeLogLikelihood(grad(0),1);
-  }
-
 protected:
   inline double correlationFunction( const vectord &x1,const vectord &x2,
 				     size_t param_index = 0 )
   {return 1;};
 
 
-protected:
-  double mTheta;                      // Kernel parameters
-  const double mRegularizer;  // GP prior parameters (Normal)
-
 };
 
 

include/criteria.hpp

 #include <boost/math/special_functions/factorials.hpp>
 #include <boost/math/distributions/normal.hpp> // for normal_distribution
 
+#include "ctypes.h"
 #include "specialtypes.hpp"
 #include "randgen.hpp"
 
 using boost::math::factorial;
 using boost::math::normal; // the typedef's default type is double.
 
-enum criterium_name{
-  expectedImprovement,
-  lcb,
-  poi,
-  gp_hedge,
-  greedyAOptimality,
-  expectedReturn
-};
-
 
 class Criteria
 {
   Criteria():
     mtRandom(100u)
   {
-    criterium = expectedImprovement;
+    criterium = c_ei;
     resetAnnealValues();
     resetHedgeValues();
     eta = 100;
 
     switch (criterium)
       {
-      case expectedImprovement: return negativeExpectedImprovement(yPred,sPred,yMin);
-      case lcb: return lowerConfidenceBound(yPred,sPred);
-      case poi: return negativeProbabilityOfImprovement(yPred,sPred,yMin);
-      case greedyAOptimality: return sPred;
-      case expectedReturn: return yPred;
+      case c_ei: return negativeExpectedImprovement(yPred,sPred,yMin);
+      case c_lcb: return lowerConfidenceBound(yPred,sPred);
+      case c_poi: return negativeProbabilityOfImprovement(yPred,sPred,yMin);
+      case c_greedyAOptimality: return sPred;
+      case c_expectedReturn: return yPred;
       default: std::cout << "Error in criterium" << std::endl; return 0.0;
       }
 
     double u = sample();
 
     if (u < p_ei)
-      return expectedImprovement;
+      return c_ei;
     else if (u < p_lcb+p_ei)
-      return lcb;
+      return c_lcb;
     else
-    return poi;
+      return c_poi;
   }
 
 protected:
+/*
+-----------------------------------------------------------------------------
+   This file is part of BayesOptimization, an efficient C++ library for 
+   Bayesian optimization.
+
+   Copyright (C) 2011 Ruben Martinez-Cantin <rmcantin@unizar.es>
+ 
+   BayesOptimization is free software: you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation, either version 3 of the License, or
+   (at your option) any later version.
+
+   BayesOptimization is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with BayesOptimization.  If not, see <http://www.gnu.org/licenses/>.
+-----------------------------------------------------------------------------
+*/
+
+#ifndef __CTYPES_H__
+#define __CTYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif 
+
+  typedef struct {
+    double theta;
+    double alpha;
+    double beta;
+    double delta;
+    double noise;
+  } gp_params;
+
+  typedef enum {  
+    c_ei,
+    c_lcb,
+    c_poi,
+    c_gp_hedge,
+    c_greedyAOptimality,
+    c_expectedReturn
+  } criterium_name;
+
+  typedef enum {  
+    s_gaussianProcess,
+    s_gaussianProcessHyperPriors,
+    s_studentTProcess
+  } surrogate_name;
+
+#ifdef __cplusplus
+}
+#endif 
+
+
+#endif
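
ctypes.h gathers the plain-C types (gp_params, criterium_name, surrogate_name) in one place, so the C++ headers (krigging.hpp, criteria.hpp), the C wrapper and the Python module now share a single definition of the criteria and surrogate selectors instead of the old integer flags.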

include/gaussprocess.hpp

 #include "nonparametricprocess.hpp"
 #include "kernels.hpp"
 #include "meanfuncs.hpp"
-#include "inneroptimization.hpp"	
  
 /** \addtogroup BayesOptimization */
 /*@{*/
 
 
-class GaussianProcess: public NonParametricProcess, 
-		       public InnerOptimization
+class GaussianProcess: public NonParametricProcess
 {
 public:
   GaussianProcess( double theta = KERNEL_THETA,
 		   double alpha = PRIOR_ALPHA, 
 		   double beta  = PRIOR_BETA, 
 		   double delta = PRIOR_DELTA_SQ );
+
   virtual ~GaussianProcess();
 
   /** 
 			       size_t index = 1);			 
 			 		 
 
-  /** Computes the GP based on mGPXX
-   *  This function is hightly inefficient O(N^3). Use it only at 
-   *  the begining
-   */
-  int fitGP();
-
-
-
-  /** Add new point efficiently using Matrix Decomposition Lemma
-   *  for the inversion of the correlation matrix. Maybe it is faster
-   *  to just construct and invert a new matrix each time.
-   */   
-  int addNewPointToGP( const vectord &Xnew,
-		       double Ynew);
-
-  inline double getTheta()
-  { return mTheta; }
-
-  inline void setTheta( double theta )
-  { mTheta = theta; }
-
-  virtual double innerEvaluate(const vectord& query, 
-			      vectord& grad)
-  { 
-    setTheta(query(0));
-    return negativeLogLikelihood(grad(0),1);
-  }
-
 protected:
   inline double correlationFunction( const vectord &x1,const vectord &x2,
 				     size_t param_index = 0 )
 
 
 protected:
-  double mTheta, mP;                  // Kernel parameters
   const double mAlpha, mBeta;         // GP prior parameters (Inv-Gamma)
-  const double mDelta2,mRegularizer;  // GP prior parameters (Normal)
+  const double mDelta2;               // GP prior parameters (Normal)
 
   double mMu, mSig;                   // GP posterior parameters
 

include/krigging.hpp

 #include "elementwiseUblas.hpp"
 
 #include "inneroptimization.hpp"
-#include "gaussprocess.hpp"
+#include "nonparametricprocess.hpp"
 
-#include "krigwpr.h"
+#include "ctypes.h"
 #include "criteria.hpp"
 
 /** \addtogroup BayesOptimization */
    */
   SKO( double theta = KERNEL_THETA, double noise = DEF_REGULARIZER,
        size_t nIter = MAX_ITERATIONS, double alpha = PRIOR_ALPHA, 
-       double beta = PRIOR_BETA, double delta = PRIOR_DELTA_SQ); 
+       double beta = PRIOR_BETA, double delta = PRIOR_DELTA_SQ,
+       NonParametricProcess* gp = NULL); 
 
   /** 
    * Constructor
    *     noise        observation noise
    * @param nIter        number of iterations before stopping 
    */
-  SKO( gp_params params,
-       size_t nIter); 
+  SKO( gp_params params, size_t nIter = MAX_ITERATIONS,
+       NonParametricProcess* gp = NULL); 
 	
   /** 
    * Default destructor
   { return 0.0; };
 
   /** 
+   * Chooses which criterion to optimize in the inner loop.
+   * 
+   * @param c criterium_name value
+   */
+  void setCriteria (criterium_name c)
+  {crit_name = c;}
+  
+  /** 
   * This function checks if the query is valid or not. It can be used 
   * to introduce arbitrary constraints. Since the Gaussian process 
   * assumes smoothness, constraints are managed by DIRECT, being highly
 protected:
 
   // Member variables
-  GaussianProcess mGP;
+  NonParametricProcess* mGP;
   Criteria crit;
   criterium_name crit_name;
   

include/nonparametricprocess.hpp

 
 #include "specialtypes.hpp"
 #include "cholesky.hpp"
+#include "inneroptimization.hpp"	
 
 
-class NonParametricProcess
+class NonParametricProcess: public InnerOptimization
 {
 public:
-  NonParametricProcess()
+  NonParametricProcess(double theta = KERNEL_THETA,
+		       double noise = DEF_REGULARIZER):
+    InnerOptimization(),  mTheta(theta), mRegularizer(noise)
   { mMinIndex = 0; mMaxIndex = 0; }
   
   virtual ~NonParametricProcess(){ }
    */	
   virtual int prediction(const vectord &query,
 			 double& yPred, double& sPred)
-  {return 1;}
+  {return 1;}  
 
   /** 
+   * Computes the negative log likelihood of the data and its gradient.
+   * 
+   * @param grad gradient of the negative log likelihood
+   * @param index index of the parameter being optimized
+   * 
+   * @return value of the negative log likelihood
+   */
+  virtual double negativeLogLikelihood(double& grad,
+				       size_t index = 1)
+  {return 0.0;}
+			 		 
+  /** 
    *  Computes the GP based on mGPXX
   *  This function is highly inefficient O(N^3). Use it only at 
   *  the beginning
    * 
    * @return error code
    */
-  virtual int fitGP()
-  { return 1;}
+  int fitGP()
+  {
+    size_t nSamples = mGPXX.size();
+    for (size_t ii=0; ii<nSamples; ii++)
+      checkBoundsY(ii);
+  
+    vectord th = svectord(1,mTheta);  
 
+    std::cout << "Initial theta: " << mTheta << " "<<th.size()<< std::endl;
+    innerOptimize(th);
+    setTheta(th(0));
+    std::cout << "Final theta: " << mTheta << std::endl;
+
+    int error = computeInverseCorrMatrix(mRegularizer);
+
+    if (error < 0)
+      return error;
+
+    return precomputeGPParams();
+  } // fitGP
+
+  
   /** 
    *  Add new point efficiently using Matrix Decomposition Lemma
    *  for the inversion of the correlation matrix. Maybe it is faster
    * 
    * @return error code
    */   
-  virtual int addNewPointToGP( const vectord &Xnew,
-			       double Ynew)
-  {return 1;}
+  int addNewPointToGP( const vectord &Xnew,
+		       double Ynew)
+  {
+    size_t nSamples = mGPXX.size();
+    size_t XDim = mGPXX[1].size();
+  
+    vectord Li(nSamples);
+    vectord wInvR(nSamples);
+    double wInvRw;
+    double selfCorrelation, Ni;
+  
+    if (XDim != Xnew.size())
+      {
+	std::cout << "Dimensional Error" << std::endl;
+	return -1;
+      }
+    
+    vectord correlationNewValue = computeCrossCorrelation(Xnew);
+  
+    selfCorrelation = correlationFunction(Xnew, Xnew) + mRegularizer;
+  
+    noalias(wInvR) = prod(correlationNewValue,mInvR);
+    wInvRw = inner_prod(wInvR,correlationNewValue);
+    Ni = 1/(selfCorrelation - wInvRw);  // inverse of the Schur complement
+    noalias(Li) = -Ni * wInvR;
+    mInvR += outer_prod(Li,Li) / Ni;
+  
+    //TODO: There must be a better way to do this.
+    mInvR.resize(nSamples+1,nSamples+1);
+  
+    Li.resize(nSamples+1);
+    Li(nSamples) = Ni;
+  
+    row(mInvR,nSamples) = Li;
+    column(mInvR,nSamples) = Li;
+
+    addSample(Xnew,Ynew);
+    checkBoundsY(nSamples);
+  
+    return precomputeGPParams();
+  } // addNewPointToGP
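
For reference, the rank-one update above is the standard partitioned-inverse (matrix inversion lemma) identity. Writing R for the current correlation matrix, r for the cross-correlation vector (correlationNewValue) and \rho for selfCorrelation:

    \begin{pmatrix} R & r \\ r^\top & \rho \end{pmatrix}^{-1}
    = \begin{pmatrix} R^{-1} + N\,(R^{-1}r)(R^{-1}r)^\top & -N\,R^{-1}r \\
                      -N\,(R^{-1}r)^\top & N \end{pmatrix},
    \qquad N = \frac{1}{\rho - r^\top R^{-1} r},

which maps onto the code as wInvR = r^T R^{-1}, Ni = N, Li = -N R^{-1} r, with the outer_prod correction to mInvR giving the top-left block.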
 
 
   // Getters and setters
   inline double getValueAtMinimum()
   { return mGPY(mMinIndex); }
 
+  inline double getTheta()
+  { return mTheta; }
+
+  inline void setTheta( double theta )
+  { mTheta = theta; }
+
+  inline double innerEvaluate(const vectord& query, 
+			      vectord& grad)
+  { 
+    setTheta(query(0));
+    return negativeLogLikelihood(grad(0),1);
+  }
 
 protected:
   virtual double correlationFunction( const vectord &x1,const vectord &x2, 
   covMatrix mInvR;                   // Inverse Correlation matrix
 
   size_t mMinIndex, mMaxIndex;
+
+  double mTheta;                      // Kernel parameters
+  const double mRegularizer;  // GP prior parameters (Normal)
+
 };
 
 #endif
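
With fitGP and addNewPointToGP hoisted into NonParametricProcess, a new surrogate only supplies the model-specific pieces. A hypothetical minimal subclass as a sketch (the name MyProcess and the stub bodies are illustrative; the overridden signatures are those declared above):

    #include "nonparametricprocess.hpp"
    #include "kernels.hpp"
    #include "meanfuncs.hpp"

    class MyProcess: public NonParametricProcess
    {
    public:
      MyProcess(double theta = KERNEL_THETA,
                double noise = DEF_REGULARIZER):
        NonParametricProcess(theta, noise) {}

      // Posterior mean and spread at a query point (stub).
      int prediction(const vectord &query, double& yPred, double& sPred)
      { yPred = meanFunction(query); sPred = 1.0; return 1; }

      // Marginal likelihood minimized via fitGP()/innerOptimize()
      // to fit mTheta (stub).
      double negativeLogLikelihood(double& grad, size_t index = 1)
      { grad = 0.0; return 0.0; }

    protected:
      // Any positive-definite kernel; Matern 3 as in the processes above.
      double correlationFunction(const vectord &x1, const vectord &x2,
                                 size_t param_index = 0)
      { return kernels::MatternIso(x1, x2, param_index, mTheta, 3); }

      double meanFunction(const vectord &x)
      { return means::Zero(x); }

      // Recompute cached quantities after each matrix update (stub).
      int precomputeGPParams()
      { return 1; }
    };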

include/studenttprocess.hpp

 #include "nonparametricprocess.hpp"
 #include "kernels.hpp"
 #include "meanfuncs.hpp"
-#include "inneroptimization.hpp"
  
 /** \addtogroup BayesOptimization */
 /*@{*/
 
 
-class StudentTProcess: public NonParametricProcess, 
-		       public InnerOptimization 
+class StudentTProcess: public NonParametricProcess
 {
 public:
   StudentTProcess( double theta = KERNEL_THETA, 
    * @param yPred mean of the predicted Gaussian distribution
    * @param sPred std of the predicted Gaussian distribution
    * 
-   * @return error code.
+   * @return degrees of freedom if positive; error code if negative.
    */	
   int prediction(const vectord &query,
   		 double& yPred, double& sPred);
 			       size_t index = 1);			 
 			 
 
-  /** Computes the GP based on mGPXX
-   *  This function is hightly inefficient O(N^3). Use it only at 
-   *  the begining.
-   *
-   *  It also computes the kernel hyperparameters.
-   */
-  int fitGP();
-
-
-  /** Add new point efficiently using Matrix Decomposition Lemma
-   *  for the inversion of the correlation matrix. Maybe it is faster
-   *  to just construct and invert a new matrix each time.
-   */   
-  int addNewPointToGP( const vectord &Xnew,
-		       double Ynew);
-
-  inline double getTheta()
-  { return mTheta; }
-
-  inline void setTheta( double theta )
-  { mTheta = theta; }
-
-  virtual double innerEvaluate(const vectord& query, 
-			      vectord& grad)
-  { 
-    setTheta(query(0));
-    return negativeLogLikelihood(grad(0),1);
-  }
 
 protected:
   inline double correlationFunction( const vectord &x1,const vectord &x2,
   { return kernels::MatternIso(x1,x2,param_index,mTheta,3); }
 
   inline double meanFunction( const vectord &x )
-  { return means::Zero(x); }
+  { return means::One(x); }
 
 
-  int precomputeGPParams()
-  {return 1;};
+  int precomputeGPParams();
 
 
 protected:
-  double mTheta;                      // Kernel parameters
-  const double mRegularizer;  // GP prior parameters (Normal)
+
+  double mMu, mSig;                   // GP posterior parameters
+
+  // Precomputed GP prediction operations
+  vectord mUInvR;              
+  double mUInvRUDelta;
+  vectord mYUmu;
 
 };
 

python/bayesopt.cpp

-/* Generated by Cython 0.13 on Fri Oct 21 18:37:06 2011 */
+/* Generated by Cython 0.13 on Sun Oct 23 15:04:07 2011 */
 
 #define PY_SSIZE_T_CLEAN
 #include "Python.h"
 #include "stdlib.h"
 #include "numpy/arrayobject.h"
 #include "numpy/ufuncobject.h"
+#include "ctypes.h"
 #include "krigwpr.h"
 
 /* inline attribute */
 /* Module declarations from bayesopt */
 
 static PyObject *__pyx_f_8bayesopt_dict2structparams(PyObject *, gp_params *); /*proto*/
+static criterium_name __pyx_f_8bayesopt_find_criterium(PyObject *); /*proto*/
+static surrogate_name __pyx_f_8bayesopt_find_surrogate(PyObject *); /*proto*/
 static PyObject *__pyx_f_8bayesopt_ndarray2pointer(PyObject *, double *); /*proto*/
 static PyObject *__pyx_f_8bayesopt_pointer2ndarray(int, double *); /*proto*/
 static double __pyx_f_8bayesopt_callback(unsigned int, double *, double *, void *); /*proto*/
 static char __pyx_k__Zd[] = "Zd";
 static char __pyx_k__Zf[] = "Zf";
 static char __pyx_k__Zg[] = "Zg";
+static char __pyx_k__ei[] = "ei";
+static char __pyx_k__gp[] = "gp";
 static char __pyx_k__np[] = "np";
 static char __pyx_k__buf[] = "buf";
 static char __pyx_k__obj[] = "obj";
 static char __pyx_k__range[] = "range";
 static char __pyx_k__shape[] = "shape";
 static char __pyx_k__theta[] = "theta";
-static char __pyx_k__useEI[] = "useEI";
 static char __pyx_k__zeros[] = "zeros";
 static char __pyx_k__fields[] = "fields";
 static char __pyx_k__format[] = "format";
 static char __pyx_k__strides[] = "strides";
 static char __pyx_k____main__[] = "__main__";
 static char __pyx_k____test__[] = "__test__";
+static char __pyx_k__criteria[] = "criteria";
 static char __pyx_k__itemsize[] = "itemsize";
 static char __pyx_k__readonly[] = "readonly";
 static char __pyx_k__type_num[] = "type_num";
 static char __pyx_k__byteorder[] = "byteorder";
+static char __pyx_k__surrogate[] = "surrogate";
 static char __pyx_k__ValueError[] = "ValueError";
 static char __pyx_k__suboffsets[] = "suboffsets";
 static char __pyx_k__RuntimeError[] = "RuntimeError";
-static char __pyx_k__use_cooling_scheme[] = "use_cooling_scheme";
 static PyObject *__pyx_kp_u_1;
 static PyObject *__pyx_kp_u_2;
 static PyObject *__pyx_kp_u_3;
 static PyObject *__pyx_n_s__beta;
 static PyObject *__pyx_n_s__buf;
 static PyObject *__pyx_n_s__byteorder;
+static PyObject *__pyx_n_s__criteria;
 static PyObject *__pyx_n_s__data;
 static PyObject *__pyx_n_s__delta;
 static PyObject *__pyx_n_s__descr;
 static PyObject *__pyx_n_s__dparams;
+static PyObject *__pyx_n_s__ei;
 static PyObject *__pyx_n_s__f;
 static PyObject *__pyx_n_s__fields;
 static PyObject *__pyx_n_s__format;
+static PyObject *__pyx_n_s__gp;
 static PyObject *__pyx_n_s__itemsize;
 static PyObject *__pyx_n_s__maxeval;
 static PyObject *__pyx_n_s__nDim;
 static PyObject *__pyx_n_s__shape;
 static PyObject *__pyx_n_s__strides;
 static PyObject *__pyx_n_s__suboffsets;
+static PyObject *__pyx_n_s__surrogate;
 static PyObject *__pyx_n_s__theta;
 static PyObject *__pyx_n_s__type_num;
-static PyObject *__pyx_n_s__useEI;
-static PyObject *__pyx_n_s__use_cooling_scheme;
 static PyObject *__pyx_n_s__zeros;
 static PyObject *__pyx_int_0;
 static PyObject *__pyx_int_15;
 
-/* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":20
+/* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":36
  * 
  * 
  * cdef dict2structparams(dict dparams, gp_params *params):             # <<<<<<<<<<<<<<
   double __pyx_t_2;
   __Pyx_RefNannySetupContext("dict2structparams");
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":21
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":37
  * 
  * cdef dict2structparams(dict dparams, gp_params *params):
  *     params.theta = dparams['theta']             # <<<<<<<<<<<<<<
  *     params.alpha = dparams['alpha']
  *     params.beta = dparams['beta']
  */
-  __pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject *)__pyx_v_dparams), ((PyObject *)__pyx_n_s__theta)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject *)__pyx_v_dparams), ((PyObject *)__pyx_n_s__theta)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_params->theta = __pyx_t_2;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":22
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":38
  * cdef dict2structparams(dict dparams, gp_params *params):
  *     params.theta = dparams['theta']
  *     params.alpha = dparams['alpha']             # <<<<<<<<<<<<<<
  *     params.beta = dparams['beta']
  *     params.delta = dparams['delta']
  */
-  __pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject *)__pyx_v_dparams), ((PyObject *)__pyx_n_s__alpha)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject *)__pyx_v_dparams), ((PyObject *)__pyx_n_s__alpha)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_params->alpha = __pyx_t_2;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":23
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":39
  *     params.theta = dparams['theta']
  *     params.alpha = dparams['alpha']
  *     params.beta = dparams['beta']             # <<<<<<<<<<<<<<
  *     params.delta = dparams['delta']
  *     params.noise = dparams['noise']
  */
-  __pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject *)__pyx_v_dparams), ((PyObject *)__pyx_n_s__beta)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject *)__pyx_v_dparams), ((PyObject *)__pyx_n_s__beta)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_params->beta = __pyx_t_2;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":24
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":40
  *     params.alpha = dparams['alpha']
  *     params.beta = dparams['beta']
  *     params.delta = dparams['delta']             # <<<<<<<<<<<<<<
  *     params.noise = dparams['noise']
  * 
  */
-  __pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject *)__pyx_v_dparams), ((PyObject *)__pyx_n_s__delta)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject *)__pyx_v_dparams), ((PyObject *)__pyx_n_s__delta)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_params->delta = __pyx_t_2;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":25
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":41
  *     params.beta = dparams['beta']
  *     params.delta = dparams['delta']
  *     params.noise = dparams['noise']             # <<<<<<<<<<<<<<
  * 
- * 
+ * cdef criterium_name find_criterium(str criteria):
  */
-  __pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject *)__pyx_v_dparams), ((PyObject *)__pyx_n_s__noise)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject *)__pyx_v_dparams), ((PyObject *)__pyx_n_s__noise)); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __pyx_v_params->noise = __pyx_t_2;
 
   return __pyx_r;
 }
 
-/* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":28
+/* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":43
+ *     params.noise = dparams['noise']
+ * 
+ * cdef criterium_name find_criterium(str criteria):             # <<<<<<<<<<<<<<
+ *     if criteria == 'ei':
+ *         return c_ei
+ */
+
+static  criterium_name __pyx_f_8bayesopt_find_criterium(PyObject *__pyx_v_criteria) {
+  criterium_name __pyx_r;
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_t_2;
+  __Pyx_RefNannySetupContext("find_criterium");
+
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":44
+ * 
+ * cdef criterium_name find_criterium(str criteria):
+ *     if criteria == 'ei':             # <<<<<<<<<<<<<<
+ *         return c_ei
+ * 
+ */
+  __pyx_t_1 = PyObject_RichCompare(((PyObject *)__pyx_v_criteria), ((PyObject *)__pyx_n_s__ei), Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (__pyx_t_2) {
+
+    /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":45
+ * cdef criterium_name find_criterium(str criteria):
+ *     if criteria == 'ei':
+ *         return c_ei             # <<<<<<<<<<<<<<
+ * 
+ * 
+ */
+    __pyx_r = c_ei;
+    goto __pyx_L0;
+    goto __pyx_L3;
+  }
+  __pyx_L3:;
+
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_WriteUnraisable("bayesopt.find_criterium");
+  __pyx_L0:;
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":48
+ * 
+ * 
+ * cdef surrogate_name find_surrogate(str surrogate):             # <<<<<<<<<<<<<<
+ *     if surrogate == 'gp':
+ *         return s_gaussianProcess
+ */
+
+static  surrogate_name __pyx_f_8bayesopt_find_surrogate(PyObject *__pyx_v_surrogate) {
+  surrogate_name __pyx_r;
+  PyObject *__pyx_t_1 = NULL;
+  int __pyx_t_2;
+  __Pyx_RefNannySetupContext("find_surrogate");
+
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":49
+ * 
+ * cdef surrogate_name find_surrogate(str surrogate):
+ *     if surrogate == 'gp':             # <<<<<<<<<<<<<<
+ *         return s_gaussianProcess
+ * 
+ */
+  __pyx_t_1 = PyObject_RichCompare(((PyObject *)__pyx_v_surrogate), ((PyObject *)__pyx_n_s__gp), Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_GOTREF(__pyx_t_1);
+  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  if (__pyx_t_2) {
+
+    /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":50
+ * cdef surrogate_name find_surrogate(str surrogate):
+ *     if surrogate == 'gp':
+ *         return s_gaussianProcess             # <<<<<<<<<<<<<<
+ * 
+ * 
+ */
+    __pyx_r = s_gaussianProcess;
+    goto __pyx_L0;
+    goto __pyx_L3;
+  }
+  __pyx_L3:;
+
+  goto __pyx_L0;
+  __pyx_L1_error:;
+  __Pyx_XDECREF(__pyx_t_1);
+  __Pyx_WriteUnraisable("bayesopt.find_surrogate");
+  __pyx_L0:;
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+/* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":54
  * 
  * 
  * cdef ndarray2pointer(varray, double* p_array):             # <<<<<<<<<<<<<<
   __pyx_v_n = Py_None; __Pyx_INCREF(Py_None);
   __pyx_v_i = Py_None; __Pyx_INCREF(Py_None);
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":30
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":56
  * cdef ndarray2pointer(varray, double* p_array):
 *     # We assume it is a vector. We just copy the first row
  *     n = varray.shape[0];             # <<<<<<<<<<<<<<
  *     for i in range(0,n):
  *         p_array[i] = varray[i]
  */
-  __pyx_t_1 = PyObject_GetAttr(__pyx_v_varray, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyObject_GetAttr(__pyx_v_varray, __pyx_n_s__shape); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __Pyx_DECREF(__pyx_v_n);
   __pyx_v_n = __pyx_t_2;
   __pyx_t_2 = 0;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":31
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":57
 *     # We assume it is a vector. We just copy the first row
  *     n = varray.shape[0];
  *     for i in range(0,n):             # <<<<<<<<<<<<<<
  *         p_array[i] = varray[i]
  * 
  */
-  __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_INCREF(__pyx_int_0);
   PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_int_0);
   __Pyx_INCREF(__pyx_v_n);
   PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_n);
   __Pyx_GIVEREF(__pyx_v_n);
-  __pyx_t_1 = PyObject_Call(__pyx_builtin_range, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyObject_Call(__pyx_builtin_range, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   if (PyList_CheckExact(__pyx_t_1) || PyTuple_CheckExact(__pyx_t_1)) {
     __pyx_t_3 = 0; __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2);
   } else {
-    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_2);
   }
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
     } else {
       __pyx_t_1 = PyIter_Next(__pyx_t_2);
       if (!__pyx_t_1) {
-        if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+        if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
         break;
       }
       __Pyx_GOTREF(__pyx_t_1);
     __pyx_v_i = __pyx_t_1;
     __pyx_t_1 = 0;
 
-    /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":32
+    /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":58
  *     n = varray.shape[0];
  *     for i in range(0,n):
  *         p_array[i] = varray[i]             # <<<<<<<<<<<<<<
  * 
  * 
  */
-    __pyx_t_1 = PyObject_GetItem(__pyx_v_varray, __pyx_v_i); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = PyObject_GetItem(__pyx_v_varray, __pyx_v_i); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
-    __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-    __pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     (__pyx_v_p_array[__pyx_t_5]) = __pyx_t_4;
   }
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   return __pyx_r;
 }
 
-/* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":35
+/* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":61
  * 
  * 
  * cdef pointer2ndarray(int n, double* p_array):             # <<<<<<<<<<<<<<
   __Pyx_RefNannySetupContext("pointer2ndarray");
   __pyx_v_varray = Py_None; __Pyx_INCREF(Py_None);
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":37
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":63
  * cdef pointer2ndarray(int n, double* p_array):
 *     # We assume it is a vector. We just copy the first row
  *     varray = np.zeros(n)             # <<<<<<<<<<<<<<
  *     for i in range(0,n):
  *         varray[i] = p_array[i]
  */
-  __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-  __pyx_t_1 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
   __pyx_t_1 = 0;
-  __pyx_t_1 = PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
   __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
   __pyx_v_varray = __pyx_t_1;
   __pyx_t_1 = 0;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":38
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":64
 *     # We assume it is a vector. We just copy the first row
  *     varray = np.zeros(n)
  *     for i in range(0,n):             # <<<<<<<<<<<<<<
   for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
     __pyx_v_i = __pyx_t_5;
 
-    /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":39
+    /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":65
  *     varray = np.zeros(n)
  *     for i in range(0,n):
  *         varray[i] = p_array[i]             # <<<<<<<<<<<<<<
  *     return varray
  * 
  */
-    __pyx_t_1 = PyFloat_FromDouble((__pyx_v_p_array[__pyx_v_i])); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    __pyx_t_1 = PyFloat_FromDouble((__pyx_v_p_array[__pyx_v_i])); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_GOTREF(__pyx_t_1);
-    if (__Pyx_SetItemInt(__pyx_v_varray, __pyx_v_i, __pyx_t_1, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (__Pyx_SetItemInt(__pyx_v_varray, __pyx_v_i, __pyx_t_1, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
     __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   }
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":40
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":66
  *     for i in range(0,n):
  *         varray[i] = p_array[i]
  *     return varray             # <<<<<<<<<<<<<<
   return __pyx_r;
 }
 
-/* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":43
+/* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":69
  * 
  * 
  * cdef double callback(unsigned int n, double *x, double *gradient, void *func_data):             # <<<<<<<<<<<<<<
   __pyx_v_invector = Py_None; __Pyx_INCREF(Py_None);
   __pyx_v_result = Py_None; __Pyx_INCREF(Py_None);
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":44
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":70
  * 
  * cdef double callback(unsigned int n, double *x, double *gradient, void *func_data):
  *     invector = pointer2ndarray(n,x)             # <<<<<<<<<<<<<<
  *     result = (<object>func_data)(invector)
  *     return result
  */
-  __pyx_t_1 = __pyx_f_8bayesopt_pointer2ndarray(__pyx_v_n, __pyx_v_x); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __pyx_f_8bayesopt_pointer2ndarray(__pyx_v_n, __pyx_v_x); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_v_invector);
   __pyx_v_invector = __pyx_t_1;
   __pyx_t_1 = 0;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":45
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":71
  * cdef double callback(unsigned int n, double *x, double *gradient, void *func_data):
  *     invector = pointer2ndarray(n,x)
  *     result = (<object>func_data)(invector)             # <<<<<<<<<<<<<<
  *     return result
  * 
  */
-  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_INCREF(__pyx_v_invector);
   PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_invector);
   __Pyx_GIVEREF(__pyx_v_invector);
-  __pyx_t_2 = PyObject_Call(((PyObject *)__pyx_v_func_data), __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = PyObject_Call(((PyObject *)__pyx_v_func_data), __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
   __Pyx_DECREF(__pyx_v_result);
   __pyx_v_result = __pyx_t_2;
   __pyx_t_2 = 0;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":46
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":72
  *     invector = pointer2ndarray(n,x)
  *     result = (<object>func_data)(invector)
  *     return result             # <<<<<<<<<<<<<<
  * 
  * 
  */
-  __pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_v_result); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_v_result); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_r = __pyx_t_3;
   goto __pyx_L0;
 
   return __pyx_r;
 }
 
-/* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":50
+/* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":76
  * 
  * 
- * def optimize(object f, int nDim, np.ndarray[np.double_t] np_lb, np.ndarray[np.double_t] np_ub, np.ndarray[np.double_t] np_x, int maxeval, dict dparams, int useEI, int use_cooling_scheme):             # <<<<<<<<<<<<<<
+ * def optimize(object f, int nDim, np.ndarray[np.double_t] np_lb, np.ndarray[np.double_t] np_ub, np.ndarray[np.double_t] np_x, int maxeval, dict dparams, str criteria, str surrogate):             # <<<<<<<<<<<<<<
  *     cdef gp_params zero_params
  *     zero_params.theta = 0
  */
   PyArrayObject *__pyx_v_np_x = 0;
   int __pyx_v_maxeval;
   PyObject *__pyx_v_dparams = 0;
-  int __pyx_v_useEI;
-  int __pyx_v_use_cooling_scheme;
+  PyObject *__pyx_v_criteria = 0;
+  PyObject *__pyx_v_surrogate = 0;
   gp_params __pyx_v_zero_params;
   gp_params __pyx_v_params;
+  criterium_name __pyx_v_crit;
+  surrogate_name __pyx_v_surr;
   double __pyx_v_minf[1000];
   int __pyx_v_error_code;
   double __pyx_v_min_value;
   PyObject *__pyx_t_1 = NULL;
   PyObject *__pyx_t_2 = NULL;
   PyObject *__pyx_t_3 = NULL;
-  static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__f,&__pyx_n_s__nDim,&__pyx_n_s__np_lb,&__pyx_n_s__np_ub,&__pyx_n_s__np_x,&__pyx_n_s__maxeval,&__pyx_n_s__dparams,&__pyx_n_s__useEI,&__pyx_n_s__use_cooling_scheme,0};
+  static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__f,&__pyx_n_s__nDim,&__pyx_n_s__np_lb,&__pyx_n_s__np_ub,&__pyx_n_s__np_x,&__pyx_n_s__maxeval,&__pyx_n_s__dparams,&__pyx_n_s__criteria,&__pyx_n_s__surrogate,0};
   __Pyx_RefNannySetupContext("optimize");
   __pyx_self = __pyx_self;
   if (unlikely(__pyx_kwds)) {
       values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__nDim);
       if (likely(values[1])) kw_args--;
       else {
-        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
       case  2:
       values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__np_lb);
       if (likely(values[2])) kw_args--;
       else {
-        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
       case  3:
       values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__np_ub);
       if (likely(values[3])) kw_args--;
       else {
-        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
       case  4:
       values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__np_x);
       if (likely(values[4])) kw_args--;
       else {
-        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
       case  5:
       values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__maxeval);
       if (likely(values[5])) kw_args--;
       else {
-        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
       case  6:
       values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dparams);
       if (likely(values[6])) kw_args--;
       else {
-        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
       case  7:
-      values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__useEI);
+      values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__criteria);
       if (likely(values[7])) kw_args--;
       else {
-        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
       case  8:
-      values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__use_cooling_scheme);
+      values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__surrogate);
       if (likely(values[8])) kw_args--;
       else {
-        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+        __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
       }
     }
     if (unlikely(kw_args > 0)) {
-      if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "optimize") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+      if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "optimize") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
     }
     __pyx_v_f = values[0];
-    __pyx_v_nDim = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_nDim == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+    __pyx_v_nDim = __Pyx_PyInt_AsInt(values[1]); if (unlikely((__pyx_v_nDim == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
     __pyx_v_np_lb = ((PyArrayObject *)values[2]);
     __pyx_v_np_ub = ((PyArrayObject *)values[3]);
     __pyx_v_np_x = ((PyArrayObject *)values[4]);
-    __pyx_v_maxeval = __Pyx_PyInt_AsInt(values[5]); if (unlikely((__pyx_v_maxeval == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+    __pyx_v_maxeval = __Pyx_PyInt_AsInt(values[5]); if (unlikely((__pyx_v_maxeval == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
     __pyx_v_dparams = ((PyObject *)values[6]);
-    __pyx_v_useEI = __Pyx_PyInt_AsInt(values[7]); if (unlikely((__pyx_v_useEI == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
-    __pyx_v_use_cooling_scheme = __Pyx_PyInt_AsInt(values[8]); if (unlikely((__pyx_v_use_cooling_scheme == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+    __pyx_v_criteria = ((PyObject *)values[7]);
+    __pyx_v_surrogate = ((PyObject *)values[8]);
   } else if (PyTuple_GET_SIZE(__pyx_args) != 9) {
     goto __pyx_L5_argtuple_error;
   } else {
     __pyx_v_f = PyTuple_GET_ITEM(__pyx_args, 0);
-    __pyx_v_nDim = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_nDim == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+    __pyx_v_nDim = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_nDim == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
     __pyx_v_np_lb = ((PyArrayObject *)PyTuple_GET_ITEM(__pyx_args, 2));
     __pyx_v_np_ub = ((PyArrayObject *)PyTuple_GET_ITEM(__pyx_args, 3));
     __pyx_v_np_x = ((PyArrayObject *)PyTuple_GET_ITEM(__pyx_args, 4));
-    __pyx_v_maxeval = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 5)); if (unlikely((__pyx_v_maxeval == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+    __pyx_v_maxeval = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 5)); if (unlikely((__pyx_v_maxeval == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
     __pyx_v_dparams = ((PyObject *)PyTuple_GET_ITEM(__pyx_args, 6));
-    __pyx_v_useEI = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 7)); if (unlikely((__pyx_v_useEI == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
-    __pyx_v_use_cooling_scheme = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 8)); if (unlikely((__pyx_v_use_cooling_scheme == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+    __pyx_v_criteria = ((PyObject *)PyTuple_GET_ITEM(__pyx_args, 7));
+    __pyx_v_surrogate = ((PyObject *)PyTuple_GET_ITEM(__pyx_args, 8));
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
-  __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+  __Pyx_RaiseArgtupleInvalid("optimize", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
   __pyx_L3_error:;
   __Pyx_AddTraceback("bayesopt.optimize");
   __Pyx_RefNannyFinishContext();
   __pyx_bstruct_np_lb.buf = NULL;
   __pyx_bstruct_np_ub.buf = NULL;
   __pyx_bstruct_np_x.buf = NULL;
-  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_np_lb), __pyx_ptype_5numpy_ndarray, 1, "np_lb", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_np_ub), __pyx_ptype_5numpy_ndarray, 1, "np_ub", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_np_x), __pyx_ptype_5numpy_ndarray, 1, "np_x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_dparams), &PyDict_Type, 1, "dparams", 1))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_np_lb), __pyx_ptype_5numpy_ndarray, 1, "np_lb", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_np_ub), __pyx_ptype_5numpy_ndarray, 1, "np_ub", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_np_x), __pyx_ptype_5numpy_ndarray, 1, "np_x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_dparams), &PyDict_Type, 1, "dparams", 1))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_criteria), &PyString_Type, 1, "criteria", 1))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_surrogate), &PyString_Type, 1, "surrogate", 1))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
-    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_np_lb, (PyObject*)__pyx_v_np_lb, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_np_lb, (PyObject*)__pyx_v_np_lb, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_bstride_0_np_lb = __pyx_bstruct_np_lb.strides[0];
   __pyx_bshape_0_np_lb = __pyx_bstruct_np_lb.shape[0];
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
-    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_np_ub, (PyObject*)__pyx_v_np_ub, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_np_ub, (PyObject*)__pyx_v_np_ub, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_bstride_0_np_ub = __pyx_bstruct_np_ub.strides[0];
   __pyx_bshape_0_np_ub = __pyx_bstruct_np_ub.shape[0];
   {
     __Pyx_BufFmt_StackElem __pyx_stack[1];
-    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_np_x, (PyObject*)__pyx_v_np_x, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_np_x, (PyObject*)__pyx_v_np_x, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   }
   __pyx_bstride_0_np_x = __pyx_bstruct_np_x.strides[0];
   __pyx_bshape_0_np_x = __pyx_bstruct_np_x.shape[0];
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":52
- * def optimize(object f, int nDim, np.ndarray[np.double_t] np_lb, np.ndarray[np.double_t] np_ub, np.ndarray[np.double_t] np_x, int maxeval, dict dparams, int useEI, int use_cooling_scheme):
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":78
+ * def optimize(object f, int nDim, np.ndarray[np.double_t] np_lb, np.ndarray[np.double_t] np_ub, np.ndarray[np.double_t] np_x, int maxeval, dict dparams, str criteria, str surrogate):
  *     cdef gp_params zero_params
  *     zero_params.theta = 0             # <<<<<<<<<<<<<<
  *     zero_params.alpha = 0
  */
   __pyx_v_zero_params.theta = 0.0;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":53
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":79
  *     cdef gp_params zero_params
  *     zero_params.theta = 0
  *     zero_params.alpha = 0             # <<<<<<<<<<<<<<
  */
   __pyx_v_zero_params.alpha = 0.0;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":54
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":80
  *     zero_params.theta = 0
  *     zero_params.alpha = 0
  *     zero_params.beta = 0             # <<<<<<<<<<<<<<
  */
   __pyx_v_zero_params.beta = 0.0;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":55
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":81
  *     zero_params.alpha = 0
  *     zero_params.beta = 0
  *     zero_params.delta = 0             # <<<<<<<<<<<<<<
  */
   __pyx_v_zero_params.delta = 0.0;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":56
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":82
  *     zero_params.beta = 0
  *     zero_params.delta = 0
  *     zero_params.noise = 0             # <<<<<<<<<<<<<<
  */
   __pyx_v_zero_params.noise = 0.0;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":59
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":85
  * 
  *     cdef gp_params params
  *     dict2structparams(dparams,&zero_params)             # <<<<<<<<<<<<<<
  *     params = zero_params
  * 
  */
-  __pyx_t_1 = __pyx_f_8bayesopt_dict2structparams(__pyx_v_dparams, (&__pyx_v_zero_params)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = __pyx_f_8bayesopt_dict2structparams(__pyx_v_dparams, (&__pyx_v_zero_params)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
   __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":60
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":86
  *     cdef gp_params params
  *     dict2structparams(dparams,&zero_params)
  *     params = zero_params             # <<<<<<<<<<<<<<
  * 
- *     #cdef double lb[1000]
+ *     cdef criterium_name crit
  */
   __pyx_v_params = __pyx_v_zero_params;
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":71
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":89
+ * 
+ *     cdef criterium_name crit
+ *     crit = find_criterium(criteria)             # <<<<<<<<<<<<<<
+ *     cdef surrogate_name surr
+ *     surr = find_surrogate(surrogate)
+ */
+  __pyx_v_crit = __pyx_f_8bayesopt_find_criterium(__pyx_v_criteria);
+
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":91
+ *     crit = find_criterium(criteria)
+ *     cdef surrogate_name surr
+ *     surr = find_surrogate(surrogate)             # <<<<<<<<<<<<<<
+ * 
+ * 
+ */
+  __pyx_v_surr = __pyx_f_8bayesopt_find_surrogate(__pyx_v_surrogate);
+
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":103
  *     #ndarray2pointer(np_x,c_x)
  * 
- *     error_code = krigging_optimization(nDim, callback, <void *> f, <double *>np_lb.data, <double *>np_ub.data, <double *>np_x.data, minf, maxeval, params, useEI, use_cooling_scheme)             # <<<<<<<<<<<<<<
+ *     error_code = krigging_optimization(nDim, callback, <void *> f, <double *>np_lb.data, <double *>np_ub.data, <double *>np_x.data, minf, maxeval, params, crit, surr)             # <<<<<<<<<<<<<<
  *     min_value = minf[0]
  *     #point_min_value = pointer2ndarray(nDim,c_x)
  */
-  __pyx_v_error_code = krigging_optimization(__pyx_v_nDim, __pyx_f_8bayesopt_callback, ((void *)__pyx_v_f), ((double *)__pyx_v_np_lb->data), ((double *)__pyx_v_np_ub->data), ((double *)__pyx_v_np_x->data), __pyx_v_minf, __pyx_v_maxeval, __pyx_v_params, __pyx_v_useEI, __pyx_v_use_cooling_scheme);
-
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":72
+  __pyx_v_error_code = krigging_optimization(__pyx_v_nDim, __pyx_f_8bayesopt_callback, ((void *)__pyx_v_f), ((double *)__pyx_v_np_lb->data), ((double *)__pyx_v_np_ub->data), ((double *)__pyx_v_np_x->data), __pyx_v_minf, __pyx_v_maxeval, __pyx_v_params, __pyx_v_crit, __pyx_v_surr);
+
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":104
  * 
- *     error_code = krigging_optimization(nDim, callback, <void *> f, <double *>np_lb.data, <double *>np_ub.data, <double *>np_x.data, minf, maxeval, params, useEI, use_cooling_scheme)
+ *     error_code = krigging_optimization(nDim, callback, <void *> f, <double *>np_lb.data, <double *>np_ub.data, <double *>np_x.data, minf, maxeval, params, crit, surr)
  *     min_value = minf[0]             # <<<<<<<<<<<<<<
  *     #point_min_value = pointer2ndarray(nDim,c_x)
  *     return min_value,np_x,error_code
  */
   __pyx_v_min_value = (__pyx_v_minf[0]);
 
-  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":74
+  /* "/home/rmcantin/code/bayesian-optimization/python/bayesopt.pyx":106
  *     min_value = minf[0]
  *     #point_min_value = pointer2ndarray(nDim,c_x)
  *     return min_value,np_x,error_code             # <<<<<<<<<<<<<<
  */
   __Pyx_XDECREF(__pyx_r);
-  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_min_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_1 = PyFloat_FromDouble(__pyx_v_min_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_1);
-  __pyx_t_2 = PyInt_FromLong(__pyx_v_error_code); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_2 = PyInt_FromLong(__pyx_v_error_code); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_2);
-  __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __Pyx_GOTREF(__pyx_t_3);
   PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
   __Pyx_GIVEREF(__pyx_t_1);
   {&__pyx_n_s__beta, __pyx_k__beta, sizeof(__pyx_k__beta), 0, 0, 1, 1},
   {&__pyx_n_s__buf, __pyx_k__buf, sizeof(__pyx_k__buf), 0, 0, 1, 1},
   {&__pyx_n_s__byteorder, __pyx_k__byteorder, sizeof(__pyx_k__byteorder), 0, 0, 1, 1},
+  {&__pyx_n_s__criteria, __pyx_k__criteria, sizeof(__pyx_k__criteria), 0, 0, 1, 1},
   {&__pyx_n_s__data, __pyx_k__data, sizeof(__pyx_k__data), 0, 0, 1, 1},
   {&__pyx_n_s__delta, __pyx_k__delta, sizeof(__pyx_k__delta), 0, 0, 1, 1},
   {&__pyx_n_s__descr, __pyx_k__descr, sizeof(__pyx_k__descr), 0, 0, 1, 1},
   {&__pyx_n_s__dparams, __pyx_k__dparams, sizeof(__pyx_k__dparams), 0, 0, 1, 1},
+  {&__pyx_n_s__ei, __pyx_k__ei, sizeof(__pyx_k__ei), 0, 0, 1, 1},
   {&__pyx_n_s__f, __pyx_k__f, sizeof(__pyx_k__f), 0, 0, 1, 1},
   {&__pyx_n_s__fields, __pyx_k__fields, sizeof(__pyx_k__fields), 0, 0, 1, 1},
   {&__pyx_n_s__format, __pyx_k__format, sizeof(__pyx_k__format), 0, 0, 1, 1},
+  {&__pyx_n_s__gp, __pyx_k__gp, sizeof(__pyx_k__gp), 0, 0, 1, 1},
   {&__pyx_n_s__itemsize, __pyx_k__itemsize, sizeof(__pyx_k__itemsize), 0, 0, 1, 1},
   {&__pyx_n_s__maxeval, __pyx_k__maxeval, sizeof(__pyx_k__maxeval), 0, 0, 1, 1},
   {&__pyx_n_s__nDim, __pyx_k__nDim, sizeof(__pyx_k__nDim), 0, 0, 1, 1},
   {&__pyx_n_s__shape, __pyx_k__shape, sizeof(__pyx_k__shape), 0, 0, 1, 1},
   {&__pyx_n_s__strides, __pyx_k__strides, sizeof(__pyx_k__strides), 0, 0, 1, 1},
   {&__pyx_n_s__suboffsets, __pyx_k__suboffsets, sizeof(__pyx_k__suboffsets), 0, 0, 1, 1},
+  {&__pyx_n_s__surrogate, __pyx_k__surrogate, sizeof(__pyx_k__surrogate), 0, 0, 1, 1},
   {&__pyx_n_s__theta, __pyx_k__theta, sizeof(__pyx_k__theta), 0, 0, 1, 1},
   {&__pyx_n_s__type_num, __pyx_k__type_num, sizeof(__pyx_k__type_num), 0, 0, 1, 1},
-  {&__pyx_n_s__useEI, __pyx_k__useEI, sizeof(__pyx_k__useEI), 0, 0, 1, 1},
-  {&__pyx_n_s__use_cooling_scheme, __pyx_k__use_cooling_scheme, sizeof(__pyx_k__use_cooling_scheme), 0, 0, 1, 1},
   {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1},
   {0, 0, 0, 0, 0, 0, 0}
 };
 static int __Pyx_InitCachedBuiltins(void) {
-  __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+  __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 206; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 787; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
   return 0;

python/bayesopt.pyx

 import numpy as np
 cimport numpy as np
 
-cdef extern from "krigwpr.h":
+cdef extern from "ctypes.h":
     ctypedef struct gp_params:
         double theta
         double alpha
         double delta
         double noise
 
+    ctypedef enum criterium_name:
+        c_ei,
+        c_lcb,
+        c_poi,
+        c_gp_hedge,
+        c_greedyAOptimality,
+        c_expectedReturn
+
+    ctypedef enum surrogate_name:
+        s_gaussianProcess,
+        s_gaussianProcessHyperPriors,
+        s_studentTProcess
+
+
+
+cdef extern from "krigwpr.h":
     ctypedef double (*eval_func)(unsigned int n, double *x, double *gradient, void *func_data)
-    int krigging_optimization(int nDim, eval_func f, void* f_data, double *lb, double *ub, double *x, double *minf, int maxeval, gp_params params, int useEI, int use_cooling_scheme)
+    int krigging_optimization(int nDim, eval_func f, void* f_data, double *lb, double *ub, double *x, double *minf, int maxeval, gp_params params, criterium_name c_name, surrogate_name gp_name)
 
 
 cdef dict2structparams(dict dparams, gp_params *params):
     params.delta = dparams['delta']
     params.noise = dparams['noise']
 
+cdef criterium_name find_criterium(str criteria):
+    if criteria == 'ei':
+        return c_ei
+    # Default to expected improvement for unrecognized names, so the
+    # function never falls off the end with an undefined enum value.
+    return c_ei
+
+
+cdef surrogate_name find_surrogate(str surrogate):
+    if surrogate == 'gp':
+        return s_gaussianProcess
+    # Default to the basic Gaussian process for unrecognized names.
+    return s_gaussianProcess
+
+
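Only the keys 'ei' and 'gp' are wired up so far; anything else hits the defaults. If the remaining enum values are to be exposed through the Python interface, the mapping could grow along these lines (a sketch only: the string keys 'lcb', 'poi', 'hedge', 'hp' and 'stp' are hypothetical names chosen to mirror the enums, not identifiers that exist anywhere upstream):

    cdef criterium_name find_criterium(str criteria):
        # Hypothetical keys mirroring the criterium_name enum.
        if criteria == 'ei':
            return c_ei
        elif criteria == 'lcb':
            return c_lcb
        elif criteria == 'poi':
            return c_poi
        elif criteria == 'hedge':
            return c_gp_hedge
        return c_ei                    # default, as above

    cdef surrogate_name find_surrogate(str surrogate):
        # Hypothetical keys mirroring the surrogate_name enum.
        if surrogate == 'gp':
            return s_gaussianProcess
        elif surrogate == 'hp':
            return s_gaussianProcessHyperPriors
        elif surrogate == 'stp':
            return s_studentTProcess
        return s_gaussianProcess       # default, as above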
 
 cdef ndarray2pointer(varray, double* p_array):
     # We assume varray is a vector; we just copy the first row.
 
 
 
-def optimize(object f, int nDim, np.ndarray[np.double_t] np_lb, np.ndarray[np.double_t] np_ub, np.ndarray[np.double_t] np_x, int maxeval, dict dparams, int useEI, int use_cooling_scheme):
+def optimize(object f, int nDim, np.ndarray[np.double_t] np_lb, np.ndarray[np.double_t] np_ub, np.ndarray[np.double_t] np_x, int maxeval, dict dparams, str criteria, str surrogate):
     cdef gp_params zero_params
     zero_params.theta = 0
     zero_params.alpha = 0
     cdef gp_params params
     dict2structparams(dparams,&zero_params)
     params = zero_params
+
+    cdef criterium_name crit
+    crit = find_criterium(criteria)
+    cdef surrogate_name surr
+    surr = find_surrogate(surrogate)
+    
     
     #cdef double lb[1000]
     #cdef double ub[1000]
     #ndarray2pointer(np_ub,ub)
     #ndarray2pointer(np_x,c_x)
     
-    error_code = krigging_optimization(nDim, callback, <void *> f, <double *>np_lb.data, <double *>np_ub.data, <double *>np_x.data, minf, maxeval, params, useEI, use_cooling_scheme)
+    error_code = krigging_optimization(nDim, callback, <void *> f, <double *>np_lb.data, <double *>np_ub.data, <double *>np_x.data, minf, maxeval, params, crit, surr)
     min_value = minf[0]
     #point_min_value = pointer2ndarray(nDim,c_x)
     return min_value,np_x,error_code

src/basicgaussprocess.cpp

 
 BasicGaussianProcess::BasicGaussianProcess( double theta, 
 					    double noise):
-  NonParametricProcess(), InnerOptimization(),
-  mTheta(theta), mRegularizer(noise)
+  NonParametricProcess(theta,noise)
 {
   setAlgorithm(bobyqa);
   setLimits(0.,100.);
   return 1;
 }
 	
-
-int BasicGaussianProcess::fitGP()
-{
-  size_t nSamples = mGPXX.size();
-  for (size_t ii=0; ii<nSamples; ii++)
-    checkBoundsY(ii);
-  
-  vectord th = svectord(1,mTheta);  
-
-  std::cout << "Initial theta: " << mTheta << " "<<th.size()<< std::endl;
-  innerOptimize(th);
-  setTheta(th(0));
-  std::cout << "Final theta: " << mTheta << std::endl;
-  
-  int error = computeInverseCorrMatrix(mRegularizer);
-
-  if (error < 0)
-    return error;
-
-  return 1;
-} // fitGP
-
-
-int BasicGaussianProcess::addNewPointToGP(const vectord &Xnew, 
-					  double Ynew)
-{
-  size_t nSamples = mGPXX.size();
-  size_t XDim = mGPXX[1].size();
-  size_t NewDim = Xnew.size();
-  
-  svectord colU(nSamples+1,1.0);
-  vectord Li(nSamples);
-  vectord wInvR(nSamples);
-  double wInvRw;
-  double selfCorrelation, Ni;
-  
-  if (XDim != NewDim)
-    {
-      std::cout << "Dimensional Error" << std::endl;
-      return -1;
-    }
-    
-  vectord correlationNewValue = computeCrossCorrelation(Xnew);
-  
-  selfCorrelation = correlationFunction(Xnew, Xnew) + mRegularizer;
-  
-  noalias(wInvR) = prod(correlationNewValue,mInvR);
-  wInvRw = inner_prod(wInvR,correlationNewValue);
-  Ni = 1/(selfCorrelation + wInvRw);
-  noalias(Li) = -Ni * wInvR;
-  mInvR += outer_prod(Li,Li) / Ni;
-  
-  //TODO: There must be a better way to do this.
-  mInvR.resize(nSamples+1,nSamples+1);
-  
-  Li.resize(nSamples+1);
-  Li(nSamples) = Ni;
-  
-  row(mInvR,nSamples) = Li;
-  column(mInvR,nSamples) = Li;
-
-  addSample(Xnew,Ynew);
-  checkBoundsY(nSamples);
-  
-  return 1;
-} // addNewPointToGP
-

src/gaussprocess.cpp

 GaussianProcess::GaussianProcess( double theta,
 				  double alpha, double beta, 
 				  double delta, double noise):
-  NonParametricProcess(), mTheta(theta),
+  NonParametricProcess(theta,noise),
   mAlpha(alpha), mBeta (beta),
-  mDelta2(delta), mRegularizer(noise)
+  mDelta2(delta)
 {
   setAlgorithm(bobyqa);
   setLimits(0.,100.);
   double mu     = inner_prod(colU,alphY) / eta;
   double YInvRY = inner_prod(mGPY,alphY);
     
-  double sigma = (mBeta + YInvRY - mu*mu/eta) / (mAlpha + (n+1) + 2);
+  double sigma = (mBeta + YInvRY - mu*mu*eta) / (mAlpha + (n+1) + 2);
   
   svectord colMu(n,mu);
   vectord yumu = mGPY - colMu;
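A quick sanity check on the sign fix: with \(\eta = u^\top K^{-1}u\) and \(\mu = u^\top K^{-1}y/\eta\) (the eta and mu computed above), the residual quadratic form collapses as

\[
(y-\mu u)^\top K^{-1}(y-\mu u)
 = y^\top K^{-1}y - 2\mu\,u^\top K^{-1}y + \mu^2\,u^\top K^{-1}u
 = y^\top K^{-1}y - \mu^2\eta,
\]

so the posterior scale must subtract \(\mu^2\eta\), not \(\mu^2/\eta\) as before.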
   vectord rInvR(mGPXX.size());
   double kn;
   double uInvRr, rInvRr;
+  double meanf = meanFunction(query);
 
   vectord colR = computeCrossCorrelation(query);
   kn = correlationFunction(query, query);
   rInvRr = inner_prod(rInvR,colR);
   uInvRr = inner_prod(mUInvR,colR);
   
-  yPred = mMu + inner_prod( rInvR, mYUmu );
-  sPred = sqrt( mSig * (kn - rInvRr + (1.0 - uInvRr) * (1.0 - uInvRr) 
+  yPred = meanf*mMu + inner_prod( rInvR, mYUmu );
+  sPred = sqrt( mSig * (kn - rInvRr + (meanf - uInvRr) * (meanf - uInvRr) 
 			/ mUInvRUDelta ) );
 
   return 1;
 }
 	
 
-int GaussianProcess::fitGP()
-{
-  size_t nSamples = mGPXX.size();
-  for (size_t ii=0; ii<nSamples; ii++)
-    checkBoundsY(ii);
-  
-  vectord th = svectord(1,mTheta);  
-
-  std::cout << "Initial theta: " << mTheta << " "<<th.size()<< std::endl;
-  innerOptimize(th);
-  setTheta(th(0));
-  std::cout << "Final theta: " << mTheta << std::endl;
-
-  int error = computeInverseCorrMatrix(mRegularizer);
-
-  if (error < 0)
-    return error;
-
-  return precomputeGPParams();
-} // fitGP
-
-
-int GaussianProcess::addNewPointToGP(const vectord &Xnew, 
-				     double Ynew)
-{
-  size_t nSamples = mGPXX.size();
-  size_t XDim = mGPXX[1].size();
-  size_t NewDim = Xnew.size();
-  
-  svectord colU(nSamples+1,1.0);
-  vectord Li(nSamples);
-  vectord wInvR(nSamples);
-  double wInvRw;
-  double selfCorrelation, Ni;
-  
-  if (XDim != NewDim)
-    {
-      std::cout << "Dimensional Error" << std::endl;
-      return -1;
-    }
-    
-  vectord correlationNewValue = computeCrossCorrelation(Xnew);
-  
-  selfCorrelation = correlationFunction(Xnew, Xnew) + mRegularizer;
-  
-  noalias(wInvR) = prod(correlationNewValue,mInvR);
-  wInvRw = inner_prod(wInvR,correlationNewValue);
-  Ni = 1/(selfCorrelation + wInvRw);
-  noalias(Li) = -Ni * wInvR;
-  mInvR += outer_prod(Li,Li) / Ni;
-  
-  //TODO: There must be a better way to do this.
-  mInvR.resize(nSamples+1,nSamples+1);
-  
-  Li.resize(nSamples+1);
-  Li(nSamples) = Ni;
-  
-  row(mInvR,nSamples) = Li;
-  column(mInvR,nSamples) = Li;
-
-  addSample(Xnew,Ynew);
-  checkBoundsY(nSamples);
-  
-  return precomputeGPParams();
-} // addNewPointToGP
-
-
 int GaussianProcess::precomputeGPParams()
 {
   size_t nSamples = mGPXX.size();
 #include "krigging.hpp"
 #include "lhs.hpp"
 #include "randgen.hpp"
+#include "basicgaussprocess.hpp"
 
 
 SKO::SKO( double theta, double noise,
 	  size_t nIter, double alpha, 
-	  double beta,  double delta):
-  mGP(theta,noise),
+	  double beta,  double delta,
+	  NonParametricProcess* gp):
   mMaxIterations(nIter), mMaxDim(MAX_DIM),
   mVerbose(0)
-{} // Constructor
+{ 
+  crit_name = c_gp_hedge;
+  if (gp == NULL)
+    mGP = new BasicGaussianProcess(theta,noise);
+  else
+    mGP = gp;
+
+} // Constructor
 
 SKO::SKO( gp_params params,
-	  size_t nIter):
-  mGP(params.theta,params.noise),
+	  size_t nIter,
+	  NonParametricProcess* gp):
   mMaxIterations(nIter), mMaxDim(MAX_DIM),
   mVerbose(0)
-{} // Constructor
+{
+  crit_name = c_gp_hedge;
+  if (gp == NULL)
+    mGP = new BasicGaussianProcess(params.theta,params.noise);
+  else
+    mGP = gp;
+} // Constructor
 
 
 SKO::~SKO()
 {
   mVerbose = 1;
 
-  crit_name = gp_hedge;
   crit.resetHedgeValues();
  
   mLowerBound = lowerBound;
 	{ 
 	  std::cout << "Iteration " << ii+1 << std::endl;
 	  std::cout << "Trying: " << xNext << std::endl;
-	  std::cout << "Best: " << mGP.getPointAtMinimum() << std::endl; 
-	  std::cout << "Best outcome: " <<  mGP.getValueAtMinimum() <<  std::endl; 
+	  std::cout << "Best: " << mGP->getPointAtMinimum() << std::endl; 
+	  std::cout << "Best outcome: " <<  mGP->getValueAtMinimum() <<  std::endl; 
 	}
      
       yNext = evaluateNormalizedSample(xNext);
-      mGP.addNewPointToGP(xNext,yNext);     
+      mGP->addNewPointToGP(xNext,yNext);     
     }
 
-  bestPoint = mGP.getPointAtMinimum();
+  bestPoint = mGP->getPointAtMinimum();
 
   return 1;
 } // optimize
       yPoints(i) = evaluateNormalizedSample(sample);
     }
 
-  mGP.setSamples(xPoints,yPoints);
-  mGP.fitGP();
+  mGP->setSamples(xPoints,yPoints);
+  mGP->fitGP();
 
   return 1;
 } // sampleInitialPoints
 int SKO::nextPoint(vectord &Xnext)
 {
   crit.resetAnnealValues();
-  if (crit_name == gp_hedge)
+  if (crit_name == c_gp_hedge)
     {
       vectord best_ei(Xnext);
       vectord best_lcb(Xnext);
       vectord best_poi(Xnext);
       double r_ei,r_lcb,r_poi,foo;
 
-      crit.setCriterium(expectedImprovement);
+      crit.setCriterium(c_ei);
       innerOptimize(best_ei);
-      mGP.prediction(best_ei,r_ei,foo);
+      mGP->prediction(best_ei,r_ei,foo);
       
-      crit.setCriterium(lcb);
+      crit.setCriterium(c_lcb);
       innerOptimize(best_lcb);
-      mGP.prediction(best_lcb,r_lcb,foo);
+      mGP->prediction(best_lcb,r_lcb,foo);
 
-      crit.setCriterium(poi);
+      crit.setCriterium(c_poi);
       innerOptimize(best_poi);
-      mGP.prediction(best_poi,r_poi,foo);
+      mGP->prediction(best_poi,r_poi,foo);
 
       criterium_name better = crit.update_hedge(r_ei,r_lcb,r_poi);
       switch(better)
 	{
-	case expectedImprovement: Xnext = best_ei; break;
-	case lcb: Xnext = best_lcb; break;
-	case poi: Xnext = best_poi; break;
+	case c_ei: Xnext = best_ei; break;
+	case c_lcb: Xnext = best_lcb; break;
+	case c_poi: Xnext = best_poi; break;
 	default: return -1;
 	}
       return 1;
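crit.update_hedge itself is defined elsewhere (criteria.hpp); in the standard GP-Hedge portfolio strategy, which this block appears to implement, each criterion keeps a cumulative gain \(g_i\) fed by the surrogate's predicted outcome at its own nominee (the r_ei, r_lcb, r_poi above), and the criterion to follow is drawn with softmax probability

\[
p_i = \frac{\exp(\eta_h\, g_i)}{\sum_j \exp(\eta_h\, g_j)},
\qquad g_i \leftarrow g_i + r_i,
\]

where \(\eta_h\) is the hedge learning rate (unrelated to the kernel theta). The switch then simply adopts the winning criterion's candidate.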
   if (!reachable)
     return 0.0;
 
-  return crit.evaluate(mGP,query);
+  return crit.evaluate(*mGP,query);
        
 }  // evaluateCriteria
 

src/studenttprocess.cpp

+#include "studenttprocess.hpp"
+#include "cholesky.hpp"
+#include "trace.hpp"
+
+  
+StudentTProcess::StudentTProcess( double theta, double noise):
+  NonParametricProcess(theta,noise)
+{
+  setAlgorithm(bobyqa);
+  setLimits(0.,100.);
+}  // Constructor
+
+
+
+StudentTProcess::~StudentTProcess()
+{} // Default destructor
+
+
+
+
+double StudentTProcess::negativeLogLikelihood(double &grad,
+					      size_t index)
+{
+  matrixd K = computeCorrMatrix(mRegularizer,0);
+  size_t n = K.size1();
+  
+  matrixd L(n,n);
+  cholesky_decompose(K,L);
+
+  vectord colU(n);
+
+  //TODO: replace this loop with std::transform
+  for (size_t ii=0; ii< n; ii++) 
+    colU(ii) = meanFunction(mGPXX[ii]);
+
+  vectord alphU(colU);
+  boost::numeric::ublas::inplace_solve(L,alphU,boost::numeric::ublas::lower_tag());
+  double eta = inner_prod(colU,alphU);
+  
+  vectord alphY(mGPY);
+  boost::numeric::ublas::inplace_solve(L,alphY,boost::numeric::ublas::lower_tag());
+  double mu     = inner_prod(colU,alphY) / eta;
+  double YInvRY = inner_prod(mGPY,alphY);
+    
+  double sigma = (YInvRY - mu*mu*eta) / (n-1);
+
+  double negloglik = 0.5*( (n-1)*log(sigma) + trace(L) + log(eta) );
+
+  return negloglik;
+}
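Up to additive constants, and assuming trace(L) from trace.hpp returns the log-determinant contribution of the Cholesky factor, \(\log|K| = 2\sum_i \log L_{ii}\), this is the negative log marginal likelihood of a GP whose mean scale and signal variance have been integrated out under vague priors, i.e. a Student-t process:

\[
-\log p(y \mid \theta) \simeq \tfrac{1}{2}\Big[(n-1)\log\hat\sigma^2 + \log|K| + \log\eta\Big],
\qquad
\hat\sigma^2 = \frac{y^\top K^{-1}y - \hat\mu^2\eta}{n-1},
\quad
\hat\mu = \frac{u^\top K^{-1}y}{\eta},
\quad
\eta = u^\top K^{-1}u.
\]

The unused grad argument is kept only to satisfy the interface; the inner optimizer (bobyqa) is derivative-free.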
+
+
+int StudentTProcess::prediction( const vectord &query,
+				 double& yPred, double& sPred)
+{
+  size_t n = mGPXX.size();
+  vectord rInvR(n);
+  double kn;
+  double uInvRr, rInvRr;
+  double meanf = meanFunction(query);
+  
+  vectord colR = computeCrossCorrelation(query);
+  kn = correlationFunction(query, query);
+  
+  noalias(rInvR) = prod(colR,mInvR);	
+  rInvRr = inner_prod(rInvR,colR);
+  uInvRr = inner_prod(mUInvR,colR);
+  
+  yPred = meanf*mMu + inner_prod( rInvR, mYUmu );
+  sPred = sqrt( mSig * (kn - rInvRr + (meanf - uInvRr) * (meanf - uInvRr) 
+			/ mUInvRUDelta ) );
+
+  return n-1;
+}
+	
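prediction mirrors the Gaussian-process predictor above but returns n-1 instead of 1: presumably the degrees of freedom of the Student-t predictive distribution, so that callers can score candidates with t rather than Gaussian tails. With the quantities precomputed below (\(\hat\mu\) = mMu, \(\hat\sigma^2\) = mSig, \(\eta\) = mUInvRUDelta \(= u^\top K^{-1}u\), \(r = k(x,X)\), \(u_i = m(x_i)\)), the code computes the universal-kriging equations

\[
\hat y(x) = m(x)\,\hat\mu + r^\top K^{-1}\,\mathrm{mYUmu},
\qquad
s^2(x) = \hat\sigma^2\left(k(x,x) - r^\top K^{-1}r + \frac{\big(m(x)-u^\top K^{-1}r\big)^2}{\eta}\right),
\]

so that, under the model, \( y(x) \sim \mathcal{T}_{n-1}\big(\hat y(x),\, s^2(x)\big) \).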
+
+int StudentTProcess::precomputeGPParams()
+{
+  size_t nSamples = mGPXX.size();
+  vectord colU(nSamples);
+
+  //TODO: replace this loop with std::transform
+  for (size_t ii=0; ii< nSamples; ii++) 
+    colU(ii) = meanFunction(mGPXX[ii]);
+
+  mUInvR = prod(colU,mInvR);
+  mUInvRUDelta = inner_prod(mUInvR,colU);
+  
+  vectord YInvR(nSamples);
+  double YInvRY;
+  
+  mMu =  inner_prod(mUInvR,mGPY) / mUInvRUDelta;
+  
+  noalias(YInvR) = prod(mGPY,mInvR);
+  YInvRY = inner_prod(YInvR,mGPY);
+  
+  mSig = (YInvRY - mMu*mMu*mUInvRUDelta) / (nSamples-1);
+  
+  svectord colMu(nSamples,mMu);
+  mYUmu = mGPY - colMu;
+
+  std::cout << "Mu, Sigma" << mMu <<", "<< mSig << std::endl;
+  std::cout << "Eta" << mUInvRUDelta << std::endl;
+
+  return 1;
+}
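One subtlety: mYUmu is built as \(y - \hat\mu\mathbf{1}\) (colMu is a constant vector), which equals the universal-kriging residual \(y - \hat\mu u\) only while meanFunction is the default constant \(m \equiv 1\). A non-constant mean basis would need something like colMu(ii) = mMu*meanFunction(mGPXX[ii]) here.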
+
+

wrappers/krigwpr.cpp

 #include "krigwpr.h"
 #include "krigging.hpp"
 
+#include "gaussprocess.hpp"
+#include "basicgaussprocess.hpp"
+#include "studenttprocess.hpp"
+
 
 
 void copyCarrayInVector(const double* array, int n, vectord& vec)
 {
  public:
 
-  CSKO( gp_params params,
-	size_t nIter): 
-    SKO(params,nIter)
+  CSKO( gp_params params, size_t nIter,
+	NonParametricProcess* gp = NULL): 
+    SKO(params,nIter,gp)
   {}; 
 
 
 			  double *x, /* in: initial guess, out: minimizer */
 			  double *minf, /* out: minimum */
 			  int maxeval, gp_params params,
-			  int useEI,