Commits

Ruben Martinez-Cantin committed bd1caac

Simplify inner optimization class. Add anisotropic box support. Replace some copy hacks with std::copy. Add wrapper to callback pointer to guarantee consistency.

Comments (0)

Files changed (9)

 LINK_DIRECTORIES( ${CMAKE_SOURCE_DIR}/lib )
 
 IF(NLOPT_BUILD)
-  ADD_SUBDIRECTORY(nlopt2)
-  include_directories(${CMAKE_SOURCE_DIR}/nlopt2/api)
+  ADD_SUBDIRECTORY(nlopt)
+  include_directories(${CMAKE_SOURCE_DIR}/nlopt/api)
   SET(EXT_LIBS nlopt)
 ELSE(NLOPT_BUILD)
   SET(EXT_LIBS ${NLOPT})

include/inneroptimization.hpp

   } innerOptAlgorithms;
 
 
+  class RBOptimizableWrapper
+  {
+  public:
+    explicit RBOptimizableWrapper(RBOptimizable* rbo): rbo_(rbo){};
+    virtual ~RBOptimizableWrapper(){};
+    double evaluate(const vectord& query){return rbo_->evaluate(query);}
+  private:
+    RBOptimizable* rbo_;
+  };
+
+  class RGBOptimizableWrapper
+  {
+  public:
+    explicit RGBOptimizableWrapper(RGBOptimizable* rgbo): rgbo_(rgbo){};
+    virtual ~RGBOptimizableWrapper(){};
+    double evaluate(const vectord& query, vectord& grad){return rgbo_->evaluate(query,grad);}
+  private:
+    RGBOptimizable* rgbo_;
+  };
+
+
   class NLOPT_Optimization //: public Optimization
   {
   public:
-    NLOPT_Optimization(RBOptimizable* rbo);
-    virtual ~NLOPT_Optimization(){};
+    NLOPT_Optimization(RBOptimizable* rbo, size_t dim);
+    NLOPT_Optimization(RGBOptimizable* rgbo, size_t dim);
+    virtual ~NLOPT_Optimization();
 
     /** Sets the optimization algorithm  */
     void setAlgorithm(innerOptAlgorithms newAlg);
     /** Sets the optimization algorithm  */
     void setMaxEvals(size_t meval);
 
-    /** Limits of the hypercube. 
-     * Currently, it assumes that all dimensions have the same limits.
-     */
+    /** Limits of the hypercube. */
+    void setLimits(const vectord& down, const vectord& up);
+
+    /** Limits of the hypercube assuming that all dimensions have the same limits. */
     void setLimits(double down, double up);
 
     /** Compute the inner optimization algorithm
 
   private:
 
-    int send_to_nlopt_optimize(double* x, int n, void* objPointer);	
-
-    RBOptimizable *rbobj;
+    //    int send_to_nlopt_optimize(double* x, int n, void* objPointer);	
+    // TODO: Consider adding a container object to avoid casting to and from a polymorphic object
+    RBOptimizableWrapper *rbobj;
+    RGBOptimizableWrapper *rgbobj;
 
     innerOptAlgorithms alg;
-    double mDown, mUp;
+    vectord mDown, mUp;
     size_t maxEvals;
   };
 
   inline void NLOPT_Optimization::setMaxEvals(size_t meval)
   { maxEvals = meval; }
 
-  inline void NLOPT_Optimization::setLimits(double down, double up)
+  inline void NLOPT_Optimization::setLimits(const vectord& down, const vectord& up)
   { mDown = down;   mUp = up; }
 
-
+  inline void NLOPT_Optimization::setLimits(double down, double up)
+  { 
+    for(size_t i = 0; i<mDown.size();++i) 
+      {
+	mDown(i) = down; mUp(i) = up;
+      }
+  };
 }//namespace bayesopt
 
 #endif

include/student_t_process_jef.hpp

     matrixd mKF, mL2;
 
     StudentTDistribution* d_;      //!< Predictive distributions
-    clock_t timer;
   };
 
   /**@}*/

src/bayesoptcont.cpp

     BayesOptBase(), mBB(NULL)
   { 
     if (mCrit == NULL)    throw(-1);
-    cOptimizer = new NLOPT_Optimization(mCrit.get());
+    cOptimizer = new NLOPT_Optimization(mCrit.get(),1);
     cOptimizer->setAlgorithm(DIRECT);
   } // Def Constructor
 
     BayesOptBase(dim,parameters), mBB(NULL)
   { 
     if (mCrit == NULL)    throw(-1);
-    cOptimizer = new NLOPT_Optimization(mCrit.get());
+    cOptimizer = new NLOPT_Optimization(mCrit.get(),dim);
     cOptimizer->setAlgorithm(DIRECT);
     cOptimizer->setMaxEvals(parameters.n_inner_iterations);
   } // Constructor

src/empiricalbayesprocess.cpp

 	throw 1;
       }
 
-    kOptimizer = new NLOPT_Optimization(this);
+    size_t nhp = mKernel.nHyperParameters();
+    kOptimizer = new NLOPT_Optimization(this,nhp);
 
     //TODO: Generalize
     if (parameters.l_type == L_ML)
       {
 	kOptimizer->setAlgorithm(COMBINED);
       }
-    kOptimizer->setLimits(1e-10,100.);
+    kOptimizer->setLimits(svectord(nhp,1e-10),svectord(nhp,100.));
   }
 
   EmpiricalBayesProcess::~EmpiricalBayesProcess()

src/inneroptimization.cpp

       }
   }
 
-  NLOPT_Optimization::NLOPT_Optimization(RBOptimizable* rbo)
+  NLOPT_Optimization::NLOPT_Optimization(RBOptimizable* rbo, size_t dim)
   { 
-    rbobj = rbo;
-    alg = DIRECT;    mDown = 0.;    mUp = 1.;    maxEvals = MAX_INNER_EVALUATIONS;
+    rbobj = new RBOptimizableWrapper(rbo);       rgbobj = NULL;
+    alg = DIRECT;                  maxEvals = MAX_INNER_EVALUATIONS;
+    mDown = zvectord(dim);         mUp = svectord(dim,1.0);  
   };
 
+  NLOPT_Optimization::NLOPT_Optimization(RGBOptimizable* rgbo, size_t dim)
+  { 
+    rbobj = NULL;             rgbobj = new RGBOptimizableWrapper(rgbo);
+    alg = DIRECT;             maxEvals = MAX_INNER_EVALUATIONS;
+    mDown = zvectord(dim);    mUp = svectord(dim,1.0);  
+  };
+
+  NLOPT_Optimization::~NLOPT_Optimization()
+  {
+    if(rbobj != NULL) delete rbobj;
+    if(rgbobj != NULL) delete rgbobj;
+  }
 
   int NLOPT_Optimization::run(vectord &Xnext)
   {   
-    void *objPointer = static_cast<void *>(rbobj);
-    int n = static_cast<int>(Xnext.size());
-    int error;
+    assert(mDown.size() == Xnext.size());
+    assert(mUp.size() == Xnext.size());
 
-    assert(objPointer != NULL);
-    error = send_to_nlopt_optimize(&Xnext(0), n, objPointer);
-
-    return error;
-  } // run (uBlas)
-
-
-
-  int NLOPT_Optimization::send_to_nlopt_optimize(double* x, int n, void* objPointer)
-  {
-    double u[128], l[128];
-    double fmin = 1;
-    int maxf = maxEvals*n;    
-    int ierror;
-
-    for (int i = 0; i < n; ++i) 
-      {
-	l[i] = mDown;	
-	u[i] = mUp;
-      
-	if (x[i] < l[i] || x[i] > u[i])
-	  {
-	    x[i]=(l[i]+u[i])/2.0;  
-	    //nlopt requires x to have a valid initial value even for algorithms that do
-	    //not need it
-	  }
-      }
-    
     nlopt_opt opt;
     double (*fpointer)(unsigned int, const double *, double *, void *);
-    double coef;  //Percentaje of resources used in local optimization
+    void *objPointer;
 
-    /* algorithm and dims */
-    if (alg == LBFGS)                                     //Require gradient
-      fpointer = &(NLOPT_WPR::evaluate_nlopt_grad);
-    else                                           //Do not require gradient
-      fpointer = &(NLOPT_WPR::evaluate_nlopt);
-
-    if (alg == COMBINED)  
-      coef = 0.8;
-    else
-      coef = 1.0;
+    int n = static_cast<int>(Xnext.size());
+    double fmin = 1;
+    int maxf1 = maxEvals*n;
+    int maxf2 = 0;    // For a second pass
+    double coef_local = 0.2;
+    int ierror;
 
     switch(alg)
       {
-      case DIRECT:      /* same as combined */
-      case COMBINED: 	opt = nlopt_create(NLOPT_GN_DIRECT_L, n); break;
-      case BOBYQA: 	opt = nlopt_create(NLOPT_LN_BOBYQA, n); break;
-      case LBFGS:       opt = nlopt_create(NLOPT_LD_LBFGS, n); break;
-      default: FILE_LOG(logERROR) << "Algorithm not supported"; return -1;
+      case DIRECT: // Pure global. No gradient
+	opt = nlopt_create(NLOPT_GN_DIRECT_L, n); 
+	fpointer = &(NLOPT_WPR::evaluate_nlopt);
+	objPointer = static_cast<void *>(rbobj);
+	break;
+      case COMBINED: // Combined local-global (80% DIRECT -> 20% BOBYQA). No gradient
+	opt = nlopt_create(NLOPT_GN_DIRECT_L, n); 
+	maxf2 = static_cast<int>(static_cast<double>(maxf1)*coef_local);
+	maxf1 -= maxf2;  // That way, the number of evaluations is the same in all methods.
+	fpointer = &(NLOPT_WPR::evaluate_nlopt);
+	objPointer = static_cast<void *>(rbobj);
+	break;
+      case BOBYQA:  // Pure local. No gradient
+	opt = nlopt_create(NLOPT_LN_BOBYQA, n); 
+	fpointer = &(NLOPT_WPR::evaluate_nlopt);
+	objPointer = static_cast<void *>(rbobj);
+	break;
+      case LBFGS:  // Pure local. Gradient based
+	opt = nlopt_create(NLOPT_LD_LBFGS, n); 	
+	fpointer = &(NLOPT_WPR::evaluate_nlopt_grad);
+	objPointer = static_cast<void *>(rgbobj);
+	break;
+      default: 
+	FILE_LOG(logERROR) << "Algorithm not supported"; 
+	return -1;
       }
 
-    nlopt_set_lower_bounds(opt, l);
-    nlopt_set_upper_bounds(opt, u);
+    if (objPointer == NULL)
+      {
+	FILE_LOG(logERROR) << "Wrong object model (gradient/no gradient)"; 
+	return -1;
+      }
+
+    nlopt_set_lower_bounds(opt, &mDown(0));
+    nlopt_set_upper_bounds(opt, &mUp(0));
     nlopt_set_min_objective(opt, fpointer, objPointer);
-    int nfeval = static_cast<int>(static_cast<double>(maxf)*coef);
-    nlopt_set_maxeval(opt, nfeval) ;
+    nlopt_set_maxeval(opt, maxf1) ;
 
-
-    nlopt_result errortype = nlopt_optimize(opt, x, &fmin);
+    nlopt_result errortype = nlopt_optimize(opt, &Xnext(0), &fmin);
     checkNLOPTerror(errortype);
 
-    // Local refinement
-    if ((alg == COMBINED) && (coef < 1)) 
+    if (maxf2)
       {
 	nlopt_destroy(opt);  // Destroy previous one
-	opt = nlopt_create(NLOPT_LN_SBPLX, n); /* algorithm and dims */
-	nlopt_set_lower_bounds(opt, l);
-	nlopt_set_upper_bounds(opt, u);
+	opt = nlopt_create(NLOPT_LN_BOBYQA, n); /* algorithm and dims */
+	nlopt_set_lower_bounds(opt, &mDown(0));
+	nlopt_set_upper_bounds(opt, &mUp(0));
 	nlopt_set_min_objective(opt, fpointer, objPointer);
-	nlopt_set_maxeval(opt, maxf-nfeval);
+	nlopt_set_maxeval(opt, maxf2) ;
 	
-	errortype = nlopt_optimize(opt, x, &fmin);
+	errortype = nlopt_optimize(opt, &Xnext(0), &fmin);
 	checkNLOPTerror(errortype);
       }
       
     nlopt_destroy(opt);  // Destroy opt
-    
+
     ierror = static_cast<int>(errortype);
     return ierror;
+  } // innerOptimize (uBlas)
 
-  } // send_to_nlopt_optimize (C array)
+
+  // {   
+  //   void *objPointer = static_cast<void *>(rbobj);
+  //   int n = static_cast<int>(Xnext.size());
+  //   int error;
+
+  //   assert(objPointer != NULL);
+  //   error = send_to_nlopt_optimize(&Xnext(0), n, objPointer);
+
+  //   return error;
+  // } // run (uBlas)
+
+
+
+  // int NLOPT_Optimization::send_to_nlopt_optimize(double* x, int n, void* objPointer)
+  // {
+  //   double u[128], l[128];
+  //   double fmin = 1;
+  //   int maxf = maxEvals*n;    
+  //   int ierror;
+
+  //   for (int i = 0; i < n; ++i) 
+  //     {
+  // 	l[i] = mDown;	
+  // 	u[i] = mUp;
+      
+  // 	if (x[i] < l[i] || x[i] > u[i])
+  // 	  {
+  // 	    x[i]=(l[i]+u[i])/2.0;  
+  // 	    //nlopt requires x to have a valid initial value even for algorithms that do
+  // 	    //not need it
+  // 	  }
+  //     }
+    
+  //   nlopt_opt opt;
+  //   double (*fpointer)(unsigned int, const double *, double *, void *);
+  //   double coef;  //Percentaje of resources used in local optimization
+
+  //   /* algorithm and dims */
+  //   if (alg == LBFGS)                                     //Require gradient
+  //     fpointer = &(NLOPT_WPR::evaluate_nlopt_grad);
+  //   else                                           //Do not require gradient
+  //     fpointer = &(NLOPT_WPR::evaluate_nlopt);
+
+  //   if (alg == COMBINED)  
+  //     coef = 0.8;
+  //   else
+  //     coef = 1.0;
+
+  //   switch(alg)
+  //     {
+  //     case DIRECT:      /* same as combined */
+  //     case COMBINED: 	opt = nlopt_create(NLOPT_GN_DIRECT_L, n); break;
+  //     case BOBYQA: 	opt = nlopt_create(NLOPT_LN_BOBYQA, n); break;
+  //     case LBFGS:       opt = nlopt_create(NLOPT_LD_LBFGS, n); break;
+  //     default: FILE_LOG(logERROR) << "Algorithm not supported"; return -1;
+  //     }
+
+  //   nlopt_set_lower_bounds(opt, l);
+  //   nlopt_set_upper_bounds(opt, u);
+  //   nlopt_set_min_objective(opt, fpointer, objPointer);
+  //   int nfeval = static_cast<int>(static_cast<double>(maxf)*coef);
+  //   nlopt_set_maxeval(opt, nfeval) ;
+
+
+  //   nlopt_result errortype = nlopt_optimize(opt, x, &fmin);
+  //   checkNLOPTerror(errortype);
+
+  //   // Local refinement
+  //   if ((alg == COMBINED) && (coef < 1)) 
+  //     {
+  // 	nlopt_destroy(opt);  // Destroy previous one
+  // 	opt = nlopt_create(NLOPT_LN_SBPLX, n); /* algorithm and dims */
+  // 	nlopt_set_lower_bounds(opt, l);
+  // 	nlopt_set_upper_bounds(opt, u);
+  // 	nlopt_set_min_objective(opt, fpointer, objPointer);
+  // 	nlopt_set_maxeval(opt, maxf-nfeval);
+	
+  // 	errortype = nlopt_optimize(opt, x, &fmin);
+  // 	checkNLOPTerror(errortype);
+  //     }
+      
+  //   nlopt_destroy(opt);  // Destroy opt
+    
+  //   ierror = static_cast<int>(errortype);
+  //   return ierror;
+
+  // } // send_to_nlopt_optimize (C array)
 
 
 }// namespace bayesopt

src/student_t_process_jef.cpp

 -----------------------------------------------------------------------------
 */
 
-#include <ctime>
 #include "cholesky.hpp"
 #include "trace_ublas.hpp"
 #include "student_t_process_jef.hpp"
 						   bopt_params params):
     HierarchicalGaussianProcess(dim, params)
   {
-    timer = 0;
     mSigma = params.sigma_s;
     d_ = new StudentTDistribution();
   }  // Constructor
 
   StudentTProcessJeffreys::~StudentTProcessJeffreys()
   {
-    std::cout << "Time in pred:" << (double)(timer) / (double)CLOCKS_PER_SEC << std::endl;
     delete d_;
   } // Default destructor
 
 				   + inner_prod(rho,rho)));
 
     d_->setMeanAndStd(yPred,sPred);
-    timer += clock() - start;
     return d_;
   }
 

tests/testnlopt.cpp

  * Unit testing for the NLOPT wrappers
  * 
  */
+
+#include <iostream>
+#include <vector>
+#include "FastDelegate.h"
+
+class FooInterface
+{
+public:
+  FooInterface(){i=0.0;}
+  virtual ~FooInterface(){};
+  virtual double call(const double& step)
+  { 
+    i+=step; 
+    //    std::cout << "Foo Interface: " << i << std::endl;
+    return i;
+  }
+protected:
+  double i;
+};
+
+class FooDerivate: public FooInterface
+{
+public:
+  FooDerivate(){j=5.0;}
+  virtual ~FooDerivate(){};
+  double call(const double& step)
+  {
+    j=step/j;
+    //    std::cout << "Foo Derivate: " << j << std::endl;
+    return j;
+  }
+private:
+  double j;  
+};
+
+
+class Optimizer
+{
+public:
+  Optimizer(){iter = 10e6;}
+  virtual ~Optimizer(){};
+  void setCallback(FooInterface* foo)
+  {foo_ = static_cast<void*>(foo);}
+
+  static double step(void* ptr, const double& query)
+  {
+    FooInterface* p = static_cast<FooInterface*>(ptr);
+    return p->call(query);
+  }
+  void run()
+  {
+    std::vector<double> res;
+    for(size_t i=10;i<iter;++i)
+      {
+	res.push_back(step(foo_,static_cast<double>(i)));
+      }
+  }
+private:
+  size_t iter;
+  void* foo_;
+};
+
+typedef fastdelegate::FastDelegate1<const double&, double> myfunc;
+
+class OptimizerDelegate
+{
+public:
+  OptimizerDelegate(){iter = 10e6;}
+  virtual ~OptimizerDelegate(){};
+  void setCallback(FooInterface* foo)
+  {f_.bind(foo, &FooInterface::call);}
+  void run()
+  {
+    std::vector<double> res;
+    for(size_t i=10;i<iter;++i)
+      {
+	res.push_back(f_(static_cast<double>(i)));
+      }
+  }
+
+private:
+  size_t iter;
+  myfunc f_;
+};
+
+
+//////////////////////////////////////////////////////////////////////
+
+class OptimizerBase
+{
+public:
+  OptimizerBase(){iter = 10e6;}
+  virtual ~OptimizerBase(){};
+  virtual double call(const double& step) = 0;
+  void run()
+  {
+    std::vector<double> res;
+    for(size_t i=10;i<iter;++i)
+      {
+	res.push_back(call(static_cast<double>(i)));
+      }
+  }
+private:
+  size_t iter;
+};
+
+
+
+class BarInterface: public OptimizerBase
+{
+public:
+  BarInterface(){i=0.0;}
+  virtual ~BarInterface(){};
+  virtual double call(const double& step)
+  { 
+    i+=step; 
+    //    std::cout << "Bar Interface: " << i << std::endl;
+    return i;
+  }
+protected:
+  double i;
+};
+
+class BarDerivate: public BarInterface
+{
+public:
+  BarDerivate(){j=5.0;}
+  virtual ~BarDerivate(){};
+  double call(const double& step)
+  {
+    j=step/j;
+    // std::cout << "Bar Derivate: " << j << std::endl;
+    return j;
+  }
+private:
+  double j;  
+};
+
+
+int main( int argc, const char* argv[] )
+{
+  FooInterface* ptr1 = new FooDerivate();
+  BarInterface* ptr2 = new BarDerivate();
+  FooInterface* ptr3 = new FooDerivate();
+
+  Optimizer op1;
+  op1.setCallback(ptr1);
+
+  Optimizer op2;
+  op2.setCallback(ptr3);
+
+  clock_t start = clock();
+  op1.run();
+  double dif1 = (double)(clock()-start) / (double)CLOCKS_PER_SEC;
+
+  start = clock();
+  ptr2->run();
+  double dif2 = (double)(clock()-start) / (double)CLOCKS_PER_SEC;
+
+  start = clock();
+  op2.run();
+  double dif3 = (double)(clock()-start) / (double)CLOCKS_PER_SEC;
+
+  std::cout << dif1 << "|||" << dif2 << "|||" << dif3 << std::endl;
+  return 0;
+}
+

wrappers/nloptwpr.cpp

 {
 
   namespace ublas = boost::numeric::ublas;
-  using bayesopt::RBOptimizable;
-  using bayesopt::RGBOptimizable;
+  using bayesopt::RBOptimizableWrapper;
+  using bayesopt::RGBOptimizableWrapper;
 
   double evaluate_nlopt (unsigned int n, const double *x,
 			 double *grad, void *my_func_data)
 
     // This is not very clever... but works!
     void *objPointer = my_func_data;
-    RBOptimizable* OPTIMIZER = static_cast<RBOptimizable*>(objPointer);
+    RBOptimizableWrapper* OPTIMIZER = static_cast<RBOptimizableWrapper*>(objPointer);
     
     return OPTIMIZER->evaluate(vx);
   } /* evaluate_criteria_nlopt */
     
     // This is not very clever... but works!
     void *objPointer = my_func_data;
-    RGBOptimizable* OPTIMIZER = static_cast<RGBOptimizable*>(objPointer);
+    RGBOptimizableWrapper* OPTIMIZER = static_cast<RGBOptimizableWrapper*>(objPointer);
     
 
     ublas::vector<double> vgrad = ublas::zero_vector<double>(n);