Ruben Martinez-Cantin / BayesOpt

Commits

Ruben Martinez-Cantin committed 3c68ced

Refactoring code. InnerOptimization parameterized. Check for bottlenecks

  • Parent commits 83f7745
  • Branches default


Files changed (5)

File include/inneroptimization.hpp

     void setAlgorithm(innerOptAlgorithms newAlg)
     { alg = newAlg; }
 
+    /** Sets the maximum number of inner optimization evaluations */
+    void setMaxEvals(size_t meval)
+    { maxEvals = meval; }
+
     /** 
      * Limits of the hypercube. 
      * Currently, it assumes that all dimensions have the same limits.
 
     innerOptAlgorithms alg;
     double mDown, mUp;
+    size_t maxEvals;
   };
 
 }//namespace bayesopt
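With both setters inlined in the header, configuring the inner optimizer is one call per knob. A minimal usage sketch (OptimizeCriteria is the subclass constructed in the next hunk; the model pointer and the value 500 are illustrative, not library defaults):

    InnerOptimization* inner = new OptimizeCriteria(model);
    inner->setAlgorithm(DIRECT);  // global optimizer for the inner problem
    inner->setMaxEvals(500);      // per-dimension cap; multiplied by dim at solve time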

File src/bayesoptcont.cpp

   { 
     cOptimizer = new OptimizeCriteria(this);
     cOptimizer->setAlgorithm(DIRECT);
+    cOptimizer->setMaxEvals(parameters.n_inner_iterations);
   } // Constructor
 
   ContinuousModel::~ContinuousModel()
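Callers pick this budget up from the parameter struct. A minimal sketch, assuming the C-style initializer initialize_parameters_to_default() from parameters.h and the usual ContinuousModel(dim, params) constructor; the values here are illustrative:

    bopt_params par = initialize_parameters_to_default();
    par.n_inner_iterations = 1000;            // forwarded to setMaxEvals() by the constructor
    bayesopt::ContinuousModel model(2, par);  // 2-dimensional problem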

File src/inneroptimization.cpp

 
   InnerOptimization::InnerOptimization()
   { 
-    alg = DIRECT;    mDown = 0.;    mUp = 1.;
+    alg = DIRECT;    mDown = 0.;    mUp = 1.;    maxEvals = MAX_INNER_EVALUATIONS;
   };
 
 
   {
     double u[128], l[128];
     double fmin = 1;
-    int maxf = MAX_INNER_EVALUATIONS*n;    
+    int maxf = maxEvals*n;    
     int ierror;
 
-    for (int i = 0; i < n; ++i) {
-      l[i] = mDown;	u[i] = mUp;
-      // What if x is undefined?
-      if (x[i] < l[i] || x[i] > u[i])
-	x[i]=(l[i]+u[i])/2.0;
-    }
-
+    for (int i = 0; i < n; ++i) 
+      {
+	l[i] = mDown;	
+	u[i] = mUp;
+      
+	if (x[i] < l[i] || x[i] > u[i])
+	  {
+	    x[i]=(l[i]+u[i])/2.0;  
+	    //nlopt requires x to have a valid initial value even for algorithms that do
+	    //not need it
+	  }
+      }
+    
     nlopt_opt opt;
     double (*fpointer)(unsigned int, const double *, double *, void *);
-    double coef = 0.8;  //Percentage of resources used in local optimization
+    double coef;  //Percentage of resources used in local optimization
 
     /* algorithm and dims */
    if (alg == LBFGS)                                     //Require gradient
      fpointer = &(NLOPT_WPR::evaluate_nlopt_grad);
     else                                           //Do not require gradient
       fpointer = &(NLOPT_WPR::evaluate_nlopt);
 
-    if (alg == COMBINED)  coef = 0.8;
+    if (alg == COMBINED)  
+      coef = 0.8;
+    else
+      coef = 1.0;
 
     switch(alg)
       {
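Downstream, the computed maxf presumably feeds NLopt's evaluation cap. A standalone sketch against the plain NLopt C API, with a toy sphere objective standing in for BayesOpt's criteria (note the function pointer signature matches fpointer above); this illustrates the cap, not the library's exact wiring:

    #include <nlopt.h>

    // Toy stand-in for the acquisition criteria; DIRECT ignores grad.
    static double sphere(unsigned n, const double* x, double* grad, void* data)
    {
      (void)grad; (void)data;
      double s = 0.0;
      for (unsigned i = 0; i < n; ++i) s += x[i] * x[i];
      return s;
    }

    int main()
    {
      const unsigned n = 2;
      double x[2] = {0.5, 0.5};   // valid start inside the unit hypercube
      double l[2] = {0.0, 0.0}, u[2] = {1.0, 1.0};
      double fmin;

      nlopt_opt opt = nlopt_create(NLOPT_GN_DIRECT_L, n);
      nlopt_set_min_objective(opt, sphere, NULL);
      nlopt_set_lower_bounds(opt, l);
      nlopt_set_upper_bounds(opt, u);
      nlopt_set_maxeval(opt, 500 * (int)n);  // the maxEvals*n cap from the diff
      nlopt_optimize(opt, x, &fmin);
      nlopt_destroy(opt);
      return 0;
    }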

File src/nonparametricprocess.cpp

   {
     assert( mGPXX[1].size() == Xnew.size() );
     addSample(Xnew,Ynew);
+    FILE_LOG(logDEBUG) << "Retraining model parameters";
     return fitSurrogateModel();
   } // fitSurrogateModel
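FILE_LOG only emits when the active reporting level admits debug messages. A hedged sketch, assuming the FILELog-style logger that these macros usually come from:

    FILELog::ReportingLevel() = logDEBUG;  // raise verbosity to see the new trace
    FILE_LOG(logDEBUG) << "Retraining model parameters";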
 

File src/student_t_process_nig.cpp

     matrixd KK = computeCorrMatrix();
     matrixd WW = zmatrixd(p,p);  //TODO: diagonal matrix
     utils::addToDiagonal(WW,mInvVarW);
-    matrixd FW = prod(trans(mFeatM),WW);
+    const matrixd FW = prod(trans(mFeatM),WW);
     KK += prod(FW,mFeatM);
     matrixd BB(n,n);
     utils::cholesky_decompose(KK,BB);
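For reference, this block assembles KK = K + F^T W F, the posterior precision term over the normal-inverse-gamma weights, and then factors it by Cholesky; declaring FW const only documents that the intermediate product F^T W is read-only afterwards, with no behavioral change.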