Commits

Ruben Martinez-Cantin committed d71b8ad

Cleaning virtual getSigma

  • Parent commits 693abb9


Files changed (11)
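All eleven files follow the same refactoring pattern: the per-class getSignalVariance() accessor and its backing mSigma member are removed from each concrete process and replaced by a single non-virtual inline accessor plus one shared mSigma member in the base class declared in nonparametricprocess.hpp. A minimal sketch of the pattern, using simplified placeholder classes rather than the real BayesOpt hierarchy:

    // Before: each process keeps its own copy of the signal variance and
    // overrides a pure virtual getter declared in the base class.
    class ProcessBaseBefore {
    public:
      virtual ~ProcessBaseBefore() {}
      virtual double getSignalVariance() = 0;
    };

    class SomeProcessBefore : public ProcessBaseBefore {
    public:
      explicit SomeProcessBefore(double sigma) : mSigma(sigma) {}
      double getSignalVariance() { return mSigma; }
    private:
      double mSigma;                 // duplicated in every subclass
    };

    // After: the base class owns the member and exposes one non-virtual
    // inline accessor; subclasses only assign mSigma in their constructors.
    class ProcessBaseAfter {
    public:
      double getSignalVariance() { return mSigma; }
    protected:
      double mSigma;                 // signal variance, shared by all processes
    };

    class SomeProcessAfter : public ProcessBaseAfter {
    public:
      explicit SomeProcessAfter(double sigma) { mSigma = sigma; }
    };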

include/gaussian_process.hpp

      */	
     ProbabilityDistribution* prediction(const vectord &query);
 
-    double getSignalVariance() { return mSigma; };
-
   private:
 
     /** 
     int precomputePrediction();
 
   private:
-    double mSigma;                ///< Signal variance
     vectord mAlphaV;              ///< Precomputed L\y
     GaussianDistribution* d_;     ///< Pointer to distribution function
   };

include/gaussian_process_ml.hpp

      */	
     ProbabilityDistribution* prediction(const vectord &query);
 
-    double getSignalVariance() { return mSigma; };
-
   private:
 
     /** 
     int precomputePrediction();
 
   private:
-    double mSigma;                ///< Signal variance
     vectord mWML;           //!< GP ML parameters
     
     /// Precomputed GP prediction operations

include/gaussian_process_normal.hpp

      */	
     ProbabilityDistribution* prediction(const vectord &query);
 
-    double getSignalVariance() { return mSigma; };
-
   private:
 
     /** 
 
   private:
     vectord mWMap;                      //!< GP posterior parameters
-    double mSigma;
     vectord mW0;
     vectord mInvVarW;
     //! Precomputed GP prediction operations

include/nonparametricprocess.hpp

     inline vectord getPointAtMinimum() { return mGPXX[mMinIndex]; };
     inline double getValueAtMinimum() { return mGPY(mMinIndex); };
     inline size_t getNSamples() { return mGPY.size(); };
-    
-    virtual double getSignalVariance() = 0;
-  
+    inline double getSignalVariance() { return mSigma; };
 
     /** Sets the kind of learning methodology for kernel hyperparameters */
     inline void setLearnType(learning_type l_type) { mLearnType = l_type; };
     vectord mMu;                 ///< Mean of the parameters of the mean function
     vectord mS_Mu;    ///< Variance of the params of the mean function W=mS_Mu*I
 
+    double mSigma;                                   ///< Signal variance
     matrixd mL;             ///< Cholesky decomposition of the Correlation matrix
     size_t dim_;
     learning_type mLearnType;

include/student_t_process_jef.hpp

      */	
     ProbabilityDistribution* prediction(const vectord &query);
 
-    double getSignalVariance() { return mSigma; };
-
   private:
 
     /** 
     int precomputePrediction();
 
   private:
-    double mSigma;          ///< Signal variance
     vectord mWML;           //!< GP ML parameters
     
     /// Precomputed GP prediction operations

include/student_t_process_nig.hpp

      */	
     ProbabilityDistribution* prediction(const vectord &query);
 
-    double getSignalVariance() { return mSigmaMap; };
-
   private:
 
     /** 
 
   private:
     vectord mWMap;                      //!< GP posterior parameters
-    double mSigmaMap;                   //!< GP posterior parameters
     double mAlpha, mBeta;
     vectord mW0;
     vectord mInvVarW;

src/gaussian_process.cpp

   namespace ublas = boost::numeric::ublas;
 
   GaussianProcess::GaussianProcess(size_t dim, bopt_params params):
-    EmpiricalBayesProcess(dim, params), mSigma(params.sigma_s)
+    EmpiricalBayesProcess(dim, params)
   {
+    mSigma = params.sigma_s;
     d_ = new GaussianDistribution();
   }  // Constructor
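Because mSigma now lives in the base class, this constructor and the ones in the remaining .cpp files can no longer initialize it in their member initializer lists: C++ only allows a class's own non-static members and its direct bases there, so the value is assigned in the constructor body instead. (This also implies the new mSigma is reachable from the subclasses, presumably in a protected section of nonparametricprocess.hpp outside the hunk shown above.) A minimal sketch of the constraint, with placeholder names:

    struct Base {
    protected:
      double mSigma;
    };

    struct Derived : Base {
      // Derived(double s) : mSigma(s) {}        // ill-formed: mSigma is not a member of Derived
      explicit Derived(double s) { mSigma = s; }  // assign the inherited member in the body
    };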
 

src/gaussian_process_ml.cpp

   namespace ublas = boost::numeric::ublas;
 
   GaussianProcessML::GaussianProcessML(size_t dim, bopt_params params):
-    HierarchicalGaussianProcess(dim, params), mSigma(params.sigma_s)
+    HierarchicalGaussianProcess(dim, params)
   {
+    mSigma = params.sigma_s;
     d_ = new GaussianDistribution();
   }  // Constructor
 

src/gaussian_process_normal.cpp

   GaussianProcessNormal::GaussianProcessNormal(size_t dim, 
 					 bopt_params params):
     HierarchicalGaussianProcess(dim,params),
-    mSigma(params.sigma_s), 
     mW0(params.mean.n_coef), mInvVarW(params.mean.n_coef), 
     mD(params.mean.n_coef,params.mean.n_coef)
   {  
+    mSigma = params.sigma_s;
     mW0 = utils::array2vector(params.mean.coef_mean,params.mean.n_coef);
     for (size_t ii = 0; ii < params.mean.n_coef; ++ii)
       {

src/student_t_process_jef.cpp

 
   StudentTProcessJeffreys::StudentTProcessJeffreys(size_t dim, 
 						   bopt_params params):
-    HierarchicalGaussianProcess(dim, params), mSigma(params.sigma_s)
+    HierarchicalGaussianProcess(dim, params)
   {
+    mSigma = params.sigma_s;
     d_ = new StudentTDistribution();
   }  // Constructor
 

src/student_t_process_nig.cpp

     inplace_solve(mD,rho,ublas::lower_tag());
     
     double yPred = inner_prod(phi,mWMap) + inner_prod(v,mVf);
-    double sPred = sqrt( mSigmaMap * (kq - inner_prod(v,v) 
+    double sPred = sqrt( mSigma * (kq - inner_prod(v,v) 
 				   + inner_prod(rho,rho)));
 
     if ((boost::math::isnan(yPred)) || (boost::math::isnan(sPred)))
     matrixd BB(n,n);
     utils::cholesky_decompose(KK,BB);
     inplace_solve(BB,v0,ublas::lower_tag());
-    mSigmaMap = (mBeta/mAlpha + inner_prod(v0,v0))/(n+2*mAlpha);
+    mSigma = (mBeta/mAlpha + inner_prod(v0,v0))/(n+2*mAlpha);
     
     int dof = static_cast<int>(n+2*mAlpha);
     
-    if ((boost::math::isnan(mWMap(0))) || (boost::math::isnan(mSigmaMap)))
+    if ((boost::math::isnan(mWMap(0))) || (boost::math::isnan(mSigma)))
       {
 	FILE_LOG(logERROR) << "Error in precomputed prediction. NaN found.";
 	return -1;
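
In student_t_process_nig, the former mSigmaMap member is folded into the inherited mSigma, so the MAP estimate of the signal variance computed in precomputePrediction() is now read through the same base-class accessor as every other process. A hypothetical helper illustrating the uniform call site, assuming the class declared in nonparametricprocess.hpp is named NonParametricProcess:

    // Hypothetical usage sketch: with the accessor defined once in the base
    // class, any concrete process can be queried the same way.
    #include "nonparametricprocess.hpp"

    double querySignalVariance(NonParametricProcess* proc)
    {
      return proc->getSignalVariance();
    }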