#include <shogun/lib/external/brent.h>

using namespace Eigen;

#ifndef DOXYGEN_SHOULD_SKIP_THIS
// Line-search functor handed to Brent's method: psi(alpha) = alpha'*(f-m)/2 - log p(y|f)
class CPsiLine : public func_base
{
	// ...
	virtual double operator() (double x)
	{
		// take the step along the Newton direction and recompute f = K*alpha*scale^2 + m
		(*alpha)=start_alpha+x*dalpha;
		eigen_f=K*(*alpha)*CMath::sq(scale)+eigen_m;
		// ...
		float64_t result = (*alpha).dot(eigen_f-eigen_m)/2.0-
			SGVector<float64_t>::sum(lik->get_log_probability_f(lab, *f));

		return result;
	}
};
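For orientation, here is a minimal standalone sketch (not Shogun code) of the objective that CPsiLine evaluates: psi(alpha) = alpha'*(f-m)/2 - sum_i log p(y_i|f_i), with f = K*alpha*scale^2 + m. The logistic likelihood and all toy values are illustrative assumptions; the functor above obtains the log-likelihood terms from the configured likelihood model instead.

// Minimal sketch: evaluate psi(alpha) for an assumed logistic likelihood.
#include <Eigen/Dense>
#include <cmath>
#include <iostream>

// psi(alpha) = alpha'*(f - m)/2 - sum_i log p(y_i | f_i), with f = K*alpha*scale^2 + m
double psi(const Eigen::MatrixXd& K, const Eigen::VectorXd& m,
           const Eigen::VectorXd& y, const Eigen::VectorXd& alpha, double scale)
{
    Eigen::VectorXd f = K * alpha * scale * scale + m;
    double lp = 0.0;
    for (int i = 0; i < y.size(); i++)
        lp += -std::log(1.0 + std::exp(-y(i) * f(i)));  // logistic log-likelihood (assumed)
    return alpha.dot(f - m) / 2.0 - lp;
}

int main()
{
    const int n = 3;
    Eigen::MatrixXd K = Eigen::MatrixXd::Identity(n, n);   // toy kernel matrix
    Eigen::VectorXd m = Eigen::VectorXd::Zero(n);           // zero mean function
    Eigen::VectorXd y(n); y << 1, -1, 1;                    // binary labels
    Eigen::VectorXd alpha = Eigen::VectorXd::Constant(n, 0.1);
    std::cout << "psi = " << psi(K, m, y, alpha, 1.0) << std::endl;
    return 0;
}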
void CSingleLaplacianInferenceMethod::init()
{
	// ...
}

// from get_negative_log_marginal_likelihood(): -log Z = alpha'*(mu-m)/2 - log p(y|mu) + log|B|/2
if (eigen_W.minCoeff()<0)
{
	// ...
	result=(eigen_alpha.dot(eigen_mu-eigen_mean))/2.0-
		lp+log(lu.determinant())/2.0;
}
else
	result=eigen_alpha.dot(eigen_mu-eigen_mean)/2.0-lp+
		eigen_L.diagonal().array().log().sum();
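The else branch above is the stable form used when the Cholesky factor of B = I + sW*sW'.*K is available. A self-contained sketch of that formula with plain Eigen types (all inputs are toy assumptions):

// Minimal sketch: Laplace negative log marginal likelihood via the Cholesky factor of B.
//   -log Z ~= alpha'*(mu - m)/2 - log p(y|mu) + sum(log(diag(L))),  L = chol_upper(B)
#include <Eigen/Dense>
#include <iostream>

double neg_log_marginal(const Eigen::MatrixXd& K, const Eigen::VectorXd& m,
                        const Eigen::VectorXd& mu, const Eigen::VectorXd& alpha,
                        const Eigen::VectorXd& W, double lp)
{
    Eigen::VectorXd sW = W.cwiseSqrt();
    Eigen::MatrixXd B = Eigen::MatrixXd::Identity(K.rows(), K.cols()) +
        (sW * sW.transpose()).cwiseProduct(K);
    Eigen::MatrixXd L = Eigen::LLT<Eigen::MatrixXd>(B).matrixU();  // upper factor
    return alpha.dot(mu - m) / 2.0 - lp + L.diagonal().array().log().sum();
}

int main()
{
    Eigen::MatrixXd K(2, 2); K << 1.0, 0.3, 0.3, 1.0;   // toy kernel matrix (assumption)
    Eigen::VectorXd m = Eigen::VectorXd::Zero(2);
    Eigen::VectorXd mu(2); mu << 0.2, -0.1;              // posterior mode (assumption)
    Eigen::VectorXd alpha = K.ldlt().solve(mu - m);      // alpha = K^-1*(mu - m)
    Eigen::VectorXd W(2); W << 0.25, 0.25;               // -d2 log p(y|f) at the mode (assumption)
    std::cout << neg_log_marginal(K, m, mu, alpha, W, -1.2) << std::endl;
    return 0;
}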
// from update_approx_cov(): V = L'\(sW .* K), then Sigma = K - V'*V
MatrixXd eigen_V=eigen_L.triangularView<Upper>().adjoint().solve(
	eigen_sW.asDiagonal()*eigen_K*CMath::sq(m_scale));
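A standalone sketch of the same computation (kernel matrix and sW below are toy assumptions): the approximate posterior covariance is Sigma = K - K*sW*B^-1*sW*K, computed as Sigma = K - V'*V with V = L'\(sW .* K) and L the upper Cholesky factor of B = I + sW*sW'.*K.

// Minimal sketch: approximate posterior covariance from the Cholesky factor of B.
#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::MatrixXd K(2, 2); K << 1.0, 0.5, 0.5, 1.0;   // toy kernel matrix (assumption)
    Eigen::VectorXd sW(2);   sW << 0.4, 0.6;            // sqrt of the W diagonal (assumption)

    Eigen::MatrixXd B = Eigen::MatrixXd::Identity(2, 2) +
        (sW * sW.transpose()).cwiseProduct(K);
    Eigen::MatrixXd L = Eigen::LLT<Eigen::MatrixXd>(B).matrixU();

    // V = L'^-1 * (sW .* K); then Sigma = K - V'*V
    Eigen::MatrixXd sWK = sW.asDiagonal() * K;
    Eigen::MatrixXd V = L.triangularView<Eigen::Upper>().adjoint().solve(sWK);
    Eigen::MatrixXd Sigma = K - V.adjoint() * V;

    std::cout << Sigma << std::endl;
    return 0;
}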
// from update_chol()
if (eigen_W.minCoeff() < 0)
{
	// W has negative entries (non-log-concave likelihood): factorize K + W^-1 with a pivoted LU
	VectorXd eigen_iW = (VectorXd::Ones(m_W.vlen)).cwiseQuotient(eigen_W);
	FullPivLU<MatrixXd> lu(
		eigen_ktrtr*CMath::sq(m_scale)+MatrixXd(eigen_iW.asDiagonal()));
	// ...
}
else
{
	// L = chol(sW*sW' .* K + I), stored as the upper triangular factor
	LLT<MatrixXd> L(
		(eigen_sW*eigen_sW.transpose()).cwiseProduct(eigen_ktrtr*CMath::sq(m_scale))+
		MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols));
	eigen_L = L.matrixU();
}
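A compact sketch of the branch logic above, with toy values assumed only for illustration: when all entries of W are positive, B = I + sW*sW'.*K is symmetric positive definite and an LLT (Cholesky) factorization applies; when some entries are negative, the fragment appears to fall back to a pivoted LU, reconstructed here as a factorization of K + W^-1 (read that matrix as an assumption).

// Minimal sketch: pick the factorization depending on the sign of W.
#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main()
{
    Eigen::MatrixXd K(2, 2); K << 1.0, 0.2, 0.2, 1.0;   // toy kernel matrix (assumption)
    Eigen::VectorXd W(2);    W << 0.5, -0.1;            // one negative curvature term (assumption)

    if (W.minCoeff() > 0)
    {
        Eigen::VectorXd sW = W.cwiseSqrt();
        Eigen::LLT<Eigen::MatrixXd> llt(Eigen::MatrixXd::Identity(2, 2) +
            (sW * sW.transpose()).cwiseProduct(K));
        std::cout << "upper Cholesky factor:\n"
                  << Eigen::MatrixXd(llt.matrixU()) << std::endl;
    }
    else
    {
        // assumed fallback: pivoted LU of K + diag(1/W)
        Eigen::VectorXd iW = Eigen::VectorXd::Ones(2).cwiseQuotient(W);
        Eigen::FullPivLU<Eigen::MatrixXd> lu(K + Eigen::MatrixXd(iW.asDiagonal()));
        std::cout << "log|K + W^-1| = " << std::log(std::abs(lu.determinant())) << std::endl;
    }
    return 0;
}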
// from update_alpha(): objective at the current alpha, compared against the default alpha=0
Psi_New=eigen_alpha.dot(eigen_mu-eigen_mean)/2.0-
	SGVector<float64_t>::sum(m_model->get_log_probability_f(m_labels, m_mu));
// ...
// if the default (zero) alpha gives a lower objective, restart from it
if (Psi_Def < Psi_New)
{
	// ...
}

// Newton iterations
if (eigen_W.minCoeff() < 0)
{
	// non-log-concave likelihood (e.g. Student's t): make the curvature term positive
	// ...
	eigen_W+=(2.0/df)*eigen_dlp.cwiseProduct(eigen_dlp);
	// ...
}

eigen_sW=eigen_W.cwiseSqrt();

// B = sW*sW' .* K + I and its Cholesky factorization
LLT<MatrixXd> L((eigen_sW*eigen_sW.transpose()).cwiseProduct(eigen_ktrtr*CMath::sq(m_scale))+
	MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols));

// Newton direction: dalpha = b - sW.*(B\(sW.*(K*b))) - alpha, with b = W.*(mu-m) + dlp
VectorXd b=eigen_W.cwiseProduct(eigen_mu - eigen_mean)+eigen_dlp;
VectorXd dalpha=b-eigen_sW.cwiseProduct(
	L.solve(eigen_sW.cwiseProduct(eigen_ktrtr*b*CMath::sq(m_scale))))-eigen_alpha;

// set up the CPsiLine functor and run Brent's line search along dalpha
// ...
func.start_alpha=eigen_alpha;
func.alpha=&eigen_alpha;
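Putting the pieces together, a minimal standalone sketch (not Shogun code) of one stabilized Newton step for the posterior mode. The logistic likelihood derivatives and toy data are assumptions made only so the example runs, and a full step is taken here instead of the Brent line search used above.

// Minimal sketch: one Newton step of the Laplace mode finding,
//   b = W.*(f-m) + dlp,  dalpha = b - sW.*(B\(sW.*(K*b))) - alpha,  B = I + sW*sW'.*K
#include <Eigen/Dense>
#include <iostream>

int main()
{
    const int n = 3;
    Eigen::MatrixXd K = Eigen::MatrixXd::Identity(n, n);   // toy kernel matrix (assumption)
    Eigen::VectorXd m = Eigen::VectorXd::Zero(n);
    Eigen::VectorXd y(n); y << 1, -1, 1;                    // binary labels (assumption)
    Eigen::VectorXd alpha = Eigen::VectorXd::Zero(n);
    Eigen::VectorXd f = K * alpha + m;

    // logistic likelihood derivatives at f (illustrative only)
    Eigen::VectorXd pi = (1.0 / (1.0 + (-f.array()).exp())).matrix();
    Eigen::VectorXd dlp = ((y.array() + 1.0) / 2.0 - pi.array()).matrix();
    Eigen::VectorXd W = (pi.array() * (1.0 - pi.array())).matrix();

    Eigen::VectorXd sW = W.cwiseSqrt();
    Eigen::LLT<Eigen::MatrixXd> L(Eigen::MatrixXd::Identity(n, n) +
        (sW * sW.transpose()).cwiseProduct(K));

    Eigen::VectorXd b = W.cwiseProduct(f - m) + dlp;
    Eigen::VectorXd dalpha = b - sW.cwiseProduct(L.solve(sW.cwiseProduct(K * b))) - alpha;

    alpha += dalpha;  // full step; the Shogun code line-searches along dalpha instead
    std::cout << "alpha after one Newton step:\n" << alpha << std::endl;
    return 0;
}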
// sW = sqrt(W) is only defined elementwise when W is positive
if (eigen_W.minCoeff()>0)
	eigen_sW=eigen_W.cwiseSqrt();
// from update_deriv()
if (eigen_W.minCoeff()<0)
{
	// ... (LU-based branch when W has negative entries)
}
else
{
	// Z = sW*B^-1*sW = (K + W^-1)^-1, via two triangular solves with the Cholesky factor of B
	eigen_Z=eigen_L.triangularView<Upper>().adjoint().solve(MatrixXd(eigen_sW.asDiagonal()));
	eigen_Z=eigen_L.triangularView<Upper>().solve(eigen_Z);
	eigen_Z=eigen_sW.asDiagonal()*eigen_Z;
	// C = L'\(sW .* K) and g = (diag(K) - sum(C.^2, 1)')/2 = diag(posterior covariance)/2
	MatrixXd C=eigen_L.triangularView<Upper>().adjoint().solve(
		eigen_sW.asDiagonal()*eigen_K*CMath::sq(m_scale));
	eigen_g=(eigen_K.diagonal()*CMath::sq(m_scale)-
		(C.cwiseProduct(C)).colwise().sum().adjoint())/2.0;
}
// dfhat = g .* d3lp
eigen_dfhat=eigen_g.cwiseProduct(eigen_d3lp);
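The same cached quantities written out with plain Eigen types on toy inputs (all values are assumptions): Z = sW*B^-1*sW, C = U'\(sW.*K) with B = U'*U, g = diag(Sigma)/2 and dfhat = g.*d3lp.

// Minimal sketch: the quantities cached for the gradient computations.
#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::MatrixXd K(2, 2); K << 1.0, 0.4, 0.4, 1.0;   // toy kernel matrix (assumption)
    Eigen::VectorXd W(2);    W << 0.3, 0.7;             // toy curvature terms -d2lp (assumption)
    Eigen::VectorXd d3lp(2); d3lp << 0.05, -0.02;       // toy third derivatives (assumption)

    Eigen::VectorXd sW = W.cwiseSqrt();
    Eigen::MatrixXd B = Eigen::MatrixXd::Identity(2, 2) +
        (sW * sW.transpose()).cwiseProduct(K);
    Eigen::LLT<Eigen::MatrixXd> llt(B);

    // Z = sW * B^-1 * sW = (K + W^-1)^-1
    Eigen::MatrixXd Binv_sW = llt.solve(Eigen::MatrixXd(sW.asDiagonal()));
    Eigen::MatrixXd Z = sW.asDiagonal() * Binv_sW;

    // C = U'\(sW .* K), so diag(C'*C) = diag(K*sW*B^-1*sW*K)
    Eigen::MatrixXd U = llt.matrixU();
    Eigen::MatrixXd sWK = sW.asDiagonal() * K;
    Eigen::MatrixXd C = U.triangularView<Eigen::Upper>().adjoint().solve(sWK);

    // g = diag(posterior covariance)/2 and dfhat = g .* d3lp
    Eigen::VectorXd g = (K.diagonal() -
        (C.cwiseProduct(C)).colwise().sum().adjoint()) / 2.0;
    Eigen::VectorXd dfhat = g.cwiseProduct(d3lp);

    std::cout << "g = " << g.transpose() << "\ndfhat = " << dfhat.transpose() << std::endl;
    return 0;
}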
// from get_derivative_wrt_inference_method(): only the kernel scale is handled here
REQUIRE(!strcmp(param->m_name, "scale"), "Can't compute derivative of "
	"the negative log marginal likelihood wrt %s.%s parameter\n",
	get_name(), param->m_name)
// explicit part: tr(Z*dK)/2 - alpha'*dK*alpha/2
result[0]=(eigen_Z.cwiseProduct(dK)).sum()/2.0-
	(eigen_alpha.adjoint()*dK).dot(eigen_alpha)/2.0;

// implicit part, through the dependence of the posterior mode on the scale
VectorXd b=dK*eigen_dlp;
result[0]=result[0]-eigen_dfhat.dot(b-eigen_K*CMath::sq(m_scale)*(eigen_Z*b));
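The two terms above follow the usual split of the Laplace gradient into an explicit part and an implicit part caused by the moving posterior mode. A hedged sketch of the same formula as a free function (the function name and toy values are assumptions):

// Minimal sketch: gradient of the approximate -log Z wrt one kernel/scale hyperparameter,
// given dK = dK/dtheta:  tr(Z*dK)/2 - alpha'*dK*alpha/2 - dfhat'*(b - K*(Z*b)),  b = dK*dlp.
#include <Eigen/Dense>
#include <iostream>

double dnlz_wrt_kernel_param(const Eigen::MatrixXd& K, const Eigen::MatrixXd& dK,
                             const Eigen::MatrixXd& Z, const Eigen::VectorXd& alpha,
                             const Eigen::VectorXd& dlp, const Eigen::VectorXd& dfhat)
{
    double expl = (Z.cwiseProduct(dK)).sum() / 2.0 -
        (alpha.adjoint() * dK).dot(alpha) / 2.0;
    Eigen::VectorXd b = dK * dlp;
    return expl - dfhat.dot(b - K * (Z * b));
}

int main()
{
    // toy values (assumptions), only to show the call
    Eigen::MatrixXd K(2, 2);  K << 1.0, 0.3, 0.3, 1.0;
    Eigen::MatrixXd dK = 2.0 * K;                        // e.g. derivative wrt a log-scale
    Eigen::MatrixXd Z(2, 2);  Z << 0.4, 0.1, 0.1, 0.5;
    Eigen::VectorXd alpha(2); alpha << 0.2, -0.1;
    Eigen::VectorXd dlp(2);   dlp << 0.3, -0.4;
    Eigen::VectorXd dfhat(2); dfhat << 0.01, -0.02;
    std::cout << dnlz_wrt_kernel_param(K, dK, Z, alpha, dlp, dfhat) << std::endl;
    return 0;
}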
// from get_derivative_wrt_likelihood_model()
VectorXd b=eigen_K*eigen_dlp_dhyp;
// ...
result[0]=-eigen_g.dot(eigen_d2lp_dhyp)-eigen_lp_dhyp.sum()-
	eigen_dfhat.dot(b-eigen_K*CMath::sq(m_scale)*(eigen_Z*b));
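A corresponding sketch for a likelihood hyperparameter, using the derivatives of log p(y|f) with respect to that hyperparameter (function name and toy values are assumptions):

// Minimal sketch: gradient of the approximate -log Z wrt a likelihood hyperparameter,
//   -g'*d2lp_dhyp - sum(lp_dhyp) - dfhat'*(b - K*(Z*b)),  b = K*dlp_dhyp.
#include <Eigen/Dense>
#include <iostream>

double dnlz_wrt_lik_param(const Eigen::MatrixXd& K, const Eigen::MatrixXd& Z,
                          const Eigen::VectorXd& g, const Eigen::VectorXd& dfhat,
                          const Eigen::VectorXd& lp_dhyp, const Eigen::VectorXd& dlp_dhyp,
                          const Eigen::VectorXd& d2lp_dhyp)
{
    Eigen::VectorXd b = K * dlp_dhyp;
    return -g.dot(d2lp_dhyp) - lp_dhyp.sum() - dfhat.dot(b - K * (Z * b));
}

int main()
{
    // toy values (assumptions), only to exercise the formula
    Eigen::MatrixXd K(2, 2); K << 1.0, 0.3, 0.3, 1.0;
    Eigen::MatrixXd Z(2, 2); Z << 0.4, 0.1, 0.1, 0.5;
    Eigen::VectorXd g(2), dfhat(2), lp_dhyp(2), dlp_dhyp(2), d2lp_dhyp(2);
    g << 0.3, 0.35;  dfhat << 0.01, -0.02;
    lp_dhyp << -0.5, -0.6;  dlp_dhyp << 0.1, -0.2;  d2lp_dhyp << 0.05, 0.04;
    std::cout << dnlz_wrt_lik_param(K, Z, g, dfhat, lp_dhyp, dlp_dhyp, d2lp_dhyp) << std::endl;
    return 0;
}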
488 "Length of the parameter %s should not be NULL\n", param->
m_name)
508 result[i]=(eigen_Z.cwiseProduct(eigen_dK)).
sum()/2.0-
509 (eigen_alpha.adjoint()*eigen_dK).
dot(eigen_alpha)/2.0;
512 VectorXd b=eigen_dK*eigen_dlp;
537 "Length of the parameter %s should not be NULL\n", param->
m_name)
558 result[i]=-eigen_alpha.dot(eigen_dmu)-eigen_dfhat.dot(eigen_dmu-
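And the analogous sketch for a mean-function parameter, given dmu = dm/dtheta (function name and toy values are assumptions):

// Minimal sketch: gradient of the approximate -log Z wrt a mean-function parameter,
//   -alpha'*dmu - dfhat'*(dmu - K*(Z*dmu)).
#include <Eigen/Dense>
#include <iostream>

double dnlz_wrt_mean_param(const Eigen::MatrixXd& K, const Eigen::MatrixXd& Z,
                           const Eigen::VectorXd& alpha, const Eigen::VectorXd& dfhat,
                           const Eigen::VectorXd& dmu)
{
    return -alpha.dot(dmu) - dfhat.dot(dmu - K * (Z * dmu));
}

int main()
{
    Eigen::MatrixXd K(2, 2); K << 1.0, 0.3, 0.3, 1.0;   // toy values (assumptions)
    Eigen::MatrixXd Z(2, 2); Z << 0.4, 0.1, 0.1, 0.5;
    Eigen::VectorXd alpha(2), dfhat(2), dmu(2);
    alpha << 0.2, -0.1;  dfhat << 0.01, -0.02;  dmu << 1.0, 1.0;
    std::cout << dnlz_wrt_mean_param(K, Z, alpha, dfhat, dmu) << std::endl;
    return 0;
}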