MultivariateFNormalSufficient.h
/**
 * \file IMP/isd/MultivariateFNormalSufficient.h
 * \brief Multivariate normal distribution of a function F of the observations.
 *
 * Copyright 2007-2013 IMP Inventors. All rights reserved.
 */

#ifndef IMPISD_MULTIVARIATE_FNORMAL_SUFFICIENT_H
#define IMPISD_MULTIVARIATE_FNORMAL_SUFFICIENT_H

#include <IMP/isd/isd_config.h>
#include "internal/timer.h"
#include <IMP/macros.h>
#include <IMP/Model.h>
#include <IMP/constants.h>
#include <IMP/base/Object.h>
#include <math.h>
#include <Eigen/Dense>
#include <Eigen/Cholesky>
#include <IMP/isd/internal/cg_eigen.h>

IMPISD_BEGIN_NAMESPACE
using Eigen::MatrixXd;
using Eigen::VectorXd;

// IMP_MVN_TIMER_NFUNCS is the number of functions used by the timer
#define IMP_MVN_TIMER_NFUNCS 11

//! MultivariateFNormalSufficient
/** Probability density function and -log(p) of a multivariate normal
 * distribution of N M-variate observations.
 *
 * \f[ p(x_1,\cdots,x_N|\mu,F,\Sigma) =
 * \left((2\pi\sigma^2)^M|\Sigma|\right)^{-N/2} J(F)
 * \exp\left(-\frac{1}{2\sigma^2}
 * \sum_{i=1}^N {}^t(F(\mu) - F(x_i))\Sigma^{-1}(F(\mu)-F(x_i))
 * \right)
 * \f]
 * which is implemented as
 * \f[ p(x_1,\cdots,x_N|\mu,F,\Sigma) = ((2\pi\sigma^2)^M|\Sigma|)^{-N/2} J(F)
 * \exp\left(-\frac{N}{2\sigma^2} {}^t\epsilon \Sigma^{-1} \epsilon\right)
 * \exp\left(-\frac{1}{2\sigma^2} \operatorname{tr}(W\Sigma^{-1})\right)
 * \f]
 * where
 * \f[\epsilon = (F(\mu)- \overline{F(x)}) \quad
 * \overline{F(x)} = \frac{1}{N} \sum_{i=1}^N F(x_i)\f]
 * and
 * \f[W=\sum_{i=1}^N(F(x_i)-\overline{F(x)}){}^t(F(x_i)-\overline{F(x)}) \f]
 *
 * \f$\sigma\f$ is a multiplicative scaling factor that factors out of the
 * \f$\Sigma\f$ covariance matrix. It is set to 1 by default, and its purpose
 * is to avoid inverting the \f$\Sigma\f$ matrix unless necessary.
 *
 * Set J(F) to 1 if you want the multivariate normal distribution.
 * The distribution is normalized with respect to the matrix variable X.
 * The sufficient statistics are calculated at initialization.
 *
 * Example: if F is the log function, the multivariate F-normal distribution
 * is the multivariate lognormal distribution with mean \f$\mu\f$ and
 * covariance matrix \f$\Sigma\f$.
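 *
 * For instance, since the derivative of \f$\log x\f$ is \f$1/x\f$ applied
 * element-wise, the Jacobian factor to pass as J(F) in that lognormal case
 * works out to (writing \f$x_{ij}\f$ for the j-th component of observation
 * \f$x_i\f$)
 * \f[ J(F) = \prod_{i=1}^N \prod_{j=1}^M \frac{1}{x_{ij}} \f]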
 *
 * \note This is an implementation of the matrix normal distribution for F(X),
 * where rows of F(X) are independent and homoscedastic (they represent
 * repetitions of the same experiment), but columns might be correlated,
 * through the provided matrix.
 *
 * \note For now, F must be monotonically increasing, so that J(F) > 0. The
 * program will not check for that. Uses a Cholesky (\f$LDL^T\f$)
 * decomposition of \f$\Sigma\f$, which is recomputed when needed.
 *
 * \note All observations must be given, so if you have missing data you might
 * want to do some imputation on it first.
 *
 * \note References:
 * - Multivariate likelihood:
 *   Box and Tiao, "Bayesian Inference in Statistical Analysis",
 *   Addison-Wesley Publishing Company, 1973, p. 423.
 * - Factorization in terms of sufficient statistics:
 *   Daniel Fink, "A Compendium of Conjugate Priors", online, May 1997, pp. 40-41.
 * - Matrix calculations for derivatives:
 *   Petersen and Pedersen, "The Matrix Cookbook", 2008, matrixcookbook.com
 * - Useful reading on missing data (for the case of varying Nobs):
 *   Little and Rubin, "Statistical Analysis with Missing Data", 2nd ed., Wiley,
 *   2002, Chapters 6, 7 and 11.
 *
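 * \par Example
 * A minimal usage sketch; the matrix dimensions and contents below are made
 * up for illustration. It builds the density from N=5 observations of M=3
 * variables, then evaluates -log(p) and its gradient with respect to
 * \f$F(\mu)\f$:
 * \code
 * Eigen::MatrixXd FX = Eigen::MatrixXd::Random(5, 3);      // N=5 rows, M=3 columns
 * Eigen::VectorXd FM = Eigen::VectorXd::Zero(3);           // F(mu), the mean vector
 * Eigen::MatrixXd Sigma = Eigen::MatrixXd::Identity(3, 3); // covariance matrix
 * IMP::Pointer<IMP::isd::MultivariateFNormalSufficient> mvn
 *     = new IMP::isd::MultivariateFNormalSufficient(FX, 1.0, FM, Sigma);
 * double score = mvn->evaluate();                       // -log(p)
 * Eigen::VectorXd dFM = mvn->evaluate_derivative_FM();  // gradient wrt F(mu)
 * \endcode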
 */

class IMPISDEXPORT MultivariateFNormalSufficient : public base::Object
{

 private:

  VectorXd FM_, Fbar_, epsilon_, Peps_;
  double JF_, lJF_, norm_, lnorm_;
  MatrixXd P_, W_, Sigma_, FX_, PW_;
  int N_; // number of repetitions
  int M_; // number of variables
  //Eigen::LLT<MatrixXd, Eigen::Upper> ldlt_;
  Eigen::LDLT<MatrixXd, Eigen::Upper> ldlt_;
  // flags are true if the corresponding object is up to date
  bool flag_FM_, flag_FX_, flag_Fbar_,
       flag_W_, flag_Sigma_, flag_epsilon_,
       flag_PW_, flag_P_, flag_ldlt_, flag_norms_,
       flag_Peps_;
  // cg-related variables
  MatrixXd precond_;
  bool use_cg_, first_PW_, first_PWP_;
  double cg_tol_;
  double factor_;
  IMP::Pointer<internal::ConjugateGradientEigen> cg_;

  internal::CallTimer<IMP_MVN_TIMER_NFUNCS> timer_;

 public:
  /** Initialize with all observed data
   * \param[in] FX     F(X), matrix of observations with M columns and N rows.
   * \param[in] JF     J(F), determinant of the Jacobian of F with respect to
   *                   the observation matrix X.
   * \param[in] FM     F(M), mean vector \f$F(\mu)\f$ of size M.
   * \param[in] Sigma  MxM variance-covariance matrix \f$\Sigma\f$.
   * \param[in] factor multiplicative factor (default 1)
   */
  MultivariateFNormalSufficient(const MatrixXd& FX, double JF,
          const VectorXd& FM, const MatrixXd& Sigma, double factor=1);

  /** Initialize with sufficient statistics
   * \param[in] Fbar   M-dimensional vector of mean observations.
   * \param[in] JF     J(F), determinant of the Jacobian of F with respect to
   *                   the observation matrix X.
   * \param[in] FM     F(M), M-dimensional vector \f$F(\mu)\f$ (F of the true
   *                   mean).
   * \param[in] Nobs   number of observations for each variable.
   * \param[in] W      MxM matrix of sample variance-covariances.
   * \param[in] Sigma  MxM variance-covariance matrix \f$\Sigma\f$.
   * \param[in] factor multiplicative factor (default 1)
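   *
   * As an illustrative sketch (not taken from this header), the sufficient
   * statistics could be assembled from an N x M observation matrix \c FX with
   * Eigen, assuming \c FM and \c Sigma are already defined:
   * \code
   * Eigen::VectorXd Fbar = FX.colwise().mean().transpose();     // mean observation
   * Eigen::MatrixXd centered = FX.rowwise() - Fbar.transpose(); // F(x_i) - Fbar
   * Eigen::MatrixXd W = centered.transpose() * centered;        // scatter matrix W
   * IMP::Pointer<IMP::isd::MultivariateFNormalSufficient> mvn
   *     = new IMP::isd::MultivariateFNormalSufficient(
   *           Fbar, 1.0, FM, FX.rows(), W, Sigma);
   * \endcode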
   */
  MultivariateFNormalSufficient(const VectorXd& Fbar, double JF,
          const VectorXd& FM, int Nobs, const MatrixXd& W,
          const MatrixXd& Sigma, double factor=1);

  /* probability density function */
  double density() const;

  /* energy (score) functions, aka -log(p) */
  double evaluate() const;

  /* gradient of the energy wrt the mean F(M) */
  VectorXd evaluate_derivative_FM() const;

  /* gradient of the energy wrt the variance-covariance matrix Sigma */
  MatrixXd evaluate_derivative_Sigma() const;

  // derivative wrt the scalar factor
  double evaluate_derivative_factor() const;

  /* second derivative wrt FM and FM */
  MatrixXd evaluate_second_derivative_FM_FM() const;

  /* second derivative wrt FM(l) and Sigma;
   * row and column indices in the returned matrix refer to Sigma
   */
  MatrixXd evaluate_second_derivative_FM_Sigma(unsigned l) const;

  /* second derivative wrt Sigma and Sigma(k,l) */
  MatrixXd evaluate_second_derivative_Sigma_Sigma(unsigned k, unsigned l) const;

  /* change of parameters */
  void set_FX(const MatrixXd& f);
  MatrixXd get_FX() const;

  void set_Fbar(const VectorXd& f);
  VectorXd get_Fbar() const;

  void set_jacobian(double f);
  double get_jacobian() const;
  void set_minus_log_jacobian(double f); // -log(J)
  double get_minus_log_jacobian() const;

  void set_FM(const VectorXd& f);
  VectorXd get_FM() const;

  void set_W(const MatrixXd& f);
  MatrixXd get_W() const;

  void set_Sigma(const MatrixXd& f);
  MatrixXd get_Sigma() const;

  void set_factor(double f);
  double get_factor() const;

  // if you want to force a recomputation of all stored variables
  void reset_flags();

  // use conjugate gradients (default false)
  void set_use_cg(bool use, double tol);

  // print runtime statistics
  void stats() const;

  // return Sigma's eigenvalues, from smallest to largest
  VectorXd get_Sigma_eigenvalues() const;

  // return Sigma's condition number
  double get_Sigma_condition_number() const;

  // solve Sigma*X = B, yielding X
  MatrixXd solve(MatrixXd B) const;

  /* return transpose(epsilon)*P*epsilon */
  double get_mean_square_residuals() const;

  /* return minus exponent
   * \f[-\frac{1}{2\sigma^2}
   * \sum_{i=1}^N {}^t(F(\mu) - F(x_i))\Sigma^{-1}(F(\mu)-F(x_i)) \f]
   */
  double get_minus_exponent() const;

  /* return minus log normalization
   * \f[\frac{N}{2}\left(M\log(2\pi\sigma^2) + \log |\Sigma|\right)
   * -\log J(F) \f]
   */
  double get_minus_log_normalization() const;

  /* remaining stuff */
  IMP_OBJECT_INLINE(MultivariateFNormalSufficient,
          out << "MultivariateFNormalSufficient: "
              << N_ << " observations of "
              << M_ << " variables " << std::endl,
          {});

 private:

  // conjugate gradient init
  void setup_cg();

  // precision matrix
  MatrixXd get_P() const;
  void set_P(const MatrixXd& P);

  // precision * W
  MatrixXd get_PW() const;
  MatrixXd compute_PW_direct() const;
  MatrixXd compute_PW_cg() const;
  void set_PW(const MatrixXd& PW);

  // precision * epsilon
  VectorXd get_Peps() const;
  void set_Peps(const VectorXd& Peps);

  // epsilon = Fbar - FM
  VectorXd get_epsilon() const;
  void set_epsilon(const VectorXd& eps);

  // gets the factorization object
  //Eigen::LLT<MatrixXd, Eigen::Upper> get_ldlt() const;
  Eigen::LDLT<MatrixXd, Eigen::Upper> get_ldlt() const;
  void set_ldlt(const Eigen::LDLT<MatrixXd, Eigen::Upper>& ldlt);

  // compute determinant and norm
  void set_norms(double norm, double lnorm);
  std::vector<double> get_norms() const;

  /* return trace(W.P) */
  double trace_WP() const;

  /* return P*epsilon*transpose(P*epsilon) */
  MatrixXd compute_PTP() const;

  /* return P * W * P, O(M^2) */
  MatrixXd compute_PWP() const;

  /* computes the discrepancy vector */
  void compute_epsilon();

};

IMPISD_END_NAMESPACE

#endif /* IMPISD_MULTIVARIATE_FNORMAL_SUFFICIENT_H */