MCMC-Laplace.h
    /* global_refinements = */ 5,
    /* fe_degree = */ 1,
    "");
  LogLikelihood::Gaussian log_likelihood(exact_solution, 0.05);
  LogPrior::LogGaussian log_prior(0, 2);

  const unsigned int n_theta = 64;
  for (unsigned int test=0; test<10; ++test)
    {
      std::cout << "Generating output for test " << test << std::endl;

      // For each test, read the input file...
      std::ifstream test_input ("../testing/input." + std::to_string(test) + ".txt");
      Assert (test_input, ExcIO());

      Vector<double> theta(n_theta);
      for (unsigned int i=0; i<n_theta; ++i)
        test_input >> theta[i];

      // ...run it through the forward simulator to get the
      // z vector (which we then output to a file)...
      const Vector<double> z = laplace_problem.evaluate(theta);
      std::ofstream test_output_z ("output." + std::to_string(test) + ".z.txt");
      z.print(test_output_z, 16);

      // ...and then also evaluate prior and likelihood for these
      // theta vectors:
      std::ofstream test_output_likelihood ("output." + std::to_string(test) + ".likelihood.txt");
      test_output_likelihood.precision(12);
      test_output_likelihood << log_likelihood.log_likelihood(z) << std::endl;

      std::ofstream test_output_prior ("output." + std::to_string(test) + ".prior.txt");
      test_output_prior.precision(12);
      test_output_prior << log_prior.log_prior(theta) << std::endl;
    }
}
@endcode
This code reads in each of the input files (assuming that the executable is located in a
build directory parallel to the `testing/` directory) and outputs its results into the
current directory. The outputs you get from a modified program should then be compared
against the ones stored in the `testing/` directory. They should match to several digits.
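
For a quick check, something along the following lines can be used to compare
two of these files entry by entry (a minimal sketch; the file names are only
placeholders and need to be adapted to wherever the freshly generated outputs
and the stored reference values actually live):
@code{.cc}
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>

// Read two whitespace-separated files of numbers and print the largest
// absolute difference between corresponding entries. The file names are
// placeholders.
int main()
{
  std::ifstream current("output.0.z.txt");
  std::ifstream reference("../testing/output.0.z.txt");

  double a, b, max_difference = 0;
  while ((current >> a) && (reference >> b))
    max_difference = std::max(max_difference, std::abs(a - b));

  std::cout << "Largest difference: " << max_difference << std::endl;
}
@endcode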


An alternative implementation in Matlab
---------------------------------------

To facilitate experiments, this directory also contains alternative
implementations of the benchmark. The first one was written by David
Aristoff in Matlab and can be found in the `Matlab/` directory. As is
common in Matlab programs, each function is provided in its own
file. We have verified that the program generates the same results (to
12 or more digits) as the C++ program, using the tests mentioned in
the previous section.

An alternative implementation in Python
---------------------------------------

Another alternative, written by Wolfgang Bangerth, can be found in the
`Python/` directory and uses Python3. As is common for Python
programs, the whole program is provided in one file. As for the Matlab
version, we have verified that the program generates the same results
(to 12 or more digits) as the C++ program, using the tests mentioned
in the previous section. In fact, if you just execute the program
as-is, it runs diagnostics that output the errors.

This Python version is essentially a literal translation of the Matlab
code. It is not nearly as efficient (taking around 5 times as much
time for each function evaluation) and could probably be optimized
substantially if desired. A good starting point is the insertion of
the local elements into the global matrix in the line
@code{.py}
  A[np.ix_(dof,dof)] += theta_loc * A_loc
@endcode
that takes up a substantial fraction of the overall run time.


<a name="ann-mcmc-laplace.cc"></a>
<h1>Annotated version of mcmc-laplace.cc</h1>
557 *
558 *
559 *
560 *
561 * @code
562 * /* ---------------------------------------------------------------------
563 * *
564 * * Copyright (C) 2019 by the deal.II authors and Wolfgang Bangerth.
565 * *
566 * * This file is part of the deal.II library.
567 * *
568 * * The deal.II library is free software; you can use it, redistribute
569 * * it, and/or modify it under the terms of the GNU Lesser General
570 * * Public License as published by the Free Software Foundation; either
571 * * version 2.1 of the License, or (at your option) any later version.
572 * * The full text of the license can be found in the file LICENSE.md at
573 * * the top level directory of deal.II.
574 * *
575 * * ---------------------------------------------------------------------
576 *
577 * *
578 * * Author: Wolfgang Bangerth, Colorado State University, 2019.
579 * */
580 *
581 *
582 *
583 * #include <deal.II/base/timer.h>
584 * #include <deal.II/grid/tria.h>
585 * #include <deal.II/dofs/dof_handler.h>
586 * #include <deal.II/grid/grid_generator.h>
587 * #include <deal.II/grid/tria_accessor.h>
588 * #include <deal.II/grid/tria_iterator.h>
589 * #include <deal.II/dofs/dof_accessor.h>
590 * #include <deal.II/fe/fe_q.h>
591 * #include <deal.II/dofs/dof_tools.h>
592 * #include <deal.II/fe/fe_values.h>
593 * #include <deal.II/base/quadrature_lib.h>
594 * #include <deal.II/base/function.h>
595 * #include <deal.II/numerics/vector_tools.h>
596 * #include <deal.II/numerics/matrix_tools.h>
597 * #include <deal.II/lac/vector.h>
598 * #include <deal.II/lac/full_matrix.h>
599 * #include <deal.II/lac/sparse_matrix.h>
600 * #include <deal.II/lac/dynamic_sparsity_pattern.h>
601 * #include <deal.II/lac/solver_cg.h>
602 * #include <deal.II/lac/precondition.h>
603 * #include <deal.II/lac/sparse_ilu.h>
604 *
605 * #include <deal.II/numerics/data_out.h>
606 *
607 * #include <fstream>
608 * #include <iostream>
609 * #include <random>
610 *
611 * #include <deal.II/base/logstream.h>
612 *
613 * using namespace dealii;
614 *
615 *
616 * @endcode
617 *
618 * The following is a namespace in which we define the solver of the PDE.
619 * The main class implements an abstract `Interface` class declared at
620 * the top, which provides for an `evaluate()` function that, given
621 * a coefficient vector, solves the PDE discussed in the Readme file
622 * and then evaluates the solution at the 169 points mentioned there.
623 *
624
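 * Seen from the outside, all that matters is that one can hand a 64-element
 * coefficient vector to the solver and get back the vector of predicted
 * measurement values, roughly along the lines of the following sketch (the
 * constructor arguments are placeholders; the actual calls happen in
 * `main()` and in the Metropolis-Hastings sampler defined further down):
 * <div class=CodeFragmentInTutorialComment>
 * @code
 *   const std::string dataset_name = "test";
 *   ForwardSimulator::PoissonSolver<2> laplace_problem(5, 1, dataset_name);
 *
 *   Vector<double> theta(64);
 *   // ...fill the 64 coefficients in some way...
 *
 *   const Vector<double> z = laplace_problem.evaluate(theta);
 * @endcode
 * </div>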
625 *
626 * The solver follows the basic layout of @ref step_4 "step-4", though it precomputes
627 * a number of things in the `setup_system()` function, such as the
628 * evaluation of the matrix that corresponds to the point evaluations,
629 * as well as the local contributions to matrix and right hand side.
630 *
631
632 *
633 * Rather than commenting on everything in detail, in the following
634 * we will only document those things that are not already clear from
635 * @ref step_4 "step-4" and a small number of other tutorial programs.
636 *
637 * @code
638 * namespace ForwardSimulator
639 * {
640 * class Interface
641 * {
642 * public:
643 * virtual Vector<double> evaluate(const Vector<double> &coefficients) = 0;
644 *
645 * virtual ~Interface() = default;
646 * };
647 *
648 *
649 *
650 * template <int dim>
651 * class PoissonSolver : public Interface
652 * {
653 * public:
654 * PoissonSolver(const unsigned int global_refinements,
655 * const unsigned int fe_degree,
656 * const std::string &dataset_name);
657 * virtual Vector<double>
658 * evaluate(const Vector<double> &coefficients) override;
659 *
660 * private:
661 * void make_grid(const unsigned int global_refinements);
662 * void setup_system();
663 * void assemble_system(const Vector<double> &coefficients);
664 * void solve();
665 * void output_results(const Vector<double> &coefficients) const;
666 *
667 * Triangulation<dim> triangulation;
668 * FE_Q<dim> fe;
669 * DoFHandler<dim> dof_handler;
670 *
671 * FullMatrix<double> cell_matrix;
672 * Vector<double> cell_rhs;
673 * std::map<types::global_dof_index,double> boundary_values;
674 *
675 * SparsityPattern sparsity_pattern;
676 * SparseMatrix<double> system_matrix;
677 *
678 * Vector<double> solution;
679 * Vector<double> system_rhs;
680 *
681 * std::vector<Point<dim>> measurement_points;
682 *
683 * SparsityPattern measurement_sparsity;
684 * SparseMatrix<double> measurement_matrix;
685 *
686 * TimerOutput timer;
687 * unsigned int nth_evaluation;
688 *
689 * const std::string &dataset_name;
690 * };
691 *
692 *
693 *
694 * template <int dim>
695 * PoissonSolver<dim>::PoissonSolver(const unsigned int global_refinements,
696 * const unsigned int fe_degree,
697 * const std::string &dataset_name)
698 * : fe(fe_degree)
699 * , dof_handler(triangulation)
700 * , timer(std::cout, TimerOutput::summary, TimerOutput::cpu_times)
701 * , nth_evaluation(0)
702 * , dataset_name(dataset_name)
703 * {
704 * make_grid(global_refinements);
705 * setup_system();
706 * }
707 *
708 *
709 *
710 * template <int dim>
711 * void PoissonSolver<dim>::make_grid(const unsigned int global_refinements)
712 * {
713 * Assert(global_refinements >= 3,
714 * ExcMessage("This program makes the assumption that the mesh for the "
715 * "solution of the PDE is at least as fine as the one used "
716 * "in the definition of the coefficient."));
717 * GridGenerator::hyper_cube(triangulation, 0, 1);
718 * triangulation.refine_global(global_refinements);
719 *
720 * std::cout << " Number of active cells: " << triangulation.n_active_cells()
721 * << std::endl;
722 * }
723 *
724 *
725 *
726 * template <int dim>
727 * void PoissonSolver<dim>::setup_system()
728 * {
729 * @endcode
730 *
731 * First define the finite element space:
732 *
733 * @code
734 * dof_handler.distribute_dofs(fe);
735 *
736 * std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
737 * << std::endl;
738 *
739 * @endcode
740 *
741 * Then set up the main data structures that will hold the discrete problem:
742 *
743 * @code
744 * {
745 * DynamicSparsityPattern dsp(dof_handler.n_dofs());
746 * DoFTools::make_sparsity_pattern(dof_handler, dsp);
747 * sparsity_pattern.copy_from(dsp);
748 *
749 * system_matrix.reinit(sparsity_pattern);
750 *
751 * solution.reinit(dof_handler.n_dofs());
752 * system_rhs.reinit(dof_handler.n_dofs());
753 * }
754 *
755 * @endcode
756 *
757 * And then define the tools to do point evaluation. We choose
758 * a set of 13x13 points evenly distributed across the domain:
759 *
760 * @code
761 * {
762 * const unsigned int n_points_per_direction = 13;
763 * const double dx = 1. / (n_points_per_direction + 1);
764 *
765 * for (unsigned int x = 1; x <= n_points_per_direction; ++x)
766 * for (unsigned int y = 1; y <= n_points_per_direction; ++y)
767 * measurement_points.emplace_back(x * dx, y * dx);
768 *
769 * @endcode
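 *
 * In other words, the measurement points are @f$(x_k,y_l)=(k/14,\,l/14)@f$
 * for @f$k,l=1,\ldots,13@f$ -- the @f$13\times 13=169@f$ evaluation points
 * mentioned at the top of this file.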
770 *
771 * First build a full matrix of the evaluation process. We do this
772 * even though the matrix is really sparse -- but we don't know
773 * which entries are nonzero. Later, the `copy_from()` function
774 * calls build a sparsity pattern and a sparse matrix from
775 * the dense matrix.
776 *
777 * @code
778 * Vector<double> weights(dof_handler.n_dofs());
779 * FullMatrix<double> full_measurement_matrix(n_points_per_direction *
780 * n_points_per_direction,
781 * dof_handler.n_dofs());
782 *
783 * for (unsigned int index = 0; index < measurement_points.size(); ++index)
784 * {
785 * VectorTools::create_point_source_vector(dof_handler,
786 * measurement_points[index],
787 * weights);
788 * for (unsigned int i = 0; i < dof_handler.n_dofs(); ++i)
789 * full_measurement_matrix(index, i) = weights(i);
790 * }
791 *
792 * measurement_sparsity.copy_from(full_measurement_matrix);
793 * measurement_matrix.reinit(measurement_sparsity);
794 * measurement_matrix.copy_from(full_measurement_matrix);
795 * }
796 *
797 * @endcode
798 *
799 * Next build the mapping from cell to the index in the 64-element
800 * coefficient vector:
801 *
802 * @code
803 * for (const auto &cell : triangulation.active_cell_iterators())
804 * {
805 * const unsigned int i = std::floor(cell->center()[0] * 8);
806 * const unsigned int j = std::floor(cell->center()[1] * 8);
807 *
808 * const unsigned int index = i + 8 * j;
809 *
810 * cell->set_user_index(index);
811 * }
812 *
813 * @endcode
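 *
 * As a concrete example: a cell whose center is at @f$(0.3,0.7)@f$ gets
 * @f$i=\lfloor 0.3\cdot 8\rfloor=2@f$ and @f$j=\lfloor 0.7\cdot 8\rfloor=5@f$
 * and is therefore associated with entry @f$2+8\cdot 5=42@f$ of the
 * 64-element coefficient vector.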
814 *
815 * Finally prebuild the building blocks of the linear system as
816 * discussed in the Readme file :
817 *
818 * @code
819 * {
820 * const unsigned int dofs_per_cell = fe.dofs_per_cell;
821 *
822 * cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
823 * cell_rhs.reinit(dofs_per_cell);
824 *
825 * const QGauss<dim> quadrature_formula(fe.degree+1);
826 * const unsigned int n_q_points = quadrature_formula.size();
827 *
828 * FEValues<dim> fe_values(fe,
829 * quadrature_formula,
830 * update_values | update_gradients |
831 * update_JxW_values);
832 *
833 * fe_values.reinit(dof_handler.begin_active());
834 *
835 * for (unsigned int q_index = 0; q_index < n_q_points; ++q_index)
836 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
837 * {
838 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
839 * cell_matrix(i, j) +=
840 * (fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
841 * fe_values.shape_grad(j, q_index) * // grad phi_j(x_q)
842 * fe_values.JxW(q_index)); // dx
843 *
844 * cell_rhs(i) += (fe_values.shape_value(i, q_index) * // phi_i(x_q)
845 * 10.0 * // f(x_q)
846 * fe_values.JxW(q_index)); // dx
847 * }
848 *
849 * VectorTools::interpolate_boundary_values(dof_handler,
850 * 0,
851 * ZeroFunction<dim>(),
852 * boundary_values);
853 * }
854 * }
855 *
856 *
857 *
858 * @endcode
859 *
860 * Given that we have pre-built the matrix and right hand side contributions
861 * for a (representative) cell, the function that assembles the matrix is
862 * pretty short and straightforward:
863 *
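 * Concretely, the contribution of a cell @f$K@f$ with coefficient
 * @f$\theta_K@f$ to the stiffness matrix is
 * @f[
 *   A^K_{ij} = \theta_K \int_K \nabla\varphi_i\cdot\nabla\varphi_j\,dx,
 * @f]
 * and because all cells of the uniformly refined mesh are congruent, this
 * integral is the same matrix on every cell: it is exactly the `cell_matrix`
 * object computed once in `setup_system()` above, which here only needs to
 * be scaled by @f$\theta_K@f$ and added to the global matrix.
 *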
864 * @code
865 * template <int dim>
866 * void PoissonSolver<dim>::assemble_system(const Vector<double> &coefficients)
867 * {
868 * Assert(coefficients.size() == 64, ExcInternalError());
869 *
870 * system_matrix = 0;
871 * system_rhs = 0;
872 *
873 * const unsigned int dofs_per_cell = fe.dofs_per_cell;
874 *
875 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
876 *
877 * for (const auto &cell : dof_handler.active_cell_iterators())
878 * {
879 * const double coefficient = coefficients(cell->user_index());
880 *
881 * cell->get_dof_indices(local_dof_indices);
882 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
883 * {
884 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
885 * system_matrix.add(local_dof_indices[i],
886 * local_dof_indices[j],
887 * coefficient * cell_matrix(i, j));
888 *
889 * system_rhs(local_dof_indices[i]) += cell_rhs(i);
890 * }
891 * }
892 *
893 * MatrixTools::apply_boundary_values(boundary_values,
894 * system_matrix,
895 * solution,
896 * system_rhs);
897 * }
898 *
899 *
900 * @endcode
901 *
902 * The same is true for the function that solves the linear system:
903 *
904 * @code
905 * template <int dim>
906 * void PoissonSolver<dim>::solve()
907 * {
908 * SparseILU<double> ilu;
909 * ilu.initialize(system_matrix);
910 * SolverControl control(100, 1e-10*system_rhs.l2_norm());
911 * SolverCG<> solver(control);
912 * solver.solve(system_matrix, solution, system_rhs, ilu);
913 * }
914 *
915 *
916 *
917 * @endcode
918 *
919 * The following function outputs graphical data for the most recently
920 * used coefficient and corresponding solution of the PDE. Collecting
921 * the coefficient values requires translating from the 64-element
922 * coefficient vector and the cells that correspond to each of these
923 * elements. The rest remains pretty obvious, with the exception
924 * of including the number of the current sample into the file name.
925 *
926 * @code
927 * template <int dim>
928 * void
929 * PoissonSolver<dim>::output_results(const Vector<double> &coefficients) const
930 * {
931 * Vector<float> coefficient_values(triangulation.n_active_cells());
932 * for (const auto &cell : triangulation.active_cell_iterators())
933 * coefficient_values[cell->active_cell_index()] =
934 * coefficients(cell->user_index());
935 *
936 * DataOut<dim> data_out;
937 *
938 * data_out.attach_dof_handler(dof_handler);
939 * data_out.add_data_vector(solution, "solution");
940 * data_out.add_data_vector(coefficient_values, "coefficient");
941 *
942 * data_out.build_patches();
943 *
944 * std::ofstream output("solution-" +
945 * Utilities::int_to_string(nth_evaluation, 10) + ".vtu");
946 * data_out.write_vtu(output);
947 * }
948 *
949 *
950 *
951 * @endcode
952 *
953 * The following is the main function of this class: Given a coefficient
954 * vector, it assembles the linear system, solves it, and then evaluates
955 * the solution at the measurement points by applying the measurement
956 * matrix to the solution vector. That vector of "measured" values
957 * is then returned.
958 *
959
960 *
961 * The function will also output the solution in a graphical format
962 * if you un-comment the corresponding statement in the third
963 * code block. However, you may end up with a very large amount
964 * of data: This code is producing, at the minimum, 10,000 samples
965 * and creating output for each one of them is surely more data
966 * than you ever want to see!
967 *
968
969 *
970 * At the end of the function, we output some timing information
971 * every 10,000 samples.
972 *
973 * @code
974 * template <int dim>
975 * Vector<double>
976 * PoissonSolver<dim>::evaluate(const Vector<double> &coefficients)
977 * {
978 * {
979 * TimerOutput::Scope section(timer, "Building linear systems");
980 * assemble_system(coefficients);
981 * }
982 *
983 * {
984 * TimerOutput::Scope section(timer, "Solving linear systems");
985 * solve();
986 * }
987 *
988 * Vector<double> measurements(measurement_matrix.m());
989 * {
990 * TimerOutput::Scope section(timer, "Postprocessing");
991 *
992 * measurement_matrix.vmult(measurements, solution);
993 * Assert(measurements.size() == measurement_points.size(),
994 * ExcInternalError());
995 *
996 * /* output_results(coefficients); */
997 * }
998 *
999 * ++nth_evaluation;
1000 * if (nth_evaluation % 10000 == 0)
1001 * timer.print_summary();
1002 *
1003 * return measurements;
1004 * }
1005 * } // namespace ForwardSimulator
1006 *
1007 *
1008 * @endcode
1009 *
1010 * The following namespaces define the statistical properties of the Bayesian
1011 * inverse problem. The first is about the definition of the measurement
1012 * statistics (the "likelihood"), which we here assume to be a normal
1013 * distribution @f$N(\mu,\sigma I)@f$ with mean value @f$\mu@f$ given by the
1014 * actual measurement vector (passed as an argument to the constructor
1015 * of the `Gaussian` class) and standard deviation @f$\sigma@f$.
1016 *
1017
1018 *
1019 * For reasons of numerical accuracy, it is useful to not return the
1020 * actual likelihood, but its logarithm. This is because these
1021 * values can be very small, occasionally on the order of @f$e^{-100}@f$,
1022 * for which it becomes very difficult to compute accurate
1023 * values.
1024 *
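 * In these terms, what the `Gaussian` class below returns is
 * @f[
 *   \log L(x) = -\frac{\|x-\mu\|^2}{2\sigma^2},
 * @f]
 * i.e., the additive normalization constant of the Gaussian density is
 * omitted: it does not depend on @f$x@f$ and consequently cancels in the
 * Metropolis-Hastings acceptance ratio anyway.
 *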
1025 * @code
1026 * namespace LogLikelihood
1027 * {
1028 * class Interface
1029 * {
1030 * public:
1031 * virtual double log_likelihood(const Vector<double> &x) const = 0;
1032 *
1033 * virtual ~Interface() = default;
1034 * };
1035 *
1036 *
1037 * class Gaussian : public Interface
1038 * {
1039 * public:
1040 * Gaussian(const Vector<double> &mu, const double sigma);
1041 *
1042 * virtual double log_likelihood(const Vector<double> &x) const override;
1043 *
1044 * private:
1045 * const Vector<double> mu;
1046 * const double sigma;
1047 * };
1048 *
1049 * Gaussian::Gaussian(const Vector<double> &mu, const double sigma)
1050 * : mu(mu)
1051 * , sigma(sigma)
1052 * {}
1053 *
1054 *
1055 * double Gaussian::log_likelihood(const Vector<double> &x) const
1056 * {
1057 * Vector<double> x_minus_mu = x;
1058 * x_minus_mu -= mu;
1059 *
1060 * return -x_minus_mu.norm_sqr() / (2 * sigma * sigma);
1061 * }
1062 * } // namespace LogLikelihood
1063 *
1064 *
1065 * @endcode
1066 *
1067 * Next up is the "prior" imposed on the coefficients. We assume
1068 * that the logarithms of the entries of the coefficient vector
1069 * are all distributed as a Gaussian with given mean and standard
1070 * deviation. If the logarithms of the coefficients are normally
1071 * distributed, then this implies in particular that the coefficients
1072 * can only be positive, which is a useful property to ensure the
1073 * well-posedness of the forward problem.
1074 *
1075
1076 *
1077 * For the same reasons as for the likelihood above, the interface
1078 * for the prior asks for returning the *logarithm* of the prior,
1079 * instead of the prior probability itself.
1080 *
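 * In formulas, the `LogGaussian` class below computes
 * @f[
 *   \log \pi_\text{prior}(\theta)
 *     = -\sum_i \frac{(\log \theta_i - \mu)^2}{2\sigma^2},
 * @f]
 * i.e., the sum of the Gaussian exponents in @f$\log\theta_i@f$; as for the
 * likelihood, constant prefactors are again omitted.
 *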
1081 * @code
1082 * namespace LogPrior
1083 * {
1084 * class Interface
1085 * {
1086 * public:
1087 * virtual double log_prior(const Vector<double> &x) const = 0;
1088 *
1089 * virtual ~Interface() = default;
1090 * };
1091 *
1092 *
1093 * class LogGaussian : public Interface
1094 * {
1095 * public:
1096 * LogGaussian(const double mu, const double sigma);
1097 *
1098 * virtual double log_prior(const Vector<double> &x) const override;
1099 *
1100 * private:
1101 * const double mu;
1102 * const double sigma;
1103 * };
1104 *
1105 * LogGaussian::LogGaussian(const double mu, const double sigma)
1106 * : mu(mu)
1107 * , sigma(sigma)
1108 * {}
1109 *
1110 *
1111 * double LogGaussian::log_prior(const Vector<double> &x) const
1112 * {
1113 * double log_of_product = 0;
1114 *
1115 * for (const auto &el : x)
1116 * log_of_product +=
1117 * -(std::log(el) - mu) * (std::log(el) - mu) / (2 * sigma * sigma);
1118 *
1119 * return log_of_product;
1120 * }
1121 * } // namespace LogPrior
1122 *
1123 *
1124 *
1125 * @endcode
1126 *
1127 * The Metropolis-Hastings algorithm requires a method to create a new sample
1128 * given a previous sample. We do this by perturbing the current (coefficient)
1129 * sample randomly using a Gaussian distribution centered at the current
1130 * sample. To ensure that the samples' individual entries all remain
1131 * positive, we use a Gaussian distribution in logarithm space -- in other
1132 * words, instead of *adding* a small perturbation with mean value zero,
1133 * we *multiply* the entries of the current sample by a factor that
1134 * is the exponential of a random number with mean zero. (Because the
1135 * exponential of zero is one, this means that the most likely factors
1136 * to multiply the existing sample entries by are close to one. And
1137 * because the exponential of a number is always positive, we never
1138 * get negative samples this way.)
1139 *
1140
1141 *
1142 * But the Metropolis-Hastings sampler doesn't just need a perturbed
1143 * sample @f$y@f$ location given the current sample location @f$x@f$. It also
1144 * needs to know the ratio of the probability of reaching @f$y@f$ from
1145 * @f$x@f$, divided by the probability of reaching @f$x@f$ from @f$y@f$. If we
1146 * were to use a symmetric proposal distribution (e.g., a Gaussian
1147 * distribution centered at @f$x@f$ with a width independent of @f$x@f$), then
1148 * these two probabilities would be the same, and the ratio one. But
1149 * that's not the case for the Gaussian in log space. It's not
1150 * terribly difficult to verify that in that case, for a single
1151 * component the ratio of these probabilities is @f$y_i/x_i@f$, and
1152 * consequently for the vector as a whole, the ratio of proposal
1153 * probabilities is the product of these per-component ratios.
1154 *
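 * To see where this factor comes from, note that the proposal sets
 * @f$y_i = x_i e^{\xi_i}@f$ with @f$\xi_i \sim N(0,s^2)@f$ (where @f$s@f$ is
 * the `log_sigma` parameter below), so that @f$y_i@f$ has the log-normal
 * density
 * @f[
 *   q(y_i|x_i) = \frac{1}{y_i\, s\sqrt{2\pi}}
 *     \exp\left(-\frac{(\log y_i-\log x_i)^2}{2s^2}\right).
 * @f]
 * The exponential factor is symmetric in @f$x_i@f$ and @f$y_i@f$, and so
 * @f$q(x_i|y_i)/q(y_i|x_i) = y_i/x_i@f$ -- exactly the factor that the
 * `perturb()` function accumulates in `product_of_ratios`.
 *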
1155 * @code
1156 * namespace ProposalGenerator
1157 * {
1158 * class Interface
1159 * {
1160 * public:
1161 * virtual
1162 * std::pair<Vector<double>,double>
1163 * perturb(const Vector<double> &current_sample) const = 0;
1164 *
1165 * virtual ~Interface() = default;
1166 * };
1167 *
1168 *
1169 * class LogGaussian : public Interface
1170 * {
1171 * public:
1172 * LogGaussian(const unsigned int random_seed, const double log_sigma);
1173 *
1174 * virtual
1175 * std::pair<Vector<double>,double>
1176 * perturb(const Vector<double> &current_sample) const;
1177 *
1178 * private:
1179 * const double log_sigma;
1180 * mutable std::mt19937 random_number_generator;
1181 * };
1182 *
1183 *
1184 *
1185 * LogGaussian::LogGaussian(const unsigned int random_seed,
1186 * const double log_sigma)
1187 * : log_sigma(log_sigma)
1188 * {
1189 * random_number_generator.seed(random_seed);
1190 * }
1191 *
1192 *
1193 * std::pair<Vector<double>,double>
1194 * LogGaussian::perturb(const Vector<double> &current_sample) const
1195 * {
1196 * Vector<double> new_sample = current_sample;
1197 * double product_of_ratios = 1;
1198 * for (auto &x : new_sample)
1199 * {
1200 * const double rnd = std::normal_distribution<>(0, log_sigma)(random_number_generator);
1201 * const double exp_rnd = std::exp(rnd);
1202 * x *= exp_rnd;
1203 * product_of_ratios *= exp_rnd;
1204 * }
1205 *
1206 * return {new_sample, product_of_ratios};
1207 * }
1208 *
1209 * } // namespace ProposalGenerator
1210 *
1211 *
1212 * @endcode
1213 *
1214 * The last main class is the Metropolis-Hastings sampler itself.
1215 * If you understand the algorithm behind this method, then
1216 * the following implementation should not be too difficult
1217 * to read. The only thing of relevance is that descriptions
1218 * of the algorithm typically ask whether the *ratio* of two
1219 * probabilities (the "posterior" probabilities of the current
1220 * and the previous samples, where the "posterior" is the product of the
1221 * likelihood and the prior probability) is larger or smaller than a
1222 * randomly drawn number. But because our interfaces return the
1223 * *logarithms* of these probabilities, we now need to take
1224 * the ratio of appropriate exponentials -- which is made numerically
1225 * more stable by considering the exponential of the difference of
1226 * the log probabilities. The only other slight complication is that
1227 * we need to multiply this ratio by the ratio of proposal probabilities
1228 * since we use a non-symmetric proposal distribution.
1229 *
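 * Put together, a trial sample @f$y@f$ generated from the current sample
 * @f$x@f$ is accepted whenever
 * @f[
 *   e^{\log\pi(y)-\log\pi(x)}\,\frac{q(x|y)}{q(y|x)} \ge u,
 *   \qquad u \sim U(0,1),
 * @f]
 * where @f$\pi@f$ denotes the (unnormalized) posterior and @f$q@f$ the
 * proposal density; this is exactly the condition checked in the `sample()`
 * function below.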
1230
1231 *
1232 * Finally, we note that the output is generated with 7 digits of
1233 * accuracy. (The C++ default is 6 digits.) We do this because,
1234 * as shown in the paper, we can determine the mean value of the
1235 * probability distribution we are sampling here to at least six
1236 * digits of accuracy, and do not want to be limited by the precision
1237 * of the output.
1238 *
1239 * @code
1240 * namespace Sampler
1241 * {
1242 * class MetropolisHastings
1243 * {
1244 * public:
1245 * MetropolisHastings(ForwardSimulator::Interface & simulator,
1246 * const LogLikelihood::Interface & likelihood,
1247 * const LogPrior::Interface & prior,
1248 * const ProposalGenerator::Interface &proposal_generator,
1249 * const unsigned int random_seed,
1250 * const std::string & dataset_name);
1251 *
1252 * void sample(const Vector<double> &starting_guess,
1253 * const unsigned int n_samples);
1254 *
1255 * private:
1256 * ForwardSimulator::Interface & simulator;
1257 * const LogLikelihood::Interface & likelihood;
1258 * const LogPrior::Interface & prior;
1259 * const ProposalGenerator::Interface &proposal_generator;
1260 *
1261 * std::mt19937 random_number_generator;
1262 *
1263 * unsigned int sample_number;
1264 * unsigned int accepted_sample_number;
1265 *
1266 * std::ofstream output_file;
1267 *
1268 * void write_sample(const Vector<double> &current_sample,
1269 * const double current_log_likelihood);
1270 * };
1271 *
1272 *
1273 * MetropolisHastings::MetropolisHastings(
1274 * ForwardSimulator::Interface & simulator,
1275 * const LogLikelihood::Interface & likelihood,
1276 * const LogPrior::Interface & prior,
1277 * const ProposalGenerator::Interface &proposal_generator,
1278 * const unsigned int random_seed,
1279 * const std::string & dataset_name)
1280 * : simulator(simulator)
1281 * , likelihood(likelihood)
1282 * , prior(prior)
1283 * , proposal_generator(proposal_generator)
1284 * , sample_number(0)
1285 * , accepted_sample_number(0)
1286 * {
1287 * output_file.open("samples-" + dataset_name + ".txt");
1288 * output_file.precision(7);
1289 *
1290 * random_number_generator.seed(random_seed);
1291 * }
1292 *
1293 *
1294 * void MetropolisHastings::sample(const Vector<double> &starting_guess,
1295 * const unsigned int n_samples)
1296 * {
1297 * std::uniform_real_distribution<> uniform_distribution(0, 1);
1298 *
1299 * Vector<double> current_sample = starting_guess;
1300 * double current_log_posterior =
1301 * (likelihood.log_likelihood(simulator.evaluate(current_sample)) +
1302 * prior.log_prior(current_sample));
1303 *
1304 * ++sample_number;
1305 * ++accepted_sample_number;
1306 * write_sample(current_sample, current_log_posterior);
1307 *
1308 * for (unsigned int k = 1; k < n_samples; ++k, ++sample_number)
1309 * {
1310 * std::pair<Vector<double>,double>
1311 * perturbation = proposal_generator.perturb(current_sample);
1312 * const Vector<double> trial_sample = std::move (perturbation.first);
1313 * const double perturbation_probability_ratio = perturbation.second;
1314 *
1315 * const double trial_log_posterior =
1316 * (likelihood.log_likelihood(simulator.evaluate(trial_sample)) +
1317 * prior.log_prior(trial_sample));
1318 *
1319 * if (std::exp(trial_log_posterior - current_log_posterior) * perturbation_probability_ratio
1320 * >=
1321 * uniform_distribution(random_number_generator))
1322 * {
1323 * current_sample = trial_sample;
1324 * current_log_posterior = trial_log_posterior;
1325 *
1326 * ++accepted_sample_number;
1327 * }
1328 *
1329 * write_sample(current_sample, current_log_posterior);
1330 * }
1331 * }
1332 *
1333 *
1334 *
1335 * void MetropolisHastings::write_sample(const Vector<double> &current_sample,
1336 * const double current_log_posterior)
1337 * {
1338 * output_file << current_log_posterior << '\t';
1339 * output_file << accepted_sample_number << '\t';
1340 * for (const auto &x : current_sample)
1341 * output_file << x << ' ';
1342 * output_file << '\n';
1343 *
1344 * output_file.flush();
1345 * }
1346 * } // namespace Sampler
1347 *
1348 *
1349 * @endcode
1350 *
1351 * The final function is `main()`, which simply puts all of these pieces
1352 * together into one. The "exact solution", i.e., the "measurement values"
1353 * we use for this program are tabulated to make it easier for other
1354 * people to use in their own implementations of this benchmark. These
1355 * values were created using the same main class above, but using 8 mesh
1356 * refinements and using a Q3 element -- i.e., using a much more accurate
1357 * method than the one we use in the forward simulator for generating
1358 * samples below (which uses 5 global mesh refinement steps and a Q1
1359 * element). If you wanted to regenerate this set of numbers, then
1360 * the following code snippet would do that:
1361 * <div class=CodeFragmentInTutorialComment>
1362 * @code
1363 * /* Set the exact coefficient: */
1364 * Vector<double> exact_coefficients(64);
1365 * for (auto &el : exact_coefficients)
1366 * el = 1.;
1367 * exact_coefficients(9) = exact_coefficients(10) = exact_coefficients(17) =
1368 * exact_coefficients(18) = 0.1;
1369 * exact_coefficients(45) = exact_coefficients(46) = exact_coefficients(53) =
1370 * exact_coefficients(54) = 10.;
1371 *
1372
1373 * /* Compute the "correct" solution vector: */
1374 * const Vector<double> exact_solution =
1375 * ForwardSimulator::PoissonSolver<2>(/* global_refinements = */ 8,
1376 * /* fe_degree = */ 3,
1377 * /* prefix = */ "exact")
1378 * .evaluate(exact_coefficients);
1379 * @endcode
1380 * </div>
1381 *
1382 * @code
1383 * int main()
1384 * {
1385 * const bool testing = true;
1386 *
1387 * @endcode
1388 *
1389 * Run with one thread, so as to not step on other processes
1390 * doing the same at the same time. It turns out that the problem
1391 * is also so small that running with more than one thread
1392 * *increases* the runtime.
1393 *
1394 * @code
1395 * MultithreadInfo::set_thread_limit(1);
1396 *
1397 * const unsigned int random_seed = (testing ? 1U : std::random_device()());
1398 * const std::string dataset_name = Utilities::to_string(random_seed, 10);
1399 *
1400 * const Vector<double> exact_solution(
1401 * { 0.06076511762259369, 0.09601910120848481,
1402 * 0.1238852517838584, 0.1495184117375201,
1403 * 0.1841596127549784, 0.2174525028261122,
1404 * 0.2250996160898698, 0.2197954769002993,
1405 * 0.2074695698370926, 0.1889996477663016,
1406 * 0.1632722532153726, 0.1276782480038186,
1407 * 0.07711845915789312, 0.09601910120848552,
1408 * 0.2000589533367983, 0.3385592591951766,
1409 * 0.3934300024647806, 0.4040223892461541,
1410 * 0.4122329537843092, 0.4100480091545554,
1411 * 0.3949151637189968, 0.3697873264791232,
1412 * 0.33401826235924, 0.2850397806663382,
1413 * 0.2184260032478671, 0.1271121156350957,
1414 * 0.1238852517838611, 0.3385592591951819,
1415 * 0.7119285162766475, 0.8175712861756428,
1416 * 0.6836254116578105, 0.5779452419831157,
1417 * 0.5555615956136897, 0.5285181561736719,
1418 * 0.491439702849224, 0.4409367494853282,
1419 * 0.3730060082060772, 0.2821694983395214,
1420 * 0.1610176733857739, 0.1495184117375257,
1421 * 0.3934300024647929, 0.8175712861756562,
1422 * 0.9439154625527653, 0.8015904115095128,
1423 * 0.6859683749254024, 0.6561235366960599,
1424 * 0.6213197201867315, 0.5753611315000049,
1425 * 0.5140091754526823, 0.4325325506354165,
1426 * 0.3248315148915482, 0.1834600412730086,
1427 * 0.1841596127549917, 0.4040223892461832,
1428 * 0.6836254116578439, 0.8015904115095396,
1429 * 0.7870119561144977, 0.7373108331395808,
1430 * 0.7116558878070463, 0.6745179049094283,
1431 * 0.6235300574156917, 0.5559332704045935,
1432 * 0.4670304994474178, 0.3499809143811,
1433 * 0.19688263746294, 0.2174525028261253,
1434 * 0.4122329537843404, 0.5779452419831566,
1435 * 0.6859683749254372, 0.7373108331396063,
1436 * 0.7458811983178246, 0.7278968022406559,
1437 * 0.6904793535357751, 0.6369176452710288,
1438 * 0.5677443693743215, 0.4784738764865867,
1439 * 0.3602190632823262, 0.2031792054737325,
1440 * 0.2250996160898818, 0.4100480091545787,
1441 * 0.5555615956137137, 0.6561235366960938,
1442 * 0.7116558878070715, 0.727896802240657,
1443 * 0.7121928678670187, 0.6712187391428729,
1444 * 0.6139157775591492, 0.5478251665295381,
1445 * 0.4677122687599031, 0.3587654911000848,
1446 * 0.2050734291675918, 0.2197954769003094,
1447 * 0.3949151637190157, 0.5285181561736911,
1448 * 0.6213197201867471, 0.6745179049094407,
1449 * 0.690479353535786, 0.6712187391428787,
1450 * 0.6178408289359514, 0.5453605027237883,
1451 * 0.489575966490909, 0.4341716881061278,
1452 * 0.3534389974779456, 0.2083227496961347,
1453 * 0.207469569837099, 0.3697873264791366,
1454 * 0.4914397028492412, 0.5753611315000203,
1455 * 0.6235300574157017, 0.6369176452710497,
1456 * 0.6139157775591579, 0.5453605027237935,
1457 * 0.4336604929612851, 0.4109641743019312,
1458 * 0.3881864790111245, 0.3642640090182592,
1459 * 0.2179599909280145, 0.1889996477663011,
1460 * 0.3340182623592461, 0.4409367494853381,
1461 * 0.5140091754526943, 0.5559332704045969,
1462 * 0.5677443693743304, 0.5478251665295453,
1463 * 0.4895759664908982, 0.4109641743019171,
1464 * 0.395727260284338, 0.3778949322004734,
1465 * 0.3596268271857124, 0.2191250268948948,
1466 * 0.1632722532153683, 0.2850397806663325,
1467 * 0.373006008206081, 0.4325325506354207,
1468 * 0.4670304994474315, 0.4784738764866023,
1469 * 0.4677122687599041, 0.4341716881061055,
1470 * 0.388186479011099, 0.3778949322004602,
1471 * 0.3633362567187364, 0.3464457261905399,
1472 * 0.2096362321365655, 0.1276782480038148,
1473 * 0.2184260032478634, 0.2821694983395252,
1474 * 0.3248315148915535, 0.3499809143811097,
1475 * 0.3602190632823333, 0.3587654911000799,
1476 * 0.3534389974779268, 0.3642640090182283,
1477 * 0.35962682718569, 0.3464457261905295,
1478 * 0.3260728953424643, 0.180670595355394,
1479 * 0.07711845915789244, 0.1271121156350963,
1480 * 0.1610176733857757, 0.1834600412730144,
1481 * 0.1968826374629443, 0.2031792054737354,
1482 * 0.2050734291675885, 0.2083227496961245,
1483 * 0.2179599909279998, 0.2191250268948822,
1484 * 0.2096362321365551, 0.1806705953553887,
1485 * 0.1067965550010013 });
1486 *
1487 * @endcode
1488 *
1489 * Now run the forward simulator for samples:
1490 *
1491 * @code
1492 * ForwardSimulator::PoissonSolver<2> laplace_problem(
1493 * /* global_refinements = */ 5,
1494 * /* fe_degree = */ 1,
1495 * dataset_name);
1496 * LogLikelihood::Gaussian log_likelihood(exact_solution, 0.05);
1497 * LogPrior::LogGaussian log_prior(0, 2);
1498 * ProposalGenerator::LogGaussian proposal_generator(
1499 * random_seed, 0.09); /* so that the acceptance ratio is ~0.24 */
1500 * Sampler::MetropolisHastings sampler(laplace_problem,
1501 * log_likelihood,
1502 * log_prior,
1503 * proposal_generator,
1504 * random_seed,
1505 * dataset_name);
1506 *
1507 * Vector<double> starting_coefficients(64);
1508 * for (auto &el : starting_coefficients)
1509 * el = 1.;
1510 * sampler.sample(starting_coefficients,
1511 * (testing ? 250 * 40 /* takes 40 seconds */
1512 * :
1513 * 100000000 /* takes 6 days */
1514 * ));
1515 * }
1516 * @endcode
1517
1518
1519 */