    LogLikelihood::Gaussian log_likelihood(exact_solution, 0.05);
    LogPrior::LogGaussian   log_prior(0, 2);

    const unsigned int n_theta = 64;
    for (unsigned int test = 0; test < 10; ++test)
      {
        std::cout << "Generating output for test " << test << std::endl;

        // Read the 64 coefficient values for this test case:
        std::ifstream test_input("../testing/input." +
                                 std::to_string(test) +
                                 ".txt");

        Vector<double> theta(n_theta);
        for (unsigned int i = 0; i < n_theta; ++i)
          test_input >> theta[i];

        // Run the forward simulator and write the resulting measurement
        // vector z, the log likelihood, and the log prior to files:
        const Vector<double> z = laplace_problem.evaluate(theta);

        std::ofstream test_output_z("output." + std::to_string(test) + ".z.txt");
        z.print(test_output_z, 16);

        std::ofstream test_output_likelihood("output." + std::to_string(test) +
                                             ".likelihood.txt");
        test_output_likelihood.precision(12);
        test_output_likelihood << log_likelihood.log_likelihood(z) << std::endl;

        std::ofstream test_output_prior("output." + std::to_string(test) +
                                        ".prior.txt");
        test_output_prior.precision(12);
        test_output_prior << log_prior.log_prior(theta) << std::endl;
      }
This code reads in each of the input files (assuming that the executable is located in a
build directory parallel to the `testing/` directory) and outputs its results into the
current directory. The outputs you get from a modified program should then be compared
against the ones stored in the `testing/` directory. They should match to several digits.
An alternative implementation in Matlab
---------------------------------------

To facilitate experiments, this directory also contains alternative
implementations of the benchmark. The first one was written by David
Aristoff in Matlab and can be found in the `Matlab/` directory. As is
common in Matlab programs, each function is provided in its own
file. We have verified that the program generates the same results (to
12 or more digits) as the C++ program, using the tests mentioned in
the previous section.
An alternative implementation in Python
---------------------------------------

Another alternative, written by Wolfgang Bangerth, can be found in the
`Python/` directory and uses Python3. As is common for Python
programs, the whole program is provided in one file. As for the Matlab
version, we have verified that the program generates the same results
(to 12 or more digits) as the C++ program, using the tests mentioned
in the previous section. In fact, if you just execute the program
as-is, it runs diagnostics that output the errors.
This Python version is essentially a literal translation of the Matlab
code. It is not nearly as efficient (taking around 5 times as much
time for each function evaluation) and could probably be optimized
substantially if desired. A good starting point is the insertion of
the local elements into the global matrix in the line

    A[np.ix_(dof,dof)] += theta_loc * A_loc

which takes up a substantial fraction of the overall run time.
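
One direction such an optimization could take (a sketch only, assuming
that -- as in this benchmark -- the same local matrix `A_loc` is used on
every cell and that a SciPy sparse matrix is acceptable to the rest of the
code) is to collect all local contributions as COO triplets and let the
sparse-matrix conversion sum duplicate entries once, instead of repeatedly
updating `A` with fancy indexing. The function and argument names below are
made up for illustration:

    import numpy as np
    import scipy.sparse as sp

    def assemble_stiffness(A_loc, cell_dofs, theta, n_dofs):
        """Assemble sum_K theta[K] * A_loc, scattered to the indices cell_dofs[K]."""
        rows, cols, vals = [], [], []
        for dofs, theta_loc in zip(cell_dofs, theta):
            # All (i, j) index pairs of this cell's degrees of freedom:
            r, c = np.meshgrid(dofs, dofs, indexing="ij")
            rows.append(r.ravel())
            cols.append(c.ravel())
            vals.append((theta_loc * A_loc).ravel())
        A = sp.coo_matrix((np.concatenate(vals),
                           (np.concatenate(rows), np.concatenate(cols))),
                          shape=(n_dofs, n_dofs))
        return A.tocsr()  # duplicate (i, j) entries are summed here

Whether this actually pays off would of course have to be measured against
the existing implementation.
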
<a name="ann-mcmc-laplace.cc"></a>
<h1>Annotated version of mcmc-laplace.cc</h1>
 * #include <deal.II/base/timer.h>
 * #include <deal.II/grid/tria.h>
 * #include <deal.II/dofs/dof_handler.h>
 * #include <deal.II/grid/grid_generator.h>
 * #include <deal.II/grid/tria_accessor.h>
 * #include <deal.II/grid/tria_iterator.h>
 * #include <deal.II/dofs/dof_accessor.h>
 * #include <deal.II/fe/fe_q.h>
 * #include <deal.II/dofs/dof_tools.h>
 * #include <deal.II/fe/fe_values.h>
 * #include <deal.II/base/quadrature_lib.h>
 * #include <deal.II/base/function.h>
 * #include <deal.II/numerics/vector_tools.h>
 * #include <deal.II/numerics/matrix_tools.h>
 * #include <deal.II/lac/vector.h>
 * #include <deal.II/lac/full_matrix.h>
 * #include <deal.II/lac/sparse_matrix.h>
 * #include <deal.II/lac/dynamic_sparsity_pattern.h>
 * #include <deal.II/lac/solver_cg.h>
 * #include <deal.II/lac/precondition.h>
 * #include <deal.II/lac/sparse_ilu.h>
 *
 * #include <deal.II/numerics/data_out.h>
 *
 * #include <iostream>
 *
 * #include <deal.II/base/logstream.h>
 * The following is a namespace in which we define the solver of the PDE.
 * The main class implements an abstract `Interface` class declared at
 * the top, which provides for an `evaluate()` function that, given
 * a coefficient vector, solves the PDE discussed in the Readme file
 * and then evaluates the solution at the 169 mentioned points.
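 *
 * In other words, the forward model implemented here maps a 64-element
 * coefficient vector @f$\theta@f$ to the vector of point values
 * @f$z_k = u_\theta(\hat x_k)@f$, @f$k=1,\ldots,169@f$, where @f$u_\theta@f$ is
 * the finite element solution of the PDE with coefficient @f$\theta@f$ and the
 * @f$\hat x_k@f$ are the measurement points introduced below.
 *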
 * The solver follows the basic layout of @ref step_4 "step-4", though it precomputes
 * a number of things in the `setup_system()` function, such as the
 * evaluation of the matrix that corresponds to the point evaluations,
 * as well as the local contributions to the matrix and right hand side.
 *
 * Rather than commenting on everything in detail, in the following
 * we will only document those things that are not already clear from
 * @ref step_4 "step-4" and a small number of other tutorial programs.
 * namespace ForwardSimulator
 *
 *     virtual ~Interface() = default;
 *
 *   class PoissonSolver : public Interface
 *
 *     PoissonSolver(const unsigned int global_refinements,
 *                   const unsigned int fe_degree,
 *                   const std::string &dataset_name);
 *
 *     void make_grid(const unsigned int global_refinements);
 *     void setup_system();
 *
 *     std::map<types::global_dof_index, double> boundary_values;
 *
 *     std::vector<Point<dim>> measurement_points;
 *
 *     unsigned int       nth_evaluation;
 *     const std::string &dataset_name;
 *
 *   PoissonSolver<dim>::PoissonSolver(const unsigned int global_refinements,
 *                                     const unsigned int fe_degree,
 *                                     const std::string &dataset_name)
 *     , nth_evaluation(0)
 *     , dataset_name(dataset_name)
 *
 *     make_grid(global_refinements);
 *
 *   void PoissonSolver<dim>::make_grid(const unsigned int global_refinements)
 *
 *     Assert(global_refinements >= 3,
 *            ExcMessage("This program makes the assumption that the mesh for the "
 *                       "solution of the PDE is at least as fine as the one used "
 *                       "in the definition of the coefficient."));
 *
 *     std::cout << " Number of active cells: " << triangulation.n_active_cells()
 *
 *   void PoissonSolver<dim>::setup_system()
 *
 * First define the finite element space:
 *
 *     dof_handler.distribute_dofs(fe);
 *
 *     std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
 *
 * Then set up the main data structures that will hold the discrete problem:
 *
 *     sparsity_pattern.copy_from(dsp);
 *
 *     system_matrix.reinit(sparsity_pattern);
 *
 *     solution.reinit(dof_handler.n_dofs());
 *     system_rhs.reinit(dof_handler.n_dofs());
 * And then define the tools to do point evaluation. We choose
 * a set of 13x13 points evenly distributed across the domain:
 *
 *     const unsigned int n_points_per_direction = 13;
 *     const double       dx = 1. / (n_points_per_direction + 1);
 *
 *     for (unsigned int x = 1; x <= n_points_per_direction; ++x)
 *       for (unsigned int y = 1; y <= n_points_per_direction; ++y)
 *         measurement_points.emplace_back(x * dx, y * dx);
 *
 * First build a full matrix of the evaluation process. We do this
 * even though the matrix is really sparse -- but we don't know
 * which entries are nonzero. Later, the `copy_from()` calls below
 * build a sparsity pattern and a sparse matrix from it.
 *
 *     Vector<double>     weights(dof_handler.n_dofs());
 *     FullMatrix<double> full_measurement_matrix(n_points_per_direction *
 *                                                  n_points_per_direction,
 *                                                dof_handler.n_dofs());
 *
 *     for (unsigned int index = 0; index < measurement_points.size(); ++index)
 *
 *         VectorTools::create_point_source_vector(dof_handler,
 *                                                 measurement_points[index],
 *                                                 weights);
 *         for (unsigned int i = 0; i < dof_handler.n_dofs(); ++i)
 *           full_measurement_matrix(index, i) = weights(i);
 *
 *     measurement_sparsity.copy_from(full_measurement_matrix);
 *     measurement_matrix.reinit(measurement_sparsity);
 *     measurement_matrix.copy_from(full_measurement_matrix);
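 *
 * Stated as a formula: the weights produced by `create_point_source_vector()`
 * are the values of the shape functions at the given point, so the matrix
 * built here has entries @f$B_{k,i} = \varphi_i(\hat x_k)@f$, and evaluating
 * the discrete solution at all 169 points amounts to the single matrix-vector
 * product @f$z = B U@f$ with the solution's coefficient vector @f$U@f$.
 *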
 * Next build the mapping from cell to the index in the 64-element
 * coefficient vector:
 *
 *     for (const auto &cell : triangulation.active_cell_iterators())
 *
 *         const unsigned int i = std::floor(cell->center()[0] * 8);
 *         const unsigned int j = std::floor(cell->center()[1] * 8);
 *
 *         const unsigned int index = i + 8 * j;
 *
 *         cell->set_user_index(index);
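 *
 * In other words, a cell with center @f$(x_c,y_c)@f$ receives the index
 * @f$\lfloor 8x_c\rfloor + 8\lfloor 8y_c\rfloor@f$, which enumerates the cells
 * of the coefficient's @f$8\times 8@f$ subdivision row by row and so matches
 * the layout of the 64-element coefficient vector.
 *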
 * Finally prebuild the building blocks of the linear system as
 * discussed in the Readme file:
 *
 *     const unsigned int dofs_per_cell = fe.dofs_per_cell;
 *
 *     cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
 *     cell_rhs.reinit(dofs_per_cell);
 *
 *     const QGauss<dim>  quadrature_formula(fe.degree + 1);
 *     const unsigned int n_q_points = quadrature_formula.size();
 *
 *     FEValues<dim> fe_values(fe,
 *                             quadrature_formula,
 *                             update_values | update_gradients |
 *                               update_JxW_values);
 *
 *     fe_values.reinit(dof_handler.begin_active());
 *
 *     for (unsigned int q_index = 0; q_index < n_q_points; ++q_index)
 *       for (unsigned int i = 0; i < dofs_per_cell; ++i)
 *
 *           for (unsigned int j = 0; j < dofs_per_cell; ++j)
 *             cell_matrix(i, j) +=
 *               (fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
 *                fe_values.shape_grad(j, q_index) * // grad phi_j(x_q)
 *                fe_values.JxW(q_index));           // dx
 *
 *           cell_rhs(i) += (fe_values.shape_value(i, q_index) * // phi_i(x_q)
 *                           fe_values.JxW(q_index));            // dx
 *
 *     VectorTools::interpolate_boundary_values(dof_handler,
 *                                              ZeroFunction<dim>(),
 *
 * Given that we have pre-built the matrix and right hand side contributions
 * for a (representative) cell, the function that assembles the matrix is
 * pretty short and straightforward:
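 *
 * In formula form: because all cells are congruent, the matrix assembled in
 * `assemble_system()` below is obtained by scattering @f$\theta_K A^{\text{loc}}@f$
 * into the global matrix for each cell @f$K@f$, where @f$A^{\text{loc}}@f$ is the
 * single precomputed cell matrix above and @f$\theta_K@f$ the coefficient value
 * on that cell; the right hand side uses the precomputed `cell_rhs` in the same
 * way, independently of @f$\theta@f$.
 *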
 *   void PoissonSolver<dim>::assemble_system(const Vector<double> &coefficients)
 *
 *     Assert(coefficients.size() == 64, ExcInternalError());
 *
 *     const unsigned int dofs_per_cell = fe.dofs_per_cell;
 *
 *     std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
 *
 *     for (const auto &cell : dof_handler.active_cell_iterators())
 *
 *         const double coefficient = coefficients(cell->user_index());
 *
 *         cell->get_dof_indices(local_dof_indices);
 *         for (unsigned int i = 0; i < dofs_per_cell; ++i)
 *
 *             for (unsigned int j = 0; j < dofs_per_cell; ++j)
 *               system_matrix.add(local_dof_indices[i],
 *                                 local_dof_indices[j],
 *                                 coefficient * cell_matrix(i, j));
 *
 *             system_rhs(local_dof_indices[i]) += cell_rhs(i);
 *
 *     MatrixTools::apply_boundary_values(boundary_values,
 *
 * The same is true for the function that solves the linear system:
 *
 *   void PoissonSolver<dim>::solve()
 *
 *     SparseILU<double> ilu;
 *     ilu.initialize(system_matrix);
 *     SolverControl control(100, 1e-10 * system_rhs.l2_norm());
 *     SolverCG<>    solver(control);
 *     solver.solve(system_matrix, solution, system_rhs, ilu);
 * The following function outputs graphical data for the most recently
 * used coefficient and corresponding solution of the PDE. Collecting
 * the coefficient values requires translating between the 64-element
 * coefficient vector and the cells that correspond to each of its
 * elements. The rest remains pretty obvious, with the exception
 * of including the number of the current sample into the file name.
 *
 *   PoissonSolver<dim>::output_results(const Vector<double> &coefficients) const
 *
 *     Vector<float> coefficient_values(triangulation.n_active_cells());
 *     for (const auto &cell : triangulation.active_cell_iterators())
 *       coefficient_values[cell->active_cell_index()] =
 *         coefficients(cell->user_index());
 *
 *     DataOut<dim> data_out;
 *
 *     data_out.attach_dof_handler(dof_handler);
 *     data_out.add_data_vector(solution, "solution");
 *     data_out.add_data_vector(coefficient_values, "coefficient");
 *
 *     data_out.build_patches();
 *
 *     std::ofstream output("solution-" +
 *                          Utilities::int_to_string(nth_evaluation, 10) + ".vtu");
 *     data_out.write_vtu(output);
 *
 * The following is the main function of this class: Given a coefficient
 * vector, it assembles the linear system, solves it, and then evaluates
 * the solution at the measurement points by applying the measurement
 * matrix to the solution vector. That vector of "measured" values
 * is what the function then returns.
 *
 * The function will also output the solution in a graphical format
 * if you un-comment the corresponding statement in the third
 * code block. However, you may end up with a very large amount
 * of data: This code is producing, at the minimum, 10,000 samples
 * and creating output for each one of them is surely more data
 * than you ever want to see!
 *
 * At the end of the function, we output some timing information
 * every 10,000 samples.
 *
 *   PoissonSolver<dim>::evaluate(const Vector<double> &coefficients)
 *
 *       TimerOutput::Scope section(timer, "Building linear systems");
 *       assemble_system(coefficients);
 *
 *       TimerOutput::Scope section(timer, "Solving linear systems");
 *
 *     Vector<double> measurements(measurement_matrix.m());
 *
 *       TimerOutput::Scope section(timer, "Postprocessing");
 *
 *       measurement_matrix.vmult(measurements, solution);
 *       Assert(measurements.size() == measurement_points.size(),
 *              ExcInternalError());
 *
 *       /* output_results(coefficients); */
 *
 *     if (nth_evaluation % 10000 == 0)
 *       timer.print_summary();
 *
 *     return measurements;
 *
 * } // namespace ForwardSimulator
 *
 * The following namespaces define the statistical properties of the Bayesian
 * inverse problem. The first is about the definition of the measurement
 * statistics (the "likelihood"), which we here assume to be a normal
 * distribution @f$N(\mu,\sigma I)@f$ with mean value @f$\mu@f$ given by the
 * actual measurement vector (passed as an argument to the constructor
 * of the `Gaussian` class) and standard deviation @f$\sigma@f$.
 *
 * For reasons of numerical accuracy, it is useful to not return the
 * actual likelihood, but its logarithm. This is because these
 * values can be very small, occasionally on the order of @f$e^{-100}@f$,
 * for which it becomes very difficult to compute accurate values.
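 *
 * Written out, the `log_likelihood()` function below therefore returns
 * @f$-\|x-\mu\|^2/(2\sigma^2)@f$, i.e., the logarithm of the Gaussian density
 * up to an additive constant that does not depend on @f$x@f$ and consequently
 * drops out of the Metropolis-Hastings acceptance ratio.
 *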
 * namespace LogLikelihood
 *
 *     virtual double log_likelihood(const Vector<double> &x) const = 0;
 *
 *     virtual ~Interface() = default;
 *
 *   class Gaussian : public Interface
 *
 *     Gaussian(const Vector<double> &mu, const double sigma);
 *
 *     virtual double log_likelihood(const Vector<double> &x) const override;
 *
 *     const Vector<double> mu;
 *     const double         sigma;
 *
 *   Gaussian::Gaussian(const Vector<double> &mu, const double sigma)
 *
 *   double Gaussian::log_likelihood(const Vector<double> &x) const
 *
 *     Vector<double> x_minus_mu = x;
 *
 *     return -x_minus_mu.norm_sqr() / (2 * sigma * sigma);
 *
 * } // namespace LogLikelihood
 *
 * Next up is the "prior" imposed on the coefficients. We assume
 * that the logarithms of the entries of the coefficient vector
 * are all distributed as a Gaussian with given mean and standard
 * deviation. If the logarithms of the coefficients are normally
 * distributed, then this implies in particular that the coefficients
 * can only be positive, which is a useful property to ensure the
 * well-posedness of the forward problem.
 *
 * For the same reasons as for the likelihood above, the interface
 * for the prior asks for returning the *logarithm* of the prior,
 * instead of the prior probability itself.
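 *
 * Concretely, the `log_prior()` function below returns
 * @f$\sum_i -(\ln\theta_i-\mu)^2/(2\sigma^2)@f$, i.e., the logarithm of a
 * Gaussian density in the logarithms of the coefficients, again up to an
 * additive constant.
 *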
 * namespace LogPrior
 *
 *     virtual double log_prior(const Vector<double> &x) const = 0;
 *
 *     virtual ~Interface() = default;
 *
 *   class LogGaussian : public Interface
 *
 *     LogGaussian(const double mu, const double sigma);
 *
 *     virtual double log_prior(const Vector<double> &x) const override;
 *
 *     const double sigma;
 *
 *   LogGaussian::LogGaussian(const double mu, const double sigma)
 *
 *   double LogGaussian::log_prior(const Vector<double> &x) const
 *
 *     double log_of_product = 0;
 *
 *     for (const auto &el : x)
 *       log_of_product +=
 *         -(std::log(el) - mu) * (std::log(el) - mu) / (2 * sigma * sigma);
 *
 *     return log_of_product;
 *
 * } // namespace LogPrior
 *
 * The Metropolis-Hastings algorithm requires a method to create a new sample
 * given a previous sample. We do this by perturbing the current (coefficient)
 * sample randomly using a Gaussian distribution centered at the current
 * sample. To ensure that the samples' individual entries all remain
 * positive, we use a Gaussian distribution in logarithm space -- in other
 * words, instead of *adding* a small perturbation with mean value zero,
 * we *multiply* the entries of the current sample by a factor that
 * is the exponential of a random number with mean zero. (Because the
 * exponential of zero is one, this means that the most likely factors
 * to multiply the existing sample entries by are close to one. And
 * because the exponential of a number is always positive, we never
 * get negative samples this way.)
 *
 * But the Metropolis-Hastings sampler doesn't just need a perturbed
 * sample location @f$y@f$ given the current sample location @f$x@f$. It also
 * needs to know the ratio of the probability of reaching @f$y@f$ from
 * @f$x@f$, divided by the probability of reaching @f$x@f$ from @f$y@f$. If we
 * were to use a symmetric proposal distribution (e.g., a Gaussian
 * distribution centered at @f$x@f$ with a width independent of @f$x@f$), then
 * these two probabilities would be the same, and the ratio one. But
 * that's not the case for the Gaussian in log space. It's not
 * terribly difficult to verify that in that case, for a single
 * component the ratio of these probabilities is @f$y_i/x_i@f$, and
 * consequently the ratio for all components of the vector together
 * is the product of these per-component ratios.
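 *
 * In formulas, the proposal implemented below is @f$y_i = x_i e^{\xi_i}@f$ with
 * independent @f$\xi_i \sim N(0,\sigma^2)@f$ (where @f$\sigma@f$ is called
 * `log_sigma` in the code), and the quantity returned together with the new
 * sample is the proposal ratio @f$\prod_i y_i/x_i = \prod_i e^{\xi_i}@f$.
 *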
 * namespace ProposalGenerator
 *
 *     std::pair<Vector<double>,double>
 *     perturb(const Vector<double> &current_sample) const = 0;
 *
 *     virtual ~Interface() = default;
 *
 *   class LogGaussian : public Interface
 *
 *     LogGaussian(const unsigned int random_seed, const double log_sigma);
 *
 *     std::pair<Vector<double>,double>
 *     perturb(const Vector<double> &current_sample) const;
 *
 *     const double         log_sigma;
 *     mutable std::mt19937 random_number_generator;
 *
 *   LogGaussian::LogGaussian(const unsigned int random_seed,
 *                            const double       log_sigma)
 *     : log_sigma(log_sigma)
 *
 *     random_number_generator.seed(random_seed);
 *
 *   std::pair<Vector<double>,double>
 *   LogGaussian::perturb(const Vector<double> &current_sample) const
 *
 *     Vector<double> new_sample        = current_sample;
 *     double         product_of_ratios = 1;
 *     for (auto &x : new_sample)
 *
 *         const double rnd     = std::normal_distribution<>(0, log_sigma)(random_number_generator);
 *         const double exp_rnd = std::exp(rnd);
 *
 *         product_of_ratios *= exp_rnd;
 *
 *     return {new_sample, product_of_ratios};
 *
 * } // namespace ProposalGenerator
 *
 * The last main class is the Metropolis-Hastings sampler itself.
 * If you understand the algorithm behind this method, then
 * the following implementation should not be too difficult
 * to read. The only thing of relevance is that descriptions
 * of the algorithm typically ask whether the *ratio* of two
 * probabilities (the "posterior" probabilities of the current
 * and the previous samples, where the "posterior" is the product of the
 * likelihood and the prior probability) is larger or smaller than a
 * randomly drawn number. But because our interfaces return the
 * *logarithms* of these probabilities, we now need to take
 * the ratio of appropriate exponentials -- which is made numerically
 * more stable by considering the exponential of the difference of
 * the log probabilities. The only other slight complication is that
 * we need to multiply this ratio by the ratio of proposal probabilities
 * since we use a non-symmetric proposal distribution.
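 *
 * Written out, a trial sample @f$y@f$ generated from the current sample @f$x@f$
 * is accepted whenever
 * @f$\exp\left(\log\pi(y)-\log\pi(x)\right)\,\prod_i \frac{y_i}{x_i}@f$
 * exceeds a random number drawn uniformly from @f$[0,1]@f$, where @f$\log\pi@f$
 * denotes the log posterior, i.e., the sum of log likelihood and log prior;
 * otherwise the current sample is retained and written out again.
 *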
 * Finally, we note that the output is generated with 7 digits of
 * accuracy. (The C++ default is 6 digits.) We do this because,
 * as shown in the paper, we can determine the mean value of the
 * probability distribution we are sampling here to at least six
 * digits of accuracy, and do not want to be limited by the precision
 * of the output.
 *
 *   class MetropolisHastings
 *
 *     MetropolisHastings(ForwardSimulator::Interface &       simulator,
 *                        const LogLikelihood::Interface &    likelihood,
 *                        const LogPrior::Interface &         prior,
 *                        const ProposalGenerator::Interface &proposal_generator,
 *                        const unsigned int                  random_seed,
 *                        const std::string &                 dataset_name);
 *
 *     void sample(const Vector<double> &starting_guess,
 *                 const unsigned int    n_samples);
 *
 *     ForwardSimulator::Interface &       simulator;
 *     const LogLikelihood::Interface &    likelihood;
 *     const LogPrior::Interface &         prior;
 *     const ProposalGenerator::Interface &proposal_generator;
 *
 *     std::mt19937 random_number_generator;
 *
 *     unsigned int sample_number;
 *     unsigned int accepted_sample_number;
 *
 *     std::ofstream output_file;
 *
 *     void write_sample(const Vector<double> &current_sample,
 *                       const double          current_log_likelihood);
 *
 *   MetropolisHastings::MetropolisHastings(
 *     ForwardSimulator::Interface &       simulator,
 *     const LogLikelihood::Interface &    likelihood,
 *     const LogPrior::Interface &         prior,
 *     const ProposalGenerator::Interface &proposal_generator,
 *     const unsigned int                  random_seed,
 *     const std::string &                 dataset_name)
 *     : simulator(simulator)
 *     , likelihood(likelihood)
 *     , proposal_generator(proposal_generator)
 *     , sample_number(0)
 *     , accepted_sample_number(0)
 *
 *     output_file.open("samples-" + dataset_name + ".txt");
 *     output_file.precision(7);
 *
 *     random_number_generator.seed(random_seed);
 *
 *   void MetropolisHastings::sample(const Vector<double> &starting_guess,
 *                                   const unsigned int    n_samples)
 *
 *     std::uniform_real_distribution<> uniform_distribution(0, 1);
 *
 *     Vector<double> current_sample = starting_guess;
 *     double         current_log_posterior =
 *       (likelihood.log_likelihood(simulator.evaluate(current_sample)) +
 *        prior.log_prior(current_sample));
 *
 *     ++accepted_sample_number;
 *     write_sample(current_sample, current_log_posterior);
 *
 *     for (unsigned int k = 1; k < n_samples; ++k, ++sample_number)
 *
 *         std::pair<Vector<double>,double>
 *           perturbation = proposal_generator.perturb(current_sample);
 *         const Vector<double> trial_sample = std::move(perturbation.first);
 *         const double perturbation_probability_ratio = perturbation.second;
 *
 *         const double trial_log_posterior =
 *           (likelihood.log_likelihood(simulator.evaluate(trial_sample)) +
 *            prior.log_prior(trial_sample));
 *
 *         if (std::exp(trial_log_posterior - current_log_posterior) * perturbation_probability_ratio >=
 *             uniform_distribution(random_number_generator))
 *
 *             current_sample        = trial_sample;
 *             current_log_posterior = trial_log_posterior;
 *
 *             ++accepted_sample_number;
 *
 *         write_sample(current_sample, current_log_posterior);
 *
 *   void MetropolisHastings::write_sample(const Vector<double> &current_sample,
 *                                         const double current_log_posterior)
 *
 *     output_file << current_log_posterior << '\t';
 *     output_file << accepted_sample_number << '\t';
 *     for (const auto &x : current_sample)
 *       output_file << x << ' ';
 *     output_file << '\n';
 *
 *     output_file.flush();
 *
 * } // namespace Sampler
 *
 * The final function is `main()`, which simply puts all of these pieces
 * together into one. The "exact solution", i.e., the "measurement values"
 * we use for this program are tabulated to make it easier for other
 * people to use them in their own implementations of this benchmark. These
 * values were created using the same main class above, but using 8 mesh
 * refinements and using a Q3 element -- i.e., using a much more accurate
 * method than the one we use in the forward simulator for generating
 * samples below (which uses 5 global mesh refinement steps and a Q1
 * element). If you wanted to regenerate this set of numbers, then
 * the following code snippet would do that:
 * <div class=CodeFragmentInTutorialComment>
 *   /* Set the exact coefficient: */
 *   Vector<double> exact_coefficients(64);
 *   for (auto &el : exact_coefficients)
 *     el = 1.;
 *   exact_coefficients(9) = exact_coefficients(10) = exact_coefficients(17) =
 *     exact_coefficients(18) = 0.1;
 *   exact_coefficients(45) = exact_coefficients(46) = exact_coefficients(53) =
 *     exact_coefficients(54) = 10.;
 *
 *   /* Compute the "correct" solution vector: */
 *   const Vector<double> exact_solution =
 *     ForwardSimulator::PoissonSolver<2>(/* global_refinements = */ 8,
 *                                        /* fe_degree = */ 3,
 *                                        /* prefix = */ "exact")
 *       .evaluate(exact_coefficients);
 *
 *   const bool testing = true;
 *
 * Run with one thread, so as to not step on other processes
 * doing the same at the same time. It turns out that the problem
 * is also so small that running with more than one thread
 * *increases* the runtime.
 *
 *   MultithreadInfo::set_thread_limit(1);
 *
 *   const unsigned int random_seed  = (testing ? 1U : std::random_device()());
 *   const std::string  dataset_name = Utilities::to_string(random_seed, 10);
 *   const Vector<double> exact_solution(
 *     { 0.06076511762259369, 0.09601910120848481,
 *       0.1238852517838584,  0.1495184117375201,
 *       0.1841596127549784,  0.2174525028261122,
 *       0.2250996160898698,  0.2197954769002993,
 *       0.2074695698370926,  0.1889996477663016,
 *       0.1632722532153726,  0.1276782480038186,
 *       0.07711845915789312, 0.09601910120848552,
 *       0.2000589533367983,  0.3385592591951766,
 *       0.3934300024647806,  0.4040223892461541,
 *       0.4122329537843092,  0.4100480091545554,
 *       0.3949151637189968,  0.3697873264791232,
 *       0.33401826235924,    0.2850397806663382,
 *       0.2184260032478671,  0.1271121156350957,
 *       0.1238852517838611,  0.3385592591951819,
 *       0.7119285162766475,  0.8175712861756428,
 *       0.6836254116578105,  0.5779452419831157,
 *       0.5555615956136897,  0.5285181561736719,
 *       0.491439702849224,   0.4409367494853282,
 *       0.3730060082060772,  0.2821694983395214,
 *       0.1610176733857739,  0.1495184117375257,
 *       0.3934300024647929,  0.8175712861756562,
 *       0.9439154625527653,  0.8015904115095128,
 *       0.6859683749254024,  0.6561235366960599,
 *       0.6213197201867315,  0.5753611315000049,
 *       0.5140091754526823,  0.4325325506354165,
 *       0.3248315148915482,  0.1834600412730086,
 *       0.1841596127549917,  0.4040223892461832,
 *       0.6836254116578439,  0.8015904115095396,
 *       0.7870119561144977,  0.7373108331395808,
 *       0.7116558878070463,  0.6745179049094283,
 *       0.6235300574156917,  0.5559332704045935,
 *       0.4670304994474178,  0.3499809143811,
 *       0.19688263746294,    0.2174525028261253,
 *       0.4122329537843404,  0.5779452419831566,
 *       0.6859683749254372,  0.7373108331396063,
 *       0.7458811983178246,  0.7278968022406559,
 *       0.6904793535357751,  0.6369176452710288,
 *       0.5677443693743215,  0.4784738764865867,
 *       0.3602190632823262,  0.2031792054737325,
 *       0.2250996160898818,  0.4100480091545787,
 *       0.5555615956137137,  0.6561235366960938,
 *       0.7116558878070715,  0.727896802240657,
 *       0.7121928678670187,  0.6712187391428729,
 *       0.6139157775591492,  0.5478251665295381,
 *       0.4677122687599031,  0.3587654911000848,
 *       0.2050734291675918,  0.2197954769003094,
 *       0.3949151637190157,  0.5285181561736911,
 *       0.6213197201867471,  0.6745179049094407,
 *       0.690479353535786,   0.6712187391428787,
 *       0.6178408289359514,  0.5453605027237883,
 *       0.489575966490909,   0.4341716881061278,
 *       0.3534389974779456,  0.2083227496961347,
 *       0.207469569837099,   0.3697873264791366,
 *       0.4914397028492412,  0.5753611315000203,
 *       0.6235300574157017,  0.6369176452710497,
 *       0.6139157775591579,  0.5453605027237935,
 *       0.4336604929612851,  0.4109641743019312,
 *       0.3881864790111245,  0.3642640090182592,
 *       0.2179599909280145,  0.1889996477663011,
 *       0.3340182623592461,  0.4409367494853381,
 *       0.5140091754526943,  0.5559332704045969,
 *       0.5677443693743304,  0.5478251665295453,
 *       0.4895759664908982,  0.4109641743019171,
 *       0.395727260284338,   0.3778949322004734,
 *       0.3596268271857124,  0.2191250268948948,
 *       0.1632722532153683,  0.2850397806663325,
 *       0.373006008206081,   0.4325325506354207,
 *       0.4670304994474315,  0.4784738764866023,
 *       0.4677122687599041,  0.4341716881061055,
 *       0.388186479011099,   0.3778949322004602,
 *       0.3633362567187364,  0.3464457261905399,
 *       0.2096362321365655,  0.1276782480038148,
 *       0.2184260032478634,  0.2821694983395252,
 *       0.3248315148915535,  0.3499809143811097,
 *       0.3602190632823333,  0.3587654911000799,
 *       0.3534389974779268,  0.3642640090182283,
 *       0.35962682718569,    0.3464457261905295,
 *       0.3260728953424643,  0.180670595355394,
 *       0.07711845915789244, 0.1271121156350963,
 *       0.1610176733857757,  0.1834600412730144,
 *       0.1968826374629443,  0.2031792054737354,
 *       0.2050734291675885,  0.2083227496961245,
 *       0.2179599909279998,  0.2191250268948822,
 *       0.2096362321365551,  0.1806705953553887,
 *       0.1067965550010013 });
 * Now run the forward simulator for samples:
 *
 *   ForwardSimulator::PoissonSolver<2> laplace_problem(
 *     /* global_refinements = */ 5,
 *     /* fe_degree = */ 1,
 *
 *   LogLikelihood::Gaussian        log_likelihood(exact_solution, 0.05);
 *   LogPrior::LogGaussian          log_prior(0, 2);
 *
 *   ProposalGenerator::LogGaussian proposal_generator(
 *     random_seed, 0.09); /* so that the acceptance ratio is ~0.24 */
 *   Sampler::MetropolisHastings sampler(laplace_problem,
 *                                       proposal_generator,
 *
 *   Vector<double> starting_coefficients(64);
 *   for (auto &el : starting_coefficients)
 *     el = 1.;
 *   sampler.sample(starting_coefficients,
 *                  (testing ? 250 * 40    /* takes 40 seconds */
 *                           : 100000000   /* takes 6 days */