      LogLikelihood::Gaussian        log_likelihood(exact_solution, 0.05);
      LogPrior::LogGaussian          log_prior(0, 2);

      const unsigned int n_theta = 64;
      for (unsigned int test=0; test<10; ++test)
        {
          std::cout << "Generating output for test " << test << std::endl;

          // Read the input vector of this test:
          std::ifstream test_input ("../testing/input." + std::to_string(test) + ".txt");
          Assert (test_input, ExcIO());

          Vector<double> theta(n_theta);
          for (unsigned int i=0; i<n_theta; ++i)
            test_input >> theta[i];

          // Run it through the forward simulator and write the resulting
          // measurement vector z to a file...
          const Vector<double> z = laplace_problem.evaluate(theta);
          std::ofstream test_output_z ("output." + std::to_string(test) + ".z.txt");
          z.print(test_output_z, 16);

          // ...and then also evaluate and output the log likelihood and log prior:
          std::ofstream test_output_likelihood ("output." + std::to_string(test) + ".loglikelihood.txt");
          test_output_likelihood.precision(12);
          test_output_likelihood << log_likelihood.log_likelihood(z) << std::endl;

          std::ofstream test_output_prior ("output." + std::to_string(test) + ".logprior.txt");
          test_output_prior.precision(12);
          test_output_prior << log_prior.log_prior(theta) << std::endl;
        }
This code reads in each of the input files (assuming that the executable is located in a
build directory parallel to the `testing/` directory) and outputs its results into the
current directory. The outputs you get from a modified program should then be compared
against the ones stored in the `testing/` directory. They should match to several digits.
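
A minimal comparison sketch (not part of the benchmark itself): it assumes the reference
files live in `../testing/` and the freshly generated ones in the current directory, as in
the code above, and uses NumPy only for convenience.

    import numpy as np

    for test in range(10):
        mine      = np.loadtxt("output.{}.z.txt".format(test))
        reference = np.loadtxt("../testing/output.{}.z.txt".format(test))
        print("test", test, "max |z - z_ref| =", np.abs(mine - reference).max())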
An alternative implementation in Matlab
---------------------------------------

To facilitate experiments, this directory also contains alternative
implementations of the benchmark. The first one was written by David
Aristoff in Matlab and can be found in the `Matlab/` directory. As is
common in Matlab programs, each function is provided in its own
file. We have verified that the program generates the same results (to
12 or more digits) as the C++ program, using the tests mentioned in
the previous section.
An alternative implementation in Python
---------------------------------------

Another alternative, written by Wolfgang Bangerth, can be found in the
`Python/` directory and uses Python3. As is common for Python
programs, the whole program is provided in one file. As for the Matlab
version, we have verified that the program generates the same results
(to 12 or more digits) as the C++ program, using the tests mentioned
in the previous section. In fact, if you just execute the program
as-is, it runs diagnostics that output the errors.

This Python version is essentially a literal translation of the Matlab
code. It is not nearly as efficient (taking around 5 times as much
time for each function evaluation) and could probably be optimized
substantially if desired. A good starting point is the insertion of
the local elements into the global matrix in the line

    A[np.ix_(dof,dof)] += theta_loc * A_loc

that takes up a substantial fraction of the overall run time.
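
One possible direction (a sketch only, not the gallery's implementation) is to collect
triplets and build the matrix sparsely instead of updating a dense 1089x1089 array cell by
cell. The sketch below reuses the mesh size, `ij_to_dof_index()`, `A_loc`, and the `theta`
layout from `Python/forward_solver.py` further down.

    import numpy as np
    import scipy.sparse

    def assemble_sparse(theta, ij_to_dof_index, A_loc):
        # accumulate COO triplets; duplicate (row, col) pairs are summed
        # automatically when converting to CSR
        rows, cols, vals = [], [], []
        for i in range(32):
            for j in range(32):
                theta_loc = theta[int(i/4) + int(j/4)*8]
                dof = [ij_to_dof_index(i, j),
                       ij_to_dof_index(i, j+1),
                       ij_to_dof_index(i+1, j+1),
                       ij_to_dof_index(i+1, j)]
                for a in range(4):
                    for b in range(4):
                        rows.append(dof[a])
                        cols.append(dof[b])
                        vals.append(theta_loc * A_loc[a, b])
        return scipy.sparse.coo_matrix((vals, (rows, cols)),
                                       shape=(33**2, 33**2)).tocsr()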
<a name="ann-Matlab/exact_values.m"></a>
<h1>Annotated version of Matlab/exact_values.m</h1>

%% -----------------------------------------------------------------------------
%% SPDX-License-Identifier: LGPL-2.1-or-later
%% Copyright (C) 2022 by Wolfgang Bangerth
%% This file is part of the deal.II code gallery.
%% -----------------------------------------------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%% list of "exact" measurement values, z_hat %%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

z_hat = [0.06076511762259369;
<a name="ann-Matlab/forward_solver.m"></a>
<h1>Annotated version of Matlab/forward_solver.m</h1>

%% -----------------------------------------------------------------------------
%% SPDX-License-Identifier: LGPL-2.1-or-later
%% Copyright (C) 2022 by Wolfgang Bangerth
%% This file is part of the deal.II code gallery.
%% -----------------------------------------------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%% forward solver function %%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%theta = current 8x8 parameter matrix
%lbl = cell labeling function
%A_loc = matrix of local contributions to A
%Id = identity matrix of size 33^2 x 33^2
%boundaries = labels of boundary cells
%b = right hand side of linear system (AU = b)
%M = measurement matrix
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%z = vector of measurements
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function z = forward_solver(theta,lbl,A_loc,Id,boundaries,b,M)

%initialize matrix A for FEM linear solve, AU = b
A = zeros(33^2,33^2);

%loop over cells to build A
for i=0:31
  for j=0:31 %build A by summing over contribution from each cell

    %find local coefficient in 8x8 grid
    theta_loc = theta(floor(i/4)+1,floor(j/4)+1);

    %update A by including contribution from cell (i,j)
    dof = [lbl(i,j),lbl(i,j+1),lbl(i+1,j+1),lbl(i+1,j)];
    A(dof,dof) = A(dof,dof) + theta_loc*A_loc;
  end
end

%enforce boundary condition
A(boundaries,:) = Id(boundaries,:);
A(:,boundaries) = Id(:,boundaries);

%solve linear equation for coefficients, U
U = A\b;

%get new z values
z = M*U;
<a name="ann-Matlab/get_statistics.m"></a>
<h1>Annotated version of Matlab/get_statistics.m</h1>

%% -----------------------------------------------------------------------------
%% SPDX-License-Identifier: LGPL-2.1-or-later
%% Copyright (C) 2022 by Wolfgang Bangerth
%% This file is part of the deal.II code gallery.
%% -----------------------------------------------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%% compute statistics on data set %%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%data = tensor of theta samples from each lag time and chain
%theta_means = means of theta over each independent chain
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%theta_mean = overall mean of chains
%covars = covariance matrices of each independent chain
%autocovar = mean of autocovariance matrix over all the chains
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function [theta_mean,covars,autocovar] = get_statistics(data,theta_means);

%compute overall mean of data, and get size of data matrix
theta_mean = mean(theta_means,3);
[~,~,L,N] = size(data);

%initialize covariance matrices and mean autocovariance matrix
covars = zeros(64,64,N);
autocovar = zeros(64,64,2*L-1);

%compute covariance matrices and mean autocovariance matrix
for n=1:N

    %get data from chain n
    data_ = reshape(permute(data(:,:,:,n),[3 2 1]),[L 64]);

    %compute autocovariance matrix of chain n
    mat = xcov(data_,'unbiased');

    %store covariance matrix of chain n
    covars(:,:,n) = reshape(mat(L,:),64,64);

    %update mean autocovariance matrix
    autocovar = autocovar + reshape(mat',[64 64 2*L-1]);
end

%compute mean of autocovariance matrix
autocovar = autocovar(1:64,1:64,L:2*L-1)/N;
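
A rough NumPy parallel of the per-chain covariance and mean computation above (a sketch
only; the names `samples` and `chain_statistics` are made up for illustration, and the
autocovariance part is omitted). `samples` is assumed to have shape `(n_chains, L, 64)`,
i.e. `L` lagged samples of the 64 coefficients per chain.

    import numpy as np

    def chain_statistics(samples):
        # per-chain 64x64 covariance matrices, analogous to covars(:,:,n)
        covars = np.array([np.cov(chain, rowvar=False) for chain in samples])
        # overall mean of the coefficients across chains and samples,
        # analogous to theta_mean
        theta_mean = samples.mean(axis=(0, 1))
        return theta_mean, covars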
<a name="ann-Matlab/log_probability.m"></a>
<h1>Annotated version of Matlab/log_probability.m</h1>

%% -----------------------------------------------------------------------------
%% SPDX-License-Identifier: LGPL-2.1-or-later
%% Copyright (C) 2022 by Wolfgang Bangerth
%% This file is part of the deal.II code gallery.
%% -----------------------------------------------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%% compute log probability, log pi %%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%theta = current 8x8 parameter matrix
%z = current vector of measurements
%z_hat = vector of "exact" measurements
%sig = standard deviation parameter in likelihood
%sig_pr = standard deviation parameter in prior
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%log_pi = logarithm of posterior probability
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function log_pi = log_probability(theta,z,z_hat,sig,sig_pr)

%compute log likelihood
log_L = -sum((z-z_hat).^2)/(2*sig^2);

%compute log prior
log_pi_pr = -sum(log(theta).^2,'all')/(2*sig_pr^2);

%compute log posterior
log_pi = log_L+log_pi_pr;
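
Written out, the quantity computed by `log_probability` above is, up to an additive
constant that cancels in Metropolis-Hastings ratios,

@f[
  \log \pi(\theta \mid \hat z)
  \;=\;
  -\frac{\|z(\theta)-\hat z\|^2}{2\sigma^2}
  \;-\;
  \frac{\|\log \theta\|^2}{2\sigma_{\text{pr}}^2}
  \;+\; \text{const},
@f]

where @f$z(\theta)@f$ is the measurement vector returned by the forward solver, and the
values @f$\sigma=0.05@f$ and @f$\sigma_{\text{pr}}=2@f$ are set in `precomputations.m`.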
<a name="ann-Matlab/main.m"></a>
<h1>Annotated version of Matlab/main.m</h1>

%% -----------------------------------------------------------------------------
%% SPDX-License-Identifier: LGPL-2.1-or-later
%% Copyright (C) 2022 by Wolfgang Bangerth
%% This file is part of the deal.II code gallery.
%% -----------------------------------------------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%% Run MCMC sampler to estimate posterior distribution %%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%define number of chains, chain length, and lag time
N_L = input('length of each Markov chain: ');
lag = input('lag time for measurements: ');
workers = input('number of parallel workers: ');

%open Matlab parallel pool

load precomputations.mat

%define lag time and data matrix
data = zeros(8,8,L,N);      %data matrix of samples at lag times
theta_means = zeros(8,8,N); %overall mean of theta

    %set initial theta, theta mean, and z values of chain

        %define proposal, theta_tilde
        xi = normrnd(0,sig_prop,[8 8]);
        theta_tilde = theta.*exp(xi);

        %compute new z values
        z_tilde = forward_solver_(theta_tilde);

        %compute posterior log probability of theta_tilde
        log_pi_tilde = log_probability_(theta_tilde,z_tilde);
        log_pi = log_probability_(theta,z);

        %compute acceptance probability; accept proposal appropriately
        accept = exp(log_pi_tilde-log_pi) ...
                 *prod(theta_tilde./theta,'all');
        if rand < accept
            theta = theta_tilde; %accept new theta values
            z = z_tilde;         %record associated measurements
        end

        %update mean of theta
        theta_mean = theta_mean + theta;

            data(:,:,m,n) = theta;

    theta_means(:,:,n) = theta_mean/N_L;

%compute statistics on data set
[theta_mean,covars,autocovar] = get_statistics(data,theta_means);

%save data to Matlab workspace, labeled by N and N_L
save(['data_N_' num2str(N) '_N_L_' num2str(N_L) '.mat'])
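
The accept/reject step of `main.m` condensed into a NumPy sketch (illustration only; the
arguments `log_probability` and `forward_solver` stand in for the Matlab function handles
`log_probability_` and `forward_solver_`, and `metropolis_step` is a made-up name):

    import numpy as np

    def metropolis_step(theta, z, log_probability, forward_solver,
                        sig_prop=0.0725, rng=np.random.default_rng()):
        # multiplicative log-normal proposal: keeps all entries positive
        xi = rng.normal(0.0, sig_prop, size=theta.shape)
        theta_tilde = theta * np.exp(xi)
        z_tilde = forward_solver(theta_tilde)

        # acceptance probability includes the proposal-density ratio
        # prod(theta_tilde / theta), exactly as in the Matlab code
        log_pi_tilde = log_probability(theta_tilde, z_tilde)
        log_pi = log_probability(theta, z)
        accept = np.exp(log_pi_tilde - log_pi) * np.prod(theta_tilde / theta)

        if rng.uniform() < accept:
            return theta_tilde, z_tilde
        return theta, z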
<a name="ann-Matlab/plot_solution.m"></a>
<h1>Annotated version of Matlab/plot_solution.m</h1>

%% -----------------------------------------------------------------------------
%% SPDX-License-Identifier: LGPL-2.1-or-later
%% Copyright (C) 2022 by Wolfgang Bangerth
%% This file is part of the deal.II code gallery.
%% -----------------------------------------------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%% plots solution, u, to Poisson equation %%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%% associated to theta and a 32x32 mesh %%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%input matrix theta for plotting (e.g., theta = theta_hat)
theta = input('choose theta for plotting u: theta = ');

%construct mass matrix, M_plot, for plotting
Mp = zeros(n,n,33^2);
            Mp(i,j,k) = phi(xsp(i)-h*c(1),xsp(j)-h*c(2));
Mp = reshape(Mp,[n^2 33^2]);

%run forward solver on mean of theta
A = zeros(33^2,33^2);
    for j=0:31 %build A by summing over contribution from each cell

        %find local coefficient in 8x8 grid
        theta_loc = theta(floor(i/4)+1,floor(j/4)+1);

        %update A by including contribution from cell (i,j)
        dof = [lbl(i,j),lbl(i,j+1),lbl(i+1,j+1),lbl(i+1,j)];
        A(dof,dof) = A(dof,dof) + theta_loc*A_loc;

%enforce boundary condition
A(boundaries,:) = Id(boundaries,:);

%solve linear equation for coefficients, U

%close all current plots

zs = reshape(Mp*U,[n n]);
<a name="ann-Matlab/precomputations.m"></a>
<h1>Annotated version of Matlab/precomputations.m</h1>

%% -----------------------------------------------------------------------------
%% SPDX-License-Identifier: LGPL-2.1-or-later
%% Copyright (C) 2022 by Wolfgang Bangerth
%% This file is part of the deal.II code gallery.
%% -----------------------------------------------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%% do all precomputations necessary for MCMC simulations %%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%define characteristic function of unit square
S = @(x,y) heaviside(x).*heaviside(y) ...
           .*(1-heaviside(x-h)).*(1-heaviside(y-h));

%define tent function on the domain [-h,h]x[-h,h]
phi = @(x,y) ((x+h).*(y+h).*S(x+h,y+h) + (h-x).*(h-y).*S(x,y) ...
             + (x+h).*(h-y).*S(x+h,y) + (h-x).*(y+h).*S(x,y+h))/h^2;

%define function that converts from (i,j) to dof, and its inverse
lbl = @(i,j) 33*j+i+1;
inv_lbl = @(k) [k-1-33*floor((k-1)/33),floor((k-1)/33)];

%construct measurement matrix, M
xs = 1/14:1/14:13/14;   %measurement points
M = zeros(13,13,33^2);
            M(i,j,k) = phi(xs(i)-h*c(1),xs(j)-h*c(2));
M = reshape(M,[13^2 33^2]);

%construct exact coefficient matrix, theta_hat
theta_hat = ones(8,8);
theta_hat(2:3,2:3) = 0.1;
theta_hat(6:7,6:7) = 10;

%construct local overlap matrix, A_loc, and identity matrix Id
A_loc = [ 2/3 -1/6 -1/3 -1/6;
         -1/6  2/3 -1/6 -1/3;
         -1/3 -1/6  2/3 -1/6;
         -1/6 -1/3 -1/6  2/3];

%locate boundary labels
boundaries = [lbl(0:1:32,0),lbl(0:1:32,32),lbl(0,1:1:31),lbl(32,1:1:31)];

%define RHS of FEM linear system, AU = b
b = ones(33^2,1)*10*h^2;
b(boundaries) = zeros(128,1); %enforce boundary conditions on b

%load exact z_hat values

%set global parameters and functions for simulation
sig = 0.05;         %likelihood standard deviation
sig_pr = 2;         %prior (log) standard deviation
sig_prop = 0.0725;  %proposal (log) standard deviation
theta0 = ones(8,8); %initial theta values
forward_solver_ = @(theta) ...
    forward_solver(theta,lbl,A_loc,Id,boundaries,b,M);
log_probability_ = @(theta,z) log_probability(theta,z,z_hat,sig,sig_pr);

%find initial z values
z0 = forward_solver_(theta0);

save precomputations.mat
<a name="ann-Python/forward_solver.py"></a>
<h1>Annotated version of Python/forward_solver.py</h1>

## -----------------------------------------------------------------------------
## SPDX-License-Identifier: LGPL-2.1-or-later
## Copyright (C) 2022 by Wolfgang Bangerth
## This file is part of the deal.II code gallery.
## -----------------------------------------------------------------------------

import numpy as np
import scipy.sparse
from scipy.sparse.linalg import spsolve

###########################################################################
############ list of "exact" measurement values, z_hat ####################
###########################################################################

z_hat = np.array(
        [0.06076511762259369, 0.09601910120848481,
         0.1238852517838584, 0.1495184117375201,
         0.1841596127549784, 0.2174525028261122,
         0.2250996160898698, 0.2197954769002993,
         0.2074695698370926, 0.1889996477663016,
         0.1632722532153726, 0.1276782480038186,
         0.07711845915789312, 0.09601910120848552,
         0.2000589533367983, 0.3385592591951766,
         0.3934300024647806, 0.4040223892461541,
         0.4122329537843092, 0.4100480091545554,
         0.3949151637189968, 0.3697873264791232,
         0.33401826235924, 0.2850397806663382,
         0.2184260032478671, 0.1271121156350957,
         0.1238852517838611, 0.3385592591951819,
         0.7119285162766475, 0.8175712861756428,
         0.6836254116578105, 0.5779452419831157,
         0.5555615956136897, 0.5285181561736719,
         0.491439702849224, 0.4409367494853282,
         0.3730060082060772, 0.2821694983395214,
         0.1610176733857739, 0.1495184117375257,
         0.3934300024647929, 0.8175712861756562,
         0.9439154625527653, 0.8015904115095128,
         0.6859683749254024, 0.6561235366960599,
         0.6213197201867315, 0.5753611315000049,
         0.5140091754526823, 0.4325325506354165,
         0.3248315148915482, 0.1834600412730086,
         0.1841596127549917, 0.4040223892461832,
         0.6836254116578439, 0.8015904115095396,
         0.7870119561144977, 0.7373108331395808,
         0.7116558878070463, 0.6745179049094283,
         0.6235300574156917, 0.5559332704045935,
         0.4670304994474178, 0.3499809143811,
         0.19688263746294, 0.2174525028261253,
         0.4122329537843404, 0.5779452419831566,
         0.6859683749254372, 0.7373108331396063,
         0.7458811983178246, 0.7278968022406559,
         0.6904793535357751, 0.6369176452710288,
         0.5677443693743215, 0.4784738764865867,
         0.3602190632823262, 0.2031792054737325,
         0.2250996160898818, 0.4100480091545787,
         0.5555615956137137, 0.6561235366960938,
         0.7116558878070715, 0.727896802240657,
         0.7121928678670187, 0.6712187391428729,
         0.6139157775591492, 0.5478251665295381,
         0.4677122687599031, 0.3587654911000848,
         0.2050734291675918, 0.2197954769003094,
         0.3949151637190157, 0.5285181561736911,
         0.6213197201867471, 0.6745179049094407,
         0.690479353535786, 0.6712187391428787,
         0.6178408289359514, 0.5453605027237883,
         0.489575966490909, 0.4341716881061278,
         0.3534389974779456, 0.2083227496961347,
         0.207469569837099, 0.3697873264791366,
         0.4914397028492412, 0.5753611315000203,
         0.6235300574157017, 0.6369176452710497,
         0.6139157775591579, 0.5453605027237935,
         0.4336604929612851, 0.4109641743019312,
         0.3881864790111245, 0.3642640090182592,
         0.2179599909280145, 0.1889996477663011,
         0.3340182623592461, 0.4409367494853381,
         0.5140091754526943, 0.5559332704045969,
         0.5677443693743304, 0.5478251665295453,
         0.4895759664908982, 0.4109641743019171,
         0.395727260284338, 0.3778949322004734,
         0.3596268271857124, 0.2191250268948948,
         0.1632722532153683, 0.2850397806663325,
         0.373006008206081, 0.4325325506354207,
         0.4670304994474315, 0.4784738764866023,
         0.4677122687599041, 0.4341716881061055,
         0.388186479011099, 0.3778949322004602,
         0.3633362567187364, 0.3464457261905399,
         0.2096362321365655, 0.1276782480038148,
         0.2184260032478634, 0.2821694983395252,
         0.3248315148915535, 0.3499809143811097,
         0.3602190632823333, 0.3587654911000799,
         0.3534389974779268, 0.3642640090182283,
         0.35962682718569, 0.3464457261905295,
         0.3260728953424643, 0.180670595355394,
         0.07711845915789244, 0.1271121156350963,
         0.1610176733857757, 0.1834600412730144,
         0.1968826374629443, 0.2031792054737354,
         0.2050734291675885, 0.2083227496961245,
         0.2179599909279998, 0.2191250268948822,
         0.2096362321365551, 0.1806705953553887,
         0.1067965550010013])
###########################################################################
####### do all precomputations necessary for MCMC simulations #############
###########################################################################

# Define the mesh width
h = 1/32

# Define characteristic function of unit square
def S(x,y) :
    return heaviside(x)*heaviside(y) * (1-heaviside(x-h))*(1-heaviside(y-h));

# Define tent function on the domain [0,2h]x[0,2h]
def phi(x,y) :
    return ((x+h)*(y+h)*S(x+h,y+h) + (h-x)*(h-y)*S(x,y)
            + (x+h)*(h-y)*S(x+h,y) + (h-x)*(y+h)*S(x,y+h))/h**2

# Define conversion function for dof's from 2D to scalar label, and its inverse
def ij_to_dof_index(i,j) :
    return 33*j+i

def inv_ij_to_dof_index(k) :
    return [k-33*int(k/33), int(k/33)]


# Construct measurement matrix, M, for measurements
xs = np.arange(1./14,13./14,1./14);   #measurement points

M = np.zeros((13,13,33**2));
for k in range(33**2) :
    c = inv_ij_to_dof_index(k)
    for i in range(13) :
        for j in range(13) :
            M[i,j,k] = phi(xs[i]-h*c[0], xs[j]-h*c[1])
M = M.reshape((13**2, 33**2))
M = scipy.sparse.csr_matrix(M);

# Construct local overlap matrix, A_loc, and identity matrix Id
A_loc = np.array([[2./3,  -1./6, -1./3, -1./6],
                  [-1./6,  2./3, -1./6, -1./3],
                  [-1./3, -1./6,  2./3, -1./6],
                  [-1./6, -1./3, -1./6,  2./3]])
Id = np.eye(33**2,33**2)

# Locate boundary labels
boundaries = ([ij_to_dof_index(i,0)    for i in range(33)] +
              [ij_to_dof_index(i,32)   for i in range(33)] +
              [ij_to_dof_index(0,j+1)  for j in range(31)] +
              [ij_to_dof_index(32,j+1) for j in range(31)])

# Define RHS of FEM linear system, AU = b
b = np.ones(33**2)*10*h**2
b[boundaries] = 0    #enforce boundary conditions on b



###########################################################################
###################### forward solver function ############################
###########################################################################

def forward_solver(theta) :
    # Initialize matrix A for FEM linear solve, AU = b
    A = np.zeros((33**2,33**2))

    # Build A by summing over contribution from each cell
    for i in range(32) :
        for j in range (32) :
            # Find local coefficient in 8x8 grid
            theta_loc = theta[int(i/4)+int(j/4)*8]

            # Update A by including contribution from cell (i,j)
            dof = [ij_to_dof_index(i,j),
                   ij_to_dof_index(i,j+1),
                   ij_to_dof_index(i+1,j+1),
                   ij_to_dof_index(i+1,j)]
            A[np.ix_(dof,dof)] += theta_loc * A_loc

    # Enforce boundary condition: Zero out rows and columns, then
    # put a one back into the diagonal entries.
    A[boundaries,:] = 0
    A[:,boundaries] = 0
    A[boundaries,boundaries] = 1

    # Solve linear equation for coefficients, U, and then
    # get the Z vector by multiplying by the measurement matrix
    u = spsolve(scipy.sparse.csr_matrix(A), b)
    z = M.dot(u)

    return z



###########################################################################
################# compute log probability, log pi #########################
###########################################################################

def log_likelihood(theta) :
    z = forward_solver(theta)
    misfit = z - z_hat
    sig = 0.05    #likelihood standard deviation
    return -np.dot(misfit,misfit)/(2*sig**2)

def log_prior(theta) :
    sig_pr = 2    #prior (log) standard deviation
    return -np.linalg.norm(np.log(theta))**2/(2*sig_pr**2)

def log_posterior(theta) :
    return log_likelihood(theta) + log_prior(theta)



###########################################################################
############# A function to test against known output #####################
###########################################################################

def verify_against_stored_tests() :
    for i in range(10) :
        print ("Verifying against data set", i)

        # Read the input vector
        f_input = open ("../testing/input.{}.txt".format(i), 'r')
        theta   = np.fromfile(f_input, count=64, sep=" ")

        # Then compute both the forward solution and its statistics.
        # This is not efficiently written here (it calls the forward
        # solver twice), but we don't care about efficiency here since
        # we are only computing with ten samples
        this_z              = forward_solver(theta)
        this_log_likelihood = log_likelihood(theta)
        this_log_prior      = log_prior(theta)

        # Then also read the reference output generated by the C++ program:
        f_output_z          = open ("../testing/output.{}.z.txt".format(i), 'r')
        f_output_likelihood = open ("../testing/output.{}.loglikelihood.txt".format(i), 'r')
        f_output_prior      = open ("../testing/output.{}.logprior.txt".format(i), 'r')

        reference_z              = np.fromfile(f_output_z, count=13**2, sep=" ")
        reference_log_likelihood = float(f_output_likelihood.read())
        reference_log_prior      = float(f_output_prior.read())

        print (" || z-z_ref || : ",
               np.linalg.norm(this_z - reference_z))
        print (" log likelihood : ",
               "Python value=", this_log_likelihood,
               "(C++ reference value=", reference_log_likelihood,
               ", error=", abs(this_log_likelihood - reference_log_likelihood),
               ")")
        print (" log prior : ",
               "Python value=", this_log_prior,
               "(C++ reference value=", reference_log_prior,
               ", error=", abs(this_log_prior - reference_log_prior),
               ")")


def time_forward_solver() :
    for i in range(n_runs) :
        # Create a random vector (with entries between 0 and 1), scale
        # it by a factor of 4, subtract 2, then take the exponential
        # of each entry to get random entries between e^{-2} and e^{2}
        theta = np.exp(np.random.rand(64) * 4 - 2)
        z = forward_solver(theta)
    print ("Time per forward evaluation:", (end-begin)/n_runs)

verify_against_stored_tests()
time_forward_solver()
<a name="ann-mcmc-laplace.cc"></a>
<h1>Annotated version of mcmc-laplace.cc</h1>

 * /* -----------------------------------------------------------------------------
 *  * SPDX-License-Identifier: LGPL-2.1-or-later
 *  * Copyright (C) 2019 by Wolfgang Bangerth
 *  * This file is part of the deal.II code gallery.
 *  * -----------------------------------------------------------------------------
 *  * Author: Wolfgang Bangerth, Colorado State University, 2019.
 *
 * #include <deal.II/base/timer.h>
 * #include <deal.II/base/multithread_info.h>
 * #include <deal.II/grid/tria.h>
 * #include <deal.II/dofs/dof_handler.h>
 * #include <deal.II/grid/grid_generator.h>
 * #include <deal.II/grid/tria_accessor.h>
 * #include <deal.II/grid/tria_iterator.h>
 * #include <deal.II/dofs/dof_accessor.h>
 * #include <deal.II/fe/fe_q.h>
 * #include <deal.II/dofs/dof_tools.h>
 * #include <deal.II/fe/fe_values.h>
 * #include <deal.II/base/quadrature_lib.h>
 * #include <deal.II/base/function.h>
 * #include <deal.II/numerics/vector_tools.h>
 * #include <deal.II/numerics/matrix_tools.h>
 * #include <deal.II/lac/vector.h>
 * #include <deal.II/lac/full_matrix.h>
 * #include <deal.II/lac/sparse_matrix.h>
 * #include <deal.II/lac/dynamic_sparsity_pattern.h>
 * #include <deal.II/lac/solver_cg.h>
 * #include <deal.II/lac/precondition.h>
 * #include <deal.II/lac/sparse_ilu.h>
 *
 * #include <deal.II/numerics/data_out.h>
 *
 * #include <fstream>
 * #include <iostream>
 *
 * #include <deal.II/base/logstream.h>
 *
 * using namespace dealii;
 * The following is a namespace in which we define the solver of the PDE.
 * The main class implements an abstract `Interface` class declared at
 * the top, which provides for an `evaluate()` function that, given
 * a coefficient vector, solves the PDE discussed in the Readme file
 * and then evaluates the solution at the 169 mentioned points.
 *
 * The solver follows the basic layout of @ref step_4 "step-4", though it precomputes
 * a number of things in the `setup_system()` function, such as the
 * evaluation of the matrix that corresponds to the point evaluations,
 * as well as the local contributions to matrix and right hand side.
 *
 * Rather than commenting on everything in detail, in the following
 * we will only document those things that are not already clear from
 * @ref step_4 "step-4" and a small number of other tutorial programs.
 *
 * namespace ForwardSimulator
 *
 *     virtual Vector<double> evaluate(const Vector<double> &coefficients) = 0;
 *
 *     virtual ~Interface() = default;
 *
 *   template <int dim>
 *   class PoissonSolver : public Interface
 *     PoissonSolver(const unsigned int global_refinements,
 *                   const unsigned int fe_degree,
 *                   const std::string &dataset_name);
 *     virtual Vector<double>
 *     evaluate(const Vector<double> &coefficients) override;
 *
 *     void make_grid(const unsigned int global_refinements);
 *     void setup_system();
 *     void assemble_system(const Vector<double> &coefficients);
 *     void output_results(const Vector<double> &coefficients) const;
 *
 *     Triangulation<dim>      triangulation;
 *     DoFHandler<dim>         dof_handler;
 *
 *     FullMatrix<double>      cell_matrix;
 *     Vector<double>          cell_rhs;
 *     std::map<types::global_dof_index,double> boundary_values;
 *
 *     SparsityPattern         sparsity_pattern;
 *     SparseMatrix<double>    system_matrix;
 *
 *     Vector<double>          solution;
 *     Vector<double>          system_rhs;
 *
 *     std::vector<Point<dim>> measurement_points;
 *
 *     SparsityPattern         measurement_sparsity;
 *     SparseMatrix<double>    measurement_matrix;
 *
 *     TimerOutput             timer;
 *     unsigned int            nth_evaluation;
 *
 *     const std::string      &dataset_name;
 *
 *
 *   template <int dim>
 *   PoissonSolver<dim>::PoissonSolver(const unsigned int global_refinements,
 *                                     const unsigned int fe_degree,
 *                                     const std::string &dataset_name)
 *     , dof_handler(triangulation)
 *     , timer(std::cout, TimerOutput::summary, TimerOutput::cpu_times)
 *     , nth_evaluation(0)
 *     , dataset_name(dataset_name)
 *     make_grid(global_refinements);
 *
 *
 *   template <int dim>
 *   void PoissonSolver<dim>::make_grid(const unsigned int global_refinements)
 *     Assert(global_refinements >= 3,
 *            ExcMessage("This program makes the assumption that the mesh for the "
 *                       "solution of the PDE is at least as fine as the one used "
 *                       "in the definition of the coefficient."));
 *     GridGenerator::hyper_cube(triangulation, 0, 1);
 *     triangulation.refine_global(global_refinements);
 *
 *     std::cout << " Number of active cells: " << triangulation.n_active_cells()
 *
 *
 *   template <int dim>
 *   void PoissonSolver<dim>::setup_system()
 *
 * First define the finite element space:
 *
 *     dof_handler.distribute_dofs(fe);
 *
 *     std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
 *
 * Then set up the main data structures that will hold the discrete problem:
 *
 *     DynamicSparsityPattern dsp(dof_handler.n_dofs());
 *     DoFTools::make_sparsity_pattern(dof_handler, dsp);
 *     sparsity_pattern.copy_from(dsp);
 *
 *     system_matrix.reinit(sparsity_pattern);
 *
 *     solution.reinit(dof_handler.n_dofs());
 *     system_rhs.reinit(dof_handler.n_dofs());
 *
 * And then define the tools to do point evaluation. We choose
 * a set of 13x13 points evenly distributed across the domain:
 *
 *     const unsigned int n_points_per_direction = 13;
 *     const double       dx = 1. / (n_points_per_direction + 1);
 *
 *     for (unsigned int x = 1; x <= n_points_per_direction; ++x)
 *       for (unsigned int y = 1; y <= n_points_per_direction; ++y)
 *         measurement_points.emplace_back(x * dx, y * dx);
 *
 * First build a full matrix of the evaluation process. We do this
 * even though the matrix is really sparse -- but we don't know
 * which entries are nonzero. Later, the `copy_from()` function
 * calls build a sparsity pattern and a sparse matrix from it.
 *
 *     Vector<double>     weights(dof_handler.n_dofs());
 *     FullMatrix<double> full_measurement_matrix(n_points_per_direction *
 *                                                  n_points_per_direction,
 *                                                dof_handler.n_dofs());
 *
 *     for (unsigned int index = 0; index < measurement_points.size(); ++index)
 *         VectorTools::create_point_source_vector(dof_handler,
 *                                                 measurement_points[index],
 *                                                 weights);
 *         for (unsigned int i = 0; i < dof_handler.n_dofs(); ++i)
 *           full_measurement_matrix(index, i) = weights(i);
 *
 *     measurement_sparsity.copy_from(full_measurement_matrix);
 *     measurement_matrix.reinit(measurement_sparsity);
 *     measurement_matrix.copy_from(full_measurement_matrix);
 *
 * Next build the mapping from cell to the index in the 64-element
 * coefficient vector:
 *
 *     for (const auto &cell : triangulation.active_cell_iterators())
 *         const unsigned int i = std::floor(cell->center()[0] * 8);
 *         const unsigned int j = std::floor(cell->center()[1] * 8);
 *
 *         const unsigned int index = i + 8 * j;
 *         cell->set_user_index(index);
 *
 * Finally prebuild the building blocks of the linear system as
 * discussed in the Readme file:
 *
 *     const unsigned int dofs_per_cell = fe.dofs_per_cell;
 *
 *     cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
 *     cell_rhs.reinit(dofs_per_cell);
 *
 *     const QGauss<dim>  quadrature_formula(fe.degree+1);
 *     const unsigned int n_q_points = quadrature_formula.size();
 *
 *     FEValues<dim> fe_values(fe,
 *                             quadrature_formula,
 *                             update_values | update_gradients | update_JxW_values);
 *
 *     fe_values.reinit(dof_handler.begin_active());
 *
 *     for (unsigned int q_index = 0; q_index < n_q_points; ++q_index)
 *       for (unsigned int i = 0; i < dofs_per_cell; ++i)
 *           for (unsigned int j = 0; j < dofs_per_cell; ++j)
 *             cell_matrix(i, j) +=
 *               (fe_values.shape_grad(i, q_index) *
 *                fe_values.shape_grad(j, q_index) *
 *                fe_values.JxW(q_index));
 *
 *           cell_rhs(i) += (fe_values.shape_value(i, q_index) *
 *                           10.0 *
 *                           fe_values.JxW(q_index));
 * Given that we have pre-built the matrix and right hand side contributions
 * for a (representative) cell, the function that assembles the matrix is
 * pretty short and straightforward:
 *
 *   template <int dim>
 *   void PoissonSolver<dim>::assemble_system(const Vector<double> &coefficients)
 *     Assert(coefficients.size() == 64, ExcInternalError());
 *
 *     system_matrix = 0;
 *
 *     const unsigned int dofs_per_cell = fe.dofs_per_cell;
 *
 *     std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
 *
 *     for (const auto &cell : dof_handler.active_cell_iterators())
 *         const double coefficient = coefficients(cell->user_index());
 *
 *         cell->get_dof_indices(local_dof_indices);
 *         for (unsigned int i = 0; i < dofs_per_cell; ++i)
 *             for (unsigned int j = 0; j < dofs_per_cell; ++j)
 *               system_matrix.add(local_dof_indices[i],
 *                                 local_dof_indices[j],
 *                                 coefficient * cell_matrix(i, j));
 *
 *             system_rhs(local_dof_indices[i]) += cell_rhs(i);
 *
 * The same is true for the function that solves the linear system:
 *
 *   template <int dim>
 *   void PoissonSolver<dim>::solve()
 *     solver.solve(system_matrix, solution, system_rhs, ilu);
 *
 *
 * The following function outputs graphical data for the most recently
 * used coefficient and corresponding solution of the PDE. Collecting
 * the coefficient values requires translating from the 64-element
 * coefficient vector and the cells that correspond to each of these
 * elements. The rest remains pretty obvious, with the exception
 * of including the number of the current sample into the file name.
 *
 *   template <int dim>
 *   void
 *   PoissonSolver<dim>::output_results(const Vector<double> &coefficients) const
 *     for (const auto &cell : triangulation.active_cell_iterators())
 *       coefficient_values[cell->active_cell_index()] =
 *         coefficients(cell->user_index());
 *
 *     data_out.add_data_vector(solution, "solution");
 *     data_out.add_data_vector(coefficient_values, "coefficient");
 *
 *     data_out.build_patches();
 *
 *     std::ofstream output("solution-" +
 *                          Utilities::int_to_string(nth_evaluation, 10) + ".vtu");
 *     data_out.write_vtu(output);
 *
 *
 * The following is the main function of this class: Given a coefficient
 * vector, it assembles the linear system, solves it, and then evaluates
 * the solution at the measurement points by applying the measurement
 * matrix to the solution vector. That vector of "measured" values
 * is what the function then returns.
 *
 * The function will also output the solution in a graphical format
 * if you un-comment the corresponding statement in the third
 * code block. However, you may end up with a very large amount
 * of data: This code is producing, at the minimum, 10,000 samples
 * and creating output for each one of them is surely more data
 * than you ever want to see!
 *
 * At the end of the function, we output some timing information
 * every 10,000 samples.
 *
 *   template <int dim>
 *   Vector<double>
 *   PoissonSolver<dim>::evaluate(const Vector<double> &coefficients)
 *     assemble_system(coefficients);
 *
 *     measurement_matrix.vmult(measurements, solution);
 *     Assert(measurements.size() == measurement_points.size(),
 *            ExcInternalError());
 *
 *     if (nth_evaluation % 10000 == 0)
 *       timer.print_summary();
 *
 *     return measurements;
 * The following namespaces define the statistical properties of the Bayesian
 * inverse problem. The first is about the definition of the measurement
 * statistics (the "likelihood"), which we here assume to be a normal
 * distribution @f$N(\mu,\sigma I)@f$ with mean value @f$\mu@f$ given by the
 * actual measurement vector (passed as an argument to the constructor
 * of the `Gaussian` class) and standard deviation @f$\sigma@f$.
 *
 * For reasons of numerical accuracy, it is useful to not return the
 * actual likelihood, but its logarithm. This is because these
 * values can be very small, occasionally on the order of @f$e^{-100}@f$,
 * for which it becomes very difficult to compute accurate values.
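
As a quick illustration of this underflow problem (a small Python aside, not taken from the
program itself, using two hypothetical log likelihood values):

    import math

    log_L_current, log_L_trial = -820.0, -815.0   # hypothetical log likelihoods

    # Evaluating the likelihoods directly underflows to zero in double
    # precision, so their ratio would be a meaningless 0/0:
    print(math.exp(log_L_current), math.exp(log_L_trial))   # prints: 0.0 0.0

    # The exponential of the *difference* of the logarithms is well behaved:
    print(math.exp(log_L_trial - log_L_current))             # prints: 148.41...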
 *   namespace LogLikelihood
 *
 *       virtual double log_likelihood(const Vector<double> &x) const = 0;
 *
 *       virtual ~Interface() = default;
 *
 *     class Gaussian : public Interface
 *
 *       virtual double log_likelihood(const Vector<double> &x) const override;
 *
 *       const double sigma;
 *
 *     Gaussian::Gaussian(const Vector<double> &mu, const double sigma)
 *
 *       return -x_minus_mu.norm_sqr() / (2 * sigma * sigma);
 *
 *
 * Next up is the "prior" imposed on the coefficients. We assume
 * that the logarithms of the entries of the coefficient vector
 * are all distributed as a Gaussian with given mean and standard
 * deviation. If the logarithms of the coefficients are normally
 * distributed, then this implies in particular that the coefficients
 * can only be positive, which is a useful property to ensure the
 * well-posedness of the forward problem.
 *
 * For the same reasons as for the likelihood above, the interface
 * for the prior asks for returning the *logarithm* of the prior,
 * instead of the prior probability itself.
 *
 *   namespace LogPrior
 *
 *       virtual ~Interface() = default;
 *
 *     class LogGaussian : public Interface
 *       LogGaussian(const double mu, const double sigma);
 *
 *       virtual double log_prior(const Vector<double> &x) const override;
 *
 *       const double sigma;
 *
 *     LogGaussian::LogGaussian(const double mu, const double sigma)
 *
 *       double log_of_product = 0;
 *
 *       for (const auto &el : x)
 *         log_of_product +=
 *           -(std::log(el) - mu) * (std::log(el) - mu) / (2 * sigma * sigma);
 *
 *       return log_of_product;
 *
 *
 * The Metropolis-Hastings algorithm requires a method to create a new sample
 * given a previous sample. We do this by perturbing the current (coefficient)
 * sample randomly using a Gaussian distribution centered at the current
 * sample. To ensure that the samples' individual entries all remain
 * positive, we use a Gaussian distribution in logarithm space -- in other
 * words, instead of *adding* a small perturbation with mean value zero,
 * we *multiply* the entries of the current sample by a factor that
 * is the exponential of a random number with mean zero. (Because the
 * exponential of zero is one, this means that the most likely factors
 * to multiply the existing sample entries by are close to one. And
 * because the exponential of a number is always positive, we never
 * get negative samples this way.)
 *
 * But the Metropolis-Hastings sampler doesn't just need a perturbed
 * sample location @f$y@f$ given the current sample location @f$x@f$. It also
 * needs to know the ratio of the probability of reaching @f$y@f$ from
 * @f$x@f$, divided by the probability of reaching @f$x@f$ from @f$y@f$. If we
 * were to use a symmetric proposal distribution (e.g., a Gaussian
 * distribution centered at @f$x@f$ with a width independent of @f$x@f$), then
 * these two probabilities would be the same, and the ratio one. But
 * that's not the case for the Gaussian in log space. It's not
 * terribly difficult to verify that in that case, for a single
 * component the ratio of these probabilities is @f$y_i/x_i@f$, and
 * consequently for all components of the vector together, the
 * probability is the product of these ratios.
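
Spelling out that verification (an added note, writing @f$\sigma_{\text{prop}}@f$ for the
proposal's standard deviation): the proposal draws @f$\log y_i = \log x_i + \xi_i@f$ with
@f$\xi_i \sim N(0,\sigma_{\text{prop}}^2)@f$, so the two proposal densities are

@f[
  q(y\mid x) = \prod_i \frac{1}{y_i\,\sigma_{\text{prop}}\sqrt{2\pi}}
      \exp\left(-\frac{(\log y_i-\log x_i)^2}{2\sigma_{\text{prop}}^2}\right),
  \qquad
  q(x\mid y) = \prod_i \frac{1}{x_i\,\sigma_{\text{prop}}\sqrt{2\pi}}
      \exp\left(-\frac{(\log x_i-\log y_i)^2}{2\sigma_{\text{prop}}^2}\right).
@f]

The exponential factors are identical, so the factor that multiplies the posterior ratio in
the acceptance probability is @f$q(x\mid y)/q(y\mid x)=\prod_i y_i/x_i@f$ -- exactly the
`product_of_ratios` value assembled in `perturb()` below.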
 *   namespace ProposalGenerator
 *
 *       virtual
 *       std::pair<Vector<double>, double>
 *       perturb(const Vector<double> &current_sample) const = 0;
 *
 *       virtual ~Interface() = default;
 *
 *     class LogGaussian : public Interface
 *       LogGaussian(const unsigned int random_seed, const double log_sigma);
 *
 *       std::pair<Vector<double>, double>
 *       perturb(const Vector<double> &current_sample) const override;
 *
 *       const double log_sigma;
 *
 *       mutable std::mt19937 random_number_generator;
 *
 *     LogGaussian::LogGaussian(const unsigned int random_seed,
 *                              const double       log_sigma)
 *       : log_sigma(log_sigma)
 *       random_number_generator.seed(random_seed);
 *
 *     std::pair<Vector<double>, double>
 *     LogGaussian::perturb(const Vector<double> &current_sample) const
 *       Vector<double> new_sample = current_sample;
 *       double         product_of_ratios = 1;
 *       for (auto &x : new_sample)
 *           const double rnd = std::normal_distribution<>(0, log_sigma)(random_number_generator);
 *           const double exp_rnd = std::exp(rnd);
 *           x *= exp_rnd;
 *           product_of_ratios *= exp_rnd;
 *
 *       return {new_sample, product_of_ratios};
 *
 *
 * The last main class is the Metropolis-Hastings sampler itself.
 * If you understand the algorithm behind this method, then
 * the following implementation should not be too difficult
 * to read. The only thing of relevance is that descriptions
 * of the algorithm typically ask whether the *ratio* of two
 * probabilities (the "posterior" probabilities of the current
 * and the previous samples, where the "posterior" is the product of the
 * likelihood and the prior probability) is larger or smaller than a
 * randomly drawn number. But because our interfaces return the
 * *logarithms* of these probabilities, we now need to take
 * the ratio of appropriate exponentials -- which is made numerically
 * more stable by considering the exponential of the difference of
 * the log probabilities. The only other slight complication is that
 * we need to multiply this ratio by the ratio of proposal probabilities
 * since we use a non-symmetric proposal distribution.
 *
 * Finally, we note that the output is generated with 7 digits of
 * accuracy. (The C++ default is 6 digits.) We do this because,
 * as shown in the paper, we can determine the mean value of the
 * probability distribution we are sampling here to at least six
 * digits of accuracy, and do not want to be limited by the precision
 * of the output.
 *
 *   class MetropolisHastings
 *     MetropolisHastings(ForwardSimulator::Interface &       simulator,
 *                        const LogLikelihood::Interface &    likelihood,
 *                        const LogPrior::Interface &         prior,
 *                        const ProposalGenerator::Interface &proposal_generator,
 *                        const unsigned int                  random_seed,
 *                        const std::string &                 dataset_name);
 *
 *     void sample(const Vector<double> &starting_guess,
 *                 const unsigned int    n_samples);
 *
 *     ForwardSimulator::Interface &       simulator;
 *     const LogLikelihood::Interface &    likelihood;
 *     const LogPrior::Interface &         prior;
 *     const ProposalGenerator::Interface &proposal_generator;
 *
 *     std::mt19937 random_number_generator;
 *
 *     unsigned int sample_number;
 *     unsigned int accepted_sample_number;
 *
 *     std::ofstream output_file;
 *
 *     void write_sample(const Vector<double> &current_sample,
 *                       const double          current_log_likelihood);
 *
 *   MetropolisHastings::MetropolisHastings(
 *     ForwardSimulator::Interface &       simulator,
 *     const LogLikelihood::Interface &    likelihood,
 *     const LogPrior::Interface &         prior,
 *     const ProposalGenerator::Interface &proposal_generator,
 *     const unsigned int                  random_seed,
 *     const std::string &                 dataset_name)
 *     : simulator(simulator)
 *     , likelihood(likelihood)
 *     , proposal_generator(proposal_generator)
 *     , sample_number(0)
 *     , accepted_sample_number(0)
 *     output_file.open("samples-" + dataset_name + ".txt");
 *     output_file.precision(7);
 *
 *     random_number_generator.seed(random_seed);
 *
 *   void MetropolisHastings::sample(const Vector<double> &starting_guess,
 *                                   const unsigned int    n_samples)
 *     std::uniform_real_distribution<> uniform_distribution(0, 1);
 *
 *     double current_log_posterior =
 *       (likelihood.log_likelihood(simulator.evaluate(current_sample)) +
 *        prior.log_prior(current_sample));
 *
 *     ++accepted_sample_number;
 *     write_sample(current_sample, current_log_posterior);
 *
 *     for (unsigned int k = 1; k < n_samples; ++k, ++sample_number)
 *         std::pair<Vector<double>, double>
 *           perturbation = proposal_generator.perturb(current_sample);
 *         const Vector<double> trial_sample = std::move (perturbation.first);
 *         const double perturbation_probability_ratio = perturbation.second;
 *
 *         const double trial_log_posterior =
 *           (likelihood.log_likelihood(simulator.evaluate(trial_sample)) +
 *            prior.log_prior(trial_sample));
 *
 *         if (std::exp(trial_log_posterior - current_log_posterior) *
 *               perturbation_probability_ratio >=
 *             uniform_distribution(random_number_generator))
 *             current_sample        = trial_sample;
 *             current_log_posterior = trial_log_posterior;
 *
 *             ++accepted_sample_number;
 *
 *         write_sample(current_sample, current_log_posterior);
 *
 *   void MetropolisHastings::write_sample(const Vector<double> &current_sample,
 *                                         const double current_log_posterior)
 *     output_file << current_log_posterior << '\t';
 *     output_file << accepted_sample_number << '\t';
 *     for (const auto &x : current_sample)
 *       output_file << x << ' ';
 *     output_file << '\n';
 *
 *     output_file.flush();
 *
 *
 * The final function is `main()`, which simply puts all of these pieces
 * together into one. The "exact solution", i.e., the "measurement values"
 * we use for this program are tabulated to make it easier for other
 * people to use in their own implementations of this benchmark. These
 * values were created using the same main class above, but using 8 mesh
 * refinements and using a Q3 element -- i.e., using a much more accurate
 * method than the one we use in the forward simulator for generating
 * samples below (which uses 5 global mesh refinement steps and a Q1
 * element). If you wanted to regenerate this set of numbers, then
 * the following code snippet would do that:
 * <div class=CodeFragmentInTutorialComment>
 *
 *    for (auto &el : exact_coefficients)
 *      el = 1.;
 *    exact_coefficients(9) = exact_coefficients(10) = exact_coefficients(17) =
 *      exact_coefficients(18) = 0.1;
 *    exact_coefficients(45) = exact_coefficients(46) = exact_coefficients(53) =
 *      exact_coefficients(54) = 10.;
 *
 *    ForwardSimulator::PoissonSolver<2>( 8,
 *      .evaluate(exact_coefficients);
 *
 *   const bool testing = true;
 *
 * Run with one thread, so as to not step on other processes
 * doing the same at the same time. It turns out that the problem
 * is also so small that running with more than one thread
 * *increases* the runtime.
 *
 *   const unsigned int random_seed = (testing ? 1U : std::random_device()());
 *       { 0.06076511762259369, 0.09601910120848481,
 *         0.1238852517838584, 0.1495184117375201,
 *         0.1841596127549784, 0.2174525028261122,
 *         0.2250996160898698, 0.2197954769002993,
 *         0.2074695698370926, 0.1889996477663016,
 *         0.1632722532153726, 0.1276782480038186,
 *         0.07711845915789312, 0.09601910120848552,
 *         0.2000589533367983, 0.3385592591951766,
 *         0.3934300024647806, 0.4040223892461541,
 *         0.4122329537843092, 0.4100480091545554,
 *         0.3949151637189968, 0.3697873264791232,
 *         0.33401826235924, 0.2850397806663382,
 *         0.2184260032478671, 0.1271121156350957,
 *         0.1238852517838611, 0.3385592591951819,
 *         0.7119285162766475, 0.8175712861756428,
 *         0.6836254116578105, 0.5779452419831157,
 *         0.5555615956136897, 0.5285181561736719,
 *         0.491439702849224, 0.4409367494853282,
 *         0.3730060082060772, 0.2821694983395214,
 *         0.1610176733857739, 0.1495184117375257,
 *         0.3934300024647929, 0.8175712861756562,
 *         0.9439154625527653, 0.8015904115095128,
 *         0.6859683749254024, 0.6561235366960599,
 *         0.6213197201867315, 0.5753611315000049,
 *         0.5140091754526823, 0.4325325506354165,
 *         0.3248315148915482, 0.1834600412730086,
 *         0.1841596127549917, 0.4040223892461832,
 *         0.6836254116578439, 0.8015904115095396,
 *         0.7870119561144977, 0.7373108331395808,
 *         0.7116558878070463, 0.6745179049094283,
 *         0.6235300574156917, 0.5559332704045935,
 *         0.4670304994474178, 0.3499809143811,
 *         0.19688263746294, 0.2174525028261253,
 *         0.4122329537843404, 0.5779452419831566,
 *         0.6859683749254372, 0.7373108331396063,
 *         0.7458811983178246, 0.7278968022406559,
 *         0.6904793535357751, 0.6369176452710288,
 *         0.5677443693743215, 0.4784738764865867,
 *         0.3602190632823262, 0.2031792054737325,
 *         0.2250996160898818, 0.4100480091545787,
 *         0.5555615956137137, 0.6561235366960938,
 *         0.7116558878070715, 0.727896802240657,
 *         0.7121928678670187, 0.6712187391428729,
 *         0.6139157775591492, 0.5478251665295381,
 *         0.4677122687599031, 0.3587654911000848,
 *         0.2050734291675918, 0.2197954769003094,
 *         0.3949151637190157, 0.5285181561736911,
 *         0.6213197201867471, 0.6745179049094407,
 *         0.690479353535786, 0.6712187391428787,
 *         0.6178408289359514, 0.5453605027237883,
 *         0.489575966490909, 0.4341716881061278,
 *         0.3534389974779456, 0.2083227496961347,
 *         0.207469569837099, 0.3697873264791366,
 *         0.4914397028492412, 0.5753611315000203,
 *         0.6235300574157017, 0.6369176452710497,
 *         0.6139157775591579, 0.5453605027237935,
 *         0.4336604929612851, 0.4109641743019312,
 *         0.3881864790111245, 0.3642640090182592,
 *         0.2179599909280145, 0.1889996477663011,
 *         0.3340182623592461, 0.4409367494853381,
 *         0.5140091754526943, 0.5559332704045969,
 *         0.5677443693743304, 0.5478251665295453,
 *         0.4895759664908982, 0.4109641743019171,
 *         0.395727260284338, 0.3778949322004734,
 *         0.3596268271857124, 0.2191250268948948,
 *         0.1632722532153683, 0.2850397806663325,
 *         0.373006008206081, 0.4325325506354207,
 *         0.4670304994474315, 0.4784738764866023,
 *         0.4677122687599041, 0.4341716881061055,
 *         0.388186479011099, 0.3778949322004602,
 *         0.3633362567187364, 0.3464457261905399,
 *         0.2096362321365655, 0.1276782480038148,
 *         0.2184260032478634, 0.2821694983395252,
 *         0.3248315148915535, 0.3499809143811097,
 *         0.3602190632823333, 0.3587654911000799,
 *         0.3534389974779268, 0.3642640090182283,
 *         0.35962682718569, 0.3464457261905295,
 *         0.3260728953424643, 0.180670595355394,
 *         0.07711845915789244, 0.1271121156350963,
 *         0.1610176733857757, 0.1834600412730144,
 *         0.1968826374629443, 0.2031792054737354,
 *         0.2050734291675885, 0.2083227496961245,
 *         0.2179599909279998, 0.2191250268948822,
 *         0.2096362321365551, 0.1806705953553887,
 *         0.1067965550010013 });
 * Now run the forward simulator for samples:
 *
 *       ForwardSimulator::PoissonSolver<2> laplace_problem(
 *
 *       LogLikelihood::Gaussian        log_likelihood(exact_solution, 0.05);
 *       LogPrior::LogGaussian          log_prior(0, 2);
 *       ProposalGenerator::LogGaussian proposal_generator(
 *         random_seed, 0.09);
 *       Sampler::MetropolisHastings    sampler(laplace_problem,
 *                                              proposal_generator,
 *
 *       for (auto &el : starting_coefficients)
 *
 *       sampler.sample(starting_coefficients,
 *                      (testing ? 250 * 40