    LogLikelihood::Gaussian        log_likelihood(exact_solution, 0.05);
    LogPrior::LogGaussian          log_prior(0, 2);

    const unsigned int n_theta = 64;
    for (unsigned int test=0; test<10; ++test)
      {
        std::cout << "Generating output for test " << test << std::endl;

        // Read the input vector for this test...
        std::ifstream test_input ("../testing/input." + std::to_string(test) + ".txt");
        Assert (test_input, ExcIO());

        Vector<double> theta(n_theta);
        for (unsigned int i=0; i<n_theta; ++i)
          test_input >> theta[i];

        // ...evaluate the forward simulator for it (using the same
        // 'laplace_problem' object as in main()), and write the
        // measurements and the two log probabilities into their
        // respective output files:
        const Vector<double> z = laplace_problem.evaluate(theta);

        std::ofstream test_output_z ("output." + std::to_string(test) + ".z.txt");
        z.print(test_output_z, 16);

        std::ofstream test_output_likelihood ("output." + std::to_string(test) + ".likelihood.txt");
        test_output_likelihood.precision(12);
        test_output_likelihood << log_likelihood.log_likelihood(z) << std::endl;

        std::ofstream test_output_prior ("output." + std::to_string(test) + ".prior.txt");
        test_output_prior.precision(12);
        test_output_prior << log_prior.log_prior(theta) << std::endl;
      }

This code reads in each of the input files (assuming that the executable is
located in a build directory parallel to the `testing/` directory) and outputs
its results into the current directory. The outputs you get from a modified
program should then be compared against the ones stored in the `testing/`
directory. They should match to several digits.
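
The comparison can also be automated. The following short Python script is one
way to do it; it is not part of the benchmark and simply assumes the naming
scheme described above, with the reference files in `../testing/` and the newly
generated `output.*` files in the current (build) directory:

    # Sketch of a comparison script (not part of the benchmark): check the
    # freshly generated output files against the stored reference files.
    import numpy as np

    for test in range(10):
        for kind in ["z", "likelihood", "prior"]:
            new = np.loadtxt("output.{}.{}.txt".format(test, kind))
            ref = np.loadtxt("../testing/output.{}.{}.txt".format(test, kind))
            print("test {}, {}: max abs difference = {:.3e}"
                  .format(test, kind, np.max(np.abs(new - ref))))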

An alternative implementation in Matlab
---------------------------------------

To facilitate experiments, this directory also contains alternative
implementations of the benchmark. The first one was written by David
Aristoff in Matlab and can be found in the `Matlab/` directory. As is
common in Matlab programs, each function is provided in its own
file. We have verified that the program generates the same results (to
12 or more digits) as the C++ program, using the tests mentioned in
the previous section.

An alternative implementation in Python
---------------------------------------

Another alternative, written by Wolfgang Bangerth, can be found in the
`Python/` directory and uses Python3. As is common for Python
programs, the whole program is provided in one file. As for the Matlab
version, we have verified that the program generates the same results
(to 12 or more digits) as the C++ program, using the tests mentioned
in the previous section. In fact, if you just execute the program
as-is, it runs diagnostics that output the errors.

This Python version is essentially a literal translation of the Matlab
code. It is not nearly as efficient (taking around 5 times as much
time for each function evaluation) and could probably be optimized
substantially if desired. A good starting point is the insertion of
the local elements into the global matrix in the line

    A[np.ix_(dof,dof)] += theta_loc * A_loc

that takes up a substantial fraction of the overall run time.
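
If you want to experiment with this, one option (shown here only as a sketch,
re-using the `ij_to_dof_index` function, the local matrix `A_loc`, and the
33x33 grid of unknowns defined in `Python/forward_solver.py`) is to avoid the
dense matrix altogether and assemble the global matrix in scipy's COO
(coordinate) format, in which duplicate entries are summed when the matrix is
converted to CSR form for the solve:

    # Sketch of a sparse assembly of the global matrix (not the code used by
    # the benchmark): collect (row, column, value) triplets for every cell
    # and let scipy sum the duplicates when converting to CSR.
    import numpy as np
    import scipy.sparse

    def assemble_matrix_sparse(theta):
        rows, cols, vals = [], [], []
        for i in range(32):
            for j in range(32):
                theta_loc = theta[int(i/4) + int(j/4)*8]
                dof = [ij_to_dof_index(i,j),
                       ij_to_dof_index(i,j+1),
                       ij_to_dof_index(i+1,j+1),
                       ij_to_dof_index(i+1,j)]
                for a in range(4):
                    for b in range(4):
                        rows.append(dof[a])
                        cols.append(dof[b])
                        vals.append(theta_loc * A_loc[a,b])
        # Duplicate (row, col) pairs are summed by tocsr(); boundary
        # conditions still have to be enforced afterwards as before.
        return scipy.sparse.coo_matrix((vals, (rows, cols)),
                                       shape=(33**2, 33**2)).tocsr()

Whether this actually wins depends on how much of the run time is spent in the
solver versus the assembly, so it is worth profiling before and after.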

<a name="ann-Matlab/exact_values.m"></a>
<h1>Annotated version of Matlab/exact_values.m</h1>

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%% list of "exact" measurement values, z_hat %%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

z_hat = [0.06076511762259369;

<a name="ann-Matlab/forward_solver.m"></a>
<h1>Annotated version of Matlab/forward_solver.m</h1>

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%% forward solver function %%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%theta = current 8x8 parameter matrix
%lbl = cell labeling function
%A_loc = matrix of local contributions to A
%Id = Identity matrix of size 128x128
%boundaries = labels of boundary cells
%b = right hand side of linear system (AU = b)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%z = vector of measurements
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function z = forward_solver(theta,lbl,A_loc,Id,boundaries,b,M)

%initialize matrix A for FEM linear solve, AU = b

%loop over cells to build A
  for j=0:31 %build A by summing over contribution from each cell

    %find local coefficient in 8x8 grid

    %update A by including contribution from cell (i,j)
    dof = [lbl(i,j),lbl(i,j+1),lbl(i+1,j+1),lbl(i+1,j)];
    A(dof,dof) = A(dof,dof) + theta_loc*A_loc;

%enforce boundary condition
A(boundaries,:) = Id(boundaries,:);
A(:,boundaries) = Id(:,boundaries);

%solve linear equation for coefficients, U

<a name="ann-Matlab/get_statistics.m"></a>
<h1>Annotated version of Matlab/get_statistics.m</h1>

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%% compute statistics on data set %%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%data = tensor of theta samples from each lag time and chain
%theta_means = means of theta over each independent chain
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%theta_mean = overall mean of chains
%covars = covariance matrices of each independent chain
%autocovar = mean of autocovariance matrix over all the chains
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function [theta_mean,covars,autocovar] = get_statistics(data,theta_means);

%compute overall mean of data, and get size of data matrix
theta_mean = mean(theta_means,3);
[~,~,L,N] = size(data);

%initialize covariance matrices and mean autocovariance matrix
covars = zeros(64,64,N);
autocovar = zeros(64,64,2*L-1);

%compute covariance matrices and mean autocovariance matrix
for n=1:N %loop over independent Markov chains

    %get data from chain n
    data_ = reshape(permute(data(:,:,:,n),[3 2 1]),[L 64]);

    %compute autocovariance matrix of chain n
    mat = xcov(data_,'unbiased');

    %store covariance matrix of chain n
    covars(:,:,n) = reshape(mat(L,:),64,64);

    autocovar = autocovar + reshape(mat',[64 64 2*L-1]);

%compute mean of autocovariance matrix
autocovar = autocovar(1:64,1:64,L:2*L-1)/N;

<a name="ann-Matlab/log_probability.m"></a>
<h1>Annotated version of Matlab/log_probability.m</h1>

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%% compute log probability, log pi %%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%theta = current 8x8 parameter matrix
%z = current vector of measurements
%z_hat = vector of "exact" measurements
%sig = standard deviation parameter in likelihood
%sig_pr = standard deviation parameter in prior
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%log_pi = logarithm of posterior probability
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function log_pi = log_probability(theta,z,z_hat,sig,sig_pr)

%compute log likelihood
log_L = -sum((z-z_hat).^2)/(2*sig^2);

%compute log prior
log_pi_pr = -sum(log(theta).^2,'all')/(2*sig_pr^2);

%compute log posterior
log_pi = log_L+log_pi_pr;

<a name="ann-Matlab/main.m"></a>
<h1>Annotated version of Matlab/main.m</h1>

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%% Run MCMC sampler to estimate posterior distribution %%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%define number of chains, chain length, and lag time
N = input('number of independent Markov chains: ');
N_L = input('length of each Markov chain: ');
lag = input('lag time for measurements: ');
workers = input('number of parallel workers: ');

%open Matlab parallel pool

load precomputations.mat

%define lag time and data matrix
data = zeros(8,8,L,N);      %data matrix of samples at lag times
theta_means = zeros(8,8,N); %overall mean of theta

    %set initial theta, theta mean, and z values of chain

        %define proposal, theta_tilde
        xi = normrnd(0,sig_prop,[8 8]);
        theta_tilde = theta.*exp(xi);

        %compute new z values
        z_tilde = forward_solver_(theta_tilde);

        %compute posterior log probability of theta_tilde
        log_pi_tilde = log_probability_(theta_tilde,z_tilde);
        log_pi = log_probability_(theta,z);

        %compute acceptance probability; accept proposal appropriately
        accept = exp(log_pi_tilde-log_pi) ...
                 *prod(theta_tilde./theta,'all');

            theta = theta_tilde; %accept new theta values
            z = z_tilde;         %record associated measurements

        %update mean of theta
        theta_mean = theta_mean + theta;

            data(:,:,m,n) = theta;

    theta_means(:,:,n) = theta_mean/N_L;

%compute statistics on data set
[theta_mean,covars,autocovar] = get_statistics(data,theta_means);

%save data to Matlab workspace, labeled by N and N_L
save (['data_N_' num2str(N) '_N_L_' num2str(N_L) '.mat'])

<a name="ann-Matlab/plot_solution.m"></a>
<h1>Annotated version of Matlab/plot_solution.m</h1>

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%% plots solution, u, to Poisson equation %%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%% associated to theta and a 32x32 mesh %%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%input matrix theta for plotting (e.g., theta = theta_hat)
theta = input('choose theta for plotting u: theta = ');

%construct mass matrix, M_plot, for plotting

            Mp(i,j,k) = phi(xsp(i)-h*c(1),xsp(j)-h*c(2));

Mp = reshape(Mp,[n^2 33^2]);

%run forward solver on mean of theta

    for j=0:31 %build A by summing over contribution from each cell

        %find local coefficient in 8x8 grid
        theta_loc = theta(floor(i/4)+1,floor(j/4)+1);

        %update A by including contribution from cell (i,j)
        dof = [lbl(i,j),lbl(i,j+1),lbl(i+1,j+1),lbl(i+1,j)];
        A(dof,dof) = A(dof,dof) + theta_loc*A_loc;

%enforce boundary condition
A(boundaries,:) = Id(boundaries,:);

%solve linear equation for coefficients, U

%close all current plots

zs = reshape(Mp*U,[n n]);

<a name="ann-Matlab/precomputations.m"></a>
<h1>Annotated version of Matlab/precomputations.m</h1>

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%% do all precomputations necessary for MCMC simulations %%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%define characteristic function of unit square
S = @(x,y) heaviside(x).*heaviside(y) ...
           .*(1-heaviside(x-h)).*(1-heaviside(y-h));

%define tent function on the domain [-h,h]x[-h,h]
phi = @(x,y) ((x+h).*(y+h).*S(x+h,y+h) + (h-x).*(h-y).*S(x,y) ...
             + (x+h).*(h-y).*S(x+h,y) + (h-x).*(y+h).*S(x,y+h))/h^2;

%define function that converts from (i,j) to dof, and its inverse
lbl = @(i,j) 33*j+i+1;
inv_lbl = @(k) [k-1-33*floor((k-1)/33),floor((k-1)/33)];

%construct measurement matrix, M
xs = 1/14:1/14:13/14; %measurement points
M = zeros(13,13,33^2);

            M(i,j,k) = phi(xs(i)-h*c(1),xs(j)-h*c(2));

M = reshape(M,[13^2 33^2]);

%construct exact coefficient matrix, theta_hat
theta_hat = ones(8,8);
theta_hat(2:3,2:3) = 0.1;
theta_hat(6:7,6:7) = 10;

%construct local overlap matrix, A_loc, and identity matrix Id
A_loc = [ 2/3 -1/6 -1/3 -1/6;
         -1/6  2/3 -1/6 -1/3;
         -1/3 -1/6  2/3 -1/6;
         -1/6 -1/3 -1/6  2/3];

%locate boundary labels
boundaries = [lbl(0:1:32,0),lbl(0:1:32,32),lbl(0,1:1:31),lbl(32,1:1:31)];

%define RHS of FEM linear system, AU = b
b = ones(33^2,1)*10*h^2;
b(boundaries) = zeros(128,1); %enforce boundary conditions on b

%load exact z_hat values

%set global parameters and functions for simulation
sig = 0.05;         %likelihood standard deviation
sig_pr = 2;         %prior (log) standard deviation
sig_prop = 0.0725;  %proposal (log) standard deviation
theta0 = ones(8,8); %initial theta values
forward_solver_ = @(theta) ...
                  forward_solver(theta,lbl,A_loc,Id,boundaries,b,M);
log_probability_ = @(theta,z) log_probability(theta,z,z_hat,sig,sig_pr);

%find initial z values
z0 = forward_solver_(theta0);

save precomputations.mat

<a name="ann-Python/forward_solver.py"></a>
<h1>Annotated version of Python/forward_solver.py</h1>

import numpy as np
import scipy.sparse
from scipy.sparse.linalg import spsolve

###########################################################################
############ list of "exact" measurement values, z_hat ####################
###########################################################################

z_hat = np.array(
1096 [0.06076511762259369, 0.09601910120848481,
1097 0.1238852517838584, 0.1495184117375201,
1098 0.1841596127549784, 0.2174525028261122,
1099 0.2250996160898698, 0.2197954769002993,
1100 0.2074695698370926, 0.1889996477663016,
1101 0.1632722532153726, 0.1276782480038186,
1102 0.07711845915789312, 0.09601910120848552,
1103 0.2000589533367983, 0.3385592591951766,
1104 0.3934300024647806, 0.4040223892461541,
1105 0.4122329537843092, 0.4100480091545554,
1106 0.3949151637189968, 0.3697873264791232,
1107 0.33401826235924, 0.2850397806663382,
1108 0.2184260032478671, 0.1271121156350957,
1109 0.1238852517838611, 0.3385592591951819,
1110 0.7119285162766475, 0.8175712861756428,
1111 0.6836254116578105, 0.5779452419831157,
1112 0.5555615956136897, 0.5285181561736719,
1113 0.491439702849224, 0.4409367494853282,
1114 0.3730060082060772, 0.2821694983395214,
1115 0.1610176733857739, 0.1495184117375257,
1116 0.3934300024647929, 0.8175712861756562,
1117 0.9439154625527653, 0.8015904115095128,
1118 0.6859683749254024, 0.6561235366960599,
1119 0.6213197201867315, 0.5753611315000049,
1120 0.5140091754526823, 0.4325325506354165,
1121 0.3248315148915482, 0.1834600412730086,
1122 0.1841596127549917, 0.4040223892461832,
1123 0.6836254116578439, 0.8015904115095396,
1124 0.7870119561144977, 0.7373108331395808,
1125 0.7116558878070463, 0.6745179049094283,
1126 0.6235300574156917, 0.5559332704045935,
1127 0.4670304994474178, 0.3499809143811,
1128 0.19688263746294, 0.2174525028261253,
1129 0.4122329537843404, 0.5779452419831566,
1130 0.6859683749254372, 0.7373108331396063,
1131 0.7458811983178246, 0.7278968022406559,
1132 0.6904793535357751, 0.6369176452710288,
1133 0.5677443693743215, 0.4784738764865867,
1134 0.3602190632823262, 0.2031792054737325,
1135 0.2250996160898818, 0.4100480091545787,
1136 0.5555615956137137, 0.6561235366960938,
1137 0.7116558878070715, 0.727896802240657,
1138 0.7121928678670187, 0.6712187391428729,
1139 0.6139157775591492, 0.5478251665295381,
1140 0.4677122687599031, 0.3587654911000848,
1141 0.2050734291675918, 0.2197954769003094,
1142 0.3949151637190157, 0.5285181561736911,
1143 0.6213197201867471, 0.6745179049094407,
1144 0.690479353535786, 0.6712187391428787,
1145 0.6178408289359514, 0.5453605027237883,
1146 0.489575966490909, 0.4341716881061278,
1147 0.3534389974779456, 0.2083227496961347,
1148 0.207469569837099, 0.3697873264791366,
1149 0.4914397028492412, 0.5753611315000203,
1150 0.6235300574157017, 0.6369176452710497,
1151 0.6139157775591579, 0.5453605027237935,
1152 0.4336604929612851, 0.4109641743019312,
1153 0.3881864790111245, 0.3642640090182592,
1154 0.2179599909280145, 0.1889996477663011,
1155 0.3340182623592461, 0.4409367494853381,
1156 0.5140091754526943, 0.5559332704045969,
1157 0.5677443693743304, 0.5478251665295453,
1158 0.4895759664908982, 0.4109641743019171,
1159 0.395727260284338, 0.3778949322004734,
1160 0.3596268271857124, 0.2191250268948948,
1161 0.1632722532153683, 0.2850397806663325,
1162 0.373006008206081, 0.4325325506354207,
1163 0.4670304994474315, 0.4784738764866023,
1164 0.4677122687599041, 0.4341716881061055,
1165 0.388186479011099, 0.3778949322004602,
1166 0.3633362567187364, 0.3464457261905399,
1167 0.2096362321365655, 0.1276782480038148,
1168 0.2184260032478634, 0.2821694983395252,
1169 0.3248315148915535, 0.3499809143811097,
1170 0.3602190632823333, 0.3587654911000799,
1171 0.3534389974779268, 0.3642640090182283,
1172 0.35962682718569, 0.3464457261905295,
1173 0.3260728953424643, 0.180670595355394,
1174 0.07711845915789244, 0.1271121156350963,
1175 0.1610176733857757, 0.1834600412730144,
1176 0.1968826374629443, 0.2031792054737354,
1177 0.2050734291675885, 0.2083227496961245,
1178 0.2179599909279998, 0.2191250268948822,
1179 0.2096362321365551, 0.1806705953553887,
1180 0.1067965550010013])

###########################################################################
####### do all precomputations necessary for MCMC simulations #############
###########################################################################

# Define the mesh width
h = 1/32

# Define characteristic function of unit square
def S(x,y) :
    return heaviside(x)*heaviside(y) * (1-heaviside(x-h))*(1-heaviside(y-h));

# Define tent function on the domain [0,2h]x[0,2h]
def phi(x,y) :
    return ((x+h)*(y+h)*S(x+h,y+h) + (h-x)*(h-y)*S(x,y)
            + (x+h)*(h-y)*S(x+h,y) + (h-x)*(y+h)*S(x,y+h))/h**2

# Define conversion function for dof's from 2D to scalar label, and
# its inverse
def ij_to_dof_index(i,j) :
    return 33*j+i

def inv_ij_to_dof_index(k) :
    return [k-33*int(k/33), int(k/33)]

# Construct measurement matrix, M, for measurements
xs = np.arange(1./14,13./14,1./14); #measurement points

M = np.zeros((13,13,33**2));
for k in range(33**2) :
    c = inv_ij_to_dof_index(k)
    for i in range(13) :
        for j in range(13) :
            M[i,j,k] = phi(xs[i]-h*c[0], xs[j]-h*c[1])
M = M.reshape((13**2, 33**2))
M = scipy.sparse.csr_matrix(M);

# Construct local overlap matrix, A_loc, and identity matrix Id
A_loc = np.array([[2./3, -1./6, -1./3, -1./6],
                  [-1./6, 2./3, -1./6, -1./3],
                  [-1./3, -1./6, 2./3, -1./6],
                  [-1./6, -1./3, -1./6, 2./3]])
Id = np.eye(33**2,33**2)

# Locate boundary labels
boundaries = ([ij_to_dof_index(i,0)    for i in range(33)] +
              [ij_to_dof_index(i,32)   for i in range(33)] +
              [ij_to_dof_index(0,j+1)  for j in range(31)] +
              [ij_to_dof_index(32,j+1) for j in range(31)])

# Define RHS of FEM linear system, AU = b
b = np.ones(33**2)*10*h**2
b[boundaries] = 0 #enforce boundary conditions on b

###########################################################################
###################### forward solver function ############################
###########################################################################

def forward_solver(theta) :
    # Initialize matrix A for FEM linear solve, AU = b
    A = np.zeros((33**2,33**2))

    # Build A by summing over contribution from each cell
    for i in range(32) :
        for j in range (32) :
            # Find local coefficient in 8x8 grid
            theta_loc = theta[int(i/4)+int(j/4)*8]

            # Update A by including contribution from cell (i,j)
            dof = [ij_to_dof_index(i,j),
                   ij_to_dof_index(i,j+1),
                   ij_to_dof_index(i+1,j+1),
                   ij_to_dof_index(i+1,j)]
            A[np.ix_(dof,dof)] += theta_loc * A_loc

    # Enforce boundary condition: Zero out rows and columns, then
    # put a one back into the diagonal entries.
    A[boundaries,:] = 0
    A[:,boundaries] = 0
    A[boundaries,boundaries] = 1

    # Solve linear equation for coefficients, U, and then
    # get the Z vector by multiplying by the measurement matrix
    u = spsolve(scipy.sparse.csr_matrix(A), b)

    return M.dot(u)


###########################################################################
################# compute log probability, log pi #########################
###########################################################################

def log_likelihood(theta) :
    z = forward_solver(theta)
    misfit = z - z_hat

    sig = 0.05 #likelihood standard deviation
    return -np.dot(misfit,misfit)/(2*sig**2)

def log_prior(theta) :
    sig_pr = 2 #prior (log) standard deviation
    return -np.linalg.norm(np.log(theta))**2/(2*sig_pr**2)

def log_posterior(theta) :
    return log_likelihood(theta) + log_prior(theta)

###########################################################################
############# A function to test against known output #####################
###########################################################################

def verify_against_stored_tests() :
    for i in range(10) :
        print ("Verifying against data set", i)

        # Read the input vector
        f_input = open ("../testing/input.{}.txt".format(i), 'r')
        theta   = np.fromfile(f_input, count=64, sep=" ")

        # Then compute both the forward solution and its statistics.
        # This is not efficiently written here (it calls the forward
        # solver twice), but we don't care about efficiency here since
        # we are only computing with ten samples
        this_z              = forward_solver(theta)
        this_log_likelihood = log_likelihood(theta)
        this_log_prior      = log_prior(theta)

        # Then also read the reference output generated by the C++ program:
        f_output_z          = open ("../testing/output.{}.z.txt".format(i), 'r')
        f_output_likelihood = open ("../testing/output.{}.likelihood.txt".format(i), 'r')
        f_output_prior      = open ("../testing/output.{}.prior.txt".format(i), 'r')

        reference_z              = np.fromfile(f_output_z, count=13**2, sep=" ")
        reference_log_likelihood = float(f_output_likelihood.read())
        reference_log_prior      = float(f_output_prior.read())

        print (" || z-z_ref || : ",
               np.linalg.norm(this_z - reference_z))
        print (" log likelihood : ",
               "Python value=", this_log_likelihood,
               "(C++ reference value=", reference_log_likelihood,
               ", error=", abs(this_log_likelihood - reference_log_likelihood),
               ")")
        print (" log prior : ",
               "Python value=", this_log_prior,
               "(C++ reference value=", reference_log_prior,
               ", error=", abs(this_log_prior - reference_log_prior),
               ")")


def time_forward_solver() :
    for i in range(n_runs) :
        # Create a random vector (with entries between 0 and 1), scale
        # it by a factor of 4, subtract 2, then take the exponential
        # of each entry to get random entries between e^{-2} and e^{2}
        theta = np.exp(np.random.rand(64) * 4 - 2)
        z     = forward_solver(theta)
    print ("Time per forward evaluation:", (end-begin)/n_runs)

verify_against_stored_tests()
time_forward_solver()

<a name="ann-mcmc-laplace.cc"></a>
<h1>Annotated version of mcmc-laplace.cc</h1>
 * /* ---------------------------------------------------------------------
 *  * Copyright (C) 2019 by the deal.II authors and Wolfgang Bangerth.
 *  * This file is part of the deal.II library.
 *  * The deal.II library is free software; you can use it, redistribute
 *  * it, and/or modify it under the terms of the GNU Lesser General
 *  * Public License as published by the Free Software Foundation; either
 *  * version 2.1 of the License, or (at your option) any later version.
 *  * The full text of the license can be found in the file LICENSE.md at
 *  * the top level directory of deal.II.
 *  * ---------------------------------------------------------------------
 *  * Author: Wolfgang Bangerth, Colorado State University, 2019.
 *
 *   #include <deal.II/base/timer.h>
 *   #include <deal.II/grid/tria.h>
 *   #include <deal.II/dofs/dof_handler.h>
 *   #include <deal.II/grid/grid_generator.h>
 *   #include <deal.II/grid/tria_accessor.h>
 *   #include <deal.II/grid/tria_iterator.h>
 *   #include <deal.II/dofs/dof_accessor.h>
 *   #include <deal.II/fe/fe_q.h>
 *   #include <deal.II/dofs/dof_tools.h>
 *   #include <deal.II/fe/fe_values.h>
 *   #include <deal.II/base/quadrature_lib.h>
 *   #include <deal.II/base/function.h>
 *   #include <deal.II/numerics/vector_tools.h>
 *   #include <deal.II/numerics/matrix_tools.h>
 *   #include <deal.II/lac/vector.h>
 *   #include <deal.II/lac/full_matrix.h>
 *   #include <deal.II/lac/sparse_matrix.h>
 *   #include <deal.II/lac/dynamic_sparsity_pattern.h>
 *   #include <deal.II/lac/solver_cg.h>
 *   #include <deal.II/lac/precondition.h>
 *   #include <deal.II/lac/sparse_ilu.h>
 *
 *   #include <deal.II/numerics/data_out.h>
 *
 *   #include <fstream>
 *   #include <iostream>
 *
 *   #include <deal.II/base/logstream.h>
 *
 *   using namespace dealii;
 *
 * The following is a namespace in which we define the solver of the PDE.
 * The main class implements an abstract `Interface` class declared at
 * the top, which provides for an `evaluate()` function that, given
 * a coefficient vector, solves the PDE discussed in the Readme file
 * and then evaluates the solution at the 169 mentioned points.
 *
 * The solver follows the basic layout of @ref step_4 "step-4", though it precomputes
 * a number of things in the `setup_system()` function, such as the
 * evaluation of the matrix that corresponds to the point evaluations,
 * as well as the local contributions to matrix and right hand side.
 *
 * Rather than commenting on everything in detail, in the following
 * we will only document those things that are not already clear from
 * @ref step_4 "step-4" and a small number of other tutorial programs.
 *
 *   namespace ForwardSimulator
 *
 *       virtual Vector<double> evaluate(const Vector<double> &coefficients) = 0;
 *
 *       virtual ~Interface() = default;
 *
 *     template <int dim>
 *     class PoissonSolver : public Interface
 *
 *       PoissonSolver(const unsigned int global_refinements,
 *                     const unsigned int fe_degree,
 *                     const std::string &dataset_name);
 *       virtual Vector<double>
 *       evaluate(const Vector<double> &coefficients) override;
 *
 *       void make_grid(const unsigned int global_refinements);
 *       void setup_system();
 *       void assemble_system(const Vector<double> &coefficients);
 *       void output_results(const Vector<double> &coefficients) const;
 *
 *       Triangulation<dim> triangulation;
 *       DoFHandler<dim>    dof_handler;
 *
 *       FullMatrix<double> cell_matrix;
 *       Vector<double>     cell_rhs;
 *       std::map<types::global_dof_index,double> boundary_values;
 *
 *       SparsityPattern      sparsity_pattern;
 *       SparseMatrix<double> system_matrix;
 *
 *       Vector<double> solution;
 *       Vector<double> system_rhs;
 *
 *       std::vector<Point<dim>> measurement_points;
 *
 *       SparsityPattern      measurement_sparsity;
 *       SparseMatrix<double> measurement_matrix;
 *
 *       TimerOutput  timer;
 *       unsigned int nth_evaluation;
 *
 *       const std::string &dataset_name;
 *
 *     template <int dim>
 *     PoissonSolver<dim>::PoissonSolver(const unsigned int global_refinements,
 *                                       const unsigned int fe_degree,
 *                                       const std::string &dataset_name)
 *       , dof_handler(triangulation)
 *       , timer(std::cout, TimerOutput::summary, TimerOutput::cpu_times)
 *       , nth_evaluation(0)
 *       , dataset_name(dataset_name)
 *
 *       make_grid(global_refinements);
 *
 *     template <int dim>
 *     void PoissonSolver<dim>::make_grid(const unsigned int global_refinements)
 *
 *       Assert(global_refinements >= 3,
 *              ExcMessage("This program makes the assumption that the mesh for the "
 *                         "solution of the PDE is at least as fine as the one used "
 *                         "in the definition of the coefficient."));
 *       GridGenerator::hyper_cube(triangulation, 0, 1);
 *       triangulation.refine_global(global_refinements);
 *
 *       std::cout << " Number of active cells: " << triangulation.n_active_cells()
 *
 *     template <int dim>
 *     void PoissonSolver<dim>::setup_system()
 *
 * First define the finite element space:
 *
 *       dof_handler.distribute_dofs(fe);
 *
 *       std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
 *
 * Then set up the main data structures that will hold the discrete problem:
 *
 *       DynamicSparsityPattern dsp(dof_handler.n_dofs());
 *       DoFTools::make_sparsity_pattern(dof_handler, dsp);
 *       sparsity_pattern.copy_from(dsp);
 *
 *       system_matrix.reinit(sparsity_pattern);
 *
 *       solution.reinit(dof_handler.n_dofs());
 *       system_rhs.reinit(dof_handler.n_dofs());
 *
 * And then define the tools to do point evaluation. We choose
 * a set of 13x13 points evenly distributed across the domain:
 *
 *       const unsigned int n_points_per_direction = 13;
 *       const double       dx = 1. / (n_points_per_direction + 1);
 *
 *       for (unsigned int x = 1; x <= n_points_per_direction; ++x)
 *         for (unsigned int y = 1; y <= n_points_per_direction; ++y)
 *           measurement_points.emplace_back(x * dx, y * dx);
 *
 * First build a full matrix of the evaluation process. We do this
 * even though the matrix is really sparse -- but we don't know
 * which entries are nonzero. Later, the `copy_from()` function
 * calls build a sparsity pattern and a sparse matrix from
 *
 *                                     n_points_per_direction,
 *                                     dof_handler.n_dofs());
 *
 *       for (unsigned int index = 0; index < measurement_points.size(); ++index)
 *
 *                                           measurement_points[index],
 *
 *           for (unsigned int i = 0; i < dof_handler.n_dofs(); ++i)
 *             full_measurement_matrix(index, i) = weights(i);
 *
 *       measurement_sparsity.copy_from(full_measurement_matrix);
 *       measurement_matrix.reinit(measurement_sparsity);
 *       measurement_matrix.copy_from(full_measurement_matrix);
 *
 * Next build the mapping from cell to the index in the 64-element
 * coefficient vector:
 *
 *       for (const auto &cell : triangulation.active_cell_iterators())
 *
 *           const unsigned int i = std::floor(cell->center()[0] * 8);
 *           const unsigned int j = std::floor(cell->center()[1] * 8);
 *
 *           const unsigned int index = i + 8 * j;
 *
 *           cell->set_user_index(index);
 *
 * Finally prebuild the building blocks of the linear system as
 * discussed in the Readme file:
 *
 *       const unsigned int dofs_per_cell = fe.dofs_per_cell;
 *
 *       cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
 *       cell_rhs.reinit(dofs_per_cell);
 *
 *       const QGauss<dim>  quadrature_formula(fe.degree+1);
 *       const unsigned int n_q_points = quadrature_formula.size();
 *
 *                               quadrature_formula,
 *
 *       fe_values.reinit(dof_handler.begin_active());
 *
 *       for (unsigned int q_index = 0; q_index < n_q_points; ++q_index)
 *         for (unsigned int i = 0; i < dofs_per_cell; ++i)
 *
 *             for (unsigned int j = 0; j < dofs_per_cell; ++j)
 *                 (fe_values.shape_grad(i, q_index) *
 *                  fe_values.shape_grad(j, q_index) *
 *                  fe_values.JxW(q_index));
 *
 *             cell_rhs(i) += (fe_values.shape_value(i, q_index) *
 *                             fe_values.JxW(q_index));
 *
 * Given that we have pre-built the matrix and right hand side contributions
 * for a (representative) cell, the function that assembles the matrix is
 * pretty short and straightforward:
 *
 *     template <int dim>
 *     void PoissonSolver<dim>::assemble_system(const Vector<double> &coefficients)
 *
 *       Assert(coefficients.size() == 64, ExcInternalError());
 *
 *       system_matrix = 0;
 *
 *       const unsigned int dofs_per_cell = fe.dofs_per_cell;
 *
 *       std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
 *
 *       for (const auto &cell : dof_handler.active_cell_iterators())
 *
 *           const double coefficient = coefficients(cell->user_index());
 *
 *           cell->get_dof_indices(local_dof_indices);
 *           for (unsigned int i = 0; i < dofs_per_cell; ++i)
 *
 *               for (unsigned int j = 0; j < dofs_per_cell; ++j)
 *                 system_matrix.add(local_dof_indices[i],
 *                                   local_dof_indices[j],
 *
 *               system_rhs(local_dof_indices[i]) += cell_rhs(i);
 *
 * The same is true for the function that solves the linear system:
 *
 *     template <int dim>
 *     void PoissonSolver<dim>::solve()
 *
 *       solver.solve(system_matrix, solution, system_rhs, ilu);
 *
 * The following function outputs graphical data for the most recently
 * used coefficient and corresponding solution of the PDE. Collecting
 * the coefficient values requires translating from the 64-element
 * coefficient vector and the cells that correspond to each of these
 * elements. The rest remains pretty obvious, with the exception
 * of including the number of the current sample into the file name.
 *
 *     template <int dim>
 *     PoissonSolver<dim>::output_results(const Vector<double> &coefficients) const
 *
 *       for (const auto &cell : triangulation.active_cell_iterators())
 *         coefficient_values[cell->active_cell_index()] =
 *           coefficients(cell->user_index());
 *
 *       std::ofstream output("solution-" +
 *
 * The following is the main function of this class: Given a coefficient
 * vector, it assembles the linear system, solves it, and then evaluates
 * the solution at the measurement points by applying the measurement
 * matrix to the solution vector. That vector of "measured" values
 *
 * The function will also output the solution in a graphical format
 * if you un-comment the corresponding statement in the third
 * code block. However, you may end up with a very large amount
 * of data: This code is producing, at the minimum, 10,000 samples
 * and creating output for each one of them is surely more data
 * than you ever want to see!
 *
 * At the end of the function, we output some timing information
 * every 10,000 samples.
 *
 *     template <int dim>
 *     PoissonSolver<dim>::evaluate(const Vector<double> &coefficients)
 *
 *       assemble_system(coefficients);
 *
 *       measurement_matrix.vmult(measurements, solution);
 *
 *       Assert(measurements.size() == measurement_points.size(),
 *              ExcInternalError());
 *
 *       if (nth_evaluation % 10000 == 0)
 *         timer.print_summary();
 *
 *       return measurements;
 *
 * The following namespaces define the statistical properties of the Bayesian
 * inverse problem. The first is about the definition of the measurement
 * statistics (the "likelihood"), which we here assume to be a normal
 * distribution @f$N(\mu,\sigma I)@f$ with mean value @f$\mu@f$ given by the
 * actual measurement vector (passed as an argument to the constructor
 * of the `Gaussian` class) and standard deviation @f$\sigma@f$.
 *
 * For reasons of numerical accuracy, it is useful to not return the
 * actual likelihood, but its logarithm. This is because these
 * values can be very small, occasionally on the order of @f$e^{-100}@f$,
 * for which it becomes very difficult to compute accurate
 *
 *   namespace LogLikelihood
 *
 *       virtual double log_likelihood(const Vector<double> &x) const = 0;
 *
 *       virtual ~Interface() = default;
 *
 *     class Gaussian : public Interface
 *
 *       Gaussian(const Vector<double> &mu, const double sigma);
 *
 *       virtual double log_likelihood(const Vector<double> &x) const override;
 *
 *       const Vector<double> mu;
 *       const double         sigma;
 *
 *     Gaussian::Gaussian(const Vector<double> &mu, const double sigma)
 *
 *       return -x_minus_mu.norm_sqr() / (2 * sigma * sigma);
 *
 * Next up is the "prior" imposed on the coefficients. We assume
 * that the logarithms of the entries of the coefficient vector
 * are all distributed as a Gaussian with given mean and standard
 * deviation. If the logarithms of the coefficients are normally
 * distributed, then this implies in particular that the coefficients
 * can only be positive, which is a useful property to ensure the
 * well-posedness of the forward problem.
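 *
 * (To make this concrete: with the mean of the logarithms taken as zero and
 * standard deviation @f$\sigma_{pr}@f$ -- the values 0 and 2 used by this
 * program -- the logarithm of the prior evaluated below is, up to an additive
 * constant that does not matter for sampling,
 * @f[
 *   \log \pi_{pr}(\theta) = -\frac{1}{2\sigma_{pr}^2}
 *     \sum_{i=1}^{64} \left(\log \theta_i\right)^2 ,
 * @f]
 * which is also exactly what the Matlab and Python implementations shown
 * above compute.)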
 *
 * For the same reasons as for the likelihood above, the interface
 * for the prior asks for returning the *logarithm* of the prior,
 * instead of the prior probability itself.
 *
 *   namespace LogPrior
 *
 *       virtual ~Interface() = default;
 *
 *     class LogGaussian : public Interface
 *
 *       LogGaussian(const double mu, const double sigma);
 *
 *       virtual double log_prior(const Vector<double> &x) const override;
 *
 *       const double sigma;
 *
 *     LogGaussian::LogGaussian(const double mu, const double sigma)
 *
 *       double log_of_product = 0;
 *
 *       for (const auto &el : x)
 *
 *       return log_of_product;
 *
 * The Metropolis-Hastings algorithm requires a method to create a new sample
 * given a previous sample. We do this by perturbing the current (coefficient)
 * sample randomly using a Gaussian distribution centered at the current
 * sample. To ensure that the samples' individual entries all remain
 * positive, we use a Gaussian distribution in logarithm space -- in other
 * words, instead of *adding* a small perturbation with mean value zero,
 * we *multiply* the entries of the current sample by a factor that
 * is the exponential of a random number with mean zero. (Because the
 * exponential of zero is one, this means that the most likely factors
 * to multiply the existing sample entries by are close to one. And
 * because the exponential of a number is always positive, we never
 * get negative samples this way.)
 *
 * But the Metropolis-Hastings sampler doesn't just need a perturbed
 * sample @f$y@f$ location given the current sample location @f$x@f$. It also
 * needs to know the ratio of the probability of reaching @f$y@f$ from
 * @f$x@f$, divided by the probability of reaching @f$x@f$ from @f$y@f$. If we
 * were to use a symmetric proposal distribution (e.g., a Gaussian
 * distribution centered at @f$x@f$ with a width independent of @f$x@f$), then
 * these two probabilities would be the same, and the ratio one. But
 * that's not the case for the Gaussian in log space. It's not
 * terribly difficult to verify that in that case, for a single
 * component the ratio of these probabilities is @f$y_i/x_i@f$, and
 * consequently for all components of the vector together, the
 * probability is the product of these ratios.
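 *
 * (Written out, this means that a proposal @f$y@f$ is accepted with probability
 * @f[
 *   \alpha = \min\left(1,\;
 *     \frac{\pi_{\text{post}}(y)}{\pi_{\text{post}}(x)}
 *     \prod_{i=1}^{64} \frac{y_i}{x_i}\right),
 * @f]
 * where the posterior ratio is evaluated as the exponential of the difference
 * of the two log posteriors. This is exactly the criterion implemented in the
 * `MetropolisHastings::sample()` function further down, and it corresponds to
 * the `accept` variable computed in the Matlab `main.m` shown above.)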
 *
 *   namespace ProposalGenerator
 *
 *       std::pair<Vector<double>, double>
 *
 *       virtual ~Interface() = default;
 *
 *     class LogGaussian : public Interface
 *
 *       LogGaussian(const unsigned int random_seed, const double log_sigma);
 *
 *       std::pair<Vector<double>, double>
 *
 *       const double log_sigma;
 *       mutable std::mt19937 random_number_generator;
 *
 *     LogGaussian::LogGaussian(const unsigned int random_seed,
 *                              const double       log_sigma)
 *       : log_sigma(log_sigma)
 *
 *       random_number_generator.seed(random_seed);
 *
 *     std::pair<Vector<double>, double>
 *     LogGaussian::perturb(const Vector<double> &current_sample) const
 *
 *       double product_of_ratios = 1;
 *       for (auto &x : new_sample)
 *
 *           const double rnd = std::normal_distribution<>(0, log_sigma)(random_number_generator);
 *           const double exp_rnd = std::exp(rnd);
 *
 *           product_of_ratios *= exp_rnd;
 *
 *       return {new_sample, product_of_ratios};
 *
 * The last main class is the Metropolis-Hastings sampler itself.
 * If you understand the algorithm behind this method, then
 * the following implementation should not be too difficult
 * to read. The only thing of relevance is that descriptions
 * of the algorithm typically ask whether the *ratio* of two
 * probabilities (the "posterior" probabilities of the current
 * and the previous samples, where the "posterior" is the product of the
 * likelihood and the prior probability) is larger or smaller than a
 * randomly drawn number. But because our interfaces return the
 * *logarithms* of these probabilities, we now need to take
 * the ratio of appropriate exponentials -- which is made numerically
 * more stable by considering the exponential of the difference of
 * the log probabilities. The only other slight complication is that
 * we need to multiply this ratio by the ratio of proposal probabilities
 * since we use a non-symmetric proposal distribution.
 *
 * Finally, we note that the output is generated with 7 digits of
 * accuracy. (The C++ default is 6 digits.) We do this because,
 * as shown in the paper, we can determine the mean value of the
 * probability distribution we are sampling here to at least six
 * digits of accuracy, and do not want to be limited by the precision
 *
 *   class MetropolisHastings
 *
 *     MetropolisHastings(ForwardSimulator::Interface &       simulator,
 *                        const LogLikelihood::Interface &    likelihood,
 *                        const LogPrior::Interface &         prior,
 *                        const ProposalGenerator::Interface &proposal_generator,
 *                        const unsigned int                  random_seed,
 *                        const std::string &                 dataset_name);
 *
 *                 const unsigned int n_samples);
 *
 *     ForwardSimulator::Interface &       simulator;
 *     const LogLikelihood::Interface &    likelihood;
 *     const LogPrior::Interface &         prior;
 *     const ProposalGenerator::Interface &proposal_generator;
 *
 *     std::mt19937 random_number_generator;
 *
 *     unsigned int sample_number;
 *     unsigned int accepted_sample_number;
 *
 *     std::ofstream output_file;
 *
 *                       const double current_log_likelihood);
 *
 *   MetropolisHastings::MetropolisHastings(
 *     ForwardSimulator::Interface &       simulator,
 *     const LogLikelihood::Interface &    likelihood,
 *     const LogPrior::Interface &         prior,
 *     const ProposalGenerator::Interface &proposal_generator,
 *     const unsigned int                  random_seed,
 *     const std::string &                 dataset_name)
 *     : simulator(simulator)
 *     , likelihood(likelihood)
 *     , proposal_generator(proposal_generator)
 *     , sample_number(0)
 *     , accepted_sample_number(0)
 *
 *     output_file.open("samples-" + dataset_name + ".txt");
 *     output_file.precision(7);
 *
 *     random_number_generator.seed(random_seed);
 *
 *   void MetropolisHastings::sample(const Vector<double> &starting_guess,
 *                                   const unsigned int    n_samples)
 *
 *     std::uniform_real_distribution<> uniform_distribution(0, 1);
 *
 *     double current_log_posterior =
 *       (likelihood.log_likelihood(simulator.evaluate(current_sample)) +
 *        prior.log_prior(current_sample));
 *
 *     ++accepted_sample_number;
 *     write_sample(current_sample, current_log_posterior);
 *
 *     for (unsigned int k = 1; k < n_samples; ++k, ++sample_number)
 *
 *         std::pair<Vector<double>, double>
 *           perturbation = proposal_generator.perturb(current_sample);
 *         const Vector<double> trial_sample = std::move (perturbation.first);
 *         const double perturbation_probability_ratio = perturbation.second;
 *
 *         const double trial_log_posterior =
 *           (likelihood.log_likelihood(simulator.evaluate(trial_sample)) +
 *            prior.log_prior(trial_sample));
 *
 *         if (std::exp(trial_log_posterior - current_log_posterior) * perturbation_probability_ratio
 *             >=
 *             uniform_distribution(random_number_generator))
 *
 *             current_sample        = trial_sample;
 *             current_log_posterior = trial_log_posterior;
 *
 *             ++accepted_sample_number;
 *
 *         write_sample(current_sample, current_log_posterior);
 *
 *   void MetropolisHastings::write_sample(const Vector<double> &current_sample,
 *                                         const double current_log_posterior)
 *
 *     output_file << current_log_posterior << '\t';
 *     output_file << accepted_sample_number << '\t';
 *     for (const auto &x : current_sample)
 *       output_file << x << ' ';
 *     output_file << '\n';
 *
 *     output_file.flush();
 *
 * The final function is `main()`, which simply puts all of these pieces
 * together into one. The "exact solution", i.e., the "measurement values"
 * we use for this program are tabulated to make it easier for other
 * people to use in their own implementations of this benchmark. These
 * values were created using the same main class above, but using 8 mesh
 * refinements and using a Q3 element -- i.e., using a much more accurate
 * method than the one we use in the forward simulator for generating
 * samples below (which uses 5 global mesh refinement steps and a Q1
 * element). If you wanted to regenerate this set of numbers, then
 * the following code snippet would do that:
 * <div class=CodeFragmentInTutorialComment>
 *
 *     for (auto &el : exact_coefficients)
 *     exact_coefficients(9) = exact_coefficients(10) = exact_coefficients(17) =
 *       exact_coefficients(18) = 0.1;
 *     exact_coefficients(45) = exact_coefficients(46) = exact_coefficients(53) =
 *       exact_coefficients(54) = 10.;
 *
 *     ForwardSimulator::PoissonSolver<2>( 8,
 *
 *       .evaluate(exact_coefficients);
 *
 *     const bool testing = true;
 *
 * Run with one thread, so as to not step on other processes
 * doing the same at the same time. It turns out that the problem
 * is also so small that running with more than one thread
 * *increases* the runtime.
 *
 *     const unsigned int random_seed = (testing ? 1U : std::random_device()());
2213 * { 0.06076511762259369, 0.09601910120848481,
2214 * 0.1238852517838584, 0.1495184117375201,
2215 * 0.1841596127549784, 0.2174525028261122,
2216 * 0.2250996160898698, 0.2197954769002993,
2217 * 0.2074695698370926, 0.1889996477663016,
2218 * 0.1632722532153726, 0.1276782480038186,
2219 * 0.07711845915789312, 0.09601910120848552,
2220 * 0.2000589533367983, 0.3385592591951766,
2221 * 0.3934300024647806, 0.4040223892461541,
2222 * 0.4122329537843092, 0.4100480091545554,
2223 * 0.3949151637189968, 0.3697873264791232,
2224 * 0.33401826235924, 0.2850397806663382,
2225 * 0.2184260032478671, 0.1271121156350957,
2226 * 0.1238852517838611, 0.3385592591951819,
2227 * 0.7119285162766475, 0.8175712861756428,
2228 * 0.6836254116578105, 0.5779452419831157,
2229 * 0.5555615956136897, 0.5285181561736719,
2230 * 0.491439702849224, 0.4409367494853282,
2231 * 0.3730060082060772, 0.2821694983395214,
2232 * 0.1610176733857739, 0.1495184117375257,
2233 * 0.3934300024647929, 0.8175712861756562,
2234 * 0.9439154625527653, 0.8015904115095128,
2235 * 0.6859683749254024, 0.6561235366960599,
2236 * 0.6213197201867315, 0.5753611315000049,
2237 * 0.5140091754526823, 0.4325325506354165,
2238 * 0.3248315148915482, 0.1834600412730086,
2239 * 0.1841596127549917, 0.4040223892461832,
2240 * 0.6836254116578439, 0.8015904115095396,
2241 * 0.7870119561144977, 0.7373108331395808,
2242 * 0.7116558878070463, 0.6745179049094283,
2243 * 0.6235300574156917, 0.5559332704045935,
2244 * 0.4670304994474178, 0.3499809143811,
2245 * 0.19688263746294, 0.2174525028261253,
2246 * 0.4122329537843404, 0.5779452419831566,
2247 * 0.6859683749254372, 0.7373108331396063,
2248 * 0.7458811983178246, 0.7278968022406559,
2249 * 0.6904793535357751, 0.6369176452710288,
2250 * 0.5677443693743215, 0.4784738764865867,
2251 * 0.3602190632823262, 0.2031792054737325,
2252 * 0.2250996160898818, 0.4100480091545787,
2253 * 0.5555615956137137, 0.6561235366960938,
2254 * 0.7116558878070715, 0.727896802240657,
2255 * 0.7121928678670187, 0.6712187391428729,
2256 * 0.6139157775591492, 0.5478251665295381,
2257 * 0.4677122687599031, 0.3587654911000848,
2258 * 0.2050734291675918, 0.2197954769003094,
2259 * 0.3949151637190157, 0.5285181561736911,
2260 * 0.6213197201867471, 0.6745179049094407,
2261 * 0.690479353535786, 0.6712187391428787,
2262 * 0.6178408289359514, 0.5453605027237883,
2263 * 0.489575966490909, 0.4341716881061278,
2264 * 0.3534389974779456, 0.2083227496961347,
2265 * 0.207469569837099, 0.3697873264791366,
2266 * 0.4914397028492412, 0.5753611315000203,
2267 * 0.6235300574157017, 0.6369176452710497,
2268 * 0.6139157775591579, 0.5453605027237935,
2269 * 0.4336604929612851, 0.4109641743019312,
2270 * 0.3881864790111245, 0.3642640090182592,
2271 * 0.2179599909280145, 0.1889996477663011,
2272 * 0.3340182623592461, 0.4409367494853381,
2273 * 0.5140091754526943, 0.5559332704045969,
2274 * 0.5677443693743304, 0.5478251665295453,
2275 * 0.4895759664908982, 0.4109641743019171,
2276 * 0.395727260284338, 0.3778949322004734,
2277 * 0.3596268271857124, 0.2191250268948948,
2278 * 0.1632722532153683, 0.2850397806663325,
2279 * 0.373006008206081, 0.4325325506354207,
2280 * 0.4670304994474315, 0.4784738764866023,
2281 * 0.4677122687599041, 0.4341716881061055,
2282 * 0.388186479011099, 0.3778949322004602,
2283 * 0.3633362567187364, 0.3464457261905399,
2284 * 0.2096362321365655, 0.1276782480038148,
2285 * 0.2184260032478634, 0.2821694983395252,
2286 * 0.3248315148915535, 0.3499809143811097,
2287 * 0.3602190632823333, 0.3587654911000799,
2288 * 0.3534389974779268, 0.3642640090182283,
2289 * 0.35962682718569, 0.3464457261905295,
2290 * 0.3260728953424643, 0.180670595355394,
2291 * 0.07711845915789244, 0.1271121156350963,
2292 * 0.1610176733857757, 0.1834600412730144,
2293 * 0.1968826374629443, 0.2031792054737354,
2294 * 0.2050734291675885, 0.2083227496961245,
2295 * 0.2179599909279998, 0.2191250268948822,
2296 * 0.2096362321365551, 0.1806705953553887,
2297 * 0.1067965550010013 });
 *
 * Now run the forward simulator for samples:
 *
 *     ForwardSimulator::PoissonSolver<2> laplace_problem(
 *
 *     LogLikelihood::Gaussian        log_likelihood(exact_solution, 0.05);
 *     LogPrior::LogGaussian          log_prior(0, 2);
 *     ProposalGenerator::LogGaussian proposal_generator(
 *       random_seed, 0.09);
 *     Sampler::MetropolisHastings sampler(laplace_problem,
 *                                         proposal_generator,
 *
 *     for (auto &el : starting_coefficients)
 *
 *     sampler.sample(starting_coefficients,
 *                    (testing ? 250 * 40