    LogLikelihood::Gaussian log_likelihood(exact_solution, 0.05);
    LogPrior::LogGaussian   log_prior(0, 2);

    const unsigned int n_theta = 64;
    for (unsigned int test=0; test<10; ++test)
      {
        std::cout << "Generating output for test " << test << std::endl;

        // Read the 64 coefficients for this test from the corresponding input file:
        std::ifstream test_input ("../testing/input." + std::to_string(test) + ".txt");
        Assert (test_input, ExcIO());

        Vector<double> theta(n_theta);
        for (unsigned int i=0; i<n_theta; ++i)
          test_input >> theta[i];

        // Run the forward simulator on these coefficients and write the
        // resulting measurement vector z, log likelihood, and log prior
        // into output files:
        const Vector<double> z = laplace_problem.evaluate(theta);

        std::ofstream test_output_z ("output." + std::to_string(test) + ".z.txt");
        z.print(test_output_z, 16);

        std::ofstream test_output_likelihood ("output." + std::to_string(test) + ".loglikelihood.txt");
        test_output_likelihood.precision(12);
        test_output_likelihood << log_likelihood.log_likelihood(z) << std::endl;

        std::ofstream test_output_prior ("output." + std::to_string(test) + ".logprior.txt");
        test_output_prior.precision(12);
        test_output_prior << log_prior.log_prior(theta) << std::endl;
      }
This code reads in each of the input files (assuming that the executable is located in a
build directory parallel to the `testing/` directory) and outputs its results into the
current directory. The outputs you get from a modified program should then be compared
against the ones stored in the `testing/` directory. They should match to several digits.
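
If you want to automate this comparison, a few lines of Python are enough. The
following is only a sketch, not part of the program: it assumes you run it from the
same build directory, so that the freshly generated `output.*` files are in the
current directory and the reference files are in `../testing/`.

```python
import numpy as np

# Compare freshly generated outputs against the stored reference files:
for test in range(10):
    for suffix in ["z", "loglikelihood", "logprior"]:
        new = np.loadtxt("output.{}.{}.txt".format(test, suffix))
        ref = np.loadtxt("../testing/output.{}.{}.txt".format(test, suffix))
        max_err = np.max(np.abs(new - ref))
        print("test {}, {}: max abs difference = {:.2e}".format(test, suffix, max_err))
```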
An alternative implementation in Matlab
---------------------------------------
To facilitate experiments, this directory also contains alternative
implementations of the benchmark. The first one was written by David
Aristoff in Matlab and can be found in the `Matlab/` directory. As is
common in Matlab programs, each function is provided in its own
file. We have verified that the program generates the same results (to
12 or more digits) as the C++ program, using the tests mentioned in
the previous section.
An alternative implementation in Python
---------------------------------------
Another alternative, written by Wolfgang Bangerth, can be found in the
`Python/` directory and uses Python3. As is common for Python
programs, the whole program is provided in one file. As for the Matlab
version, we have verified that the program generates the same results
(to 12 or more digits) as the C++ program, using the tests mentioned
in the previous section. In fact, if you just execute the program
as-is, it runs diagnostics that output the errors.
This Python version is essentially a literal translation of the Matlab
code. It is not nearly as efficient (taking around 5 times as much
time for each function evaluation) and could probably be optimized
substantially if desired. A good starting point is the insertion of
the local elements into the global matrix in the line

    A[np.ix_(dof,dof)] += theta_loc * A_loc

that takes up a substantial fraction of the overall run time.
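
One possible way to speed this up, sketched below purely as an illustration, is to
precompute the global row and column indices of every cell's 4x4 local block once and
then assemble the matrix for each new `theta` directly in sparse (COO) format rather
than updating a dense array. The helper names `precompute_assembly_pattern` and
`assemble_sparse` are hypothetical; `ij_to_dof_index`, `A_loc`, and the 64-entry
`theta` vector are those of the Python program, and the boundary conditions would
still have to be applied afterwards exactly as in the original code.

```python
import numpy as np
import scipy.sparse

def precompute_assembly_pattern(ij_to_dof_index):
    # For each of the 32x32 cells, record the global (row, col) indices of its
    # 4x4 local block and the index of the 8x8 coefficient the cell uses.
    rows, cols, coeff_index = [], [], []
    for i in range(32):
        for j in range(32):
            dof = np.array([ij_to_dof_index(i, j),
                            ij_to_dof_index(i, j + 1),
                            ij_to_dof_index(i + 1, j + 1),
                            ij_to_dof_index(i + 1, j)])
            rows.append(np.repeat(dof, 4))
            cols.append(np.tile(dof, 4))
            coeff_index.append(int(i / 4) + int(j / 4) * 8)
    return np.concatenate(rows), np.concatenate(cols), np.array(coeff_index)

def assemble_sparse(theta, A_loc, rows, cols, coeff_index):
    # Each cell contributes theta_loc * A_loc; build all values at once and let
    # scipy.sparse sum the duplicate entries when converting to CSR.
    values = np.outer(theta[coeff_index], A_loc.ravel()).ravel()
    return scipy.sparse.coo_matrix((values, (rows, cols)),
                                   shape=(33**2, 33**2)).tocsr()
```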
<a name="ann-Matlab/exact_values.m"></a>
<h1>Annotated version of Matlab/exact_values.m</h1>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%% list of "exact" measurement values, z_hat %%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

z_hat = [0.06076511762259369;
<a name="ann-Matlab/forward_solver.m"></a>
<h1>Annotated version of Matlab/forward_solver.m</h1>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%% forward solver function %%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%theta = current 8x8 parameter matrix
%lbl = cell labeling function
%A_loc = matrix of local contributions to A
%Id = identity matrix of size 33^2 x 33^2
%boundaries = labels of boundary cells
%b = right hand side of linear system (AU = b)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%z = vector of measurements
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function z = forward_solver(theta,lbl,A_loc,Id,boundaries,b,M)

%initialize matrix A for FEM linear solve, AU = b

%loop over cells to build A
  for j=0:31 %build A by summing over contribution from each cell

    %find local coefficient in 8x8 grid

    %update A by including contribution from cell (i,j)
    dof = [lbl(i,j),lbl(i,j+1),lbl(i+1,j+1),lbl(i+1,j)];
    A(dof,dof) = A(dof,dof) + theta_loc*A_loc;

%enforce boundary condition
A(boundaries,:) = Id(boundaries,:);
A(:,boundaries) = Id(:,boundaries);

%solve linear equation for coefficients, U
<a name="ann-Matlab/get_statistics.m"></a>
<h1>Annotated version of Matlab/get_statistics.m</h1>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%% compute statistics on data set %%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%data = tensor of theta samples from each lag time and chain
%theta_means = means of theta over each independent chain
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%theta_mean = overall mean of chains
%covars = covariance matrices of each independent chain
%autocovar = mean of autocovariance matrix over all the chains
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function [theta_mean,covars,autocovar] = get_statistics(data,theta_means);

%compute overall mean of data, and get size of data matrix
theta_mean = mean(theta_means,3);
[~,~,L,N] = size(data);

%initialize covariance matrices and mean autocovariance matrix
covars = zeros(64,64,N);
autocovar = zeros(64,64,2*L-1);

%compute covariance matrices and mean autocovariance matrix
for n=1:N %loop over independent Markov chains

  %get data from chain n
  data_ = reshape(permute(data(:,:,:,n),[3 2 1]),[L 64]);

  %compute autocovariance matrix of chain n
  mat = xcov(data_,'unbiased');

  %store covariance matrix of chain n
  covars(:,:,n) = reshape(mat(L,:),64,64);

  autocovar = autocovar + reshape(mat',[64 64 2*L-1]);

%compute mean of autocovariance matrix
autocovar = autocovar(1:64,1:64,L:2*L-1)/N;
<a name="ann-Matlab/log_probability.m"></a>
<h1>Annotated version of Matlab/log_probability.m</h1>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%% compute log probability, log pi %%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%theta = current 8x8 parameter matrix
%z = current vector of measurements
%z_hat = vector of "exact" measurements
%sig = standard deviation parameter in likelihood
%sig_pr = standard deviation parameter in prior
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%log_pi = logarithm of posterior probability
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function log_pi = log_probability(theta,z,z_hat,sig,sig_pr)

%compute log likelihood
log_L = -sum((z-z_hat).^2)/(2*sig^2);

log_pi_pr = -sum(log(theta).^2,'all')/(2*sig_pr^2);

%compute log posterior
log_pi = log_L+log_pi_pr;
<a name="ann-Matlab/main.m"></a>
<h1>Annotated version of Matlab/main.m</h1>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%% Run MCMC sampler to estimate posterior distribution %%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%define number of chains, chain length, and lag time
N = input('number of independent Markov chains: ');
N_L = input('length of each Markov chain: ');
lag = input('lag time for measurements: ');
workers = input('number of parallel workers: ');

%open Matlab parallel pool

load precomputations.mat

%define lag time and data matrix
data = zeros(8,8,L,N); %data matrix of samples at lag times
theta_means = zeros(8,8,N); %overall mean of theta

  %set initial theta, theta mean, and z values of chain

    %define proposal, theta_tilde
    xi = normrnd(0,sig_prop,[8 8]);
    theta_tilde = theta.*exp(xi);

    %compute new z values
    z_tilde = forward_solver_(theta_tilde);

    %compute posterior log probability of theta_tilde
    log_pi_tilde = log_probability_(theta_tilde,z_tilde);
    log_pi = log_probability_(theta,z);

    %compute acceptance probability; accept proposal appropriately
    accept = exp(log_pi_tilde-log_pi) ...
             *prod(theta_tilde./theta,'all');
      theta = theta_tilde; %accept new theta values
      z = z_tilde; %record associated measurements

    %update mean of theta
    theta_mean = theta_mean + theta;

    data(:,:,m,n) = theta;

  theta_means(:,:,n) = theta_mean/N_L;

%compute statistics on data set
[theta_mean,covars,autocovar] = get_statistics(data,theta_means);

%save data to Matlab workspace, labeled by N and N_L
save (['data_N_' num2str(N) '_N_L_' num2str(N_L) '.mat'])
<a name="ann-Matlab/plot_solution.m"></a>
<h1>Annotated version of Matlab/plot_solution.m</h1>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%% plots solution, u, to Poisson equation %%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%% associated to theta and a 32x32 mesh %%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%input matrix theta for plotting (e.g., theta = theta_hat)
theta = input('choose theta for plotting u: theta = ');

%construct mass matrix, M_plot, for plotting
      Mp(i,j,k) = phi(xsp(i)-h*c(1),xsp(j)-h*c(2));
Mp = reshape(Mp,[n^2 33^2]);

%run forward solver on mean of theta
  for j=0:31 %build A by summing over contribution from each cell

    %find local coefficient in 8x8 grid
    theta_loc = theta(floor(i/4)+1,floor(j/4)+1);

    %update A by including contribution from cell (i,j)
    dof = [lbl(i,j),lbl(i,j+1),lbl(i+1,j+1),lbl(i+1,j)];
    A(dof,dof) = A(dof,dof) + theta_loc*A_loc;

%enforce boundary condition
A(boundaries,:) = Id(boundaries,:);

%solve linear equation for coefficients, U

%close all current plots

zs = reshape(Mp*U,[n n]);
<a name="ann-Matlab/precomputations.m"></a>
<h1>Annotated version of Matlab/precomputations.m</h1>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%% do all precomputations necessary for MCMC simulations %%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%define characteristic function of unit square
S = @(x,y) heaviside(x).*heaviside(y) ...
    .*(1-heaviside(x-h)).*(1-heaviside(y-h));

%define tent function on the domain [-h,h]x[-h,h]
phi = @(x,y) ((x+h).*(y+h).*S(x+h,y+h) + (h-x).*(h-y).*S(x,y) ...
    + (x+h).*(h-y).*S(x+h,y) + (h-x).*(y+h).*S(x,y+h))/h^2;

%define function that converts from (i,j) to dof, and its inverse
lbl = @(i,j) 33*j+i+1;
inv_lbl = @(k) [k-1-33*floor((k-1)/33),floor((k-1)/33)];

%construct measurement matrix, M
xs = 1/14:1/14:13/14; %measurement points
M = zeros(13,13,33^2);
      M(i,j,k) = phi(xs(i)-h*c(1),xs(j)-h*c(2));
M = reshape(M,[13^2 33^2]);

%construct exact coefficient matrix, theta_hat
theta_hat = ones(8,8);
theta_hat(2:3,2:3) = 0.1;
theta_hat(6:7,6:7) = 10;

%construct local overlap matrix, A_loc, and identity matrix Id
A_loc = [2/3 -1/6 -1/3 -1/6;
         -1/6 2/3 -1/6 -1/3;
         -1/3 -1/6 2/3 -1/6;
         -1/6 -1/3 -1/6 2/3];

%locate boundary labels
boundaries = [lbl(0:1:32,0),lbl(0:1:32,32),lbl(0,1:1:31),lbl(32,1:1:31)];

%define RHS of FEM linear system, AU = b
b = ones(33^2,1)*10*h^2;
b(boundaries) = zeros(128,1); %enforce boundary conditions on b

%load exact z_hat values

%set global parameters and functions for simulation
sig = 0.05; %likelihood standard deviation
sig_pr = 2; %prior (log) standard deviation
sig_prop = 0.0725; %proposal (log) standard deviation
theta0 = ones(8,8); %initial theta values
forward_solver_ = @(theta) ...
    forward_solver(theta,lbl,A_loc,Id,boundaries,b,M);
log_probability_ = @(theta,z) log_probability(theta,z,z_hat,sig,sig_pr);

%find initial z values
z0 = forward_solver_(theta0);

save precomputations.mat
<a name="ann-Python/forward_solver.py"></a>
<h1>Annotated version of Python/forward_solver.py</h1>
import numpy as np
import scipy.sparse
from scipy.sparse.linalg import spsolve

###########################################################################
############ list of "exact" measurement values, z_hat ####################
###########################################################################
z_hat = np.array(
    [0.06076511762259369, 0.09601910120848481,
1099 0.1238852517838584, 0.1495184117375201,
1100 0.1841596127549784, 0.2174525028261122,
1101 0.2250996160898698, 0.2197954769002993,
1102 0.2074695698370926, 0.1889996477663016,
1103 0.1632722532153726, 0.1276782480038186,
1104 0.07711845915789312, 0.09601910120848552,
1105 0.2000589533367983, 0.3385592591951766,
1106 0.3934300024647806, 0.4040223892461541,
1107 0.4122329537843092, 0.4100480091545554,
1108 0.3949151637189968, 0.3697873264791232,
1109 0.33401826235924, 0.2850397806663382,
1110 0.2184260032478671, 0.1271121156350957,
1111 0.1238852517838611, 0.3385592591951819,
1112 0.7119285162766475, 0.8175712861756428,
1113 0.6836254116578105, 0.5779452419831157,
1114 0.5555615956136897, 0.5285181561736719,
1115 0.491439702849224, 0.4409367494853282,
1116 0.3730060082060772, 0.2821694983395214,
1117 0.1610176733857739, 0.1495184117375257,
1118 0.3934300024647929, 0.8175712861756562,
1119 0.9439154625527653, 0.8015904115095128,
1120 0.6859683749254024, 0.6561235366960599,
1121 0.6213197201867315, 0.5753611315000049,
1122 0.5140091754526823, 0.4325325506354165,
1123 0.3248315148915482, 0.1834600412730086,
1124 0.1841596127549917, 0.4040223892461832,
1125 0.6836254116578439, 0.8015904115095396,
1126 0.7870119561144977, 0.7373108331395808,
1127 0.7116558878070463, 0.6745179049094283,
1128 0.6235300574156917, 0.5559332704045935,
1129 0.4670304994474178, 0.3499809143811,
1130 0.19688263746294, 0.2174525028261253,
1131 0.4122329537843404, 0.5779452419831566,
1132 0.6859683749254372, 0.7373108331396063,
1133 0.7458811983178246, 0.7278968022406559,
1134 0.6904793535357751, 0.6369176452710288,
1135 0.5677443693743215, 0.4784738764865867,
1136 0.3602190632823262, 0.2031792054737325,
1137 0.2250996160898818, 0.4100480091545787,
1138 0.5555615956137137, 0.6561235366960938,
1139 0.7116558878070715, 0.727896802240657,
1140 0.7121928678670187, 0.6712187391428729,
1141 0.6139157775591492, 0.5478251665295381,
1142 0.4677122687599031, 0.3587654911000848,
1143 0.2050734291675918, 0.2197954769003094,
1144 0.3949151637190157, 0.5285181561736911,
1145 0.6213197201867471, 0.6745179049094407,
1146 0.690479353535786, 0.6712187391428787,
1147 0.6178408289359514, 0.5453605027237883,
1148 0.489575966490909, 0.4341716881061278,
1149 0.3534389974779456, 0.2083227496961347,
1150 0.207469569837099, 0.3697873264791366,
1151 0.4914397028492412, 0.5753611315000203,
1152 0.6235300574157017, 0.6369176452710497,
1153 0.6139157775591579, 0.5453605027237935,
1154 0.4336604929612851, 0.4109641743019312,
1155 0.3881864790111245, 0.3642640090182592,
1156 0.2179599909280145, 0.1889996477663011,
1157 0.3340182623592461, 0.4409367494853381,
1158 0.5140091754526943, 0.5559332704045969,
1159 0.5677443693743304, 0.5478251665295453,
1160 0.4895759664908982, 0.4109641743019171,
1161 0.395727260284338, 0.3778949322004734,
1162 0.3596268271857124, 0.2191250268948948,
1163 0.1632722532153683, 0.2850397806663325,
1164 0.373006008206081, 0.4325325506354207,
1165 0.4670304994474315, 0.4784738764866023,
1166 0.4677122687599041, 0.4341716881061055,
1167 0.388186479011099, 0.3778949322004602,
1168 0.3633362567187364, 0.3464457261905399,
1169 0.2096362321365655, 0.1276782480038148,
1170 0.2184260032478634, 0.2821694983395252,
1171 0.3248315148915535, 0.3499809143811097,
1172 0.3602190632823333, 0.3587654911000799,
1173 0.3534389974779268, 0.3642640090182283,
1174 0.35962682718569, 0.3464457261905295,
1175 0.3260728953424643, 0.180670595355394,
1176 0.07711845915789244, 0.1271121156350963,
1177 0.1610176733857757, 0.1834600412730144,
1178 0.1968826374629443, 0.2031792054737354,
1179 0.2050734291675885, 0.2083227496961245,
1180 0.2179599909279998, 0.2191250268948822,
1181 0.2096362321365551, 0.1806705953553887,
1182 0.1067965550010013])
###########################################################################
####### do all precomputations necessary for MCMC simulations #############
###########################################################################

# Define the mesh width

# Define characteristic function of unit square
def S(x,y) :
    return heaviside(x)*heaviside(y) * (1-heaviside(x-h))*(1-heaviside(y-h));

# Define tent function on the domain [0,2h]x[0,2h]
def phi(x,y) :
    return ((x+h)*(y+h)*S(x+h,y+h) + (h-x)*(h-y)*S(x,y)
            + (x+h)*(h-y)*S(x+h,y) + (h-x)*(y+h)*S(x,y+h))/h**2

# Define conversion function for dof's from 2D to scalar label, and
# its inverse
def ij_to_dof_index(i,j) :
    return 33*j + i

def inv_ij_to_dof_index(k) :
    return [k-33*int(k/33),int(k/33)]


# Construct measurement matrix, M, for measurements
xs = np.arange(1./14,13./14,1./14); #measurement points

M = np.zeros((13,13,33**2));
for k in range(33**2) :
    c = inv_ij_to_dof_index(k)
    for i in range(13) :
        for j in range(13) :
            M[i,j,k] = phi(xs[i]-h*c[0], xs[j]-h*c[1])
M = M.reshape((13**2, 33**2))
M = scipy.sparse.csr_matrix(M);
# Construct local overlap matrix, A_loc, and identity matrix Id
A_loc = np.array([[2./3, -1./6, -1./3, -1./6],
                  [-1./6, 2./3, -1./6, -1./3],
                  [-1./3, -1./6, 2./3, -1./6],
                  [-1./6, -1./3, -1./6, 2./3]])
Id = np.eye(33**2,33**2)

# Locate boundary labels
boundaries = ([ij_to_dof_index(i,0) for i in range(33)] +
              [ij_to_dof_index(i,32) for i in range(33)] +
              [ij_to_dof_index(0,j+1) for j in range(31)] +
              [ij_to_dof_index(32,j+1) for j in range(31)])

# Define RHS of FEM linear system, AU = b
b = np.ones(33**2)*10*h**2
b[boundaries] = 0 #enforce boundary conditions on b
###########################################################################
###################### forward solver function ############################
###########################################################################

def forward_solver(theta) :
    # Initialize matrix A for FEM linear solve, AU = b
    A = np.zeros((33**2,33**2))

    # Build A by summing over contribution from each cell
    for i in range(32) :
        for j in range (32) :
            # Find local coefficient in 8x8 grid
            theta_loc = theta[int(i/4)+int(j/4)*8]

            # Update A by including contribution from cell (i,j)
            dof = [ij_to_dof_index(i,j),
                   ij_to_dof_index(i,j+1),
                   ij_to_dof_index(i+1,j+1),
                   ij_to_dof_index(i+1,j)]
            A[np.ix_(dof,dof)] += theta_loc * A_loc

    # Enforce boundary condition: Zero out rows and columns, then
    # put a one back into the diagonal entries.
    A[boundaries,boundaries] = 1

    # Solve linear equation for coefficients, U, and then
    # get the Z vector by multiplying by the measurement matrix
    u = spsolve(scipy.sparse.csr_matrix(A), b)
###########################################################################
################# compute log probability, log pi #########################
###########################################################################

def log_likelihood(theta) :
    z = forward_solver(theta)
    misfit = z - z_hat
    sig = 0.05 #likelihood standard deviation
    return -np.dot(misfit,misfit)/(2*sig**2)

def log_prior(theta) :
    sig_pr = 2 #prior (log) standard deviation
    return -np.linalg.norm(np.log(theta))**2/(2*sig_pr**2)

def log_posterior(theta) :
    return log_likelihood(theta) + log_prior(theta)
###########################################################################
############# A function to test against known output #####################
###########################################################################

def verify_against_stored_tests() :
    for i in range(10) :
        print ("Verifying against data set", i)

        # Read the input vector
        f_input = open ("../testing/input.{}.txt".format(i), 'r')
        theta = np.fromfile(f_input, count=64, sep=" ")

        # Then compute both the forward solution and its statistics.
        # This is not efficiently written here (it calls the forward
        # solver twice), but we don't care about efficiency here since
        # we are only computing with ten samples
        this_z = forward_solver(theta)
        this_log_likelihood = log_likelihood(theta)
        this_log_prior = log_prior(theta)

        # Then also read the reference output generated by the C++ program:
        f_output_z = open ("../testing/output.{}.z.txt".format(i), 'r')
        f_output_likelihood = open ("../testing/output.{}.loglikelihood.txt".format(i), 'r')
        f_output_prior = open ("../testing/output.{}.logprior.txt".format(i), 'r')

        reference_z = np.fromfile(f_output_z, count=13**2, sep=" ")
        reference_log_likelihood = float(f_output_likelihood.read())
        reference_log_prior = float(f_output_prior.read())

        print (" || z-z_ref || : ",
               np.linalg.norm(this_z - reference_z))
        print (" log likelihood : ",
               "Python value=", this_log_likelihood,
               "(C++ reference value=", reference_log_likelihood,
               ", error=", abs(this_log_likelihood - reference_log_likelihood),
        print (" log prior : ",
               "Python value=", this_log_prior,
               "(C++ reference value=", reference_log_prior,
               ", error=", abs(this_log_prior - reference_log_prior),

def time_forward_solver() :
    for i in range(n_runs) :
        # Create a random vector (with entries between 0 and 1), scale
        # it by a factor of 4, subtract 2, then take the exponential
        # of each entry to get random entries between e^{-2} and e^{2}
        theta = np.exp(np.random.rand(64) * 4 - 2)
        z = forward_solver(theta)
    print ("Time per forward evaluation:", (end-begin)/n_runs)

verify_against_stored_tests()
time_forward_solver()
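
The Python file above only provides the forward solver and the log-probability
functions; the actual Metropolis-Hastings sampling for this benchmark is done by
the C++ program below (and by the Matlab `main.m`). Purely as an illustration of
how the pieces fit together, here is a minimal, hypothetical sampling loop built
on top of the functions above. The multiplicative, log-space proposal and the
acceptance rule mirror those described for the C++ sampler, and the proposal
width 0.09 is the value used there; the function `sample` itself is not part of
the distributed files.

```python
import numpy as np

def sample(theta0, n_samples, log_sigma=0.09, seed=1):
    rng = np.random.default_rng(seed)
    theta = np.array(theta0, dtype=float)
    current_log_posterior = log_posterior(theta)
    chain = [theta.copy()]
    for _ in range(n_samples - 1):
        # Multiplicative (log-space) perturbation keeps all entries positive:
        factors = np.exp(rng.normal(0.0, log_sigma, size=theta.shape))
        trial = theta * factors
        trial_log_posterior = log_posterior(trial)
        # The proposal is not symmetric; its density ratio is prod(trial/theta):
        proposal_ratio = np.prod(trial / theta)
        if (np.exp(trial_log_posterior - current_log_posterior) * proposal_ratio
                >= rng.uniform()):
            theta, current_log_posterior = trial, trial_log_posterior
        chain.append(theta.copy())
    return np.array(chain)
```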
<a name="ann-mcmc-laplace.cc"></a>
<h1>Annotated version of mcmc-laplace.cc</h1>
1376 * /* ---------------------------------------------------------------------
1378 * * Copyright (C) 2019 by the deal.II authors and Wolfgang Bangerth.
1380 * * This file is part of the deal.II library.
1382 * * The deal.II library is free software; you can use it, redistribute
1383 * * it, and/or modify it under the terms of the GNU Lesser General
1384 * * Public License as published by the Free Software Foundation; either
1385 * * version 2.1 of the License, or (at your option) any later version.
1386 * * The full text of the license can be found in the file LICENSE.md at
1387 * * the top level directory of deal.II.
1389 * * ---------------------------------------------------------------------
1392 * * Author: Wolfgang Bangerth, Colorado State University, 2019.
1397 * #include <deal.II/base/timer.h>
1398 * #include <deal.II/base/multithread_info.h>
1399 * #include <deal.II/grid/tria.h>
1400 * #include <deal.II/dofs/dof_handler.h>
1401 * #include <deal.II/grid/grid_generator.h>
1402 * #include <deal.II/grid/tria_accessor.h>
1403 * #include <deal.II/grid/tria_iterator.h>
1404 * #include <deal.II/dofs/dof_accessor.h>
1405 * #include <deal.II/fe/fe_q.h>
1406 * #include <deal.II/dofs/dof_tools.h>
1407 * #include <deal.II/fe/fe_values.h>
1408 * #include <deal.II/base/quadrature_lib.h>
1409 * #include <deal.II/base/function.h>
1410 * #include <deal.II/numerics/vector_tools.h>
1411 * #include <deal.II/numerics/matrix_tools.h>
1412 * #include <deal.II/lac/vector.h>
1413 * #include <deal.II/lac/full_matrix.h>
1414 * #include <deal.II/lac/sparse_matrix.h>
1415 * #include <deal.II/lac/dynamic_sparsity_pattern.h>
1416 * #include <deal.II/lac/solver_cg.h>
1417 * #include <deal.II/lac/precondition.h>
1418 * #include <deal.II/lac/sparse_ilu.h>
1420 * #include <deal.II/numerics/data_out.h>
1422 * #include <fstream>
1423 * #include <iostream>
1426 * #include <deal.II/base/logstream.h>
1428 * using namespace dealii;
1433 * The following is a namespace in which we define the solver of the PDE.
1434 * The main class implements an abstract `Interface` class declared at
1435 * the top, which provides for an `evaluate()` function that, given
1436 * a coefficient vector, solves the PDE discussed in the Readme file
1437 * and then evaluates the solution at the 169 mentioned points.
1441 * The solver follows the basic layout of @ref step_4 "step-4", though it precomputes
1442 * a number of things in the `setup_system()` function, such as the
1443 * evaluation of the matrix that corresponds to the point evaluations,
1444 * as well as the local contributions to matrix and right hand side.
1448 * Rather than commenting on everything in detail, in the following
1449 * we will only document those things that are not already clear from
1450 * @ref step_4 "step-4" and a small number of other tutorial programs.
1453 * namespace ForwardSimulator
1458 * virtual Vector<double> evaluate(const Vector<double> &coefficients) = 0;
1460 * virtual ~Interface() = default;
1465 * template <int dim>
1466 * class PoissonSolver : public Interface
1469 * PoissonSolver(const unsigned int global_refinements,
1470 * const unsigned int fe_degree,
1471 * const std::string &dataset_name);
1472 * virtual Vector<double>
1473 * evaluate(const Vector<double> &coefficients) override;
1476 * void make_grid(const unsigned int global_refinements);
1477 * void setup_system();
1478 * void assemble_system(const Vector<double> &coefficients);
1480 * void output_results(const Vector<double> &coefficients) const;
1482 * Triangulation<dim> triangulation;
1484 * DoFHandler<dim> dof_handler;
1486 * FullMatrix<double> cell_matrix;
1487 * Vector<double> cell_rhs;
1488 * std::map<types::global_dof_index,double> boundary_values;
1490 * SparsityPattern sparsity_pattern;
1491 * SparseMatrix<double> system_matrix;
1493 * Vector<double> solution;
1494 * Vector<double> system_rhs;
1496 * std::vector<Point<dim>> measurement_points;
1498 * SparsityPattern measurement_sparsity;
1499 * SparseMatrix<double> measurement_matrix;
1501 * TimerOutput timer;
1502 * unsigned int nth_evaluation;
1504 * const std::string &dataset_name;
1509 * template <int dim>
1510 * PoissonSolver<dim>::PoissonSolver(const unsigned int global_refinements,
1511 * const unsigned int fe_degree,
1512 * const std::string &dataset_name)
1514 * , dof_handler(triangulation)
1515 * , timer(std::cout, TimerOutput::summary, TimerOutput::cpu_times)
1516 * , nth_evaluation(0)
1517 * , dataset_name(dataset_name)
1519 * make_grid(global_refinements);
1525 * template <int dim>
1526 * void PoissonSolver<dim>::make_grid(const unsigned int global_refinements)
1528 * Assert(global_refinements >= 3,
1529 * ExcMessage("This program makes the assumption that the mesh for the "
1530 * "solution of the PDE is at least as fine as the one used "
1531 * "in the definition of the coefficient."));
1532 * GridGenerator::hyper_cube(triangulation, 0, 1);
1533 * triangulation.refine_global(global_refinements);
1535 * std::cout << " Number of active cells: " << triangulation.n_active_cells()
1541 * template <int dim>
1542 * void PoissonSolver<dim>::setup_system()
1546 * First define the finite element space:
1549 * dof_handler.distribute_dofs(fe);
1551 * std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
1556 * Then set up the main data structures that will hold the discrete problem:
1560 * DynamicSparsityPattern dsp(dof_handler.n_dofs());
1561 * DoFTools::make_sparsity_pattern(dof_handler, dsp);
1562 * sparsity_pattern.copy_from(dsp);
1564 * system_matrix.reinit(sparsity_pattern);
1566 * solution.reinit(dof_handler.n_dofs());
1567 * system_rhs.reinit(dof_handler.n_dofs());
1572 * And then define the tools to do point evaluation. We choose
1573 * a set of 13x13 points evenly distributed across the domain:
1577 * const unsigned int n_points_per_direction = 13;
1578 * const double dx = 1. / (n_points_per_direction + 1);
1580 * for (unsigned int x = 1; x <= n_points_per_direction; ++x)
1581 * for (unsigned int y = 1; y <= n_points_per_direction; ++y)
1582 * measurement_points.emplace_back(x * dx, y * dx);
1586 * First build a full matrix of the evaluation process. We do this
1587 * even though the matrix is really sparse -- but we don't know
1588 * which entries are
nonzero. Later, the `copy_from()` function
1589 * calls build a sparsity pattern and a sparse
matrix from
1593 *
Vector<double> weights(dof_handler.n_dofs());
1595 * n_points_per_direction,
1596 * dof_handler.n_dofs());
1598 *
for (
unsigned int index = 0;
index < measurement_points.size(); ++
index)
1601 * measurement_points[index],
1603 *
for (
unsigned int i = 0; i < dof_handler.n_dofs(); ++i)
1604 * full_measurement_matrix(index, i) = weights(i);
1607 * measurement_sparsity.copy_from(full_measurement_matrix);
1608 * measurement_matrix.reinit(measurement_sparsity);
1609 * measurement_matrix.copy_from(full_measurement_matrix);
1614 * Next build the mapping from cell to the
index in the 64-element
1615 * coefficient vector:
1618 *
for (
const auto &cell :
triangulation.active_cell_iterators())
1621 *
const unsigned int j = std::floor(cell->center()[1] * 8);
1623 *
const unsigned int index = i + 8 * j;
1625 * cell->set_user_index(index);
1630 * Finally prebuild the building blocks of the linear system as
1631 * discussed in the Readme file :
1635 *
const unsigned int dofs_per_cell = fe.dofs_per_cell;
1637 *
cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
1638 * cell_rhs.reinit(dofs_per_cell);
1640 *
const QGauss<dim> quadrature_formula(fe.degree+1);
1641 *
const unsigned int n_q_points = quadrature_formula.size();
1644 * quadrature_formula,
1648 * fe_values.reinit(dof_handler.begin_active());
1650 *
for (
unsigned int q_index = 0; q_index < n_q_points; ++q_index)
1651 *
for (
unsigned int i = 0; i < dofs_per_cell; ++i)
1653 *
for (
unsigned int j = 0; j < dofs_per_cell; ++j)
1655 * (fe_values.shape_grad(i, q_index) *
1656 * fe_values.shape_grad(j, q_index) *
1657 * fe_values.JxW(q_index));
1659 * cell_rhs(i) += (fe_values.shape_value(i, q_index) *
1661 * fe_values.JxW(q_index));
 * Given that we have pre-built the matrix and right hand side contributions
 * for a (representative) cell, the function that assembles the matrix is
 * pretty short and straightforward:
1680 *
template <
int dim>
1681 *
void PoissonSolver<dim>::assemble_system(
const Vector<double> &coefficients)
1683 *
Assert(coefficients.size() == 64, ExcInternalError());
1685 * system_matrix = 0;
1688 *
const unsigned int dofs_per_cell = fe.dofs_per_cell;
1690 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1692 *
for (
const auto &cell : dof_handler.active_cell_iterators())
1694 * const double coefficient = coefficients(cell->user_index());
1696 * cell->get_dof_indices(local_dof_indices);
1697 *
for (
unsigned int i = 0; i < dofs_per_cell; ++i)
1699 *
for (
unsigned int j = 0; j < dofs_per_cell; ++j)
1700 * system_matrix.add(local_dof_indices[i],
1701 * local_dof_indices[j],
1704 * system_rhs(local_dof_indices[i]) += cell_rhs(i);
 * The same is true for the function that solves the linear system:
1720 *
template <
int dim>
1721 *
void PoissonSolver<dim>::solve()
1727 * solver.solve(system_matrix, solution, system_rhs, ilu);
 * The following function outputs graphical data for the most recently
 * used coefficient and corresponding solution of the PDE. Collecting
 * the coefficient values requires translating from the 64-element
 * coefficient vector and the cells that correspond to each of these
 * elements. The rest remains pretty obvious, with the exception
 * of including the number of the current sample into the file name.
1742 *
template <
int dim>
1744 * PoissonSolver<dim>::output_results(
const Vector<double> &coefficients)
const
1747 *
for (
const auto &cell :
triangulation.active_cell_iterators())
1748 * coefficient_values[cell->active_cell_index()] =
1749 * coefficients(cell->user_index());
1754 * data_out.add_data_vector(solution,
"solution");
1755 * data_out.add_data_vector(coefficient_values,
"coefficient");
1757 * data_out.build_patches();
1759 * std::ofstream output(
"solution-" +
1761 * data_out.write_vtu(output);
 * The following is the main function of this class: Given a coefficient
 * vector, it assembles the linear system, solves it, and then evaluates
 * the solution at the measurement points by applying the measurement
 * matrix to the solution vector. That vector of "measured" values is
 * what the function then returns.
 *
 * The function will also output the solution in a graphical format
 * if you un-comment the corresponding statement in the third
 * code block. However, you may end up with a very large amount
 * of data: This code is producing, at the minimum, 10,000 samples
 * and creating output for each one of them is surely more data
 * than you ever want to see!
 *
 * At the end of the function, we output some timing information
 * every 10,000 samples.
1789 *
template <
int dim>
1791 * PoissonSolver<dim>::evaluate(
const Vector<double> &coefficients)
1795 * assemble_system(coefficients);
1807 * measurement_matrix.vmult(measurements, solution);
1808 *
Assert(measurements.size() == measurement_points.size(),
1809 * ExcInternalError());
1815 *
if (nth_evaluation % 10000 == 0)
1816 * timer.print_summary();
1818 *
return measurements;
 * The following namespaces define the statistical properties of the Bayesian
 * inverse problem. The first is about the definition of the measurement
 * statistics (the "likelihood"), which we here assume to be a normal
 * distribution @f$N(\mu,\sigma I)@f$ with mean value @f$\mu@f$ given by the
 * actual measurement vector (passed as an argument to the constructor
 * of the `Gaussian` class) and standard deviation @f$\sigma@f$.
 *
 * For reasons of numerical accuracy, it is useful to not return the
 * actual likelihood, but its logarithm. This is because these
 * values can be very small, occasionally on the order of @f$e^{-100}@f$,
 * for which it becomes very difficult to compute accurate numbers.
 *
 *   namespace LogLikelihood
 *     virtual double log_likelihood(const Vector<double> &x) const = 0;
 *     virtual ~Interface() = default;
1852 *
class Gaussian :
public Interface
1857 *
virtual double log_likelihood(
const Vector<double> &x)
const override;
1861 *
const double sigma;
1864 * Gaussian::Gaussian(
const Vector<double> &mu,
const double sigma)
1875 *
return -x_minus_mu.
norm_sqr() / (2 * sigma * sigma);
 * Next up is the "prior" imposed on the coefficients. We assume
 * that the logarithms of the entries of the coefficient vector
 * are all distributed as a Gaussian with given mean and standard
 * deviation. If the logarithms of the coefficients are normally
 * distributed, then this implies in particular that the coefficients
 * can only be positive, which is a useful property to ensure the
 * well-posedness of the forward problem.
 *
 * For the same reasons as for the likelihood above, the interface
 * for the prior asks for returning the *logarithm* of the prior,
 * instead of the prior probability itself.
1897 *
namespace LogPrior
1904 *
virtual ~Interface() =
default;
1908 *
class LogGaussian :
public Interface
1911 * LogGaussian(
const double mu,
const double sigma);
1913 *
virtual double log_prior(
const Vector<double> &x)
const override;
1917 *
const double sigma;
1920 * LogGaussian::LogGaussian(
const double mu,
const double sigma)
1928 *
double log_of_product = 0;
1930 *
for (
const auto &el : x)
1932 * -(
std::
log(el) - mu) * (
std::
log(el) - mu) / (2 * sigma * sigma);
1934 *
return log_of_product;
 * The Metropolis-Hastings algorithm requires a method to create a new sample
 * given a previous sample. We do this by perturbing the current (coefficient)
 * sample randomly using a Gaussian distribution centered at the current
 * sample. To ensure that the samples' individual entries all remain
 * positive, we use a Gaussian distribution in logarithm space -- in other
 * words, instead of *adding* a small perturbation with mean value zero,
 * we *multiply* the entries of the current sample by a factor that
 * is the exponential of a random number with mean zero. (Because the
 * exponential of zero is one, this means that the most likely factors
 * to multiply the existing sample entries by are close to one. And
 * because the exponential of a number is always positive, we never
 * get negative samples this way.)
 *
 * But the Metropolis-Hastings sampler doesn't just need a perturbed
 * sample @f$y@f$ location given the current sample location @f$x@f$. It also
 * needs to know the ratio of the probability of reaching @f$y@f$ from
 * @f$x@f$, divided by the probability of reaching @f$x@f$ from @f$y@f$. If we
 * were to use a symmetric proposal distribution (e.g., a Gaussian
 * distribution centered at @f$x@f$ with a width independent of @f$x@f$), then
 * these two probabilities would be the same, and the ratio one. But
 * that's not the case for the Gaussian in log space. It's not
 * terribly difficult to verify that in that case, for a single
 * component the ratio of these probabilities is @f$y_i/x_i@f$, and
 * consequently for all components of the vector together, the
 * probability is the product of these ratios.
1971 *
namespace ProposalGenerator
1977 * std::pair<Vector<double>,
double>
1980 *
virtual ~Interface() =
default;
1984 *
class LogGaussian :
public Interface
1987 * LogGaussian(
const unsigned int random_seed,
const double log_sigma);
1990 * std::pair<Vector<double>,
double>
1994 *
const double log_sigma;
1995 *
mutable std::mt19937 random_number_generator;
2000 * LogGaussian::LogGaussian(
const unsigned int random_seed,
2001 *
const double log_sigma)
2002 * : log_sigma(log_sigma)
2004 * random_number_generator.seed(random_seed);
2008 * std::pair<Vector<double>,
double>
2009 * LogGaussian::perturb(
const Vector<double> ¤t_sample)
const
2012 *
double product_of_ratios = 1;
2013 *
for (
auto &x : new_sample)
2015 * const double rnd =
std::normal_distribution<>(0, log_sigma)(random_number_generator);
2016 *
const double exp_rnd =
std::exp(rnd);
2018 * product_of_ratios *= exp_rnd;
2021 *
return {new_sample, product_of_ratios};
 * The last main class is the Metropolis-Hastings sampler itself.
 * If you understand the algorithm behind this method, then
 * the following implementation should not be too difficult
 * to read. The only thing of relevance is that descriptions
 * of the algorithm typically ask whether the *ratio* of two
 * probabilities (the "posterior" probabilities of the current
 * and the previous samples, where the "posterior" is the product of the
 * likelihood and the prior probability) is larger or smaller than a
 * randomly drawn number. But because our interfaces return the
 * *logarithms* of these probabilities, we now need to take
 * the ratio of appropriate exponentials -- which is made numerically
 * more stable by considering the exponential of the difference of
 * the log probabilities. The only other slight complication is that
 * we need to multiply this ratio by the ratio of proposal probabilities
 * since we use a non-symmetric proposal distribution.
 *
 * Finally, we note that the output is generated with 7 digits of
 * accuracy. (The C++ default is 6 digits.) We do this because,
 * as shown in the paper, we can determine the mean value of the
 * probability distribution we are sampling here to at least six
 * digits of accuracy, and do not want to be limited by the precision
 * of the output.
2057 *
class MetropolisHastings
2060 * MetropolisHastings(ForwardSimulator::Interface & simulator,
2061 *
const LogLikelihood::Interface & likelihood,
2062 *
const LogPrior::Interface & prior,
2063 *
const ProposalGenerator::Interface &proposal_generator,
2064 *
const unsigned int random_seed,
2065 *
const std::string & dataset_name);
2068 *
const unsigned int n_samples);
2071 * ForwardSimulator::Interface & simulator;
2072 *
const LogLikelihood::Interface & likelihood;
2073 *
const LogPrior::Interface & prior;
2074 *
const ProposalGenerator::Interface &proposal_generator;
2076 * std::mt19937 random_number_generator;
2078 *
unsigned int sample_number;
2079 *
unsigned int accepted_sample_number;
2081 * std::ofstream output_file;
2084 *
const double current_log_likelihood);
2088 * MetropolisHastings::MetropolisHastings(
2089 * ForwardSimulator::Interface & simulator,
2090 *
const LogLikelihood::Interface & likelihood,
2091 *
const LogPrior::Interface & prior,
2092 *
const ProposalGenerator::Interface &proposal_generator,
2093 *
const unsigned int random_seed,
2094 *
const std::string & dataset_name)
2095 * : simulator(simulator)
2096 * , likelihood(likelihood)
2098 * , proposal_generator(proposal_generator)
2099 * , sample_number(0)
2100 * , accepted_sample_number(0)
2102 * output_file.open(
"samples-" + dataset_name +
".txt");
2103 * output_file.precision(7);
2105 * random_number_generator.seed(random_seed);
2109 *
void MetropolisHastings::sample(
const Vector<double> &starting_guess,
2110 *
const unsigned int n_samples)
2112 * std::uniform_real_distribution<> uniform_distribution(0, 1);
2115 *
double current_log_posterior =
2116 * (likelihood.log_likelihood(simulator.evaluate(current_sample)) +
2117 * prior.log_prior(current_sample));
2120 * ++accepted_sample_number;
2121 * write_sample(current_sample, current_log_posterior);
2123 *
for (
unsigned int k = 1; k < n_samples; ++k, ++sample_number)
2125 * std::pair<Vector<double>,
double>
2126 * perturbation = proposal_generator.perturb(current_sample);
2127 *
const Vector<double> trial_sample = std::move (perturbation.first);
2128 *
const double perturbation_probability_ratio = perturbation.second;
2130 *
const double trial_log_posterior =
2131 * (likelihood.log_likelihood(simulator.evaluate(trial_sample)) +
2132 * prior.log_prior(trial_sample));
2134 *
if (
std::exp(trial_log_posterior - current_log_posterior) * perturbation_probability_ratio
2136 * uniform_distribution(random_number_generator))
2138 * current_sample = trial_sample;
2139 * current_log_posterior = trial_log_posterior;
2141 * ++accepted_sample_number;
2144 * write_sample(current_sample, current_log_posterior);
2150 *
void MetropolisHastings::write_sample(
const Vector<double> ¤t_sample,
2151 *
const double current_log_posterior)
2153 * output_file << current_log_posterior <<
'\t';
2154 * output_file << accepted_sample_number <<
'\t';
2155 *
for (
const auto &x : current_sample)
2156 * output_file << x <<
' ';
2157 * output_file <<
'\n';
2159 * output_file.flush();
 * The final function is `main()`, which simply puts all of these pieces
 * together into one. The "exact solution", i.e., the "measurement values"
 * we use for this program are tabulated to make it easier for other
 * people to use in their own implementations of this benchmark. These
 * values were created using the same main class above, but using 8 mesh
 * refinements and using a Q3 element -- i.e., using a much more accurate
 * method than the one we use in the forward simulator for generating
 * samples below (which uses 5 global mesh refinement steps and a Q1
 * element). If you wanted to regenerate this set of numbers, then
 * the following code snippet would do that:
 * <div class=CodeFragmentInTutorialComment>
 *
 *   for (auto &el : exact_coefficients)
 *   exact_coefficients(9) = exact_coefficients(10) = exact_coefficients(17) =
 *     exact_coefficients(18) = 0.1;
 *   exact_coefficients(45) = exact_coefficients(46) = exact_coefficients(53) =
 *     exact_coefficients(54) = 10.;
 *
 *   ForwardSimulator::PoissonSolver<2>( 8,
 *     .evaluate(exact_coefficients);
 *
 *   const bool testing = true;
 *
 * Run with one thread, so as to not step on other processes
 * doing the same at the same time. It turns out that the problem
 * is also so small that running with more than one thread
 * *increases* the runtime.
 *
 *   const unsigned int random_seed = (testing ? 1U : std::random_device()());
2216 * { 0.06076511762259369, 0.09601910120848481,
2217 * 0.1238852517838584, 0.1495184117375201,
2218 * 0.1841596127549784, 0.2174525028261122,
2219 * 0.2250996160898698, 0.2197954769002993,
2220 * 0.2074695698370926, 0.1889996477663016,
2221 * 0.1632722532153726, 0.1276782480038186,
2222 * 0.07711845915789312, 0.09601910120848552,
2223 * 0.2000589533367983, 0.3385592591951766,
2224 * 0.3934300024647806, 0.4040223892461541,
2225 * 0.4122329537843092, 0.4100480091545554,
2226 * 0.3949151637189968, 0.3697873264791232,
2227 * 0.33401826235924, 0.2850397806663382,
2228 * 0.2184260032478671, 0.1271121156350957,
2229 * 0.1238852517838611, 0.3385592591951819,
2230 * 0.7119285162766475, 0.8175712861756428,
2231 * 0.6836254116578105, 0.5779452419831157,
2232 * 0.5555615956136897, 0.5285181561736719,
2233 * 0.491439702849224, 0.4409367494853282,
2234 * 0.3730060082060772, 0.2821694983395214,
2235 * 0.1610176733857739, 0.1495184117375257,
2236 * 0.3934300024647929, 0.8175712861756562,
2237 * 0.9439154625527653, 0.8015904115095128,
2238 * 0.6859683749254024, 0.6561235366960599,
2239 * 0.6213197201867315, 0.5753611315000049,
2240 * 0.5140091754526823, 0.4325325506354165,
2241 * 0.3248315148915482, 0.1834600412730086,
2242 * 0.1841596127549917, 0.4040223892461832,
2243 * 0.6836254116578439, 0.8015904115095396,
2244 * 0.7870119561144977, 0.7373108331395808,
2245 * 0.7116558878070463, 0.6745179049094283,
2246 * 0.6235300574156917, 0.5559332704045935,
2247 * 0.4670304994474178, 0.3499809143811,
2248 * 0.19688263746294, 0.2174525028261253,
2249 * 0.4122329537843404, 0.5779452419831566,
2250 * 0.6859683749254372, 0.7373108331396063,
2251 * 0.7458811983178246, 0.7278968022406559,
2252 * 0.6904793535357751, 0.6369176452710288,
2253 * 0.5677443693743215, 0.4784738764865867,
2254 * 0.3602190632823262, 0.2031792054737325,
2255 * 0.2250996160898818, 0.4100480091545787,
2256 * 0.5555615956137137, 0.6561235366960938,
2257 * 0.7116558878070715, 0.727896802240657,
2258 * 0.7121928678670187, 0.6712187391428729,
2259 * 0.6139157775591492, 0.5478251665295381,
2260 * 0.4677122687599031, 0.3587654911000848,
2261 * 0.2050734291675918, 0.2197954769003094,
2262 * 0.3949151637190157, 0.5285181561736911,
2263 * 0.6213197201867471, 0.6745179049094407,
2264 * 0.690479353535786, 0.6712187391428787,
2265 * 0.6178408289359514, 0.5453605027237883,
2266 * 0.489575966490909, 0.4341716881061278,
2267 * 0.3534389974779456, 0.2083227496961347,
2268 * 0.207469569837099, 0.3697873264791366,
2269 * 0.4914397028492412, 0.5753611315000203,
2270 * 0.6235300574157017, 0.6369176452710497,
2271 * 0.6139157775591579, 0.5453605027237935,
2272 * 0.4336604929612851, 0.4109641743019312,
2273 * 0.3881864790111245, 0.3642640090182592,
2274 * 0.2179599909280145, 0.1889996477663011,
2275 * 0.3340182623592461, 0.4409367494853381,
2276 * 0.5140091754526943, 0.5559332704045969,
2277 * 0.5677443693743304, 0.5478251665295453,
2278 * 0.4895759664908982, 0.4109641743019171,
2279 * 0.395727260284338, 0.3778949322004734,
2280 * 0.3596268271857124, 0.2191250268948948,
2281 * 0.1632722532153683, 0.2850397806663325,
2282 * 0.373006008206081, 0.4325325506354207,
2283 * 0.4670304994474315, 0.4784738764866023,
2284 * 0.4677122687599041, 0.4341716881061055,
2285 * 0.388186479011099, 0.3778949322004602,
2286 * 0.3633362567187364, 0.3464457261905399,
2287 * 0.2096362321365655, 0.1276782480038148,
2288 * 0.2184260032478634, 0.2821694983395252,
2289 * 0.3248315148915535, 0.3499809143811097,
2290 * 0.3602190632823333, 0.3587654911000799,
2291 * 0.3534389974779268, 0.3642640090182283,
2292 * 0.35962682718569, 0.3464457261905295,
2293 * 0.3260728953424643, 0.180670595355394,
2294 * 0.07711845915789244, 0.1271121156350963,
2295 * 0.1610176733857757, 0.1834600412730144,
2296 * 0.1968826374629443, 0.2031792054737354,
2297 * 0.2050734291675885, 0.2083227496961245,
2298 * 0.2179599909279998, 0.2191250268948822,
2299 * 0.2096362321365551, 0.1806705953553887,
2300 * 0.1067965550010013 });
 * Now run the forward simulator for samples:
 *
 *   ForwardSimulator::PoissonSolver<2> laplace_problem(
 *   LogLikelihood::Gaussian log_likelihood(exact_solution, 0.05);
 *   LogPrior::LogGaussian log_prior(0, 2);
 *   ProposalGenerator::LogGaussian proposal_generator(
 *     random_seed, 0.09);
 *   Sampler::MetropolisHastings sampler(laplace_problem,
 *                                       proposal_generator,
 *
 *   for (auto &el : starting_coefficients)
 *
 *   sampler.sample(starting_coefficients,
 *                  (testing ? 250 * 40