Reference documentation for deal.II version 9.4.1
MCMC-Laplace.h
484 /* global_refinements = */ 5,
485 /* fe_degree = */ 1,
486 "");
487 LogLikelihood::Gaussian log_likelihood(exact_solution, 0.05);
488 LogPrior::LogGaussian log_prior(0, 2);
489
490 const unsigned int n_theta = 64;
491 for (unsigned int test=0; test<10; ++test)
492 {
493 std::cout << "Generating output for test " << test << std::endl;
494
495 // For each test, read the input file...
496 std::ifstream test_input ("../testing/input." + std::to_string(test) + ".txt");
497 Assert (test_input, ExcIO());
498
499 Vector<double> theta(n_theta);
500 for (unsigned int i=0; i<n_theta; ++i)
501 test_input >> theta[i];
502
503 // ...run it through the forward simulator to get the
504 // z vector (which we then output to a file)...
505 const Vector<double> z = laplace_problem.evaluate(theta);
506 std::ofstream test_output_z ("output." + std::to_string(test) + ".z.txt");
507 z.print(test_output_z, 16);
508
509 // ...and then also evaluate prior and likelihood for these
510 // theta vectors:
511 std::ofstream test_output_likelihood ("output." + std::to_string(test) + ".likelihood.txt");
512 test_output_likelihood.precision(12);
513 test_output_likelihood << log_likelihood.log_likelihood(z) << std::endl;
514
515 std::ofstream test_output_prior ("output." + std::to_string(test) + ".prior.txt");
516 test_output_prior.precision(12);
517 test_output_prior << log_prior.log_prior(theta) << std::endl;
518 }
519}
@endcode

This code reads in each of the input files (assuming that the executable is located in a
build directory parallel to the `testing/` directory) and outputs its results into the
current directory. The outputs you get from a modified program should then be compared
against the ones stored in the `testing/` directory. They should match to several digits.
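
If you want to automate this comparison, a small script along the following lines
would do. This is only an illustrative sketch: the helper name, the tolerance, and
the use of NumPy are our choices and not part of the benchmark; it assumes it is
run from the build directory mentioned above.
@code{.py}
import numpy as np

def compare(test, kind, tol=1e-8):
    # Compare one regenerated output file against the stored reference
    # in ../testing/, using the file naming scheme shown above.
    mine = np.atleast_1d(np.loadtxt("output.{}.{}.txt".format(test, kind)))
    ref  = np.atleast_1d(np.loadtxt("../testing/output.{}.{}.txt".format(test, kind)))
    diff = np.max(np.abs(mine - ref))
    print("test {}, {}: max abs difference = {}".format(test, kind, diff))
    return diff < tol

results = [compare(test, kind)
           for test in range(10)
           for kind in ("z", "likelihood", "prior")]
print("all outputs match:", all(results))
@endcode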


An alternative implementation in Matlab
---------------------------------------

To facilitate experiments, this directory also contains alternative
implementations of the benchmark. The first one was written by David
Aristoff in Matlab and can be found in the `Matlab/` directory. As is
common in Matlab programs, each function is provided in its own
file. We have verified that the program generates the same results (to
12 or more digits) as the C++ program, using the tests mentioned in
the previous section.


An alternative implementation in Python
---------------------------------------

Another alternative, written by Wolfgang Bangerth, can be found in the
`Python/` directory and uses Python 3. As is common for Python
programs, the whole program is provided in one file. As for the Matlab
version, we have verified that the program generates the same results
(to 12 or more digits) as the C++ program, using the tests mentioned
in the previous section. In fact, if you just execute the program
as-is, it runs these diagnostics and outputs the errors.

This Python version is essentially a literal translation of the Matlab
code. It is not nearly as efficient (taking around 5 times as much
time for each function evaluation) and could probably be optimized
substantially if desired. A good starting point is the insertion of
the local elements into the global matrix in the line
@code{.py}
  A[np.ix_(dof,dof)] += theta_loc * A_loc
@endcode
that takes up a substantial fraction of the overall run time.
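
One possible direction for such an optimization -- sketched here only as an
illustration, not something the distributed Python code actually does, and not
benchmarked -- is to precompute one sparse matrix per coefficient block once, so
that assembling the matrix for a given `theta` reduces to a weighted sum of 64
sparse matrices instead of 1024 dense scatter-adds. The sketch reuses
`ij_to_dof_index` and `A_loc` from the program shown below.
@code{.py}
import numpy as np
import scipy.sparse

def build_block_matrices(ij_to_dof_index, A_loc, n_dofs=33**2):
    # One sparse matrix per entry of theta; block k covers the 4x4 patch of
    # cells whose coefficient index int(i/4)+int(j/4)*8 equals k.
    blocks = []
    for k in range(64):
        ci, cj = k % 8, k // 8
        rows, cols, vals = [], [], []
        for i in range(4 * ci, 4 * ci + 4):
            for j in range(4 * cj, 4 * cj + 4):
                dof = [ij_to_dof_index(i, j), ij_to_dof_index(i, j + 1),
                       ij_to_dof_index(i + 1, j + 1), ij_to_dof_index(i + 1, j)]
                for p in range(4):
                    for q in range(4):
                        rows.append(dof[p])
                        cols.append(dof[q])
                        vals.append(A_loc[p, q])
        # duplicate (row, col) pairs are summed up by the sparse constructor
        blocks.append(scipy.sparse.csr_matrix((vals, (rows, cols)),
                                              shape=(n_dofs, n_dofs)))
    return blocks

def assemble(theta, blocks):
    # Weighted sum of the precomputed block matrices; boundary rows and
    # columns still need the same treatment as in forward_solver().
    A = theta[0] * blocks[0]
    for k in range(1, 64):
        A = A + theta[k] * blocks[k]
    return A
@endcode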


<a name="ann-Matlab/exact_values.m"></a>
<h1>Annotated version of Matlab/exact_values.m</h1>
@code{.m}
566%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
567%%%%%%%%%%%% list of "exact" measurement values, z_hat %%%%%%%%%%%%%%%%%%%%
568%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
569
570z_hat = [0.06076511762259369;
571 0.09601910120848481;
572 0.1238852517838584;
573 0.1495184117375201;
574 0.1841596127549784;
575 0.2174525028261122;
576 0.2250996160898698;
577 0.2197954769002993;
578 0.2074695698370926;
579 0.1889996477663016;
580 0.1632722532153726;
581 0.1276782480038186;
582 0.07711845915789312;
583 0.09601910120848552;
584 0.2000589533367983;
585 0.3385592591951766;
586 0.3934300024647806;
587 0.4040223892461541;
588 0.4122329537843092;
589 0.4100480091545554;
590 0.3949151637189968;
591 0.3697873264791232;
592 0.33401826235924;
593 0.2850397806663382;
594 0.2184260032478671;
595 0.1271121156350957;
596 0.1238852517838611;
597 0.3385592591951819;
598 0.7119285162766475;
599 0.8175712861756428;
600 0.6836254116578105;
601 0.5779452419831157;
602 0.5555615956136897;
603 0.5285181561736719;
604 0.491439702849224;
605 0.4409367494853282;
606 0.3730060082060772;
607 0.2821694983395214;
608 0.1610176733857739;
609 0.1495184117375257;
610 0.3934300024647929;
611 0.8175712861756562;
612 0.9439154625527653;
613 0.8015904115095128;
614 0.6859683749254024;
615 0.6561235366960599;
616 0.6213197201867315;
617 0.5753611315000049;
618 0.5140091754526823;
619 0.4325325506354165;
620 0.3248315148915482;
621 0.1834600412730086;
622 0.1841596127549917;
623 0.4040223892461832;
624 0.6836254116578439;
625 0.8015904115095396;
626 0.7870119561144977;
627 0.7373108331395808;
628 0.7116558878070463;
629 0.6745179049094283;
630 0.6235300574156917;
631 0.5559332704045935;
632 0.4670304994474178;
633 0.3499809143811;
634 0.19688263746294;
635 0.2174525028261253;
636 0.4122329537843404;
637 0.5779452419831566;
638 0.6859683749254372;
639 0.7373108331396063;
640 0.7458811983178246;
641 0.7278968022406559;
642 0.6904793535357751;
643 0.6369176452710288;
644 0.5677443693743215;
645 0.4784738764865867;
646 0.3602190632823262;
647 0.2031792054737325;
648 0.2250996160898818;
649 0.4100480091545787;
650 0.5555615956137137;
651 0.6561235366960938;
652 0.7116558878070715;
653 0.727896802240657;
654 0.7121928678670187;
655 0.6712187391428729;
656 0.6139157775591492;
657 0.5478251665295381;
658 0.4677122687599031;
659 0.3587654911000848;
660 0.2050734291675918;
661 0.2197954769003094;
662 0.3949151637190157;
663 0.5285181561736911;
664 0.6213197201867471;
665 0.6745179049094407;
666 0.690479353535786;
667 0.6712187391428787;
668 0.6178408289359514;
669 0.5453605027237883;
670 0.489575966490909;
671 0.4341716881061278;
672 0.3534389974779456;
673 0.2083227496961347;
674 0.207469569837099;
675 0.3697873264791366;
676 0.4914397028492412;
677 0.5753611315000203;
678 0.6235300574157017;
679 0.6369176452710497;
680 0.6139157775591579;
681 0.5453605027237935;
682 0.4336604929612851;
683 0.4109641743019312;
684 0.3881864790111245;
685 0.3642640090182592;
686 0.2179599909280145;
687 0.1889996477663011;
688 0.3340182623592461;
689 0.4409367494853381;
690 0.5140091754526943;
691 0.5559332704045969;
692 0.5677443693743304;
693 0.5478251665295453;
694 0.4895759664908982;
695 0.4109641743019171;
696 0.395727260284338;
697 0.3778949322004734;
698 0.3596268271857124;
699 0.2191250268948948;
700 0.1632722532153683;
701 0.2850397806663325;
702 0.373006008206081;
703 0.4325325506354207;
704 0.4670304994474315;
705 0.4784738764866023;
706 0.4677122687599041;
707 0.4341716881061055;
708 0.388186479011099;
709 0.3778949322004602;
710 0.3633362567187364;
711 0.3464457261905399;
712 0.2096362321365655;
713 0.1276782480038148;
714 0.2184260032478634;
715 0.2821694983395252;
716 0.3248315148915535;
717 0.3499809143811097;
718 0.3602190632823333;
719 0.3587654911000799;
720 0.3534389974779268;
721 0.3642640090182283;
722 0.35962682718569;
723 0.3464457261905295;
724 0.3260728953424643;
725 0.180670595355394;
726 0.07711845915789244;
727 0.1271121156350963;
728 0.1610176733857757;
729 0.1834600412730144;
730 0.1968826374629443;
731 0.2031792054737354;
732 0.2050734291675885;
733 0.2083227496961245;
734 0.2179599909279998;
735 0.2191250268948822;
736 0.2096362321365551;
737 0.1806705953553887;
738 0.1067965550010013];
@endcode


<a name="ann-Matlab/forward_solver.m"></a>
<h1>Annotated version of Matlab/forward_solver.m</h1>
@code{.m}
745%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
746%%%%%%%%%%%%%%%%%%%%%% forward solver function %%%%%%%%%%%%%%%%%%%%%%%%%%%%
747%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
748%INPUTS:
749%theta = current 8x8 parameter matrix
750%lbl = cell labeling function
751%A_loc = matrix of local contributions to A
752%Id = Identity matrix of size 128x128
753%boundaries = labels of boundary cells
754%b = right hand side of linear system (AU = b)
755%M = measurement matrix
756%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
757%OUTPUTS:
758%z = vector of measurements
759%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
760
761function z = forward_solver(theta,lbl,A_loc,Id,boundaries,b,M)
762
763%initialize matrix A for FEM linear solve, AU = b
764A = zeros(33^2,33^2);
765
766%loop over cells to build A
767for i=0:31
768 for j=0:31 %build A by summing over contribution from each cell
769
770 %find local coefficient in 8x8 grid
771 theta_loc = theta(floor(i/4)+1,floor(j/4)+1);
772
773 %update A by including contribution from cell (i,j)
774 dof = [lbl(i,j),lbl(i,j+1),lbl(i+1,j+1),lbl(i+1,j)];
775 A(dof,dof) = A(dof,dof) + theta_loc*A_loc;
776 end
777end
778
779%enforce boundary condition
780A(boundaries,:) = Id(boundaries,:);
781A(:,boundaries) = Id(:,boundaries);
782
783%sparsify A
784A = sparse(A);
785
786%solve linear equation for coefficients, U
787U = A\b;
788
789%get new z values
790z = M*U;
@endcode


<a name="ann-Matlab/get_statistics.m"></a>
<h1>Annotated version of Matlab/get_statistics.m</h1>
@code{.m}
797%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
798%%%%%%%%%%%%%%%%% compute statistics on data set %%%%%%%%%%%%%%%%%%%%%%%%%%
799%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
800%INPUTS:
801%data = tensor of theta samples from each lag time and chain
802%theta_means = means of theta over each independent chain
803%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
804%OUTPUTS:
805%theta_mean = overall mean of chains
806%covars = covariance matrices of each independent chain
807%autocovar = mean of autocovariance matrix over all the chains
808%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
809
810function [theta_mean,covars,autocovar] = get_statistics(data,theta_means);
811
812%compute overall mean of data, and get size of data matrix
813theta_mean = mean(theta_means,3);
814[~,~,L,N] = size(data);
815
816%initialize covariance matrices and mean autocovariance matrix
817covars = zeros(64,64,N);
818autocovar = zeros(64,64,2*L-1);
819
820%compute covariance matrices and mean autocovariance matrix
821for n=1:N %loop over independent Markov chains
822
823 %get data from chain n
824 data_ = reshape(permute(data(:,:,:,n),[3 2 1]),[L 64]);
825
826 %compute autocovariance matrix of chain n
827 mat = xcov(data_,'unbiased');
828
829 %store covariance matrix of chain n
830 covars(:,:,n) = reshape(mat(L,:),64,64);
831
832 %update mean autocovariance matrix
833 autocovar = autocovar + reshape(mat',[64 64 2*L-1]);
834end
835
836%compute mean of autocovariance matrix
837autocovar = autocovar(1:64,1:64,L:2*L-1)/N;
@endcode


<a name="ann-Matlab/log_probability.m"></a>
<h1>Annotated version of Matlab/log_probability.m</h1>
@code{.m}
844%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
845%%%%%%%%%%%%%%%%% compute log probability, log pi %%%%%%%%%%%%%%%%%%%%%%%%%
846%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
847%INPUTS:
848%theta = current 8x8 parameter matrix
849%z = current vector of measurements
850%z_hat = vector of "exact" measurements
851%sig = standard deviation parameter in likelihood
852%sig_pr = standard deviation parameter in prior
853%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
854%OUTPUTS:
855%log_pi = logarithm of posterior probability
856%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
857
858function log_pi = log_probability(theta,z,z_hat,sig,sig_pr)
859
860%compute log likelihood
861log_L = -sum((z-z_hat).^2)/(2*sig^2);
862
863%compute log prior
864log_pi_pr = -sum(log(theta).^2,'all')/(2*sig_pr^2);
865
866%compute log posterior
867log_pi = log_L+log_pi_pr;
@endcode


<a name="ann-Matlab/main.m"></a>
<h1>Annotated version of Matlab/main.m</h1>
@code{.m}
874%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
875%%%%%%%%% Run MCMC sampler to estimate posterior distribution %%%%%%%%%%%%%
876%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
877
878%define number of chains, chain length, and lag time
879N = input('number of independent Markov chains: ');
880N_L = input('length of each Markov chain: ');
881lag = input('lag time for measurements: ');
882workers = input('number of parallel workers: ');
883L = N_L/lag;
884
885%open Matlab parallel pool
886parpool(workers)
887
888%load precomputations
889load precomputations.mat
890
891%define lag time and data matrix
892data = zeros(8,8,L,N); %data matrix of samples at lag times
893theta_means = zeros(8,8,N); %overall mean of theta
894
895tic
896
897parfor n=1:N
898
899 %set initial theta, theta mean, and z values of chain
900 theta = theta0;
901 theta_mean = 0;
902 z = z0;
903
904 for m=1:L
905
906 for l=1:lag
907
908 %define proposal, theta_tilde
909 xi = normrnd(0,sig_prop,[8 8]);
910 theta_tilde = theta.*exp(xi);
911
912 %compute new z values
913 z_tilde = forward_solver_(theta_tilde);
914
915 %compute posterior log probability of theta_tilde
916 log_pi_tilde = log_probability_(theta_tilde,z_tilde);
917 log_pi = log_probability_(theta,z);
918
919 %compute acceptance probability; accept proposal appropriately
920 accept = exp(log_pi_tilde-log_pi) ...
921 *prod(theta_tilde./theta,'all');
922 if rand<accept
923 theta = theta_tilde; %accept new theta values
924 z = z_tilde; %record associated measurements
925 end
926
927 %update mean of theta
928 theta_mean = theta_mean + theta;
929
930 end
931
932 %update data matrix
933 data(:,:,m,n) = theta;
934
935 end
936
937 %update theta means
938 theta_means(:,:,n) = theta_mean/N_L;
939
940end
941
942toc
943
944%compute statistics on data set
945[theta_mean,covars,autocovar] = get_statistics(data,theta_means);
946
947%save data to Matlab workspace, labeled by N and N_L
948save (['data_N_' num2str(N) '_N_L_ ' num2str(N_L) '.mat'])
@endcode


<a name="ann-Matlab/plot_solution.m"></a>
<h1>Annotated version of Matlab/plot_solution.m</h1>
@code{.m}
955%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
956%%%%%%%%%%%%%%% plots solution, u, to Poisson equation %%%%%%%%%%%%%%%%%%%%
957%%%%%%%%%%%%%%%% associated to theta and a 32x32 mesh %%%%%%%%%%%%%%%%%%%%%
958%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
959
960%input matrix theta for plotting (e.g., theta = theta_hat)
961theta = input('choose theta for plotting u: theta = ');
962
963%construct mass matrix, M_plot, for plotting
964xsp = 0:0.02:1;
965n = length(xsp);
966Mp = zeros(n,n,33^2);
967for k=1:33^2
968 c = inv_lbl(k);
969 for i=1:n
970 for j=1:n
971 Mp(i,j,k) = phi(xsp(i)-h*c(1),xsp(j)-h*c(2));
972 end
973 end
974end
975Mp = reshape(Mp,[n^2 33^2]);
976
977%run forward solver on mean of theta
978A = zeros(33^2,33^2);
979for i=0:31
980 for j=0:31 %build A by summing over contribution from each cell
981
982 %find local coefficient in 8x8 grid
983 theta_loc = theta(floor(i/4)+1,floor(j/4)+1);
984
985 %update A by including contribution from cell (i,j)
986 dof = [lbl(i,j),lbl(i,j+1),lbl(i+1,j+1),lbl(i+1,j)];
987 A(dof,dof) = A(dof,dof) + theta_loc*A_loc;
988 end
989end
990
991%enforce boundary condition
992A(boundaries,:) = Id(boundaries,:);
993
994%sparsify A
995A = sparse(A);
996
997%solve linear equation for coefficients, U
998U = A\b;
999
1000%close all current plots
1001close all
1002
1003%plot solution
1004figure
1005zs = reshape(Mp*U,[n n]);
1006surf(xsp,xsp,zs)
@endcode


<a name="ann-Matlab/precomputations.m"></a>
<h1>Annotated version of Matlab/precomputations.m</h1>
@code{.m}
1013%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
1014%%%%%%% do all precomputations necessary for MCMC simulations %%%%%%%%%%%%%
1015%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
1016
1017%define mesh width
1018h = 1/32;
1019
1020%define characteristic function of unit square
1021S = @(x,y) heaviside(x).*heaviside(y) ...
1022 .*(1-heaviside(x-h)).*(1-heaviside(y-h));
1023
1024%define tent function on the domain [-h,h]x[-h,h]
1025phi = @(x,y) ((x+h).*(y+h).*S(x+h,y+h) + (h-x).*(h-y).*S(x,y) ...
1026 + (x+h).*(h-y).*S(x+h,y) + (h-x).*(y+h).*S(x,y+h))/h^2;
1027
1028%define function that converts from (i,j) to dof, and its inverse
1029lbl = @(i,j) 33*j+i+1;
1030inv_lbl = @(k) [k-1-33*floor((k-1)/33),floor((k-1)/33)];
1031
1032%construct measurement matrix, M
1033xs = 1/14:1/14:13/14; %measurement points
1034M = zeros(13,13,33^2);
1035for k=1:33^2
1036 c = inv_lbl(k);
1037 for i=1:13
1038 for j=1:13
1039 M(i,j,k) = phi(xs(i)-h*c(1),xs(j)-h*c(2));
1040 end
1041 end
1042end
1043M = reshape(M,[13^2 33^2]);
1044
1045%construct exact coefficient matrix, theta_hat
1046theta_hat = ones(8,8);
1047theta_hat(2:3,2:3) = 0.1;
1048theta_hat(6:7,6:7) = 10;
1049
1050%construct local overlap matrix, A_loc, and identity matrix Id
1051A_loc = [2/3 -1/6 -1/3 -1/6;
1052 -1/6 2/3 -1/6 -1/3;
1053 -1/3 -1/6 2/3 -1/6;
1054 -1/6 -1/3 -1/6 2/3];
1055Id = eye(33^2);
1056
1057%locate boundary labels
1058boundaries = [lbl(0:1:32,0),lbl(0:1:32,32),lbl(0,1:1:31),lbl(32,1:1:31)];
1059
1060%define RHS of FEM linear system, AU = b
1061b = ones(33^2,1)*10*h^2;
1062b(boundaries) = zeros(128,1); %enforce boundary conditions on b
1063
1064%load exact z_hat values
1065exact_values
1066
1067%set global parameters and functions for simulation
1068sig = 0.05; %likelihood standard deviation
1069sig_pr = 2; %prior (log) standard deviation
1070sig_prop = 0.0725; %proposal (log) standard deviation
1071theta0 = ones(8,8); %initial theta values
1072forward_solver_ = @(theta) ...
1073 forward_solver(theta,lbl,A_loc,Id,boundaries,b,M);
1074log_probability_ = @(theta,z) log_probability(theta,z,z_hat,sig,sig_pr);
1075
1076%find initial z values
1077z0 = forward_solver_(theta0);
1078
1079save precomputations.mat
@endcode


<a name="ann-Python/forward_solver.py"></a>
<h1>Annotated version of Python/forward_solver.py</h1>
@code{.py}
1086import numpy as np
1087import scipy.sparse
1088from scipy.sparse.linalg import spsolve
1089import time
1090
1091###########################################################################
1092############ list of "exact" measurement values, z_hat ####################
1093###########################################################################
1094
1095z_hat = np.array(
1096 [0.06076511762259369, 0.09601910120848481,
1097 0.1238852517838584, 0.1495184117375201,
1098 0.1841596127549784, 0.2174525028261122,
1099 0.2250996160898698, 0.2197954769002993,
1100 0.2074695698370926, 0.1889996477663016,
1101 0.1632722532153726, 0.1276782480038186,
1102 0.07711845915789312, 0.09601910120848552,
1103 0.2000589533367983, 0.3385592591951766,
1104 0.3934300024647806, 0.4040223892461541,
1105 0.4122329537843092, 0.4100480091545554,
1106 0.3949151637189968, 0.3697873264791232,
1107 0.33401826235924, 0.2850397806663382,
1108 0.2184260032478671, 0.1271121156350957,
1109 0.1238852517838611, 0.3385592591951819,
1110 0.7119285162766475, 0.8175712861756428,
1111 0.6836254116578105, 0.5779452419831157,
1112 0.5555615956136897, 0.5285181561736719,
1113 0.491439702849224, 0.4409367494853282,
1114 0.3730060082060772, 0.2821694983395214,
1115 0.1610176733857739, 0.1495184117375257,
1116 0.3934300024647929, 0.8175712861756562,
1117 0.9439154625527653, 0.8015904115095128,
1118 0.6859683749254024, 0.6561235366960599,
1119 0.6213197201867315, 0.5753611315000049,
1120 0.5140091754526823, 0.4325325506354165,
1121 0.3248315148915482, 0.1834600412730086,
1122 0.1841596127549917, 0.4040223892461832,
1123 0.6836254116578439, 0.8015904115095396,
1124 0.7870119561144977, 0.7373108331395808,
1125 0.7116558878070463, 0.6745179049094283,
1126 0.6235300574156917, 0.5559332704045935,
1127 0.4670304994474178, 0.3499809143811,
1128 0.19688263746294, 0.2174525028261253,
1129 0.4122329537843404, 0.5779452419831566,
1130 0.6859683749254372, 0.7373108331396063,
1131 0.7458811983178246, 0.7278968022406559,
1132 0.6904793535357751, 0.6369176452710288,
1133 0.5677443693743215, 0.4784738764865867,
1134 0.3602190632823262, 0.2031792054737325,
1135 0.2250996160898818, 0.4100480091545787,
1136 0.5555615956137137, 0.6561235366960938,
1137 0.7116558878070715, 0.727896802240657,
1138 0.7121928678670187, 0.6712187391428729,
1139 0.6139157775591492, 0.5478251665295381,
1140 0.4677122687599031, 0.3587654911000848,
1141 0.2050734291675918, 0.2197954769003094,
1142 0.3949151637190157, 0.5285181561736911,
1143 0.6213197201867471, 0.6745179049094407,
1144 0.690479353535786, 0.6712187391428787,
1145 0.6178408289359514, 0.5453605027237883,
1146 0.489575966490909, 0.4341716881061278,
1147 0.3534389974779456, 0.2083227496961347,
1148 0.207469569837099, 0.3697873264791366,
1149 0.4914397028492412, 0.5753611315000203,
1150 0.6235300574157017, 0.6369176452710497,
1151 0.6139157775591579, 0.5453605027237935,
1152 0.4336604929612851, 0.4109641743019312,
1153 0.3881864790111245, 0.3642640090182592,
1154 0.2179599909280145, 0.1889996477663011,
1155 0.3340182623592461, 0.4409367494853381,
1156 0.5140091754526943, 0.5559332704045969,
1157 0.5677443693743304, 0.5478251665295453,
1158 0.4895759664908982, 0.4109641743019171,
1159 0.395727260284338, 0.3778949322004734,
1160 0.3596268271857124, 0.2191250268948948,
1161 0.1632722532153683, 0.2850397806663325,
1162 0.373006008206081, 0.4325325506354207,
1163 0.4670304994474315, 0.4784738764866023,
1164 0.4677122687599041, 0.4341716881061055,
1165 0.388186479011099, 0.3778949322004602,
1166 0.3633362567187364, 0.3464457261905399,
1167 0.2096362321365655, 0.1276782480038148,
1168 0.2184260032478634, 0.2821694983395252,
1169 0.3248315148915535, 0.3499809143811097,
1170 0.3602190632823333, 0.3587654911000799,
1171 0.3534389974779268, 0.3642640090182283,
1172 0.35962682718569, 0.3464457261905295,
1173 0.3260728953424643, 0.180670595355394,
1174 0.07711845915789244, 0.1271121156350963,
1175 0.1610176733857757, 0.1834600412730144,
1176 0.1968826374629443, 0.2031792054737354,
1177 0.2050734291675885, 0.2083227496961245,
1178 0.2179599909279998, 0.2191250268948822,
1179 0.2096362321365551, 0.1806705953553887,
1180 0.1067965550010013])
1181
1182
1183###########################################################################
1184####### do all precomputations necessary for MCMC simulations #############
1185###########################################################################
1186
1187# Define the mesh width
1188h = 1/32
1189
1190# Define characteristic function of unit square
1191def heaviside(x) :
1192 if x<0 :
1193 return 0
1194 else :
1195 return 1
1196
1197def S(x,y) :
1198 return heaviside(x)*heaviside(y) * (1-heaviside(x-h))*(1-heaviside(y-h));
1199
1200# Define tent function on the domain [0,2h]x[0,2h]
1201def phi(x,y) :
1202 return ((x+h)*(y+h)*S(x+h,y+h) + (h-x)*(h-y)*S(x,y)
1203 + (x+h)*(h-y)*S(x+h,y) + (h-x)*(y+h)*S(x,y+h))/h**2
1204
1205# Define conversion function for dof's from 2D to scalar label, and
1206# its inverse
1207def ij_to_dof_index(i,j) :
1208 return 33*j+i
1209
1210def inv_ij_to_dof_index(k) :
1211 return [k-33*int(k/33),int(k/33)]
1212
1213
1214# Construct measurement matrix, M, for measurements
1215xs = np.arange(1./14,13./14,1./14); #measurement points
1216
1217M = np.zeros((13,13,33**2));
1218for k in range(33**2) :
1219 c = inv_ij_to_dof_index(k)
1220 for i in range(13) :
1221 for j in range(13) :
1222 M[i,j,k] = phi(xs[i]-h*c[0], xs[j]-h*c[1])
1223M = M.reshape((13**2, 33**2))
1224M = scipy.sparse.csr_matrix(M);
1225
1226# Construct local overlap matrix, A_loc, and identity matrix Id
1227A_loc = np.array([[2./3, -1./6, -1./3, -1./6],
1228 [-1./6, 2./3, -1./6, -1./3],
1229 [-1./3, -1./6, 2./3, -1./6],
1230 [-1./6, -1./3, -1./6, 2./3]])
1231Id = np.eye(33**2,33**2)
1232
1233# Locate boundary labels
1234boundaries = ([ij_to_dof_index(i,0) for i in range(33)] +
1235 [ij_to_dof_index(i,32) for i in range(33)] +
1236 [ij_to_dof_index(0,j+1) for j in range(31)] +
1237 [ij_to_dof_index(32,j+1) for j in range(31)])
1238
1239# Define RHS of FEM linear system, AU = b
1240b = np.ones(33**2)*10*h**2
1241b[boundaries] = 0 #enforce boundary conditions on b
1242
1243
1244
1245
1246
1247###########################################################################
1248###################### forward solver function ############################
1249###########################################################################
1250
1251def forward_solver(theta) :
1252 # Initialize matrix A for FEM linear solve, AU = b
1253 A = np.zeros((33**2,33**2))
1254
1255 # Build A by summing over contribution from each cell
1256 for i in range(32) :
1257 for j in range (32) :
1258 # Find local coefficient in 8x8 grid
1259 theta_loc = theta[int(i/4)+int(j/4)*8]
1260
1261 # Update A by including contribution from cell (i,j)
1262 dof = [ij_to_dof_index(i,j),
1263 ij_to_dof_index(i,j+1),
1264 ij_to_dof_index(i+1,j+1),
1265 ij_to_dof_index(i+1,j)]
1266 A[np.ix_(dof,dof)] += theta_loc * A_loc
1267
1268 # Enforce boundary condition: Zero out rows and columns, then
1269 # put a one back into the diagonal entries.
1270 A[boundaries,:] = 0
1271 A[:,boundaries] = 0
1272 A[boundaries,boundaries] = 1
1273
1274 # Solve linear equation for coefficients, U, and then
1275 # get the Z vector by multiplying by the measurement matrix
1276 u = spsolve(scipy.sparse.csr_matrix(A), b)
1277
1278 z = M * u
1279
1280 return z
1281
1282
1283
1284
1285###########################################################################
1286################# compute log probability, log pi #########################
1287###########################################################################
1288
1289def log_likelihood(theta) :
1290 z = forward_solver(theta)
1291 misfit = z - z_hat
1292 sig = 0.05 #likelihood standard deviation
1293 return -np.dot(misfit,misfit)/(2*sig**2)
1294
1295def log_prior(theta) :
1296 sig_pr = 2 #prior (log) standard deviation
1297 return -np.linalg.norm(np.log(theta))**2/(2*sig_pr**2)
1298
1299def log_posterior(theta) :
1300 return log_likelihood(theta) + log_prior(theta)
1301
1302
1303
1304###########################################################################
1305############# A function to test against known output #####################
1306###########################################################################
1307
1308
1309def verify_against_stored_tests() :
1310 for i in range(10) :
1311 print ("Verifying against data set", i)
1312
1313 # Read the input vector
1314 f_input = open ("../testing/input.{}.txt".format(i), 'r')
1315 theta = np.fromfile(f_input, count=64, sep=" ")
1316
1317 # Then compute both the forward solution and its statistics.
1318 # This is not efficiently written here (it calls the forward
1319 # solver twice), but we don't care about efficiency here since
1320 # we are only computing with ten samples
1321 this_z = forward_solver(theta)
1322 this_log_likelihood = log_likelihood(theta)
1323 this_log_prior = log_prior(theta)
1324
1325 # Then also read the reference output generated by the C++ program:
1326 f_output_z = open ("../testing/output.{}.z.txt".format(i), 'r')
1327 f_output_likelihood = open ("../testing/output.{}.likelihood.txt".format(i), 'r')
1328 f_output_prior = open ("../testing/output.{}.prior.txt".format(i), 'r')
1329
1330 reference_z = np.fromfile(f_output_z, count=13**2, sep=" ")
1331 reference_log_likelihood = float(f_output_likelihood.read())
1332 reference_log_prior = float(f_output_prior.read())
1333
1334 print (" || z-z_ref || : ",
1335 np.linalg.norm(this_z - reference_z))
1336 print (" log likelihood : ",
1337 "Python value=", this_log_likelihood,
1338 "(C++ reference value=", reference_log_likelihood,
1339 ", error=", abs(this_log_likelihood - reference_log_likelihood),
1340 ")")
1341 print (" log prior : ",
1342 "Python value=", this_log_prior,
1343 "(C++ reference value=", reference_log_prior,
1344 ", error=", abs(this_log_prior - reference_log_prior),
1345 ")")
1346
1347
1348def time_forward_solver() :
1349 begin = time.time()
1350
1351 n_runs = 100
1352 for i in range(n_runs) :
1353 # Create a random vector (with entries between 0 and 1), scale
1354 # it by a factor of 4, subtract 2, then take the exponential
1355 # of each entry to get random entries between e^{-2} and
1356 # e^{+2}
1357 theta = np.exp(np.random.rand(64) * 4 - 2)
1358 z = forward_solver(theta)
1359 end = time.time()
1360 print ("Time per forward evaluation:", (end-begin)/n_runs)
1361
1362verify_against_stored_tests()
1363time_forward_solver()
@endcode


<a name="ann-mcmc-laplace.cc"></a>
<h1>Annotated version of mcmc-laplace.cc</h1>
1369 *
1370 *
1371 *
1372 *
1373 * @code
1374 * /* ---------------------------------------------------------------------
1375 * *
1376 * * Copyright (C) 2019 by the deal.II authors and Wolfgang Bangerth.
1377 * *
1378 * * This file is part of the deal.II library.
1379 * *
1380 * * The deal.II library is free software; you can use it, redistribute
1381 * * it, and/or modify it under the terms of the GNU Lesser General
1382 * * Public License as published by the Free Software Foundation; either
1383 * * version 2.1 of the License, or (at your option) any later version.
1384 * * The full text of the license can be found in the file LICENSE.md at
1385 * * the top level directory of deal.II.
1386 * *
1387 * * ---------------------------------------------------------------------
1388 *
1389 * *
1390 * * Author: Wolfgang Bangerth, Colorado State University, 2019.
1391 * */
1392 *
1393 *
1394 *
1395 * #include <deal.II/base/timer.h>
1396 * #include <deal.II/grid/tria.h>
1397 * #include <deal.II/dofs/dof_handler.h>
1398 * #include <deal.II/grid/grid_generator.h>
1399 * #include <deal.II/grid/tria_accessor.h>
1400 * #include <deal.II/grid/tria_iterator.h>
1401 * #include <deal.II/dofs/dof_accessor.h>
1402 * #include <deal.II/fe/fe_q.h>
1403 * #include <deal.II/dofs/dof_tools.h>
1404 * #include <deal.II/fe/fe_values.h>
1405 * #include <deal.II/base/quadrature_lib.h>
1406 * #include <deal.II/base/function.h>
1407 * #include <deal.II/numerics/vector_tools.h>
1408 * #include <deal.II/numerics/matrix_tools.h>
1409 * #include <deal.II/lac/vector.h>
1410 * #include <deal.II/lac/full_matrix.h>
1411 * #include <deal.II/lac/sparse_matrix.h>
1412 * #include <deal.II/lac/dynamic_sparsity_pattern.h>
1413 * #include <deal.II/lac/solver_cg.h>
1414 * #include <deal.II/lac/precondition.h>
1415 * #include <deal.II/lac/sparse_ilu.h>
1416 *
1417 * #include <deal.II/numerics/data_out.h>
1418 *
1419 * #include <fstream>
1420 * #include <iostream>
1421 * #include <random>
1422 *
1423 * #include <deal.II/base/logstream.h>
1424 *
1425 * using namespace dealii;
1426 *
1427 *
1428 * @endcode
1429 *
1430 * The following is a namespace in which we define the solver of the PDE.
1431 * The main class implements an abstract `Interface` class declared at
1432 * the top, which provides for an `evaluate()` function that, given
1433 * a coefficient vector, solves the PDE discussed in the Readme file
 1434 * and then evaluates the solution at the 169 measurement points mentioned there.
1435 *
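 * Expressed as a formula, each class derived from this interface represents
 * the forward map
 * @f$\theta \mapsto z(\theta) = (u_\theta(\hat x_1),\ldots,u_\theta(\hat x_{169}))@f$,
 * where @f$u_\theta@f$ denotes the solution of that PDE for the coefficient
 * vector @f$\theta@f$ and the @f$\hat x_k@f$ are the measurement points.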
1436
1437 *
1438 * The solver follows the basic layout of @ref step_4 "step-4", though it precomputes
1439 * a number of things in the `setup_system()` function, such as the
1440 * evaluation of the matrix that corresponds to the point evaluations,
1441 * as well as the local contributions to matrix and right hand side.
1442 *
1443
1444 *
1445 * Rather than commenting on everything in detail, in the following
1446 * we will only document those things that are not already clear from
1447 * @ref step_4 "step-4" and a small number of other tutorial programs.
1448 *
1449 * @code
1450 * namespace ForwardSimulator
1451 * {
1452 * class Interface
1453 * {
1454 * public:
1455 * virtual Vector<double> evaluate(const Vector<double> &coefficients) = 0;
1456 *
1457 * virtual ~Interface() = default;
1458 * };
1459 *
1460 *
1461 *
1462 * template <int dim>
1463 * class PoissonSolver : public Interface
1464 * {
1465 * public:
1466 * PoissonSolver(const unsigned int global_refinements,
1467 * const unsigned int fe_degree,
1468 * const std::string &dataset_name);
1469 * virtual Vector<double>
1470 * evaluate(const Vector<double> &coefficients) override;
1471 *
1472 * private:
1473 * void make_grid(const unsigned int global_refinements);
1474 * void setup_system();
1475 * void assemble_system(const Vector<double> &coefficients);
1476 * void solve();
1477 * void output_results(const Vector<double> &coefficients) const;
1478 *
1479 * Triangulation<dim> triangulation;
1480 * FE_Q<dim> fe;
1481 * DoFHandler<dim> dof_handler;
1482 *
1483 * FullMatrix<double> cell_matrix;
1484 * Vector<double> cell_rhs;
1485 * std::map<types::global_dof_index,double> boundary_values;
1486 *
1487 * SparsityPattern sparsity_pattern;
1488 * SparseMatrix<double> system_matrix;
1489 *
1490 * Vector<double> solution;
1491 * Vector<double> system_rhs;
1492 *
1493 * std::vector<Point<dim>> measurement_points;
1494 *
1495 * SparsityPattern measurement_sparsity;
1496 * SparseMatrix<double> measurement_matrix;
1497 *
1498 * TimerOutput timer;
1499 * unsigned int nth_evaluation;
1500 *
1501 * const std::string &dataset_name;
1502 * };
1503 *
1504 *
1505 *
1506 * template <int dim>
1507 * PoissonSolver<dim>::PoissonSolver(const unsigned int global_refinements,
1508 * const unsigned int fe_degree,
1509 * const std::string &dataset_name)
1510 * : fe(fe_degree)
1511 * , dof_handler(triangulation)
1512 * , timer(std::cout, TimerOutput::summary, TimerOutput::cpu_times)
1513 * , nth_evaluation(0)
1514 * , dataset_name(dataset_name)
1515 * {
1516 * make_grid(global_refinements);
1517 * setup_system();
1518 * }
1519 *
1520 *
1521 *
1522 * template <int dim>
1523 * void PoissonSolver<dim>::make_grid(const unsigned int global_refinements)
1524 * {
1525 * Assert(global_refinements >= 3,
1526 * ExcMessage("This program makes the assumption that the mesh for the "
1527 * "solution of the PDE is at least as fine as the one used "
1528 * "in the definition of the coefficient."));
1529 * GridGenerator::hyper_cube(triangulation, 0, 1);
1530 * triangulation.refine_global(global_refinements);
1531 *
1532 * std::cout << " Number of active cells: " << triangulation.n_active_cells()
1533 * << std::endl;
1534 * }
1535 *
1536 *
1537 *
1538 * template <int dim>
1539 * void PoissonSolver<dim>::setup_system()
1540 * {
1541 * @endcode
1542 *
1543 * First define the finite element space:
1544 *
1545 * @code
1546 * dof_handler.distribute_dofs(fe);
1547 *
1548 * std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
1549 * << std::endl;
1550 *
1551 * @endcode
1552 *
1553 * Then set up the main data structures that will hold the discrete problem:
1554 *
1555 * @code
1556 * {
1557 * DynamicSparsityPattern dsp(dof_handler.n_dofs());
1558 * DoFTools::make_sparsity_pattern(dof_handler, dsp);
1559 * sparsity_pattern.copy_from(dsp);
1560 *
1561 * system_matrix.reinit(sparsity_pattern);
1562 *
1563 * solution.reinit(dof_handler.n_dofs());
1564 * system_rhs.reinit(dof_handler.n_dofs());
1565 * }
1566 *
1567 * @endcode
1568 *
1569 * And then define the tools to do point evaluation. We choose
1570 * a set of 13x13 points evenly distributed across the domain:
1571 *
1572 * @code
1573 * {
1574 * const unsigned int n_points_per_direction = 13;
1575 * const double dx = 1. / (n_points_per_direction + 1);
1576 *
1577 * for (unsigned int x = 1; x <= n_points_per_direction; ++x)
1578 * for (unsigned int y = 1; y <= n_points_per_direction; ++y)
1579 * measurement_points.emplace_back(x * dx, y * dx);
1580 *
1581 * @endcode
1582 *
1583 * First build a full matrix of the evaluation process. We do this
 1584 * even though the matrix is really sparse, since we don't know ahead
 1585 * of time which entries are nonzero. Later, the `copy_from()` function
1586 * calls build a sparsity pattern and a sparse matrix from
1587 * the dense matrix.
1588 *
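 * In other words, row @f$k@f$ of this matrix holds the values of all shape
 * functions at the @f$k@f$th measurement point,
 * @f$M_{ki} = \varphi_i(\hat x_k)@f$, so that applying it to a solution
 * vector @f$U@f$ later yields exactly the point values
 * @f$(MU)_k = \sum_i U_i \varphi_i(\hat x_k) = u_h(\hat x_k)@f$.
 *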
1589 * @code
1590 * Vector<double> weights(dof_handler.n_dofs());
1591 * FullMatrix<double> full_measurement_matrix(n_points_per_direction *
1592 * n_points_per_direction,
1593 * dof_handler.n_dofs());
1594 *
1595 * for (unsigned int index = 0; index < measurement_points.size(); ++index)
1596 * {
 1597 * VectorTools::create_point_source_vector(dof_handler,
 1598 * measurement_points[index],
1599 * weights);
1600 * for (unsigned int i = 0; i < dof_handler.n_dofs(); ++i)
1601 * full_measurement_matrix(index, i) = weights(i);
1602 * }
1603 *
1604 * measurement_sparsity.copy_from(full_measurement_matrix);
1605 * measurement_matrix.reinit(measurement_sparsity);
1606 * measurement_matrix.copy_from(full_measurement_matrix);
1607 * }
1608 *
1609 * @endcode
1610 *
1611 * Next build the mapping from cell to the index in the 64-element
1612 * coefficient vector:
1613 *
1614 * @code
1615 * for (const auto &cell : triangulation.active_cell_iterators())
1616 * {
1617 * const unsigned int i = std::floor(cell->center()[0] * 8);
1618 * const unsigned int j = std::floor(cell->center()[1] * 8);
1619 *
1620 * const unsigned int index = i + 8 * j;
1621 *
1622 * cell->set_user_index(index);
1623 * }
1624 *
1625 * @endcode
1626 *
1627 * Finally prebuild the building blocks of the linear system as
1628 * discussed in the Readme file :
1629 *
1630 * @code
1631 * {
1632 * const unsigned int dofs_per_cell = fe.dofs_per_cell;
1633 *
1634 * cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
1635 * cell_rhs.reinit(dofs_per_cell);
1636 *
1637 * const QGauss<dim> quadrature_formula(fe.degree+1);
1638 * const unsigned int n_q_points = quadrature_formula.size();
1639 *
1640 * FEValues<dim> fe_values(fe,
 1641 * quadrature_formula,
 1642 * update_values | update_gradients |
 1643 * update_JxW_values);
 1644 *
1645 * fe_values.reinit(dof_handler.begin_active());
1646 *
1647 * for (unsigned int q_index = 0; q_index < n_q_points; ++q_index)
1648 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1649 * {
1650 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1651 * cell_matrix(i, j) +=
1652 * (fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
1653 * fe_values.shape_grad(j, q_index) * // grad phi_j(x_q)
1654 * fe_values.JxW(q_index)); // dx
1655 *
1656 * cell_rhs(i) += (fe_values.shape_value(i, q_index) * // phi_i(x_q)
1657 * 10.0 * // f(x_q)
1658 * fe_values.JxW(q_index)); // dx
1659 * }
1660 *
 1661 * VectorTools::interpolate_boundary_values(dof_handler,
 1662 * 0,
 1663 * Functions::ZeroFunction<dim>(),
 1664 * boundary_values);
1665 * }
1666 * }
1667 *
1668 *
1669 *
1670 * @endcode
1671 *
1672 * Given that we have pre-built the matrix and right hand side contributions
1673 * for a (representative) cell, the function that assembles the matrix is
1674 * pretty short and straightforward:
1675 *
1676 * @code
1677 * template <int dim>
1678 * void PoissonSolver<dim>::assemble_system(const Vector<double> &coefficients)
1679 * {
1680 * Assert(coefficients.size() == 64, ExcInternalError());
1681 *
1682 * system_matrix = 0;
1683 * system_rhs = 0;
1684 *
1685 * const unsigned int dofs_per_cell = fe.dofs_per_cell;
1686 *
1687 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1688 *
1689 * for (const auto &cell : dof_handler.active_cell_iterators())
1690 * {
1691 * const double coefficient = coefficients(cell->user_index());
1692 *
1693 * cell->get_dof_indices(local_dof_indices);
1694 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1695 * {
1696 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1697 * system_matrix.add(local_dof_indices[i],
1698 * local_dof_indices[j],
1699 * coefficient * cell_matrix(i, j));
1700 *
1701 * system_rhs(local_dof_indices[i]) += cell_rhs(i);
1702 * }
1703 * }
1704 *
1705 * MatrixTools::apply_boundary_values(boundary_values,
1706 * system_matrix,
1707 * solution,
1708 * system_rhs);
1709 * }
1710 *
1711 *
1712 * @endcode
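 *
 * In formulas, this works because the coefficient is constant on each cell
 * and all cells are congruent translates of each other: the contribution of
 * cell @f$K@f$ to the global matrix is simply the pre-built cell matrix
 * scaled by that cell's coefficient,
 * @f$A^K_{ij} = \theta_{k(K)} \int_K \nabla\varphi_i\cdot\nabla\varphi_j\,dx@f$,
 * where @f$k(K)@f$ is the block index stored through `set_user_index()` above.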
1713 *
1714 * The same is true for the function that solves the linear system:
1715 *
1716 * @code
1717 * template <int dim>
1718 * void PoissonSolver<dim>::solve()
1719 * {
1720 * SparseILU<double> ilu;
1721 * ilu.initialize(system_matrix);
1722 * SolverControl control(100, 1e-10*system_rhs.l2_norm());
1723 * SolverCG<> solver(control);
1724 * solver.solve(system_matrix, solution, system_rhs, ilu);
1725 * }
1726 *
1727 *
1728 *
1729 * @endcode
1730 *
1731 * The following function outputs graphical data for the most recently
1732 * used coefficient and corresponding solution of the PDE. Collecting
 1733 * the coefficient values requires translating between the 64-element
1734 * coefficient vector and the cells that correspond to each of these
1735 * elements. The rest remains pretty obvious, with the exception
1736 * of including the number of the current sample into the file name.
1737 *
1738 * @code
1739 * template <int dim>
1740 * void
1741 * PoissonSolver<dim>::output_results(const Vector<double> &coefficients) const
1742 * {
1743 * Vector<float> coefficient_values(triangulation.n_active_cells());
1744 * for (const auto &cell : triangulation.active_cell_iterators())
1745 * coefficient_values[cell->active_cell_index()] =
1746 * coefficients(cell->user_index());
1747 *
1748 * DataOut<dim> data_out;
1749 *
1750 * data_out.attach_dof_handler(dof_handler);
1751 * data_out.add_data_vector(solution, "solution");
1752 * data_out.add_data_vector(coefficient_values, "coefficient");
1753 *
1754 * data_out.build_patches();
1755 *
1756 * std::ofstream output("solution-" +
1757 * Utilities::int_to_string(nth_evaluation, 10) + ".vtu");
1758 * data_out.write_vtu(output);
1759 * }
1760 *
1761 *
1762 *
1763 * @endcode
1764 *
1765 * The following is the main function of this class: Given a coefficient
1766 * vector, it assembles the linear system, solves it, and then evaluates
1767 * the solution at the measurement points by applying the measurement
1768 * matrix to the solution vector. That vector of "measured" values
1769 * is then returned.
1770 *
1771
1772 *
1773 * The function will also output the solution in a graphical format
1774 * if you un-comment the corresponding statement in the third
1775 * code block. However, you may end up with a very large amount
 1776 * of data: This code is producing, at the minimum, 10,000 samples,
1777 * and creating output for each one of them is surely more data
1778 * than you ever want to see!
1779 *
1780
1781 *
1782 * At the end of the function, we output some timing information
1783 * every 10,000 samples.
1784 *
1785 * @code
1786 * template <int dim>
 1787 * Vector<double>
 1788 * PoissonSolver<dim>::evaluate(const Vector<double> &coefficients)
1789 * {
1790 * {
1791 * TimerOutput::Scope section(timer, "Building linear systems");
1792 * assemble_system(coefficients);
1793 * }
1794 *
1795 * {
1796 * TimerOutput::Scope section(timer, "Solving linear systems");
1797 * solve();
1798 * }
1799 *
1800 * Vector<double> measurements(measurement_matrix.m());
1801 * {
1802 * TimerOutput::Scope section(timer, "Postprocessing");
1803 *
1804 * measurement_matrix.vmult(measurements, solution);
1805 * Assert(measurements.size() == measurement_points.size(),
1806 * ExcInternalError());
1807 *
1808 * /* output_results(coefficients); */
1809 * }
1810 *
1811 * ++nth_evaluation;
1812 * if (nth_evaluation % 10000 == 0)
1813 * timer.print_summary();
1814 *
1815 * return measurements;
1816 * }
1817 * } // namespace ForwardSimulator
1818 *
1819 *
1820 * @endcode
1821 *
1822 * The following namespaces define the statistical properties of the Bayesian
1823 * inverse problem. The first is about the definition of the measurement
1824 * statistics (the "likelihood"), which we here assume to be a normal
1825 * distribution @f$N(\mu,\sigma I)@f$ with mean value @f$\mu@f$ given by the
1826 * actual measurement vector (passed as an argument to the constructor
 1827 * of the `Gaussian` class) and standard deviation @f$\sigma@f$.
1828 *
1829
1830 *
1831 * For reasons of numerical accuracy, it is useful to not return the
1832 * actual likelihood, but its logarithm. This is because these
1833 * values can be very small, occasionally on the order of @f$e^{-100}@f$,
1834 * for which it becomes very difficult to compute accurate
1835 * values.
1836 *
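 * Concretely, the `Gaussian` class below returns
 * @f$\log L(z) = -\frac{\|z-\mu\|^2}{2\sigma^2}@f$,
 * i.e., the logarithm of the Gaussian density up to an additive constant;
 * that constant can be omitted because it cancels in the Metropolis-Hastings
 * acceptance ratio.
 *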
1837 * @code
1838 * namespace LogLikelihood
1839 * {
1840 * class Interface
1841 * {
1842 * public:
1843 * virtual double log_likelihood(const Vector<double> &x) const = 0;
1844 *
1845 * virtual ~Interface() = default;
1846 * };
1847 *
1848 *
1849 * class Gaussian : public Interface
1850 * {
1851 * public:
1852 * Gaussian(const Vector<double> &mu, const double sigma);
1853 *
1854 * virtual double log_likelihood(const Vector<double> &x) const override;
1855 *
1856 * private:
1857 * const Vector<double> mu;
1858 * const double sigma;
1859 * };
1860 *
1861 * Gaussian::Gaussian(const Vector<double> &mu, const double sigma)
1862 * : mu(mu)
1863 * , sigma(sigma)
1864 * {}
1865 *
1866 *
1867 * double Gaussian::log_likelihood(const Vector<double> &x) const
1868 * {
1869 * Vector<double> x_minus_mu = x;
1870 * x_minus_mu -= mu;
1871 *
1872 * return -x_minus_mu.norm_sqr() / (2 * sigma * sigma);
1873 * }
1874 * } // namespace LogLikelihood
1875 *
1876 *
1877 * @endcode
1878 *
1879 * Next up is the "prior" imposed on the coefficients. We assume
1880 * that the logarithms of the entries of the coefficient vector
1881 * are all distributed as a Gaussian with given mean and standard
1882 * deviation. If the logarithms of the coefficients are normally
1883 * distributed, then this implies in particular that the coefficients
1884 * can only be positive, which is a useful property to ensure the
1885 * well-posedness of the forward problem.
1886 *
1887
1888 *
1889 * For the same reasons as for the likelihood above, the interface
1890 * for the prior asks for returning the *logarithm* of the prior,
1891 * instead of the prior probability itself.
1892 *
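 * In formulas, the `LogGaussian` class below returns
 * @f$\log \pi_\text{pr}(\theta) = -\sum_k \frac{(\log \theta_k-\mu)^2}{2\sigma^2}@f$,
 * again up to an additive constant that plays no role in the sampler.
 *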
1893 * @code
1894 * namespace LogPrior
1895 * {
1896 * class Interface
1897 * {
1898 * public:
1899 * virtual double log_prior(const Vector<double> &x) const = 0;
1900 *
1901 * virtual ~Interface() = default;
1902 * };
1903 *
1904 *
1905 * class LogGaussian : public Interface
1906 * {
1907 * public:
1908 * LogGaussian(const double mu, const double sigma);
1909 *
1910 * virtual double log_prior(const Vector<double> &x) const override;
1911 *
1912 * private:
1913 * const double mu;
1914 * const double sigma;
1915 * };
1916 *
1917 * LogGaussian::LogGaussian(const double mu, const double sigma)
1918 * : mu(mu)
1919 * , sigma(sigma)
1920 * {}
1921 *
1922 *
1923 * double LogGaussian::log_prior(const Vector<double> &x) const
1924 * {
1925 * double log_of_product = 0;
1926 *
1927 * for (const auto &el : x)
1928 * log_of_product +=
1929 * -(std::log(el) - mu) * (std::log(el) - mu) / (2 * sigma * sigma);
1930 *
1931 * return log_of_product;
1932 * }
1933 * } // namespace LogPrior
1934 *
1935 *
1936 *
1937 * @endcode
1938 *
1939 * The Metropolis-Hastings algorithm requires a method to create a new sample
1940 * given a previous sample. We do this by perturbing the current (coefficient)
1941 * sample randomly using a Gaussian distribution centered at the current
1942 * sample. To ensure that the samples' individual entries all remain
1943 * positive, we use a Gaussian distribution in logarithm space -- in other
1944 * words, instead of *adding* a small perturbation with mean value zero,
1945 * we *multiply* the entries of the current sample by a factor that
1946 * is the exponential of a random number with mean zero. (Because the
1947 * exponential of zero is one, this means that the most likely factors
1948 * to multiply the existing sample entries by are close to one. And
1949 * because the exponential of a number is always positive, we never
1950 * get negative samples this way.)
1951 *
1952
1953 *
1954 * But the Metropolis-Hastings sampler doesn't just need a perturbed
 1955 * sample location @f$y@f$ given the current sample location @f$x@f$. It also
1956 * needs to know the ratio of the probability of reaching @f$y@f$ from
1957 * @f$x@f$, divided by the probability of reaching @f$x@f$ from @f$y@f$. If we
1958 * were to use a symmetric proposal distribution (e.g., a Gaussian
1959 * distribution centered at @f$x@f$ with a width independent of @f$x@f$), then
1960 * these two probabilities would be the same, and the ratio one. But
1961 * that's not the case for the Gaussian in log space. It's not
1962 * terribly difficult to verify that in that case, for a single
1963 * component the ratio of these probabilities is @f$y_i/x_i@f$, and
 1964 * consequently, for all components of the vector together, the
 1965 * ratio is the product of these per-component ratios.
1966 *
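 * For completeness, here is the short computation behind this claim: the
 * proposal sets @f$y_i = x_i e^{\xi_i}@f$ with @f$\xi_i\sim N(0,\sigma^2)@f$,
 * i.e., the proposal density is log-normal, and therefore
 * @f[
 *   \frac{q(y\rightarrow x)}{q(x\rightarrow y)}
 *   = \prod_i
 *     \frac{\frac{1}{x_i\sigma\sqrt{2\pi}}
 *             \exp\left(-\frac{(\ln x_i-\ln y_i)^2}{2\sigma^2}\right)}
 *          {\frac{1}{y_i\sigma\sqrt{2\pi}}
 *             \exp\left(-\frac{(\ln y_i-\ln x_i)^2}{2\sigma^2}\right)}
 *   = \prod_i \frac{y_i}{x_i}.
 * @f]
 * This is exactly the factor returned as the second element of the pair by
 * the `perturb()` function below.
 *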
1967 * @code
1968 * namespace ProposalGenerator
1969 * {
1970 * class Interface
1971 * {
1972 * public:
1973 * virtual
1974 * std::pair<Vector<double>,double>
1975 * perturb(const Vector<double> &current_sample) const = 0;
1976 *
1977 * virtual ~Interface() = default;
1978 * };
1979 *
1980 *
1981 * class LogGaussian : public Interface
1982 * {
1983 * public:
1984 * LogGaussian(const unsigned int random_seed, const double log_sigma);
1985 *
1986 * virtual
1987 * std::pair<Vector<double>,double>
1988 * perturb(const Vector<double> &current_sample) const;
1989 *
1990 * private:
1991 * const double log_sigma;
1992 * mutable std::mt19937 random_number_generator;
1993 * };
1994 *
1995 *
1996 *
1997 * LogGaussian::LogGaussian(const unsigned int random_seed,
1998 * const double log_sigma)
1999 * : log_sigma(log_sigma)
2000 * {
2001 * random_number_generator.seed(random_seed);
2002 * }
2003 *
2004 *
2005 * std::pair<Vector<double>,double>
2006 * LogGaussian::perturb(const Vector<double> &current_sample) const
2007 * {
2008 * Vector<double> new_sample = current_sample;
2009 * double product_of_ratios = 1;
2010 * for (auto &x : new_sample)
2011 * {
2012 * const double rnd = std::normal_distribution<>(0, log_sigma)(random_number_generator);
2013 * const double exp_rnd = std::exp(rnd);
2014 * x *= exp_rnd;
2015 * product_of_ratios *= exp_rnd;
2016 * }
2017 *
2018 * return {new_sample, product_of_ratios};
2019 * }
2020 *
2021 * } // namespace ProposalGenerator
2022 *
2023 *
2024 * @endcode
2025 *
2026 * The last main class is the Metropolis-Hastings sampler itself.
2027 * If you understand the algorithm behind this method, then
2028 * the following implementation should not be too difficult
2029 * to read. The only thing of relevance is that descriptions
2030 * of the algorithm typically ask whether the *ratio* of two
2031 * probabilities (the "posterior" probabilities of the current
2032 * and the previous samples, where the "posterior" is the product of the
2033 * likelihood and the prior probability) is larger or smaller than a
2034 * randomly drawn number. But because our interfaces return the
2035 * *logarithms* of these probabilities, we now need to take
2036 * the ratio of appropriate exponentials -- which is made numerically
2037 * more stable by considering the exponential of the difference of
2038 * the log probabilities. The only other slight complication is that
2039 * we need to multiply this ratio by the ratio of proposal probabilities
2040 * since we use a non-symmetric proposal distribution.
2041 *
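 * Written out, the acceptance criterion implemented in `sample()` below is:
 * accept the trial sample @f$y@f$ over the current sample @f$x@f$ if
 * @f[
 *   \exp\left(\log\pi(y) - \log\pi(x)\right)
 *   \,\frac{q(y\rightarrow x)}{q(x\rightarrow y)} \;\ge\; u,
 * @f]
 * where @f$u@f$ is drawn uniformly from @f$[0,1)@f$ and @f$\log\pi@f$ denotes
 * the sum of log likelihood and log prior; otherwise the previous sample is
 * recorded again.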
2042
2043 *
2044 * Finally, we note that the output is generated with 7 digits of
2045 * accuracy. (The C++ default is 6 digits.) We do this because,
2046 * as shown in the paper, we can determine the mean value of the
2047 * probability distribution we are sampling here to at least six
2048 * digits of accuracy, and do not want to be limited by the precision
2049 * of the output.
2050 *
2051 * @code
2052 * namespace Sampler
2053 * {
2054 * class MetropolisHastings
2055 * {
2056 * public:
2057 * MetropolisHastings(ForwardSimulator::Interface & simulator,
2058 * const LogLikelihood::Interface & likelihood,
2059 * const LogPrior::Interface & prior,
2060 * const ProposalGenerator::Interface &proposal_generator,
2061 * const unsigned int random_seed,
2062 * const std::string & dataset_name);
2063 *
2064 * void sample(const Vector<double> &starting_guess,
2065 * const unsigned int n_samples);
2066 *
2067 * private:
2068 * ForwardSimulator::Interface & simulator;
2069 * const LogLikelihood::Interface & likelihood;
2070 * const LogPrior::Interface & prior;
2071 * const ProposalGenerator::Interface &proposal_generator;
2072 *
2073 * std::mt19937 random_number_generator;
2074 *
2075 * unsigned int sample_number;
2076 * unsigned int accepted_sample_number;
2077 *
2078 * std::ofstream output_file;
2079 *
2080 * void write_sample(const Vector<double> &current_sample,
2081 * const double current_log_likelihood);
2082 * };
2083 *
2084 *
2085 * MetropolisHastings::MetropolisHastings(
2086 * ForwardSimulator::Interface & simulator,
2087 * const LogLikelihood::Interface & likelihood,
2088 * const LogPrior::Interface & prior,
2089 * const ProposalGenerator::Interface &proposal_generator,
2090 * const unsigned int random_seed,
2091 * const std::string & dataset_name)
2092 * : simulator(simulator)
2093 * , likelihood(likelihood)
2094 * , prior(prior)
2095 * , proposal_generator(proposal_generator)
2096 * , sample_number(0)
2097 * , accepted_sample_number(0)
2098 * {
2099 * output_file.open("samples-" + dataset_name + ".txt");
2100 * output_file.precision(7);
2101 *
2102 * random_number_generator.seed(random_seed);
2103 * }
2104 *
2105 *
2106 * void MetropolisHastings::sample(const Vector<double> &starting_guess,
2107 * const unsigned int n_samples)
2108 * {
2109 * std::uniform_real_distribution<> uniform_distribution(0, 1);
2110 *
2111 * Vector<double> current_sample = starting_guess;
2112 * double current_log_posterior =
2113 * (likelihood.log_likelihood(simulator.evaluate(current_sample)) +
2114 * prior.log_prior(current_sample));
2115 *
2116 * ++sample_number;
2117 * ++accepted_sample_number;
2118 * write_sample(current_sample, current_log_posterior);
2119 *
2120 * for (unsigned int k = 1; k < n_samples; ++k, ++sample_number)
2121 * {
2122 * std::pair<Vector<double>,double>
2123 * perturbation = proposal_generator.perturb(current_sample);
2124 * const Vector<double> trial_sample = std::move (perturbation.first);
2125 * const double perturbation_probability_ratio = perturbation.second;
2126 *
2127 * const double trial_log_posterior =
2128 * (likelihood.log_likelihood(simulator.evaluate(trial_sample)) +
2129 * prior.log_prior(trial_sample));
2130 *
2131 * if (std::exp(trial_log_posterior - current_log_posterior) * perturbation_probability_ratio
2132 * >=
2133 * uniform_distribution(random_number_generator))
2134 * {
2135 * current_sample = trial_sample;
2136 * current_log_posterior = trial_log_posterior;
2137 *
2138 * ++accepted_sample_number;
2139 * }
2140 *
2141 * write_sample(current_sample, current_log_posterior);
2142 * }
2143 * }
2144 *
2145 *
2146 *
2147 * void MetropolisHastings::write_sample(const Vector<double> &current_sample,
2148 * const double current_log_posterior)
2149 * {
2150 * output_file << current_log_posterior << '\t';
2151 * output_file << accepted_sample_number << '\t';
2152 * for (const auto &x : current_sample)
2153 * output_file << x << ' ';
2154 * output_file << '\n';
2155 *
2156 * output_file.flush();
2157 * }
2158 * } // namespace Sampler
2159 *
2160 *
2161 * @endcode
2162 *
2163 * The final function is `main()`, which simply puts all of these pieces
2164 * together into one. The "exact solution", i.e., the "measurement values"
2165 * we use for this program are tabulated to make it easier for other
2166 * people to use in their own implementations of this benchmark. These
 2167 * values were created using the same main class above, but using 8 mesh
2168 * refinements and using a Q3 element -- i.e., using a much more accurate
2169 * method than the one we use in the forward simulator for generating
2170 * samples below (which uses 5 global mesh refinement steps and a Q1
2171 * element). If you wanted to regenerate this set of numbers, then
2172 * the following code snippet would do that:
2173 * <div class=CodeFragmentInTutorialComment>
2174 * @code
2175 * /* Set the exact coefficient: */
2176 * Vector<double> exact_coefficients(64);
2177 * for (auto &el : exact_coefficients)
2178 * el = 1.;
2179 * exact_coefficients(9) = exact_coefficients(10) = exact_coefficients(17) =
2180 * exact_coefficients(18) = 0.1;
2181 * exact_coefficients(45) = exact_coefficients(46) = exact_coefficients(53) =
2182 * exact_coefficients(54) = 10.;
2183 *
2184
2185 * /* Compute the "correct" solution vector: */
2186 * const Vector<double> exact_solution =
2187 * ForwardSimulator::PoissonSolver<2>(/* global_refinements = */ 8,
2188 * /* fe_degree = */ 3,
2189 * /* prefix = */ "exact")
2190 * .evaluate(exact_coefficients);
2191 * @endcode
2192 * </div>
2193 *
2194 * @code
2195 * int main()
2196 * {
2197 * const bool testing = true;
2198 *
2199 * @endcode
2200 *
2201 * Run with one thread, so as to not step on other processes
2202 * doing the same at the same time. It turns out that the problem
2203 * is also so small that running with more than one thread
2204 * *increases* the runtime.
2205 *
2206 * @code
2207 * MultithreadInfo::set_thread_limit(1);
2208 *
2209 * const unsigned int random_seed = (testing ? 1U : std::random_device()());
2210 * const std::string dataset_name = Utilities::to_string(random_seed, 10);
2211 *
2212 * const Vector<double> exact_solution(
2213 * { 0.06076511762259369, 0.09601910120848481,
2214 * 0.1238852517838584, 0.1495184117375201,
2215 * 0.1841596127549784, 0.2174525028261122,
2216 * 0.2250996160898698, 0.2197954769002993,
2217 * 0.2074695698370926, 0.1889996477663016,
2218 * 0.1632722532153726, 0.1276782480038186,
2219 * 0.07711845915789312, 0.09601910120848552,
2220 * 0.2000589533367983, 0.3385592591951766,
2221 * 0.3934300024647806, 0.4040223892461541,
2222 * 0.4122329537843092, 0.4100480091545554,
2223 * 0.3949151637189968, 0.3697873264791232,
2224 * 0.33401826235924, 0.2850397806663382,
2225 * 0.2184260032478671, 0.1271121156350957,
2226 * 0.1238852517838611, 0.3385592591951819,
2227 * 0.7119285162766475, 0.8175712861756428,
2228 * 0.6836254116578105, 0.5779452419831157,
2229 * 0.5555615956136897, 0.5285181561736719,
2230 * 0.491439702849224, 0.4409367494853282,
2231 * 0.3730060082060772, 0.2821694983395214,
2232 * 0.1610176733857739, 0.1495184117375257,
2233 * 0.3934300024647929, 0.8175712861756562,
2234 * 0.9439154625527653, 0.8015904115095128,
2235 * 0.6859683749254024, 0.6561235366960599,
2236 * 0.6213197201867315, 0.5753611315000049,
2237 * 0.5140091754526823, 0.4325325506354165,
2238 * 0.3248315148915482, 0.1834600412730086,
2239 * 0.1841596127549917, 0.4040223892461832,
2240 * 0.6836254116578439, 0.8015904115095396,
2241 * 0.7870119561144977, 0.7373108331395808,
2242 * 0.7116558878070463, 0.6745179049094283,
2243 * 0.6235300574156917, 0.5559332704045935,
2244 * 0.4670304994474178, 0.3499809143811,
2245 * 0.19688263746294, 0.2174525028261253,
2246 * 0.4122329537843404, 0.5779452419831566,
2247 * 0.6859683749254372, 0.7373108331396063,
2248 * 0.7458811983178246, 0.7278968022406559,
2249 * 0.6904793535357751, 0.6369176452710288,
2250 * 0.5677443693743215, 0.4784738764865867,
2251 * 0.3602190632823262, 0.2031792054737325,
2252 * 0.2250996160898818, 0.4100480091545787,
2253 * 0.5555615956137137, 0.6561235366960938,
2254 * 0.7116558878070715, 0.727896802240657,
2255 * 0.7121928678670187, 0.6712187391428729,
2256 * 0.6139157775591492, 0.5478251665295381,
2257 * 0.4677122687599031, 0.3587654911000848,
2258 * 0.2050734291675918, 0.2197954769003094,
2259 * 0.3949151637190157, 0.5285181561736911,
2260 * 0.6213197201867471, 0.6745179049094407,
2261 * 0.690479353535786, 0.6712187391428787,
2262 * 0.6178408289359514, 0.5453605027237883,
2263 * 0.489575966490909, 0.4341716881061278,
2264 * 0.3534389974779456, 0.2083227496961347,
2265 * 0.207469569837099, 0.3697873264791366,
2266 * 0.4914397028492412, 0.5753611315000203,
2267 * 0.6235300574157017, 0.6369176452710497,
2268 * 0.6139157775591579, 0.5453605027237935,
2269 * 0.4336604929612851, 0.4109641743019312,
2270 * 0.3881864790111245, 0.3642640090182592,
2271 * 0.2179599909280145, 0.1889996477663011,
2272 * 0.3340182623592461, 0.4409367494853381,
2273 * 0.5140091754526943, 0.5559332704045969,
2274 * 0.5677443693743304, 0.5478251665295453,
2275 * 0.4895759664908982, 0.4109641743019171,
2276 * 0.395727260284338, 0.3778949322004734,
2277 * 0.3596268271857124, 0.2191250268948948,
2278 * 0.1632722532153683, 0.2850397806663325,
2279 * 0.373006008206081, 0.4325325506354207,
2280 * 0.4670304994474315, 0.4784738764866023,
2281 * 0.4677122687599041, 0.4341716881061055,
2282 * 0.388186479011099, 0.3778949322004602,
2283 * 0.3633362567187364, 0.3464457261905399,
2284 * 0.2096362321365655, 0.1276782480038148,
2285 * 0.2184260032478634, 0.2821694983395252,
2286 * 0.3248315148915535, 0.3499809143811097,
2287 * 0.3602190632823333, 0.3587654911000799,
2288 * 0.3534389974779268, 0.3642640090182283,
2289 * 0.35962682718569, 0.3464457261905295,
2290 * 0.3260728953424643, 0.180670595355394,
2291 * 0.07711845915789244, 0.1271121156350963,
2292 * 0.1610176733857757, 0.1834600412730144,
2293 * 0.1968826374629443, 0.2031792054737354,
2294 * 0.2050734291675885, 0.2083227496961245,
2295 * 0.2179599909279998, 0.2191250268948822,
2296 * 0.2096362321365551, 0.1806705953553887,
2297 * 0.1067965550010013 });
2298 *
2299 * @endcode
2300 *
2301 * Now run the forward simulator for samples:
2302 *
2303 * @code
2304 * ForwardSimulator::PoissonSolver<2> laplace_problem(
2305 * /* global_refinements = */ 5,
2306 * /* fe_degree = */ 1,
2307 * dataset_name);
2308 * LogLikelihood::Gaussian log_likelihood(exact_solution, 0.05);
2309 * LogPrior::LogGaussian log_prior(0, 2);
2310 * ProposalGenerator::LogGaussian proposal_generator(
2311 * random_seed, 0.09); /* so that the acceptance ratio is ~0.24 */
2312 * Sampler::MetropolisHastings sampler(laplace_problem,
2313 * log_likelihood,
2314 * log_prior,
2315 * proposal_generator,
2316 * random_seed,
2317 * dataset_name);
2318 *
2319 * Vector<double> starting_coefficients(64);
2320 * for (auto &el : starting_coefficients)
2321 * el = 1.;
2322 * sampler.sample(starting_coefficients,
2323 * (testing ? 250 * 40 /* takes 10 seconds */
2324 * :
2325 * 100000000 /* takes 1.5 days */
2326 * ));
2327 * }
2328 * @endcode
2329
2330
2331*/