deal.II version GIT relicensing-1721-g8100761196 2024-08-31 12:30:00+00:00
\(\newcommand{\dealvcentcolon}{\mathrel{\mathop{:}}}\) \(\newcommand{\dealcoloneq}{\dealvcentcolon\mathrel{\mkern-1.2mu}=}\) \(\newcommand{\jump}[1]{\left[\!\left[ #1 \right]\!\right]}\) \(\newcommand{\average}[1]{\left\{\!\left\{ #1 \right\}\!\right\}}\)
step-59.h
814 *   }
815 *  
816 *  
817 *  
818 * @endcode
819 *
820 * Since the Laplacian is symmetric, the `Tvmult()` (needed by the multigrid
821 * smoother interfaces) operation is simply forwarded to the `vmult()` case.
822 *
823
824 *
825 *
826 * @code
827 *   template <int dim, int fe_degree, typename number>
 828 *   void LaplaceOperator<dim, fe_degree, number>::Tvmult(
 829 *   LinearAlgebra::distributed::Vector<number> &dst,
 830 *   const LinearAlgebra::distributed::Vector<number> &src) const
831 *   {
832 *   vmult(dst, src);
833 *   }
834 *  
835 *  
836 *  
837 * @endcode
838 *
839 * The cell operation is very similar to @ref step_37 "step-37". We do not use a
840 * coefficient here, though. The second difference is that we replaced the
841 * two steps of FEEvaluation::read_dof_values() followed by
842 * FEEvaluation::evaluate() by a single function call
843 * FEEvaluation::gather_evaluate() which internally calls the sequence of
844 * the two individual methods. Likewise, FEEvaluation::integrate_scatter()
845 * implements the sequence of FEEvaluation::integrate() followed by
846 * FEEvaluation::distribute_local_to_global(). In this case, these new
847 * functions merely save two lines of code. However, we use them for the
848 * analogy with FEFaceEvaluation where they are more important as
849 * explained below.
850 *
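 * For comparison, here is a minimal sketch of the equivalent two-step variant
 * known from @ref step_37 "step-37" (not used in this program), assuming the
 * same `phi`, `src`, and `dst` objects as in the function below:
 *
 * @code
 *   phi.reinit(cell);
 *   phi.read_dof_values(src);
 *   phi.evaluate(EvaluationFlags::gradients);
 *   // ... quadrature point loop as in apply_cell() below ...
 *   phi.integrate(EvaluationFlags::gradients);
 *   phi.distribute_local_to_global(dst);
 * @endcode
 *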
851
852 *
853 *
854 * @code
855 *   template <int dim, int fe_degree, typename number>
856 *   void LaplaceOperator<dim, fe_degree, number>::apply_cell(
857 *   const MatrixFree<dim, number> &data,
858 *   LinearAlgebra::distributed::Vector<number> &dst,
859 *   const LinearAlgebra::distributed::Vector<number> &src,
860 *   const std::pair<unsigned int, unsigned int> &cell_range) const
 861 *   {
 862 *   FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(data);
 863 *   for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
864 *   {
865 *   phi.reinit(cell);
866 *   phi.gather_evaluate(src, EvaluationFlags::gradients);
867 *   for (const unsigned int q : phi.quadrature_point_indices())
868 *   phi.submit_gradient(phi.get_gradient(q), q);
869 *   phi.integrate_scatter(EvaluationFlags::gradients, dst);
870 *   }
871 *   }
872 *  
873 *  
874 *  
875 * @endcode
876 *
877 * The face operation implements the terms of the interior penalty method in
878 * analogy to @ref step_39 "step-39", as explained in the introduction. We need two
879 * evaluator objects for this task, one for handling the solution that comes
880 * from the cell on one of the two sides of an interior face, and one for
881 * handling the solution from the other side. The evaluators for face
882 * integrals are called FEFaceEvaluation and take a boolean argument in the
883 * second slot of the constructor to indicate which of the two sides the
 884 * evaluator should belong to. In FEFaceEvaluation and MatrixFree, we call
885 * one of the two sides the `interior` one and the other the `exterior`
886 * one. The name `exterior` refers to the fact that the evaluator from both
887 * sides will return the same normal vector. For the `interior` side, the
888 * normal vector points outwards, whereas it points inwards on the other
889 * side, and is opposed to the outer normal vector of that cell. Apart from
890 * the new class name, we again get a range of items to work with in
891 * analogy to what was discussed in @ref step_37 "step-37", but for the interior faces in
892 * this case. Note that the data structure of MatrixFree forms batches of
893 * faces that are analogous to the batches of cells for the cell
894 * integrals. All faces within a batch involve different cell numbers but
 895 * have the same face number within the reference cell, the same refinement
896 * configuration (no refinement or the same subface), and the same
897 * orientation, to keep SIMD operations simple and efficient.
898 *
899
900 *
901 * Note that there is no implied meaning in interior versus exterior except
902 * the logic decision of the orientation of the normal, which is pretty
903 * random internally. One can in no way rely on a certain pattern of
904 * assigning interior versus exterior flags, as the decision is made for the
905 * sake of access regularity and uniformity in the MatrixFree setup
906 * routines. Since most sane DG methods are conservative, i.e., fluxes look
907 * the same from both sides of an interface, the mathematics are unaltered
908 * if the interior/exterior flags are switched and normal vectors get the
909 * opposite sign.
910 *
911
912 *
913 *
914 * @code
915 *   template <int dim, int fe_degree, typename number>
916 *   void LaplaceOperator<dim, fe_degree, number>::apply_face(
 917 *   const MatrixFree<dim, number> &data,
 918 *   LinearAlgebra::distributed::Vector<number> &dst,
 919 *   const LinearAlgebra::distributed::Vector<number> &src,
 920 *   const std::pair<unsigned int, unsigned int> &face_range) const
 921 *   {
 922 *   FEFaceEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi_inner(data,
 923 *   true);
 924 *   FEFaceEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi_outer(data,
 925 *   false);
926 *   for (unsigned int face = face_range.first; face < face_range.second; ++face)
927 *   {
928 * @endcode
929 *
930 * On a given batch of faces, we first update the pointers to the
931 * current face and then access the vector. As mentioned above, we
932 * combine the vector access with the evaluation. In the case of face
933 * integrals, the data access into the vector can be reduced for the
934 * special case of an FE_DGQHermite basis as explained for the data
935 * exchange above: Since only @f$2(k+1)^{d-1}@f$ out of the @f$(k+1)^d@f$ cell
936 * degrees of freedom get multiplied by a non-zero value or derivative
937 * of a shape function, this structure can be utilized for the
938 * evaluation, significantly reducing the data access. The reduction
939 * of the data access is not only beneficial because it reduces the
940 * data in flight and thus helps caching, but also because the data
941 * access to faces is often more irregular than for cell integrals when
942 * gathering values from cells that are farther apart in the index
943 * list of cells.
944 *
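 * For the degree @f$k=8@f$ elements in 3d used in this program, for example,
 * only @f$2(k+1)^{d-1} = 2\cdot 9^2 = 162@f$ of the @f$(k+1)^d = 9^3 = 729@f$
 * cell values need to be touched per face, i.e., less than a quarter of the
 * data.
 *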
945 * @code
946 *   phi_inner.reinit(face);
 947 *   phi_inner.gather_evaluate(src,
 948 *   EvaluationFlags::values |
 949 *   EvaluationFlags::gradients);
 950 *   phi_outer.reinit(face);
 951 *   phi_outer.gather_evaluate(src,
 952 *   EvaluationFlags::values |
 953 *   EvaluationFlags::gradients);
 954 *  
955 * @endcode
956 *
957 * The next two statements compute the penalty parameter for the
958 * interior penalty method. As explained in the introduction, we would
959 * like to have a scaling like @f$\frac{1}{h_\text{i}}@f$ of the length
960 * @f$h_\text{i}@f$ normal to the face. For a general non-Cartesian mesh,
961 * this length must be computed by the product of the inverse Jacobian
962 * times the normal vector in real coordinates. From this vector of
963 * `dim` components, we must finally pick the component that is
964 * oriented normal to the reference cell. In the geometry data stored
965 * in MatrixFree, a permutation of the components in the Jacobian is
966 * applied such that this latter direction is always the last
967 * component `dim-1` (this is beneficial because reference-cell
968 * derivative sorting can be made agnostic of the direction of the
969 * face). This means that we can simply access the last entry `dim-1`
 970 * and do not need to look up the local face number in
971 * `data.get_face_info(face).interior_face_no` and
972 * `data.get_face_info(face).exterior_face_no`. Finally, we must also
973 * take the absolute value of these factors as the normal could point
974 * into either positive or negative direction.
975 *
976 * @code
977 *   const VectorizedArray<number> inverse_length_normal_to_face =
978 *   0.5 * (std::abs((phi_inner.normal_vector(0) *
979 *   phi_inner.inverse_jacobian(0))[dim - 1]) +
980 *   std::abs((phi_outer.normal_vector(0) *
981 *   phi_outer.inverse_jacobian(0))[dim - 1]));
982 *   const VectorizedArray<number> sigma =
983 *   inverse_length_normal_to_face * get_penalty_factor();
984 *  
985 * @endcode
986 *
987 * In the loop over the quadrature points, we eventually compute all
988 * contributions to the interior penalty scheme. According to the
989 * formulas in the introduction, the value of the test function gets
990 * multiplied by the difference of the jump in the solution times the
991 * penalty parameter and the average of the normal derivative in real
992 * space. Since the two evaluators for interior and exterior sides get
993 * different signs due to the jump, we pass the result with a
994 * different sign here. The normal derivative of the test function
995 * gets multiplied by the negative jump in the solution between the
996 * interior and exterior side. This term, coined adjoint consistency
997 * term, must also include the factor of @f$\frac{1}{2}@f$ in the code in
998 * accordance with its relation to the primal consistency term that
999 * gets the factor of one half due to the average in the test function
1000 * slot.
1001 *
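 * Schematically, the face contribution assembled by the loop below reads
 * @f$\int_F \sigma\,\jump{u}\,\jump{v}
 * - \average{\mathbf{n}\cdot\nabla u}\,\jump{v}
 * - \jump{u}\,\average{\mathbf{n}\cdot\nabla v}\,\mathrm{d}s@f$
 * (notation as in the introduction).
 *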
1002 * @code
1003 *   for (const unsigned int q : phi_inner.quadrature_point_indices())
1004 *   {
1005 *   const VectorizedArray<number> solution_jump =
1006 *   (phi_inner.get_value(q) - phi_outer.get_value(q));
1007 *   const VectorizedArray<number> average_normal_derivative =
1008 *   (phi_inner.get_normal_derivative(q) +
1009 *   phi_outer.get_normal_derivative(q)) *
1010 *   number(0.5);
1011 *   const VectorizedArray<number> test_by_value =
1012 *   solution_jump * sigma - average_normal_derivative;
1013 *  
1014 *   phi_inner.submit_value(test_by_value, q);
1015 *   phi_outer.submit_value(-test_by_value, q);
1016 *  
1017 *   phi_inner.submit_normal_derivative(-solution_jump * number(0.5), q);
1018 *   phi_outer.submit_normal_derivative(-solution_jump * number(0.5), q);
1019 *   }
1020 *  
1021 * @endcode
1022 *
1023 * Once we are done with the loop over quadrature points, we can do
1024 * the sum factorization operations for the integration loops on faces
1025 * and sum the results into the result vector, using the
1026 * `integrate_scatter` function. The name `scatter` reflects the
1027 * distribution of the vector data into scattered positions in the
1028 * vector using the same pattern as in `gather_evaluate`. Like before,
1029 * the combined integrate + write operation allows us to reduce the
1030 * data access.
1031 *
1032 * @code
 1033 *   phi_inner.integrate_scatter(EvaluationFlags::values |
 1034 *   EvaluationFlags::gradients,
 1035 *   dst);
 1036 *   phi_outer.integrate_scatter(EvaluationFlags::values |
 1037 *   EvaluationFlags::gradients,
 1038 *   dst);
1039 *   }
1040 *   }
1041 *  
1042 *  
1043 *  
1044 * @endcode
1045 *
1046 * The boundary face function follows by and large the interior face
1047 * function. The only difference is the fact that we do not have a separate
1048 * FEFaceEvaluation object that provides us with exterior values @f$u^+@f$, but
1049 * we must define them from the boundary conditions and interior values
1050 * @f$u^-@f$. As explained in the introduction, we use @f$u^+ = -u^- + 2
1051 * g_\text{D}@f$ and @f$\mathbf{n}^-\cdot \nabla u^+ = \mathbf{n}^-\cdot \nabla
1052 * u^-@f$ on Dirichlet boundaries and @f$u^+=u^-@f$ and @f$\mathbf{n}^-\cdot \nabla
1053 * u^+ = -\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}@f$ on Neumann
1054 * boundaries. Since this operation implements the homogeneous part, i.e.,
1055 * the matrix-vector product, we must neglect the boundary functions
 1056 * @f$g_\text{D}@f$ and @f$g_\text{N}@f$ here, and instead add them to the right hand side
1057 * in `LaplaceProblem::compute_rhs()`. Note that due to extension of the
1058 * solution @f$u^-@f$ to the exterior via @f$u^+@f$, we can keep all factors @f$0.5@f$
1059 * the same as in the inner face function, see also the discussion in
1060 * @ref step_39 "step-39".
1061 *
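 * In the homogeneous case @f$g_\text{D} = g_\text{N} = 0@f$ relevant for this
 * function, the mirror principle on a Dirichlet face gives @f$u^+ = -u^-@f$ and
 * @f$\mathbf{n}^-\cdot\nabla u^+ = \mathbf{n}^-\cdot\nabla u^-@f$, so the jump
 * becomes @f$u^- - u^+ = 2u^-@f$ while the average normal derivative reduces to
 * @f$\mathbf{n}^-\cdot\nabla u^-@f$; this is what the code below implements via
 * `u_outer = -u_inner` and `normal_derivative_outer = normal_derivative_inner`.
 * On a Neumann face, both the jump and the average normal derivative vanish.
 *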
1062
1063 *
1064 * There is one catch at this point: The implementation below uses a boolean
1065 * variable `is_dirichlet` to switch between the Dirichlet and the Neumann
1066 * cases. However, we solve a problem where we also want to impose periodic
1067 * boundary conditions on some boundaries, namely along those in the @f$x@f$
1068 * direction. One might wonder how those conditions should be handled
1069 * here. The answer is that MatrixFree automatically treats periodic
1070 * boundaries as what they are technically, namely an inner face where the
1071 * solution values of two adjacent cells meet and must be treated by proper
1072 * numerical fluxes. Thus, all the faces on the periodic boundaries will
1073 * appear in the `apply_face()` function and not in this one.
1074 *
1075
1076 *
1077 *
1078 * @code
1079 *   template <int dim, int fe_degree, typename number>
1080 *   void LaplaceOperator<dim, fe_degree, number>::apply_boundary(
 1081 *   const MatrixFree<dim, number> &data,
 1082 *   LinearAlgebra::distributed::Vector<number> &dst,
 1083 *   const LinearAlgebra::distributed::Vector<number> &src,
 1084 *   const std::pair<unsigned int, unsigned int> &face_range) const
 1085 *   {
 1086 *   FEFaceEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi_inner(data,
 1087 *   true);
1088 *   for (unsigned int face = face_range.first; face < face_range.second; ++face)
1089 *   {
1090 *   phi_inner.reinit(face);
 1091 *   phi_inner.gather_evaluate(src,
 1092 *   EvaluationFlags::values |
 1093 *   EvaluationFlags::gradients);
 1094 *  
1095 *   const VectorizedArray<number> inverse_length_normal_to_face = std::abs((
1096 *   phi_inner.normal_vector(0) * phi_inner.inverse_jacobian(0))[dim - 1]);
1097 *   const VectorizedArray<number> sigma =
1098 *   inverse_length_normal_to_face * get_penalty_factor();
1099 *  
1100 *   const bool is_dirichlet = (data.get_boundary_id(face) == 0);
1101 *  
1102 *   for (const unsigned int q : phi_inner.quadrature_point_indices())
1103 *   {
1104 *   const VectorizedArray<number> u_inner = phi_inner.get_value(q);
1105 *   const VectorizedArray<number> u_outer =
1106 *   is_dirichlet ? -u_inner : u_inner;
1107 *   const VectorizedArray<number> normal_derivative_inner =
1108 *   phi_inner.get_normal_derivative(q);
1109 *   const VectorizedArray<number> normal_derivative_outer =
1110 *   is_dirichlet ? normal_derivative_inner : -normal_derivative_inner;
1111 *   const VectorizedArray<number> solution_jump = (u_inner - u_outer);
1112 *   const VectorizedArray<number> average_normal_derivative =
1113 *   (normal_derivative_inner + normal_derivative_outer) * number(0.5);
1114 *   const VectorizedArray<number> test_by_value =
1115 *   solution_jump * sigma - average_normal_derivative;
1116 *   phi_inner.submit_normal_derivative(-solution_jump * number(0.5), q);
1117 *   phi_inner.submit_value(test_by_value, q);
1118 *   }
 1119 *   phi_inner.integrate_scatter(EvaluationFlags::values |
 1120 *   EvaluationFlags::gradients,
 1121 *   dst);
1122 *   }
1123 *   }
1124 *  
1125 *  
1126 *  
1127 * @endcode
1128 *
1129 * Next we turn to the preconditioner initialization. As explained in the
1130 * introduction, we want to construct an (approximate) inverse of the cell
1131 * matrices from a product of 1d mass and Laplace matrices. Our first task
1132 * is to compute the 1d matrices, which we do by first creating a 1d finite
1133 * element. Instead of anticipating FE_DGQHermite<1> here, we get the finite
1134 * element's name from DoFHandler, replace the @p dim argument (2 or 3) by 1
1135 * to create a 1d name, and construct the 1d element by using FETools.
1136 *
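 * For example, in 3d with polynomial degree 8 the name returned by
 * FiniteElement::get_name() is `FE_DGQHermite<3>(8)`, which the replacement
 * below turns into `FE_DGQHermite<1>(8)` before it is handed to
 * FETools::get_fe_by_name().
 *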
1137
1138 *
1139 *
1140 * @code
1141 *   template <int dim, int fe_degree, typename number>
1142 *   void PreconditionBlockJacobi<dim, fe_degree, number>::initialize(
1143 *   const LaplaceOperator<dim, fe_degree, number> &op)
1144 *   {
1145 *   data = op.get_matrix_free();
1146 *  
1147 *   std::string name = data->get_dof_handler().get_fe().get_name();
1148 *   name.replace(name.find('<') + 1, 1, "1");
1149 *   std::unique_ptr<FiniteElement<1>> fe_1d = FETools::get_fe_by_name<1>(name);
1150 *  
1151 * @endcode
1152 *
1153 * As for computing the 1d matrices on the unit element, we simply write
1154 * down what a typical assembly procedure over rows and columns of the
1155 * matrix as well as the quadrature points would do. We select the same
1156 * Laplace matrices once and for all using the coefficients 0.5 for
1157 * interior faces (but possibly scaled differently in different directions
1158 * as a result of the mesh). Thus, we make a slight mistake at the
1159 * Dirichlet boundary (where the correct factor would be 1 for the
1160 * derivative terms and 2 for the penalty term, see @ref step_39 "step-39") or at the
1161 * Neumann boundary where the factor should be zero. Since we only use
1162 * this class as a smoother inside a multigrid scheme, this error is not
1163 * going to have any significant effect and merely affects smoothing
1164 * quality.
1165 *
1166 * @code
1167 *   const unsigned int N = fe_degree + 1;
1168 *   FullMatrix<double> laplace_unscaled(N, N);
1169 *   std::array<Table<2, VectorizedArray<number>>, dim> mass_matrices;
1170 *   std::array<Table<2, VectorizedArray<number>>, dim> laplace_matrices;
1171 *   for (unsigned int d = 0; d < dim; ++d)
1172 *   {
1173 *   mass_matrices[d].reinit(N, N);
1174 *   laplace_matrices[d].reinit(N, N);
1175 *   }
1176 *  
1177 *   const QGauss<1> quadrature(N);
1178 *   for (unsigned int i = 0; i < N; ++i)
1179 *   for (unsigned int j = 0; j < N; ++j)
1180 *   {
1181 *   double sum_mass = 0, sum_laplace = 0;
1182 *   for (unsigned int q = 0; q < quadrature.size(); ++q)
1183 *   {
1184 *   sum_mass += (fe_1d->shape_value(i, quadrature.point(q)) *
1185 *   fe_1d->shape_value(j, quadrature.point(q))) *
1186 *   quadrature.weight(q);
1187 *   sum_laplace += (fe_1d->shape_grad(i, quadrature.point(q))[0] *
1188 *   fe_1d->shape_grad(j, quadrature.point(q))[0]) *
1189 *   quadrature.weight(q);
1190 *   }
1191 *   for (unsigned int d = 0; d < dim; ++d)
1192 *   mass_matrices[d](i, j) = sum_mass;
1193 *  
1194 * @endcode
1195 *
1196 * The left and right boundary terms assembled by the next two
1197 * statements appear to have somewhat arbitrary signs, but those are
1198 * correct as can be verified by looking at @ref step_39 "step-39" and inserting
1199 * the value -1 and 1 for the normal vector in the 1d case.
1200 *
1201 * @code
1202 *   sum_laplace +=
1203 *   (1. * fe_1d->shape_value(i, Point<1>()) *
1204 *   fe_1d->shape_value(j, Point<1>()) * op.get_penalty_factor() +
1205 *   0.5 * fe_1d->shape_grad(i, Point<1>())[0] *
1206 *   fe_1d->shape_value(j, Point<1>()) +
1207 *   0.5 * fe_1d->shape_grad(j, Point<1>())[0] *
1208 *   fe_1d->shape_value(i, Point<1>()));
1209 *  
1210 *   sum_laplace +=
1211 *   (1. * fe_1d->shape_value(i, Point<1>(1.0)) *
1212 *   fe_1d->shape_value(j, Point<1>(1.0)) * op.get_penalty_factor() -
1213 *   0.5 * fe_1d->shape_grad(i, Point<1>(1.0))[0] *
1214 *   fe_1d->shape_value(j, Point<1>(1.0)) -
1215 *   0.5 * fe_1d->shape_grad(j, Point<1>(1.0))[0] *
1216 *   fe_1d->shape_value(i, Point<1>(1.0)));
1217 *  
1218 *   laplace_unscaled(i, j) = sum_laplace;
1219 *   }
1220 *  
1221 * @endcode
1222 *
1223 * Next, we go through the cells and pass the scaled matrices to
1224 * TensorProductMatrixSymmetricSum to actually compute the generalized
1225 * eigenvalue problem for representing the inverse: Since the matrix
1226 * approximation is constructed as @f$A\otimes M + M\otimes A@f$ and the
1227 * weights are constant for each element, we can apply all weights on the
1228 * Laplace matrix and simply keep the mass matrices unscaled. In the loop
1229 * over cells, we want to make use of the geometry compression provided by
1230 * the MatrixFree class and check if the current geometry is the same as
1231 * on the last cell batch, in which case there is nothing to do. This
1232 * compression can be accessed by
1233 * FEEvaluation::get_mapping_data_index_offset() once `reinit()` has been
1234 * called.
1235 *
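 * As a brief sketch of what happens inside TensorProductMatrixSymmetricSum
 * (written here for 2d, and assuming the conventions @f$A S = M S \Lambda@f$
 * with @f$S^\mathrm{T} M S = I@f$ for the generalized eigenvalue problem), the
 * inverse of the approximation is applied as
 * @f$(A\otimes M + M\otimes A)^{-1} = (S\otimes S)\,
 * (\Lambda\otimes I + I\otimes\Lambda)^{-1}\,(S\otimes S)^\mathrm{T}@f$,
 * i.e., only 1d matrices and a diagonal scaling are involved.
 *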
1236
1237 *
1238 * Once we have accessed the inverse Jacobian through the FEEvaluation
1239 * access function (we take the one for the zeroth quadrature point as
1240 * they should be the same on all quadrature points for a Cartesian cell),
1241 * we check that it is diagonal and then extract the determinant of the
1242 * original Jacobian, i.e., the inverse of the determinant of the inverse
1243 * Jacobian, and set the weight as @f$\text{det}(J) / h_d^2@f$ according to
1244 * the 1d Laplacian times @f$d-1@f$ copies of the @ref GlossMassMatrix "mass matrix".
1245 *
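 * For instance, on a 2d Cartesian cell of extents @f$h_0 \times h_1@f$ the
 * inverse Jacobian is @f$\mathrm{diag}(1/h_0, 1/h_1)@f$, so the code below
 * obtains @f$\det(J) = h_0 h_1@f$ and the scaling factor for direction @f$d@f$
 * evaluates to @f$(1/h_d)^2\, h_0 h_1 = \det(J)/h_d^2@f$, as stated above.
 *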
1246 * @code
1247 *   cell_matrices.clear();
1248 *   FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(*data);
1249 *   unsigned int old_mapping_data_index = numbers::invalid_unsigned_int;
1250 *   for (unsigned int cell = 0; cell < data->n_cell_batches(); ++cell)
1251 *   {
1252 *   phi.reinit(cell);
1253 *  
1254 *   if (phi.get_mapping_data_index_offset() == old_mapping_data_index)
1255 *   continue;
1256 *  
1257 *   Tensor<2, dim, VectorizedArray<number>> inverse_jacobian =
1258 *   phi.inverse_jacobian(0);
1259 *  
1260 *   for (unsigned int d = 0; d < dim; ++d)
1261 *   for (unsigned int e = 0; e < dim; ++e)
1262 *   if (d != e)
1263 *   for (unsigned int v = 0; v < VectorizedArray<number>::size(); ++v)
1264 *   AssertThrow(inverse_jacobian[d][e][v] == 0.,
1265 *   ExcNotImplemented());
1266 *  
1267 *   VectorizedArray<number> jacobian_determinant = inverse_jacobian[0][0];
1268 *   for (unsigned int e = 1; e < dim; ++e)
1269 *   jacobian_determinant *= inverse_jacobian[e][e];
1270 *   jacobian_determinant = 1. / jacobian_determinant;
1271 *  
1272 *   for (unsigned int d = 0; d < dim; ++d)
1273 *   {
1274 *   const VectorizedArray<number> scaling_factor =
1275 *   inverse_jacobian[d][d] * inverse_jacobian[d][d] *
1276 *   jacobian_determinant;
1277 *  
1278 * @endcode
1279 *
1280 * Once we know the factor by which we should scale the Laplace
1281 * matrix, we apply this weight to the unscaled DG Laplace matrix
1282 * and send the array to the class TensorProductMatrixSymmetricSum
1283 * for computing the generalized eigenvalue problem mentioned in
1284 * the introduction.
1285 *
1286
1287 *
1288 *
1289 * @code
1290 *   for (unsigned int i = 0; i < N; ++i)
1291 *   for (unsigned int j = 0; j < N; ++j)
1292 *   laplace_matrices[d](i, j) =
1293 *   scaling_factor * laplace_unscaled(i, j);
1294 *   }
1295 *   if (cell_matrices.size() <= phi.get_mapping_data_index_offset())
1296 *   cell_matrices.resize(phi.get_mapping_data_index_offset() + 1);
1297 *   cell_matrices[phi.get_mapping_data_index_offset()].reinit(
1298 *   mass_matrices, laplace_matrices);
1299 *   }
1300 *   }
1301 *  
1302 *  
1303 *  
1304 * @endcode
1305 *
1306 * The vmult function for the approximate block-Jacobi preconditioner is
1307 * very simple in the DG context: We simply need to read the values of the
1308 * current cell batch, apply the inverse for the given entry in the array of
1309 * tensor product matrix, and write the result back. In this loop, we
1310 * overwrite the content in `dst` rather than first setting the entries to
1311 * zero. This is legitimate for a DG method because every cell has
1312 * independent degrees of freedom. Furthermore, we manually write out the
1313 * loop over all cell batches, rather than going through
1314 * MatrixFree::cell_loop(). We do this because we know that we are not going
1315 * to need data exchange over the MPI network here as all computations are
1316 * done on the cells held locally on each processor.
1317 *
1318
1319 *
1320 *
1321 * @code
1322 *   template <int dim, int fe_degree, typename number>
1323 *   void PreconditionBlockJacobi<dim, fe_degree, number>::vmult(
1324 *   LinearAlgebra::distributed::Vector<number> &dst,
1325 *   const LinearAlgebra::distributed::Vector<number> &src) const
1326 *   {
1327 *   adjust_ghost_range_if_necessary(*data, dst);
1328 *   adjust_ghost_range_if_necessary(*data, src);
1329 *  
1330 *   FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(*data);
1331 *   for (unsigned int cell = 0; cell < data->n_cell_batches(); ++cell)
1332 *   {
1333 *   phi.reinit(cell);
1334 *   phi.read_dof_values(src);
1335 *   cell_matrices[phi.get_mapping_data_index_offset()].apply_inverse(
1336 *   ArrayView<VectorizedArray<number>>(phi.begin_dof_values(),
1337 *   phi.dofs_per_cell),
1338 *   ArrayView<const VectorizedArray<number>>(phi.begin_dof_values(),
1339 *   phi.dofs_per_cell));
1340 *   phi.set_dof_values(dst);
1341 *   }
1342 *   }
1343 *  
1344 *  
1345 *  
1346 * @endcode
1347 *
1348 * The definition of the LaplaceProblem class is very similar to
1349 * @ref step_37 "step-37". One difference is the fact that we add the element degree as a
1350 * template argument to the class, which would allow us to more easily
1351 * include more than one degree in the same program by creating different
1352 * instances in the `main()` function. The second difference is the
1353 * selection of the element, FE_DGQHermite, which is specialized for this
1354 * kind of equations.
1355 *
1356
1357 *
1358 *
1359 * @code
1360 *   template <int dim, int fe_degree>
1361 *   class LaplaceProblem
1362 *   {
1363 *   public:
1364 *   LaplaceProblem();
1365 *   void run();
1366 *  
1367 *   private:
1368 *   void setup_system();
1369 *   void compute_rhs();
1370 *   void solve();
1371 *   void analyze_results() const;
1372 *  
1373 *   #ifdef DEAL_II_WITH_P4EST
1374 *   parallel::distributed::Triangulation<dim> triangulation;
1375 *   #else
1376 *   Triangulation<dim> triangulation;
1377 *   #endif
1378 *  
1379 *   const FE_DGQHermite<dim> fe;
1380 *   DoFHandler<dim> dof_handler;
1381 *  
1382 *   const MappingQ1<dim> mapping;
1383 *  
1384 *   using SystemMatrixType = LaplaceOperator<dim, fe_degree, double>;
1385 *   SystemMatrixType system_matrix;
1386 *  
1387 *   using LevelMatrixType = LaplaceOperator<dim, fe_degree, float>;
1388 *   MGLevelObject<LevelMatrixType> mg_matrices;
1389 *  
1390 *   LinearAlgebra::distributed::Vector<double> solution;
1391 *   LinearAlgebra::distributed::Vector<double> system_rhs;
1392 *  
1393 *   double setup_time;
1394 *   ConditionalOStream pcout;
1395 *   ConditionalOStream time_details;
1396 *   };
1397 *  
1398 *  
1399 *  
1400 *   template <int dim, int fe_degree>
1401 *   LaplaceProblem<dim, fe_degree>::LaplaceProblem()
1402 *   #ifdef DEAL_II_WITH_P4EST
1403 *   : triangulation(MPI_COMM_WORLD,
1404 *   Triangulation<dim>::limit_level_difference_at_vertices,
1405 *   parallel::distributed::Triangulation<
1406 *   dim>::construct_multigrid_hierarchy)
1407 *   #else
1408 *   : triangulation(Triangulation<dim>::limit_level_difference_at_vertices)
1409 *   #endif
1410 *   , fe(fe_degree)
1411 *   , dof_handler(triangulation)
1412 *   , setup_time(0.)
1413 *   , pcout(std::cout, Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
1414 *   , time_details(std::cout,
1415 *   false &&
1416 *   Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
1417 *   {}
1418 *  
1419 *  
1420 *  
1421 * @endcode
1422 *
1423 * The setup function differs in two aspects from @ref step_37 "step-37". The first is that
1424 * we do not need to interpolate any constraints for the discontinuous
1425 * ansatz space, and simply pass a dummy AffineConstraints object into
 1426 * MatrixFree::reinit(). The second change arises because we need to tell
1427 * MatrixFree to also initialize the data structures for faces. We do this
1428 * by setting update flags for the inner and boundary faces,
1429 * respectively. On the boundary faces, we need both the function values,
1430 * their gradients, JxW values (for integration), the normal vectors, and
1431 * quadrature points (for the evaluation of the boundary conditions),
1432 * whereas we only need shape function values, gradients, JxW values, and
1433 * normal vectors for interior faces. The face data structures in MatrixFree
1434 * are always built as soon as one of `mapping_update_flags_inner_faces` or
1435 * `mapping_update_flags_boundary_faces` are different from the default
1436 * value `update_default` of UpdateFlags.
1437 *
1438
1439 *
1440 *
1441 * @code
1442 *   template <int dim, int fe_degree>
1443 *   void LaplaceProblem<dim, fe_degree>::setup_system()
1444 *   {
1445 *   Timer time;
1446 *   setup_time = 0;
1447 *  
1448 *   system_matrix.clear();
1449 *   mg_matrices.clear_elements();
1450 *  
1451 *   dof_handler.distribute_dofs(fe);
1452 *   dof_handler.distribute_mg_dofs();
1453 *  
1454 *   pcout << "Number of degrees of freedom: " << dof_handler.n_dofs()
1455 *   << std::endl;
1456 *  
1457 *   setup_time += time.wall_time();
1458 *   time_details << "Distribute DoFs " << time.wall_time() << " s"
1459 *   << std::endl;
1460 *   time.restart();
1461 *  
1462 *   AffineConstraints<double> dummy;
1463 *   dummy.close();
1464 *  
1465 *   {
1466 *   typename MatrixFree<dim, double>::AdditionalData additional_data;
1467 *   additional_data.tasks_parallel_scheme =
1468 *   MatrixFree<dim, double>::AdditionalData::none;
1469 *   additional_data.mapping_update_flags =
1470 *   (update_gradients | update_JxW_values | update_quadrature_points);
1471 *   additional_data.mapping_update_flags_inner_faces =
1472 *   (update_gradients | update_JxW_values | update_normal_vectors);
1473 *   additional_data.mapping_update_flags_boundary_faces =
1474 *   (update_gradients | update_JxW_values | update_normal_vectors |
1475 *   update_quadrature_points);
1476 *   const auto system_mf_storage =
1477 *   std::make_shared<MatrixFree<dim, double>>();
1478 *   system_mf_storage->reinit(
1479 *   mapping, dof_handler, dummy, QGauss<1>(fe.degree + 1), additional_data);
1480 *   system_matrix.initialize(system_mf_storage);
1481 *   }
1482 *  
1483 *   system_matrix.initialize_dof_vector(solution);
1484 *   system_matrix.initialize_dof_vector(system_rhs);
1485 *  
1486 *   setup_time += time.wall_time();
1487 *   time_details << "Setup matrix-free system " << time.wall_time() << " s"
1488 *   << std::endl;
1489 *   time.restart();
1490 *  
1491 *   const unsigned int nlevels = triangulation.n_global_levels();
1492 *   mg_matrices.resize(0, nlevels - 1);
1493 *  
1494 *   for (unsigned int level = 0; level < nlevels; ++level)
1495 *   {
1496 *   typename MatrixFree<dim, float>::AdditionalData additional_data;
1497 *   additional_data.tasks_parallel_scheme =
1498 *   MatrixFree<dim, float>::AdditionalData::none;
1499 *   additional_data.mapping_update_flags =
1500 *   (update_gradients | update_JxW_values);
1501 *   additional_data.mapping_update_flags_inner_faces =
1502 *   (update_gradients | update_JxW_values);
1503 *   additional_data.mapping_update_flags_boundary_faces =
1504 *   (update_gradients | update_JxW_values);
1505 *   additional_data.mg_level = level;
1506 *   const auto mg_mf_storage_level =
1507 *   std::make_shared<MatrixFree<dim, float>>();
1508 *   mg_mf_storage_level->reinit(mapping,
1509 *   dof_handler,
1510 *   dummy,
1511 *   QGauss<1>(fe.degree + 1),
1512 *   additional_data);
1513 *  
1514 *   mg_matrices[level].initialize(mg_mf_storage_level);
1515 *   }
1516 *   setup_time += time.wall_time();
1517 *   time_details << "Setup matrix-free levels " << time.wall_time() << " s"
1518 *   << std::endl;
1519 *   }
1520 *  
1521 *  
1522 *  
1523 * @endcode
1524 *
1525 * The computation of the right hand side is a bit more complicated than in
1526 * @ref step_37 "step-37". The cell term now consists of the negative Laplacian of the
1527 * analytical solution, `RightHandSide`, for which we need to first split up
1528 * the Point of VectorizedArray fields, i.e., a batch of points, into a
1529 * single point by evaluating all lanes in the VectorizedArray
1530 * separately. Remember that the number of lanes depends on the hardware; it
1531 * could be 1 for systems that do not offer vectorization (or where deal.II
1532 * does not have intrinsics), but it could also be 8 or 16 on AVX-512 of
1533 * recent Intel architectures.
1534 *
1535 * @code
1536 *   template <int dim, int fe_degree>
1537 *   void LaplaceProblem<dim, fe_degree>::compute_rhs()
1538 *   {
1539 *   Timer time;
1540 *   system_rhs = 0;
1541 *   const MatrixFree<dim, double> &data = *system_matrix.get_matrix_free();
1542 *   FEEvaluation<dim, fe_degree> phi(data);
1543 *   RightHandSide<dim> rhs_func;
1544 *   Solution<dim> exact_solution;
1545 *   for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
1546 *   {
1547 *   phi.reinit(cell);
1548 *   for (const unsigned int q : phi.quadrature_point_indices())
1549 *   {
1550 *   VectorizedArray<double> rhs_val = VectorizedArray<double>();
1551 *   Point<dim, VectorizedArray<double>> point_batch =
1552 *   phi.quadrature_point(q);
1553 *   for (unsigned int v = 0; v < VectorizedArray<double>::size(); ++v)
1554 *   {
1555 *   Point<dim> single_point;
1556 *   for (unsigned int d = 0; d < dim; ++d)
1557 *   single_point[d] = point_batch[d][v];
1558 *   rhs_val[v] = rhs_func.value(single_point);
1559 *   }
1560 *   phi.submit_value(rhs_val, q);
1561 *   }
1562 *   phi.integrate_scatter(EvaluationFlags::values, system_rhs);
1563 *   }
1564 *  
1565 * @endcode
1566 *
1567 * Secondly, we also need to apply the Dirichlet and Neumann boundary
 1568 * conditions. This function is the missing counterpart to the
 1569 * `LaplaceOperator::apply_boundary()` function once the exterior solution
1570 * values @f$u^+ = -u^- + 2 g_\text{D}@f$ and @f$\mathbf{n}^-\cdot \nabla u^+ =
1571 * \mathbf{n}^-\cdot \nabla u^-@f$ on Dirichlet boundaries and @f$u^+=u^-@f$ and
1572 * @f$\mathbf{n}^-\cdot \nabla u^+ = -\mathbf{n}^-\cdot \nabla u^- + 2
1573 * g_\text{N}@f$ on Neumann boundaries are inserted and expanded in terms of
1574 * the boundary functions @f$g_\text{D}@f$ and @f$g_\text{N}@f$. One thing to
1575 * remember is that we move the boundary conditions to the right hand
1576 * side, so the sign is the opposite from what we imposed on the solution
1577 * part.
1578 *
1579
1580 *
1581 * We could have issued both the cell and the boundary part through a
1582 * MatrixFree::loop part, but we choose to manually write the full loop
1583 * over all faces to learn how the index layout of face indices is set up
1584 * in MatrixFree: Both the inner faces and the boundary faces share the
1585 * index range, and all batches of inner faces have lower numbers than the
 1586 * batches of boundary faces. A single index for both variants allows us
1587 * to easily use the same data structure FEFaceEvaluation for both cases
1588 * that attaches to the same data field, just at different positions. The
1589 * number of inner face batches (where a batch is due to the combination
1590 * of several faces into one for vectorization) is given by
1591 * MatrixFree::n_inner_face_batches(), whereas the number of boundary face
1592 * batches is given by MatrixFree::n_boundary_face_batches().
1593 *
1594 * @code
1595 *   FEFaceEvaluation<dim, fe_degree> phi_face(data, true);
1596 *   for (unsigned int face = data.n_inner_face_batches();
1597 *   face < data.n_inner_face_batches() + data.n_boundary_face_batches();
1598 *   ++face)
1599 *   {
1600 *   phi_face.reinit(face);
1601 *  
1602 *   const VectorizedArray<double> inverse_length_normal_to_face = std::abs(
1603 *   (phi_face.normal_vector(0) * phi_face.inverse_jacobian(0))[dim - 1]);
1604 *   const VectorizedArray<double> sigma =
1605 *   inverse_length_normal_to_face * system_matrix.get_penalty_factor();
1606 *  
1607 *   for (const unsigned int q : phi_face.quadrature_point_indices())
1608 *   {
1609 *   VectorizedArray<double> test_value = VectorizedArray<double>(),
1610 *   test_normal_derivative =
1611 *   VectorizedArray<double>();
1612 *   Point<dim, VectorizedArray<double>> point_batch =
1613 *   phi_face.quadrature_point(q);
1614 *  
1615 *   for (unsigned int v = 0; v < VectorizedArray<double>::size(); ++v)
1616 *   {
1617 *   Point<dim> single_point;
1618 *   for (unsigned int d = 0; d < dim; ++d)
1619 *   single_point[d] = point_batch[d][v];
1620 *  
1621 * @endcode
1622 *
1623 * The MatrixFree class lets us query the boundary_id of the
1624 * current face batch. Remember that MatrixFree sets up the
1625 * batches for vectorization such that all faces within a
1626 * batch have the same properties, which includes their
1627 * `boundary_id`. Thus, we can query that id here for the
1628 * current face index `face` and either impose the Dirichlet
1629 * case (where we add something to the function value) or the
1630 * Neumann case (where we add something to the normal
1631 * derivative).
1632 *
1633 * @code
1634 *   if (data.get_boundary_id(face) == 0)
1635 *   test_value[v] = 2.0 * exact_solution.value(single_point);
1636 *   else
1637 *   {
1638 *   Tensor<1, dim> normal;
1639 *   for (unsigned int d = 0; d < dim; ++d)
1640 *   normal[d] = phi_face.normal_vector(q)[d][v];
1641 *   test_normal_derivative[v] =
1642 *   -normal * exact_solution.gradient(single_point);
1643 *   }
1644 *   }
1645 *   phi_face.submit_value(test_value * sigma - test_normal_derivative,
1646 *   q);
1647 *   phi_face.submit_normal_derivative(-0.5 * test_value, q);
1648 *   }
1649 *   phi_face.integrate_scatter(EvaluationFlags::values |
1650 *   EvaluationFlags::gradients,
1651 *   system_rhs);
1652 *   }
1653 *  
1654 * @endcode
1655 *
1656 * Since we have manually run the loop over cells rather than using
1657 * MatrixFree::loop(), we must not forget to perform the data exchange
1658 * with MPI - or actually, we would not need that for DG elements here
1659 * because each cell carries its own degrees of freedom and cell and
1660 * boundary integrals only evaluate quantities on the locally owned
 1661 * cells. The coupling to neighboring subdomains only comes in via the inner
1662 * face integrals, which we have not done here. That said, it does not
1663 * hurt to call this function here, so we do it as a reminder of what
1664 * happens inside MatrixFree::loop().
1665 *
1666 * @code
1667 *   system_rhs.compress(VectorOperation::add);
1668 *   setup_time += time.wall_time();
1669 *   time_details << "Compute right hand side " << time.wall_time()
1670 *   << " s\n";
1671 *   }
1672 *  
1673 *  
1674 *  
1675 * @endcode
1676 *
1677 * The `solve()` function is copied almost verbatim from @ref step_37 "step-37". We set up
1678 * the same multigrid ingredients, namely the level transfer, a smoother,
1679 * and a coarse grid solver. The only difference is the fact that we do not
1680 * use the diagonal of the Laplacian for the preconditioner of the Chebyshev
 1681 * iteration used for smoothing, but instead our newly developed class
1682 * `%PreconditionBlockJacobi`. The mechanisms are the same, though.
1683 *
1684 * @code
1685 *   template <int dim, int fe_degree>
1686 *   void LaplaceProblem<dim, fe_degree>::solve()
1687 *   {
1688 *   Timer time;
1689 *   MGTransferMatrixFree<dim, float> mg_transfer;
1690 *   mg_transfer.build(dof_handler);
1691 *   setup_time += time.wall_time();
1692 *   time_details << "MG build transfer time " << time.wall_time()
1693 *   << " s\n";
1694 *   time.restart();
1695 *  
1696 *   using SmootherType =
1697 *   PreconditionChebyshev<LevelMatrixType,
1698 *   LinearAlgebra::distributed::Vector<float>,
1699 *   PreconditionBlockJacobi<dim, fe_degree, float>>;
1700 *   mg::SmootherRelaxation<SmootherType,
1701 *   LinearAlgebra::distributed::Vector<float>>
1702 *   mg_smoother;
1703 *   MGLevelObject<typename SmootherType::AdditionalData> smoother_data;
1704 *   smoother_data.resize(0, triangulation.n_global_levels() - 1);
1705 *   for (unsigned int level = 0; level < triangulation.n_global_levels();
1706 *   ++level)
1707 *   {
1708 *   if (level > 0)
1709 *   {
1710 *   smoother_data[level].smoothing_range = 15.;
1711 *   smoother_data[level].degree = 3;
1712 *   smoother_data[level].eig_cg_n_iterations = 10;
1713 *   }
1714 *   else
1715 *   {
1716 *   smoother_data[0].smoothing_range = 2e-2;
1717 *   smoother_data[0].degree = numbers::invalid_unsigned_int;
1718 *   smoother_data[0].eig_cg_n_iterations = mg_matrices[0].m();
1719 *   }
1720 *   smoother_data[level].preconditioner =
1721 *   std::make_shared<PreconditionBlockJacobi<dim, fe_degree, float>>();
1722 *   smoother_data[level].preconditioner->initialize(mg_matrices[level]);
1723 *   }
1724 *   mg_smoother.initialize(mg_matrices, smoother_data);
1725 *  
1726 *   MGCoarseGridApplySmoother<LinearAlgebra::distributed::Vector<float>>
1727 *   mg_coarse;
1728 *   mg_coarse.initialize(mg_smoother);
1729 *  
1730 *   mg::Matrix<LinearAlgebra::distributed::Vector<float>> mg_matrix(
1731 *   mg_matrices);
1732 *  
1733 *   Multigrid<LinearAlgebra::distributed::Vector<float>> mg(
1734 *   mg_matrix, mg_coarse, mg_transfer, mg_smoother, mg_smoother);
1735 *  
1736 *   PreconditionMG<dim,
1737 *   LinearAlgebra::distributed::Vector<float>,
1738 *   MGTransferMatrixFree<dim, float>>
1739 *   preconditioner(dof_handler, mg, mg_transfer);
1740 *  
1741 *   SolverControl solver_control(10000, 1e-12 * system_rhs.l2_norm());
1742 *   SolverCG<LinearAlgebra::distributed::Vector<double>> cg(solver_control);
1743 *   setup_time += time.wall_time();
1744 *   time_details << "MG build smoother time " << time.wall_time()
1745 *   << "s\n";
1746 *   pcout << "Total setup time " << setup_time << " s\n";
1747 *  
1748 *   time.reset();
1749 *   time.start();
1750 *   cg.solve(system_matrix, solution, system_rhs, preconditioner);
1751 *  
1752 *   pcout << "Time solve (" << solver_control.last_step() << " iterations) "
1753 *   << time.wall_time() << " s" << std::endl;
1754 *   }
1755 *  
1756 *  
1757 *  
1758 * @endcode
1759 *
1760 * Since we have solved a problem with analytic solution, we want to verify
1761 * the correctness of our implementation by computing the L2 error of the
1762 * numerical result against the analytic solution.
1763 *
1764
1765 *
1766 *
1767 * @code
1768 *   template <int dim, int fe_degree>
1769 *   void LaplaceProblem<dim, fe_degree>::analyze_results() const
1770 *   {
1771 *   Vector<float> error_per_cell(triangulation.n_active_cells());
1772 *   VectorTools::integrate_difference(mapping,
1773 *   dof_handler,
1774 *   solution,
1775 *   Solution<dim>(),
1776 *   error_per_cell,
1777 *   QGauss<dim>(fe.degree + 2),
1778 *   VectorTools::L2_norm);
1779 *   pcout << "Verification via L2 error: "
1780 *   << std::sqrt(
1781 *   Utilities::MPI::sum(error_per_cell.norm_sqr(), MPI_COMM_WORLD))
1782 *   << std::endl;
1783 *   }
1784 *  
1785 *  
1786 *  
1787 * @endcode
1788 *
1789 * The `run()` function sets up the initial grid and then runs the multigrid
1790 * program in the usual way. As a domain, we choose a rectangle with
1791 * periodic boundary conditions in the @f$x@f$-direction, a Dirichlet condition
1792 * on the front face in @f$y@f$ direction (i.e., the face with index number 2,
1793 * with boundary id equal to 0), and Neumann conditions on the back face as
1794 * well as the two faces in @f$z@f$ direction for the 3d case (with boundary id
1795 * equal to 1). The extent of the domain is a bit different in the @f$x@f$
1796 * direction (where we want to achieve a periodic solution given the
1797 * definition of `Solution`) as compared to the @f$y@f$ and @f$z@f$ directions.
1798 *
1799
1800 *
1801 *
1802 * @code
1803 *   template <int dim, int fe_degree>
1804 *   void LaplaceProblem<dim, fe_degree>::run()
1805 *   {
1806 *   const unsigned int n_ranks =
1807 *   Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
1808 *   pcout << "Running with " << n_ranks << " MPI process"
1809 *   << (n_ranks > 1 ? "es" : "") << ", element " << fe.get_name()
1810 *   << std::endl
1811 *   << std::endl;
1812 *   for (unsigned int cycle = 0; cycle < 9 - dim; ++cycle)
1813 *   {
1814 *   pcout << "Cycle " << cycle << std::endl;
1815 *  
1816 *   if (cycle == 0)
1817 *   {
1818 *   Point<dim> upper_right;
1819 *   upper_right[0] = 2.5;
1820 *   for (unsigned int d = 1; d < dim; ++d)
1821 *   upper_right[d] = 2.8;
1822 *   GridGenerator::hyper_rectangle(triangulation,
1823 *   Point<dim>(),
1824 *   upper_right);
1825 *   triangulation.begin_active()->face(0)->set_boundary_id(10);
1826 *   triangulation.begin_active()->face(1)->set_boundary_id(11);
1827 *   triangulation.begin_active()->face(2)->set_boundary_id(0);
1828 *   for (unsigned int f = 3;
1829 *   f < triangulation.begin_active()->n_faces();
1830 *   ++f)
1831 *   triangulation.begin_active()->face(f)->set_boundary_id(1);
1832 *  
1833 *   std::vector<GridTools::PeriodicFacePair<
1834 *   typename Triangulation<dim>::cell_iterator>>
1835 *   periodic_faces;
1836 *   GridTools::collect_periodic_faces(
1837 *   triangulation, 10, 11, 0, periodic_faces);
1838 *   triangulation.add_periodicity(periodic_faces);
1839 *  
1840 *   triangulation.refine_global(6 - 2 * dim);
1841 *   }
1842 *   triangulation.refine_global(1);
1843 *   setup_system();
1844 *   compute_rhs();
1845 *   solve();
1846 *   analyze_results();
1847 *   pcout << std::endl;
1848 *   };
1849 *   }
1850 *   } // namespace Step59
1851 *  
1852 *  
1853 *  
1854 * @endcode
1855 *
1856 * There is nothing unexpected in the `main()` function. We call `MPI_Init()`
1857 * through the `MPI_InitFinalize` class, pass on the two parameters on the
1858 * dimension and the degree set at the top of the file, and run the Laplace
1859 * problem.
1860 *
1861
1862 *
1863 *
1864 * @code
1865 *   int main(int argc, char *argv[])
1866 *   {
1867 *   try
1868 *   {
1869 *   using namespace Step59;
1870 *  
1871 *   Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
1872 *  
1873 *   LaplaceProblem<dimension, degree_finite_element> laplace_problem;
1874 *   laplace_problem.run();
1875 *   }
1876 *   catch (std::exception &exc)
1877 *   {
1878 *   std::cerr << std::endl
1879 *   << std::endl
1880 *   << "----------------------------------------------------"
1881 *   << std::endl;
1882 *   std::cerr << "Exception on processing: " << std::endl
1883 *   << exc.what() << std::endl
1884 *   << "Aborting!" << std::endl
1885 *   << "----------------------------------------------------"
1886 *   << std::endl;
1887 *   return 1;
1888 *   }
1889 *   catch (...)
1890 *   {
1891 *   std::cerr << std::endl
1892 *   << std::endl
1893 *   << "----------------------------------------------------"
1894 *   << std::endl;
1895 *   std::cerr << "Unknown exception!" << std::endl
1896 *   << "Aborting!" << std::endl
1897 *   << "----------------------------------------------------"
1898 *   << std::endl;
1899 *   return 1;
1900 *   }
1901 *  
1902 *   return 0;
1903 *   }
1904 * @endcode
1905<a name="step_59-Results"></a><h1>Results</h1>
1906
1907
1908<a name="step_59-Programoutput"></a><h3>Program output</h3>
1909
1910
1911Like in @ref step_37 "step-37", we evaluate the multigrid solver in terms of run time. In
1912two space dimensions with elements of degree 8, a possible output could look
1913as follows:
1914@code
1915Running with 12 MPI processes, element FE_DGQHermite<2>(8)
1916
1917Cycle 0
1918Number of degrees of freedom: 5184
1919Total setup time 0.0282445 s
1920Time solve (14 iterations) 0.0110712 s
1921Verification via L2 error: 1.66232e-07
1922
1923Cycle 1
1924Number of degrees of freedom: 20736
1925Total setup time 0.0126282 s
1926Time solve (14 iterations) 0.0157021 s
1927Verification via L2 error: 2.91505e-10
1928
1929Cycle 2
1930Number of degrees of freedom: 82944
1931Total setup time 0.0227573 s
1932Time solve (14 iterations) 0.026568 s
1933Verification via L2 error: 6.64514e-13
1934
1935Cycle 3
1936Number of degrees of freedom: 331776
1937Total setup time 0.0604685 s
1938Time solve (14 iterations) 0.0628356 s
1939Verification via L2 error: 5.57513e-13
1940
1941Cycle 4
1942Number of degrees of freedom: 1327104
1943Total setup time 0.154359 s
1944Time solve (13 iterations) 0.219555 s
1945Verification via L2 error: 3.08139e-12
1946
1947Cycle 5
1948Number of degrees of freedom: 5308416
1949Total setup time 0.467764 s
1950Time solve (13 iterations) 1.1821 s
1951Verification via L2 error: 3.90334e-12
1952
1953Cycle 6
1954Number of degrees of freedom: 21233664
1955Total setup time 1.73263 s
1956Time solve (13 iterations) 5.21054 s
1957Verification via L2 error: 4.94543e-12
1958@endcode
1959
1960Like in @ref step_37 "step-37", the number of CG iterations remains constant with increasing
1961problem size. The iteration counts are a bit higher, which is because we use a
1962lower degree of the Chebyshev polynomial (2 vs 5 in @ref step_37 "step-37") and because the
1963interior penalty discretization has a somewhat larger spread in
1964eigenvalues. Nonetheless, 13 iterations to reduce the residual by 12 orders of
1965magnitude, or almost a factor of 9 per iteration, indicates an overall very
1966efficient method. In particular, we can solve a system with 21 million degrees
1967of freedom in 5 seconds when using 12 cores, which is a very good
1968efficiency. Of course, in 2D we are well inside the regime of roundoff for a
1969polynomial degree of 8 &ndash; as a matter of fact, around 83k DoFs or 0.025s
1970would have been enough to fully converge this (simple) analytic solution
1971here.
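
(As a quick check of the contraction factor quoted above: reducing the residual
by 12 orders of magnitude in 13 iterations corresponds to an average reduction of
@f$10^{12/13} \approx 8.4@f$ per iteration.)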
1972
1973Not much changes if we run the program in three spatial dimensions, except for
1974the fact that we now do something more useful with the higher polynomial
1975degree and increasing mesh sizes, as the roundoff errors are only obtained at
1976the finest mesh. Still, it is remarkable that we can solve a 3D Laplace
1977problem with a wave of three periods to roundoff accuracy on a twelve-core
1978machine pretty easily - using about 3.5 GB of memory in total for the second
1979to largest case with 24m DoFs, taking not more than eight seconds. The largest
1980case uses 30GB of memory with 191m DoFs.
1981
1982@code
1983Running with 12 MPI processes, element FE_DGQHermite<3>(8)
1984
1985Cycle 0
1986Number of degrees of freedom: 5832
1987Total setup time 0.0210681 s
1988Time solve (15 iterations) 0.0956945 s
1989Verification via L2 error: 0.0297194
1990
1991Cycle 1
1992Number of degrees of freedom: 46656
1993Total setup time 0.0452428 s
1994Time solve (15 iterations) 0.113827 s
1995Verification via L2 error: 9.55733e-05
1996
1997Cycle 2
1998Number of degrees of freedom: 373248
1999Total setup time 0.190423 s
2000Time solve (15 iterations) 0.218309 s
2001Verification via L2 error: 2.6868e-07
2002
2003Cycle 3
2004Number of degrees of freedom: 2985984
2005Total setup time 0.627914 s
2006Time solve (15 iterations) 1.0595 s
2007Verification via L2 error: 4.6918e-10
2008
2009Cycle 4
2010Number of degrees of freedom: 23887872
2011Total setup time 2.85215 s
2012Time solve (15 iterations) 8.30576 s
2013Verification via L2 error: 9.38583e-13
2014
2015Cycle 5
2016Number of degrees of freedom: 191102976
2017Total setup time 16.1324 s
2018Time solve (15 iterations) 65.57 s
2019Verification via L2 error: 3.17875e-13
2020@endcode
2021
2022<a name="step_59-Comparisonofefficiencyatdifferentpolynomialdegrees"></a><h3>Comparison of efficiency at different polynomial degrees</h3>
2023
2024
2025In the introduction and in-code comments, it was mentioned several times that
2026high orders are treated very efficiently with the FEEvaluation and
2027FEFaceEvaluation evaluators. Now, we want to substantiate these claims by
2028looking at the throughput of the 3D multigrid solver for various polynomial
2029degrees. We collect the times as follows: We first run a solver at problem
2030size close to ten million, indicated in the first four table rows, and record
2031the timings. Then, we normalize the throughput by recording the number of
2032million degrees of freedom solved per second (MDoFs/s) to be able to compare
2033the efficiency of the different degrees, which is computed by dividing the
2034number of degrees of freedom by the solver time.
2035
2036<table align="center" class="doxtable">
2037 <tr>
2038 <th>degree</th>
2039 <th>1</th>
2040 <th>2</th>
2041 <th>3</th>
2042 <th>4</th>
2043 <th>5</th>
2044 <th>6</th>
2045 <th>7</th>
2046 <th>8</th>
2047 <th>9</th>
2048 <th>10</th>
2049 <th>11</th>
2050 <th>12</th>
2051 </tr>
2052 <tr>
2053 <th>Number of DoFs</th>
2054 <td>2097152</td>
2055 <td>7077888</td>
2056 <td>16777216</td>
2057 <td>32768000</td>
2058 <td>7077888</td>
2059 <td>11239424</td>
2060 <td>16777216</td>
2061 <td>23887872</td>
2062 <td>32768000</td>
2063 <td>43614208</td>
2064 <td>7077888</td>
2065 <td>8998912</td>
2066 </tr>
2067 <tr>
2068 <th>Number of iterations</th>
2069 <td>13</td>
2070 <td>12</td>
2071 <td>12</td>
2072 <td>12</td>
2073 <td>13</td>
2074 <td>13</td>
2075 <td>15</td>
2076 <td>15</td>
2077 <td>17</td>
2078 <td>19</td>
2079 <td>18</td>
2080 <td>18</td>
2081 </tr>
2082 <tr>
2083 <th>Solver time [s]</th>
2084 <td>0.713</td>
2085 <td>2.150</td>
2086 <td>4.638</td>
2087 <td>8.803</td>
2088 <td>2.041</td>
2089 <td>3.295</td>
2090 <td>5.723</td>
2091 <td>8.306</td>
2092 <td>12.75</td>
2093 <td>19.25</td>
2094 <td>3.530</td>
2095 <td>4.814</td>
2096 </tr>
2097 <tr>
2098 <th>MDoFs/s</th>
2099 <td>2.94</td>
2100 <td>3.29</td>
2101 <td>3.62</td>
2102 <td>3.72</td>
2103 <td>3.47</td>
2104 <td>3.41</td>
2105 <td>2.93</td>
2106 <td>2.88</td>
2107 <td>2.57</td>
2108 <td>2.27</td>
2109 <td>2.01</td>
2110 <td>1.87</td>
2111 </tr>
2112</table>
2113
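(As a quick check of the last row, take the degree @f$k=4@f$ column: solving
@f$32768000@f$ unknowns in @f$8.803@f$ seconds gives
@f$32768000 / (8.803\cdot 10^6) \approx 3.72@f$ MDoFs/s, the value reported above.)
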
2114We clearly see how the efficiency per DoF initially improves until it reaches
2115a maximum for the polynomial degree @f$k=4@f$. This effect is surprising, not only
2116because higher polynomial degrees often yield a vastly better solution, but
2117especially also when having matrix-based schemes in mind where the denser
2118coupling at higher degree leads to a monotonically decreasing throughput (and a
2119drastic one in 3D, with @f$k=4@f$ being more than ten times slower than
2120@f$k=1@f$!). For higher degrees, the throughput decreases a bit, which is both due
2121to an increase in the number of iterations (going from 12 at @f$k=2,3,4@f$ to 19
2122at @f$k=10@f$) and due to the @f$\mathcal O(k)@f$ complexity of operator
2123evaluation. Nonetheless, efficiency measured as time to solution would still be
2124better for higher polynomial degrees because they have better convergence rates (at least
2125for problems as simple as this one): For @f$k=12@f$, we reach roundoff accuracy
2126already with 1 million DoFs (solver time less than a second), whereas for @f$k=8@f$
2127we need 24 million DoFs and 8 seconds. For @f$k=5@f$, the error is around
2128@f$10^{-9}@f$ with 57m DoFs and thus still far away from roundoff, despite taking 16
2129seconds.
2130
2131Note that the above numbers are a bit pessimistic because they include the
2132time it takes the Chebyshev smoother to compute an eigenvalue estimate, which
2133is around 10 percent of the solver time. If the system is solved several times
2134(as e.g. common in fluid dynamics), this eigenvalue cost is only paid once and
2135faster times become available.
2136
2137<a name="step_59-Evaluationofefficiencyofingredients"></a><h3>Evaluation of efficiency of ingredients</h3>
2138
2139
2140Finally, we take a look at some of the special ingredients presented in this
2141tutorial program, namely the FE_DGQHermite basis in particular and the
2142specification of MatrixFree::DataAccessOnFaces. In the following table, the
2143third row shows the optimized solver above, the fourth row shows the timings
2144with only the MatrixFree::DataAccessOnFaces set to `unspecified` rather than
2145the optimal `gradients`, and the last one with FE_DGQHermite replaced by the
2146basic FE_DGQ elements, where both the MPI exchange and the operations done by
2147FEFaceEvaluation::gather_evaluate() and FEFaceEvaluation::integrate_scatter()
2148are more expensive.
2149
2150<table align="center" class="doxtable">
2151 <tr>
2152 <th>degree</th>
2153 <th>1</th>
2154 <th>2</th>
2155 <th>3</th>
2156 <th>4</th>
2157 <th>5</th>
2158 <th>6</th>
2159 <th>7</th>
2160 <th>8</th>
2161 <th>9</th>
2162 <th>10</th>
2163 <th>11</th>
2164 <th>12</th>
2165 </tr>
2166 <tr>
2167 <th>Number of DoFs</th>
2168 <td>2097152</td>
2169 <td>7077888</td>
2170 <td>16777216</td>
2171 <td>32768000</td>
2172 <td>7077888</td>
2173 <td>11239424</td>
2174 <td>16777216</td>
2175 <td>23887872</td>
2176 <td>32768000</td>
2177 <td>43614208</td>
2178 <td>7077888</td>
2179 <td>8998912</td>
2180 </tr>
2181 <tr>
2182 <th>Solver time optimized as in tutorial [s]</th>
2183 <td>0.713</td>
2184 <td>2.150</td>
2185 <td>4.638</td>
2186 <td>8.803</td>
2187 <td>2.041</td>
2188 <td>3.295</td>
2189 <td>5.723</td>
2190 <td>8.306</td>
2191 <td>12.75</td>
2192 <td>19.25</td>
2193 <td>3.530</td>
2194 <td>4.814</td>
2195 </tr>
2196 <tr>
2197 <th>Solver time MatrixFree::DataAccessOnFaces::unspecified [s]</th>
2198 <td>0.711</td>
2199 <td>2.151</td>
2200 <td>4.675</td>
2201 <td>8.968</td>
2202 <td>2.243</td>
2203 <td>3.655</td>
2204 <td>6.277</td>
2205 <td>9.082</td>
2206 <td>13.50</td>
2207 <td>20.05</td>
2208 <td>3.817</td>
2209 <td>5.178</td>
2210 </tr>
2211 <tr>
2212 <th>Solver time FE_DGQ [s]</th>
2213 <td>0.712</td>
2214 <td>2.041</td>
2215 <td>5.066</td>
2216 <td>9.335</td>
2217 <td>2.379</td>
2218 <td>3.802</td>
2219 <td>6.564</td>
2220 <td>9.714</td>
2221 <td>14.54</td>
2222 <td>22.76</td>
2223 <td>4.148</td>
2224 <td>5.857</td>
2225 </tr>
2226</table>
2227
2228The data in the table shows that not using MatrixFree::DataAccessOnFaces
2229increases costs by around 10% for higher polynomial degrees. For lower
2230degrees, the difference is obviously less pronounced because the
2231volume-to-surface ratio is more beneficial and less data needs to be
2232exchanged. The difference is larger when looking at the matrix-vector product
2233only, rather than the full multigrid solver shown here, with around 20% worse
2234timings just because of the MPI communication.
2235
2236For @f$k=1@f$ and @f$k=2@f$, the Hermite-like basis functions obviously do not really
2237pay off (indeed, for @f$k=1@f$ the polynomials are exactly the same as for FE_DGQ)
2238and the results are similar as with the FE_DGQ basis. However, for degrees
2239starting at three, we see an increasing advantage for FE_DGQHermite, showing
2240the effectiveness of these basis functions.
2241
2242<a name="step_59-Possibilitiesforextension"></a><h3>Possibilities for extension</h3>
2243
2244
2245As mentioned in the introduction, the fast diagonalization method as realized
2246here is tied to a Cartesian mesh with constant coefficients. When dealing with
2247meshes that contain deformed cells or with variable coefficients, it is common
2248to determine a nearby Cartesian mesh cell as an approximation. This can be
2249done with the class TensorProductMatrixSymmetricSumCollection. Here, one can
2250insert cell matrices similarly to the PreconditionBlockJacobi::initialize()
2251function of this tutorial program. The benefit of the collection class is that
2252cells on which the coefficient of the PDE has the same value can re-use the
2253same Laplacian matrix, which reduces the memory consumption for the inverse
2254matrices. As compared to the algorithm implemented in this tutorial program,
2255one would define the length scales as the distances between opposing
2256faces. For continuous elements, the code project <a
2257href="https://github.com/peterrum/dealii-dd-and-schwarz">Cache-optimized and
2258low-overhead implementations of multigrid smoothers for high-order FEM
2259computations</a> presents the computation for continuous elements. There is
2260currently no infrastructure in deal.II to automatically generate the 1D
2261matrices for discontinuous elements with SIP-DG discretization, as opposed to
2262continuous elements, where we provide
2263TensorProductMatrixCreator::create_laplace_tensor_product_matrix().
2264
2265Another way of extending the program would be to include support for adaptive
2266meshes. While the classical approach of defining interface operations at edges
2267of different refinement level, as discussed in @ref step_39 "step-39", is one possibility,
2268for Poisson-type problems another option is typically more beneficial. Using
2269the class MGTransferGlobalCoarsening, which is explained in the @ref step_75 "step-75"
2270tutorial program, one can deal with meshes of hanging nodes on all levels. An
2271algorithmic improvement can be obtained by combining the discontinuous
2272function space with the auxiliary continuous finite element space of the same
2273polynomial degree. This idea, introduced by Antonietti et al.
2274@cite antonietti2016uniform in 2016, allows making the multigrid convergence
2275independent of the penalty parameter. As demonstrated by Fehn et al.
2276@cite fehn2020hybrid, this also gives considerably lower iteration counts than
2277a multigrid solver directly working on levels with discontinuous function
2278spaces. The latter work also proposes p-multigrid techniques and combination
2279with algebraic multigrid coarse spaces as a means to efficiently solve Poisson
2280problems with high-order discontinuous Galerkin discretizations on complicated
2281geometries, representing the current state-of-the-art for simple Poisson-type
2282problems. The class MGTransferGlobalCoarsening provides features for each of
2283these three coarsening variants, the discontinuous-continuous auxiliary
2284function concept, p-multigrid, and traditional h-multigrid. The main
2285ingredient is to define an appropriate MGTwoLevelTransfer object and call
2286MGTwoLevelTransfer::reinit_geometric_transfer() or
2287MGTwoLevelTransfer::reinit_polynomial_transfer(), respectively.
2288 *
2289 *
2290<a name="step_59-PlainProg"></a>
2291<h1> The plain program</h1>
2292@include "step-59.cc"
2293*/