step-37.h
755 *                                  const unsigned int /*component*/) const
756 *   {
757 *   return 1. / (0.05 + 2. * p.square());
758 *   }
759 *  
760 *  
761 *  
762 *   template <int dim>
763 *   double Coefficient<dim>::value(const Point<dim> &p,
764 *   const unsigned int component) const
765 *   {
766 *   return value<double>(p, component);
767 *   }
768 *  
769 *  
770 * @endcode
771 *
772 *
773 * <a name="step_37-Matrixfreeimplementation"></a>
774 * <h3>Matrix-free implementation</h3>
775 *
776
777 *
778 * The following class, called <code>LaplaceOperator</code>, implements the
779 * differential operator. For all practical purposes, it is a matrix, i.e.,
780 * you can ask it for its size (member functions <code>m(), n()</code>) and
781 * you can apply it to a vector (the <code>vmult()</code> function). The
782 * difference to a real matrix of course lies in the fact that this class
783 * does not actually store the <i>elements</i> of the matrix, but only knows
784 * how to compute the action of the operator when applied to a vector.
785 *
786
787 *
788 * The infrastructure describing the matrix size, the initialization from a
789 * MatrixFree object, and the various interfaces to matrix-vector products
790 * through vmult() and Tvmult() methods, is provided by the class
791 * MatrixFreeOperators::Base from which this class derives. The
792 * LaplaceOperator class defined here only has to provide a few interfaces,
793 * namely the actual action of the operator through the apply_add() method
794 * that gets used in the vmult() functions, and a method to compute the
795 * diagonal entries of the underlying matrix. We need the diagonal for the
796 * definition of the multigrid smoother. Since we consider a problem with
797 * variable coefficient, we further implement a method that can fill the
798 * coefficient values.
799 *
800
801 *
802 * Note that the file <code>include/deal.II/matrix_free/operators.h</code>
803 * already contains an implementation of the Laplacian through the class
804 * MatrixFreeOperators::LaplaceOperator. For educational purposes, the
805 * operator is re-implemented in this tutorial program, explaining the
806 * ingredients and concepts used there.
807 *
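 * As a side-by-side illustration (a sketch only, not used in this program),
 * employing that ready-made class would look roughly as follows; the
 * placeholder names @p matrix_free_storage, @p dst and @p src are
 * assumptions, and the exact template parameters and the coefficient
 * interface should be checked against operators.h:
 *
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * using ReadyMadeLaplace =
 *   MatrixFreeOperators::LaplaceOperator<dim,
 *                                        fe_degree,
 *                                        fe_degree + 1,
 *                                        1,
 *                                        LinearAlgebra::distributed::Vector<double>>;
 * ReadyMadeLaplace laplace_operator;
 * laplace_operator.initialize(matrix_free_storage); // shared_ptr to MatrixFree
 * // by default this represents the constant-coefficient Laplacian; a variable
 * // coefficient can be supplied through its set_coefficient() member
 * laplace_operator.compute_diagonal();
 * laplace_operator.vmult(dst, src);
 * @endcode
 * </div>
 *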
808
809 *
810 * This program makes use of the data cache for finite element operator
811 * application that is integrated in deal.II. This data cache class is
812 * called MatrixFree. It contains mapping information (Jacobians) and index
813 * relations between local and global degrees of freedom. It also contains
814 * constraints like the ones from hanging nodes or Dirichlet boundary
815 * conditions. Moreover, it can issue a loop over all cells in %parallel,
816 * making sure that only cells are worked on that do not share any degree of
817 * freedom (this makes the loop thread-safe when writing into destination
818 * vectors). This is a more advanced strategy compared to the WorkStream
819 * class described in the @ref threads topic. Of course, to not destroy
820 * thread-safety, we have to be careful when writing into class-global
821 * structures.
822 *
823
824 *
825 * The class implementing the Laplace operator has three template arguments,
826 * one for the dimension (as many deal.II classes carry), one for the degree
827 * of the finite element (which we need to enable efficient computations
828 * through the FEEvaluation class), and one for the underlying scalar
829 * type. We want to use <code>double</code> numbers (i.e., double precision,
830 * 64-bit floating point) for the final matrix, but floats (single
831 * precision, 32-bit floating point numbers) for the multigrid level
832 * matrices (as that is only a preconditioner, and floats can be processed
833 * twice as fast). The class FEEvaluation also takes a template argument for
834 * the number of quadrature points in one dimension. In the code below, we
835 * hard-code it to <code>fe_degree+1</code>. If we wanted to change it
836 * independently of the polynomial degree, we would need to add a template
837 * parameter as is done in the MatrixFreeOperators::LaplaceOperator class.
838 *
839
840 *
841 * As a sidenote, if we implemented several different operations on the same
842 * grid and degrees of freedom (like a @ref GlossMassMatrix "mass matrix" and a Laplace matrix), we
843 * would define two classes like the current one for each of the operators
844 * (derived from the MatrixFreeOperators::Base class), and let both of them
845 * refer to the same MatrixFree data cache from the general problem
846 * class. The interface through MatrixFreeOperators::Base requires us to
847 * only provide a minimal set of functions. This concept allows for writing
848 * complex application codes with many matrix-free operations.
849 *
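 * A minimal sketch of that sharing pattern (the @p MassOperator class and the
 * surrounding variable names are hypothetical placeholders; only the
 * LaplaceOperator defined below actually exists in this program):
 *
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * std::shared_ptr<MatrixFree<dim, double>> matrix_free_storage =
 *   std::make_shared<MatrixFree<dim, double>>();
 * matrix_free_storage->reinit(
 *   mapping, dof_handler, constraints, QGauss<1>(fe_degree + 1), additional_data);
 *
 * LaplaceOperator<dim, fe_degree, double> laplace;
 * MassOperator<dim, fe_degree, double>    mass; // second, analogous operator class
 * laplace.initialize(matrix_free_storage);      // both refer to the same cache
 * mass.initialize(matrix_free_storage);
 * @endcode
 * </div>
 *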
850
851 *
852 * @note Storing values of type <code>VectorizedArray<number></code>
853 * requires care: Here, we use the deal.II table class which is prepared to
854 * hold the data with correct alignment. However, storing e.g. an
855 * <code>std::vector<VectorizedArray<number> ></code> is not possible with
856 * vectorization: A certain alignment of the data with the memory address
857 * boundaries is required (essentially, a VectorizedArray that is 32 bytes
858 * long in case of AVX needs to start at a memory address that is divisible
859 * by 32). The table class (as well as the AlignedVector class it is based
860 * on) makes sure that this alignment is respected, whereas std::vector does
861 * not in general, which may lead to segmentation faults at strange places
862 * for some systems or suboptimal performance for other systems.
863 *
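 * To make the note concrete, a minimal sketch (the sizes @p n_cells and
 * @p n_q_points are placeholders):
 *
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * // Risky: std::vector gives no guarantee that the VectorizedArray entries
 * // start at a 32-byte (AVX) or 64-byte (AVX-512) memory boundary.
 * std::vector<VectorizedArray<double>> possibly_misaligned(n_q_points);
 *
 * // Safe: AlignedVector, and the Table class built on top of it, allocate
 * // memory aligned to the width of VectorizedArray.
 * AlignedVector<VectorizedArray<double>> aligned_storage(n_q_points);
 * Table<2, VectorizedArray<double>> coefficient_table(n_cells, n_q_points);
 * @endcode
 * </div>
 *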
864 * @code
865 *   template <int dim, int fe_degree, typename number>
866 *   class LaplaceOperator
867 *   : public MatrixFreeOperators::
868 *   Base<dim, LinearAlgebra::distributed::Vector<number>>
869 *   {
870 *   public:
871 *   using value_type = number;
872 *  
873 *   LaplaceOperator();
874 *  
875 *   void clear() override;
876 *  
877 *   void evaluate_coefficient(const Coefficient<dim> &coefficient_function);
878 *  
879 *   virtual void compute_diagonal() override;
880 *  
881 *   private:
882 *   virtual void apply_add(
883 *   LinearAlgebra::distributed::Vector<number> &dst,
884 *   const LinearAlgebra::distributed::Vector<number> &src) const override;
885 *  
886 *   void
887 *   local_apply(const MatrixFree<dim, number> &data,
888 *   LinearAlgebra::distributed::Vector<number> &dst,
889 *   const LinearAlgebra::distributed::Vector<number> &src,
890 *   const std::pair<unsigned int, unsigned int> &cell_range) const;
891 *  
892 *   void local_compute_diagonal(
893 *   FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> &phi) const;
894 *  
895 *   Table<2, VectorizedArray<number>> coefficient;
896 *   };
897 *  
898 *  
899 *  
900 * @endcode
901 *
902 * This is the constructor of the @p LaplaceOperator class. All it does is
903 * to call the default constructor of the base class
904 * MatrixFreeOperators::Base, which in turn is based on the Subscriptor
905 * class that makes sure the object is not accessed after it has gone out of
906 * scope while still being referenced, e.g., by a preconditioner.
907 *
908 * @code
909 *   template <int dim, int fe_degree, typename number>
910 *   LaplaceOperator<dim, fe_degree, number>::LaplaceOperator()
911 *   : MatrixFreeOperators::Base<dim,
912 *   LinearAlgebra::distributed::Vector<number>>()
913 *   {}
914 *  
915 *  
916 *  
917 *   template <int dim, int fe_degree, typename number>
918 *   void LaplaceOperator<dim, fe_degree, number>::clear()
919 *   {
920 *   coefficient.reinit(0, 0);
921 *   MatrixFreeOperators::Base<dim, LinearAlgebra::distributed::Vector<number>>::
922 *   clear();
923 *   }
924 *  
925 *  
926 *  
927 * @endcode
928 *
929 *
930 * <a name="step_37-Computationofcoefficient"></a>
931 * <h4>Computation of coefficient</h4>
932 *
933
934 *
935 * To initialize the coefficient, we directly give it the Coefficient class
936 * defined above and then select the method
937 * <code>coefficient_function.value</code> with the vectorized number type (which the
938 * compiler can deduce from the point data type). The use of the
939 * FEEvaluation class (and its template arguments) will be explained below.
940 *
941 * @code
942 *   template <int dim, int fe_degree, typename number>
943 *   void LaplaceOperator<dim, fe_degree, number>::evaluate_coefficient(
944 *   const Coefficient<dim> &coefficient_function)
945 *   {
946 *   const unsigned int n_cells = this->data->n_cell_batches();
947 *   FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(*this->data);
948 *  
949 *   coefficient.reinit(n_cells, phi.n_q_points);
950 *   for (unsigned int cell = 0; cell < n_cells; ++cell)
951 *   {
952 *   phi.reinit(cell);
953 *   for (const unsigned int q : phi.quadrature_point_indices())
954 *   coefficient(cell, q) =
955 *   coefficient_function.value(phi.quadrature_point(q));
956 *   }
957 *   }
958 *  
959 *  
960 *  
961 * @endcode
962 *
963 *
964 * <a name="step_37-LocalevaluationofLaplaceoperator"></a>
965 * <h4>Local evaluation of Laplace operator</h4>
966 *
967
968 *
969 * Here comes the main function of this class, the evaluation of the
970 * matrix-vector product (or, in general, a finite element operator
971 * evaluation). This is done in a function that takes exactly four
972 * arguments, the MatrixFree object, the destination and source vectors, and
973 * a range of cells that are to be worked on. The method
974 * <code>cell_loop</code> in the MatrixFree class will internally call this
975 * function with some range of cells that is obtained by checking which
976 * cells are possible to work on simultaneously so that write operations do
977 * not cause any race condition. Note that the cell range used in the loop
978 * is not directly the number of (active) cells in the current mesh, but
979 * rather a collection of batches of cells. In other words, "cell" may be
980 * the wrong term to begin with, since FEEvaluation groups data from several
981 * cells together. This means that in the loop over quadrature points we are
982 * actually seeing a group of quadrature points of several cells as one
983 * block. This is done to enable a higher degree of vectorization. The
984 * number of such "cells" or "cell batches" is stored in MatrixFree and can
985 * be queried through MatrixFree::n_cell_batches(). Compared to the deal.II
986 * cell iterators, in this class all cells are laid out in a plain array
987 * with no direct knowledge of level or neighborship relations, which makes
988 * it possible to index the cells by unsigned integers.
989 *
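 * If one needs to relate a cell batch back to the usual deal.II cell
 * iterators, for instance for debugging, MatrixFree offers query functions
 * for that purpose; a small sketch (the object @p data stands for the
 * MatrixFree instance):
 *
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
 *   // the last batch may have unused vectorization lanes:
 *   for (unsigned int lane = 0;
 *        lane < data.n_active_entries_per_cell_batch(cell);
 *        ++lane)
 *     {
 *       const auto cell_iterator = data.get_cell_iterator(cell, lane);
 *       // cell_iterator behaves like an ordinary DoFHandler cell iterator
 *     }
 * @endcode
 * </div>
 *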
990
991 *
992 * The implementation of the Laplace operator is quite simple: First, we
993 * need to create an object FEEvaluation that contains the computational
994 * kernels and has data fields to store temporary results (e.g. gradients
995 * evaluated on all quadrature points on a collection of a few cells). Note
996 * that temporary results do not use a lot of memory, and since we specify
997 * template arguments with the element order, the data is stored on the
998 * stack (without expensive memory allocation). Usually, one only needs to
999 * set two template arguments, the dimension as a first argument and the
1000 * degree of the finite element as the second argument (this is equal to the
1001 * number of degrees of freedom per dimension minus one for FE_Q
1002 * elements). However, here we also want to be able to use float numbers for
1003 * the multigrid preconditioner, which is the last (fifth) template
1004 * argument. Therefore, we cannot rely on the default template arguments and
1005 * must also fill the third and fourth fields explicitly. The third
1006 * argument specifies the number of quadrature points per direction and has
1007 * a default value equal to the degree of the element plus one. The fourth
1008 * argument sets the number of components (one can also evaluate
1009 * vector-valued functions in systems of PDEs, but the default is a scalar
1010 * element), and finally the last argument sets the number type.
1011 *
1012
1013 *
1014 * Next, we loop over the given cell range and then we continue with the
1015 * actual implementation: <ol> <li>Tell the FEEvaluation object the (macro)
1016 * cell we want to work on. <li>Read in the values of the source vectors
1017 * (@p read_dof_values), including the resolution of constraints. This
1018 * stores @f$u_\mathrm{cell}@f$ as described in the introduction. <li>Compute
1019 * the unit-cell gradient (the evaluation of finite element
1020 * functions). Since FEEvaluation can combine value computations with
1021 * gradient computations, it uses a unified interface to all kinds of
1022 * derivatives of order between zero and two. We only want gradients, no
1023 * values and no second derivatives, so we set the function arguments to
1024 * true in the gradient slot (second slot), and to false in the values slot
1025 * (first slot). There is also a third slot for the Hessian which is
1026 * false by default, so it need not be given. Note that the FEEvaluation
1027 * class internally evaluates shape functions in an efficient way where one
1028 * dimension is worked on at a time (using the tensor product form of shape
1029 * functions and quadrature points as mentioned in the introduction). This
1030 * gives complexity equal to @f$\mathcal O(d^2 (p+1)^{d+1})@f$ for polynomial
1031 * degree @f$p@f$ in @f$d@f$ dimensions, compared to the naive approach with loops
1032 * over all local degrees of freedom and quadrature points that is used in
1033 * FEValues and costs @f$\mathcal O(d (p+1)^{2d})@f$. <li>Next comes the
1034 * application of the Jacobian transformation, the multiplication by the
1035 * variable coefficient and the quadrature weight. FEEvaluation has an
1036 * access function @p get_gradient that applies the Jacobian and returns the
1037 * gradient in real space. Then, we just need to multiply by the (scalar)
1038 * coefficient, and let the function @p submit_gradient apply the second
1039 * Jacobian (for the test function) and the quadrature weight and Jacobian
1040 * determinant (JxW). Note that the submitted gradient is stored in the same
1041 * data field as where it is read from in @p get_gradient. Therefore, you
1042 * need to make sure to not read from the same quadrature point again after
1043 * having called @p submit_gradient on that particular quadrature point. In
1044 * general, it is a good idea to copy the result of @p get_gradient when it
1045 * is used more often than once. <li>Next follows the summation over
1046 * quadrature points for all test functions that corresponds to the actual
1047 * integration step. For the Laplace operator, we just multiply by the
1048 * gradient, so we call the integrate function with the respective argument
1049 * set. If you have an equation where you test by both the values of the
1050 * test functions and the gradients, both template arguments need to be set
1051 * to true. Calling the integrate function first for values and then for
1052 * gradients in a separate call leads to wrong results, since the second
1053 * call will internally overwrite the results from the first call. Note that
1054 * there is no function argument for second derivatives in the integrate
1055 * step. <li>Eventually, the local contributions in the vector
1056 * @f$v_\mathrm{cell}@f$ as mentioned in the introduction need to be added into
1057 * the result vector (and constraints are applied). This is done with a call
1058 * to @p distribute_local_to_global, the same name as the corresponding
1059 * function in the AffineConstraints class (only that the local vector is now
1060 * stored inside the FEEvaluation object, as is the mapping between local and
1061 * global degrees of freedom). </ol>
1062 *
1063 * @code
1064 *   template <int dim, int fe_degree, typename number>
1065 *   void LaplaceOperator<dim, fe_degree, number>::local_apply(
1066 *   const MatrixFree<dim, number> &data,
1067 *   LinearAlgebra::distributed::Vector<number> &dst,
1068 *   const LinearAlgebra::distributed::Vector<number> &src,
1069 *   const std::pair<unsigned int, unsigned int> &cell_range) const
1070 *   {
1071 *   FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(data);
1072 *  
1073 *   for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
1074 *   {
1075 *   AssertDimension(coefficient.size(0), data.n_cell_batches());
1076 *   AssertDimension(coefficient.size(1), phi.n_q_points);
1077 *  
1078 *   phi.reinit(cell);
1079 *   phi.read_dof_values(src);
1080 *   phi.evaluate(EvaluationFlags::gradients);
1081 *   for (const unsigned int q : phi.quadrature_point_indices())
1082 *   phi.submit_gradient(coefficient(cell, q) * phi.get_gradient(q), q);
1083 *   phi.integrate(EvaluationFlags::gradients);
1084 *   phi.distribute_local_to_global(dst);
1085 *   }
1086 *   }
1087 *  
1088 *  
1089 *  
1090 * @endcode
1091 *
1092 * This function implements the loop over all cells for the
1093 * Base::apply_add() interface. This is done with the @p cell_loop of the
1094 * MatrixFree class, which takes the operator() of this class with arguments
1095 * MatrixFree, OutVector, InVector, cell_range. When working with MPI
1096 * parallelization (but no threading) as is done in this tutorial program,
1097 * the cell loop corresponds to the following three lines of code:
1098 *
1099
1100 *
1101 * <div class=CodeFragmentInTutorialComment>
1102 * @code
1103 * src.update_ghost_values();
1104 * local_apply(*this->data, dst, src, std::make_pair(0U,
1105 * data.n_cell_batches()));
1106 * dst.compress(VectorOperation::add);
1107 * @endcode
1108 * </div>
1109 *
1110
1111 *
1112 * Here, the two calls update_ghost_values() and compress() perform the data
1113 * exchange on processor boundaries for MPI, once for the source vector
1114 * where we need to read from entries owned by remote processors, and once
1115 * for the destination vector where we have accumulated parts of the
1116 * residuals that need to be added to the respective entry of the owner
1117 * processor. However, MatrixFree::cell_loop does not only abstract away
1118 * those two calls, but also performs some additional optimizations. On the
1119 * one hand, it will split the update_ghost_values() and compress() calls in
1120 * a way to allow for overlapping communication and computation. The
1121 * local_apply function is then called with three cell ranges representing
1122 * partitions of the cell range from 0 to MatrixFree::n_cell_batches(). On
1123 * the other hand, cell_loop also supports thread parallelism in which case
1124 * the cell ranges are split into smaller chunks and scheduled in an
1125 * advanced way that avoids access to the same vector entry by several
1126 * threads. That feature is explained in @ref step_48 "step-48".
1127 *
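 * For completeness, a sketch of how the thread parallelism mentioned above
 * would be switched on when building the MatrixFree object (this program
 * keeps it disabled; @p additional_data is the usual AdditionalData object):
 *
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * typename MatrixFree<dim, double>::AdditionalData additional_data;
 * // let cell_loop() schedule chunks of cell batches onto tasks such that
 * // no two threads write to the same vector entries at the same time
 * additional_data.tasks_parallel_scheme =
 *   MatrixFree<dim, double>::AdditionalData::partition_partition;
 * additional_data.tasks_block_size = 16; // cells per chunk, a tuning knob
 * @endcode
 * </div>
 *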
1128
1129 *
1130 * Note that after the cell loop, the constrained degrees of freedom need to
1131 * be touched once more for sensible vmult() operators: Since the assembly
1132 * loop automatically resolves constraints (just as the
1133 * AffineConstraints::distribute_local_to_global() call does), it does not
1134 * compute any contribution for constrained degrees of freedom, leaving the
1135 * respective entries zero. This would represent a matrix that had empty
1136 * rows and columns for constrained degrees of freedom. However, iterative
1137 * solvers like CG only work for non-singular matrices. The easiest way to
1138 * avoid this singularity is to set the sub-block of the matrix that corresponds to
1139 * constrained DoFs to an identity matrix, in which case application of the
1140 * matrix would simply copy the elements of the right hand side vector into
1141 * the left hand side. Fortunately, the vmult() implementation of
1142 * MatrixFreeOperators::Base does this automatically for us outside the
1143 * apply_add() function, so we do not need to take further action here.
1144 *
1145
1146 *
1147 * When using the combination of MatrixFree and FEEvaluation in parallel
1148 * with MPI, there is one aspect to be careful about &mdash; the indexing
1149 * used for accessing the vector. For performance reasons, MatrixFree and
1150 * FEEvaluation are designed to access vectors in MPI-local index space also
1151 * when working with multiple processors. Working in local index space means
1152 * that no index translation needs to be performed at the place the vector
1153 * access happens, apart from the unavoidable indirect addressing. However,
1154 * local index spaces are ambiguous: While it is standard convention to
1155 * access the locally owned range of a vector with indices between 0 and the
1156 * local size, the numbering is not so clear for the ghosted entries and
1157 * somewhat arbitrary. For the matrix-vector product, only the indices
1158 * appearing on locally owned cells (plus those referenced via hanging node
1159 * constraints) are necessary. However, in deal.II we often set all the
1160 * degrees of freedom on ghosted elements as ghosted vector entries, called
1161 * the
1162 * @ref GlossLocallyRelevantDof "locally relevant DoFs described in the glossary".
1163 * In that case, the MPI-local index of a ghosted vector entry can in
1164 * general be different in the two possible ghost sets, despite referring
1165 * to the same global index. To avoid problems, FEEvaluation checks that
1166 * the partitioning of the vector used for the matrix-vector product does
1167 * indeed match with the partitioning of the indices in MatrixFree by a
1168 * check called
1169 * LinearAlgebra::distributed::Vector::partitioners_are_compatible. To
1170 * facilitate things, the MatrixFreeOperators::Base class includes a
1171 * mechanism to fit the ghost set to the correct layout. This happens in the
1172 * ghost region of the vector, so keep in mind that the ghost region might
1173 * be modified in both the destination and source vector after a call to a
1174 * vmult() method. This is legitimate because the ghost region of a
1175 * distributed deal.II vector is a mutable section and filled on
1176 * demand. Vectors used in matrix-vector products must not be ghosted upon
1177 * entry of vmult() functions, so no information gets lost.
1178 *
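 * Spelled out in code, such a compatibility check and the safe way to create
 * a vector with the expected ghost layout look roughly as follows (a sketch;
 * @p matrix_free, @p src and @p tmp are placeholder names):
 *
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * Assert(src.partitioners_are_compatible(
 *          *matrix_free.get_vector_partitioner()),
 *        ExcMessage("Vector ghost layout does not match MatrixFree."));
 *
 * // or simply let MatrixFree produce a vector with the correct layout:
 * LinearAlgebra::distributed::Vector<number> tmp;
 * matrix_free.initialize_dof_vector(tmp);
 * @endcode
 * </div>
 *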
1179 * @code
1180 *   template <int dim, int fe_degree, typename number>
1181 *   void LaplaceOperator<dim, fe_degree, number>::apply_add(
1182 *   LinearAlgebra::distributed::Vector<number> &dst,
1183 *   const LinearAlgebra::distributed::Vector<number> &src) const
1184 *   {
1185 *   this->data->cell_loop(&LaplaceOperator::local_apply, this, dst, src);
1186 *   }
1187 *  
1188 *  
1189 *  
1190 * @endcode
1191 *
1192 * The following function implements the computation of the diagonal of the
1193 * operator. Computing matrix entries of a matrix-free operator evaluation
1194 * turns out to be more complicated than evaluating the
1195 * operator. Fundamentally, we could obtain a matrix representation of the
1196 * operator by applying the operator on <i>all</i> unit vectors. Of course,
1197 * that would be very inefficient since we would need to perform <i>n</i>
1198 * operator evaluations to retrieve the whole matrix. Furthermore, this
1199 * approach would completely ignore the matrix sparsity. On an individual
1200 * cell, however, this is the way to go and actually not that inefficient as
1201 * there usually is a coupling between all degrees of freedom inside the
1202 * cell.
1203 *
1204
1205 *
1206 * We first initialize the diagonal vector to the correct parallel
1207 * layout. This vector is encapsulated in a member called
1208 * inverse_diagonal_entries of type DiagonalMatrix in the base class
1209 * MatrixFreeOperators::Base. This member is a shared pointer that we first
1210 * need to initialize and then get the vector representing the diagonal
1211 * entries in the matrix. As to the actual diagonal computation, we could
1212 * manually write a cell_loop and invoke a local worker that applies all unit
1213 * vectors on each cell. Instead, we use MatrixFreeTools::compute_diagonal()
1214 * to do this for us. Afterwards, we need to set the vector entries
1215 * subject to Dirichlet boundary conditions to one (either those on the
1216 * boundary described by the AffineConstraints object inside MatrixFree or
1217 * the indices at the interface between different grid levels in adaptive
1218 * multigrid). This is done through the function
1219 * MatrixFreeOperators::Base::set_constrained_entries_to_one() and matches
1220 * with the setting in the matrix-vector product provided by the Base
1221 * operator. Finally, we need to invert the diagonal entries which is the
1222 * form required by the Chebyshev smoother based on the Jacobi iteration. In
1223 * the loop, we assert that all entries are non-zero, because they should
1224 * either have obtained a positive contribution from integrals or be
1225 * constrained and treated by @p set_constrained_entries_to_one().
1226 *
1227 * @code
1228 *   template <int dim, int fe_degree, typename number>
1229 *   void LaplaceOperator<dim, fe_degree, number>::compute_diagonal()
1230 *   {
1231 *   this->inverse_diagonal_entries.reset(
1232 *   new DiagonalMatrix<LinearAlgebra::distributed::Vector<number>>());
1233 *   LinearAlgebra::distributed::Vector<number> &inverse_diagonal =
1234 *   this->inverse_diagonal_entries->get_vector();
1235 *   this->data->initialize_dof_vector(inverse_diagonal);
1236 *  
1237 *   MatrixFreeTools::compute_diagonal(*this->data,
1238 *   inverse_diagonal,
1239 *   &LaplaceOperator::local_compute_diagonal,
1240 *   this);
1241 *  
1242 *   this->set_constrained_entries_to_one(inverse_diagonal);
1243 *  
1244 *   for (unsigned int i = 0; i < inverse_diagonal.locally_owned_size(); ++i)
1245 *   {
1246 *   Assert(inverse_diagonal.local_element(i) > 0.,
1247 *   ExcMessage("No diagonal entry in a positive definite operator "
1248 *   "should be zero"));
1249 *   inverse_diagonal.local_element(i) =
1250 *   1. / inverse_diagonal.local_element(i);
1251 *   }
1252 *   }
1253 *  
1254 *  
1255 *  
1256 * @endcode
1257 *
1258 * In the local compute loop, we compute the diagonal by a loop over all
1259 * columns in the local matrix and putting the entry 1 in the <i>i</i>th
1260 * slot and a zero entry in all other slots, i.e., we apply the cell-wise
1261 * differential operator on one unit vector at a time. The inner part
1262 * invoking FEEvaluation::evaluate(), the loop over quadrature points, and
1263 * FEEvaluation::integrate(), is exactly the same as in the local_apply
1264 * function. Afterwards, we pick out the <i>i</i>th entry of the local
1265 * result and put it to a temporary storage (as we overwrite all entries in
1266 * the array behind FEEvaluation::get_dof_value() with the next loop
1267 * iteration). Finally, the temporary storage is written to the destination
1268 * vector. Note how we use FEEvaluation::get_dof_value() and
1269 * FEEvaluation::submit_dof_value() to read and write to the data field that
1270 * FEEvaluation uses for the integration on the one hand and writes into the
1271 * global vector on the other hand.
1272 *
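 * Written out by hand, the unit-vector loop described above would look
 * roughly like the following sketch (assuming @p phi has been reinitialized
 * on the current cell batch and @p cell denotes its batch index, as in the
 * function below):
 *
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * AlignedVector<VectorizedArray<number>> diagonal(phi.dofs_per_cell);
 * for (unsigned int i = 0; i < phi.dofs_per_cell; ++i)
 *   {
 *     // set the i-th unit vector as input on the current cell batch
 *     for (unsigned int j = 0; j < phi.dofs_per_cell; ++j)
 *       phi.submit_dof_value(make_vectorized_array<number>(0.), j);
 *     phi.submit_dof_value(make_vectorized_array<number>(1.), i);
 *
 *     phi.evaluate(EvaluationFlags::gradients);
 *     for (const unsigned int q : phi.quadrature_point_indices())
 *       phi.submit_gradient(coefficient(cell, q) * phi.get_gradient(q), q);
 *     phi.integrate(EvaluationFlags::gradients);
 *
 *     // keep only the i-th entry of the resulting local vector
 *     diagonal[i] = phi.get_dof_value(i);
 *   }
 * for (unsigned int i = 0; i < phi.dofs_per_cell; ++i)
 *   phi.submit_dof_value(diagonal[i], i);
 * @endcode
 * </div>
 *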
1273
1274 *
1275 * Given that we are only interested in the matrix diagonal, we simply throw
1276 * away all other entries of the local matrix that have been computed along
1277 * the way. While it might seem wasteful to compute the complete cell matrix
1278 * and then throw away everything but the diagonal, the integration is so
1279 * efficient that the computation does not take too much time. Note that the
1280 * complexity of operator evaluation per element is @f$\mathcal
1281 * O((p+1)^{d+1})@f$ for polynomial degree @f$p@f$, so computing the whole matrix
1282 * costs us @f$\mathcal O((p+1)^{2d+1})@f$ operations, not too far away from
1283 * @f$\mathcal O((p+1)^{2d})@f$ complexity for computing the diagonal with
1284 * FEValues. Since FEEvaluation is also considerably faster due to
1285 * vectorization and other optimizations, the diagonal computation with this
1286 * function is actually the fastest (simple) variant. (It would be possible
1287 * to compute the diagonal with sum factorization techniques in @f$\mathcal
1288 * O((p+1)^{d+1})@f$ operations involving specifically adapted
1289 * kernels&mdash;but since such kernels are only useful in that particular
1290 * context and the diagonal computation is typically not on the critical
1291 * path, they have not been implemented in deal.II.)
1292 *
1293
1294 *
1295 * Note that the code that calls distribute_local_to_global on the vector to
1296 * accumulate the diagonal entries into the global matrix has some
1297 * limitations. For operators with hanging node constraints that distribute
1298 * an integral contribution of a constrained DoF to several other entries
1299 * inside the distribute_local_to_global call, the vector interface used
1300 * here does not exactly compute the diagonal entries, but lumps some
1301 * contributions located on the diagonal of the local matrix that would end
1302 * up in an off-diagonal position of the global matrix to the diagonal. The
1303 * result is correct up to discretization accuracy as explained in <a
1304 * href="http://dx.doi.org/10.4208/cicp.101214.021015a">Kormann (2016),
1305 * section 5.3</a>, but not mathematically equal. In this tutorial program,
1306 * no harm can happen because the diagonal is only used for the multigrid
1307 * level matrices where no hanging node constraints appear.
1308 *
1309 * @code
1310 *   template <int dim, int fe_degree, typename number>
1311 *   void LaplaceOperator<dim, fe_degree, number>::local_compute_diagonal(
1312 *   FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> &phi) const
1313 *   {
1314 *   const unsigned int cell = phi.get_current_cell_index();
1315 *  
1316 *   phi.evaluate(EvaluationFlags::gradients);
1317 *  
1318 *   for (const unsigned int q : phi.quadrature_point_indices())
1319 *   {
1320 *   phi.submit_gradient(coefficient(cell, q) * phi.get_gradient(q), q);
1321 *   }
1322 *   phi.integrate(EvaluationFlags::gradients);
1323 *   }
1324 *  
1325 *  
1326 *  
1327 * @endcode
1328 *
1329 *
1330 * <a name="step_37-LaplaceProblemclass"></a>
1331 * <h3>LaplaceProblem class</h3>
1332 *
1333
1334 *
1335 * This class is based on the one in @ref step_16 "step-16". However, we replaced the
1336 * SparseMatrix<double> class by our matrix-free implementation, which means
1337 * that we can also skip the sparsity patterns. Notice that we define the
1338 * LaplaceOperator class with the degree of finite element as template
1339 * argument (the value is defined at the top of the file), and that we use
1340 * float numbers for the multigrid level matrices.
1341 *
1342
1343 *
1344 * The class also has a member variable to keep track of all the detailed
1345 * timings for setting up the entire chain of data before we actually go
1346 * about solving the problem. In addition, there is an output stream (that
1347 * is disabled by default) that can be used to output details for the
1348 * individual setup operations instead of only the summary that is printed
1349 * out by default.
1350 *
1351
1352 *
1353 * Since this program is designed to be used with MPI, we also provide the
1354 * usual @p pcout output stream that only prints the information of the
1355 * processor with MPI rank 0. The grid used for this program can either be
1356 * a distributed triangulation based on p4est (in case deal.II is configured
1357 * to use p4est), otherwise it is a serial grid that only runs without MPI.
1358 *
1359 * @code
1360 *   template <int dim>
1361 *   class LaplaceProblem
1362 *   {
1363 *   public:
1364 *   LaplaceProblem();
1365 *   void run();
1366 *  
1367 *   private:
1368 *   void setup_system();
1369 *   void assemble_rhs();
1370 *   void solve();
1371 *   void output_results(const unsigned int cycle) const;
1372 *  
1373 *   #ifdef DEAL_II_WITH_P4EST
1374 *   parallel::distributed::Triangulation<dim> triangulation;
1375 *   #else
1376 *   Triangulation<dim> triangulation;
1377 *   #endif
1378 *  
1379 *   const FE_Q<dim> fe;
1380 *   DoFHandler<dim> dof_handler;
1381 *  
1382 *   const MappingQ1<dim> mapping;
1383 *  
1384 *   AffineConstraints<double> constraints;
1385 *   using SystemMatrixType =
1386 *   LaplaceOperator<dim, degree_finite_element, double>;
1387 *   SystemMatrixType system_matrix;
1388 *  
1389 *   MGConstrainedDoFs mg_constrained_dofs;
1390 *   using LevelMatrixType = LaplaceOperator<dim, degree_finite_element, float>;
1391 *   MGLevelObject<LevelMatrixType> mg_matrices;
1392 *  
1393 *   LinearAlgebra::distributed::Vector<double> solution;
1394 *   LinearAlgebra::distributed::Vector<double> system_rhs;
1395 *  
1396 *   double setup_time;
1397 *   ConditionalOStream pcout;
1398 *   ConditionalOStream time_details;
1399 *   };
1400 *  
1401 *  
1402 *  
1403 * @endcode
1404 *
1405 * When we initialize the finite element, we of course have to use the
1406 * degree specified at the top of the file as well (otherwise, an exception
1407 * will be thrown at some point, since the computational kernel defined in
1408 * the templated LaplaceOperator class and the information from the finite
1409 * element read out by MatrixFree will not match). The constructor of the
1410 * triangulation needs to set an additional flag that tells the grid to
1411 * conform to the 2:1 cell balance over vertices, which is needed for the
1412 * convergence of the geometric multigrid routines. For the distributed
1413 * grid, we also need to specifically enable the multigrid hierarchy.
1414 *
1415 * @code
1416 *   template <int dim>
1417 *   LaplaceProblem<dim>::LaplaceProblem()
1418 *   #ifdef DEAL_II_WITH_P4EST
1419 *   : triangulation(MPI_COMM_WORLD,
1420 *   Triangulation<dim>::limit_level_difference_at_vertices,
1421 *   parallel::distributed::Triangulation<
1422 *   dim>::construct_multigrid_hierarchy)
1423 *   #else
1424 *   : triangulation(Triangulation<dim>::limit_level_difference_at_vertices)
1425 *   #endif
1426 *   , fe(degree_finite_element)
1427 *   , dof_handler(triangulation)
1428 *   , setup_time(0.)
1429 *   , pcout(std::cout, Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
1430 * @endcode
1431 *
1432 * The LaplaceProblem class holds an additional output stream that
1433 * collects detailed timings about the setup phase. This stream, called
1434 * time_details, is disabled by default through the @p false argument
1435 * specified here. For detailed timings, removing the @p false argument
1436 * prints all the details.
1437 *
1438 * @code
1439 *   , time_details(std::cout,
1440 *   false &&
1441 *   Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
1442 *   {}
1443 *  
1444 *  
1445 *  
1446 * @endcode
1447 *
1448 *
1449 * <a name="step_37-LaplaceProblemsetup_system"></a>
1450 * <h4>LaplaceProblem::setup_system</h4>
1451 *
1452
1453 *
1454 * The setup stage is in analogy to @ref step_16 "step-16" with relevant changes due to the
1455 * LaplaceOperator class. The first thing to do is to set up the DoFHandler,
1456 * including the degrees of freedom for the multigrid levels, and to
1457 * initialize constraints from hanging nodes and homogeneous Dirichlet
1458 * conditions. Since we intend to use this program in %parallel with MPI,
1459 * we need to make sure that the constraints get to know the locally
1460 * relevant degrees of freedom, otherwise the storage would explode when
1461 * using more than a few hundred million degrees of freedom, see
1462 * @ref step_40 "step-40".
1463 *
1464
1465 *
1466 * Once we have created the multigrid dof_handler and the constraints, we
1467 * can call the reinit function for the global matrix operator as well as
1468 * each level of the multigrid scheme. The main action is to set up the
1469 * <code> MatrixFree </code> instance for the problem. The base class of the
1470 * <code>LaplaceOperator</code> class, MatrixFreeOperators::Base, is
1471 * initialized with a shared pointer to a MatrixFree object. This way, we can
1472 * simply create it here and then pass it on to the system matrix and level
1473 * matrices, respectively. For setting up MatrixFree, we need to activate
1474 * the update flag in the AdditionalData field of MatrixFree that enables
1475 * the storage of quadrature point coordinates in real space (by default, it
1476 * only caches data for gradients (inverse transposed Jacobians) and JxW
1477 * values). Note that if we call the reinit function without specifying the
1478 * level (i.e., giving <code>level = numbers::invalid_unsigned_int</code>),
1479 * MatrixFree constructs a loop over the active cells. In this tutorial, we
1480 * do not use threads in addition to MPI, which is why we explicitly disable
1481 * them by setting the MatrixFree::AdditionalData::tasks_parallel_scheme to
1482 * MatrixFree::AdditionalData::none. Finally, the coefficient is evaluated
1483 * and vectors are initialized as explained above.
1484 *
1485 * @code
1486 *   template <int dim>
1487 *   void LaplaceProblem<dim>::setup_system()
1488 *   {
1489 *   Timer time;
1490 *   setup_time = 0;
1491 *   {
1492 *   system_matrix.clear();
1493 *   mg_matrices.clear_elements();
1494 *  
1495 *   dof_handler.distribute_dofs(fe);
1496 *   dof_handler.distribute_mg_dofs();
1497 *  
1498 *   pcout << "Number of degrees of freedom: " << dof_handler.n_dofs()
1499 *   << std::endl;
1500 *  
1501 *   constraints.clear();
1502 *   constraints.reinit(dof_handler.locally_owned_dofs(),
1503 *   DoFTools::extract_locally_relevant_dofs(dof_handler));
1504 *   DoFTools::make_hanging_node_constraints(dof_handler, constraints);
1505 *   VectorTools::interpolate_boundary_values(
1506 *   mapping, dof_handler, 0, Functions::ZeroFunction<dim>(), constraints);
1507 *   constraints.close();
1508 *   }
1509 *   setup_time += time.wall_time();
1510 *   time_details << "Distribute DoFs & B.C. (CPU/wall) " << time.cpu_time()
1511 *   << "s/" << time.wall_time() << 's' << std::endl;
1512 *   time.restart();
1513 *   {
1514 *   {
1515 *   typename MatrixFree<dim, double>::AdditionalData additional_data;
1516 *   additional_data.tasks_parallel_scheme =
1517 *   MatrixFree<dim, double>::AdditionalData::none;
1518 *   additional_data.mapping_update_flags =
1519 *   (update_gradients | update_JxW_values | update_quadrature_points);
1520 *   std::shared_ptr<MatrixFree<dim, double>> system_mf_storage(
1521 *   new MatrixFree<dim, double>());
1522 *   system_mf_storage->reinit(mapping,
1523 *   dof_handler,
1524 *   constraints,
1525 *   QGauss<1>(fe.degree + 1),
1526 *   additional_data);
1527 *   system_matrix.initialize(system_mf_storage);
1528 *   }
1529 *  
1530 *   system_matrix.evaluate_coefficient(Coefficient<dim>());
1531 *  
1532 *   system_matrix.initialize_dof_vector(solution);
1533 *   system_matrix.initialize_dof_vector(system_rhs);
1534 *   }
1535 *   setup_time += time.wall_time();
1536 *   time_details << "Setup matrix-free system (CPU/wall) " << time.cpu_time()
1537 *   << "s/" << time.wall_time() << 's' << std::endl;
1538 *   time.restart();
1539 *  
1540 * @endcode
1541 *
1542 * Next, initialize the matrices for the multigrid method on all the
1543 * levels. The data structure MGConstrainedDoFs keeps information about
1544 * the indices subject to boundary conditions as well as the indices on
1545 * edges between different refinement levels as described in the @ref step_16 "step-16"
1546 * tutorial program. We then go through the levels of the mesh and
1547 * construct the constraints and matrices on each level. These follow
1548 * closely the construction of the system matrix on the original mesh,
1549 * except the slight difference in naming when accessing information on
1550 * the levels rather than the active cells.
1551 *
1552 * @code
1553 *   {
1554 *   const unsigned int nlevels = triangulation.n_global_levels();
1555 *   mg_matrices.resize(0, nlevels - 1);
1556 *  
1557 *   const std::set<types::boundary_id> dirichlet_boundary_ids = {0};
1558 *   mg_constrained_dofs.initialize(dof_handler);
1559 *   mg_constrained_dofs.make_zero_boundary_constraints(
1560 *   dof_handler, dirichlet_boundary_ids);
1561 *  
1562 *   for (unsigned int level = 0; level < nlevels; ++level)
1563 *   {
1564 *   AffineConstraints<double> level_constraints(
1565 *   dof_handler.locally_owned_mg_dofs(level),
1566 *   DoFTools::extract_locally_relevant_level_dofs(dof_handler, level));
1567 *   for (const types::global_dof_index dof_index :
1568 *   mg_constrained_dofs.get_boundary_indices(level))
1569 *   level_constraints.constrain_dof_to_zero(dof_index);
1570 *   level_constraints.close();
1571 *  
1572 *   typename MatrixFree<dim, float>::AdditionalData additional_data;
1573 *   additional_data.tasks_parallel_scheme =
1574 *   MatrixFree<dim, float>::AdditionalData::none;
1575 *   additional_data.mapping_update_flags =
1576 *   (update_gradients | update_JxW_values | update_quadrature_points);
1577 *   additional_data.mg_level = level;
1578 *   std::shared_ptr<MatrixFree<dim, float>> mg_mf_storage_level =
1579 *   std::make_shared<MatrixFree<dim, float>>();
1580 *   mg_mf_storage_level->reinit(mapping,
1581 *   dof_handler,
1582 *   level_constraints,
1583 *   QGauss<1>(fe.degree + 1),
1584 *   additional_data);
1585 *  
1586 *   mg_matrices[level].initialize(mg_mf_storage_level,
1587 *   mg_constrained_dofs,
1588 *   level);
1589 *   mg_matrices[level].evaluate_coefficient(Coefficient<dim>());
1590 *   }
1591 *   }
1592 *   setup_time += time.wall_time();
1593 *   time_details << "Setup matrix-free levels (CPU/wall) " << time.cpu_time()
1594 *   << "s/" << time.wall_time() << 's' << std::endl;
1595 *   }
1596 *  
1597 *  
1598 *  
1599 * @endcode
1600 *
1601 *
1602 * <a name="step_37-LaplaceProblemassemble_rhs"></a>
1603 * <h4>LaplaceProblem::assemble_rhs</h4>
1604 *
1605
1606 *
1607 * The assemble function is very simple since all we have to do is to
1608 * assemble the right hand side. Thanks to FEEvaluation and all the data
1609 * cached in the MatrixFree class, which we query from
1610 * MatrixFreeOperators::Base, this can be done in a few lines. Since this
1611 * call is not wrapped into a MatrixFree::cell_loop (which would be an
1612 * alternative), we must not forget to call compress() at the end of the
1613 * assembly to send all the contributions of the right hand side to the
1614 * owner of the respective degree of freedom.
1615 *
1616 * @code
1617 *   template <int dim>
1618 *   void LaplaceProblem<dim>::assemble_rhs()
1619 *   {
1620 *   Timer time;
1621 *  
1622 *   system_rhs = 0;
1623 *   FEEvaluation<dim, degree_finite_element> phi(
1624 *   *system_matrix.get_matrix_free());
1625 *   for (unsigned int cell = 0;
1626 *   cell < system_matrix.get_matrix_free()->n_cell_batches();
1627 *   ++cell)
1628 *   {
1629 *   phi.reinit(cell);
1630 *   for (const unsigned int q : phi.quadrature_point_indices())
1631 *   phi.submit_value(make_vectorized_array<double>(1.0), q);
1632 *   phi.integrate(EvaluationFlags::values);
1633 *   phi.distribute_local_to_global(system_rhs);
1634 *   }
1635 *   system_rhs.compress(VectorOperation::add);
1636 *  
1637 *   setup_time += time.wall_time();
1638 *   time_details << "Assemble right hand side (CPU/wall) " << time.cpu_time()
1639 *   << "s/" << time.wall_time() << 's' << std::endl;
1640 *   }
1641 *  
1642 *  
1643 *  
1644 * @endcode
1645 *
1646 *
1647 * <a name="step_37-LaplaceProblemsolve"></a>
1648 * <h4>LaplaceProblem::solve</h4>
1649 *
1650
1651 *
1652 * The solution process is similar as in @ref step_16 "step-16". We start with the setup of
1653 * the transfer. For LinearAlgebra::distributed::Vector, there is a very
1654 * fast transfer class called MGTransferMatrixFree that does the
1655 * interpolation between the grid levels with the same fast sum
1656 * factorization kernels that get also used in FEEvaluation.
1657 *
1658 * @code
1659 *   template <int dim>
1660 *   void LaplaceProblem<dim>::solve()
1661 *   {
1662 *   Timer time;
1663 *   MGTransferMatrixFree<dim, float> mg_transfer(mg_constrained_dofs);
1664 *   mg_transfer.build(dof_handler);
1665 *   setup_time += time.wall_time();
1666 *   time_details << "MG build transfer time (CPU/wall) " << time.cpu_time()
1667 *   << "s/" << time.wall_time() << "s\n";
1668 *   time.restart();
1669 *  
1670 * @endcode
1671 *
1672 * As a smoother, this tutorial program uses a Chebyshev iteration instead
1673 * of SOR in @ref step_16 "step-16". (SOR would be very difficult to implement because we
1674 * do not have the matrix elements available explicitly, and it is
1675 * difficult to make it work efficiently in %parallel.) The smoother is
1676 * initialized with our level matrices and the mandatory additional data
1677 * for the Chebyshev smoother. We use a relatively high degree here (5),
1678 * since matrix-vector products are comparably cheap. We choose to smooth
1679 * out a range of @f$[1.2 \hat{\lambda}_{\max}/15,1.2 \hat{\lambda}_{\max}]@f$
1680 * in the smoother where @f$\hat{\lambda}_{\max}@f$ is an estimate of the
1681 * largest eigenvalue (the factor 1.2 is applied inside
1682 * PreconditionChebyshev). In order to compute that eigenvalue, the
1683 * Chebyshev initialization performs a few steps of a CG algorithm
1684 * without preconditioner. Since the highest eigenvalue is usually the
1685 * easiest one to find and a rough estimate is enough, we choose 10
1686 * iterations. Finally, we also set the inner preconditioner type in the
1687 * Chebyshev method which is a Jacobi iteration. This is represented by
1688 * the DiagonalMatrix class that gets the inverse diagonal entry provided
1689 * by our LaplaceOperator class.
1690 *
1691
1692 *
1693 * On level zero, we initialize the smoother differently because we want
1694 * to use the Chebyshev iteration as a solver. PreconditionChebyshev
1695 * allows the user to switch to solver mode where the number of iterations
1696 * is internally chosen to the correct value. In the additional data
1697 * object, this setting is activated by choosing the polynomial degree to
1698 * @p numbers::invalid_unsigned_int. The algorithm will then attack all
1699 * eigenvalues between the smallest and largest one in the coarse level
1700 * matrix. The number of steps in the Chebyshev smoother is chosen such
1701 * that the Chebyshev convergence estimates guarantee to reduce the
1702 * residual by the number specified in the variable @p
1703 * smoothing_range. Note that for solving, @p smoothing_range is a
1704 * relative tolerance and chosen smaller than one; in this case, we select
1705 * three orders of magnitude, whereas it is a number larger than 1 when
1706 * only selected eigenvalues are smoothed.
1707 *
1708
1709 *
1710 * From a computational point of view, the Chebyshev iteration is a very
1711 * attractive coarse grid solver as long as the coarse size is
1712 * moderate. This is because the Chebyshev method performs only
1713 * matrix-vector products and vector updates, which typically parallelize
1714 * better to the largest cluster size with more than a few tens of
1715 * thousands of cores than the inner products involved in other iterative
1716 * methods. The former involves only local communication between neighbors
1717 * in the (coarse) mesh, whereas the latter requires global communication
1718 * over all processors.
1719 *
1720 * @code
1721 *   using SmootherType =
1722 *   PreconditionChebyshev<LevelMatrixType,
1723 *   LinearAlgebra::distributed::Vector<float>>;
1724 *   mg::SmootherRelaxation<SmootherType,
1725 *   LinearAlgebra::distributed::Vector<float>>
1726 *   mg_smoother;
1727 *   MGLevelObject<typename SmootherType::AdditionalData> smoother_data;
1728 *   smoother_data.resize(0, triangulation.n_global_levels() - 1);
1729 *   for (unsigned int level = 0; level < triangulation.n_global_levels();
1730 *   ++level)
1731 *   {
1732 *   if (level > 0)
1733 *   {
1734 *   smoother_data[level].smoothing_range = 15.;
1735 *   smoother_data[level].degree = 5;
1736 *   smoother_data[level].eig_cg_n_iterations = 10;
1737 *   }
1738 *   else
1739 *   {
1740 *   smoother_data[0].smoothing_range = 1e-3;
1741 *   smoother_data[0].degree = numbers::invalid_unsigned_int;
1742 *   smoother_data[0].eig_cg_n_iterations = mg_matrices[0].m();
1743 *   }
1744 *   mg_matrices[level].compute_diagonal();
1745 *   smoother_data[level].preconditioner =
1746 *   mg_matrices[level].get_matrix_diagonal_inverse();
1747 *   }
1748 *   mg_smoother.initialize(mg_matrices, smoother_data);
1749 *  
1750 *   MGCoarseGridApplySmoother<LinearAlgebra::distributed::Vector<float>>
1751 *   mg_coarse;
1752 *   mg_coarse.initialize(mg_smoother);
1753 *  
1754 * @endcode
1755 *
1756 * The next step is to set up the interface matrices that are needed for the
1757 * case with hanging nodes. The adaptive multigrid realization in deal.II
1758 * implements an approach called local smoothing. This means that the
1759 * smoothing on the finest level only covers the local part of the mesh
1760 * defined by the fixed (finest) grid level and ignores parts of the
1761 * computational domain where the terminal cells are coarser than this
1762 * level. As the method progresses to coarser levels, more and more of the
1763 * global mesh will be covered. At some coarser level, the whole mesh will
1764 * be covered. Since all level matrices in the multigrid method cover a
1765 * single level in the mesh, no hanging nodes appear on the level matrices.
1766 * At the interface between multigrid levels, homogeneous Dirichlet boundary
1767 * conditions are set while smoothing. When the residual is transferred to
1768 * the next coarser level, however, the coupling over the multigrid
1769 * interface needs to be taken into account. This is done by the so-called
1770 * interface (or edge) matrices that compute the part of the residual that
1771 * is missed by the level matrix with
1772 * homogeneous Dirichlet conditions. We refer to the @ref mg_paper
1773 * "Multigrid paper by Janssen and Kanschat" for more details.
1774 *
1775
1776 *
1777 * For the implementation of those interface matrices, there is already a
1778 * pre-defined class MatrixFreeOperators::MGInterfaceOperator that wraps
1779 * the routines MatrixFreeOperators::Base::vmult_interface_down() and
1780 * MatrixFreeOperators::Base::vmult_interface_up() in a new class with @p
1781 * vmult() and @p Tvmult() operations (that were originally written for
1782 * matrices, hence expecting those names). Note that vmult_interface_down
1783 * is used during the restriction phase of the multigrid V-cycle, whereas
1784 * vmult_interface_up is used during the prolongation phase.
1785 *
1786
1787 *
1788 * Once the interface matrix is created, we set up the remaining Multigrid
1789 * preconditioner infrastructure in complete analogy to @ref step_16 "step-16" to obtain
1790 * a @p preconditioner object that can be applied to a matrix.
1791 *
1792 * @code
1793 *   mg::Matrix<LinearAlgebra::distributed::Vector<float>> mg_matrix(
1794 *   mg_matrices);
1795 *  
1796 *   MGLevelObject<MatrixFreeOperators::MGInterfaceOperator<LevelMatrixType>>
1797 *   mg_interface_matrices;
1798 *   mg_interface_matrices.resize(0, triangulation.n_global_levels() - 1);
1799 *   for (unsigned int level = 0; level < triangulation.n_global_levels();
1800 *   ++level)
1801 *   mg_interface_matrices[level].initialize(mg_matrices[level]);
1802 *   mg::Matrix<LinearAlgebra::distributed::Vector<float>> mg_interface(
1803 *   mg_interface_matrices);
1804 *  
1805 *   Multigrid<LinearAlgebra::distributed::Vector<float>> mg(
1806 *   mg_matrix, mg_coarse, mg_transfer, mg_smoother, mg_smoother);
1807 *   mg.set_edge_matrices(mg_interface, mg_interface);
1808 *  
1809 *   PreconditionMG<dim,
1810 *   LinearAlgebra::distributed::Vector<float>,
1811 *   MGTransferMatrixFree<dim, float>>
1812 *   preconditioner(dof_handler, mg, mg_transfer);
1813 *  
1814 * @endcode
1815 *
1816 * The setup of the multigrid routines is quite easy and one cannot see
1817 * any difference in the solve process as compared to @ref step_16 "step-16". All the
1818 * magic is hidden behind the implementation of the LaplaceOperator::vmult
1819 * operation. Note that we print out the solve time and the accumulated
1820 * setup time through standard out, i.e., in any case, whereas detailed
1821 * times for the setup operations are only printed in case the flag for
1822 * time_details in the constructor is changed.
1823 *
1824
1825 *
1826 *
1827 * @code
1828 *   SolverControl solver_control(100, 1e-12 * system_rhs.l2_norm());
1829 *   SolverCG<LinearAlgebra::distributed::Vector<double>> cg(solver_control);
1830 *   setup_time += time.wall_time();
1831 *   time_details << "MG build smoother time (CPU/wall) " << time.cpu_time()
1832 *   << "s/" << time.wall_time() << "s\n";
1833 *   pcout << "Total setup time (wall) " << setup_time << "s\n";
1834 *  
1835 *   time.reset();
1836 *   time.start();
1837 *   constraints.set_zero(solution);
1838 *   cg.solve(system_matrix, solution, system_rhs, preconditioner);
1839 *  
1840 *   constraints.distribute(solution);
1841 *  
1842 *   pcout << "Time solve (" << solver_control.last_step() << " iterations)"
1843 *   << (solver_control.last_step() < 10 ? "  " : " ") << "(CPU/wall) "
1844 *   << time.cpu_time() << "s/" << time.wall_time() << "s\n";
1845 *   }
1846 *  
1847 *  
1848 *  
1849 * @endcode
1850 *
1851 *
1852 * <a name="step_37-LaplaceProblemoutput_results"></a>
1853 * <h4>LaplaceProblem::output_results</h4>
1854 *
1855
1856 *
1857 * Here is the data output, which is a simplified version of @ref step_5 "step-5". We use
1858 * the standard VTU (= compressed VTK) output for each grid produced in the
1859 * refinement process. In addition, we use a compression algorithm that is
1860 * optimized for speed rather than disk usage. The default setting (which
1861 * optimizes for disk usage) makes saving the output take about 4 times as
1862 * long as running the linear solver, while setting
1863 * DataOutBase::CompressionLevel to
1864 * best_speed lowers this to only one fourth the time
1865 * of the linear solve.
1866 *
1867
1868 *
1869 * We disable the output when the mesh gets too large. A variant of this
1870 * program has been run on hundreds of thousands of MPI ranks with as many as
1871 * 100 billion grid cells, which is not directly accessible to classical
1872 * visualization tools.
1873 *
1874 * @code
1875 *   template <int dim>
1876 *   void LaplaceProblem<dim>::output_results(const unsigned int cycle) const
1877 *   {
1878 *   Timer time;
1879 *   if (triangulation.n_global_active_cells() > 1000000)
1880 *   return;
1881 *  
1882 *   DataOut<dim> data_out;
1883 *  
1884 *   solution.update_ghost_values();
1885 *   data_out.attach_dof_handler(dof_handler);
1886 *   data_out.add_data_vector(solution, "solution");
1887 *   data_out.build_patches(mapping);
1888 *  
1889 *   DataOutBase::VtkFlags flags;
1890 *   flags.compression_level = DataOutBase::CompressionLevel::best_speed;
1891 *   data_out.set_flags(flags);
1892 *   data_out.write_vtu_with_pvtu_record(
1893 *   "./", "solution", cycle, MPI_COMM_WORLD, 3);
1894 *  
1895 *   time_details << "Time write output (CPU/wall) " << time.cpu_time()
1896 *   << "s/" << time.wall_time() << "s\n";
1897 *   }
1898 *  
1899 *  
1900 *  
1901 * @endcode
1902 *
1903 *
1904 * <a name="step_37-LaplaceProblemrun"></a>
1905 * <h4>LaplaceProblem::run</h4>
1906 *
1907
1908 *
1909 * The function that runs the program is very similar to the one in
1910 * @ref step_16 "step-16". We do fewer refinement steps in 3d than in 2d, but that's
1911 * it.
1912 *
1913
1914 *
1915 * Before we run the program, we output some information about the detected
1916 * vectorization level as discussed in the introduction.
1917 *
1918 * @code
1919 *   template <int dim>
1920 *   void LaplaceProblem<dim>::run()
1921 *   {
1922 *   {
1923 *   const unsigned int n_vect_doubles = VectorizedArray<double>::size();
1924 *   const unsigned int n_vect_bits = 8 * sizeof(double) * n_vect_doubles;
1925 *  
1926 *   pcout << "Vectorization over " << n_vect_doubles
1927 *   << " doubles = " << n_vect_bits << " bits ("
1928 *   << Utilities::System::get_current_vectorization_level() << ')'
1929 *   << std::endl;
1930 *   }
1931 *  
1932 *   for (unsigned int cycle = 0; cycle < 9 - dim; ++cycle)
1933 *   {
1934 *   pcout << "Cycle " << cycle << std::endl;
1935 *  
1936 *   if (cycle == 0)
1937 *   {
1938 *   GridGenerator::hyper_cube(triangulation, 0., 1.);
1939 *   triangulation.refine_global(3 - dim);
1940 *   }
1941 *   triangulation.refine_global(1);
1942 *   setup_system();
1943 *   assemble_rhs();
1944 *   solve();
1945 *   output_results(cycle);
1946 *   pcout << std::endl;
1947 *   };
1948 *   }
1949 *   } // namespace Step37
1950 *  
1951 *  
1952 *  
1953 * @endcode
1954 *
1955 *
1956 * <a name="step_37-Thecodemaincodefunction"></a>
1957 * <h3>The <code>main</code> function</h3>
1958 *
1959
1960 *
1961 * Apart from the fact that we set up the MPI framework according to @ref step_40 "step-40",
1962 * there are no surprises in the main function.
1963 *
1964 * @code
1965 *   int main(int argc, char *argv[])
1966 *   {
1967 *   try
1968 *   {
1969 *   using namespace Step37;
1970 *  
1971 *   Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
1972 *  
1973 *   LaplaceProblem<dimension> laplace_problem;
1974 *   laplace_problem.run();
1975 *   }
1976 *   catch (std::exception &exc)
1977 *   {
1978 *   std::cerr << std::endl
1979 *   << std::endl
1980 *   << "----------------------------------------------------"
1981 *   << std::endl;
1982 *   std::cerr << "Exception on processing: " << std::endl
1983 *   << exc.what() << std::endl
1984 *   << "Aborting!" << std::endl
1985 *   << "----------------------------------------------------"
1986 *   << std::endl;
1987 *   return 1;
1988 *   }
1989 *   catch (...)
1990 *   {
1991 *   std::cerr << std::endl
1992 *   << std::endl
1993 *   << "----------------------------------------------------"
1994 *   << std::endl;
1995 *   std::cerr << "Unknown exception!" << std::endl
1996 *   << "Aborting!" << std::endl
1997 *   << "----------------------------------------------------"
1998 *   << std::endl;
1999 *   return 1;
2000 *   }
2001 *  
2002 *   return 0;
2003 *   }
2004 * @endcode
2005<a name="step_37-Results"></a><h1>Results</h1>
2006
2007
2008<a name="step_37-Programoutput"></a><h3>Program output</h3>
2009
2010
2011Since this example solves the same problem as @ref step_5 "step-5" (except for
2012a different coefficient), there is little to say about the
2013solution. We show a picture anyway, illustrating the size of the
2014solution through both isocontours and volume rendering:
2015
2016<img src="https://www.dealii.org/images/steps/developer/step-37.solution.png" alt="">
2017
2018Of more interest is to evaluate some aspects of the multigrid solver.
2019When we run this program in 2d for quadratic (@f$\mathcal Q_2@f$) elements, we get the
2020following output (when run on one core in release mode):
2021@code
2022Vectorization over 2 doubles = 128 bits (SSE2)
2023Cycle 0
2024Number of degrees of freedom: 81
2025Total setup time (wall) 0.00159788s
2026Time solve (6 iterations) (CPU/wall) 0.000951s/0.000951052s
2027
2028Cycle 1
2029Number of degrees of freedom: 289
2030Total setup time (wall) 0.00114608s
2031Time solve (6 iterations) (CPU/wall) 0.000935s/0.000934839s
2032
2033Cycle 2
2034Number of degrees of freedom: 1089
2035Total setup time (wall) 0.00244665s
2036Time solve (6 iterations) (CPU/wall) 0.00207s/0.002069s
2037
2038Cycle 3
2039Number of degrees of freedom: 4225
2040Total setup time (wall) 0.00678205s
2041Time solve (6 iterations) (CPU/wall) 0.005616s/0.00561595s
2042
2043Cycle 4
2044Number of degrees of freedom: 16641
2045Total setup time (wall) 0.0241671s
2046Time solve (6 iterations) (CPU/wall) 0.019543s/0.0195441s
2047
2048Cycle 5
2049Number of degrees of freedom: 66049
2050Total setup time (wall) 0.0967851s
2051Time solve (6 iterations) (CPU/wall) 0.07457s/0.0745709s
2052
2053Cycle 6
2054Number of degrees of freedom: 263169
2055Total setup time (wall) 0.346374s
2056Time solve (6 iterations) (CPU/wall) 0.260042s/0.265033s
2057@endcode
2058
2059As in @ref step_16 "step-16", we see that the number of CG iterations remains constant with
2060increasing number of degrees of freedom. A constant number of iterations
2061(together with optimal computational properties) means that the computing time
2062approximately quadruples as the problem size quadruples from one cycle to the
2063next. The code is also very efficient in terms of storage. Around 2-4 million
2064degrees of freedom fit into 1 GB of memory, see also the MPI results below. An
2065interesting fact is that solving one linear system is cheaper than the setup,
2066even though no matrix is built (approximately half of the setup time is spent
2067in the DoFHandler::distribute_dofs() and DoFHandler::distribute_mg_dofs()
2068calls). This shows the high efficiency of this approach, but also that the
2069deal.II data structures are quite expensive to set up and that the setup cost
2070must be amortized over several system solves.
2071
2072Not much changes if we run the program in three spatial dimensions. Since we
2073use uniform mesh refinement, we get eight times as many elements and
2074approximately eight times as many degrees of freedom with each cycle:
2075
2076@code
2077Vectorization over 2 doubles = 128 bits (SSE2)
2078Cycle 0
2079Number of degrees of freedom: 125
2080Total setup time (wall) 0.00231099s
2081Time solve (6 iterations) (CPU/wall) 0.000692s/0.000922918s
2082
2083Cycle 1
2084Number of degrees of freedom: 729
2085Total setup time (wall) 0.00289083s
2086Time solve (6 iterations) (CPU/wall) 0.001534s/0.0024128s
2087
2088Cycle 2
2089Number of degrees of freedom: 4913
2090Total setup time (wall) 0.0143182s
2091Time solve (6 iterations) (CPU/wall) 0.010785s/0.0107841s
2092
2093Cycle 3
2094Number of degrees of freedom: 35937
2095Total setup time (wall) 0.087064s
2096Time solve (6 iterations) (CPU/wall) 0.063522s/0.06545s
2097
2098Cycle 4
2099Number of degrees of freedom: 274625
2100Total setup time (wall) 0.596306s
2101Time solve (6 iterations) (CPU/wall) 0.427757s/0.431765s
2102
2103Cycle 5
2104Number of degrees of freedom: 2146689
2105Total setup time (wall) 4.96491s
2106Time solve (6 iterations) (CPU/wall) 3.53126s/3.56142s
2107@endcode
2108
2109Since it is so easy, we look at what happens if we increase the polynomial
2110degree. When we select degree four in 3d, i.e., @f$\mathcal Q_4@f$
2111elements, by changing the line <code>const unsigned int
2112degree_finite_element = 4;</code> at the top of the program, we get the
2113following program output:
2114
2115@code
2116Vectorization over 2 doubles = 128 bits (SSE2)
2117Cycle 0
2118Number of degrees of freedom: 729
2119Total setup time (wall) 0.00633097s
2120Time solve (6 iterations) (CPU/wall) 0.002829s/0.00379395s
2121
2122Cycle 1
2123Number of degrees of freedom: 4913
2124Total setup time (wall) 0.0174279s
2125Time solve (6 iterations) (CPU/wall) 0.012255s/0.012254s
2126
2127Cycle 2
2128Number of degrees of freedom: 35937
2129Total setup time (wall) 0.082655s
2130Time solve (6 iterations) (CPU/wall) 0.052362s/0.0523629s
2131
2132Cycle 3
2133Number of degrees of freedom: 274625
2134Total setup time (wall) 0.507943s
2135Time solve (6 iterations) (CPU/wall) 0.341811s/0.345788s
2136
2137Cycle 4
2138Number of degrees of freedom: 2146689
2139Total setup time (wall) 3.46251s
2140Time solve (7 iterations) (CPU/wall) 3.29638s/3.3265s
2141
2142Cycle 5
2143Number of degrees of freedom: 16974593
2144Total setup time (wall) 27.8989s
2145Time solve (7 iterations) (CPU/wall) 26.3705s/27.1077s
2146@endcode
2147
2148Since @f$\mathcal Q_4@f$ elements on a certain mesh correspond to @f$\mathcal Q_2@f$
2149elements on half the mesh size, we can compare the run time at cycle 4 with
2150fourth degree polynomials with cycle 5 using quadratic polynomials, both at
21512.1 million degrees of freedom. The surprising effect is that the solver for
2152@f$\mathcal Q_4@f$ elements is actually slightly faster than for the quadratic
2153case, despite needing one more linear iteration. The effect that higher-degree
2154polynomials are similarly fast or even faster than lower-degree ones is one of
2155the main strengths of matrix-free operator evaluation through sum
2156factorization, see the <a
2157href="http://dx.doi.org/10.1016/j.compfluid.2012.04.012">matrix-free
2158paper</a>. This is fundamentally different from matrix-based methods, which get
2159more expensive per unknown as the polynomial degree increases and the coupling
2160gets denser.
2161
2162In addition, the setup also gets a bit cheaper for higher orders, simply
2163because fewer elements need to be set up.
2164
2165Finally, let us look at the timings with degree 8, which corresponds to
2166another round of mesh refinement in the lower order methods:
2167
2168@code
2169Vectorization over 2 doubles = 128 bits (SSE2)
2170Cycle 0
2171Number of degrees of freedom: 4913
2172Total setup time (wall) 0.0842004s
2173Time solve (8 iterations) (CPU/wall) 0.019296s/0.0192959s
2174
2175Cycle 1
2176Number of degrees of freedom: 35937
2177Total setup time (wall) 0.327048s
2178Time solve (8 iterations) (CPU/wall) 0.07517s/0.075999s
2179
2180Cycle 2
2181Number of degrees of freedom: 274625
2182Total setup time (wall) 2.12335s
2183Time solve (8 iterations) (CPU/wall) 0.448739s/0.453698s
2184
2185Cycle 3
2186Number of degrees of freedom: 2146689
2187Total setup time (wall) 16.1743s
2188Time solve (8 iterations) (CPU/wall) 3.95003s/3.97717s
2189
2190Cycle 4
2191Number of degrees of freedom: 16974593
2192Total setup time (wall) 130.8s
2193Time solve (8 iterations) (CPU/wall) 31.0316s/31.767s
2194@endcode
2195
2196Here, the initialization is considerably slower than before, which is
2197mainly due to the computation of the diagonal of the matrix: for each cell, a
2198full @f$729 \times 729@f$ cell matrix is computed and everything but its
2199diagonal is thrown away. The solver times, however, are again very close to
2200the quartic case. This shows that the linear increase in cost with the
2201polynomial degree that is theoretically expected is almost completely offset
2202by better computational characteristics, and by the fact that higher-order
2203methods have a smaller share of degrees of freedom living on several cells,
2204which add to the evaluation complexity.
2205
2206<a name="step_37-Comparisonwithasparsematrix"></a><h3>Comparison with a sparse matrix</h3>
2207
2208
2209In order to understand the capabilities of the matrix-free implementation, we
2210compare the performance of the 3d example above with a sparse matrix
2211implementation based on TrilinosWrappers::SparseMatrix by measuring both the
2212computation times for the initialization of the problem (distribute DoFs,
2213setup and assemble matrices, setup multigrid structures) and the actual
2214solution for the matrix-free variant and the variant based on sparse
2215matrices. We base the preconditioner on float numbers and the actual matrix
2216and vectors on double numbers, as shown above. Tests are run on an Intel Core
2217i7-5500U notebook processor (two cores and <a
2218href="http://en.wikipedia.org/wiki/Advanced_Vector_Extensions">AVX</a>
2219support, i.e., four operations on doubles can be done with one CPU
2220instruction, which is heavily used in FEEvaluation), optimized mode, and two
2221MPI ranks.
2222
2223<table align="center" class="doxtable">
2224 <tr>
2225 <th>&nbsp;</th>
2226 <th colspan="2">Sparse matrix</th>
2227 <th colspan="2">Matrix-free implementation</th>
2228 </tr>
2229 <tr>
2230 <th>n_dofs</th>
2231 <th>Setup + assemble</th>
2232 <th>&nbsp;Solve&nbsp;</th>
2233 <th>Setup + assemble</th>
2234 <th>&nbsp;Solve&nbsp;</th>
2235 </tr>
2236 <tr>
2237 <td align="right">125</td>
2238 <td align="center">0.0042s</td>
2239 <td align="center">0.0012s</td>
2240 <td align="center">0.0022s</td>
2241 <td align="center">0.00095s</td>
2242 </tr>
2243 <tr>
2244 <td align="right">729</td>
2245 <td align="center">0.012s</td>
2246 <td align="center">0.0040s</td>
2247 <td align="center">0.0027s</td>
2248 <td align="center">0.0021s</td>
2249 </tr>
2250 <tr>
2251 <td align="right">4,913</td>
2252 <td align="center">0.082s</td>
2253 <td align="center">0.012s</td>
2254 <td align="center">0.011s</td>
2255 <td align="center">0.0057s</td>
2256 </tr>
2257 <tr>
2258 <td align="right">35,937</td>
2259 <td align="center">0.73s</td>
2260 <td align="center">0.13s</td>
2261 <td align="center">0.048s</td>
2262 <td align="center">0.040s</td>
2263 </tr>
2264 <tr>
2265 <td align="right">274,625</td>
2266 <td align="center">5.43s</td>
2267 <td align="center">1.01s</td>
2268 <td align="center">0.33s</td>
2269 <td align="center">0.25s</td>
2270 </tr>
2271 <tr>
2272 <td align="right">2,146,689</td>
2273 <td align="center">43.8s</td>
2274 <td align="center">8.24s</td>
2275 <td align="center">2.42s</td>
2276 <td align="center">2.06s</td>
2277 </tr>
2278</table>
2279
2280The table clearly shows that the matrix-free implementation is more than twice
2281as fast for the solver, and more than six times as fast when it comes to
2282initialization costs. As the problem size grows by a factor of eight, we note
2283that the times usually go up by a factor of eight, too (as the solver iterations
2284are constant at six). The main deviation is in the sparse matrix between 5k
2285and 36k degrees of freedom, where the time increases by a factor of 12. This is
2286the threshold where the (L3) cache in the processor can no longer hold all
2287data necessary for the matrix-vector products and all matrix elements must be
2288fetched from main memory.
2289
2290Of course, this picture does not necessarily translate to all cases, as there
2291are problems where knowledge of matrix entries enables much better solvers (as
2292happens when the coefficient is varying more strongly than in the above
2293example). Moreover, it also depends on the computer system. The present system
2294has good memory performance, so sparse matrices perform comparably
2295well. Nonetheless, the matrix-free implementation gives a nice speedup already
2296for the <i>Q</i><sub>2</sub> elements used in this example. This becomes
2297particularly apparent for time-dependent or nonlinear problems where sparse
2298matrices would need to be reassembled over and over again, which becomes much
2299easier with this class. And of course, thanks to the better complexity of the
2300products, the method gains increasingly larger advantages when the order of the
2301elements increases (the matrix-free implementation has costs
23024<i>d</i><sup>2</sup><i>p</i> per degree of freedom, compared to
23032<i>p<sup>d</sup></i> for the sparse matrix, so it will win anyway for order 4
2304and higher in 3d).
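To put rough numbers on this estimate (a back-of-the-envelope evaluation, not a
measurement from this program): for @f$d=3@f$ and @f$p=4@f$ one gets
@f{eqnarray*}{
4 d^2 p = 4\cdot 9\cdot 4 = 144
\qquad \text{versus} \qquad
2 p^d = 2\cdot 4^3 = 128,
@f}
i.e., the raw operation counts are already comparable, and since the
sparse-matrix kernel is bound by memory transfer rather than arithmetic, the
matrix-free variant is ahead in practice. For @f$p=5@f$ the counts are 180
versus 250, so the matrix-free method also wins in terms of pure operation
counts.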
2305
2306<a name="step_37-ResultsforlargescaleparallelcomputationsonSuperMUC"></a><h3> Results for large-scale parallel computations on SuperMUC</h3>
2307
2308
2309As explained in the introduction and the in-code comments, this program can be
2310run in parallel with MPI. It turns out that geometric multigrid schemes work
2311really well and can scale to very large machines. To the authors' knowledge,
2312the geometric multigrid results shown here are the largest computations done
2313with deal.II as of late 2016, run on up to 147,456 cores of the <a
2314href="https://www.lrz.de/services/compute/supermuc/systemdescription/">complete
2315SuperMUC Phase 1</a>. The ingredients for scalability beyond 1000 cores are
2316that no data structure that depends on the global problem size is held in its
2317entirety on a single processor and that the communication is not too frequent
2318in order not to run into latency issues of the network. For PDEs solved with
2319iterative solvers, the communication latency is often the limiting factor,
2320rather than the throughput of the network. For the example of the SuperMUC
2321system, the point-to-point latency between two processors is between 1e-6 and
23221e-5 seconds, depending on the proximity in the MPI network. The matrix-vector
2323products with the @p LaplaceOperator class of this program involve several
2324point-to-point communication steps, interleaved with computations on each
2325core. The resulting latency of a matrix-vector product is around 1e-4
2326seconds. Global communication, for example an @p MPI_Allreduce operation that
2327accumulates the sum of a single number per rank over all ranks in the MPI
2328network, has a latency of 1e-4 seconds. The multigrid V-cycle used in this
2329program is also a form of global communication. Think about the coarse grid
2330solve that happens on a single processor: It accumulates the contributions
2331from all processors before it starts. When completed, the coarse grid solution
2332is transferred to finer levels, where more and more processors help in
2333smoothing until the fine grid is reached. Essentially, this is a tree-like pattern
2334over the processors in the network, controlled by the mesh. As opposed to the
2335@p MPI_Allreduce operations where the tree in the reduction is optimized to the
2336actual links in the MPI network, the multigrid V-cycle does this according to
2337the partitioning of the mesh. Thus, we cannot expect the same
2338optimality. Furthermore, the multigrid cycle is not simply a walk up and down
2339the refinement tree, but also involves communication on each level when doing
2340the smoothing. In other words, the global communication in multigrid is more
2341challenging and tied to the mesh, which provides fewer optimization
2342opportunities. The measured latency of the V-cycle is between 6e-3 and 2e-2
2343seconds, i.e., the same as 60 to 200 MPI_Allreduce operations.
2344
2345The following figure shows a scaling experiment on @f$\mathcal Q_3@f$
2346elements. Along each line, the problem size is held constant as the number of
2347cores is increased. When doubling the number of cores, one expects a halving
2348of the computational time, indicated by the dotted gray lines. The results
2349show that the implementation exhibits almost ideal behavior until an absolute
2350time of around 0.1 seconds is reached. The solver tolerances have been set
2351such that the solver performs five iterations. This way of plotting data is
2352the <b>strong scaling</b> of the algorithm. As we go to very large core
2353counts, the curves flatten out a bit earlier, which is because of the
2354communication network in SuperMUC where communication between processors
2355farther away is slightly slower.
2356
2357<img src="https://www.dealii.org/images/steps/developer/step-37.scaling_strong.png" alt="">
2358
2359In addition, the plot also contains results for <b>weak scaling</b>, which show
2360how the algorithm behaves as both the number of processor cores and the number
2361of elements are increased at the same pace. In this situation, we expect the
2362compute time to remain constant. Algorithmically, the number of CG iterations is
2363constant at 5, so we are good from that end. The lines in the plot are
2364arranged such that the top left point in each data series represents the same
2365size per processor, namely 131,072 elements (or approximately 3.5 million
2366degrees of freedom per core). The gray lines indicating ideal strong scaling
2367are spaced apart by the same factor of 8. The results show again that the scaling is
2368almost ideal. The parallel efficiency when going from 288 cores to 147,456
2369cores is around 75% for a local problem size of 750,000 degrees of freedom
2370per core, which takes 1.0s on 288 cores, 1.03s on 2304 cores, 1.19s on 18k
2371cores, and 1.35s on 147k cores. The algorithms also reach a very high
2372utilization of the processor. The largest computation on 147k cores reaches
2373around 1.7 PFLOPs/s on SuperMUC out of an arithmetic peak of 3.2 PFLOPs/s. For
2374an iterative PDE solver, this is a very high number and significantly more is
2375often only reached for dense linear algebra. Sparse linear algebra is limited
2376to a tenth of this value.
2377
2378As mentioned in the introduction, the matrix-free method reduces the memory
2379consumption of the data structures. Besides the higher performance due to less
2380memory transfer, the algorithms also allow for very large problems to fit into
2381memory. The figure below shows the computational time as we increase the
2382problem size until an upper limit where the computation exhausts memory. We do
2383this for 1k cores, 8k cores, and 65k cores and see that the problem size can
2384be varied over almost two orders of magnitude with ideal scaling. The largest
2385computation shown in this picture involves 292 billion (@f$2.92 \cdot 10^{11}@f$)
2386degrees of freedom. In a DG computation on 147k cores, the above algorithms
2387were also run with up to 549 billion (@f$2^{39}@f$) DoFs.
2388
2389<img src="https://www.dealii.org/images/steps/developer/step-37.scaling_size.png" alt="">
2390
2391Finally, we note that while performing the tests on the large-scale system
2392shown above, improvements of the multigrid algorithms in deal.II have been
2393developed. The original version contained sub-optimal code based on
2394MGSmootherPrecondition where some MPI_Allreduce commands (checking whether
2395all vector entries are zero) were issued for each smoothing
2396operation on each level, an issue that only became apparent on 65k cores and
2397more. However, the following picture shows that the improvements already pay
2398off on a smaller scale, here shown for computations on up to 14,336 cores with
2399@f$\mathcal Q_5@f$ elements:
2400
2401<img src="https://www.dealii.org/images/steps/developer/step-37.scaling_oldnew.png" alt="">
2402
2403
2404<a name="step_37-Adaptivity"></a><h3> Adaptivity</h3>
2405
2406
2407As explained in the code, the algorithm presented here is prepared to run on
2408adaptively refined meshes. If only part of the mesh is refined, the multigrid
2409cycle will run with local smoothing and impose Dirichlet conditions along the
2410interfaces between refinement levels for smoothing, through the
2411MatrixFreeOperators::Base class. Due to the way the degrees of freedom are
2412distributed over levels, relating the owner of the level cells to the owner of
2413the first descendant active cell, there can be an imbalance between different
2414processors in MPI, which limits scalability by a factor of around two to five.
2415
2416<a name="step_37-Possibilitiesforextensions"></a><h3> Possibilities for extensions</h3>
2417
2418
2419<a name="step_37-Kellyerrorestimator"></a><h4> Kelly error estimator </h4>
2420
2421
2422As mentioned above, the code is ready for locally adaptive h-refinement.
2423For the Poisson equation one can employ the Kelly error indicator,
2424implemented in the KellyErrorEstimator class. However, one needs to be careful
2425with the ghost indices of parallel vectors.
2426In order to evaluate the jump terms in the error indicator, each MPI process
2427needs to know the locally relevant DoFs.
2428However, the MatrixFree::initialize_dof_vector() function initializes the vector
2429with only some of the locally relevant DoFs.
2430The ghost indices made available in the vector are a tight set of only those indices
2431that are touched in the cell integrals (including constraint resolution).
2432This choice is made for performance reasons, because sending all locally relevant
2433degrees of freedom would be too expensive compared to the matrix-vector product.
2434Consequently, the solution vector as-is is
2435not suitable for the KellyErrorEstimator class.
2436The trick is to change the ghost part of the partition, for example using a
2437temporary vector and LinearAlgebra::distributed::Vector::copy_locally_owned_data_from()
2438as shown below.
2439
2440@code
2441const IndexSet locally_relevant_dofs = DoFTools::extract_locally_relevant_dofs(dof_handler);
2442LinearAlgebra::distributed::Vector<double> copy_vec(solution);
2443solution.reinit(dof_handler.locally_owned_dofs(),
2444 locally_relevant_dofs,
2445 triangulation.get_mpi_communicator());
2446solution.copy_locally_owned_data_from(copy_vec);
2447constraints.distribute(solution);
2448solution.update_ghost_values();
2449@endcode
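With the ghost range extended in this way, the vector can be handed to the error
estimator in the usual manner. A minimal sketch (the quadrature choice and the
name of the error vector are illustrative assumptions, not part of the tutorial
program) could look like this:

@code
#include <deal.II/numerics/error_estimator.h>

Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
KellyErrorEstimator<dim>::estimate(dof_handler,
                                   QGauss<dim - 1>(fe.degree + 1),
                                   {},
                                   solution,
                                   estimated_error_per_cell);
@endcode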
2450
2451<a name="step_37-Sharedmemoryparallelization"></a><h4> Shared-memory parallelization</h4>
2452
2453
2454This program is parallelized with MPI only. As an alternative, the MatrixFree
2455loop can also be issued in hybrid mode, for example by using MPI to parallelize
2456over the nodes of a cluster and threads through Intel TBB within the
2457shared-memory region of one node. To use this, one would need to both set the
2458number of threads in the MPI_InitFinalize data structure in the main function,
2459and set the MatrixFree::AdditionalData::tasks_parallel_scheme to
2460partition_color to actually run the loop in parallel. This use case is
2461discussed in @ref step_48 "step-48".
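As a minimal sketch of the two settings (where exactly they are placed in the
program is an assumption, not something this tutorial does):

@code
// In main(): let the threading backend pick the number of threads per MPI
// rank instead of restricting the program to a single thread.
Utilities::MPI::MPI_InitFinalize mpi_init(argc,
                                          argv,
                                          numbers::invalid_unsigned_int);

// Where the MatrixFree object is set up: request the threaded cell loop.
typename MatrixFree<dim, double>::AdditionalData additional_data;
additional_data.tasks_parallel_scheme =
  MatrixFree<dim, double>::AdditionalData::partition_color;
@endcode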
2462
2463<a name="step_37-InhomogeneousDirichletboundaryconditions"></a><h4> Inhomogeneous Dirichlet boundary conditions </h4>
2464
2465
2466The presented program assumes homogeneous Dirichlet boundary conditions. When
2467going to non-homogeneous conditions, the situation is a bit more intricate. To
2468understand how to implement such a setting, let us first recall how these
2469arise in the mathematical formulation and how they are implemented in a
2470matrix-based variant. In essence, an inhomogeneous Dirichlet condition sets
2471some of the nodal values in the solution to given values rather than
2472determining them through the variational principles,
2473@f{eqnarray*}{
2474u_h(\mathbf{x}) = \sum_{i\in \mathcal N} \varphi_i(\mathbf{x}) u_i =
2475\sum_{i\in \mathcal N \setminus \mathcal N_D} \varphi_i(\mathbf{x}) u_i +
2476\sum_{i\in \mathcal N_D} \varphi_i(\mathbf{x}) g_i,
2477@f}
2478where @f$u_i@f$ denotes the nodal values of the solution and @f$\mathcal N@f$ denotes
2479the set of all nodes. The set @f$\mathcal N_D\subset \mathcal N@f$ is the subset
2480of the nodes that are subject to Dirichlet boundary conditions where the
2481solution is forced to equal @f$u_i = g_i = g(\mathbf{x}_i)@f$ as the interpolation
2482of boundary values on the Dirichlet-constrained node points @f$i\in \mathcal
2483N_D@f$. We then insert this solution
2484representation into the weak form, e.g. the Laplacian shown above, and move
2485the known quantities to the right hand side:
2486@f{eqnarray*}{
2487(\nabla \varphi_i, \nabla u_h)_\Omega &=& (\varphi_i, f)_\Omega \quad \Rightarrow \\
2488\sum_{j\in \mathcal N \setminus \mathcal N_D}(\nabla \varphi_i,\nabla \varphi_j)_\Omega \, u_j &=&
2489(\varphi_i, f)_\Omega
2490-\sum_{j\in \mathcal N_D} (\nabla \varphi_i,\nabla\varphi_j)_\Omega\, g_j.
2491@f}
2492In this formula, the equations are tested for all basis functions @f$\varphi_i@f$
2493with @f$i\in \mathcal N \setminus \mathcal N_D@f$ that are not related to the nodes
2494constrained by Dirichlet conditions.
2495
2496In the implementation in deal.II, the integrals @f$(\nabla \varphi_i,\nabla \varphi_j)_\Omega@f$
2497on the right hand side are already contained in the local matrix contributions
2498we assemble on each cell. When using
2499AffineConstraints::distribute_local_to_global() as first described in the
2500 @ref step_6 "step-6" and @ref step_7 "step-7" tutorial programs, we can account for the contribution of
2501inhomogeneous constraints <i>j</i> by multiplying the columns <i>j</i> and
2502rows <i>i</i> of the local matrix according to the integrals @f$(\nabla \varphi_i,
2503\nabla \varphi_j)_\Omega@f$ by the inhomogeneities and subtracting the result from
2504the position <i>i</i> in the global right-hand-side vector, see also the
2505@ref constraints topic. In essence, we use some of the integrals that get
2506eliminated from the left hand side of the equation to finalize the right hand
2507side contribution. Similar mathematics are also involved when first writing
2508all entries into a left hand side matrix and then eliminating matrix rows and
2509columns.
2510
2511In principle, the components that belong to the constrained degrees of freedom
2512could be eliminated from the linear system because they do not carry any
2513information. In practice, in deal.II we always keep the size of the linear
2514system the same to avoid handling two different numbering systems and avoid
2515confusion about the two different index sets. In order to ensure that the
2516linear system does not get singular when not adding anything to constrained
2517rows, we then add dummy entries to the matrix diagonal that are otherwise
2518unrelated to the real entries.
2519
2520In a matrix-free method, we need to take a different approach, since the @p
2521LaplaceOperator class represents the matrix-vector product of a
2522<b>homogeneous</b> operator (the left-hand side of the last formula). It does
2523not matter whether the AffineConstraints object passed to the
2524MatrixFree::reinit() contains inhomogeneous constraints or not, the
2525MatrixFree::cell_loop() call will only resolve the homogeneous part of the
2526constraints as long as it represents a <b>linear</b> operator.
2527
2528In our matrix-free code, the right hand side computation where the
2529contribution of inhomogeneous conditions ends up is completely decoupled from
2530the matrix operator and handled by a different function above. Thus, we need
2531to explicitly generate the data that enters the right hand side rather than
2532using a byproduct of the matrix assembly. Since we already know how to apply
2533the operator on a vector, we could try to use those facilities for a vector
2534where we only set the Dirichlet values:
2535@code
2536 // interpolate boundary values on vector solution
2537 std::map<types::global_dof_index, double> boundary_values;
2538 VectorTools::interpolate_boundary_values(mapping,
2539 dof_handler,
2540 0,
2541 BoundaryValueFunction<dim>(),
2542 boundary_values);
2543 for (const std::pair<const types::global_dof_index, double> &pair : boundary_values)
2544 if (solution.locally_owned_elements().is_element(pair.first))
2545 solution(pair.first) = pair.second;
2546@endcode
2547or, equivalently, if we already had filled the inhomogeneous constraints into
2548an AffineConstraints object,
2549@code
2550 solution = 0;
2551 constraints.distribute(solution);
2552@endcode
2553
2554We could then pass the vector @p solution to the @p
2555LaplaceOperator::vmult_add() function and add the new contribution to the @p
2556system_rhs vector that gets filled in the @p LaplaceProblem::assemble_rhs()
2557function. However, this idea does not work because the
2558FEEvaluation::read_dof_values() call used inside the vmult() functions assumes
2559homogeneous values on all constraints (otherwise the operator would not be a
2560linear operator but an affine one). To also retrieve the values of the
2561inhomogeneities, we could select one of two following strategies.
2562
2563<a name="step_37-UseFEEvaluationread_dof_values_plaintoavoidresolvingconstraints"></a><h5> Use FEEvaluation::read_dof_values_plain() to avoid resolving constraints </h5>
2564
2565
2566The class FEEvaluation has a facility that addresses precisely this
2567requirement: For non-homogeneous Dirichlet values, we do want to skip the
2568implicit imposition of homogeneous (Dirichlet) constraints upon reading the
2569data from the vector @p solution. For example, we could extend the @p
2570LaplaceProblem::assemble_rhs() function to deal with inhomogeneous Dirichlet
2571values as follows, assuming the Dirichlet values have been interpolated into
2572the object @p constraints:
2573@code
2574template <int dim>
2575void LaplaceProblem<dim>::assemble_rhs()
2576{
2577 solution = 0;
2578 constraints.distribute(solution);
2579 solution.update_ghost_values();
2580 system_rhs = 0;
2581
2582 const Table<2, VectorizedArray<double>> &coefficient = system_matrix.get_coefficient();
2583 FEEvaluation<dim, degree_finite_element> phi(*system_matrix.get_matrix_free());
2584 for (unsigned int cell = 0;
2585 cell < system_matrix.get_matrix_free()->n_cell_batches();
2586 ++cell)
2587 {
2588 phi.reinit(cell);
2589 phi.read_dof_values_plain(solution);
2590 phi.evaluate(EvaluationFlags::gradients);
2591 for (const unsigned int q : phi.quadrature_point_indices())
2592 {
2593 phi.submit_gradient(-coefficient(cell, q) * phi.get_gradient(q), q);
2594 phi.submit_value(make_vectorized_array<double>(1.0), q);
2595 }
2596 phi.integrate(EvaluationFlags::values | EvaluationFlags::gradients);
2597 phi.distribute_local_to_global(system_rhs);
2598 }
2599 system_rhs.compress(VectorOperation::add);
2600}
2601@endcode
2602
2603In this code, we replaced the FEEvaluation::read_dof_values() function for the
2604tentative solution vector by FEEvaluation::read_dof_values_plain() that
2605ignores all constraints. Due to this setup, we must make sure that other
2606constraints, e.g. by hanging nodes, are correctly distributed to the input
2607vector already as they are not resolved as in
2608FEEvaluation::read_dof_values_plain(). Inside the loop, we then evaluate the
2609Laplacian and repeat the second derivative call with
2610FEEvaluation::submit_gradient() from the @p LaplaceOperator class, but with the
2611sign switched since we wanted to subtract the contribution of Dirichlet
2612conditions on the right hand side vector according to the formula above. When
2613we invoke the FEEvaluation::integrate() call, we pass both the values and the
2614gradients evaluation flags to account for both
2615terms added in the loop over quadrature points. Once the right hand side is
2616assembled, we then go on to solving the linear system for the homogeneous
2617problem, say involving a variable @p solution_update. After solving, we can
2618add @p solution_update to the @p solution vector that contains the final
2619(inhomogeneous) solution.
2620
2621Note that the negative sign for the Laplacian alongside with a positive sign
2622for the forcing that we needed to build the right hand side is a more general
2623concept: We have implemented nothing else than Newton's method for nonlinear
2624equations, but applied to a linear system. We have used an initial guess for
2625the variable @p solution in terms of the Dirichlet boundary conditions and
2626computed a residual @f$r = f - Au_0@f$. The linear system was then solved as
2627@f$\Delta u = A^{-1} (f-Au)@f$ and we finally computed @f$u = u_0 + \Delta u@f$. For a
2628linear system, we obviously reach the exact solution after a single
2629iteration. If we wanted to extend the code to a nonlinear problem, we would
2630rename the @p assemble_rhs() function into a more descriptive name like @p
2631assemble_residual() that computes the (weak) form of the residual, whereas the
2632@p LaplaceOperator::apply_add() function would get the linearization of the
2633residual with respect to the solution variable.
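In outline, and using the illustrative variable name @p solution_update from
above (the program as written stores the solution of the linear system directly
in @p solution), the sequence of steps reads:

@code
solution = 0;
constraints.distribute(solution); // u_0: Dirichlet data, zero elsewhere
assemble_rhs();                   // residual r = f - A u_0
solve();                          // solve A (solution_update) = r
solution += solution_update;      // u = u_0 + \Delta u
@endcode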
2634
2635<a name="step_37-UseLaplaceOperatorwithasecondAffineConstraintsobjectwithoutDirichletconditions"></a><h5> Use LaplaceOperator with a second AffineConstraints object without Dirichlet conditions </h5>
2636
2637
2638A second alternative to get the right hand side that re-uses the @p
2639LaplaceOperator::apply_add() function is to instead add a second LaplaceOperator
2640that skips Dirichlet constraints. To do this, we initialize a second MatrixFree
2641object which does not have any boundary value constraints. This @p matrix_free
2642object is then passed to a @p LaplaceOperator class instance @p
2643inhomogeneous_operator that is only used to create the right hand side:
2644@code
2645template <int dim>
2646void LaplaceProblem<dim>::assemble_rhs()
2647{
2648 system_rhs = 0;
2649 AffineConstraints<double> no_constraints;
2650 no_constraints.close();
2651 LaplaceOperator<dim, degree_finite_element, double> inhomogeneous_operator;
2652
2653 typename MatrixFree<dim, double>::AdditionalData additional_data;
2654 additional_data.mapping_update_flags =
2655 (update_gradients | update_JxW_values | update_quadrature_points);
2656 std::shared_ptr<MatrixFree<dim, double>> matrix_free(
2657 new MatrixFree<dim, double>());
2658 matrix_free->reinit(dof_handler,
2659 no_constraints,
2660 QGauss<1>(fe.degree + 1),
2661 additional_data);
2662 inhomogeneous_operator.initialize(matrix_free);
2663
2664 solution = 0.0;
2665 constraints.distribute(solution);
2666 inhomogeneous_operator.evaluate_coefficient(Coefficient<dim>());
2667 inhomogeneous_operator.vmult(system_rhs, solution);
2668 system_rhs *= -1.0;
2669
2670 FEEvaluation<dim, degree_finite_element> phi(
2671 *inhomogeneous_operator.get_matrix_free());
2672 for (unsigned int cell = 0;
2673 cell < inhomogeneous_operator.get_matrix_free()->n_cell_batches();
2674 ++cell)
2675 {
2676 phi.reinit(cell);
2677 for (const unsigned int q : phi.quadrature_point_indices())
2678 phi.submit_value(make_vectorized_array<double>(1.0), q);
2679 phi.integrate(EvaluationFlags::values);
2680 phi.distribute_local_to_global(system_rhs);
2681 }
2682 system_rhs.compress(VectorOperation::add);
2683}
2684@endcode
2685
2686A more sophisticated implementation of this technique could reuse the original
2687MatrixFree object. This can be done by initializing the MatrixFree object with
2688multiple blocks, where each block corresponds to a different AffineConstraints
2689object. Doing this would require making substantial modifications to the
2690LaplaceOperator class, but the MatrixFreeOperators::LaplaceOperator class that
2691comes with the library can do this. See the discussion on blocks in
2692MatrixFreeOperators::Base for more information on how to set up blocks.
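For illustration, a sketch of such a setup (reusing the names @p no_constraints
and @p additional_data from the code above and assuming the program's @p mapping
object; the tutorial program is not written this way) could register both
AffineConstraints objects with one MatrixFree object and select the
unconstrained block in FEEvaluation through its @p dof_no argument:

@code
std::vector<const DoFHandler<dim> *>           dof_handlers = {&dof_handler,
                                                               &dof_handler};
std::vector<const AffineConstraints<double> *> all_constraints = {&constraints,
                                                                  &no_constraints};
std::vector<QGauss<1>> quadratures = {QGauss<1>(fe.degree + 1)};

matrix_free->reinit(
  mapping, dof_handlers, all_constraints, quadratures, additional_data);

// Block 1 ignores the Dirichlet constraints and can be used to assemble the
// inhomogeneous right hand side contribution:
FEEvaluation<dim, degree_finite_element> phi(*matrix_free, /*dof_no=*/1);
@endcode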
2693
2694<a name="step_37-Furtherperformanceimprovements"></a><h4> Further performance improvements </h4>
2695
2696
2697While the performance achieved in this tutorial program is already very good,
2698there is functionality in deal.II to further improve the performance. On the
2699one hand, increasing the polynomial degree to three or four will further
2700improve the time per unknown. (Even higher degrees typically get slower again,
2701because the multigrid iteration counts increase slightly with the chosen
2702simple smoother. One could then use hybrid multigrid algorithms to use
2703polynomial coarsening through MGTransferGlobalCoarsening, to reduce the impact
2704of the coarser level on the communication latency.) A more significant
2705improvement can be obtained by data-locality optimizations. The class
2706PreconditionChebyshev, when combined with a `DiagonalMatrix` inner
2707preconditioner as in the present class, can overlap the vector operations with
2708the matrix-vector product. As the former are typically constrained by memory
2709bandwidth, reducing the number of loads helps to achieve this goal. The two
2710ingredients to achieve this are
2711<ol>
2712<li> to provide the LaplaceOperator class of this tutorial program with a `vmult`
2713function that takes two `std::function` objects, which can be passed on to
2714MatrixFree::cell_loop with the respective signature (PreconditionChebyshev
2715will then pick up this interface and schedule its vector operations), and </li>
2716<li> to compute a numbering that optimizes for data locality, as provided by
2717DoFRenumbering::matrix_free_data_locality() (see the sketch below). </li>
2718</ol>
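For the second ingredient, a minimal sketch (the variable name
@p system_mf_storage and the exact place in the setup are assumptions) could be:

@code
// Renumber the unknowns of dof_handler for better data locality in the
// matrix-free cell loops, based on an already initialized MatrixFree object.
DoFRenumbering::matrix_free_data_locality(dof_handler, *system_mf_storage);

// Afterwards, the constraints, the MatrixFree object, and the multigrid
// data structures need to be rebuilt with the new numbering.
@endcode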
2719 *
2720 *
2721<a name="step_37-PlainProg"></a>
2722<h1> The plain program</h1>
2723@include "step-37.cc"
2724*/