deal.II version GIT relicensing-2659-g040196caa3 2025-02-18 14:20:01+00:00
step-37.h
1) const
757 *   {
758 *   return 1. / (0.05 + 2. * p.square());
759 *   }
760 *  
761 *  
762 *  
763 *   template <int dim>
764 *   double Coefficient<dim>::value(const Point<dim> &p,
765 *   const unsigned int component) const
766 *   {
767 *   return value<double>(p, component);
768 *   }
769 *  
770 *  
771 * @endcode
772 *
773 *
774 * <a name="step_37-Matrixfreeimplementation"></a>
775 * <h3>Matrix-free implementation</h3>
776 *
777
778 *
779 * The following class, called <code>LaplaceOperator</code>, implements the
780 * differential operator. For all practical purposes, it is a matrix, i.e.,
781 * you can ask it for its size (member functions <code>m(), n()</code>) and
782 * you can apply it to a vector (the <code>vmult()</code> function). The
783 * difference to a real matrix of course lies in the fact that this class
784 * does not actually store the <i>elements</i> of the matrix, but only knows
785 * how to compute the action of the operator when applied to a vector.
786 *
787
788 *
 789 * The infrastructure describing the matrix size, the initialization from a
 790 * MatrixFree object, and the various interfaces to matrix-vector products
 791 * through vmult() and Tvmult() methods, is provided by the class
 792 * MatrixFreeOperators::Base in this tutorial program, and the
 793 * LaplaceOperator class defined here only has to provide a few interfaces,
 794 * namely the actual action of the operator through the apply_add() method
 795 * that gets used in the vmult() functions, and a method to compute the
 796 * diagonal entries of the underlying matrix. We need the diagonal for the
 797 * multigrid smoother. Since this is a problem with a
 798 * variable coefficient, we further implement a method that can fill the
 799 * coefficient values.
800 *
801
802 *
 803 * Note that the file <code>include/deal.II/matrix_free/operators.h</code>
 804 * already contains an implementation of the Laplace operator through the
 805 * class MatrixFreeOperators::LaplaceOperator. For educational purposes, the
 806 * operator is re-implemented in this tutorial program, explaining the
 807 * ingredients and concepts used there.
 808 *
809
810 *
 811 * This program makes use of the data cache for finite element operator
 812 * application that is integrated in deal.II. This data cache class is
 813 * called MatrixFree. It contains mapping information (Jacobians) and index
 814 * relations between local and global degrees of freedom. It also contains
 815 * constraints like the ones from hanging nodes or Dirichlet boundary
 816 * conditions. Moreover, it can issue a loop over all cells in %parallel,
 817 * making sure that only cells are worked on that do not share any degree of
 818 * freedom (this makes the loop thread-safe when writing into destination
 819 * vectors). This is a more advanced strategy compared to the WorkStream
820 * class described in the @ref threads topic. Of course, to not destroy
821 * thread-safety, we have to be careful when writing into class-global
822 * structures.
823 *
824
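As a small illustration of this data cache (a sketch only, mirroring the reinit() call that appears later in this program; the <code>mapping</code>, <code>dof_handler</code>, <code>constraints</code>, and <code>fe</code> objects are the ones set up in the LaplaceProblem class below):

@code
  // Build the MatrixFree cache once; all operators on these DoFs can share it.
  typename MatrixFree<dim, double>::AdditionalData additional_data;
  additional_data.mapping_update_flags =
    (update_gradients | update_JxW_values | update_quadrature_points);

  MatrixFree<dim, double> matrix_free;
  matrix_free.reinit(mapping, dof_handler, constraints,
                     QGauss<1>(fe.degree + 1), additional_data);

  // The parallel cell loop works on batches of cells:
  const unsigned int n_batches = matrix_free.n_cell_batches();
@endcode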
825 *
 826 * The class implementing the Laplace operator has three template arguments,
 827 * one for the dimension (as many deal.II classes carry), one for the degree
 828 * of the finite element, and one
 829 * for the underlying scalar number
 830 * type. We want to use <code>double</code> numbers (i.e., double precision,
 831 * 64-bit floating point) for the final matrix, but floats (single
 832 * precision, 32-bit floating point numbers) for the multigrid level
 833 * matrices (as that is only a preconditioner, and floats can be processed
 834 * twice as fast). The class FEEvaluation also takes a template argument for
 835 * the number of quadrature points in one dimension. In the code below, we
 836 * hard-code it to <code>fe_degree+1</code>. If we wanted to change it
 837 * independently of the polynomial degree, we would need to add a template
 838 * parameter as is done in the MatrixFreeOperators::LaplaceOperator class.
839 *
840
841 *
 842 * As a sidenote, if we implemented several different operations on the same
 843 * grid and degrees of freedom (like a @ref GlossMassMatrix "mass matrix" and a Laplace matrix), we
 844 * would define two classes like the current one for each of the operators
 845 * (derived from the MatrixFreeOperators::Base class), and let both of them
 846 * refer to the same MatrixFree data cache from the general problem
 847 * class. The interface through MatrixFreeOperators::Base requires us to
 848 * only provide a minimal set of functions. This concept allows for writing
 849 * complex application codes with many matrix-free operations.
850 *
851
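For instance, a hedged sketch of such sharing (using the ready-made operators from <code>operators.h</code> rather than the class written below; <code>degree_finite_element</code> is the constant defined at the top of the program):

@code
  using VectorType = LinearAlgebra::distributed::Vector<double>;

  std::shared_ptr<MatrixFree<dim, double>> storage =
    std::make_shared<MatrixFree<dim, double>>();
  storage->reinit(mapping, dof_handler, constraints,
                  QGauss<1>(fe.degree + 1),
                  typename MatrixFree<dim, double>::AdditionalData());

  // Both operators refer to the same data cache:
  MatrixFreeOperators::MassOperator<dim,
                                    degree_finite_element,
                                    degree_finite_element + 1,
                                    1,
                                    VectorType> mass_operator;
  MatrixFreeOperators::LaplaceOperator<dim,
                                       degree_finite_element,
                                       degree_finite_element + 1,
                                       1,
                                       VectorType> laplace_operator;
  mass_operator.initialize(storage);
  laplace_operator.initialize(storage);
@endcode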
852 *
 853 * @note Storing values of type <code>VectorizedArray<number></code>
 854 * requires care: Here, we use the deal.II table class which is prepared to
 855 * hold the data with correct alignment. However, storing e.g. an
 856 * <code>std::vector<VectorizedArray<number> ></code> is not possible with
 857 * vectorization: A certain alignment of the data with the memory address
 858 * boundaries is required (essentially, a VectorizedArray that is 32 bytes
 859 * long in case of AVX needs to start at a memory address that is divisible
 860 * by 32). The table class (as well as the AlignedVector class it is based
 861 * on) makes sure that this alignment is respected, whereas std::vector does
 862 * not in general, which may lead to segmentation faults at strange places
 863 * for some systems or suboptimal performance for other systems.
 864 *
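To make this note concrete, a minimal sketch (the sizes <code>n_cell_batches</code> and <code>n_q_points</code> are placeholders):

@code
  // Table is built on AlignedVector and guarantees the alignment that
  // VectorizedArray requires:
  Table<2, VectorizedArray<number>> coefficient;
  coefficient.reinit(n_cell_batches, n_q_points);

  // A plain std::vector<VectorizedArray<number>> gives no such guarantee and
  // may crash or run slowly; AlignedVector<VectorizedArray<number>> is the
  // drop-in alternative for one-dimensional storage.
@endcode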
865 * @code
866 *   template <int dim, int fe_degree, typename number>
867 *   class LaplaceOperator
 868 *     : public MatrixFreeOperators::
 869 *         Base<dim, LinearAlgebra::distributed::Vector<number>>
870 *   {
871 *   public:
872 *   using value_type = number;
873 *  
874 *   LaplaceOperator();
875 *  
876 *   void clear() override;
877 *  
 878 *   void evaluate_coefficient(const Coefficient<dim> &coefficient_function);
 879 *  
880 *   virtual void compute_diagonal() override;
881 *  
882 *   private:
883 *   virtual void apply_add(
 884 *     LinearAlgebra::distributed::Vector<number>       &dst,
 885 *     const LinearAlgebra::distributed::Vector<number> &src) const override;
886 *  
 887 *   void
 888 *   local_apply(const MatrixFree<dim, number>                    &data,
 889 *               LinearAlgebra::distributed::Vector<number>       &dst,
 890 *               const LinearAlgebra::distributed::Vector<number> &src,
 891 *               const std::pair<unsigned int, unsigned int> &cell_range) const;
892 *  
 893 *   void local_compute_diagonal(
 894 *     FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> &phi) const;
 895 *  
 896 *   Table<2, VectorizedArray<number>> coefficient;
 897 *   };
898 *  
899 *  
900 *  
901 * @endcode
902 *
 903 * This is the constructor of the @p LaplaceOperator class. All it does is
 904 * to call the default constructor of the base class
 905 * MatrixFreeOperators::Base, which in turn ensures (through the deal.II
 906 * object-tracking mechanism) that this class is
 907 * not accessed after going out of scope e.g. in a preconditioner.
908 *
909 * @code
910 *   template <int dim, int fe_degree, typename number>
 911 *   LaplaceOperator<dim, fe_degree, number>::LaplaceOperator()
 912 *     : MatrixFreeOperators::Base<dim,
 913 *                                 LinearAlgebra::distributed::Vector<number>>()
 914 *   {}
915 *  
916 *  
917 *  
918 *   template <int dim, int fe_degree, typename number>
919 *   void LaplaceOperator<dim, fe_degree, number>::clear()
920 *   {
 921 *   coefficient.reinit(0, 0);
 922 *   MatrixFreeOperators::Base<dim, LinearAlgebra::distributed::Vector<number>>::
 923 *     clear();
924 *   }
925 *  
926 *  
927 *  
928 * @endcode
929 *
930 *
 931 * <a name="step_37-Computationofcoefficient"></a>
 932 * <h4>Computation of coefficient</h4>
 933 *
934
935 *
936 * To initialize the coefficient, we directly give it the Coefficient class
 937 * defined above and then select the method
 938 * <code>coefficient_function.value</code> with vectorized number (which the
 939 * compiler can deduce from the point data type). The use of the
940 * FEEvaluation class (and its template arguments) will be explained below.
941 *
942 * @code
 943 *   template <int dim, int fe_degree, typename number>
 944 *   void LaplaceOperator<dim, fe_degree, number>::evaluate_coefficient(
 945 *     const Coefficient<dim> &coefficient_function)
 946 *   {
 947 *   const unsigned int n_cells = this->data->n_cell_batches();
 948 *   FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(*this->data);
 949 *  
950 *   coefficient.reinit(n_cells, phi.n_q_points);
951 *   for (unsigned int cell = 0; cell < n_cells; ++cell)
952 *   {
953 *   phi.reinit(cell);
954 *   for (const unsigned int q : phi.quadrature_point_indices())
955 *   coefficient(cell, q) =
956 *   coefficient_function.value(phi.quadrature_point(q));
957 *   }
958 *   }
959 *  
960 *  
961 *  
962 * @endcode
963 *
964 *
965 * <a name="step_37-LocalevaluationofLaplaceoperator"></a>
966 * <h4>Local evaluation of Laplace operator</h4>
967 *
968
969 *
970 * Here comes the main function of this class, the evaluation of the
971 * matrix-vector product (or, in general, a finite element operator
972 * evaluation). This is done in a function that takes exactly four
973 * arguments, the MatrixFree object, the destination and source vectors, and
974 * a range of cells that are to be worked on. The method
975 * <code>cell_loop</code> in the MatrixFree class will internally call this
976 * function with some range of cells that is obtained by checking which
977 * cells are possible to work on simultaneously so that write operations do
978 * not cause any race condition. Note that the cell range used in the loop
979 * is not directly the number of (active) cells in the current mesh, but
 980 * rather a collection of batches of cells. In other words, "cell" may be
 981 * the wrong term to begin with, since FEEvaluation groups data from several
 982 * cells together. This means that in the loop over quadrature points we are
 983 * actually seeing a group of quadrature points of several cells as one
 984 * block. This is done to enable a higher degree of vectorization. The
 985 * number of such "cells" or "cell batches" is stored in MatrixFree and can
 986 * be queried through MatrixFree::n_cell_batches(). Compared to the deal.II
 987 * cell iterators, in this class all cells are laid out in a plain array
 988 * with no direct knowledge of level or neighborship relations, which makes
 989 * it possible to index the cells by unsigned integers.
990 *
991
992 *
 993 * The implementation of the Laplace operator is quite simple: First, we
 994 * need to create an object FEEvaluation that contains the computational
 995 * kernels and has data fields to store temporary results (e.g. gradients
 996 * evaluated on all quadrature points on a collection of a few cells). Note
 997 * that temporary results do not use a lot of memory, and since we specify
 998 * template arguments with the element order, the data is stored on the
 999 * stack (without expensive memory allocation). Usually, one only needs to
 1000 * set two template arguments, the dimension as a first argument and the
 1001 * degree of the finite element as the second argument (this is equal to the
 1002 * number of degrees of freedom per dimension minus one for FE_Q
 1003 * elements). However, here we also want to be able to use float numbers for
 1004 * the multigrid preconditioner, which is the last (fifth) template
 1005 * argument. Therefore, we cannot rely on the default template arguments and
 1006 * must also fill the third and fourth field, consequently. The third
 1007 * argument specifies the number of quadrature points per direction and has
1008 * a default value equal to the degree of the element plus one. The fourth
1009 * argument sets the number of components (one can also evaluate
1010 * vector-valued functions in systems of PDEs, but the default is a scalar
1011 * element), and finally the last argument sets the number type.
1012 *
1013
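Spelled out for the situation just described (this matches the declaration used in the local_apply() code below):

@code
  // dim, fe_degree, n_q_points_1d, n_components, number type
  FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(data);
@endcode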
1014 *
 1015 * Next, we loop over the given cell range and then we continue with the
 1016 * actual implementation: <ol> <li>Tell the FEEvaluation object the (macro)
 1017 * cell we want to work on. <li>Read in the values of the source vectors
 1018 * (@p read_dof_values), including the resolution of constraints. This
 1019 * stores @f$u_\mathrm{cell}@f$ as described in the introduction. <li>Compute
 1020 * the unit-cell gradient (the evaluation of finite element
 1021 * functions). Since FEEvaluation can combine value computations with
 1022 * gradient computations, it uses a unified interface to all kinds of
 1023 * derivatives of order between zero and two. We only want gradients, no
 1024 * values and no second derivatives, so we set the function arguments to
 1025 * true in the gradient slot (second slot), and to false in the values slot
 1026 * (first slot). There is also a third slot for the Hessian which is
 1027 * false by default, so it needs not be given. Note that the FEEvaluation
1028 * class internally evaluates shape functions in an efficient way where one
1029 * dimension is worked on at a time (using the tensor product form of shape
1030 * functions and quadrature points as mentioned in the introduction). This
1031 * gives complexity equal to @f$\mathcal O(d^2 (p+1)^{d+1})@f$ for polynomial
1032 * degree @f$p@f$ in @f$d@f$ dimensions, compared to the naive approach with loops
1033 * over all local degrees of freedom and quadrature points that is used in
 1034 * FEValues and costs @f$\mathcal O(d (p+1)^{2d})@f$. <li>Next comes the
 1035 * application of the Jacobian transformation, the multiplication by the
 1036 * variable coefficient and the quadrature weight. FEEvaluation has an
 1037 * access function @p get_gradient that applies the Jacobian and returns the
 1038 * gradient in real space. Then, we just need to multiply by the (scalar)
 1039 * coefficient, and let the function @p submit_gradient apply the second
 1040 * Jacobian (for the test function) and the quadrature weight and Jacobian
 1041 * determinant (JxW). Note that the submitted gradient is stored in the same
 1042 * data field as where it is read from in @p get_gradient. Therefore, you
 1043 * need to make sure to not read from the same quadrature point again after
 1044 * having called @p submit_gradient on that particular quadrature point. In
 1045 * general, it is a good idea to copy the result of @p get_gradient when it
 1046 * is used more than once. <li>Next follows the summation over
 1047 * quadrature points for all test functions that corresponds to the actual
1048 * integration step. For the Laplace operator, we just multiply by the
1049 * gradient, so we call the integrate function with the respective argument
1050 * set. If you have an equation where you test by both the values of the
1051 * test functions and the gradients, both template arguments need to be set
1052 * to true. Calling first the integrate function for values and then
1053 * gradients in a separate call leads to wrong results, since the second
1054 * call will internally overwrite the results from the first call. Note that
1055 * there is no function argument for the second derivative for integrate
 1056 * step. <li>Eventually, the local contributions in the vector
 1057 * @f$v_\mathrm{cell}@f$ as mentioned in the introduction need to be added into
 1058 * the result vector (and constraints are applied). This is done with a call
1059 * to @p distribute_local_to_global, the same name as the corresponding
1060 * function in the AffineConstraints (only that we now store the local vector
1061 * in the FEEvaluation object, as are the indices between local and global
1062 * degrees of freedom). </ol>
1063 *
1064 * @code
1065 *   template <int dim, int fe_degree, typename number>
1066 *   void LaplaceOperator<dim, fe_degree, number>::local_apply(
 1067 *     const MatrixFree<dim, number>                    &data,
 1068 *     LinearAlgebra::distributed::Vector<number>       &dst,
 1069 *     const LinearAlgebra::distributed::Vector<number> &src,
 1070 *     const std::pair<unsigned int, unsigned int> &cell_range) const
 1071 *   {
 1072 *   FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(data);
 1073 *  
1074 *   for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
1075 *   {
1076 *   AssertDimension(coefficient.size(0), data.n_cell_batches());
1077 *   AssertDimension(coefficient.size(1), phi.n_q_points);
1078 *  
1079 *   phi.reinit(cell);
1080 *   phi.read_dof_values(src);
1081 *   phi.evaluate(EvaluationFlags::gradients);
1082 *   for (const unsigned int q : phi.quadrature_point_indices())
1083 *   phi.submit_gradient(coefficient(cell, q) * phi.get_gradient(q), q);
1084 *   phi.integrate(EvaluationFlags::gradients);
1085 *   phi.distribute_local_to_global(dst);
1086 *   }
1087 *   }
1088 *  
1089 *  
1090 *  
1091 * @endcode
1092 *
1093 * This function implements the loop over all cells for the
 1094 * Base::apply_add() interface. This is done with the @p cell_loop of the
 1095 * MatrixFree object that passes the local worker function defined above,
 1096 * together with the source and destination vectors, to all cells. When
 1097 * working on a single machine without MPI data exchange,
 1098 * the cell loop corresponds to the following three lines of code:
1099 *
1100
1101 *
 1102 * <div class=CodeFragmentInTutorialComment>
 1103 * @code
1104 * src.update_ghost_values();
1105 * local_apply(*this->data, dst, src, std::make_pair(0U,
1106 * data.n_cell_batches()));
1107 * dst.compress(VectorOperation::add);
1108 * @endcode
1109 * </div>
1110 *
1111
1112 *
 1113 * Here, the two calls update_ghost_values() and compress() perform the data
 1114 * exchange on processor boundaries for MPI, once before the loop
 1115 * where we need to read from entries owned by remote processors, and once
 1116 * after the loop where we accumulate partial sums of
 1117 * residuals that need to be added to the respective entry of the owner
 1118 * processor. However, MatrixFree::cell_loop does not only resolve the ghost
 1119 * exchange, but it can also hide the communication behind computation. On the
 1120 * one hand, it will split the update_ghost_values() and compress() calls in
 1121 * a way to allow for overlapping communication and computation. The
 1122 * local_apply function is then called with three cell ranges representing
 1123 * partitions of the cell range from 0 to MatrixFree::n_cell_batches(). On
1124 * the other hand, cell_loop also supports thread parallelism in which case
1125 * the cell ranges are split into smaller chunks and scheduled in an
1126 * advanced way that avoids access to the same vector entry by several
1127 * threads. That feature is explained in @ref step_48 "step-48".
1128 *
1129
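As a side note (a sketch, not used by the tutorial itself), MatrixFree::cell_loop also accepts a lambda or <code>std::function</code> instead of a member function pointer, which can be convenient for small operators:

@code
  this->data->cell_loop(
    [&](const MatrixFree<dim, number>                    &data,
        LinearAlgebra::distributed::Vector<number>       &dst,
        const LinearAlgebra::distributed::Vector<number> &src,
        const std::pair<unsigned int, unsigned int>      &cell_range) {
      this->local_apply(data, dst, src, cell_range);
    },
    dst,
    src);
@endcode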
1130 *
 1131 * Note that after the cell loop, the constrained degrees of freedom need to
 1132 * be touched once more for proper vmult() functionality. Since the assembly
 1133 * loop automatically resolves constraints (just as the
 1134 * AffineConstraints::distribute_local_to_global() call does), it does not
 1135 * compute any contribution for constrained degrees of freedom, leaving the
 1136 * respective entries zero. This would represent a matrix that had empty
 1137 * rows and columns for constrained degrees of freedom. However, iterative
 1138 * solvers like CG only work for non-singular matrices. The easiest way to
 1139 * do that is to set the sub-block of the matrix that corresponds to
 1140 * constrained degrees of freedom to an identity matrix, in which case application of the
 1141 * matrix would simply copy the elements of the right hand side vector into
 1142 * the left hand side. Fortunately, the vmult() implementations
 1143 * in MatrixFreeOperators::Base do this automatically for us outside the
 1144 * apply_add() function, so we do not need to take further action here.
1145 *
1146
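In pseudocode, a sketch of what the base class does around apply_add() (the name <code>constrained_dof_indices</code> is a stand-in for the list of constrained entries the base class keeps internally, not an actual member):

@code
  // vmult(dst, src), conceptually:
  dst = 0.;
  apply_add(dst, src);  // rows/columns of constrained DoFs remain zero here
  for (const unsigned int i : constrained_dof_indices)
    dst.local_element(i) = src.local_element(i);  // identity block
@endcode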
1147 *
 1149 *
 1150 * A further point worth noting is the way vectors are accessed: The routines of
 1151 * FEEvaluation are designed to access vectors in MPI-local index space also
 1152 * when working with multiple processors, so
 1153 * that no index translation needs to be performed at the place the vector
 1154 * entries are accessed, which is good for performance. While it is obvious
 1155 * how to
 1156 * access the locally owned range of a vector with indices between 0 and the
 1157 * local size, the numbering is not so clear for the ghosted entries and
 1158 * somewhat arbitrary. For the matrix-vector product, only the indices
 1159 * appearing on locally owned cells (plus those referenced via hanging node
 1160 * constraints) are necessary. However, in deal.II we often set all the
 1161 * degrees of freedom on ghosted elements as ghosted vector entries, called
 1162 * the
 1163 * locally relevant degrees of freedom (see the glossary).
 1164 * In that case, the MPI-local index of a ghosted vector entry can in
 1165 * general be different in the two possible ghost sets, despite referring
 1166 * to the same global index. To avoid problems, FEEvaluation checks that
 1167 * the partitioning of the vector used for the matrix-vector product does
 1168 * indeed match with the partitioning of the indices in MatrixFree by a
 1169 * check called
 1170 * LinearAlgebra::distributed::Vector::partitioners_are_compatible. To
 1171 * facilitate things, the MatrixFreeOperators::Base class includes a
 1172 * mechanism to fit the ghost set to the correct layout. This happens in the
 1173 * ghost region of the vector, so keep in mind that the ghost region might
1174 * be modified in both the destination and source vector after a call to a
1175 * vmult() method. This is legitimate because the ghost region of a
1176 * distributed deal.II vector is a mutable section and filled on
1177 * demand. Vectors used in matrix-vector products must not be ghosted upon
1178 * entry of vmult() functions, so no information gets lost.
1179 *
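The safe way to obtain vectors with a compatible parallel layout is therefore to let the MatrixFree object create them (here <code>matrix_free</code> stands for the data object, e.g. <code>*this->data</code>), as this program does via initialize_dof_vector():

@code
  LinearAlgebra::distributed::Vector<number> v;
  matrix_free.initialize_dof_vector(v); // owned range plus the ghost set MatrixFree expects
@endcode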
1180 * @code
1181 *   template <int dim, int fe_degree, typename number>
1182 *   void LaplaceOperator<dim, fe_degree, number>::apply_add(
1183 *   LinearAlgebra::distributed::Vector<number> &dst,
1184 *   const LinearAlgebra::distributed::Vector<number> &src) const
1185 *   {
1186 *   this->data->cell_loop(&LaplaceOperator::local_apply, this, dst, src);
1187 *   }
1188 *  
1189 *  
1190 *  
1191 * @endcode
1192 *
 1193 * The following function implements the computation of the diagonal of the
 1194 * operator. Computing matrix entries of a matrix-free operator evaluation
 1195 * turns out to be more complicated than evaluating the operator. Fundamentally,
 1196 * we could obtain a matrix representation of a generic
 1197 * operator by applying the operator on <i>all</i> unit vectors. Of course,
 1198 * that would be very inefficient since we would need to perform <i>n</i>
 1199 * operator evaluations for an <i>n</i>-dimensional problem. Furthermore, this
 1200 * approach would completely ignore the matrix sparsity. On an
 1201 * individual cell, however, this is the way to go and actually not that
 1202 * inefficient as there usually is a coupling between all degrees of freedom
 1203 * inside the cell.
1204 *
1205
1206 *
1207 * We first initialize the diagonal vector to the correct parallel
1208 * layout. This vector is encapsulated in a member called
1209 * inverse_diagonal_entries of type DiagonalMatrix in the base class
1210 * MatrixFreeOperators::Base. This member is a shared pointer that we first
 1211 * need to initialize and then get the vector representing the diagonal
 1212 * entries of the matrix. Regarding the computation of the diagonal, we could
 1213 * manually write a cell_loop and invoke a local worker that applies all unit
 1214 * vectors on each cell; instead, we use MatrixFreeTools::compute_diagonal()
 1215 * to do this for us. Afterwards, we need to set the vector entries
 1216 * subject to Dirichlet boundary conditions to one (either those on the
 1217 * boundary described by the AffineConstraints object inside MatrixFree or
 1218 * the indices at the interface between different grid levels in adaptive
 1219 * multigrid). This is done through the function
 1220 * MatrixFreeOperators::Base::set_constrained_entries_to_one() and matches
 1221 * with the setting in the matrix-vector product provided by the Base
 1222 * operator. Finally, we need to invert the diagonal entries which is the
 1223 * form required by the Chebyshev smoother based on the Jacobi iteration. In
 1224 * the loop, we assert that all entries are non-zero, because they should
 1225 * either have obtained a positive diagonal entry or be
 1226 * constrained and treated by @p set_constrained_entries_to_one().
1227 *
1228 * @code
1229 *   template <int dim, int fe_degree, typename number>
1230 *   void LaplaceOperator<dim, fe_degree, number>::compute_diagonal()
1231 *   {
 1232 *   this->inverse_diagonal_entries.reset(
 1233 *     new DiagonalMatrix<LinearAlgebra::distributed::Vector<number>>());
 1234 *   LinearAlgebra::distributed::Vector<number> &inverse_diagonal =
 1235 *   this->inverse_diagonal_entries->get_vector();
 1236 *   this->data->initialize_dof_vector(inverse_diagonal);
 1237 *  
 1238 *   MatrixFreeTools::compute_diagonal(*this->data,
 1239 *                                     inverse_diagonal,
 1240 *   &LaplaceOperator::local_compute_diagonal,
 1241 *   this);
1242 *  
1243 *   this->set_constrained_entries_to_one(inverse_diagonal);
1244 *  
1245 *   for (unsigned int i = 0; i < inverse_diagonal.locally_owned_size(); ++i)
1246 *   {
1247 *   Assert(inverse_diagonal.local_element(i) > 0.,
1248 *   ExcMessage("No diagonal entry in a positive definite operator "
1249 *   "should be zero"));
1250 *   inverse_diagonal.local_element(i) =
1251 *   1. / inverse_diagonal.local_element(i);
1252 *   }
1253 *   }
1254 *  
1255 *  
1256 *  
1257 * @endcode
1258 *
 1259 * In the local compute loop, we compute the diagonal by a loop over all
 1260 * columns in the local matrix and putting the entry 1 in the <i>i</i>th
 1261 * slot and a zero entry in all other slots, i.e., we apply the cell-wise
 1262 * differential operator on one unit vector at a time. The inner part
 1263 * invoking FEEvaluation::evaluate(), the loop over quadrature points, and
 1264 * FEEvaluation::integrate(), is exactly the same as in the local_apply
 1265 * function. Afterwards, we pick out the <i>i</i>th entry of the local
 1266 * result and put it to a temporary storage (as we overwrite all entries in
 1267 * the array behind FEEvaluation::get_dof_value() with the next loop
 1268 * iteration). Finally, the temporary storage is written to the destination
 1269 * vector. Note how we use FEEvaluation::get_dof_value() and
 1270 * FEEvaluation::submit_dof_value() to read and write to the data field that
 1271 * FEEvaluation uses for the integration on the one hand and writes into the
 1272 * global vector on the other hand.
1273 *
1274
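To illustrate what MatrixFreeTools::compute_diagonal() automates, here is a sketch of the classical unit-vector loop on one cell batch (assuming <code>phi</code> has already been reinit-ed on the current batch and <code>dst</code> is the diagonal vector; this is not the code path used by the program):

@code
  AlignedVector<VectorizedArray<number>> diagonal(phi.dofs_per_cell);
  for (unsigned int i = 0; i < phi.dofs_per_cell; ++i)
    {
      // apply the cell operator to the i-th unit vector
      for (unsigned int j = 0; j < phi.dofs_per_cell; ++j)
        phi.submit_dof_value(make_vectorized_array<number>(0.), j);
      phi.submit_dof_value(make_vectorized_array<number>(1.), i);

      local_compute_diagonal(phi);        // evaluate + quadrature loop + integrate

      diagonal[i] = phi.get_dof_value(i); // keep only the diagonal entry
    }
  for (unsigned int i = 0; i < phi.dofs_per_cell; ++i)
    phi.submit_dof_value(diagonal[i], i);
  phi.distribute_local_to_global(dst);
@endcode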
1275 *
1276 * Given that we are only interested in the matrix diagonal, we simply throw
1277 * away all other entries of the local matrix that have been computed along
 1278 * the way. While it might seem wasteful to compute the complete cell matrix
 1279 * and then throw away everything but the diagonal, the cell-wise operator
 1280 * applications underlying this computation are cheap: each application
 1281 * costs @f$\mathcal
 1282 * O((p+1)^{d+1})@f$ for polynomial degree @f$p@f$, so computing the whole matrix
 1283 * costs us @f$\mathcal O((p+1)^{2d+1})@f$ operations, not too far away from
 1284 * the @f$\mathcal O((p+1)^{2d})@f$ complexity for computing the diagonal with
 1285 * FEValues-like cell assembly. Since FEEvaluation is also considerably faster
 1286 * due to vectorization and the sum-factorization kernels, this
 1287 * function is actually the fastest (simple) variant. (It would be possible
 1288 * to compute the diagonal with sum factorization techniques in @f$\mathcal
 1289 * O((p+1)^{d+1})@f$ operations involving specifically adapted kernels, but
 1290 * since such kernels are only useful in that particular context and the
 1291 * diagonal computation is typically not on the critical
 1292 * path, they have not been implemented in deal.II.)
1293 *
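To put rough numbers to these estimates: for @f$p=4@f$ in @f$d=3@f$, one cell-wise operator application costs on the order of @f$(p+1)^{d+1}=5^4=625@f$ operations, the full cell matrix on the order of @f$(p+1)^{2d+1}=5^7=78125@f$, and the FEValues-like diagonal on the order of @f$(p+1)^{2d}=5^6=15625@f$, i.e., the two approaches differ by only a factor of about @f$p+1@f$.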
1294
1295 *
1296 * Note that the code that calls distribute_local_to_global on the vector to
1297 * accumulate the diagonal entries into the global matrix has some
 1298 * limitations. For operators with hanging node constraints that distribute
 1299 * an integral contribution of a constrained DoF to several other entries
 1300 * inside the distribute_local_to_global call, the vector interface used
 1301 * here does not exactly compute the diagonal entries, but lumps some
 1302 * contributions located on the diagonal of the local matrix that would end
 1303 * up in an off-diagonal position of the global matrix to the diagonal. The
 1304 * result is correct up to discretization accuracy as explained in <a
 1305 * href="http://dx.doi.org/10.4208/cicp.101214.021015a">Kormann (2016),
 1306 * section 5.3</a>, but not mathematically equal. In this tutorial program,
 1307 * no harm can happen because the diagonal is only used for the multigrid
 1308 * level matrices where no hanging node constraints appear.
1309 *
1310 * @code
1311 *   template <int dim, int fe_degree, typename number>
 1312 *   void LaplaceOperator<dim, fe_degree, number>::local_compute_diagonal(
 1313 *     FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> &phi) const
 1314 *   {
1315 *   const unsigned int cell = phi.get_current_cell_index();
1316 *  
1317 *   phi.evaluate(EvaluationFlags::gradients);
1318 *  
1319 *   for (const unsigned int q : phi.quadrature_point_indices())
1320 *   {
1321 *   phi.submit_gradient(coefficient(cell, q) * phi.get_gradient(q), q);
1322 *   }
1323 *   phi.integrate(EvaluationFlags::gradients);
1324 *   }
1325 *  
1326 *  
1327 *  
1328 * @endcode
1329 *
1330 *
1331 * <a name="step_37-LaplaceProblemclass"></a>
1332 * <h3>LaplaceProblem class</h3>
1333 *
1334
1335 *
 1336 * This class is based on the one in @ref step_16 "step-16". However, we replaced the
 1337 * SparseMatrix<double> class by our matrix-free implementation, which means
 1338 * that we can also skip the sparsity patterns. Notice that we define the
1339 * LaplaceOperator class with the degree of finite element as template
1340 * argument (the value is defined at the top of the file), and that we use
1341 * float numbers for the multigrid level matrices.
1342 *
1343
1344 *
 1345 * The class also has a member variable to keep track of all the detailed
 1346 * timings for setting up the entire chain of data structures before we go
 1347 * about solving the problem. In addition, there is an output stream (that
 1348 * is disabled by default) that can be used to output details for the
 1349 * individual setup operations instead of the summary only that is printed
 1350 * out by default.
 1351 *
 1352
 1353 *
 1354 * Since this program is designed to be used with MPI, we also provide the
 1355 * usual @p pcout output stream that only prints the information of the
 1356 * processor with MPI rank 0. The grid used for this program can either be
 1357 * a distributed triangulation based on p4est (in case deal.II is configured
 1358 * to use p4est), otherwise it is a serial grid that only runs without MPI.
1359 *
1360 * @code
1361 *   template <int dim>
1362 *   class LaplaceProblem
1363 *   {
1364 *   public:
1365 *   LaplaceProblem();
1366 *   void run();
1367 *  
1368 *   private:
1369 *   void setup_system();
1370 *   void assemble_rhs();
1371 *   void solve();
1372 *   void output_results(const unsigned int cycle) const;
1373 *  
 1374 *   #ifdef DEAL_II_WITH_P4EST
 1375 *   parallel::distributed::Triangulation<dim> triangulation;
 1376 *   #else
 1377 *   Triangulation<dim> triangulation;
 1378 *   #endif
1379 *  
1380 *   const FE_Q<dim> fe;
1381 *   DoFHandler<dim> dof_handler;
1382 *  
1383 *   const MappingQ1<dim> mapping;
1384 *  
1385 *   AffineConstraints<double> constraints;
 1386 *   using SystemMatrixType =
 1387 *     LaplaceOperator<dim, degree_finite_element, double>;
 1388 *   SystemMatrixType system_matrix;
1389 *  
 1390 *   MGConstrainedDoFs mg_constrained_dofs;
 1391 *   using LevelMatrixType = LaplaceOperator<dim, degree_finite_element, float>;
 1392 *   MGLevelObject<LevelMatrixType> mg_matrices;
 1393 *  
 1394 *   LinearAlgebra::distributed::Vector<double> solution;
 1395 *   LinearAlgebra::distributed::Vector<double> system_rhs;
 1396 *  
 1397 *   double setup_time;
 1398 *   ConditionalOStream pcout;
 1399 *   ConditionalOStream time_details;
1400 *   };
1401 *  
1402 *  
1403 *  
1404 * @endcode
1405 *
1406 * When we initialize the finite element, we of course have to use the
 1407 * degree specified at the top of the file as well (otherwise, an exception
 1408 * will be thrown at some point, since the computational kernel defined in
 1409 * the templated LaplaceOperator class and the information from the finite
 1410 * element read out by MatrixFree will not match). The constructor of the
 1411 * triangulation needs an additional flag that tells the grid to
 1412 * conform to the 2:1 cell balance over vertices, which is needed for the
1413 * convergence of the geometric multigrid routines. For the distributed
1414 * grid, we also need to specifically enable the multigrid hierarchy.
1415 *
1416 * @code
 1417 *   template <int dim>
 1418 *   LaplaceProblem<dim>::LaplaceProblem()
 1419 *   #ifdef DEAL_II_WITH_P4EST
 1420 *     : triangulation(MPI_COMM_WORLD,
 1421 *                     Triangulation<dim>::limit_level_difference_at_vertices,
 1422 *                     parallel::distributed::Triangulation<
 1423 *   dim>::construct_multigrid_hierarchy)
 1424 *   #else
 1425 *     : triangulation(Triangulation<dim>::limit_level_difference_at_vertices)
 1426 *   #endif
 1427 *     , fe(degree_finite_element)
 1428 *   , dof_handler(triangulation)
 1429 *   , setup_time(0.)
 1430 *     , pcout(std::cout, Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
 1431 * @endcode
1432 *
 1433 * The LaplaceProblem class holds an additional output stream that
 1434 * collects detailed timings about the setup phase. This stream, called
 1435 * time_details, is disabled by default through the @p false argument
 1436 * specified here. For detailed timings, removing the @p false argument
 1437 * prints all the details.
1438 *
1439 * @code
 1440 *   , time_details(std::cout,
 1441 *   false &&
 1442 *     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
 1443 *   {}
1444 *  
1445 *  
1446 *  
1447 * @endcode
1448 *
1449 *
 1450 * <a name="step_37-LaplaceProblemsetup_system"></a>
 1451 * <h4>LaplaceProblem::setup_system</h4>
 1452 *
1453
1454 *
1455 * The setup stage is in analogy to @ref step_16 "step-16" with relevant changes due to the
1456 * LaplaceOperator class. The first thing to do is to set up the DoFHandler,
1457 * including the degrees of freedom for the multigrid levels, and to
 1458 * initialize constraints from hanging nodes and homogeneous Dirichlet
 1459 * boundary conditions. Since we intend to run the program in %parallel with MPI,
 1460 * we need to make sure that the constraints get to know the locally
1461 * relevant degrees of freedom, otherwise the storage would explode when
1462 * using more than a few hundred millions of degrees of freedom, see
1463 * @ref step_40 "step-40".
1464 *
1465
1466 *
1467 * Once we have created the multigrid dof_handler and the constraints, we
1468 * can call the reinit function for the global matrix operator as well as
1469 * each level of the multigrid scheme. The main action is to set up the
1470 * <code> MatrixFree </code> instance for the problem. The base class of the
1471 * <code>LaplaceOperator</code> class, MatrixFreeOperators::Base, is
 1472 * initialized with a shared pointer to MatrixFree object. This way, we can
 1473 * simply create it here and then pass it on to the system matrix and level
 1474 * matrices, respectively. When setting up MatrixFree, we also need to activate
 1475 * the update flag in the AdditionalData field of MatrixFree that enables
 1476 * the storage of quadrature point coordinates in real space (by default, it
 1477 * only caches data for gradients (inverse transposed Jacobians) and JxW
 1478 * values). Note that if we call the reinit function without specifying the
 1479 * level (i.e., giving <code>level = numbers::invalid_unsigned_int</code>),
 1480 * MatrixFree constructs a loop over the active cells. In this tutorial, we
 1481 * do not use threads in addition to MPI, which is why we explicitly disable
 1482 * it by setting the MatrixFree::AdditionalData::tasks_parallel_scheme to
 1483 * MatrixFree::AdditionalData::none. Finally, the coefficient is evaluated
 1484 * and vectors are initialized as explained above.
1485 *
1486 * @code
 1487 *   template <int dim>
 1488 *   void LaplaceProblem<dim>::setup_system()
 1489 *   {
1490 *   Timer time;
1491 *   setup_time = 0;
1492 *   {
1493 *   system_matrix.clear();
1494 *   mg_matrices.clear_elements();
1495 *  
1496 *   dof_handler.distribute_dofs(fe);
1497 *   dof_handler.distribute_mg_dofs();
1498 *  
1499 *   pcout << "Number of degrees of freedom: " << dof_handler.n_dofs()
1500 *   << std::endl;
1501 *  
1502 *   constraints.clear();
 1503 *   constraints.reinit(dof_handler.locally_owned_dofs(),
 1504 *                      DoFTools::extract_locally_relevant_dofs(dof_handler));
 1505 *   DoFTools::make_hanging_node_constraints(dof_handler, constraints);
 1506 *   VectorTools::interpolate_boundary_values(
 1507 *   mapping, dof_handler, 0, Functions::ZeroFunction<dim>(), constraints);
1508 *   constraints.close();
1509 *   }
1510 *   setup_time += time.wall_time();
1511 *   time_details << "Distribute DoFs & B.C. (CPU/wall) " << time.cpu_time()
1512 *   << "s/" << time.wall_time() << 's' << std::endl;
1513 *   time.restart();
1514 *   {
1515 *   {
1516 *   typename MatrixFree<dim, double>::AdditionalData additional_data;
 1517 *   additional_data.tasks_parallel_scheme =
 1518 *     MatrixFree<dim, double>::AdditionalData::none;
 1519 *   additional_data.mapping_update_flags =
 1520 *     (update_gradients | update_JxW_values | update_quadrature_points);
 1521 *   std::shared_ptr<MatrixFree<dim, double>> system_mf_storage(
1522 *   new MatrixFree<dim, double>());
1523 *   system_mf_storage->reinit(mapping,
1524 *   dof_handler,
1525 *   constraints,
1526 *   QGauss<1>(fe.degree + 1),
1527 *   additional_data);
1528 *   system_matrix.initialize(system_mf_storage);
1529 *   }
1530 *  
1531 *   system_matrix.evaluate_coefficient(Coefficient<dim>());
1532 *  
1533 *   system_matrix.initialize_dof_vector(solution);
1534 *   system_matrix.initialize_dof_vector(system_rhs);
1535 *   }
1536 *   setup_time += time.wall_time();
1537 *   time_details << "Setup matrix-free system (CPU/wall) " << time.cpu_time()
1538 *   << "s/" << time.wall_time() << 's' << std::endl;
1539 *   time.restart();
1540 *  
1541 * @endcode
1542 *
 1543 * Next, initialize the matrices for the multigrid method on all the
 1544 * levels. First, the data structure MGConstrainedDoFs keeps information about
 1545 * the indices subject to boundary conditions as well as the indices on
 1546 * edges between different refinement levels as described in the @ref step_16 "step-16"
 1547 * tutorial program. We then go through the levels of the mesh and
 1548 * construct the constraints and matrices on each level. These follow
 1549 * closely the construction of the system matrix on the original mesh,
 1550 * except the slight difference in naming when accessing information on
 1551 * the levels rather than the active cells.
1552 *
1553 * @code
1554 *   {
1555 *   const unsigned int nlevels = triangulation.n_global_levels();
1556 *   mg_matrices.resize(0, nlevels - 1);
1557 *  
1558 *   const std::set<types::boundary_id> dirichlet_boundary_ids = {0};
1559 *   mg_constrained_dofs.initialize(dof_handler);
1560 *   mg_constrained_dofs.make_zero_boundary_constraints(
1561 *   dof_handler, dirichlet_boundary_ids);
1562 *  
1563 *   for (unsigned int level = 0; level < nlevels; ++level)
1564 *   {
1565 *   AffineConstraints<double> level_constraints(
 1566 *   dof_handler.locally_owned_mg_dofs(level),
 1567 *   DoFTools::extract_locally_relevant_level_dofs(dof_handler, level));
 1568 *   for (const types::global_dof_index dof_index :
1569 *   mg_constrained_dofs.get_boundary_indices(level))
1570 *   level_constraints.constrain_dof_to_zero(dof_index);
1571 *   level_constraints.close();
1572 *  
1573 *   typename MatrixFree<dim, float>::AdditionalData additional_data;
 1574 *   additional_data.tasks_parallel_scheme =
 1575 *     MatrixFree<dim, float>::AdditionalData::none;
 1576 *   additional_data.mapping_update_flags =
 1577 *     (update_gradients | update_JxW_values | update_quadrature_points);
 1578 *   additional_data.mg_level = level;
1579 *   std::shared_ptr<MatrixFree<dim, float>> mg_mf_storage_level =
1580 *   std::make_shared<MatrixFree<dim, float>>();
1581 *   mg_mf_storage_level->reinit(mapping,
1582 *   dof_handler,
1583 *   level_constraints,
1584 *   QGauss<1>(fe.degree + 1),
1585 *   additional_data);
 1586 *  
 1587 *   mg_matrices[level].initialize(mg_mf_storage_level,
 1588 *   mg_constrained_dofs,
1589 *   level);
1590 *   mg_matrices[level].evaluate_coefficient(Coefficient<dim>());
1591 *   }
1592 *   }
1593 *   setup_time += time.wall_time();
1594 *   time_details << "Setup matrix-free levels (CPU/wall) " << time.cpu_time()
1595 *   << "s/" << time.wall_time() << 's' << std::endl;
1596 *   }
1597 *  
1598 *  
1599 *  
1600 * @endcode
1601 *
1602 *
 1603 * <a name="step_37-LaplaceProblemassemble_rhs"></a>
 1604 * <h4>LaplaceProblem::assemble_rhs</h4>
 1605 *
1606
1607 *
1608 * The assemble function is very simple since all we have to do is to
1609 * assemble the right hand side. Thanks to FEEvaluation and all the data
1610 * cached in the MatrixFree class, which we query from
 1611 * MatrixFreeOperators::Base, this can be done in a few lines. Since this
 1612 * call is not wrapped into a MatrixFree::cell_loop (which would be an
 1613 * alternative), we must not forget to call compress() at the end of the
1614 * assembly to send all the contributions of the right hand side to the
1615 * owner of the respective degree of freedom.
1616 *
1617 * @code
1618 *   template <int dim>
1619 *   void LaplaceProblem<dim>::assemble_rhs()
1620 *   {
1621 *   Timer time;
1622 *  
 1623 *   system_rhs = 0;
 1624 *   FEEvaluation<dim, degree_finite_element> phi(
 1625 *   *system_matrix.get_matrix_free());
1626 *   for (unsigned int cell = 0;
1627 *   cell < system_matrix.get_matrix_free()->n_cell_batches();
1628 *   ++cell)
1629 *   {
1630 *   phi.reinit(cell);
1631 *   for (const unsigned int q : phi.quadrature_point_indices())
1632 *   phi.submit_value(make_vectorized_array<double>(1.0), q);
1633 *   phi.integrate(EvaluationFlags::values);
1634 *   phi.distribute_local_to_global(system_rhs);
 1635 *   }
 1636 *   system_rhs.compress(VectorOperation::add);
 1637 *  
1638 *   setup_time += time.wall_time();
1639 *   time_details << "Assemble right hand side (CPU/wall) " << time.cpu_time()
1640 *   << "s/" << time.wall_time() << 's' << std::endl;
1641 *   }
1642 *  
1643 *  
1644 *  
1645 * @endcode
1646 *
1647 *
1648 * <a name="step_37-LaplaceProblemsolve"></a>
1649 * <h4>LaplaceProblem::solve</h4>
1650 *
1651
1652 *
 1653 * The solution process is similar to the one in @ref step_16 "step-16". We start with the setup of
 1654 * the transfer. For LinearAlgebra::distributed::Vector, there is a very
 1655 * fast transfer class called MGTransferMatrixFree that does the
 1656 * interpolation between the grid levels with the same fast sum-factorization
 1657 * kernels that also get used in FEEvaluation.
 1658 *
1659 * @code
 1660 *   template <int dim>
 1661 *   void LaplaceProblem<dim>::solve()
 1662 *   {
1663 *   Timer time;
1664 *   MGTransferMatrixFree<dim, float> mg_transfer(mg_constrained_dofs);
1665 *   mg_transfer.build(dof_handler);
1666 *   setup_time += time.wall_time();
1667 *   time_details << "MG build transfer time (CPU/wall) " << time.cpu_time()
1668 *   << "s/" << time.wall_time() << "s\n";
1669 *   time.restart();
1670 *  
1671 * @endcode
1672 *
1673 * As a smoother, this tutorial program uses a Chebyshev iteration instead
1674 * of SOR in @ref step_16 "step-16". (SOR would be very difficult to implement because we
 1675 * do not have the matrix elements available explicitly, and it is
 1676 * difficult to make it work efficiently in %parallel.) The smoother is
 1677 * initialized with our level matrices and the mandatory additional data
 1678 * for the Chebyshev smoother. We use a relatively high degree here (5),
 1679 * since matrix-vector products are comparably cheap. We choose to smooth
 1680 * out a range of @f$[1.2 \hat{\lambda}_{\max}/15,1.2 \hat{\lambda}_{\max}]@f$
1681 * in the smoother where @f$\hat{\lambda}_{\max}@f$ is an estimate of the
1682 * largest eigenvalue (the factor 1.2 is applied inside
1683 * PreconditionChebyshev). In order to compute that eigenvalue, the
1684 * Chebyshev initialization performs a few steps of a CG algorithm
1685 * without preconditioner. Since the highest eigenvalue is usually the
1686 * easiest one to find and a rough estimate is enough, we choose 10
1687 * iterations. Finally, we also set the inner preconditioner type in the
1688 * Chebyshev method which is a Jacobi iteration. This is represented by
1689 * the DiagonalMatrix class that gets the inverse diagonal entry provided
1690 * by our LaplaceOperator class.
1691 *
1692
1693 *
 1694 * On the coarsest level, we initialize the smoother differently because we want
 1695 * to use the Chebyshev iteration as a solver. PreconditionChebyshev
 1696 * allows the user to switch to solver mode where the number of iterations
 1697 * is internally chosen to the correct value. In the additional data
 1698 * object, this setting is activated by choosing the polynomial degree to
 1699 * numbers::invalid_unsigned_int. The algorithm will then attack all
 1700 * eigenvalues between the smallest and largest one in the coarse level
 1701 * matrix. The number of steps in the Chebyshev smoother are chosen such
 1702 * that the Chebyshev convergence estimates guarantee to reduce the
 1703 * residual by the number specified in the variable @p
 1704 * smoothing_range. Note that for solving, @p smoothing_range is a
 1705 * relative tolerance and chosen smaller than one, in this case, we select
 1706 * three orders of magnitude, whereas it is a number larger than 1 when
 1707 * only selected eigenvalues are smoothed.
1708 *
1709
1710 *
1711 * From a computational point of view, the Chebyshev iteration is a very
1712 * attractive coarse grid solver as long as the coarse size is
 1713 * moderate. This is because the Chebyshev method performs only
 1714 * matrix-vector products and vector updates, which typically parallelize
 1715 * better to large scale than the inner products involved in other
 1716 * iterative methods. The former involves only local communication between
 1717 * neighbors
 1718 * in the (coarse) mesh, whereas the latter requires global communication
 1719 * over all processors.
1720 *
1721 * @code
 1722 *   using SmootherType =
 1723 *     PreconditionChebyshev<LevelMatrixType,
 1724 *                           LinearAlgebra::distributed::Vector<float>>;
 1725 *   mg::SmootherRelaxation<SmootherType,
 1726 *                          LinearAlgebra::distributed::Vector<float>>
 1727 *   mg_smoother;
 1728 *   MGLevelObject<typename SmootherType::AdditionalData> smoother_data;
 1729 *   smoother_data.resize(0, triangulation.n_global_levels() - 1);
1730 *   for (unsigned int level = 0; level < triangulation.n_global_levels();
1731 *   ++level)
1732 *   {
1733 *   if (level > 0)
1734 *   {
1735 *   smoother_data[level].smoothing_range = 15.;
1736 *   smoother_data[level].degree = 5;
1737 *   smoother_data[level].eig_cg_n_iterations = 10;
1738 *   }
1739 *   else
1740 *   {
 1741 *   smoother_data[0].smoothing_range = 1e-3;
 1742 *   smoother_data[0].degree = numbers::invalid_unsigned_int;
 1743 *   smoother_data[0].eig_cg_n_iterations = mg_matrices[0].m();
1744 *   }
1745 *   mg_matrices[level].compute_diagonal();
1746 *   smoother_data[level].preconditioner =
1747 *   mg_matrices[level].get_matrix_diagonal_inverse();
1748 *   }
1749 *   mg_smoother.initialize(mg_matrices, smoother_data);
1750 *  
 1751 *   MGCoarseGridApplySmoother<LinearAlgebra::distributed::Vector<float>>
 1752 *   mg_coarse;
1753 *   mg_coarse.initialize(mg_smoother);
1754 *  
1755 * @endcode
1756 *
1757 * The next step is to set up the interface matrices that are needed for the
1758 * case with hanging nodes. The adaptive multigrid realization in deal.II
1759 * implements an approach called local smoothing. This means that the
 1760 * smoothing on the finest level only covers the local part of the mesh
 1761 * defined by the fixed (finest) grid level and ignores parts of the
 1762 * computational domain where the terminal cells are coarser than this
 1763 * level. As the method progresses to coarser levels, more and more of the
 1764 * global mesh will
 1765 * be covered. Since all level matrices in the multigrid method cover a
 1766 * single level in the mesh, no hanging nodes appear on the level matrices.
 1767 * At the interface between multigrid levels, homogeneous Dirichlet boundary
 1768 * conditions are set while smoothing. When the residual is transferred to
 1769 * the next coarser level, however, the coupling over the multigrid
 1770 * interface needs to be taken into account. This is done by the so-called
 1771 * interface (or edge) matrices that compute the part of the residual that
 1772 * is missed by the level matrix with homogeneous Dirichlet conditions. We
 1773 * refer to the @ref mg_paper
 1774 * "Multigrid paper by Janssen and Kanschat" for more details.
1775 *
1776
1777 *
 1778 * For the implementation of those interface matrices, there is already a
 1779 * pre-defined class MatrixFreeOperators::MGInterfaceOperator that wraps
 1780 * the routines MatrixFreeOperators::Base::vmult_interface_down() and
 1781 * MatrixFreeOperators::Base::vmult_interface_up() in a new class with @p
1782 * vmult() and @p Tvmult() operations (that were originally written for
1783 * matrices, hence expecting those names). Note that vmult_interface_down
1784 * is used during the restriction phase of the multigrid V-cycle, whereas
1785 * vmult_interface_up is used during the prolongation phase.
1786 *
1787
1788 *
 1789 * Once the interface matrices are created, we set up the remaining Multigrid
 1790 * preconditioner infrastructure in complete analogy to @ref step_16 "step-16" to obtain
1791 * a @p preconditioner object that can be applied to a matrix.
1792 *
1793 * @code
1794 *   mg::Matrix<LinearAlgebra::distributed::Vector<float>> mg_matrix(
1795 *   mg_matrices);
1796 *  
 1797 *   MGLevelObject<MatrixFreeOperators::MGInterfaceOperator<LevelMatrixType>>
 1798 *     mg_interface_matrices;
 1799 *   mg_interface_matrices.resize(0, triangulation.n_global_levels() - 1);
 1800 *   for (unsigned int level = 0; level < triangulation.n_global_levels();
 1801 *   ++level)
 1802 *     mg_interface_matrices[level].initialize(mg_matrices[level]);
 1803 *   mg::Matrix<LinearAlgebra::distributed::Vector<float>> mg_interface(
 1804 *     mg_interface_matrices);
 1805 *  
 1806 *   Multigrid<LinearAlgebra::distributed::Vector<float>> mg(
 1807 *     mg_matrix, mg_coarse, mg_transfer, mg_smoother, mg_smoother);
 1808 *   mg.set_edge_matrices(mg_interface, mg_interface);
1809 *  
1810 *   PreconditionMG<dim,
1811 *   LinearAlgebra::distributed::Vector<float>,
1812 *   MGTransferMatrixFree<dim, float>>
1813 *   preconditioner(dof_handler, mg, mg_transfer);
1814 *  
1815 * @endcode
1816 *
1817 * The setup of the multigrid routines is quite easy and one cannot see
1818 * any difference in the solve process as compared to @ref step_16 "step-16". All the
1819 * magic is hidden behind the implementation of the LaplaceOperator::vmult
1820 * operation. Note that we print out the solve time and the accumulated
1821 * setup time through standard out, i.e., in any case, whereas detailed
 1822 * times for the setup operations are only printed in case the flag for
 1823 * the detailed output stream @p time_details is enabled in the constructor.
 1824 *
1825
1826 *
1827 *
1828 * @code
1829 *   SolverControl solver_control(100, 1e-12 * system_rhs.l2_norm());
1830 *   SolverCG<LinearAlgebra::distributed::Vector<double>> cg(solver_control);
1831 *   setup_time += time.wall_time();
1832 *   time_details << "MG build smoother time (CPU/wall) " << time.cpu_time()
1833 *   << "s/" << time.wall_time() << "s\n";
1834 *   pcout << "Total setup time (wall) " << setup_time << "s\n";
1835 *  
1836 *   time.reset();
1837 *   time.start();
1838 *   constraints.set_zero(solution);
1839 *   cg.solve(system_matrix, solution, system_rhs, preconditioner);
1840 *  
1841 *   constraints.distribute(solution);
1842 *  
1843 *   pcout << "Time solve (" << solver_control.last_step() << " iterations)"
 1844 *   << (solver_control.last_step() < 10 ? "  " : " ") << "(CPU/wall) "
1845 *   << time.cpu_time() << "s/" << time.wall_time() << "s\n";
1846 *   }
1847 *  
1848 *  
1849 *  
1850 * @endcode
1851 *
1852 *
 1853 * <a name="step_37-LaplaceProblemoutput_results"></a>
 1854 * <h4>LaplaceProblem::output_results</h4>
 1855 *
1856
1857 *
1858 * Here is the data output, which is a simplified version of @ref step_5 "step-5". We use
 1859 * the standard VTU (= compressed VTK) output for each grid produced in the
 1860 * refinement process. With the default setting of the compression
 1861 * level, writing the output can take about as
 1863 * long as running the linear solver, while setting
1864 * DataOutBase::CompressionLevel to
1865 * best_speed lowers this to only one fourth the time
1866 * of the linear solve.
1867 *
1868
1869 *
 1870 * We disable the output when the mesh gets too large. A variant of this
 1871 * program has been run on hundreds of thousands of MPI ranks with as many as
 1872 * 100 billion grid cells, which is not directly accessible to classical
 1873 * visualization tools.
 1874 *
1875 * @code
1876 *   template <int dim>
1877 *   void LaplaceProblem<dim>::output_results(const unsigned int cycle) const
1878 *   {
1879 *   Timer time;
1880 *   if (triangulation.n_global_active_cells() > 1000000)
1881 *   return;
1882 *  
1883 *   DataOut<dim> data_out;
1884 *  
1885 *   solution.update_ghost_values();
1886 *   data_out.attach_dof_handler(dof_handler);
1887 *   data_out.add_data_vector(solution, "solution");
1888 *   data_out.build_patches(mapping);
1889 *  
1890 *   DataOutBase::VtkFlags flags;
1891 *   flags.compression_level = DataOutBase::CompressionLevel::best_speed;
1892 *   data_out.set_flags(flags);
1893 *   data_out.write_vtu_with_pvtu_record(
1894 *   "./", "solution", cycle, MPI_COMM_WORLD, 3);
1895 *  
1896 *   time_details << "Time write output (CPU/wall) " << time.cpu_time()
1897 *   << "s/" << time.wall_time() << "s\n";
1898 *   }
1899 *  
1900 *  
1901 *  
1902 * @endcode
1903 *
1904 *
1905 * <a name="step_37-LaplaceProblemrun"></a>
1906 * <h4>LaplaceProblem::run</h4>
1907 *
1908
1909 *
1910 * The function that runs the program is very similar to the one in
 1911 * @ref step_16 "step-16". We do fewer refinement steps in 3d than in 2d, but that's
1912 * it.
1913 *
1914
1915 *
1916 * Before we run the program, we output some information about the detected
1917 * vectorization level as discussed in the introduction.
1918 *
1919 * @code
1920 *   template <int dim>
1921 *   void LaplaceProblem<dim>::run()
1922 *   {
1923 *   {
1924 *   const unsigned int n_vect_doubles = VectorizedArray<double>::size();
1925 *   const unsigned int n_vect_bits = 8 * sizeof(double) * n_vect_doubles;
1926 *  
1927 *   pcout << "Vectorization over " << n_vect_doubles
1928 *   << " doubles = " << n_vect_bits << " bits ("
1929 *   << Utilities::System::get_current_vectorization_level() << ')'
1930 *   << std::endl;
1931 *   }
1932 *  
1933 *   for (unsigned int cycle = 0; cycle < 9 - dim; ++cycle)
1934 *   {
1935 *   pcout << "Cycle " << cycle << std::endl;
1936 *  
1937 *   if (cycle == 0)
1938 *   {
1939 *   GridGenerator::hyper_cube(triangulation, 0., 1.);
1940 *   triangulation.refine_global(3 - dim);
1941 *   }
1942 *   triangulation.refine_global(1);
1943 *   setup_system();
1944 *   assemble_rhs();
1945 *   solve();
1946 *   output_results(cycle);
1947 *   pcout << std::endl;
1948 *   };
1949 *   }
1950 *   } // namespace Step37
1951 *  
1952 *  
1953 *  
1954 * @endcode
1955 *
1956 *
1957 * <a name="step_37-Thecodemaincodefunction"></a>
1958 * <h3>The <code>main</code> function</h3>
1959 *
1960
1961 *
1962 * Apart from the fact that we set up the MPI framework according to @ref step_40 "step-40",
1963 * there are no surprises in the main function.
1964 *
1965 * @code
1966 *   int main(int argc, char *argv[])
1967 *   {
1968 *   try
1969 *   {
1970 *   using namespace Step37;
1971 *  
1972 *   Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
1973 *  
1974 *   LaplaceProblem<dimension> laplace_problem;
1975 *   laplace_problem.run();
1976 *   }
1977 *   catch (std::exception &exc)
1978 *   {
1979 *   std::cerr << std::endl
1980 *   << std::endl
1981 *   << "----------------------------------------------------"
1982 *   << std::endl;
1983 *   std::cerr << "Exception on processing: " << std::endl
1984 *   << exc.what() << std::endl
1985 *   << "Aborting!" << std::endl
1986 *   << "----------------------------------------------------"
1987 *   << std::endl;
1988 *   return 1;
1989 *   }
1990 *   catch (...)
1991 *   {
1992 *   std::cerr << std::endl
1993 *   << std::endl
1994 *   << "----------------------------------------------------"
1995 *   << std::endl;
1996 *   std::cerr << "Unknown exception!" << std::endl
1997 *   << "Aborting!" << std::endl
1998 *   << "----------------------------------------------------"
1999 *   << std::endl;
2000 *   return 1;
2001 *   }
2002 *  
2003 *   return 0;
2004 *   }
2005 * @endcode
2006<a name="step_37-Results"></a><h1>Results</h1>
2007
2008
2009<a name="step_37-Programoutput"></a><h3>Program output</h3>
2010
2011
2012Since this example solves the same problem as @ref step_5 "step-5" (except for
2013a different coefficient), there is little to say about the
2014solution. We show a picture anyway, illustrating the size of the
2015solution through both isocontours and volume rendering:
2016
2017<img src="https://www.dealii.org/images/steps/developer/step-37.solution.png" alt="">
2018
2019Of more interest is to evaluate some aspects of the multigrid solver.
2020When we run this program in 2D for quadratic (@f$Q_2@f$) elements, we get the
2021following output (when run on one core in release mode):
2022@code
2023Vectorization over 2 doubles = 128 bits (SSE2)
2024Cycle 0
2025Number of degrees of freedom: 81
2026Total setup time (wall) 0.00159788s
2027Time solve (6 iterations) (CPU/wall) 0.000951s/0.000951052s
2028
2029Cycle 1
2030Number of degrees of freedom: 289
2031Total setup time (wall) 0.00114608s
2032Time solve (6 iterations) (CPU/wall) 0.000935s/0.000934839s
2033
2034Cycle 2
2035Number of degrees of freedom: 1089
2036Total setup time (wall) 0.00244665s
2037Time solve (6 iterations) (CPU/wall) 0.00207s/0.002069s
2038
2039Cycle 3
2040Number of degrees of freedom: 4225
2041Total setup time (wall) 0.00678205s
2042Time solve (6 iterations) (CPU/wall) 0.005616s/0.00561595s
2043
2044Cycle 4
2045Number of degrees of freedom: 16641
2046Total setup time (wall) 0.0241671s
2047Time solve (6 iterations) (CPU/wall) 0.019543s/0.0195441s
2048
2049Cycle 5
2050Number of degrees of freedom: 66049
2051Total setup time (wall) 0.0967851s
2052Time solve (6 iterations) (CPU/wall) 0.07457s/0.0745709s
2053
2054Cycle 6
2055Number of degrees of freedom: 263169
2056Total setup time (wall) 0.346374s
2057Time solve (6 iterations) (CPU/wall) 0.260042s/0.265033s
2058@endcode
2059
2060As in @ref step_16 "step-16", we see that the number of CG iterations remains constant with
2061increasing number of degrees of freedom. A constant number of iterations
2062(together with optimal computational properties) means that the computing time
2063approximately quadruples as the problem size quadruples from one cycle to the
2064next. The code is also very efficient in terms of storage. Around 2-4 million
2065degrees of freedom fit into 1 GB of memory, see also the MPI results below. An
2066interesting fact is that solving one linear system is cheaper than the setup,
2067despite not building a matrix (approximately half of which is spent in the
2068DoFHandler::distribute_dofs() and DoFHandler::distribute_mg_dofs()
2069calls). This shows the high efficiency of this approach, but also that the
2070deal.II data structures are quite expensive to set up and the setup cost must
2071be amortized over several system solves.
2072
2073Not much changes if we run the program in three spatial dimensions. Since we
2074use uniform mesh refinement, we get eight times as many elements and
2075approximately eight times as many degrees of freedom with each cycle:
2076
2077@code
2078Vectorization over 2 doubles = 128 bits (SSE2)
2079Cycle 0
2080Number of degrees of freedom: 125
2081Total setup time (wall) 0.00231099s
2082Time solve (6 iterations) (CPU/wall) 0.000692s/0.000922918s
2083
2084Cycle 1
2085Number of degrees of freedom: 729
2086Total setup time (wall) 0.00289083s
2087Time solve (6 iterations) (CPU/wall) 0.001534s/0.0024128s
2088
2089Cycle 2
2090Number of degrees of freedom: 4913
2091Total setup time (wall) 0.0143182s
2092Time solve (6 iterations) (CPU/wall) 0.010785s/0.0107841s
2093
2094Cycle 3
2095Number of degrees of freedom: 35937
2096Total setup time (wall) 0.087064s
2097Time solve (6 iterations) (CPU/wall) 0.063522s/0.06545s
2098
2099Cycle 4
2100Number of degrees of freedom: 274625
2101Total setup time (wall) 0.596306s
2102Time solve (6 iterations) (CPU/wall) 0.427757s/0.431765s
2103
2104Cycle 5
2105Number of degrees of freedom: 2146689
2106Total setup time (wall) 4.96491s
2107Time solve (6 iterations) (CPU/wall) 3.53126s/3.56142s
2108@endcode
2109
2110Since it is so easy, we look at what happens if we increase the polynomial
2111degree. When selecting the degree as four in 3D, i.e., on @f$\mathcal Q_4@f$
2112elements, by changing the line <code>const unsigned int
2113degree_finite_element=4;</code> at the top of the program, we get the
2114following program output:
2115
2116@code
2117Vectorization over 2 doubles = 128 bits (SSE2)
2118Cycle 0
2119Number of degrees of freedom: 729
2120Total setup time (wall) 0.00633097s
2121Time solve (6 iterations) (CPU/wall) 0.002829s/0.00379395s
2122
2123Cycle 1
2124Number of degrees of freedom: 4913
2125Total setup time (wall) 0.0174279s
2126Time solve (6 iterations) (CPU/wall) 0.012255s/0.012254s
2127
2128Cycle 2
2129Number of degrees of freedom: 35937
2130Total setup time (wall) 0.082655s
2131Time solve (6 iterations) (CPU/wall) 0.052362s/0.0523629s
2132
2133Cycle 3
2134Number of degrees of freedom: 274625
2135Total setup time (wall) 0.507943s
2136Time solve (6 iterations) (CPU/wall) 0.341811s/0.345788s
2137
2138Cycle 4
2139Number of degrees of freedom: 2146689
2140Total setup time (wall) 3.46251s
2141Time solve (7 iterations) (CPU/wall) 3.29638s/3.3265s
2142
2143Cycle 5
2144Number of degrees of freedom: 16974593
2145Total setup time (wall) 27.8989s
2146Time solve (7 iterations) (CPU/wall) 26.3705s/27.1077s
2147@endcode
2148
2149Since @f$\mathcal Q_4@f$ elements on a certain mesh correspond to @f$\mathcal Q_2@f$
2150elements on half the mesh size, we can compare the run time at cycle 4 with
2151fourth degree polynomials with cycle 5 using quadratic polynomials, both at
21522.1 million degrees of freedom. The surprising effect is that the solver for
2153@f$\mathcal Q_4@f$ element is actually slightly faster than for the quadratic
2154case, despite using one more linear iteration. The effect that higher-degree
2155polynomials are similarly fast or even faster than lower degree ones is one of
2156the main strengths of matrix-free operator evaluation through sum
2157factorization, see the <a
2158href="http://dx.doi.org/10.1016/j.compfluid.2012.04.012">matrix-free
2159paper</a>. This is fundamentally different to matrix-based methods that get
2160more expensive per unknown as the polynomial degree increases and the coupling
2161gets denser.
2162
2163In addition, the setup also gets a bit cheaper for higher order, which is
2164because fewer elements need to be set up.
2165
2166Finally, let us look at the timings with degree 8, which corresponds to
2167another round of mesh refinement in the lower order methods:
2168
2169@code
2170Vectorization over 2 doubles = 128 bits (SSE2)
2171Cycle 0
2172Number of degrees of freedom: 4913
2173Total setup time (wall) 0.0842004s
2174Time solve (8 iterations) (CPU/wall) 0.019296s/0.0192959s
2175
2176Cycle 1
2177Number of degrees of freedom: 35937
2178Total setup time (wall) 0.327048s
2179Time solve (8 iterations) (CPU/wall) 0.07517s/0.075999s
2180
2181Cycle 2
2182Number of degrees of freedom: 274625
2183Total setup time (wall) 2.12335s
2184Time solve (8 iterations) (CPU/wall) 0.448739s/0.453698s
2185
2186Cycle 3
2187Number of degrees of freedom: 2146689
2188Total setup time (wall) 16.1743s
2189Time solve (8 iterations) (CPU/wall) 3.95003s/3.97717s
2190
2191Cycle 4
2192Number of degrees of freedom: 16974593
2193Total setup time (wall) 130.8s
2194Time solve (8 iterations) (CPU/wall) 31.0316s/31.767s
2195@endcode
2196
2197Here, the initialization seems considerably slower than before, which is
2198mainly due to the computation of the diagonal of the matrix, which actually
2199computes a 729 x 729 matrix on each cell and throws away everything but the
2200diagonal. The solver times, however, are again very close to the quartic case,
2201showing that the linear increase with the polynomial degree that is
2202theoretically expected is almost completely offset by better computational
2203characteristics and the fact that higher order methods have a smaller share of
2204degrees of freedom living on several cells that add to the evaluation
2205complexity.
2206
2207<a name="step_37-Comparisonwithasparsematrix"></a><h3>Comparison with a sparse matrix</h3>
2208
2209
2210In order to understand the capabilities of the matrix-free implementation, we
2211compare the performance of the 3d example above with a sparse matrix
2212implementation based on TrilinosWrappers::SparseMatrix by measuring both the
2213computation times for the initialization of the problem (distribute DoFs,
2214setup and assemble matrices, setup multigrid structures) and the actual
2215solution for the matrix-free variant and the variant based on sparse
2216matrices. We base the preconditioner on float numbers and the actual matrix
2217and vectors on double numbers, as shown above. Tests are run on an Intel Core
2218i7-5500U notebook processor (two cores and <a
2219href="http://en.wikipedia.org/wiki/Advanced_Vector_Extensions">AVX</a>
2220support, i.e., four operations on doubles can be done with one CPU
2221instruction, which is heavily used in FEEvaluation), optimized mode, and two
2222MPI ranks.
2223
2224<table align="center" class="doxtable">
2225 <tr>
2226 <th>&nbsp;</th>
2227 <th colspan="2">Sparse matrix</th>
2228 <th colspan="2">Matrix-free implementation</th>
2229 </tr>
2230 <tr>
2231 <th>n_dofs</th>
2232 <th>Setup + assemble</th>
2233 <th>&nbsp;Solve&nbsp;</th>
2234 <th>Setup + assemble</th>
2235 <th>&nbsp;Solve&nbsp;</th>
2236 </tr>
2237 <tr>
2238 <td align="right">125</td>
2239 <td align="center">0.0042s</td>
2240 <td align="center">0.0012s</td>
2241 <td align="center">0.0022s</td>
2242 <td align="center">0.00095s</td>
2243 </tr>
2244 <tr>
2245 <td align="right">729</td>
2246 <td align="center">0.012s</td>
2247 <td align="center">0.0040s</td>
2248 <td align="center">0.0027s</td>
2249 <td align="center">0.0021s</td>
2250 </tr>
2251 <tr>
2252 <td align="right">4,913</td>
2253 <td align="center">0.082s</td>
2254 <td align="center">0.012s</td>
2255 <td align="center">0.011s</td>
2256 <td align="center">0.0057s</td>
2257 </tr>
2258 <tr>
2259 <td align="right">35,937</td>
2260 <td align="center">0.73s</td>
2261 <td align="center">0.13s</td>
2262 <td align="center">0.048s</td>
2263 <td align="center">0.040s</td>
2264 </tr>
2265 <tr>
2266 <td align="right">274,625</td>
2267 <td align="center">5.43s</td>
2268 <td align="center">1.01s</td>
2269 <td align="center">0.33s</td>
2270 <td align="center">0.25s</td>
2271 </tr>
2272 <tr>
2273 <td align="right">2,146,689</td>
2274 <td align="center">43.8s</td>
2275 <td align="center">8.24s</td>
2276 <td align="center">2.42s</td>
2277 <td align="center">2.06s</td>
2278 </tr>
2279</table>
2280
2281The table clearly shows that the matrix-free implementation is more than twice
2282as fast for the solver, and more than six times as fast when it comes to
2283initialization costs. As the problem size is made a factor 8 larger, we note
2284that the times usually go up by a factor eight, too (as the solver iterations
2285are constant at six). The main deviation is in the sparse matrix between 5k
2286and 36k degrees of freedom, where the time increases by a factor 12. This is
2287the threshold where the (L3) cache in the processor can no longer hold all
2288data necessary for the matrix-vector products and all matrix elements must be
2289fetched from main memory.
2290
2291Of course, this picture does not necessarily translate to all cases, as there
2292are problems where knowledge of matrix entries enables much better solvers (as
2293happens when the coefficient is varying more strongly than in the above
2294example). Moreover, it also depends on the computer system. The present system
2295has good memory performance, so sparse matrices perform comparably
2296well. Nonetheless, the matrix-free implementation gives a nice speedup already
2297for the <i>Q</i><sub>2</sub> elements used in this example. This becomes
2298particularly apparent for time-dependent or nonlinear problems where sparse
2299matrices would need to be reassembled over and over again, which becomes much
2300easier with this class. And of course, thanks to the better complexity of the
2301products, the method gains increasingly larger advantages when the order of the
2302elements increases (the matrix-free implementation has costs
23034<i>d</i><sup>2</sup><i>p</i> per degree of freedom, compared to
23042<i>p<sup>d</sup></i> for the sparse matrix, so it will win anyway for order 4
2305and higher in 3d).
2306
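Plugging numbers into these per-unknown estimates: at @f$p=5@f$ in 3d, the matrix-free evaluation costs roughly @f$4\,d^2\,p = 4\cdot 9\cdot 5 = 180@f$ operations per degree of freedom, whereas the sparse matrix-vector product costs roughly @f$2\,p^d = 2\cdot 125 = 250@f$, and the gap widens quickly as the degree grows further.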
2307<a name="step_37-ResultsforlargescaleparallelcomputationsonSuperMUC"></a><h3> Results for large-scale parallel computations on SuperMUC</h3>
2308
2309
2310As explained in the introduction and the in-code comments, this program can be
2311run in parallel with MPI. It turns out that geometric multigrid schemes work
2312really well and can scale to very large machines. To the authors' knowledge,
2313these are the largest computations done
2314with deal.II as of late 2016, run on up to 147,456 cores of the <a
2315href="https://www.lrz.de/services/compute/supermuc/systemdescription/">complete
2316SuperMUC Phase 1</a>. The ingredients for scalability beyond 1,000 cores are
2317that no data structure that depends on the global problem size is held in its
2318entirety on a single processor and that the communication is not too frequent
2319in order not to run into latency issues of the network. For PDEs solved with
2320iterative solvers, the communication latency is often the limiting factor,
2321rather than the throughput of the network. On this system, the point-to-point
2322latency between two processors is between 1e-6 and
2323 1e-5 seconds, depending on the proximity in the MPI network.
2324products with @p LaplaceOperator from this class involves several
2325point-to-point communication steps, interleaved with computations on each
2326core. The resulting latency of a matrix-vector product is around 1e-4
2328accumulates the sum of a single number per rank over all ranks in the MPI
2329network, has a latency of 1e-4 seconds. The multigrid V-cycle used in this
2330program is also a form of global communication. Think about the coarse grid
2332from all processors before it starts. When completed, the coarse grid solution
2334smoothing until the fine grid. Essentially, this is a tree-like pattern over
2336@p MPI_Allreduce operations where the tree in the reduction is optimized to the
2337actual links in the MPI network, the multigrid V-cycle does this according to
2339optimality. Furthermore, the multigrid cycle is not simply a walk up and down
2341smoothing. In other words, the global communication in multigrid is more
2345
The figure below shows the <b>strong scaling</b> of the solver for several
problem sizes and continuous finite elements. Along the lines, the problem size
is held constant as the number of cores is increased. When doubling the number
of cores, one expects a halving of the computational time, indicated by the
dotted gray lines. The results show that the implementation is close to ideal
until an absolute run time of around 0.1 seconds is reached. The solver
tolerances have been set such that the solver performs five conjugate gradient
iterations in all cases.

<img src="https://www.dealii.org/images/steps/developer/step-37.scaling_strong.png" alt="">

In addition, the plot also contains results for <b>weak scaling</b> that lists
how the algorithm behaves as both the number of processor cores and elements
are increased at the same pace; in this situation, one expects the compute
time to remain constant. Algorithmically, the number of CG iterations is
constant at 5, so we are good from that end. The lines in the plot are
arranged such that corresponding data points use the same local problem
size per processor, namely 131,072 elements (or approximately 3.5 million
degrees of freedom per core). The gray lines indicating ideal strong scaling
are by the same factor of 8 apart. The results show again that the scaling is
close to ideal. The parallel efficiency between the smallest and the largest
core counts is at around 75% for a local problem size of 750,000 degrees of
freedom per core, which takes 1.0s on 288 cores, 1.03s on 2304 cores, and 1.19s
on 18k cores. Once the absolute run time approaches the strong-scaling limit of
about 0.1 seconds identified above, the curves flatten out; individual solver
components such as a single V-cycle can still be scaled down to approximately
a tenth of this value.

As mentioned in the introduction, the matrix-free method reduces the memory
consumption of the data structures considerably. This not only speeds up the
matrix-vector products, but also allows much larger problems to be solved for a
given amount of memory, as shown in the figure below. The largest
computation shown in this picture involves 292 billion (@f$2.92 \cdot 10^{11}@f$)
degrees of freedom; with the same algorithms, computations
were also run involving up to 549 billion (2^39) DoFs.

<img src="https://www.dealii.org/images/steps/developer/step-37.scaling_size.png" alt="">

Finally, we note that while performing the large-scale tests shown above, the
multigrid algorithms in deal.II were improved along the way. In an earlier
version, some @p MPI_Allreduce operations (checking whether
all vector entries are zero) were done on each smoothing
step on every level. The plot below compares the old and the improved
implementation for computations with
@f$\mathcal Q_5@f$ elements:

<img src="https://www.dealii.org/images/steps/developer/step-37.scaling_oldnew.png" alt="">


<a name="step_37-Adaptivity"></a><h3> Adaptivity</h3>

When this program is run on an adaptively refined mesh, the multigrid
cycle will run with local smoothing and impose Dirichlet conditions along the
interfaces which differ in refinement level for smoothing through the
MatrixFreeOperators::Base class. Due to the way the degrees of freedom are
distributed over levels, relating the owner of the level cells to the owner of
the first descendant active cell, there can be an imbalance between different
processors in MPI, which limits the parallel scalability to some extent.

<a name="step_37-Possibilitiesforextensions"></a><h3> Possibilities for extensions</h3>

<a name="step_37-Kellyerrorestimator"></a><h4> Kelly error estimator </h4>

Since the code is ready for locally adaptive mesh refinement, one may want to
drive the refinement with the KellyErrorEstimator class. However, one needs to
be careful
with the ghost indices of parallel vectors.
In order to evaluate the jump terms in the error indicator, each MPI process
needs to know the locally relevant DoFs. However,
MatrixFree::initialize_dof_vector() initializes the vector with only a subset
of these.
The ghost indices made available in the vector are a tight set of only those indices
that are touched in the cell integrals (including constraint resolution), which
keeps the data exchange in the matrix-vector product cheap.
Consequently the solution vector as-is is
not suitable for the KellyErrorEstimator class.
The trick is to change the ghost part of the partition, for example using a
temporary vector and LinearAlgebra::distributed::Vector::copy_locally_owned_data_from()
as shown below.

@code
const IndexSet locally_relevant_dofs = DoFTools::extract_locally_relevant_dofs(dof_handler);
LinearAlgebra::distributed::Vector<double> copy_vec(solution);
solution.reinit(dof_handler.locally_owned_dofs(),
                locally_relevant_dofs,
                triangulation.get_mpi_communicator());
solution.copy_locally_owned_data_from(copy_vec);
constraints.distribute(solution);
solution.update_ghost_values();
@endcode

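With the re-partitioned vector in place, the error indicator can be evaluated
in the usual way. The following is a minimal sketch (not part of the tutorial
sources), assuming the standard @p triangulation, @p dof_handler and @p fe
members of this program:

@code
// Evaluate the Kelly indicator with the vector whose ghost range now covers
// all locally relevant DoFs.
Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
KellyErrorEstimator<dim>::estimate(dof_handler,
                                   QGauss<dim - 1>(fe.degree + 1),
                                   {},
                                   solution,
                                   estimated_error_per_cell);
@endcode
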
<a name="step_37-Sharedmemoryparallelization"></a><h4> Shared-memory parallelization</h4>

This program is parallelized with MPI only. As an alternative, the MatrixFree
loop can also be issued in hybrid mode, for example by using MPI to parallelize
over the nodes of a cluster and threads through the Threading Building Blocks
(TBB) within the
shared memory region of one node. To use this, one would need to both set the
number of threads in the MPI_InitFinalize data structure in the main function,
and set the MatrixFree::AdditionalData::tasks_parallel_scheme to
partition_color to actually do the loop in parallel. This use case is
discussed in @ref step_48 "step-48"; a sketch of the two settings is given
below.

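The following is a minimal sketch of the two settings just mentioned; it is not
taken from the tutorial sources, and the thread count chosen here is only one
possible option:

@code
// In main(): allow MPI_InitFinalize to use as many worker threads per MPI
// process as the hardware provides (third constructor argument).
Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv,
                                          numbers::invalid_unsigned_int);

// When setting up MatrixFree: enable the task-based cell loop.
typename MatrixFree<dim, double>::AdditionalData additional_data;
additional_data.tasks_parallel_scheme =
  MatrixFree<dim, double>::AdditionalData::partition_color;
@endcode
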
<a name="step_37-InhomogeneousDirichletboundaryconditions"></a><h4> Inhomogeneous Dirichlet boundary conditions </h4>

The presented program assumes homogeneous Dirichlet boundary conditions. When
going to non-homogeneous conditions, the situation is a bit more intricate. To
understand how to implement such a setting, let us first recall how these
arise in the mathematical formulation and how they are implemented in a
matrix-based variant. In essence, an inhomogeneous Dirichlet condition sets
some of the nodal values in the solution to given values rather than
determining them through the variational principles,
@f{eqnarray*}{
u_h(\mathbf{x}) = \sum_{i\in \mathcal N} \varphi_i(\mathbf{x}) u_i =
\sum_{i\in \mathcal N \setminus \mathcal N_D} \varphi_i(\mathbf{x}) u_i +
\sum_{i\in \mathcal N_D} \varphi_i(\mathbf{x}) g_i,
@f}
where @f$u_i@f$ denotes the nodal values of the solution and @f$\mathcal N@f$
denotes the set of all nodes. The set @f$\mathcal N_D\subset \mathcal N@f$ is
the subset of nodes that are subject to Dirichlet boundary conditions, where
the solution is forced to equal @f$u_i = g_i = g(\mathbf{x}_i)@f$ as the interpolation
of boundary values on the Dirichlet-constrained node points @f$i\in \mathcal
N_D@f$. We then insert this solution
representation into the weak form, e.g. the Laplacian shown above, and move
the known quantities to the right hand side:
@f{eqnarray*}{
(\nabla \varphi_i, \nabla u_h)_\Omega &=& (\varphi_i, f)_\Omega \quad \Rightarrow \\
\sum_{j\in \mathcal N \setminus \mathcal N_D}(\nabla \varphi_i,\nabla \varphi_j)_\Omega \, u_j &=&
(\varphi_i, f)_\Omega
-\sum_{j\in \mathcal N_D} (\nabla \varphi_i,\nabla\varphi_j)_\Omega\, g_j.
@f}
In this formula, the equations are tested for all basis functions @f$\varphi_i@f$
with @f$i\in \mathcal N \setminus \mathcal N_D@f$, i.e., those that are not
related to the nodes constrained by Dirichlet conditions.

In the implementation in deal.II, the integrals
@f$(\nabla \varphi_i,\nabla \varphi_j)_\Omega@f$ on the right hand side are
already contained in the local matrix contributions
we assemble on each cell. When using
AffineConstraints::distribute_local_to_global() as first described in the
@ref step_6 "step-6" and @ref step_7 "step-7" tutorial programs, we can account for the contribution of
inhomogeneous constraints <i>j</i> by multiplying the respective columns of the
local matrix by the inhomogeneities and subtracting the result from
the position <i>i</i> in the global right-hand-side vector, see also the
@ref constraints topic. In essence, we use some of the integrals that get
eliminated from the left hand side of the equation to finalize the right hand
side contribution. Similar mathematics are also involved when first writing
all entries into a left hand side matrix and then eliminating matrix rows and
columns by MatrixTools::apply_boundary_values(); a short sketch of the
constraint-based variant is given below.

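The following is a minimal sketch of this matrix-based treatment; it is not part
of the step-37 sources. The boundary-value function @p BoundaryValues is a
hypothetical placeholder, and @p cell_matrix, @p cell_rhs, @p local_dof_indices,
@p system_matrix and @p system_rhs are the usual names from the matrix-based
tutorial programs:

@code
// Put the inhomogeneous Dirichlet data (and hanging-node constraints) into
// the AffineConstraints object; distribute_local_to_global() then moves the
// known values to the right hand side during assembly.
AffineConstraints<double> constraints;
DoFTools::make_hanging_node_constraints(dof_handler, constraints);
VectorTools::interpolate_boundary_values(mapping,
                                         dof_handler,
                                         0,
                                         BoundaryValues<dim>(),
                                         constraints);
constraints.close();

// ... inside the usual assembly loop over cells:
constraints.distribute_local_to_global(cell_matrix,
                                       cell_rhs,
                                       local_dof_indices,
                                       system_matrix,
                                       system_rhs);
@endcode
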
In principle, the components that belong to the constrained degrees of freedom
could be eliminated from the linear system because they do not carry any
information. In practice, in deal.II we always keep the size of the linear
system the same to avoid handling two different numbering systems and to avoid
confusion about the two different index sets. In order to ensure that the
linear system does not get singular when not adding anything to constrained
rows, we then add dummy entries to the matrix diagonal that are otherwise
unrelated to the real entries.

In a matrix-free method, we need to take a different approach, since the @p
LaplaceOperator class represents the matrix-vector product of a
<b>homogeneous</b> operator (the left-hand side of the last formula). It does
not matter whether the AffineConstraints object passed to the
MatrixFree::reinit() call contains inhomogeneous constraints or not; the
cell loop only ever resolves the homogeneous part of the
constraints as long as it represents a <b>linear</b> operator.

In our matrix-free code, the right hand side computation where the
contribution of the inhomogeneous conditions ends up is completely decoupled
from the matrix operator and handled by a separate function. Thus, we need
to explicitly generate the data that enters the right hand side rather than
using a byproduct of the matrix assembly. Since we already know how to apply
the operator on a vector, we could try to use those facilities for a vector
where we only set the Dirichlet values:
@code
  // interpolate boundary values on vector solution
  std::map<types::global_dof_index, double> boundary_values;
  VectorTools::interpolate_boundary_values(mapping,
                                           dof_handler,
                                           0,
                                           BoundaryValueFunction<dim>(),
                                           boundary_values);
  for (const std::pair<const types::global_dof_index, double> &pair : boundary_values)
    if (solution.locally_owned_elements().is_element(pair.first))
      solution(pair.first) = pair.second;
@endcode
or, equivalently, if we had already filled the inhomogeneous constraints into
an AffineConstraints object,
@code
  solution = 0;
  constraints.distribute(solution);
@endcode

We could then pass the vector @p solution to the @p
LaplaceOperator::vmult_add() function and add the new contribution to the @p
system_rhs vector that gets filled in the @p LaplaceProblem::assemble_rhs()
function. However, this idea does not work because the
FEEvaluation::read_dof_values() call used inside the vmult() functions assumes
homogeneous values on all constraints (otherwise the operator would not be a
linear operator but an affine one). To also retrieve the values of the
inhomogeneities, we could select one of the following two strategies.

<a name="step_37-UseFEEvaluationread_dof_values_plaintoavoidresolvingconstraints"></a><h5> Use FEEvaluation::read_dof_values_plain() to avoid resolving constraints </h5>

The class FEEvaluation has a facility that addresses precisely this
requirement: for non-homogeneous Dirichlet values, we want to skip the
implicit imposition of homogeneous (Dirichlet) constraints upon reading the
data from the vector @p solution. For example, we could extend the @p
LaplaceProblem::assemble_rhs() function as follows, assuming the Dirichlet
values have been interpolated into
the object @p constraints:
@code
template <int dim>
void LaplaceProblem<dim>::assemble_rhs()
{
  solution = 0;
  constraints.distribute(solution);
  solution.update_ghost_values();
  system_rhs = 0;

  const Table<2, VectorizedArray<double>> &coefficient = system_matrix.get_coefficient();
  FEEvaluation<dim, degree_finite_element> phi(*system_matrix.get_matrix_free());
  for (unsigned int cell = 0;
       cell < system_matrix.get_matrix_free()->n_cell_batches();
       ++cell)
    {
      phi.reinit(cell);
      phi.read_dof_values_plain(solution);
      phi.evaluate(EvaluationFlags::gradients);
      for (const unsigned int q : phi.quadrature_point_indices())
        {
          phi.submit_gradient(-coefficient(cell, q) * phi.get_gradient(q), q);
          phi.submit_value(make_vectorized_array<double>(1.0), q);
        }
      phi.integrate(EvaluationFlags::values | EvaluationFlags::gradients);
      phi.distribute_local_to_global(system_rhs);
    }
  system_rhs.compress(VectorOperation::add);
}
@endcode

In this code, we replaced the FEEvaluation::read_dof_values() call for the
tentative solution vector by FEEvaluation::read_dof_values_plain(), which
ignores all constraints. Due to this setup, we must make sure that other
constraints, e.g. by hanging nodes, are correctly distributed to the input
vector already, as they are not resolved by
FEEvaluation::read_dof_values_plain() the way they would be by
FEEvaluation::read_dof_values(). Inside the loop, we then evaluate the
gradient of the tentative solution and queue the same
FEEvaluation::submit_gradient() call as in the @p LaplaceOperator class, but with
the sign switched, since we want to subtract the contribution of the Dirichlet
conditions from the right hand side vector according to the formula above. When
we invoke the FEEvaluation::integrate() call, we then request both the value
slot and the first derivative slot to account for both
terms added in the loop over quadrature points. Once the right hand side is
assembled, we solve the linear system for the homogeneous problem, say with a
vector @p solution_update, and afterwards we can
add @p solution_update to the @p solution vector that contains the final
(inhomogeneous) solution; a sketch of this driver is given below.

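As a rough sketch (not part of the tutorial sources), the driver for this
strategy could look as follows. The vector @p solution_update and the variant
of @p solve() that writes into it are assumptions for illustration:

@code
// assemble_rhs() from above interpolates the Dirichlet data into `solution`
// and builds the right hand side of the homogeneous problem.
assemble_rhs();

// Solve for the homogeneous correction and add it to the Dirichlet data.
solve(solution_update);       // hypothetical variant writing into solution_update
solution += solution_update;
solution.update_ghost_values();
@endcode
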
Note that the negative sign for the Laplacian alongside with a positive sign
for the forcing that we needed to build the right hand side is a more general
concept: We have implemented nothing else than Newton's method for nonlinear
equations, but applied to a linear system. We have used an initial guess for
the variable @p solution in terms of the Dirichlet boundary conditions and
computed a residual @f$r = f - Au_0@f$. The linear system was then solved as
@f$\Delta u = A^{-1} (f-Au_0)@f$ and we finally computed @f$u = u_0 + \Delta u@f$. For a
linear system, we obviously reach the exact solution after a single
iteration. If we wanted to extend the code to a nonlinear problem, we would
rename the @p assemble_rhs() function into a more descriptive name like @p
assemble_residual() that computes the (weak) form of the residual, whereas the
@p LaplaceOperator::apply_add() function would get the linearization of the
residual with respect to the solution variable.

<a name="step_37-UseLaplaceOperatorwithasecondAffineConstraintsobjectwithoutDirichletconditions"></a><h5> Use LaplaceOperator with a second AffineConstraints object without Dirichlet conditions </h5>

A second alternative to get the right hand side that re-uses the @p
LaplaceOperator::apply_add() function is to instead add a second LaplaceOperator
that skips Dirichlet constraints. To do this, we initialize a second MatrixFree
object which does not have any boundary value constraints. This @p matrix_free
object is then passed to a @p LaplaceOperator class instance @p
inhomogeneous_operator that is only used to create the right hand side:
@code
template <int dim>
void LaplaceProblem<dim>::assemble_rhs()
{
  system_rhs = 0;
  AffineConstraints<double> no_constraints;
  no_constraints.close();
  LaplaceOperator<dim, degree_finite_element, double> inhomogeneous_operator;

  typename MatrixFree<dim, double>::AdditionalData additional_data;
  additional_data.mapping_update_flags =
    (update_gradients | update_JxW_values | update_quadrature_points);
  std::shared_ptr<MatrixFree<dim, double>> matrix_free(
    new MatrixFree<dim, double>());
  matrix_free->reinit(dof_handler,
                      no_constraints,
                      QGauss<1>(fe.degree + 1),
                      additional_data);
  inhomogeneous_operator.initialize(matrix_free);

  solution = 0.0;
  constraints.distribute(solution);
  inhomogeneous_operator.evaluate_coefficient(Coefficient<dim>());
  inhomogeneous_operator.vmult(system_rhs, solution);
  system_rhs *= -1.0;

  FEEvaluation<dim, degree_finite_element> phi(
    *inhomogeneous_operator.get_matrix_free());
  for (unsigned int cell = 0;
       cell < inhomogeneous_operator.get_matrix_free()->n_cell_batches();
       ++cell)
    {
      phi.reinit(cell);
      for (const unsigned int q : phi.quadrature_point_indices())
        phi.submit_value(make_vectorized_array<double>(1.0), q);
      phi.integrate(EvaluationFlags::values);
      phi.distribute_local_to_global(system_rhs);
    }
  system_rhs.compress(VectorOperation::add);
}
@endcode

Note that one does not have to use the hand-written @p LaplaceOperator class of
this tutorial for this purpose, but could equally well use the
MatrixFreeOperators::LaplaceOperator class that is provided by the library,
as long as it is initialized with the MatrixFree object that skips the
Dirichlet constraints.

<a name="step_37-Furtherperformanceimprovements"></a><h4> Further performance improvements </h4>

While the performance achieved in this tutorial program is already very good,
there are a few things that could be improved further. On the
one hand, increasing the polynomial degree to three or four will further
improve the time per unknown. (Even higher degrees typically get slower again,
because the multigrid iteration counts increase slightly with the chosen
simple smoother. One could then use hybrid multigrid algorithms with
polynomial coarsening through MGTransferGlobalCoarsening, to reduce the impact
of the coarser levels on the communication latency.) A more significant
improvement can be obtained by data-locality optimizations. The class
PreconditionChebyshev, when used as a smoother or
preconditioner as in the present class, can overlap the vector operations with
the matrix-vector product. As the former are typically constrained by memory
bandwidth, reducing the number of loads helps to achieve this goal. The two
ingredients to achieve this are
<ol>
<li> to provide the LaplaceOperator class of this tutorial program with a `vmult`
function that takes two `std::function` objects, which can be passed on to
MatrixFree::cell_loop with the respective signature (PreconditionChebyshev
will then pick up this interface and schedule its vector operations; see the
sketch after this list), and </li>
<li> to compute a numbering of the degrees of freedom that makes best use of
data locality, e.g. the one provided by
DoFRenumbering::matrix_free_data_locality(). </li>
</ol>
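
The following is a rough sketch of these two ingredients; it is not part of the
tutorial sources, and the exact signature of the added member function is an
assumption modeled on the MatrixFree::cell_loop() overload that accepts pre-
and post-operations on vector sub-ranges. The function would also need to be
declared in the LaplaceOperator class definition.

@code
// (1) A vmult() variant of the tutorial's LaplaceOperator that forwards two
// functors to MatrixFree::cell_loop(); PreconditionChebyshev can use such an
// interface to interleave its vector updates with the cell loop.
template <int dim, int fe_degree, typename number>
void LaplaceOperator<dim, fe_degree, number>::vmult(
  LinearAlgebra::distributed::Vector<number>       &dst,
  const LinearAlgebra::distributed::Vector<number> &src,
  const std::function<void(const unsigned int, const unsigned int)>
    &operation_before_loop,
  const std::function<void(const unsigned int, const unsigned int)>
    &operation_after_loop) const
{
  this->data->cell_loop(&LaplaceOperator::local_apply,
                        this,
                        dst,
                        src,
                        operation_before_loop,
                        operation_after_loop);
}

// (2) Renumber the unknowns for data locality once a MatrixFree object is
// available, then rebuild the MatrixFree data with the new numbering.
DoFRenumbering::matrix_free_data_locality(dof_handler,
                                          *system_matrix.get_matrix_free());
@endcode
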
 *
 *
<a name="step_37-PlainProg"></a>
<h1> The plain program</h1>
@include "step-37.cc"
*/