757 *
return 1. / (0.05 + 2. * p.square());
763 *
double Coefficient<dim>::value(
const Point<dim> & p,
764 *
const unsigned int component)
const
766 *
return value<double>(p, component);
773 * <a name=
"Matrixfreeimplementation"></a>
774 * <h3>Matrix-
free implementation</h3>
778 * The following
class, called <code>LaplaceOperator</code>, implements the
779 * differential
operator. For all practical purposes, it is a
matrix, i.e.,
780 * you can ask it
for its size (member functions <code>m(), n()</code>) and
781 * you can apply it to a vector (the <code>vmult()</code> function). The
782 * difference to a real matrix of course lies in the fact that this class
783 * does not actually store the <i>elements</i> of the matrix, but only knows
784 * how to compute the action of the operator when applied to a vector.
788 * The infrastructure describing the matrix size, the initialization from a
789 *
MatrixFree object, and the various interfaces to matrix-vector products
790 * through vmult() and Tvmult() methods, is provided by the class
791 * MatrixFreeOperator::Base from which this class derives. The
792 * LaplaceOperator class defined here only has to provide a few interfaces,
793 * namely the actual action of the operator through the apply_add() method
794 * that gets used in the vmult() functions, and a method to compute the
795 * diagonal entries of the underlying matrix. We need the diagonal for the
796 * definition of the multigrid smoother. Since we consider a problem with
797 * variable coefficient, we further implement a method that can fill the
798 * coefficient values.
802 * Note that the file <code>include/deal.II/matrix_free/operators.h</code>
803 * already contains an implementation of the Laplacian through the class
805 * operator is re-implemented in this tutorial program, explaining the
806 * ingredients and concepts used there.
810 * This program makes use of the data cache for finite element operator
811 * application that is integrated in deal.II. This data cache class is
812 * called
MatrixFree. It contains mapping information (Jacobians) and index
813 * relations between local and global degrees of freedom. It also contains
814 * constraints like the ones from hanging nodes or Dirichlet boundary
815 * conditions. Moreover, it can issue a loop over all cells in %
parallel,
816 * making sure that only cells are worked on that do not share any degree of
817 * freedom (this makes the loop thread-safe when writing into destination
818 * vectors). This is a more advanced strategy compared to the
WorkStream
819 * class described in the @ref threads module. Of course, to not destroy
820 * thread-safety, we have to be careful when writing into class-global
825 * The class implementing the Laplace operator has three template arguments,
826 * one for the dimension (as many deal.II classes carry), one for the degree
827 * of the finite element (which we need to enable efficient computations
828 * through the
FEEvaluation class), and one for the underlying scalar
829 * type. We want to use <code>
double</code>
numbers (i.e.,
double precision,
830 * 64-bit floating point) for the final matrix, but floats (single
831 * precision, 32-bit floating point
numbers) for the multigrid
level
832 * matrices (as that is only a preconditioner, and floats can be processed
833 * twice as fast). The class
FEEvaluation also takes a template argument for
834 * the number of quadrature points in one dimension. In the code below, we
835 * hard-code it to <code>fe_degree+1</code>. If we wanted to change it
836 * independently of the polynomial degree, we would need to add a template
841 * As a sidenote, if we implemented several different operations on the same
842 * grid and degrees of freedom (like a mass matrix and a Laplace matrix), we
843 * would define two classes like the current one for each of the operators
845 * refer to the same
MatrixFree data cache from the general problem
847 * only provide a minimal set of functions. This concept allows for writing
848 * complex application codes with many matrix-free operations.
853 * requires care: Here, we use the deal.II table class which is prepared to
854 * hold the data with correct alignment. However, storing e.g. an
856 * vectorization: A certain alignment of the data with the memory address
857 * boundaries is required (essentially, a
VectorizedArray that is 32 bytes
858 *
long in case of AVX needs to start at a memory address that is divisible
859 * by 32). The table class (as well as the
AlignedVector class it is based
860 * on) makes sure that this alignment is respected, whereas
std::vector does
861 * not in general, which may lead to segmentation faults at strange places
862 * for some systems or suboptimal performance for other systems.
865 * template <
int dim,
int fe_degree, typename number>
866 * class LaplaceOperator
871 *
using value_type = number;
875 *
void clear()
override;
877 *
void evaluate_coefficient(
const Coefficient<dim> &coefficient_function);
882 *
virtual void apply_add(
890 *
const std::pair<unsigned int, unsigned int> &cell_range)
const;
892 *
void local_compute_diagonal(
895 *
const unsigned int & dummy,
896 *
const std::pair<unsigned int, unsigned int> &cell_range)
const;
905 * This is the constructor of the @p LaplaceOperator
class. All it does is
906 * to
call the
default constructor of the base
class
908 *
class that asserts that this class is not accessed after going out of scope
909 *
e.g. in a preconditioner.
912 *
template <
int dim,
int fe_degree,
typename number>
913 * LaplaceOperator<dim, fe_degree, number>::LaplaceOperator()
920 *
template <
int dim,
int fe_degree,
typename number>
921 *
void LaplaceOperator<dim, fe_degree, number>::clear()
923 * coefficient.
reinit(0, 0);
933 * <a name=
"Computationofcoefficient"></a>
934 * <h4>Computation of coefficient</h4>
938 * To initialize the coefficient, we directly give it the Coefficient
class
939 * defined above and then select the method
940 * <code>coefficient_function.value</code> with
vectorized number (which the
941 * compiler can deduce from the point data type). The use of the
942 *
FEEvaluation class (and its
template arguments) will be explained below.
945 * template <int dim, int fe_degree, typename number>
946 *
void LaplaceOperator<dim, fe_degree, number>::evaluate_coefficient(
947 *
const Coefficient<dim> &coefficient_function)
949 *
const unsigned int n_cells = this->data->n_cell_batches();
952 * coefficient.reinit(n_cells, phi.n_q_points);
953 *
for (
unsigned int cell = 0; cell <
n_cells; ++cell)
956 *
for (
unsigned int q = 0; q < phi.n_q_points; ++q)
957 * coefficient(cell, q) =
958 * coefficient_function.value(phi.quadrature_point(q));
967 * <a name=
"LocalevaluationofLaplaceoperator"></a>
968 * <h4>Local evaluation of Laplace
operator</h4>
972 * Here comes the main function of
this class, the evaluation of the
973 *
matrix-vector product (or, in general, a finite element
operator
974 * evaluation). This is done in a function that takes exactly four
975 * arguments, the
MatrixFree object, the destination and source vectors, and
976 * a range of cells that are to be worked on. The method
977 * <code>cell_loop</code> in the
MatrixFree class will internally
call this
978 * function with some range of cells that is obtained by checking which
979 * cells are possible to work on simultaneously so that write operations
do
980 * not cause any race condition. Note that the cell range used in the
loop
981 * is not directly the number of (active) cells in the current mesh, but
982 * rather a collection of batches of cells. In other word,
"cell" may be
984 * cells together. This means that in the
loop over quadrature points we are
985 * actually seeing a
group of quadrature points of several cells as one
986 * block. This is done to enable a higher degree of vectorization. The
987 * number of such
"cells" or
"cell batches" is stored in
MatrixFree and can
989 * cell iterators, in
this class all cells are laid out in a plain array
990 * with no direct knowledge of
level or neighborship relations, which makes
991 * it possible to
index the cells by
unsigned integers.
995 * The implementation of the Laplace
operator is quite simple: First, we
996 * need to create an
object FEEvaluation that contains the computational
997 * kernels and has data fields to store temporary results (
e.g. gradients
998 * evaluated on all quadrature points on a collection of a few cells). Note
999 * that temporary results
do not use a lot of memory, and since we specify
1000 *
template arguments with the element order, the data is stored on the
1001 * stack (without expensive memory allocation). Usually, one only needs to
1002 *
set two
template arguments, the dimension as a
first argument and the
1003 * degree of the finite element as the
second argument (
this is
equal to the
1004 * number of degrees of freedom per dimension minus one
for FE_Q
1005 * elements). However, here we also want to be able to use
float numbers for
1006 * the multigrid preconditioner, which is the last (fifth)
template
1007 * argument. Therefore, we cannot rely on the
default template arguments and
1008 * must also fill the third and fourth field, consequently. The third
1009 * argument specifies the number of quadrature points per direction and has
1010 * a
default value equal to the degree of the element plus one. The fourth
1011 * argument sets the number of components (one can also evaluate
1012 * vector-valued functions in systems of PDEs, but the
default is a scalar
1013 * element), and
finally the last argument sets the number type.
1017 * Next, we
loop over the given cell range and then we
continue with the
1018 * actual implementation: <ol> <li>Tell the
FEEvaluation object the (macro)
1019 * cell we want to work on. <li>Read in the
values of the source vectors
1020 * (@p read_dof_values), including the resolution of constraints. This
1021 * stores @f$u_\mathrm{cell}@f$ as described in the introduction. <li>Compute
1022 * the unit-cell
gradient (the evaluation of finite element
1024 *
gradient computations, it uses a unified
interface to all kinds of
1025 * derivatives of order between zero and two. We only want
gradients, no
1026 *
values and no
second derivatives, so we
set the function arguments to
1028 * (
first slot). There is also a third slot
for the Hessian which is
1029 *
false by
default, so it needs not be given. Note that the
FEEvaluation
1030 *
class internally evaluates shape functions in an efficient way where one
1031 * dimension is worked on at a time (
using the tensor product form of shape
1032 * functions and quadrature points as mentioned in the introduction). This
1033 * gives complexity
equal to @f$\mathcal O(d^2 (p+1)^{
d+1})@f$
for polynomial
1034 * degree @f$p@f$ in @f$d@f$ dimensions, compared to the naive approach with loops
1035 * over all local degrees of freedom and quadrature points that is used in
1036 *
FEValues and costs @f$\mathcal O(d (p+1)^{2
d})@f$. <li>Next comes the
1037 * application of the Jacobian transformation, the multiplication by the
1038 * variable coefficient and the quadrature weight.
FEEvaluation has an
1039 * access function @p get_gradient that applies the Jacobian and returns the
1040 * gradient in real space. Then, we just need to multiply by the (scalar)
1041 * coefficient, and let the function @p submit_gradient apply the
second
1042 * Jacobian (
for the test function) and the quadrature weight and Jacobian
1044 * data field as where it is read from in @p get_gradient. Therefore, you
1045 * need to make sure to not read from the same quadrature
point again after
1046 * having called @p submit_gradient on that particular quadrature
point. In
1047 *
general, it is a good idea to
copy the result of @p get_gradient when it
1048 * is used more often than once. <li>Next follows the summation over
1049 * quadrature points
for all test
functions that corresponds to the actual
1050 * integration step. For the Laplace
operator, we just multiply by the
1051 *
gradient, so we
call the integrate function with the respective argument
1052 *
set. If you have an equation where you test by both the
values of the
1054 * to
true. Calling
first the integrate function
for values and then
1056 *
call will internally overwrite the results from the
first call. Note that
1057 * there is no function argument
for the
second derivative
for integrate
1058 * step. <li>Eventually, the local contributions in the vector
1059 * @f$v_\mathrm{cell}@f$ as mentioned in the introduction need to be added into
1060 * the result vector (and constraints are applied). This is done with a
call
1061 * to @p distribute_local_to_global, the same name as the corresponding
1063 * in the
FEEvaluation object, as are the indices between local and global
1064 * degrees of freedom). </ol>
1067 *
template <
int dim,
int fe_degree,
typename number>
1068 *
void LaplaceOperator<dim, fe_degree, number>::local_apply(
1072 *
const std::pair<unsigned int, unsigned int> & cell_range)
const
1076 *
for (
unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
1082 * phi.read_dof_values(src);
1084 *
for (
unsigned int q = 0; q < phi.n_q_points; ++q)
1085 * phi.submit_gradient(coefficient(cell, q) * phi.get_gradient(q), q);
1087 * phi.distribute_local_to_global(dst);
1095 * This function implements the
loop over all cells
for the
1096 * Base::apply_add() interface. This is done with the @p cell_loop of the
1097 *
MatrixFree class, which takes the operator() of this class with arguments
1098 *
MatrixFree, OutVector, InVector, cell_range. When working with MPI
1099 * parallelization (but no threading) as is done in this tutorial program,
1100 * the cell loop corresponds to the following three lines of code:
1104 * <div class=CodeFragmentInTutorialComment>
1106 * src.update_ghost_values();
1107 * local_apply(*this->data, dst, src,
std::make_pair(0U,
1108 * data.n_cell_batches()));
1115 * Here, the two calls update_ghost_values() and compress() perform the data
1116 * exchange on processor boundaries for MPI, once for the source vector
1117 * where we need to read from entries owned by remote processors, and once
1118 * for the destination vector where we have accumulated parts of the
1119 * residuals that need to be added to the respective entry of the owner
1120 * processor. However,
MatrixFree::cell_loop does not only abstract away
1121 * those two calls, but also performs some additional optimizations. On the
1122 * one hand, it will split the update_ghost_values() and compress() calls in
1123 * a way to allow for overlapping communication and computation. The
1124 * local_apply function is then called with three cell ranges representing
1125 * partitions of the cell range from 0 to
MatrixFree::n_cell_batches(). On
1126 * the other hand, cell_loop also supports thread parallelism in which case
1127 * the cell ranges are split into smaller chunks and scheduled in an
1128 * advanced way that avoids access to the same vector entry by several
1129 * threads. That feature is explained in @ref step_48 "step-48".
1133 * Note that after the cell loop, the constrained degrees of freedom need to
1134 * be touched once more for sensible vmult() operators: Since the assembly
1135 * loop automatically resolves constraints (just as the
1137 * compute any contribution for constrained degrees of freedom, leaving the
1138 * respective entries zero. This would represent a matrix that had empty
1139 * rows and columns for constrained degrees of freedom. However, iterative
1140 * solvers like CG only work for non-singular matrices. The easiest way to
1141 * do that is to set the sub-block of the matrix that corresponds to
1142 * constrained DoFs to an
identity matrix, in which case application of the
1143 * matrix would simply copy the elements of the right hand side vector into
1144 * the left hand side. Fortunately, the vmult() implementations
1146 * apply_add() function, so we do not need to take further action here.
1151 * with MPI, there is one aspect to be careful about — the indexing
1152 * used for accessing the vector. For performance reasons,
MatrixFree and
1153 *
FEEvaluation are designed to access vectors in MPI-local index space also
1154 * when working with multiple processors. Working in local index space means
1155 * that no index translation needs to be performed at the place the vector
1156 * access happens, apart from the unavoidable indirect addressing. However,
1157 * local index spaces are ambiguous: While it is standard convention to
1158 * access the locally owned range of a vector with indices between 0 and the
1159 * local size, the numbering is not so clear for the ghosted entries and
1160 * somewhat arbitrary. For the matrix-vector product, only the indices
1161 * appearing on locally owned cells (plus those referenced via hanging node
1162 * constraints) are necessary. However, in deal.II we often set all the
1163 * degrees of freedom on ghosted elements as ghosted vector entries, called
1165 * @ref GlossLocallyRelevantDof "locally relevant DoFs described in the glossary".
1166 * In that case, the MPI-local index of a ghosted vector entry can in
1167 * general be different in the two possible ghost sets, despite referring
1168 * to the same global index. To avoid problems,
FEEvaluation checks that
1169 * the partitioning of the vector used for the matrix-vector product does
1170 * indeed match with the partitioning of the indices in
MatrixFree by a
1174 * mechanism to fit the ghost set to the correct layout. This happens in the
1175 * ghost region of the vector, so keep in mind that the ghost region might
1176 * be modified in both the destination and source vector after a call to a
1177 * vmult() method. This is legitimate because the ghost region of a
1178 * distributed deal.II vector is a mutable section and filled on
1179 * demand. Vectors used in matrix-vector products must not be ghosted upon
1180 * entry of vmult() functions, so no information gets lost.
1183 * template <
int dim,
int fe_degree, typename number>
1184 *
void LaplaceOperator<dim, fe_degree, number>::apply_add(
1188 * this->data->cell_loop(&LaplaceOperator::local_apply,
this, dst, src);
1195 * The following function implements the computation of the
diagonal of the
1197 * turns out to be more complicated than evaluating the
1198 *
operator. Fundamentally, we could obtain a
matrix representation of the
1199 *
operator by applying the
operator on <i>all</i> unit vectors. Of course,
1200 * that would be very inefficient since we would need to perform <i>n</i>
1201 *
operator evaluations to retrieve the whole
matrix. Furthermore,
this
1202 * approach would completely ignore the
matrix sparsity. On an individual
1203 * cell, however,
this is the way to go and actually not that inefficient as
1204 * there usually is a coupling between all degrees of freedom
inside the
1210 * layout. This vector is encapsulated in a member called
1211 * inverse_diagonal_entries of type
DiagonalMatrix in the base
class
1213 * need to initialize and then get the vector representing the
diagonal
1214 * entries in the
matrix. As to the actual
diagonal computation, we again
1215 * use the cell_loop infrastructure of
MatrixFree to invoke a local worker
1216 * routine called local_compute_diagonal(). Since we will only write into a
1217 * vector but not have any source vector, we put a dummy argument of type
1218 * <tt>
unsigned int</tt> in place of the source vector to confirm with the
1219 * cell_loop interface. After the
loop, we need to
set the vector entries
1220 * subject to Dirichlet boundary conditions to one (either those on the
1222 * the indices at the interface between different grid levels in adaptive
1223 * multigrid). This is done through the function
1225 * with the setting in the matrix-vector product provided by the Base
1226 * operator. Finally, we need to
invert the diagonal entries which is the
1227 * form required by the Chebyshev smoother based on the Jacobi iteration. In
1228 * the loop, we assert that all entries are non-zero, because they should
1229 * either have obtained a positive contribution from integrals or be
1230 * constrained and treated by @p set_constrained_entries_to_one() following
1234 * template <
int dim,
int fe_degree, typename number>
1235 *
void LaplaceOperator<dim, fe_degree, number>::compute_diagonal()
1241 * this->data->initialize_dof_vector(inverse_diagonal);
1242 *
unsigned int dummy = 0;
1243 * this->data->cell_loop(&LaplaceOperator::local_compute_diagonal,
1253 * ExcMessage(
"No diagonal entry in a positive definite operator "
1254 *
"should be zero"));
1265 * columns in the local
matrix and putting the entry 1 in the <i>i</i>th
1266 * slot and a zero entry in all other slots, i.e., we apply the cell-wise
1267 * differential
operator on one unit vector at a time. The inner part
1269 * FEEvalution::integrate, is exactly the same as in the local_apply
1270 * function. Afterwards, we pick out the <i>i</i>th entry of the local
1271 * result and put it to a temporary storage (as we overwrite all entries in
1273 * iteration). Finally, the temporary storage is written to the destination
1275 *
FEEvaluation::submit_dof_value() to read and write to the data field that
1276 *
FEEvaluation uses for the integration on the one hand and writes into the
1277 * global vector on the other hand.
1281 * Given that we are only interested in the matrix diagonal, we simply throw
1282 * away all other entries of the local matrix that have been computed along
1283 * the way. While it might seem wasteful to compute the complete cell matrix
1284 * and then throw away everything but the diagonal, the integration are so
1285 * efficient that the computation does not take too much time. Note that the
1286 * complexity of operator evaluation per element is @f$\mathcal
1287 * O((p+1)^{
d+1})@f$
for polynomial degree @f$k@f$, so computing the whole matrix
1288 * costs us @f$\mathcal O((p+1)^{2
d+1})@f$ operations, not too far away from
1289 * @f$\mathcal O((p+1)^{2
d})@f$ complexity
for computing the diagonal with
1291 * vectorization and other optimizations, the diagonal computation with
this
1292 * function is actually the fastest (simple) variant. (It would be possible
1293 * to compute the
diagonal with
sum factorization techniques in @f$\mathcal
1294 * O((p+1)^{
d+1})@f$ operations involving specifically adapted
1295 * kernels—but since such kernels are only useful in that particular
1296 * context and the
diagonal computation is typically not on the critical
1297 * path, they have not been implemented in deal.II.)
1301 * Note that the code that calls distribute_local_to_global on the vector to
1302 * accumulate the
diagonal entries into the global
matrix has some
1303 * limitations. For operators with hanging node constraints that distribute
1304 * an integral contribution of a constrained DoF to several other entries
1305 *
inside the distribute_local_to_global
call, the vector
interface used
1306 * here does not exactly compute the
diagonal entries, but lumps some
1309 * result is correct up to discretization accuracy as explained in <a
1310 * href=
"http://dx.doi.org/10.4208/cicp.101214.021015a">Kormann (2016),
1311 * section 5.3</a>, but not mathematically
equal. In
this tutorial program,
1312 * no harm can happen because the
diagonal is only used
for the multigrid
1313 *
level matrices where no hanging node constraints appear.
1316 *
template <
int dim,
int fe_degree,
typename number>
1317 *
void LaplaceOperator<dim, fe_degree, number>::local_compute_diagonal(
1320 *
const unsigned int &,
1321 *
const std::pair<unsigned int, unsigned int> &cell_range)
const
1327 *
for (
unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
1333 *
for (
unsigned int i = 0; i < phi.dofs_per_cell; ++i)
1335 *
for (
unsigned int j = 0; j < phi.dofs_per_cell; ++j)
1337 * phi.submit_dof_value(make_vectorized_array<number>(1.), i);
1340 *
for (
unsigned int q = 0; q < phi.n_q_points; ++q)
1341 * phi.submit_gradient(coefficient(cell, q) * phi.get_gradient(q),
1344 *
diagonal[i] = phi.get_dof_value(i);
1346 *
for (
unsigned int i = 0; i < phi.dofs_per_cell; ++i)
1347 * phi.submit_dof_value(diagonal[i], i);
1348 * phi.distribute_local_to_global(dst);
1357 * <a name=
"LaplaceProblemclass"></a>
1358 * <h3>LaplaceProblem
class</h3>
1362 * This
class is based on the one in @ref step_16
"step-16". However, we replaced the
1364 * that we can also skip the sparsity patterns. Notice that we define the
1365 * LaplaceOperator
class with the degree of finite element as template
1366 * argument (the value is defined at the top of the file), and that we use
1371 * The
class also has a member variable to keep track of all the detailed
1372 * timings
for setting up the entire chain of data before we actually go
1373 * about solving the problem. In addition, there is an output stream (that
1374 * is disabled by
default) that can be used to output details
for the
1375 * individual setup operations instead of the summary only that is printed
1380 * Since
this program is designed to be used with MPI, we also provide the
1381 * usual @p pcout output stream that only prints the information of the
1382 * processor with MPI rank 0. The grid used
for this programs can either be
1383 * a distributed
triangulation based on p4est (in
case deal.II is configured
1384 * to use p4est), otherwise it is a
serial grid that only runs without MPI.
1387 *
template <
int dim>
1388 *
class LaplaceProblem
1395 *
void setup_system();
1396 *
void assemble_rhs();
1398 *
void output_results(
const unsigned int cycle)
const;
1400 * #ifdef DEAL_II_WITH_P4EST
1412 *
using SystemMatrixType =
1413 * LaplaceOperator<dim, degree_finite_element, double>;
1414 * SystemMatrixType system_matrix;
1417 *
using LevelMatrixType = LaplaceOperator<dim, degree_finite_element, float>;
1423 *
double setup_time;
1432 * When we initialize the finite element, we of course have to use the
1433 * degree specified at the top of the file as well (otherwise, an exception
1434 * will be thrown at some point, since the computational kernel defined in
1435 * the templated LaplaceOperator
class and the information from the finite
1436 * element read out by
MatrixFree will not match). The constructor of the
1438 * conform to the 2:1 cell balance over
vertices, which is needed
for the
1439 * convergence of the geometric multigrid routines. For the distributed
1440 * grid, we also need to specifically enable the multigrid hierarchy.
1443 *
template <
int dim>
1444 * LaplaceProblem<dim>::LaplaceProblem()
1445 * #ifdef DEAL_II_WITH_P4EST
1449 * dim>::construct_multigrid_hierarchy)
1453 * , fe(degree_finite_element)
1459 * The LaplaceProblem
class holds an additional output stream that
1460 * collects detailed timings about the setup phase. This stream, called
1461 * time_details, is disabled by
default through the @p
false argument
1462 * specified here. For detailed timings, removing the @p
false argument
1463 * prints all the details.
1466 * , time_details(std::cout,
1476 * <a name=
"LaplaceProblemsetup_system"></a>
1477 * <h4>LaplaceProblem::setup_system</h4>
1481 * The setup stage is in analogy to @ref step_16
"step-16" with relevant changes due to the
1483 * including the degrees of freedom
for the multigrid levels, and to
1484 * initialize constraints from hanging nodes and homogeneous Dirichlet
1485 * conditions. Since we intend to use
this programs in %
parallel with MPI,
1486 * we need to make sure that the constraints get to know the locally
1487 * relevant degrees of freedom, otherwise the storage would explode when
1488 *
using more than a few hundred millions of degrees of freedom, see
1489 * @ref step_40
"step-40".
1493 * Once we have created the multigrid dof_handler and the constraints, we
1495 * each
level of the multigrid scheme. The main action is to
set up the
1496 * <code>
MatrixFree </code> instance
for the problem. The base
class of the
1498 * initialized with a shared pointer to
MatrixFree object. This way, we can
1499 * simply create it here and then pass it on to the system
matrix and
level
1500 * matrices, respectively. For setting up
MatrixFree, we need to activate
1501 * the update flag in the AdditionalData field of
MatrixFree that enables
1502 * the storage of quadrature
point coordinates in real space (by
default, it
1503 * only caches data
for gradients (inverse transposed Jacobians) and JxW
1504 * values). Note that
if we
call the
reinit function without specifying the
1505 *
level (i.e., giving <code>
level = numbers::invalid_unsigned_int</code>),
1506 *
MatrixFree constructs a
loop over the active cells. In
this tutorial, we
1507 *
do not use threads in addition to MPI, which is why we explicitly disable
1510 * and vectors are initialized as explained above.
1513 *
template <
int dim>
1514 *
void LaplaceProblem<dim>::setup_system()
1519 * system_matrix.clear();
1520 * mg_matrices.clear_elements();
1522 * dof_handler.distribute_dofs(fe);
1523 * dof_handler.distribute_mg_dofs();
1525 * pcout <<
"Number of degrees of freedom: " << dof_handler.n_dofs()
1528 *
const IndexSet locally_relevant_dofs =
1531 * constraints.clear();
1532 * constraints.reinit(locally_relevant_dofs);
1536 * constraints.close();
1538 * time_details <<
"Distribute DoFs & B.C. (CPU/wall) " << time.
cpu_time()
1539 * <<
"s/" << time.
wall_time() <<
's' << std::endl;
1548 * std::shared_ptr<MatrixFree<dim, double>> system_mf_storage(
1550 * system_mf_storage->reinit(mapping,
1555 * system_matrix.initialize(system_mf_storage);
1558 * system_matrix.evaluate_coefficient(Coefficient<dim>());
1560 * system_matrix.initialize_dof_vector(solution);
1561 * system_matrix.initialize_dof_vector(system_rhs);
1564 * time_details <<
"Setup matrix-free system (CPU/wall) " << time.
cpu_time()
1565 * <<
"s/" << time.
wall_time() <<
's' << std::endl;
1570 * Next, initialize the matrices
for the multigrid method on all the
1572 * the indices subject to boundary conditions as well as the indices on
1573 * edges between different refinement levels as described in the @ref step_16
"step-16"
1574 * tutorial program. We then go through the levels of the mesh and
1575 * construct the constraints and matrices on each
level. These follow
1576 * closely the construction of the system
matrix on the original mesh,
1577 * except the slight difference in naming when accessing information on
1578 * the levels rather than the active cells.
1581 *
const unsigned int nlevels =
triangulation.n_global_levels();
1582 * mg_matrices.resize(0, nlevels - 1);
1584 *
const std::set<types::boundary_id> dirichlet_boundary_ids = {0};
1585 * mg_constrained_dofs.initialize(dof_handler);
1586 * mg_constrained_dofs.make_zero_boundary_constraints(dof_handler,
1587 * dirichlet_boundary_ids);
1594 * level_constraints.
reinit(relevant_dofs);
1596 * mg_constrained_dofs.get_boundary_indices(
level));
1597 * level_constraints.
close();
1605 * std::shared_ptr<MatrixFree<dim, float>> mg_mf_storage_level(
1607 * mg_mf_storage_level->reinit(mapping,
1609 * level_constraints,
1613 * mg_matrices[
level].initialize(mg_mf_storage_level,
1614 * mg_constrained_dofs,
1616 * mg_matrices[
level].evaluate_coefficient(Coefficient<dim>());
1619 * time_details <<
"Setup matrix-free levels (CPU/wall) " << time.
cpu_time()
1620 * <<
"s/" << time.
wall_time() <<
's' << std::endl;
1628 * <a name=
"LaplaceProblemassemble_rhs"></a>
1629 * <h4>LaplaceProblem::assemble_rhs</h4>
1633 * The
assemble function is very simple since all we have to
do is to
1635 * cached in the
MatrixFree class, which we query from
1638 * alternative), we must not forget to
call compress() at the end of the
1639 * assembly to send all the contributions of the right hand side to the
1640 * owner of the respective degree of freedom.
1643 * template <
int dim>
1644 *
void LaplaceProblem<dim>::assemble_rhs()
1650 * *system_matrix.get_matrix_free());
1651 *
for (
unsigned int cell = 0;
1652 * cell < system_matrix.get_matrix_free()->n_cell_batches();
1656 *
for (
unsigned int q = 0; q < phi.n_q_points; ++q)
1657 * phi.submit_value(make_vectorized_array<double>(1.0), q);
1659 * phi.distribute_local_to_global(system_rhs);
1664 * time_details <<
"Assemble right hand side (CPU/wall) " << time.
cpu_time()
1665 * <<
"s/" << time.
wall_time() <<
's' << std::endl;
1673 * <a name=
"LaplaceProblemsolve"></a>
1674 * <h4>LaplaceProblem::solve</h4>
1678 * The solution process is similar as in @ref step_16
"step-16". We start with the setup of
1681 * interpolation between the grid levels with the same fast
sum
1682 * factorization kernels that get also used in
FEEvaluation.
1685 *
template <
int dim>
1686 *
void LaplaceProblem<dim>::solve()
1690 * mg_transfer.build(dof_handler);
1692 * time_details <<
"MG build transfer time (CPU/wall) " << time.
cpu_time()
1698 * As a smoother,
this tutorial program uses a Chebyshev iteration instead
1699 * of SOR in @ref step_16
"step-16". (SOR would be very difficult to implement because we
1700 *
do not have the
matrix elements available explicitly, and it is
1701 * difficult to make it work efficiently in %
parallel.) The smoother is
1702 * initialized with our
level matrices and the mandatory additional data
1703 *
for the Chebyshev smoother. We use a relatively high degree here (5),
1704 * since
matrix-vector products are comparably cheap. We choose to smooth
1706 * in the smoother where @f$\hat{\
lambda}_{\
max}@f$ is an estimate of the
1707 * largest eigenvalue (the factor 1.2 is applied inside
1709 * Chebyshev initialization performs a few steps of a CG algorithm
1710 * without preconditioner. Since the highest eigenvalue is usually the
1711 * easiest one to find and a rough estimate is enough, we choose 10
1712 * iterations. Finally, we also
set the inner preconditioner type in the
1713 * Chebyshev method which is a Jacobi iteration. This is represented by
1715 * by our LaplaceOperator
class.
1719 * On
level zero, we initialize the smoother differently because we want
1721 * allows the user to
switch to solver mode where the number of iterations
1722 * is internally chosen to the correct
value. In the additional data
1723 * object,
this setting is activated by choosing the polynomial degree to
1726 *
matrix. The number of steps in the Chebyshev smoother are chosen such
1727 * that the Chebyshev convergence estimates guarantee to
reduce the
1728 * residual by the number specified in the variable @p
1729 * smoothing_range. Note that
for solving, @p smoothing_range is a
1730 * relative tolerance and chosen smaller than one, in
this case, we select
1731 * three orders of magnitude, whereas it is a number larger than 1 when
1736 * From a computational
point of view, the Chebyshev iteration is a very
1737 * attractive coarse grid solver as
long as the coarse size is
1738 * moderate. This is because the Chebyshev method performs only
1739 *
matrix-vector products and vector updates, which typically parallelize
1740 * better to the largest cluster size with more than a few tens of
1741 * thousands of cores than inner product involved in other iterative
1742 * methods. The former involves only local communication between neighbors
1743 * in the (coarse) mesh, whereas the latter
requires global communication
1744 * over all processors.
1747 *
using SmootherType =
1760 * smoother_data[
level].smoothing_range = 15.;
1761 * smoother_data[
level].degree = 5;
1762 * smoother_data[
level].eig_cg_n_iterations = 10;
1766 * smoother_data[0].smoothing_range = 1
e-3;
1768 * smoother_data[0].eig_cg_n_iterations = mg_matrices[0].m();
1770 * mg_matrices[
level].compute_diagonal();
1771 * smoother_data[
level].preconditioner =
1772 * mg_matrices[
level].get_matrix_diagonal_inverse();
1774 * mg_smoother.initialize(mg_matrices, smoother_data);
1782 * The next step is to
set up the
interface matrices that are needed for the
1783 *
case with hanging nodes. The adaptive multigrid realization in deal.II
1784 * implements an approach called local smoothing. This means that the
1785 * smoothing on the finest
level only covers the local part of the mesh
1786 * defined by the fixed (finest) grid
level and ignores parts of the
1787 * computational domain where the terminal cells are coarser than
this
1788 *
level. As the method progresses to coarser levels, more and more of the
1789 * global mesh will be covered. At some coarser
level, the whole mesh will
1790 * be covered. Since all
level matrices in the multigrid method cover a
1791 * single
level in the mesh, no hanging nodes appear on the
level matrices.
1792 * At the
interface between multigrid levels, homogeneous Dirichlet boundary
1793 * conditions are
set while smoothing. When the residual is transferred to
1794 * the next coarser
level, however, the coupling over the multigrid
1795 *
interface needs to be taken into account. This is done by the so-called
1796 * interface (or edge) matrices that compute the part of the residual that
1798 * homogeneous Dirichlet conditions. We refer to the @ref mg_paper
1799 *
"Multigrid paper by Janssen and Kanschat" for more details.
1803 * For the implementation of those
interface matrices, there is already a
1807 * vmult() and @p Tvmult() operations (that were originally written for
1808 * matrices, hence expecting those names). Note that vmult_interface_down
1809 * is used during the restriction phase of the multigrid V-cycle, whereas
1810 * vmult_interface_up is used during the prolongation phase.
1814 * Once the interface matrix is created, we set up the remaining
Multigrid
1815 * preconditioner infrastructure in complete analogy to @ref step_16 "step-16" to obtain
1816 * a @p preconditioner
object that can be applied to a matrix.
1823 * mg_interface_matrices;
1824 * mg_interface_matrices.resize(0,
triangulation.n_global_levels() - 1);
1827 * mg_interface_matrices[
level].initialize(mg_matrices[
level]);
1829 * mg_interface_matrices);
1832 * mg_matrix, mg_coarse, mg_transfer, mg_smoother, mg_smoother);
1833 *
mg.set_edge_matrices(mg_interface, mg_interface);
1838 * preconditioner(dof_handler,
mg, mg_transfer);
1842 * The setup of the multigrid routines is quite easy and one cannot see
1843 * any difference in the solve process as compared to @ref step_16 "step-16". All the
1844 * magic is hidden behind the implementation of the LaplaceOperator::vmult
1845 * operation. Note that we print out the solve time and the accumulated
1846 * setup time through standard out, i.e., in any case, whereas detailed
1847 * times for the setup operations are only printed in case the flag for
1848 * detail_times in the constructor is changed.
1854 *
SolverControl solver_control(100, 1e-12 * system_rhs.l2_norm());
1856 * setup_time += time.wall_time();
1857 * time_details << "MG build smoother time (CPU/wall) " << time.cpu_time()
1858 * << "s/" << time.wall_time() << "s\n";
1859 * pcout << "Total setup time (wall) " << setup_time << "s\n";
1863 * constraints.set_zero(solution);
1864 * cg.solve(system_matrix, solution, system_rhs, preconditioner);
1866 * constraints.distribute(solution);
1868 * pcout << "Time solve (" << solver_control.last_step() << " iterations)"
1869 * << (solver_control.last_step() < 10 ? " " : " ") << "(CPU/wall) "
1870 * << time.cpu_time() << "s/" << time.wall_time() << "s\n";
1878 * <a name="LaplaceProblemoutput_results"></a>
1879 * <h4>LaplaceProblem::output_results</h4>
1883 * Here is the data output, which is a simplified version of @ref step_5 "step-5". We use
1884 * the standard VTU (= compressed VTK) output for each grid produced in the
1885 * refinement process. In addition, we use a compression algorithm that is
1886 * optimized for speed rather than disk usage. The default setting (which
1887 * optimizes for disk usage) makes saving the output take about 4 times as
1888 *
long as running the linear solver, while setting
1890 *
DataOutBase::VtkFlags::best_speed lowers this to only one fourth the time
1891 * of the linear solve.
1895 * We disable the output when the mesh gets too large. A variant of this
1896 * program has been run on hundreds of thousands MPI ranks with as many as
1897 * 100 billion grid cells, which is not directly accessible to classical
1898 * visualization tools.
1901 * template <
int dim>
1902 *
void LaplaceProblem<dim>::output_results(const
unsigned int cycle) const
1910 * solution.update_ghost_values();
1919 *
"./",
"solution", cycle, MPI_COMM_WORLD, 3);
1921 * time_details <<
"Time write output (CPU/wall) " << time.
cpu_time()
1930 * <a name=
"LaplaceProblemrun"></a>
1931 * <h4>LaplaceProblem::run</h4>
1935 * The function that runs the program is very similar to the one in
1936 * @ref step_16
"step-16". We
do few refinement steps in 3D compared to 2D, but that
's
1941 * Before we run the program, we output some information about the detected
1942 * vectorization level as discussed in the introduction.
1945 * template <int dim>
1946 * void LaplaceProblem<dim>::run()
1949 * const unsigned int n_vect_doubles = VectorizedArray<double>::size();
1950 * const unsigned int n_vect_bits = 8 * sizeof(double) * n_vect_doubles;
1952 * pcout << "Vectorization over " << n_vect_doubles
1953 * << " doubles = " << n_vect_bits << " bits ("
1954 * << Utilities::System::get_current_vectorization_level() << ')
'
1958 * for (unsigned int cycle = 0; cycle < 9 - dim; ++cycle)
1960 * pcout << "Cycle " << cycle << std::endl;
1964 * GridGenerator::hyper_cube(triangulation, 0., 1.);
1965 * triangulation.refine_global(3 - dim);
1967 * triangulation.refine_global(1);
1971 * output_results(cycle);
1972 * pcout << std::endl;
1975 * } // namespace Step37
1982 * <a name="Thecodemaincodefunction"></a>
1983 * <h3>The <code>main</code> function</h3>
1987 * Apart from the fact that we set up the MPI framework according to @ref step_40 "step-40",
1988 * there are no surprises in the main function.
1991 * int main(int argc, char *argv[])
1995 * using namespace Step37;
1997 * Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
1999 * LaplaceProblem<dimension> laplace_problem;
2000 * laplace_problem.run();
2002 * catch (std::exception &exc)
2004 * std::cerr << std::endl
2006 * << "----------------------------------------------------"
2008 * std::cerr << "Exception on processing: " << std::endl
2009 * << exc.what() << std::endl
2010 * << "Aborting!" << std::endl
2011 * << "----------------------------------------------------"
2017 * std::cerr << std::endl
2019 * << "----------------------------------------------------"
2021 * std::cerr << "Unknown exception!" << std::endl
2022 * << "Aborting!" << std::endl
2023 * << "----------------------------------------------------"
2031<a name="Results"></a><h1>Results</h1>
2034<a name="Programoutput"></a><h3>Program output</h3>
2037Since this example solves the same problem as @ref step_5 "step-5" (except for
2038a different coefficient), there is little to say about the
2039solution. We show a picture anyway, illustrating the size of the
2040solution through both isocontours and volume rendering:
2042<img src="https://www.dealii.org/images/steps/developer/step-37.solution.png" alt="">
2044Of more interest is to evaluate some aspects of the multigrid solver.
2045When we run this program in 2D for quadratic (@f$Q_2@f$) elements, we get the
2046following output (when run on one core in release mode):
2048Vectorization over 2 doubles = 128 bits (SSE2)
2050Number of degrees of freedom: 81
2051Total setup time (wall) 0.00159788s
2052Time solve (6 iterations) (CPU/wall) 0.000951s/0.000951052s
2055Number of degrees of freedom: 289
2056Total setup time (wall) 0.00114608s
2057Time solve (6 iterations) (CPU/wall) 0.000935s/0.000934839s
2060Number of degrees of freedom: 1089
2061Total setup time (wall) 0.00244665s
2062Time solve (6 iterations) (CPU/wall) 0.00207s/0.002069s
2065Number of degrees of freedom: 4225
2066Total setup time (wall) 0.00678205s
2067Time solve (6 iterations) (CPU/wall) 0.005616s/0.00561595s
2070Number of degrees of freedom: 16641
2071Total setup time (wall) 0.0241671s
2072Time solve (6 iterations) (CPU/wall) 0.019543s/0.0195441s
2075Number of degrees of freedom: 66049
2076Total setup time (wall) 0.0967851s
2077Time solve (6 iterations) (CPU/wall) 0.07457s/0.0745709s
2080Number of degrees of freedom: 263169
2081Total setup time (wall) 0.346374s
2082Time solve (6 iterations) (CPU/wall) 0.260042s/0.265033s
2085As in @ref step_16 "step-16", we see that the number of CG iterations remains constant with
2086increasing number of degrees of freedom. A constant number of iterations
2087(together with optimal computational properties) means that the computing time
2088approximately quadruples as the problem size quadruples from one cycle to the
2089next. The code is also very efficient in terms of storage. Around 2-4 million
2090degrees of freedom fit into 1 GB of memory, see also the MPI results below. An
2091interesting fact is that solving one linear system is cheaper than the setup,
2092despite not building a matrix (approximately half of which is spent in the
2093DoFHandler::distribute_dofs() and DoFHandler::distribute_mg_dofs()
2094calls). This shows the high efficiency of this approach, but also that the
2095deal.II data structures are quite expensive to set up and the setup cost must
2096be amortized over several system solves.
2098Not much changes if we run the program in three spatial dimensions. Since we
2099use uniform mesh refinement, we get eight times as many elements and
2100approximately eight times as many degrees of freedom with each cycle:
2103Vectorization over 2 doubles = 128 bits (SSE2)
2105Number of degrees of freedom: 125
2106Total setup time (wall) 0.00231099s
2107Time solve (6 iterations) (CPU/wall) 0.000692s/0.000922918s
2110Number of degrees of freedom: 729
2111Total setup time (wall) 0.00289083s
2112Time solve (6 iterations) (CPU/wall) 0.001534s/0.0024128s
2115Number of degrees of freedom: 4913
2116Total setup time (wall) 0.0143182s
2117Time solve (6 iterations) (CPU/wall) 0.010785s/0.0107841s
2120Number of degrees of freedom: 35937
2121Total setup time (wall) 0.087064s
2122Time solve (6 iterations) (CPU/wall) 0.063522s/0.06545s
2125Number of degrees of freedom: 274625
2126Total setup time (wall) 0.596306s
2127Time solve (6 iterations) (CPU/wall) 0.427757s/0.431765s
2130Number of degrees of freedom: 2146689
2131Total setup time (wall) 4.96491s
2132Time solve (6 iterations) (CPU/wall) 3.53126s/3.56142s
2135Since it is so easy, we look at what happens if we increase the polynomial
2136degree. When selecting the degree as four in 3D, i.e., on @f$\mathcal Q_4@f$
2137elements, by changing the line <code>const unsigned int
2138degree_finite_element=4;</code> at the top of the program, we get the
2139following program output:
2142Vectorization over 2 doubles = 128 bits (SSE2)
2144Number of degrees of freedom: 729
2145Total setup time (wall) 0.00633097s
2146Time solve (6 iterations) (CPU/wall) 0.002829s/0.00379395s
2149Number of degrees of freedom: 4913
2150Total setup time (wall) 0.0174279s
2151Time solve (6 iterations) (CPU/wall) 0.012255s/0.012254s
2154Number of degrees of freedom: 35937
2155Total setup time (wall) 0.082655s
2156Time solve (6 iterations) (CPU/wall) 0.052362s/0.0523629s
2159Number of degrees of freedom: 274625
2160Total setup time (wall) 0.507943s
2161Time solve (6 iterations) (CPU/wall) 0.341811s/0.345788s
2164Number of degrees of freedom: 2146689
2165Total setup time (wall) 3.46251s
2166Time solve (7 iterations) (CPU/wall) 3.29638s/3.3265s
2169Number of degrees of freedom: 16974593
2170Total setup time (wall) 27.8989s
2171Time solve (7 iterations) (CPU/wall) 26.3705s/27.1077s
2174Since @f$\mathcal Q_4@f$ elements on a certain mesh correspond to @f$\mathcal Q_2@f$
2175elements on half the mesh size, we can compare the run time at cycle 4 with
2176fourth degree polynomials with cycle 5 using quadratic polynomials, both at
21772.1 million degrees of freedom. The surprising effect is that the solver for
2178@f$\mathcal Q_4@f$ element is actually slightly faster than for the quadratic
2179case, despite using one more linear iteration. The effect that higher-degree
2180polynomials are similarly fast or even faster than lower degree ones is one of
2181the main strengths of matrix-free operator evaluation through sum
2182factorization, see the <a
2183href="http://dx.doi.org/10.1016/j.compfluid.2012.04.012">matrix-free
2184paper</a>. This is fundamentally different to matrix-based methods that get
2185more expensive per unknown as the polynomial degree increases and the coupling
2188In addition, also the setup gets a bit cheaper for higher order, which is
2189because fewer elements need to be set up.
2191Finally, let us look at the timings with degree 8, which corresponds to
2192another round of mesh refinement in the lower order methods:
2195Vectorization over 2 doubles = 128 bits (SSE2)
2197Number of degrees of freedom: 4913
2198Total setup time (wall) 0.0842004s
2199Time solve (8 iterations) (CPU/wall) 0.019296s/0.0192959s
2202Number of degrees of freedom: 35937
2203Total setup time (wall) 0.327048s
2204Time solve (8 iterations) (CPU/wall) 0.07517s/0.075999s
2207Number of degrees of freedom: 274625
2208Total setup time (wall) 2.12335s
2209Time solve (8 iterations) (CPU/wall) 0.448739s/0.453698s
2212Number of degrees of freedom: 2146689
2213Total setup time (wall) 16.1743s
2214Time solve (8 iterations) (CPU/wall) 3.95003s/3.97717s
2217Number of degrees of freedom: 16974593
2218Total setup time (wall) 130.8s
2219Time solve (8 iterations) (CPU/wall) 31.0316s/31.767s
2222Here, the initialization seems considerably slower than before, which is
2223mainly due to the computation of the diagonal of the matrix, which actually
2224computes a 729 x 729 matrix on each cell and throws away everything but the
2225diagonal. The solver times, however, are again very close to the quartic case,
2226showing that the linear increase with the polynomial degree that is
2227theoretically expected is almost completely offset by better computational
2228characteristics and the fact that higher order methods have a smaller share of
2229degrees of freedom living on several cells that add to the evaluation
2232<a name="Comparisonwithasparsematrix"></a><h3>Comparison with a sparse matrix</h3>
2235In order to understand the capabilities of the matrix-free implementation, we
2236compare the performance of the 3d example above with a sparse matrix
2237implementation based on TrilinosWrappers::SparseMatrix by measuring both the
2238computation times for the initialization of the problem (distribute DoFs,
2239setup and assemble matrices, setup multigrid structures) and the actual
2240solution for the matrix-free variant and the variant based on sparse
2241matrices. We base the preconditioner on float numbers and the actual matrix
2242and vectors on double numbers, as shown above. Tests are run on an Intel Core
2243i7-5500U notebook processor (two cores and <a
2244href="http://en.wikipedia.org/wiki/Advanced_Vector_Extensions">AVX</a>
2245support, i.e., four operations on doubles can be done with one CPU
2246instruction, which is heavily used in FEEvaluation), optimized mode, and two
2249<table align="center" class="doxtable">
2252 <th colspan="2">Sparse matrix</th>
2253 <th colspan="2">Matrix-free implementation</th>
2257 <th>Setup + assemble</th>
2258 <th> Solve </th>
2259 <th>Setup + assemble</th>
2260 <th> Solve </th>
2263 <td align="right">125</td>
2264 <td align="center">0.0042s</td>
2265 <td align="center">0.0012s</td>
2266 <td align="center">0.0022s</td>
2267 <td align="center">0.00095s</td>
2270 <td align="right">729</td>
2271 <td align="center">0.012s</td>
2272 <td align="center">0.0040s</td>
2273 <td align="center">0.0027s</td>
2274 <td align="center">0.0021s</td>
2277 <td align="right">4,913</td>
2278 <td align="center">0.082s</td>
2279 <td align="center">0.012s</td>
2280 <td align="center">0.011s</td>
2281 <td align="center">0.0057s</td>
2284 <td align="right">35,937</td>
2285 <td align="center">0.73s</td>
2286 <td align="center">0.13s</td>
2287 <td align="center">0.048s</td>
2288 <td align="center">0.040s</td>
2291 <td align="right">274,625</td>
2292 <td align="center">5.43s</td>
2293 <td align="center">1.01s</td>
2294 <td align="center">0.33s</td>
2295 <td align="center">0.25s</td>
2298 <td align="right">2,146,689</td>
2299 <td align="center">43.8s</td>
2300 <td align="center">8.24s</td>
2301 <td align="center">2.42s</td>
2302 <td align="center">2.06s</td>
2306The table clearly shows that the matrix-free implementation is more than twice
2307as fast for the solver, and more than six times as fast when it comes to
2308initialization costs. As the problem size is made a factor 8 larger, we note
2309that the times usually go up by a factor eight, too (as the solver iterations
2310are constant at six). The main deviation is in the sparse matrix between 5k
2311and 36k degrees of freedom, where the time increases by a factor 12. This is
2312the threshold where the (L3) cache in the processor can no longer hold all
2313data necessary for the matrix-vector products and all matrix elements must be
2314fetched from main memory.
2316Of course, this picture does not necessarily translate to all cases, as there
2317are problems where knowledge of matrix entries enables much better solvers (as
2318happens when the coefficient is varying more strongly than in the above
2319example). Moreover, it also depends on the computer system. The present system
2320has good memory performance, so sparse matrices perform comparably
2321well. Nonetheless, the matrix-free implementation gives a nice speedup already
2322for the <i>Q</i><sub>2</sub> elements used in this example. This becomes
2323particularly apparent for time-dependent or nonlinear problems where sparse
2324matrices would need to be reassembled over and over again, which becomes much
2325easier with this class. And of course, thanks to the better complexity of the
2326products, the method gains increasingly larger advantages when the order of the
2327elements increases (the matrix-free implementation has costs
23284<i>d</i><sup>2</sup><i>p</i> per degree of freedom, compared to
23292<i>p<sup>d</sup></i> for the sparse matrix, so it will win anyway for order 4
2332<a name="ResultsforlargescaleparallelcomputationsonSuperMUC"></a><h3> Results for large-scale parallel computations on SuperMUC</h3>
As explained in the introduction and the in-code comments, this program can be
run in parallel with MPI. It turns out that geometric multigrid schemes work
really well and can scale to very large machines. To the authors' knowledge,
the geometric multigrid results shown here are the largest computations done
with deal.II as of late 2016, run on up to 147,456 cores of the <a
href="https://www.lrz.de/services/compute/supermuc/systemdescription/">complete
SuperMUC Phase 1</a>. The ingredients for scalability beyond 1000 cores are
that no data structure that depends on the global problem size is held in its
entirety on a single processor and that the communication is not too frequent
in order not to run into latency issues of the network. For PDEs solved with
iterative solvers, the communication latency is often the limiting factor,
rather than the throughput of the network. For the example of the SuperMUC
system, the point-to-point latency between two processors is between 1e-6 and
1e-5 seconds, depending on the proximity in the MPI network. The matrix-vector
products with @p LaplaceOperator from this class involve several
point-to-point communication steps, interleaved with computations on each
core. The resulting latency of a matrix-vector product is around 1e-4
seconds. Global communication, for example an @p MPI_Allreduce operation that
accumulates the sum of a single number per rank over all ranks in the MPI
network, has a latency of 1e-4 seconds. The multigrid V-cycle used in this
program is also a form of global communication. Think about the coarse grid
solve that happens on a single processor: It accumulates the contributions
from all processors before it starts. When completed, the coarse grid solution
is transferred to finer levels, where more and more processors help in
smoothing until the fine grid. Essentially, this is a tree-like pattern over
the processors in the network, controlled by the mesh. As opposed to the
@p MPI_Allreduce operations, where the tree in the reduction is optimized to the
actual links in the MPI network, the multigrid V-cycle does this according to
the partitioning of the mesh. Thus, we cannot expect the same
optimality. Furthermore, the multigrid cycle is not simply a walk up and down
the refinement tree, but also involves communication on each level when doing the
smoothing. In other words, the global communication in multigrid is more
challenging and, being tied to the mesh, provides fewer optimization
opportunities. The measured latency of the V-cycle is between 6e-3 and 2e-2
seconds, i.e., the same as 60 to 200 MPI_Allreduce operations.
The following figure shows a scaling experiment on @f$\mathcal Q_3@f$
elements. Along the lines, the problem size is held constant as the number of
cores is increased. When doubling the number of cores, one expects a halving
of the computational time, indicated by the dotted gray lines. The results
show that the implementation exhibits almost ideal behavior until an absolute
time of around 0.1 seconds is reached. The solver tolerances have been set
such that the solver performs five iterations. This way of plotting data is
the <b>strong scaling</b> of the algorithm. As we go to very large core
counts, the curves flatten out a bit earlier, which is because of the
communication network in SuperMUC where communication between processors
farther away is slightly slower.
<img src="https://www.dealii.org/images/steps/developer/step-37.scaling_strong.png" alt="">
In addition, the plot also contains results for the <b>weak scaling</b> of the
algorithm, i.e., how it behaves as both the number of processor cores and the
number of elements are increased at the same pace. In this situation, we
expect that the compute time remains constant. Algorithmically, the number of
CG iterations is constant at 5, so we are good from that end. The lines in the
plot are arranged such that the top left point in each data series represents
the same size per processor, namely 131,072 elements (or approximately 3.5
million degrees of freedom per core). The gray lines indicating ideal strong
scaling are by the same factor of 8 apart. The results show again that the
scaling is almost ideal. The parallel efficiency when going from 288 cores to
147,456 cores is at around 75% for a local problem size of 750,000 degrees of
freedom per core, which takes 1.0s on 288 cores, 1.03s on 2304 cores, 1.19s on
18k cores, and 1.35s on 147k cores. The algorithms also reach a very high
utilization of the processor. The largest computation on 147k cores reaches
around 1.7 PFLOPs/s on SuperMUC out of an arithmetic peak of 3.2 PFLOPs/s. For
an iterative PDE solver, this is a very high number and significantly more is
often only reached for dense linear algebra. Sparse linear algebra is limited
to a tenth of this value.
As mentioned in the introduction, the matrix-free method reduces the memory
consumption of the data structures. Besides the higher performance due to less
memory transfer, the algorithms also allow for very large problems to fit into
memory. The figure below shows the computational time as we increase the
problem size until an upper limit where the computation exhausts memory. We do
this for 1k cores, 8k cores, and 65k cores and see that the problem size can
be varied over almost two orders of magnitude with ideal scaling. The largest
computation shown in this picture involves 292 billion (@f$2.92 \cdot 10^{11}@f$)
degrees of freedom. In a DG computation on 147k cores, the above algorithms
were also run on up to 549 billion (2^39) DoFs.
Finally, we note that while performing the tests on the large-scale system
shown above, improvements of the multigrid algorithms in deal.II were
developed. The original version contained sub-optimal code in which certain
global reduction operations (checking whether all vector entries are zero)
were performed on each smoothing operation on each level, which only became
apparent on 65k cores and more. However, the following picture shows that the
improvements already pay off on a smaller scale, here shown on computations on
up to 14,336 cores for @f$\mathcal Q_5@f$ elements:
<a name="Adaptivity"></a><h3> Adaptivity</h3>
As explained in the code, the algorithm presented here is prepared to run on
adaptively refined meshes. If only part of the mesh is refined, the multigrid
cycle will run with local smoothing and impose Dirichlet conditions along the
interfaces between different refinement levels for smoothing. Due to the way
the degrees of freedom are distributed over levels, relating the owner of the
level cells to the owner of the first descendant active cell, there can be an
imbalance between different processors in MPI, which limits scalability by a
factor of around two to five.
<a name="Possibilitiesforextensions"></a><h3> Possibilities for extensions</h3>

<a name="Kellyerrorestimator"></a><h4> Kelly error estimator </h4>
As mentioned above, the code is ready for locally adaptive h-refinement.
For the Poisson equation one can employ the Kelly error indicator, implemented
in the KellyErrorEstimator class. However, one needs to be careful with the
ghost indices of parallel vectors.
In order to evaluate the jump terms in the error indicator, each MPI process
needs to know locally relevant DoFs.
However, the MatrixFree::initialize_dof_vector() function initializes the vector only with
some locally relevant DoFs.
The ghost indices made available in the vector are a tight set of only those indices
that are touched in the cell integrals (including constraint resolution).
This choice has performance reasons, because sending all locally relevant degrees
of freedom would be too expensive compared to the matrix-vector product.
Consequently, the solution vector as-is is not suitable for the error estimator.
The trick is to change the ghost part of the partition, for example using a
temporary vector and LinearAlgebra::distributed::Vector::copy_locally_owned_data_from()
as shown below:
@code
const IndexSet locally_relevant_dofs =
  DoFTools::extract_locally_relevant_dofs(dof_handler);
LinearAlgebra::distributed::Vector<double> copy_vec(solution);
solution.reinit(dof_handler.locally_owned_dofs(),
                locally_relevant_dofs,
                triangulation.get_communicator());
solution.copy_locally_owned_data_from(copy_vec);
constraints.distribute(solution);
solution.update_ghost_values();
@endcode
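With the re-partitioned ghost values in place, the error indicator can be
evaluated in the usual way. The following is only a minimal sketch; the vector
name @p estimated_error_per_cell and the quadrature degree are choices made
here for illustration, not prescribed by the program:
@code
// One error value per locally owned active cell; ghosted cells get zero.
Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
KellyErrorEstimator<dim>::estimate(dof_handler,
                                   QGauss<dim - 1>(fe.degree + 1),
                                   {},            // no Neumann boundary data
                                   solution,      // vector with adjusted ghosts
                                   estimated_error_per_cell);
@endcode
The resulting per-cell indicators can then be fed into the usual
mark-refine-coarsen cycle for distributed triangulations.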
<a name="Sharedmemoryparallelization"></a><h4> Shared-memory parallelization</h4>
This program is parallelized with MPI only. As an alternative, the MatrixFree
loop can also be issued in hybrid mode, for example by using MPI to parallelize
over the nodes of a cluster and threads through Intel TBB within the
shared memory region of one node. To use this, one would need to both set the
number of threads in the MPI_InitFinalize data structure in the main function,
and set the MatrixFree::AdditionalData::tasks_parallel_scheme to
partition_color to actually do the loop in parallel. This use case is
discussed in @ref step_48 "step-48".
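A minimal sketch of the two changes, assuming the variable names used in this
tutorial's main() and setup routine; the chosen thread count is only an
example:
@code
// In main(): allow deal.II to spawn threads on each MPI rank instead of
// limiting the run to a single thread per rank.
Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv,
                                          numbers::invalid_unsigned_int);

// When setting up the MatrixFree data: request the threaded cell loop.
typename MatrixFree<dim, double>::AdditionalData additional_data;
additional_data.tasks_parallel_scheme =
  MatrixFree<dim, double>::AdditionalData::partition_color;
// ... keep the remaining AdditionalData settings as in the tutorial ...
@endcode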
<a name="InhomogeneousDirichletboundaryconditions"></a><h4> Inhomogeneous Dirichlet boundary conditions </h4>
The presented program assumes homogeneous Dirichlet boundary conditions. When
going to non-homogeneous conditions, the situation is a bit more intricate. To
understand how to implement such a setting, let us first recall how these
arise in the mathematical formulation and how they are implemented in a
matrix-based variant. In essence, an inhomogeneous Dirichlet condition sets
some of the nodal values in the solution to given values rather than
determining them through the variational principles,
@f{eqnarray*}{
u_h(\mathbf{x}) = \sum_{i\in \mathcal N} \varphi_i(\mathbf{x}) u_i =
\sum_{i\in \mathcal N \setminus \mathcal N_D} \varphi_i(\mathbf{x}) u_i +
\sum_{i\in \mathcal N_D} \varphi_i(\mathbf{x}) g_i,
@f}
where @f$u_i@f$ denotes the nodal values of the solution and @f$\mathcal N@f$ denotes
the set of all nodes. The set @f$\mathcal N_D\subset \mathcal N@f$ is the subset
of the nodes that are subject to Dirichlet boundary conditions where the
solution is forced to equal @f$u_i = g_i = g(\mathbf{x}_i)@f$ as the interpolation
of boundary values on the Dirichlet-constrained node points @f$i\in \mathcal
N_D@f$. We then insert this solution
representation into the weak form, e.g. the Laplacian shown above, and move
the known quantities to the right hand side:
@f{eqnarray*}{
(\nabla \varphi_i, \nabla u_h)_\Omega &=& (\varphi_i, f)_\Omega \quad \Rightarrow \\
\sum_{j\in \mathcal N \setminus \mathcal N_D}(\nabla \varphi_i,\nabla \varphi_j)_\Omega \, u_j &=&
(\varphi_i, f)_\Omega
-\sum_{j\in \mathcal N_D} (\nabla \varphi_i,\nabla\varphi_j)_\Omega\, g_j.
@f}
In this formula, the equations are tested for all basis functions @f$\varphi_i@f$
with @f$i\in \mathcal N \setminus \mathcal N_D@f$ that are not related to the nodes
constrained by Dirichlet conditions.
In the implementation in deal.II, the integrals @f$(\nabla \varphi_i,\nabla \varphi_j)_\Omega@f$
on the right hand side are already contained in the local matrix contributions
we assemble on each cell. When using AffineConstraints::distribute_local_to_global()
as first described in the
@ref step_6 "step-6" and @ref step_7 "step-7" tutorial programs, we can account for the contribution of
inhomogeneous constraints <i>j</i> by multiplying the columns <i>j</i> and
rows <i>i</i> of the local matrix according to the integrals @f$(\varphi_i,
\varphi_j)_\Omega@f$ by the inhomogeneities and subtracting the result from
the position <i>i</i> in the global right-hand-side vector, see also the @ref
constraints module. In essence, we use some of the integrals that get
eliminated from the left hand side of the equation to finalize the right hand
side contribution. Similar mathematics are also involved when first writing
all entries into a left hand side matrix and then eliminating matrix rows and
columns related to constrained degrees of freedom.
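For reference, a minimal sketch of this matrix-based treatment during
assembly, assuming the usual local objects @p cell_matrix, @p cell_rhs, and
@p local_dof_indices from the matrix-based tutorials, an AffineConstraints
object that already stores the inhomogeneous Dirichlet constraints, and a
sparse @p system_matrix (not the matrix-free operator of this program):
@code
// The constraints object both condenses the local contributions and moves
// the inhomogeneity times the eliminated matrix entries to the right hand side.
constraints.distribute_local_to_global(cell_matrix,
                                       cell_rhs,
                                       local_dof_indices,
                                       system_matrix,
                                       system_rhs);
@endcode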
In principle, the components that belong to the constrained degrees of freedom
could be eliminated from the linear system because they do not carry any
information. In practice, in deal.II we always keep the size of the linear
system the same to avoid handling two different numbering systems and to avoid
confusion about the two different index sets. In order to ensure that the
linear system does not get singular when not adding anything to constrained
rows, we then add dummy entries to the matrix diagonal that are otherwise
unrelated to the real entries.
In a matrix-free method, we need to take a different approach, since the @p
LaplaceOperator class represents the matrix-vector product of a
<b>homogeneous</b> operator (the left-hand side of the last formula). It does
not matter that the AffineConstraints object passed to the MatrixFree::reinit()
call contains inhomogeneous constraints; the cell loop will only resolve the
homogeneous part of the constraints as long as it represents a <b>linear</b> operator.
In our matrix-free code, the right hand side computation where the
contribution of inhomogeneous conditions ends up is completely decoupled from
the matrix operator and handled by a different function above. Thus, we need
to explicitly generate the data that enters the right hand side rather than
using a byproduct of the matrix assembly. Since we already know how to apply
the operator on a vector, we could try to use those facilities on a vector
where we only set the Dirichlet values (here, @p BoundaryValueFunction stands
for a function object describing the boundary values @f$g@f$):
@code
// interpolate the boundary values into the solution vector
std::map<types::global_dof_index, double> boundary_values;
VectorTools::interpolate_boundary_values(dof_handler,
                                         0,
                                         BoundaryValueFunction<dim>(),
                                         boundary_values);
for (const std::pair<const types::global_dof_index, double> &pair : boundary_values)
  if (solution.locally_owned_elements().is_element(pair.first))
    solution(pair.first) = pair.second;
@endcode
or, equivalently, if we already had filled the inhomogeneous constraints into
an AffineConstraints object, by calling AffineConstraints::distribute() on the
@p solution vector, as in the one-line sketch below.
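A minimal sketch of that variant, assuming @p constraints already contains the
inhomogeneous Dirichlet values:
@code
constraints.distribute(solution);
@endcode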
We could then pass the vector @p solution to the @p
LaplaceOperator::vmult_add() function and add the new contribution to the @p
system_rhs vector that gets filled in the @p LaplaceProblem::assemble_rhs()
function. However, this idea does not work because the
FEEvaluation::read_dof_values() call used inside the vmult() functions assumes
homogeneous values on all constraints (otherwise the operator would not be a
linear operator but an affine one). To also retrieve the values of the
inhomogeneities, we could select one of the following two strategies.
<a name="UseFEEvaluationread_dof_values_plaintoavoidresolvingconstraints"></a><h5> Use FEEvaluation::read_dof_values_plain() to avoid resolving constraints </h5>
The class FEEvaluation has a facility that addresses precisely this
requirement: For non-homogeneous Dirichlet values, we do want to skip the
implicit imposition of homogeneous (Dirichlet) constraints upon reading the
data from the vector @p solution. For example, we could extend the @p
LaplaceProblem::assemble_rhs() function to deal with inhomogeneous Dirichlet
values as follows, assuming the Dirichlet values have been interpolated into
the object @p constraints and assuming the coefficient table stored in the
operator is accessible, here through a hypothetical @p get_coefficient()
accessor:
@code
template <int dim>
void LaplaceProblem<dim>::assemble_rhs()
{
  solution = 0;
  constraints.distribute(solution);
  solution.update_ghost_values();
  system_rhs = 0;
  const Table<2, VectorizedArray<double>> &coefficient = system_matrix.get_coefficient();
  FEEvaluation<dim, degree_finite_element> phi(*system_matrix.get_matrix_free());
  for (unsigned int cell = 0; cell < system_matrix.get_matrix_free()->n_cell_batches(); ++cell)
    {
      phi.reinit(cell);
      phi.read_dof_values_plain(solution);
      phi.evaluate(EvaluationFlags::gradients);
      for (unsigned int q = 0; q < phi.n_q_points; ++q)
        {
          phi.submit_gradient(-coefficient(cell, q) * phi.get_gradient(q), q);
          phi.submit_value(make_vectorized_array<double>(1.0), q);
        }
      phi.integrate(EvaluationFlags::values | EvaluationFlags::gradients);
      phi.distribute_local_to_global(system_rhs);
    }
  system_rhs.compress(VectorOperation::add);
}
@endcode
In this code, we have replaced the FEEvaluation::read_dof_values() call used
in the @p LaplaceOperator class by a read of the tentative solution vector
through FEEvaluation::read_dof_values_plain(), which ignores all
constraints. Due to this setup, we must make sure that other constraints,
e.g. by hanging nodes, are correctly distributed to the input vector already,
as they are not resolved by FEEvaluation::read_dof_values_plain(). Inside the
loop, we then evaluate the Laplacian and repeat the gradient submission with
FEEvaluation::submit_gradient() as in the @p LaplaceOperator class, but with
the sign switched, since we want to subtract the contribution of the Dirichlet
conditions from the right hand side vector according to the formula
above. When we invoke the FEEvaluation::integrate() call, we then request both
the value slot and the first derivative slot to account for both terms added
in the loop over quadrature points. Once the right hand side is assembled, we
go on to solving the linear system for the homogeneous problem, say involving
a variable @p solution_update. After solving, we can add @p solution_update to
the @p solution vector that contains the final (inhomogeneous) solution, as
sketched below.
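A minimal sketch of that last step, assuming the CG solve for the homogeneous
problem wrote its result into a vector named @p solution_update (a name chosen
here for illustration):
@code
// `solution` still holds the interpolated Dirichlet values used to build the
// right hand side; adding the homogeneous correction gives the final solution.
solution += solution_update;
solution.update_ghost_values();
@endcode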
Note that the negative sign for the Laplacian alongside a positive sign
for the forcing that we needed to build the right hand side is a more general
concept: We have implemented nothing else than Newton's method for nonlinear
equations, but applied to a linear system. We have used an initial guess for
the variable @p solution in terms of the Dirichlet boundary conditions and
computed a residual @f$r = f - Au_0@f$. The linear system was then solved as
@f$\Delta u = A^{-1} (f-Au_0)@f$ and we finally computed @f$u = u_0 + \Delta u@f$. For a
linear system, we obviously reach the exact solution after a single
iteration. If we wanted to extend the code to a nonlinear problem, we would
rename the @p assemble_rhs() function into a more descriptive name like @p
assemble_residual() that computes the (weak) form of the residual, whereas the
@p LaplaceOperator::apply_add() function would get the linearization of the
residual with respect to the solution variable.
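Restated compactly, this merely collects the inline expressions above into one
display:
@f{align*}{
r &= f - A u_0, \\
A\,\Delta u &= r, \\
u &= u_0 + \Delta u.
@f}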
<a name="UseLaplaceOperatorwithasecondAffineConstraintsobjectwithoutDirichletconditions"></a><h5> Use LaplaceOperator with a second AffineConstraints object without Dirichlet conditions </h5>
A second alternative to get the right hand side that re-uses the @p
LaplaceOperator::apply_add() function is to instead add a second LaplaceOperator
that skips Dirichlet constraints. To do this, we initialize a second MatrixFree
object which does not have any boundary value constraints. This @p matrix_free
object is then passed to a @p LaplaceOperator class instance @p
inhomogeneous_operator that is only used to create the right hand side:
@code
template <int dim>
void LaplaceProblem<dim>::assemble_rhs()
{
  system_rhs = 0;
  AffineConstraints<double> no_constraints;
  no_constraints.close();
  LaplaceOperator<dim, degree_finite_element, double> inhomogeneous_operator;
  typename MatrixFree<dim, double>::AdditionalData additional_data;
  additional_data.mapping_update_flags =
    (update_gradients | update_JxW_values | update_quadrature_points);
  std::shared_ptr<MatrixFree<dim, double>> matrix_free(new MatrixFree<dim, double>());
  matrix_free->reinit(dof_handler,
                      no_constraints,
                      QGauss<1>(fe.degree + 1),
                      additional_data);
  inhomogeneous_operator.initialize(matrix_free);
  solution = 0;
  constraints.distribute(solution);
  inhomogeneous_operator.evaluate_coefficient(Coefficient<dim>());
  inhomogeneous_operator.vmult(system_rhs, solution);
  system_rhs *= -1.;
  FEEvaluation<dim, degree_finite_element> phi(*inhomogeneous_operator.get_matrix_free());
  for (unsigned int cell = 0;
       cell < inhomogeneous_operator.get_matrix_free()->n_cell_batches();
       ++cell)
    {
      phi.reinit(cell);
      for (unsigned int q = 0; q < phi.n_q_points; ++q)
        phi.submit_value(make_vectorized_array<double>(1.0), q);
      phi.integrate(EvaluationFlags::values);
      phi.distribute_local_to_global(system_rhs);
    }
  system_rhs.compress(VectorOperation::add);
}
@endcode
A more sophisticated implementation of this technique could reuse the original
MatrixFree object by initializing it with multiple blocks, where each block
corresponds to a different AffineConstraints object. Doing this would require
making substantial modifications to the LaplaceOperator class, but the
MatrixFreeOperators::LaplaceOperator class that comes with the library can do
this. See the discussion on blocks in MatrixFreeOperators::Base for more
information on how to set up blocks.
<a name="Furtherperformanceimprovements"></a><h4> Further performance improvements </h4>
While the performance achieved in this tutorial program is already very good,
there is functionality in deal.II to further improve the performance. On the
one hand, increasing the polynomial degree to three or four will further
improve the time per unknown. (Even higher degrees typically get slower again,
because the multigrid iteration counts increase slightly with the chosen
simple smoother. One could then use hybrid multigrid algorithms to use
polynomial coarsening through MGTransferGlobalCoarsening, to reduce the impact
of the coarser level on the communication latency.) A more significant
improvement can be obtained by data-locality optimizations. The class
PreconditionChebyshev, which is used as a smoother and as a preconditioner in
the present program, can overlap the vector operations with the
matrix-vector product. As the former are typically constrained by memory
bandwidth, reducing the number of loads helps to achieve this goal. The two
ingredients to achieve this are
<ol>
<li> to provide the LaplaceOperator class of this tutorial program with a `vmult`
function that takes two `std::function` objects, which can be passed on to
MatrixFree::cell_loop() with the respective signature (PreconditionChebyshev
will then pick up this interface and schedule its vector operations), and </li>
<li> to compute a numbering that optimizes for data locality, as provided by
DoFRenumbering::matrix_free_data_locality(); a sketch is given after this list. </li>
</ol>
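As a rough sketch of the second ingredient (the first requires changes to the
LaplaceOperator class that go beyond a few lines): once the MatrixFree object
has been set up, the degrees of freedom can be renumbered for data locality.
The names follow this tutorial; the need to rebuild the data structures
afterwards is an assumption of this sketch, not code from the program.
@code
// Renumber the DoFs so that cell batches processed consecutively in
// MatrixFree::cell_loop() access nearby entries of the solution vectors.
DoFRenumbering::matrix_free_data_locality(dof_handler,
                                          *system_matrix.get_matrix_free());
// The index sets have changed, so constraints, the MatrixFree objects, and
// the vectors need to be rebuilt afterwards (e.g. by re-running the setup).
@endcode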
<a name="PlainProg"></a>
<h1> The plain program</h1>
@include "step-37.cc"