  const std::array<double, 2> polar =
    GeometricUtilities::Coordinates::to_spherical(Point<2>(p[0], p[1]));

  constexpr const double alpha = 2. / 3.;
<a name="step_75-Parameters"></a>
<h3>Parameters</h3>
For this tutorial, we will use a simplified set of parameters. It is also possible to use a ParameterHandler class here, but to keep this tutorial short we decided on using simple structs. The actual intention of all these parameters will be described in the upcoming classes at their respective locations where they are used.

The following parameter set controls the coarse-grid solver, the smoothers, and the inter-grid transfer scheme of the multigrid mechanism. We populate it with default parameters.
struct MultigridParameters
{
  struct
  {
    std::string  type            = "cg_with_amg";
    unsigned int maxiter         = 10000;
    double       abstol          = 1e-20;
    double       reltol          = 1e-4;
    unsigned int smoother_sweeps = 1;
    unsigned int n_cycles        = 1;
    std::string  smoother_type   = "ILU";
  } coarse_solver;

  struct
  {
    std::string  type                = "chebyshev";
    double       smoothing_range     = 20;
    unsigned int degree              = 5;
    unsigned int eig_cg_n_iterations = 20;
  } smoother;

  struct
  {
    MGTransferGlobalCoarseningTools::PolynomialCoarseningSequenceType
      p_sequence = MGTransferGlobalCoarseningTools::
        PolynomialCoarseningSequenceType::decrease_by_one;
    bool perform_h_transfer = true;
  } transfer;
};
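As a usage sketch (the values here are chosen for illustration only and are not part of the tutorial), individual defaults can be overridden before the struct is handed to the solver functions below:

@code
MultigridParameters mg_data;                  // defaults as listed above
mg_data.smoother.degree             = 3;      // cheaper Chebyshev smoother
mg_data.transfer.perform_h_transfer = false;  // pure p-multigrid
@endcode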
This is the general parameter struct for the problem class. You will find this struct divided into several categories, including general runtime parameters, level limits, refine and coarsen fractions, as well as parameters for cell weighting. It also contains an instance of the above struct for multigrid parameters which will be passed to the multigrid algorithm.
struct Parameters
{
  unsigned int n_cycles         = 8;
  double       tolerance_factor = 1e-12;

  MultigridParameters mg_data;

  unsigned int min_h_level            = 5;
  unsigned int max_h_level            = 12;
  unsigned int min_p_degree           = 2;
  unsigned int max_p_degree           = 6;
  unsigned int max_p_level_difference = 1;

  double refine_fraction    = 0.3;
  double coarsen_fraction   = 0.03;
  double p_refine_fraction  = 0.9;
  double p_coarsen_fraction = 0.9;

  double weighting_factor   = 1.;
  double weighting_exponent = 1.;
};
<a name="step_75-MatrixfreeLaplaceoperator"></a>
<h3>Matrix-free Laplace operator</h3>
This is a matrix-free implementation of the Laplace operator that will basically take over the part of the `assemble_system()` function from other tutorials. The meaning of all member functions will be explained at their definition later.

We will use the FEEvaluation class to evaluate the solution vector at the quadrature points and to perform the integration. In contrast to other tutorials, the template argument `degree` is set to @f$-1@f$ and the `number of quadrature points in 1d` to @f$0@f$. In this case, FEEvaluation dynamically selects the correct polynomial degree and number of quadrature points. Here, we introduce an alias to FEEvaluation with the correct template parameters so that we do not have to worry about them later on.

template <int dim, typename number>
class LaplaceOperator : public Subscriptor
{
public:
  using VectorType = LinearAlgebra::distributed::Vector<number>;

  using FECellIntegrator = FEEvaluation<dim, -1, 0, 1, number>;

  LaplaceOperator() = default;

  LaplaceOperator(const hp::MappingCollection<dim> &mapping,
                  const DoFHandler<dim>            &dof_handler,
                  const hp::QCollection<dim>       &quad,
                  const AffineConstraints<number>  &constraints,
                  VectorType                       &system_rhs);

  void reinit(const hp::MappingCollection<dim> &mapping,
              const DoFHandler<dim>            &dof_handler,
              const hp::QCollection<dim>       &quad,
              const AffineConstraints<number>  &constraints,
              VectorType                       &system_rhs);

  types::global_dof_index m() const;

  number el(unsigned int, unsigned int) const;

  void initialize_dof_vector(VectorType &vec) const;

  void vmult(VectorType &dst, const VectorType &src) const;

  void Tvmult(VectorType &dst, const VectorType &src) const;

  void compute_inverse_diagonal(VectorType &diagonal) const;

  const TrilinosWrappers::SparseMatrix &get_system_matrix() const;

private:
  void do_cell_integral_local(FECellIntegrator &integrator) const;

  void do_cell_integral_global(FECellIntegrator &integrator,
                               VectorType       &dst,
                               const VectorType &src) const;

  void do_cell_integral_range(
    const MatrixFree<dim, number>               &matrix_free,
    VectorType                                  &dst,
    const VectorType                            &src,
    const std::pair<unsigned int, unsigned int> &range) const;

  MatrixFree<dim, number> matrix_free;

To solve the equation system on the coarsest level with an AMG preconditioner, we need an actual system matrix on the coarsest level. For this purpose, we provide a mechanism that optionally computes a matrix from the matrix-free formulation, for which we introduce a dedicated SparseMatrix object. In the default case, this matrix stays empty. Once `get_system_matrix()` is called, this matrix is filled (lazy allocation). Since this is a `const` function, we need the "mutable" keyword here. We also need the constraints object to build the matrix.

  AffineConstraints<number>              constraints;
  mutable TrilinosWrappers::SparseMatrix system_matrix;
};
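Since the class provides vmult() and Tvmult(), it can be handed directly to deal.II's iterative solvers. A minimal sketch (assuming the operator and the vectors have been set up as in the functions below; this snippet is not part of the tutorial itself):

@code
using VectorType = LinearAlgebra::distributed::Vector<double>;

SolverControl        control(1000, 1e-8);
SolverCG<VectorType> cg(control);

// laplace_operator, solution, and system_rhs as prepared elsewhere:
cg.solve(laplace_operator, solution, system_rhs, PreconditionIdentity());
@endcode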
The following section contains functions to initialize and reinitialize the class: in particular, they set up the internal MatrixFree instance. For the sake of simplicity, we also compute the system right-hand-side vector here.
template <int dim, typename number>
LaplaceOperator<dim, number>::LaplaceOperator(
  const hp::MappingCollection<dim> &mapping,
  const DoFHandler<dim>            &dof_handler,
  const hp::QCollection<dim>       &quad,
  const AffineConstraints<number>  &constraints,
  VectorType                       &system_rhs)
{
  this->reinit(mapping, dof_handler, quad, constraints, system_rhs);
}
template <int dim, typename number>
void LaplaceOperator<dim, number>::reinit(
  const hp::MappingCollection<dim> &mapping,
  const DoFHandler<dim>            &dof_handler,
  const hp::QCollection<dim>       &quad,
  const AffineConstraints<number>  &constraints,
  VectorType                       &system_rhs)
{

Clear internal data structures (in the case that the operator is reused).

  this->system_matrix.clear();

Copy the constraints, since they might be needed for the computation of the system matrix later on.

  this->constraints.copy_from(constraints);

Set up MatrixFree. At the quadrature points, we only need to evaluate the gradient of the solution and test with the gradient of the shape functions, so we only need to set the flag `update_gradients`.

  typename MatrixFree<dim, number>::AdditionalData data;
  data.mapping_update_flags = update_gradients;

  matrix_free.reinit(mapping, dof_handler, constraints, quad, data);

Compute the right-hand side vector. For this purpose, we set up a second MatrixFree instance that uses a modified AffineConstraints object not containing the constraints due to Dirichlet-boundary conditions. This modified operator is applied to a vector with only the Dirichlet values set. The result is the negative right-hand-side vector.

  {
    AffineConstraints<number> constraints_without_dbc(
      dof_handler.locally_owned_dofs(),
      DoFTools::extract_locally_relevant_dofs(dof_handler));

    DoFTools::make_hanging_node_constraints(dof_handler,
                                            constraints_without_dbc);
    constraints_without_dbc.close();

    VectorType b, x;

    this->initialize_dof_vector(system_rhs);

    MatrixFree<dim, number> matrix_free;
    matrix_free.reinit(
      mapping, dof_handler, constraints_without_dbc, quad, data);

    matrix_free.initialize_dof_vector(b);
    matrix_free.initialize_dof_vector(x);

    constraints.distribute(x);

    matrix_free.cell_loop(&LaplaceOperator::do_cell_integral_range,
                          this,
                          b,
                          x);

    constraints.set_zero(b);

    system_rhs -= b;
  }
}
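In formulas, this construction is the usual lifting of inhomogeneous Dirichlet data (a brief recap added here for clarity): writing the discrete solution as @f$u = u_0 + u_g@f$, where @f$u_g@f$ carries the prescribed boundary values and @f$u_0@f$ vanishes on the boundary, the system for the homogeneous part reads

@f[
A u_0 = f - A u_g.
@f]

Since the right-hand-side function vanishes for the analytical solution of this tutorial, applying the unconstrained operator to the vector holding only the Dirichlet values and subtracting the result yields exactly the vector assembled above.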
The following functions are implicitly needed by the multigrid algorithm, including the smoothers. Since we do not have a matrix, we query the DoFHandler for the number of degrees of freedom.
template <int dim, typename number>
types::global_dof_index LaplaceOperator<dim, number>::m() const
{
  return matrix_free.get_dof_handler().n_dofs();
}
Access a particular element in the matrix. This function is neither needed nor implemented; however, it is required to compile the program.

template <int dim, typename number>
number LaplaceOperator<dim, number>::el(unsigned int, unsigned int) const
{
  DEAL_II_NOT_IMPLEMENTED();
  return 0;
}
Initialize the given vector. We simply delegate the task to the MatrixFree function with the same name.

template <int dim, typename number>
void LaplaceOperator<dim, number>::initialize_dof_vector(
  VectorType &vec) const
{
  matrix_free.initialize_dof_vector(vec);
}
Perform an operator evaluation by looping with the help of MatrixFree over all cells and evaluating the effect of the cell integrals (see also: `do_cell_integral_local()` and `do_cell_integral_global()`). The last argument of cell_loop() asks it to zero out the destination vector before accumulating into it.

template <int dim, typename number>
void LaplaceOperator<dim, number>::vmult(VectorType       &dst,
                                         const VectorType &src) const
{
  this->matrix_free.cell_loop(
    &LaplaceOperator::do_cell_integral_range, this, dst, src, true);
}
Perform the transposed operator evaluation. Since we are considering symmetric "matrices", this function can simply delegate its task to vmult().

template <int dim, typename number>
void LaplaceOperator<dim, number>::Tvmult(VectorType       &dst,
                                          const VectorType &src) const
{
  this->vmult(dst, src);
}
Since we do not have a system matrix, we cannot loop over the diagonal entries of the matrix. Instead, we compute the diagonal by performing a sequence of operator evaluations on unit basis vectors. For this purpose, an optimized function from the MatrixFreeTools namespace is used. The inversion is performed manually afterwards.

template <int dim, typename number>
void LaplaceOperator<dim, number>::compute_inverse_diagonal(
  VectorType &diagonal) const
{
  this->matrix_free.initialize_dof_vector(diagonal);
  MatrixFreeTools::compute_diagonal(matrix_free,
                                    diagonal,
                                    &LaplaceOperator::do_cell_integral_local,
                                    this);

  for (auto &i : diagonal)
    i = (std::abs(i) > 1.0e-10) ? (1.0 / i) : 1.0;
}
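As a small usage sketch (not part of the tutorial code): the inverted diagonal computed here is exactly what PreconditionChebyshev consumes as its point-Jacobi preconditioner in `mg_solve()` below.

@code
DiagonalMatrix<VectorType> jacobi;
laplace_operator.compute_inverse_diagonal(jacobi.get_vector());
// jacobi.vmult(dst, src) now multiplies entrywise by the inverse diagonal.
@endcode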
In the matrix-free context, no system matrix is set up during initialization of this class. As a consequence, it has to be computed here if it is requested. Since the matrix is only computed in this tutorial for linear elements (on the coarse grid), this is acceptable. The matrix entries are obtained via a sequence of operator evaluations, for which the optimized function MatrixFreeTools::compute_matrix() is used. The matrix will only be computed if it has not been set up yet (lazy allocation).

template <int dim, typename number>
const TrilinosWrappers::SparseMatrix &
LaplaceOperator<dim, number>::get_system_matrix() const
{
  if (system_matrix.m() == 0 && system_matrix.n() == 0)
    {
      const auto &dof_handler = this->matrix_free.get_dof_handler();

      TrilinosWrappers::SparsityPattern dsp(
        dof_handler.locally_owned_dofs(),
        dof_handler.get_triangulation().get_mpi_communicator());

      DoFTools::make_sparsity_pattern(dof_handler, dsp, this->constraints);

      dsp.compress();
      system_matrix.reinit(dsp);

      MatrixFreeTools::compute_matrix(
        matrix_free,
        constraints,
        system_matrix,
        &LaplaceOperator::do_cell_integral_local,
        this);
    }

  return this->system_matrix;
}
Perform a cell integral on a cell batch without gathering and scattering the values. This function is needed for the MatrixFreeTools functions since these functions operate directly on the buffers of FEEvaluation.

template <int dim, typename number>
void LaplaceOperator<dim, number>::do_cell_integral_local(
  FECellIntegrator &integrator) const
{
  integrator.evaluate(EvaluationFlags::gradients);

  for (const unsigned int q : integrator.quadrature_point_indices())
    integrator.submit_gradient(integrator.get_gradient(q), q);

  integrator.integrate(EvaluationFlags::gradients);
}
Same as above but with access to the global vectors.

template <int dim, typename number>
void LaplaceOperator<dim, number>::do_cell_integral_global(
  FECellIntegrator &integrator,
  VectorType       &dst,
  const VectorType &src) const
{
  integrator.gather_evaluate(src, EvaluationFlags::gradients);

  for (const unsigned int q : integrator.quadrature_point_indices())
    integrator.submit_gradient(integrator.get_gradient(q), q);

  integrator.integrate_scatter(EvaluationFlags::gradients, dst);
}
This function loops over all cell batches within a cell-batch range and calls the above function.

template <int dim, typename number>
void LaplaceOperator<dim, number>::do_cell_integral_range(
  const MatrixFree<dim, number>               &matrix_free,
  VectorType                                  &dst,
  const VectorType                            &src,
  const std::pair<unsigned int, unsigned int> &range) const
{
  FECellIntegrator integrator(matrix_free, range);

  for (unsigned cell = range.first; cell < range.second; ++cell)
    {
      integrator.reinit(cell);

      do_cell_integral_global(integrator, dst, src);
    }
}
<a name="step_75-Solverandpreconditioner"></a>
<h3>Solver and preconditioner</h3>

<a name="step_75-Conjugategradientsolverwithmultigridpreconditioner"></a>
<h4>Conjugate-gradient solver with multigrid preconditioner</h4>
This function solves the equation system with a sequence of provided multigrid objects. It is meant to be treated as generally as possible, hence the multitude of template parameters.
template <typename VectorType,
          int dim,
          typename SystemMatrixType,
          typename LevelMatrixType,
          typename MGTransferType>
static void
mg_solve(SolverControl             &solver_control,
         VectorType                &dst,
         const VectorType          &src,
         const MultigridParameters &mg_data,
         const DoFHandler<dim>     &dof,
         const SystemMatrixType    &fine_matrix,
         const MGLevelObject<std::unique_ptr<LevelMatrixType>> &mg_matrices,
         const MGTransferType      &mg_transfer)
{
  AssertThrow(mg_data.coarse_solver.type == "cg_with_amg",
              ExcNotImplemented());
  AssertThrow(mg_data.smoother.type == "chebyshev", ExcNotImplemented());

  const unsigned int min_level = mg_matrices.min_level();
  const unsigned int max_level = mg_matrices.max_level();
  using SmootherPreconditionerType = DiagonalMatrix<VectorType>;
  using SmootherType               = PreconditionChebyshev<LevelMatrixType,
                                                           VectorType,
                                                           SmootherPreconditionerType>;
  using PreconditionerType = PreconditionMG<dim, VectorType, MGTransferType>;

We initialize level operators and Chebyshev smoothers here.

  mg::Matrix<VectorType> mg_matrix(mg_matrices);

  MGLevelObject<typename SmootherType::AdditionalData> smoother_data(
    min_level, max_level);

  for (unsigned int level = min_level; level <= max_level; ++level)
    {
      smoother_data[level].preconditioner =
        std::make_shared<SmootherPreconditionerType>();
      mg_matrices[level]->compute_inverse_diagonal(
        smoother_data[level].preconditioner->get_vector());
      smoother_data[level].smoothing_range = mg_data.smoother.smoothing_range;
      smoother_data[level].degree          = mg_data.smoother.degree;
      smoother_data[level].eig_cg_n_iterations =
        mg_data.smoother.eig_cg_n_iterations;
    }

  MGSmootherPrecondition<LevelMatrixType, SmootherType, VectorType>
    mg_smoother;
  mg_smoother.initialize(mg_matrices, smoother_data);
Next, we initialize the coarse-grid solver. We use the conjugate-gradient method with AMG as preconditioner.

  ReductionControl coarse_grid_solver_control(mg_data.coarse_solver.maxiter,
                                              mg_data.coarse_solver.abstol,
                                              mg_data.coarse_solver.reltol,
                                              false,
                                              false);
  SolverCG<VectorType> coarse_grid_solver(coarse_grid_solver_control);

  std::unique_ptr<MGCoarseGridBase<VectorType>> mg_coarse;

  TrilinosWrappers::PreconditionAMG                 precondition_amg;
  TrilinosWrappers::PreconditionAMG::AdditionalData amg_data;
  amg_data.smoother_sweeps = mg_data.coarse_solver.smoother_sweeps;
  amg_data.n_cycles        = mg_data.coarse_solver.n_cycles;
  amg_data.smoother_type   = mg_data.coarse_solver.smoother_type.c_str();

  precondition_amg.initialize(mg_matrices[min_level]->get_system_matrix(),
                              amg_data);

  mg_coarse =
    std::make_unique<MGCoarseGridIterativeSolver<VectorType,
                                                 SolverCG<VectorType>,
                                                 LevelMatrixType,
                                                 decltype(precondition_amg)>>(
      coarse_grid_solver, *mg_matrices[min_level], precondition_amg);
Finally, we create the Multigrid object, convert it to a preconditioner, and use it inside a conjugate-gradient solver to solve the linear system of equations.

  Multigrid<VectorType> mg(
    mg_matrix, *mg_coarse, mg_transfer, mg_smoother, mg_smoother);

  PreconditionerType preconditioner(dof, mg, mg_transfer);

  SolverCG<VectorType>(solver_control)
    .solve(fine_matrix, dst, src, preconditioner);
}
<a name="step_75-Hybridpolynomialgeometricglobalcoarseningmultigridpreconditioner"></a>
<h4>Hybrid polynomial/geometric-global-coarsening multigrid preconditioner</h4>
The above function deals with the actual solution for a given sequence of multigrid objects. This function creates the actual multigrid levels, in particular the operators, and the transfer operator as a MGTransferGlobalCoarsening object.

template <typename VectorType, typename OperatorType, int dim>
void solve_with_gmg(SolverControl                    &solver_control,
                    const OperatorType               &system_matrix,
                    VectorType                       &dst,
                    const VectorType                 &src,
                    const MultigridParameters        &mg_data,
                    const hp::MappingCollection<dim> &mapping_collection,
                    const DoFHandler<dim>            &dof_handler,
                    const hp::QCollection<dim>       &quadrature_collection)
{
Create a DoFHandler and operator for each multigrid level, as well as transfer operators. To be able to set up the operators, we need a set of DoFHandler objects that we create via global coarsening of p or h. For the latter, we also need a sequence of Triangulation objects. In case no h-transfer is requested, we provide an empty deleter for the Triangulation, since it is owned by an external field and its destructor is called somewhere else.

  MGLevelObject<DoFHandler<dim>>                     dof_handlers;
  MGLevelObject<std::unique_ptr<OperatorType>>       operators;
  MGLevelObject<MGTwoLevelTransfer<dim, VectorType>> transfers;

  std::vector<std::shared_ptr<const Triangulation<dim>>>
    coarse_grid_triangulations;
  if (mg_data.transfer.perform_h_transfer)
    coarse_grid_triangulations =
      MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence(
        dof_handler.get_triangulation());
  else
    coarse_grid_triangulations.emplace_back(
      &(dof_handler.get_triangulation()), [](auto *) {});
Determine the total number of levels for the multigrid operation and allocate sufficient memory for all levels.

  const unsigned int n_h_levels = coarse_grid_triangulations.size() - 1;

  const auto get_max_active_fe_degree = [&](const auto &dof_handler) {
    unsigned int max = 0;

    for (auto &cell : dof_handler.active_cell_iterators())
      if (cell->is_locally_owned())
        max =
          std::max(max, dof_handler.get_fe(cell->active_fe_index()).degree);

    return Utilities::MPI::max(max, MPI_COMM_WORLD);
  };

  const unsigned int n_p_levels =
    MGTransferGlobalCoarseningTools::create_polynomial_coarsening_sequence(
      get_max_active_fe_degree(dof_handler), mg_data.transfer.p_sequence)
      .size();
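As a worked example (added here for illustration): for this tutorial's default `max_p_degree` of 6 and the `decrease_by_one` sequence, the polynomial levels can be computed directly:

@code
const auto sequence =
  MGTransferGlobalCoarseningTools::create_polynomial_coarsening_sequence(
    6,
    MGTransferGlobalCoarseningTools::PolynomialCoarseningSequenceType::
      decrease_by_one);
// sequence == {1, 2, 3, 4, 5, 6}, i.e., n_p_levels == 6.
@endcode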
  std::map<unsigned int, unsigned int> fe_index_for_degree;
  for (unsigned int i = 0; i < dof_handler.get_fe_collection().size(); ++i)
    {
      const unsigned int degree = dof_handler.get_fe(i).degree;
      Assert(fe_index_for_degree.find(degree) == fe_index_for_degree.end(),
             ExcMessage("FECollection does not contain unique degrees."));
      fe_index_for_degree[degree] = i;
    }

  unsigned int minlevel = 0;
  unsigned int maxlevel = n_h_levels + n_p_levels - 1;

  dof_handlers.resize(minlevel, maxlevel);
  operators.resize(minlevel, maxlevel);
  transfers.resize(minlevel, maxlevel);
Loop from the minimum (coarsest) to the maximum (finest) level and set up the DoFHandler accordingly. We start with the h-levels, where we distribute linear elements on increasingly finer meshes.

  for (unsigned int l = 0; l < n_h_levels; ++l)
    {
      dof_handlers[l].reinit(*coarse_grid_triangulations[l]);
      dof_handlers[l].distribute_dofs(dof_handler.get_fe_collection());
    }
After we reached the finest mesh, we will adjust the polynomial degrees on each level. We reverse iterate over our data structure and start at the finest mesh that contains all information about the active FE indices. We then lower the polynomial degree of each cell level by level.

  for (unsigned int i = 0, l = maxlevel; i < n_p_levels; ++i, --l)
    {
      dof_handlers[l].reinit(dof_handler.get_triangulation());

      if (l == maxlevel) // finest level
        {
          auto &dof_handler_mg = dof_handlers[l];

          auto cell_other = dof_handler.begin_active();
          for (auto &cell : dof_handler_mg.active_cell_iterators())
            {
              if (cell->is_locally_owned())
                cell->set_active_fe_index(cell_other->active_fe_index());
              cell_other++;
            }
        }
      else // coarse level
        {
          auto &dof_handler_fine   = dof_handlers[l + 1];
          auto &dof_handler_coarse = dof_handlers[l + 0];

          auto cell_other = dof_handler_fine.begin_active();
          for (auto &cell : dof_handler_coarse.active_cell_iterators())
            {
              if (cell->is_locally_owned())
                {
                  const unsigned int next_degree =
                    MGTransferGlobalCoarseningTools::
                      create_next_polynomial_coarsening_degree(
                        cell_other->get_fe().degree,
                        mg_data.transfer.p_sequence);
                  Assert(fe_index_for_degree.find(next_degree) !=
                           fe_index_for_degree.end(),
                         ExcMessage("Next polynomial degree in sequence "
                                    "does not exist in FECollection."));

                  cell->set_active_fe_index(fe_index_for_degree[next_degree]);
                }
              cell_other++;
            }
        }

      dof_handlers[l].distribute_dofs(dof_handler.get_fe_collection());
    }
Next, we will create all data structures additionally needed on each multigrid level. This involves determining constraints with homogeneous Dirichlet boundary conditions, and building the operator just like on the active level.

  MGLevelObject<AffineConstraints<typename VectorType::value_type>>
    constraints(minlevel, maxlevel);

  for (unsigned int level = minlevel; level <= maxlevel; ++level)
    {
      const auto &dof_handler = dof_handlers[level];
      auto       &constraint  = constraints[level];

      constraint.reinit(dof_handler.locally_owned_dofs(),
                        DoFTools::extract_locally_relevant_dofs(dof_handler));

      DoFTools::make_hanging_node_constraints(dof_handler, constraint);
      VectorTools::interpolate_boundary_values(mapping_collection,
                                               dof_handler,
                                               0,
                                               Functions::ZeroFunction<dim>(),
                                               constraint);
      constraint.close();

      VectorType dummy;

      operators[level] = std::make_unique<OperatorType>(mapping_collection,
                                                        dof_handler,
                                                        quadrature_collection,
                                                        constraint,
                                                        dummy);
    }
Set up intergrid operators and collect transfer operators within a single operator as needed by the Multigrid solver class.

  for (unsigned int level = minlevel; level < maxlevel; ++level)
    transfers[level + 1].reinit(dof_handlers[level + 1],
                                dof_handlers[level],
                                constraints[level + 1],
                                constraints[level]);

  MGTransferGlobalCoarsening<dim, VectorType> transfer(
    transfers, [&](const auto l, auto &vec) {
      operators[l]->initialize_dof_vector(vec);
    });
Finally, proceed to solve the problem with multigrid.

  mg_solve(solver_control,
           dst,
           src,
           mg_data,
           dof_handler,
           system_matrix,
           operators,
           transfer);
}
<a name="step_75-ThecodeLaplaceProblemcodeclasstemplate"></a>
<h3>The <code>LaplaceProblem</code> class template</h3>
Now we will finally declare the main class of this program, which solves the Laplace equation on subsequently refined function spaces. Its structure will look familiar as it is similar to the main classes of @ref step_27 "step-27" and @ref step_40 "step-40". There are basically just two additions: the SparseMatrix object that would hold the system matrix has been replaced by an object of the LaplaceOperator class for the MatrixFree formulation, and an object of parallel::CellWeights, which will help us with load balancing, has been added.
template <int dim>
class LaplaceProblem
{
public:
  LaplaceProblem(const Parameters &parameters);

  void run();

private:
  void initialize_grid();
  void setup_system();
  void print_diagnostics();
  void solve_system();
  void compute_indicators();
  void adapt_resolution();
  void output_results(const unsigned int cycle);

  MPI_Comm mpi_communicator;

  const Parameters prm;

  parallel::distributed::Triangulation<dim> triangulation;
  DoFHandler<dim>                           dof_handler;

  hp::MappingCollection<dim> mapping_collection;
  hp::FECollection<dim>      fe_collection;
  hp::QCollection<dim>       quadrature_collection;
  hp::QCollection<dim - 1>   face_quadrature_collection;

  IndexSet locally_owned_dofs;
  IndexSet locally_relevant_dofs;

  AffineConstraints<double> constraints;

  LaplaceOperator<dim, double>               laplace_operator;
  LinearAlgebra::distributed::Vector<double> locally_relevant_solution;
  LinearAlgebra::distributed::Vector<double> system_rhs;

  std::unique_ptr<FESeries::Legendre<dim>>    legendre;
  std::unique_ptr<parallel::CellWeights<dim>> cell_weights;

  Vector<float> estimated_error_per_cell;
  Vector<float> hp_decision_indicators;

  ConditionalOStream pcout;
  TimerOutput        computing_timer;
};
<a name="step_75-ThecodeLaplaceProblemcodeclassimplementation"></a>
<h3>The <code>LaplaceProblem</code> class implementation</h3>

<a name="step_75-Constructor"></a>
<h4>Constructor</h4>
The constructor starts with an initializer list that looks similar to the one of @ref step_40 "step-40". We again prepare the ConditionalOStream object to allow only the first process to output anything over the console, and initialize the computing timer properly.

template <int dim>
LaplaceProblem<dim>::LaplaceProblem(const Parameters &parameters)
  : mpi_communicator(MPI_COMM_WORLD)
  , prm(parameters)
  , triangulation(mpi_communicator)
  , dof_handler(triangulation)
  , pcout(std::cout,
          (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
  , computing_timer(mpi_communicator,
                    pcout,
                    TimerOutput::never,
                    TimerOutput::wall_times)
{
  Assert(prm.min_h_level <= prm.max_h_level,
         ExcMessage(
           "Triangulation level limits have been incorrectly set up."));
  Assert(prm.min_p_degree <= prm.max_p_degree,
         ExcMessage("FECollection degrees have been incorrectly set up."));
We need to prepare the data structures for the hp-functionality in the actual body of the constructor, and create corresponding objects for every degree in the specified range from the parameter struct. As we are only dealing with non-distorted rectangular cells, a linear mapping object is sufficient in this context.

In the Parameters struct, we provide ranges for levels on which the function space is operating with a reasonable resolution. The multigrid algorithm requires linear elements on the coarsest possible level. So we start with the lowest polynomial degree and fill the collection with consecutively higher degrees until the user-specified maximum is reached.

  mapping_collection.push_back(MappingQ1<dim>());

  for (unsigned int degree = 1; degree <= prm.max_p_degree; ++degree)
    {
      fe_collection.push_back(FE_Q<dim>(degree));
      quadrature_collection.push_back(QGauss<dim>(degree + 1));
      face_quadrature_collection.push_back(QGauss<dim - 1>(degree + 1));
    }
As our FECollection contains more finite elements than we want to use for the finite element approximation of our solution, we would like to limit the range on which active FE indices can operate. For this, the FECollection class allows to register a hierarchy that determines the succeeding and preceding finite element in case of p-refinement and p-coarsening, respectively. All functions in the hp::Refinement namespace consult this hierarchy to determine future FE indices. We will register such a hierarchy that only works on finite elements with polynomial degrees in the proposed range <code>[min_p_degree, max_p_degree]</code>.

  const unsigned int min_fe_index = prm.min_p_degree - 1;
  fe_collection.set_hierarchy(
    /*next_index=*/
    [](const typename hp::FECollection<dim> &fe_collection,
       const unsigned int                    fe_index) -> unsigned int {
      return ((fe_index + 1) < fe_collection.size()) ? fe_index + 1 :
                                                       fe_index;
    },
    /*previous_index=*/
    [min_fe_index](const typename hp::FECollection<dim> &,
                   const unsigned int fe_index) -> unsigned int {
      Assert(fe_index >= min_fe_index,
             ExcMessage("Finite element is not part of hierarchy!"));
      return (fe_index > min_fe_index) ? fe_index - 1 : fe_index;
    });
We initialize the FESeries::Legendre object in its default configuration for smoothness estimation.

  legendre = std::make_unique<FESeries::Legendre<dim>>(
    SmoothnessEstimator::Legendre::default_fe_series(fe_collection));
The next part is going to be tricky. During execution of refinement, a few hp-algorithms need to interfere with the actual refinement process on the Triangulation object. We do this by connecting several functions to Triangulation::Signals: signals will be called at different stages during the actual refinement process and trigger all connected functions. We require this functionality for load balancing and to limit the polynomial degrees of neighboring cells.

For the former, we would like to assign a weight to every cell that is proportional to the number of degrees of freedom of its future finite element. The library offers a class parallel::CellWeights that allows to easily attach individual weights at the right place during the refinement process, i.e., after all refine and coarsen flags have been set correctly for hp-adaptation and right before repartitioning for load balancing is about to happen. Functions can be registered that will attach weights in the form @f$a (n_\text{dofs})^b@f$ with a provided pair of parameters @f$(a,b)@f$. We register such a function in the following.

For load balancing, efficient solvers like the one we use should scale linearly with the number of degrees of freedom owned. We set the parameters for cell weighting correspondingly: a weighting factor of @f$1@f$ and an exponent of @f$1@f$ (see the definitions of the `weighting_factor` and `weighting_exponent` above).

  cell_weights = std::make_unique<parallel::CellWeights<dim>>(
    dof_handler,
    parallel::CellWeights<dim>::ndofs_weighting(
      {prm.weighting_factor, prm.weighting_exponent}));
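With this choice, the registered weighting function reduces to the identity in the number of degrees of freedom,

@f[
w_K = a \, (n_{\text{dofs},K})^b = n_{\text{dofs},K}
\qquad \text{for } a = b = 1,
@f]

so every cell is weighted by exactly the number of DoFs of its future finite element.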
In h-adaptive applications, we ensure a 2:1 mesh balance by limiting the difference of refinement levels of neighboring cells to one. With the second call in the following code snippet, we will ensure the same for p-levels on neighboring cells: levels of future finite elements are not allowed to differ by more than a specified difference. The function hp::Refinement::limit_p_level_difference() takes care of this, but needs to be connected to a very specific signal in the parallel context. The issue is that we need to know how the mesh will be actually refined to set future FE indices accordingly. As we ask the p4est oracle to perform refinement, we need to ensure that the Triangulation has been updated with the adaptation flags of the oracle first. An instantiation of parallel::distributed::TemporarilyMatchRefineFlags does exactly that for the duration of its life. Thus, we will create an object of this class right before limiting the p-level difference, and connect the corresponding lambda function to the signal Triangulation::Signals::post_p4est_refinement, which will be triggered after the oracle got refined, but before the Triangulation is refined. Furthermore, we specify that this function will be connected to the front of the signal, to ensure that the modification is performed before any other function connected to the same signal.

  triangulation.signals.post_p4est_refinement.connect(
    [&, min_fe_index]() {
      const parallel::distributed::TemporarilyMatchRefineFlags<dim>
        refine_modifier(triangulation);
      hp::Refinement::limit_p_level_difference(dof_handler,
                                               prm.max_p_level_difference,
                                               /*contains=*/min_fe_index);
    },
    boost::signals2::at_front);
}
<a name="step_75-LaplaceProbleminitialize_grid"></a>
<h4>LaplaceProblem::initialize_grid</h4>
For an L-shaped domain, we could use the function GridGenerator::hyper_L() as demonstrated in @ref step_50 "step-50". However, in the 2d case, that particular function removes the first quadrant, while we need the fourth quadrant removed in our scenario. Thus, we will use a different function GridGenerator::subdivided_hyper_L() which gives us more options to create the mesh. Furthermore, we formulate that function in a way that it also generates a 3d mesh: the 2d L-shaped domain will basically be elongated by 1 in the positive z-direction.

The parameters that we need to provide are Point objects for the lower left and top right corners, as well as the number of repetitions that the base mesh will have in each direction. We provide them for the first two dimensions and treat the higher third dimension separately.

To create an L-shaped domain, we need to remove the excess cells. For this, we specify <code>cells_to_remove</code> accordingly: we would like to remove one cell in every direction from the negative end, but the one in the x-direction from the positive end.

On the coarse grid, we set the initial active FE indices and distribute the degrees of freedom once. We do that in order to assign the hp::FECollection to the DoFHandler, so that all cells know how many DoFs they are going to have. This step is mandatory for the weighted load balancing algorithm.
template <int dim>
void LaplaceProblem<dim>::initialize_grid()
{
  TimerOutput::Scope t(computing_timer, "initialize grid");

  std::vector<unsigned int> repetitions(dim);
  Point<dim>                bottom_left, top_right;
  for (unsigned int d = 0; d < dim; ++d)
    if (d < 2)
      {
        repetitions[d] = 2;
        bottom_left[d] = -1.;
        top_right[d]   = 1.;
      }
    else
      {
        repetitions[d] = 1;
        bottom_left[d] = 0.;
        top_right[d]   = 1.;
      }

  std::vector<int> cells_to_remove(dim, 1);
  cells_to_remove[0] = -1;

  GridGenerator::subdivided_hyper_L(
    triangulation, repetitions, bottom_left, top_right, cells_to_remove);

  triangulation.refine_global(prm.min_h_level);

  const unsigned int min_fe_index = prm.min_p_degree - 1;
  for (const auto &cell : dof_handler.active_cell_iterators())
    if (cell->is_locally_owned())
      cell->set_active_fe_index(min_fe_index);

  dof_handler.distribute_dofs(fe_collection);
}
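As a quick sanity check (a back-of-the-envelope count added here): in 2d, the subdivided box consists of @f$2 \times 2 = 4@f$ coarse cells of which one is removed, leaving 3 cells; each global refinement quadruples the cell count, so after `prm.min_h_level` @f$= 5@f$ refinements we expect

@f[
3 \cdot 4^5 = 3072
@f]

active cells, which matches the first cycle of the program output shown in the results section.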
<a name="step_75-LaplaceProblemsetup_system"></a>
<h4>LaplaceProblem::setup_system</h4>
This function looks exactly like the one of @ref step_40 "step-40", but you will notice the absence of the system matrix as well as the scaffold that surrounds it. Instead, we will initialize the MatrixFree formulation of the <code>laplace_operator</code> here. For boundary conditions, we will use the Solution class introduced earlier in this tutorial.
template <int dim>
void LaplaceProblem<dim>::setup_system()
{
  TimerOutput::Scope t(computing_timer, "setup system");

  dof_handler.distribute_dofs(fe_collection);

  locally_owned_dofs = dof_handler.locally_owned_dofs();
  locally_relevant_dofs =
    DoFTools::extract_locally_relevant_dofs(dof_handler);

  locally_relevant_solution.reinit(locally_owned_dofs,
                                   locally_relevant_dofs,
                                   mpi_communicator);
  system_rhs.reinit(locally_owned_dofs, mpi_communicator);

  constraints.clear();
  constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
  VectorTools::interpolate_boundary_values(
    mapping_collection, dof_handler, 0, Solution<dim>(), constraints);
  constraints.close();

  laplace_operator.reinit(mapping_collection,
                          dof_handler,
                          quadrature_collection,
                          constraints,
                          system_rhs);
}
<a name="step_75-LaplaceProblemprint_diagnostics"></a>
<h4>LaplaceProblem::print_diagnostics</h4>
This is a function that prints additional diagnostics about the equation system and its partitioning. In addition to the usual global number of active cells and degrees of freedom, we also output their local equivalents. For a regulated output, we will communicate the local quantities with a Utilities::MPI::gather operation to the first process, which will then output all information. Output of local quantities is limited to the first 8 processes to avoid cluttering the terminal.

On all other processes, the containers for the collected data remain empty. To ensure that we do not access invalid memory with the insertion operator (`<<`) on these processes, we need to check that the containers are not empty.

Furthermore, we would like to print the frequencies of the polynomial degrees in the numerical discretization. Since this information is only stored locally, we will count the finite elements on locally owned cells and later communicate them via Utilities::MPI::sum().
template <int dim>
void LaplaceProblem<dim>::print_diagnostics()
{
  const unsigned int first_n_processes =
    std::min<unsigned int>(8,
                           Utilities::MPI::n_mpi_processes(mpi_communicator));
  const bool output_cropped =
    first_n_processes < Utilities::MPI::n_mpi_processes(mpi_communicator);

  {
    pcout << "   Number of active cells:       "
          << triangulation.n_global_active_cells() << std::endl
          << "     by partition:              ";

    std::vector<unsigned int> n_active_cells_per_subdomain =
      Utilities::MPI::gather(mpi_communicator,
                             triangulation.n_locally_owned_active_cells());
    for (unsigned int i = 0; i < first_n_processes; ++i)
      if (n_active_cells_per_subdomain.size() > 0)
        pcout << ' ' << n_active_cells_per_subdomain[i];
    if (output_cropped)
      pcout << " ...";
    pcout << std::endl;
  }

  {
    pcout << "   Number of degrees of freedom: " << dof_handler.n_dofs()
          << std::endl
          << "     by partition:              ";

    std::vector<types::global_dof_index> n_dofs_per_subdomain =
      Utilities::MPI::gather(mpi_communicator,
                             dof_handler.n_locally_owned_dofs());
    for (unsigned int i = 0; i < first_n_processes; ++i)
      if (n_dofs_per_subdomain.size() > 0)
        pcout << ' ' << n_dofs_per_subdomain[i];
    if (output_cropped)
      pcout << " ...";
    pcout << std::endl;
  }

  {
    std::vector<types::global_dof_index> n_constraints_per_subdomain =
      Utilities::MPI::gather(mpi_communicator, constraints.n_constraints());

    pcout << "   Number of constraints:        "
          << std::accumulate(n_constraints_per_subdomain.begin(),
                             n_constraints_per_subdomain.end(),
                             types::global_dof_index(0))
          << std::endl
          << "     by partition:              ";
    for (unsigned int i = 0; i < first_n_processes; ++i)
      if (n_constraints_per_subdomain.size() > 0)
        pcout << ' ' << n_constraints_per_subdomain[i];
    if (output_cropped)
      pcout << " ...";
    pcout << std::endl;
  }

  {
    std::vector<unsigned int> n_fe_indices(fe_collection.size(), 0);
    for (const auto &cell : dof_handler.active_cell_iterators())
      if (cell->is_locally_owned())
        n_fe_indices[cell->active_fe_index()]++;

    Utilities::MPI::sum(n_fe_indices, mpi_communicator, n_fe_indices);

    pcout << "   Frequencies of poly. degrees:";
    for (unsigned int i = 0; i < fe_collection.size(); ++i)
      if (n_fe_indices[i] > 0)
        pcout << ' ' << fe_collection[i].degree << ':' << n_fe_indices[i];
    pcout << std::endl;
  }
}
<a name="step_75-LaplaceProblemsolve_system"></a>
<h4>LaplaceProblem::solve_system</h4>
The scaffold around the solution is similar to the one of @ref step_40 "step-40". We prepare a vector that matches the requirements of MatrixFree and collect the locally relevant degrees of freedom after solving the equation system. The solution happens with the function introduced earlier.
template <int dim>
void LaplaceProblem<dim>::solve_system()
{
  TimerOutput::Scope t(computing_timer, "solve system");

  LinearAlgebra::distributed::Vector<double> completely_distributed_solution;
  laplace_operator.initialize_dof_vector(completely_distributed_solution);

  SolverControl solver_control(system_rhs.size(),
                               prm.tolerance_factor * system_rhs.l2_norm());

  solve_with_gmg(solver_control,
                 laplace_operator,
                 completely_distributed_solution,
                 system_rhs,
                 prm.mg_data,
                 mapping_collection,
                 dof_handler,
                 quadrature_collection);

  pcout << "   Solved in " << solver_control.last_step() << " iterations."
        << std::endl;

  constraints.distribute(completely_distributed_solution);

  locally_relevant_solution.copy_locally_owned_data_from(
    completely_distributed_solution);
  locally_relevant_solution.update_ghost_values();
}
<a name="step_75-LaplaceProblemcompute_indicators"></a>
<h4>LaplaceProblem::compute_indicators</h4>
This function contains only a part of the typical <code>refine_grid</code> function from other tutorials and is new in that sense. Here, we will only calculate all indicators for adaptation without actually refining the grid. We do this for the purpose of writing all indicators to the file system, so we store them for later.

Since we are dealing with an elliptic problem, we will make use of the KellyErrorEstimator again, but with a slight difference. Modifying the scaling factor of the underlying face integrals to be dependent on the actual polynomial degree of the neighboring elements is favorable in hp-adaptive applications @cite davydov2017hp. We can do this by specifying the very last parameter from the additional ones you notice. The others are actually just the defaults.
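For reference, this is a summary of the chosen scaling strategy in our notation (added here; see the KellyErrorEstimator documentation for the authoritative statement): the jump terms are weighted per face by the face diameter divided by twice the maximal polynomial degree of the two adjacent cells,

@f[
\eta_K^2 \sim \sum_{F \subset \partial K}
\frac{h_F}{2 \max(p_K, p_{K'})}
\int_F \left[ \frac{\partial u_h}{\partial n} \right]^2 \, \mathrm{d}s,
@f]

instead of the degree-independent default scaling.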
For the purpose of hp-adaptation, we will calculate smoothness estimates with the strategy presented in the tutorial introduction and use the implementation in SmoothnessEstimator::Legendre. In the Parameters struct, we set the minimal polynomial degree to 2 as it seems that the smoothness estimation algorithms have trouble with linear elements.
template <int dim>
void LaplaceProblem<dim>::compute_indicators()
{
  TimerOutput::Scope t(computing_timer, "compute indicators");

  estimated_error_per_cell.grow_or_shrink(triangulation.n_active_cells());
  KellyErrorEstimator<dim>::estimate(
    dof_handler,
    face_quadrature_collection,
    /*neumann_bc=*/{},
    locally_relevant_solution,
    estimated_error_per_cell,
    /*component_mask=*/{},
    /*coefficients=*/nullptr,
    /*n_threads=*/numbers::invalid_unsigned_int,
    /*subdomain_id=*/numbers::invalid_subdomain_id,
    /*material_id=*/numbers::invalid_material_id,
    /*strategy=*/
    KellyErrorEstimator<dim>::Strategy::face_diameter_over_twice_max_degree);

  hp_decision_indicators.grow_or_shrink(triangulation.n_active_cells());
  SmoothnessEstimator::Legendre::coefficient_decay(*legendre,
                                                   dof_handler,
                                                   locally_relevant_solution,
                                                   hp_decision_indicators);
}
<a name="step_75-LaplaceProblemadapt_resolution"></a>
<h4>LaplaceProblem::adapt_resolution</h4>
With the previously calculated indicators, we will finally flag all cells for adaptation and also execute refinement in this function. As in previous tutorials, we will use the "fixed number" strategy, but now for hp-adaptation.

template <int dim>
void LaplaceProblem<dim>::adapt_resolution()
{
  TimerOutput::Scope t(computing_timer, "adapt resolution");

First, we will set refine and coarsen flags based on the error estimates on each cell. There is nothing new here: the usage is the same as elaborated in the other deal.II tutorials. Using the fixed number strategy, we will flag 30% of all cells for refinement and 3% for coarsening, as provided in the Parameters struct.

  parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
    triangulation,
    estimated_error_per_cell,
    prm.refine_fraction,
    prm.coarsen_fraction);
Next, we will make all adjustments for hp-adaptation. We want to refine and coarsen those cells flagged in the previous step, but need to decide if we would like to do it by adjusting the grid resolution or the polynomial degree.

The next function call sets future FE indices according to the previously calculated smoothness indicators as p-adaptation indicators. These indices will only be set on those cells that have refine or coarsen flags assigned.

For the p-adaptation fractions, we will take an educated guess. Since we only expect a single singularity in our scenario, i.e., in the origin of the domain, and a smooth solution anywhere else, we would like to strongly prefer p-adaptation over h-adaptation. This is reflected in our choice of a fraction of 90% for both p-refinement and p-coarsening.

  hp::Refinement::p_adaptivity_fixed_number(dof_handler,
                                            hp_decision_indicators,
                                            prm.p_refine_fraction,
                                            prm.p_coarsen_fraction);
After setting all indicators, we will remove those that exceed the specified limits of the provided level ranges in the Parameters struct. This limitation naturally arises for p-adaptation as the number of supplied finite elements is limited. In addition, we registered a custom hierarchy for p-adaptation in the constructor. Now, we need to do this manually in the h-adaptive context like in @ref step_31 "step-31".

We will iterate over all cells on the designated min and max levels and remove the corresponding flags. As an alternative, we could also flag these cells for p-adaptation by setting future FE indices accordingly.

  Assert(triangulation.n_levels() >= prm.min_h_level + 1 &&
           triangulation.n_levels() <= prm.max_h_level + 1,
         ExcInternalError());

  for (const auto &cell :
       triangulation.active_cell_iterators_on_level(prm.max_h_level))
    cell->clear_refine_flag();

  for (const auto &cell :
       triangulation.active_cell_iterators_on_level(prm.min_h_level))
    cell->clear_coarsen_flag();
At this stage, we have both the future FE indices and the classic refine and coarsen flags set. The latter will be interpreted by Triangulation::execute_coarsening_and_refinement() for h-adaptation, and our previous modification ensures that the resulting Triangulation stays within the specified level range.

Now, we would like to only impose one type of adaptation on cells, which is what the next function will sort out for us. In short, on cells which have both types of indicators assigned, we will favor the p-adaptation one and remove the h-adaptation one.

  hp::Refinement::choose_p_over_h(dof_handler);

In the end, we are left to execute coarsening and refinement. Here, not only the grid will be updated, but also all previous future FE indices will become active.

Remember the functions that we attached to triangulation signals in the constructor: they will be triggered in this function call. So there is even more happening: weighted repartitioning will be performed to ensure load balancing, and the difference of p-levels between neighboring cells will be limited.

  triangulation.execute_coarsening_and_refinement();
}
<a name="step_75-LaplaceProblemoutput_results"></a>
<h4>LaplaceProblem::output_results</h4>
Writing results to the file system in parallel applications works exactly like in @ref step_40 "step-40". In addition to the data containers that we prepared throughout the tutorial, we would also like to write out the polynomial degree of each finite element on the grid as well as the subdomain each cell belongs to. We prepare the necessary containers for this in the scope of this function.

template <int dim>
void LaplaceProblem<dim>::output_results(const unsigned int cycle)
{
  TimerOutput::Scope t(computing_timer, "output results");

  Vector<float> fe_degrees(triangulation.n_active_cells());
  for (const auto &cell : dof_handler.active_cell_iterators())
    if (cell->is_locally_owned())
      fe_degrees(cell->active_cell_index()) = cell->get_fe().degree;

  Vector<float> subdomain(triangulation.n_active_cells());
  for (auto &subd : subdomain)
    subd = triangulation.locally_owned_subdomain();

  DataOut<dim> data_out;
  data_out.attach_dof_handler(dof_handler);
  data_out.add_data_vector(locally_relevant_solution, "solution");
  data_out.add_data_vector(fe_degrees, "fe_degree");
  data_out.add_data_vector(subdomain, "subdomain");
  data_out.add_data_vector(estimated_error_per_cell, "error");
  data_out.add_data_vector(hp_decision_indicators, "hp_indicator");
  data_out.build_patches(mapping_collection);

  data_out.write_vtu_with_pvtu_record(
    "./", "solution", cycle, mpi_communicator, 2, 1);
}
<a name="step_75-LaplaceProblemrun"></a>
<h4>LaplaceProblem::run</h4>
The actual run function again looks very familiar from @ref step_40 "step-40". The only addition is the bracketed section that precedes the actual cycle loop. Here, we will pre-calculate the Legendre transformation matrices. In general, these would be calculated on the fly via lazy allocation whenever a certain matrix is needed. For timing purposes, however, we would like to calculate them all at once before the actual time measurement begins. We will thus designate their calculation to their own scope.
template <int dim>
void LaplaceProblem<dim>::run()
{
  pcout << "Running with Trilinos on "
        << Utilities::MPI::n_mpi_processes(mpi_communicator)
        << " MPI rank(s)..." << std::endl;

  {
    pcout << "Calculating transformation matrices..." << std::endl;
    TimerOutput::Scope t(computing_timer, "calculate transformation");
    legendre->precalculate_all_transformation_matrices();
  }

  for (unsigned int cycle = 0; cycle < prm.n_cycles; ++cycle)
    {
      pcout << "Cycle " << cycle << ':' << std::endl;

      if (cycle == 0)
        initialize_grid();
      else
        adapt_resolution();

      setup_system();

      print_diagnostics();

      solve_system();

      compute_indicators();

      output_results(cycle);

      computing_timer.print_summary();
      computing_timer.reset();

      pcout << std::endl;
    }
}
<a name="step_75-main"></a>
<h3>main()</h3>

The final function is the <code>main</code> function that will ultimately create and run a LaplaceProblem instantiation. Its structure is similar to that of most other tutorial programs.
int main(int argc, char *argv[])
{
  try
    {
      using namespace dealii;
      using namespace Step75;

      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      Parameters        prm;
      LaplaceProblem<2> laplace_problem(prm);
      laplace_problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
<a name="step_75-Results"></a><h1>Results</h1>

When you run the program with the given parameters on four processes in release mode, your terminal output should look like this:
@code
Running with Trilinos on 4 MPI rank(s)...
Calculating transformation matrices...
Cycle 0:
   Number of active cells:       3072
   Number of degrees of freedom: 12545
   Number of constraints:        542
   Frequencies of poly. degrees: 2:3072
   Solved in 7 iterations.


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |     0.172s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| calculate transformation        |         1 |    0.0194s |        11% |
| compute indicators              |         1 |   0.00676s |       3.9% |
| initialize grid                 |         1 |     0.011s |       6.4% |
| output results                  |         1 |    0.0343s |        20% |
| setup system                    |         1 |   0.00839s |       4.9% |
| solve system                    |         1 |    0.0896s |        52% |
+---------------------------------+-----------+------------+------------+
Cycle 1:
   Number of active cells:       3351
   Number of degrees of freedom: 18228
   Number of constraints:        1202
   Frequencies of poly. degrees: 2:2522 3:829
   Solved in 7 iterations.


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |     0.165s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| adapt resolution                |         1 |   0.00473s |       2.9% |
| compute indicators              |         1 |   0.00764s |       4.6% |
| output results                  |         1 |    0.0243s |        15% |
| setup system                    |         1 |   0.00662s |         4% |
| solve system                    |         1 |     0.121s |        74% |
+---------------------------------+-----------+------------+------------+
...

Cycle 7:
   Number of active cells:       5610
   Number of degrees of freedom: 82047
   Number of constraints:        14357
   Frequencies of poly. degrees: 2:1126 3:1289 4:2725 5:465 6:5
   Solved in 7 iterations.


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |      1.83s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| adapt resolution                |         1 |   0.00834s |      0.46% |
| compute indicators              |         1 |    0.0178s |      0.97% |
| output results                  |         1 |    0.0434s |       2.4% |
| setup system                    |         1 |    0.0165s |       0.9% |
| solve system                    |         1 |      1.74s |        95% |
+---------------------------------+-----------+------------+------------+
@endcode
When running the code with more processes, you will notice slight differences in the number of active cells and degrees of freedom. This is due to the fact that solver and preconditioner depend on the partitioning of the problem, which might lead to differences in the last digits of the solution and ultimately to different adaptation behavior.

Furthermore, the number of iterations for the solver stays about the same in all cycles despite hp-adaptation, indicating the robustness of the proposed algorithms and promising good scalability for even larger problem sizes and on more processes.
Let us have a look at the graphical output of the program. After all refinement cycles in the given parameter configuration, the actual discretized function space looks like the following, with its partitioning on twelve processes on the left and the polynomial degrees of the finite elements on the right. In the left picture, each color represents a unique subdomain. In the right picture, the lightest color corresponds to the polynomial degree two and the darkest one to the highest degree in use:

<div class="twocolumn" style="width: 80%; text-align: center;">
  <div>
    <img src="https://www.dealii.org/images/steps/developer/step-75.subdomains-07.svg"
         alt="Partitioning after seven refinements.">
  </div>
  <div>
    <img src="https://www.dealii.org/images/steps/developer/step-75.fedegrees-07.svg"
         alt="Local approximation degrees after seven refinements.">
  </div>
</div>
<a name="step-75-extensions"></a>
<a name="step_75-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

This tutorial shows only one particular way of how to use parallel hp-adaptive finite element methods. In the following paragraphs, you will get to know which alternatives are possible. Most of these extensions are already part of https://github.com/marcfehling/hpbox/, which provides you with implementation examples that you can play around with.
<a name="step_75-Differenthpdecisionstrategies"></a><h4>Different hp-decision strategies</h4>

The deal.II library offers multiple strategies to decide which type of adaptation to impose on cells: either adjust the grid resolution or change the polynomial degree. We only presented the <i>Legendre coefficient decay</i> strategy in this tutorial, while @ref step_27 "step-27" demonstrated the <i>Fourier</i> equivalent of the same idea.

See the "possibilities for extensions" section of @ref step_27 "step-27" for an overview of these strategies, or the corresponding documentation for a detailed description.
There, another strategy is mentioned that has not been shown in any tutorial so far: the strategy based on <i>refinement history</i>. The usage of this method for parallel distributed applications is more tricky than the others, so we will highlight the challenges that come along with it. We need information about the final state of refinement flags, and we need to transfer the solution across refined meshes. For the former, we need to attach the hp::Refinement::predict_error() function to the Triangulation::Signals::post_p4est_refinement signal in a way that it will be called <i>after</i> the hp::Refinement::limit_p_level_difference() function. At this stage, all refinement flags and future FE indices are terminally set and a reliable prediction of the error is possible. The predicted error then needs to be transferred across refined meshes with the aid of parallel::distributed::CellDataTransfer.
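A hedged sketch of the signal wiring described above (not the tutorial's tested code; `predicted_errors` is a hypothetical `Vector<float>` member): connecting with the default insertion position runs this lambda after the one registered with `boost::signals2::at_front` in the constructor, so the prediction sees the already limited p-levels.

@code
triangulation.signals.post_p4est_refinement.connect([&]() {
  const parallel::distributed::TemporarilyMatchRefineFlags<dim>
    refine_modifier(triangulation);
  predicted_errors.grow_or_shrink(triangulation.n_active_cells());
  hp::Refinement::predict_error(dof_handler,
                                estimated_error_per_cell,
                                predicted_errors);
});
@endcode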
Try implementing one of these strategies into this tutorial and observe the subtle changes to the results. You will notice that all strategies are capable of identifying the singularities near the reentrant corners and will perform @f$h@f$-refinement in these regions, while preferring @f$p@f$-refinement in the bulk domain. A detailed comparison of these strategies is presented in @cite fehling2020 .
<a name="step_75-Solvewithmatrixbasedmethods"></a><h4>Solve with matrix-based methods</h4>

This tutorial focuses solely on matrix-free strategies. All hp-adaptive algorithms, however, also work with matrix-based approaches in the parallel distributed context.

To create a system matrix, you can either use the LaplaceOperator::get_system_matrix() function, or use an <code>assemble_system()</code> function similar to the one of @ref step_27 "step-27". You can then pass the system matrix to the solver as usual.

You can time the results of both matrix-based and matrix-free implementations, quantify the speed-up, and convince yourself which variant is faster.
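A minimal sketch of the matrix-based variant (assuming the tutorial's `laplace_operator`, `solution`, and `system_rhs` objects have been set up; this is not part of the tutorial code):

@code
const TrilinosWrappers::SparseMatrix &system_matrix =
  laplace_operator.get_system_matrix();

TrilinosWrappers::PreconditionAMG amg;
amg.initialize(system_matrix);

SolverControl control(1000, 1e-8 * system_rhs.l2_norm());
SolverCG<LinearAlgebra::distributed::Vector<double>> cg(control);
cg.solve(system_matrix, solution, system_rhs, amg);
@endcode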
<a name="step_75-Multigridvariants"></a><h4>Multigrid variants</h4>

For the sake of simplicity, we have restricted ourselves to a single type of coarse-grid solver (CG with AMG), smoother (Chebyshev smoother with point Jacobi preconditioner), and geometric-coarsening scheme (global coarsening) within the multigrid algorithm. Feel free to try out alternatives and investigate their performance and robustness.
<a name="step_75-PlainProg"></a>
<h1> The plain program</h1>
@include "step-75.cc"