477 * const unsigned int = 0) const override
483 * template <typename number>
486 * const unsigned int = 0) const
495 * This next class represents the diffusion coefficient. We use a variable
496 * coefficient which is 100.0 at any point where at least one coordinate is
497 * less than -0.5, and 1.0 at all other points. As above, a separate value()
499 * function is provided, and an average() function computes the arithmetic average for a set of points.
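
To make the piecewise definition concrete, here is a minimal, non-vectorized sketch of such a value() function. It is illustrative only: the actual class below additionally provides a templated overload that works on vectorized points and the average_value() helper.

@code
#include <deal.II/base/point.h>

// Illustrative sketch only: scalar evaluation of the piecewise-constant
// diffusion coefficient described above (100.0 if any coordinate is < -0.5,
// 1.0 otherwise).
template <int dim>
double illustrative_coefficient_value(const dealii::Point<dim> &p)
{
  for (int d = 0; d < dim; ++d)
    if (p[d] < -0.5)
      return 100.0;
  return 1.0;
}
@endcode
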
503 * class Coefficient : public Function<dim>
507 * const unsigned int = 0) const override;
509 * template <typename number>
511 * const unsigned int = 0) const;
513 * template <typename number>
518 * When using a coefficient in the MatrixFree framework, we also
519 * need a function that creates a Table of coefficient values for a
520 * set of cells provided by the MatrixFree operator argument here.
523 * template <typename number>
524 * std::shared_ptr<Table<2, VectorizedArray<number>>> make_coefficient_table(
533 * for (int d = 0; d < dim; ++d)
544 * template <typename number>
547 * const unsigned int) const
550 * for (unsigned int i = 0; i < VectorizedArray<number>::size(); ++i)
552 * for (int d = 0; d < dim; ++d)
553 * if (p[d][i] < -0.5)
555 * return_value[i] = 100.0;
560 * return return_value;
566 * template <typename number>
567 * number Coefficient<dim>::average_value(
571 * for (unsigned int i = 0; i < points.size(); ++i)
572 * average += value(points[i]);
573 * average /= points.size();
581 * template <typename number>
582 * std::shared_ptr<Table<2, VectorizedArray<number>>>
583 * Coefficient<dim>::make_coefficient_table(
586 * auto coefficient_table =
587 * std::make_shared<Table<2, VectorizedArray<number>>>();
589 * FEEvaluation<dim, -1, 0, 1, number> fe_eval(mf_storage);
591 * const unsigned int n_cells = mf_storage.n_macro_cells();
592 * const unsigned int n_q_points = fe_eval.n_q_points;
594 * coefficient_table->reinit(n_cells, 1);
596 * for (unsigned int cell = 0; cell < n_cells; ++cell)
598 * fe_eval.reinit(cell);
601 * for (unsigned int q = 0; q < n_q_points; ++q)
602 * average_value += value(fe_eval.quadrature_point(q));
603 * average_value /= n_q_points;
605 * (*coefficient_table)(cell, 0) = average_value;
608 * return coefficient_table;
616 * <a name="Runtimeparameters"></a>
617 * <h3>Run time parameters</h3>
622 * The structure @p Settings parses and stores these parameters to be queried
623 * throughout the program.
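
As a reminder of how the ParameterHandler workflow looks in general, the following is a sketch of the declare/parse/get cycle used by Settings::try_parse(). The entry names and defaults are illustrative assumptions, not necessarily the exact list declared by the program.

@code
#include <deal.II/base/parameter_handler.h>

#include <string>

// Sketch: declare entries, parse the file, then query typed values.
void parameter_handler_sketch(const std::string &prm_filename)
{
  dealii::ParameterHandler prm;
  prm.declare_entry("n_steps", "10",
                    dealii::Patterns::Integer(0),
                    "Number of adaptive refinement steps.");
  prm.declare_entry("smoother dampen", "1.0",
                    dealii::Patterns::Double(0.0),
                    "Dampen factor for the smoother.");

  // parse_input() throws on malformed input; the program catches
  // std::exception and prints the message, as shown further down.
  prm.parse_input(prm_filename);

  const unsigned int n_steps =
    static_cast<unsigned int>(prm.get_integer("n_steps"));
  const double dampen = prm.get_double("smoother dampen");
  (void)n_steps;
  (void)dampen;
}
@endcode
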
628 * bool try_parse(const std::string &prm_filename);
640 * double smoother_dampen;
641 * unsigned int smoother_steps;
642 * unsigned int n_steps;
648 * bool Settings::try_parse(const std::string &prm_filename)
655 * "Number of adaptive refinement steps.");
659 * "Dampen factor for the smoother.");
663 * "Number of smoother steps.");
667 * "Switch between matrix-free GMG, "
668 * "matrix-based GMG, and AMG.");
672 * "Output graphical results.");
674 * if (prm_filename.size() == 0)
676 * std::cout << "**** Error: No input file provided!\n"
677 * << "**** Error: Call this program as './step-50 input.prm\n"
679 * << "**** You may want to use one of the input files in this\n"
680 * << "**** directory, or use the following default values\n"
681 * << "**** to create an input file:\n";
691 * catch (std::exception &e)
694 * std::cerr << e.what() << std::endl;
698 * if (prm.get("solver") == "MF")
699 * this->solver = gmg_mf;
700 * else if (prm.get("solver") == "MB")
701 * this->solver = gmg_mb;
702 * else if (prm.get("solver") == "AMG")
703 * this->solver = amg;
709 * this->smoother_dampen = prm.get_double("smoother dampen");
710 * this->smoother_steps = prm.get_integer("smoother steps");
711 * this->output = prm.get_bool("output");
721 * <a name="LaplaceProblemclass"></a>
722 * <h3>LaplaceProblem class</h3>
726 * This is the main class of the program. It looks very similar to
727 * @ref step_16 "step-16", @ref step_37 "step-37", and @ref step_40 "step-40". For the MatrixFree setup, we use the
729 * `compute_diagonal()`, and `set_coefficient()` functions internally. Note that
730 * the polynomial degree is a template parameter of this class. This is
734 * template <int dim, int degree>
735 * class LaplaceProblem
738 * LaplaceProblem(const Settings &settings);
744 * We will use the following types throughout the program. First the
770 * void setup_system();
771 * void setup_multigrid();
772 * void assemble_system();
773 * void assemble_multigrid();
774 * void assemble_rhs();
777 * void refine_grid();
778 * void output_results(const unsigned int cycle);
795 * MatrixType system_matrix;
796 * MatrixFreeActiveMatrix mf_system_matrix;
813 * The only interesting part about the constructor is that we construct the
814 * multigrid hierarchy unless we use AMG. For that, we need to parse the
815 * run time parameters before this constructor completes.
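
The conditional piece this refers to is the construction setting of the distributed triangulation: the level hierarchy needed by geometric multigrid is only requested when a GMG solver is selected. A self-contained sketch of that choice is shown below (flag names are the standard parallel::distributed::Triangulation ones; the program's actual, abbreviated member initializer follows):

@code
#include <deal.II/distributed/tria.h>

// Sketch of the conditional construction: request the multigrid hierarchy
// at construction time unless AMG is used. `use_amg` stands in for the
// program's `settings.solver == Settings::amg` test.
template <int dim>
void construct_triangulation_sketch(const MPI_Comm mpi_communicator,
                                    const bool     use_amg)
{
  dealii::parallel::distributed::Triangulation<dim> triangulation(
    mpi_communicator,
    dealii::Triangulation<dim>::limit_level_difference_at_vertices,
    use_amg ?
      dealii::parallel::distributed::Triangulation<dim>::default_setting :
      dealii::parallel::distributed::Triangulation<dim>::
        construct_multigrid_hierarchy);
  (void)triangulation;
}
@endcode
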
818 * template <int dim, int degree>
819 * LaplaceProblem<dim, degree>::LaplaceProblem(const Settings &settings)
820 * : settings(settings)
821 * , mpi_communicator(MPI_COMM_WORLD)
825 * (settings.solver == Settings::amg) ?
843 * <a name="LaplaceProblemsetup_system"></a>
844 * <h4>LaplaceProblem::setup_system()</h4>
848 * Unlike @ref step_16 "step-16" and @ref step_37 "step-37", we split the setup into two parts,
849 * setup_system() and setup_multigrid(). Here is the typical setup_system()
850 * function for the active mesh found in most tutorials. For matrix-free, the
851 * active mesh setup is similar to @ref step_37 "step-37"; for matrix-based (GMG and AMG
852 * solvers), the setup is similar to @ref step_40 "step-40".
855 * template <int dim, int degree>
856 * void LaplaceProblem<dim, degree>::setup_system()
860 * dof_handler.distribute_dofs(fe);
863 * locally_owned_dofs = dof_handler.locally_owned_dofs();
865 * solution.reinit(locally_owned_dofs, mpi_communicator);
866 * right_hand_side.reinit(locally_owned_dofs, mpi_communicator);
867 * constraints.reinit(locally_relevant_dofs);
872 * constraints.close();
874 * switch (settings.solver)
876 * case Settings::gmg_mf:
883 * std::shared_ptr<MatrixFree<dim, double>> mf_storage =
884 * std::make_shared<MatrixFree<dim, double>>();
885 * mf_storage->reinit(dof_handler,
890 * mf_system_matrix.initialize(mf_storage);
892 * const Coefficient<dim> coefficient;
893 * mf_system_matrix.set_coefficient(
894 * coefficient.make_coefficient_table(*mf_storage));
899 * case Settings::gmg_mb:
900 * case Settings::amg:
902 * #ifdef USE_PETSC_LA
907 * locally_owned_dofs,
909 * locally_relevant_dofs);
911 * system_matrix.reinit(locally_owned_dofs,
912 * locally_owned_dofs,
917 * locally_owned_dofs,
918 * locally_relevant_dofs,
922 * system_matrix.reinit(dsp);
936 * <a name="LaplaceProblemsetup_multigrid"></a>
937 * <h4>LaplaceProblem::setup_multigrid()</h4>
941 * This function does the multilevel setup for both matrix-free and
942 * matrix-based GMG. The matrix-free setup is similar to that of @ref step_37 "step-37", and
943 * the matrix-based is similar to @ref step_16 "step-16", except we must use appropriate
944 * distributed sparsity patterns.
948 * The function is not called for the AMG approach, but to err on the
949 * safe side, the main `switch` statement of this function
950 * nevertheless makes sure that the function only operates on known
951 * multigrid settings by throwing an assertion if the function were
952 * called for anything other than the two geometric multigrid methods.
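
A minimal sketch of that guard is shown here for orientation (the exact assertion used in the program may differ):

@code
// Sketch of the safety net in setup_multigrid(): only the two geometric
// multigrid variants are handled; anything else trips an assertion.
switch (settings.solver)
  {
    case Settings::gmg_mf:
      // ... matrix-free level setup ...
      break;
    case Settings::gmg_mb:
      // ... matrix-based level setup ...
      break;
    default:
      Assert(false, ExcInternalError());
  }
@endcode
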
955 * template <int dim, int degree>
956 * void LaplaceProblem<dim, degree>::setup_multigrid()
960 * dof_handler.distribute_mg_dofs();
962 * mg_constrained_dofs.clear();
963 * mg_constrained_dofs.initialize(dof_handler);
966 * mg_constrained_dofs.make_zero_boundary_constraints(dof_handler, boundary_ids);
968 * const unsigned int n_levels = triangulation.n_global_levels();
970 * switch (settings.solver)
972 * case Settings::gmg_mf:
974 * mf_mg_matrix.resize(0, n_levels - 1);
983 * level_constraints.reinit(relevant_dofs);
985 * mg_constrained_dofs.get_boundary_indices(level));
986 * level_constraints.close();
995 * std::shared_ptr<MatrixFree<dim, float>> mf_storage_level(
997 * mf_storage_level->reinit(dof_handler,
1002 * mf_mg_matrix[level].initialize(mf_storage_level,
1003 * mg_constrained_dofs,
1006 * const Coefficient<dim> coefficient;
1007 * mf_mg_matrix[level].set_coefficient(
1008 * coefficient.make_coefficient_table(*mf_storage_level));
1010 * mf_mg_matrix[level].compute_diagonal();
1016 * case Settings::gmg_mb:
1018 * mg_matrix.resize(0, n_levels - 1);
1019 * mg_matrix.clear_elements();
1020 * mg_interface_in.resize(0, n_levels - 1);
1021 * mg_interface_in.clear_elements();
1031 * #ifdef USE_PETSC_LA
1037 * dof_handler.locally_owned_mg_dofs(level),
1041 * mg_matrix[level].reinit(
1042 * dof_handler.locally_owned_mg_dofs(level),
1043 * dof_handler.locally_owned_mg_dofs(level),
1045 * mpi_communicator);
1048 * dof_handler.locally_owned_mg_dofs(level),
1049 * dof_handler.locally_owned_mg_dofs(level),
1051 * mpi_communicator);
1055 * mg_matrix[level].reinit(dsp);
1060 * #ifdef USE_PETSC_LA
1063 * mg_constrained_dofs,
1069 * dof_handler.locally_owned_mg_dofs(level),
1073 * mg_interface_in[level].reinit(
1074 * dof_handler.locally_owned_mg_dofs(level),
1075 * dof_handler.locally_owned_mg_dofs(level),
1077 * mpi_communicator);
1080 * dof_handler.locally_owned_mg_dofs(level),
1081 * dof_handler.locally_owned_mg_dofs(level),
1083 * mpi_communicator);
1086 * mg_constrained_dofs,
1090 * mg_interface_in[level].reinit(dsp);
1106 * <a name="LaplaceProblemassemble_system"></a>
1107 * <h4>LaplaceProblem::assemble_system()</h4>
1111 * The assembly is split into three parts: `assemble_system()`,
1112 * `assemble_multigrid()`, and `assemble_rhs()`. The
1113 * `assemble_system()` function here assembles and stores the (global)
1114 * system matrix and the right-hand side for the matrix-based
1115 * methods. It is similar to the assembly in @ref step_40 "step-40".
1119 * Note that the matrix-free method does not execute this function as it does
1121 * not assemble a matrix, and instead only assembles the right-hand side in assemble_rhs().
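
In formulas, with the cell-averaged coefficient value @f$\varepsilon_K@f$ obtained from average_value(), the local contributions accumulated in the quadrature loop below are

@f[
  A^K_{ij} = \sum_q \varepsilon_K \,
             \nabla\varphi_i(\mathbf x_q)\cdot\nabla\varphi_j(\mathbf x_q)\,
             \mathrm{JxW}_q,
  \qquad
  F^K_i = \sum_q \varphi_i(\mathbf x_q)\, f(\mathbf x_q)\, \mathrm{JxW}_q,
@f]

where @f$\mathrm{JxW}_q@f$ is the quadrature weight times the Jacobian determinant returned by fe_values.JxW().
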
1124 * template <int dim, int degree>
1125 * void LaplaceProblem<dim, degree>::assemble_system()
1129 * const QGauss<dim> quadrature_formula(degree + 1);
1132 * quadrature_formula,
1136 * const unsigned int dofs_per_cell = fe.dofs_per_cell;
1137 * const unsigned int n_q_points = quadrature_formula.size();
1142 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1144 * const Coefficient<dim> coefficient;
1145 * RightHandSide<dim> rhs;
1146 * std::vector<double> rhs_values(n_q_points);
1148 * for (const auto &cell : dof_handler.active_cell_iterators())
1149 * if (cell->is_locally_owned())
1154 * fe_values.reinit(cell);
1156 * const double coefficient_value =
1157 * coefficient.average_value(fe_values.get_quadrature_points());
1158 * rhs.value_list(fe_values.get_quadrature_points(), rhs_values);
1160 * for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1161 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1163 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1165 * coefficient_value *
1166 * fe_values.shape_grad(i, q_point) *
1167 * fe_values.shape_grad(j, q_point) *
1168 * fe_values.JxW(q_point);
1171 * fe_values.shape_value(i, q_point) *
1172 * rhs_values[q_point] *
1173 * fe_values.JxW(q_point);
1176 * cell->get_dof_indices(local_dof_indices);
1177 * constraints.distribute_local_to_global(cell_matrix,
1179 * local_dof_indices,
1192 * <a name="LaplaceProblemassemble_multigrid"></a>
1193 * <h4>LaplaceProblem::assemble_multigrid()</h4>
1197 * The following function assembles and stores the multilevel matrices for the
1198 * matrix-based GMG method. This function is similar to the one found in
1199 * @ref step_16 "step-16", only here it works for distributed meshes. This difference amounts
1200 * to adding a condition that we only assemble on locally owned level cells and
1204 * template <int dim, int degree>
1205 * void LaplaceProblem<dim, degree>::assemble_multigrid()
1212 * quadrature_formula,
1216 * const unsigned int dofs_per_cell = fe.dofs_per_cell;
1217 * const unsigned int n_q_points = quadrature_formula.size();
1221 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1223 * const Coefficient<dim> coefficient;
1225 * std::vector<AffineConstraints<double>> boundary_constraints(
1233 * boundary_constraints[level].reinit(dof_set);
1234 * boundary_constraints[level].add_lines(
1235 * mg_constrained_dofs.get_refinement_edge_indices(level));
1236 * boundary_constraints[level].add_lines(
1237 * mg_constrained_dofs.get_boundary_indices(level));
1239 * boundary_constraints[level].close();
1242 * for (const auto &cell : dof_handler.cell_iterators())
1243 * if (cell->level_subdomain_id() == triangulation.locally_owned_subdomain())
1246 * fe_values.reinit(cell);
1248 * const double coefficient_value =
1249 * coefficient.average_value(fe_values.get_quadrature_points());
1251 * for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1252 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1253 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1255 * coefficient_value * fe_values.shape_grad(i, q_point) *
1256 * fe_values.shape_grad(j, q_point) * fe_values.JxW(q_point);
1258 * cell->get_mg_dof_indices(local_dof_indices);
1260 * boundary_constraints[cell->level()].distribute_local_to_global(
1261 * cell_matrix, local_dof_indices, mg_matrix[cell->level()]);
1263 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1264 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1265 * if (mg_constrained_dofs.is_interface_matrix_entry(
1266 * cell->level(), local_dof_indices[i], local_dof_indices[j]))
1267 * mg_interface_in[cell->level()].add(local_dof_indices[i],
1268 * local_dof_indices[j],
1272 * for (unsigned int i = 0; i < triangulation.n_global_levels(); ++i)
1284 * <a name="LaplaceProblemassemble_rhs"></a>
1285 * <h4>LaplaceProblem::assemble_rhs()</h4>
1289 * The final function in this triptych assembles the right-hand side vector. Since we are using the matrix-free
1291 * framework, we don't have to assemble the matrix and can get away
1292 * with only assembling the right hand side. We could do this by extracting the
1293 * code from the `assemble_system()` function above that deals with the right
1294 * hand side, but we decide instead to go all in on the matrix-free approach and
1295 * do the assembly of the right-hand side in a matrix-free way as well.
1299 * The result is a function that is similar
1300 * to the one found in the "Use FEEvaluation::read_dof_values_plain()
1301 * to avoid resolving constraints" subsection in the "Possibilities
1302 * for extensions" section of @ref step_37 "step-37".
1306 * The reason for this function is that the MatrixFree operators do not take
1307 * into account non-homogeneous Dirichlet constraints, instead treating all
1308 * Dirichlet constraints as homogeneous. To account for this, the right-hand
1309 * side here is assembled as the residual @f$r_0 = f-Au_0@f$, where @f$u_0@f$ is a
1310 * zero vector except in the Dirichlet values. Then when solving, we have that
1311 * the solution is @f$u = u_0 + A^{-1}r_0@f$. This can be seen as a Newton
1312 * iteration on a linear system with initial guess @f$u_0@f$. The CG solve in the
1313 * `solve()` function below computes @f$A^{-1}r_0@f$ and the call to
1314 * `constraints.distribute()` (which directly follows) adds the @f$u_0@f$.
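
Spelled out, writing the solution as @f$u = u_0 + e@f$ and inserting it into @f$Au = f@f$ gives

@f[
  A(u_0 + e) = f
  \quad\Longleftrightarrow\quad
  A e = f - A u_0 = r_0,
  \qquad\text{so}\qquad
  u = u_0 + A^{-1} r_0,
@f]

which is exactly the combination of the CG solve and the subsequent constraints.distribute() call just described.
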
1318 * Obviously, since we are considering a problem with zero Dirichlet boundary,
1319 * we could have taken a similar approach to @ref step_37 "step-37" `assemble_rhs()`, but this
1320 * additional work allows us to change the problem declaration if we so choose.
1325 * This function has two parts in the integration loop: applying the negative
1326 * of matrix @f$A@f$ to @f$u_0@f$ by submitting the negative of the gradient, and adding
1327 * the right-hand side contribution by submitting the value @f$f@f$. We must be sure
1328 * to use `read_dof_values_plain()` for evaluating @f$u_0@f$ as `read_dof_values()`
1329 * would set all Dirichlet values to zero.
1333 * Finally, the system_rhs vector is of type LA::MPI::Vector, but the
1334 * MatrixFree class only works for
1335 * ::LinearAlgebra::distributed::Vector. Therefore we must
1336 * compute the right-hand side using MatrixFree functionality and then
1337 * use the functions in the `ChangeVectorType` namespace to copy it to
1341 * template <int dim, int degree>
1342 * void LaplaceProblem<dim, degree>::assemble_rhs()
1344 * TimerOutput::Scope timing(computing_timer, "Assemble right-hand side");
1346 * MatrixFreeActiveVector solution_copy;
1347 * MatrixFreeActiveVector right_hand_side_copy;
1348 * mf_system_matrix.initialize_dof_vector(solution_copy);
1349 * mf_system_matrix.initialize_dof_vector(right_hand_side_copy);
1351 * solution_copy = 0.;
1352 * constraints.distribute(solution_copy);
1353 * solution_copy.update_ghost_values();
1354 * right_hand_side_copy = 0;
1355 * const Table<2, VectorizedArray<double>> &coefficient =
1356 * *(mf_system_matrix.get_coefficient());
1358 * RightHandSide<dim> right_hand_side_function;
1360 * FEEvaluation<dim, degree, degree + 1, 1, double> phi(
1361 * *mf_system_matrix.get_matrix_free());
1363 * for (unsigned int cell = 0;
1364 * cell < mf_system_matrix.get_matrix_free()->n_macro_cells();
1368 * phi.read_dof_values_plain(solution_copy);
1369 * phi.evaluate(false, true, false);
1371 * for (unsigned int q = 0; q < phi.n_q_points; ++q)
1373 * phi.submit_gradient(-1.0 *
1374 * (coefficient(cell, 0) * phi.get_gradient(q)),
1377 * right_hand_side_function.value(phi.quadrature_point(q)), q);
1380 * phi.integrate_scatter(true, true, right_hand_side_copy);
1383 * right_hand_side_copy.compress(VectorOperation::add);
1385 * ChangeVectorTypes::copy(right_hand_side, right_hand_side_copy);
1393 * <a name="LaplaceProblemsolve"></a>
1394 * <h4>LaplaceProblem::solve()</h4>
1398 * Here we set up the multigrid preconditioner, test the timing of a single
1399 * V-cycle, and solve the linear system. Unsurprisingly, this is one of the
1400 * places where the three methods differ the most.
1403 * template <int dim, int degree>
1404 * void LaplaceProblem<dim, degree>::solve()
1406 * TimerOutput::Scope timing(computing_timer, "Solve");
1408 * SolverControl solver_control(1000, 1.e-10 * right_hand_side.l2_norm());
1409 * solver_control.enable_history_data();
1415 * The solver for the matrix-free GMG method is similar to @ref step_37 "step-37", apart
1416 * from adding some interface matrices in complete analogy to @ref step_16 "step-16".
1419 * switch (settings.solver)
1421 * case Settings::gmg_mf:
1423 * computing_timer.enter_subsection("Solve: Preconditioner setup");
1425 * MGTransferMatrixFree<dim, float> mg_transfer(mg_constrained_dofs);
1426 * mg_transfer.build(dof_handler);
1428 * SolverControl coarse_solver_control(1000, 1e-12, false, false);
1429 * SolverCG<MatrixFreeLevelVector> coarse_solver(coarse_solver_control);
1430 * PreconditionIdentity identity;
1431 * MGCoarseGridIterativeSolver<MatrixFreeLevelVector,
1432 * SolverCG<MatrixFreeLevelVector>,
1433 * MatrixFreeLevelMatrix,
1434 * PreconditionIdentity>
1435 * coarse_grid_solver(coarse_solver, mf_mg_matrix[0], identity);
1437 * using Smoother = ::PreconditionJacobi<MatrixFreeLevelMatrix>;
1438 * MGSmootherPrecondition<MatrixFreeLevelMatrix,
1440 * MatrixFreeLevelVector>
1442 * smoother.initialize(mf_mg_matrix,
1443 * typename Smoother::AdditionalData(
1444 * settings.smoother_dampen));
1445 * smoother.set_steps(settings.smoother_steps);
1447 * mg::Matrix<MatrixFreeLevelVector> mg_m(mf_mg_matrix);
1450 * MatrixFreeOperators::MGInterfaceOperator<MatrixFreeLevelMatrix>>
1451 * mg_interface_matrices;
1452 * mg_interface_matrices.resize(0, triangulation.n_global_levels() - 1);
1453 * for (unsigned int level = 0; level < triangulation.n_global_levels();
1455 * mg_interface_matrices[level].initialize(mf_mg_matrix[level]);
1456 * mg::Matrix<MatrixFreeLevelVector> mg_interface(mg_interface_matrices);
1458 * Multigrid<MatrixFreeLevelVector> mg(
1459 * mg_m, coarse_grid_solver, mg_transfer, smoother, smoother);
1460 * mg.set_edge_matrices(mg_interface, mg_interface);
1462 * PreconditionMG<dim,
1463 * MatrixFreeLevelVector,
1464 * MGTransferMatrixFree<dim, float>>
1465 * preconditioner(dof_handler, mg, mg_transfer);
1469 * Copy the solution vector and right-hand side from LA::MPI::Vector
1470 * to ::LinearAlgebra::distributed::Vector so that we can solve.
1473 * MatrixFreeActiveVector solution_copy;
1474 * MatrixFreeActiveVector right_hand_side_copy;
1475 * mf_system_matrix.initialize_dof_vector(solution_copy);
1476 * mf_system_matrix.initialize_dof_vector(right_hand_side_copy);
1478 * ChangeVectorTypes::copy(solution_copy, solution);
1479 * ChangeVectorTypes::copy(right_hand_side_copy, right_hand_side);
1480 * computing_timer.leave_subsection("Solve: Preconditioner setup");
1484 * Timing for 1 V-cycle.
1488 * TimerOutput::Scope timing(computing_timer,
1489 * "Solve: 1 multigrid V-cycle");
1490 * preconditioner.vmult(solution_copy, right_hand_side_copy);
1492 * solution_copy = 0.;
1496 * Solve the linear system, update the ghost values of the solution,
1497 * copy back to LA::MPI::Vector and distribute constraints.
1501 * SolverCG<MatrixFreeActiveVector> solver(solver_control);
1503 * TimerOutput::Scope timing(computing_timer, "Solve: CG");
1504 * solver.solve(mf_system_matrix,
1506 * right_hand_side_copy,
1510 * solution_copy.update_ghost_values();
1511 * ChangeVectorTypes::copy(solution, solution_copy);
1512 * constraints.distribute(solution);
1519 * Solver for the matrix-based GMG method, similar to @ref step_16 "step-16", only
1520 * using a Jacobi smoother instead of a SOR smoother (which is not
1521 * implemented in parallel).
1524 * case Settings::gmg_mb:
1526 * computing_timer.enter_subsection("Solve: Preconditioner setup");
1528 * MGTransferPrebuilt<VectorType> mg_transfer(mg_constrained_dofs);
1529 * mg_transfer.build(dof_handler);
1531 * SolverControl coarse_solver_control(1000, 1e-12, false, false);
1532 * SolverCG<VectorType> coarse_solver(coarse_solver_control);
1533 * PreconditionIdentity identity;
1534 * MGCoarseGridIterativeSolver<VectorType,
1535 * SolverCG<VectorType>,
1537 * PreconditionIdentity>
1538 * coarse_grid_solver(coarse_solver, mg_matrix[0], identity);
1540 * using Smoother = LA::MPI::PreconditionJacobi;
1541 * MGSmootherPrecondition<MatrixType, Smoother, VectorType> smoother;
1543 * #ifdef USE_PETSC_LA
1544 * smoother.initialize(mg_matrix);
1546 * settings.smoother_dampen == 1.0,
1547 * ExcNotImplemented(
1550 * smoother.initialize(mg_matrix, settings.smoother_dampen);
1553 * smoother.set_steps(settings.smoother_steps);
1555 * mg::Matrix<VectorType> mg_m(mg_matrix);
1556 * mg::Matrix<VectorType> mg_in(mg_interface_in);
1557 * mg::Matrix<VectorType> mg_out(mg_interface_in);
1559 * Multigrid<VectorType> mg(
1560 * mg_m, coarse_grid_solver, mg_transfer, smoother, smoother);
1561 * mg.set_edge_matrices(mg_out, mg_in);
1564 * PreconditionMG<dim, VectorType, MGTransferPrebuilt<VectorType>>
1565 * preconditioner(dof_handler, mg, mg_transfer);
1567 * computing_timer.leave_subsection("Solve: Preconditioner setup");
1571 * Timing for 1 V-cycle.
1575 * TimerOutput::Scope timing(computing_timer,
1576 * "Solve: 1 multigrid
V-cycle
");
1577 * preconditioner.vmult(solution, right_hand_side);
1583 * Solve the linear system and distribute constraints.
1587 * SolverCG<VectorType> solver(solver_control);
1589 * TimerOutput::Scope timing(computing_timer, "Solve: CG");
1590 * solver.solve(system_matrix,
1596 * constraints.distribute(solution);
1603 * Solver for the AMG method, similar to @ref step_40 "step-40".
1606 * case Settings::amg:
1608 * computing_timer.enter_subsection("Solve: Preconditioner setup");
1610 * PreconditionAMG preconditioner;
1611 * PreconditionAMG::AdditionalData Amg_data;
1613 * #ifdef USE_PETSC_LA
1614 * Amg_data.symmetric_operator = true;
1616 * Amg_data.elliptic = true;
1617 * Amg_data.smoother_type = "Jacobi";
1618 * Amg_data.higher_order_elements = true;
1619 * Amg_data.smoother_sweeps = settings.smoother_steps;
1620 * Amg_data.aggregation_threshold = 0.02;
1623 * Amg_data.output_details = false;
1625 * preconditioner.initialize(system_matrix, Amg_data);
1626 * computing_timer.leave_subsection("Solve: Preconditioner setup");
1630 * Timing for 1 V-cycle.
1634 * TimerOutput::Scope timing(computing_timer,
1635 * "Solve: 1 multigrid
V-cycle
");
1636 * preconditioner.vmult(solution, right_hand_side);
1642 * Solve the linear system and distribute constraints.
1646 * SolverCG<VectorType> solver(solver_control);
1648 * TimerOutput::Scope timing(computing_timer, "Solve: CG");
1649 * solver.solve(system_matrix,
1654 * constraints.distribute(solution);
1660 * Assert(false, ExcInternalError());
1663 * pcout << " Number of CG iterations: " << solver_control.last_step()
1671 * <a name="Theerrorestimator"></a>
1672 * <h3>The error estimator</h3>
1676 * We use the FEInterfaceValues class to assemble an error estimator to decide
1677 * which cells to refine. See the exact definition of the cell and face
1678 * integrals in the introduction. To use the method, we define Scratch and
1679 * Copy objects for the MeshWorker::mesh_loop() with much of the following code
1680 * being in essence as was set up in @ref step_12 "step-12" already (or at least similar in spirit).
1684 * template <int dim>
1685 * struct ScratchData
1687 * ScratchData(const Mapping<dim> & mapping,
1688 * const FiniteElement<dim> &fe,
1689 * const unsigned int quadrature_degree,
1690 * const UpdateFlags update_flags,
1691 * const UpdateFlags interface_update_flags)
1692 * : fe_values(mapping, fe, QGauss<dim>(quadrature_degree), update_flags)
1693 * , fe_interface_values(mapping,
1695 * QGauss<dim - 1>(quadrature_degree),
1696 * interface_update_flags)
1700 * ScratchData(const ScratchData<dim> &scratch_data)
1701 * : fe_values(scratch_data.fe_values.get_mapping(),
1702 * scratch_data.fe_values.get_fe(),
1703 * scratch_data.fe_values.get_quadrature(),
1704 * scratch_data.fe_values.get_update_flags())
1705 * , fe_interface_values(scratch_data.fe_values.get_mapping(),
1706 * scratch_data.fe_values.get_fe(),
1707 * scratch_data.fe_interface_values.get_quadrature(),
1708 * scratch_data.fe_interface_values.get_update_flags())
1711 * FEValues<dim> fe_values;
1712 * FEInterfaceValues<dim> fe_interface_values;
1720 * : cell_index(numbers::invalid_unsigned_int)
1724 * CopyData(const CopyData &) = default;
1728 * unsigned int cell_indices[2];
1732 * unsigned int cell_index;
1734 * std::vector<FaceData> face_data;
1738 * template <int dim, int degree>
1739 * void LaplaceProblem<dim, degree>::estimate()
1741 * TimerOutput::Scope timing(computing_timer, "Estimate");
1743 * VectorType temp_solution;
1744 * temp_solution.reinit(locally_owned_dofs,
1745 * locally_relevant_dofs,
1746 * mpi_communicator);
1747 * temp_solution = solution;
1749 * const Coefficient<dim> coefficient;
1751 * estimated_error_per_cell.reinit(triangulation.n_active_cells());
1753 * using Iterator = typename DoFHandler<dim>::active_cell_iterator;
1757 * Assembler for cell residual @f$h \| f + \epsilon \triangle u \|_K@f$
1760 * auto cell_worker = [&](const Iterator & cell,
1761 * ScratchData<dim> &scratch_data,
1762 * CopyData & copy_data) {
1763 * FEValues<dim> &fe_values = scratch_data.fe_values;
1764 * fe_values.reinit(cell);
1766 * RightHandSide<dim> rhs;
1767 * const double rhs_value = rhs.value(cell->center());
1769 * const double nu = coefficient.value(cell->center());
1771 * std::vector<Tensor<2, dim>> hessians(fe_values.n_quadrature_points);
1772 * fe_values.get_function_hessians(temp_solution, hessians);
1774 * copy_data.cell_index = cell->active_cell_index();
1776 * double residual_norm_square = 0.;
1777 * for (unsigned k = 0; k < fe_values.n_quadrature_points; ++k)
1779 * const double residual = (rhs_value + nu * trace(hessians[k]));
1780 * residual_norm_square += residual * residual * fe_values.JxW(k);
1783 * copy_data.value = cell->diameter() * std::sqrt(residual_norm_square);
1788 * Assembler for face term @f$\sum_F h_F \| \jump{\epsilon \nabla u \cdot n} \|_F@f$
1792 * auto face_worker = [&](const Iterator & cell,
1793 * const unsigned int &f,
1794 * const unsigned int &sf,
1795 * const Iterator & ncell,
1796 * const unsigned int &nf,
1797 * const unsigned int &nsf,
1798 * ScratchData<dim> & scratch_data,
1799 * CopyData & copy_data) {
1800 * FEInterfaceValues<dim> &fe_interface_values =
1801 * scratch_data.fe_interface_values;
1802 * fe_interface_values.reinit(cell, f, sf, ncell, nf, nsf);
1804 * copy_data.face_data.emplace_back();
1805 * CopyData::FaceData &copy_data_face = copy_data.face_data.back();
1807 * copy_data_face.cell_indices[0] = cell->active_cell_index();
1808 * copy_data_face.cell_indices[1] = ncell->active_cell_index();
1810 * const double coeff1 = coefficient.value(cell->center());
1811 * const double coeff2 = coefficient.value(ncell->center());
1813 * std::vector<Tensor<1, dim>> grad_u[2];
1815 * for (unsigned int i = 0; i < 2; ++i)
1817 * grad_u[i].resize(fe_interface_values.n_quadrature_points);
1818 * fe_interface_values.get_fe_face_values(i).get_function_gradients(
1819 * temp_solution, grad_u[i]);
1822 * double jump_norm_square = 0.;
1824 * for (unsigned int qpoint = 0;
1825 * qpoint < fe_interface_values.n_quadrature_points;
1828 * const double jump =
1829 * coeff1 * grad_u[0][qpoint] * fe_interface_values.normal(qpoint) -
1830 * coeff2 * grad_u[1][qpoint] * fe_interface_values.normal(qpoint);
1832 * jump_norm_square += jump * jump * fe_interface_values.JxW(qpoint);
1835 * const double h = cell->face(f)->measure();
1836 * copy_data_face.values[0] = 0.5 * h * std::sqrt(jump_norm_square);
1837 * copy_data_face.values[1] = copy_data_face.values[0];
1840 * auto copier = [&](const CopyData &copy_data) {
1841 * if (copy_data.cell_index != numbers::invalid_unsigned_int)
1842 * estimated_error_per_cell[copy_data.cell_index] += copy_data.value;
1844 * for (auto &cdf : copy_data.face_data)
1845 * for (unsigned int j = 0; j < 2; ++j)
1846 * estimated_error_per_cell[cdf.cell_indices[j]] += cdf.values[j];
1849 * const unsigned int n_gauss_points = degree + 1;
1850 * ScratchData<dim> scratch_data(mapping,
1853 * update_hessians | update_quadrature_points |
1854 * update_JxW_values,
1855 * update_values | update_gradients |
1856 * update_JxW_values | update_normal_vectors);
1857 * CopyData copy_data;
1861 * We need to assemble each interior face once but we need to make sure that
1862 * both processes assemble the face term between a locally owned and a ghost
1863 * cell. This is achieved by setting the
1864 * MeshWorker::assemble_ghost_faces_both flag. We need to do this, because
1865 * we do not communicate the error estimator contributions here.
1868 * MeshWorker::mesh_loop(dof_handler.begin_active(),
1869 * dof_handler.end(),
1874 * MeshWorker::assemble_own_cells |
1875 * MeshWorker::assemble_ghost_faces_both |
1876 * MeshWorker::assemble_own_interior_faces_once,
1877 * /*boundary_worker=*/nullptr,
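
The listing above is abbreviated; for orientation, the complete call plausibly reads as follows. This is a sketch using the documented MeshWorker::mesh_loop() argument order and the lambdas and sample objects defined above; the exact formatting in the program may differ.

@code
// Sketch of the full mesh_loop() call: the cell and face workers feed their
// contributions through the copier; ghost faces are assembled on both sides
// since estimator contributions are not communicated.
MeshWorker::mesh_loop(dof_handler.begin_active(),
                      dof_handler.end(),
                      cell_worker,
                      copier,
                      scratch_data,
                      copy_data,
                      MeshWorker::assemble_own_cells |
                        MeshWorker::assemble_ghost_faces_both |
                        MeshWorker::assemble_own_interior_faces_once,
                      /*boundary_worker=*/nullptr,
                      face_worker);
@endcode
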
1885 * <a name="LaplaceProblemrefine_grid"></a>
1886 * <h4>LaplaceProblem::refine_grid()</h4>
1890 * We use the cell-wise estimator stored in the vector @p estimate_vector and
1891 * refine a fixed number of cells (chosen here to roughly double the number of
1892 * DoFs in each step).
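
The fraction used below, @f$f = 1/(2^{\text{dim}}-1)@f$, follows from a short calculation: refining a fraction @f$f@f$ of the @f$N@f$ cells replaces each of them by @f$2^{\text{dim}}@f$ children, so (ignoring coarsening and hanging-node effects)

@f[
  N_{\text{new}} \approx (1-f)\,N + 2^{\text{dim}} f\, N
                = \bigl(1 + (2^{\text{dim}}-1)f\bigr) N
                = 2N .
@f]
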
1895 * template <int dim, int degree>
1896 * void LaplaceProblem<dim, degree>::refine_grid()
1898 * TimerOutput::Scope timing(computing_timer, "Refine grid");
1900 * const double refinement_fraction = 1. / (std::pow(2.0, dim) - 1.);
1901 * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
1902 * triangulation, estimated_error_per_cell, refinement_fraction, 0.0);
1904 * triangulation.execute_coarsening_and_refinement();
1911 * <a name="LaplaceProblemoutput_results"></a>
1912 * <h4>LaplaceProblem::output_results()</h4>
1916 * The output_results() function is similar to the ones found in many of the
1917 * tutorials (see @ref step_40 "step-40" for example).
1920 * template <int dim, int degree>
1921 * void LaplaceProblem<dim, degree>::output_results(const unsigned int cycle)
1923 * TimerOutput::Scope timing(computing_timer, "Output results");
1925 * VectorType temp_solution;
1926 * temp_solution.reinit(locally_owned_dofs,
1927 * locally_relevant_dofs,
1928 * mpi_communicator);
1929 * temp_solution = solution;
1931 * DataOut<dim> data_out;
1932 * data_out.attach_dof_handler(dof_handler);
1933 * data_out.add_data_vector(temp_solution, "solution");
1935 * Vector<float> subdomain(triangulation.n_active_cells());
1936 * for (unsigned int i = 0; i < subdomain.size(); ++i)
1937 * subdomain(i) = triangulation.locally_owned_subdomain();
1938 * data_out.add_data_vector(subdomain, "subdomain");
1940 * Vector<float> level(triangulation.n_active_cells());
1941 * for (const auto &cell : triangulation.active_cell_iterators())
1942 * level(cell->active_cell_index()) = cell->level();
1943 * data_out.add_data_vector(level, "level");
1945 * if (estimated_error_per_cell.size() > 0)
1946 * data_out.add_data_vector(estimated_error_per_cell,
1947 * "estimated_error_per_cell
");
1949 * data_out.build_patches();
1951 * const std::string master = data_out.write_vtu_with_pvtu_record(
1952 * "", "solution
", cycle, mpi_communicator, 2 /*n_digits*/, 1 /*n_groups*/);
1954 * pcout << " Wrote " << master << std::endl;
1961 * <a name="LaplaceProblemrun"></a>
1962 * <h4>LaplaceProblem::run()</h4>
1966 * As in most tutorials, this function calls the various functions defined
1967 * above to set up, assemble, solve, and output the results.
1970 * template <int dim, int degree>
1971 * void LaplaceProblem<dim, degree>::run()
1973 * for (unsigned int cycle = 0; cycle < settings.n_steps; ++cycle)
1975 * pcout << "Cycle
" << cycle << ':' << std::endl;
1979 * pcout << " Number of active cells: "
1980 * << triangulation.n_global_active_cells();
1984 * We only output level cell data for the GMG methods (same with DoF
1985 * data below). Note that the partition efficiency is irrelevant for AMG
1986 * since the level hierarchy is not distributed or used during the computation.
1990 * if (settings.solver == Settings::gmg_mf ||
1991 * settings.solver == Settings::gmg_mb)
1992 * pcout << " (" << triangulation.n_global_levels() << " global levels)"
1994 * << " Partition efficiency: "
1995 * << 1.0 / MGTools::workload_imbalance(triangulation);
1996 * pcout << std::endl;
2002 * Only set up the multilevel hierarchy for GMG.
2005 * if (settings.solver == Settings::gmg_mf ||
2006 * settings.solver == Settings::gmg_mb)
2007 * setup_multigrid();
2009 * pcout << " Number of degrees of freedom: " << dof_handler.n_dofs();
2010 * if (settings.solver == Settings::gmg_mf ||
2011 * settings.solver == Settings::gmg_mb)
2013 * pcout << " (by level: ";
2014 * for (unsigned int level = 0; level < triangulation.n_global_levels();
2016 * pcout << dof_handler.n_dofs(level)
2017 * << (level == triangulation.n_global_levels() - 1 ? ")" :
2020 * pcout << std::endl;
2024 * For the matrix-free method, we only assemble the right-hand side.
2025 * For both matrix-based methods, we assemble both active matrix and
2026 * right-hand side, and only assemble the multigrid matrices for the matrix-based GMG method.
2030 * if (settings.solver == Settings::gmg_mf)
2032 * else /*gmg_mb or amg*/
2034 * assemble_system();
2035 * if (settings.solver == Settings::gmg_mb)
2036 * assemble_multigrid();
2042 * if (settings.output)
2043 * output_results(cycle);
2045 * computing_timer.print_summary();
2046 * computing_timer.reset();
2054 * <a name="Themainfunction"></a>
2055 * <h3>The main() function</h3>
2059 * This is a similar main function to @ref step_40 "step-40", with the exception that
2060 * we require the user to pass a .prm file as a sole command line
2061 * argument (see @ref step_29 "step-29" and the documentation of the ParameterHandler
2062 * class for a complete discussion of parameter files).
2065 * int main(int argc, char *argv[])
2067 * using namespace dealii;
2068 * Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
2070 * Settings settings;
2071 * if (!settings.try_parse((argc > 1) ? (argv[1]) : ""))
2076 * constexpr unsigned int fe_degree = 2;
2078 * switch (settings.dimension)
2082 * LaplaceProblem<2, fe_degree> test(settings);
2090 * LaplaceProblem<3, fe_degree> test(settings);
2097 * Assert(false, ExcMessage("This program only works in 2d and 3d."));
2100 * catch (std::exception &exc)
2102 * std::cerr << std::endl
2104 * << "----------------------------------------------------"
2106 * std::cerr << "Exception on processing: " << std::endl
2107 * << exc.what() << std::endl
2108 * << "Aborting!" << std::endl
2109 * << "----------------------------------------------------"
2111 * MPI_Abort(MPI_COMM_WORLD, 1);
2116 * std::cerr << std::endl
2118 * << "----------------------------------------------------"
2120 * std::cerr << "Unknown exception!" << std::endl
2121 * << "Aborting!" << std::endl
2122 * << "----------------------------------------------------"
2124 * MPI_Abort(MPI_COMM_WORLD, 2);
2131 <a name="Results"></a><h1>Results</h1>
2134 When you run the program, the screen output should look like the following:
2137 Number of active cells: 56 (2 global levels)
2138 Workload imbalance: 1.14286
2139 Number of degrees of freedom: 665 (by level: 117, 665)
2140 Number of CG iterations: 10
2141 Wrote solution_00.pvtu
2144 +---------------------------------------------+------------+------------+
2145 | Total wallclock time elapsed since start | 0.0457s | |
2147 | Section | no. calls | wall time | % of total |
2148 +---------------------------------+-----------+------------+------------+
2149 | Assemble right hand side | 1 | 0.000241s | 0.53% |
2150 | Estimate | 1 | 0.0288s | 63% |
2151 | Output results | 1 | 0.00219s | 4.8% |
2152 | Setup | 1 | 0.00264s | 5.8% |
2153 | Setup multigrid | 1 | 0.00261s | 5.7% |
2154 | Solve | 1 | 0.00355s | 7.8% |
2155 | Solve: 1 multigrid V-cycle | 1 | 0.000315s | 0.69% |
2156 | Solve: CG | 1 | 0.00186s | 4.1% |
2157 | Solve: Preconditioner setup | 1 | 0.000968s | 2.1% |
2158 +---------------------------------+-----------+------------+------------+
2161 Number of active cells: 126 (3 global levels)
2162 Workload imbalance: 1.17483
2163 Number of degrees of freedom: 1672 (by level: 117, 665, 1100)
2164 Number of CG iterations: 11
2165 Wrote solution_01.pvtu
2168 +---------------------------------------------+------------+------------+
2169 | Total wallclock time elapsed since start | 0.0433s | |
2171 | Section | no. calls | wall time | % of total |
2172 +---------------------------------+-----------+------------+------------+
2173 | Assemble right hand side | 1 | 0.000286s | 0.66% |
2174 | Estimate | 1 | 0.0272s | 63% |
2175 | Output results | 1 | 0.00333s | 7.7% |
2176 | Refine grid | 1 | 0.00196s | 4.5% |
2177 | Setup | 1 | 0.0023s | 5.3% |
2178 | Setup multigrid | 1 | 0.00262s | 6% |
2179 | Solve | 1 | 0.00549s | 13% |
2180 | Solve: 1 multigrid V-cycle | 1 | 0.000343s | 0.79% |
2181 | Solve: CG | 1 | 0.00293s | 6.8% |
2182 | Solve: Preconditioner setup | 1 | 0.00174s | 4% |
2183 +---------------------------------+-----------+------------+------------+
2190 Here, the timing of the `solve()` function is split up in 3 parts: setting
2191 up the multigrid preconditioner, execution of a single multigrid V-cycle, and
2192 the CG solver. The V-cycle that is timed is unnecessary for the overall solve
2193 and only meant to give an insight into the different costs for AMG and GMG.
2194 Also it should be noted that when using the AMG solver, "Workload imbalance"
2195 is not included in the output since the hierarchy of coarse meshes is not
2198 All results in this section are gathered on Intel Xeon Platinum 8280 (Cascade
2199 Lake) nodes which have 56 cores and 192GB per node and support AVX-512 instructions,
2200 allowing for vectorization over 8 doubles (vectorization used only in the matrix-free
2201 computations). The code is compiled using gcc 7.1.0 with intel-mpi 17.0.3. Trilinos
2202 12.10.1 is used for the matrix-based GMG/AMG computations.
2204 We can then gather a variety of information by calling the program
2205 with the input files that are provided in the directory in which
2206 @ref step_50 "step-50" is located. Using these, and adjusting the number of mesh
2207 refinement steps, we can produce information about how well the program scales.
2210 The following table gives weak scaling timings for this program on up to 256M DoFs
2211 and 7,168 processors. (Recall that weak scaling keeps the number of
2212 degrees of freedom per processor constant while increasing the number of
2213 processors; i.e., it considers larger and larger problems.)
2214 Here, @f$\mathbb{E}@f$ is the partition efficiency from the
2215 introduction (also equal to 1.0/workload imbalance), "Setup" is a combination
2216 of setup, setup multigrid, assemble, and assemble multigrid from the timing blocks,
2217 and "Prec" is the preconditioner setup. Ideally all times would stay constant
2218 over each problem size for the individual solvers, but since the partition
2219 efficiency decreases from 0.371 for the smallest problem size to 0.161 for the largest,
2220 we expect to see an approximately @f$0.371/0.161=2.3@f$ times increase in timings
2221 for GMG. This is, in fact, pretty close to what we really get:
<table align="center" class="doxtable">
  <tr>
    <th colspan="4"></th>
    <th colspan="4">MF-GMG</th>
    <th colspan="4">MB-GMG</th>
    <th colspan="4">AMG</th>
  </tr>
  <tr>
    <th align="right">Procs</th> <th align="right">Cycle</th> <th align="right">DoFs</th> <th align="right">@f$\mathbb{E}@f$</th>
    <th align="right">Setup</th> <th align="right">Prec</th> <th align="right">Solve</th> <th align="right">Total</th>
    <th align="right">Setup</th> <th align="right">Prec</th> <th align="right">Solve</th> <th align="right">Total</th>
    <th align="right">Setup</th> <th align="right">Prec</th> <th align="right">Solve</th> <th align="right">Total</th>
  </tr>
  <tr>
    <td align="right">112</td> <td align="right">13</td> <td align="right">4M</td> <td align="right">0.37</td>
    <td align="right">0.742</td> <td align="right">0.393</td> <td align="right">0.200</td> <td align="right">1.335</td>
    <td align="right">1.714</td> <td align="right">2.934</td> <td align="right">0.716</td> <td align="right">5.364</td>
    <td align="right">1.544</td> <td align="right">0.456</td> <td align="right">1.150</td> <td align="right">3.150</td>
  </tr>
  <tr>
    <td align="right">448</td> <td align="right">15</td> <td align="right">16M</td> <td align="right">0.29</td>
    <td align="right">0.884</td> <td align="right">0.535</td> <td align="right">0.253</td> <td align="right">1.672</td>
    <td align="right">1.927</td> <td align="right">3.776</td> <td align="right">1.190</td> <td align="right">6.893</td>
    <td align="right">1.544</td> <td align="right">0.456</td> <td align="right">1.150</td> <td align="right">3.150</td>
  </tr>
  <tr>
    <td align="right">1,792</td> <td align="right">17</td> <td align="right">65M</td> <td align="right">0.22</td>
    <td align="right">1.122</td> <td align="right">0.686</td> <td align="right">0.309</td> <td align="right">2.117</td>
    <td align="right">2.171</td> <td align="right">4.862</td> <td align="right">1.660</td> <td align="right">8.693</td>
    <td align="right">1.654</td> <td align="right">0.546</td> <td align="right">1.460</td> <td align="right">3.660</td>
  </tr>
  <tr>
    <td align="right">7,168</td> <td align="right">19</td> <td align="right">256M</td> <td align="right">0.16</td>
    <td align="right">1.214</td> <td align="right">0.893</td> <td align="right">0.521</td> <td align="right">2.628</td>
    <td align="right">2.386</td> <td align="right">7.260</td> <td align="right">2.560</td> <td align="right">12.206</td>
    <td align="right">1.844</td> <td align="right">1.010</td> <td align="right">1.890</td> <td align="right">4.744</td>
  </tr>
</table>
2340 On the other hand, the algebraic multigrid in the last set of columns
2341 is relatively unaffected by the increasing imbalance of the mesh
2342 hierarchy (because it doesn't use the mesh hierarchy) and the growth
2343 in time is rather driven by other factors that are well documented in
2344 the literature (most notably that the algorithmic complexity of
2345 some parts of algebraic multigrid methods appears to be @f${\cal O}(N
2346 \log N)@f$ instead of @f${\cal O}(N)@f$ for geometric multigrid).
2348 The upshot of the table above is that the matrix-free geometric multigrid
2349 method appears to be the fastest approach to solving this equation, though
2350 not by a huge margin. Matrix-based methods, on the other hand, are
2351 consistently the worst.
2353 The following figure provides strong scaling results for each method, i.e.,
2354 we solve the same problem on more and more processors. Specifically,
2355 we consider the problems after 16 mesh refinement cycles
2356 (32M DoFs) and 19 cycles (256M DoFs), on between 56 to 28,672 processors:
2358 <img width="600px" src="https:
2360 While the matrix-based GMG solver and AMG scale similarly and have a
2361 similar time to solution (at least as long as there is a substantial
2362 number of unknowns per processor -- say, several 10,000), the
2363 matrix-free GMG solver scales much better and solves the finer problem
2364 in roughly the same time as the AMG solver for the coarser mesh with
2365 only an eighth of the number of processors. Conversely, it can solve the
2366 same problem on the same number of processors in about one eighth the time.
2370 <a name="Possibleextensions"></a><h3> Possible extensions </h3>
2373 <a name="Testingconvergenceandhigherorderelements"></a><h4> Testing convergence and higher order elements </h4>
2376 The finite element degree is currently hard-coded as 2, see the template
2377 arguments of the main class. It is easy to change. To test, it would be
2378 interesting to switch to a test problem with a reference solution. This way,
2379 you can compare error rates.
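
The change amounts to a one-line edit in main() (a sketch; the constant name matches the one used earlier in this program):

@code
// Sketch: switch from biquadratic to bicubic elements. Everything else,
// including the multigrid transfer and the matrix-free operators, adapts
// automatically because the degree is a template parameter.
constexpr unsigned int fe_degree = 3; // was 2
// ...
LaplaceProblem<2, fe_degree> test(settings);
@endcode
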
2381 <a name="Coarsesolver"></a><h4> Coarse solver </h4>
2384 A more interesting example would involve a more complicated coarse mesh (see
2385 @ref step_49 "step-49" for inspiration). The issue in that case is that the coarsest
2386 level of the mesh hierarchy is actually quite large, and one would
2387 have to think about ways to solve the coarse level problem
2388 efficiently. (This is not an issue for algebraic multigrid methods
2389 because they would just continue to build coarser and coarser levels
2390 of the matrix, regardless of their geometric origin.)
2392 In the program here, we simply solve the coarse level problem with a
2393 Conjugate Gradient method without any preconditioner. That is acceptable
2394 if the coarse problem is really small -- for example, if the coarse
2395 mesh had a single cell, then the coarse mesh problem has a @f$9\times 9@f$
2396 matrix in 2d, and a @f$27\times 27@f$ matrix in 3d; for the coarse mesh we
2397 use on the @f$L@f$-shaped domain of the current program, these sizes are
2398 @f$21\times 21@f$ in 2d and @f$117\times 117@f$ in 3d. But if the coarse mesh
2399 consists of hundreds or thousands of cells, this approach will no
2400 longer work and might start to dominate the overall run-time of each V-cycle.
2401 A common approach is then to solve the coarse mesh problem using an
2402 algebraic multigrid preconditioner; this would then, however, require
2404 assembling the coarse-level matrix (even for the matrix-free version) as input to the AMG implementation.
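
For the matrix-based GMG branch of solve(), a hedged sketch of such a coarse solver, assuming the Trilinos backend and reusing the coarsest-level matrix mg_matrix[0] that this program already assembles, could look like this:

@code
// Sketch: replace the unpreconditioned coarse CG solve by CG preconditioned
// with an AMG hierarchy built on the coarsest-level matrix. This assumes the
// Trilinos linear-algebra backend; for the matrix-free GMG variant one would
// first have to assemble mg_matrix[0] explicitly.
TrilinosWrappers::PreconditionAMG coarse_amg;
coarse_amg.initialize(mg_matrix[0]);

SolverControl        coarse_solver_control(1000, 1e-12, false, false);
SolverCG<VectorType> coarse_solver(coarse_solver_control);

MGCoarseGridIterativeSolver<VectorType,
                            SolverCG<VectorType>,
                            MatrixType,
                            TrilinosWrappers::PreconditionAMG>
  coarse_grid_solver(coarse_solver, mg_matrix[0], coarse_amg);
@endcode
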
2407 <a name="PlainProg"></a>
2408 <h1> The plain program</h1>
2409 @include "step-50.cc"