482 *                        const unsigned int = 0) const override
488 *   template <typename number>
491 *                           const unsigned int = 0) const
512 *                        const unsigned int = 0) const override;
514 *   template <typename number>
516 *                           const unsigned int = 0) const;
518 *   template <typename number>
528 * template <typename number>
538 *   for (int d = 0; d < dim; ++d)
549 * template <typename number>
552 *                         const unsigned int) const
557 *       for (int d = 0; d < dim; ++d)
558 *         if (p[d][i] < -0.5)
560 *             return_value[i] = 100.0;
565 *   return return_value;
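The double index `p[d][i]` in the snippet above first selects coordinate `d` of the vectorized point and then SIMD lane `i` of that coordinate. As a minimal stand-alone illustration of this lane access (an aside, not part of the tutorial's code; on older deal.II versions `VectorizedArray::size()` is spelled `n_array_elements`):

@code
#include <deal.II/base/vectorization.h>

#include <iostream>

int main()
{
  using namespace dealii;

  // A VectorizedArray<double> packs several doubles into one SIMD register;
  // constructing it from a scalar broadcasts that value into every lane.
  VectorizedArray<double> v(1.0);

  // operator[] reads or writes an individual lane, exactly like p[d][i]
  // above reads lane i of coordinate d of a vectorized point.
  for (unsigned int lane = 0; lane < VectorizedArray<double>::size(); ++lane)
    v[lane] = 10.0 * lane;

  std::cout << "number of lanes: " << VectorizedArray<double>::size()
            << std::endl;
}
@endcode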
571 * template <typename number>
576 *   for (unsigned int i = 0; i < points.size(); ++i)
586 * template <typename number>
587 * std::shared_ptr<Table<2, VectorizedArray<number>>>
592 *     std::make_shared<Table<2, VectorizedArray<number>>>();
600 *   for (unsigned int cell = 0; cell < n_cells; ++cell)
602 *       fe_eval.reinit(cell);
605 *       for (const unsigned int q : fe_eval.quadrature_point_indices())
620 * <a name="step_50-Runtimeparameters"></a>
652 *   bool Settings::try_parse(const std::string &prm_filename)
655 *     prm.declare_entry("dim",
658 *                       "The problem dimension.");
659 *     prm.declare_entry("n_steps",
662 *                       "Number of adaptive refinement steps.");
663 *     prm.declare_entry("smoother dampen",
666 *                       "Dampen factor for the smoother.");
667 *     prm.declare_entry("smoother steps",
670 *                       "Number of smoother steps.");
671 *     prm.declare_entry("solver",
674 *                       "Switch between matrix-free GMG, "
675 *                       "matrix-based GMG, and AMG.");
676 *     prm.declare_entry("output",
679 *                       "Output graphical results.");
684 *           << "**** Error: No input file provided!\n"
685 *           << "**** Error: Call this program as './step-50 input.prm\n"
687 *           << "**** You may want to use one of the input files in this\n"
688 *           << "**** directory, or use the following default values\n"
689 *           << "**** to create an input file:\n";
699 *     catch (std::exception &e)
702 *         std::cerr << e.what() << std::endl;
706 *     if (prm.get("solver") == "MF")
708 *     else if (prm.get("solver") == "MB")
710 *     else if (prm.get("solver") == "AMG")
711 *       this->solver = amg;
715 *     this->dimension = prm.get_integer("dim");
716 *     this->n_steps = prm.get_integer("n_steps");
719 *     this->output = prm.get_bool("output");
729 * <a name="step_50-LaplaceProblemclass"></a>
742 * template <int dim, int degree>
784 *   void refine_grid();
825 * template <int dim, int degree>
827 *   : settings(settings)
834 *       (settings.solver == Settings::amg) ?
852 * <a name="step_50-LaplaceProblemsetup_system"></a>
869 *   dof_handler.distribute_dofs(fe);
873 *   locally_owned_dofs = dof_handler.locally_owned_dofs();
875 *   solution.reinit(locally_owned_dofs, mpi_communicator);
882 *   constraints.close();
884 *   switch (settings.solver)
886 *       case Settings::gmg_mf:
889 *           additional_data.tasks_parallel_scheme =
891 *           additional_data.mapping_update_flags =
893 *           std::shared_ptr<MatrixFree<dim, double>> mf_storage =
894 *             std::make_shared<MatrixFree<dim, double>>();
910 *       case Settings::gmg_mb:
911 *       case Settings::amg:
918 *                                             locally_owned_dofs,
922 *           system_matrix.reinit(locally_owned_dofs,
923 *                                locally_owned_dofs,
928 *                                                        locally_owned_dofs,
933 *           system_matrix.reinit(dsp);
947 * <a name="step_50-LaplaceProblemsetup_multigrid"></a>
955 * distributed sparsity patterns.
966 * template <int dim, int degree>
971 *   dof_handler.distribute_mg_dofs();
973 *   mg_constrained_dofs.clear();
974 *   mg_constrained_dofs.initialize(dof_handler);
977 *   mg_constrained_dofs.make_zero_boundary_constraints(dof_handler,
980 *   const unsigned int n_levels = triangulation.n_global_levels();
982 *   switch (settings.solver)
984 *       case Settings::gmg_mf:
991 *                                     dof_handler.locally_owned_mg_dofs(level),
995 *                    mg_constrained_dofs.get_boundary_indices(level))
996 *               level_constraints.constrain_dof_to_zero(dof_index);
997 *             level_constraints.close();
1000 *             additional_data.tasks_parallel_scheme =
1002 *             additional_data.mapping_update_flags =
1005 *             additional_data.mg_level = level;
1010 *                                level_constraints,
1015 *                                     mg_constrained_dofs,
1028 *       case Settings::gmg_mb:
1048 *               dof_handler.locally_owned_mg_dofs(level),
1053 *               dof_handler.locally_owned_mg_dofs(level),
1054 *               dof_handler.locally_owned_mg_dofs(level),
1056 *               mpi_communicator);
1059 *               dof_handler.locally_owned_mg_dofs(level),
1060 *               dof_handler.locally_owned_mg_dofs(level),
1062 *               mpi_communicator);
1074 *                                                mg_constrained_dofs,
1080 *               dof_handler.locally_owned_mg_dofs(level),
1085 *               dof_handler.locally_owned_mg_dofs(level),
1086 *               dof_handler.locally_owned_mg_dofs(level),
1088 *               mpi_communicator);
1091 *               dof_handler.locally_owned_mg_dofs(level),
1092 *               dof_handler.locally_owned_mg_dofs(level),
1094 *               mpi_communicator);
1097 *                                                mg_constrained_dofs,
1117 * <a name="step_50-LaplaceProblemassemble_system"></a>
1135 * template <int dim, int degree>
1147 *     const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1153 *     std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1157 *     std::vector<double> rhs_values(n_q_points);
1159 *     for (const auto &cell : dof_handler.active_cell_iterators())
1160 *       if (cell->is_locally_owned())
1165 *           fe_values.reinit(cell);
1168 *             coefficient.average_value(fe_values.get_quadrature_points());
1169 *           rhs.value_list(fe_values.get_quadrature_points(), rhs_values);
1172 *           for (unsigned int i = 0; i < dofs_per_cell; ++i)
1174 *               for (unsigned int j = 0; j < dofs_per_cell; ++j)
1177 *                   fe_values.shape_grad(i, q_point) *
1178 *                   fe_values.shape_grad(j, q_point) *
1182 *                   fe_values.shape_value(i, q_point) *
1187 *           cell->get_dof_indices(local_dof_indices);
1188 *           constraints.distribute_local_to_global(cell_matrix,
1190 *                                                  local_dof_indices,
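Condensed into a self-contained (serial) example, the cell loop above is the standard deal.II assembly pattern: loop over the relevant cells, reinitialize FEValues, accumulate the local matrix and right-hand side from shape-function gradients and values, and hand the result to `AffineConstraints::distribute_local_to_global()`. The sketch below assembles the constant-coefficient Poisson problem @f$-\Delta u = 1@f$ on a serial unit-square mesh, so the variable coefficient, MPI, and ownership checks of the tutorial are deliberately left out:

@code
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparsity_pattern.h>
#include <deal.II/lac/vector.h>

// Serial sketch of the cell-wise assembly pattern used (in parallel) by
// LaplaceProblem::assemble_system() above.
int main()
{
  using namespace dealii;
  constexpr int dim = 2;

  Triangulation<dim> triangulation;
  GridGenerator::hyper_cube(triangulation);
  triangulation.refine_global(3);

  const FE_Q<dim> fe(1);
  DoFHandler<dim> dof_handler(triangulation);
  dof_handler.distribute_dofs(fe);

  AffineConstraints<double> constraints;
  constraints.close();

  DynamicSparsityPattern dsp(dof_handler.n_dofs());
  DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
  SparsityPattern sparsity;
  sparsity.copy_from(dsp);

  SparseMatrix<double> system_matrix(sparsity);
  Vector<double>       system_rhs(dof_handler.n_dofs());

  const QGauss<dim> quadrature(fe.degree + 1);
  FEValues<dim>     fe_values(fe,
                              quadrature,
                              update_values | update_gradients |
                                update_JxW_values);

  const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
  FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
  Vector<double>     cell_rhs(dofs_per_cell);
  std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      cell_matrix = 0.;
      cell_rhs    = 0.;
      fe_values.reinit(cell);

      // (grad phi_i, grad phi_j) and (phi_i, 1) on this cell
      for (unsigned int q = 0; q < quadrature.size(); ++q)
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          {
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
              cell_matrix(i, j) += fe_values.shape_grad(i, q) *
                                   fe_values.shape_grad(j, q) *
                                   fe_values.JxW(q);
            cell_rhs(i) += fe_values.shape_value(i, q) * 1.0 * fe_values.JxW(q);
          }

      // Copy the local contributions into the global objects, resolving
      // constraints on the fly.
      cell->get_dof_indices(local_dof_indices);
      constraints.distribute_local_to_global(
        cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
    }
}
@endcode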
1203 * <a name="step_50-LaplaceProblemassemble_multigrid"></a>
1227 *     const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1232 *     std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1242 *           dof_handler.locally_owned_mg_dofs(level),
1246 *              mg_constrained_dofs.get_refinement_edge_indices(level))
1249 *              mg_constrained_dofs.get_boundary_indices(level))
1254 *     for (const auto &cell : dof_handler.cell_iterators())
1255 *       if (cell->level_subdomain_id() == triangulation.locally_owned_subdomain())
1258 *           fe_values.reinit(cell);
1261 *             coefficient.average_value(fe_values.get_quadrature_points());
1264 *           for (unsigned int i = 0; i < dofs_per_cell; ++i)
1265 *             for (unsigned int j = 0; j < dofs_per_cell; ++j)
1270 *           cell->get_mg_dof_indices(local_dof_indices);
1273 *             cell_matrix, local_dof_indices, mg_matrix[cell->level()]);
1275 *           for (unsigned int i = 0; i < dofs_per_cell; ++i)
1276 *             for (unsigned int j = 0; j < dofs_per_cell; ++j)
1277 *               if (mg_constrained_dofs.is_interface_matrix_entry(
1278 *                     cell->level(), local_dof_indices[i], local_dof_indices[j]))
1280 *                     local_dof_indices[j],
1284 *     for (unsigned int i = 0; i < triangulation.n_global_levels(); ++i)
1296 * <a name="step_50-LaplaceProblemassemble_rhs"></a>
1303 * framework, we don't have to assemble the matrix and can get away
1304 * with only assembling the right hand side. We could do this by extracting
1305 * the code from the `assemble_system()` function above that deals with the
1306 * right hand side, but we decide instead to go all in on the matrix-free
1307 * approach and do this assembly in a matrix-free way as well.
1311 * The result is a function that is similar
1312 * to the one found in the "Use FEEvaluation::read_dof_values_plain()
1313 * to avoid resolving constraints" subsection in the "Possibilities
1314 * for extensions" section of @ref step_37 "step-37".
1318 * The reason for this function is that the MatrixFree operators do not take
1319 * into account non-homogeneous Dirichlet constraints, instead treating all
1320 * Dirichlet constraints as homogeneous. To account for this, the right-hand
1321 * side here is assembled as the residual @f$r_0 = f-Au_0@f$, where @f$u_0@f$ is a
1322 * zero vector except in the Dirichlet values. Then when solving, we have that
1323 * the solution is @f$u = u_0 + A^{-1}r_0@f$. This can be seen as a Newton
1324 * iteration on a linear system with initial guess @f$u_0@f$. The CG solve in the
1325 * `solve()` function below computes @f$A^{-1}r_0@f$ and the call to
1326 * `constraints.distribute()` (which directly follows) adds the @f$u_0@f$.
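Spelled out, the splitting works as follows: writing @f$u = u_0 + \delta u@f$, where @f$u_0@f$ carries the (possibly non-homogeneous) Dirichlet values and @f$\delta u@f$ vanishes on the Dirichlet boundary, the system @f$Au = f@f$ becomes
@f[
  A\,\delta u \;=\; f - A u_0 \;=\; r_0,
  \qquad
  u \;=\; u_0 + A^{-1} r_0,
@f]
which is exactly the residual form used here: a single (exact) Newton step for a linear problem with initial guess @f$u_0@f$.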
1330 * Obviously, since we are considering a problem with zero Dirichlet boundary,
1331 * we could have taken a similar approach to @ref step_37 "step-37" `assemble_rhs()`, but
this additional work allows us to change the problem declaration if we so choose.
1337 * This function has two parts in the integration loop: applying the negative
1338 * of matrix @f$A@f$ to @f$u_0@f$ by submitting the negative of the gradient, and
1339 * adding the right-hand side contribution by submitting the value @f$f@f$. We
1340 * must be sure to use `read_dof_values_plain()` for evaluating @f$u_0@f$ as
1341 * `read_dof_values()` would set all Dirichlet values to zero.
1345 * Finally, the system_rhs vector is of type LA::MPI::Vector, but the
1346 * MatrixFree class only works with
1347 * LinearAlgebra::distributed::Vector. Therefore we must
1348 * compute the right-hand side using MatrixFree functionality and then
1349 * use the functions in the `ChangeVectorTypes` namespace to copy it to
1350 * the correct type.
1353 * template <int dim, int degree>
1354 * void LaplaceProblem<dim, degree>::assemble_rhs()
1356 * TimerOutput::Scope timing(computing_timer, "Assemble right-hand side");
1358 * MatrixFreeActiveVector solution_copy;
1359 * MatrixFreeActiveVector right_hand_side_copy;
1360 * mf_system_matrix.initialize_dof_vector(solution_copy);
1361 * mf_system_matrix.initialize_dof_vector(right_hand_side_copy);
1363 * solution_copy = 0.;
1364 * constraints.distribute(solution_copy);
1365 * solution_copy.update_ghost_values();
1366 * right_hand_side_copy = 0;
1367 * const Table<2, VectorizedArray<double>> &coefficient =
1368 * *(mf_system_matrix.get_coefficient());
1370 * RightHandSide<dim> right_hand_side_function;
1372 * FEEvaluation<dim, degree, degree + 1, 1, double> phi(
1373 * *mf_system_matrix.get_matrix_free());
1375 * for (unsigned int cell = 0;
1376 * cell < mf_system_matrix.get_matrix_free()->n_cell_batches();
1380 * phi.read_dof_values_plain(solution_copy);
1381 * phi.evaluate(EvaluationFlags::gradients);
1383 * for (const unsigned int q : phi.quadrature_point_indices())
1385 * phi.submit_gradient(-1.0 *
1386 * (coefficient(cell, 0) * phi.get_gradient(q)),
1389 * right_hand_side_function.value(phi.quadrature_point(q)), q);
1392 * phi.integrate_scatter(EvaluationFlags::values |
1393 * EvaluationFlags::gradients,
1394 * right_hand_side_copy);
1397 * right_hand_side_copy.compress(VectorOperation::add);
1399 * ChangeVectorTypes::copy(right_hand_side, right_hand_side_copy);
1407 * <a name="step_50-LaplaceProblemsolve"></a>
1408 * <h4>LaplaceProblem::solve()</h4>
1412 * Here we set up the multigrid preconditioner, test the timing of a single
1413 * V-cycle, and solve the linear system. Unsurprisingly, this is one of the
1414 * places where the three methods differ the most.
1417 * template <int dim, int degree>
1418 * void LaplaceProblem<dim, degree>::solve()
1420 * TimerOutput::Scope timing(computing_timer, "Solve");
1422 * SolverControl solver_control(1000, 1.e-10 * right_hand_side.l2_norm());
1423 * solver_control.enable_history_data();
1429 * The solver for the matrix-free GMG method is similar to @ref step_37 "step-37", apart
1430 * from adding some interface matrices in complete analogy to @ref step_16 "step-16".
1433 * switch (settings.solver)
1435 * case Settings::gmg_mf:
1437 * computing_timer.enter_subsection("Solve: Preconditioner setup");
1439 * MGTransferMatrixFree<dim, float> mg_transfer(mg_constrained_dofs);
1440 * mg_transfer.build(dof_handler);
1442 * SolverControl coarse_solver_control(1000, 1e-12, false, false);
1443 * SolverCG<MatrixFreeLevelVector> coarse_solver(
1444 * coarse_solver_control);
1445 * PreconditionIdentity identity;
1446 * MGCoarseGridIterativeSolver<MatrixFreeLevelVector,
1447 * SolverCG<MatrixFreeLevelVector>,
1448 * MatrixFreeLevelMatrix,
1449 * PreconditionIdentity>
1450 * coarse_grid_solver(coarse_solver, mf_mg_matrix[0], identity);
1452 * using Smoother = PreconditionJacobi<MatrixFreeLevelMatrix>;
1453 * MGSmootherPrecondition<MatrixFreeLevelMatrix,
1455 * MatrixFreeLevelVector>
1457 * smoother.initialize(mf_mg_matrix,
1458 * typename Smoother::AdditionalData(
1459 * settings.smoother_dampen));
1460 * smoother.set_steps(settings.smoother_steps);
1462 * mg::Matrix<MatrixFreeLevelVector> mg_m(mf_mg_matrix);
1465 * MatrixFreeOperators::MGInterfaceOperator<MatrixFreeLevelMatrix>>
1466 * mg_interface_matrices;
1467 * mg_interface_matrices.resize(0,
1468 * triangulation.n_global_levels() - 1);
1469 * for (unsigned int level = 0;
1470 * level < triangulation.n_global_levels();
1472 * mg_interface_matrices[level].initialize(mf_mg_matrix[level]);
1473 * mg::Matrix<MatrixFreeLevelVector> mg_interface(
1474 * mg_interface_matrices);
1476 * Multigrid<MatrixFreeLevelVector> mg(
1477 * mg_m, coarse_grid_solver, mg_transfer, smoother, smoother);
1478 * mg.set_edge_matrices(mg_interface, mg_interface);
1480 * PreconditionMG<dim,
1481 * MatrixFreeLevelVector,
1482 * MGTransferMatrixFree<dim, float>>
1483 * preconditioner(dof_handler, mg, mg_transfer);
1487 * Copy the solution vector and right-hand side from LA::MPI::Vector
1488 * to LinearAlgebra::distributed::Vector so that we can solve.
1491 * MatrixFreeActiveVector solution_copy;
1492 * MatrixFreeActiveVector right_hand_side_copy;
1493 * mf_system_matrix.initialize_dof_vector(solution_copy);
1494 * mf_system_matrix.initialize_dof_vector(right_hand_side_copy);
1496 * ChangeVectorTypes::copy(solution_copy, solution);
1497 * ChangeVectorTypes::copy(right_hand_side_copy, right_hand_side);
1498 * computing_timer.leave_subsection("Solve: Preconditioner setup");
1502 * Timing for 1 V-cycle.
1506 * TimerOutput::Scope timing(computing_timer,
1507 * "Solve: 1 multigrid V-cycle");
1508 * preconditioner.vmult(solution_copy, right_hand_side_copy);
1510 * solution_copy = 0.;
1514 * Solve the linear system, update the ghost values of the solution,
1515 * copy back to LA::MPI::Vector and distribute constraints.
1519 * SolverCG<MatrixFreeActiveVector> solver(solver_control);
1521 * TimerOutput::Scope timing(computing_timer, "Solve: CG");
1522 * solver.solve(mf_system_matrix,
1524 * right_hand_side_copy,
1528 * solution_copy.update_ghost_values();
1529 * ChangeVectorTypes::copy(solution, solution_copy);
1530 * constraints.distribute(solution);
1537 * Solver for the matrix-based GMG method, similar to @ref step_16 "step-16", only
1538 * using a Jacobi smoother instead of a SOR smoother (which is not
1539 * implemented in parallel).
1542 * case Settings::gmg_mb:
1544 * computing_timer.enter_subsection("Solve: Preconditioner setup");
1546 * MGTransferPrebuilt<VectorType> mg_transfer(mg_constrained_dofs);
1547 * mg_transfer.build(dof_handler);
1549 * SolverControl coarse_solver_control(1000, 1e-12, false, false);
1550 * SolverCG<VectorType> coarse_solver(coarse_solver_control);
1551 * PreconditionIdentity identity;
1552 * MGCoarseGridIterativeSolver<VectorType,
1553 * SolverCG<VectorType>,
1555 * PreconditionIdentity>
1556 * coarse_grid_solver(coarse_solver, mg_matrix[0], identity);
1558 * using Smoother = LA::MPI::PreconditionJacobi;
1559 * MGSmootherPrecondition<MatrixType, Smoother, VectorType> smoother;
1561 * #ifdef USE_PETSC_LA
1562 * smoother.initialize(mg_matrix);
1564 * settings.smoother_dampen == 1.0,
1565 * ExcNotImplemented(
1568 * smoother.initialize(mg_matrix, settings.smoother_dampen);
1571 * smoother.set_steps(settings.smoother_steps);
1573 * mg::Matrix<VectorType> mg_m(mg_matrix);
1574 * mg::Matrix<VectorType> mg_in(mg_interface_in);
1575 * mg::Matrix<VectorType> mg_out(mg_interface_in);
1577 * Multigrid<VectorType> mg(
1578 * mg_m, coarse_grid_solver, mg_transfer, smoother, smoother);
1579 * mg.set_edge_matrices(mg_out, mg_in);
1582 * PreconditionMG<dim, VectorType, MGTransferPrebuilt<VectorType>>
1583 * preconditioner(dof_handler, mg, mg_transfer);
1585 *           computing_timer.leave_subsection("Solve: Preconditioner setup");
1589 * Timing for 1 V-cycle.
1593 * TimerOutput::Scope timing(computing_timer,
1594 * "Solve: 1 multigrid
V-cycle
");
1595 * preconditioner.vmult(solution, right_hand_side);
1601 * Solve the linear system and distribute constraints.
1605 * SolverCG<VectorType> solver(solver_control);
1607 *             TimerOutput::Scope timing(computing_timer, "Solve: CG");
1608 * solver.solve(system_matrix,
1614 * constraints.distribute(solution);
1621 * Solver for the AMG method, similar to @ref step_40 "step-40".
1624 * case Settings::amg:
1628 * PreconditionAMG preconditioner;
1629 * PreconditionAMG::AdditionalData Amg_data;
1631 * #ifdef USE_PETSC_LA
1632 * Amg_data.symmetric_operator = true;
1634 * Amg_data.elliptic = true;
1635 *           Amg_data.smoother_type = "Jacobi";
1636 * Amg_data.higher_order_elements = true;
1637 * Amg_data.smoother_sweeps = settings.smoother_steps;
1638 * Amg_data.aggregation_threshold = 0.02;
1641 * Amg_data.output_details = false;
1643 * preconditioner.initialize(system_matrix, Amg_data);
1648 * Timing for 1 V-cycle.
1652 * TimerOutput::Scope timing(computing_timer,
1653 * "Solve: 1 multigrid
V-cycle
");
1654 * preconditioner.vmult(solution, right_hand_side);
1660 * Solve the linear system and distribute constraints.
1664 * SolverCG<VectorType> solver(solver_control);
1666 *             TimerOutput::Scope timing(computing_timer, "Solve: CG");
1667 * solver.solve(system_matrix,
1672 * constraints.distribute(solution);
1678 * DEAL_II_ASSERT_UNREACHABLE();
1681 * pcout << " Number
of CG
iterations:
" << solver_control.last_step()
1690 * <h3>The error estimator</h3>
1694 * We use the FEInterfaceValues class to assemble an error estimator to decide
1695 * which cells to refine. See the exact definition of the cell and face
1696 * integrals in the introduction. To use the method, we define Scratch and
1697 * Copy objects for the MeshWorker::mesh_loop() with much of the following
1698 * code being in essence as was set up in @ref step_12 "step-12" already (or at least similar in spirit).
1702 * template <int dim>
1703 * struct ScratchData
1705 * ScratchData(const Mapping<dim> &mapping,
1706 * const FiniteElement<dim> &fe,
1707 * const unsigned int quadrature_degree,
1708 * const UpdateFlags update_flags,
1709 * const UpdateFlags interface_update_flags)
1710 * : fe_values(mapping, fe, QGauss<dim>(quadrature_degree), update_flags)
1711 * , fe_interface_values(mapping,
1713 * QGauss<dim - 1>(quadrature_degree),
1714 * interface_update_flags)
1718 * ScratchData(const ScratchData<dim> &scratch_data)
1719 * : fe_values(scratch_data.fe_values.get_mapping(),
1720 * scratch_data.fe_values.get_fe(),
1721 * scratch_data.fe_values.get_quadrature(),
1722 * scratch_data.fe_values.get_update_flags())
1723 * , fe_interface_values(scratch_data.fe_values.get_mapping(),
1724 * scratch_data.fe_values.get_fe(),
1725 * scratch_data.fe_interface_values.get_quadrature(),
1726 * scratch_data.fe_interface_values.get_update_flags())
1729 * FEValues<dim> fe_values;
1730 * FEInterfaceValues<dim> fe_interface_values;
1738 * : cell_index(numbers::invalid_unsigned_int)
1744 * unsigned int cell_indices[2];
1748 * unsigned int cell_index;
1750 * std::vector<FaceData> face_data;
1754 * template <int dim, int degree>
1755 * void LaplaceProblem<dim, degree>::estimate()
1757 * TimerOutput::Scope timing(computing_timer, "Estimate");
1759 * VectorType temp_solution;
1760 * temp_solution.reinit(locally_owned_dofs,
1761 * locally_relevant_dofs,
1762 * mpi_communicator);
1763 * temp_solution = solution;
1765 * const Coefficient<dim> coefficient;
1767 * estimated_error_square_per_cell.reinit(triangulation.n_active_cells());
1769 * using Iterator = typename DoFHandler<dim>::active_cell_iterator;
1773 * Assembler for cell residual @f$h^2 \| f + \epsilon \triangle u \|_K^2@f$
1776 * auto cell_worker = [&](const Iterator &cell,
1777 * ScratchData<dim> &scratch_data,
1778 *                            CopyData &copy_data) {
1779 * FEValues<dim> &fe_values = scratch_data.fe_values;
1780 * fe_values.reinit(cell);
1782 * RightHandSide<dim> rhs;
1783 * const double rhs_value = rhs.value(cell->center());
1785 * const double nu = coefficient.value(cell->center());
1787 * std::vector<Tensor<2, dim>> hessians(fe_values.n_quadrature_points);
1788 * fe_values.get_function_hessians(temp_solution, hessians);
1790 * copy_data.cell_index = cell->active_cell_index();
1792 * double residual_norm_square = 0.;
1793 * for (unsigned k = 0; k < fe_values.n_quadrature_points; ++k)
1795 * const double residual = (rhs_value + nu * trace(hessians[k]));
1796 * residual_norm_square += residual * residual * fe_values.JxW(k);
1800 * cell->diameter() * cell->diameter() * residual_norm_square;
1805 * Assembler for face term @f$\sum_F h_F \| \jump{\epsilon \nabla u \cdot n} \|_F^2@f$
1809 * auto face_worker = [&](const Iterator &cell,
1810 * const unsigned int &f,
1811 * const unsigned int &sf,
1812 * const Iterator &ncell,
1813 * const unsigned int &nf,
1814 * const unsigned int &nsf,
1815 * ScratchData<dim> &scratch_data,
1816 *                            CopyData &copy_data) {
1817 * FEInterfaceValues<dim> &fe_interface_values =
1818 * scratch_data.fe_interface_values;
1819 * fe_interface_values.reinit(cell, f, sf, ncell, nf, nsf);
1821 * copy_data.face_data.emplace_back();
1822 *       CopyData::FaceData &copy_data_face = copy_data.face_data.back();
1824 * copy_data_face.cell_indices[0] = cell->active_cell_index();
1825 * copy_data_face.cell_indices[1] = ncell->active_cell_index();
1827 * const double coeff1 = coefficient.value(cell->center());
1828 * const double coeff2 = coefficient.value(ncell->center());
1830 * std::vector<Tensor<1, dim>> grad_u[2];
1832 * for (unsigned int i = 0; i < 2; ++i)
1834 * grad_u[i].resize(fe_interface_values.n_quadrature_points);
1835 * fe_interface_values.get_fe_face_values(i).get_function_gradients(
1836 * temp_solution, grad_u[i]);
1839 * double jump_norm_square = 0.;
1841 * for (unsigned int qpoint = 0;
1842 * qpoint < fe_interface_values.n_quadrature_points;
1845 * const double jump = coeff1 * grad_u[0][qpoint] *
1846 * fe_interface_values.normal_vector(qpoint) -
1847 * coeff2 * grad_u[1][qpoint] *
1848 * fe_interface_values.normal_vector(qpoint);
1850 * jump_norm_square += jump * jump * fe_interface_values.JxW(qpoint);
1853 * const double h = cell->face(f)->measure();
1854 * copy_data_face.values[0] = 0.5 * h * jump_norm_square;
1855 * copy_data_face.values[1] = copy_data_face.values[0];
1858 *     auto copier = [&](const CopyData &copy_data) {
1859 * if (copy_data.cell_index != numbers::invalid_unsigned_int)
1860 * estimated_error_square_per_cell[copy_data.cell_index] +=
1863 * for (const auto &cdf : copy_data.face_data)
1864 * for (unsigned int j = 0; j < 2; ++j)
1865 * estimated_error_square_per_cell[cdf.cell_indices[j]] += cdf.values[j];
1868 * const unsigned int n_gauss_points = degree + 1;
1869 * ScratchData<dim> scratch_data(mapping,
1872 * update_hessians | update_quadrature_points |
1873 * update_JxW_values,
1874 * update_values | update_gradients |
1875 * update_JxW_values | update_normal_vectors);
1876 * CopyData copy_data;
1880 * We need to assemble each interior face once but we need to make sure that
1881 * both processes assemble the face term between a locally owned and a ghost
1882 * cell. This is achieved by setting the
1883 * MeshWorker::assemble_ghost_faces_both flag. We need to do this, because
1884 * we do not communicate the error estimator contributions here.
1887 * MeshWorker::mesh_loop(dof_handler.begin_active(),
1888 * dof_handler.end(),
1893 * MeshWorker::assemble_own_cells |
1894 * MeshWorker::assemble_ghost_faces_both |
1895 * MeshWorker::assemble_own_interior_faces_once,
1896 * /*boundary_worker=*/nullptr,
1899 * const double global_error_estimate =
1900 * std::sqrt(Utilities::MPI::sum(estimated_error_square_per_cell.l1_norm(),
1901 * mpi_communicator));
1902 * pcout << " Global error estimate:
" << global_error_estimate
1911 * <h4>LaplaceProblem::refine_grid()</h4>
1915 * We use the cell-wise estimator stored in the vector @p estimated_error_square_per_cell and
1916 * refine a fixed number of cells (chosen here to roughly double the number of
1917 * DoFs in each step).
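The particular fraction used below can be motivated by a quick count: refining a fraction @f$r@f$ of the @f$N@f$ active cells replaces each refined cell by @f$2^{\text{dim}}@f$ children, so the new number of cells is approximately
@f[
  N_{\text{new}} \;\approx\; N\left(1 + r\,(2^{\text{dim}}-1)\right),
@f]
and choosing @f$r = 1/(2^{\text{dim}}-1)@f$ gives @f$N_{\text{new}} \approx 2N@f$, i.e., roughly a doubling of the number of cells (and thus of DoFs) per step, ignoring coarsening and hanging-node effects.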
1920 * template <int dim, int degree>
1921 * void LaplaceProblem<dim, degree>::refine_grid()
1923 *     TimerOutput::Scope timing(computing_timer, "Refine grid");
1925 * const double refinement_fraction = 1. / (std::pow(2.0, dim) - 1.);
1926 * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
1927 * triangulation, estimated_error_square_per_cell, refinement_fraction, 0.0);
1929 * triangulation.execute_coarsening_and_refinement();
1937 * <h4>LaplaceProblem::output_results()</h4>
1941 * The output_results() function is similar to the ones found in many of the
1942 * tutorials (see @ref step_40 "step-40" for example).
1945 * template <int dim, int degree>
1946 * void LaplaceProblem<dim, degree>::output_results(const unsigned int cycle)
1948 *     TimerOutput::Scope timing(computing_timer, "Output results");
1950 * VectorType temp_solution;
1951 * temp_solution.reinit(locally_owned_dofs,
1952 * locally_relevant_dofs,
1953 * mpi_communicator);
1954 * temp_solution = solution;
1956 * DataOut<dim> data_out;
1957 * data_out.attach_dof_handler(dof_handler);
1958 *     data_out.add_data_vector(temp_solution, "solution");
1960 * Vector<float> subdomain(triangulation.n_active_cells());
1961 * for (unsigned int i = 0; i < subdomain.size(); ++i)
1962 * subdomain(i) = triangulation.locally_owned_subdomain();
1963 * data_out.add_data_vector(subdomain, "subdomain");
1965 * Vector<float> level(triangulation.n_active_cells());
1966 * for (const auto &cell : triangulation.active_cell_iterators())
1967 * level(cell->active_cell_index()) = cell->level();
1968 * data_out.add_data_vector(level, "level");
1970 * if (estimated_error_square_per_cell.size() > 0)
1971 * data_out.add_data_vector(estimated_error_square_per_cell,
1974 * data_out.build_patches();
1976 * const std::string pvtu_filename = data_out.write_vtu_with_pvtu_record(
1977 * "", "solution
", cycle, mpi_communicator, 2 /*n_digits*/, 1 /*n_groups*/);
1979 * pcout << " Wrote " << pvtu_filename << std::endl;
1987 * <h4>LaplaceProblem::run()</h4>
1991 * As in most tutorials, this function calls the various functions defined
1992 * above to set up, assemble, solve, and output the results.
1995 * template <int dim, int degree>
1996 * void LaplaceProblem<dim, degree>::run()
1998 * for (unsigned int cycle = 0; cycle < settings.n_steps; ++cycle)
2000 * pcout << "Cycle
" << cycle << ':' << std::endl;
2004 *         pcout << "   Number of active cells: "
2005 * << triangulation.n_global_active_cells();
2009 * We only output level cell data for the GMG methods (same with DoF
2010 * data below). Note that the partition efficiency is irrelevant for AMG
2011 * since the level hierarchy is not distributed or used during the computation.
2015 * if (settings.solver == Settings::gmg_mf ||
2016 * settings.solver == Settings::gmg_mb)
2017 * pcout << " (
" << triangulation.n_global_levels() << " global levels)
"
2020 * << 1.0 / MGTools::workload_imbalance(triangulation);
2021 * pcout << std::endl;
2027 * Only set up the multilevel hierarchy for GMG.
2030 * if (settings.solver == Settings::gmg_mf ||
2031 * settings.solver == Settings::gmg_mb)
2032 * setup_multigrid();
2034 *         pcout << "   Number of degrees of freedom: " << dof_handler.n_dofs();
2035 * if (settings.solver == Settings::gmg_mf ||
2036 * settings.solver == Settings::gmg_mb)
2039 * for (unsigned int level = 0;
2040 * level < triangulation.n_global_levels();
2042 * pcout << dof_handler.n_dofs(level)
2043 *                 << (level == triangulation.n_global_levels() - 1 ? ")" :
2046 * pcout << std::endl;
2050 * For the matrix-free method, we only assemble the right-hand side.
2051 * For both matrix-based methods, we assemble both active matrix and
2052 * right-hand side, and only assemble the multigrid matrices for the matrix-based GMG solver.
2056 * if (settings.solver == Settings::gmg_mf)
2058 * else /*gmg_mb or amg*/
2060 * assemble_system();
2061 * if (settings.solver == Settings::gmg_mb)
2062 * assemble_multigrid();
2068 * if (settings.output)
2069 * output_results(cycle);
2071 * computing_timer.print_summary();
2072 * computing_timer.reset();
2075 * } // namespace Step50
2081 * <h3>The main() function</h3>
2085 * This is a similar main function to @ref step_40 "step-40", with the exception that
2086 * we require the user to pass a .prm file as a sole command line
2087 * argument (see @ref step_29 "step-29" and the documentation of the ParameterHandler
2088 * class for a complete discussion of parameter files).
2091 * int main(int argc, char *argv[])
2093 * using namespace dealii;
2094 * using namespace Step50;
2095 * Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
2097 * Settings settings;
2098 * if (!settings.try_parse((argc > 1) ? (argv[1]) : ""))
2103 * constexpr unsigned int fe_degree = 2;
2105 * switch (settings.dimension)
2109 * LaplaceProblem<2, fe_degree> test(settings);
2117 * LaplaceProblem<3, fe_degree> test(settings);
2124 * DEAL_II_NOT_IMPLEMENTED();
2127 * catch (std::exception &exc)
2129 * std::cerr << std::endl
2131 * << "----------------------------------------------------
"
2134 * << exc.what() << std::endl
2136 * << "----------------------------------------------------
"
2138 * MPI_Abort(MPI_COMM_WORLD, 1);
2143 * std::cerr << std::endl
2145 * << "----------------------------------------------------
"
2147 * std::cerr << "Unknown exception!
" << std::endl
2149 * << "----------------------------------------------------
"
2151 * MPI_Abort(MPI_COMM_WORLD, 2);
2158<a name="step_50-Results"></a><h1>Results</h1>
2161When you run the program using the following command
2163mpirun -np 16 ./step-50 gmg_mf_2d.prm
2165the screen output should look like the following:
2168 Number of active cells: 12 (2 global levels)
2169 Partition efficiency: 0.1875
2170 Number of degrees of freedom: 65 (by level: 21, 65)
2171 Number of CG iterations: 10
2172 Global error estimate: 0.355373
2173 Wrote solution_00.pvtu
2176+---------------------------------------------+------------+------------+
2177| Total wallclock time elapsed since start | 0.0163s | |
2179| Section | no. calls | wall time | % of total |
2180+---------------------------------+-----------+------------+------------+
2181| Assemble right-hand side | 1 | 0.000374s | 2.3% |
2182| Estimate | 1 | 0.000724s | 4.4% |
2183| Output results | 1 | 0.00277s | 17% |
2184| Setup | 1 | 0.00225s | 14% |
2185| Setup multigrid | 1 | 0.00181s | 11% |
2186| Solve | 1 | 0.00364s | 22% |
2187| Solve: 1 multigrid V-cycle | 1 | 0.000354s | 2.2% |
2188| Solve: CG | 1 | 0.00151s | 9.3% |
2189| Solve: Preconditioner setup | 1 | 0.00125s | 7.7% |
2190+---------------------------------+-----------+------------+------------+
2193 Number of active cells: 24 (3 global levels)
2194 Partition efficiency: 0.276786
2195 Number of degrees of freedom: 139 (by level: 21, 65, 99)
2196 Number of CG iterations: 10
2197 Global error estimate: 0.216726
2198 Wrote solution_01.pvtu
2201+---------------------------------------------+------------+------------+
2202| Total wallclock time elapsed since start | 0.0169s | |
2204| Section | no. calls | wall time | % of total |
2205+---------------------------------+-----------+------------+------------+
2206| Assemble right-hand side | 1 | 0.000309s | 1.8% |
2207| Estimate | 1 | 0.00156s | 9.2% |
2208| Output results | 1 | 0.00222s | 13% |
2209| Refine grid | 1 | 0.00278s | 16% |
2210| Setup | 1 | 0.00196s | 12% |
2211| Setup multigrid | 1 | 0.0023s | 14% |
2212| Solve | 1 | 0.00565s | 33% |
2213| Solve: 1 multigrid V-cycle | 1 | 0.000349s | 2.1% |
2214| Solve: CG | 1 | 0.00285s | 17% |
2215| Solve: Preconditioner setup | 1 | 0.00195s | 12% |
2216+---------------------------------+-----------+------------+------------+
2219 Number of active cells: 51 (4 global levels)
2220 Partition efficiency: 0.41875
2221 Number of degrees of freedom: 245 (by level: 21, 65, 225, 25)
2222 Number of CG iterations: 11
2223 Global error estimate: 0.112098
2224 Wrote solution_02.pvtu
2227+---------------------------------------------+------------+------------+
2228| Total wallclock time elapsed since start | 0.0183s | |
2230| Section | no. calls | wall time | % of total |
2231+---------------------------------+-----------+------------+------------+
2232| Assemble right-hand side | 1 | 0.000274s | 1.5% |
2233| Estimate | 1 | 0.00127s | 6.9% |
2234| Output results | 1 | 0.00227s | 12% |
2235| Refine grid | 1 | 0.0024s | 13% |
2236| Setup | 1 | 0.00191s | 10% |
2237| Setup multigrid | 1 | 0.00295s | 16% |
2238| Solve | 1 | 0.00702s | 38% |
2239| Solve: 1 multigrid V-cycle | 1 | 0.000398s | 2.2% |
2240| Solve: CG | 1 | 0.00376s | 21% |
2241| Solve: Preconditioner setup | 1 | 0.00238s | 13% |
2242+---------------------------------+-----------+------------+------------+
2247Here, the timing of the `solve()` function is split into three parts: setting
2248up the multigrid preconditioner, execution of a single multigrid V-cycle, and
2249the CG solver. The V-cycle that is timed is unnecessary for the overall solve
2250and is only meant to give insight into the different costs of AMG and GMG. Note
2252that, for the AMG solver, the partition efficiency is not included in the output
since the hierarchy of coarse meshes is not used.
2255All results in this section are gathered on Intel Xeon Platinum 8280 (Cascade
2256Lake) nodes which have 56 cores and 192GB per node and support AVX-512 instructions,
2257allowing for vectorization over 8 doubles (vectorization used only in the matrix-free
2258computations). The code is compiled using gcc 7.1.0 with intel-mpi 17.0.3. Trilinos
225912.10.1 is used for the matrix-based GMG/AMG computations.
2261We can then gather a variety of information by calling the program
2262with the input files that are provided in the directory in which
2263@ref step_50 "step-50" is located. Using these, and adjusting the number of mesh
2264refinement steps, we can produce information about how well the program scales.
2267The following table gives weak scaling timings for this program on up to 256M DoFs
2268and 7,168 processors. (Recall that weak scaling keeps the number of
2269degrees of freedom per processor constant while increasing the number of
2270processors; i.e., it considers larger and larger problems.)
2271Here, @f$\mathbb{E}@f$ is the partition efficiency from the
2272 introduction (also equal to 1.0/workload imbalance), "Setup" is a combination
2273of setup, setup multigrid, assemble, and assemble multigrid from the timing blocks,
2274and "Prec" is the preconditioner setup. Ideally all times would stay constant
2275over each problem size for the individual solvers, but since the partition
2276efficiency decreases from 0.371 to 0.161 from largest to smallest problem size,
2277we expect to see an approximately @f$0.371/0.161=2.3@f$ times increase in timings
2278for GMG. This is, in fact, pretty close to what we really get:
<table align="center" class="doxtable">
  <tr>
    <th colspan="4"></th>
    <th colspan="4">MF-GMG</th>
    <th colspan="4">MB-GMG</th>
    <th colspan="4">AMG</th>
  </tr>
  <tr>
    <th align="right">Procs</th>
    <th align="right">Cycle</th>
    <th align="right">DoFs</th>
    <th align="right">@f$\mathbb{E}@f$</th>
    <th align="right">Setup</th>
    <th align="right">Prec</th>
    <th align="right">Solve</th>
    <th align="right">Total</th>
    <th align="right">Setup</th>
    <th align="right">Prec</th>
    <th align="right">Solve</th>
    <th align="right">Total</th>
    <th align="right">Setup</th>
    <th align="right">Prec</th>
    <th align="right">Solve</th>
    <th align="right">Total</th>
  </tr>
  <tr>
    <td align="right">112</td>
    <td align="right">13</td>
    <td align="right">4M</td>
    <td align="right">0.37</td>
    <td align="right">0.742</td>
    <td align="right">0.393</td>
    <td align="right">0.200</td>
    <td align="right">1.335</td>
    <td align="right">1.714</td>
    <td align="right">2.934</td>
    <td align="right">0.716</td>
    <td align="right">5.364</td>
    <td align="right">1.544</td>
    <td align="right">0.456</td>
    <td align="right">1.150</td>
    <td align="right">3.150</td>
  </tr>
  <tr>
    <td align="right">448</td>
    <td align="right">15</td>
    <td align="right">16M</td>
    <td align="right">0.29</td>
    <td align="right">0.884</td>
    <td align="right">0.535</td>
    <td align="right">0.253</td>
    <td align="right">1.672</td>
    <td align="right">1.927</td>
    <td align="right">3.776</td>
    <td align="right">1.190</td>
    <td align="right">6.893</td>
    <td align="right">1.544</td>
    <td align="right">0.456</td>
    <td align="right">1.150</td>
    <td align="right">3.150</td>
  </tr>
  <tr>
    <td align="right">1,792</td>
    <td align="right">17</td>
    <td align="right">65M</td>
    <td align="right">0.22</td>
    <td align="right">1.122</td>
    <td align="right">0.686</td>
    <td align="right">0.309</td>
    <td align="right">2.117</td>
    <td align="right">2.171</td>
    <td align="right">4.862</td>
    <td align="right">1.660</td>
    <td align="right">8.693</td>
    <td align="right">1.654</td>
    <td align="right">0.546</td>
    <td align="right">1.460</td>
    <td align="right">3.660</td>
  </tr>
  <tr>
    <td align="right">7,168</td>
    <td align="right">19</td>
    <td align="right">256M</td>
    <td align="right">0.16</td>
    <td align="right">1.214</td>
    <td align="right">0.893</td>
    <td align="right">0.521</td>
    <td align="right">2.628</td>
    <td align="right">2.386</td>
    <td align="right">7.260</td>
    <td align="right">2.560</td>
    <td align="right">12.206</td>
    <td align="right">1.844</td>
    <td align="right">1.010</td>
    <td align="right">1.890</td>
    <td align="right">4.744</td>
  </tr>
</table>
2397On the other hand, the algebraic multigrid in the last set of columns
2398is relatively unaffected by the increasing imbalance of the mesh
2399hierarchy (because it doesn't use the mesh hierarchy) and the growth
2400in time is rather driven by other factors that are well documented in
2401the literature (most notably that the algorithmic complexity of
2402some parts of algebraic multigrid methods appears to be @f${\cal O}(N
2403\log N)@f$ instead of @f${\cal O}(N)@f$ for geometric multigrid).
2405The upshot of the table above is that the matrix-free geometric multigrid
2406method appears to be the fastest approach to solving this equation, though
2407not by a huge margin. Matrix-based methods, on the other hand, are
2408consistently the worst.
2410The following figure provides strong scaling results for each method, i.e.,
2411we solve the same problem on more and more processors. Specifically,
2412we consider the problems after 16 mesh refinement cycles
2413(32M DoFs) and 19 cycles (256M DoFs), on between 56 and 28,672 processors:
2427<a name="step_50-Possibilitiesforextensions"></a><h3> Possibilities for extensions </h3>
2438<a name="step_50-Coarsesolver"></a><h4> Coarse solver </h4>
2464<a name="step_50-PlainProg"></a>