    prm.declare_entry("Epsilon",
                      /* default value and pattern elided */
                      "Diffusion parameter");
    prm.declare_entry("Fe degree",
                      /* default value and pattern elided */
                      "Finite Element degree");
    prm.declare_entry("Smoother type",
                      /* default value and pattern elided */
                      "Select smoother: SOR|Jacobi|block SOR|block Jacobi");
    prm.declare_entry("Smoothing steps",
                      /* default value and pattern elided */
                      "Number of smoothing steps");
    prm.declare_entry("DoF renumbering",
                      /* default value and pattern elided */
                      "Select DoF renumbering: none|downstream|upstream|random");
    prm.declare_entry("With streamline diffusion",
                      /* default value and pattern elided */
                      "Enable streamline diffusion stabilization: true|false");
    prm.declare_entry("Output",
                      /* default value and pattern elided */
                      "Generate graphical output: true|false");

    if (prm_filename.empty())
      {
        // No parameter file given: print all default values and bail out.
        prm.print_parameters(std::cout, ParameterHandler::Text);
        AssertThrow(
          false, ExcMessage("Please pass a .prm file as the first argument!"));
      }

    prm.parse_input(prm_filename);

    epsilon         = prm.get_double("Epsilon");
    fe_degree       = prm.get_integer("Fe degree");
    smoother_type   = prm.get("Smoother type");
    smoothing_steps = prm.get_integer("Smoothing steps");

    const std::string renumbering = prm.get("DoF renumbering");
    if (renumbering == "none")
      dof_renumbering = DoFRenumberingStrategy::none;
    else if (renumbering == "downstream")
      dof_renumbering = DoFRenumberingStrategy::downstream;
    else if (renumbering == "upstream")
      dof_renumbering = DoFRenumberingStrategy::upstream;
    else if (renumbering == "random")
      dof_renumbering = DoFRenumberingStrategy::random;
    else
      AssertThrow(false,
                  ExcMessage("The <DoF renumbering> parameter has "
                             "an invalid value."));

    with_streamline_diffusion = prm.get_bool("With streamline diffusion");
    output                    = prm.get_bool("Output");
  }
<a name="Cellpermutations"></a>
<h3>Cell permutations</h3>
The ordering in which cells and degrees of freedom are traversed will play a
role in the speed of convergence for multiplicative methods. Here we define
functions which return a specific ordering of cells to be used by the block
smoothers.

For each type of cell ordering, we define a function for the active mesh and
one for a level mesh (i.e., for the cells at one level of a multigrid
hierarchy). While the only reordering necessary for solving the system will be
on the level meshes, we include the active reordering for visualization
purposes in output_results().

For the downstream orderings, we first create an array with all of the
relevant cells that we then sort in the downstream direction using a
"comparator" object. The output of the functions is then simply an array of
the indices of the cells in the just computed order.
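The comparator orders two cell iterators by the position of their centers
along the given advection direction; the program uses deal.II's
CompareDownstream helper for this. As an illustration of the idea only (an
illustrative stand-in, not the library's exact implementation), such a
comparator can be sketched as:

@code
// Sketch of a downstream comparator: c1 sorts before c2 if walking in
// direction `dir` from c1's center towards c2's center moves forward.
template <typename Iterator, int dim>
struct CompareDownstreamSketch
{
  CompareDownstreamSketch(const Tensor<1, dim> &dir)
    : dir(dir)
  {}

  bool operator()(const Iterator &c1, const Iterator &c2) const
  {
    const Tensor<1, dim> diff = c2->center() - c1->center();
    return (diff * dir > 0); // positive projection: c1 is upstream of c2
  }

  const Tensor<1, dim> dir; // direction to sort along
};
@endcode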
  template <int dim>
  std::vector<unsigned int>
  create_downstream_cell_ordering(const DoFHandler<dim> &dof_handler,
                                  const Tensor<1, dim>   direction,
                                  const unsigned int     level)
  {
    std::vector<typename DoFHandler<dim>::level_cell_iterator> ordered_cells;
    ordered_cells.reserve(dof_handler.get_triangulation().n_cells(level));
    for (const auto &cell : dof_handler.cell_iterators_on_level(level))
      ordered_cells.push_back(cell);

    const CompareDownstream<typename DoFHandler<dim>::level_cell_iterator, dim>
      comparator(direction);
    std::sort(ordered_cells.begin(), ordered_cells.end(), comparator);

    std::vector<unsigned int> ordered_indices;
    ordered_indices.reserve(dof_handler.get_triangulation().n_cells(level));
    for (const auto &cell : ordered_cells)
      ordered_indices.push_back(cell->index());

    return ordered_indices;
  }
  template <int dim>
  std::vector<unsigned int>
  create_downstream_cell_ordering(const DoFHandler<dim> &dof_handler,
                                  const Tensor<1, dim>   direction)
  {
    std::vector<typename DoFHandler<dim>::active_cell_iterator> ordered_cells;
    ordered_cells.reserve(dof_handler.get_triangulation().n_active_cells());
    for (const auto &cell : dof_handler.active_cell_iterators())
      ordered_cells.push_back(cell);

    const CompareDownstream<typename DoFHandler<dim>::active_cell_iterator, dim>
      comparator(direction);
    std::sort(ordered_cells.begin(), ordered_cells.end(), comparator);

    std::vector<unsigned int> ordered_indices;
    ordered_indices.reserve(dof_handler.get_triangulation().n_active_cells());
    for (const auto &cell : ordered_cells)
      ordered_indices.push_back(cell->index());

    return ordered_indices;
  }
The functions returning a random ordering are similar in spirit in that they
first put information about all cells into an array. But then, instead of
sorting them, they shuffle the elements randomly using the facilities C++
offers to generate random numbers. The way this is done is by iterating over
all elements of the array, drawing a random number for another element before
that, and then exchanging these elements. The result is a random shuffle of
the elements of the array.
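This procedure is the classic Fisher-Yates shuffle, and it is what the call to
`std::shuffle` below performs internally. A minimal stand-alone sketch of the
algorithm (for illustration only; the program itself simply calls
`std::shuffle`):

@code
#include <random>
#include <utility>
#include <vector>

// Fisher-Yates: walk the array backwards and swap each element with a
// uniformly drawn element at or before its position.
template <typename T>
void fisher_yates_shuffle(std::vector<T> &values, std::mt19937 &gen)
{
  for (std::size_t i = values.size(); i-- > 1;)
    {
      std::uniform_int_distribution<std::size_t> pick(0, i);
      std::swap(values[i], values[pick(gen)]);
    }
}
@endcode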
  template <int dim>
  std::vector<unsigned int>
  create_random_cell_ordering(const DoFHandler<dim> &dof_handler,
                              const unsigned int     level)
  {
    std::vector<unsigned int> ordered_cells;
    ordered_cells.reserve(dof_handler.get_triangulation().n_cells(level));
    for (const auto &cell : dof_handler.cell_iterators_on_level(level))
      ordered_cells.push_back(cell->index());

    std::mt19937 random_number_generator;
    std::shuffle(ordered_cells.begin(),
                 ordered_cells.end(),
                 random_number_generator);

    return ordered_cells;
  }
  template <int dim>
  std::vector<unsigned int>
  create_random_cell_ordering(const DoFHandler<dim> &dof_handler)
  {
    std::vector<unsigned int> ordered_cells;
    ordered_cells.reserve(dof_handler.get_triangulation().n_active_cells());
    for (const auto &cell : dof_handler.active_cell_iterators())
      ordered_cells.push_back(cell->index());

    std::mt19937 random_number_generator;
    std::shuffle(ordered_cells.begin(),
                 ordered_cells.end(),
                 random_number_generator);

    return ordered_cells;
  }
<a name="Righthandsideandboundaryvalues"></a>
<h3>Right-hand side and boundary values</h3>

The problem solved in this tutorial is an adaptation of Ex. 3.1.3 found in <a
href="https://global.oup.com/academic/product/finite-elements-and-fast-iterative-solvers-9780199678808">
Finite Elements and Fast Iterative Solvers: with Applications in
Incompressible Fluid Dynamics by Elman, Silvester, and Wathen</a>. The main
difference is that we add a hole in the center of our domain with zero
Dirichlet boundary conditions.
For a complete description, we need classes that implement the zero
right-hand side first (we could of course have just used
Functions::ZeroFunction):

  template <int dim>
  class RightHandSide : public Function<dim>
  {
  public:
    virtual double value(const Point<dim> & p,
                         const unsigned int component = 0) const override;

    virtual void value_list(const std::vector<Point<dim>> &points,
                            std::vector<double> &          values,
                            const unsigned int component = 0) const override;
  };

  template <int dim>
  double RightHandSide<dim>::value(const Point<dim> &,
                                   const unsigned int component) const
  {
    Assert(component == 0, ExcIndexRange(component, 0, 1));
    (void)component;
    return 0.0;
  }

  template <int dim>
  void RightHandSide<dim>::value_list(const std::vector<Point<dim>> &points,
                                      std::vector<double> &          values,
                                      const unsigned int component) const
  {
    AssertDimension(values.size(), points.size());

    for (unsigned int i = 0; i < points.size(); ++i)
      values[i] = RightHandSide<dim>::value(points[i], component);
  }
We also have Dirichlet boundary conditions. On a connected portion of the
outer, square boundary we set the value to 1, and we set the value to 0
everywhere else (including the inner, circular boundary):

  template <int dim>
  class BoundaryValues : public Function<dim>
  {
  public:
    virtual double value(const Point<dim> & p,
                         const unsigned int component = 0) const override;

    virtual void value_list(const std::vector<Point<dim>> &points,
                            std::vector<double> &          values,
                            const unsigned int component = 0) const override;
  };

  template <int dim>
  double BoundaryValues<dim>::value(const Point<dim> & p,
                                    const unsigned int component) const
  {
    Assert(component == 0, ExcIndexRange(component, 0, 1));
    (void)component;

    // Set boundary to 1 if @f$x=1@f$, or if @f$x>0.5@f$ and @f$y=-1@f$.
    if (std::fabs(p[0] - 1) < 1e-8 ||
        (std::fabs(p[1] + 1) < 1e-8 && p[0] >= 0.5))
      return 1.0;
    else
      return 0.0;
  }

  template <int dim>
  void BoundaryValues<dim>::value_list(const std::vector<Point<dim>> &points,
                                       std::vector<double> &          values,
                                       const unsigned int component) const
  {
    AssertDimension(values.size(), points.size());

    for (unsigned int i = 0; i < points.size(); ++i)
      values[i] = BoundaryValues<dim>::value(points[i], component);
  }
<a name="Streamlinediffusionimplementation"></a>
<h3>Streamline diffusion implementation</h3>

The streamline diffusion method has a stabilization constant that we need to
be able to compute. The choice of how this parameter is computed is taken
from <a
href="https://link.springer.com/chapter/10.1007/978-3-540-34288-5_27">On
Discontinuity-Capturing Methods for Convection-Diffusion Equations by Volker
John and Petr Knobloch</a>.
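Written out as a formula (this mirrors the code below, with @f$h_K@f$ the
cell diameter `hk`, @f$p_K@f$ the polynomial degree `pk`, @f$\beta@f$ the
advection direction `dir`, and @f$\varepsilon@f$ the diffusion parameter
`eps`):

@f[
\mathrm{Pe}_K = \frac{\|\beta\|\, h_K}{2\,\varepsilon\, p_K},
\qquad
\delta_K = \frac{h_K}{2\,\|\beta\|\, p_K}
           \left(\coth(\mathrm{Pe}_K) - \frac{1}{\mathrm{Pe}_K}\right).
@f]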
  template <int dim>
  double compute_stabilization_delta(const double         hk,
                                     const double         eps,
                                     const Tensor<1, dim> dir,
                                     const double         pk)
  {
    const double Peclet = dir.norm() * hk / (2.0 * eps * pk);
    const double coth =
      (1.0 + std::exp(-2.0 * Peclet)) / (1.0 - std::exp(-2.0 * Peclet));

    return hk / (2.0 * dir.norm() * pk) * (coth - 1.0 / Peclet);
  }
<a name="codeAdvectionProblemcodeclass"></a>
<h3><code>AdvectionProblem</code> class</h3>

This is the main class of the program, and should look very similar to @ref
step_16 "step-16". The major difference is that, since we are defining our
multigrid smoother at runtime, we choose to define a function
`setup_smoother()` and a class object `mg_smoother` which is a
`std::unique_ptr` to a smoother that is derived from MGSmoother. Note that
for smoothers derived from RelaxationBlock, we must include a "smoother data"
object. This will contain information about the cell ordering and the method
of inverting cell matrices.
  template <int dim>
  class AdvectionProblem
  {
  public:
    AdvectionProblem(const Settings &settings);
    void run();

  private:
    void setup_system();

    template <class IteratorType>
    void assemble_cell(const IteratorType &cell,
                       ScratchData<dim> &  scratch_data,
                       CopyData &          copy_data);
    void assemble_system_and_multigrid();

    void setup_smoother();

    void solve();
    void refine_grid();
    void output_results(const unsigned int cycle) const;

    /* ...the usual member variables (Triangulation, DoFHandler, matrices,
       vectors, constraints, multigrid level objects, and the Settings
       object) elided... */

    std::unique_ptr<MGSmoother<Vector<double>>> mg_smoother;

    using SmootherType =
      RelaxationBlock<SparseMatrix<double>, double, Vector<double>>;
    using SmootherAdditionalDataType = SmootherType::AdditionalData;
    MGLevelObject<SmootherAdditionalDataType> smoother_data;
  };
  template <int dim>
  AdvectionProblem<dim>::AdvectionProblem(const Settings &settings)
    /* ...member initializer list elided... */
<a name="codeAdvectionProblemsetup_systemcode"></a>
<h4><code>AdvectionProblem::setup_system()</code></h4>

We could renumber the active DoFs with the DoFRenumbering class, but the
smoothers only act on multigrid levels and as such, this would not matter
for the computations. Instead, we will renumber the DoFs on each multigrid
level below.
  template <int dim>
  void AdvectionProblem<dim>::setup_system()
  {
    const unsigned int n_levels = triangulation.n_levels();

    dof_handler.distribute_dofs(fe);

    solution.reinit(dof_handler.n_dofs());
    system_rhs.reinit(dof_handler.n_dofs());

    constraints.clear();
    DoFTools::make_hanging_node_constraints(dof_handler, constraints);

    VectorTools::interpolate_boundary_values(
      mapping, dof_handler, 0, BoundaryValues<dim>(), constraints);
    VectorTools::interpolate_boundary_values(
      mapping, dof_handler, 1, BoundaryValues<dim>(), constraints);
    constraints.close();

    DynamicSparsityPattern dsp(dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
    sparsity_pattern.copy_from(dsp);
    system_matrix.reinit(sparsity_pattern);

    dof_handler.distribute_mg_dofs();
Having enumerated the global degrees of freedom as well as (in the last line
above) the level degrees of freedom, let us renumber the level degrees of
freedom to get a better smoother as explained in the introduction. The first
block below renumbers DoFs on each level in the downstream or upstream
direction if needed. This is only necessary for point smoothers (SOR and
Jacobi) as the block smoothers operate on cells (see `setup_smoother()`).
The blocks below then also implement random numbering.
    if (settings.smoother_type == "SOR" || settings.smoother_type == "Jacobi")
      {
        if (settings.dof_renumbering ==
              Settings::DoFRenumberingStrategy::downstream ||
            settings.dof_renumbering ==
              Settings::DoFRenumberingStrategy::upstream)
          {
            const Tensor<1, dim> direction =
              (settings.dof_renumbering ==
                   Settings::DoFRenumberingStrategy::upstream ?
                 -1.0 * advection_direction :
                 advection_direction);

            for (unsigned int level = 0; level < n_levels; ++level)
              DoFRenumbering::downstream(dof_handler,
                                         level,
                                         direction,
                                         /*dof_wise_renumbering=*/true);
          }
        else if (settings.dof_renumbering ==
                 Settings::DoFRenumberingStrategy::random)
          {
            for (unsigned int level = 0; level < n_levels; ++level)
              DoFRenumbering::random(dof_handler, level);
          }
        else
          Assert(false, ExcNotImplemented());
      }
The rest of the function just sets up data structures. The last lines of the
code below are unlike the other GMG tutorials, as they set up both the
interface in and out matrices. We need both since our problem is
non-symmetric.
    mg_constrained_dofs.clear();
    mg_constrained_dofs.initialize(dof_handler);

    mg_constrained_dofs.make_zero_boundary_constraints(dof_handler, {0, 1});

    mg_matrices.resize(0, n_levels - 1);
    mg_matrices.clear_elements();
    mg_interface_in.resize(0, n_levels - 1);
    mg_interface_in.clear_elements();
    mg_interface_out.resize(0, n_levels - 1);
    mg_interface_out.clear_elements();
    mg_sparsity_patterns.resize(0, n_levels - 1);
    mg_interface_sparsity_patterns.resize(0, n_levels - 1);

    for (unsigned int level = 0; level < n_levels; ++level)
      {
        {
          DynamicSparsityPattern dsp(dof_handler.n_dofs(level),
                                     dof_handler.n_dofs(level));
          MGTools::make_sparsity_pattern(dof_handler, dsp, level);
          mg_sparsity_patterns[level].copy_from(dsp);
          mg_matrices[level].reinit(mg_sparsity_patterns[level]);
        }
        {
          DynamicSparsityPattern dsp(dof_handler.n_dofs(level),
                                     dof_handler.n_dofs(level));
          MGTools::make_interface_sparsity_pattern(dof_handler,
                                                   mg_constrained_dofs,
                                                   dsp,
                                                   level);
          mg_interface_sparsity_patterns[level].copy_from(dsp);

          mg_interface_in[level].reinit(mg_interface_sparsity_patterns[level]);
          mg_interface_out[level].reinit(mg_interface_sparsity_patterns[level]);
        }
      }
  }
<a name="codeAdvectionProblemassemble_cellcode"></a>
<h4><code>AdvectionProblem::assemble_cell()</code></h4>

Here we define the assembly of the linear system on each cell to be used by
the mesh_loop() function below. This one function assembles the cell matrix
for either an active or a level cell (whichever it is passed as its first
argument), and only assembles a right-hand side if called with an active
cell.
  template <int dim>
  template <class IteratorType>
  void AdvectionProblem<dim>::assemble_cell(const IteratorType &cell,
                                            ScratchData<dim> &  scratch_data,
                                            CopyData &          copy_data)
  {
    copy_data.level = cell->level();

    const unsigned int dofs_per_cell =
      scratch_data.fe_values.get_fe().n_dofs_per_cell();
    copy_data.dofs_per_cell = dofs_per_cell;
    copy_data.cell_matrix.reinit(dofs_per_cell, dofs_per_cell);

    const unsigned int n_q_points =
      scratch_data.fe_values.get_quadrature().size();

    if (cell->is_level_cell() == false)
      copy_data.cell_rhs.reinit(dofs_per_cell);

    copy_data.local_dof_indices.resize(dofs_per_cell);
    cell->get_active_or_mg_dof_indices(copy_data.local_dof_indices);

    scratch_data.fe_values.reinit(cell);

    RightHandSide<dim>  right_hand_side;
    std::vector<double> rhs_values(n_q_points);

    right_hand_side.value_list(scratch_data.fe_values.get_quadrature_points(),
                               rhs_values);
If we are using streamline diffusion we must add its contribution to both the
cell matrix and the cell right-hand side. If we are not using streamline
diffusion, setting @f$\delta=0@f$ negates this contribution below and we are
left with the standard, Galerkin finite element method.

    const double delta =
      (settings.with_streamline_diffusion ?
         compute_stabilization_delta(cell->diameter(),
                                     settings.epsilon,
                                     advection_direction,
                                     settings.fe_degree) :
         0.0);
    for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
      for (unsigned int i = 0; i < dofs_per_cell; ++i)
        {
          for (unsigned int j = 0; j < dofs_per_cell; ++j)
            {

The assembly of the local matrix has two parts. First the Galerkin
contribution:

              copy_data.cell_matrix(i, j) +=
                (settings.epsilon *
                 scratch_data.fe_values.shape_grad(i, q_point) *
                 scratch_data.fe_values.shape_grad(j, q_point) *
                 scratch_data.fe_values.JxW(q_point)) +
                (scratch_data.fe_values.shape_value(i, q_point) *
                 (advection_direction *
                  scratch_data.fe_values.shape_grad(j, q_point)) *
                 scratch_data.fe_values.JxW(q_point))

and then the streamline diffusion contribution:

                + delta *
                    (advection_direction *
                     scratch_data.fe_values.shape_grad(j, q_point)) *
                    (advection_direction *
                     scratch_data.fe_values.shape_grad(i, q_point)) *
                    scratch_data.fe_values.JxW(q_point) -
                delta * settings.epsilon *
                  trace(scratch_data.fe_values.shape_hessian(j, q_point)) *
                  (advection_direction *
                   scratch_data.fe_values.shape_grad(i, q_point)) *
                  scratch_data.fe_values.JxW(q_point);
            }

          if (cell->is_level_cell() == false)
            {

The same applies to the right-hand side. First the Galerkin contribution:

              copy_data.cell_rhs(i) +=
                scratch_data.fe_values.shape_value(i, q_point) *
                rhs_values[q_point] * scratch_data.fe_values.JxW(q_point)

and then the streamline diffusion contribution:

                + delta * rhs_values[q_point] * advection_direction *
                    scratch_data.fe_values.shape_grad(i, q_point) *
                    scratch_data.fe_values.JxW(q_point);
            }
        }
  }
<a name="codeAdvectionProblemassemble_system_and_multigridcode"></a>
<h4><code>AdvectionProblem::assemble_system_and_multigrid()</code></h4>

Here we employ MeshWorker::mesh_loop() to go over cells and assemble the
system_matrix, system_rhs, and all mg_matrices for us.
  template <int dim>
  void AdvectionProblem<dim>::assemble_system_and_multigrid()
  {
    const auto cell_worker_active =
      [&](const decltype(dof_handler.begin_active()) &cell,
          ScratchData<dim> &                          scratch_data,
          CopyData &                                  copy_data) {
        this->assemble_cell(cell, scratch_data, copy_data);
      };

    const auto copier_active = [&](const CopyData &copy_data) {
      constraints.distribute_local_to_global(copy_data.cell_matrix,
                                             copy_data.cell_rhs,
                                             copy_data.local_dof_indices,
                                             system_matrix,
                                             system_rhs);
    };

    MeshWorker::mesh_loop(dof_handler.begin_active(),
                          dof_handler.end(),
                          cell_worker_active,
                          copier_active,
                          ScratchData<dim>(fe, fe.degree + 1),
                          CopyData(),
                          MeshWorker::assemble_own_cells);
Unlike the constraints for the active level, we choose to create constraint
objects for each multigrid level local to this function since they are never
needed elsewhere in the program.

    std::vector<AffineConstraints<double>> boundary_constraints(
      triangulation.n_global_levels());
    for (unsigned int level = 0; level < triangulation.n_global_levels();
         ++level)
      {
        const IndexSet locally_owned_level_dof_indices =
          DoFTools::extract_locally_relevant_level_dofs(dof_handler, level);
        boundary_constraints[level].reinit(locally_owned_level_dof_indices);
        boundary_constraints[level].add_lines(
          mg_constrained_dofs.get_refinement_edge_indices(level));
        boundary_constraints[level].add_lines(
          mg_constrained_dofs.get_boundary_indices(level));
        boundary_constraints[level].close();
      }
    const auto cell_worker_mg =
      [&](const decltype(dof_handler.begin_mg()) &cell,
          ScratchData<dim> &                      scratch_data,
          CopyData &                              copy_data) {
        this->assemble_cell(cell, scratch_data, copy_data);
      };

    const auto copier_mg = [&](const CopyData &copy_data) {
      boundary_constraints[copy_data.level].distribute_local_to_global(
        copy_data.cell_matrix,
        copy_data.local_dof_indices,
        mg_matrices[copy_data.level]);
If @f$(i,j)@f$ is an `interface_out` dof pair, then @f$(j,i)@f$ is an
`interface_in` dof pair. Note: for `interface_in`, we load the transpose of
the interface entries, i.e., the entry for dof pair @f$(j,i)@f$ is stored in
`interface_in(i,j)`. This is an optimization for the symmetric case which
allows only one matrix to be used when setting the edge_matrices in solve().
Here, however, since our problem is non-symmetric, we must store both
`interface_in` and `interface_out` matrices.
      for (unsigned int i = 0; i < copy_data.dofs_per_cell; ++i)
        for (unsigned int j = 0; j < copy_data.dofs_per_cell; ++j)
          if (mg_constrained_dofs.is_interface_matrix_entry(
                copy_data.level,
                copy_data.local_dof_indices[i],
                copy_data.local_dof_indices[j]))
            {
              mg_interface_out[copy_data.level].add(
                copy_data.local_dof_indices[i],
                copy_data.local_dof_indices[j],
                copy_data.cell_matrix(i, j));
              mg_interface_in[copy_data.level].add(
                copy_data.local_dof_indices[i],
                copy_data.local_dof_indices[j],
                copy_data.cell_matrix(j, i));
            }
    };

    MeshWorker::mesh_loop(dof_handler.begin_mg(),
                          dof_handler.end_mg(),
                          cell_worker_mg,
                          copier_mg,
                          ScratchData<dim>(fe, fe.degree + 1),
                          CopyData(),
                          MeshWorker::assemble_own_cells);
  }
<a name="codeAdvectionProblemsetup_smoothercode"></a>
<h4><code>AdvectionProblem::setup_smoother()</code></h4>

Next, we set up the smoother based on the settings in the `.prm` file. The
two options of significance are the number of pre- and post-smoothing steps
on each level of the multigrid v-cycle and the relaxation parameter.
Since multiplicative methods tend to be more powerful than additive methods,
fewer smoothing steps are required to see convergence independent of mesh
size. The same holds for block smoothers over point smoothers. This is
reflected in the choice of the number of smoothing steps for each type of
smoother below.

The relaxation parameter for point smoothers is chosen based on trial and
error, and reflects values necessary to keep the iteration counts in the
GMRES solve constant (or as close to constant as possible) as we refine the
mesh. The two values given for both "Jacobi" and "SOR" in the `.prm` files
are for degree 1 and degree 3 finite elements. If the user wants to change
to another degree, they may need to adjust these numbers. For block
smoothers, this parameter has a more straightforward interpretation, namely
that for additive methods in 2d, a DoF can have a repeated contribution from
up to 4 cells, therefore we must relax these methods by 0.25 to compensate
(see the sketch below). This is not an issue for multiplicative methods as
each cell's inverse application carries new information to all its DoFs.
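Schematically (a sketch, not the library's exact formula): writing @f$R_K@f$
for the restriction to the DoFs of cell @f$K@f$ and @f$A_K = R_K A R_K^T@f$
for the local cell matrix, one additive block smoothing step is

@f[
x \;\leftarrow\; x + \omega \sum_K R_K^T A_K^{-1} R_K \,(b - A x),
@f]

and since in 2d up to four cells contribute a correction to the same DoF, the
choice @f$\omega = 0.25@f$ keeps the summed corrections from overcorrecting.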
Finally, as mentioned above, the point smoothers only operate on DoFs, and
the block smoothers on cells, so only the block smoothers need to be given
information regarding cell orderings. DoF ordering for point smoothers has
already been taken care of in `setup_system()`.
  template <int dim>
  void AdvectionProblem<dim>::setup_smoother()
  {
    if (settings.smoother_type == "SOR")
      {
        using Smoother = PreconditionSOR<SparseMatrix<double>>;

        auto smoother =
          std::make_unique<MGSmootherPrecondition<SparseMatrix<double>,
                                                  Smoother,
                                                  Vector<double>>>();
        smoother->initialize(mg_matrices,
                             Smoother::AdditionalData(fe.degree == 1 ? 1.0 :
                                                                       0.5));
        smoother->set_steps(settings.smoothing_steps);
        mg_smoother = std::move(smoother);
      }
    else if (settings.smoother_type == "Jacobi")
      {
        using Smoother = PreconditionJacobi<SparseMatrix<double>>;
        auto smoother =
          std::make_unique<MGSmootherPrecondition<SparseMatrix<double>,
                                                  Smoother,
                                                  Vector<double>>>();
        smoother->initialize(mg_matrices,
                             Smoother::AdditionalData(fe.degree == 1 ? 0.6667 :
                                                                       0.47));
        smoother->set_steps(settings.smoothing_steps);
        mg_smoother = std::move(smoother);
      }
    else if (settings.smoother_type == "block SOR" ||
             settings.smoother_type == "block Jacobi")
      {
        smoother_data.resize(0, triangulation.n_levels() - 1);

        for (unsigned int level = 0; level < triangulation.n_levels(); ++level)
          {
            DoFTools::make_cell_patches(smoother_data[level].block_list,
                                        dof_handler,
                                        level);

            smoother_data[level].relaxation =
              (settings.smoother_type == "block SOR" ? 1.0 : 0.25);
            smoother_data[level].inversion = PreconditionBlockBase<double>::svd;

            std::vector<unsigned int> ordered_indices;
            switch (settings.dof_renumbering)
              {
                case Settings::DoFRenumberingStrategy::downstream:
                  ordered_indices =
                    create_downstream_cell_ordering(dof_handler,
                                                    advection_direction,
                                                    level);
                  break;

                case Settings::DoFRenumberingStrategy::upstream:
                  ordered_indices =
                    create_downstream_cell_ordering(dof_handler,
                                                    -1.0 * advection_direction,
                                                    level);
                  break;

                case Settings::DoFRenumberingStrategy::random:
                  ordered_indices =
                    create_random_cell_ordering(dof_handler, level);
                  break;

                case Settings::DoFRenumberingStrategy::none:
                  break;

                default:
                  AssertThrow(false, ExcNotImplemented());
              }

            smoother_data[level].order =
              std::vector<std::vector<unsigned int>>(1, ordered_indices);
          }

        if (settings.smoother_type == "block SOR")
          {
            auto smoother = std::make_unique<MGSmootherPrecondition<
              SparseMatrix<double>,
              RelaxationBlockSOR<SparseMatrix<double>, double, Vector<double>>,
              Vector<double>>>();
            smoother->initialize(mg_matrices, smoother_data);
            smoother->set_steps(settings.smoothing_steps);
            mg_smoother = std::move(smoother);
          }
        else if (settings.smoother_type == "block Jacobi")
          {
            auto smoother = std::make_unique<
              MGSmootherPrecondition<SparseMatrix<double>,
                                     RelaxationBlockJacobi<SparseMatrix<double>,
                                                           double,
                                                           Vector<double>>,
                                     Vector<double>>>();
            smoother->initialize(mg_matrices, smoother_data);
            smoother->set_steps(settings.smoothing_steps);
            mg_smoother = std::move(smoother);
          }
      }
    else
      AssertThrow(false, ExcNotImplemented());
  }
<a name="codeAdvectionProblemsolvecode"></a>
<h4><code>AdvectionProblem::solve()</code></h4>

Before we can solve the system, we must first set up the multigrid
preconditioner. This requires the setup of the transfer between levels, the
coarse matrix solver, and the smoother. This setup is nearly identical to
that of @ref step_16 "step-16", the main differences being the various
smoothers defined above and the fact that we need different interface edge
matrices for in and out since our problem is non-symmetric. (In reality, for
this tutorial these interface matrices are empty since we are only using
global refinement, and thus have no refinement edges. However, we have still
included both here since if one made the simple switch to an adaptively
refined method, the program would still run correctly.)

The last thing to note is that since our problem is non-symmetric, we must
use an appropriate Krylov subspace method. We choose here to use GMRES since
it offers the guarantee of residual reduction in each iteration. The major
disadvantage of GMRES is that, for each iteration, the number of stored
temporary vectors increases by one, and one also needs to compute a scalar
product with all previously stored vectors. This is rather expensive. This
requirement is relaxed by using the restarted GMRES method, which puts a cap
on the number of vectors we are required to store at any one time (here we
restart after 50 temporary vectors, or 48 iterations). This then has the
disadvantage that we lose information we have gathered throughout the
iteration and therefore we could see slower convergence. As a consequence,
where to restart is a question of balancing memory consumption, CPU effort,
and convergence speed. However, the goal of this tutorial is to have very
low iteration counts by using a powerful GMG preconditioner, so we have
picked the restart length such that all of the results shown below converge
prior to restart happening, and thus we have a standard GMRES method. If the
user is interested, another suitable method offered in deal.II would be
BiCGStab.
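To quantify this trade-off: after @f$k@f$ iterations, full GMRES stores
@f$k+1@f$ Krylov basis vectors of length @f$n@f$ and must orthogonalize each
new vector against all previous ones, so roughly

@f[
\text{memory} \;\sim\; (k+1)\,n,
\qquad
\text{orthogonalization work in step } k \;\sim\; k\,n,
@f]

whereas restarted GMRES(@f$m@f$) caps both at @f$m@f$ vectors, at the price
of discarding the accumulated Krylov space at every restart.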
  template <int dim>
  void AdvectionProblem<dim>::solve()
  {
    const unsigned int max_iters       = 200;
    const double       solve_tolerance = 1e-8 * system_rhs.l2_norm();
    SolverControl      solver_control(max_iters, solve_tolerance, true, true);
    solver_control.enable_history_data();

    using Transfer = MGTransferPrebuilt<Vector<double>>;
    Transfer mg_transfer(mg_constrained_dofs);
    mg_transfer.build(dof_handler);

    FullMatrix<double> coarse_matrix;
    coarse_matrix.copy_from(mg_matrices[0]);
    MGCoarseGridHouseholder<double, Vector<double>> coarse_grid_solver;
    coarse_grid_solver.initialize(coarse_matrix);

    setup_smoother();

    mg_matrix.initialize(mg_matrices);
    mg_interface_matrix_in.initialize(mg_interface_in);
    mg_interface_matrix_out.initialize(mg_interface_out);

    Multigrid<Vector<double>> mg(
      mg_matrix, coarse_grid_solver, mg_transfer, *mg_smoother, *mg_smoother);
    mg.set_edge_matrices(mg_interface_matrix_out, mg_interface_matrix_in);

    PreconditionMG<dim, Vector<double>, Transfer> preconditioner(dof_handler,
                                                                 mg,
                                                                 mg_transfer);

    std::cout << " Solving with GMRES to tol " << solve_tolerance << "..."
              << std::endl;
    SolverGMRES<Vector<double>> solver(
      solver_control, SolverGMRES<Vector<double>>::AdditionalData(50, true));

    Timer time;
    time.start();
    solver.solve(system_matrix, solution, system_rhs, preconditioner);
    time.stop();

    std::cout << " converged in " << solver_control.last_step()
              << " iterations"
              << " in " << time.last_wall_time() << " seconds " << std::endl;

    constraints.distribute(solution);

    // Destroy the smoother object; unique_ptr::reset() frees the smoother
    // (release() would leak it).
    mg_smoother.reset();
  }
<a name="codeAdvectionProblemoutput_resultscode"></a>
<h4><code>AdvectionProblem::output_results()</code></h4>

The final function of interest generates graphical output. Here we output
the solution and cell ordering in a .vtu format.

At the top of the function, we generate an index for each cell to visualize
the ordering used by the smoothers. Note that we do this only for the active
cells instead of the levels, where the smoothers are actually used. For the
point smoothers we renumber DoFs instead of cells, so this is only an
approximation of what happens in reality. Finally, the random ordering is
not the random ordering we actually use (see `setup_smoother()` for that).

The (integer) ordering of cells is then copied into a (floating point)
vector for graphical output.
  template <int dim>
  void AdvectionProblem<dim>::output_results(const unsigned int cycle) const
  {
    const unsigned int n_active_cells = triangulation.n_active_cells();
    Vector<double>     cell_indices(n_active_cells);
    {
      std::vector<unsigned int> ordered_indices;
      switch (settings.dof_renumbering)
        {
          case Settings::DoFRenumberingStrategy::downstream:
            ordered_indices =
              create_downstream_cell_ordering(dof_handler, advection_direction);
            break;

          case Settings::DoFRenumberingStrategy::upstream:
            ordered_indices =
              create_downstream_cell_ordering(dof_handler,
                                              -1.0 * advection_direction);
            break;

          case Settings::DoFRenumberingStrategy::random:
            ordered_indices = create_random_cell_ordering(dof_handler);
            break;

          case Settings::DoFRenumberingStrategy::none:
            ordered_indices.resize(n_active_cells);
            for (unsigned int i = 0; i < n_active_cells; ++i)
              ordered_indices[i] = i;
            break;

          default:
            AssertThrow(false, ExcNotImplemented());
        }

      for (unsigned int i = 0; i < n_active_cells; ++i)
        cell_indices(ordered_indices[i]) = static_cast<double>(i);
    }

The remainder of the function is then straightforward, given previous
tutorial programs:

    DataOut<dim> data_out;
    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(solution, "solution");
    data_out.add_data_vector(cell_indices, "cell_index");
    data_out.build_patches();

    const std::string filename =
      "solution-" + Utilities::int_to_string(cycle) + ".vtu";
    std::ofstream output(filename.c_str());
    data_out.write_vtu(output);
  }
<a name="codeAdvectionProblemruncode"></a>
<h4><code>AdvectionProblem::run()</code></h4>

As in most tutorials, this function creates/refines the mesh and calls the
various functions defined above to set up, assemble, solve, and output the
results.

In cycle zero, we generate the mesh on the square <code>[-1,1]^dim</code>
with a hole of radius 3/10 units centered at the origin. For objects with
`manifold_id` equal to one (namely, the faces adjacent to the hole), we
assign a spherical manifold.

  template <int dim>
  void AdvectionProblem<dim>::run()
  {
    for (unsigned int cycle = 0; cycle < (settings.fe_degree == 1 ? 7 : 5);
         ++cycle)
      {
        std::cout << " Cycle " << cycle << ':' << std::endl;

        if (cycle == 0)
          {
            GridGenerator::hyper_cube_with_cylindrical_hole(triangulation,
                                                            0.3,
                                                            1.0);

            const SphericalManifold<dim> manifold_description(Point<dim>(0, 0));
            triangulation.set_manifold(1, manifold_description);
          }

        triangulation.refine_global();

        setup_system();

        std::cout << " Number of active cells: "
                  << triangulation.n_active_cells() << " ("
                  << triangulation.n_levels() << " levels)" << std::endl;
        std::cout << " Number of degrees of freedom: "
                  << dof_handler.n_dofs() << std::endl;

        assemble_system_and_multigrid();

        solve();

        if (settings.output)
          output_results(cycle);

        std::cout << std::endl;
      }
  }
} // namespace Step63


<a name="Thecodemaincodefunction"></a>
<h3>The <code>main</code> function</h3>

Finally, the main function is like most tutorials. The only interesting bit
is that we require the user to pass a `.prm` file as a sole command line
argument. If no parameter file is given, the program will output the
contents of a sample parameter file with all default values to the screen
that the user can then copy and paste into their own `.prm` file.
int main(int argc, char *argv[])
{
  try
    {
      Step63::Settings settings;
      settings.get_parameters((argc > 1) ? (argv[1]) : "");

      Step63::AdvectionProblem<2> advection_problem_2d(settings);
      advection_problem_2d.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
<a name="Results"></a><h1>Results</h1>

<a name="GMRESIterationNumbers"></a><h3> GMRES Iteration Numbers </h3>

The major advantage of GMG is that it is an @f$\mathcal{O}(n)@f$ method, that
is, the complexity of the problem increases linearly with the problem size.
To show that the linear solver presented in this tutorial is in fact
@f$\mathcal{O}(n)@f$, all one needs to do is show that the iteration counts
for the GMRES solve stay roughly constant as we refine the mesh.

Each of the following tables gives the GMRES iteration counts to reduce the
initial residual by a factor of @f$10^8@f$. We selected a sufficient number
of smoothing steps (based on the method) to get iteration numbers independent
of mesh size. As can be seen from the tables below, the method is indeed
@f$\mathcal{O}(n)@f$.
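The underlying arithmetic is simple: one preconditioned GMRES iteration
(dominated by a multigrid V-cycle and a matrix-vector product) costs
@f$\mathcal{O}(n)@f$ work, so

@f[
\text{total cost} \;=\; n_\text{iter} \times \mathcal{O}(n) \;=\;
\mathcal{O}(n)
\qquad \text{provided } n_\text{iter} \text{ stays bounded under mesh
refinement.}
@f]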
<a name="DoFCellRenumbering"></a><h4> DoF/Cell Renumbering </h4>

The point-wise smoothers ("Jacobi" and "SOR") get applied in the order the
DoFs are numbered on each level. We can influence this using the
DoFRenumbering namespace. The block smoothers are applied based on the
ordering we set in `setup_smoother()`. We can visualize this numbering. The
following pictures show the cell numbering of the active cells in downstream,
random, and upstream numbering (left to right):

<img src="https://www.dealii.org/images/steps/developer/step-63-cell-order.png" alt="">
Let us start with the additive smoothers. The following table shows the
number of iterations necessary to obtain convergence from GMRES:

<i>[Table: GMRES iteration counts for @f$Q_1@f$ elements with the additive
smoothers, Jacobi (6 smoothing steps) and block Jacobi (3 smoothing steps),
for each renumbering strategy.]</i>
We see that renumbering the DoFs/cells has no effect on convergence speed.
This is because these smoothers compute operations on each DoF
(point-smoother) or cell (block-smoother) independently and add up the
results. Since we can define these smoothers as an application of a sum of
matrices, and matrix addition is commutative, the order in which we sum the
different components does not affect the end result.
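Schematically, with local solves @f$B_j@f$: an additive step applies all
@f$B_j@f$ to the same residual and sums the corrections, so the order of
summation is irrelevant, while a multiplicative step updates the residual
after every local solve and is therefore order-dependent:

@f[
\text{additive:}\quad x \leftarrow x + \omega \sum_j B_j\,(b - Ax),
\qquad
\text{multiplicative:}\quad x \leftarrow x + B_j\,(b - Ax)
\ \text{ for } j = 1,\dots,J \text{ in turn.}
@f]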
On the other hand, the situation is different for multiplicative smoothers:

<i>[Table: GMRES iteration counts for @f$Q_1@f$ elements with the
multiplicative smoothers, SOR (3 smoothing steps) and block SOR (1 smoothing
step), for each renumbering strategy.]</i>
Here, we can speed up convergence by renumbering the DoFs/cells in the
advection direction, and similarly, we can slow down convergence if we do the
renumbering in the opposite direction. This is because advection-dominated
problems have a directional flow of information (in the advection direction)
which, given the right renumbering of DoFs/cells, multiplicative methods are
able to capture.

This feature of multiplicative methods is, however, dependent on the value
of @f$\varepsilon@f$. As we increase @f$\varepsilon@f$ and the problem
becomes more diffusion-dominated, we have a more uniform propagation of
information over the mesh and there is a diminished advantage for
renumbering in the advection direction. On the opposite end, in the extreme
case of @f$\varepsilon=0@f$ (advection-only), we have a first-order PDE and
multiplicative methods with the right renumbering become effective solvers:
a correct downstream numbering may lead to methods that require only a
single iteration because information can be propagated from the inflow
boundary downstream, with no information transport in the opposite
direction. (In matrix terms, a downstream numbering makes the system matrix
essentially lower triangular, and a single multiplicative sweep solves a
triangular system exactly. Note, however, that in the case of
@f$\varepsilon=0@f$, special care must be taken for the boundary
conditions.)
<a name="Pointvsblocksmoothers"></a><h4> %Point vs. block smoothers </h4>

We will limit the results to runs using the downstream renumbering. Here is
a cross comparison of all four smoothers for both @f$Q_1@f$ and @f$Q_3@f$
elements:

<i>[Table: GMRES iteration counts (with cell and DoF counts per refinement
cycle) for @f$Q_1@f$ and @f$Q_3@f$ elements with Jacobi (6 steps), block
Jacobi (3 steps), SOR (3 steps), and block SOR (1 step).]</i>
We see that for @f$Q_1@f$, both multiplicative smoothers require a smaller
combination of smoothing steps and iteration counts than either additive
smoother. However, when we increase the degree to a @f$Q_3@f$ element, there
is a clear advantage for the block smoothers in terms of the number of
smoothing steps and iterations required to solve. Specifically, the block
SOR smoother gives constant iteration counts over the degree, and the block
Jacobi smoother only sees about a 38% increase in iterations, compared to
75% and 183% for Jacobi and SOR, respectively.
<a name="Cost"></a><h3> Cost </h3>

Iteration counts do not tell the full story about the optimality of one
smoother over another. Obviously we must also examine the cost of an
iteration. Block smoothers here are at a disadvantage as they have to
construct and invert a cell matrix for each cell. Here is a comparison of
solve times for a @f$Q_3@f$ element with 74,496 DoFs:

<i>[Table: solve times for a @f$Q_3@f$ element with 74,496 DoFs using Jacobi
(6 steps), block Jacobi (3 steps), SOR (3 steps), and block SOR (1 step).]</i>
The smoother that requires the most iterations (Jacobi) actually takes the
shortest time (roughly 2/3 the time of the next fastest method). This is
because all that is required to apply a Jacobi smoothing step is
multiplication by a diagonal matrix, which is very cheap. On the other hand,
while SOR requires over 3x more iterations (each with 3x more smoothing
steps) than block SOR, the times are roughly equivalent, implying that a
smoothing step of block SOR is roughly 9x slower than a smoothing step of
SOR. Lastly, block Jacobi is almost 6x more expensive than block SOR, which
intuitively makes sense from the fact that one step of each method has the
same cost (inverting the cell matrices and either adding or multiplying them
together), and block Jacobi has 3 times the number of smoothing steps per
iteration with 2 times the iterations.
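The block Jacobi vs. block SOR comparison is consistent with a simple
per-step cost model: if one smoothing step of either block method costs
about the same, then

@f[
\frac{\text{time(block Jacobi)}}{\text{time(block SOR)}}
\;\approx\; \frac{3\ \text{steps/iteration}\,\times\, 2\,n_\text{iter}}
               {1\ \text{step/iteration}\,\times\, n_\text{iter}} \;=\; 6,
@f]

which matches the observed factor of almost 6.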
<a name="Additionalpoints"></a><h3> Additional points </h3>

There are a few more important points to mention:

<ol>
<li> For a mesh distributed in parallel, multiplicative methods cannot be
executed over the entire domain. This is because they operate one cell at a
time, and downstream cells can only be handled once upstream cells have
already been done. This is fine on a single processor: the processor just
goes through the list of cells one after the other. However, in parallel, it
would imply that some processors are idle because upstream processors have
not finished doing the work on cells upstream from the ones owned by the
current processor. Once the upstream processors are done, the downstream
ones can start, but by that time the upstream processors have no work left.
In other words, most of the time during these smoother steps, most
processors are in fact idle. This is not how one obtains good parallel
scalability!

One can use a hybrid method where a multiplicative smoother is applied on
each subdomain, but as you increase the number of subdomains, the method
approaches the behavior of an additive method. This is a major disadvantage
to these methods.
</li>
<li> Current research into block smoothers suggests that soon we will be
able to compute the inverse of the cell matrices much more cheaply than what
is currently done inside deal.II. This research is based on the fast
diagonalization method (dating back to the 1960s) and has been used in the
spectral community for around 20 years (see, e.g., <a
href="https://doi.org/10.1007/s10915-004-4787-3"> Hybrid Multigrid/Schwarz
Algorithms for the Spectral Element Method by Lottes and Fischer</a>). There
are currently efforts to generalize these methods to DG and make them more
robust. Also, it seems that one should be able to take advantage of
matrix-free implementations and the fact that, in the interior of the
domain, cell matrices tend to look very similar, allowing fewer matrix
inverse computations.
</li>
</ol>

Combining 1. and 2. gives a good reason for expecting that a method like
block Jacobi could become very powerful in the future, even though currently
for these examples it is quite slow.
<a name="Possibilitiesforextensions"></a><h3> Possibilities for extensions </h3>

<a name="ConstantiterationsforQsub5sub"></a><h4> Constant iterations for Q<sub>5</sub> </h4>

Change the number of smoothing steps and the smoother relaxation parameter
(set in <code>Smoother::AdditionalData()</code> inside
<code>setup_smoother()</code>, only necessary for point smoothers) so that
we maintain a constant number of iterations for a @f$Q_5@f$ element.

<a name="Effectivenessofrenumberingforchangingepsilon"></a><h4> Effectiveness of renumbering for changing epsilon </h4>

Increase/decrease the parameter "Epsilon" in the `.prm` files of the
multiplicative methods and observe for which values renumbering no longer
influences convergence speed.

<a name="Meshadaptivity"></a><h4> Mesh adaptivity </h4>

The code is set up to work correctly with an adaptively refined mesh (the
interface matrices are created and set). Devise a suitable refinement
criterion or try the KellyErrorEstimator class.

<a name="PlainProg"></a>
<h1> The plain program</h1>
@include "step-63.cc"