@code
#if defined(DEAL_II_WITH_PETSC) && !defined(DEAL_II_PETSC_WITH_COMPLEX) && \
  !(defined(DEAL_II_WITH_TRILINOS) && defined(FORCE_USE_OF_TRILINOS))
  using namespace dealii::LinearAlgebraPETSc;
#  define USE_PETSC_LA
#elif defined(DEAL_II_WITH_TRILINOS)
  using namespace dealii::LinearAlgebraTrilinos;
#else
#  error DEAL_II_WITH_PETSC or DEAL_II_WITH_TRILINOS required
#endif
@endcode
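Whichever branch is taken, the rest of the program only refers to the names
inside the LA namespace. For orientation (this mapping comes from deal.II's
generic linear algebra wrappers, not from the listing itself), the aliases
resolve roughly as follows:

@code
// PETSc branch:
//   LA::MPI::Vector       -> dealii::PETScWrappers::MPI::Vector
//   LA::MPI::SparseMatrix -> dealii::PETScWrappers::MPI::SparseMatrix
// Trilinos branch:
//   LA::MPI::Vector       -> dealii::TrilinosWrappers::MPI::Vector
//   LA::MPI::SparseMatrix -> dealii::TrilinosWrappers::SparseMatrix
LA::MPI::Vector example_vector; // resolves to whichever backend was selected
@endcode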
@code
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_gmres.h>
#include <deal.II/lac/solver_minres.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>

#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_precondition.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/manifold_lib.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/error_estimator.h>

#include <deal.II/base/utilities.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/distributed/grid_refinement.h>

#include <iostream>
@endcode
<a name="Linearsolversandpreconditioners"></a>
<h3>Linear solvers and preconditioners</h3>
We need a few helper classes to represent our solver strategy
described in the introduction.

@code
  namespace LinearSolvers
  {
@endcode

This class exposes the action of applying the inverse of a given
matrix via the function InverseMatrix::vmult(). Internally, the
inverse is not formed explicitly; instead, a linear solve with CG
is performed. This class extends the InverseMatrix class in @ref step_22 "step-22"
with an option to specify a preconditioner, and to allow for different
vector types in the vmult() function.
@code
    template <class Matrix, class Preconditioner>
    class InverseMatrix : public Subscriptor
    {
    public:
      InverseMatrix(const Matrix &m, const Preconditioner &preconditioner);

      template <typename VectorType>
      void vmult(VectorType &dst, const VectorType &src) const;

    private:
      const SmartPointer<const Matrix> matrix;
      const Preconditioner &           preconditioner;
    };


    template <class Matrix, class Preconditioner>
    InverseMatrix<Matrix, Preconditioner>::InverseMatrix(
      const Matrix &        m,
      const Preconditioner &preconditioner)
      : matrix(&m)
      , preconditioner(preconditioner)
    {}


    template <class Matrix, class Preconditioner>
    template <typename VectorType>
    void
    InverseMatrix<Matrix, Preconditioner>::vmult(VectorType &      dst,
                                                 const VectorType &src) const
    {
      SolverControl        solver_control(src.size(), 1e-8 * src.l2_norm());
      SolverCG<VectorType> cg(solver_control);
      dst = 0;

      try
        {
          cg.solve(*matrix, dst, src, preconditioner);
        }
      catch (std::exception &e)
        {
          Assert(false, ExcMessage(e.what()));
        }
    }
@endcode
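As a small usage sketch (anticipating the solve() function further below, and
assuming a block matrix preconditioner_matrix whose (1,1) block holds the
pressure mass matrix):

@code
LA::MPI::PreconditionAMG prec_S;
prec_S.initialize(preconditioner_matrix.block(1, 1));

using mp_inverse_t =
  LinearSolvers::InverseMatrix<LA::MPI::SparseMatrix, LA::MPI::PreconditionAMG>;
const mp_inverse_t mp_inverse(preconditioner_matrix.block(1, 1), prec_S);

// mp_inverse.vmult(dst, src) now approximately computes dst = M_p^{-1} src
// by running CG on the mass matrix, preconditioned with AMG.
@endcode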
Next is a template class for a simple block diagonal preconditioner, built
from preconditioners for the two diagonal blocks.
@code
    template <class PreconditionerA, class PreconditionerS>
    class BlockDiagonalPreconditioner : public Subscriptor
    {
    public:
      BlockDiagonalPreconditioner(const PreconditionerA &preconditioner_A,
                                  const PreconditionerS &preconditioner_S);

      void vmult(LA::MPI::BlockVector &      dst,
                 const LA::MPI::BlockVector &src) const;

    private:
      const PreconditionerA &preconditioner_A;
      const PreconditionerS &preconditioner_S;
    };


    template <class PreconditionerA, class PreconditionerS>
    BlockDiagonalPreconditioner<PreconditionerA, PreconditionerS>::
      BlockDiagonalPreconditioner(const PreconditionerA &preconditioner_A,
                                  const PreconditionerS &preconditioner_S)
      : preconditioner_A(preconditioner_A)
      , preconditioner_S(preconditioner_S)
    {}


    template <class PreconditionerA, class PreconditionerS>
    void BlockDiagonalPreconditioner<PreconditionerA, PreconditionerS>::vmult(
      LA::MPI::BlockVector &      dst,
      const LA::MPI::BlockVector &src) const
    {
      preconditioner_A.vmult(dst.block(0), src.block(0));
      preconditioner_S.vmult(dst.block(1), src.block(1));
    }
  } // namespace LinearSolvers
@endcode
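Putting the two helpers together, and again mirroring what solve() does further
below (prec_A is assumed to be an AMG preconditioner for the velocity block,
and mp_inverse the mass matrix inverse from the sketch above):

@code
const LinearSolvers::BlockDiagonalPreconditioner<LA::MPI::PreconditionAMG,
                                                 mp_inverse_t>
  preconditioner(prec_A, mp_inverse);

// preconditioner.vmult(dst, src) applies one AMG V-cycle to the velocity
// block of src and the CG mass matrix solve to the pressure block.
@endcode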
<a name="Problemsetup"></a>
<h3>Problem setup</h3>
The following classes represent the right hand side and the exact
solution for the test problem.
@code
  template <int dim>
  class RightHandSide : public Function<dim>
  {
  public:
    RightHandSide()
      : Function<dim>(dim + 1)
    {}

    virtual void vector_value(const Point<dim> &p,
                              Vector<double> &  values) const override;
  };


  template <int dim>
  void RightHandSide<dim>::vector_value(const Point<dim> &p,
                                        Vector<double> &  values) const
  {
    const double R_x = p[0];
    const double R_y = p[1];

    const double pi  = numbers::PI;
    const double pi2 = pi * pi;
    values[0] =
      -1.0L / 2.0L * (-2 * sqrt(25.0 + 4 * pi2) + 10.0) *
        exp(R_x * (-2 * sqrt(25.0 + 4 * pi2) + 10.0)) -
      0.4 * pi2 * exp(R_x * (-sqrt(25.0 + 4 * pi2) + 5.0)) * cos(2 * R_y * pi) +
      0.1 * pow(-sqrt(25.0 + 4 * pi2) + 5.0, 2) *
        exp(R_x * (-sqrt(25.0 + 4 * pi2) + 5.0)) * cos(2 * R_y * pi);
    values[1] = 0.2 * pi * (-sqrt(25.0 + 4 * pi2) + 5.0) *
                  exp(R_x * (-sqrt(25.0 + 4 * pi2) + 5.0)) * sin(2 * R_y * pi) -
                0.05 * pow(-sqrt(25.0 + 4 * pi2) + 5.0, 3) *
                  exp(R_x * (-sqrt(25.0 + 4 * pi2) + 5.0)) * sin(2 * R_y * pi) /
                  pi;
    values[2] = 0;
  }
@endcode
@code
  template <int dim>
  class ExactSolution : public Function<dim>
  {
  public:
    ExactSolution()
      : Function<dim>(dim + 1)
    {}

    virtual void vector_value(const Point<dim> &p,
                              Vector<double> &  values) const override;
  };


  template <int dim>
  void ExactSolution<dim>::vector_value(const Point<dim> &p,
                                        Vector<double> &  values) const
  {
    const double R_x = p[0];
    const double R_y = p[1];

    const double pi  = numbers::PI;
    const double pi2 = pi * pi;
    values[0] =
      -exp(R_x * (-sqrt(25.0 + 4 * pi2) + 5.0)) * cos(2 * R_y * pi) + 1;
    values[1] = (1.0L / 2.0L) * (-sqrt(25.0 + 4 * pi2) + 5.0) *
                exp(R_x * (-sqrt(25.0 + 4 * pi2) + 5.0)) * sin(2 * R_y * pi) /
                pi;
    values[2] =
      -1.0L / 2.0L * exp(R_x * (-2 * sqrt(25.0 + 4 * pi2) + 10.0)) -
      2.0 *
        (-6538034.74494422 +
         0.0134758939981709 * exp(4 * sqrt(25.0 + 4 * pi2))) /
        (-80.0 * exp(3 * sqrt(25.0 + 4 * pi2)) +
         16.0 * sqrt(25.0 + 4 * pi2) * exp(3 * sqrt(25.0 + 4 * pi2))) -
      1634508.68623606 * exp(-3.0 * sqrt(25.0 + 4 * pi2)) /
        (-10.0 + 2.0 * sqrt(25.0 + 4 * pi2)) +
      (-0.00673794699908547 * exp(sqrt(25.0 + 4 * pi2)) +
       3269017.37247211 * exp(-3 * sqrt(25.0 + 4 * pi2))) /
        (-8 * sqrt(25.0 + 4 * pi2) + 40.0) +
      0.00336897349954273 * exp(1.0 * sqrt(25.0 + 4 * pi2)) /
        (-10.0 + 2.0 * sqrt(25.0 + 4 * pi2));
  }
@endcode
<a name="Themainprogram"></a>
<h3>The main program</h3>
The main class is very similar to @ref step_40 "step-40", except that matrices and
vectors are now block versions, and we store a std::vector<IndexSet>
for owned and relevant DoFs instead of a single IndexSet. We have
exactly two IndexSets, one for all velocity unknowns and one for all
pressure unknowns.
@code
  template <int dim>
  class StokesProblem
  {
  public:
    StokesProblem(unsigned int velocity_degree);

    void run();

  private:
    void make_grid();
    void setup_system();
    void assemble_system();
    void solve();
    void refine_grid();
    void output_results(const unsigned int cycle) const;

    unsigned int velocity_degree;
    double       viscosity;
    MPI_Comm     mpi_communicator;

    // ... finite element, triangulation, DoFHandler, and constraints ...

    std::vector<IndexSet> owned_partitioning;
    std::vector<IndexSet> relevant_partitioning;

    LA::MPI::BlockSparseMatrix system_matrix;
    LA::MPI::BlockSparseMatrix preconditioner_matrix;
    LA::MPI::BlockVector       locally_relevant_solution;
    LA::MPI::BlockVector       system_rhs;

    ConditionalOStream pcout;
    TimerOutput        computing_timer;
  };
@endcode
@code
  template <int dim>
  StokesProblem<dim>::StokesProblem(unsigned int velocity_degree)
    : velocity_degree(velocity_degree)
    , mpi_communicator(MPI_COMM_WORLD)
    // ... further member initializers (fe, triangulation, dof_handler, pcout) ...
    , computing_timer(mpi_communicator, pcout,
                      TimerOutput::summary, TimerOutput::wall_times)
  {}
@endcode
The Kovasznay flow is defined on the domain [-0.5, 1.5]^2, which we
create here:

@code
  template <int dim>
  void StokesProblem<dim>::make_grid()
  {
    GridGenerator::hyper_cube(triangulation, -0.5, 1.5);
    triangulation.refine_global(3);
  }
@endcode
<a name="SystemSetup"></a>
<h3>System Setup</h3>
The construction of the block matrices and vectors is new compared to
@ref step_40 "step-40" and is different compared to serial codes like @ref step_22
"step-22", because we need to supply the set of rows that belong to our processor.
@code
  template <int dim>
  void StokesProblem<dim>::setup_system()
  {
    dof_handler.distribute_dofs(fe);
@endcode
Put all dim velocities into block 0 and the pressure into block 1,
then reorder the unknowns by block. Finally count how many unknowns
we have per block.

@code
    std::vector<unsigned int> stokes_sub_blocks(dim + 1, 0);
    stokes_sub_blocks[dim] = 1;
    DoFRenumbering::component_wise(dof_handler, stokes_sub_blocks);

    const std::vector<types::global_dof_index> dofs_per_block =
      DoFTools::count_dofs_per_fe_block(dof_handler, stokes_sub_blocks);

    const unsigned int n_u = dofs_per_block[0];
    const unsigned int n_p = dofs_per_block[1];

    pcout << "   Number of degrees of freedom: " << dof_handler.n_dofs() << " ("
          << n_u << '+' << n_p << ')' << std::endl;
@endcode
We split up the IndexSet for locally owned and locally relevant DoFs
into two IndexSets based on how we want to create the block matrices
and vectors.

@code
    owned_partitioning.resize(2);
    owned_partitioning[0] = dof_handler.locally_owned_dofs().get_view(0, n_u);
    owned_partitioning[1] =
      dof_handler.locally_owned_dofs().get_view(n_u, n_u + n_p);

    IndexSet locally_relevant_dofs;
    DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
    relevant_partitioning.resize(2);
    relevant_partitioning[0] = locally_relevant_dofs.get_view(0, n_u);
    relevant_partitioning[1] = locally_relevant_dofs.get_view(n_u, n_u + n_p);
@endcode
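The member function used here, IndexSet::get_view(begin, end), returns the
subset of indices that lie in the half-open range [begin, end), shifted so
that the result is an IndexSet of size end-begin starting at zero. A small,
self-contained illustration (not part of the program):

@code
IndexSet all(10);
all.add_range(2, 5); // contains 2, 3, 4
all.add_range(7, 9); // contains 7, 8

const IndexSet lower = all.get_view(0, 6);  // size 6, contains {2, 3, 4}
const IndexSet upper = all.get_view(6, 10); // size 4, contains {1, 2},
                                            // i.e. 7 and 8 shifted by 6
@endcode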
Setting up the constraints for boundary conditions and hanging nodes
is identical to @ref step_40 "step-40". Even though we don't have any hanging nodes
because we only perform global refinement, it is still a good idea
to put this function call in, in case adaptive refinement gets
added later.

@code
    {
      constraints.reinit(locally_relevant_dofs);

      FEValuesExtractors::Vector velocities(0);
      DoFTools::make_hanging_node_constraints(dof_handler, constraints);
      VectorTools::interpolate_boundary_values(dof_handler,
                                               0,
                                               ExactSolution<dim>(),
                                               constraints,
                                               fe.component_mask(velocities));
      constraints.close();
    }
@endcode
Now we create the system matrix based on a BlockDynamicSparsityPattern.
We know that we won't have coupling between different velocity
components (because we use the Laplace form and not the deformation tensor),
and no coupling between the pressure and its test functions, so we use
a Table to communicate this coupling information to
DoFTools::make_sparsity_pattern().

@code
    {
      system_matrix.clear();

      Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
      for (unsigned int c = 0; c < dim + 1; ++c)
        for (unsigned int d = 0; d < dim + 1; ++d)
          if (c == dim && d == dim)
            coupling[c][d] = DoFTools::none;
          else if (c == dim || d == dim || c == d)
            coupling[c][d] = DoFTools::always;
          else
            coupling[c][d] = DoFTools::none;

      BlockDynamicSparsityPattern dsp(relevant_partitioning);

      DoFTools::make_sparsity_pattern(
        dof_handler, coupling, dsp, constraints, false);

      SparsityTools::distribute_sparsity_pattern(
        dsp,
        dof_handler.locally_owned_dofs(),
        mpi_communicator,
        locally_relevant_dofs);

      system_matrix.reinit(owned_partitioning, dsp, mpi_communicator);
    }
@endcode
The preconditioner matrix has a different coupling (we only fill in
the 1,1 block with the mass matrix), otherwise this code is identical
to the construction of the system_matrix above.

@code
    {
      preconditioner_matrix.clear();

      Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
      for (unsigned int c = 0; c < dim + 1; ++c)
        for (unsigned int d = 0; d < dim + 1; ++d)
          if (c == dim && d == dim)
            coupling[c][d] = DoFTools::always;
          else
            coupling[c][d] = DoFTools::none;

      BlockDynamicSparsityPattern dsp(relevant_partitioning);

      DoFTools::make_sparsity_pattern(
        dof_handler, coupling, dsp, constraints, false);

      SparsityTools::distribute_sparsity_pattern(
        dsp,
        Utilities::MPI::all_gather(mpi_communicator,
                                   dof_handler.locally_owned_dofs()),
        mpi_communicator,
        locally_relevant_dofs);

      preconditioner_matrix.reinit(owned_partitioning,
                                   owned_partitioning,
                                   dsp,
                                   mpi_communicator);
    }
@endcode
Finally, we construct the block vectors with the right sizes. The
function call with two std::vector<IndexSet> will create a ghosted
vector.

@code
    locally_relevant_solution.reinit(owned_partitioning,
                                     relevant_partitioning,
                                     mpi_communicator);
    system_rhs.reinit(owned_partitioning, mpi_communicator);
  }
@endcode
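To spell out the distinction: a vector initialized with only the owned
partitioning is fully distributed and writable (used for the right hand side
and as the solver's destination), while a vector initialized with owned plus
relevant partitioning additionally stores read-only ghost entries, so that the
solution can be read on all locally relevant degrees of freedom. A sketch of
how the two interact, using the names of this program:

@code
LA::MPI::BlockVector distributed_solution(owned_partitioning, mpi_communicator);
// ... assemble and solve into distributed_solution ...

// assigning to the ghosted vector performs the communication that fills
// the ghost entries:
locally_relevant_solution = distributed_solution;
@endcode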
<a name="Assembly"></a>
<h3>Assembly</h3>
This function assembles the system matrix, the preconditioner matrix,
and the right hand side. The code is pretty standard.
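To connect the loops below with the formulas of the introduction: on every
cell @f$K@f$ the two local matrices and the local right hand side being
assembled are (with viscosity @f$\nu@f$, velocity shape functions
@f$\varphi_i@f$, and pressure shape functions @f$\psi_i@f$)
@f{align*}{
  A^K_{ij} &= \int_K \Bigl(\nu\, \nabla \varphi_i : \nabla \varphi_j
              - (\nabla \cdot \varphi_i)\, \psi_j
              - \psi_i\, (\nabla \cdot \varphi_j)\Bigr)\, dx,
  \\
  M^K_{ij} &= \int_K \frac{1}{\nu}\, \psi_i\, \psi_j\, dx,
  \qquad
  F^K_i = \int_K \varphi_i \cdot \mathbf{f}\, dx,
@f}
where the second matrix is the pressure mass matrix that only enters the
preconditioner.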
@code
  template <int dim>
  void StokesProblem<dim>::assemble_system()
  {
    system_matrix         = 0;
    preconditioner_matrix = 0;
    system_rhs            = 0;

    const QGauss<dim> quadrature_formula(velocity_degree + 1);

    FEValues<dim> fe_values(fe,
                            quadrature_formula,
                            update_values | update_gradients |
                              update_quadrature_points | update_JxW_values);

    const unsigned int dofs_per_cell = fe.dofs_per_cell;
    const unsigned int n_q_points    = quadrature_formula.size();

    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
    FullMatrix<double> cell_matrix2(dofs_per_cell, dofs_per_cell);
    Vector<double>     cell_rhs(dofs_per_cell);

    const RightHandSide<dim>    right_hand_side;
    std::vector<Vector<double>> rhs_values(n_q_points, Vector<double>(dim + 1));

    std::vector<Tensor<2, dim>> grad_phi_u(dofs_per_cell);
    std::vector<double>         div_phi_u(dofs_per_cell);
    std::vector<double>         phi_p(dofs_per_cell);

    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
    const FEValuesExtractors::Vector     velocities(0);
    const FEValuesExtractors::Scalar     pressure(dim);

    for (const auto &cell : dof_handler.active_cell_iterators())
      if (cell->is_locally_owned())
        {
          cell_matrix  = 0;
          cell_matrix2 = 0;
          cell_rhs     = 0;

          fe_values.reinit(cell);
          right_hand_side.vector_value_list(fe_values.get_quadrature_points(),
                                            rhs_values);
          for (unsigned int q = 0; q < n_q_points; ++q)
            {
              for (unsigned int k = 0; k < dofs_per_cell; ++k)
                {
                  grad_phi_u[k] = fe_values[velocities].gradient(k, q);
                  div_phi_u[k]  = fe_values[velocities].divergence(k, q);
                  phi_p[k]      = fe_values[pressure].value(k, q);
                }

              for (unsigned int i = 0; i < dofs_per_cell; ++i)
                {
                  for (unsigned int j = 0; j < dofs_per_cell; ++j)
                    {
                      cell_matrix(i, j) +=
                        (viscosity *
                           scalar_product(grad_phi_u[i], grad_phi_u[j]) -
                         div_phi_u[i] * phi_p[j] - phi_p[i] * div_phi_u[j]) *
                        fe_values.JxW(q);

                      cell_matrix2(i, j) += 1.0 / viscosity * phi_p[i] *
                                            phi_p[j] * fe_values.JxW(q);
                    }

                  const unsigned int component_i =
                    fe.system_to_component_index(i).first;
                  cell_rhs(i) += fe_values.shape_value(i, q) *
                                 rhs_values[q](component_i) * fe_values.JxW(q);
                }
            }

          cell->get_dof_indices(local_dof_indices);
          constraints.distribute_local_to_global(cell_matrix,
                                                 cell_rhs,
                                                 local_dof_indices,
                                                 system_matrix,
                                                 system_rhs);

          constraints.distribute_local_to_global(cell_matrix2,
                                                 local_dof_indices,
                                                 preconditioner_matrix);
        }

    system_matrix.compress(VectorOperation::add);
    preconditioner_matrix.compress(VectorOperation::add);
    system_rhs.compress(VectorOperation::add);
  }
@endcode
<a name="Solving"></a>
<h3>Solving</h3>
This function solves the linear system with MINRES with a block diagonal
preconditioner and AMG for the two diagonal blocks as described in the
introduction. The preconditioner applies a V-cycle to the 0,0 block
and a CG solve with the mass matrix for the 1,1 block (the Schur complement).
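In formulas, the preconditioner assembled in this function is the block
diagonal operator
@f{align*}{
  P^{-1} =
  \begin{pmatrix}
    \widetilde{A^{-1}} & 0 \\
    0 & \widetilde{S^{-1}}
  \end{pmatrix},
@f}
where @f$\widetilde{A^{-1}}@f$ is a single AMG V-cycle for the velocity block
and @f$\widetilde{S^{-1}}@f$ is a CG solve with the pressure mass matrix
@f$\frac{1}{\nu} M_p@f$ (the matrix assembled into preconditioner_matrix
above), which serves as an approximation of the inverse Schur complement.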
@code
  template <int dim>
  void StokesProblem<dim>::solve()
  {
    LA::MPI::PreconditionAMG prec_A;
    {
      LA::MPI::PreconditionAMG::AdditionalData data;

#ifdef USE_PETSC_LA
      data.symmetric_operator = true;
#endif
      prec_A.initialize(system_matrix.block(0, 0), data);
    }

    LA::MPI::PreconditionAMG prec_S;
    {
      LA::MPI::PreconditionAMG::AdditionalData data;

#ifdef USE_PETSC_LA
      data.symmetric_operator = true;
#endif
      prec_S.initialize(preconditioner_matrix.block(1, 1), data);
    }
@endcode

The InverseMatrix is used to solve for the mass matrix:

@code
    using mp_inverse_t =
      LinearSolvers::InverseMatrix<LA::MPI::SparseMatrix,
                                   LA::MPI::PreconditionAMG>;
    const mp_inverse_t mp_inverse(preconditioner_matrix.block(1, 1), prec_S);
@endcode

This constructs the block preconditioner based on the preconditioners
for the individual blocks defined above.

@code
    const LinearSolvers::BlockDiagonalPreconditioner<LA::MPI::PreconditionAMG,
                                                     mp_inverse_t>
      preconditioner(prec_A, mp_inverse);
@endcode

With that, we can finally set up the linear solver and solve the system:

@code
    SolverControl solver_control(system_matrix.m(),
                                 1e-10 * system_rhs.l2_norm());

    SolverMinRes<LA::MPI::BlockVector> solver(solver_control);

    LA::MPI::BlockVector distributed_solution(owned_partitioning,
                                              mpi_communicator);

    constraints.set_zero(distributed_solution);

    solver.solve(system_matrix,
                 distributed_solution,
                 system_rhs,
                 preconditioner);

    pcout << "   Solved in " << solver_control.last_step() << " iterations."
          << std::endl;

    constraints.distribute(distributed_solution);
@endcode
Like in @ref step_56 "step-56", we subtract the mean pressure to allow error
computations against our reference solution, which has a mean value
of zero.

@code
    locally_relevant_solution = distributed_solution;
    const double mean_pressure =
      VectorTools::compute_mean_value(dof_handler,
                                      QGauss<dim>(velocity_degree + 2),
                                      locally_relevant_solution,
                                      dim);
    distributed_solution.block(1).add(-mean_pressure);
    locally_relevant_solution.block(1) = distributed_solution.block(1);
  }
@endcode
<a name="Therest"></a>
<h3>The rest</h3>
The remainder of the code that deals with mesh refinement, output, and
the main loop is pretty standard.
@code
  template <int dim>
  void StokesProblem<dim>::refine_grid()
  {
    triangulation.refine_global();
  }
@endcode
@code
  template <int dim>
  void StokesProblem<dim>::output_results(const unsigned int cycle) const
  {
    {
      const ComponentSelectFunction<dim> velocity_mask(std::make_pair(0, dim),
                                                       dim + 1);
      const ComponentSelectFunction<dim> pressure_mask(dim, dim + 1);

      Vector<double> cellwise_errors(triangulation.n_active_cells());
      QGauss<dim>    quadrature(velocity_degree + 2);

      VectorTools::integrate_difference(dof_handler, locally_relevant_solution,
                                        ExactSolution<dim>(), cellwise_errors,
                                        quadrature, VectorTools::L2_norm,
                                        &velocity_mask);
      const double error_u_l2 = VectorTools::compute_global_error(
        triangulation, cellwise_errors, VectorTools::L2_norm);

      VectorTools::integrate_difference(dof_handler, locally_relevant_solution,
                                        ExactSolution<dim>(), cellwise_errors,
                                        quadrature, VectorTools::L2_norm,
                                        &pressure_mask);
      const double error_p_l2 = VectorTools::compute_global_error(
        triangulation, cellwise_errors, VectorTools::L2_norm);

      pcout << "error: u_0: " << error_u_l2 << " p_0: " << error_p_l2
            << std::endl;
    }
@endcode
@code
    std::vector<std::string> solution_names(dim, "velocity");
    solution_names.emplace_back("pressure");
    std::vector<DataComponentInterpretation::DataComponentInterpretation>
      data_component_interpretation(
        dim, DataComponentInterpretation::component_is_part_of_vector);
    data_component_interpretation.push_back(
      DataComponentInterpretation::component_is_scalar);

    DataOut<dim> data_out;
    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(locally_relevant_solution,
                             solution_names,
                             DataOut<dim>::type_dof_data,
                             data_component_interpretation);

    LA::MPI::BlockVector interpolated;
    interpolated.reinit(owned_partitioning, MPI_COMM_WORLD);
    VectorTools::interpolate(dof_handler, ExactSolution<dim>(), interpolated);

    LA::MPI::BlockVector interpolated_relevant(owned_partitioning,
                                               relevant_partitioning,
                                               MPI_COMM_WORLD);
    interpolated_relevant = interpolated;
    {
      std::vector<std::string> solution_names(dim, "ref_u");
      solution_names.emplace_back("ref_p");
      data_out.add_data_vector(interpolated_relevant,
                               solution_names,
                               DataOut<dim>::type_dof_data,
                               data_component_interpretation);
    }

    Vector<float> subdomain(triangulation.n_active_cells());
    for (unsigned int i = 0; i < subdomain.size(); ++i)
      subdomain(i) = triangulation.locally_owned_subdomain();
    data_out.add_data_vector(subdomain, "subdomain");

    data_out.build_patches();

    data_out.write_vtu_with_pvtu_record(
      "./", "solution", cycle, mpi_communicator, 2);
  }
@endcode
@code
  template <int dim>
  void StokesProblem<dim>::run()
  {
#ifdef USE_PETSC_LA
    pcout << "Running using PETSc." << std::endl;
#else
    pcout << "Running using Trilinos." << std::endl;
#endif
    const unsigned int n_cycles = 5;
    for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
      {
        pcout << "Cycle " << cycle << ':' << std::endl;

        if (cycle == 0)
          make_grid();
        else
          refine_grid();

        setup_system();
        assemble_system();
        solve();
        output_results(cycle);

        computing_timer.print_summary();
        computing_timer.reset();

        pcout << std::endl;
      }
  }
@endcode
@code
int main(int argc, char *argv[])
{
  try
    {
      using namespace dealii;
      using namespace Step55;

      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      StokesProblem<2> problem(2);
      problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
@endcode
<a name="Results"></a><h1>Results</h1>
As expected from the discussion above, the number of iterations is independent
of the number of processors and only very slightly dependent on @f$h@f$:

<table>
<tr><th colspan="2">PETSc</th><th colspan="8">number of processors</th></tr>
<tr><td colspan="10" align="center">(iteration counts per refinement cycle omitted)</td></tr>
</table>

<table>
<tr><th colspan="2">Trilinos</th><th colspan="8">number of processors</th></tr>
<tr><td colspan="10" align="center">(iteration counts per refinement cycle omitted)</td></tr>
</table>
While the PETSc results show a constant number of iterations, the iterations
increase when using Trilinos. This is likely because of the different settings
used for the AMG preconditioner. For performance reasons we do not allow
coarsening below a couple thousand unknowns. As the coarse solver is an exact
solve (we are using LU by default), a change in the number of levels will
influence the quality of a V-cycle. Therefore, a V-cycle is closer to an exact
solver for smaller problem sizes.
<a name="extensions"></a>
<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
<a name="InvestigateTrilinositerations"></a><h4>Investigate Trilinos iterations</h4>

Play with the smoothers, smoothing steps, and other properties for the
Trilinos AMG to achieve an optimal preconditioner.
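One natural starting point is the AdditionalData structure of the Trilinos AMG
preconditioner. The following is only a sketch with untuned example values (a
Trilinos build is assumed here):

@code
TrilinosWrappers::PreconditionAMG::AdditionalData data;
data.elliptic              = true;        // the velocity block is elliptic
data.smoother_sweeps       = 2;           // more smoothing per level
data.aggregation_threshold = 0.02;        // how aggressively the AMG coarsens
data.smoother_type         = "Chebyshev"; // try different smoothers

TrilinosWrappers::PreconditionAMG prec_A;
prec_A.initialize(system_matrix.block(0, 0), data);
@endcode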
<a name="SolvetheOseenprobleminsteadoftheStokessystem"></a><h4>Solve the Oseen problem instead of the Stokes system</h4>

This change requires changing the outer solver to GMRES or BiCGStab, because
the system is no longer symmetric.

You can prescribe the exact flow solution as @f$b@f$ in the convective term @f$b
\cdot \nabla u@f$. This should give the same solution as the original problem,
if you set the right hand side to zero.
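In the assembly routine this amounts to one extra term in the local matrix.
A sketch of the additional contribution inside the existing quadrature loop,
assuming one also stores the velocity shape function values in a
std::vector<Tensor<1, dim>> phi_u (filled via fe_values[velocities].value(k, q))
and evaluates the prescribed field @f$b@f$ at the quadrature point into a
Tensor<1, dim> b_q; both of these are additions, not part of the current
program:

@code
for (unsigned int i = 0; i < dofs_per_cell; ++i)
  for (unsigned int j = 0; j < dofs_per_cell; ++j)
    cell_matrix(i, j) +=
      phi_u[i] * (grad_phi_u[j] * b_q) * fe_values.JxW(q); // (b . grad u, v)
@endcode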
<a name="Adaptiverefinement"></a><h4>Adaptive refinement</h4>

So far, this tutorial program refines the mesh globally in each step.
Replacing the code in StokesProblem::refine_grid() by something like

@code
Vector<float> estimated_error_per_cell(triangulation.n_active_cells());

const FEValuesExtractors::Vector velocities(0);
KellyErrorEstimator<dim>::estimate(dof_handler,
                                   QGauss<dim - 1>(fe.degree + 1),
                                   {},
                                   locally_relevant_solution,
                                   estimated_error_per_cell,
                                   fe.component_mask(velocities));
@endcode

makes it simple to explore adaptive mesh refinement.
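To actually adapt the mesh based on these indicators one would, as in
@ref step_40 "step-40", follow the estimate by marking and refining cells,
for example (the fractions are only an example choice):

@code
parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
  triangulation, estimated_error_per_cell, 0.3, 0.0);
triangulation.execute_coarsening_and_refinement();
@endcode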
<a name="PlainProg"></a>
<h1> The plain program</h1>
@include "step-55.cc"