namespace LA
{
#if defined(DEAL_II_WITH_PETSC) && !defined(DEAL_II_PETSC_WITH_COMPLEX) && \
  !(defined(DEAL_II_WITH_TRILINOS) && defined(FORCE_USE_OF_TRILINOS))
  using namespace dealii::LinearAlgebraPETSc;
#  define USE_PETSC_LA
#elif defined(DEAL_II_WITH_TRILINOS)
  using namespace dealii::LinearAlgebraTrilinos;
#else
#  error DEAL_II_WITH_PETSC or DEAL_II_WITH_TRILINOS required
#endif
} // namespace LA
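Which of the two branches is taken can be steered at compile time: the first
condition also checks for a macro FORCE_USE_OF_TRILINOS, so defining it before
this block selects the Trilinos wrappers even if PETSc is available. A minimal
sketch of that toggle:

@code
// Illustration only: define this before the block above if both PETSc and
// Trilinos are installed and you prefer the Trilinos backend.
#define FORCE_USE_OF_TRILINOS
@endcode

Either way, the rest of the program only refers to linear algebra types through
the LA::MPI namespace (for example LA::MPI::BlockSparseMatrix,
LA::MPI::BlockVector, and LA::MPI::PreconditionAMG), so it is independent of
which backend ends up being selected.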
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_gmres.h>
#include <deal.II/lac/solver_minres.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>

#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_precondition.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/manifold_lib.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/error_estimator.h>

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/timer.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/distributed/grid_refinement.h>

#include <iostream>
<a name="step_55-Linearsolversandpreconditioners"></a>
<h3>Linear solvers and preconditioners</h3>
We need a few helper classes to represent our solver strategy
described in the introduction.

  namespace LinearSolvers
  {
This class exposes the action of applying the inverse of a given matrix via
the function InverseMatrix::vmult(). Internally, the inverse is not formed
explicitly; instead, a linear solve with CG is performed each time vmult() is
called. This class extends the InverseMatrix class in @ref step_22 "step-22"
with an option to specify a preconditioner, and to allow for different vector
types in the vmult() function. We use the same mechanism as in
@ref step_31 "step-31" to convert a run-time exception into a failed
assertion should the inner solver not converge.
    template <class Matrix, class Preconditioner>
    class InverseMatrix : public Subscriptor
    {
    public:
      InverseMatrix(const Matrix &m, const Preconditioner &preconditioner);

      template <typename VectorType>
      void vmult(VectorType &dst, const VectorType &src) const;

    private:
      const SmartPointer<const Matrix> matrix;
      const Preconditioner            &preconditioner;
    };


    template <class Matrix, class Preconditioner>
    InverseMatrix<Matrix, Preconditioner>::InverseMatrix(
      const Matrix         &m,
      const Preconditioner &preconditioner)
      : matrix(&m)
      , preconditioner(preconditioner)
    {}


    template <class Matrix, class Preconditioner>
    template <typename VectorType>
    void
    InverseMatrix<Matrix, Preconditioner>::vmult(VectorType       &dst,
                                                 const VectorType &src) const
    {
      SolverControl        solver_control(src.size(), 1e-8 * src.l2_norm());
      SolverCG<VectorType> cg(solver_control);
      dst = 0;

      try
        {
          cg.solve(*matrix, dst, src, preconditioner);
        }
      catch (std::exception &e)
        {
          Assert(false, ExcMessage(e.what()));
        }
    }
A template class for a simple block diagonal preconditioner.
    template <class PreconditionerA, class PreconditionerS>
    class BlockDiagonalPreconditioner : public Subscriptor
    {
    public:
      BlockDiagonalPreconditioner(const PreconditionerA &preconditioner_A,
                                  const PreconditionerS &preconditioner_S);

      void vmult(LA::MPI::BlockVector       &dst,
                 const LA::MPI::BlockVector &src) const;

    private:
      const PreconditionerA &preconditioner_A;
      const PreconditionerS &preconditioner_S;
    };


    template <class PreconditionerA, class PreconditionerS>
    BlockDiagonalPreconditioner<PreconditionerA, PreconditionerS>::
      BlockDiagonalPreconditioner(const PreconditionerA &preconditioner_A,
                                  const PreconditionerS &preconditioner_S)
      : preconditioner_A(preconditioner_A)
      , preconditioner_S(preconditioner_S)
    {}


    template <class PreconditionerA, class PreconditionerS>
    void BlockDiagonalPreconditioner<PreconditionerA, PreconditionerS>::vmult(
      LA::MPI::BlockVector       &dst,
      const LA::MPI::BlockVector &src) const
    {
      preconditioner_A.vmult(dst.block(0), src.block(0));
      preconditioner_S.vmult(dst.block(1), src.block(1));
    }
  } // namespace LinearSolvers
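In matrix terms, BlockDiagonalPreconditioner::vmult() simply applies the two
preconditioners to the two blocks of the vector independently, i.e. it realizes

@f{align*}{
  \begin{pmatrix} \text{dst}_0 \\ \text{dst}_1 \end{pmatrix}
  =
  \begin{pmatrix} P_A & 0 \\ 0 & P_S \end{pmatrix}
  \begin{pmatrix} \text{src}_0 \\ \text{src}_1 \end{pmatrix},
@f}

where @f$P_A@f$ and @f$P_S@f$ denote the actions of whatever preconditioners
are passed in for the velocity block and the Schur complement block,
respectively.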
<a name="step_55-Problemsetup"></a>
<h3>Problem setup</h3>
The following classes represent the right hand side and the exact
solution for the test problem.
  template <int dim>
  class RightHandSide : public Function<dim>
  {
  public:
    RightHandSide()
      : Function<dim>(dim + 1)
    {}

    virtual void vector_value(const Point<dim> &p,
                              Vector<double>   &values) const override;
  };


  template <int dim>
  void RightHandSide<dim>::vector_value(const Point<dim> &p,
                                        Vector<double>   &values) const
  {
    const double pi  = numbers::PI;
    const double pi2 = pi * pi;
    const double R_x = p[0];
    const double R_y = p[1];

    // Only the leading factors of the lengthy closed-form Kovasznay
    // expressions are reproduced here; the complete formulas are part of the
    // plain program at the end of this page.
    values[0] = -1.0L / 2.0L * (-2 * std::sqrt(25.0 + 4 * pi2) + 10.0) * /* ... */;
    values[1] = 0.2 * pi * (-std::sqrt(25.0 + 4 * pi2) + 5.0) * /* ... */;
  }


  template <int dim>
  class ExactSolution : public Function<dim>
  {
  public:
    ExactSolution()
      : Function<dim>(dim + 1)
    {}

    virtual void vector_value(const Point<dim> &p,
                              Vector<double>   &values) const override;
  };


  template <int dim>
  void ExactSolution<dim>::vector_value(const Point<dim> &p,
                                        Vector<double>   &values) const
  {
    const double pi  = numbers::PI;
    const double pi2 = pi * pi;
    const double R_x = p[0];
    const double R_y = p[1];

    // Again, only fragments of the closed-form velocity and pressure
    // expressions are reproduced here:
    values[1] = (1.0L / 2.0L) * (-std::sqrt(25.0 + 4 * pi2) + 5.0) * /* ... */;
    values[2] = /* ... */ (-6538034.74494422 + /* ... */) *
                  (-10.0 + 2.0 * std::sqrt(25.0 + 4 * pi2)) +
                /* ... */ (-8 * std::sqrt(25.0 + 4 * pi2) + 40.0) +
                /* ... */ (-10.0 + 2.0 * std::sqrt(25.0 + 4 * pi2));
  }
<a name="step_55-Themainprogram"></a>
<h3>The main program</h3>
The main class is very similar to @ref step_40 "step-40", except that matrices
and vectors are now block versions, and we store a std::vector<IndexSet>
for owned and relevant DoFs instead of a single IndexSet. We have
exactly two IndexSets, one for all velocity unknowns and one for all
pressure unknowns.
  template <int dim>
  class StokesProblem
  {
  public:
    StokesProblem(unsigned int velocity_degree);

    void run();

  private:
    void make_grid();
    void setup_system();
    void assemble_system();
    void solve();
    void refine_grid();
    void output_results(const unsigned int cycle);

    const unsigned int velocity_degree;
    const double       viscosity;
    MPI_Comm           mpi_communicator;

    FESystem<dim>                             fe;
    parallel::distributed::Triangulation<dim> triangulation;
    DoFHandler<dim>                           dof_handler;

    std::vector<IndexSet> owned_partitioning;
    std::vector<IndexSet> relevant_partitioning;

    AffineConstraints<double> constraints;

    LA::MPI::BlockSparseMatrix system_matrix;
    LA::MPI::BlockSparseMatrix preconditioner_matrix;
    LA::MPI::BlockVector       locally_relevant_solution;
    LA::MPI::BlockVector       system_rhs;

    ConditionalOStream pcout;
    TimerOutput        computing_timer;
  };


  template <int dim>
  StokesProblem<dim>::StokesProblem(unsigned int velocity_degree)
    : velocity_degree(velocity_degree)
    , viscosity(0.1)
    , mpi_communicator(MPI_COMM_WORLD)
    , fe(FE_Q<dim>(velocity_degree), dim, FE_Q<dim>(velocity_degree - 1), 1)
    , triangulation(mpi_communicator)
    , dof_handler(triangulation)
    , pcout(std::cout,
            Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
    , computing_timer(mpi_communicator,
                      pcout,
                      TimerOutput::never,
                      TimerOutput::wall_times)
  {}
The Kovasznay flow is defined on the domain [-0.5, 1.5]^2, which we
create by passing the min and max values to GridGenerator::hyper_cube().

  template <int dim>
  void StokesProblem<dim>::make_grid()
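A minimal body for this function creates that square domain and refines it
globally a few times; the number of initial refinement steps shown here is an
assumption, not taken from the text above:

@code
  GridGenerator::hyper_cube(triangulation, -0.5, 1.5);
  triangulation.refine_global(3); // assumed number of initial refinements
@endcode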
<a name="step_55-SystemSetup"></a>
<h3>System Setup</h3>
The construction of the block matrices and vectors is new compared to
@ref step_40 "step-40" and is different compared to serial codes like
@ref step_22 "step-22", because we need to supply the set of rows that belong
to our processor.
  template <int dim>
  void StokesProblem<dim>::setup_system()
  {
    dof_handler.distribute_dofs(fe);
Put all dim velocities into block 0 and the pressure into block 1,
then reorder the unknowns by block. Finally count how many unknowns
we have per block.

    std::vector<unsigned int> stokes_sub_blocks(dim + 1, 0);
    stokes_sub_blocks[dim] = 1;
    DoFRenumbering::component_wise(dof_handler, stokes_sub_blocks);
    const std::vector<types::global_dof_index> dofs_per_block =
      DoFTools::count_dofs_per_fe_block(dof_handler, stokes_sub_blocks);

    const unsigned int n_u = dofs_per_block[0];
    const unsigned int n_p = dofs_per_block[1];

    pcout << " Number of degrees of freedom: " << dof_handler.n_dofs() << " ("
          << n_u << '+' << n_p << ')' << std::endl;
We split up the IndexSet for locally owned and locally relevant DoFs
into two IndexSets based on how we want to create the block matrices
and vectors.

    const IndexSet &locally_owned_dofs = dof_handler.locally_owned_dofs();
    owned_partitioning = {locally_owned_dofs.get_view(0, n_u),
                          locally_owned_dofs.get_view(n_u, n_u + n_p)};

    const IndexSet locally_relevant_dofs =
      DoFTools::extract_locally_relevant_dofs(dof_handler);
    relevant_partitioning = {locally_relevant_dofs.get_view(0, n_u),
                             locally_relevant_dofs.get_view(n_u, n_u + n_p)};
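To illustrate what get_view() does here, consider a small, hypothetical
example (not part of the tutorial): each call returns an IndexSet whose size
equals the requested window, with the selected indices shifted so that the
window starts at zero.

@code
// Hypothetical illustration of IndexSet::get_view():
IndexSet s(10); // index space 0..9
s.add_index(2);
s.add_index(3);
s.add_index(7);
const IndexSet lower = s.get_view(0, 5);  // size 5, contains {2, 3}
const IndexSet upper = s.get_view(5, 10); // size 5, contains {2} (= 7 - 5)
@endcode

In setup_system() the window boundaries are the block sizes n_u and
n_u + n_p, so the two views are exactly the locally owned (or relevant)
velocity and pressure indices, renumbered within their respective blocks.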
Setting up the constraints for boundary conditions and hanging nodes
is identical to @ref step_40 "step-40". Even though we don't have any hanging
nodes because we only perform global refinement, it is still a good idea
to put this function call in, in case adaptive refinement gets added later.

    constraints.reinit(locally_owned_dofs, locally_relevant_dofs);

    const FEValuesExtractors::Vector velocities(0);
    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
    VectorTools::interpolate_boundary_values(dof_handler,
                                             0,
                                             ExactSolution<dim>(),
                                             constraints,
                                             fe.component_mask(velocities));
    constraints.close();
Now we create the system matrix based on a BlockDynamicSparsityPattern.
We know that we won't have coupling between different velocity
components (because we use the Laplacian and not the deformation tensor)
and no coupling of the pressure with its test functions, so we use
a Table to communicate this coupling information to
DoFTools::make_sparsity_pattern().

    {
      system_matrix.clear();

      Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
      for (unsigned int c = 0; c < dim + 1; ++c)
        for (unsigned int d = 0; d < dim + 1; ++d)
          if (c == dim && d == dim)
            coupling[c][d] = DoFTools::none;
          else if (c == dim || d == dim || c == d)
            coupling[c][d] = DoFTools::always;
          else
            coupling[c][d] = DoFTools::none;

      BlockDynamicSparsityPattern dsp(relevant_partitioning);

      DoFTools::make_sparsity_pattern(
        dof_handler, coupling, dsp, constraints, false);

      SparsityTools::distribute_sparsity_pattern(
        dsp,
        dof_handler.locally_owned_dofs(),
        mpi_communicator,
        locally_relevant_dofs);

      system_matrix.reinit(owned_partitioning, dsp, mpi_communicator);
    }
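Given the conditions in the loop above, the coupling table in 2d looks as
follows (rows and columns ordered as @f$u_x@f$, @f$u_y@f$, @f$p@f$):

@verbatim
        u_x      u_y      p
u_x    always   none     always
u_y    none     always   always
p      always   always   none
@endverbatim

so each velocity component couples with itself and with the pressure, while
the pressure-pressure block is left out of the sparsity pattern.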
The preconditioner matrix has a different coupling (we only fill in
the 1,1 block with the @ref GlossMassMatrix "mass matrix"), otherwise this
code is identical to the construction of the system_matrix above.

    {
      preconditioner_matrix.clear();

      Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
      for (unsigned int c = 0; c < dim + 1; ++c)
        for (unsigned int d = 0; d < dim + 1; ++d)
          if (c == dim && d == dim)
            coupling[c][d] = DoFTools::always;
          else
            coupling[c][d] = DoFTools::none;

      BlockDynamicSparsityPattern dsp(relevant_partitioning);

      DoFTools::make_sparsity_pattern(
        dof_handler, coupling, dsp, constraints, false);
      SparsityTools::distribute_sparsity_pattern(
        dsp,
        dof_handler.locally_owned_dofs(),
        mpi_communicator,
        locally_relevant_dofs);
      preconditioner_matrix.reinit(owned_partitioning, dsp, mpi_communicator);
    }
Finally, we construct the block vectors with the right sizes. The
function call with two std::vector<IndexSet> will create a ghosted
vector.

    locally_relevant_solution.reinit(owned_partitioning,
                                     relevant_partitioning,
                                     mpi_communicator);
    system_rhs.reinit(owned_partitioning, mpi_communicator);
  }
<a name="step_55-Assembly"></a>
<h3>Assembly</h3>
This function assembles the system matrix, the preconditioner matrix,
and the right hand side. The code is pretty standard.

  template <int dim>
  void StokesProblem<dim>::assemble_system()
  {
    system_matrix         = 0;
    preconditioner_matrix = 0;
    system_rhs            = 0;

    const QGauss<dim> quadrature_formula(velocity_degree + 1);

    FEValues<dim> fe_values(fe,
                            quadrature_formula,
                            update_values | update_gradients |
                              update_quadrature_points | update_JxW_values);

    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
    const unsigned int n_q_points    = quadrature_formula.size();

    FullMatrix<double> system_cell_matrix(dofs_per_cell, dofs_per_cell);
    FullMatrix<double> preconditioner_cell_matrix(dofs_per_cell, dofs_per_cell);
    Vector<double>     cell_rhs(dofs_per_cell);

    const RightHandSide<dim>    right_hand_side;
    std::vector<Vector<double>> rhs_values(n_q_points, Vector<double>(dim + 1));

    std::vector<Tensor<2, dim>> grad_phi_u(dofs_per_cell);
    std::vector<double>         div_phi_u(dofs_per_cell);
    std::vector<double>         phi_p(dofs_per_cell);

    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
    const FEValuesExtractors::Vector     velocities(0);
    const FEValuesExtractors::Scalar     pressure(dim);

    for (const auto &cell : dof_handler.active_cell_iterators())
      if (cell->is_locally_owned())
        {
          system_cell_matrix         = 0;
          preconditioner_cell_matrix = 0;
          cell_rhs                   = 0;

          fe_values.reinit(cell);
          right_hand_side.vector_value_list(fe_values.get_quadrature_points(),
                                            rhs_values);
          for (unsigned int q = 0; q < n_q_points; ++q)
            {
              for (unsigned int k = 0; k < dofs_per_cell; ++k)
                {
                  grad_phi_u[k] = fe_values[velocities].gradient(k, q);
                  div_phi_u[k]  = fe_values[velocities].divergence(k, q);
                  phi_p[k]      = fe_values[pressure].value(k, q);
                }

              for (unsigned int i = 0; i < dofs_per_cell; ++i)
                {
                  for (unsigned int j = 0; j < dofs_per_cell; ++j)
                    {
                      system_cell_matrix(i, j) +=
                        (viscosity *
                           scalar_product(grad_phi_u[i], grad_phi_u[j]) -
                         div_phi_u[i] * phi_p[j] - phi_p[i] * div_phi_u[j]) *
                        fe_values.JxW(q);

                      preconditioner_cell_matrix(i, j) += 1.0 / viscosity *
                                                          phi_p[i] * phi_p[j] *
                                                          fe_values.JxW(q);
                    }

                  const unsigned int component_i =
                    fe.system_to_component_index(i).first;
                  cell_rhs(i) += fe_values.shape_value(i, q) *
                                 rhs_values[q](component_i) * fe_values.JxW(q);
                }
            }

          cell->get_dof_indices(local_dof_indices);
          constraints.distribute_local_to_global(system_cell_matrix,
                                                 cell_rhs,
                                                 local_dof_indices,
                                                 system_matrix,
                                                 system_rhs);

          constraints.distribute_local_to_global(preconditioner_cell_matrix,
                                                 local_dof_indices,
                                                 preconditioner_matrix);
        }

    system_matrix.compress(VectorOperation::add);
    preconditioner_matrix.compress(VectorOperation::add);
    system_rhs.compress(VectorOperation::add);
  }
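Spelled out, the local contributions computed above correspond to the
following forms, where @f$\varphi_i@f$ and @f$\psi_i@f$ denote the velocity
and pressure parts of shape function @f$i@f$, @f$\Phi_i@f$ the full
vector-valued shape function, @f$\nu@f$ the viscosity, and @f$f@f$ the right
hand side:

@f{align*}{
  A_{ij} &= \int_\Omega \Bigl(\nu\, \nabla\varphi_i : \nabla\varphi_j
            - (\nabla\cdot\varphi_i)\,\psi_j
            - \psi_i\,(\nabla\cdot\varphi_j)\Bigr)\,dx,
  \\
  M^p_{ij} &= \int_\Omega \frac{1}{\nu}\,\psi_i\,\psi_j\,dx,
  \qquad
  F_i = \int_\Omega \Phi_i\cdot f\,dx.
@f}

The first bilinear form is assembled into system_matrix, the scaled pressure
mass matrix into preconditioner_matrix.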
<a name="step_55-Solving"></a>
<h3>Solving</h3>
This function solves the linear system with MINRES with a block diagonal
preconditioner and AMG for the two diagonal blocks as described in the
introduction. The preconditioner applies a V-cycle to the 0,0 block
and a CG solve with the mass matrix for the 1,1 block (the Schur complement).
  template <int dim>
  void StokesProblem<dim>::solve()
  {
    LA::MPI::PreconditionAMG prec_A;
    {
      LA::MPI::PreconditionAMG::AdditionalData data;

#ifdef USE_PETSC_LA
      data.symmetric_operator = true;
#endif

      prec_A.initialize(system_matrix.block(0, 0), data);
    }

    LA::MPI::PreconditionAMG prec_S;
    {
      LA::MPI::PreconditionAMG::AdditionalData data;

#ifdef USE_PETSC_LA
      data.symmetric_operator = true;
#endif

      prec_S.initialize(preconditioner_matrix.block(1, 1), data);
    }
The InverseMatrix is used to solve for the mass matrix:

    using mp_inverse_t =
      LinearSolvers::InverseMatrix<LA::MPI::SparseMatrix,
                                   LA::MPI::PreconditionAMG>;
    const mp_inverse_t mp_inverse(preconditioner_matrix.block(1, 1), prec_S);
This constructs the block preconditioner based on the preconditioners
for the individual blocks defined above.

    const LinearSolvers::BlockDiagonalPreconditioner<LA::MPI::PreconditionAMG,
                                                     mp_inverse_t>
      preconditioner(prec_A, mp_inverse);
With that, we can finally set up the linear solver and solve the system:

    SolverControl solver_control(system_matrix.m(),
                                 1e-10 * system_rhs.l2_norm());

    SolverMinRes<LA::MPI::BlockVector> solver(solver_control);

    LA::MPI::BlockVector distributed_solution(owned_partitioning,
                                              mpi_communicator);

    constraints.set_zero(distributed_solution);

    solver.solve(system_matrix,
                 distributed_solution,
                 system_rhs,
                 preconditioner);

    pcout << " Solved in " << solver_control.last_step() << " iterations."
          << std::endl;

    constraints.distribute(distributed_solution);
Like in @ref step_56 "step-56", we subtract the mean pressure to allow error
computations against our reference solution, which has a mean value of zero.

    locally_relevant_solution = distributed_solution;
    const double mean_pressure =
      VectorTools::compute_mean_value(dof_handler,
                                      QGauss<dim>(velocity_degree + 2),
                                      locally_relevant_solution,
                                      dim);
    distributed_solution.block(1).add(-mean_pressure);
    locally_relevant_solution.block(1) = distributed_solution.block(1);
  }
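In formulas, the pressure component is shifted by a constant so that

@f{align*}{
  p \;\leftarrow\; p - \frac{1}{|\Omega|}\int_\Omega p\,dx,
@f}

which matches the zero mean value of the reference pressure.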
<a name="step_55-Therest"></a>
<h3>The rest</h3>
The remainder of the code that deals with mesh refinement, output, and
the main loop is pretty standard.
  template <int dim>
  void StokesProblem<dim>::refine_grid()
  {
    triangulation.refine_global();
  }
  template <int dim>
  void StokesProblem<dim>::output_results(const unsigned int cycle)
  {
    {
      const ComponentSelectFunction<dim> velocity_mask(std::make_pair(0, dim),
                                                       dim + 1);
      const ComponentSelectFunction<dim> pressure_mask(dim, dim + 1);

      Vector<double>    cellwise_errors(triangulation.n_active_cells());
      const QGauss<dim> quadrature(velocity_degree + 2);

      VectorTools::integrate_difference(dof_handler,
                                        locally_relevant_solution,
                                        ExactSolution<dim>(),
                                        cellwise_errors,
                                        quadrature,
                                        VectorTools::L2_norm,
                                        &velocity_mask);
      const double error_u_l2 = VectorTools::compute_global_error(
        triangulation, cellwise_errors, VectorTools::L2_norm);

      VectorTools::integrate_difference(dof_handler,
                                        locally_relevant_solution,
                                        ExactSolution<dim>(),
                                        cellwise_errors,
                                        quadrature,
                                        VectorTools::L2_norm,
                                        &pressure_mask);
      const double error_p_l2 = VectorTools::compute_global_error(
        triangulation, cellwise_errors, VectorTools::L2_norm);

      pcout << "error: u_0: " << error_u_l2 << " p_0: " << error_p_l2
            << std::endl;
    }
    std::vector<std::string> solution_names(dim, "velocity");
    solution_names.emplace_back("pressure");
    std::vector<DataComponentInterpretation::DataComponentInterpretation>
      data_component_interpretation(
        dim, DataComponentInterpretation::component_is_part_of_vector);
    data_component_interpretation.push_back(
      DataComponentInterpretation::component_is_scalar);

    DataOut<dim> data_out;
    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(locally_relevant_solution,
                             solution_names,
                             DataOut<dim>::type_dof_data,
                             data_component_interpretation);

    LA::MPI::BlockVector interpolated;
    interpolated.reinit(owned_partitioning, MPI_COMM_WORLD);
    VectorTools::interpolate(dof_handler, ExactSolution<dim>(), interpolated);

    LA::MPI::BlockVector interpolated_relevant(owned_partitioning,
                                               relevant_partitioning,
                                               MPI_COMM_WORLD);
    interpolated_relevant = interpolated;
    {
      std::vector<std::string> solution_names(dim, "ref_u");
      solution_names.emplace_back("ref_p");
      data_out.add_data_vector(interpolated_relevant,
                               solution_names,
                               DataOut<dim>::type_dof_data,
                               data_component_interpretation);
    }

    Vector<float> subdomain(triangulation.n_active_cells());
    for (unsigned int i = 0; i < subdomain.size(); ++i)
      subdomain(i) = triangulation.locally_owned_subdomain();
    data_out.add_data_vector(subdomain, "subdomain");

    data_out.build_patches();

    data_out.write_vtu_with_pvtu_record(
      "./", "solution", cycle, mpi_communicator, 2);
  }
  template <int dim>
  void StokesProblem<dim>::run()
  {
#ifdef USE_PETSC_LA
    pcout << "Running using PETSc." << std::endl;
#else
    pcout << "Running using Trilinos." << std::endl;
#endif
    const unsigned int n_cycles = 5;
    for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
      {
        pcout << "Cycle " << cycle << ':' << std::endl;

        if (cycle == 0)
          make_grid();
        else
          refine_grid();

        setup_system();
        assemble_system();
        solve();
        output_results(cycle);

        computing_timer.print_summary();
        computing_timer.reset();

        pcout << std::endl;
      }
  }
int main(int argc, char *argv[])
{
  try
    {
      using namespace dealii;
      using namespace Step55;

      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      StokesProblem<2> problem(2);
      problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
<a name="step_55-Results"></a><h1>Results</h1>
As expected from the discussion above, the number of iterations is independent
of the number of processors and only very slightly dependent on @f$h@f$:
[Table: number of MINRES iterations with the PETSc backend, for an increasing
number of processors]

[Table: number of MINRES iterations with the Trilinos backend, for an
increasing number of processors]
While the PETSc results show a constant number of iterations, the iterations
increase when using Trilinos. This is likely because of the different settings
used for the AMG preconditioner. For performance reasons we do not allow
coarsening below a couple of thousand unknowns. As the coarse solver is an
exact solve (we are using LU by default), a change in the number of levels
will influence the quality of a V-cycle. Therefore, a V-cycle is closer to an
exact solver for smaller problem sizes.
<a name="step-55-extensions"></a>
<a name="step_55-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
<a name="step_55-InvestigateTrilinositerations"></a><h4>Investigate Trilinos iterations</h4>

Play with the smoothers, smoothing steps, and other properties for the
Trilinos AMG to achieve an optimal preconditioner.
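As a starting point, one could experiment with a few of the fields of
TrilinosWrappers::PreconditionAMG::AdditionalData inside solve(); the values
below are only illustrative, not tuned:

@code
#ifndef USE_PETSC_LA
  LA::MPI::PreconditionAMG::AdditionalData data;
  data.elliptic              = true;        // the velocity block is elliptic
  data.smoother_sweeps       = 2;           // more smoothing per level
  data.aggregation_threshold = 0.02;        // influences how fast levels coarsen
  data.smoother_type         = "Chebyshev"; // ML smoother to use
  prec_A.initialize(system_matrix.block(0, 0), data);
#endif
@endcode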
<a name="step_55-SolvetheOseenprobleminsteadoftheStokessystem"></a><h4>Solve the Oseen problem instead of the Stokes system</h4>

This change requires changing the outer solver to GMRES or BiCGStab, because
the system is no longer symmetric.

You can prescribe the exact flow solution as @f$b@f$ in the convective term
@f$b \cdot \nabla u@f$. This should give the same solution as the original
problem if you set the right hand side to zero.
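Since solver_gmres.h is already included at the top of the program, switching
the outer Krylov method is mostly a matter of replacing the MINRES solver
object in solve(); a sketch, reusing the SolverControl and the block diagonal
preconditioner built there:

@code
SolverGMRES<LA::MPI::BlockVector> solver(solver_control);
solver.solve(system_matrix, distributed_solution, system_rhs, preconditioner);
@endcode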
<a name="step_55-Adaptiverefinement"></a><h4>Adaptive refinement</h4>

So far, this tutorial program refines the mesh globally in each step.
Replacing the code in StokesProblem::refine_grid() by something like
@code
Vector<float> estimated_error_per_cell(triangulation.n_active_cells());

const FEValuesExtractors::Vector velocities(0);
KellyErrorEstimator<dim>::estimate(
  dof_handler,
  QGauss<dim - 1>(fe.degree + 1),
  std::map<types::boundary_id, const Function<dim> *>(),
  locally_relevant_solution,
  estimated_error_per_cell,
  fe.component_mask(velocities));
@endcode
makes it simple to explore adaptive mesh refinement.
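The estimated errors would then be fed into the parallel refinement routines
from distributed/grid_refinement.h, which is already included above. A sketch,
with arbitrary refine/coarsen fractions:

@code
parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
  triangulation, estimated_error_per_cell, 0.3, 0.0);
triangulation.execute_coarsening_and_refinement();
@endcode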
<a name="step_55-PlainProg"></a>
<h1> The plain program</h1>
@include "step-55.cc"