The following block selects the linear algebra backend: depending on how deal.II was configured, we alias either the PETSc or the Trilinos wrappers as the namespace LA.

@code
namespace LA
{
#if defined(DEAL_II_WITH_PETSC) && !defined(DEAL_II_PETSC_WITH_COMPLEX) && \
  !(defined(DEAL_II_WITH_TRILINOS) && defined(FORCE_USE_OF_TRILINOS))
  using namespace dealii::LinearAlgebraPETSc;
#  define USE_PETSC_LA
#elif defined(DEAL_II_WITH_TRILINOS)
  using namespace dealii::LinearAlgebraTrilinos;
#else
#  error DEAL_II_WITH_PETSC or DEAL_II_WITH_TRILINOS required
#endif
} // namespace LA
@endcode
@code
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_gmres.h>
#include <deal.II/lac/solver_minres.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>

#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_precondition.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/manifold_lib.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/error_estimator.h>

#include <deal.II/base/utilities.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/distributed/grid_refinement.h>

#include <iostream>
@endcode
<a name="Linearsolversandpreconditioners"></a>
<h3>Linear solvers and preconditioners</h3>

We need a few helper classes to represent our solver strategy
described in the introduction.
All of these helpers live in a namespace LinearSolvers. The first one,
InverseMatrix, exposes the action of applying the inverse of a given
matrix via the function InverseMatrix::vmult(). Internally, the inverse
is not formed explicitly; instead, a linear solver with CG is performed.
This class extends the InverseMatrix class in @ref step_22 "step-22"
with an option to specify a preconditioner, and to allow for different
vector types in the vmult function.
@code
namespace LinearSolvers
{
  template <class Matrix, class Preconditioner>
  class InverseMatrix : public Subscriptor
  {
  public:
    InverseMatrix(const Matrix &m, const Preconditioner &preconditioner);

    template <typename VectorType>
    void vmult(VectorType &dst, const VectorType &src) const;

  private:
    const SmartPointer<const Matrix> matrix;
    const Preconditioner            &preconditioner;
  };


  template <class Matrix, class Preconditioner>
  InverseMatrix<Matrix, Preconditioner>::InverseMatrix(
    const Matrix         &m,
    const Preconditioner &preconditioner)
    : matrix(&m)
    , preconditioner(preconditioner)
  {}


  template <class Matrix, class Preconditioner>
  template <typename VectorType>
  void InverseMatrix<Matrix, Preconditioner>::vmult(VectorType       &dst,
                                                    const VectorType &src) const
  {
    SolverControl        solver_control(src.size(), 1e-8 * src.l2_norm());
    SolverCG<VectorType> cg(solver_control);
    dst = 0;

    try
      {
        cg.solve(*matrix, dst, src, preconditioner);
      }
    catch (std::exception &e)
      {
        Assert(false, ExcMessage(e.what()));
      }
  }
@endcode
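To make the intended use concrete, here is a minimal sketch (not part of the
tutorial program) of applying such an inverse. The objects mass_matrix and
amg_preconditioner are hypothetical placeholders assumed to have been set up
elsewhere:

@code
// Sketch only: approximately invert a (mass-like) matrix by an inner CG
// solve, preconditioned with AMG. `mass_matrix` and `amg_preconditioner`
// are placeholders for objects created elsewhere.
LinearSolvers::InverseMatrix<LA::MPI::SparseMatrix, LA::MPI::PreconditionAMG>
  inverse(mass_matrix, amg_preconditioner);

LA::MPI::Vector dst, src; // initialized with a parallel layout elsewhere
inverse.vmult(dst, src);  // runs CG down to 1e-8 * ||src||
@endcode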
The second helper is a template class for a simple block diagonal
preconditioner for 2x2 block matrices:
@code
  template <class PreconditionerA, class PreconditionerS>
  class BlockDiagonalPreconditioner : public Subscriptor
  {
  public:
    BlockDiagonalPreconditioner(const PreconditionerA &preconditioner_A,
                                const PreconditionerS &preconditioner_S);

    void vmult(LA::MPI::BlockVector       &dst,
               const LA::MPI::BlockVector &src) const;

  private:
    const PreconditionerA &preconditioner_A;
    const PreconditionerS &preconditioner_S;
  };


  template <class PreconditionerA, class PreconditionerS>
  BlockDiagonalPreconditioner<PreconditionerA, PreconditionerS>::
    BlockDiagonalPreconditioner(const PreconditionerA &preconditioner_A,
                                const PreconditionerS &preconditioner_S)
    : preconditioner_A(preconditioner_A)
    , preconditioner_S(preconditioner_S)
  {}


  template <class PreconditionerA, class PreconditionerS>
  void BlockDiagonalPreconditioner<PreconditionerA, PreconditionerS>::vmult(
    LA::MPI::BlockVector       &dst,
    const LA::MPI::BlockVector &src) const
  {
    preconditioner_A.vmult(dst.block(0), src.block(0));
    preconditioner_S.vmult(dst.block(1), src.block(1));
  }
} // namespace LinearSolvers
@endcode
<a name="Problemsetup"></a>
<h3>Problem setup</h3>

The following classes represent the right hand side and the exact
solution for the test problem.
@code
  template <int dim>
  class RightHandSide : public Function<dim>
  {
    ...
  };

  template <int dim>
  void RightHandSide<dim>::vector_value(const Point<dim> &p,
                                        Vector<double>   &values) const
  {
    const double R_x = p[0];
    const double R_y = p[1];
    // ... long closed-form expressions of the manufactured (Kovasznay)
    // right hand side, involving terms such as std::sqrt(25.0 + 4 * pi2) ...
  }


  template <int dim>
  class ExactSolution : public Function<dim>
  {
    ...
  };

  template <int dim>
  void ExactSolution<dim>::vector_value(const Point<dim> &p,
                                        Vector<double>   &values) const
  {
    const double R_x = p[0];
    const double R_y = p[1];
    // ... closed-form Kovasznay velocity and pressure, involving terms
    // such as (-10.0 + 2.0 * std::sqrt(25.0 + 4 * pi2)) ...
  }
@endcode
<a name="Themainprogram"></a>
<h3>The main program</h3>
The main class is very similar to @ref step_40 "step-40", except that
matrices and vectors are now block versions, and we store a
std::vector<IndexSet> for owned and relevant DoFs instead of a single
IndexSet. We have exactly two IndexSets, one for all velocity unknowns
and one for all pressure unknowns.
@code
  template <int dim>
  class StokesProblem
  {
  public:
    StokesProblem(unsigned int velocity_degree);

    void run();

  private:
    void make_grid();
    void setup_system();
    void assemble_system();
    void solve();
    void refine_grid();
    void output_results(const unsigned int cycle) const;

    unsigned int velocity_degree;
    double       viscosity;
    MPI_Comm     mpi_communicator;

    FESystem<dim>                             fe;
    parallel::distributed::Triangulation<dim> triangulation;
    DoFHandler<dim>                           dof_handler;

    std::vector<IndexSet> owned_partitioning;
    std::vector<IndexSet> relevant_partitioning;

    AffineConstraints<double> constraints;

    LA::MPI::BlockSparseMatrix system_matrix;
    LA::MPI::BlockSparseMatrix preconditioner_matrix;
    LA::MPI::BlockVector       locally_relevant_solution;
    LA::MPI::BlockVector       system_rhs;

    ConditionalOStream pcout;
    TimerOutput        computing_timer;
  };


  template <int dim>
  StokesProblem<dim>::StokesProblem(unsigned int velocity_degree)
    : velocity_degree(velocity_degree)
    , viscosity(0.1)
    , mpi_communicator(MPI_COMM_WORLD)
    , fe(FE_Q<dim>(velocity_degree), dim, FE_Q<dim>(velocity_degree - 1), 1)
    , triangulation(mpi_communicator)
    , dof_handler(triangulation)
    , pcout(std::cout,
            (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
    , computing_timer(mpi_communicator,
                      pcout,
                      TimerOutput::summary,
                      TimerOutput::wall_times)
  {}
@endcode
The Kovasznay flow is defined on the domain [-0.5, 1.5]^2, which we
create by passing the min and max values to GridGenerator::hyper_cube().

@code
  template <int dim>
  void StokesProblem<dim>::make_grid()
  {
    GridGenerator::hyper_cube(triangulation, -0.5, 1.5);
    triangulation.refine_global(3);
  }
@endcode
<a name="SystemSetup"></a>
<h3>System Setup</h3>

The construction of the block matrices and vectors is new compared to
@ref step_40 "step-40" and is different compared to serial codes like
@ref step_22 "step-22", because we need to supply the set of rows that
belong to our processor.
@code
  template <int dim>
  void StokesProblem<dim>::setup_system()
  {
    dof_handler.distribute_dofs(fe);
@endcode

Put all dim velocities into block 0 and the pressure into block 1,
then reorder the unknowns by block. Finally, count how many unknowns
we have per block.

@code
    std::vector<unsigned int> stokes_sub_blocks(dim + 1, 0);
    stokes_sub_blocks[dim] = 1;
    DoFRenumbering::component_wise(dof_handler, stokes_sub_blocks);

    const std::vector<types::global_dof_index> dofs_per_block =
      DoFTools::count_dofs_per_fe_block(dof_handler, stokes_sub_blocks);

    const unsigned int n_u = dofs_per_block[0];
    const unsigned int n_p = dofs_per_block[1];

    pcout << " Number of degrees of freedom: " << dof_handler.n_dofs() << " ("
          << n_u << '+' << n_p << ')' << std::endl;
@endcode
We split up the IndexSet for locally owned and locally relevant DoFs
into two IndexSets based on how we want to create the block matrices
and vectors.

@code
    owned_partitioning.resize(2);
    owned_partitioning[0] = dof_handler.locally_owned_dofs().get_view(0, n_u);
    owned_partitioning[1] =
      dof_handler.locally_owned_dofs().get_view(n_u, n_u + n_p);

    const IndexSet locally_relevant_dofs =
      DoFTools::extract_locally_relevant_dofs(dof_handler);
    relevant_partitioning.resize(2);
    relevant_partitioning[0] = locally_relevant_dofs.get_view(0, n_u);
    relevant_partitioning[1] = locally_relevant_dofs.get_view(n_u, n_u + n_p);
@endcode
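As a side note on what IndexSet::get_view() does here (a small sketch with
made-up numbers, not part of the program): the call extracts the subset of
indices lying in a half-open window and renumbers them relative to the start
of that window, which is exactly what the per-block partitioning needs.

@code
// Sketch: get_view(begin, end) keeps the indices in [begin, end) and
// shifts them so that the resulting view is numbered starting at zero.
IndexSet owned(100);      // think: n_u + n_p == 100
owned.add_range(10, 20);  // this process owns DoFs 10..19 ...
owned.add_range(60, 70);  // ... and 60..69

const IndexSet u_part = owned.get_view(0, 50);   // -> {10..19}, size 50
const IndexSet p_part = owned.get_view(50, 100); // -> {10..19}, size 50
@endcode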
Setting up the constraints for boundary conditions and hanging nodes
is identical to @ref step_40 "step-40". Even though we don't have any
hanging nodes because we only perform global refinement, it is still
a good idea to put this function call in, in case adaptive refinement
gets added later.

@code
    {
      constraints.reinit(locally_relevant_dofs);

      const FEValuesExtractors::Vector velocities(0);
      DoFTools::make_hanging_node_constraints(dof_handler, constraints);
      VectorTools::interpolate_boundary_values(dof_handler,
                                               0,
                                               ExactSolution<dim>(),
                                               constraints,
                                               fe.component_mask(velocities));
      constraints.close();
    }
@endcode
Now we create the system matrix based on a BlockDynamicSparsityPattern.
We know that we won't have coupling between different velocity
components (because we use the laplace and not the deformation tensor),
and no coupling between the pressure and its test functions, so we use
a Table to communicate this coupling information to
DoFTools::make_sparsity_pattern().
@code
    {
      system_matrix.clear();

      Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
      for (unsigned int c = 0; c < dim + 1; ++c)
        for (unsigned int d = 0; d < dim + 1; ++d)
          if (c == dim && d == dim)
            coupling[c][d] = DoFTools::none;
          else if (c == dim || d == dim || c == d)
            coupling[c][d] = DoFTools::always;
          else
            coupling[c][d] = DoFTools::none;

      BlockDynamicSparsityPattern dsp(relevant_partitioning);

      DoFTools::make_sparsity_pattern(
        dof_handler, coupling, dsp, constraints, false);

      SparsityTools::distribute_sparsity_pattern(
        dsp,
        dof_handler.locally_owned_dofs(),
        mpi_communicator,
        locally_relevant_dofs);

      system_matrix.reinit(owned_partitioning, dsp, mpi_communicator);
    }
@endcode
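To make the coupling pattern explicit: for dim = 2, the loop above fills the
table as follows (components 0 and 1 are the velocity components, component 2
is the pressure), which matches the statement that velocity components do not
couple with each other and the pressure does not couple with itself:

@code
//         u_x     u_y     p
//  u_x    always  none    always
//  u_y    none    always  always
//  p      always  always  none
@endcode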
The preconditioner matrix has a different coupling (we only fill in
the 1,1 block with the @ref GlossMassMatrix "mass matrix"), otherwise
this code is identical to the construction of the system_matrix above.
@code
    {
      preconditioner_matrix.clear();

      Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
      for (unsigned int c = 0; c < dim + 1; ++c)
        for (unsigned int d = 0; d < dim + 1; ++d)
          if (c == dim && d == dim)
            coupling[c][d] = DoFTools::always;
          else
            coupling[c][d] = DoFTools::none;

      BlockDynamicSparsityPattern dsp(relevant_partitioning);

      DoFTools::make_sparsity_pattern(
        dof_handler, coupling, dsp, constraints, false);
      SparsityTools::distribute_sparsity_pattern(
        dsp,
        Utilities::MPI::all_gather(mpi_communicator,
                                   dof_handler.locally_owned_dofs()),
        mpi_communicator,
        locally_relevant_dofs);
      preconditioner_matrix.reinit(owned_partitioning, dsp, mpi_communicator);
    }
@endcode
Finally, we construct the block vectors with the right sizes. The
function call with two std::vector<IndexSet> will create a ghosted
vector.

@code
    locally_relevant_solution.reinit(owned_partitioning,
                                     relevant_partitioning,
                                     mpi_communicator);
    system_rhs.reinit(owned_partitioning, mpi_communicator);
  }
@endcode
<a name="Assembly"></a>
<h3>Assembly</h3>

This function assembles the system matrix, the preconditioner matrix,
and the right hand side. The code is pretty standard.
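For reference, the cell contributions computed below correspond to the two
bilinear forms from the introduction (the notation here is ours:
@f$\varphi^u_i@f$ and @f$\varphi^p_i@f$ denote the velocity and pressure
parts of shape function @f$i@f$, and @f$\nu@f$ is the viscosity):
@f{align*}{
  A_{ij}   &= \int_\Omega \nu \nabla \varphi^u_i : \nabla \varphi^u_j
              - (\nabla \cdot \varphi^u_i)\,\varphi^p_j
              - \varphi^p_i\,(\nabla \cdot \varphi^u_j) \, dx, \\
  M^p_{ij} &= \int_\Omega \frac{1}{\nu}\, \varphi^p_i\, \varphi^p_j \, dx,
@f}
where @f$A@f$ is accumulated in cell_matrix (and then system_matrix), and the
pressure mass matrix @f$M^p@f$ is accumulated in cell_matrix2 for the
preconditioner.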
@code
  template <int dim>
  void StokesProblem<dim>::assemble_system()
  {
    system_matrix         = 0;
    preconditioner_matrix = 0;
    system_rhs            = 0;

    const QGauss<dim> quadrature_formula(velocity_degree + 1);

    FEValues<dim> fe_values(fe,
                            quadrature_formula,
                            update_values | update_gradients |
                              update_quadrature_points | update_JxW_values);

    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
    const unsigned int n_q_points    = quadrature_formula.size();

    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
    FullMatrix<double> cell_matrix2(dofs_per_cell, dofs_per_cell);
    Vector<double>     cell_rhs(dofs_per_cell);

    const RightHandSide<dim>    right_hand_side;
    std::vector<Vector<double>> rhs_values(n_q_points, Vector<double>(dim + 1));

    std::vector<Tensor<2, dim>> grad_phi_u(dofs_per_cell);
    std::vector<double>         div_phi_u(dofs_per_cell);
    std::vector<double>         phi_p(dofs_per_cell);

    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
    const FEValuesExtractors::Vector     velocities(0);
    const FEValuesExtractors::Scalar     pressure(dim);

    for (const auto &cell : dof_handler.active_cell_iterators())
      if (cell->is_locally_owned())
        {
          cell_matrix  = 0;
          cell_matrix2 = 0;
          cell_rhs     = 0;

          fe_values.reinit(cell);
          right_hand_side.vector_value_list(fe_values.get_quadrature_points(),
                                            rhs_values);
          for (unsigned int q = 0; q < n_q_points; ++q)
            {
              for (unsigned int k = 0; k < dofs_per_cell; ++k)
                {
                  grad_phi_u[k] = fe_values[velocities].gradient(k, q);
                  div_phi_u[k]  = fe_values[velocities].divergence(k, q);
                  phi_p[k]      = fe_values[pressure].value(k, q);
                }

              for (unsigned int i = 0; i < dofs_per_cell; ++i)
                {
                  for (unsigned int j = 0; j < dofs_per_cell; ++j)
                    {
                      cell_matrix(i, j) +=
                        (viscosity *
                           scalar_product(grad_phi_u[i], grad_phi_u[j]) -
                         div_phi_u[i] * phi_p[j] - phi_p[i] * div_phi_u[j]) *
                        fe_values.JxW(q);

                      cell_matrix2(i, j) += 1.0 / viscosity * phi_p[i] *
                                            phi_p[j] * fe_values.JxW(q);
                    }

                  const unsigned int component_i =
                    fe.system_to_component_index(i).first;
                  cell_rhs(i) += fe_values.shape_value(i, q) *
                                 rhs_values[q](component_i) * fe_values.JxW(q);
                }
            }

          cell->get_dof_indices(local_dof_indices);
          constraints.distribute_local_to_global(cell_matrix,
                                                 cell_rhs,
                                                 local_dof_indices,
                                                 system_matrix,
                                                 system_rhs);

          constraints.distribute_local_to_global(cell_matrix2,
                                                 local_dof_indices,
                                                 preconditioner_matrix);
        }

    system_matrix.compress(VectorOperation::add);
    preconditioner_matrix.compress(VectorOperation::add);
    system_rhs.compress(VectorOperation::add);
  }
@endcode
<a name="Solving"></a>
<h3>Solving</h3>

This function solves the linear system with MINRES with a block diagonal
preconditioner and AMG for the two diagonal blocks as described in the
introduction. The preconditioner applies a V-cycle to the 0,0 block and
a CG solve with the mass matrix for the 1,1 block (the Schur complement).
@code
  template <int dim>
  void StokesProblem<dim>::solve()
  {
    LA::MPI::PreconditionAMG prec_A;
    {
      LA::MPI::PreconditionAMG::AdditionalData data;

#ifdef USE_PETSC_LA
      data.symmetric_operator = true;
#endif
      prec_A.initialize(system_matrix.block(0, 0), data);
    }

    LA::MPI::PreconditionAMG prec_S;
    {
      LA::MPI::PreconditionAMG::AdditionalData data;

#ifdef USE_PETSC_LA
      data.symmetric_operator = true;
#endif
      prec_S.initialize(preconditioner_matrix.block(1, 1), data);
    }
@endcode

The InverseMatrix is used to solve for the mass matrix:

@code
    using mp_inverse_t = LinearSolvers::InverseMatrix<LA::MPI::SparseMatrix,
                                                      LA::MPI::PreconditionAMG>;
    const mp_inverse_t mp_inverse(preconditioner_matrix.block(1, 1), prec_S);
@endcode
This constructs the block preconditioner based on the preconditioners
for the individual blocks defined above.

@code
    const LinearSolvers::BlockDiagonalPreconditioner<LA::MPI::PreconditionAMG,
                                                     mp_inverse_t>
      preconditioner(prec_A, mp_inverse);
@endcode

With that, we can finally set up the linear solver and solve the system:

@code
    SolverControl solver_control(system_matrix.m(),
                                 1e-10 * system_rhs.l2_norm());

    SolverMinRes<LA::MPI::BlockVector> solver(solver_control);

    LA::MPI::BlockVector distributed_solution(owned_partitioning,
                                              mpi_communicator);

    constraints.set_zero(distributed_solution);

    solver.solve(system_matrix,
                 distributed_solution,
                 system_rhs,
                 preconditioner);

    pcout << " Solved in " << solver_control.last_step() << " iterations."
          << std::endl;

    constraints.distribute(distributed_solution);
@endcode
Like in @ref step_56 "step-56", we subtract the mean pressure to allow
error computations against our reference solution, which has a mean
value of zero.

@code
    locally_relevant_solution = distributed_solution;
    const double mean_pressure =
      VectorTools::compute_mean_value(dof_handler,
                                      QGauss<dim>(velocity_degree + 2),
                                      locally_relevant_solution,
                                      dim);
    distributed_solution.block(1).add(-mean_pressure);
    locally_relevant_solution.block(1) = distributed_solution.block(1);
  }
@endcode
<a name="Therest"></a>
<h3>The rest</h3>

The remainder of the code that deals with mesh refinement, output, and
the main loop is pretty standard.

@code
  template <int dim>
  void StokesProblem<dim>::refine_grid()
  {
    triangulation.refine_global();
  }
@endcode
@code
  template <int dim>
  void StokesProblem<dim>::output_results(const unsigned int cycle) const
  {
    {
      const ComponentSelectFunction<dim> velocity_mask(std::make_pair(0, dim),
                                                       dim + 1);
      const ComponentSelectFunction<dim> pressure_mask(dim, dim + 1);

      Vector<double> cellwise_errors(triangulation.n_active_cells());
      QGauss<dim>    quadrature(velocity_degree + 2);

      VectorTools::integrate_difference(dof_handler,
                                        locally_relevant_solution,
                                        ExactSolution<dim>(),
                                        cellwise_errors,
                                        quadrature,
                                        VectorTools::L2_norm,
                                        &velocity_mask);
      const double error_u_l2 =
        VectorTools::compute_global_error(triangulation,
                                          cellwise_errors,
                                          VectorTools::L2_norm);

      VectorTools::integrate_difference(dof_handler,
                                        locally_relevant_solution,
                                        ExactSolution<dim>(),
                                        cellwise_errors,
                                        quadrature,
                                        VectorTools::L2_norm,
                                        &pressure_mask);
      const double error_p_l2 =
        VectorTools::compute_global_error(triangulation,
                                          cellwise_errors,
                                          VectorTools::L2_norm);

      pcout << "error: u_0: " << error_u_l2 << " p_0: " << error_p_l2
            << std::endl;
    }

    {
      std::vector<std::string> solution_names(dim, "velocity");
      solution_names.emplace_back("pressure");
      std::vector<DataComponentInterpretation::DataComponentInterpretation>
        data_component_interpretation(
          dim, DataComponentInterpretation::component_is_part_of_vector);
      data_component_interpretation.push_back(
        DataComponentInterpretation::component_is_scalar);

      DataOut<dim> data_out;
      data_out.attach_dof_handler(dof_handler);
      data_out.add_data_vector(locally_relevant_solution,
                               solution_names,
                               DataOut<dim>::type_dof_data,
                               data_component_interpretation);

      LA::MPI::BlockVector interpolated;
      interpolated.reinit(owned_partitioning, MPI_COMM_WORLD);
      VectorTools::interpolate(dof_handler, ExactSolution<dim>(), interpolated);

      LA::MPI::BlockVector interpolated_relevant(owned_partitioning,
                                                 relevant_partitioning,
                                                 MPI_COMM_WORLD);
      interpolated_relevant = interpolated;
      {
        std::vector<std::string> solution_names(dim, "ref_u");
        solution_names.emplace_back("ref_p");
        data_out.add_data_vector(interpolated_relevant,
                                 solution_names,
                                 DataOut<dim>::type_dof_data,
                                 data_component_interpretation);
      }

      Vector<float> subdomain(triangulation.n_active_cells());
      for (unsigned int i = 0; i < subdomain.size(); ++i)
        subdomain(i) = triangulation.locally_owned_subdomain();
      data_out.add_data_vector(subdomain, "subdomain");

      data_out.build_patches();

      data_out.write_vtu_with_pvtu_record(
        "./", "solution", cycle, mpi_communicator, 2);
    }
  }
@endcode
@code
  template <int dim>
  void StokesProblem<dim>::run()
  {
#ifdef USE_PETSC_LA
    pcout << "Running using PETSc." << std::endl;
#else
    pcout << "Running using Trilinos." << std::endl;
#endif
    const unsigned int n_cycles = 5;
    for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
      {
        pcout << "Cycle " << cycle << ':' << std::endl;

        if (cycle == 0)
          make_grid();
        else
          refine_grid();

        setup_system();
        assemble_system();
        solve();

        output_results(cycle);

        computing_timer.print_summary();
        computing_timer.reset();

        pcout << std::endl;
      }
  }
@endcode
@code
int main(int argc, char *argv[])
{
  try
    {
      using namespace dealii;
      using namespace Step55;

      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      StokesProblem<2> problem(2);
      problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
@endcode
<a name="Results"></a><h1>Results</h1>

As expected from the discussion above, the number of iterations is independent
of the number of processors and only very slightly dependent on @f$h@f$:
(Table: number of iterations with PETSc, as a function of the number of
processors and the mesh refinement level.)

(Table: number of iterations with Trilinos, as a function of the number of
processors and the mesh refinement level.)
While the PETSc results show a constant number of iterations, the iterations
increase when using Trilinos. This is likely because of the different settings
used for the AMG preconditioner. For performance reasons we do not allow
coarsening below a couple thousand unknowns. As the coarse solver is an exact
solve (we are using LU by default), a change in the number of levels will
influence the quality of a V-cycle. Therefore, a V-cycle is closer to an exact
solver for smaller problem sizes.
<a name="extensions"></a>
<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
<a name="InvestigateTrilinositerations"></a><h4>Investigate Trilinos iterations</h4>

Play with the smoothers, smoothing steps, and other properties for the
Trilinos AMG to achieve an optimal preconditioner.
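As a starting point for such experiments, here is a sketch (illustrative
values only, not recommended settings) of where those knobs live when the
Trilinos branch is active; in that configuration the fields below belong to
LA::MPI::PreconditionAMG::AdditionalData:

@code
#ifndef USE_PETSC_LA
  // Sketch: tune the ML/AMG hierarchy used for the velocity block.
  LA::MPI::PreconditionAMG::AdditionalData data;
  data.elliptic              = true;  // the velocity block is elliptic
  data.higher_order_elements = true;  // we use Q2 elements for the velocity
  data.smoother_sweeps       = 2;     // more smoothing per level
  data.aggregation_threshold = 0.02;  // controls how aggressively to coarsen
  prec_A.initialize(system_matrix.block(0, 0), data);
#endif
@endcode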
<a name="SolvetheOseenprobleminsteadoftheStokessystem"></a><h4>Solve the Oseen problem instead of the Stokes system</h4>

This change requires changing the outer solver to GMRES or BiCGStab, because
the system is no longer symmetric.

You can prescribe the exact flow solution as @f$b@f$ in the convective term
@f$b \cdot \nabla u@f$. This should give the same solution as the original
problem if you set the right hand side to zero.
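The solver swap itself is small; a minimal sketch of the corresponding change
in StokesProblem::solve() could look like the following (FGMRES is shown here;
the concrete choice and settings are up to the reader):

@code
// Sketch: replace the symmetric MINRES solver by a flexible GMRES solver
// for the non-symmetric Oseen system; the rest of solve() stays unchanged.
SolverControl solver_control(system_matrix.m(),
                             1e-10 * system_rhs.l2_norm());
SolverFGMRES<LA::MPI::BlockVector> solver(solver_control);
solver.solve(system_matrix, distributed_solution, system_rhs, preconditioner);
@endcode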
<a name="Adaptiverefinement"></a><h4>Adaptive refinement</h4>

So far, this tutorial program refines the mesh globally in each step.
Replacing the code in StokesProblem::refine_grid() by something like
@code
Vector<float> estimated_error_per_cell(triangulation.n_active_cells());

FEValuesExtractors::Vector velocities(0);
KellyErrorEstimator<dim>::estimate(
  dof_handler,
  QGauss<dim - 1>(fe.degree + 1),
  std::map<types::boundary_id, const Function<dim> *>(),
  locally_relevant_solution,
  estimated_error_per_cell,
  fe.component_mask(velocities));
parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
  triangulation, estimated_error_per_cell, 0.3, 0.0);
triangulation.execute_coarsening_and_refinement();
@endcode
makes it simple to explore adaptive mesh refinement.
<a name="PlainProg"></a>
<h1> The plain program</h1>
@include "step-55.cc"