@code
namespace LA
{
#if defined(DEAL_II_WITH_PETSC) && !defined(DEAL_II_PETSC_WITH_COMPLEX) && \
  !(defined(DEAL_II_WITH_TRILINOS) && defined(FORCE_USE_OF_TRILINOS))
  using namespace dealii::LinearAlgebraPETSc;
#  define USE_PETSC_LA
#elif defined(DEAL_II_WITH_TRILINOS)
  using namespace dealii::LinearAlgebraTrilinos;
#else
#  error DEAL_II_WITH_PETSC or DEAL_II_WITH_TRILINOS required
#endif
} // namespace LA
@endcode
@code
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_gmres.h>
#include <deal.II/lac/solver_minres.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>

#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_precondition.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/manifold_lib.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/error_estimator.h>

#include <deal.II/base/utilities.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/distributed/grid_refinement.h>

#include <iostream>
@endcode
<a name="Linearsolversandpreconditioners"></a>
<h3>Linear solvers and preconditioners</h3>
We need a few helper classes to represent our solver strategy described in the introduction.
@code
namespace LinearSolvers
{
@endcode

This class exposes the action of applying the inverse of a given matrix via the function InverseMatrix::vmult(). Internally, the inverse is not formed explicitly; instead, a linear solver with CG is performed. This class extends the InverseMatrix class in @ref step_22 "step-22" with an option to specify a preconditioner, and to allow for different vector types in the vmult() function.
@code
template <class Matrix, class Preconditioner>
class InverseMatrix : public Subscriptor
{
public:
  InverseMatrix(const Matrix &m, const Preconditioner &preconditioner);

  template <typename VectorType>
  void vmult(VectorType &dst, const VectorType &src) const;

private:
  const SmartPointer<const Matrix> matrix;
  const Preconditioner            &preconditioner;
};


template <class Matrix, class Preconditioner>
InverseMatrix<Matrix, Preconditioner>::InverseMatrix(
  const Matrix         &m,
  const Preconditioner &preconditioner)
  : matrix(&m)
  , preconditioner(preconditioner)
{}


template <class Matrix, class Preconditioner>
template <typename VectorType>
void InverseMatrix<Matrix, Preconditioner>::vmult(VectorType       &dst,
                                                  const VectorType &src) const
{
  SolverControl        solver_control(src.size(), 1e-8 * src.l2_norm());
  SolverCG<VectorType> cg(solver_control);
  dst = 0;

  try
    {
      cg.solve(*matrix, dst, src, preconditioner);
    }
  catch (std::exception &e)
    {
      Assert(false, ExcMessage(e.what()));
    }
}
@endcode
The following is a template class for a simple block diagonal preconditioner for 2x2 block matrices.
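In formulas, the action implemented by vmult() below is simply (writing @f$\widetilde{A^{-1}}@f$ and @f$\widetilde{S^{-1}}@f$ for the approximate inverses provided by the two preconditioner objects):
@f[
P^{-1}
\begin{pmatrix} v \\ q \end{pmatrix}
=
\begin{pmatrix} \widetilde{A^{-1}}\, v \\ \widetilde{S^{-1}}\, q \end{pmatrix}.
@f]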
@code
template <class PreconditionerA, class PreconditionerS>
class BlockDiagonalPreconditioner : public Subscriptor
{
public:
  BlockDiagonalPreconditioner(const PreconditionerA &preconditioner_A,
                              const PreconditionerS &preconditioner_S);

  void vmult(LA::MPI::BlockVector &dst, const LA::MPI::BlockVector &src) const;

private:
  const PreconditionerA &preconditioner_A;
  const PreconditionerS &preconditioner_S;
};


template <class PreconditionerA, class PreconditionerS>
BlockDiagonalPreconditioner<PreconditionerA, PreconditionerS>::
  BlockDiagonalPreconditioner(const PreconditionerA &preconditioner_A,
                              const PreconditionerS &preconditioner_S)
  : preconditioner_A(preconditioner_A)
  , preconditioner_S(preconditioner_S)
{}


template <class PreconditionerA, class PreconditionerS>
void BlockDiagonalPreconditioner<PreconditionerA, PreconditionerS>::vmult(
  LA::MPI::BlockVector       &dst,
  const LA::MPI::BlockVector &src) const
{
  preconditioner_A.vmult(dst.block(0), src.block(0));
  preconditioner_S.vmult(dst.block(1), src.block(1));
}
} // namespace LinearSolvers
@endcode
<a name="Problemsetup"></a>
<h3>Problem setup</h3>
The following classes represent the right hand side and the exact solution for the test problem.
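Concretely, both classes hard-code the Kovasznay flow. With viscosity @f$\nu = 0.1@f$, i.e. Reynolds number @f$\mathrm{Re} = 1/\nu = 10@f$, and @f$\lambda = \frac{\mathrm{Re}}{2} - \sqrt{\frac{\mathrm{Re}^2}{4} + 4\pi^2} = 5 - \sqrt{25 + 4\pi^2}@f$, the exact solution is
@f[
u =
\begin{pmatrix}
1 - e^{\lambda x} \cos (2\pi y) \\[1ex]
\frac{\lambda}{2\pi}\, e^{\lambda x} \sin (2\pi y)
\end{pmatrix},
\qquad
p = -\frac{1}{2} e^{2\lambda x} + C,
@f]
where the constant @f$C@f$ is chosen so that the pressure has mean value zero. This is where the recurring expression <code>sqrt(25.0 + 4 * pi2)</code> in the code below comes from.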
@code
template <int dim>
class RightHandSide : public Function<dim>
{
public:
  RightHandSide()
    : Function<dim>(dim + 1)
  {}

  virtual void vector_value(const Point<dim> &p,
                            Vector<double>   &value) const override;
};


template <int dim>
void RightHandSide<dim>::vector_value(const Point<dim> &p,
                                      Vector<double>   &values) const
{
  const double R_x = p[0];
  const double R_y = p[1];

  const double pi  = numbers::PI;
  const double pi2 = pi * pi;

  values[0] =
    -1.0L / 2.0L * (-2 * sqrt(25.0 + 4 * pi2) + 10.0) *
      exp(R_x * (-2 * sqrt(25.0 + 4 * pi2) + 10.0)) -
    0.4 * pi2 * exp(R_x * (-sqrt(25.0 + 4 * pi2) + 5.0)) * cos(2 * R_y * pi) +
    0.1 * pow(-sqrt(25.0 + 4 * pi2) + 5.0, 2) *
      exp(R_x * (-sqrt(25.0 + 4 * pi2) + 5.0)) * cos(2 * R_y * pi);
  values[1] = 0.2 * pi * (-sqrt(25.0 + 4 * pi2) + 5.0) *
                exp(R_x * (-sqrt(25.0 + 4 * pi2) + 5.0)) * sin(2 * R_y * pi) -
              0.05 * pow(-sqrt(25.0 + 4 * pi2) + 5.0, 3) *
                exp(R_x * (-sqrt(25.0 + 4 * pi2) + 5.0)) * sin(2 * R_y * pi) /
                pi;
  values[2] = 0;
}
@endcode
@code
template <int dim>
class ExactSolution : public Function<dim>
{
public:
  ExactSolution()
    : Function<dim>(dim + 1)
  {}

  virtual void vector_value(const Point<dim> &p,
                            Vector<double>   &value) const override;
};


template <int dim>
void ExactSolution<dim>::vector_value(const Point<dim> &p,
                                      Vector<double>   &values) const
{
  const double R_x = p[0];
  const double R_y = p[1];

  const double pi  = numbers::PI;
  const double pi2 = pi * pi;

  values[0] =
    -exp(R_x * (-sqrt(25.0 + 4 * pi2) + 5.0)) * cos(2 * R_y * pi) + 1;
  values[1] = (1.0L / 2.0L) * (-sqrt(25.0 + 4 * pi2) + 5.0) *
              exp(R_x * (-sqrt(25.0 + 4 * pi2) + 5.0)) * sin(2 * R_y * pi) /
              pi;
  values[2] =
    -1.0L / 2.0L * exp(R_x * (-2 * sqrt(25.0 + 4 * pi2) + 10.0)) -
    (-6538034.74494422 +
     0.0134758939981709 * exp(4 * sqrt(25.0 + 4 * pi2))) /
      (-80.0 * exp(3 * sqrt(25.0 + 4 * pi2)) +
       16.0 * sqrt(25.0 + 4 * pi2) * exp(3 * sqrt(25.0 + 4 * pi2))) -
    1634508.68623606 * exp(-3.0 * sqrt(25.0 + 4 * pi2)) /
      (-10.0 + 2.0 * sqrt(25.0 + 4 * pi2)) +
    (-0.00673794699908547 * exp(sqrt(25.0 + 4 * pi2)) +
     3269017.37247211 * exp(-3 * sqrt(25.0 + 4 * pi2))) /
      (-8 * sqrt(25.0 + 4 * pi2) + 40.0) +
    0.00336897349954273 * exp(1.0 * sqrt(25.0 + 4 * pi2)) /
      (-10.0 + 2.0 * sqrt(25.0 + 4 * pi2));
}
@endcode
<a name="Themainprogram"></a>
<h3>The main program</h3>
The main class is very similar to @ref step_40 "step-40", except that matrices and vectors are now block versions, and we store a std::vector<IndexSet> for owned and relevant DoFs instead of a single IndexSet. We have exactly two IndexSets, one for all velocity unknowns and one for all pressure unknowns.
@code
template <int dim>
class StokesProblem
{
public:
  StokesProblem(unsigned int velocity_degree);

  void run();

private:
  void make_grid();
  void setup_system();
  void assemble_system();
  void solve();
  void refine_grid();
  void output_results(const unsigned int cycle) const;

  unsigned int velocity_degree;

  // ... further member variables: viscosity, MPI communicator, finite
  // element, triangulation, DoFHandler, constraints, block matrices and
  // vectors, ConditionalOStream, and timer ...

  std::vector<IndexSet> owned_partitioning;
  std::vector<IndexSet> relevant_partitioning;
};
@endcode
@code
template <int dim>
StokesProblem<dim>::StokesProblem(unsigned int velocity_degree)
  : velocity_degree(velocity_degree)
  , mpi_communicator(MPI_COMM_WORLD)
  // ... initializers for the finite element, triangulation, etc. ...
  , computing_timer(mpi_communicator,
                    pcout,
                    TimerOutput::summary,
                    TimerOutput::wall_times)
{}
@endcode
The Kovasznay flow is defined on the domain [-0.5, 1.5]^2, which we create by passing the min and max values to GridGenerator::hyper_cube():

@code
template <int dim>
void StokesProblem<dim>::make_grid()
{
  GridGenerator::hyper_cube(triangulation, -0.5, 1.5);
  triangulation.refine_global(3);
}
@endcode
<a name="SystemSetup"></a>
<h3>System Setup</h3>
The construction of the block matrices and vectors is new compared to @ref step_40 "step-40" and is different compared to serial codes like @ref step_22 "step-22", because we need to supply the set of rows that belong to our processor.
@code
template <int dim>
void StokesProblem<dim>::setup_system()
{
  dof_handler.distribute_dofs(fe);
@endcode
Put all dim velocities into block 0 and the pressure into block 1, then reorder the unknowns by block. Finally count how many unknowns we have per block.

@code
  std::vector<unsigned int> stokes_sub_blocks(dim + 1, 0);
  stokes_sub_blocks[dim] = 1;
  DoFRenumbering::component_wise(dof_handler, stokes_sub_blocks);

  const std::vector<types::global_dof_index> dofs_per_block =
    DoFTools::count_dofs_per_fe_block(dof_handler, stokes_sub_blocks);

  const unsigned int n_u = dofs_per_block[0];
  const unsigned int n_p = dofs_per_block[1];

  pcout << "   Number of degrees of freedom: " << dof_handler.n_dofs() << " ("
        << n_u << '+' << n_p << ')' << std::endl;
@endcode
We split up the IndexSet for locally owned and locally relevant DoFs into two IndexSets based on how we want to create the block matrices and vectors.

@code
  owned_partitioning.resize(2);
  owned_partitioning[0] = dof_handler.locally_owned_dofs().get_view(0, n_u);
  owned_partitioning[1] =
    dof_handler.locally_owned_dofs().get_view(n_u, n_u + n_p);

  IndexSet locally_relevant_dofs;
  DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
  relevant_partitioning.resize(2);
  relevant_partitioning[0] = locally_relevant_dofs.get_view(0, n_u);
  relevant_partitioning[1] = locally_relevant_dofs.get_view(n_u, n_u + n_p);
@endcode
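To see what get_view() computes, here is a minimal, hypothetical example (the index values are made up purely for illustration):

@code
// Hypothetical illustration of IndexSet::get_view(): an index set within
// [0,10) containing {0,1,2,6,7}. Viewing the window [0,4) yields an index
// set within [0,4) containing {0,1,2}; viewing [4,10) yields one within
// [0,6) containing {2,3} (the surviving indices, shifted down by 4).
IndexSet owned(10);
owned.add_range(0, 3); // adds indices 0,1,2
owned.add_range(6, 8); // adds indices 6,7

const IndexSet first_block  = owned.get_view(0, 4);  // {0,1,2}, size 4
const IndexSet second_block = owned.get_view(4, 10); // {2,3},   size 6
@endcode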
Setting up the constraints for boundary conditions and hanging nodes is identical to @ref step_40 "step-40". Even though we don't have any hanging nodes because we only perform global refinement, it is still a good idea to put this function call in, in case adaptive refinement gets added later.

@code
  constraints.reinit(locally_relevant_dofs);

  FEValuesExtractors::Vector velocities(0);
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
  VectorTools::interpolate_boundary_values(dof_handler,
                                           0,
                                           ExactSolution<dim>(),
                                           constraints,
                                           fe.component_mask(velocities));
  constraints.close();
@endcode
Now we create the system matrix based on a BlockDynamicSparsityPattern. We know that we won't have coupling between different velocity components (because we use the Laplacian and not the deformation tensor) and no coupling between pressure with its test functions, so we use a Table to communicate this coupling information to DoFTools::make_sparsity_pattern().

@code
  {
    system_matrix.clear();

    Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
    for (unsigned int c = 0; c < dim + 1; ++c)
      for (unsigned int d = 0; d < dim + 1; ++d)
        if (c == dim && d == dim)
          coupling[c][d] = DoFTools::none;
        else if (c == dim || d == dim || c == d)
          coupling[c][d] = DoFTools::always;
        else
          coupling[c][d] = DoFTools::none;

    BlockDynamicSparsityPattern dsp(relevant_partitioning);

    DoFTools::make_sparsity_pattern(
      dof_handler, coupling, dsp, constraints, false);

    SparsityTools::distribute_sparsity_pattern(
      dsp,
      dof_handler.locally_owned_dofs(),
      mpi_communicator,
      locally_relevant_dofs);

    system_matrix.reinit(owned_partitioning, dsp, mpi_communicator);
  }
@endcode
The preconditioner matrix has a different coupling (we only fill in the 1,1 block with the mass matrix), otherwise this code is identical to the construction of the system_matrix above.

@code
  {
    preconditioner_matrix.clear();

    Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
    for (unsigned int c = 0; c < dim + 1; ++c)
      for (unsigned int d = 0; d < dim + 1; ++d)
        if (c == dim && d == dim)
          coupling[c][d] = DoFTools::always;
        else
          coupling[c][d] = DoFTools::none;

    BlockDynamicSparsityPattern dsp(relevant_partitioning);

    DoFTools::make_sparsity_pattern(
      dof_handler, coupling, dsp, constraints, false);
    SparsityTools::distribute_sparsity_pattern(
      dsp,
      Utilities::MPI::all_gather(mpi_communicator,
                                 dof_handler.locally_owned_dofs()),
      mpi_communicator,
      locally_relevant_dofs);
    preconditioner_matrix.reinit(owned_partitioning,
                                 owned_partitioning,
                                 dsp,
                                 mpi_communicator);
  }
@endcode
Finally, we construct the block vectors with the right sizes. The function call with two std::vector<IndexSet> will create a ghosted vector.

@code
  locally_relevant_solution.reinit(owned_partitioning,
                                   relevant_partitioning,
                                   mpi_communicator);
  system_rhs.reinit(owned_partitioning, mpi_communicator);
}
@endcode
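As a reminder of the distinction (a sketch of the workflow, not code from this program): a ghosted vector can be read at all locally relevant entries but must not be written to; writes happen in a vector without ghost entries, which is then assigned to the ghosted one, triggering the MPI communication that updates the ghost values.

@code
// Sketch of the ghosted/non-ghosted workflow used throughout this program:
LA::MPI::BlockVector distributed(owned_partitioning, mpi_communicator);
// ... write into 'distributed', e.g. as the result of a linear solve ...
locally_relevant_solution = distributed; // copy + ghost value update
// locally_relevant_solution may now be read at all locally relevant DoFs
@endcode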
<a name="Assembly"></a>
<h3>Assembly</h3>
This function assembles the system matrix, the preconditioner matrix, and the right hand side. The code is pretty standard.
@code
template <int dim>
void StokesProblem<dim>::assemble_system()
{
  system_matrix         = 0;
  preconditioner_matrix = 0;
  system_rhs            = 0;

  const QGauss<dim> quadrature_formula(velocity_degree + 1);

  FEValues<dim> fe_values(fe,
                          quadrature_formula,
                          update_values | update_gradients |
                            update_quadrature_points | update_JxW_values);

  const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
  const unsigned int n_q_points    = quadrature_formula.size();

  FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
  FullMatrix<double> cell_matrix2(dofs_per_cell, dofs_per_cell);
  Vector<double>     cell_rhs(dofs_per_cell);

  const RightHandSide<dim>    right_hand_side;
  std::vector<Vector<double>> rhs_values(n_q_points, Vector<double>(dim + 1));

  std::vector<Tensor<2, dim>> grad_phi_u(dofs_per_cell);
  std::vector<double>         div_phi_u(dofs_per_cell);
  std::vector<double>         phi_p(dofs_per_cell);

  std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
  const FEValuesExtractors::Vector     velocities(0);
  const FEValuesExtractors::Scalar     pressure(dim);

  for (const auto &cell : dof_handler.active_cell_iterators())
    if (cell->is_locally_owned())
      {
        cell_matrix  = 0;
        cell_matrix2 = 0;
        cell_rhs     = 0;

        fe_values.reinit(cell);
        right_hand_side.vector_value_list(fe_values.get_quadrature_points(),
                                          rhs_values);
        for (unsigned int q = 0; q < n_q_points; ++q)
          {
            for (unsigned int k = 0; k < dofs_per_cell; ++k)
              {
                grad_phi_u[k] = fe_values[velocities].gradient(k, q);
                div_phi_u[k]  = fe_values[velocities].divergence(k, q);
                phi_p[k]      = fe_values[pressure].value(k, q);
              }

            for (unsigned int i = 0; i < dofs_per_cell; ++i)
              {
                for (unsigned int j = 0; j < dofs_per_cell; ++j)
                  {
                    cell_matrix(i, j) +=
                      (viscosity *
                         scalar_product(grad_phi_u[i], grad_phi_u[j]) -
                       div_phi_u[i] * phi_p[j] - phi_p[i] * div_phi_u[j]) *
                      fe_values.JxW(q);

                    cell_matrix2(i, j) += 1.0 / viscosity * phi_p[i] *
                                          phi_p[j] * fe_values.JxW(q);
                  }

                const unsigned int component_i =
                  fe.system_to_component_index(i).first;
                cell_rhs(i) += fe_values.shape_value(i, q) *
                               rhs_values[q](component_i) * fe_values.JxW(q);
              }
          }

        cell->get_dof_indices(local_dof_indices);
        constraints.distribute_local_to_global(cell_matrix,
                                               cell_rhs,
                                               local_dof_indices,
                                               system_matrix,
                                               system_rhs);

        constraints.distribute_local_to_global(cell_matrix2,
                                               local_dof_indices,
                                               preconditioner_matrix);
      }

  system_matrix.compress(VectorOperation::add);
  preconditioner_matrix.compress(VectorOperation::add);
  system_rhs.compress(VectorOperation::add);
}
@endcode
<a name="Solving"></a>
<h3>Solving</h3>
This function solves the linear system with MINRES with a block diagonal preconditioner and AMG for the two diagonal blocks as described in the introduction. The preconditioner applies a V-cycle to the 0,0 block and a CG solve with the mass matrix for the 1,1 block (the Schur complement).
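The reason the pressure mass matrix can serve here is the standard result that, for the Stokes system, the Schur complement @f$S = B A^{-1} B^T@f$ is spectrally equivalent to the pressure mass matrix @f$M_p@f$ scaled by the inverse of the viscosity,
@f[
S = B A^{-1} B^T \;\sim\; \frac{1}{\nu} M_p,
@f]
which is exactly the matrix that assemble_system() placed into the 1,1 block of preconditioner_matrix.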
@code
template <int dim>
void StokesProblem<dim>::solve()
{
  LA::MPI::PreconditionAMG prec_A;
  {
    LA::MPI::PreconditionAMG::AdditionalData data;

#ifdef USE_PETSC_LA
    data.symmetric_operator = true;
#endif
    prec_A.initialize(system_matrix.block(0, 0), data);
  }

  LA::MPI::PreconditionAMG prec_S;
  {
    LA::MPI::PreconditionAMG::AdditionalData data;

#ifdef USE_PETSC_LA
    data.symmetric_operator = true;
#endif
    prec_S.initialize(preconditioner_matrix.block(1, 1), data);
  }
@endcode
The InverseMatrix is used to solve for the mass matrix:
@code
  using mp_inverse_t = LinearSolvers::InverseMatrix<LA::MPI::SparseMatrix,
                                                    LA::MPI::PreconditionAMG>;
  const mp_inverse_t mp_inverse(preconditioner_matrix.block(1, 1), prec_S);
@endcode
This constructs the block preconditioner based on the preconditioners for the individual blocks defined above.
@code
  const LinearSolvers::BlockDiagonalPreconditioner<LA::MPI::PreconditionAMG,
                                                   mp_inverse_t>
    preconditioner(prec_A, mp_inverse);
@endcode
With that, we can finally set up the linear solver and solve the system:
@code
  SolverControl solver_control(system_matrix.m(),
                               1e-10 * system_rhs.l2_norm());

  SolverMinRes<LA::MPI::BlockVector> solver(solver_control);

  LA::MPI::BlockVector distributed_solution(owned_partitioning,
                                            mpi_communicator);

  constraints.set_zero(distributed_solution);

  solver.solve(system_matrix,
               distributed_solution,
               system_rhs,
               preconditioner);

  pcout << "   Solved in " << solver_control.last_step() << " iterations."
        << std::endl;

  constraints.distribute(distributed_solution);
@endcode
Like in @ref step_56 "step-56", we subtract the mean pressure to allow error computations against our reference solution, which has a mean value of zero.
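In formula form, the discrete pressure is replaced by
@f[
p \;\leftarrow\; p - \frac{1}{|\Omega|} \int_\Omega p \,\mathrm{d}x.
@f]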
@code
  locally_relevant_solution = distributed_solution;
  const double mean_pressure =
    VectorTools::compute_mean_value(dof_handler,
                                    QGauss<dim>(velocity_degree + 2),
                                    locally_relevant_solution,
                                    dim);
  distributed_solution.block(1).add(-mean_pressure);
  locally_relevant_solution.block(1) = distributed_solution.block(1);
}
@endcode
<a name="Therest"></a>
<h3>The rest</h3>
The remainder of the code that deals with mesh refinement, output, and the main loop is pretty standard.
@code
template <int dim>
void StokesProblem<dim>::refine_grid()
{
  triangulation.refine_global();
}
@endcode
@code
template <int dim>
void StokesProblem<dim>::output_results(const unsigned int cycle) const
{
  {
    const ComponentSelectFunction<dim> velocity_mask(std::make_pair(0, dim),
                                                     dim + 1);
    const ComponentSelectFunction<dim> pressure_mask(dim, dim + 1);

    Vector<double> cellwise_errors(triangulation.n_active_cells());
    QGauss<dim>    quadrature(velocity_degree + 2);

    VectorTools::integrate_difference(dof_handler,
                                      locally_relevant_solution,
                                      ExactSolution<dim>(),
                                      cellwise_errors,
                                      quadrature,
                                      VectorTools::L2_norm,
                                      &velocity_mask);
    const double error_u_l2 = VectorTools::compute_global_error(
      triangulation, cellwise_errors, VectorTools::L2_norm);

    VectorTools::integrate_difference(dof_handler,
                                      locally_relevant_solution,
                                      ExactSolution<dim>(),
                                      cellwise_errors,
                                      quadrature,
                                      VectorTools::L2_norm,
                                      &pressure_mask);
    const double error_p_l2 = VectorTools::compute_global_error(
      triangulation, cellwise_errors, VectorTools::L2_norm);

    pcout << "error: u_0: " << error_u_l2 << " p_0: " << error_p_l2
          << std::endl;
  }

  {
    std::vector<std::string> solution_names(dim, "velocity");
    solution_names.emplace_back("pressure");
    std::vector<DataComponentInterpretation::DataComponentInterpretation>
      data_component_interpretation(
        dim, DataComponentInterpretation::component_is_part_of_vector);
    data_component_interpretation.push_back(
      DataComponentInterpretation::component_is_scalar);

    DataOut<dim> data_out;
    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(locally_relevant_solution,
                             solution_names,
                             DataOut<dim>::type_dof_data,
                             data_component_interpretation);

    LA::MPI::BlockVector interpolated;
    interpolated.reinit(owned_partitioning, MPI_COMM_WORLD);
    VectorTools::interpolate(dof_handler, ExactSolution<dim>(), interpolated);

    LA::MPI::BlockVector interpolated_relevant(owned_partitioning,
                                               relevant_partitioning,
                                               MPI_COMM_WORLD);
    interpolated_relevant = interpolated;
    {
      std::vector<std::string> solution_names(dim, "ref_u");
      solution_names.emplace_back("ref_p");
      data_out.add_data_vector(interpolated_relevant,
                               solution_names,
                               DataOut<dim>::type_dof_data,
                               data_component_interpretation);
    }

    Vector<float> subdomain(triangulation.n_active_cells());
    for (unsigned int i = 0; i < subdomain.size(); ++i)
      subdomain(i) = triangulation.locally_owned_subdomain();
    data_out.add_data_vector(subdomain, "subdomain");

    data_out.build_patches();

    data_out.write_vtu_with_pvtu_record(
      "./", "solution", cycle, mpi_communicator, 2);
  }
}
@endcode
@code
template <int dim>
void StokesProblem<dim>::run()
{
#ifdef USE_PETSC_LA
  pcout << "Running using PETSc." << std::endl;
#else
  pcout << "Running using Trilinos." << std::endl;
#endif
  const unsigned int n_cycles = 5;
  for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
    {
      pcout << "Cycle " << cycle << ':' << std::endl;

      if (cycle == 0)
        make_grid();
      else
        refine_grid();

      setup_system();
      assemble_system();
      solve();

      if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 32)
        output_results(cycle);

      computing_timer.print_summary();
      computing_timer.reset();

      pcout << std::endl;
    }
}
@endcode
@code
int main(int argc, char *argv[])
{
  try
    {
      using namespace dealii;
      using namespace Step55;

      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      StokesProblem<2> problem(2);
      problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
@endcode
<a name="Results"></a><h1>Results</h1>
As expected from the discussion above, the number of iterations is independent of the number of processors and only very slightly dependent on @f$h@f$:
<i>[Table: number of MINRES iterations with PETSc, for varying mesh sizes and numbers of processors.]</i>

<i>[Table: number of MINRES iterations with Trilinos, for varying mesh sizes and numbers of processors.]</i>
While the PETSc results show a constant number of iterations, the iterations increase when using Trilinos. This is likely because of the different settings used for the AMG preconditioner. For performance reasons we do not allow coarsening below a couple thousand unknowns. As the coarse solver is an exact solve (we are using LU by default), a change in the number of levels will influence the quality of a V-cycle. Therefore, a V-cycle is closer to an exact solver for smaller problem sizes.
<a name="extensions"></a>
<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
<a name="InvestigateTrilinositerations"></a><h4>Investigate Trilinos iterations</h4>

Play with the smoothers, smoothing steps, and other properties for the Trilinos AMG to achieve an optimal preconditioner.
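As a starting point, one could pass a customized AdditionalData object when setting up prec_A in solve(). The following is only a sketch: the fields shown exist in TrilinosWrappers::PreconditionAMG::AdditionalData, but the values are assumptions to experiment with, not tuned recommendations.

@code
#ifndef USE_PETSC_LA
// Hypothetical tuning sketch for the Trilinos ML-based AMG preconditioner:
TrilinosWrappers::PreconditionAMG::AdditionalData data;
data.elliptic              = true; // the velocity block is elliptic
data.smoother_sweeps       = 2;    // try more smoothing per level
data.aggregation_threshold = 0.02; // coarsen more or less aggressively
prec_A.initialize(system_matrix.block(0, 0), data);
#endif
@endcode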
<a name="SolvetheOseenprobleminsteadoftheStokessystem"></a><h4>Solve the Oseen problem instead of the Stokes system</h4>

This change requires changing the outer solver to GMRES or BiCGStab, because the system is no longer symmetric. You can prescribe the exact flow solution as @f$b@f$ in the convective term @f$b \cdot \nabla u@f$. This should give the same solution as the original problem, if you set the right hand side to zero.
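In solve(), this amounts to swapping the solver class; a minimal sketch (solver_gmres.h is already included at the top of the program):

@code
// Replace the symmetric solver SolverMinRes by GMRES, which does not
// require a symmetric system matrix (needed once the convective term
// b . grad(u) is added):
SolverGMRES<LA::MPI::BlockVector> solver(solver_control);
solver.solve(system_matrix, distributed_solution, system_rhs, preconditioner);
@endcode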
<a name="Adaptiverefinement"></a><h4>Adaptive refinement</h4>

So far, this tutorial program refines the mesh globally in each step. Replacing the code in StokesProblem::refine_grid() by something like

@code
Vector<float> estimated_error_per_cell(triangulation.n_active_cells());

FEValuesExtractors::Vector velocities(0);
KellyErrorEstimator<dim>::estimate(
  dof_handler,
  QGauss<dim - 1>(fe.degree + 1),
  std::map<types::boundary_id, const Function<dim> *>(),
  locally_relevant_solution,
  estimated_error_per_cell,
  fe.component_mask(velocities));
parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
  triangulation, estimated_error_per_cell, 0.3, 0.0);
triangulation.execute_coarsening_and_refinement();
@endcode

makes it simple to explore adaptive mesh refinement.
<a name="PlainProg"></a>
<h1> The plain program</h1>
@include "step-55.cc"