The included deal.II header files are the same as in the other example programs:

#include <deal.II/base/function.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/quadrature_lib.h>

#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>

#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>

#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/matrix_tools.h>
#include <deal.II/numerics/vector_tools.h>
In addition to the deal.II header files, we include the preCICE API in order to obtain access to preCICE-specific functionality:

#include <precice/precice.hpp>

#include <iostream>
Configuration parameters
We set up a simple hard-coded struct containing all the names we need for the external coupling: the name of the preCICE configuration file as well as the names of the simulation participant, the coupling mesh, and the exchanged data. The last three names can also be found in the preCICE configuration file. For real application cases, these names are better handled by a parameter file.
struct CouplingParameters
{
  const std::string config_file      = "precice-config.xml";
  const std::string participant_name = "laplace-solver";
  const std::string mesh_name        = "dealii-mesh";
  const std::string read_data_name   = "boundary-data";
};
The Adapter class handles all functionality required to couple the deal.II solver code to other solvers via preCICE, i.e., data structures are set up and all relevant information is passed to preCICE.
template <int dim, typename ParameterClass>
class Adapter
{
public:
  Adapter(const ParameterClass &parameters,
          const unsigned int    deal_boundary_interface_id);

  void initialize(const DoFHandler<dim>                     &dof_handler,
                  std::map<types::global_dof_index, double> &boundary_data,
                  const Mapping<dim>                        &mapping);

  void read_data(double                                     relative_read_time,
                 std::map<types::global_dof_index, double> &boundary_data);

  void advance(const double computed_timestep_length);
The public preCICE solver interface:

  precice::Participant precice;
Boundary ID of the deal.II triangulation, associated with the coupling interface. The variable is defined in the constructor of this class and intentionally public so that it can be used during the grid generation and system assembly. The only thing one needs to make sure of is that this ID is unique for a particular simulation.

  const unsigned int dealii_boundary_interface_id;
preCICE related initializations: these variables are specified in and read from a parameter file, which in this simple tutorial program is the CouplingParameters struct already introduced at the beginning.

private:
  const std::string mesh_name;
  const std::string read_data_name;
The node IDs are filled by preCICE during the initialization and are associated with the interface vertices we pass to preCICE.

  int n_interface_nodes;
DoF IndexSet, containing the relevant coupling DoF indices at the coupling boundary.

  IndexSet coupling_dofs;
Data containers which are passed to preCICE in an appropriate preCICE-specific format:

  std::vector<int>    interface_nodes_ids;
  std::vector<double> read_data_buffer;
The MPI rank and the total number of MPI ranks are required by preCICE when the Participant is created. Since this tutorial runs only in serial mode, we define these values manually in this class instead of using the regular MPI interface.
Finally, a private function that converts the data obtained from preCICE into an appropriate map for Dirichlet boundary conditions:

  void format_precice_to_dealii(
    std::map<types::global_dof_index, double> &boundary_data) const;
};
In the constructor of the Adapter class, we set up the preCICE Participant. We need to tell preCICE our name as a participant of the simulation and the name of the preCICE configuration file. Both have already been specified in the CouplingParameters struct above. Thus, we pass the struct directly to the constructor and read out all relevant information. As a second argument, we need to specify the boundary ID of our triangulation, which is associated with the coupling interface.
template <int dim, typename ParameterClass>
Adapter<dim, ParameterClass>::Adapter(
  const ParameterClass &parameters,
  const unsigned int    deal_boundary_interface_id)
  : precice(parameters.participant_name,
            parameters.config_file,
            /*process_rank=*/0,
            /*n_processes=*/1)
  , dealii_boundary_interface_id(deal_boundary_interface_id)
  , mesh_name(parameters.mesh_name)
  , read_data_name(parameters.read_data_name)
{}
This function initializes preCICE (e.g., establishes communication channels and allocates memory) and passes all relevant data to preCICE. For surface coupling, the relevant data is in particular the location of the data points at the associated interface(s). The `boundary_data` is an empty map, which is filled by preCICE, i.e., with information from the other participant. Throughout the system assembly, the map can directly be used in order to apply the Dirichlet boundary conditions in the linear system.
template <int dim, typename ParameterClass>
void Adapter<dim, ParameterClass>::initialize(
  const DoFHandler<dim>                     &dof_handler,
  std::map<types::global_dof_index, double> &boundary_data,
  const Mapping<dim>                        &mapping)
{
  Assert(dim > 1, ExcNotImplemented());
Afterwards, we extract the number of interface nodes and the coupling DoFs at the coupling interface from our deal.II solver via `extract_boundary_dofs()`:

  std::set<types::boundary_id> couplingBoundary;
  couplingBoundary.insert(dealii_boundary_interface_id);

  coupling_dofs = DoFTools::extract_boundary_dofs(dof_handler,
                                                  ComponentMask(),
                                                  couplingBoundary);

The `ComponentMask()` might be important in case we deal with vector-valued problems, because vector-valued problems have a DoF for each component.
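For illustration, a hypothetical vector-valued variant (not needed in this scalar tutorial) could restrict the coupling to a single solution component by building the mask explicitly. This is only a sketch, reusing `dof_handler` and `couplingBoundary` from above:

  // Sketch: select only the first component of a vector-valued element.
  std::vector<bool> selected(dof_handler.get_fe().n_components(), false);
  selected[0] = true;

  const IndexSet selected_dofs =
    DoFTools::extract_boundary_dofs(dof_handler,
                                    ComponentMask(selected),
                                    couplingBoundary);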
The coupling DoFs are used to set up the `boundary_data` map. At the end, we associate each DoF with a respective boundary value:

  for (const auto i : coupling_dofs)
    boundary_data[i] = 0.0;
Since we deal with a scalar problem, the number of DoFs at the particular interface corresponds to the number of interface nodes:

  n_interface_nodes = coupling_dofs.n_elements();

  std::cout << "\t Number of coupling nodes: " << n_interface_nodes
            << std::endl;
Now we need to tell preCICE the coordinates of the interface nodes. Hence, we set up a std::vector to pass the node positions to preCICE. Each node is specified only once:

  std::vector<double> interface_nodes_positions;
  interface_nodes_positions.reserve(dim * n_interface_nodes);
Set up the appropriate size of the data container needed for the data exchange; here, one data value is read per interface node:

  read_data_buffer.resize(n_interface_nodes);

The IDs are filled by preCICE during the initialization:

  interface_nodes_ids.resize(n_interface_nodes);
  std::map<types::global_dof_index, Point<dim>> support_points;
  DoFTools::map_dofs_to_support_points(mapping, dof_handler, support_points);

`support_points` now contains the coordinates of all DoFs. In the next step, the relevant coordinates are extracted using the IndexSet with the extracted coupling_dofs:

  for (const auto element : coupling_dofs)
    for (int i = 0; i < dim; ++i)
      interface_nodes_positions.push_back(support_points[element][i]);
Now we have all the information needed to define the coupling mesh and pass it to preCICE:

  precice.setMeshVertices(mesh_name,
                          interface_nodes_positions,
                          interface_nodes_ids);

Then, we initialize preCICE internally by calling the API function `initialize()`:

  precice.initialize();
}
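To summarize the API usage so far: stripped of the deal.II specifics, this participant follows the usual preCICE pattern of a solver that only reads data. A condensed, self-contained sketch (the helper function and variable names are made up; in this tutorial, `finalize()` is implicitly handled by the Participant's destructor):

#include <precice/precice.hpp>

#include <algorithm>
#include <vector>

// Condensed preCICE workflow of a participant that only reads data.
void couple_and_read(std::vector<double> &vertex_coordinates)
{
  precice::Participant participant("laplace-solver", "precice-config.xml",
                                   /*rank=*/0, /*size=*/1);

  const int        dim = participant.getMeshDimensions("dealii-mesh");
  std::vector<int> vertex_ids(vertex_coordinates.size() / dim);
  participant.setMeshVertices("dealii-mesh", vertex_coordinates, vertex_ids);
  participant.initialize();

  std::vector<double> buffer(vertex_ids.size());
  const double        solver_dt = 0.1;
  while (participant.isCouplingOngoing())
    {
      const double dt = std::min(participant.getMaxTimeStepSize(), solver_dt);
      participant.readData("dealii-mesh", "boundary-data", vertex_ids, dt,
                           buffer);
      // ... assemble and solve one time step of size dt ...
      participant.advance(dt);
    }
  participant.finalize();
}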
template <int dim, typename ParameterClass>
void Adapter<dim, ParameterClass>::read_data(
  double                                     relative_read_time,
  std::map<types::global_dof_index, double> &boundary_data)
{

Here, we obtain the data, i.e. the boundary condition, from the other participant. We already have the vertex IDs and just need to convert the obtained data into the deal.II-compatible 'boundary map', which is done in the `format_precice_to_dealii()` function:

  precice.readData(mesh_name,
                   read_data_name,
                   interface_nodes_ids,
                   relative_read_time,
                   read_data_buffer);
After receiving the coupling data in `read_data_buffer`, we convert it to the std::map `boundary_data`, which is later needed in order to apply Dirichlet boundary conditions:

  format_precice_to_dealii(boundary_data);
}
The function `advance()` is called in the main time loop after the computation in each time step. Here, preCICE exchanges the coupling data internally and computes mappings as well as acceleration methods.

template <int dim, typename ParameterClass>
void Adapter<dim, ParameterClass>::advance(const double computed_timestep_length)
{

We specify the computed time-step length and pass it to preCICE:

  precice.advance(computed_timestep_length);
}
This function takes the std::vector obtained by preCICE in `read_data_buffer` and inserts its values at the right positions in the boundary map used throughout our deal.II solver for Dirichlet boundary conditions. The function is only used internally in the Adapter class and is not called in the solver itself. The order in which preCICE sorts the data in the `read_data_buffer` vector is exactly the same as the order of the initially passed vertex coordinates.
template <int dim, typename ParameterClass>
void Adapter<dim, ParameterClass>::format_precice_to_dealii(
  std::map<types::global_dof_index, double> &boundary_data) const
{

We already stored the coupling DoF indices in the `boundary_data` map, so that we can simply iterate over all keys in the map:

  auto dof_component = boundary_data.begin();
  for (int i = 0; i < n_interface_nodes; ++i)
    {
      boundary_data[dof_component->first] = read_data_buffer[i];
      ++dof_component;
    }
}
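This one-to-one copy only works because both orderings are consistent: a std::map iterates over its keys in ascending order, and preCICE returns the data in the order in which the vertices were originally passed. A small stand-alone illustration with made-up DoF indices and values:

#include <cstddef>
#include <map>
#include <vector>

int main()
{
  // Three coupling DoFs (keys) waiting for data, as in `boundary_data`:
  std::map<unsigned int, double> boundary_data{{3, 0.0}, {7, 0.0}, {12, 0.0}};
  // Data as it would arrive from preCICE, ordered like the passed vertices:
  const std::vector<double> read_data_buffer{1.0, 2.0, 3.0};

  auto dof = boundary_data.begin();
  for (std::size_t i = 0; i < read_data_buffer.size(); ++i, ++dof)
    dof->second = read_data_buffer[i];
  // boundary_data now maps 3 -> 1.0, 7 -> 2.0, 12 -> 3.0
}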
The solver class is essentially the same as in @ref step_4 "step-4". We only extend the stationary problem to a time-dependent problem and introduce the coupling. Comments are added at any point where the workflow differs from @ref step_4 "step-4".
template <int dim>
class CoupledLaplaceProblem
{
public:
  CoupledLaplaceProblem();

  void run();

private:
  void make_grid();
  void setup_system();
  void assemble_system();
  void solve();
  void output_results() const;
We allocate all structures required for the preCICE coupling: the map is used to apply Dirichlet boundary conditions and is filled in the Adapter class with data from the other participant. The CouplingParameters struct holds the preCICE configuration as described above. The interface boundary ID is the ID associated with our coupling interface and needs to be specified when we set up the Adapter class object, because we pass it directly to the constructor of this class.

  std::map<types::global_dof_index, double> boundary_data;
  CouplingParameters                        parameters;
  const unsigned int                        interface_boundary_id;
  Adapter<dim, CouplingParameters>          adapter;
The time-step size delta_t is the actual time-step size used for all computations. The preCICE time-step size is obtained from preCICE in order to ensure synchronization at all coupling time steps. The solver time-step size is the desired time-step size of our individual solver. In more sophisticated computations, it might be determined adaptively. The `time_step` counter is just used for the time-step number.

  double       delta_t;
  double       precice_delta_t;
  const double solver_delta_t = 0.1;
  unsigned int time_step      = 0;

  // (further members: triangulation, finite element, DoF handler, mapping,
  //  sparsity pattern, matrix, and solution vectors as in step-4)
};
template <int dim>
class RightHandSide : public Function<dim>
{
public:
  virtual double value(const Point<dim>  &p,
                       const unsigned int component = 0) const override;
};

template <int dim>
class BoundaryValues : public Function<dim>
{
public:
  virtual double value(const Point<dim>  &p,
                       const unsigned int component = 0) const override;
};
template <int dim>
double RightHandSide<dim>::value(const Point<dim> &p,
                                 const unsigned int /*component*/) const
{
  double return_value = 0.0;
  for (unsigned int i = 0; i < dim; ++i)
    return_value += 4.0 * std::pow(p(i), 4.0);

  return return_value;
}
template <int dim>
double BoundaryValues<dim>::value(const Point<dim> &p,
                                  const unsigned int /*component*/) const
{
  return p.square(); // as in step-4
}
template <int dim>
CoupledLaplaceProblem<dim>::CoupledLaplaceProblem()
  : fe(1)
  , dof_handler(triangulation)
  , interface_boundary_id(1)
  , adapter(parameters, interface_boundary_id)
{}
template <int dim>
void CoupledLaplaceProblem<dim>::make_grid()
{
  GridGenerator::hyper_cube(triangulation, -1, 1);
  triangulation.refine_global(4);

  for (const auto &cell : triangulation.active_cell_iterators())
    for (const auto &face : cell->face_iterators())
      {
        // We choose the boundary in positive x direction for the
        // interface coupling.
        if (face->at_boundary() && (face->center()[0] == 1))
          face->set_boundary_id(interface_boundary_id);
      }

  std::cout << "  Number of active cells: " << triangulation.n_active_cells()
            << std::endl;
}
template <int dim>
void CoupledLaplaceProblem<dim>::setup_system()
{
  dof_handler.distribute_dofs(fe);

  std::cout << "  Number of degrees of freedom: " << dof_handler.n_dofs()
            << std::endl;

  DynamicSparsityPattern dsp(dof_handler.n_dofs());
  DoFTools::make_sparsity_pattern(dof_handler, dsp);
  sparsity_pattern.copy_from(dsp);

  system_matrix.reinit(sparsity_pattern);

  solution.reinit(dof_handler.n_dofs());
  old_solution.reinit(dof_handler.n_dofs());
  system_rhs.reinit(dof_handler.n_dofs());
}
template <int dim>
void CoupledLaplaceProblem<dim>::assemble_system()
{
  // Reset global structures
  system_rhs    = 0;
  system_matrix = 0;
  // Update old solution values
  old_solution = solution;

  QGauss<dim> quadrature_formula(fe.degree + 1);

  RightHandSide<dim> right_hand_side;

  FEValues<dim> fe_values(fe,
                          quadrature_formula,
                          update_values | update_gradients |
                            update_quadrature_points | update_JxW_values);

  const unsigned int dofs_per_cell = fe.n_dofs_per_cell();

  FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
  Vector<double>     cell_rhs(dofs_per_cell);

  std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

  // The solution values from previous time steps are stored for each
  // quadrature point.
  std::vector<double> local_values_old_solution(fe_values.n_quadrature_points);
  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      cell_matrix = 0;
      cell_rhs    = 0;

      // Get the local values from the `fe_values` object
      fe_values.get_function_values(old_solution, local_values_old_solution);
The system matrix additionally contains a mass matrix due to the time discretization. The RHS has contributions from the old solution values.

      for (const unsigned int q_index : fe_values.quadrature_point_indices())
        for (const unsigned int i : fe_values.dof_indices())
          {
            for (const unsigned int j : fe_values.dof_indices())
              cell_matrix(i, j) +=
                ((fe_values.shape_value(i, q_index) *  // phi_i(x_q)
                  fe_values.shape_value(j, q_index)) + // phi_j(x_q)
                 (delta_t *                            // delta t
                  fe_values.shape_grad(i, q_index) *   // grad phi_i(x_q)
                  fe_values.shape_grad(j, q_index))) * // grad phi_j(x_q)
                fe_values.JxW(q_index);                // dx

            const auto  x_q         = fe_values.quadrature_point(q_index);
            const auto &local_value = local_values_old_solution[q_index];
            cell_rhs(i) += ((delta_t *                           // delta t
                             fe_values.shape_value(i, q_index) * // phi_i(x_q)
                             right_hand_side.value(x_q)) +       // f(x_q)
                            fe_values.shape_value(i, q_index) *
                              local_value) *      // phi_i(x_q) * u_old(x_q)
                           fe_values.JxW(q_index); // dx
          }
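Collecting the terms above in matrix form (notation mine, consistent with the backward Euler step stated earlier): with the mass matrix $M_{ij} = \int_\Omega \varphi_i \varphi_j \,dx$, the stiffness matrix $A_{ij} = \int_\Omega \nabla\varphi_i \cdot \nabla\varphi_j \,dx$, and the load vector $F_i = \int_\Omega \varphi_i f \,dx$, each time step assembles and solves

$$(M + \Delta t\, A)\, U^{n+1} = M\, U^{n} + \Delta t\, F.$$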
Copy local to global:

      cell->get_dof_indices(local_dof_indices);
      for (const unsigned int i : fe_values.dof_indices())
        {
          for (const unsigned int j : fe_values.dof_indices())
            system_matrix.add(local_dof_indices[i],
                              local_dof_indices[j],
                              cell_matrix(i, j));

          system_rhs(local_dof_indices[i]) += cell_rhs(i);
        }
    }
At first, we apply the Dirichlet boundary condition from @ref step_4 "step-4", as usual:

  {
    std::map<types::global_dof_index, double> boundary_values;
    VectorTools::interpolate_boundary_values(dof_handler,
                                             0,
                                             BoundaryValues<dim>(),
                                             boundary_values);
    MatrixTools::apply_boundary_values(boundary_values,
                                       system_matrix,
                                       solution,
                                       system_rhs);
  }

Afterwards, we apply the coupling boundary condition. The `boundary_data` map has already been filled by preCICE:

  MatrixTools::apply_boundary_values(boundary_data,
                                     system_matrix,
                                     solution,
                                     system_rhs);
}
template <int dim>
void CoupledLaplaceProblem<dim>::solve()
{
  SolverControl            solver_control(1000, 1e-12);
  SolverCG<Vector<double>> solver(solver_control);
  solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());

  std::cout << "  " << solver_control.last_step()
            << " CG iterations needed to obtain convergence." << std::endl;
}
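For this coarse grid, CG with the identity preconditioner converges quickly. As an alternative (not part of this tutorial), one could precondition CG with deal.II's SSOR preconditioner, as done in step-3, with an arbitrarily chosen relaxation parameter:

  // Sketch: replace PreconditionIdentity() by an SSOR preconditioner.
  PreconditionSSOR<SparseMatrix<double>> preconditioner;
  preconditioner.initialize(system_matrix, /*relaxation=*/1.2);
  solver.solve(system_matrix, solution, system_rhs, preconditioner);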
template <int dim>
void CoupledLaplaceProblem<dim>::output_results() const
{
  DataOut<dim> data_out;

  data_out.attach_dof_handler(dof_handler);
  data_out.add_data_vector(solution, "solution");

  data_out.build_patches(mapping);

  std::ofstream output("solution-" + std::to_string(time_step) + ".vtk");
  data_out.write_vtk(output);
}
template <int dim>
void CoupledLaplaceProblem<dim>::run()
{
  std::cout << "Solving problem in " << dim << " space dimensions."
            << std::endl;

  make_grid();
  setup_system();

After we set up the system, we initialize preCICE and the adapter using the functionalities of the Adapter:

  adapter.initialize(dof_handler, boundary_data, mapping);

preCICE steers the coupled simulation: `isCouplingOngoing` is used to synchronize the end of the simulation with the coupling partner:

  while (adapter.precice.isCouplingOngoing())
    {

The time-step number is solely used to generate unique output files:

      ++time_step;

preCICE returns the maximum admissible time-step size, which needs to be compared to our desired solver time-step size:

      precice_delta_t = adapter.precice.getMaxTimeStepSize();
      delta_t         = std::min(precice_delta_t, solver_delta_t);

Next, we read data. Since we use a fully implicit (backward) Euler method, we want the data to be associated with the end of the current time step (delta_t). Time-interpolation methods in preCICE allow one to query read data at any point in time, if the coupling scheme allows it:

      adapter.read_data(delta_t, boundary_data);

In the time loop, we assemble the coupled system and solve it as usual:

      assemble_system();
      solve();

After we have solved the system, we advance the coupling to the next time level. In a bi-directionally coupled simulation, we would also pass our calculated data to preCICE:

      adapter.advance(delta_t);

Write an output file if the time step is completed: in case of an implicit coupling, where individual time steps are computed more than once, the function `isTimeWindowComplete` prevents unnecessary result writing. For this simple tutorial configuration (explicit coupling), the function always returns `true`:

      if (adapter.precice.isTimeWindowComplete())
        output_results();
    }
}
int main()
{
  CoupledLaplaceProblem<2> laplace_problem;
  laplace_problem.run();

  return 0;
}
968<a name="ann-fancy_boundary_condition.cc"></a>
969<h1>Annotated version of fancy_boundary_condition.cc</h1>
/* -----------------------------------------------------------------------------
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 * Copyright (C) 2020 by David Schneider
 * Copyright (C) 2020 by Benjamin Uekermann
 *
 * This file is part of the deal.II code gallery.
 *
 * ----------------------------------------------------------------------------- */
This program does not use any deal.II functionality and depends only on preCICE and the standard libraries.

#include <precice/precice.hpp>

#include <iostream>
The program computes a time-varying parabolic boundary condition, which is passed to preCICE and serves as Dirichlet boundary condition for the other coupling participant.
Function to generate boundary values in each time step:

void define_boundary_values(std::vector<double> &boundary_data,
                            const double         time,
                            const double         end_time)
{
  // Scale the current time value
  const double relative_time = time / end_time;
  // Define the amplitude. Values run from -0.5 to 0.5
  const double amplitude = (relative_time - 0.5);

  // Specify the actual data we want to pass to the other participant. Here,
  // we choose a parabola with boundary values of 2 in order to enforce
  // continuity with adjacent boundaries.
  const double n_elements = boundary_data.size();
  const double right_zero = boundary_data.size() - 1;
  const double left_zero  = 0;
  const double offset     = 2;
  for (unsigned int i = 0; i < n_elements; ++i)
    boundary_data[i] =
      -amplitude * ((i - left_zero) * (i - right_zero)) + offset;
}
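A quick sanity check of this function, worked out by hand for a made-up sample size of 6 (this stand-alone `main` is only an illustration, compiled together with the function above, and is not part of the program): at the two end points the parabola term vanishes, so the values equal the offset, while the interior bulges with the time-dependent amplitude.

#include <cassert>
#include <vector>

int main()
{
  std::vector<double> sample(6);
  // At time == end_time the amplitude is (1.0 - 0.5) = 0.5.
  define_boundary_values(sample, /*time=*/1.0, /*end_time=*/1.0);

  // End points: -0.5 * 0 + 2 = 2 (continuity with adjacent boundaries).
  assert(sample.front() == 2.0 && sample.back() == 2.0);
  // Interior, e.g. i = 2: -0.5 * (2 * (2 - 5)) + 2 = 3 + 2 = 5.
  assert(sample[2] == 5.0);
}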
int main()
{
  std::cout << "Boundary participant: starting... \n";

  // Configuration details; the names must match the preCICE configuration
  // file
  const std::string configFileName("precice-config.xml");
  const std::string solverName("boundary-participant");
  const std::string meshName("boundary-mesh");
  const std::string dataWriteName("boundary-data");

  // Adjust to MPI rank and size for parallel computation
  const int commRank = 0;
  const int commSize = 1;

  precice::Participant precice(solverName, configFileName, commRank, commSize);

  const int dimensions       = precice.getMeshDimensions(meshName);
  const int numberOfVertices = 6;
  // Set up data structures
  std::vector<double> writeData(numberOfVertices);
  std::vector<int>    vertexIDs(numberOfVertices);
  std::vector<double> vertices(numberOfVertices * dimensions);
  // Define a boundary mesh
  std::cout << "Boundary participant: defining boundary mesh \n";
  const double length = 2;
  const double xCoord = 1;
  const double deltaY = length / (numberOfVertices - 1);
  for (int i = 0; i < numberOfVertices; ++i)
    for (int j = 0; j < dimensions; ++j)
      {
        const unsigned int index = dimensions * i + j;
        // The x-coordinate is always 1, i.e., the boundary is parallel to
        // the y-axis. The y-coordinate descends from 1 to -1: with
        // numberOfVertices = 6, deltaY = 0.4, so the vertices are (1, 1),
        // (1, 0.6), (1, 0.2), (1, -0.2), (1, -0.6), and (1, -1).
        if (j == 0)
          vertices[index] = xCoord;
        else
          vertices[index] = 1 - deltaY * i;
      }

  // Pass the vertices to preCICE
  precice.setMeshVertices(meshName, vertices, vertexIDs);
  // Variables for the time stepping
  const double end_time = 1;
  double       time     = 0;

  // Write initial data, if required (not used in the configuration by
  // default)
  if (precice.requiresInitialData())
    {
      std::cout << "Boundary participant: writing initial data \n";
      define_boundary_values(writeData, time, end_time);
      precice.writeData(meshName, dataWriteName, vertexIDs, writeData);
    }

  // Initialize the Participant
  precice.initialize();
  while (precice.isCouplingOngoing())
    {
      double dt = precice.getMaxTimeStepSize();
      time += dt;

      // Generate new boundary data
      define_boundary_values(writeData, time, end_time);

      std::cout << "Boundary participant: writing coupling data \n";
      precice.writeData(meshName, dataWriteName, vertexIDs, writeData);

      std::cout << "Boundary participant: advancing in time\n";
      precice.advance(dt);
    }

  std::cout << "Boundary participant: closing...\n";

  return 0;
}