130 * The included deal.II header files are the same as in the other example programs
134 * #include <deal.II/base/function.h>
135 * #include <deal.II/base/logstream.h>
136 * #include <deal.II/base/quadrature_lib.h>
138 * #include <deal.II/dofs/dof_accessor.h>
139 * #include <deal.II/dofs/dof_handler.h>
140 * #include <deal.II/dofs/dof_tools.h>
142 * #include <deal.II/fe/fe_q.h>
143 * #include <deal.II/fe/fe_values.h>
145 * #include <deal.II/grid/grid_generator.h>
146 * #include <deal.II/grid/tria.h>
147 * #include <deal.II/grid/tria_accessor.h>
148 * #include <deal.II/grid/tria_iterator.h>
150 * #include <deal.II/lac/dynamic_sparsity_pattern.h>
151 * #include <deal.II/lac/full_matrix.h>
152 * #include <deal.II/lac/precondition.h>
153 * #include <deal.II/lac/solver_cg.h>
154 * #include <deal.II/lac/sparse_matrix.h>
155 * #include <deal.II/lac/vector.h>
157 * #include <deal.II/numerics/data_out.h>
158 * #include <deal.II/numerics/matrix_tools.h>
159 * #include <deal.II/numerics/vector_tools.h>
162 * In addition to the deal.II header files, we include the preCICE API in order
163 * to obtain access to preCICE-specific functionality.
166 * #include <precice/precice.hpp>
169 * #include <iostream>
175 * Configuration parameters
179 * We set up a simple hard-coded struct containing all names we need for
180 * external coupling. The struct includes the name of the preCICE
181 * configuration file as well as the name of the simulation participant, the
182 * name of the coupling mesh, and the name of the exchanged data. The last three
183 * names can also be found in the preCICE configuration file. For real application
184 * cases, these names are better handled by a parameter file (see the sketch below).
187 * struct CouplingParamters
189 * const std::string config_file = "precice-config.xml";
190 * const std::string participant_name = "laplace-solver";
191 * const std::string mesh_name = "dealii-mesh";
192 * const std::string read_data_name = "boundary-data";
202 * The Adapter class handles all functionality required to couple the deal.II solver
203 * code to other solvers with preCICE, i.e., data structures are set up and all
204 * relevant information is passed to preCICE.
210 * template <int dim, typename ParameterClass>
214 * Adapter(const ParameterClass &parameters,
219 * std::map<types::global_dof_index, double> &boundary_data,
223 * read_data(double relative_read_time,
224 * std::map<types::global_dof_index, double> &boundary_data);
227 * advance(const double computed_timestep_length);
231 * public preCICE solver interface
234 * precice::Participant precice;
238 * Boundary ID of the deal.II triangulation, associated with the coupling
239 * interface. The variable is defined in the constructor of this class and
240 * intentionally public so that it can be used during the grid generation and
241 * system assembly. The only thing one needs to make sure of is that this ID is
245 * const unsigned int dealii_boundary_interface_id;
250 * preCICE-related initializations
251 * These variables are specified in and read from a parameter file, which in
252 * this simple tutorial program is the CouplingParamters struct already
253 * introduced at the beginning.
256 * const std::string mesh_name;
257 * const std::string read_data_name;
261 * The node IDs are filled by preCICE during the initialization and associated with
262 * the spherical vertices we pass to preCICE
265 * int n_interface_nodes;
269 * DoF IndexSet, containing relevant coupling DoF indices at the coupling
277 * Data containers which are passed to preCICE in an appropriate preCICE
281 * std::vector<int> interface_nodes_ids;
282 * std::vector<double> read_data_buffer;
286 * The MPI rank and total number of MPI ranks are required by preCICE when the
287 * Participant is created. Since this tutorial runs only in serial mode, we
288 * define the variables manually in this class instead of using the regular
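For reference, in a parallel run one would typically query these two values from
MPI rather than hard-code them. A minimal sketch using deal.II's MPI utilities
(not part of this serial tutorial; it assumes MPI has already been initialized,
e.g. via Utilities::MPI::MPI_InitFinalize) could look like this:

@code
#include <deal.II/base/mpi.h>

// Rank of this process and total number of processes on the world communicator.
const unsigned int this_rank =
  dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
const unsigned int n_ranks =
  dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

// These two numbers are what the precice::Participant constructor expects as
// its last two arguments.
@endcode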
298 * map for Dirichlet boundary conditions
302 * format_precice_to_dealii(
303 * std::map<types::global_dof_index, double> &boundary_data) const;
310 * In the constructor of the Adapter class, we set up the preCICE
311 * Participant. We need to tell preCICE our name as participant of the
312 * simulation and the name of the preCICE configuration file. Both have already
313 * been specified in the CouplingParamters struct above. Thus, we pass the struct
314 * directly to the constructor and read out all relevant information. As a
316 * which is associated with the coupling interface.
319 * template <int dim, typename ParameterClass>
320 * Adapter<dim, ParameterClass>::Adapter(
321 * const ParameterClass &parameters,
323 * : precice(parameters.participant_name,
324 * parameters.config_file,
327 * , dealii_boundary_interface_id(deal_boundary_interface_id)
328 * , mesh_name(parameters.mesh_name)
329 * , read_data_name(parameters.read_data_name)
336 * This function initializes preCICE (e.g., it establishes communication channels
337 * and allocates memory) and passes all relevant data to preCICE. For surface
338 * coupling, relevant data is in particular the location of the data points at
339 * the associated interface(s). The `boundary_data` is an empty map, which is
340 * filled by preCICE, i.e., with information from the other participant. Throughout
341 * the system assembly, the map can be used directly in order to apply the
342 * Dirichlet boundary conditions in the linear system.
345 * template <int dim, typename ParameterClass>
347 * Adapter<dim, ParameterClass>::initialize(
349 * std::map<types::global_dof_index, double> &boundary_data,
352 * Assert(dim > 1, ExcNotImplemented());
357 * Afterwards, we extract the number of interface nodes and the coupling DoFs
358 * at the coupling interface from our deal.II solver via
362 * std::set<types::boundary_id> couplingBoundary;
363 * couplingBoundary.insert(dealii_boundary_interface_id);
367 * The `ComponentMask()` might be important in case we deal with vector-valued
368 * problems, because vector-valued problems have a DoF for each component.
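The extraction itself is not shown in this excerpt; presumably it relies on
DoFTools::extract_boundary_dofs, roughly along the following lines (a sketch,
assuming a deal.II version in which this overload returns an IndexSet):

@code
// Collect all DoF indices located on the coupling boundary. The empty
// ComponentMask() selects all components, which is sufficient for this
// scalar problem.
IndexSet coupling_dofs =
  DoFTools::extract_boundary_dofs(dof_handler, ComponentMask(), couplingBoundary);
@endcode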
377 * The coupling DoFs are used to set up the `boundary_data` map. At the end,
378 * we associate each DoF with a respective boundary value.
381 * for (const auto i : coupling_dofs)
382 * boundary_data[i] = 0.0;
386 * Since we deal with a scalar problem, the number of DoFs at the particular
387 * interface corresponds to the number of interface nodes.
390 * n_interface_nodes = coupling_dofs.n_elements();
392 * std::cout << "\t Number of coupling nodes: " << n_interface_nodes
397 * Now, we need to tell preCICE the coordinates of the interface nodes. Hence,
398 * we set up a std::vector to pass the node positions to preCICE. Each node is
399 * specified only once.
402 * std::vector<double> interface_nodes_positions;
403 * interface_nodes_positions.reserve(dim * n_interface_nodes);
407 * Set up the appropriate size of the data container needed for data
409 * is read/written per interface node.
412 * read_data_buffer.resize(n_interface_nodes);
415 * The IDs are filled by preCICE during the initialization.
418 * interface_nodes_ids.resize(n_interface_nodes);
425 * std::map<types::global_dof_index, Point<dim>> support_points;
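The call that fills this map is not part of this excerpt; presumably it uses
DoFTools::map_dofs_to_support_points, roughly as follows (a sketch):

@code
// Associate each global DoF index with the coordinates of its support point,
// using the mapping that is passed into initialize() and used elsewhere in
// the program.
DoFTools::map_dofs_to_support_points(mapping, dof_handler, support_points);
@endcode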
430 * `support_points` now contains the coordinates of all DoFs. In the next
431 * step, the relevant coordinates are extracted using the IndexSet with the
432 * extracted coupling_dofs.
435 * for (const auto element : coupling_dofs)
436 * for (int i = 0; i < dim; ++i)
437 * interface_nodes_positions.push_back(support_points[element][i]);
441 * Now we have all information to define the coupling mesh and pass the
442 * information to preCICE.
445 * precice.setMeshVertices(mesh_name,
446 * interface_nodes_positions,
447 * interface_nodes_ids);
451 * Then, we initialize preCICE internally by calling the API function
455 * precice.initialize();
458 * template <int dim, typename ParameterClass>
460 * Adapter<dim, ParameterClass>::read_data(
461 * double relative_read_time,
462 * std::map<types::global_dof_index, double> &boundary_data)
466 * Here, we obtain data, i.e., the boundary condition, from another
467 * participant. We already have the vertex IDs and just need to convert our
468 * obtained data to the deal.II-compatible 'boundary map', which is done in
469 * the format_precice_to_dealii function.
472 * precice.readData(mesh_name,
473 * read_data_name,
474 * interface_nodes_ids,
475 * relative_read_time,
476 * read_data_buffer);
480 * After receiving the coupling data in `read_data_buffer`, we convert it to
481 * the std::map `boundary_data` which is later needed in order to apply
482 * Dirichlet boundary conditions
485 * format_precice_to_dealii(boundary_data);
491 * The function `advance()` is called in the main time loop after the
492 * computation in each time step. Here, preCICE exchanges the coupling data
493 * internally and computes mappings as well as acceleration methods.
496 * template <int dim, typename ParameterClass>
498 * Adapter<dim, ParameterClass>::advance(const double computed_timestep_length)
502 * We specify the computed time-step length and pass it to preCICE.
505 * precice.advance(computed_timestep_length);
512 * This function takes the std::vector obtained by preCICE in `read_data_buffer` and
513 * inserts the values at the right position in the boundary map used throughout
514 * our deal.II solver for Dirichlet boundary conditions. The function is only
515 * used internally in the Adapter class and not called in the solver itself. The
516 * order in which preCICE sorts the data in the `read_data_buffer` vector is exactly
517 * the same as the order of the initially passed vertex coordinates.
520 * template <int dim, typename ParameterClass>
522 * Adapter<dim, ParameterClass>::format_precice_to_dealii(
523 * std::map<types::global_dof_index, double> &boundary_data) const
527 * We already stored the coupling DoF indices in the `boundary_data` map, so
528 * that we can simply iterate over all keys in the map.
531 * auto dof_component = boundary_data.begin();
532 * for (int i = 0; i < n_interface_nodes; ++i)
535 * boundary_data[dof_component->first] = read_data_buffer[i];
543 * The solver class is essentially the same as in @ref step_4 "step-4". We only extend the
544 * stationary problem to a time-dependent problem and introduce the coupling.
545 * Comments are added at any point where the workflow differs from @ref step_4 "step-4".
549 * class CoupledLaplaceProblem
552 * CoupledLaplaceProblem();
567 * output_results() const;
583 * We allocate all structures required for the preCICE coupling: The map
584 * is used to apply Dirichlet boundary conditions and filled in the Adapter
585 * class with data from the other participant. The CouplingParamters struct holds the
586 * preCICE configuration as described above. The interface boundary ID is the
587 * ID associated with our coupling interface and needs to be specified when we
588 * set up the Adapter class object, because we pass it directly to the
589 * constructor of this class.
592 * std::map<types::global_dof_index, double> boundary_data;
593 * CouplingParamters parameters;
595 * Adapter<dim, CouplingParamters> adapter;
599 * The time-step size delta_t is the actual time-step size used for all
600 * computations. The preCICE time-step size is obtained from preCICE in order to
601 * ensure synchronization at all coupling time steps. The solver time
602 * step-size is the desired time-step size of our individual solver. In more
603 * sophisticated computations, it might be determined adaptively. The
604 * `time_step` counter is just used for the time-step number.
608 * double precice_delta_t;
609 * const double solver_delta_t = 0.1;
610 * unsigned int time_step = 0;
616 * class RightHandSide : public Function<dim>
620 * value(const Point<dim> &p, const unsigned int component = 0) const override;
626 * class BoundaryValues : public Function<dim>
630 * value(const Point<dim> &p, const unsigned int component = 0) const override;
635 * RightHandSide<dim>::value(const Point<dim> &p,
636 * const unsigned int) const
638 * double return_value = 0.0;
639 * for (unsigned int i = 0; i < dim; ++i)
640 * return_value += 4.0 * std::pow(p(i), 4.0);
642 * return return_value;
648 * BoundaryValues<dim>::value(const Point<dim> &p,
649 * const unsigned int) const
657 * CoupledLaplaceProblem<dim>::CoupledLaplaceProblem()
660 * , interface_boundary_id(1)
661 * , adapter(parameters, interface_boundary_id)
667 * CoupledLaplaceProblem<dim>::make_grid()
672 * for (const auto &cell : triangulation.active_cell_iterators())
673 * for (const auto &face : cell->face_iterators())
677 * We choose the boundary in positive x direction for the
678 * interface coupling.
681 * if (face->at_boundary() && (face->center()[0] == 1))
682 * face->set_boundary_id(interface_boundary_id);
694 * CoupledLaplaceProblem<dim>::setup_system()
696 * dof_handler.distribute_dofs(fe);
698 * std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
703 * sparsity_pattern.copy_from(dsp);
705 * system_matrix.reinit(sparsity_pattern);
707 * solution.reinit(dof_handler.n_dofs());
708 * old_solution.reinit(dof_handler.n_dofs());
709 * system_rhs.reinit(dof_handler.n_dofs());
716 * CoupledLaplaceProblem<dim>::assemble_system()
720 * Reset global structures
727 * Update old solution values
730 * old_solution = solution;
734 * RightHandSide<dim> right_hand_side;
737 * quadrature_formula,
741 * const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
745 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
748 * The solution values from previous time steps are stored for each quadrature
752 * std::vector<double> local_values_old_solution(fe_values.n_quadrature_points);
754 * for (const auto &cell : dof_handler.active_cell_iterators())
761 * Get the local values from the `fe_values` object
764 * fe_values.get_function_values(old_solution, local_values_old_solution);
768 * The system matrix additionally contains a mass matrix due to the time
769 * discretization. The RHS has contributions from the old solution values.
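Spelled out, the backward Euler step assembled in the cell loop below amounts to
the discrete weak form
@f[
  (u^n, \varphi_i) + \Delta t \, (\nabla u^n, \nabla \varphi_i)
    = \Delta t \, (f, \varphi_i) + (u^{n-1}, \varphi_i),
@f]
i.e., the matrix consists of the mass term plus @f$\Delta t@f$ times the stiffness
term, while the right-hand side collects @f$\Delta t@f$ times the source plus the
contribution of the old solution.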
772 * for (const unsigned int q_index : fe_values.quadrature_point_indices())
773 * for (const unsigned int i : fe_values.dof_indices())
775 * for (const unsigned int j : fe_values.dof_indices())
776 * cell_matrix(i, j) +=
777 * ((fe_values.shape_value(i, q_index) * // phi_i(x_q)
778 * fe_values.shape_value(j, q_index)) + // phi_j(x_q)
779 * (delta_t * // delta t
780 * fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
781 * fe_values.shape_grad(j, q_index))) * // grad phi_j(x_q)
782 * fe_values.JxW(q_index); // dx
784 * const auto x_q = fe_values.quadrature_point(q_index);
785 * const auto &local_value = local_values_old_solution[q_index];
786 * cell_rhs(i) += ((delta_t * // delta t
787 * fe_values.shape_value(i, q_index) * // phi_i(x_q)
788 * right_hand_side.value(x_q)) + // f(x_q)
789 * fe_values.shape_value(i, q_index) *
790 * local_value) * // phi_i(x_q)*val
791 * fe_values.JxW(q_index); // dx
796 * Copy local to global
799 * cell->get_dof_indices(local_dof_indices);
800 * for (const unsigned int i : fe_values.dof_indices())
802 * for (const unsigned int j : fe_values.dof_indices())
803 * system_matrix.add(local_dof_indices[i],
804 * local_dof_indices[j],
805 * cell_matrix(i, j));
807 * system_rhs(local_dof_indices[i]) += cell_rhs(i);
813 * At first, we apply the Dirichlet boundary condition from @ref step_4 "step-4", as
817 * std::map<types::global_dof_index, double> boundary_values;
818 * VectorTools::interpolate_boundary_values(dof_handler,
820 * BoundaryValues<dim>(),
822 * MatrixTools::apply_boundary_values(boundary_values,
830 * Afterwards, we apply the coupling boundary condition. The `boundary_data`
831 * has already been filled by preCICE.
834 * MatrixTools::apply_boundary_values(boundary_data,
845 * CoupledLaplaceProblem<dim>::solve()
847 * SolverControl solver_control(1000, 1e-12);
848 * SolverCG<Vector<double>> solver(solver_control);
849 * solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());
851 * std::cout << " " << solver_control.last_step()
852 * << " CG iterations needed to obtain convergence." << std::endl;
859 * CoupledLaplaceProblem<dim>::output_results() const
861 * DataOut<dim> data_out;
863 * data_out.attach_dof_handler(dof_handler);
864 * data_out.add_data_vector(solution, "solution");
866 * data_out.build_patches(mapping);
868 * std::ofstream output("solution-" + std::to_string(time_step) + ".vtk");
869 * data_out.write_vtk(output);
876 * CoupledLaplaceProblem<dim>::run()
878 * std::cout << "Solving problem in " << dim << " space dimensions."
886 * After we set up the system, we initialize preCICE and the adapter using the
887 * functionalities of the Adapter.
890 * adapter.initialize(dof_handler, boundary_data, mapping);
894 * preCICE steers the coupled simulation: `isCouplingOngoing` is
895 * used to synchronize the end of the simulation with the coupling partner
898 * while (adapter.precice.isCouplingOngoing())
902 * The time step number is solely used to generate unique output files
908 * preCICE returns the maximum admissible time-step size,
909 * which needs to be compared to our desired solver time-step size.
912 * precice_delta_t = adapter.precice.getMaxTimeStepSize();
913 * delta_t = std::min(precice_delta_t, solver_delta_t);
916 * Next we read data. Since we use a fully implicit (backward) Euler method, we want
917 * the data to be associated with the end of the current time step (delta_t).
918 * Time-interpolation methods in preCICE allow us to obtain read data at any
919 * point in time, if the coupling scheme allows it.
922 * adapter.read_data(delta_t, boundary_data);
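As an illustration of that flexibility, a scheme that needs the boundary values at
the midpoint of the step could instead ask preCICE for interpolated data there.
This is a hypothetical variation, not used in this tutorial, and it requires a
suitable waveform/time-interpolation setting in the preCICE configuration:

@code
// Hypothetical: query boundary data interpolated to the middle of the
// current time step instead of its end.
adapter.read_data(delta_t / 2.0, boundary_data);
@endcode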
926 * In the time loop, we assemble the coupled system and solve it as
935 * After we solved the system, we advance the coupling to the next time
936 * level. In a bi-directional coupled simulation, we would pass our
937 * calculated data to preCICE
940 * adapter.advance(delta_t);
944 * Write an output file if the time step is completed. In case of an
945 * implicit coupling, where individual time steps are computed more than
946 * once, the function `isTimeWindowComplete` prevents unnecessary result
947 * writing. For this simple tutorial configuration (explicit coupling),
948 * the function always returns `true`.
951 * if (adapter.precice.isTimeWindowComplete())
961 * CoupledLaplaceProblem<2> laplace_problem;
962 * laplace_problem.run();
969<a name="ann-fancy_boundary_condition.cc"></a>
970<h1>Annotated version of fancy_boundary_condition.cc</h1>
976 * /* -----------------------------------------------------------------------------
978 * * SPDX-License-Identifier: LGPL-2.1-or-later
979 * * Copyright (C) 2020 by David Schneider
980 * * Copyright (C) 2020 by Benjamin Uekermann
982 * * This file is part of the deal.II code gallery.
984 * * -----------------------------------------------------------------------------
989 * This program does not use any deal.II functionality and depends only on
990 * preCICE and the standard libraries.
993 * #include <precice/precice.hpp>
995 * #include <iostream>
1001 * The program computes a time-varying parabolic boundary condition, which is
1002 * passed to preCICE and serves as Dirichlet boundary condition for the other
1003 * coupling participant.
1007 * Function to generate boundary values in each time step
1011 * define_boundary_values(std::vector<double> &boundary_data,
1012 * const double time,
1013 * const double end_time)
1017 * Scale the current time value
1020 * const double relative_time = time / end_time;
1023 * Define the amplitude. Values run from -0.5 to 0.5
1026 * const double amplitude = (relative_time - 0.5);
1029 * Specify the actual data we want to pass to the other participant. Here, we
1030 * choose a parabola that equals 2 at both ends in order to enforce continuity
1031 * with adjacent boundaries.
1034 * const double n_elements = boundary_data.size();
1035 * const double right_zero = boundary_data.size() - 1;
1036 * const double left_zero = 0;
1037 * const double offset = 2;
1038 * for (uint i = 0; i < n_elements; ++i)
1039 * boundary_data[i] =
1040 * -amplitude * ((i - left_zero) * (i - right_zero)) + offset;
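Written out, the values passed to the other participant at the @f$N@f$ vertices are
@f[
  g_i(t) = -\Bigl(\frac{t}{T} - \frac{1}{2}\Bigr)\, i \,\bigl(i - (N-1)\bigr) + 2,
  \qquad i = 0,\dots,N-1,
@f]
where @f$T@f$ is the end time: the profile reduces to the constant offset 2 at
@f$t = T/2@f$, and its bulge flips sign over the course of the simulation.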
1047 * std::cout << "Boundary participant: starting... \n";
1054 * const std::string configFileName("precice-config.xml");
1055 * const std::string solverName("boundary-participant");
1056 * const std::string meshName("boundary-mesh");
1057 * const std::string dataWriteName("boundary-data");
1061 * Adjust to MPI rank and size for parallel computation
1064 * const int commRank = 0;
1065 * const int commSize = 1;
1067 * precice::Participant precice(solverName, configFileName, commRank, commSize);
1069 * const int dimensions = precice.getMeshDimensions(meshName);
1070 * const int numberOfVertices = 6;
1074 * Set up data structures
1077 * std::vector<double> writeData(numberOfVertices);
1078 * std::vector<int> vertexIDs(numberOfVertices);
1079 * std::vector<double> vertices(numberOfVertices * dimensions);
1083 * Define a boundary mesh
1086 * std::cout << "Boundary participant: defining boundary mesh \n";
1087 * const double length = 2;
1088 * const double xCoord = 1;
1089 * const double deltaY = length / (numberOfVertices - 1);
1090 * for (int i = 0; i < numberOfVertices; ++i)
1091 * for (int j = 0; j < dimensions; ++j)
1093 * const unsigned int index = dimensions * i + j;
1096 * The x-coordinate is always 1, i.e., the boundary is parallel to the
1097 * y-axis. The y-coordinate is descending from 1 to -1.
1101 * vertices[index] = xCoord;
1103 * vertices[index] = 1 - deltaY * i;
1108 * Pass the vertices to preCICE
1111 * precice.setMeshVertices(meshName, vertices, vertexIDs);
1115 * Variables for the time
1118 * const double end_time = 1;
1123 * Not used in the configuration by default
1126 * if (precice.requiresInitialData())
1128 * std::cout << "Boundary participant: writing initial data \n";
1129 * define_boundary_values(writeData, time, end_time);
1130 * precice.writeData(meshName, dataWriteName, vertexIDs, writeData);
1135 * initialize the Participant
1138 * precice.initialize();
1145 * while (precice.isCouplingOngoing())
1147 * double dt = precice.getMaxTimeStepSize();
1152 * Generate new boundary data
1155 * define_boundary_values(writeData, time, end_time);
1157 * std::cout << "Boundary participant: writing coupling data \n";
1158 * precice.writeData(meshName, dataWriteName, vertexIDs, writeData);
1160 * std::cout << "Boundary participant: advancing in time\n";
1161 * precice.advance(dt);
1164 * std::cout << "Boundary participant: closing...\n";