758 *       return 1. / (0.05 + 2. * p.square());
765 *                                   const unsigned int component) const
774 * <a name="step_37-Matrixfreeimplementation"></a>
867 *   class LaplaceOperator
872 *     using value_type = number;
876 *     void clear() override;
883 *     virtual void apply_add(
891 *       const std::pair<unsigned int, unsigned int> &cell_range) const;
910 *   template <int dim, int fe_degree, typename number>
911 *   LaplaceOperator<dim, fe_degree, number>::LaplaceOperator()
918 *   template <int dim, int fe_degree, typename number>
919 *   void LaplaceOperator<dim, fe_degree, number>::clear()
931 * <a name="step_37-Computationofcoefficient"></a>
944 *   void LaplaceOperator<dim, fe_degree, number>::evaluate_coefficient(
947 *     const unsigned int n_cells = this->data->n_cell_batches();
951 *     for (unsigned int cell = 0; cell < n_cells; ++cell)
954 *         for (const unsigned int q : phi.quadrature_point_indices())
965 * <a name="step_37-LocalevaluationofLaplaceoperator"></a>
987 * cell iterators, in this class all cells are laid out in a plain array
1065 *   template <int dim, int fe_degree, typename number>
1066 *   void LaplaceOperator<dim, fe_degree, number>::local_apply(
1070 *     const std::pair<unsigned int, unsigned int> &cell_range) const
1080 *         phi.read_dof_values(src);
1082 *         for (const unsigned int q : phi.quadrature_point_indices())
1085 *         phi.distribute_local_to_global(dst);
1104 * src.update_ghost_values();
1106 *                     data.n_cell_batches()));
1182 *   void LaplaceOperator<dim, fe_degree, number>::apply_add(
1186 *     this->data->cell_loop(&LaplaceOperator::local_apply, this, dst, src);
1230 *   void LaplaceOperator<dim, fe_degree, number>::compute_diagonal()
1240 *       &LaplaceOperator::local_compute_diagonal,
1248 *              ExcMessage("No diagonal entry in a positive definite operator "
1249 *                         "should be zero"));
1300 * inside the distribute_local_to_global call, the vector interface used
1305 * href="http://dx.doi.org/10.4208/cicp.101214.021015a">Kormann (2016),
1311 *   template <int dim, int fe_degree, typename number>
1312 *   void LaplaceOperator<dim, fe_degree, number>::local_compute_diagonal(
1315 *     const unsigned int cell = phi.get_current_cell_index();
1319 *     for (const unsigned int q : phi.quadrature_point_indices())
1331 * <a name="step_37-LaplaceProblemclass"></a>
1361 *   template <int dim>
1417 *   template <int dim>
1423 *           dim>::construct_multigrid_hierarchy)
1450 * <a name="step_37-LaplaceProblemsetup_system"></a>
1463 * @ref step_40 "step-40".
1487 *   template <int dim>
1493 *     system_matrix.clear();
1496 *     dof_handler.distribute_dofs(fe);
1497 *     dof_handler.distribute_mg_dofs();
1499 *     pcout << "Number of degrees of freedom: " << dof_handler.n_dofs()
1502 *     constraints.clear();
1503 *     constraints.reinit(dof_handler.locally_owned_dofs(),
1508 *     constraints.close();
1511 *     time_details << "Distribute DoFs & B.C. (CPU/wall) " << time.cpu_time()
1512 *                  << "s/" << time.wall_time() << 's' << std::endl;
1517 *     additional_data.tasks_parallel_scheme =
1519 *     additional_data.mapping_update_flags =
1533 *     system_matrix.initialize_dof_vector(solution);
1534 *     system_matrix.initialize_dof_vector(system_rhs);
1537 *     time_details << "Setup matrix-free system (CPU/wall) " << time.cpu_time()
1538 *                  << "s/" << time.wall_time() << 's' << std::endl;
1559 *     mg_constrained_dofs.initialize(dof_handler);
1560 *     mg_constrained_dofs.make_zero_boundary_constraints(
1566 *           dof_handler.locally_owned_mg_dofs(level),
1569 *              mg_constrained_dofs.get_boundary_indices(level))
1570 *           level_constraints.constrain_dof_to_zero(dof_index);
1571 *         level_constraints.close();
1574 *         additional_data.tasks_parallel_scheme =
1576 *         additional_data.mapping_update_flags =
1578 *         additional_data.mg_level = level;
1580 *           std::make_shared<MatrixFree<dim, float>>();
1583 *                               level_constraints,
1588 *                                        mg_constrained_dofs,
1594 *     time_details << "Setup matrix-free levels (CPU/wall) " << time.cpu_time()
1595 *                  << "s/" << time.wall_time() << 's' << std::endl;
1603 * <a name="step_37-LaplaceProblemassemble_rhs"></a>
1625 *       *system_matrix.get_matrix_free());
1626 *     for (unsigned int cell = 0;
1627 *          cell < system_matrix.get_matrix_free()->n_cell_batches();
1631 *         for (const unsigned int q : phi.quadrature_point_indices())
1639 *     time_details << "Assemble right hand side (CPU/wall) " << time.cpu_time()
1640 *                  << "s/" << time.wall_time() << 's' << std::endl;
1648 * <a name="step_37-LaplaceProblemsolve"></a>
1649 * <h4>LaplaceProblem::solve</h4>
1660 *   template <int dim>
1667 *     time_details << "MG build transfer time (CPU/wall) " << time.cpu_time()
1668 *                  << "s/" << time.wall_time() << "s\n";
1690 * by our LaplaceOperator class.
1774 * "Multigrid paper by Janssen and Kanschat" for more details.
1833 *                  << "s/" << time.wall_time() << "s\n";
1838 *     constraints.set_zero(solution);
1839 *     cg.solve(system_matrix, solution, system_rhs, preconditioner);
1841 *     constraints.distribute(solution);
1844 *       << (solver_control.last_step() < 10 ? " " : " ") << "(CPU/wall) "
1845 *       << time.cpu_time() << "s/" << time.wall_time() << "s\n";
1866 * of the linear solve.
1885 *     solution.update_ghost_values();
1886 *     data_out.attach_dof_handler(dof_handler);
1887 *     data_out.add_data_vector(solution, "solution");
1888 *     data_out.build_patches(mapping);
1892 *     data_out.set_flags(flags);
1893 *     data_out.write_vtu_with_pvtu_record(
1896 *     time_details << "Time write output (CPU/wall) " << time.cpu_time()
1897 *                  << "s/" << time.wall_time() << "s\n";
1905 * <a name="step_37-LaplaceProblemrun"></a>
1906 * <h4>LaplaceProblem::run</h4>
1916 * Before we run the program, we output some information about the detected
1917 * vectorization level as discussed in the introduction.
1920 * template <int dim>
1921 * void LaplaceProblem<dim>::run()
1924 * const unsigned int n_vect_doubles = VectorizedArray<double>::size();
1925 * const unsigned int n_vect_bits = 8 * sizeof(double) * n_vect_doubles;
1927 * pcout << "Vectorization over " << n_vect_doubles
1928 * << " doubles = " << n_vect_bits << " bits ("
1929 *           << Utilities::System::get_current_vectorization_level() << ')'
1933 * for (unsigned int cycle = 0; cycle < 9 - dim; ++cycle)
1935 * pcout << "Cycle " << cycle << std::endl;
1939 * GridGenerator::hyper_cube(triangulation, 0., 1.);
1940 * triangulation.refine_global(3 - dim);
1942 * triangulation.refine_global(1);
1946 * output_results(cycle);
1947 * pcout << std::endl;
1950 * } // namespace Step37
1957 * <a name="step_37-Thecodemaincodefunction"></a>
1958 * <h3>The <code>main</code> function</h3>
1962 * Apart from the fact that we set up the MPI framework according to @ref step_40 "step-40",
1963 * there are no surprises in the main function.
1966 * int main(int argc, char *argv[])
1970 * using namespace Step37;
1972 * Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
1974 * LaplaceProblem<dimension> laplace_problem;
1975 * laplace_problem.run();
1977 * catch (std::exception &exc)
1979 * std::cerr << std::endl
1981 * << "----------------------------------------------------"
1983 * std::cerr << "Exception on processing: " << std::endl
1984 * << exc.what() << std::endl
1985 * << "Aborting!" << std::endl
1986 * << "----------------------------------------------------"
1992 * std::cerr << std::endl
1994 * << "----------------------------------------------------"
1996 * std::cerr << "Unknown exception!" << std::endl
1997 * << "Aborting!" << std::endl
1998 * << "----------------------------------------------------"
<a name="step_37-Results"></a><h1>Results</h1>

<a name="step_37-Programoutput"></a><h3>Program output</h3>

Since this example solves the same problem as @ref step_5 "step-5" (except for
a different coefficient), there is little to say about the solution. We show a
picture anyway, illustrating the size of the solution through both isocontours
and volume rendering:

<img src="https://www.dealii.org/images/steps/developer/step-37.solution.png" alt="">

Of more interest is to evaluate some aspects of the multigrid solver.
When we run this program in 2D for quadratic (@f$Q_2@f$) elements, we get the
following output (when run on one core in release mode):

Vectorization over 2 doubles = 128 bits (SSE2)

Number of degrees of freedom: 81
Total setup time (wall) 0.00159788s
Time solve (6 iterations) (CPU/wall) 0.000951s/0.000951052s

Number of degrees of freedom: 289
Total setup time (wall) 0.00114608s
Time solve (6 iterations) (CPU/wall) 0.000935s/0.000934839s

Number of degrees of freedom: 1089
Total setup time (wall) 0.00244665s
Time solve (6 iterations) (CPU/wall) 0.00207s/0.002069s

Number of degrees of freedom: 4225
Total setup time (wall) 0.00678205s
Time solve (6 iterations) (CPU/wall) 0.005616s/0.00561595s

Number of degrees of freedom: 16641
Total setup time (wall) 0.0241671s
Time solve (6 iterations) (CPU/wall) 0.019543s/0.0195441s

Number of degrees of freedom: 66049
Total setup time (wall) 0.0967851s
Time solve (6 iterations) (CPU/wall) 0.07457s/0.0745709s

Number of degrees of freedom: 263169
Total setup time (wall) 0.346374s
Time solve (6 iterations) (CPU/wall) 0.260042s/0.265033s

As in @ref step_16 "step-16", we see that the number of CG iterations remains
constant with increasing number of degrees of freedom. A constant number of
iterations (together with optimal computational properties) means that the
computing time approximately quadruples as the problem size quadruples from one
cycle to the next. The code is also very efficient in terms of storage: around
2-4 million degrees of freedom fit into 1 GB of memory, see also the MPI
results below. An interesting fact is that solving one linear system is cheaper
than the setup, despite not building a matrix (approximately half of the setup
time is spent in the DoFHandler::distribute_dofs() and
DoFHandler::distribute_mg_dofs() calls). This shows the high efficiency of this
approach, but also that the deal.II data structures are quite expensive to set
up and that the setup cost must be amortized over several system solves.

Not much changes if we run the program in three spatial dimensions. Since we
use uniform mesh refinement, we get eight times as many elements and
approximately eight times as many degrees of freedom with each cycle:

Vectorization over 2 doubles = 128 bits (SSE2)

Number of degrees of freedom: 125
Total setup time (wall) 0.00231099s
Time solve (6 iterations) (CPU/wall) 0.000692s/0.000922918s

Number of degrees of freedom: 729
Total setup time (wall) 0.00289083s
Time solve (6 iterations) (CPU/wall) 0.001534s/0.0024128s

Number of degrees of freedom: 4913
Total setup time (wall) 0.0143182s
Time solve (6 iterations) (CPU/wall) 0.010785s/0.0107841s

Number of degrees of freedom: 35937
Total setup time (wall) 0.087064s
Time solve (6 iterations) (CPU/wall) 0.063522s/0.06545s

Number of degrees of freedom: 274625
Total setup time (wall) 0.596306s
Time solve (6 iterations) (CPU/wall) 0.427757s/0.431765s

Number of degrees of freedom: 2146689
Total setup time (wall) 4.96491s
Time solve (6 iterations) (CPU/wall) 3.53126s/3.56142s

Since it is so easy, we look at what happens if we increase the polynomial
degree. When selecting degree four in 3D, i.e., @f$\mathcal Q_4@f$ elements, by
changing the line <code>const unsigned int degree_finite_element = 4;</code> at
the top of the program, we get the following program output:

Vectorization over 2 doubles = 128 bits (SSE2)

Number of degrees of freedom: 729
Total setup time (wall) 0.00633097s
Time solve (6 iterations) (CPU/wall) 0.002829s/0.00379395s

Number of degrees of freedom: 4913
Total setup time (wall) 0.0174279s
Time solve (6 iterations) (CPU/wall) 0.012255s/0.012254s

Number of degrees of freedom: 35937
Total setup time (wall) 0.082655s
Time solve (6 iterations) (CPU/wall) 0.052362s/0.0523629s

Number of degrees of freedom: 274625
Total setup time (wall) 0.507943s
Time solve (6 iterations) (CPU/wall) 0.341811s/0.345788s

Number of degrees of freedom: 2146689
Total setup time (wall) 3.46251s
Time solve (7 iterations) (CPU/wall) 3.29638s/3.3265s

Number of degrees of freedom: 16974593
Total setup time (wall) 27.8989s
Time solve (7 iterations) (CPU/wall) 26.3705s/27.1077s

Since @f$\mathcal Q_4@f$ elements on a certain mesh correspond to @f$\mathcal Q_2@f$
elements on half the mesh size, we can compare the run time at cycle 4 with
fourth degree polynomials with cycle 5 using quadratic polynomials, both at
2.1 million degrees of freedom. The surprising effect is that the solver for the
@f$\mathcal Q_4@f$ element is actually slightly faster than for the quadratic
case, despite using one more linear iteration. The effect that higher-degree
polynomials are similarly fast or even faster than lower-degree ones is one of
the main strengths of matrix-free operator evaluation through sum
factorization, see the <a
href="http://dx.doi.org/10.1016/j.compfluid.2012.04.012">matrix-free
paper</a>. This is fundamentally different from matrix-based methods, which get
more expensive per unknown as the polynomial degree increases and the coupling
between degrees of freedom gets denser. In addition, the setup also gets a bit
cheaper for higher orders because fewer elements need to be set up.
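
To give an impression of what sum factorization looks like in code, the
following is a minimal sketch of a matrix-free cell operation in the spirit of
the local_apply() function of this program, but reduced to a
constant-coefficient Laplacian; the function name and the omission of the
variable coefficient are simplifications for illustration, not the literal
tutorial code:

@code
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/matrix_free/fe_evaluation.h>
#include <deal.II/matrix_free/matrix_free.h>

using namespace dealii;

// Apply a constant-coefficient Laplacian on one range of cell batches;
// FEEvaluation uses sum factorization inside evaluate() and integrate().
template <int dim, int fe_degree>
void local_apply_sketch(
  const MatrixFree<dim, double>                    &data,
  LinearAlgebra::distributed::Vector<double>       &dst,
  const LinearAlgebra::distributed::Vector<double> &src,
  const std::pair<unsigned int, unsigned int>      &cell_range)
{
  FEEvaluation<dim, fe_degree> phi(data);
  for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
    {
      phi.reinit(cell);
      phi.read_dof_values(src);
      phi.evaluate(EvaluationFlags::gradients);      // solution gradients at quadrature points
      for (const unsigned int q : phi.quadrature_point_indices())
        phi.submit_gradient(phi.get_gradient(q), q); // unit coefficient in this sketch
      phi.integrate(EvaluationFlags::gradients);     // test with gradients of shape functions
      phi.distribute_local_to_global(dst);
    }
}
@endcode

Such a function is what MatrixFree::cell_loop() invokes on chunks of cell
batches, taking care of the MPI ghost value exchange around the loop.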

Finally, let us look at the timings with degree 8, which corresponds to
another round of mesh refinement in the lower order methods:

Vectorization over 2 doubles = 128 bits (SSE2)

Number of degrees of freedom: 4913
Total setup time (wall) 0.0842004s
Time solve (8 iterations) (CPU/wall) 0.019296s/0.0192959s

Number of degrees of freedom: 35937
Total setup time (wall) 0.327048s
Time solve (8 iterations) (CPU/wall) 0.07517s/0.075999s

Number of degrees of freedom: 274625
Total setup time (wall) 2.12335s
Time solve (8 iterations) (CPU/wall) 0.448739s/0.453698s

Number of degrees of freedom: 2146689
Total setup time (wall) 16.1743s
Time solve (8 iterations) (CPU/wall) 3.95003s/3.97717s

Number of degrees of freedom: 16974593
Total setup time (wall) 130.8s
Time solve (8 iterations) (CPU/wall) 31.0316s/31.767s

Here, the initialization seems considerably slower than before, which is
mainly due to the computation of the diagonal of the matrix: it effectively
computes a 729 x 729 matrix on each cell and throws away everything but the
diagonal. The solver times, however, are again very close to the quartic case,
showing that the linear increase with the polynomial degree that is
theoretically expected is almost completely offset by better computational
characteristics and by the fact that higher order methods have a smaller share
of degrees of freedom living on several cells, which add to the evaluation
complexity.
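
To make this cost concrete, one way to obtain the diagonal is to apply the
local operator to each of the 729 local unit vectors and keep only the matching
entry, i.e., to compute the cell matrix column by column. The following is a
rough sketch of that strategy (not the literal code of this program, again
without the variable coefficient, and assuming the same deal.II headers as the
sketch above):

@code
// Compute the local contribution to the matrix diagonal on one range of cell
// batches by applying a constant-coefficient Laplacian column by column.
template <int dim, int fe_degree, typename number>
void local_diagonal_sketch(
  const MatrixFree<dim, number>               &data,
  LinearAlgebra::distributed::Vector<number>  &dst,
  const std::pair<unsigned int, unsigned int> &cell_range)
{
  FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(data);
  AlignedVector<VectorizedArray<number>> diagonal(phi.dofs_per_cell);
  for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
    {
      phi.reinit(cell);
      for (unsigned int i = 0; i < phi.dofs_per_cell; ++i)
        {
          // Apply the operator to the i-th local unit vector ...
          for (unsigned int j = 0; j < phi.dofs_per_cell; ++j)
            phi.submit_dof_value(VectorizedArray<number>(), j);
          phi.submit_dof_value(make_vectorized_array<number>(1.), i);
          phi.evaluate(EvaluationFlags::gradients);
          for (const unsigned int q : phi.quadrature_point_indices())
            phi.submit_gradient(phi.get_gradient(q), q);
          phi.integrate(EvaluationFlags::gradients);
          // ... and keep only the i-th entry of the result.
          diagonal[i] = phi.get_dof_value(i);
        }
      // Write the collected diagonal entries back into the result vector.
      for (unsigned int i = 0; i < phi.dofs_per_cell; ++i)
        phi.submit_dof_value(diagonal[i], i);
      phi.distribute_local_to_global(dst);
    }
}
@endcode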

<a name="step_37-Comparisonwithasparsematrix"></a><h3>Comparison with a sparse matrix</h3>

In order to understand the capabilities of the matrix-free implementation, we
compare the performance of the 3d example above with a sparse matrix
implementation based on TrilinosWrappers::SparseMatrix by measuring both the
computation times for the initialization of the problem (distribute DoFs,
set up and assemble matrices, set up multigrid structures) and the actual
solution for the matrix-free variant and the variant based on sparse
matrices. We base the preconditioner on float numbers and the actual matrix
and vectors on double numbers, as shown above. Tests are run on an Intel Core
i7-5500U notebook processor (two cores and <a
href="http://en.wikipedia.org/wiki/Advanced_Vector_Extensions">AVX</a>
support, i.e., four operations on doubles can be done with one CPU
instruction, which is heavily used in FEEvaluation), in optimized mode, and
with two MPI processes.
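
The float/double combination mentioned above corresponds to type choices like
the following inside the LaplaceProblem class (a sketch using the names of this
tutorial and the LaplaceOperator class defined above; the exact declarations in
the code may differ slightly):

@code
// Double precision for the outer CG system, single precision for the
// multigrid level operators used inside the preconditioner.
using SystemMatrixType = LaplaceOperator<dim, degree_finite_element, double>;
using LevelMatrixType  = LaplaceOperator<dim, degree_finite_element, float>;

SystemMatrixType                           system_matrix;
MGLevelObject<LevelMatrixType>             mg_matrices;
LinearAlgebra::distributed::Vector<double> solution, system_rhs;
@endcode

With this setup, the timings compare as follows: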

<table align="center" class="doxtable">
  <tr>
    <th>&nbsp;</th>
    <th colspan="2">Sparse matrix</th>
    <th colspan="2">Matrix-free implementation</th>
  </tr>
  <tr>
    <th>n_dofs</th>
    <th>Setup + assemble</th>
    <th> Solve </th>
    <th>Setup + assemble</th>
    <th> Solve </th>
  </tr>
  <tr>
    <td align="right">125</td>
    <td align="center">0.0042s</td>
    <td align="center">0.0012s</td>
    <td align="center">0.0022s</td>
    <td align="center">0.00095s</td>
  </tr>
  <tr>
    <td align="right">729</td>
    <td align="center">0.012s</td>
    <td align="center">0.0040s</td>
    <td align="center">0.0027s</td>
    <td align="center">0.0021s</td>
  </tr>
  <tr>
    <td align="right">4,913</td>
    <td align="center">0.082s</td>
    <td align="center">0.012s</td>
    <td align="center">0.011s</td>
    <td align="center">0.0057s</td>
  </tr>
  <tr>
    <td align="right">35,937</td>
    <td align="center">0.73s</td>
    <td align="center">0.13s</td>
    <td align="center">0.048s</td>
    <td align="center">0.040s</td>
  </tr>
  <tr>
    <td align="right">274,625</td>
    <td align="center">5.43s</td>
    <td align="center">1.01s</td>
    <td align="center">0.33s</td>
    <td align="center">0.25s</td>
  </tr>
  <tr>
    <td align="right">2,146,689</td>
    <td align="center">43.8s</td>
    <td align="center">8.24s</td>
    <td align="center">2.42s</td>
    <td align="center">2.06s</td>
  </tr>
</table>

The table clearly shows that the matrix-free implementation is more than twice
as fast for the solver, and more than six times as fast when it comes to
initialization costs. As the problem size is made a factor 8 larger, we note
that the times usually go up by a factor eight, too (as the solver iterations
are constant at six). The main deviation is in the sparse matrix between 5k
and 36k degrees of freedom, where the time increases by a factor 12. This is
the threshold where the (L3) cache in the processor can no longer hold all
data necessary for the matrix-vector products and all matrix elements must be
fetched from main memory.

Of course, this picture does not necessarily translate to all cases, as there
are problems where knowledge of matrix entries enables much better solvers (as
happens when the coefficient is varying more strongly than in the above
example). Moreover, it also depends on the computer system. The present system
has good memory performance, so sparse matrices perform comparably
well. Nonetheless, the matrix-free implementation gives a nice speedup already
for the <i>Q</i><sub>2</sub> elements used in this example. This becomes
particularly apparent for time-dependent or nonlinear problems where sparse
matrices would need to be reassembled over and over again, which becomes much
easier with this class. And of course, thanks to the better complexity of the
products, the method gains increasingly larger advantages when the order of the
elements increases (the matrix-free implementation has costs
4<i>d</i><sup>2</sup><i>p</i> per degree of freedom, compared to
2<i>p<sup>d</sup></i> for the sparse matrix, so it will win anyway for order 4
and higher).
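
For such repeated applications, no sparse matrix is ever assembled; applying
the operator reduces to a single call (a sketch using the names of this
tutorial):

@code
// Matrix-free operator application: vmult() triggers a cell_loop() with
// FEEvaluation under the hood, and no matrix entries are stored anywhere.
LinearAlgebra::distributed::Vector<double> dst, src;
system_matrix.initialize_dof_vector(dst);
system_matrix.initialize_dof_vector(src);
src = 1.;
system_matrix.vmult(dst, src);
@endcode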

<a name="step_37-ResultsforlargescaleparallelcomputationsonSuperMUC"></a><h3> Results for large-scale parallel computations on SuperMUC</h3>

As explained in the introduction and the in-code comments, this program can be
run in parallel with MPI. It turns out that geometric multigrid schemes work
really well and can scale to very large machines. To the authors' knowledge,
the results shown here are among the largest multigrid computations done with
deal.II, run on the <a
href="https://www.lrz.de/services/compute/supermuc/systemdescription/">complete
SuperMUC</a> system.

<img src="https://www.dealii.org/images/steps/developer/step-37.scaling_strong.png" alt="">

<img src="https://www.dealii.org/images/steps/developer/step-37.scaling_size.png" alt="">

<img src="https://www.dealii.org/images/steps/developer/step-37.scaling_oldnew.png" alt="">

<a name="step_37-Possibilitiesforextensions"></a><h3> Possibilities for extensions</h3>

solution.reinit(dof_handler.locally_owned_dofs(),
                locally_relevant_dofs,
                triangulation.get_communicator());
solution.copy_locally_owned_data_from(copy_vec);
constraints.distribute(solution);
solution.update_ghost_values();

  if (solution.locally_owned_elements().is_element(pair.first))

  constraints.distribute(solution);

the object @p constraints:

  constraints.distribute(solution);
  solution.update_ghost_values();

  for (unsigned int cell = 0;
       cell < system_matrix.get_matrix_free()->n_cell_batches();
       ++cell)

      phi.read_dof_values_plain(solution);

      for (const unsigned int q : phi.quadrature_point_indices())

  std::shared_ptr<MatrixFree<dim, double>> matrix_free(

  matrix_free->reinit(dof_handler,

  constraints.distribute(solution);

  for (unsigned int cell = 0;

      for (const unsigned int q : phi.quadrature_point_indices())

improve the time per unknown. (Even higher degrees typically get slower again,
because the multigrid iteration counts increase slightly with the chosen
simple smoother. One could then use hybrid multigrid algorithms with
polynomial coarsening through MGTransferGlobalCoarsening to reduce the impact

will then pick up this interface and schedule its vector operations), and </li>

<a name="step_37-PlainProg"></a>