        if (p.norm_square() < 0.25)
          return p.norm_square();
        else
          return 0.1 * p.norm_square() + (0.25 - 0.025);

      virtual Tensor<1, dim> gradient(const Point<dim> &p,
                                      const unsigned int = 0) const override

        if (p.norm_square() < 0.25)
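(As a quick consistency check of the two branches of the solution: at the interface radius @f$r=0.5@f$ we have @f$r^2=0.25@f$ in the first branch and @f$0.1\cdot 0.25 + (0.25-0.025) = 0.25@f$ in the second, so the exact solution is continuous across the material interface. Only its gradient jumps there, and it does so by exactly the inverse factor of the coefficient jump, which is what keeps the flux @f$a\nabla u@f$ of the exact solution continuous.)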
<a name="step_65-ThePoissonProblemclass"></a>
<h3>The PoissonProblem class</h3>
The implementation of the Poisson problem is very similar to what we used in the @ref step_5 "step-5" tutorial program. The two main differences are that we pass a mapping object to the various steps in the program in order to switch between two mapping representations as explained in the introduction, and the `timer` object (of type TimerOutput) that will be used for measuring the run times in the various cases. (The concept of mapping objects was first introduced in @ref step_10 "step-10" and @ref step_11 "step-11", in case you want to look up the use of these classes.)
  class PoissonProblem

    void create_grid();
    void setup_system(const Mapping<dim> &mapping);
    void assemble_system(const Mapping<dim> &mapping);
    void postprocess(const Mapping<dim> &mapping);

    const FE_Q<dim> fe;

    Vector<double> solution;
    Vector<double> system_rhs;
In the constructor, we set up the timer object to record wall times but be quiet during the normal execution. We will query it for timing details in the `PoissonProblem::run()` function. Furthermore, we select a relatively high polynomial degree of three for the finite element in use.

  PoissonProblem<dim>::PoissonProblem()
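The initializer list is not shown above; as a point of orientation, here is a minimal sketch of what it presumably looks like to match the behavior just described (the member order and the `dof_handler(triangulation)` initialization are assumptions):

    // Sketch: fe(3) selects the cubic element; the TimerOutput is told to
    // stay quiet (TimerOutput::never) while recording wall times.
    template <int dim>
    PoissonProblem<dim>::PoissonProblem()
      : fe(3)
      , dof_handler(triangulation)
      , timer(std::cout, TimerOutput::never, TimerOutput::wall_times)
    {}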
<a name="step_65-Gridcreationandinitializationofthemanifolds"></a>
<h3>Grid creation and initialization of the manifolds</h3>
The next function presents the typical usage of TransfiniteInterpolationManifold. The first step is to create the desired grid, which can be done by composition of two grids from GridGenerator. The inner ball mesh is simple enough: We run GridGenerator::hyper_ball() centered at the origin with radius 0.5 (third function argument). The second mesh is more interesting and constructed as follows: We want to have a mesh that is spherical in the interior but flat on the outer surface. Furthermore, the mesh topology of the inner ball should be compatible with the outer grid in the sense that their vertices coincide so as to allow the two grids to be merged. The grid coming out of GridGenerator::hyper_shell fulfills the requirements on the inner side in case it is created with @f$2d@f$ coarse cells (6 coarse cells in 3d which we are going to use) – this is the same number of cells as there are boundary faces for the ball. For the outer surface, we use the fact that the 6 faces on the surface of the shell without a manifold attached would degenerate to the surface of a cube. What we are still missing is the radius of the outer shell boundary. Since we desire a cube with extent @f$[-1, 1]@f$ in each coordinate direction and the 6-cell shell puts its 8 outer vertices at the 8 opposing diagonals, we must translate the points @f$(\pm 1, \pm 1, \pm 1)@f$ into a radius: Clearly, the radius must be @f$\sqrt{d}@f$ in @f$d@f$ dimensions, i.e., @f$\sqrt{3}@f$ for the three-dimensional case we want to consider.
Thus, we have a plan: After creating the inner triangulation for the ball and the one for the outer shell, we merge those two grids but remove all manifolds that the functions in GridGenerator may have set from the resulting triangulation, to ensure that we have full control over manifolds. In particular, we want additional points added on the boundary during refinement to follow a flat manifold description. To start the process of adding more appropriate manifold ids, we assign the manifold id 0 to all mesh entities (cells, faces, lines), which will later be associated with the TransfiniteInterpolationManifold. Then, we must identify the faces and lines that are along the sphere of radius 0.5 and mark them with a different manifold id, so as to then assign a SphericalManifold to those; we will pick the manifold id of 1. Since we have thrown away all manifolds that pre-existed in the merged grid, we manually go through the cells of the mesh and all their faces. We have found a face on the sphere if all four vertices have a radius of 0.5, or, as we write in the program, have @f$r^2-0.25 \approx 0@f$. Note that we call `cell->face(f)->set_all_manifold_ids(1)` to set the manifold id both on the faces and the surrounding lines. Furthermore, we want to distinguish the cells inside the ball and outside the ball by a material id for visualization, corresponding to the picture in the introduction.
  void PoissonProblem<dim>::create_grid()

        for (const auto &face : cell->face_iterators())

            bool face_at_sphere_boundary = true;

                if (std::abs(face->vertex(v).norm_square() - 0.25) > 1e-12)
                  face_at_sphere_boundary = false;

            if (face_at_sphere_boundary)
              face->set_all_manifold_ids(1);

        if (cell->center().norm_square() < 0.25)
          cell->set_material_id(1);
        else
          cell->set_material_id(0);
With all cells, faces and lines marked appropriately, we can go about attaching the manifold objects. The entities with manifold id 1 will get a spherical manifold, whereas the other entities, which have the manifold id 0, will be assigned the transfinite interpolation manifold. As mentioned in the introduction, we must explicitly initialize the manifold with the current mesh using a call to TransfiniteInterpolationManifold::initialize() in order to pick up the coarse mesh cells and the manifolds attached to the boundaries of those cells. We also note that the manifold objects we create locally in this function are allowed to go out of scope (as they do at the end of the function scope), because the Triangulation object internally copies them.

With all manifolds attached, we will finally go about and refine the mesh a few times to create a sufficiently large test case.
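The corresponding code lines are not shown above, so here is a minimal sketch of the attachment sequence just described (the refinement count at the end is a placeholder, not the program's actual value):

    triangulation.set_manifold(1, SphericalManifold<dim>());

    TransfiniteInterpolationManifold<dim> transfinite_manifold;
    transfinite_manifold.initialize(triangulation); // pick up coarse cells and boundary manifolds
    triangulation.set_manifold(0, transfinite_manifold);

    triangulation.refine_global(3); // placeholder count; creates a sufficiently large mesh

Note that Triangulation::set_manifold() stores its own copy of the manifold object, which is why the locally created objects may safely go out of scope.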
<a name="step_65-Setupofdatastructures"></a>
<h3>Setup of data structures</h3>
The following function is well-known from other tutorials in that it enumerates the degrees of freedom, creates a constraint object and sets up a sparse matrix for the linear system. The only thing worth mentioning is the fact that the function receives a reference to a mapping object that we then pass to the VectorTools::interpolate_boundary_values() function to ensure that our boundary values are evaluated on the high-order mesh used for assembly. In the present example, it does not really matter because the outer surfaces are flat, but for curved outer cells this leads to a more accurate approximation of the boundary values.

  void PoissonProblem<dim>::setup_system(const Mapping<dim> &mapping)
    dof_handler.distribute_dofs(fe);
    std::cout << " Number of active cells: "
    std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()

    constraints.clear();
      mapping, dof_handler, 0, ExactSolution<dim>(), constraints);
    constraints.close();

    sparsity_pattern.copy_from(dsp);
    system_matrix.reinit(sparsity_pattern);
    solution.reinit(dof_handler.n_dofs());
    system_rhs.reinit(dof_handler.n_dofs());
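The calls surrounding these fragments are elided above; they presumably follow the standard pattern from earlier tutorials. A sketch, not the verbatim program (the hanging-node constraints call and the `dsp` object are assumptions):

    constraints.clear();
    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
    VectorTools::interpolate_boundary_values(
      mapping, dof_handler, 0, ExactSolution<dim>(), constraints);
    constraints.close();

    DynamicSparsityPattern dsp(dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
    sparsity_pattern.copy_from(dsp);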
<a name="step_65-Assemblyofthesystemmatrixandrighthandside"></a>
<h3>Assembly of the system matrix and right hand side</h3>
The function that assembles the linear system is also well known from the previous tutorial programs. One thing to note is that we set the number of quadrature points to the polynomial degree plus two, not the degree plus one as in most other tutorials. This is because we expect some extra accuracy as the mapping also involves a degree one more than the polynomials for the solution.
The only somewhat unusual code in the assembly is the way we compute the cell matrix. Rather than using three nested loops over the quadrature points, the rows, and the columns of the matrix, we first collect the derivatives of the shape functions, multiplied by the square root of the product of the coefficient and the integration factor `JxW`, in a separate matrix `partial_matrix`. To compute the cell matrix, we then execute `partial_matrix.mTmult(cell_matrix, partial_matrix);`. To understand why this works, we realize that the matrix-matrix multiplication performs a summation over the columns of `partial_matrix`. If we denote the coefficient by @f$a(\mathbf{x}_q)@f$, the entries in the temporary matrix are @f$\sqrt{\text{det}(J) w_q a(\mathbf{x}_q)} \frac{\partial \varphi_i(\boldsymbol \xi_q)}{\partial x_k}@f$. If we take the product of the <i>i</i>th row with the <i>j</i>th column of that matrix, we compute a nested sum involving @f$\sum_q \sum_{k=1}^d \sqrt{\text{det}(J) w_q a(\mathbf{x}_q)} \frac{\partial \varphi_i(\boldsymbol \xi_q)}{\partial x_k} \sqrt{\text{det}(J) w_q a(\mathbf{x}_q)} \frac{\partial \varphi_j(\boldsymbol \xi_q)}{\partial x_k} = \sum_q \sum_{k=1}^d \text{det}(J) w_q a(\mathbf{x}_q) \frac{\partial \varphi_i(\boldsymbol \xi_q)}{\partial x_k} \frac{\partial \varphi_j(\boldsymbol \xi_q)}{\partial x_k}@f$, which is exactly the terms needed for the bilinear form of the Laplace equation.
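To see the trick in isolation, here is a tiny self-contained sketch (the names `A` and `B` are illustrative, not from the program):

    // B(i, q*dim+k) = sqrt(det(J) w_q a(x_q)) * d(phi_i)/dx_k; the product
    // A = B * B^T then accumulates the quadrature and dimension sums at once.
    FullMatrix<double> B(dofs_per_cell, n_q_points * dim);
    FullMatrix<double> A(dofs_per_cell, dofs_per_cell);
    // ... fill B as described above ...
    B.mTmult(A, B); // A = B * B^T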
The reason for choosing this somewhat unusual scheme is due to the heavy work involved in computing the cell matrix for a relatively high polynomial degree in 3d. As we want to highlight the cost of the mapping in this tutorial program, we better do the assembly in an optimized way in order to not chase bottlenecks that have been solved by the community already. Matrix-matrix multiplication is one of the best optimized kernels in numerical linear algebra, and FullMatrix::mTmult() will call into those optimized BLAS functions. If the user has provided a good BLAS library when configuring deal.II (like OpenBLAS or Intel's MKL), the computation of the cell matrix will execute close to the processor's peak arithmetic performance. As a side note, we mention that despite an optimized matrix-matrix multiplication, the current strategy is sub-optimal in terms of complexity as the work to be done is proportional to @f$(p+1)^9@f$ operations for degree @f$p@f$ (this also applies to the usual evaluation with FEValues). One could compute the cell matrix with @f$\mathcal O((p+1)^7)@f$ operations by utilizing the tensor product structure of the shape functions, as is done by the matrix-free framework in deal.II. We refer to @ref step_37 "step-37" and the documentation of the tensor-product-aware evaluators FEEvaluation for details on how an even more efficient cell matrix computation could be realized.
  void PoissonProblem<dim>::assemble_system(const Mapping<dim> &mapping)

    const QGauss<dim> quadrature_formula(fe.degree + 2);
      quadrature_formula,

    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
    const unsigned int n_q_points    = quadrature_formula.size();

    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

    for (const auto &cell : dof_handler.active_cell_iterators())
        fe_values.reinit(cell);

        for (unsigned int q_index = 0; q_index < n_q_points; ++q_index)
            const double current_coefficient =
              coefficient(fe_values.quadrature_point(q_index));
            for (unsigned int i = 0; i < dofs_per_cell; ++i)
                for (unsigned int d = 0; d < dim; ++d)
                  partial_matrix(i, q_index * dim + d) =
                    std::sqrt(fe_values.JxW(q_index) * current_coefficient) *
                    fe_values.shape_grad(i, q_index)[d];
                cell_rhs(i) += (fe_values.shape_value(i, q_index) *
                                fe_values.JxW(q_index));

        partial_matrix.mTmult(cell_matrix, partial_matrix);

        cell->get_dof_indices(local_dof_indices);
        constraints.distribute_local_to_global(
          cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
<a name="step_65-Solutionofthelinearsystem"></a>
<h3>Solution of the linear system</h3>
For solving the linear system, we pick a simple Jacobi-preconditioned conjugate gradient solver, similar to the settings in the early tutorials.

  void PoissonProblem<dim>::solve()

    solver.solve(system_matrix, solution, system_rhs, preconditioner);
    constraints.distribute(solution);

    std::cout << " Number of solver iterations: "
              << solver_control.last_step() << std::endl;
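The declarations of `solver_control`, `solver`, and `preconditioner` are elided above. A minimal sketch of what they look like for a Jacobi-preconditioned CG solver (the iteration limit and tolerance are assumptions for illustration):

    SolverControl solver_control(1000, 1e-12 * system_rhs.l2_norm());
    SolverCG<Vector<double>> solver(solver_control);

    PreconditionJacobi<SparseMatrix<double>> preconditioner;
    preconditioner.initialize(system_matrix);

    solver.solve(system_matrix, solution, system_rhs, preconditioner);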
<a name="step_65-Outputofthesolutionandcomputationoferrors"></a>
<h3>Output of the solution and computation of errors</h3>
In the next function we do various post-processing steps with the solution, all of which involve the mapping in one way or the other.

The first operation we do is to write the solution as well as the material ids to a VTU file. This is similar to what was done in many other tutorial programs. The new ingredient presented in this tutorial program is that we want to ensure that the data written to the file used for visualization is actually a faithful representation of what is used internally by deal.II. That is because most of the visualization data formats only represent cells by their vertex coordinates, but have no way of representing the curved boundaries that are used in deal.II when using higher order mappings -- in other words, what you see in the visualization tool is not actually what you are computing on. (The same, incidentally, is true when using higher order shape functions: most visualization tools only render bilinear/trilinear representations.)

So we need to ensure that a high-order representation is written to the file. We need to consider two particular topics. Firstly, we tell the DataOut object via DataOutBase::VtkFlags that we intend to interpret the subdivisions of the elements as a high-order Lagrange polynomial rather than a collection of bilinear patches. Recent visualization programs, like ParaView version 5.5 or newer, can then render a high-order solution (see a <a href="https://github.com/dealii/dealii/wiki/Notes-on-visualizing-high-order-output">wiki page</a> for more details). Secondly, we need to make sure that the mapping is passed to the DataOut::build_patches() method. This function only produces curved faces for <i>boundary</i> cells by default, so we need to ensure that also inner cells are printed in a curved representation via the mapping.
@note As of 2023, VisIt 3.3.3 still cannot deal with higher-order cells. Rather, it simply reports that there is no data to show. To view the results of this program with VisIt, you will want to comment out the line that sets `flags.write_higher_order_cells = true;`. On the other hand, ParaView is able to understand VTU files with higher order cells just fine.
  template <int dim>
  void PoissonProblem<dim>::postprocess(const Mapping<dim> &mapping)

      data_out.set_flags(flags);

      data_out.attach_dof_handler(dof_handler);
      data_out.add_data_vector(solution, "solution");

      for (const auto &cell : triangulation.active_cell_iterators())
        material_ids[cell->active_cell_index()] = cell->material_id();
      data_out.add_data_vector(material_ids, "material_ids");

      data_out.build_patches(mapping,

      std::ofstream file(
        std::to_string(triangulation.n_global_levels() - 10 + 2 * dim) +

      data_out.write_vtu(file);
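The `flags` object and the remaining `build_patches()` arguments are elided above; a minimal sketch of the two high-order output ingredients discussed earlier (the subdivision count `fe.degree` is an assumption):

    DataOutBase::VtkFlags flags;
    flags.write_higher_order_cells = true; // Lagrange patches instead of bilinear ones
    data_out.set_flags(flags);

    data_out.build_patches(mapping,
                           fe.degree,
                           DataOut<dim>::curved_inner_cells); // curve inner cells, too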
The next operation in the postprocessing function is to compute the @f$L_2@f$ and @f$H^1@f$ errors against the analytical solution. As the analytical solution is a quadratic polynomial, we expect a very accurate result at this point. If we were solving on a simple mesh with planar faces and a coefficient whose jumps are aligned with the faces between cells, then we would expect the numerical result to coincide with the analytical solution up to roundoff accuracy. However, since we are using deformed cells following a sphere, which are only tracked by polynomials of degree 4 (one more than the degree for the finite elements), we will see that there is an error around @f$10^{-7}@f$. We could get more accuracy by increasing the polynomial degree or refining the mesh.

        ExactSolution<dim>(),

    std::cout << " L2 error vs exact solution: "
              << norm_per_cell_p.l2_norm() << std::endl;

        ExactSolution<dim>(),

    std::cout << " H1 error vs exact solution: "
              << norm_per_cell_p.l2_norm() << std::endl;
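The calls computing `norm_per_cell_p` are elided above. They presumably follow the usual VectorTools::integrate_difference() pattern, with the mapping as first argument so that the error is integrated over the curved cells. A sketch for the @f$L_2@f$ case (the quadrature rule is an assumption):

    Vector<float> norm_per_cell_p(triangulation.n_active_cells());
    VectorTools::integrate_difference(mapping,
                                      dof_handler,
                                      solution,
                                      ExactSolution<dim>(),
                                      norm_per_cell_p,
                                      QGauss<dim>(fe.degree + 2),
                                      VectorTools::L2_norm);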
The final post-processing operation we do here is to compute an error estimate with the KellyErrorEstimator. We use exactly the same settings as in the @ref step_6 "step-6" tutorial program, except for the fact that we also hand in the mapping to ensure that errors are evaluated along the curved element, consistent with the remainder of the program. However, we do not really use the result here to drive a mesh adaptation step (that would refine the mesh around the material interface along the sphere), as the focus here is on the cost of this operation.

      estimated_error_per_cell);
    std::cout << " Max cell-wise error estimate: "
              << estimated_error_per_cell.linfty_norm() << std::endl;
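The estimator call itself is elided above; a sketch of the KellyErrorEstimator::estimate() invocation with the mapping as first argument (the face quadrature degree is an assumption):

    Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
    KellyErrorEstimator<dim>::estimate(mapping,
                                       dof_handler,
                                       QGauss<dim - 1>(fe.degree + 1),
                                       {}, // no Neumann boundary conditions
                                       solution,
                                       estimated_error_per_cell);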
<a name="step_65-ThePoissonProblemrunfunction"></a>
<h3>The PoissonProblem::run() function</h3>
Finally, we define the `run()` function that controls how we want to execute this program (which is called by the main() function in the usual way). We start by calling the `create_grid()` function that sets up our geometry with the appropriate manifolds. We then run two instances of a solver chain, starting from the setup of the equations, the assembly of the linear system, its solution with a simple iterative solver, and the postprocessing discussed above. The two instances differ in the way they use the mapping. The first uses a conventional MappingQ mapping object which we initialize to a degree one more than we use for the finite element – after all, we expect the geometry representation to be the bottleneck as the analytic solution is only a quadratic polynomial. (In reality, things are interlinked to quite some extent because the evaluation of the polynomials in real coordinates involves the mapping of higher-degree polynomials, which represent some smooth rational functions. As a consequence, higher-degree polynomials still pay off, so it does not make sense to increase the degree of the mapping further.) Once the first pass is completed, we let the timer print a summary of the compute times of the individual stages.
  template <int dim>
  void PoissonProblem<dim>::run()

      std::cout << std::endl
                << "====== Running with the basic MappingQ class ====== "

      setup_system(mapping);
      assemble_system(mapping);
      postprocess(mapping);
      timer.print_summary();
The second pass instead uses the MappingQCache class. Its use is very simple: After constructing it (with the degree, given that we want it to show the correct degree functionality in other contexts), we fill the cache via the MappingQCache::initialize() function. At this stage, we specify which mapping we want to use (obviously, the same MappingQ as previously in order to repeat the same computations) for the cache, and then run through the same functions again, now handing in the modified mapping. In the end, we again print the accumulated wall times since the reset to see how the times compare to the original setting.
                << "====== Running with the optimized MappingQCache class ====== "

      std::cout << " Memory consumption cache: "
                << 1e-6 * mapping.memory_consumption() << " MB" << std::endl;

      setup_system(mapping);
      assemble_system(mapping);
      postprocess(mapping);
      timer.print_summary();
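The construction and initialization of the cache are elided above; a minimal sketch of the sequence described in the text, with the degree choice matching the first pass:

    MappingQ<dim>      mapping_q(fe.degree + 1);
    MappingQCache<dim> mapping(fe.degree + 1);
    mapping.initialize(mapping_q, triangulation); // all mapping points are computed once here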
      Step65::PoissonProblem<3> test_program;
      test_program.run();
<a name="step_65-Results"></a><h1>Results</h1>

<a name="step_65-Programoutput"></a><h3>Program output</h3>
If we run the three-dimensional version of this program with polynomials of degree three, we get the following program output:

Scanning dependencies of target step-65
[ 33%] Building CXX object CMakeFiles/step-65.dir/step-65.cc.o
[ 66%] Linking CXX executable step-65
[ 66%] Built target step-65
[100%] Run step-65 with Release configuration

====== Running with the basic MappingQ class ======

 Number of active cells: 6656
 Number of degrees of freedom: 181609
 Number of solver iterations: 285
 L2 error vs exact solution: 8.99339e-08
 H1 error vs exact solution: 6.45341e-06
 Max cell-wise error estimate: 0.00743406
+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |      49.4s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble linear system          |         1 |       5.8s |        12% |
| Compute constraints             |         1 |     0.109s |      0.22% |
| Compute error estimator         |         1 |      16.5s |        33% |
| Compute error norms             |         1 |      9.11s |        18% |
| Solve linear system             |         1 |      9.92s |        20% |
| Write output                    |         1 |      4.85s |       9.8% |
+---------------------------------+-----------+------------+------------+
====== Running with the optimized MappingQCache class ======

 Memory consumption cache: 22.9981 MB
 Number of active cells: 6656
 Number of degrees of freedom: 181609
 Number of solver iterations: 285
 L2 error vs exact solution: 8.99339e-08
 H1 error vs exact solution: 6.45341e-06
 Max cell-wise error estimate: 0.00743406
+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |      18.4s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble linear system          |         1 |      1.44s |       7.8% |
| Compute constraints             |         1 |   0.00336s |         0% |
| Compute error estimator         |         1 |     0.476s |       2.6% |
| Compute error norms             |         1 |     0.505s |       2.7% |
| Initialize mapping cache        |         1 |      4.96s |        27% |
| Solve linear system             |         1 |      9.95s |        54% |
| Write output                    |         1 |     0.875s |       4.8% |
+---------------------------------+-----------+------------+------------+

[100%] Built target run
Before discussing the timings, we look at the memory consumption for the MappingQCache object: Our program prints that it utilizes 23 MB of memory. If we relate this number to the memory consumption of a single (solution or right hand side) vector, which is 1.5 MB (namely, 181,609 elements times 8 bytes per entry in double precision), or to the memory consumed by the system matrix and the sparsity pattern (which is 274 MB), we realize that it is not an overly heavy data structure, given its benefits.
With respect to the timers, we see a clear improvement in the overall run time of the program by a factor of 2.7. If we disregard the iterative solver, which is the same in both cases (and not optimal, given the simple preconditioner we use, and the fact that sparse matrix-vector products waste operations for cubic polynomials), the advantage is a factor of almost 5. This is pretty impressive for a linear stationary problem, and cost savings would indeed be much more prominent for time-dependent and nonlinear problems where assembly is called several times. If we look into the individual components, we get a clearer picture of what is going on and why the cache is so efficient: In the MappingQ case, essentially every operation that involves a mapping takes at least 5 seconds to run. The norm computation runs two VectorTools::integrate_difference() calls, which each take around five seconds. (The computation of constraints is cheaper because it only evaluates the mapping in cells at the boundary for the interpolation of boundary conditions.) If we compare these 5 seconds to the time it takes to fill the MappingQCache, which is 5.2 seconds (for all cells, not just the active ones), it becomes obvious that the computation of the mapping support points dominates over everything else in the MappingQ case. Perhaps the most striking result is the time for the error estimator, labeled "Compute error estimator", where the MappingQ implementation takes 17.3 seconds and the MappingQCache variant less than 0.5 seconds. The reason why the former is so expensive (three times more expensive than the assembly, for instance) is that the error estimation involves evaluation of quantities over faces, where each face in the mesh requests additional points of the mapping that in turn need to go through the expensive TransfiniteInterpolationManifold computation; as there are six faces per cell, this happens much more often than in assembly. Again, MappingQCache nicely eliminates the repeated evaluation, aggregating all the expensive steps involving the manifold in a single initialization call that gets repeatedly used.
<a name="step_65-PlainProg"></a>
<h1> The plain program</h1>
@include "step-65.cc"