  double product = 1;
  for (unsigned int d = 0; d < dim; ++d)
    product *= (p[d] + 1);
<a name="step_27-Implementationofthemainclass"></a>
<h3>Implementation of the main class</h3>

<a name="step_27-LaplaceProblemLaplaceProblemconstructor"></a>
<h4>LaplaceProblem::LaplaceProblem constructor</h4>
The constructor of this class is fairly straightforward. It associates the DoFHandler object with the triangulation, and then sets the maximal polynomial degree to 7 (in 1d and 2d) or 5 (in 3d and higher). We do so because using higher order polynomial degrees becomes prohibitively expensive, especially in higher space dimensions.

Following this, we fill the collections of finite element, and cell and face quadrature objects. We start with quadratic elements, and each quadrature formula is chosen so that it is appropriate for the matching finite element in the collection:
  template <int dim>
  LaplaceProblem<dim>::LaplaceProblem()
    : dof_handler(triangulation)
    , max_degree(dim <= 2 ? 7 : 5)
  {
    for (unsigned int degree = 2; degree <= max_degree; ++degree)
      {
        fe_collection.push_back(FE_Q<dim>(degree));
        quadrature_collection.push_back(QGauss<dim>(degree + 1));
        face_quadrature_collection.push_back(QGauss<dim - 1>(degree + 1));
      }
  }
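Note that because the loop starts at degree two, collection index @f$i@f$ corresponds to polynomial degree @f$i+2@f$. As a quick illustration of this correspondence (a hypothetical check, not part of the program):

@code
// After the constructor has run, collection index i holds FE_Q<dim>(i + 2),
// so the following assertion would hold:
Assert(fe_collection[0].degree == 2, ExcInternalError());
@endcode

This offset matters again later, when we translate a cell's active_fe_index() back into a polynomial degree.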
<a name="step_27-LaplaceProblemLaplaceProblemdestructor"></a>
<h4>LaplaceProblem::~LaplaceProblem destructor</h4>

The destructor is unchanged from what we already did in @ref step_6 "step-6":
  template <int dim>
  LaplaceProblem<dim>::~LaplaceProblem()
  {
    dof_handler.clear();
  }
<a name="step_27-LaplaceProblemsetup_system"></a>
<h4>LaplaceProblem::setup_system</h4>

This function is again a verbatim copy of what we already did in @ref step_6 "step-6". Despite function calls with exactly the same names and arguments, the algorithms used internally are different in some aspects since the dof_handler variable here is in @f$hp@f$-mode.
  template <int dim>
  void LaplaceProblem<dim>::setup_system()
  {
    dof_handler.distribute_dofs(fe_collection);

    solution.reinit(dof_handler.n_dofs());
    system_rhs.reinit(dof_handler.n_dofs());

    constraints.clear();
    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
    VectorTools::interpolate_boundary_values(
      dof_handler, 0, Functions::ZeroFunction<dim>(), constraints);
    constraints.close();

    DynamicSparsityPattern dsp(dof_handler.n_dofs(), dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
    sparsity_pattern.copy_from(dsp);

    system_matrix.reinit(sparsity_pattern);
  }
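The @f$hp@f$-specific behavior is triggered solely by the argument type of the first call above: handing an hp::FECollection to DoFHandler::distribute_dofs() puts the handler into @f$hp@f$-mode. A hypothetical side-by-side, for illustration only (the single-element variant is what step-6 does; it is not part of this program):

@code
dof_handler.distribute_dofs(fe_collection); // hp-mode: each cell uses its own
                                            // element from the collection
// In step-6, the same call received a single element instead:
//   dof_handler.distribute_dofs(fe);       // one FE_Q<dim> for all cells
@endcode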
<a name="step_27-LaplaceProblemassemble_system"></a>
<h4>LaplaceProblem::assemble_system</h4>

This is the function that assembles the global matrix and right hand side vector from the local contributions of each cell. Its main working is as has been described in many of the tutorial programs before. The significant deviations are the ones necessary for <i>hp</i> finite element methods. In particular, we need to use a collection of FEValues objects (implemented through the hp::FEValues class), and we have to eliminate constrained degrees of freedom already when copying local contributions into global objects. Both of these are explained in detail in the introduction of this program.
One other slight complication is the fact that because we use different polynomial degrees on different cells, the matrices and vectors holding local contributions do not have the same size on all cells. At the beginning of the loop over all cells, we therefore each time have to resize them to the correct size (given by <code>dofs_per_cell</code>). Because these classes are implemented in such a way that reducing the size of a matrix or vector does not release the currently allocated memory (unless the new size is zero), the process of resizing at the beginning of the loop will only require re-allocation of memory during the first few iterations. Once we have found a cell with the maximal finite element degree, no more re-allocations will happen because all subsequent <code>reinit</code> calls will only set the size to something that fits the currently allocated memory. This is important since allocating memory is expensive, and doing so every time we visit a new cell would take significant compute time.
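To illustrate this allocation pattern in isolation, here is a minimal standalone sketch (the sizes are made up; this snippet is not part of the tutorial program):

@code
#include <deal.II/lac/full_matrix.h>

void resize_demo()
{
  dealii::FullMatrix<double> cell_matrix;
  cell_matrix.reinit(9, 9); // first visit: allocates storage for 81 entries
  cell_matrix.reinit(4, 4); // smaller size: reuses the existing allocation
  cell_matrix.reinit(9, 9); // back to the maximum: no new allocation needed
}
@endcode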
  template <int dim>
  void LaplaceProblem<dim>::assemble_system()
  {
    hp::FEValues<dim> hp_fe_values(fe_collection,
                                   quadrature_collection,
                                   update_values | update_gradients |
                                     update_quadrature_points |
                                     update_JxW_values);

    RightHandSide<dim> rhs_function;

    FullMatrix<double> cell_matrix;
    Vector<double>     cell_rhs;

    std::vector<types::global_dof_index> local_dof_indices;

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        const unsigned int dofs_per_cell = cell->get_fe().n_dofs_per_cell();

        cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
        cell_matrix = 0;

        cell_rhs.reinit(dofs_per_cell);
        cell_rhs = 0;

        hp_fe_values.reinit(cell);

        const FEValues<dim> &fe_values = hp_fe_values.get_present_fe_values();

        std::vector<double> rhs_values(fe_values.n_quadrature_points);
        rhs_function.value_list(fe_values.get_quadrature_points(), rhs_values);

        for (unsigned int q_point = 0; q_point < fe_values.n_quadrature_points;
             ++q_point)
          for (unsigned int i = 0; i < dofs_per_cell; ++i)
            {
              for (unsigned int j = 0; j < dofs_per_cell; ++j)
                cell_matrix(i, j) +=
                  (fe_values.shape_grad(i, q_point) * // grad phi_i(x_q)
                   fe_values.shape_grad(j, q_point) * // grad phi_j(x_q)
                   fe_values.JxW(q_point));           // dx

              cell_rhs(i) += (fe_values.shape_value(i, q_point) * // phi_i(x_q)
                              rhs_values[q_point] *               // f(x_q)
                              fe_values.JxW(q_point));            // dx
            }

        local_dof_indices.resize(dofs_per_cell);
        cell->get_dof_indices(local_dof_indices);

        constraints.distribute_local_to_global(
          cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
      }
  }
<a name="step_27-LaplaceProblemsolve"></a>
<h4>LaplaceProblem::solve</h4>

The function solving the linear system is entirely unchanged from previous examples. We simply try to reduce the initial residual (which equals the @f$l_2@f$ norm of the right hand side) by a certain factor:
  template <int dim>
  void LaplaceProblem<dim>::solve()
  {
    SolverControl            solver_control(system_rhs.size(),
                                            1e-12 * system_rhs.l2_norm());
    SolverCG<Vector<double>> cg(solver_control);

    PreconditionSSOR<SparseMatrix<double>> preconditioner;
    preconditioner.initialize(system_matrix, 1.2);

    cg.solve(system_matrix, solution, system_rhs, preconditioner);

    constraints.distribute(solution);
  }
<a name="step_27-LaplaceProblempostprocess"></a>
<h4>LaplaceProblem::postprocess</h4>

After solving the linear system, we will want to postprocess the solution. Here, all we do is to estimate the error, estimate the local smoothness of the solution as described in the introduction, then write graphical output, and finally refine the mesh in both @f$h@f$ and @f$p@f$ according to the indicators computed before. We do all this in the same function because we want the estimated error and smoothness indicators not only for refinement, but also to include them in the graphical output.
  template <int dim>
  void LaplaceProblem<dim>::postprocess(const unsigned int cycle)
  {

Let us start with computing estimated error and smoothness indicators, which each are one number for each active cell of our triangulation:

    Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
    KellyErrorEstimator<dim>::estimate(dof_handler,
                                       face_quadrature_collection,
                                       {}, // no Neumann boundary data
                                       solution,
                                       estimated_error_per_cell);
Estimating the smoothness is performed with the method of decaying expansion coefficients as outlined in the introduction. We will first need to create an object capable of transforming the finite element solution on every single cell into a sequence of Fourier series coefficients. The SmoothnessEstimator namespace offers a factory function for such a FESeries::Fourier object that is optimized for the process of estimating smoothness. The actual determination of the decay of Fourier coefficients on every individual cell then happens in the last function:

    Vector<float>          smoothness_indicators(triangulation.n_active_cells());
    FESeries::Fourier<dim> fourier =
      SmoothnessEstimator::Fourier::default_fe_series(fe_collection);
    SmoothnessEstimator::Fourier::coefficient_decay(fourier,
                                                    dof_handler,
                                                    solution,
                                                    smoothness_indicators);
Next we want to generate graphical output. In addition to the two estimated quantities derived above, we would also like to output the polynomial degree of the finite elements used on each of the elements of the mesh.

The way to do that requires that we loop over all cells and poll the active finite element index of them using <code>cell-@>active_fe_index()</code>. We then use the result of this operation and query the finite element collection for the finite element with that index, and finally determine the polynomial degree of that element. The result we put into a vector with one element per cell. The DataOut class requires this to be a vector of <code>float</code> or <code>double</code>, even though our values are all integers, so that is what we use:
    Vector<float> fe_degrees(triangulation.n_active_cells());
    for (const auto &cell : dof_handler.active_cell_iterators())
      fe_degrees(cell->active_cell_index()) =
        fe_collection[cell->active_fe_index()].degree;
With now all data vectors available -- solution, estimated errors and smoothness indicators, and finite element degrees -- we create a DataOut object for graphical output and attach all data:

    DataOut<dim> data_out;

    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(solution, "solution");
    data_out.add_data_vector(estimated_error_per_cell, "error");
    data_out.add_data_vector(smoothness_indicators, "smoothness");
    data_out.add_data_vector(fe_degrees, "fe_degree");
    data_out.build_patches();
The final step in generating output is to determine a file name, open the file, and write the data into it (here, we use VTK format):

    const std::string filename =
      "solution-" + Utilities::int_to_string(cycle, 2) + ".vtk";
    std::ofstream output(filename);
    data_out.write_vtk(output);
After this, we would like to actually refine the mesh, in both @f$h@f$ and @f$p@f$. The way we are going to do this is as follows: first, we use the estimated error to flag those cells for refinement that have the largest error. This is what we have always done:

    GridRefinement::refine_and_coarsen_fixed_number(triangulation,
                                                    estimated_error_per_cell,
                                                    0.3,
                                                    0.03);
Next we would like to figure out which of the cells that have been flagged for refinement should actually have @f$p@f$ increased instead of @f$h@f$ decreased. The strategy we choose here is that we look at the smoothness indicators of those cells that are flagged for refinement, and increase @f$p@f$ for those with a smoothness larger than a certain relative threshold. In other words, for every cell for which (i) the refinement flag is set, (ii) the smoothness indicator is larger than the threshold, and (iii) we still have a finite element with a polynomial degree higher than the current one in the finite element collection, we will assign a future FE index that corresponds to a polynomial with degree one higher than it currently is. The following function is capable of doing exactly this. Absent any better strategies, we will set the threshold via interpolation between the minimal and maximal smoothness indicators on cells flagged for refinement. Since the corner singularities are strongly localized, we will favor @f$p@f$- over @f$h@f$-refinement quantitatively. We achieve this with a low threshold by setting a small interpolation factor of 0.2. In the same way, we deal with cells that are going to be coarsened and decrease their polynomial degree when their smoothness indicator is below the corresponding threshold determined on cells to be coarsened:

    hp::Refinement::p_adaptivity_from_relative_threshold(
      dof_handler, smoothness_indicators, 0.2, 0.2);
The above function only determines whether the polynomial degree will change via future FE indices, but does not manipulate the @f$h@f$-refinement flags. So for cells that are flagged for both refinement categories, we prefer @f$p@f$- over @f$h@f$-refinement. The following function call ensures that only one of @f$p@f$- or @f$h@f$-refinement is imposed, and not both at once:

    hp::Refinement::choose_p_over_h(dof_handler);
For grid adaptive refinement, we ensure a 2:1 mesh balance by limiting the difference of refinement levels of neighboring cells to one by calling Triangulation::prepare_coarsening_and_refinement(). We would like to achieve something similar for the p-levels of neighboring cells: levels of future finite elements are not allowed to differ by more than a specified difference. With its default parameters, a call to hp::Refinement::limit_p_level_difference() ensures that their difference is limited to one. This will not necessarily decrease the number of hanging nodes in the domain, but makes sure that high order polynomials are not constrained to much lower polynomials on faces, e.g., fifth order to second order polynomials:

    hp::Refinement::limit_p_level_difference(dof_handler);
At the end of this procedure, we then refine the mesh. During this process, children of cells undergoing bisection inherit their mother cell's finite element index. Further, future finite element indices will turn into active ones, so that the new finite elements will be assigned to cells after the next call of DoFHandler::distribute_dofs():

    triangulation.execute_coarsening_and_refinement();
  }
<a name="step_27-LaplaceProblemcreate_coarse_grid"></a>
<h4>LaplaceProblem::create_coarse_grid</h4>

The following function is used when creating the initial grid. The grid we would like to create is actually similar to the one from @ref step_14 "step-14", i.e., the square domain with the square hole in the middle. It can be generated by exactly the same function. However, since its implementation is only a specialization of the 2d case, we will present a different way of creating this domain which is dimension independent: we first create a subdivided hypercube triangulation that already holds our desired domain @f$[-1,1]^d@f$, subdivided into @f$4^d@f$ cells. We then remove those cells in the center of the domain by testing the coordinate values of the vertices on each cell. In the end, we refine the so created grid globally as usual.
  template <int dim>
  void LaplaceProblem<dim>::create_coarse_grid()
  {
    Triangulation<dim> cube;
    GridGenerator::subdivided_hyper_cube(cube, 4, -1., 1.);

    std::set<typename Triangulation<dim>::active_cell_iterator>
      cells_to_remove;
    for (const auto &cell : cube.active_cell_iterators())
      for (unsigned int v = 0; v < GeometryInfo<dim>::vertices_per_cell; ++v)
        if (cell->vertex(v).square() < .1)
          cells_to_remove.insert(cell);

    GridGenerator::create_triangulation_with_removed_cells(cube,
                                                           cells_to_remove,
                                                           triangulation);
    triangulation.refine_global(3);
  }
<a name="step_27-LaplaceProblemrun"></a>
<h4>LaplaceProblem::run</h4>

This function implements the logic of the program, as did the respective function in most of the previous programs already, see for example @ref step_6 "step-6".

Basically, it contains the adaptive loop: in the first iteration create a coarse grid, and then set up the linear system, assemble it, solve, and postprocess the solution including mesh refinement. Then start over again. In the meantime, also output some information for those staring at the screen trying to figure out what the program does:
  template <int dim>
  void LaplaceProblem<dim>::run()
  {
    for (unsigned int cycle = 0; cycle < 6; ++cycle)
      {
        std::cout << "Cycle " << cycle << ':' << std::endl;

        if (cycle == 0)
          create_coarse_grid();

        setup_system();

        std::cout << "   Number of active cells      : "
                  << triangulation.n_active_cells() << std::endl
                  << "   Number of degrees of freedom: " << dof_handler.n_dofs()
                  << std::endl
                  << "   Number of constraints       : "
                  << constraints.n_constraints() << std::endl;

        assemble_system();
        solve();
        postprocess(cycle);
      }
  }
<a name="step_27-Themainfunction"></a>
<h3>The main function</h3>

The main function is again verbatim what we had before: wrap creating and running an object of the main class into a <code>try</code> block and catch whatever exceptions are thrown, thereby producing meaningful output if anything should go wrong:
int main()
{
  try
    {
      using namespace Step27;

      LaplaceProblem<2> laplace_problem;
      laplace_problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
<a name="step_27-Results"></a><h1>Results</h1>

In this section, we discuss a few results produced from running the current tutorial program. More results, in particular the extension to 3d calculations and determining how much compute time the individual components of the program take, are given in the @ref hp_paper "hp-paper".
When run, this is what the program produces:

[ 66%] Built target step-27
[100%] Run step-27 with Release configuration
Cycle 0:
   Number of active cells      : 768
   Number of degrees of freedom: 3264
   Number of constraints       : 384
Cycle 1:
   Number of active cells      : 807
   Number of degrees of freedom: 4764
   Number of constraints       : 756
Cycle 2:
   Number of active cells      : 927
   Number of degrees of freedom: 8226
   Number of constraints       : 1856
Cycle 3:
   Number of active cells      : 978
   Number of degrees of freedom: 12146
   Number of constraints       : 2944
Cycle 4:
   Number of active cells      : 1104
   Number of degrees of freedom: 16892
   Number of constraints       : 3998
Cycle 5:
   Number of active cells      : 1149
   Number of degrees of freedom: 22078
   Number of constraints       : 5230
The first thing we learn from this is that the number of constrained degrees of freedom is on the order of 20-25% of the total number of degrees of freedom, at least on the later grids when we have elements of relatively high order (in 3d, the fraction of constrained degrees of freedom can be up to 30%). This is, in fact, on the same order of magnitude as for non-@f$hp@f$-discretizations. For example, in the last step of the @ref step_6 "step-6" program, we have 18353 degrees of freedom, 4432 of which are constrained. The difference is that in the latter program, each constrained hanging node is constrained against only the two adjacent degrees of freedom, whereas in the @f$hp@f$-case, constrained nodes are constrained against many more degrees of freedom. Note also that the current program also includes nodes subject to Dirichlet boundary conditions in the list of constraints. In cycle 0, all the constraints are actually due to boundary conditions, since the initial mesh has no hanging nodes.
Of perhaps more interest is the graphical output. First, here is the solution of the problem:

<img src="https://www.dealii.org/images/steps/developer/step-27-solution.png"
     alt="Elevation plot of the solution, showing the lack of regularity near
          the interior (reentrant) corners."
     width="200" height="200">
Secondly, let us look at the sequence of meshes generated:

<div class="threecolumn" style="width: 80%">
  <img src="https://www.dealii.org/images/steps/developer/step-27.mesh-00.svg"
       alt="Triangulation containing reentrant corners without adaptive refinement."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.mesh-01.svg"
       alt="Triangulation containing reentrant corners with one level of
            refinement. New cells are placed near the corners."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.mesh-02.svg"
       alt="Triangulation containing reentrant corners with two levels of
            refinement. New cells are placed near the corners."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.mesh-03.svg"
       alt="Triangulation containing reentrant corners with three levels of
            refinement. New cells are placed near the corners."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.mesh-04.svg"
       alt="Triangulation containing reentrant corners with four levels of
            refinement. New cells are placed near the corners."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.mesh-05.svg"
       alt="Triangulation containing reentrant corners with five levels of
            refinement. New cells are placed near the corners."
       width="200" height="200">
</div>
It is clearly visible how the mesh is refined near the corner singularities, as one would expect. More interestingly, we should be curious to see the distribution of finite element polynomial degrees to these mesh cells, where the lightest color corresponds to degree two and the darkest one corresponds to degree seven:
<div class="threecolumn" style="width: 80%">
  <img src="https://www.dealii.org/images/steps/developer/step-27.fe_degree-00.svg"
       alt="Initial grid where all cells contain just biquadratic functions."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.fe_degree-01.svg"
       alt="Depiction of local approximation degrees after one refinement."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.fe_degree-02.svg"
       alt="Depiction of local approximation degrees after two refinements."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.fe_degree-03.svg"
       alt="Depiction of local approximation degrees after three refinements."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.fe_degree-04.svg"
       alt="Depiction of local approximation degrees after four refinements."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.fe_degree-05.svg"
       alt="Depiction of local approximation degrees after five refinements."
       width="200" height="200">
</div>
While this is certainly not a perfect arrangement, it does make some sense: we use low order elements close to boundaries and corners where regularity is low. On the other hand, higher order elements are used where (i) the error was at one point fairly large, i.e., mainly in the general area around the corner singularities and in the top right corner where the solution is large, and (ii) where the solution is smooth, i.e., far away from the boundary.

This arrangement of polynomial degrees of course follows from our smoothness estimator. Here is the estimated smoothness of the solution, with darker colors indicating least smoothness and lighter indicating the smoothest areas:

<div class="threecolumn" style="width: 80%">
  <img src="https://www.dealii.org/images/steps/developer/step-27.smoothness-00.svg"
       alt="Estimated regularity per cell on the initial grid."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.smoothness-01.svg"
       alt="Depiction of the estimated regularity per cell after one refinement."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.smoothness-02.svg"
       alt="Depiction of the estimated regularity per cell after two refinements."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.smoothness-03.svg"
       alt="Depiction of the estimated regularity per cell after three refinements."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.smoothness-04.svg"
       alt="Depiction of the estimated regularity per cell after four refinements."
       width="200" height="200">
  <img src="https://www.dealii.org/images/steps/developer/step-27.smoothness-05.svg"
       alt="Depiction of the estimated regularity per cell after five refinements."
       width="200" height="200">
</div>
The primary conclusion one can draw from this is that the loss of regularity at the internal corners is a highly localized phenomenon; it only seems to impact the cells adjacent to the corner itself, so when we refine the mesh the black coloring is no longer visible. Besides the corners, this sequence of plots implies that the smoothness estimates are somewhat independent of the mesh refinement, particularly when we are far away from boundaries. It is also obvious that the smoothness estimates are independent of the actual size of the solution (see the picture of the solution above), as it should be. A point of larger concern, however, is that one realizes on closer inspection that the estimator we have overestimates the smoothness of the solution on cells with hanging nodes. This in turn leads to higher polynomial degrees in these areas, skewing the allocation of finite elements onto cells.

We have no good explanation for this effect at the moment. One theory is that the numerical solution on cells with hanging nodes is, of course, constrained and therefore not entirely free to explore the function space to get close to the exact solution. This lack of degrees of freedom may manifest itself by yielding numerical solutions on these cells with suppressed oscillation, meaning a higher degree of smoothness. The estimator picks this signal up and the estimated smoothness overestimates the actual value. However, a definite answer to what is going on currently eludes the authors of this program.
The bigger question is, of course, how to avoid this problem. Possibilities include estimating the smoothness not on single cells, but on cell assemblies or patches surrounding each cell. It may also be possible to find simple correction factors for each cell depending on the number of constrained degrees of freedom it has. In either case, there are ample opportunities for further research on finding good @f$hp@f$-refinement criteria. On the other hand, the main point of the current program was to demonstrate using the @f$hp@f$-technology in deal.II, which is unaffected by our use of a possibly sub-optimal refinement criterion.
<a name="step-27-extensions"></a>
<a name="step_27-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

<a name="step_27-Differenthpdecisionstrategies"></a><h4>Different hp-decision strategies</h4>
This tutorial demonstrates only one particular strategy to decide between @f$h@f$- and @f$p@f$-adaptation. In fact, there are many more ways to automatically decide on the adaptation type, of which a few are already implemented in deal.II:

<ul>
  <li><i>Fourier coefficient decay:</i> This is the strategy currently implemented in this tutorial. For more information on this strategy, see the documentation of the SmoothnessEstimator::Fourier namespace.</li>
  <li><i>Legendre coefficient decay:</i> This strategy is quite similar to the current one, but uses Legendre series expansion rather than the Fourier one: instead of sinusoids as basis functions, this strategy uses Legendre polynomials. Of course, since we approximate the solution using a finite-dimensional polynomial on each cell, the expansion of the solution in Legendre polynomials is also finite and, consequently, when we talk about the "decay" of this expansion, we can only consider the finitely many nonzero coefficients of this expansion, rather than think about it in asymptotic terms. But, if we have enough of these coefficients, we can certainly think of the decay of these coefficients as characteristic of the decay of the coefficients of the exact solution (which is, in general, not polynomial and so will have an infinite Legendre expansion), and considering the coefficients we have should reveal something about the properties of the exact solution.

  The transition from the Fourier strategy to the Legendre one is quite simple: You just need to change the series expansion class and the corresponding smoothness estimation function to be part of the proper namespaces FESeries::Legendre and SmoothnessEstimator::Legendre (a sketch of the changed calls is shown after this list). This strategy is used in @ref step_75 "step-75". For the theoretical background of this strategy, consult the general documentation of the SmoothnessEstimator namespace, as well as @cite mavriplis1994hp , @cite eibner2007hp and @cite davydov2017hp .</li>
  <li><i>Refinement history:</i> The last strategy is quite different from the other two. In theory, we know how the error will converge after changing the discretization of the function space. With @f$h@f$-refinement the solution converges algebraically as already pointed out in @ref step_7 "step-7". If the solution is sufficiently smooth, though, we expect that the solution will converge exponentially with increasing polynomial degree of the finite element. We can compare a proper prediction of the error with the actual error in the following step to see if our choice of adaptation type was justified.

  The transition to this strategy is a bit more complicated. For this, we need an initialization step with pure @f$h@f$- or @f$p@f$-refinement and we need to transfer the predicted errors over adapted meshes. The extensive documentation of hp::Refinement::predict_error() describes not only the theoretical details of this approach, but also presents a blueprint on how to implement this strategy in your code (see also the sketch below this list). For more information, see @cite melenk2001hp .

  Note that with this particular function you cannot predict the error for the next time step in time-dependent problems. Therefore, this strategy cannot be applied to this type of problem without further ado. Alternatively, the following approach could be used, which works for all the other strategies as well: start each time step with a coarse mesh, keep refining until happy with the result, and only then move on to the next time step.</li>
</ul>
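To make the Legendre variant from the second item concrete, here is a minimal sketch of how the two smoothness-estimation calls in <code>postprocess()</code> would change. We assume the same member variables as in this program, and simply swap the Fourier classes and namespaces for their Legendre counterparts:

@code
Vector<float>           smoothness_indicators(triangulation.n_active_cells());
FESeries::Legendre<dim> legendre =
  SmoothnessEstimator::Legendre::default_fe_series(fe_collection);
SmoothnessEstimator::Legendre::coefficient_decay(legendre,
                                                 dof_handler,
                                                 solution,
                                                 smoothness_indicators);
@endcode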
Try implementing one of these strategies into this tutorial and observe the subtle changes to the results. You will notice that all strategies are capable of identifying the singularities near the reentrant corners and will perform @f$h@f$-refinement in these regions, while preferring @f$p@f$-refinement in the bulk domain. A detailed comparison of these strategies is presented in @cite fehling2020 .
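For the refinement-history strategy from the list above, the entry point is hp::Refinement::predict_error(). A minimal sketch of a single prediction step, assuming the error indicators from this program and leaving out the transfer of predicted errors across mesh changes:

@code
// Predict how the error indicators will change under the currently set
// refinement flags and future FE indices:
Vector<float> predicted_errors(triangulation.n_active_cells());
hp::Refinement::predict_error(dof_handler,
                              estimated_error_per_cell,
                              predicted_errors);
// After refining and solving again, comparing predicted_errors with the
// newly estimated errors tells us whether the h/p decision was justified.
@endcode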
<a name="step_27-Parallelhpadaptivefiniteelements"></a><h4>Parallel hp-adaptive finite elements</h4>

All functionality presented in this tutorial already works for both sequential and parallel applications. It is possible without too much effort to change to either the parallel::shared::Triangulation or the parallel::distributed::Triangulation classes. If you feel eager to try it, we recommend reading @ref step_18 "step-18" for the former and @ref step_40 "step-40" for the latter case first for further background information on the topic, and then come back to this tutorial to try out your newly acquired skills.

We go one step further in @ref step_75 "step-75": Here, we combine hp-adaptive and MatrixFree methods in combination with parallel::distributed::Triangulation objects.
<a name="step_27-PlainProg"></a>
<h1> The plain program</h1>
@include "step-27.cc"