745 * std::ofstream out(output_name_base + "-" +
746 * std::to_string(this->refinement_cycle) + ".svg");
755 * <a name="TheLaplacesolverclasses"></a>
756 * <h3>The Laplace solver classes</h3>
760 * Next are the actual solver classes. Again, we discuss only the
761 * differences from the previous program.
764 * namespace LaplaceSolver
769 * <a name="TheLaplacesolverbaseclass"></a>
770 * <h4>The Laplace solver base class</h4>
774 * This class is almost unchanged, with the exception that it declares two
775 * more functions: <code>output_solution</code> will be used to generate
776 * output files from the actual solutions computed by derived classes, and
777 * the <code>set_refinement_cycle</code> function by which the testing
778 * framework sets the number of the refinement cycle to a local variable
779 * in this class; this number is later used to generate filenames for the
788 * virtual ~Base() = default;
790 * virtual void solve_problem() = 0;
791 * virtual void postprocess(
792 * const Evaluation::EvaluationBase<dim> &postprocessor) const = 0;
793 * virtual void refine_grid() = 0;
794 * virtual unsigned int n_dofs() const = 0;
796 * virtual void set_refinement_cycle(const unsigned int cycle);
798 * virtual void output_solution() const = 0;
803 * unsigned int refinement_cycle;
816 * void Base<dim>::set_refinement_cycle(const unsigned int cycle)
818 * refinement_cycle = cycle;
825 * <a name="TheLaplaceSolverclass"></a>
826 * <h4>The Laplace Solver class</h4>
830 * Likewise, the <code>Solver</code> class is entirely unchanged and will
831 * thus not be discussed.
835 * class Solver : public virtual Base<dim>
843 * virtual ~Solver() override;
845 * virtual void solve_problem() override;
847 * virtual void postprocess(
848 * const Evaluation::EvaluationBase<dim> &postprocessor) const override;
850 * virtual unsigned int n_dofs() const override;
863 * struct LinearSystem
878 * The remainder of the class is essentially a copy of @ref step_13 "step-13"
879 * as well, including the data structures and functions
880 * necessary to compute the linear system in parallel using the
884 * struct AssemblyScratchData
888 * AssemblyScratchData(const AssemblyScratchData &scratch_data);
893 * struct AssemblyCopyData
896 * std::vector<types::global_dof_index> local_dof_indices;
900 * void assemble_linear_system(LinearSystem &linear_system);
902 * void local_assemble_matrix(
904 * AssemblyScratchData &scratch_data,
905 * AssemblyCopyData &copy_data) const;
908 * void copy_local_to_global(const AssemblyCopyData &copy_data,
909 * LinearSystem &linear_system) const;
922 * , quadrature(&quadrature)
923 * , face_quadrature(&face_quadrature)
925 * , boundary_values(&boundary_values)
930 * Solver<dim>::~Solver()
932 * dof_handler.clear();
937 * void Solver<dim>::solve_problem()
939 * dof_handler.distribute_dofs(*fe);
940 * solution.reinit(dof_handler.n_dofs());
942 * LinearSystem linear_system(dof_handler);
943 * assemble_linear_system(linear_system);
944 * linear_system.solve(solution);
949 * void Solver<dim>::postprocess(
950 * const Evaluation::EvaluationBase<dim> &postprocessor) const
952 * postprocessor(dof_handler, solution);
957 * unsigned int Solver<dim>::n_dofs() const
959 * return dof_handler.n_dofs();
965 * The following few functions and constructors are verbatim
966 * copies taken from @ref step_13 "step-13":
970 * void Solver<dim>::assemble_linear_system(LinearSystem &linear_system)
977 * AssemblyScratchData &scratch_data,
978 * AssemblyCopyData & copy_data) {
979 * this->local_assemble_matrix(cell, scratch_data, copy_data);
982 * auto copier = [this, &linear_system](const AssemblyCopyData &copy_data) {
983 * this->copy_local_to_global(copy_data, linear_system);
990 * AssemblyScratchData(*fe, *quadrature),
991 * AssemblyCopyData());
992 * linear_system.hanging_node_constraints.condense(linear_system.matrix);
994 * std::map<types::global_dof_index, double> boundary_value_map;
998 * boundary_value_map);
1001 * linear_system.hanging_node_constraints.condense(linear_system.rhs);
1004 * linear_system.matrix,
1006 * linear_system.rhs);
1010 * template <int dim>
1011 * Solver<dim>::AssemblyScratchData::AssemblyScratchData(
1018 * template <int dim>
1019 * Solver<dim>::AssemblyScratchData::AssemblyScratchData(
1020 * const AssemblyScratchData &scratch_data)
1021 * : fe_values(scratch_data.fe_values.get_fe(),
1022 * scratch_data.fe_values.get_quadrature(),
1027 * template <int dim>
1028 * void Solver<dim>::local_assemble_matrix(
1030 * AssemblyScratchData &scratch_data,
1031 * AssemblyCopyData &copy_data) const
1033 * const unsigned int dofs_per_cell = fe->n_dofs_per_cell();
1034 * const unsigned int n_q_points = quadrature->size();
1036 * copy_data.cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
1038 * copy_data.local_dof_indices.resize(dofs_per_cell);
1040 * scratch_data.fe_values.reinit(cell);
1042 * for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1043 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1044 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1045 * copy_data.cell_matrix(i, j) +=
1046 * (scratch_data.fe_values.shape_grad(i, q_point) *
1047 * scratch_data.fe_values.shape_grad(j, q_point) *
1048 * scratch_data.fe_values.JxW(q_point));
1050 * cell->get_dof_indices(copy_data.local_dof_indices);
1055 * template <int dim>
1056 * void Solver<dim>::copy_local_to_global(const AssemblyCopyData &copy_data,
1057 * LinearSystem &linear_system) const
1059 * for (unsigned int i = 0; i < copy_data.local_dof_indices.size(); ++i)
1060 * for (unsigned int j = 0; j < copy_data.local_dof_indices.size(); ++j)
1061 * linear_system.matrix.add(copy_data.local_dof_indices[i],
1062 * copy_data.local_dof_indices[j],
1063 * copy_data.cell_matrix(i, j));
1069 * Now for the functions that implement actions in the linear
1070 * system class. First, the constructor initializes all data
1071 * elements to their correct sizes, and sets up a number of
1072 * additional data structures, such as constraints due to hanging
1073 * nodes. Since setting up the hanging nodes and finding out about
1074 * the nonzero elements of the matrix are independent, we do that
1075 * in parallel (if the library was configured to use concurrency,
1076 * at least; otherwise, the actions are performed
1077 * sequentially). Note that we start only one thread, and do the
1078 * second action in the main thread. Since only one thread is
1079 * generated, we don't use the <code>Threads::TaskGroup</code>
1080 * class here, but rather use the one created task object
1081 * directly to wait for this particular task's exit. The
1082 * approach is generally the same as the one we have used in
1083 * <code>Solver::assemble_linear_system()</code> above.
1087 * Note that taking the address of the
1089 * is a little tricky, since there are actually three functions of
1090 * this name, one for each supported space dimension. Taking
1091 * addresses of overloaded functions is somewhat complicated in
1092 * C++, since the address-of operator <code>&</code> in that case
1093 * returns a set of values (the addresses of all
1094 * functions with that name), and selecting the right one is then
1095 * the next step. If the context dictates which one to take (for
1096 * example by assigning to a function pointer of known type), then
1097 * the compiler can do that by itself, but if this set of pointers
1098 * is to be given as the argument to a function that takes a
1099 * template, the compiler could choose all without having a
1100 * preference for one. We therefore have to make it clear to the
1101 * compiler which one we would like to have; for this, we could
1102 * use a cast, but for more clarity, we assign it to a temporary
1103 * <code>mhnc_p</code> (short for <code>pointer to
1105 * using this pointer instead.
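 *
 * As a minimal sketch of the approach just described (assuming deal.II's
 * DoFTools::make_hanging_node_constraints, Threads::new_task and
 * AffineConstraints interfaces; the constructor that follows uses the same
 * pattern):
 * @code
 *   // Select the desired instantiation by assigning to a function pointer
 *   // of known type ...
 *   void (*mhnc_p)(const DoFHandler<dim> &, AffineConstraints<double> &) =
 *     &DoFTools::make_hanging_node_constraints;
 *
 *   // ... run it as a side task, build the sparsity pattern on the main
 *   // thread in the meantime, and wait for the side task before using the
 *   // constraints object:
 *   Threads::Task<void> side_task =
 *     Threads::new_task(mhnc_p, dof_handler, hanging_node_constraints);
 *
 *   DynamicSparsityPattern dsp(dof_handler.n_dofs(), dof_handler.n_dofs());
 *   DoFTools::make_sparsity_pattern(dof_handler, dsp);
 *
 *   side_task.join();
 * @endcode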
1108 * template <int dim>
1109 * Solver<dim>::LinearSystem::LinearSystem(const DoFHandler<dim> &dof_handler)
1111 * hanging_node_constraints.clear();
1118 * Start a side task then continue on the main thread
1131 * Wait for the side task to be done before going further
1136 * hanging_node_constraints.close();
1137 * hanging_node_constraints.condense(dsp);
1138 * sparsity_pattern.copy_from(dsp);
1140 * matrix.reinit(sparsity_pattern);
1141 * rhs.reinit(dof_handler.n_dofs());
1146 * template <int dim>
1147 * void Solver<dim>::LinearSystem::solve(Vector<double> &solution) const
1155 * cg.solve(matrix, solution, rhs, preconditioner);
1157 * hanging_node_constraints.distribute(solution);
1165 * <a name="ThePrimalSolverclass"></a>
1166 * <h4>The PrimalSolver class</h4>
1170 * The <code>PrimalSolver</code> class is also mostly unchanged except for
1171 * implementing the <code>output_solution</code> function. We keep the
1172 * <code>GlobalRefinement</code> and <code>RefinementKelly</code> classes
1173 * in this program, and they can then rely on the default implementation
1174 * of this function which simply outputs the primal solution. The class
1175 * implementing dual weighted error estimators will override this function
1176 * itself, to also output the dual solution.
1179 * template <int dim>
1180 * class PrimalSolver : public Solver<dim>
1190 * virtual void output_solution() const override;
1198 * template <int dim>
1211 * , rhs_function(&rhs_function)
1216 * template <int dim>
1217 * void PrimalSolver<dim>::output_solution() const
1221 * data_out.add_data_vector(this->solution, "solution");
1222 * data_out.build_patches();
1224 * std::ofstream out("solution-" + std::to_string(this->refinement_cycle) +
1231 * template <int dim>
1232 * void PrimalSolver<dim>::assemble_rhs(Vector<double> &rhs) const
1235 * *this->quadrature,
1239 * const unsigned int dofs_per_cell = this->fe->n_dofs_per_cell();
1240 * const unsigned int n_q_points = this->quadrature->size();
1243 * std::vector<double> rhs_values(n_q_points);
1244 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1246 * for (const auto &cell : this->dof_handler.active_cell_iterators())
1250 * fe_values.reinit(cell);
1252 * rhs_function->value_list(fe_values.get_quadrature_points(),
1255 * for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1256 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1257 * cell_rhs(i) += (fe_values.shape_value(i, q_point) *
1258 * rhs_values[q_point] *
1259 * fe_values.JxW(q_point));
1261 * cell->get_dof_indices(local_dof_indices);
1262 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1263 * rhs(local_dof_indices[i]) += cell_rhs(i);
1271 * <a name="TheRefinementGlobalandRefinementKellyclasses"></a>
1272 * <h4>The RefinementGlobal and RefinementKelly classes</h4>
1276 * For the following two classes, the same applies as for most of the
1277 * above: the class is taken from the previous example as-is:
1280 * template <int dim>
1281 * class RefinementGlobal : public PrimalSolver<dim>
1291 * virtual void refine_grid() override;
1296 * template <int dim>
1297 * RefinementGlobal<dim>::RefinementGlobal(
1304 * : Base<dim>(coarse_grid)
1305 * , PrimalSolver<dim>(coarse_grid,
1315 * template <int dim>
1316 * void RefinementGlobal<dim>::refine_grid()
1323 * template <int dim>
1324 * class RefinementKelly : public PrimalSolver<dim>
1334 * virtual void refine_grid() override;
1339 * template <int dim>
1340 * RefinementKelly<dim>::RefinementKelly(
1347 * : Base<dim>(coarse_grid)
1348 * , PrimalSolver<dim>(coarse_grid,
1358 * template <int dim>
1359 * void RefinementKelly<dim>::refine_grid()
1364 * this->dof_handler,
1368 * estimated_error_per_cell);
1370 * estimated_error_per_cell,
1381 * <a name="TheRefinementWeightedKellyclass"></a>
1382 * <h4>The RefinementWeightedKelly class</h4>
1386 * This class is a variant of the previous one, in that it allows us to
1387 * weight the refinement indicators we get from the library's Kelly
1388 * indicator by some function. We include this class since the goal of
1389 * this example program is to demonstrate automatic refinement criteria
1390 * even for complex output quantities such as point values or stresses. If
1391 * we did not solve a dual problem and compute the weights thereof, we
1392 * would probably be tempted to give a hand-crafted weighting to the
1393 * indicators to account for the fact that we are going to evaluate these
1394 * quantities. This class accepts such a weighting function as argument to
1398 * template <int dim>
1399 * class RefinementWeightedKelly : public PrimalSolver<dim>
1402 * RefinementWeightedKelly(Triangulation<dim> & coarse_grid,
1403 * const FiniteElement<dim> & fe,
1404 * const Quadrature<dim> & quadrature,
1405 * const Quadrature<dim - 1> &face_quadrature,
1406 * const Function<dim> & rhs_function,
1407 * const Function<dim> & boundary_values,
1408 * const Function<dim> & weighting_function);
1410 * virtual void refine_grid() override;
1413 * const SmartPointer<const Function<dim>> weighting_function;
1418 * template <int dim>
1419 * RefinementWeightedKelly<dim>::RefinementWeightedKelly(
1420 * Triangulation<dim> & coarse_grid,
1421 * const FiniteElement<dim> & fe,
1422 * const Quadrature<dim> & quadrature,
1423 * const Quadrature<dim - 1> &face_quadrature,
1424 * const Function<dim> & rhs_function,
1425 * const Function<dim> & boundary_values,
1426 * const Function<dim> & weighting_function)
1427 * : Base<dim>(coarse_grid)
1428 * , PrimalSolver<dim>(coarse_grid,
1434 * , weighting_function(&weighting_function)
1441 * Now, here comes the main function, including the weighting:
1444 * template <int dim>
1445 * void RefinementWeightedKelly<dim>::refine_grid()
1449 * First compute some residual based error indicators for all cells by a
1450 * method already implemented in the library. What exactly we compute
1451 * here is described in more detail in the documentation of that class.
1454 * Vector<float> estimated_error_per_cell(
1455 * this->triangulation->n_active_cells());
1456 * std::map<types::boundary_id, const Function<dim> *> dummy_function_map;
1457 * KellyErrorEstimator<dim>::estimate(this->dof_handler,
1458 * *this->face_quadrature,
1459 * dummy_function_map,
1461 * estimated_error_per_cell);
1465 * Next weigh each entry in the vector of indicators by the value of the
1466 * function given to the constructor, evaluated at the cell center. We
1467 * need to write the result into the vector entry that corresponds to the
1468 * current cell, which we can obtain by asking the cell what its index
1469 * among all active cells is using CellAccessor::active_cell_index(). (In
1470 * reality, this index is zero for the first cell we handle in the loop,
1471 * one for the second cell, etc., and we could as well just keep track of
1472 * this index using an integer counter; but using
1473 * CellAccessor::active_cell_index() makes this more explicit.)
1476 * for (const auto &cell : this->dof_handler.active_cell_iterators())
1477 * estimated_error_per_cell(cell->active_cell_index()) *=
1478 * weighting_function->value(cell->center());
1480 * GridRefinement::refine_and_coarsen_fixed_number(*this->triangulation,
1481 * estimated_error_per_cell,
1484 * this->triangulation->execute_coarsening_and_refinement();
1487 * } // namespace LaplaceSolver
1493 * <a name="Equationdata"></a>
1494 * <h3>Equation data</h3>
1498 * In this example program, we work with the same data sets as in the
1499 * previous one, but as it may so happen that someone wants to run the
1500 * program with different boundary values and right hand side functions, or
1501 * on a different grid, we show a simple technique to do exactly that. For
1502 * more clarity, we furthermore pack everything that has to do with equation
1503 * data into a namespace of its own.
1507 * The underlying assumption is that this is a research program, and that
1508 * there we often have a number of test cases that consist of a domain, a
1509 * right hand side, boundary values, possibly a specified coefficient, and a
1510 * number of other parameters. They often vary all at the same time when
1511 * shifting from one example to another. To make handling such sets of
1512 * problem description parameters simple is the goal of the following.
1516 * Basically, the idea is this: let us have a structure for each set of
1517 * data, in which we pack everything that describes a test case: here, these
1518 * are two subclasses, one called <code>BoundaryValues</code> for the
1519 * boundary values of the exact solution, and one called
1520 * <code>RightHandSide</code>, and then a way to generate the coarse
1521 * grid. Since the solution of the previous example program looked like
1522 * curved ridges, we use this name here for the enclosing class. Note that
1523 * the names of the two inner classes have to be the same for all enclosing
1524 * test case classes, and also that we have attached the dimension template
1525 * argument to the enclosing class rather than to the inner ones, to make
1526 * further processing simpler. (From a language viewpoint, a namespace
1527 * would be better to encapsulate these inner classes, rather than a
1528 * structure. However, namespaces cannot be given as template arguments, so
1529 * we use a structure to allow a second object to select from within its
1530 * given argument. The enclosing structure, of course, has no member
1531 * variables apart from the classes it declares, and a static function to
1532 * generate the coarse mesh; it will in general never be instantiated.)
1536 * The idea is then the following (this is the right time to also take a
1537 * brief look at the code below): we can generate objects for boundary
1538 * values and right hand side by simply giving the name of the outer class
1539 * as a template argument to a class which we call here
1540 * <code>Data::SetUp</code>, and it then creates objects for the inner
1541 * classes. In this case, to get all that characterizes the curved ridge
1542 * solution, we would simply generate an instance of
1543 * <code>Data::SetUp@<Data::CurvedRidges@></code>, and everything we need to
1544 * know about the solution would be static member variables and functions of
1549 * This approach might seem like overkill in this case, but will become very
1550 * handy once a certain set up is not only characterized by Dirichlet
1551 * boundary values and a right hand side function, but in addition by
1552 * material properties, Neumann values, different boundary descriptors,
1553 * etc. In that case, the <code>SetUp</code> class might consist of a dozen
1554 * or more objects, and each descriptor class (like the
1555 * <code>CurvedRidges</code> class below) would have to provide them. Then,
1556 * you will be happy to be able to change from one set of data to another by
1557 * only changing the template argument to the <code>SetUp</code> class at
1558 * one place, rather than at many.
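 *
 * A short sketch of what this selection looks like in code (hypothetical
 * usage inside a function templated on <code>dim</code>; the class names
 * are the ones introduced below):
 * @code
 *   // Everything that characterizes the test case is chosen here, and only
 *   // here; switching to another data set means changing this one type.
 *   const Data::SetUp<Data::CurvedRidges<dim>, dim> setup;
 *
 *   const Function<dim> &boundary_values = setup.get_boundary_values();
 *   const Function<dim> &right_hand_side = setup.get_right_hand_side();
 * @endcode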
1562 * With this framework for different test cases, we are almost finished, but
1563 * one thing remains: by now we can select statically, by changing one
1564 * template argument, which data set to choose. In order to be able to do
1565 * that dynamically, i.e. at run time, we need a base class. This we provide
1566 * in the obvious way, see below, with virtual abstract functions. It forces
1567 * us to introduce a second template parameter <code>dim</code> which we
1568 * need for the base class (which could be avoided using some template
1569 * magic, but we omit that), but that's all.
1573 * Adding new test cases is now simple: you don't have to touch the framework
1574 * classes, only a structure like the <code>CurvedRidges</code> one is
1583 * <a name="TheSetUpBaseandSetUpclasses"></a>
1584 * <h4>The SetUpBase and SetUp classes</h4>
1588 * Based on the above description, the <code>SetUpBase</code> class then
1589 * looks as follows. To allow using the <code>SmartPointer</code> class
1590 * with this class, we derived from the <code>Subscriptor</code> class.
1593 * template <int dim>
1594 * struct SetUpBase : public Subscriptor
1596 * virtual const Function<dim> &get_boundary_values() const = 0;
1598 * virtual const Function<dim> &get_right_hand_side() const = 0;
1601 * create_coarse_grid(Triangulation<dim> &coarse_grid) const = 0;
1607 * And now for the derived class that takes the template argument as
1612 * Here we pack the data elements into private variables, and allow access
1613 * to them through the methods of the base class.
1616 * template <class Traits, int dim>
1617 * struct SetUp : public SetUpBase<dim>
1619 * virtual const Function<dim> &get_boundary_values() const override;
1621 * virtual const Function<dim> &get_right_hand_side() const override;
1625 * create_coarse_grid(Triangulation<dim> &coarse_grid) const override;
1628 * static const typename Traits::BoundaryValues boundary_values;
1629 * static const typename Traits::RightHandSide right_hand_side;
1634 * We have to provide definitions for the static member variables of the
1638 * template <class Traits, int dim>
1639 * const typename Traits::BoundaryValues SetUp<Traits, dim>::boundary_values;
1640 * template <class Traits, int dim>
1641 * const typename Traits::RightHandSide SetUp<Traits, dim>::right_hand_side;
1645 * And definitions of the member functions:
1648 * template <class Traits, int dim>
1649 * const Function<dim> &SetUp<Traits, dim>::get_boundary_values() const
1651 * return boundary_values;
1655 * template <class Traits, int dim>
1656 * const Function<dim> &SetUp<Traits, dim>::get_right_hand_side() const
1658 * return right_hand_side;
1662 * template <class Traits, int dim>
1663 * void SetUp<Traits, dim>::create_coarse_grid(
1664 * Triangulation<dim> &coarse_grid) const
1666 * Traits::create_coarse_grid(coarse_grid);
1673 * <a name="TheCurvedRidgesclass"></a>
1674 * <h4>The CurvedRidges class</h4>
1678 * The class that is used to describe the boundary values and right hand
1679 * side of the <code>curved ridge</code> problem already used in the
1680 * @ref step_13 "step-13" example program is then like so:
1683 * template <int dim>
1684 * struct CurvedRidges
1686 * class BoundaryValues : public Function<dim>
1689 * virtual double value(const Point<dim> & p,
1690 * const unsigned int component) const;
1694 * class RightHandSide : public Function<dim>
1697 * virtual double value(const Point<dim> & p,
1698 * const unsigned int component) const;
1701 * static void create_coarse_grid(Triangulation<dim> &coarse_grid);
1705 * template <int dim>
1706 * double CurvedRidges<dim>::BoundaryValues::value(
1707 * const Point<dim> &p,
1708 * const unsigned int /*component*/) const
1711 * for (unsigned int i = 1; i < dim; ++i)
1712 * q += std::sin(10 * p(i) + 5 * p(0) * p(0));
1713 * const double exponential = std::exp(q);
1714 * return exponential;
1719 * template <int dim>
1720 * double CurvedRidges<dim>::RightHandSide::value(
1721 * const Point<dim> &p,
1722 * const unsigned int /*component*/) const
1725 * for (unsigned int i = 1; i < dim; ++i)
1726 * q += std::sin(10 * p(i) + 5 * p(0) * p(0));
1727 * const double u = std::exp(q);
1728 * double t1 = 1, t2 = 0, t3 = 0;
1729 * for (unsigned int i = 1; i < dim; ++i)
1731 * t1 += std::cos(10 * p(i) + 5 * p(0) * p(0)) * 10 * p(0);
1732 * t2 += 10 * std::cos(10 * p(i) + 5 * p(0) * p(0)) -
1733 * 100 * std::sin(10 * p(i) + 5 * p(0) * p(0)) * p(0) * p(0);
1734 * t3 += 100 * std::cos(10 * p(i) + 5 * p(0) * p(0)) *
1735 * std::cos(10 * p(i) + 5 * p(0) * p(0)) -
1736 * 100 * std::sin(10 * p(i) + 5 * p(0) * p(0));
1740 * return -u * (t1 + t2 + t3);
1744 * template <int dim>
1745 * void CurvedRidges<dim>::create_coarse_grid(Triangulation<dim> &coarse_grid)
1747 * GridGenerator::hyper_cube(coarse_grid, -1, 1);
1748 * coarse_grid.refine_global(2);
1755 * <a name="TheExercise_2_3class"></a>
1756 * <h4>The Exercise_2_3 class</h4>
1760 * This example program was written while giving practical courses for a
1761 * lecture on adaptive finite element methods and duality based error
1762 * estimates. For these courses, we had one exercise, which required to
1763 * solve the Laplace equation with constant right hand side on a square
1764 * domain with a square hole in the center, and zero boundary
1765 * values. Since the implementation of the properties of this problem is
1766 * so particularly simple here, let's do it. As the number of the exercise
1767 * was 2.3, we take the liberty to retain this name for the class as well.
1770 * template <int dim>
1771 * struct Exercise_2_3
1775 * We need a class to denote the boundary values of the problem. In this
1776 * case, this is simple: it's the zero function, so don't even declare a
1777 * class, just an alias:
1780 * using BoundaryValues = Functions::ZeroFunction<dim>;
1784 * Second, a class that denotes the right hand side. Since it is
1785 * constant, just subclass the corresponding class of the library and be
1789 * class RightHandSide : public Functions::ConstantFunction<dim>
1793 * : Functions::ConstantFunction<dim>(1.)
1799 * Finally a function to generate the coarse grid. This is somewhat more
1800 * complicated here, see immediately below.
1803 * static void create_coarse_grid(Triangulation<dim> &coarse_grid);
1809 * As stated above, the grid for this example is the square [-1,1]^2 with
1810 * the square [-1/2,1/2]^2 as hole in it. We create the coarse grid as 4
1811 * times 4 cells with the middle four ones missing. To understand how
1812 * exactly the mesh is going to look, it may be simplest to just look
1813 * at the "Results" section of this tutorial program first. In general,
1814 * if you'd like to understand more about creating meshes either from
1815 * scratch by hand, as we do here, or using other techniques, you
1816 * should take a look at @ref step_49 "step-49".
1820 * Of course, the example has an extension to 3d, but since this function
1821 * cannot be written in a dimension independent way we choose not to
1822 * implement this here, but rather only specialize the template for
1823 * dim=2. If you compile the program for 3d, you'll get a message from the
1824 * linker that this function is not implemented for 3d, and needs to be
1829 * For the creation of this geometry, the library has no predefined
1830 * method. In this case, the geometry is still simple enough to do the
1831 * creation by hand, rather than using a mesh generator.
1835 * void Exercise_2_3<2>::create_coarse_grid(Triangulation<2> &coarse_grid)
1839 * We first define the space dimension, to allow those parts of the
1840 * function that are actually dimension independent to use this
1841 * variable. That makes it simpler if you later take this as a starting
1842 * point to implement a 3d version of this mesh. The next step is then
1843 * to have a list of vertices. Here, they are 24 (5 times 5, with the
1844 * middle one omitted). It is probably best to draw a sketch here.
1847 * const unsigned int dim = 2;
1849 * const std::vector<Point<2>> vertices = {
1850 * {-1.0, -1.0}, {-0.5, -1.0}, {+0.0, -1.0}, {+0.5, -1.0}, {+1.0, -1.0},
1851 * {-1.0, -0.5}, {-0.5, -0.5}, {+0.0, -0.5}, {+0.5, -0.5}, {+1.0, -0.5},
1852 * {-1.0, +0.0}, {-0.5, +0.0}, {+0.5, +0.0}, {+1.0, +0.0},
1853 * {-1.0, +0.5}, {-0.5, +0.5}, {+0.0, +0.5}, {+0.5, +0.5}, {+1.0, +0.5},
1854 * {-1.0, +1.0}, {-0.5, +1.0}, {+0.0, +1.0}, {+0.5, +1.0}, {+1.0, +1.0}};
1858 * Next, we have to define the cells and the vertices they contain.
1861 * const std::vector<std::array<int, GeometryInfo<dim>::vertices_per_cell>>
1862 * cell_vertices = {{{0, 1, 5, 6}},
1868 * {{10, 11, 14, 15}},
1869 * {{12, 13, 17, 18}},
1870 * {{14, 15, 19, 20}},
1871 * {{15, 16, 20, 21}},
1872 * {{16, 17, 21, 22}},
1873 * {{17, 18, 22, 23}}};
1875 * const unsigned int n_cells = cell_vertices.size();
1879 * Again, we generate a C++ vector type from this, but this time by
1880 * looping over the cells (yes, this is boring). Additionally, we set
1881 * the material indicator to zero for all the cells:
1884 * std::vector<CellData<dim>> cells(n_cells, CellData<dim>());
1885 * for (unsigned int i = 0; i < n_cells; ++i)
1887 * for (unsigned int j = 0; j < cell_vertices[i].size(); ++j)
1888 * cells[i].vertices[j] = cell_vertices[i][j];
1889 * cells[i].material_id = 0;
1894 * Finally pass all this information to the library to generate a
1895 * triangulation. The last parameter may be used to pass information
1896 * about non-zero boundary indicators at certain faces of the
1897 * triangulation to the library, but we don't want that here, so we give
1905 * And since we want the evaluation point (3/4,3/4) in this example
1906 * to be a grid point, we refine once globally:
1909 * coarse_grid.refine_global(1);
1916 * <a name="Discussion"></a>
1917 * <h4>Discussion</h4>
1921 * As you have now read through this framework, you may be wondering why we
1922 * have not chosen to implement the classes implementing a certain setup
1923 * (like the <code>CurvedRidges</code> class) directly as classes derived
1924 * from <code>Data::SetUpBase</code>. Indeed, we could very well have done
1925 * so. The only reason is that then we would have to have member variables
1926 * for the solution and right hand side classes in the
1927 * <code>CurvedRidges</code> class, as well as member functions overloading
1928 * the abstract functions of the base class giving access to these member
1929 * variables. The sole purpose of the <code>SetUp</code> class is to relieve us
1930 * from the need to reiterate these member variables and functions that
1931 * would be necessary in all such classes. In some way, the template
1932 * mechanism here only provides a way to have default implementations for a
1933 * number of functions that depend on external quantities and can thus not
1934 * be provided using normal virtual functions, at least not without the help
1939 * However, there might be good reasons to actually implement classes
1940 * derived from <code>Data::SetUpBase</code>, for example if the solution or
1941 * right hand side classes require constructors that take arguments, which
1942 * the <code>Data::SetUpBase</code> class cannot provide. In that case,
1943 * subclassing is a worthwhile strategy. Other possibilities for special
1944 * cases are to derive from <code>Data::SetUp@<SomeSetUp@></code> where
1945 * <code>SomeSetUp</code> denotes a class, or even to explicitly specialize
1946 * <code>Data::SetUp@<SomeSetUp@></code>. The latter allows us to transparently
1947 * use the way the <code>SetUp</code> class is used for other set-ups, but
1948 * with special actions taken for special arguments.
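 *
 * To illustrate the last of these options, here is a small self-contained
 * sketch of an explicit specialization (the names are made up and not part
 * of this program):
 * @code
 * #include <iostream>
 *
 * // Generic template: used for all ordinary test cases.
 * template <class Traits>
 * struct SetUp
 * {
 *   static const char *description() { return "generic set-up"; }
 * };
 *
 * struct SomeSetUp {};
 *
 * // Explicit specialization: same interface, but with special actions
 * // taken for this one particular test case.
 * template <>
 * struct SetUp<SomeSetUp>
 * {
 *   static const char *description() { return "special set-up for SomeSetUp"; }
 * };
 *
 * int main()
 * {
 *   std::cout << SetUp<int>::description() << '\n'
 *             << SetUp<SomeSetUp>::description() << '\n';
 * }
 * @endcode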
1952 * A final observation favoring the approach taken here is the following: we
1953 * have found numerous times that when starting a project, the number of
1954 * parameters (usually boundary values, right hand side, coarse grid, just
1955 * as here) was small, and the number of test cases was small as well. One
1956 * then starts out by handcoding them into a number of <code>switch</code>
1957 * statements. Over time, projects grow, and so does the number of test
1958 * cases. The number of <code>switch</code> statements grows with that, and
1959 * their length as well, and one starts to find ways to consider impossible
1960 * examples where domains, boundary values, and right hand sides do not fit
1961 * together any more, and starts losing the overview of the whole
1962 * structure. Encapsulating everything belonging to a certain test case into
1963 * a structure of its own has proven worthwhile for this, as it keeps
1964 * everything that belongs to one test case in one place. Furthermore, it
1965 * allows us to put these things all in one or more files that are only devoted
1966 * to test cases and their data, without having to bring their actual
1967 * implementation into contact with the rest of the program.
1975 * <a name="Dualfunctionals"></a>
1976 * <h3>Dual functionals</h3>
1980 * As with the other components of the program, we put everything we need to
1981 * describe dual functionals into a namespace of its own, and define an
1982 * abstract base class that provides the interface the class solving the
1983 * dual problem needs for its work.
1987 * We will then implement two such classes, for the evaluation of a point
1988 * value and of the derivative of the solution at that point. For these
1989 * functionals we already have the corresponding evaluation objects, so they
1990 * are complementary.
1993 * namespace DualFunctional
1998 * <a name="TheDualFunctionalBaseclass"></a>
1999 * <h4>The DualFunctionalBase class</h4>
2003 * First start with the base class for dual functionals. Since for linear
2004 * problems the characteristics of the dual problem play a role only in
2005 * the right hand side, we only need to provide for a function that
2006 * assembles the right hand side for a given discretization:
2009 * template <int dim>
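 *
 * Only the first line of the declaration appears in this excerpt; its
 * interface, as implied by the overrides in the derived classes below (and
 * assuming the class derives from Subscriptor so that SmartPointer can
 * point to it), is essentially this sketch:
 * @code
 *   template <int dim>
 *   class DualFunctionalBase : public Subscriptor
 *   {
 *   public:
 *     virtual void assemble_rhs(const DoFHandler<dim> &dof_handler,
 *                               Vector<double> &       rhs) const = 0;
 *   };
 * @endcode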
2021 * <a name="ThedualfunctionalPointValueEvaluationclass"></a>
2022 * <h4>The dual functional PointValueEvaluation class</h4>
2026 * As a first application, we consider the functional corresponding to the
2027 * evaluation of the solution's value at a given point which again we
2028 * assume to be a vertex. Apart from the constructor that takes and stores
2029 * the evaluation point, this class consists only of the function that
2030 * implements assembling the right hand side.
2033 * template <int dim>
2034 * class PointValueEvaluation : public DualFunctionalBase<dim>
2037 * PointValueEvaluation(const Point<dim> &evaluation_point);
2039 * virtual void assemble_rhs(const DoFHandler<dim> &dof_handler,
2040 * Vector<double> & rhs) const override;
2043 * ExcEvaluationPointNotFound,
2045 * << "The evaluation point " << arg1
2046 * << " was not found among the vertices of the present grid.");
2049 * const Point<dim> evaluation_point;
2053 * template <int dim>
2054 * PointValueEvaluation<dim>::PointValueEvaluation(
2055 * const Point<dim> &evaluation_point)
2056 * : evaluation_point(evaluation_point)
2062 * As for doing the main purpose of the class, assembling the right hand
2063 * side, let us first consider what is necessary: The right hand side of
2064 * the dual problem is a vector of values J(phi_i), where J is the error
2065 * functional, and phi_i is the i-th shape function. Here, J is the
2066 * evaluation at the point x0, i.e. J(phi_i)=phi_i(x0).
2070 * Now, we have assumed that the evaluation point is a vertex. Thus, for
2071 * the usual finite elements we might be using in this program, we can
2072 * take for granted that at such a point exactly one shape function is
2073 * nonzero, and in particular has the value one. Thus, we set the right
2074 * hand side vector to all-zeros, then seek for the shape function
2075 * associated with that point and set the corresponding value of the right
2076 * hand side vector to one:
2079 * template <int dim>
2081 * PointValueEvaluation<dim>::assemble_rhs(const DoFHandler<dim> &dof_handler,
2082 * Vector<double> & rhs) const
2086 * So, first set everything to zeros...
2089 * rhs.reinit(dof_handler.n_dofs());
2093 * ...then loop over cells and find the evaluation point among the
2094 * vertices (or very close to a vertex, which may happen due to floating
2098 * for (const auto &cell : dof_handler.active_cell_iterators())
2099 * for (const auto vertex : cell->vertex_indices())
2100 * if (cell->vertex(vertex).distance(evaluation_point) <
2101 * cell->diameter() * 1e-8)
2105 * Ok, found, so set corresponding entry, and leave function
2106 * since we are finished:
2109 * rhs(cell->vertex_dof_index(vertex, 0)) = 1;
2115 * Finally, a sanity check: if we somehow got here, then we must have
2116 * missed the evaluation point, so raise an exception unconditionally:
2119 * AssertThrow(false, ExcEvaluationPointNotFound(evaluation_point));
2126 * <a name="ThedualfunctionalPointXDerivativeEvaluationclass"></a>
2127 * <h4>The dual functional PointXDerivativeEvaluation class</h4>
2131 * As second application, we again consider the evaluation of the
2132 * x-derivative of the solution at one point. Again, the declaration of
2133 * the class, and the implementation of its constructor is not too
2137 * template <int dim>
2138 * class PointXDerivativeEvaluation : public DualFunctionalBase<dim>
2141 * PointXDerivativeEvaluation(const Point<dim> &evaluation_point);
2143 * virtual void assemble_rhs(const DoFHandler<dim> &dof_handler,
2144 * Vector<double> & rhs) const;
2147 * ExcEvaluationPointNotFound,
2149 * << "The evaluation point " << arg1
2150 * << " was not found among the vertices of the present grid.");
2153 * const Point<dim> evaluation_point;
2157 * template <int dim>
2158 * PointXDerivativeEvaluation<dim>::PointXDerivativeEvaluation(
2159 * const Point<dim> &evaluation_point)
2160 * : evaluation_point(evaluation_point)
2166 * What is interesting is the implementation of this functional: here,
2167 * J(phi_i)=d/dx phi_i(x0).
2171 * We could, as in the implementation of the respective evaluation object
2172 * take the average of the gradients of each shape function phi_i at this
2173 * evaluation point. However, we take a slightly different approach: we
2174 * simply take the average over all cells that surround this point. The
2175 * question of which cells <code>surround</code> the evaluation point is
2176 * made dependent on the mesh width by including those cells for which the
2177 * distance of the cell's midpoint to the evaluation point is less than
2178 * the cell's diameter.
2182 * Taking the average of the gradient over the area/volume of these cells
2183 * leads to a dual solution which is very close to the one which would
2184 * result from the point evaluation of the gradient. It is simple to
2185 * justify theoretically that this does not change the method
2189 * template <int dim>
2190 * void PointXDerivativeEvaluation<dim>::assemble_rhs(
2191 * const DoFHandler<dim> &dof_handler,
2192 * Vector<double> & rhs) const
2196 * Again, first set all entries to zero:
2199 * rhs.reinit(dof_handler.n_dofs());
2203 * Initialize a <code>FEValues</code> object with a quadrature formula,
2204 * have abbreviations for the number of quadrature points and shape
2208 * QGauss<dim> quadrature(dof_handler.get_fe().degree + 1);
2209 * FEValues<dim> fe_values(dof_handler.get_fe(),
2211 * update_gradients | update_quadrature_points |
2212 * update_JxW_values);
2213 * const unsigned int n_q_points = fe_values.n_quadrature_points;
2214 * const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell;
2218 * ...and have two objects that are used to store the global indices of
2219 * the degrees of freedom on a cell, and the values of the gradients of
2220 * the shape functions at the quadrature points:
2223 * Vector<double> cell_rhs(dofs_per_cell);
2224 * std::vector<unsigned int> local_dof_indices(dofs_per_cell);
2228 * Finally have a variable in which we will sum up the area/volume of
2229 * the cells over which we integrate, by integrating the unit functions
2233 * double total_volume = 0;
2237 * Then start the loop over all cells, and select those cells which are
2238 * close enough to the evaluation point:
2241 * for (const auto &cell : dof_handler.active_cell_iterators())
2242 * if (cell->center().distance(evaluation_point) <= cell->diameter())
2246 * If we have found such a cell, then initialize the
2247 * <code>FEValues</code> object and integrate the x-component of
2248 * the gradient of each shape function, as well as the unit
2249 * function for the total area/volume.
2252 * fe_values.reinit(cell);
2255 * for (unsigned int q = 0; q < n_q_points; ++q)
2257 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
2259 * fe_values.shape_grad(i, q)[0] // (d/dx phi_i(x_q))
2260 * * fe_values.JxW(q); // * dx
2261 * total_volume += fe_values.JxW(q);
2266 * If we have the local contributions, distribute them to the
2270 * cell->get_dof_indices(local_dof_indices);
2271 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
2272 * rhs(local_dof_indices[i]) += cell_rhs(i);
2277 * After we have looped over all cells, check whether we have found any
2278 * at all, by making sure that their volume is non-zero. If not, then
2279 * the results will be botched, as the right hand side should then still
2280 * be zero, so throw an exception:
2283 * AssertThrow(total_volume > 0,
2284 * ExcEvaluationPointNotFound(evaluation_point));
2288 * Finally, we have by now only integrated the gradients of the shape
2289 * functions, not taking their mean value. We fix this by dividing by
2290 * the measure of the volume over which we have integrated:
2293 * rhs /= total_volume;
2297 * } // namespace DualFunctional
2303 * <a name="ExtendingtheLaplaceSolvernamespace"></a>
2304 * <h3>Extending the LaplaceSolver namespace</h3>
2307 * namespace LaplaceSolver
2312 * <a name="TheDualSolverclass"></a>
2313 * <h4>The DualSolver class</h4>
2317 * In the same way as the <code>PrimalSolver</code> class above, we now
2318 * implement a <code>DualSolver</code>. It has all the same features, the
2319 * only difference is that it does not take a function object denoting a
2320 * right hand side object, but now takes a <code>DualFunctionalBase</code>
2321 * object that will assemble the right hand side vector of the dual
2322 * problem. The rest of the class is rather trivial.
2326 * Since both primal and dual solver will use the same triangulation, but
2327 * different discretizations, it now becomes clear why we have made the
2328 * <code>Base</code> class a virtual one: since the final class will be
2329 * derived from both <code>PrimalSolver</code> as well as
2330 * <code>DualSolver</code>, it would have two <code>Base</code> instances,
2331 * had we not marked the inheritance as virtual. Since in many
2332 * applications the base class would store much more information than just
2333 * the triangulation which needs to be shared between primal and dual
2334 * solvers, we do not usually want to use two such base classes.
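 *
 * A tiny self-contained illustration of why the <code>virtual</code>
 * keyword matters here (simplified names, not the program's classes):
 * @code
 * #include <iostream>
 *
 * struct Base
 * {
 *   int refinement_cycle = 0;
 * };
 *
 * // With virtual inheritance there is exactly one Base subobject ...
 * struct PrimalSolver : virtual Base {};
 * struct DualSolver : virtual Base {};
 *
 * // ... so the most derived class can access it unambiguously. Dropping
 * // "virtual" above would create two Base subobjects and an ambiguity
 * // error on the access below.
 * struct WeightedResidual : PrimalSolver, DualSolver {};
 *
 * int main()
 * {
 *   WeightedResidual wr;
 *   wr.refinement_cycle = 3;
 *   std::cout << wr.refinement_cycle << std::endl;
 * }
 * @endcode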
2337 * template <int dim>
2338 * class DualSolver : public Solver<dim>
2342 * Triangulation<dim> & triangulation,
2343 * const FiniteElement<dim> & fe,
2344 * const Quadrature<dim> & quadrature,
2345 * const Quadrature<dim - 1> & face_quadrature,
2346 * const DualFunctional::DualFunctionalBase<dim> &dual_functional);
2349 * const SmartPointer<const DualFunctional::DualFunctionalBase<dim>>
2351 * virtual void assemble_rhs(Vector<double> &rhs) const override;
2353 * static const Functions::ZeroFunction<dim> boundary_values;
2356 * template <int dim>
2357 * const Functions::ZeroFunction<dim> DualSolver<dim>::boundary_values;
2359 * template <int dim>
2360 * DualSolver<dim>::DualSolver(
2361 * Triangulation<dim> & triangulation,
2362 * const FiniteElement<dim> & fe,
2363 * const Quadrature<dim> & quadrature,
2364 * const Quadrature<dim - 1> & face_quadrature,
2365 * const DualFunctional::DualFunctionalBase<dim> &dual_functional)
2366 * : Base<dim>(triangulation)
2367 * , Solver<dim>(triangulation,
2372 * , dual_functional(&dual_functional)
2377 * template <int dim>
2378 * void DualSolver<dim>::assemble_rhs(Vector<double> &rhs) const
2380 * dual_functional->assemble_rhs(this->dof_handler, rhs);
2387 * <a name="TheWeightedResidualclass"></a>
2388 * <h4>The WeightedResidual class</h4>
2392 * Here finally comes the main class of this program, the one that
2393 * implements the dual weighted residual error estimator. It joins the
2394 * primal and dual solver classes to use them for the computation of
2395 * primal and dual solutions, and implements the error representation
2396 * formula for use as error estimate and mesh refinement.
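 *
 * For orientation, the error representation evaluated below has, for the
 * Laplace operator, the standard dual weighted residual form
 * @f[
 *   J(u) - J(u_h)
 *   = \sum_K \left\{ (f + \Delta u_h,\; z - \varphi_h)_K
 *     - \frac{1}{2} \bigl([\partial_n u_h],\; z - \varphi_h\bigr)_{\partial K \setminus \partial\Omega} \right\},
 * @f]
 * where @f$z@f$ is the dual solution, @f$\varphi_h@f$ an arbitrary element
 * of the primal finite element space (below: the interpolant @f$I_h z@f$),
 * and @f$[\partial_n u_h]@f$ the jump of the normal derivative of the
 * primal solution across a face.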
2400 * The first few of the functions of this class are mostly overriders of
2401 * the respective functions of the base class:
2404 * template <int dim>
2405 * class WeightedResidual : public PrimalSolver<dim>, public DualSolver<dim>
2409 * Triangulation<dim> & coarse_grid,
2410 * const FiniteElement<dim> & primal_fe,
2411 * const FiniteElement<dim> & dual_fe,
2412 * const Quadrature<dim> & quadrature,
2413 * const Quadrature<dim - 1> & face_quadrature,
2414 * const Function<dim> & rhs_function,
2415 * const Function<dim> & boundary_values,
2416 * const DualFunctional::DualFunctionalBase<dim> &dual_functional);
2418 * virtual void solve_problem() override;
2420 * virtual void postprocess(
2421 * const Evaluation::EvaluationBase<dim> &postprocessor) const override;
2423 * virtual unsigned int n_dofs() const override;
2425 * virtual void refine_grid() override;
2427 * virtual void output_solution() const override;
2432 * In the private section, we have two functions that are used to call
2433 * the <code>solve_problem</code> functions of the primal and dual base
2434 * classes. These two functions will be called in parallel by the
2435 * <code>solve_problem</code> function of this class.
2438 * void solve_primal_problem();
2439 * void solve_dual_problem();
2442 * Then declare abbreviations for active cell iterators, to avoid that
2443 * we have to write this lengthy name over and over again:
2449 * using active_cell_iterator =
2450 * typename DoFHandler<dim>::active_cell_iterator;
2454 * Next, declare a data type that we will use to store the contribution
2455 * of faces to the error estimator. The idea is that we can compute the
2456 * face terms from each of the two cells to this face, as they are the
2457 * same when viewed from both sides. What we will do is to compute them
2458 * only once, based on some rules explained below that determine which of
2459 * the two adjacent cells will be in charge of doing so. We then store the
2460 * contribution of each face in a map mapping faces to their values, and
2461 * only collect the contributions for each cell by looping over the
2462 * cells a second time and grabbing the values from the map.
2466 * The data type of this map is declared here:
2469 * using FaceIntegrals =
2470 * typename std::map<typename DoFHandler<dim>::face_iterator, double>;
2474 * In the computation of the error estimates on cells and faces, we need
2475 * a number of helper objects, such as <code>FEValues</code> and
2476 * <code>FEFaceValues</code> functions, but also temporary objects
2477 * storing the values and gradients of primal and dual solutions, for
2478 * example. These fields are needed in the three functions that do the
2479 * integration on cells, and regular and irregular faces, respectively.
2483 * There are three reasonable ways to provide these fields: first, as
2484 * local variables in the function that needs them; second, as member
2485 * variables of this class; third, as arguments passed to that function.
2489 * These three alternatives all have drawbacks: the third that their
2490 * number is not negligible and would make calling these functions a
2491 * lengthy enterprise. The second has the drawback that it disallows
2492 * parallelization, since the threads that will compute the error
2493 * estimate have to have their own copies of these variables each, so
2494 * member variables of the enclosing class will not work. The first
2495 * approach, although straightforward, has a subtle but important
2496 * drawback: we will call these functions over and over again, many
2497 * thousands of times maybe; it now turns out that allocating
2498 * vectors and other objects that need memory from the heap is an
2499 * expensive business in terms of run-time, since memory allocation is
2500 * expensive when several threads are involved. It is thus
2501 * significantly better to allocate the memory only once, and recycle
2502 * the objects as often as possible.
2506 * What to do? Our answer is to use a variant of the third strategy.
2507 * In fact, this is exactly what the WorkStream concept is supposed to
2508 * do (we have already introduced it above, but see also @ref threads).
2509 * To avoid that we have to give these functions a dozen or so
2510 * arguments, we pack all these variables into two structures, one which
2511 * is used for the computations on cells, the other doing them on the
2512 * faces. Both are then joined into the WeightedResidualScratchData class
2513 * that will serve as the "scratch data" class of the WorkStream concept:
2518 * FEValues<dim> fe_values;
2519 * const SmartPointer<const Function<dim>> right_hand_side;
2521 * std::vector<double> cell_residual;
2522 * std::vector<double> rhs_values;
2523 * std::vector<double> dual_weights;
2524 * std::vector<double> cell_laplacians;
2525 * CellData(const FiniteElement<dim> &fe,
2526 * const Quadrature<dim> & quadrature,
2527 * const Function<dim> & right_hand_side);
2528 * CellData(const CellData &cell_data);
2533 * FEFaceValues<dim> fe_face_values_cell;
2534 * FEFaceValues<dim> fe_face_values_neighbor;
2535 * FESubfaceValues<dim> fe_subface_values_cell;
2537 * std::vector<double> jump_residual;
2538 * std::vector<double> dual_weights;
2539 * typename std::vector<Tensor<1, dim>> cell_grads;
2540 * typename std::vector<Tensor<1, dim>> neighbor_grads;
2541 * FaceData(const FiniteElement<dim> & fe,
2542 * const Quadrature<dim - 1> &face_quadrature);
2543 * FaceData(const FaceData &face_data);
2546 * struct WeightedResidualScratchData
2548 * WeightedResidualScratchData(
2549 * const FiniteElement<dim> & primal_fe,
2550 * const Quadrature<dim> & primal_quadrature,
2551 * const Quadrature<dim - 1> &primal_face_quadrature,
2552 * const Function<dim> & rhs_function,
2553 * const Vector<double> & primal_solution,
2554 * const Vector<double> & dual_weights);
2556 * WeightedResidualScratchData(
2557 * const WeightedResidualScratchData &scratch_data);
2559 * CellData cell_data;
2560 * FaceData face_data;
2561 * Vector<double> primal_solution;
2562 * Vector<double> dual_weights;
2568 * WorkStream::run generally wants both a scratch object and a copy
2569 * object. Here, for reasons similar to what we had in @ref step_9 "step-9" when
2570 * discussing the computation of an approximation of the gradient, we
2571 * don't actually need a "copy data" structure. Since WorkStream insists
2572 * on having one of these, we just declare an empty structure that does
2573 * nothing other than being there.
2576 * struct WeightedResidualCopyData
2583 * Regarding the evaluation of the error estimator, we have one driver
2588 * void estimate_error(Vector<float> &error_indicators) const;
2590 * void estimate_on_one_cell(const active_cell_iterator &cell,
2591 * WeightedResidualScratchData &scratch_data,
2592 * WeightedResidualCopyData &copy_data,
2593 * Vector<float> &error_indicators,
2594 * FaceIntegrals &face_integrals) const;
2598 * Then we have functions that do the actual integration of the error
2599 * representation formula. They will treat the terms on the cell
2600 * interiors, on those faces that have no hanging nodes, and on those
2601 * faces with hanging nodes, respectively:
2604 * void integrate_over_cell(const active_cell_iterator &cell,
2605 * const Vector<double> &primal_solution,
2606 * const Vector<double> &dual_weights,
2608 * Vector<float> &error_indicators) const;
2610 * void integrate_over_regular_face(const active_cell_iterator &cell,
2611 * const unsigned int face_no,
2612 * const Vector<double> &primal_solution,
2613 * const Vector<double> &dual_weights,
2614 * FaceData &face_data,
2615 * FaceIntegrals &face_integrals) const;
2616 * void integrate_over_irregular_face(const active_cell_iterator &cell,
2617 * const unsigned int face_no,
2618 * const Vector<double> &primal_solution,
2619 * const Vector<double> &dual_weights,
2620 * FaceData &face_data,
2621 * FaceIntegrals &face_integrals) const;
2628 * In the implementation of this class, we first have the constructors of
2629 * the <code>CellData</code> and <code>FaceData</code> member classes, and
2630 * the <code>WeightedResidual</code> constructor. They only initialize
2631 * fields to their correct lengths, so we do not have to discuss them in
2635 * template <int dim>
2639 * const Function<dim> &right_hand_side)
2644 * , right_hand_side(&right_hand_side)
2645 * , cell_residual(quadrature.size())
2646 * , rhs_values(quadrature.size())
2647 * , dual_weights(quadrature.size())
2648 * , cell_laplacians(quadrature.size())
2653 * template <int dim>
2654 * WeightedResidual<dim>::CellData::CellData(const CellData &cell_data)
2655 * : fe_values(cell_data.fe_values.get_fe(),
2656 * cell_data.fe_values.get_quadrature(),
2659 * , right_hand_side(cell_data.right_hand_side)
2661 * , rhs_values(cell_data.rhs_values)
2662 * , dual_weights(cell_data.dual_weights)
2663 * , cell_laplacians(cell_data.cell_laplacians)
2668 * template <int dim>
2669 * WeightedResidual<dim>::FaceData::FaceData(
2672 * : fe_face_values_cell(fe,
2676 * , fe_face_values_neighbor(fe,
2682 * const unsigned int n_face_q_points = face_quadrature.size();
2684 * jump_residual.resize(n_face_q_points);
2685 * dual_weights.resize(n_face_q_points);
2686 * cell_grads.resize(n_face_q_points);
2687 * neighbor_grads.resize(n_face_q_points);
2692 * template <int dim>
2693 * WeightedResidual<dim>::FaceData::FaceData(const FaceData &face_data)
2694 * : fe_face_values_cell(face_data.fe_face_values_cell.get_fe(),
2695 * face_data.fe_face_values_cell.get_quadrature(),
2698 * , fe_face_values_neighbor(
2699 * face_data.fe_face_values_neighbor.get_fe(),
2700 * face_data.fe_face_values_neighbor.get_quadrature(),
2703 * , fe_subface_values_cell(
2704 * face_data.fe_subface_values_cell.get_fe(),
2705 * face_data.fe_subface_values_cell.get_quadrature(),
2707 * , jump_residual(face_data.jump_residual)
2708 * , dual_weights(face_data.dual_weights)
2709 * , cell_grads(face_data.cell_grads)
2710 * , neighbor_grads(face_data.neighbor_grads)
2715 * template <int dim>
2716 * WeightedResidual<dim>::WeightedResidualScratchData::
2717 * WeightedResidualScratchData(
2724 * : cell_data(primal_fe, primal_quadrature, rhs_function)
2725 * , face_data(primal_fe, primal_face_quadrature)
2726 * , primal_solution(primal_solution)
2727 * , dual_weights(dual_weights)
2730 * template <int dim>
2731 * WeightedResidual<dim>::WeightedResidualScratchData::
2732 * WeightedResidualScratchData(
2733 * const WeightedResidualScratchData &scratch_data)
2734 * : cell_data(scratch_data.cell_data)
2735 * , face_data(scratch_data.face_data)
2736 * , primal_solution(scratch_data.primal_solution)
2737 * , dual_weights(scratch_data.dual_weights)
2742 * template <int dim>
2743 * WeightedResidual<dim>::WeightedResidual(
2751 * const DualFunctional::DualFunctionalBase<dim> &dual_functional)
2752 * : Base<dim>(coarse_grid)
2753 * , PrimalSolver<dim>(coarse_grid,
2759 * , DualSolver<dim>(coarse_grid,
2769 * The next five functions are boring, as they simply relay their work to
2770 * the base classes. The first calls the primal and dual solvers in
2771 * parallel, while postprocessing the solution and retrieving the number
2772 * of degrees of freedom is done by the primal class.
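 *
 * A sketch of what running the two solves side by side can look like
 * (assuming deal.II's Threads::TaskGroup and Threads::new_task facilities):
 * @code
 *   Threads::TaskGroup<void> tasks;
 *   tasks +=
 *     Threads::new_task(&WeightedResidual<dim>::solve_primal_problem, *this);
 *   tasks +=
 *     Threads::new_task(&WeightedResidual<dim>::solve_dual_problem, *this);
 *   tasks.join_all();
 * @endcode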
2775 * template <int dim>
2776 * void WeightedResidual<dim>::solve_problem()
2787 * template <int dim>
2788 * void WeightedResidual<dim>::solve_primal_problem()
2790 * PrimalSolver<dim>::solve_problem();
2793 * template <int dim>
2794 * void WeightedResidual<dim>::solve_dual_problem()
2796 * DualSolver<dim>::solve_problem();
2800 * template <int dim>
2801 * void WeightedResidual<dim>::postprocess(
2802 * const Evaluation::EvaluationBase<dim> &postprocessor) const
2804 * PrimalSolver<dim>::postprocess(postprocessor);
2808 * template <int dim>
2809 * unsigned int WeightedResidual<dim>::n_dofs() const
2811 * return PrimalSolver<dim>::n_dofs();
2818 * Now, it is becoming more interesting: the <code>refine_grid()</code>
2819 * function asks the error estimator to compute the cell-wise error
2820 * indicators, then uses their absolute values for mesh refinement.
2823 * template <int dim>
2824 * void WeightedResidual<dim>::refine_grid()
2828 * First call the function that computes the cell-wise and global error:
2832 * estimate_error(error_indicators);
2836 * Then note that marking cells for refinement or coarsening only works
2837 * if all indicators are positive, to allow their comparison. Thus, drop
2838 * the signs on all these indicators:
2841 * for (float &error_indicator : error_indicators)
2842 * error_indicator = std::fabs(error_indicator);
2846 * Finally, we can select between different strategies for
2847 * refinement. The default here is to refine those cells with the
2848 * largest error indicators that make up for a total of 80 per cent of
2849 * the error, while we coarsen those with the smallest indicators that
2850 * make up for the bottom 2 per cent of the error.
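 *
 * This strategy is available as a library call; a sketch of its use (the
 * fractions 0.8 and 0.02 correspond to the percentages just mentioned):
 * @code
 *   GridRefinement::refine_and_coarsen_fixed_fraction(*this->triangulation,
 *                                                     error_indicators,
 *                                                     0.8,
 *                                                     0.02);
 *   this->triangulation->execute_coarsening_and_refinement();
 * @endcode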
2863 * Since we want to output both the primal and the dual solution, we
2864 * overload the <code>output_solution</code> function. The only
2865 * interesting feature of this function is that the primal and dual
2866 * solutions are defined on different finite element spaces, which is not
2867 * the format the <code>DataOut</code> class expects. Thus, we have to
2868 * transfer them to a common finite element space. Since we only want to
2869 * look at the solutions qualitatively, we content ourselves with
2870 * interpolating the dual solution to the (smaller) primal space. For the
2871 * interpolation, there is a library function, that takes a
2873 * constraints. The rest is standard.
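As a sketch of that interpolation step (not the program's literal code), the following helper with a hypothetical name shows the FETools::interpolate call together with the constraints it needs:
@code
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// Hypothetical helper: interpolate a solution given on the dual space onto
// the primal finite element space, respecting the primal hanging node
// constraints.
template <int dim>
Vector<double>
interpolate_to_primal_space(const DoFHandler<dim> &dual_dof_handler,
                            const Vector<double>  &dual_solution,
                            const DoFHandler<dim> &primal_dof_handler)
{
  AffineConstraints<double> primal_hanging_node_constraints;
  DoFTools::make_hanging_node_constraints(primal_dof_handler,
                                          primal_hanging_node_constraints);
  primal_hanging_node_constraints.close();

  Vector<double> interpolated(primal_dof_handler.n_dofs());
  FETools::interpolate(dual_dof_handler,
                       dual_solution,
                       primal_dof_handler,
                       primal_hanging_node_constraints,
                       interpolated);
  return interpolated;
}
@endcode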
2876 * template <int dim>
2877 * void WeightedResidual<dim>::output_solution() const
2881 * primal_hanging_node_constraints);
2882 * primal_hanging_node_constraints.close();
2883 * Vector<double> dual_solution(PrimalSolver<dim>::dof_handler.n_dofs());
2885 * DualSolver<dim>::solution,
2886 * PrimalSolver<dim>::dof_handler,
2887 * primal_hanging_node_constraints,
2895 * Add the data vectors for which we want output. Add them both, the
2897 * <code>DataOut</code> functions can handle as many data vectors as you wish to write to output:
2900 * data_out.add_data_vector(PrimalSolver<dim>::solution, "primal_solution");
2901 * data_out.add_data_vector(dual_solution, "dual_solution");
2903 * data_out.build_patches();
2905 * std::ofstream out("solution-" + std::to_string(this->refinement_cycle) +
2914 * <a name="Estimatingerrors"></a>
2915 * <h3>Estimating errors</h3>
2920 * <a name="Errorestimationdriverfunctions"></a>
2921 * <h4>Error estimation driver functions</h4>
2925 * As for the actual computation of error estimates, let's start with the
2926 * function that drives all this, i.e. calls those functions that actually
2927 * do the work, and finally collects the results.
2930 * template <int dim>
2932 * WeightedResidual<dim>::estimate_error(Vector<float> &error_indicators) const
2936 * The first task in computing the error is to set up vectors that
2937 * denote the primal solution, and the weights (z-z_h)=(z-I_hz), both in
2938 * the finite element space for which we have computed the dual
2939 * solution. For this, we have to interpolate the primal solution to the
2940 * dual finite element space, and to subtract the interpolation of the
2941 * computed dual solution to the primal finite element
2942 * space. Fortunately, the library provides functions for the
2943 * interpolation into larger or smaller finite element spaces, so this
2944 * is mostly obvious.
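As a reminder of what the code below evaluates (in the notation of the introduction, and up to the approximations discussed there), the dual weighted residual estimate has roughly the form
@f[
  J(u)-J(u_h) \approx \sum_K \eta_K, \qquad
  \eta_K = \int_K (f+\Delta u_h)(z-I_h z)\,dx
           - \frac{1}{2}\int_{\partial K}
             \left[\frac{\partial u_h}{\partial n}\right](z-I_h z)\,ds,
@f]
where @f$[\partial u_h/\partial n]@f$ denotes the jump of the normal derivative of @f$u_h@f$ across a face. The cell integral is computed in <code>integrate_over_cell()</code> below, and the face integrals in the two <code>integrate_over_*_face()</code> functions.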
2948 * First, let's do that for the primal solution: it is cell-wise
2949 * interpolated into the finite element space in which we have solved
2950 * the dual problem. But, again as in the
2951 * <code>WeightedResidual::output_solution</code> function we first need
2953 * to create the hanging node constraints, but this time of the dual finite element space.
2958 * dual_hanging_node_constraints);
2959 * dual_hanging_node_constraints.close();
2960 * Vector<double> primal_solution(DualSolver<dim>::dof_handler.n_dofs());
2962 * PrimalSolver<dim>::solution,
2963 * DualSolver<dim>::dof_handler,
2964 * dual_hanging_node_constraints,
2969 * Then for computing the interpolation of the numerically approximated
2970 * dual solution z into the finite element space of the primal solution
2971 * and subtracting it from z: use the
2972 * <code>interpolation_difference</code> function, that gives (z-I_hz) in
2973 * the element space of the dual solution.
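A sketch of that call, assuming the two DoFHandler objects and both constraint objects have been set up as described (the helper name <code>compute_dual_weights</code> is hypothetical):
@code
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// Hypothetical helper: compute the weights (z - I_h z), expressed in the
// dual finite element space.
template <int dim>
void compute_dual_weights(const DoFHandler<dim>           &dual_dof_handler,
                          const AffineConstraints<double> &dual_constraints,
                          const Vector<double>            &dual_solution,
                          const DoFHandler<dim>           &primal_dof_handler,
                          const AffineConstraints<double> &primal_constraints,
                          Vector<double>                  &dual_weights)
{
  // Interpolate z into the primal space, interpolate the result back, and
  // subtract it from z; the difference is returned in the dual space.
  FETools::interpolation_difference(dual_dof_handler,
                                    dual_constraints,
                                    dual_solution,
                                    primal_dof_handler,
                                    primal_constraints,
                                    dual_weights);
}
@endcode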
2978 * primal_hanging_node_constraints);
2979 * primal_hanging_node_constraints.close();
2980 * Vector<double> dual_weights(DualSolver<dim>::dof_handler.n_dofs());
2982 * dual_hanging_node_constraints,
2983 * DualSolver<dim>::solution,
2984 * PrimalSolver<dim>::dof_handler,
2985 * primal_hanging_node_constraints,
2990 * Note that this could probably have been done more efficiently since those
2991 * constraints have been used previously when assembling matrix and
2992 * right hand side for the primal problem and writing out the dual
2993 * solution. We leave the optimization of the program in this respect as an exercise.
2998 * Having computed the dual weights we now proceed with computing the
2999 * cell and face residuals of the primal solution. First we set up a map
3000 * between face iterators and the contribution of their jump terms to
3001 * the error estimator. The reason is that we compute the jump terms
3002 * only once, from one side of the face, and want to collect them only
3003 * afterwards when looping over all cells a second time.
3007 * We initialize this map already with a value of -1e20 for all faces,
3008 * since this value will stand out in the results if something should go
3009 * wrong and we fail to compute the value for a face for some
3010 * reason. Secondly, this initialization already makes the std::map
3011 * object allocate all objects it may possibly need. This is important
3012 * since we will write into this structure from parallel threads,
3013 * and doing so would not be thread-safe if the map needed to allocate
3014 * memory and thereby reshape its data structures. In other words, the
3015 * up-front initialization relieves us of the need to synchronize
3016 * the threads through a mutex each time they write to (and modify the
3017 * structure of) this map.
3020 * FaceIntegrals face_integrals;
3021 * for (const auto &cell :
3022 * DualSolver<dim>::dof_handler.active_cell_iterators())
3023 * for (const auto &face : cell->face_iterators())
3024 * face_integrals[face] = -1e20;
3026 * auto worker = [this,
3027 * &error_indicators,
3028 * &face_integrals](const active_cell_iterator & cell,
3029 * WeightedResidualScratchData &scratch_data,
3030 * WeightedResidualCopyData & copy_data) {
3031 * this->estimate_on_one_cell(
3032 * cell, scratch_data, copy_data, error_indicators, face_integrals);
3035 * auto do_nothing_copier =
3036 * std::function<void(const WeightedResidualCopyData &)>();
3041 * estimators for all cells in parallel:
3045 * DualSolver<dim>::dof_handler.begin_active(),
3046 * DualSolver<dim>::dof_handler.end(),
3048 * do_nothing_copier,
3049 * WeightedResidualScratchData(*DualSolver<dim>::fe,
3050 * *DualSolver<dim>::quadrature,
3051 * *DualSolver<dim>::face_quadrature,
3052 * *this->rhs_function,
3055 * WeightedResidualCopyData());
3059 * Once the error contributions are computed, sum them up. For this,
3060 * note that the cell terms are already set, and that only the edge
3061 * terms need to be collected. Thus, loop over all cells and their
3062 * faces, make sure that the contributions of each of the faces are
3063 * there, and add them up. Only take minus one half of the jump term,
3064 * since the other half will be taken by the neighboring cell.
3067 * unsigned int present_cell = 0;
3068 * for (const auto &cell :
3069 * DualSolver<dim>::dof_handler.active_cell_iterators())
3071 * for (const auto &face : cell->face_iterators())
3073 * Assert(face_integrals.find(face) != face_integrals.end(),
3074 * ExcInternalError());
3075 * error_indicators(present_cell) -= 0.5 * face_integrals[face];
3079 * std::cout << " Estimated error="
3080 * << std::accumulate(error_indicators.begin(),
3081 * error_indicators.end(),
3090 * <a name="Estimatingonasinglecell"></a>
3091 * <h4>Estimating on a single cell</h4>
3095 * Next we have the function that is called to estimate the error on a
3096 * single cell. The function may be called multiple times if the library was
3097 * configured to use multithreading. Here it goes:
3100 * template <int dim>
3101 * void WeightedResidual<dim>::estimate_on_one_cell(
3102 * const active_cell_iterator & cell,
3103 * WeightedResidualScratchData &scratch_data,
3104 * WeightedResidualCopyData & copy_data,
3106 * FaceIntegrals & face_integrals) const
3110 * Because of WorkStream, estimate_on_one_cell requires a CopyData object
3111 * even if it is not used. The next line silences a warning about this unused variable.
3119 * The first task on each cell is to compute the cell residual
3120 * contributions of this cell, and put them into the
3121 * <code>error_indicators</code> variable:
3124 * integrate_over_cell(cell,
3125 * scratch_data.primal_solution,
3126 * scratch_data.dual_weights,
3127 * scratch_data.cell_data,
3128 * error_indicators);
3132 * After computing the cell terms, turn to the face terms. For this,
3133 * loop over all faces of the present cell, and see whether
3134 * something needs to be computed on it:
3137 * for (const auto face_no : cell->face_indices())
3141 * First, if this face is part of the boundary, then there is
3142 * nothing to do. However, to make things easier when summing up
3143 * the contributions of the faces of cells, we enter this face
3144 * into the list of faces with a zero contribution to the error.
3147 * if (cell->face(face_no)->at_boundary())
3149 * face_integrals[cell->face(face_no)] = 0;
3155 * Next, note that since we want to compute the jump terms on
3156 * each face only once although we access it twice (if it is not
3157 * at the boundary), we have to define some rules about who is
3158 * responsible for computing on a face:
3162 * First, if the neighboring cell is on the same level as this
3163 * one, i.e. neither further refined nor coarser, then the one
3164 * with the lower index within this level does the work. In
3165 * other words: if the other one has a lower index, then skip
3166 * work on this face:
3169 * if ((cell->neighbor(face_no)->has_children() == false) &&
3170 * (cell->neighbor(face_no)->level() == cell->level()) &&
3171 * (cell->neighbor(face_no)->index() < cell->index()))
3176 * Likewise, we always work from the coarser cell if this and
3177 * its neighbor differ in refinement. Thus, if the neighboring
3178 * cell is less refined than the present one, then do nothing
3179 * since we integrate over the subfaces when we visit the coarse
3183 * if (cell->at_boundary(face_no) == false)
3184 * if (cell->neighbor(face_no)->level() < cell->level())
3190 * Now we know that we are in charge here, so actually compute
3191 * the face jump terms. If the face is a regular one, i.e. the
3192 * other side's cell is neither coarser nor finer than this
3193 * cell, then call one function, and if the cell on the other
3194 * side is further refined, then use another function. Note that
3195 * the case that the cell on the other side is coarser cannot
3196 * happen since we have decided above that we handle this case
3197 * when we pass over that other cell.
3200 * if (cell->face(face_no)->has_children() == false)
3201 * integrate_over_regular_face(cell,
3203 * scratch_data.primal_solution,
3204 * scratch_data.dual_weights,
3205 * scratch_data.face_data,
3208 * integrate_over_irregular_face(cell,
3210 * scratch_data.primal_solution,
3211 * scratch_data.dual_weights,
3212 * scratch_data.face_data,
3221 * <a name="Computingcelltermerrorcontributions"></a>
3222 * <h4>Computing cell term error contributions</h4>
3226 * As for the actual computation of the error contributions, first turn to the cell terms:
3230 * template <int dim>
3231 * void WeightedResidual<dim>::integrate_over_cell(
3232 * const active_cell_iterator &cell,
3233 * const Vector<double> & primal_solution,
3234 * const Vector<double> & dual_weights,
3235 * CellData & cell_data,
3236 * Vector<float> & error_indicators) const
3240 * The tasks to be done are what appears natural from looking at the
3241 * error estimation formula: first get the right hand side and Laplacian
3242 * of the numerical solution at the quadrature points for the cell residual:
3246 * cell_data.fe_values.reinit(cell);
3247 * cell_data.right_hand_side->value_list(
3248 * cell_data.fe_values.get_quadrature_points(), cell_data.rhs_values);
3249 * cell_data.fe_values.get_function_laplacians(primal_solution,
3250 * cell_data.cell_laplacians);
3254 * ...then get the dual weights...
3257 * cell_data.fe_values.get_function_values(dual_weights,
3258 * cell_data.dual_weights);
3262 * ...and finally build the sum over all quadrature points and store it
3263 * with the present cell:
3267 * for (unsigned int p = 0; p < cell_data.fe_values.n_quadrature_points; ++p)
3268 * sum += ((cell_data.rhs_values[p] + cell_data.cell_laplacians[p]) *
3269 * cell_data.dual_weights[p] * cell_data.fe_values.JxW(p));
3270 * error_indicators(cell->active_cell_index()) += sum;
3277 * <a name="Computingedgetermerrorcontributions1"></a>
3278 * <h4>Computing edge term error contributions -- 1</h4>
3282 * On the other hand, computation of the edge terms for the error estimate
3283 * is not so simple. First, we have to distinguish between faces with and
3284 * without hanging nodes. Because it is the simple case, we first consider
3285 * the case without hanging nodes on a face (let's call this the `regular' case).
3289 * template <int dim>
3290 * void WeightedResidual<dim>::integrate_over_regular_face(
3291 * const active_cell_iterator &cell,
3292 * const unsigned int face_no,
3293 * const Vector<double> & primal_solution,
3294 * const Vector<double> & dual_weights,
3295 * FaceData & face_data,
3296 * FaceIntegrals & face_integrals) const
3298 * const unsigned int n_q_points =
3299 * face_data.fe_face_values_cell.n_quadrature_points;
3303 * The first step is to get the values of the gradients at the
3304 * quadrature points of the finite element field on the present
3305 * cell. For this, initialize the <code>FEFaceValues</code> object
3306 * corresponding to this side of the face, and extract the gradients
3307 * using that object.
3310 * face_data.fe_face_values_cell.reinit(cell, face_no);
3311 * face_data.fe_face_values_cell.get_function_gradients(
3312 * primal_solution, face_data.cell_grads);
3316 * The second step is then to extract the gradients of the finite
3317 * element solution at the quadrature points on the other side of the
3318 * face, i.e. from the neighboring cell.
3322 * For this, do a sanity check before: make sure that the neighbor
3323 * actually exists (yes, we should not have come here if the neighbor
3324 * did not exist, but in complicated software there are bugs, so better
3325 * check this), and if this is not the case throw an error.
3328 * Assert(cell->neighbor(face_no).state() == IteratorState::valid,
3329 * ExcInternalError());
3332 * If we have that, then we need to find out with which face of the
3333 * neighboring cell we have to work, i.e. the <code>how-many'th</code>
3334 * neighbor the present cell is of the cell behind the present face. For
3335 * this, there is a function, and we put the result into a variable with
3336 * the name <code>neighbor_neighbor</code>:
3339 * const unsigned int neighbor_neighbor =
3340 * cell->neighbor_of_neighbor(face_no);
3343 * Then define an abbreviation for the neighbor cell, initialize the
3348 * const active_cell_iterator neighbor = cell->neighbor(face_no);
3349 * face_data.fe_face_values_neighbor.reinit(neighbor, neighbor_neighbor);
3350 * face_data.fe_face_values_neighbor.get_function_gradients(
3351 * primal_solution, face_data.neighbor_grads);
3355 * Now that we have the gradients on this and the neighboring cell,
3356 * compute the jump residual by multiplying the jump in the gradient
3357 * with the normal vector:
3360 * for (unsigned int p = 0; p < n_q_points; ++p)
3361 * face_data.jump_residual[p] =
3362 * ((face_data.cell_grads[p] - face_data.neighbor_grads[p]) *
3363 * face_data.fe_face_values_cell.normal_vector(p));
3367 * Next get the dual weights for this face:
3370 * face_data.fe_face_values_cell.get_function_values(dual_weights,
3371 * face_data.dual_weights);
3375 * Finally, we have to compute the sum over jump residuals, dual
3376 * weights, and quadrature weights, to get the result for this face:
3379 * double face_integral = 0;
3380 * for (unsigned int p = 0; p < n_q_points; ++p)
3382 * (face_data.jump_residual[p] * face_data.dual_weights[p] *
3383 * face_data.fe_face_values_cell.JxW(p));
3387 * Double check that the element already exists and that it was not
3388 * already written to...
3391 * Assert(face_integrals.find(cell->face(face_no)) != face_integrals.end(),
3392 * ExcInternalError());
3393 * Assert(face_integrals[cell->face(face_no)] == -1e20, ExcInternalError());
3397 * ...then store the computed value at the assigned location. Note that the
3398 * stored value does not contain the factor 1/2 that appears in the
3399 * error representation. The reason is that this factor
3401 * only appears if we write the error as a sum over all cells and all faces of
3402 * each cell; we thus visit the same face twice. We take account of this
3403 * by using the factor -1/2 later, when we sum up the contributions for
3404 * each cell individually.
3407 * face_integrals[cell->face(face_no)] = face_integral;
3414 * <a name="Computingedgetermerrorcontributions2"></a>
3415 * <h4>Computing edge term error contributions -- 2</h4>
3419 * We are still missing the case of faces with hanging nodes. This is what
3420 * is covered in this function:
3423 * template <int dim>
3424 * void WeightedResidual<dim>::integrate_over_irregular_face(
3425 * const active_cell_iterator &cell,
3426 * const unsigned int face_no,
3429 * FaceData & face_data,
3430 * FaceIntegrals & face_integrals) const
3434 * First, again two abbreviations, and some consistency checks on whether
3435 * the function is called only on faces for which it is supposed to be called:
3439 * const unsigned int n_q_points =
3440 * face_data.fe_face_values_cell.n_quadrature_points;
3444 * cell->neighbor(face_no);
3446 * Assert(neighbor->has_children(), ExcInternalError());
3451 * Then find out which neighbor the present cell is of the adjacent
3452 * cell. Note that we will operate on the children of this adjacent
3453 * cell, but that their orientation is the same as that of their mother,
3454 * i.e. the neighbor direction is the same.
3457 * const unsigned int neighbor_neighbor =
3458 * cell->neighbor_of_neighbor(face_no);
3462 * Then simply do everything we did in the previous function for one
3463 * face for all the sub-faces now:
3466 * for (unsigned int subface_no = 0; subface_no < face->n_children();
3471 * Start with some checks again: get an iterator pointing to the
3472 * cell behind the present subface and check whether its face is a
3473 * subface of the one we are considering. If that were not the case,
3474 * then there would be either a bug in the
3475 * <code>neighbor_neighbor</code> function called above, or -- worse
3476 * -- some function in the library did not keep to some underlying
3477 * assumptions about cells, their children, and their faces. In any
3478 * case, even though this assertion should not be triggered, it does
3479 * not harm to be cautious, and in optimized mode computations the
3480 * assertion will be removed anyway.
3483 * const active_cell_iterator neighbor_child =
3484 * cell->neighbor_child_on_subface(face_no, subface_no);
3485 * Assert(neighbor_child->face(neighbor_neighbor) ==
3486 * cell->face(face_no)->child(subface_no),
3487 * ExcInternalError());
3491 * Now start the work by again getting the gradient of the solution
3492 * first at this side of the interface,
3495 * face_data.fe_subface_values_cell.reinit(cell, face_no, subface_no);
3496 * face_data.fe_subface_values_cell.get_function_gradients(
3497 * primal_solution, face_data.cell_grads);
3500 * then at the other side,
3503 * face_data.fe_face_values_neighbor.reinit(neighbor_child,
3504 * neighbor_neighbor);
3505 * face_data.fe_face_values_neighbor.get_function_gradients(
3506 * primal_solution, face_data.neighbor_grads);
3510 * and finally building the jump residuals. Since we take the normal
3511 * vector from the other cell this time, reverse the sign of the
3512 * first term compared to the other function:
3515 * for (unsigned int p = 0; p < n_q_points; ++p)
3516 * face_data.jump_residual[p] =
3517 * ((face_data.neighbor_grads[p] - face_data.cell_grads[p]) *
3518 * face_data.fe_face_values_neighbor.normal_vector(p));
3522 * Then get dual weights:
3525 * face_data.fe_face_values_neighbor.get_function_values(
3526 * dual_weights, face_data.dual_weights);
3530 * At last, sum up the contribution of this sub-face, and set it in the map:
3534 * double face_integral = 0;
3535 * for (unsigned int p = 0; p < n_q_points; ++p)
3537 * (face_data.jump_residual[p] * face_data.dual_weights[p] *
3538 * face_data.fe_face_values_neighbor.JxW(p));
3539 * face_integrals[neighbor_child->face(neighbor_neighbor)] =
3545 * Once the contributions of all sub-faces are computed, loop over all
3546 * sub-faces to collect and store them with the mother face for simple
3547 * use when later collecting the error terms of cells. Again make safety
3548 * checks that the entries for the sub-faces have been computed and do not carry an invalid value.
3553 * for (unsigned int subface_no = 0; subface_no < face->n_children();
3556 * Assert(face_integrals.find(face->child(subface_no)) !=
3557 * face_integrals.end(),
3558 * ExcInternalError());
3559 * Assert(face_integrals[face->child(subface_no)] != -1e20,
3560 * ExcInternalError());
3562 * sum += face_integrals[face->child(subface_no)];
3566 * Finally store the value with the parent face.
3569 * face_integrals[face] = sum;
3578 * <a name="Asimulationframework"></a>
3579 * <h3>A simulation framework</h3>
3583 * In the previous example program, we had two functions that were used
3584 * to drive the process of solving on successively finer grids. We extend
3585 * this here to allow for a number of parameters to be passed to these
3586 * functions, and put all of that into a framework class.
3590 * You will have noted that this program is built up of a number of small
3591 * parts (evaluation functions, solver classes implementing various
3592 * refinement methods, different dual functionals, different problem and
3593 * data descriptions), which makes the program relatively simple to extend,
3594 * but also allows solving a large number of different problems by
3595 * replacing one part by another. We reflect this flexibility by declaring a
3596 * structure in the following framework class that holds a number of
3597 * parameters that may be set to test various combinations of the parts of
3598 * this program, and which can be used to test it on various problems and
3599 * discretizations in a simple way.
3602 * template <int dim>
3608 * First, we declare two abbreviations for simple use of the respective data types:
3612 * using Evaluator = Evaluation::EvaluationBase<dim>;
3613 * using EvaluatorList = std::list<Evaluator *>;
3618 * Then we have the structure which declares all the parameters that may
3619 * be set. In the default constructor of the structure, these values are
3620 * all set to default values, for simple use.
3623 * struct ProblemDescription
3627 * First allow for the degrees of the piecewise polynomials by which the
3628 * primal and dual problems will be discretized. They default to (bi-,
3629 * tri-)linear ansatz functions for the primal, and (bi-, tri-)quadratic
3630 * ones for the dual problem. If a refinement criterion is chosen that
3631 * does not need the solution of a dual problem, the value of the dual
3632 * finite element degree is of course ignored.
3635 * unsigned int primal_fe_degree;
3636 * unsigned int dual_fe_degree;
3640 * Then have an object that describes the problem type, i.e. right hand
3641 * side, domain, boundary values, etc. The pointer needed here defaults
3642 * to the null pointer, i.e. you will have to set it in actual instances
3643 * of this object to make it useful.
3646 * std::unique_ptr<const Data::SetUpBase<dim>> data;
3650 * Since we allow the use of different refinement criteria (global
3651 * refinement, refinement by the Kelly error indicator, possibly with a
3652 * weight, and using the dual estimator), define a number of enumeration
3653 * values, and subsequently a variable of that type. It will default to
3654 * <code>dual_weighted_error_estimator</code>.
3657 * enum RefinementCriterion
3659 * dual_weighted_error_estimator,
3660 * global_refinement,
3662 * weighted_kelly_indicator
3665 * RefinementCriterion refinement_criterion;
3669 * Next, an object that describes the dual functional. It is only needed
3670 * if the dual weighted residual refinement is chosen, and also defaults
3671 * to a null pointer.
3674 * std::unique_ptr<const DualFunctional::DualFunctionalBase<dim>>
3679 * Then a list of evaluation objects. Its default value is empty,
3680 * i.e. no evaluation objects.
3683 * EvaluatorList evaluator_list;
3687 * Next to last, a function that is used as a weight to the
3688 * <code>RefinementWeightedKelly</code> class. The default value of this
3689 * pointer is zero, but you have to set it to some other value if you
3690 * want to use the <code>weighted_kelly_indicator</code> refinement criterion.
3694 * std::unique_ptr<const Function<dim>> kelly_weight;
3698 * Finally, we have a variable that denotes the maximum number of
3699 * degrees of freedom we allow for the (primal) discretization. If it is
3700 * exceeded, we stop the process of solving and intermittent mesh
3701 * refinement. Its default value is 20,000.
3704 * unsigned int max_degrees_of_freedom;
3708 * Finally the default constructor of this class:
3711 * ProblemDescription();
3716 * The driver framework class only has one method which calls solver and
3717 * mesh refinement intermittently, and does some other small tasks in
3718 * between. Since it does not need data besides the parameters given to
3719 * it, we make it static:
3722 * static void run(const ProblemDescription &descriptor);
3728 * As for the implementation, first the constructor of the parameter object,
3729 * setting all values to their defaults:
3732 * template <int dim>
3733 * Framework<dim>::ProblemDescription::ProblemDescription()
3734 * : primal_fe_degree(1)
3735 * , dual_fe_degree(2)
3736 * , refinement_criterion(dual_weighted_error_estimator)
3737 * , max_degrees_of_freedom(20000)
3744 * Then the function which drives the whole process:
3747 * template <int dim>
3748 * void Framework<dim>::run(const ProblemDescription &descriptor)
3761 * then a set of finite elements and appropriate quadrature formulas:
3764 * const FE_Q<dim> primal_fe(descriptor.primal_fe_degree);
3765 * const FE_Q<dim> dual_fe(descriptor.dual_fe_degree);
3766 * const QGauss<dim> quadrature(descriptor.dual_fe_degree + 1);
3767 * const QGauss<dim - 1> face_quadrature(descriptor.dual_fe_degree + 1);
3771 * Next, select one of the classes implementing different refinement criteria:
3775 * std::unique_ptr<LaplaceSolver::Base<dim>> solver;
3776 * switch (descriptor.refinement_criterion)
3778 * case ProblemDescription::dual_weighted_error_estimator:
3780 * solver = std::make_unique<LaplaceSolver::WeightedResidual<dim>>(
3786 * descriptor.data->get_right_hand_side(),
3787 * descriptor.data->get_boundary_values(),
3788 * *descriptor.dual_functional);
3792 * case ProblemDescription::global_refinement:
3794 * solver = std::make_unique<LaplaceSolver::RefinementGlobal<dim>>(
3799 * descriptor.data->get_right_hand_side(),
3800 * descriptor.data->get_boundary_values());
3804 * case ProblemDescription::kelly_indicator:
3806 * solver = std::make_unique<LaplaceSolver::RefinementKelly<dim>>(
3811 * descriptor.data->get_right_hand_side(),
3812 * descriptor.data->get_boundary_values());
3816 * case ProblemDescription::weighted_kelly_indicator:
3819 * std::make_unique<LaplaceSolver::RefinementWeightedKelly<dim>>(
3824 * descriptor.data->get_right_hand_side(),
3825 * descriptor.data->get_boundary_values(),
3826 * *descriptor.kelly_weight);
3836 * Now that all objects are in place, run the main loop. The stopping
3837 * criterion is implemented at the bottom of the loop.
3841 * In the loop, first set the new cycle number, then solve the problem,
3842 * output its solution(s), apply the evaluation objects to it, then decide
3843 * whether we want to refine the mesh further and solve again on this
3844 * mesh, or jump out of the loop.
3847 * for (unsigned int step = 0; true; ++step)
3849 * std::cout << "Refinement cycle: " << step << std::endl;
3851 * solver->set_refinement_cycle(step);
3852 * solver->solve_problem();
3853 * solver->output_solution();
3855 * std::cout << " Number of degrees of freedom=" << solver->n_dofs()
3858 * for (const auto &evaluator : descriptor.evaluator_list)
3860 * evaluator->set_refinement_cycle(step);
3861 * solver->postprocess(*evaluator);
3865 * if (solver->n_dofs() < descriptor.max_degrees_of_freedom)
3866 * solver->refine_grid();
3873 * Clean up the screen after the loop has run:
3876 * std::cout << std::endl;
3886 * <a name="Themainfunction"></a>
3887 * <h3>The main function</h3>
3891 * Here finally comes the main function. It drives the whole process by
3892 * specifying a set of parameters to be used for the simulation (polynomial
3893 * degrees, evaluation and dual functionals, etc), and passes them packed into
3894 * a structure to the framework class above.
3901 * using namespace Step14;
3905 * Describe the problem we want to solve here by passing a descriptor
3906 * object to the function doing the rest of the work:
3909 * const unsigned int dim = 2;
3910 * Framework<dim>::ProblemDescription descriptor;
3914 * First set the refinement criterion we wish to use:
3917 * descriptor.refinement_criterion =
3918 * Framework<dim>::ProblemDescription::dual_weighted_error_estimator;
3921 * Here, we could as well have used <code>global_refinement</code> or
3922 * <code>weighted_kelly_indicator</code>. Note that the information
3923 * given about dual finite elements, dual functional, etc is only
3924 * important for the given choice of refinement criterion, and is
3925 * ignored otherwise.
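For example, switching to global refinement presumably only requires changing this one assignment:
@code
descriptor.refinement_criterion =
  Framework<dim>::ProblemDescription::global_refinement;
@endcode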
3929 * Then set the polynomial degrees of primal and dual problem. We choose
3930 * here bi-linear and bi-quadratic ones:
3933 * descriptor.primal_fe_degree = 1;
3934 * descriptor.dual_fe_degree = 2;
3938 * Then set the description of the test case, i.e. domain, boundary
3939 * values, and right hand side. These are prepackaged in classes. We
3940 * take here the description of <code>Exercise_2_3</code>, but you can
3941 * also use <code>CurvedRidges@<dim@></code>:
3945 * std::make_unique<Data::SetUp<Data::Exercise_2_3<dim>, dim>>();
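If you want to try the <code>CurvedRidges</code> test case instead, the assignment would presumably read:
@code
descriptor.data =
  std::make_unique<Data::SetUp<Data::CurvedRidges<dim>, dim>>();
@endcode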
3949 * Next set first a dual functional, then a list of evaluation
3950 * objects. We choose as default the evaluation of the value at an
3951 * evaluation point, represented by the classes
3952 * <code>PointValueEvaluation</code> in the namespaces of evaluation and
3953 * dual functional classes. You can also set the
3954 * <code>PointXDerivativeEvaluation</code> classes for the x-derivative
3955 * instead of the value at the evaluation point.
3959 * Note that dual functional and evaluation objects should
3960 * match. However, you can give as many evaluation functionals as you
3961 * want, so you can have both point value and derivative evaluated after
3962 * each step. One such additional evaluation is to output the grid in each step:
3966 * const Point<dim> evaluation_point(0.75, 0.75);
3967 * descriptor.dual_functional =
3968 * std::make_unique<DualFunctional::PointValueEvaluation<dim>>(
3969 * evaluation_point);
3971 * Evaluation::PointValueEvaluation<dim> postprocessor1(evaluation_point);
3972 * Evaluation::GridOutput<dim> postprocessor2("grid");
3974 * descriptor.evaluator_list.push_back(&postprocessor1);
3975 * descriptor.evaluator_list.push_back(&postprocessor2);
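To evaluate the x-derivative instead (as also discussed in the results section below), the two point-value objects above would presumably be replaced by their <code>PointXDerivativeEvaluation</code> counterparts:
@code
descriptor.dual_functional =
  std::make_unique<DualFunctional::PointXDerivativeEvaluation<dim>>(
    evaluation_point);
Evaluation::PointXDerivativeEvaluation<dim> postprocessor1(evaluation_point);
@endcode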
3979 * Set the maximal number of degrees of freedom after which we want the
3980 * program to stop refining the mesh further:
3983 * descriptor.max_degrees_of_freedom = 20000;
3987 * Finally pass the descriptor object to a function that runs the entire solution with it:
3991 * Framework<dim>::run(descriptor);
3996 * Catch exceptions to give information about things that failed:
3999 * catch (std::exception &exc)
4001 * std::cerr << std::endl
4003 * << "----------------------------------------------------"
4005 * std::cerr << "Exception on processing: " << std::endl
4006 * << exc.what() << std::endl
4007 * << "Aborting!" << std::endl
4008 * << "----------------------------------------------------"
4014 * std::cerr << std::endl
4016 * << "----------------------------------------------------"
4018 * std::cerr << "Unknown exception!" << std::endl
4019 * << "Aborting!" << std::endl
4020 * << "----------------------------------------------------"
4028<a name="Results"></a><h1>Results</h1>
4035This program offers a lot of possibilities to play around with. We can thus
4036only show a small part of all possible results that can be obtained
4037with the help of this program. However, you are encouraged to just try
4038it out, by changing the settings in the main program. Here, we start
4039by simply letting it run, unmodified:
4042 Number of degrees of freedom=72
4044 Estimated error=0.000702385
4046 Number of degrees of freedom=67
4048 Estimated error=0.000888953
4050 Number of degrees of freedom=130
4052 Estimated error=0.000454606
4054 Number of degrees of freedom=307
4056 Estimated error=0.000241254
4058 Number of degrees of freedom=718
4060 Estimated error=7.4912e-05
4062 Number of degrees of freedom=1665
4064 Estimated error=3.69111e-05
4066 Number of degrees of freedom=3975
4068 Estimated error=1.54218e-05
4070 Number of degrees of freedom=8934
4072 Estimated error=6.28359e-06
4074 Number of degrees of freedom=21799
4079First let's look at what the program actually computed. On the seventh
4080grid, primal and dual numerical solutions look like this (using a
4081color scheme intended to evoke the snow-capped mountains of
4082Colorado that the original author of this program now calls home):
4084<table align="center">
4087 <img src="https://www.dealii.org/images/steps/developer/step-14.point-value.solution-7.9.2.png" alt="">
4090 <img src="https://www.dealii.org/images/steps/developer/step-14.point-value.solution-7-dual.9.2.png" alt="">
4094Apparently, the region at the bottom left is so unimportant for the
4095point value evaluation at the top right that the grid is left entirely
4096unrefined there, even though the solution has singularities at the inner
4097corner of that cell! Due
4098to the symmetry in right hand side and domain, the solution should
4099actually look like it does at the top right in all four corners, but the mesh
4100refinement criterion involving the dual solution chose to refine them
4101differently -- because we said that we really only care about a single
4102function value somewhere at the top right.
4106Here are some of the meshes that are produced in refinement cycles 0,
41072, 4 (top row), and 5, 7, and 8 (bottom row):
4109<table width="80%" align="center">
4111 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.grid-0.9.2.png" alt="" width="100%"></td>
4112 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.grid-2.9.2.png" alt="" width="100%"></td>
4113 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.grid-4.9.2.png" alt="" width="100%"></td>
4116 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.grid-5.9.2.png" alt="" width="100%"></td>
4117 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.grid-7.9.2.png" alt="" width="100%"></td>
4118 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.grid-8.9.2.png" alt="" width="100%"></td>
4122Note the subtle interplay between resolving the corner singularities,
4123and resolving around the point of evaluation. It will be rather
4124difficult to generate such a mesh by hand, as this would involve judging
4125quantitatively how strongly each of the four corner singularities
4126should be resolved, and how to weight this against the resolution in the vicinity of
4127the evaluation point.
4131The program prints the point value and the estimated error in this
4132quantity. From extrapolating it, we can guess that the exact value is
4133somewhere close to 0.0334473, plus or minus 0.0000001 (note that we get
4134almost 6 valid digits from only 22,000 (primal) degrees of
4135freedom). This number cannot be obtained from the value of the
4136functional alone, but I have used the assumption that the error
4137estimator is mostly exact, and extrapolated the computed value plus
4138the estimated error, to get an approximation of the true
4139value. Computing with more degrees of freedom shows that this
4140assumption is indeed valid.
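In formula form, the extrapolation used here is simply
@f[
  J(u) \approx J(u_h) + \eta_h, \qquad \eta_h = \sum_K \eta_K,
@f]
i.e. the computed functional value corrected by the (signed) sum of the cell-wise error indicators, under the assumption that @f$\eta_h@f$ is a very good approximation of the true error @f$J(u)-J(u_h)@f$.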
4144From the computed results, we can generate two graphs: one that shows
4145the convergence of the error @f$J(u)-J(u_h)@f$ (taking the
4146extrapolated value as correct) in the point value, and the value that
4147we get by adding up the computed value @f$J(u_h)@f$ and the estimated
4148error @f$\eta@f$ (if the error estimator @f$\eta@f$ were exact, then the value
4149@f$J(u_h)+\eta@f$ would equal the exact point value, and the error
4150in this quantity would always be zero; however, since the error
4151estimator is only a - good - approximation to the true error, we can
4152only reduce the size of the error by this). In this graph, we also
4153indicate the complexity @f${\cal O}(1/N)@f$ to show that mesh refinement
4154acts optimally in this case. The second chart compares
4155true and estimated error, and shows that the two are actually very
4156close to each other, even for such a complicated quantity as the point value.
4160<table width="80%" align="center">
4162 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.error.png" alt="" width="100%"></td>
4163 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.error-estimation.png" alt="" width="100%"></td>
4168<a name="Comparingrefinementcriteria"></a><h3>Comparing refinement criteria</h3>
4172Since we have invested quite some effort when using the mesh
4173refinement driven by the dual weighted error estimator (for solving
4174the dual problem, and for evaluating the error representation), it is
4175worthwhile asking whether that effort was successful. To this end, we
4176first compare the achieved error levels for different mesh refinement
4177criteria. To generate this data, simply change the value of the mesh
4178refinement criterion variable in the main program. The results are
4179thus (for the weight in the Kelly indicator, we have chosen the
4180function @f$1/(r^2+0.1^2)@f$, where @f$r@f$
4181is the distance to the evaluation point; it can be shown that this is
4182the optimal weight if we neglect the effects of boundaries):
4184<img src="https://www.dealii.org/images/steps/developer/step-14.point-value.error-comparison.png" alt="">
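(As an aside: a weight of this shape could be implemented as a small <code>Function</code> subclass along the following lines. This is a sketch, the class name is made up, and an instance of it would be assigned to <code>descriptor.kelly_weight</code>.)
@code
#include <deal.II/base/function.h>
#include <deal.II/base/point.h>

using namespace dealii;

// Hypothetical weight function 1/(r^2 + 0.1^2), with r the distance to the
// evaluation point.
template <int dim>
class DistanceWeight : public Function<dim>
{
public:
  explicit DistanceWeight(const Point<dim> &evaluation_point)
    : evaluation_point(evaluation_point)
  {}

  virtual double value(const Point<dim> &p,
                       const unsigned int /*component*/ = 0) const override
  {
    const double r = p.distance(evaluation_point);
    return 1. / (r * r + 0.1 * 0.1);
  }

private:
  const Point<dim> evaluation_point;
};
@endcode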
4188Checking these numbers, we see that for global refinement, the error
4189is proportional to @f$O(1/(\sqrt{N}\log N))@f$, and for the dual
4190estimator @f$O(1/N)@f$. Generally speaking, we see that the dual
4191weighted error estimator is better than the other refinement
4192indicators, at least when compared with those that have a similarly
4193regular behavior. The Kelly indicator produces smaller errors, but
4194jumps about the picture rather irregularly, with the error also
4195changing signs sometimes. Therefore, its behavior does not allow
4196extrapolating the results to larger values of @f$N@f$. Furthermore, if we
4197trust the error estimates of the dual weighted error estimator, the
4198results can be improved by adding the estimated error to the computed
4199values. In terms of reliability, the weighted estimator is thus better
4200than the Kelly indicator, although the latter sometimes produces
4205<a name="Evaluationofpointstresses"></a><h3>Evaluation of point stresses</h3>
4209Besides evaluating the values of the solution at a certain point, the
4210program also offers the possibility to evaluate the x-derivatives at a
4211certain point, and also to tailor mesh refinement for this. To let the
4212program compute these quantities, simply replace the two occurrences of
4213<code>PointValueEvaluation</code> in the main function by
4214<code>PointXDerivativeEvaluation</code>, and let the program run:
4217 Number of degrees of freedom=72
4218 Point x-derivative=-0.0719397
4219 Estimated error=-0.0126173
4221 Number of degrees of freedom=61
4222 Point x-derivative=-0.0707956
4223 Estimated error=-0.00774316
4225 Number of degrees of freedom=131
4226 Point x-derivative=-0.0568671
4227 Estimated error=-0.00313426
4229 Number of degrees of freedom=247
4230 Point x-derivative=-0.053033
4231 Estimated error=-0.00136114
4233 Number of degrees of freedom=532
4234 Point x-derivative=-0.0526429
4235 Estimated error=-0.000558868
4237 Number of degrees of freedom=1267
4238 Point x-derivative=-0.0526955
4239 Estimated error=-0.000220116
4241 Number of degrees of freedom=2864
4242 Point x-derivative=-0.0527495
4243 Estimated error=-9.46731e-05
4245 Number of degrees of freedom=6409
4246 Point x-derivative=-0.052785
4247 Estimated error=-4.21543e-05
4249 Number of degrees of freedom=14183
4250 Point x-derivative=-0.0528028
4251 Estimated error=-2.04241e-05
4253 Number of degrees of freedom=29902
4254 Point x-derivative=-0.052814
4259The solution looks roughly the same as before (the exact solution of
4260course <em>is</em> the same, only the grid changed a little), but the
4261dual solution is now different. A close-up around the point of
4262evaluation shows this:
4263<table align="center">
4266 <img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.solution-7-dual.png" alt="">
4269 <img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.solution-7-dual-close-up.png" alt="">
4272This time, the grids in refinement cycles 0, 5, 6, 7, 8, and 9 look like this:
4275<table align="center" width="80%">
4277 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.grid-0.9.2.png" alt="" width="100%"></td>
4278 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.grid-5.9.2.png" alt="" width="100%"></td>
4279 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.grid-6.9.2.png" alt="" width="100%"></td>
4282 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.grid-7.9.2.png" alt="" width="100%"></td>
4283 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.grid-8.9.2.png" alt="" width="100%"></td>
4284 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.grid-9.9.2.png" alt="" width="100%"></td>
4288Note the asymmetry of the grids compared with those we obtained for
4289the point evaluation. This is due to the fact that the domain and the primal
4290solution may be symmetric about the diagonal, but the @f$x@f$-derivative is
4291not, and the latter enters the refinement criterion.
4295Then, it is interesting to compare actually computed values of the
4296quantity of interest (i.e. the x-derivative of the solution at one
4297point) with a reference value of -0.0528223... plus or minus
42980.0000005. We get this reference value by computing on a finer grid after
4299some more mesh refinements, with approximately 130,000 cells.
4300Recall that if the error is @f$O(1/N)@f$ in the optimal case, then
4301taking a mesh with ten times more cells gives us one additional digit
4306In the left part of the following chart, you again see the convergence
4307of the error towards this extrapolated value, while on the right you
4308see a comparison of true and estimated error:
4310<table width="80%" align="center">
4312 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.error.png" alt="" width="100%"></td>
4313 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.error-estimation.png" alt="" width="100%"></td>
4317After an initial phase where the true error changes its sign, the
4318estimated error matches it quite well, again. Also note the dramatic
4319improvement in the error when using the estimated error to correct the
4320computed value of @f$J(u_h)@f$.
4324<a name="step13revisited"></a><h3>step-13 revisited</h3>
4328If instead of the <code>Exercise_2_3</code> data set, we choose
4329<code>CurvedRidges</code> in the main function, and choose @f$(0.5,0.5)@f$
4330as the evaluation point, then we can redo the
4331computations of the previous example program, to compare whether the
4332results obtained with the help of the dual weighted error estimator
4333are better than those we had previously.
4337First, the meshes after 9 adaptive refinement cycles obtained with
4338the point evaluation and derivative evaluation refinement
4339criteria, respectively, look like this:
4341<table width="80%" align="center">
4343 <td><img src="https://www.dealii.org/images/steps/developer/step-14.step-13.point-value.png" alt="" width="100%"></td>
4344 <td><img src="https://www.dealii.org/images/steps/developer/step-14.step-13.point-derivative.png" alt="" width="100%"></td>
4348The features of the solution can still be seen in the mesh, but since the
4349solution is smooth, the singularities of the dual solution entirely
4350dominate the mesh refinement criterion, and lead to strongly
4351concentrated meshes. The solution after the seventh refinement step looks
4354<table width="40%" align="center">
4356 <td><img src="https://www.dealii.org/images/steps/developer/step-14.step-13.solution-7.9.2.png" alt="" width="100%"></td>
4360Obviously, the solution is worse at some places, but the mesh
4361refinement process should have taken care that these places are not
4362important for computing the point value.
4367The next point is to compare the new (duality based) mesh refinement
4368criterion with the old ones. These are the results:
4370<img src="https://www.dealii.org/images/steps/developer/step-14.step-13.error-comparison.png" alt="">
4374The results are, well, somewhat mixed. First, the Kelly indicator
4375disqualifies itself by its unsteady behavior, changing the sign of the
4376error several times, and with increasing errors under mesh
4377refinement. The dual weighted error estimator has a monotone decrease
4378in the error, and is better than the weighted Kelly and global
4379refinement, but the margin is not as large as expected. This is, here,
4380due to the fact that global refinement can exploit the regular
4381structure of the meshes around the point of evaluation, which leads to
4382a better order of convergence for the point error. However, if we had
4383a mesh that is not locally rectangular, for example because we had to
4384approximate curved boundaries, or if the coefficients were not
4385constant, then this advantage of globally refined meshes would
4386vanish, while the good performance of the duality based estimator
4392<a name="Conclusionsandoutlook"></a><h3>Conclusions and outlook</h3>
4396The results here do not indicate very clearly the superiority of the
4397dual weighted error estimation approach for mesh refinement over other
4398mesh refinement criteria, such as the Kelly indicator. This is due to
4399the relative simplicity of the shown applications. If you are not
4400convinced yet that this approach is indeed superior, you are invited
4401to browse through the literature indicated in the introduction, where
4402plenty of examples are provided where the dual weighted approach can
4403reduce the necessary numerical work by orders of magnitude, making
4404this the only way to compute certain quantities to reasonable
4409Besides the objections you may raise against its use as a mesh
4410refinement criterion, consider that accurate knowledge of the error in
4411the quantity one might want to compute is of great use, since we can
4412stop computations when we are satisfied with the accuracy. Using more
4413traditional approaches, it is very difficult to get accurate estimates
4414for arbitrary quantities, except for, maybe, the error in the energy
4415norm, and we will then have no guarantee that the result we computed
4416satisfies any requirements on its accuracy. Also, as was shown for the
4417evaluation of point values and derivatives, the error estimate can be
4418used to extrapolate the results, yielding much higher accuracy in the
4419quantity we want to know.
4423Leaving these mathematical considerations, we tried to write the
4424program in a modular way, such that implementing another test case, or
4425another evaluation and dual functional is simple. You are encouraged
4426to take the program as a basis for your own experiments, and to play a bit with it.
4430<a name="PlainProg"></a>
4431<h1> The plain program</h1>
4432@include "step-14.cc"