596 *       for (unsigned int i = 0; i < this->n_source_centers; ++i)
600 *           std::exp(-x_minus_xi.norm_square() / (this->width * this->width));
609 *                          const unsigned int = 0) const override
612 *       for (unsigned int i = 0; i < this->n_source_centers; ++i)
617 *             (-2 / (this->width * this->width) *
618 *              std::exp(-x_minus_xi.norm_square() / (this->width * this->width)) *
631 * This class implements a function where the scalar solution and its negative
632 * gradient are collected together. This function is used when computing the
633 * error of the HDG approximation and its implementation is to simply call
638 *   class SolutionAndGradient : public Function<dim>, protected SolutionBase<dim>
641 *     SolutionAndGradient()
649 *       Solution<dim> solution;
651 *       for (unsigned int d = 0; d < dim; ++d)
653 *       v[dim] = solution.value(p);
661 * Next comes the implementation of the convection velocity. As described in
662 * the introduction, we choose a velocity field that is @f$(y, -x)@f$ in 2d and
663 * @f$(y, -x, 1)@f$ in 3d. This gives a divergence-free velocity field.
670 * ConvectionVelocity()
683 * convection[0] = p[1];
684 * convection[1] = -p[0];
687 * convection[0] = p[1];
688 * convection[1] = -p[0];
702 * The last function we implement is the right hand side for the
703 * manufactured solution. It is very similar to @ref step_7 "step-7", with the
704 * exception that we now have a convection term instead of the reaction term. Since
705 * the velocity field is incompressible, i.e., @f$\nabla \cdot \mathbf{c} = 0@f$,
706 * the advection term simply reads @f$\mathbf{c} \nabla u@f$.
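For reference, the expression assembled in <code>RightHandSide::value()</code>
below can be derived directly from the Gaussian source ansatz of SolutionBase
(a short derivation added here for clarity): with
@f$u(\mathbf x) = \sum_i \exp(-\|\mathbf x - \mathbf x_i\|^2/w^2)@f$ one obtains
@f[
  f = -\Delta u + \mathbf c \cdot \nabla u
    = \sum_i \frac{2\,d - 2\,\mathbf c\cdot(\mathbf x - \mathbf x_i)
                   - 4\,\|\mathbf x - \mathbf x_i\|^2/w^2}{w^2}
      \, e^{-\|\mathbf x - \mathbf x_i\|^2/w^2},
@f]
where @f$d@f$ is the space dimension and @f$w@f$ the width parameter.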
710 *   class RightHandSide : public Function<dim>, protected SolutionBase<dim>
714 *                          const unsigned int = 0) const override
716 * ConvectionVelocity<dim> convection_velocity;
719 *       for (unsigned int i = 0; i < this->n_source_centers; ++i)
724 *           ((2 * dim - 2 * convection * x_minus_xi -
725 *             4 * x_minus_xi.norm_square() / (this->width * this->width)) /
726 *            (this->width * this->width) *
727 *            std::exp(-x_minus_xi.norm_square() / (this->width * this->width)));
740 * <a name="step_51-TheHDGsolverclass"></a>
741 * <h3>The HDG solver class</h3>
745 * The HDG solution procedure follows closely that of @ref step_7 "step-7". The major
746 * difference is the use of three different sets of DoFHandler and FE objects, along with the corresponding matrices and solution
748 * vectors. We also use WorkStream to enable a multithreaded local solution
749 * process which exploits the embarrassingly parallel nature of the local
750 * solver. For WorkStream, we define the local operations on a cell and a
751 * copy function into the global matrix and vector. We do this both for the
752 * assembly (which is run twice, once when we generate the system matrix and
753 * once when we compute the element-interior solutions from the skeleton
754 * values) and for the postprocessing where we extract a solution that
755 * converges at higher order.
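As a reminder of this pattern, here is a minimal sketch of the WorkStream call
that appears in assemble_system() below, using the names of this program:
@code{.cc}
// The worker runs concurrently on cells; the copier runs serialized
// in cell order so that global writes do not race.
ScratchData scratch(/* FE objects, quadratures, update flags */);
PerTaskData task_data(fe.n_dofs_per_cell(), trace_reconstruct);
WorkStream::run(dof_handler.begin_active(),
                dof_handler.end(),
                *this,
                &HDG<dim>::assemble_system_one_cell, // cell-local work
                &HDG<dim>::copy_local_to_global,     // global writes
                scratch,
                task_data);
@endcode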
762 *     enum RefinementMode
765 *       adaptive_refinement
768 *     HDG(const unsigned int degree, const RefinementMode refinement_mode);
772 *     void setup_system();
773 *     void assemble_system(const bool reconstruct_trace = false);
775 *     void postprocess();
776 *     void refine_grid(const unsigned int cycle);
777 *     void output_results(const unsigned int cycle);
781 * Data for the assembly and solution of the primal variables.
784 *     struct PerTaskData;
785 *     struct ScratchData;
789 * Post-processing the solution to obtain @f$u^*@f$ is an element-by-element
790 * procedure; as such, we do not need to assemble any global data and do
791 * not declare any 'task data' for WorkStream to use.
794 *     struct PostProcessScratchData;
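A minimal sketch of what such a copier-free WorkStream call looks like (the
full call appears in postprocess() below; the empty std::function is the
"no copier" idiom, and the trailing unsigned int is unused per-task data):
@code{.cc}
WorkStream::run(dof_handler_u_post.begin_active(),
                dof_handler_u_post.end(),
                [this](const typename DoFHandler<dim>::active_cell_iterator
                         &cell,
                       PostProcessScratchData &scratch,
                       unsigned int           &data) {
                  this->postprocess_one_cell(cell, scratch, data);
                },
                std::function<void(const unsigned int &)>(),
                scratch,
                0U);
@endcode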
799 * The following functions do the actual work of the program.
802 *     void assemble_system_one_cell(
804 *       ScratchData &scratch,
805 *       PerTaskData &task_data);
807 *     void copy_local_to_global(const PerTaskData &data);
809 *     void postprocess_one_cell(
811 *       PostProcessScratchData &scratch,
812 *       unsigned int &empty_data);
819 * The 'local' solutions are interior to each element. These
820 * represent the primal solution field @f$u@f$ as well as the auxiliary
821 * field @f$\mathbf{q}@f$.
830 * The new finite element type and corresponding <code>DoFHandler</code> are
831 * used for the global skeleton solution that couples the element-level local solutions.
842 * As stated in the introduction, HDG solutions can be post-processed to
843 * attain superconvergence rates of @f$\mathcal{O}(h^{p+2})@f$. The
844 * post-processed solution is a discontinuous finite element solution
845 * representing the primal variable on the interior of each cell. We define
846 * a FE type of degree @f$p+1@f$ to represent this post-processed solution,
847 * which we only use for output after constructing it.
856 * The degrees of freedom corresponding to the skeleton strongly enforce
857 * Dirichlet boundary conditions, just as in a continuous Galerkin finite
858 * element method. We can enforce the boundary conditions in an analogous
859 * manner; likewise, hanging nodes are
860 * handled in the same way as for continuous finite elements: For the face
861 * elements which only define degrees of freedom on the face, this process
862 * sets the solution on the refined side to coincide with the
863 * representation on the coarse side.
867 * Note that for HDG, the elimination of hanging nodes is not the only
868 * possibility: in terms of the HDG theory, one could also use the
869 * unknowns from the refined side and express the local solution on the
870 * coarse side through the trace values on the refined side. However, such
871 * a setup is not as easily implemented in terms of deal.II loops and not
872 * pursued further here.
881 * the actual matrix object. When creating the sparsity pattern, we just
882 * have to additionally pass the size of local blocks.
890 * Same as @ref step_7 "step-7":
893 *     const RefinementMode refinement_mode;
900 * <a name="step_51-TheHDGclassimplementation"></a>
901 * <h3>The HDG class implementation</h3>
906 * <a name="step_51-Constructor"></a>
907 * <h4>Constructor</h4>
908 * The constructor is similar to those in other examples, with the exception
910 * that we create a system of finite elements for the local DG part, including the gradient/flux part and the scalar part.
915 *   HDG<dim>::HDG(const unsigned int degree, const RefinementMode refinement_mode)
920 *     , fe_u_post(degree + 1)
922 *     , refinement_mode(refinement_mode)
930 * <a name="step_51-HDGsetup_system"></a>
931 * <h4>HDG::setup_system</h4>
932 * The system for an HDG solution is set up in an analogous manner to most
933 * of the other tutorial programs. We are careful to distribute dofs with
934 * all of our DoFHandler objects. The @p solution and @p system_matrix
935 * objects go with the global skeleton solution.
939 *   void HDG<dim>::setup_system()
941 *     dof_handler_local.distribute_dofs(fe_local);
942 *     dof_handler.distribute_dofs(fe);
943 *     dof_handler_u_post.distribute_dofs(fe_u_post);
945 *     std::cout << "   Number of degrees of freedom: " << dof_handler.n_dofs()
948 *     solution.reinit(dof_handler.n_dofs());
949 *     system_rhs.reinit(dof_handler.n_dofs());
951 *     solution_local.reinit(dof_handler_local.n_dofs());
952 *     solution_u_post.reinit(dof_handler_u_post.n_dofs());
954 *     constraints.clear();
956 *     std::map<types::boundary_id, const Function<dim> *> boundary_functions;
957 *     Solution<dim> solution_function;
958 *     boundary_functions[0] = &solution_function;
960 *       boundary_functions,
963 *     constraints.close();
967 * When creating the chunk sparsity pattern, we first create the usual dynamic sparsity pattern and then set the chunk size, which is equal
969 * to the number of dofs on a face, when copying this into the final sparsity pattern.
976 *     sparsity_pattern.copy_from(dsp, fe.n_dofs_per_face());
978 *     system_matrix.reinit(sparsity_pattern);
986 * <a name="step_51-HDGPerTaskData"></a>
987 * <h4>HDG::PerTaskData</h4>
988 * Next comes the definition of the local data structures for the parallel
989 * assembly. The first structure @p PerTaskData contains the local vector
990 * and matrix that are written into the global matrix, whereas the
991 * ScratchData contains all data that we need for the local assembly. There
992 * is one variable worth noting here, namely the boolean variable @p
993 * trace_reconstruct. As mentioned in the introduction, we solve the HDG
994 * system in two steps. First, we create a linear system for the skeleton
995 * system where we condense the local part into it via the Schur complement
996 * @f$D-CA^{-1}B@f$. Then, we solve for the local part using the skeleton
997 * solution. For these two steps, we need the same matrices on the elements
998 * twice, which we want to compute by two assembly steps. Since most of the
999 * code is similar, we do this with the same function but only switch
1000 * between the two based on a flag that we set when starting the
1001 * assembly. Since we need to pass this information on to the local worker
1002 * routines, we store it once in the task data.
1005 *   template <int dim>
1006 *   struct HDG<dim>::PerTaskData
1010 *     std::vector<types::global_dof_index> dof_indices;
1012 *     bool trace_reconstruct;
1014 *     PerTaskData(const unsigned int n_dofs, const bool trace_reconstruct)
1016 * , cell_vector(n_dofs)
1017 * , dof_indices(n_dofs)
1018 * , trace_reconstruct(trace_reconstruct)
1027 * <a name="step_51-HDGScratchData"></a>
1028 * <h4>HDG::ScratchData</h4>
1029 * @p ScratchData contains persistent data for each thread within WorkStream. The FEValues, matrix,
1031 * and vector objects should be familiar by now. There are two objects that
1032 * need to be discussed: `std::vector<std::vector<unsigned int>>
1033 * fe_local_support_on_face` and `std::vector<std::vector<unsigned int>>
1034 * fe_support_on_face`. These are used to indicate whether or not the finite
1035 * elements chosen have support (non-zero values) on a given face of the
1036 * reference cell for the local part associated to @p fe_local and the
1037 * skeleton part @p fe. We extract this information in the
1038 * constructor and store it once for all cells that we work on. Had we not
1039 * stored this information, we would be forced to assemble a large number of
1040 * zero terms on each cell, which would significantly slow the program.
1043 *   template <int dim>
1044 *   struct HDG<dim>::ScratchData
1057 * std::vector<Tensor<1, dim>> q_phi;
1058 * std::vector<double> q_phi_div;
1059 * std::vector<double> u_phi;
1060 * std::vector<Tensor<1, dim>> u_phi_grad;
1061 * std::vector<double> tr_phi;
1062 * std::vector<double> trace_values;
1064 * std::vector<std::vector<unsigned int>> fe_local_support_on_face;
1065 * std::vector<std::vector<unsigned int>> fe_support_on_face;
1067 * ConvectionVelocity<dim> convection_velocity;
1068 * RightHandSide<dim> right_hand_side;
1069 *     const Solution<dim> exact_solution;
1078 * : fe_values_local(fe_local, quadrature_formula, local_flags)
1079 * , fe_face_values_local(fe_local,
1080 * face_quadrature_formula,
1082 * , fe_face_values(fe, face_quadrature_formula, flags)
1083 * , ll_matrix(fe_local.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1084 * , lf_matrix(fe_local.n_dofs_per_cell(), fe.n_dofs_per_cell())
1085 * , fl_matrix(fe.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1086 * , tmp_matrix(fe.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1087 * , l_rhs(fe_local.n_dofs_per_cell())
1088 * , tmp_rhs(fe_local.n_dofs_per_cell())
1089 * , q_phi(fe_local.n_dofs_per_cell())
1090 * , q_phi_div(fe_local.n_dofs_per_cell())
1091 * , u_phi(fe_local.n_dofs_per_cell())
1092 * , u_phi_grad(fe_local.n_dofs_per_cell())
1093 * , tr_phi(fe.n_dofs_per_cell())
1094 * , trace_values(face_quadrature_formula.size())
1097 * , exact_solution()
1099 *       for (const unsigned int face_no : GeometryInfo<dim>::face_indices())
1100 *         for (unsigned int i = 0; i < fe_local.n_dofs_per_cell(); ++i)
1102 *             if (fe_local.has_support_on_face(i, face_no))
1103 *               fe_local_support_on_face[face_no].push_back(i);
1106 *       for (const unsigned int face_no : GeometryInfo<dim>::face_indices())
1107 *         for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
1109 *             if (fe.has_support_on_face(i, face_no))
1110 *               fe_support_on_face[face_no].push_back(i);
1114 *     ScratchData(const ScratchData &sd)
1115 * : fe_values_local(sd.fe_values_local.get_fe(),
1116 * sd.fe_values_local.get_quadrature(),
1117 * sd.fe_values_local.get_update_flags())
1118 * , fe_face_values_local(sd.fe_face_values_local.get_fe(),
1119 * sd.fe_face_values_local.get_quadrature(),
1120 * sd.fe_face_values_local.get_update_flags())
1121 * , fe_face_values(sd.fe_face_values.get_fe(),
1122 * sd.fe_face_values.get_quadrature(),
1123 * sd.fe_face_values.get_update_flags())
1124 * , ll_matrix(sd.ll_matrix)
1125 * , lf_matrix(sd.lf_matrix)
1126 * , fl_matrix(sd.fl_matrix)
1127 * , tmp_matrix(sd.tmp_matrix)
1129 * , tmp_rhs(sd.tmp_rhs)
1131 * , q_phi_div(sd.q_phi_div)
1133 * , u_phi_grad(sd.u_phi_grad)
1134 * , tr_phi(sd.tr_phi)
1135 * , trace_values(sd.trace_values)
1136 * , fe_local_support_on_face(sd.fe_local_support_on_face)
1137 * , fe_support_on_face(sd.fe_support_on_face)
1138 * , exact_solution()
1147 * <a name="step_51-HDGPostProcessScratchData"></a>
1148 * <h4>HDG::PostProcessScratchData</h4>
1150 * @p PostProcessScratchData contains the data used by WorkStream when post-processing the local solution @f$u^*@f$. It is similar to, but much
1151 * simpler than, @p ScratchData.
1154 *   template <int dim>
1155 *   struct HDG<dim>::PostProcessScratchData
1160 * std::vector<double> u_values;
1161 * std::vector<Tensor<1, dim>> u_gradients;
1172 * : fe_values_local(fe_local, quadrature_formula, local_flags)
1173 * , fe_values(fe, quadrature_formula, flags)
1174 * , u_values(quadrature_formula.size())
1175 * , u_gradients(quadrature_formula.size())
1176 *       , cell_matrix(fe.n_dofs_per_cell(), fe.n_dofs_per_cell())
1177 * , cell_rhs(fe.n_dofs_per_cell())
1178 * , cell_sol(fe.n_dofs_per_cell())
1181 *     PostProcessScratchData(const PostProcessScratchData &sd)
1182 * : fe_values_local(sd.fe_values_local.get_fe(),
1183 * sd.fe_values_local.get_quadrature(),
1184 * sd.fe_values_local.get_update_flags())
1185 * , fe_values(sd.fe_values.get_fe(),
1186 * sd.fe_values.get_quadrature(),
1187 * sd.fe_values.get_update_flags())
1188 * , u_values(sd.u_values)
1189 * , u_gradients(sd.u_gradients)
1191 * , cell_rhs(sd.cell_rhs)
1192 * , cell_sol(sd.cell_sol)
1201 * <a name="step_51-HDGassemble_system"></a>
1202 * <h4>HDG::assemble_system</h4>
1203 * The @p assemble_system function is similar to the one in @ref step_32 "step-32", where
1204 * the quadrature formula and the update flags are set up, and then
1205 * <code>WorkStream</code> is used to do the work in a multi-threaded
1206 * manner. The @p trace_reconstruct input parameter is used to decide
1207 * whether we are solving for the global skeleton solution (false) or the
1208 * local solution (true).
1212 * One thing worth noting for the multi-threaded execution of assembly is
1213 * the fact that the local computations in `assemble_system_one_cell()` call
1214 * into BLAS and LAPACK functions if those are available in deal.II. Thus,
1215 * the underlying BLAS/LAPACK library must support calls from multiple
1216 * threads at the same time. Most implementations do support this, but some
1217 * libraries need to be built in a specific way to avoid problems. For
1218 * example, OpenBLAS compiled without multithreading inside the BLAS/LAPACK
1219 * calls needs to be built with a flag called `USE_LOCKING` set to true.
1222 *   template <int dim>
1223 *   void HDG<dim>::assemble_system(const bool trace_reconstruct)
1225 *     const QGauss<dim> quadrature_formula(fe.degree + 1);
1226 *     const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
1236 * PerTaskData task_data(fe.n_dofs_per_cell(), trace_reconstruct);
1237 * ScratchData scratch(fe,
1239 * quadrature_formula,
1240 * face_quadrature_formula,
1246 * dof_handler.end(),
1248 * &HDG<dim>::assemble_system_one_cell,
1249 * &HDG<dim>::copy_local_to_global,
1259 * <a name="step_51-HDGassemble_system_one_cell"></a>
1260 * <h4>HDG::assemble_system_one_cell</h4>
1261 * The real work of the HDG program is done by @p assemble_system_one_cell.
1262 * Assembling the local matrices @f$A, B, C@f$ is done here, along with the
1263 * local contributions of the global matrix @f$D@f$.
1266 *   template <int dim>
1267 *   void HDG<dim>::assemble_system_one_cell(
1269 *     ScratchData &scratch,
1270 *     PerTaskData &task_data)
1274 * Construct an iterator for dof_handler_local to use in the FEValues reinit functions.
1278 *       cell->as_dof_handler_iterator(dof_handler_local);
1280 *     const unsigned int n_q_points =
1281 *       scratch.fe_values_local.get_quadrature().size();
1282 *     const unsigned int n_face_q_points =
1283 *       scratch.fe_face_values_local.get_quadrature().size();
1285 *     const unsigned int loc_dofs_per_cell =
1286 *       scratch.fe_values_local.get_fe().n_dofs_per_cell();
1291 * scratch.ll_matrix = 0;
1292 * scratch.l_rhs = 0;
1293 *     if (!task_data.trace_reconstruct)
1295 * scratch.lf_matrix = 0;
1296 * scratch.fl_matrix = 0;
1297 * task_data.cell_matrix = 0;
1298 * task_data.cell_vector = 0;
1300 * scratch.fe_values_local.reinit(loc_cell);
1304 * We first compute the cell-interior contribution to the @p ll_matrix
1305 * (referred to as matrix @f$A@f$ in the introduction) corresponding to
1306 * local-local coupling, as well as the local right-hand-side vector. We
1307 * store the values at each quadrature point for the basis functions, the
1308 * right-hand-side value, and the convection velocity, in order to have
1309 * quick access to these fields.
1312 *     for (unsigned int q = 0; q < n_q_points; ++q)
1314 *         const double rhs_value = scratch.right_hand_side.value(
1315 *           scratch.fe_values_local.quadrature_point(q));
1316 *         const Tensor<1, dim> convection = scratch.convection_velocity.value(
1317 *           scratch.fe_values_local.quadrature_point(q));
1318 *         const double JxW = scratch.fe_values_local.JxW(q);
1319 *         for (unsigned int k = 0; k < loc_dofs_per_cell; ++k)
1321 *             scratch.q_phi[k] = scratch.fe_values_local[fluxes].value(k, q);
1322 *             scratch.q_phi_div[k] =
1323 *               scratch.fe_values_local[fluxes].divergence(k, q);
1324 *             scratch.u_phi[k] = scratch.fe_values_local[scalar].value(k, q);
1325 *             scratch.u_phi_grad[k] =
1326 *               scratch.fe_values_local[scalar].gradient(k, q);
1328 *         for (unsigned int i = 0; i < loc_dofs_per_cell; ++i)
1330 *             for (unsigned int j = 0; j < loc_dofs_per_cell; ++j)
1331 *               scratch.ll_matrix(i, j) +=
1332 *                 (scratch.q_phi[i] * scratch.q_phi[j] -
1333 *                  scratch.q_phi_div[i] * scratch.u_phi[j] +
1334 *                  scratch.u_phi[i] * scratch.q_phi_div[j] -
1335 *                  (scratch.u_phi_grad[i] * convection) * scratch.u_phi[j]) *
1336 *                 JxW;
1337 *             scratch.l_rhs(i) += scratch.u_phi[i] * rhs_value * JxW;
1343 * Face terms are assembled on all faces of all elements. This is in
1344 * contrast to more traditional DG methods, where each face is only visited
1345 * once in the assembly procedure.
1348 *     for (const auto face_no : cell->face_indices())
1350 *         scratch.fe_face_values_local.reinit(loc_cell, face_no);
1351 * scratch.fe_face_values.reinit(cell, face_no);
1355 * The already obtained @f$\hat{u}@f$ values are needed when solving for the local variables.
1359 *         if (task_data.trace_reconstruct)
1360 *           scratch.fe_face_values.get_function_values(solution,
1361 *                                                      scratch.trace_values);
1363 *         for (unsigned int q = 0; q < n_face_q_points; ++q)
1365 *             const double JxW = scratch.fe_face_values.JxW(q);
1367 *             const Point<dim> quadrature_point = scratch.fe_face_values.quadrature_point(q);
1369 *             const Tensor<1, dim> normal = scratch.fe_face_values.normal_vector(q);
1371 *             const Tensor<1, dim> convection = scratch.convection_velocity.value(quadrature_point);
1375 * Here we compute the stabilization parameter discussed in the
1376 * introduction: since the diffusion is one and the diffusion
1377 * length scale is set to 1/5, it simply results in a contribution
1378 * of 5 for the diffusion part and the magnitude of convection
1379 * through the element boundary in a centered scheme for the convection part.
1383 *             const double tau_stab = (5. + std::abs(convection * normal));
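In formula form, one way to write the choice just described (assuming the
introduction's convention of diffusion @f$\kappa@f$ divided by a length scale
@f$\ell@f$ plus the convective flux magnitude):
@f[
  \tau = \frac{\kappa}{\ell} + |\mathbf c \cdot \mathbf n|
       = 5 + |\mathbf c \cdot \mathbf n|,
  \qquad \kappa = 1, \quad \ell = \tfrac{1}{5}.
@f]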
1387 * We store the non-zero flux and scalar values, making use of the
1388 * support_on_face information we created in @p ScratchData.
1391 *             for (unsigned int k = 0;
1392 *                  k < scratch.fe_local_support_on_face[face_no].size();
1395 *                 const unsigned int kk =
1396 *                   scratch.fe_local_support_on_face[face_no][k];
1397 *                 scratch.q_phi[k] =
1398 *                   scratch.fe_face_values_local[fluxes].value(kk, q);
1399 *                 scratch.u_phi[k] =
1400 *                   scratch.fe_face_values_local[scalar].value(kk, q);
1405 * When @p trace_reconstruct=false, we are preparing to assemble the
1406 * system for the skeleton variable @f$\hat{u}@f$. If this is the case,
1407 * we must assemble all local matrices associated with the problem:
1408 * local-local, local-face, face-local, and face-face. The
1409 * face-face matrix is stored as @p TaskData::cell_matrix, so that
1410 * it can be assembled into the global system by @p
1411 * copy_local_to_global.
1414 *           if (!task_data.trace_reconstruct)
1416 *               for (unsigned int k = 0;
1417 *                    k < scratch.fe_support_on_face[face_no].size();
1419 *                 scratch.tr_phi[k] = scratch.fe_face_values.shape_value(
1420 *                   scratch.fe_support_on_face[face_no][k], q);
1421 *               for (unsigned int i = 0;
1422 *                    i < scratch.fe_local_support_on_face[face_no].size();
1424 *                 for (unsigned int j = 0;
1425 *                      j < scratch.fe_support_on_face[face_no].size();
1428 *                     const unsigned int ii =
1429 *                       scratch.fe_local_support_on_face[face_no][i];
1430 *                     const unsigned int jj =
1431 *                       scratch.fe_support_on_face[face_no][j];
1432 *                     scratch.lf_matrix(ii, jj) +=
1433 *                       ((scratch.q_phi[i] * normal +
1434 *                         (convection * normal - tau_stab) * scratch.u_phi[i]) *
1435 *                        scratch.tr_phi[j]) *
1440 * Note the sign of the face-local matrix. We negate the
1441 * sign during assembly here so that we can use the
1446 *                     scratch.fl_matrix(jj, ii) -=
1447 *                       ((scratch.q_phi[i] * normal +
1448 *                         tau_stab * scratch.u_phi[i]) *
1449 *                        scratch.tr_phi[j]) *
1453 *               for (unsigned int i = 0;
1454 *                    i < scratch.fe_support_on_face[face_no].size();
1456 *                 for (unsigned int j = 0;
1457 *                      j < scratch.fe_support_on_face[face_no].size();
1460 *                     const unsigned int ii =
1461 *                       scratch.fe_support_on_face[face_no][i];
1462 *                     const unsigned int jj =
1463 *                       scratch.fe_support_on_face[face_no][j];
1464 *                     task_data.cell_matrix(ii, jj) +=
1465 *                       ((convection * normal - tau_stab) * scratch.tr_phi[i] *
1466 *                        scratch.tr_phi[j]) *
1470 *           if (cell->face(face_no)->at_boundary() &&
1471 *               (cell->face(face_no)->boundary_id() == 1))
1473 *               const double neumann_value =
1474 *                 -scratch.exact_solution.gradient(quadrature_point) * normal +
1476 *                 convection * normal *
1477 *                   scratch.exact_solution.value(quadrature_point);
1478 *               for (unsigned int i = 0;
1479 *                    i < scratch.fe_support_on_face[face_no].size();
1482 *                   const unsigned int ii =
1483 *                     scratch.fe_support_on_face[face_no][i];
1484 *                   task_data.cell_vector(ii) +=
1485 *                     scratch.tr_phi[i] * neumann_value * JxW;
1492 * This last term adds the contribution of the term @f$\left<w,\tau
1493 * u_h\right>_{\partial \mathcal T}@f$ to the local matrix. As opposed
1494 * to the face matrices above, we need it in both assembly stages.
1497 *           for (unsigned int i = 0;
1498 *                i < scratch.fe_local_support_on_face[face_no].size();
1500 *             for (unsigned int j = 0;
1501 *                  j < scratch.fe_local_support_on_face[face_no].size();
1504 *                 const unsigned int ii =
1505 *                   scratch.fe_local_support_on_face[face_no][i];
1506 *                 const unsigned int jj =
1507 *                   scratch.fe_local_support_on_face[face_no][j];
1508 *                 scratch.ll_matrix(ii, jj) +=
1509 *                   tau_stab * scratch.u_phi[i] * scratch.u_phi[j] * JxW;
1514 * When @p trace_reconstruct=true, we are solving for the local
1515 * solutions on an element-by-element basis. The local
1516 * right-hand-side is calculated by replacing the basis functions @p
1517 * tr_phi in the @p lf_matrix computation by the computed values @p
1518 * trace_values. Of course, the sign of the matrix is now minus
1519 * since we have moved everything to the other side of the equation.
1522 *           if (task_data.trace_reconstruct)
1523 *             for (unsigned int i = 0;
1524 *                  i < scratch.fe_local_support_on_face[face_no].size();
1527 *                 const unsigned int ii =
1528 *                   scratch.fe_local_support_on_face[face_no][i];
1529 *                 scratch.l_rhs(ii) -=
1530 *                   (scratch.q_phi[i] * normal +
1531 *                    scratch.u_phi[i] * (convection * normal - tau_stab)) *
1532 *                   scratch.trace_values[q] * JxW;
1539 * Once assembly of all of the local contributions is complete, we must
1540 * either: (1) assemble the global system, or (2) compute the local solution
1545 * scratch.ll_matrix.gauss_jordan();
1549 * For (1), we compute the Schur complement and add it to the @p cell_matrix (matrix @f$D@f$ in the introduction).
1553 *     if (task_data.trace_reconstruct == false)
1555 * scratch.fl_matrix.mmult(scratch.tmp_matrix, scratch.ll_matrix);
1556 * scratch.tmp_matrix.vmult_add(task_data.cell_vector, scratch.l_rhs);
1557 * scratch.tmp_matrix.mmult(task_data.cell_matrix,
1558 * scratch.lf_matrix,
1560 * cell->get_dof_indices(task_data.dof_indices);
1564 * For (2), we are simply solving (ll_matrix).(solution_local) = (l_rhs).
1565 * Hence, we multiply @p l_rhs by our already inverted local-local matrix
1566 * and store the result using the <code>set_dof_values</code> function.
1571 * scratch.ll_matrix.vmult(scratch.tmp_rhs, scratch.l_rhs);
1572 * loc_cell->set_dof_values(scratch.tmp_rhs, solution_local);
1581 * <a name="step_51-HDGcopy_local_to_global"></a>
1582 * <h4>HDG::copy_local_to_global</h4>
1583 * If we are in the first step of the solution, i.e. @p trace_reconstruct=false,
1584 * then we assemble the local matrices into the global system.
1587 *   template <int dim>
1588 *   void HDG<dim>::copy_local_to_global(const PerTaskData &data)
1590 *     if (data.trace_reconstruct == false)
1591 *       constraints.distribute_local_to_global(data.cell_matrix,
1603 * <a name="step_51-HDGsolve"></a>
1604 * <h4>HDG::solve</h4>
1605 * The skeleton solution is solved for using a BiCGStab solver with an identity preconditioner.
1609 *   template <int dim>
1610 *   void HDG<dim>::solve()
1613 *                                  1e-11 * system_rhs.l2_norm());
1617 *     std::cout << "   Number of BiCGStab iterations: "
1618 * << solver_control.last_step() << std::endl;
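For orientation, here is a minimal sketch of the solver call that produced the
iteration count just printed (the tolerance matches the line above; the
iteration cap shown is an assumption):
@code{.cc}
SolverControl solver_control(system_matrix.m() * 10,
                             1e-11 * system_rhs.l2_norm());
SolverBicgstab<Vector<double>> solver(solver_control);
solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());
@endcode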
1620 * system_matrix.clear();
1621 * sparsity_pattern.reinit(0, 0, 0, 1);
1623 * constraints.distribute(solution);
1627 * Once we have solved for the skeleton solution,
1628 * we can solve for the local solutions in an element-by-element
1629 * fashion. We do this by re-using the same @p assemble_system function
1630 * but switching @p trace_reconstruct to true.
1633 *     assemble_system(true);
1641 * <a name="step_51-HDGpostprocess"></a>
1642 * <h4>HDG::postprocess</h4>
1646 * The postprocess method serves two purposes. First, we want to construct a
1647 * post-processed scalar variable in the element space of degree @f$p+1@f$ that
1648 * we hope will converge at order @f$p+2@f$. This is again an element-by-element
1649 * process and only involves the scalar solution as well as the gradient on
1650 * the local cell. To do this, we introduce the already defined scratch data
1651 * together with some update flags and run the work stream to do this in parallel.
1656 * Secondly, we want to compute discretization errors just as we did in
1657 * @ref step_7 "step-7". The overall procedure is similar with calls to VectorTools::integrate_difference. The difference is in how we compute
1659 * the errors for the scalar variable and the gradient variable. In @ref step_7 "step-7",
1661 * we did this by computing @p L2_norm or @p H1_seminorm contributions. Here, we have a DoFHandler with these two contributions
1662 * computed and sorted by their vector component, <code>[0, dim)</code> for the
1664 * gradient and @p dim for the scalar. To compute their value, we hence use a ComponentSelectFunction in combination with the
1666 * SolutionAndGradient class introduced above that contains the analytic
1667 * parts of either of them. Eventually, we also compute the L2-error of the
1668 * post-processed solution and add the results into the convergence table.
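As a sketch of one of these error computations (consistent with the truncated
calls below; the quadrature degree is an assumption):
@code{.cc}
// Select only the scalar component, index dim out of dim+1 components:
const ComponentSelectFunction<dim> value_select(dim, dim + 1);
Vector<float> difference_per_cell(triangulation.n_active_cells());
VectorTools::integrate_difference(dof_handler_local,
                                  solution_local,
                                  SolutionAndGradient<dim>(),
                                  difference_per_cell,
                                  QGauss<dim>(fe.degree + 2),
                                  VectorTools::L2_norm,
                                  &value_select);
const double L2_error =
  VectorTools::compute_global_error(triangulation,
                                    difference_per_cell,
                                    VectorTools::L2_norm);
@endcode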
1671 *   template <int dim>
1672 *   void HDG<dim>::postprocess()
1675 *     const QGauss<dim> quadrature_formula(fe_u_post.degree + 1);
1680 * PostProcessScratchData scratch(
1681 * fe_u_post, fe_local, quadrature_formula, local_flags, flags);
1684 * dof_handler_u_post.begin_active(),
1685 * dof_handler_u_post.end(),
1687 * PostProcessScratchData &scratch,
1688 *         unsigned int &data) {
1689 *         this->postprocess_one_cell(cell, scratch, data);
1691 *       std::function<void(const unsigned int &)>(),
1701 * SolutionAndGradient<dim>(),
1702 * difference_per_cell,
1706 *     const double L2_error =
1708 *                                         difference_per_cell,
1712 * std::pair<unsigned int, unsigned int>(0, dim), dim + 1);
1715 * SolutionAndGradient<dim>(),
1716 * difference_per_cell,
1719 * &gradient_select);
1720 *     const double grad_error =
1722 *                                         difference_per_cell,
1728 * difference_per_cell,
1731 *     const double post_error =
1733 *                                         difference_per_cell,
1736 *     convergence_table.add_value("cells", triangulation.n_active_cells());
1737 *     convergence_table.add_value("dofs", dof_handler.n_dofs());
1739 *     convergence_table.add_value("val L2", L2_error);
1740 *     convergence_table.set_scientific("val L2", true);
1741 *     convergence_table.set_precision("val L2", 3);
1743 *     convergence_table.add_value("grad L2", grad_error);
1744 *     convergence_table.set_scientific("grad L2", true);
1745 *     convergence_table.set_precision("grad L2", 3);
1747 *     convergence_table.add_value("val L2-post", post_error);
1748 *     convergence_table.set_scientific("val L2-post", true);
1749 *     convergence_table.set_precision("val L2-post", 3);
1757 * <a name="step_51-HDGpostprocess_one_cell"></a>
1758 * <h4>HDG::postprocess_one_cell</h4>
1762 * This is the actual work done for the postprocessing. According to the
1763 * discussion in the introduction, we need to set up a system that projects the gradient of the DG solution onto the gradient of the
1765 * post-processed variable. Moreover, we need to set the average of the new
1766 * post-processed variable to equal the average of the scalar DG solution on the cell.
1771 * More technically speaking, the projection of the gradient is a system
1772 * that would potentially fill our @p dofs_per_cell times @p dofs_per_cell
1773 * matrix but is singular (the sum of all rows would be zero because the
1774 * constant function has zero gradient). Therefore, we take one row away and
1775 * replace it by the condition on the average of the scalar value. We pick the first
1776 * row for the scalar part, even though we could pick any row for @f$\mathcal
1777 * Q_{-p}@f$ elements. However, had we used FE_DGP elements instead, the first
1778 * row would correspond to the constant part already and deleting e.g. the
1779 * last row would give us a singular system. This way, our program can also
1780 * be used for those elements.
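Written out (a restatement of the introduction's post-processing system on a
cell @f$K@f$): find @f$u_h^* \in \mathcal Q_{p+1}(K)@f$ such that
@f[
  (\nabla w_h, \nabla u_h^*)_K = -(\nabla w_h, \mathbf q_h)_K
  \quad \text{for all } w_h \in \mathcal Q_{p+1}(K),
  \qquad (1, u_h^*)_K = (1, u_h)_K,
@f]
where the second equation replaces the (singular) row associated with the
constant mode, exactly as described above.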
1783 *   template <int dim>
1784 *   void HDG<dim>::postprocess_one_cell(
1786 *     PostProcessScratchData &scratch,
1790 *       cell->as_dof_handler_iterator(dof_handler_local);
1792 * scratch.fe_values_local.reinit(loc_cell);
1793 * scratch.fe_values.reinit(cell);
1798 *     const unsigned int n_q_points = scratch.fe_values.get_quadrature().size();
1799 *     const unsigned int dofs_per_cell = scratch.fe_values.dofs_per_cell;
1801 *     scratch.fe_values_local[scalar].get_function_values(solution_local,
1802 *                                                          scratch.u_values);
1803 *     scratch.fe_values_local[fluxes].get_function_values(solution_local,
1804 *                                                          scratch.u_gradients);
1807 *     for (unsigned int i = 1; i < dofs_per_cell; ++i)
1809 *         for (unsigned int j = 0; j < dofs_per_cell; ++j)
1812 *             for (unsigned int q = 0; q < n_q_points; ++q)
1813 *               sum += (scratch.fe_values.shape_grad(i, q) *
1814 *                       scratch.fe_values.shape_grad(j, q)) *
1815 *                      scratch.fe_values.JxW(q);
1816 *             scratch.cell_matrix(i, j) = sum;
1820 *         for (unsigned int q = 0; q < n_q_points; ++q)
1821 *           sum -= (scratch.fe_values.shape_grad(i, q) * scratch.u_gradients[q]) *
1822 *                  scratch.fe_values.JxW(q);
1823 *         scratch.cell_rhs(i) = sum;
1825 *     for (unsigned int j = 0; j < dofs_per_cell; ++j)
1828 *         for (unsigned int q = 0; q < n_q_points; ++q)
1829 *           sum += scratch.fe_values.shape_value(j, q) * scratch.fe_values.JxW(q);
1830 *         scratch.cell_matrix(0, j) = sum;
1834 *     for (unsigned int q = 0; q < n_q_points; ++q)
1835 *       sum += scratch.u_values[q] * scratch.fe_values.JxW(q);
1836 *     scratch.cell_rhs(0) = sum;
1841 * Having assembled all terms, we can again go on and solve the linear
1842 * system. We invert the matrix and then multiply the inverse by the
1843 * right hand side. An alternative (and more numerically stable) method
1844 * would have been to only factorize the matrix and apply the factorization.
1847 * scratch.cell_matrix.gauss_jordan();
1848 * scratch.cell_matrix.vmult(scratch.cell_sol, scratch.cell_rhs);
1849 * cell->distribute_local_to_global(scratch.cell_sol, solution_u_post);
1857 * <a name="step_51-HDGoutput_results"></a>
1858 * <h4>HDG::output_results</h4>
1859 * We have three sets of results that we would like to output: the local
1860 * solution, the post-processed local solution, and the skeleton solution. The
1861 * first two both 'live' on element volumes, whereas the latter lives on
1862 * codimension-1 surfaces
1863 * of the triangulation. Our @p output_results function writes all local solutions
1864 * to the same vtk file, even though they correspond to different
1865 * DoFHandler objects. The graphical output for the skeleton
1866 * variable is done through use of the DataOutFaces class.
1869 *   template <int dim>
1870 *   void HDG<dim>::output_results(const unsigned int cycle)
1872 *     std::string filename;
1873 *     switch (refinement_mode)
1875 *         case global_refinement:
1876 *           filename = "solution-global";
1878 *         case adaptive_refinement:
1879 *           filename = "solution-adaptive";
1885 *     std::string face_out(filename);
1886 *     face_out += "-face";
1890 *     filename += ".vtk";
1891 * std::ofstream output(filename);
1897 * We first define the names and types of the local solution,
1898 * and add the data to @p data_out.
1901 *     std::vector<std::string> names(dim, "gradient");
1902 *     names.emplace_back("solution");
1903 * std::vector<DataComponentInterpretation::DataComponentInterpretation>
1904 * component_interpretation(
1906 * component_interpretation[dim] =
1908 * data_out.add_data_vector(dof_handler_local,
1911 * component_interpretation);
1915 * The second data item we add is the post-processed solution.
1916 * In this case, it is a single scalar variable belonging to a different DoFHandler.
1920 *     std::vector<std::string> post_name(1, "u_post");
1921 * std::vector<DataComponentInterpretation::DataComponentInterpretation>
1923 * data_out.add_data_vector(dof_handler_u_post,
1928 * data_out.build_patches(fe.degree);
1929 * data_out.write_vtk(output);
1933 *     face_out += ".vtk";
1934 * std::ofstream face_output(face_out);
1938 * The <code>DataOutFaces</code> class works analogously to the <code>DataOut</code> class when we have a DoFHandler that
1940 * defines the solution on the skeleton of the triangulation. We treat it
1941 * as such here, and the code is similar to that above.
1945 *     std::vector<std::string> face_name(1, "u_hat");
1946 * std::vector<DataComponentInterpretation::DataComponentInterpretation>
1949 * data_out_face.add_data_vector(dof_handler,
1952 * face_component_type);
1954 * data_out_face.build_patches(fe.degree);
1955 * data_out_face.write_vtk(face_output);
1961 * <a name="step_51-HDGrefine_grid"></a>
1962 * <h4>HDG::refine_grid</h4>
1966 * We implement two different refinement cases for HDG, just as in
1967 * <code>@ref step_7 "step-7"</code>: adaptive_refinement and global_refinement. The
1968 * global_refinement option recreates the entire triangulation every
1969 * time. This is because we want to use a finer sequence of meshes than what
1970 * we would get with one refinement step, namely 2, 3, 4, 6, 8, 12, 16, ...
1971 * elements per direction.
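One way to realize this sequence (a hedged sketch of what refine_grid() does
for global refinement) is to alternate between 2 and 3 subdivisions per
direction and refine globally every other cycle:
@code{.cc}
// cycle 0,1,2,3,4,5,... yields 2,3,4,6,8,12,... elements per direction:
triangulation.clear();
GridGenerator::subdivided_hyper_cube(triangulation,
                                     2 + (cycle % 2), // 2 or 3 subdivisions
                                     -1, 1);
triangulation.refine_global(cycle / 2); // doubles every second cycle
@endcode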
1976 * give a decent indication of the non-regular regions in the scalar local solution.
1980 *   template <int dim>
1981 *   void HDG<dim>::refine_grid(const unsigned int cycle)
1989 *     switch (refinement_mode)
1991 *         case global_refinement:
2002 *         case adaptive_refinement:
2008 * std::map<types::boundary_id, const Function<dim> *>
2014 * estimated_error_per_cell,
2015 * fe_local.component_mask(
2034 * Just as in @ref step_7 "step-7", we set the boundary indicator of two of the faces to 1
2035 * where we want to specify Neumann boundary conditions instead of Dirichlet
2036 * conditions. Since we re-create the triangulation every time for global
2037 * refinement, the flags are set in every refinement step, not just at the beginning.
2042 *     for (const auto &face : cell->face_iterators())
2043 *       if (face->at_boundary())
2044 *         if ((std::fabs(face->center()[0] - (-1)) < 1e-12) ||
2045 *             (std::fabs(face->center()[1] - (-1)) < 1e-12))
2046 *           face->set_boundary_id(1);
2052 * <a name="step_51-HDGrun"></a>
2053 * <h4>HDG::run</h4>
2054 * The functionality here is basically the same as <code>@ref step_7 "step-7"</code>.
2055 * We loop over 10 cycles, refining the grid on each one. At the end,
2056 * convergence tables are created.
2059 *   template <int dim>
2060 *   void HDG<dim>::run()
2062 *     for (unsigned int cycle = 0; cycle < 10; ++cycle)
2064 *         std::cout << "Cycle " << cycle << ':' << std::endl;
2066 *         refine_grid(cycle);
2068 *         assemble_system(false);
2071 * output_results(cycle);
2076 * There is one minor change for the convergence table compared to @ref step_7 "step-7":
2077 * Since we did not refine our mesh by a factor two in each cycle (but
2078 * rather used the sequence 2, 3, 4, 6, 8, 12, ...), we need to tell the
2079 * convergence rate evaluation about this. We do this by setting the
2080 * number of cells as a reference column and additionally specifying the
2081 * dimension of the problem, which gives the necessary information for the
2082 * relation between number of cells and mesh size.
2085 *     if (refinement_mode == global_refinement)
2087 *         convergence_table.evaluate_convergence_rates(
2089 *         convergence_table.evaluate_convergence_rates(
2091 *         convergence_table.evaluate_convergence_rates(
2094 *     convergence_table.write_text(std::cout);
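A hedged sketch of what the three truncated calls above look like in full
(ConvergenceTable::evaluate_convergence_rates with a reference column is the
deal.II interface; the column keys match those added in postprocess()):
@code{.cc}
convergence_table.evaluate_convergence_rates(
  "val L2", "cells", ConvergenceTable::reduction_rate_log2, dim);
convergence_table.evaluate_convergence_rates(
  "grad L2", "cells", ConvergenceTable::reduction_rate_log2, dim);
convergence_table.evaluate_convergence_rates(
  "val L2-post", "cells", ConvergenceTable::reduction_rate_log2, dim);
@endcode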
2103 *   const unsigned int dim = 2;
2109 * Now for the three calls to the main class in complete analogy to
2110 * @ref step_7 "step-7".
2114 *       std::cout << "Solving with Q1 elements, adaptive refinement"
2116 *                 << "============================================="
2120 * Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::adaptive_refinement);
2121 * hdg_problem.run();
2123 * std::cout << std::endl;
2127 *       std::cout << "Solving with Q1 elements, global refinement" << std::endl
2128 *                 << "===========================================" << std::endl
2131 * Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::global_refinement);
2132 * hdg_problem.run();
2134 * std::cout << std::endl;
2138 *       std::cout << "Solving with Q3 elements, global refinement" << std::endl
2139 *                 << "===========================================" << std::endl
2142 * Step51::HDG<dim> hdg_problem(3, Step51::HDG<dim>::global_refinement);
2143 * hdg_problem.run();
2145 * std::cout << std::endl;
2148 *   catch (std::exception &exc)
2150 *       std::cerr << std::endl
2152 *                 << "----------------------------------------------------"
2154 *       std::cerr << "Exception on processing: " << std::endl
2155 *                 << exc.what() << std::endl
2156 *                 << "Aborting!" << std::endl
2157 *                 << "----------------------------------------------------"
2163 *       std::cerr << std::endl
2165 *                 << "----------------------------------------------------"
2167 *       std::cerr << "Unknown exception!" << std::endl
2168 *                 << "Aborting!" << std::endl
2169 *                 << "----------------------------------------------------"
2177<a name="step_51-Results"></a><h1>Results</h1>
2180<a name="step_51-Programoutput"></a><h3>Program output</h3>
2183We first have a look at the output generated by the program when run in 2D. In
2184the four images below, we show the solution for polynomial degree @f$p=1@f$
2185and cycles 2, 3, 4, and 8 of the program. In the plots, we overlay the data
2186generated from the internal data (DG part) with the skeleton part (@f$\hat{u}@f$)
2187into the same plot. We had to generate two different data sets because cells
2188and faces represent different geometric entities, the combination of which (in
2189the same file) is not supported in the VTK output of deal.II.
2191The images show the distinctive features of HDG: The cell solution (colored
2192surfaces) is discontinuous between the cells. The solution on the skeleton
2193variable sits on the faces and ties together the local parts. The skeleton
2194solution is not continuous on the vertices where the faces meet, even though
2195its values are quite close along lines in the same coordinate direction. The
2196skeleton solution can be interpreted as a rubber spring between the two sides
2197that balances the jumps in the solution (or rather, the flux @f$\kappa \nabla u
2198+ \mathbf{c} u@f$). From the picture at the top left, it is clear that
2199the bulk solution frequently over- and undershoots and that the
2200skeleton variable is indeed a better approximation to the exact
2201solution; this explains why we can get a better solution using a postprocessing step.
2204As the mesh is refined, the jumps between the cells get
2205small (we represent a smooth solution), and the skeleton solution approaches
2206the interior parts. For cycle 8, there is no visible difference in the two
2207variables. We also see how boundary conditions are implemented weakly and that
2208the interior variables do not exactly satisfy boundary conditions. On the
2209lower and left boundaries, we set Neumann boundary conditions, whereas we set
2210Dirichlet conditions on the right and top boundaries.
2212<table align="center">
2214 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_2.png" alt=""></td>
2215 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_3.png" alt=""></td>
2218 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_4.png" alt=""></td>
2219 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_8.png" alt=""></td>
2223Next, we have a look at the post-processed solution, again at cycles 2, 3, 4,
2224and 8. This is a discontinuous solution that is locally described by second
2225order polynomials. While the solution does not look very good on the mesh of
2226cycle two, it looks much better for cycles three and four. As shown by the
2227convergence table below, we find that it also converges more quickly to the analytical solution.
2230<table align="center">
2232 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_2.png" alt=""></td>
2233 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_3.png" alt=""></td>
2236 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_4.png" alt=""></td>
2237 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_8.png" alt=""></td>
2241Finally, we look at the solution for @f$p=3@f$ at cycle 2. Despite the coarse
2242mesh with only 64 cells, the post-processed solution is similar in quality
2243to the linear solution (not post-processed) at cycle 8 with 4,096
2244cells. This clearly shows the superiority of high order methods for smooth solutions.
2247<table align="center">
2249 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_q3_2.png" alt=""></td>
2250 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_q3_2.png" alt=""></td>
2254<a name="step_51-Convergencetables"></a><h4>Convergence tables</h4>
2257When the program is run, it also outputs information about the respective
2258steps and convergence tables with errors in the various components in the
2259end. In 2D, the convergence tables look as follows:
2262Q1 elements, adaptive refinement:
2263cells dofs      val L2      grad L2     val L2-post
2264 16 80 1.804e+01 2.207e+01 1.798e+01
2265 31 170 9.874e+00 1.322e+01 9.798e+00
2266 61 314 7.452e-01 3.793e+00 4.891e-01
2267 121 634 3.240e-01 1.511e+00 2.616e-01
2268 238 1198 8.585e-02 8.212e-01 1.808e-02
2269 454 2290 4.802e-02 5.178e-01 2.195e-02
2270 898 4378 2.561e-02 2.947e-01 4.318e-03
2271 1720 7864 1.306e-02 1.664e-01 2.978e-03
2272 3271 14638 7.025e-03 9.815e-02 1.075e-03
2273 6217 27214 4.119e-03 6.407e-02 9.975e-04
2275Q1 elements, global refinement:
2276cells dofs      val L2           grad L2          val L2-post
2277 16 80 1.804e+01 - 2.207e+01 - 1.798e+01 -
2278 36 168 6.125e+00 2.66 9.472e+00 2.09 6.084e+00 2.67
2279 64 288 9.785e-01 6.38 4.260e+00 2.78 7.102e-01 7.47
2280 144 624 2.730e-01 3.15 1.866e+00 2.04 6.115e-02 6.05
2281 256 1088 1.493e-01 2.10 1.046e+00 2.01 2.880e-02 2.62
2282 576 2400 6.965e-02 1.88 4.846e-01 1.90 9.204e-03 2.81
2283 1024 4224 4.018e-02 1.91 2.784e-01 1.93 4.027e-03 2.87
2284 2304 9408 1.831e-02 1.94 1.264e-01 1.95 1.236e-03 2.91
2285 4096 16640 1.043e-02 1.96 7.185e-02 1.96 5.306e-04 2.94
2286 9216 37248 4.690e-03 1.97 3.228e-02 1.97 1.599e-04 2.96
2288Q3 elements, global refinement:
2289cells dofs      val L2           grad L2          val L2-post
2290 16 160 3.613e-01 - 1.891e+00 - 3.020e-01 -
2291 36 336 6.411e-02 4.26 5.081e-01 3.24 3.238e-02 5.51
2292 64 576 3.480e-02 2.12 2.533e-01 2.42 5.277e-03 6.31
2293 144 1248 8.297e-03 3.54 5.924e-02 3.58 6.330e-04 5.23
2294 256 2176 2.254e-03 4.53 1.636e-02 4.47 1.403e-04 5.24
2295 576 4800 4.558e-04 3.94 3.277e-03 3.96 1.844e-05 5.01
2296 1024 8448 1.471e-04 3.93 1.052e-03 3.95 4.378e-06 5.00
2297 2304 18816 2.956e-05 3.96 2.104e-04 3.97 5.750e-07 5.01
2298 4096 33280 9.428e-06 3.97 6.697e-05 3.98 1.362e-07 5.01
2299 9216 74496 1.876e-06 3.98 1.330e-05 3.99 1.788e-08 5.01
2303One can see the error reduction upon grid refinement, and for the cases where
2304global refinement was performed, also the convergence rates. The quadratic
2305convergence rates of Q1 elements in the @f$L_2@f$ norm for both the scalar
2306variable and the gradient variable are apparent, as is the cubic rate for the
2307postprocessed scalar variable in the @f$L_2@f$ norm. Note this distinctive
2308feature of an HDG solution. In typical continuous finite elements, the
2309gradient of the solution of order @f$p@f$ converges at rate @f$p@f$ only, as
2310opposed to @f$p+1@f$ for the actual solution. Even though superconvergence
2311results for finite elements are also available (e.g. the superconvergent patch
2312recovery first introduced by Zienkiewicz and Zhu), these are typically limited
2313to structured meshes and other special cases. For Q3 HDG variables, the scalar
2314variable and gradient converge at fourth order and the postprocessed scalar
2315variable at fifth order.
2317The same convergence rates are observed in 3d.
2319Q1 elements, adaptive refinement:
2320cells dofs      val L2      grad L2     val L2-post
2321 8 144 7.122e+00 1.941e+01 6.102e+00
2322 29 500 3.309e+00 1.023e+01 2.145e+00
2323 113 1792 2.204e+00 1.023e+01 1.912e+00
2324 379 5732 6.085e-01 5.008e+00 2.233e-01
2325 1317 19412 1.543e-01 1.464e+00 4.196e-02
2326 4579 64768 5.058e-02 5.611e-01 9.521e-03
2327 14596 199552 2.129e-02 3.122e-01 4.569e-03
2328 46180 611400 1.033e-02 1.622e-01 1.684e-03
2329144859 1864212 5.007e-03 8.371e-02 7.364e-04
2330451060 5684508 2.518e-03 4.562e-02 3.070e-04
2332Q1 elements, global refinement:
2333cells dofs      val L2           grad L2          val L2-post
2334 8 144 7.122e+00 - 1.941e+01 - 6.102e+00 -
2335 27 432 5.491e+00 0.64 2.184e+01 -0.29 4.448e+00 0.78
2336 64 960 3.646e+00 1.42 1.299e+01 1.81 3.306e+00 1.03
2337 216 3024 1.595e+00 2.04 8.550e+00 1.03 1.441e+00 2.05
2338 512 6912 6.922e-01 2.90 5.306e+00 1.66 2.511e-01 6.07
2339 1728 22464 2.915e-01 2.13 2.490e+00 1.87 8.588e-02 2.65
2340 4096 52224 1.684e-01 1.91 1.453e+00 1.87 4.055e-02 2.61
2341 13824 172800 7.972e-02 1.84 6.861e-01 1.85 1.335e-02 2.74
2342 32768 405504 4.637e-02 1.88 3.984e-01 1.89 5.932e-03 2.82
2343110592 1354752 2.133e-02 1.92 1.830e-01 1.92 1.851e-03 2.87
2345Q3 elements, global refinement:
2346cells dofs      val L2           grad L2          val L2-post
2347 8 576 5.670e+00 - 1.868e+01 - 5.462e+00 -
2348 27 1728 1.048e+00 4.16 6.988e+00 2.42 8.011e-01 4.73
2349 64 3840 2.831e-01 4.55 2.710e+00 3.29 1.363e-01 6.16
2350 216 12096 7.883e-02 3.15 7.721e-01 3.10 2.158e-02 4.55
2351 512 27648 3.642e-02 2.68 3.305e-01 2.95 5.231e-03 4.93
2352 1728 89856 8.546e-03 3.58 7.581e-02 3.63 7.640e-04 4.74
2353 4096 208896 2.598e-03 4.14 2.313e-02 4.13 1.783e-04 5.06
2354 13824 691200 5.314e-04 3.91 4.697e-03 3.93 2.355e-05 4.99
2355 32768 1622016 1.723e-04 3.91 1.517e-03 3.93 5.602e-06 4.99
2356110592 5419008 3.482e-05 3.94 3.055e-04 3.95 7.374e-07 5.00
2359<a name="step_51-Comparisonwithcontinuousfiniteelements"></a><h3>Comparison with continuous finite elements</h3>
2362<a name="step_51-Resultsfor2D"></a><h4>Results for 2D</h4>
2365The convergence tables verify the expected convergence rates stated in the
2366introduction. Now, we want to show a quick comparison of the computational
2367efficiency of the HDG method compared to a usual finite element (continuous
2368Galerkin) method on the problem of this tutorial. Of course, stability aspects
2369of the HDG method compared to continuous finite elements for
2370transport-dominated problems are also important in practice, which is an
2371aspect not seen on a problem with smooth analytic solution. In the picture
2372below, we compare the @f$L_2@f$ error as a function of the number of degrees of
2373freedom (left) and of the computing time spent in the linear solver (right)
2374for two space dimensions of continuous finite elements (CG) and the hybridized
2375discontinuous Galerkin method presented in this tutorial. As opposed to the
2376tutorial where we only use unpreconditioned BiCGStab, the times shown in the
2377figures below use the Trilinos algebraic multigrid preconditioner in both cases.
2382<table align="center">
2384 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_plain.png" width="400" alt=""></td>
2385 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_plain.png" width="400" alt=""></td>
2389The results in the graphs show that the HDG method is slower than continuous
2390finite elements at @f$p=1@f$, about equally fast for cubic elements and
2391faster for sixth order elements. However, we have seen above that the HDG
2392method actually produces solutions which are more accurate than what is
2393represented in the original variables. Therefore, in the next two plots below
2394we instead display the error of the post-processed solution for HDG (denoted
2395by @f$p=1^*@f$ for example). We now see a clear advantage of HDG for the same
2396amount of work for both @f$p=3@f$ and @f$p=6@f$, and about the same quality for linear elements.
2399<table align="center">
2401 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_post.png" width="400" alt=""></td>
2402 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_post.png" width="400" alt=""></td>
2406Since the HDG method actually produces results converging as
2407@f$h^{p+2}@f$, we should compare it to a continuous Galerkin
2408solution with the same asymptotic convergence behavior, i.e., FE_Q with degree
2409@f$p+1@f$. If we do this, we get the convergence curves below. We see that
2410CG with second order polynomials is again clearly better than HDG with
2411linears. However, the advantage of HDG for higher orders remains.
2413<table align="center">
2415 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_postb.png" width="400" alt=""></td>
2416 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_postb.png" width="400" alt=""></td>
2420The results are in line with properties of DG methods in general: Best
2421performance is typically not achieved for linear elements, but rather at
2422somewhat higher order, usually around @f$p=3@f$. This is because of a
2423volume-to-surface effect for discontinuous solutions with too much of the
2424solution living on the surfaces and hence duplicating work when the elements
2425are linear. Put in other words, DG methods are often most efficient when used
2426at relatively high order, despite their focus on a discontinuous (and hence,
2427seemingly low-accuracy) representation of solutions.
2429<a name="step_51-Resultsfor3D"></a><h4>Results for 3D</h4>
2432We now show the same figures in 3D: The first row shows the number of degrees
2433of freedom and computing time versus the @f$L_2@f$ error in the scalar variable
2434@f$u@f$ for CG and HDG at order @f$p@f$, the second row shows the
2435post-processed HDG solution instead of the original one, and the third row
2436compares the post-processed HDG solution with CG at order @f$p+1@f$. In 3D,
2437the volume-to-surface effect makes the cost of HDG somewhat higher and the CG
2438solution is clearly better than HDG for linears by any metric. For cubics, HDG
2439and CG are of similar quality, whereas HDG is again more efficient for sixth
2440order polynomials. One can alternatively also use the combination of FE_DGP
2441and FE_FaceP instead of (FE_DGQ, FE_FaceQ), which do not use tensor product
2442polynomials of degree @f$p@f$ but Legendre polynomials of <i>complete</i>
2443degree @f$p@f$. There are fewer degrees of freedom on the skeleton variable
2444for FE_FaceP for a given mesh size, but the solution quality (error vs. number
2445of DoFs) is very similar to the results for FE_FaceQ.
2447<table align="center">
2449 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_plain.png" width="400" alt=""></td>
2450 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_plain.png" width="400" alt=""></td>
2453 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_post.png" width="400" alt=""></td>
2454 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_post.png" width="400" alt=""></td>
2457 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_postb.png" width="400" alt=""></td>
2458 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_postb.png" width="400" alt=""></td>
2462One final note on the efficiency comparison: We tried to use general-purpose
2463sparse matrix structures and similar solvers (optimal AMG preconditioners for
2464both without particular tuning of the AMG parameters on any of them) to give a
2465fair picture of the cost versus accuracy of two methods, on a toy example. It
2466should be noted however that geometric multigrid (GMG) for continuous finite
2467elements is about a factor four to five faster for @f$p=3@f$ and @f$p=6@f$. As of
24682019, optimal-complexity iterative solvers for HDG are still under development
2469in the research community. Also, there are other implementation aspects for CG
2470available such as fast matrix-free approaches as shown in @ref step_37 "step-37" that make
2471higher order continuous elements more competitive. Again, it is not clear to
2472the authors of the tutorial whether similar improvements could be made for
2473HDG. We refer to <a href="https://dx.doi.org/10.1137/16M110455X">Kronbichler
2474and Wall (2018)</a> for a recent efficiency evaluation.
2477<a name="step_51-Possibilitiesforimprovements"></a><h3>Possibilities for improvements</h3>
2480As already mentioned in the introduction, one possibility is to implement
2481another post-processing technique as discussed in the literature.
2483A second item that is not done optimally relates to the performance of this
2484program, which is of course an issue in practical applications (weighing in
2485also the better solution quality of (H)DG methods for transport-dominated
2486problems). Let us look at
2487the computing time of the tutorial program and the share of the individual components:
2490<table align="center" class="doxtable">
2497 <th>Trace reconstruct</th>
2498 <th>Post-processing</th>
2504 <th colspan=
"6">Relative share</th>
2507 <td align="left">2D, Q1, cycle 9, 37,248 dofs</td>
2508 <td align="center">5.34s</td>
2509 <td align="center">0.7%</td>
2510 <td align="center">1.2%</td>
2511 <td align="center">89.5%</td>
2512 <td align="center">0.9%</td>
2513 <td align="center">2.3%</td>
2514 <td align="center">5.4%</td>
2517 <td align="left">2D, Q3, cycle 9, 74,496 dofs</td>
2518 <td align="center">22.2s</td>
2519 <td align="center">0.4%</td>
2520 <td align="center">4.3%</td>
2521 <td align="center">84.1%</td>
2522 <td align="center">4.1%</td>
2523 <td align="center">3.5%</td>
2524 <td align="center">3.6%</td>
2527 <td align="left">3D, Q1, cycle 7, 172,800 dofs</td>
2528 <td align="center">9.06s</td>
2529 <td align="center">3.1%</td>
2530 <td align="center">8.9%</td>
2531 <td align="center">42.7%</td>
2532 <td align="center">7.0%</td>
2533 <td align="center">20.6%</td>
2534 <td align="center">17.7%</td>
2537 <td align="left">3D, Q3, cycle 7, 691,200 dofs</td>
2538 <td align="center">516s</td>
2539 <td align="center">0.6%</td>
2540 <td align="center">34.5%</td>
2541 <td align="center">13.4%</td>
2542 <td align="center">32.8%</td>
2543 <td align="center">17.1%</td>
2544 <td align="center">1.5%</td>
2548As can be seen from the table, the solver and assembly calls dominate the
2549runtime of the program. This also gives a clear indication of where
2550improvements would make the most sense:
2553 <li> Better linear solvers: We use a BiCGStab iterative solver without
2554 preconditioner, where the number of iterations increases with increasing
2555 problem size (the number of iterations for Q1 elements and global
2556 refinements starts at 35 for the small sizes but increases up to 701 for the
2557 largest size). To do better, one could for example use an algebraic
2558 multigrid preconditioner from Trilinos, or some more advanced variants such as
2559 the one discussed in <a
2560 href="https://dx.doi.org/10.1137/16M110455X">Kronbichler and Wall
2561 (2018)</a>. For diffusion-dominated problems such as the problem at hand
2562 with finer meshes, such a solver can be designed that uses the matrix-vector
2564 products, as long as we are not working in parallel with MPI. For MPI-parallelized
2567 <li> Speed up assembly by pre-assembling parts that do not change from one
2568 cell to another (those that contain neither variable coefficients nor
2569 mapping-dependent terms).
2573<a name="step_51-PlainProg"></a>
2574<h1> The plain program</h1>
2575@include "step-51.cc"