* for (unsigned int i = 0; i < this->n_source_centers; ++i)
*
*                         const unsigned int = 0) const override
*
* for (unsigned int i = 0; i < this->n_source_centers; ++i)
*   (-2 / (this->width * this->width) *
*    std::exp(-x_minus_xi.norm_square() / (this->width * this->width)) *
* This class implements a function where the scalar solution and its negative
* gradient are collected together. This function is used when computing the
* error of the HDG approximation, and its implementation simply calls the
* value and gradient functions of the Solution class.
* class SolutionAndGradient : public Function<dim>, protected SolutionBase<dim>
*
* SolutionAndGradient()
*
* Solution<dim> solution;
*
* for (unsigned int d = 0; d < dim; ++d)
*
* v[dim] = solution.value(p);
* Next comes the implementation of the convection velocity. As described in
* the introduction, we choose a velocity field that is @f$(y, -x)@f$ in 2D and
* @f$(y, -x, 1)@f$ in 3D. This gives a divergence-free velocity field.
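* Indeed, the divergence vanishes identically; in 2D, for instance,
* @f[
* \nabla \cdot \mathbf{c}
* = \frac{\partial y}{\partial x} + \frac{\partial (-x)}{\partial y} = 0,
* @f]
* and the constant third component in 3D only adds @f$\partial 1/\partial z = 0@f$.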
* ConvectionVelocity()
*
* convection[0] = p[1];
* convection[1] = -p[0];
*
* convection[0] = p[1];
* convection[1] = -p[0];
* The last function we implement is the right hand side for the
* manufactured solution. It is very similar to @ref step_7 "step-7", with the exception
* that we now have a convection term instead of the reaction term. Since
* the velocity field is incompressible, i.e., @f$\nabla \cdot \mathbf{c} =
* 0@f$, the advection term simply reads @f$\mathbf{c} \nabla u@f$.
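* Spelled out, this is just the product rule:
* @f[
* \nabla \cdot (\mathbf{c}\, u)
* = u \,(\nabla \cdot \mathbf{c}) + \mathbf{c} \cdot \nabla u
* = \mathbf{c} \cdot \nabla u .
* @f]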
* class RightHandSide : public Function<dim>, protected SolutionBase<dim>
*
*                       const unsigned int = 0) const override
*
* ConvectionVelocity<dim> convection_velocity;
*
* for (unsigned int i = 0; i < this->n_source_centers; ++i)
*   ((2 * dim - 2 * convection * x_minus_xi -
*     4 * x_minus_xi.norm_square() / (this->width * this->width)) /
*    (this->width * this->width) *
*    std::exp(-x_minus_xi.norm_square() / (this->width * this->width)));
* <a name="TheHDGsolverclass"></a>
* <h3>The HDG solver class</h3>
* The HDG solution procedure follows closely that of @ref step_7 "step-7". The major
* difference is the use of three different sets of DoFHandler and FE
* objects, along with the corresponding matrices and solution
* vectors. We also use WorkStream to enable a multithreaded local solution
* process which exploits the embarrassingly parallel nature of the local
* solver. For WorkStream, we define the local operations on a cell and a
* copy function into the global matrix and vector. We do this both for the
* assembly (which is run twice, once when we generate the system matrix and
* once when we compute the element-interior solutions from the skeleton
* values) and for the postprocessing where we extract a solution that
* converges at higher order.
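* Condensed to its essentials, the WorkStream pattern used here has the
* following shape (a sketch of the invocation that appears in
* assemble_system() below; the scratch and task data types are defined
* further down in this class):
* @code
* WorkStream::run(dof_handler.begin_active(),
*                 dof_handler.end(),
*                 *this,
*                 &HDG<dim>::assemble_system_one_cell, // per-cell worker
*                 &HDG<dim>::copy_local_to_global,     // serialized copier
*                 scratch,                             // ScratchData sample
*                 task_data);                          // PerTaskData sample
* @endcode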
* enum RefinementMode
*   adaptive_refinement
*
* HDG(const unsigned int degree, const RefinementMode refinement_mode);
* void setup_system();
* void assemble_system(const bool reconstruct_trace = false);
* void postprocess();
* void refine_grid(const unsigned int cycle);
* void output_results(const unsigned int cycle);
* Data for the assembly and solution of the primal variables.
*
* struct PerTaskData;
* struct ScratchData;
* Post-processing the solution to obtain @f$u^*@f$ is an element-by-element
* procedure; as such, we do not need to assemble any global data and do
* not declare any 'task data' for WorkStream to use.
*
* struct PostProcessScratchData;
* work of the program.
*
* void assemble_system_one_cell(
*   ScratchData &scratch,
*   PerTaskData &task_data);
*
* void copy_local_to_global(const PerTaskData &data);
*
* void postprocess_one_cell(
*   PostProcessScratchData &scratch,
*   unsigned int &empty_data);
* The 'local' solutions are interior to each element. These
* represent the primal solution field @f$u@f$ as well as the auxiliary
* field @f$\mathbf{q}@f$.
*
* The new finite element type and corresponding <code>DoFHandler</code> are
* used for the global skeleton solution that couples the element-level
* local solutions.
* As stated in the introduction, HDG solutions can be post-processed to
* attain superconvergence rates of @f$\mathcal{O}(h^{p+2})@f$. The
* post-processed solution is a discontinuous finite element solution
* representing the primal variable on the interior of each cell. We define
* a FE type of degree @f$p+1@f$ to represent this post-processed solution,
* which we only use for output after constructing it.
* The degrees of freedom corresponding to the skeleton strongly enforce
* Dirichlet boundary conditions, just as in a continuous Galerkin finite
* element method. We can enforce the boundary conditions in an analogous
* manner via an AffineConstraints object. In addition, hanging nodes are
* handled in the same way as for continuous finite elements: For the face
* elements which only define degrees of freedom on the face, this process
* sets the solution on the refined side to coincide with the
* representation on the coarse side.
*
* Note that for HDG, the elimination of hanging nodes is not the only
* possibility: in terms of the HDG theory, one could also use the
* unknowns from the refined side and express the local solution on the
* coarse side through the trace values on the refined side. However, such
* a setup is not as easily implemented in terms of deal.II loops and is
* not pursued further here.
* the actual matrix object. When creating the sparsity pattern, we just
* have to additionally pass the size of local blocks.
* Same as @ref step_7 "step-7":
*
* const RefinementMode refinement_mode;
* <a name="TheHDGclassimplementation"></a>
* <h3>The HDG class implementation</h3>
*
* <a name="Constructor"></a>
* <h4>Constructor</h4>
* The constructor is similar to those in other examples, with the exception
* that we create a system of finite elements for the local DG part, including
* the gradient/flux part and the scalar part.
* HDG<dim>::HDG(const unsigned int degree, const RefinementMode refinement_mode)
*   , fe_u_post(degree + 1)
*   , refinement_mode(refinement_mode)
* <a name="HDGsetup_system"></a>
* <h4>HDG::setup_system</h4>
* The system for an HDG solution is set up in an analogous manner to most
* of the other tutorial programs. We are careful to distribute dofs with
* all of our DoFHandler objects. The @p solution and @p system_matrix
* objects go with the global skeleton solution.
* void HDG<dim>::setup_system()
*
* dof_handler_local.distribute_dofs(fe_local);
* dof_handler.distribute_dofs(fe);
* dof_handler_u_post.distribute_dofs(fe_u_post);
*
* std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
*
* solution.reinit(dof_handler.n_dofs());
* system_rhs.reinit(dof_handler.n_dofs());
*
* solution_local.reinit(dof_handler_local.n_dofs());
* solution_u_post.reinit(dof_handler_u_post.n_dofs());
*
* constraints.clear();
*
* std::map<types::boundary_id, const Function<dim> *> boundary_functions;
* Solution<dim> solution_function;
* boundary_functions[0] = &solution_function;
*   boundary_functions,
*
* constraints.close();
* When creating the chunk sparsity pattern, we first create the usual
* dynamic sparsity pattern and then set the chunk size, which is equal
* to the number of dofs on a face, when copying this into the final
* sparsity pattern.
*
* sparsity_pattern.copy_from(dsp, fe.dofs_per_face);
*
* system_matrix.reinit(sparsity_pattern);
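* For reference, the pattern setup amounts to a few lines like the following
* sketch (assuming the usual DynamicSparsityPattern workflow used throughout
* the tutorials, with @p dsp the object copied from above):
* @code
* DynamicSparsityPattern dsp(dof_handler.n_dofs());
* DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
* // The chunk size equals the number of dofs on a face:
* sparsity_pattern.copy_from(dsp, fe.dofs_per_face);
* @endcode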
* <a name="HDGPerTaskData"></a>
* <h4>HDG::PerTaskData</h4>
* Next comes the definition of the local data structures for the parallel
* assembly. The first structure @p PerTaskData contains the local vector
* and matrix that are written into the global matrix, whereas the
* ScratchData contains all data that we need for the local assembly. There
* is one variable worth noting here, namely the boolean variable @p
* trace_reconstruct. As mentioned in the introduction, we solve the HDG
* system in two steps. First, we create a linear system for the skeleton
* system where we condense the local part into it via the Schur complement
* @f$D-CA^{-1}B@f$. Then, we solve for the local part using the skeleton
* solution. For these two steps, we need the same matrices on the elements
* twice, which we want to compute by two assembly steps. Since most of the
* code is similar, we do this with the same function but only switch
* between the two based on a flag that we set when starting the
* assembly. Since we need to pass this information on to the local worker
* routines, we store it once in the task data.
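* In block notation (following the introduction, with @f$U@f$ the local and
* @f$\Lambda@f$ the skeleton unknowns), the two steps read
* @f[
* \begin{pmatrix} A & B \\ C & D \end{pmatrix}
* \begin{pmatrix} U \\ \Lambda \end{pmatrix}
* =
* \begin{pmatrix} F \\ G \end{pmatrix}
* \qquad \Rightarrow \qquad
* \left(D - C A^{-1} B\right) \Lambda = G - C A^{-1} F,
* \qquad
* U = A^{-1} \left(F - B \Lambda\right).
* @f]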
* template <int dim>
* struct HDG<dim>::PerTaskData
*
* std::vector<types::global_dof_index> dof_indices;
* bool trace_reconstruct;
*
* PerTaskData(const unsigned int n_dofs, const bool trace_reconstruct)
*   , cell_vector(n_dofs)
*   , dof_indices(n_dofs)
*   , trace_reconstruct(trace_reconstruct)
* <a name="HDGScratchData"></a>
* <h4>HDG::ScratchData</h4>
* @p ScratchData contains persistent data for each
* thread within WorkStream. The FEValues, matrix,
* and vector objects should be familiar by now. There are two objects that
* need to be discussed: `std::vector<std::vector<unsigned int>>
* fe_local_support_on_face` and `std::vector<std::vector<unsigned int>>
* fe_support_on_face`. These are used to indicate whether or not the finite
* elements chosen have support (non-zero values) on a given face of the
* reference cell for the local part associated to @p fe_local and the
* skeleton part @p fe. We extract this information in the
* constructor and store it once for all cells that we work on. Had we not
* stored this information, we would be forced to assemble a large number of
* zero terms on each cell, which would significantly slow the program.
* template <int dim>
* struct HDG<dim>::ScratchData
*
* std::vector<Tensor<1, dim>> q_phi;
* std::vector<double>         q_phi_div;
* std::vector<double>         u_phi;
* std::vector<Tensor<1, dim>> u_phi_grad;
* std::vector<double>         tr_phi;
* std::vector<double>         trace_values;
*
* std::vector<std::vector<unsigned int>> fe_local_support_on_face;
* std::vector<std::vector<unsigned int>> fe_support_on_face;
*
* ConvectionVelocity<dim> convection_velocity;
* RightHandSide<dim>      right_hand_side;
* const Solution<dim>     exact_solution;
* : fe_values_local(fe_local, quadrature_formula, local_flags)
* , fe_face_values_local(fe_local,
*                        face_quadrature_formula,
* , fe_face_values(fe, face_quadrature_formula, flags)
* , ll_matrix(fe_local.dofs_per_cell, fe_local.dofs_per_cell)
* , lf_matrix(fe_local.dofs_per_cell, fe.dofs_per_cell)
* , fl_matrix(fe.dofs_per_cell, fe_local.dofs_per_cell)
* , tmp_matrix(fe.dofs_per_cell, fe_local.dofs_per_cell)
* , l_rhs(fe_local.dofs_per_cell)
* , tmp_rhs(fe_local.dofs_per_cell)
* , q_phi(fe_local.dofs_per_cell)
* , q_phi_div(fe_local.dofs_per_cell)
* , u_phi(fe_local.dofs_per_cell)
* , u_phi_grad(fe_local.dofs_per_cell)
* , tr_phi(fe.dofs_per_cell)
* , trace_values(face_quadrature_formula.size())
* , exact_solution()
*
* for (unsigned int i = 0; i < fe_local.dofs_per_cell; ++i)
*   if (fe_local.has_support_on_face(i, face_no))
*     fe_local_support_on_face[face_no].push_back(i);
*
* for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
*   if (fe.has_support_on_face(i, face_no))
*     fe_support_on_face[face_no].push_back(i);
* ScratchData(const ScratchData &sd)
*   : fe_values_local(sd.fe_values_local.get_fe(),
*                     sd.fe_values_local.get_quadrature(),
*                     sd.fe_values_local.get_update_flags())
*   , fe_face_values_local(sd.fe_face_values_local.get_fe(),
*                          sd.fe_face_values_local.get_quadrature(),
*                          sd.fe_face_values_local.get_update_flags())
*   , fe_face_values(sd.fe_face_values.get_fe(),
*                    sd.fe_face_values.get_quadrature(),
*                    sd.fe_face_values.get_update_flags())
*   , ll_matrix(sd.ll_matrix)
*   , lf_matrix(sd.lf_matrix)
*   , fl_matrix(sd.fl_matrix)
*   , tmp_matrix(sd.tmp_matrix)
*   , tmp_rhs(sd.tmp_rhs)
*   , q_phi_div(sd.q_phi_div)
*   , u_phi_grad(sd.u_phi_grad)
*   , tr_phi(sd.tr_phi)
*   , trace_values(sd.trace_values)
*   , fe_local_support_on_face(sd.fe_local_support_on_face)
*   , fe_support_on_face(sd.fe_support_on_face)
*   , exact_solution()
* <a name="HDGPostProcessScratchData"></a>
* <h4>HDG::PostProcessScratchData</h4>
* @p PostProcessScratchData contains the data used by WorkStream
* when post-processing the local solution @f$u^*@f$. It is similar to, but
* much simpler than, @p ScratchData.
* template <int dim>
* struct HDG<dim>::PostProcessScratchData
*
* std::vector<double>         u_values;
* std::vector<Tensor<1, dim>> u_gradients;
*
* : fe_values_local(fe_local, quadrature_formula, local_flags)
* , fe_values(fe, quadrature_formula, flags)
* , u_values(quadrature_formula.size())
* , u_gradients(quadrature_formula.size())
* , cell_matrix(fe.dofs_per_cell, fe.dofs_per_cell)
* , cell_rhs(fe.dofs_per_cell)
* , cell_sol(fe.dofs_per_cell)
* PostProcessScratchData(const PostProcessScratchData &sd)
*   : fe_values_local(sd.fe_values_local.get_fe(),
*                     sd.fe_values_local.get_quadrature(),
*                     sd.fe_values_local.get_update_flags())
*   , fe_values(sd.fe_values.get_fe(),
*               sd.fe_values.get_quadrature(),
*               sd.fe_values.get_update_flags())
*   , u_values(sd.u_values)
*   , u_gradients(sd.u_gradients)
*   , cell_rhs(sd.cell_rhs)
*   , cell_sol(sd.cell_sol)
* <a name="HDGassemble_system"></a>
* <h4>HDG::assemble_system</h4>
* The @p assemble_system function is similar to the one in @ref step_32 "step-32", where
* the quadrature formula and the update flags are set up, and then
* <code>WorkStream</code> is used to do the work in a multi-threaded
* manner. The @p trace_reconstruct input parameter is used to decide
* whether we are solving for the global skeleton solution (false) or the
* local solution (true).
*
* One thing worth noting for the multi-threaded execution of assembly is
* the fact that the local computations in `assemble_system_one_cell()` call
* into BLAS and LAPACK functions if those are available in deal.II. Thus,
* the underlying BLAS/LAPACK library must support calls from multiple
* threads at the same time. Most implementations do support this, but some
* libraries need to be built in a specific way to avoid problems. For
* example, OpenBLAS compiled without multithreading inside the BLAS/LAPACK
* calls needs to be built with a flag called `USE_LOCKING` set to true.
* template <int dim>
* void HDG<dim>::assemble_system(const bool trace_reconstruct)
*
* const QGauss<dim>     quadrature_formula(fe.degree + 1);
* const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
*
* PerTaskData task_data(fe.dofs_per_cell, trace_reconstruct);
* ScratchData scratch(fe,
*                     quadrature_formula,
*                     face_quadrature_formula,
*
*                 dof_handler.end(),
*                 &HDG<dim>::assemble_system_one_cell,
*                 &HDG<dim>::copy_local_to_global,
* <a name="HDGassemble_system_one_cell"></a>
* <h4>HDG::assemble_system_one_cell</h4>
* The real work of the HDG program is done by @p assemble_system_one_cell.
* Assembling the local matrices @f$A, B, C@f$ is done here, along with the
* local contributions of the global matrix @f$D@f$.
* template <int dim>
* void HDG<dim>::assemble_system_one_cell(
*   ScratchData &scratch,
*   PerTaskData &task_data)
*
* Construct an iterator for dof_handler_local for the FEValues reinit function.
*
*   &dof_handler_local);
*
* const unsigned int n_q_points =
*   scratch.fe_values_local.get_quadrature().size();
* const unsigned int n_face_q_points =
*   scratch.fe_face_values_local.get_quadrature().size();
*
* const unsigned int loc_dofs_per_cell =
*   scratch.fe_values_local.get_fe().dofs_per_cell;
*
* scratch.ll_matrix = 0;
* scratch.l_rhs     = 0;
* if (!task_data.trace_reconstruct)
*   {
*     scratch.lf_matrix     = 0;
*     scratch.fl_matrix     = 0;
*     task_data.cell_matrix = 0;
*     task_data.cell_vector = 0;
*   }
* scratch.fe_values_local.reinit(loc_cell);
* We first compute the cell-interior contribution to the @p ll_matrix object
* (referred to as matrix @f$A@f$ in the introduction) corresponding to
* local-local coupling, as well as the local right-hand-side vector. We
* store the values at each quadrature point for the basis functions, the
* right-hand-side value, and the convection velocity, in order to have
* quick access to these fields.
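* Term by term, the local matrix accumulated in the loop below is (with
* @f$\mathbf{q}_i@f$ and @f$u_i@f$ denoting the flux and scalar parts of
* basis function @f$i@f$, matching q_phi, u_phi, and friends in the code)
* @f[
* A_{ij} = \int_K \Big( \mathbf{q}_i \cdot \mathbf{q}_j
*   - (\nabla \cdot \mathbf{q}_i)\, u_j
*   + u_i \,(\nabla \cdot \mathbf{q}_j)
*   - (\mathbf{c} \cdot \nabla u_i)\, u_j \Big) \, \mathrm{d}x .
* @f]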
* for (unsigned int q = 0; q < n_q_points; ++q)
*   {
*     const double rhs_value = scratch.right_hand_side.value(
*       scratch.fe_values_local.quadrature_point(q));
*     const Tensor<1, dim> convection = scratch.convection_velocity.value(
*       scratch.fe_values_local.quadrature_point(q));
*     const double JxW = scratch.fe_values_local.JxW(q);
*     for (unsigned int k = 0; k < loc_dofs_per_cell; ++k)
*       {
*         scratch.q_phi[k] = scratch.fe_values_local[fluxes].value(k, q);
*         scratch.q_phi_div[k] =
*           scratch.fe_values_local[fluxes].divergence(k, q);
*         scratch.u_phi[k] = scratch.fe_values_local[scalar].value(k, q);
*         scratch.u_phi_grad[k] =
*           scratch.fe_values_local[scalar].gradient(k, q);
*       }
*     for (unsigned int i = 0; i < loc_dofs_per_cell; ++i)
*       {
*         for (unsigned int j = 0; j < loc_dofs_per_cell; ++j)
*           scratch.ll_matrix(i, j) +=
*             (scratch.q_phi[i] * scratch.q_phi[j] -
*              scratch.q_phi_div[i] * scratch.u_phi[j] +
*              scratch.u_phi[i] * scratch.q_phi_div[j] -
*              (scratch.u_phi_grad[i] * convection) * scratch.u_phi[j]) *
*             JxW;
*         scratch.l_rhs(i) += scratch.u_phi[i] * rhs_value * JxW;
*       }
*   }
* Face terms are assembled on all faces of all elements. This is in
* contrast to more traditional DG methods, where each face is only visited
* once in the assembly procedure.
*
* scratch.fe_face_values_local.reinit(loc_cell, face_no);
* scratch.fe_face_values.reinit(cell, face_no);
* The already obtained @f$\hat{u}@f$ values are needed when solving for the
* local variables.
*
* if (task_data.trace_reconstruct)
*   scratch.fe_face_values.get_function_values(solution,
*                                              scratch.trace_values);
*
* for (unsigned int q = 0; q < n_face_q_points; ++q)
*   {
*     const double JxW = scratch.fe_face_values.JxW(q);
*     const Point<dim> quadrature_point =
*       scratch.fe_face_values.quadrature_point(q);
*     const Tensor<1, dim> normal =
*       scratch.fe_face_values.normal_vector(q);
*     const Tensor<1, dim> convection =
*       scratch.convection_velocity.value(quadrature_point);
* Here we compute the stabilization parameter discussed in the
* introduction: since the diffusion is one and the diffusion
* length scale is set to 1/5, it simply results in a contribution
* of 5 for the diffusion part and the magnitude of convection
* through the element boundary in a centered scheme for the
* convection part.
*
* const double tau_stab = (5. + std::abs(convection * normal));
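* Written out, with diffusion coefficient @f$\kappa = 1@f$ and length scale
* @f$\ell = 1/5@f$ as stated above, this is
* @f[
* \tau = \frac{\kappa}{\ell} + |\mathbf{c} \cdot \mathbf{n}|
*      = 5 + |\mathbf{c} \cdot \mathbf{n}| .
* @f]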
* We store the non-zero flux and scalar values, making use of the
* support_on_face information we created in @p ScratchData.
*
* for (unsigned int k = 0;
*      k < scratch.fe_local_support_on_face[face_no].size();
*      ++k)
*   {
*     const unsigned int kk =
*       scratch.fe_local_support_on_face[face_no][k];
*     scratch.q_phi[k] =
*       scratch.fe_face_values_local[fluxes].value(kk, q);
*     scratch.u_phi[k] =
*       scratch.fe_face_values_local[scalar].value(kk, q);
*   }
* When @p trace_reconstruct=false, we are preparing to assemble the
* system for the skeleton variable @f$\hat{u}@f$. If this is the case,
* we must assemble all local matrices associated with the problem:
* local-local, local-face, face-local, and face-face. The
* face-face matrix is stored as @p TaskData so that
* it can be assembled into the global system by @p
* copy_local_to_global.
* if (!task_data.trace_reconstruct)
*   {
*     for (unsigned int k = 0;
*          k < scratch.fe_support_on_face[face_no].size();
*          ++k)
*       scratch.tr_phi[k] = scratch.fe_face_values.shape_value(
*         scratch.fe_support_on_face[face_no][k], q);
*     for (unsigned int i = 0;
*          i < scratch.fe_local_support_on_face[face_no].size();
*          ++i)
*       for (unsigned int j = 0;
*            j < scratch.fe_support_on_face[face_no].size();
*            ++j)
*         {
*           const unsigned int ii =
*             scratch.fe_local_support_on_face[face_no][i];
*           const unsigned int jj =
*             scratch.fe_support_on_face[face_no][j];
*           scratch.lf_matrix(ii, jj) +=
*             ((scratch.q_phi[i] * normal +
*               (convection * normal - tau_stab) * scratch.u_phi[i]) *
*              scratch.tr_phi[j]) *
*             JxW;
*
* Note the sign of the face_no-local matrix. We negate the
* sign during assembly here so that we can use addition when
* computing the Schur complement.
*
*           scratch.fl_matrix(jj, ii) -=
*             ((scratch.q_phi[i] * normal +
*               tau_stab * scratch.u_phi[i]) *
*              scratch.tr_phi[j]) *
*             JxW;
* for (unsigned int i = 0;
*      i < scratch.fe_support_on_face[face_no].size();
*      ++i)
*   for (unsigned int j = 0;
*        j < scratch.fe_support_on_face[face_no].size();
*        ++j)
*     {
*       const unsigned int ii =
*         scratch.fe_support_on_face[face_no][i];
*       const unsigned int jj =
*         scratch.fe_support_on_face[face_no][j];
*       task_data.cell_matrix(ii, jj) +=
*         ((convection * normal - tau_stab) * scratch.tr_phi[i] *
*          scratch.tr_phi[j]) *
*         JxW;
*     }
* if (cell->face(face_no)->at_boundary() &&
*     (cell->face(face_no)->boundary_id() == 1))
*   {
*     const double neumann_value =
*       -scratch.exact_solution.gradient(quadrature_point) * normal +
*       convection * normal *
*         scratch.exact_solution.value(quadrature_point);
*     for (unsigned int i = 0;
*          i < scratch.fe_support_on_face[face_no].size();
*          ++i)
*       {
*         const unsigned int ii =
*           scratch.fe_support_on_face[face_no][i];
*         task_data.cell_vector(ii) +=
*           scratch.tr_phi[i] * neumann_value * JxW;
*       }
*   }
* This last term adds the contribution of the term @f$\left<w,\tau
* u_h\right>_{\partial \mathcal T}@f$ to the local matrix. As opposed
* to the face matrices above, we need it in both assembly stages.
*
* for (unsigned int i = 0;
*      i < scratch.fe_local_support_on_face[face_no].size();
*      ++i)
*   for (unsigned int j = 0;
*        j < scratch.fe_local_support_on_face[face_no].size();
*        ++j)
*     {
*       const unsigned int ii =
*         scratch.fe_local_support_on_face[face_no][i];
*       const unsigned int jj =
*         scratch.fe_local_support_on_face[face_no][j];
*       scratch.ll_matrix(ii, jj) +=
*         tau_stab * scratch.u_phi[i] * scratch.u_phi[j] * JxW;
*     }
* When @p trace_reconstruct=true, we are solving for the local
* solutions on an element-by-element basis. The local
* right-hand side is calculated by replacing the basis functions @p
* tr_phi in the @p lf_matrix computation by the computed values @p
* trace_values. Of course, the sign of the matrix is now minus
* since we have moved everything to the other side of the equation.
*
* if (task_data.trace_reconstruct)
*   for (unsigned int i = 0;
*        i < scratch.fe_local_support_on_face[face_no].size();
*        ++i)
*     {
*       const unsigned int ii =
*         scratch.fe_local_support_on_face[face_no][i];
*       scratch.l_rhs(ii) -=
*         (scratch.q_phi[i] * normal +
*          scratch.u_phi[i] * (convection * normal - tau_stab)) *
*         scratch.trace_values[q] * JxW;
*     }
* Once assembly of all of the local contributions is complete, we must
* either: (1) assemble the global system, or (2) compute the local solution
* values and save them. In either case, the first step is to invert the
* local-local matrix.
*
* scratch.ll_matrix.gauss_jordan();
*
* For (1), we compute the Schur complement and add it to the @p
* cell_matrix, matrix @f$D@f$ in the introduction.
*
* if (task_data.trace_reconstruct == false)
*   {
*     scratch.fl_matrix.mmult(scratch.tmp_matrix, scratch.ll_matrix);
*     scratch.tmp_matrix.vmult_add(task_data.cell_vector, scratch.l_rhs);
*     scratch.tmp_matrix.mmult(task_data.cell_matrix,
*                              scratch.lf_matrix,
*                              true);
*     cell->get_dof_indices(task_data.dof_indices);
*   }
*
* For (2), we are simply solving (ll_matrix).(solution_local) = (l_rhs).
* Hence, we multiply @p l_rhs by our already inverted local-local matrix
* and store the result using the <code>set_dof_values</code> function.
*
* scratch.ll_matrix.vmult(scratch.tmp_rhs, scratch.l_rhs);
* loc_cell->set_dof_values(scratch.tmp_rhs, solution_local);
* <a name="HDGcopy_local_to_global"></a>
* <h4>HDG::copy_local_to_global</h4>
* If we are in the first step of the solution, i.e. @p trace_reconstruct=false,
* then we assemble the local matrices into the global system.
*
* template <int dim>
* void HDG<dim>::copy_local_to_global(const PerTaskData &data)
*
* if (data.trace_reconstruct == false)
*   constraints.distribute_local_to_global(data.cell_matrix,
* <a name="HDGsolve"></a>
* <h4>HDG::solve</h4>
* The skeleton solution is solved for by using a BiCGStab solver with
* identity preconditioner.
*
* template <int dim>
* void HDG<dim>::solve()
*
* SolverControl solver_control(system_matrix.m() * 10,
*                              1e-11 * system_rhs.l2_norm());
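* A minimal sketch of the corresponding solver invocation, assuming the
* unpreconditioned BiCGStab setup described above, looks like this:
* @code
* SolverBicgstab<Vector<double>> solver(solver_control);
* solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());
* @endcode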
* std::cout << " Number of BiCGStab iterations: "
*           << solver_control.last_step() << std::endl;
*
* system_matrix.clear();
* sparsity_pattern.reinit(0, 0, 0, 1);
*
* constraints.distribute(solution);
*
* Once we have solved for the skeleton solution,
* we can solve for the local solutions in an element-by-element
* fashion. We do this by re-using the same @p assemble_system function
* but switching @p trace_reconstruct to true.
*
* assemble_system(true);
* <a name="HDGpostprocess"></a>
* <h4>HDG::postprocess</h4>
*
* The postprocess method serves two purposes. First, we want to construct a
* post-processed scalar variable in the element space of degree @f$p+1@f$ that
* we hope will converge at order @f$p+2@f$. This is again an element-by-element
* process and only involves the scalar solution as well as the gradient on
* the local cell. To do this, we introduce the already defined scratch data
* together with some update flags and run the work stream to do this in
* parallel.
* Secondly, we want to compute discretization errors just as we did in
* @ref step_7 "step-7". The overall procedure is similar with calls to
* VectorTools::integrate_difference. The difference is in how we compute
* the errors for the scalar variable and the gradient variable. In @ref step_7 "step-7",
* we did this by computing @p L2_norm or @p H1_seminorm
* contributions. Here, we have a DoFHandler with these two contributions
* computed and sorted by their vector component, <code>[0, dim)</code> for the
* gradient and @p dim for the scalar. To compute their value, we hence use
* a ComponentSelectFunction together with the @p
* SolutionAndGradient class introduced above that contains the analytic
* parts of either of them. Eventually, we also compute the L2-error of the
* post-processed solution and add the results into the convergence table.
* template <int dim>
* void HDG<dim>::postprocess()
*
* const QGauss<dim> quadrature_formula(fe_u_post.degree + 1);
*
* PostProcessScratchData scratch(
*   fe_u_post, fe_local, quadrature_formula, local_flags, flags);
*
*   dof_handler_u_post.begin_active(),
*   dof_handler_u_post.end(),
*   [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
*          PostProcessScratchData &scratch,
*          unsigned int &data) {
*     this->postprocess_one_cell(cell, scratch, data);
*   },
*   std::function<void(const unsigned int &)>(),
*
*   SolutionAndGradient<dim>(),
*   difference_per_cell,
*
* const double L2_error =
*     difference_per_cell,
*
*   std::pair<unsigned int, unsigned int>(0, dim), dim + 1);
*
*   SolutionAndGradient<dim>(),
*   difference_per_cell,
*   &gradient_select);
*
* const double grad_error =
*     difference_per_cell,
*
*   difference_per_cell,
*
* const double post_error =
*     difference_per_cell,
* convergence_table.add_value("cells", triangulation.n_active_cells());
* convergence_table.add_value("dofs", dof_handler.n_dofs());
*
* convergence_table.add_value("val L2", L2_error);
* convergence_table.set_scientific("val L2", true);
* convergence_table.set_precision("val L2", 3);
*
* convergence_table.add_value("grad L2", grad_error);
* convergence_table.set_scientific("grad L2", true);
* convergence_table.set_precision("grad L2", 3);
*
* convergence_table.add_value("val L2-post", post_error);
* convergence_table.set_scientific("val L2-post", true);
* convergence_table.set_precision("val L2-post", 3);
* <a name="HDGpostprocess_one_cell"></a>
* <h4>HDG::postprocess_one_cell</h4>
*
* This is the actual work done for the postprocessing. According to the
* discussion in the introduction, we need to set up a system that projects
* the gradient part of the DG solution onto the gradient of the
* post-processed variable. Moreover, we need to set the average of the new
* post-processed variable to equal the average of the scalar DG solution
* on the cell.
*
* More technically speaking, the projection of the gradient is a system
* that would potentially fill our @p dofs_per_cell times @p dofs_per_cell
* matrix but is singular (the sum of all rows would be zero because the
* constant function has zero gradient). Therefore, we take one row away and
* use it for imposing the average of the scalar value. We pick the first
* row for the scalar part, even though we could pick any row for @f$\mathcal
* Q_{-p}@f$ elements. However, had we used FE_DGP elements instead, the first
* row would correspond to the constant part already and deleting e.g. the
* last row would give us a singular system. This way, our program can also
* be used for those elements.
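* In formulas, the local system solved below is a sketch of (using the
* convention @f$\mathbf{q} = -\nabla u@f$ from the introduction)
* @f[
* (\nabla u_h^*, \nabla w_h)_K = -(\mathbf{q}_h, \nabla w_h)_K
* \quad \text{for all test functions } w_h,
* \qquad
* (u_h^*, 1)_K = (u_h, 1)_K ,
* @f]
* where the second condition replaces the first row of the singular
* projection matrix.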
* template <int dim>
* void HDG<dim>::postprocess_one_cell(
*   PostProcessScratchData &scratch,
*
*   &dof_handler_local);
*
* scratch.fe_values_local.reinit(loc_cell);
* scratch.fe_values.reinit(cell);
*
* const unsigned int n_q_points    = scratch.fe_values.get_quadrature().size();
* const unsigned int dofs_per_cell = scratch.fe_values.dofs_per_cell;
*
* scratch.fe_values_local[scalar].get_function_values(solution_local,
*                                                     scratch.u_values);
* scratch.fe_values_local[fluxes].get_function_values(solution_local,
*                                                     scratch.u_gradients);
* for (unsigned int i = 1; i < dofs_per_cell; ++i)
*   {
*     for (unsigned int j = 0; j < dofs_per_cell; ++j)
*       {
*         double sum = 0;
*         for (unsigned int q = 0; q < n_q_points; ++q)
*           sum += (scratch.fe_values.shape_grad(i, q) *
*                   scratch.fe_values.shape_grad(j, q)) *
*                  scratch.fe_values.JxW(q);
*         scratch.cell_matrix(i, j) = sum;
*       }
*     double sum = 0;
*     for (unsigned int q = 0; q < n_q_points; ++q)
*       sum -= (scratch.fe_values.shape_grad(i, q) * scratch.u_gradients[q]) *
*              scratch.fe_values.JxW(q);
*     scratch.cell_rhs(i) = sum;
*   }
*
* for (unsigned int j = 0; j < dofs_per_cell; ++j)
*   {
*     double sum = 0;
*     for (unsigned int q = 0; q < n_q_points; ++q)
*       sum += scratch.fe_values.shape_value(j, q) * scratch.fe_values.JxW(q);
*     scratch.cell_matrix(0, j) = sum;
*   }
*
* {
*   double sum = 0;
*   for (unsigned int q = 0; q < n_q_points; ++q)
*     sum += scratch.u_values[q] * scratch.fe_values.JxW(q);
*   scratch.cell_rhs(0) = sum;
* }
* Having assembled all terms, we can again go on and solve the linear
* system. We invert the matrix and then multiply the inverse by the
* right hand side. An alternative (and more numerically stable) method
* would have been to only factorize the matrix and apply the factorization.
*
* scratch.cell_matrix.gauss_jordan();
* scratch.cell_matrix.vmult(scratch.cell_sol, scratch.cell_rhs);
* cell->distribute_local_to_global(scratch.cell_sol, solution_u_post);
* <a name="HDGoutput_results"></a>
* <h4>HDG::output_results</h4>
* We have three sets of results that we would like to output: the local
* solution, the post-processed local solution, and the skeleton solution. The
* former two both 'live' on element volumes, whereas the latter lives on
* codimension-1 surfaces of the triangulation. Our @p output_results
* function writes all local solutions to the same vtk file, even though
* they correspond to different DoFHandler objects. The graphical output
* for the skeleton variable is done through use of the DataOutFaces class.
* template <int dim>
* void HDG<dim>::output_results(const unsigned int cycle)
*
* std::string filename;
* switch (refinement_mode)
*   {
*     case global_refinement:
*       filename = "solution-global";
*       break;
*     case adaptive_refinement:
*       filename = "solution-adaptive";
*       break;
*   }
*
* std::string face_out(filename);
* face_out += "-face";
*
* filename += ".vtk";
* std::ofstream output(filename);
* We first define the names and types of the local solution,
* and add the data to @p data_out.
*
* std::vector<std::string> names(dim, "gradient");
* names.emplace_back("solution");
* std::vector<DataComponentInterpretation::DataComponentInterpretation>
*   component_interpretation(
*
* component_interpretation[dim] =
*
*   component_interpretation);
* The second data item we add is the post-processed solution.
* In this case, it is a single scalar variable belonging to
* a different DoFHandler.
*
* std::vector<std::string> post_name(1, "u_post");
* std::vector<DataComponentInterpretation::DataComponentInterpretation>
*
* face_out += ".vtk";
* std::ofstream face_output(face_out);
* The <code>DataOutFaces</code> class works analogously to the
* <code>DataOut</code> class when the <code>DoFHandler</code>
* defines the solution on the skeleton of the triangulation. We treat it
* as such here, and the code is similar to that above.
*
* std::vector<std::string> face_name(1, "u_hat");
* std::vector<DataComponentInterpretation::DataComponentInterpretation>
*
* data_out_face.add_data_vector(dof_handler,
*
*                               face_component_type);
*
* data_out_face.build_patches(fe.degree);
* data_out_face.write_vtk(face_output);
* <a name="HDGrefine_grid"></a>
* <h4>HDG::refine_grid</h4>
*
* We implement two different refinement cases for HDG, just as in
* <code>@ref step_7 "step-7"</code>: adaptive_refinement and global_refinement. The
* global_refinement option recreates the entire triangulation every
* time. This is because we want to use a finer sequence of meshes than what
* we would get with one refinement step, namely 2, 3, 4, 6, 8, 12, 16, ...
* elements per direction.
* The adaptive_refinement mode uses the <code>KellyErrorEstimator</code> to
* give a decent indication of the non-regular regions in the scalar local
* solutions.
*
* template <int dim>
* void HDG<dim>::refine_grid(const unsigned int cycle)
*
* switch (refinement_mode)
*   {
*     case global_refinement:
*
*     case adaptive_refinement:
*
*       std::map<types::boundary_id, const Function<dim> *>
*
*         estimated_error_per_cell,
*         fe_local.component_mask(
* Just as in @ref step_7 "step-7", we set the boundary indicator of two of the faces to 1
* where we want to specify Neumann boundary conditions instead of Dirichlet
* conditions. Since we re-create the triangulation every time for global
* refinement, the flags are set in every refinement step, not just at the
* beginning.
*
* for (const auto &face : cell->face_iterators())
*   if (face->at_boundary())
*     if ((std::fabs(face->center()(0) - (-1)) < 1e-12) ||
*         (std::fabs(face->center()(1) - (-1)) < 1e-12))
*       face->set_boundary_id(1);
* <a name="HDGrun"></a>
* <h4>HDG::run</h4>
* The functionality here is basically the same as <code>@ref step_7 "step-7"</code>.
* We loop over 10 cycles, refining the grid on each one. At the end,
* convergence tables are created.
* template <int dim>
* void HDG<dim>::run()
*
* for (unsigned int cycle = 0; cycle < 10; ++cycle)
*   {
*     std::cout << "Cycle " << cycle << ':' << std::endl;
*
*     refine_grid(cycle);
*     setup_system();
*     assemble_system(false);
*     solve();
*     postprocess();
*     output_results(cycle);
*   }
* There is one minor change for the convergence table compared to @ref step_7 "step-7":
* Since we did not refine our mesh by a factor of two in each cycle (but
* rather used the sequence 2, 3, 4, 6, 8, 12, ...), we need to tell the
* convergence rate evaluation about this. We do this by setting the
* number of cells as a reference column and additionally specifying the
* dimension of the problem, which gives the necessary information for the
* relation between number of cells and mesh size.
* if (refinement_mode == global_refinement)
*   {
*     convergence_table.evaluate_convergence_rates(
*     convergence_table.evaluate_convergence_rates(
*     convergence_table.evaluate_convergence_rates(
*   }
* convergence_table.write_text(std::cout);
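* Each of these calls takes an error column, a reference column, a rate
* mode, and the space dimension. A sketch for the first column (the column
* names are the ones added in postprocess() above):
* @code
* convergence_table.evaluate_convergence_rates(
*   "val L2", "cells", ConvergenceTable::reduction_rate_log2, dim);
* @endcode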
* const unsigned int dim = 2;
*
* Now for the three calls to the main class in complete analogy to
* @ref step_7 "step-7".
* std::cout << "Solving with Q1 elements, adaptive refinement"
*           << "============================================="
*
* Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::adaptive_refinement);
* hdg_problem.run();
*
* std::cout << std::endl;
*
* std::cout << "Solving with Q1 elements, global refinement" << std::endl
*           << "===========================================" << std::endl
*
* Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::global_refinement);
* hdg_problem.run();
*
* std::cout << std::endl;
*
* std::cout << "Solving with Q3 elements, global refinement" << std::endl
*           << "===========================================" << std::endl
*
* Step51::HDG<dim> hdg_problem(3, Step51::HDG<dim>::global_refinement);
* hdg_problem.run();
*
* std::cout << std::endl;
* catch (std::exception &exc)
*   {
*     std::cerr << std::endl
*               << "----------------------------------------------------"
*     std::cerr << "Exception on processing: " << std::endl
*               << exc.what() << std::endl
*               << "Aborting!" << std::endl
*               << "----------------------------------------------------"
*   }
* catch (...)
*   {
*     std::cerr << std::endl
*               << "----------------------------------------------------"
*     std::cerr << "Unknown exception!" << std::endl
*               << "Aborting!" << std::endl
*               << "----------------------------------------------------"
*   }
<a name="Results"></a><h1>Results</h1>

<a name="Programoutput"></a><h3>Program output</h3>
We first have a look at the output generated by the program when run in 2D. In
the four images below, we show the solution for polynomial degree @f$p=1@f$
and cycles 2, 3, 4, and 8 of the program. In the plots, we overlay the data
generated from the internal data (DG part) with the skeleton part (@f$\hat{u}@f$)
into the same plot. We had to generate two different data sets because cells
and faces represent different geometric entities, the combination of which (in
the same file) is not supported in the VTK output of deal.II.

The images show the distinctive features of HDG: The cell solution (colored
surfaces) is discontinuous between the cells. The solution on the skeleton
variable sits on the faces and ties together the local parts. The skeleton
solution is not continuous on the vertices where the faces meet, even though
its values are quite close along lines in the same coordinate direction. The
skeleton solution can be interpreted as a rubber spring between the two sides
that balances the jumps in the solution (or rather, the flux @f$\kappa \nabla u
+ \mathbf{c} u@f$). From the picture at the top left, it is clear that
the bulk solution frequently over- and undershoots and that the
skeleton variable is indeed a better approximation to the exact
solution; this explains why we can get a better solution using a
postprocessing step.
As the mesh is refined, the jumps between the cells get
smaller (we represent a smooth solution), and the skeleton solution approaches
the interior parts. For cycle 8, there is no visible difference between the two
variables. We also see how boundary conditions are implemented weakly and that
the interior variables do not exactly satisfy boundary conditions. On the
lower and left boundaries, we set Neumann boundary conditions, whereas we set
Dirichlet conditions on the right and top boundaries.
<table align="center">
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_2.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_3.png" alt=""></td>
  </tr>
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_4.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_8.png" alt=""></td>
  </tr>
</table>
Next, we have a look at the post-processed solution, again at cycles 2, 3, 4,
and 8. This is a discontinuous solution that is locally described by second
order polynomials. While the solution does not look very good on the mesh of
cycle two, it looks much better for cycles three and four. As shown by the
convergence table below, we find that it also converges more quickly to the
analytical solution.
<table align="center">
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_2.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_3.png" alt=""></td>
  </tr>
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_4.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_8.png" alt=""></td>
  </tr>
</table>
Finally, we look at the solution for @f$p=3@f$ at cycle 2. Despite the coarse
mesh with only 64 cells, the post-processed solution is similar in quality
to the linear solution (not post-processed) at cycle 8 with 4,096
cells. This clearly shows the superiority of high order methods for smooth
solutions.
<table align="center">
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_q3_2.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_q3_2.png" alt=""></td>
  </tr>
</table>
<a name="Convergencetables"></a><h4>Convergence tables</h4>

When the program is run, it also outputs information about the respective
steps and convergence tables with errors in the various components at the
end. In 2D, the convergence tables look as follows:
Q1 elements, adaptive refinement:
cells dofs   val L2      grad L2     val L2-post
   16    80  1.804e+01   2.207e+01   1.798e+01
   31   170  9.874e+00   1.322e+01   9.798e+00
   61   314  7.452e-01   3.793e+00   4.891e-01
  121   634  3.240e-01   1.511e+00   2.616e-01
  238  1198  8.585e-02   8.212e-01   1.808e-02
  454  2290  4.802e-02   5.178e-01   2.195e-02
  898  4378  2.561e-02   2.947e-01   4.318e-03
 1720  7864  1.306e-02   1.664e-01   2.978e-03
 3271 14638  7.025e-03   9.815e-02   1.075e-03
 6217 27214  4.119e-03   6.407e-02   9.975e-04

Q1 elements, global refinement:
cells dofs    val L2         grad L2        val L2-post
   16    80  1.804e+01    -  2.207e+01    -  1.798e+01    -
   36   168  6.125e+00 2.66  9.472e+00 2.09  6.084e+00 2.67
   64   288  9.785e-01 6.38  4.260e+00 2.78  7.102e-01 7.47
  144   624  2.730e-01 3.15  1.866e+00 2.04  6.115e-02 6.05
  256  1088  1.493e-01 2.10  1.046e+00 2.01  2.880e-02 2.62
  576  2400  6.965e-02 1.88  4.846e-01 1.90  9.204e-03 2.81
 1024  4224  4.018e-02 1.91  2.784e-01 1.93  4.027e-03 2.87
 2304  9408  1.831e-02 1.94  1.264e-01 1.95  1.236e-03 2.91
 4096 16640  1.043e-02 1.96  7.185e-02 1.96  5.306e-04 2.94
 9216 37248  4.690e-03 1.97  3.228e-02 1.97  1.599e-04 2.96

Q3 elements, global refinement:
cells dofs    val L2         grad L2        val L2-post
   16   160  3.613e-01    -  1.891e+00    -  3.020e-01    -
   36   336  6.411e-02 4.26  5.081e-01 3.24  3.238e-02 5.51
   64   576  3.480e-02 2.12  2.533e-01 2.42  5.277e-03 6.31
  144  1248  8.297e-03 3.54  5.924e-02 3.58  6.330e-04 5.23
  256  2176  2.254e-03 4.53  1.636e-02 4.47  1.403e-04 5.24
  576  4800  4.558e-04 3.94  3.277e-03 3.96  1.844e-05 5.01
 1024  8448  1.471e-04 3.93  1.052e-03 3.95  4.378e-06 5.00
 2304 18816  2.956e-05 3.96  2.104e-04 3.97  5.750e-07 5.01
 4096 33280  9.428e-06 3.97  6.697e-05 3.98  1.362e-07 5.01
 9216 74496  1.876e-06 3.98  1.330e-05 3.99  1.788e-08 5.01
One can see the error reduction upon grid refinement, and for the cases where
global refinement was performed, also the convergence rates. The quadratic
convergence rates of Q1 elements in the @f$L_2@f$ norm for both the scalar
variable and the gradient variable are apparent, as is the cubic rate for the
postprocessed scalar variable in the @f$L_2@f$ norm. Note this distinctive
feature of an HDG solution. In typical continuous finite elements, the
gradient of the solution of order @f$p@f$ converges at rate @f$p@f$ only, as
opposed to @f$p+1@f$ for the actual solution. Even though superconvergence
results for finite elements are also available (e.g. superconvergent patch
recovery first introduced by Zienkiewicz and Zhu), these are typically limited
to structured meshes and other special cases. For Q3 HDG variables, the scalar
variable and gradient converge at fourth order and the postprocessed scalar
variable at fifth order.

The same convergence rates are observed in 3D.
Q1 elements, adaptive refinement:
cells    dofs    val L2      grad L2     val L2-post
     8     144  7.122e+00   1.941e+01   6.102e+00
    29     500  3.309e+00   1.023e+01   2.145e+00
   113    1792  2.204e+00   1.023e+01   1.912e+00
   379    5732  6.085e-01   5.008e+00   2.233e-01
  1317   19412  1.543e-01   1.464e+00   4.196e-02
  4579   64768  5.058e-02   5.611e-01   9.521e-03
 14596  199552  2.129e-02   3.122e-01   4.569e-03
 46180  611400  1.033e-02   1.622e-01   1.684e-03
144859 1864212  5.007e-03   8.371e-02   7.364e-04
451060 5684508  2.518e-03   4.562e-02   3.070e-04

Q1 elements, global refinement:
cells    dofs     val L2          grad L2         val L2-post
     8     144  7.122e+00     -  1.941e+01     -  6.102e+00    -
    27     432  5.491e+00  0.64  2.184e+01 -0.29  4.448e+00 0.78
    64     960  3.646e+00  1.42  1.299e+01  1.81  3.306e+00 1.03
   216    3024  1.595e+00  2.04  8.550e+00  1.03  1.441e+00 2.05
   512    6912  6.922e-01  2.90  5.306e+00  1.66  2.511e-01 6.07
  1728   22464  2.915e-01  2.13  2.490e+00  1.87  8.588e-02 2.65
  4096   52224  1.684e-01  1.91  1.453e+00  1.87  4.055e-02 2.61
 13824  172800  7.972e-02  1.84  6.861e-01  1.85  1.335e-02 2.74
 32768  405504  4.637e-02  1.88  3.984e-01  1.89  5.932e-03 2.82
110592 1354752  2.133e-02  1.92  1.830e-01  1.92  1.851e-03 2.87

Q3 elements, global refinement:
cells    dofs     val L2          grad L2         val L2-post
     8     576  5.670e+00     -  1.868e+01     -  5.462e+00    -
    27    1728  1.048e+00  4.16  6.988e+00  2.42  8.011e-01 4.73
    64    3840  2.831e-01  4.55  2.710e+00  3.29  1.363e-01 6.16
   216   12096  7.883e-02  3.15  7.721e-01  3.10  2.158e-02 4.55
   512   27648  3.642e-02  2.68  3.305e-01  2.95  5.231e-03 4.93
  1728   89856  8.546e-03  3.58  7.581e-02  3.63  7.640e-04 4.74
  4096  208896  2.598e-03  4.14  2.313e-02  4.13  1.783e-04 5.06
 13824  691200  5.314e-04  3.91  4.697e-03  3.93  2.355e-05 4.99
 32768 1622016  1.723e-04  3.91  1.517e-03  3.93  5.602e-06 4.99
110592 5419008  3.482e-05  3.94  3.055e-04  3.95  7.374e-07 5.00
<a name="Comparisonwithcontinuousfiniteelements"></a><h3>Comparison with continuous finite elements</h3>

<a name="Resultsfor2D"></a><h4>Results for 2D</h4>
The convergence tables verify the expected convergence rates stated in the
introduction. Now, we want to show a quick comparison of the computational
efficiency of the HDG method compared to a usual finite element (continuous
Galerkin) method on the problem of this tutorial. Of course, stability aspects
of the HDG method compared to continuous finite elements for
transport-dominated problems are also important in practice, which is an
aspect not seen on a problem with smooth analytic solution. In the picture
below, we compare the @f$L_2@f$ error as a function of the number of degrees of
freedom (left) and of the computing time spent in the linear solver (right)
for two space dimensions of continuous finite elements (CG) and the hybridized
discontinuous Galerkin method presented in this tutorial. As opposed to the
tutorial where we only use unpreconditioned BiCGStab, the times shown in the
figures below use the Trilinos algebraic multigrid preconditioner; for the
HDG part, the solver utilizes the chunk
block structure in the matrix on the finest level.
<table align="center">
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_plain.png" width="400" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_plain.png" width="400" alt=""></td>
  </tr>
</table>
The results in the graphs show that the HDG method is slower than continuous
finite elements at @f$p=1@f$, about equally fast for cubic elements and
faster for sixth order elements. However, we have seen above that the HDG
method actually produces solutions which are more accurate than what is
represented in the original variables. Therefore, in the next two plots below
we instead display the error of the post-processed solution for HDG (denoted
by @f$p=1^*@f$ for example). We now see a clear advantage of HDG for the same
amount of work for both @f$p=3@f$ and @f$p=6@f$, and about the same quality
for @f$p=1@f$.
<table align="center">
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_post.png" width="400" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_post.png" width="400" alt=""></td>
  </tr>
</table>
Since the HDG method actually produces results converging as
@f$h^{p+2}@f$, we should compare it to a continuous Galerkin
solution with the same asymptotic convergence behavior, i.e., FE_Q with degree
@f$p+1@f$. If we do this, we get the convergence curves below. We see that
CG with second order polynomials is again clearly better than HDG with
linears. However, the advantage of HDG for higher orders remains.
<table align="center">
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_postb.png" width="400" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_postb.png" width="400" alt=""></td>
  </tr>
</table>
The results are in line with properties of DG methods in general: Best
performance is typically not achieved for linear elements, but rather at
somewhat higher order, usually around @f$p=3@f$. This is because of a
volume-to-surface effect for discontinuous solutions with too much of the
solution living on the surfaces and hence duplicating work when the elements
are linear. Put in other words, DG methods are often most efficient when used
at relatively high order, despite their focus on a discontinuous (and hence,
seemingly less accurate) representation of solutions.
<a name="Resultsfor3D"></a><h4>Results for 3D</h4>

We now show the same figures in 3D: The first row shows the number of degrees
of freedom and computing time versus the @f$L_2@f$ error in the scalar variable
@f$u@f$ for CG and HDG at order @f$p@f$, the second row shows the
post-processed HDG solution instead of the original one, and the third row
compares the post-processed HDG solution with CG at order @f$p+1@f$. In 3D,
the volume-to-surface effect makes the cost of HDG somewhat higher and the CG
solution is clearly better than HDG for linears by any metric. For cubics, HDG
and CG are of similar quality, whereas HDG is again more efficient for sixth
order polynomials. One can alternatively also use the combination of FE_DGP
and FE_FaceP, which do not use tensor product
polynomials of degree @f$p@f$ but Legendre polynomials of <i>complete</i>
degree @f$p@f$. There are fewer degrees of freedom on the skeleton variable
for FE_FaceP for a given mesh size, but the solution quality (error vs. number
of DoFs) is very similar to the results for FE_FaceQ.
<table align="center">
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_plain.png" width="400" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_plain.png" width="400" alt=""></td>
  </tr>
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_post.png" width="400" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_post.png" width="400" alt=""></td>
  </tr>
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_postb.png" width="400" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_postb.png" width="400" alt=""></td>
  </tr>
</table>
One final note on the efficiency comparison: We tried to use general-purpose
sparse matrix structures and similar solvers (optimal AMG preconditioners for
both without particular tuning of the AMG parameters on any of them) to give a
fair picture of the cost versus accuracy of the two methods, on a toy example.
It should be noted, however, that geometric multigrid (GMG) for continuous
finite elements is about a factor of four to five faster for @f$p=3@f$ and
@f$p=6@f$. As of 2019, optimal-complexity iterative solvers for HDG are still
under development in the research community. Also, there are other
implementation aspects for CG available such as fast matrix-free approaches as
shown in @ref step_37 "step-37" that make higher order continuous elements
more competitive. Again, it is not clear to the authors of the tutorial
whether similar improvements could be made for HDG. We refer to <a
href="https://dx.doi.org/10.1137/16M110455X">Kronbichler and Wall (2018)</a>
for a recent efficiency evaluation.
<a name="Possibilitiesforimprovements"></a><h3>Possibilities for improvements</h3>

As already mentioned in the introduction, one possibility is to implement
another post-processing technique as discussed in the literature.

A second item that is not done optimally relates to the performance of this
program, which is of course an issue in practical applications (weighing in
also the better solution quality of (H)DG methods for transport-dominated
problems). Let us look at
the computing time of the tutorial program and the share of the individual
components:
<table align="center" class="doxtable">
  <tr>
    <th>&nbsp;</th>
    <th>&nbsp;</th>
    <th>Setup</th>
    <th>Assemble</th>
    <th>Solve</th>
    <th>Trace reconstruct</th>
    <th>Post-processing</th>
    <th>Output</th>
  </tr>
  <tr>
    <th>&nbsp;</th>
    <th>Total time</th>
    <th colspan="6">Relative share</th>
  </tr>
  <tr>
    <td align="left">2D, Q1, cycle 9, 37,248 dofs</td>
    <td align="center">5.34s</td>
    <td align="center">0.7%</td>
    <td align="center">1.2%</td>
    <td align="center">89.5%</td>
    <td align="center">0.9%</td>
    <td align="center">2.3%</td>
    <td align="center">5.4%</td>
  </tr>
  <tr>
    <td align="left">2D, Q3, cycle 9, 74,496 dofs</td>
    <td align="center">22.2s</td>
    <td align="center">0.4%</td>
    <td align="center">4.3%</td>
    <td align="center">84.1%</td>
    <td align="center">4.1%</td>
    <td align="center">3.5%</td>
    <td align="center">3.6%</td>
  </tr>
  <tr>
    <td align="left">3D, Q1, cycle 7, 172,800 dofs</td>
    <td align="center">9.06s</td>
    <td align="center">3.1%</td>
    <td align="center">8.9%</td>
    <td align="center">42.7%</td>
    <td align="center">7.0%</td>
    <td align="center">20.6%</td>
    <td align="center">17.7%</td>
  </tr>
  <tr>
    <td align="left">3D, Q3, cycle 7, 691,200 dofs</td>
    <td align="center">516s</td>
    <td align="center">0.6%</td>
    <td align="center">34.5%</td>
    <td align="center">13.4%</td>
    <td align="center">32.8%</td>
    <td align="center">17.1%</td>
    <td align="center">1.5%</td>
  </tr>
</table>
As can be seen from the table, the solver and assembly calls dominate the
runtime of the program. This also gives a clear indication of where
improvements would make the most sense:
<ol>
  <li> Better linear solvers: We use a BiCGStab iterative solver without
  preconditioner, where the number of iterations increases with increasing
  problem size (the number of iterations for Q1 elements and global
  refinement starts at 35 for the small sizes but increases up to 701 for the
  largest size). To do better, one could for example use an algebraic
  multigrid preconditioner from Trilinos, or some more advanced variants as
  the one discussed in <a
  href="https://dx.doi.org/10.1137/16M110455X">Kronbichler and Wall
  (2018)</a>. For diffusion-dominated problems such as the problem at hand
  with finer meshes, such a solver can be designed that uses the matrix-vector
  products of the matrix on the finest level, as
  long as we are not working in parallel with MPI.

  <li> Speed up assembly by pre-assembling parts that do not change from one
  cell to another (those that neither contain variable coefficients nor
  mapping-dependent terms).
</ol>
<a name="PlainProg"></a>
<h1> The plain program</h1>
@include "step-51.cc"