598 *       for (unsigned int i = 0; i < this->n_source_centers; ++i)
602 *         std::exp(-x_minus_xi.norm_square() / (this->width * this->width));
611 *                        const unsigned int = 0) const override
614 *       for (unsigned int i = 0; i < this->n_source_centers; ++i)
619 *           (-2 / (this->width * this->width) *
620 *            std::exp(-x_minus_xi.norm_square() / (this->width * this->width)) *
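Since only fragments of the solution class appear above, the following is a minimal, self-contained sketch of the same formulas in plain C++. It is an illustration only: the source centers and the width below are made-up placeholders, not the values the tutorial defines in its SolutionBase class.

@code
// Standalone sketch of the manufactured solution used above: a sum of Gaussians
// u(x) = sum_i exp(-|x-x_i|^2/w^2), whose gradient contribution per center is
// -2/w^2 * exp(-|x-x_i|^2/w^2) * (x-x_i). Centers and width are illustrative.
#include <array>
#include <cmath>

constexpr int dim = 2;
using Point2 = std::array<double, dim>;

const std::array<Point2, 3> centers = {{{-0.5, 0.5}, {-0.6, -0.6}, {0.5, -0.5}}};
const double width = 0.2;

double solution_value(const Point2 &p)
{
  double sum = 0.;
  for (const Point2 &c : centers)
    {
      double r2 = 0.;
      for (int d = 0; d < dim; ++d)
        r2 += (p[d] - c[d]) * (p[d] - c[d]);
      sum += std::exp(-r2 / (width * width));
    }
  return sum;
}

Point2 solution_gradient(const Point2 &p)
{
  Point2 grad = {{0., 0.}};
  for (const Point2 &c : centers)
    {
      double r2 = 0.;
      for (int d = 0; d < dim; ++d)
        r2 += (p[d] - c[d]) * (p[d] - c[d]);
      const double factor = -2. / (width * width) * std::exp(-r2 / (width * width));
      for (int d = 0; d < dim; ++d)
        grad[d] += factor * (p[d] - c[d]);
    }
  return grad;
}
@endcode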
633 * This class implements a function where the scalar solution and its negative
634 * gradient are collected together. This function is used when computing the
635 * error of the HDG approximation and its implementation is to simply call
640 *   class SolutionAndGradient : public Function<dim>, protected SolutionBase<dim>
643 *     SolutionAndGradient()
651 *       Solution<dim> solution;
653 *       for (unsigned int d = 0; d < dim; ++d)
655 *       v[dim] = solution.value(p);
663 * Next comes the implementation of the convection velocity. As described in
664 * the introduction, we choose a velocity field that is @f$(y, -x)@f$ in 2d and
665 * @f$(y, -x, 1)@f$ in 3d. This gives a divergence-free velocity field.
672 *     ConvectionVelocity()
685 *             convection[0] = p[1];
686 *             convection[1] = -p[0];
689 *             convection[0] = p[1];
690 *             convection[1] = -p[0];
694 *             Assert(false, ExcNotImplemented());
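As a compact restatement of the velocity field just described, here is a standalone sketch. It uses plain std::array as a stand-in for the Tensor<1,dim> return type of the tutorial's ConvectionVelocity class; the function name is illustrative.

@code
// Minimal sketch of the divergence-free convection field: c = (y, -x) in 2d
// and c = (y, -x, 1) in 3d.
#include <array>

template <int dim>
std::array<double, dim> convection_value(const std::array<double, dim> &p)
{
  static_assert(dim == 2 || dim == 3, "only 2d and 3d are implemented");
  std::array<double, dim> c = {};
  c[0] = p[1];
  c[1] = -p[0];
  if constexpr (dim == 3)
    c[2] = 1.;
  return c;
}
@endcode

Each component is independent of its own coordinate, so @f$\partial_x c_x + \partial_y c_y (+ \partial_z c_z) = 0@f$, which is the divergence-free property exploited when rewriting the advection term.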
704 * The last function we implement is the right hand side for the
705 * manufactured solution. It is very similar to @ref step_7 "step-7", with the exception
706 * that we now have a convection term instead of the reaction term. Since
707 * the velocity field is incompressible, i.e., @f$\nabla \cdot \mathbf{c} = 0@f$,
708 * the advection term simply reads @f$\mathbf{c} \cdot \nabla u@f$.
712 *   class RightHandSide : public Function<dim>, protected SolutionBase<dim>
716 *                        const unsigned int = 0) const override
718 *       ConvectionVelocity<dim> convection_velocity;
721 *       for (unsigned int i = 0; i < this->n_source_centers; ++i)
726 *           ((2 * dim - 2 * convection * x_minus_xi -
727 *             4 * x_minus_xi.norm_square() / (this->width * this->width)) /
728 *            (this->width * this->width) *
729 *            std::exp(-x_minus_xi.norm_square() / (this->width * this->width)));
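To make the formula inside the loop explicit, the sketch below evaluates the contribution of a single Gaussian center to the manufactured right hand side @f$f = -\Delta u + \mathbf{c}\cdot\nabla u@f$; the full right hand side is the sum of this expression over all source centers. All names are illustrative stand-ins for the tutorial's classes.

@code
// One-center contribution to the manufactured right hand side:
//   ((2*dim - 2*c.(x-x_i) - 4*|x-x_i|^2/w^2) / w^2) * exp(-|x-x_i|^2/w^2).
#include <array>
#include <cmath>

template <int dim>
double rhs_one_center(const std::array<double, dim> &x,
                      const std::array<double, dim> &x_i, // source center
                      const std::array<double, dim> &c,   // convection velocity at x
                      const double                   w)   // Gaussian width
{
  double r2 = 0., c_dot_r = 0.;
  for (int d = 0; d < dim; ++d)
    {
      const double rd = x[d] - x_i[d];
      r2 += rd * rd;
      c_dot_r += c[d] * rd;
    }
  return (2. * dim - 2. * c_dot_r - 4. * r2 / (w * w)) / (w * w) *
         std::exp(-r2 / (w * w));
}
@endcode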
742 * <a name="TheHDGsolverclass"></a>
743 * <h3>The HDG solver class</h3>
747 * The HDG solution procedure follows closely that of @ref step_7 "step-7". The major
748 * difference is the use of three different sets of DoFHandler and FE
750 * vectors. We also use WorkStream to enable a multithreaded local solution
751 * process which exploits the embarrassingly parallel nature of the local
752 * solver. For WorkStream, we define the local operations on a cell and a
753 * copy function into the global matrix and vector. We do this both for the
754 * assembly (which is run twice, once when we generate the system matrix and
755 * once when we compute the element-interior solutions from the skeleton
756 * values) and for the postprocessing where we extract a solution that
757 * converges at higher order.
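The split between local operations and a copy function is the essence of the scratch/copy pattern. The following schematic shows that pattern as a plain serial loop, not deal.II's actual WorkStream interface: a worker fills per-cell copy data using reusable scratch storage, and a copier writes that data into global objects. In WorkStream the workers may run concurrently on different cells while the copier runs sequentially, so only the copier touches shared state. All type and function names here are invented for illustration.

@code
#include <functional>
#include <vector>

struct CopyData
{
  std::vector<double>       cell_vector;
  std::vector<unsigned int> dof_indices;
};

struct ScratchStorage
{
  std::vector<double> shape_values; // reused from cell to cell
};

void scratch_copy_loop(
  const std::vector<unsigned int>                                       &cells,
  const std::function<void(unsigned int, ScratchStorage &, CopyData &)> &worker,
  const std::function<void(const CopyData &)>                           &copier)
{
  ScratchStorage scratch;
  CopyData       copy_data;
  for (const unsigned int cell : cells)
    {
      worker(cell, scratch, copy_data); // local, embarrassingly parallel work
      copier(copy_data);                // sequential write into global data
    }
}
@endcode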
764 *     enum RefinementMode
767 *       adaptive_refinement
770 *     HDG(const unsigned int degree, const RefinementMode refinement_mode);
774 *     void setup_system();
775 *     void assemble_system(const bool reconstruct_trace = false);
777 *     void postprocess();
778 *     void refine_grid(const unsigned int cycle);
779 *     void output_results(const unsigned int cycle);
783 * Data for the assembly and solution of the primal variables.
786 *     struct PerTaskData;
787 *     struct ScratchData;
791 * Post-processing the solution to obtain @f$u^*@f$ is an element-by-element
792 * procedure; as such, we do not need to assemble any global data and do
793 * not declare any 'task data' for WorkStream to use.
796 *     struct PostProcessScratchData;
801 * work of the program.
804 *     void assemble_system_one_cell(
806 *       ScratchData &scratch,
807 *       PerTaskData &task_data);
809 *     void copy_local_to_global(const PerTaskData &data);
811 *     void postprocess_one_cell(
813 *       PostProcessScratchData &scratch,
814 *       unsigned int &empty_data);
821 * The 'local' solutions are interior to each element. These
822 * represent the primal solution field @f$u@f$ as well as the auxiliary
823 * field @f$\mathbf{q}@f$.
832 * The new finite element type and corresponding <code>DoFHandler</code> are
833 * used for the global skeleton solution that couples the element-level
844 * As stated in the introduction, HDG solutions can be post-processed to
845 * attain superconvergence rates of @f$\mathcal{O}(h^{p+2})@f$. The
846 * post-processed solution is a discontinuous finite element solution
847 * representing the primal variable on the interior of each cell. We define
848 * a FE type of degree @f$p+1@f$ to represent this post-processed solution,
849 * which we only use for output after constructing it.
858 * The degrees of freedom corresponding to the skeleton strongly enforce
859 * Dirichlet boundary conditions, just as in a continuous Galerkin finite
860 * element method. We can enforce the boundary conditions in an analogous
862 * handled in the same way as for continuous finite elements: For the face
863 * elements which only define degrees of freedom on the face, this process
864 * sets the solution on the refined side to coincide with the
865 * representation on the coarse side.
869 * Note that for HDG, the elimination of hanging nodes is not the only
870 * possibility — in terms of the HDG theory, one could also use the
871 * unknowns from the refined side and express the local solution on the
872 * coarse side through the trace values on the refined side. However, such
873 * a setup is not as easily implemented in terms of deal.II loops and not
883 * the actual matrix object. When creating the sparsity pattern, we just
884 * have to additionally pass the size of local blocks.
892 * Same as @ref step_7 "step-7":
895 *     const RefinementMode refinement_mode;
902 * <a name="TheHDGclassimplementation"></a>
903 * <h3>The HDG class implementation</h3>
908 * <a name="Constructor"></a>
909 * <h4>Constructor</h4>
910 * The constructor is similar to those in other examples, with the exception
912 * create a system of finite elements for the local DG part, including the
917 *   HDG<dim>::HDG(const unsigned int degree, const RefinementMode refinement_mode)
922 *     , fe_u_post(degree + 1)
924 *     , refinement_mode(refinement_mode)
932 * <a name="HDGsetup_system"></a>
933 * <h4>HDG::setup_system</h4>
934 * The system for an HDG solution is set up in an analogous manner to most
935 * of the other tutorial programs. We are careful to distribute dofs with
936 * all of our DoFHandler objects. The @p solution and @p system_matrix
937 * objects go with the global skeleton solution.
941 *   void HDG<dim>::setup_system()
943 *     dof_handler_local.distribute_dofs(fe_local);
944 *     dof_handler.distribute_dofs(fe);
945 *     dof_handler_u_post.distribute_dofs(fe_u_post);
947 *     std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
950 *     solution.reinit(dof_handler.n_dofs());
951 *     system_rhs.reinit(dof_handler.n_dofs());
953 *     solution_local.reinit(dof_handler_local.n_dofs());
954 *     solution_u_post.reinit(dof_handler_u_post.n_dofs());
956 *     constraints.clear();
958 *     std::map<types::boundary_id, const Function<dim> *> boundary_functions;
959 *     Solution<dim> solution_function;
960 *     boundary_functions[0] = &solution_function;
962 *                                              boundary_functions,
965 *     constraints.close();
969 * When creating the chunk sparsity pattern, we first create the usual
971 * to the number of dofs on a face, when copying this into the final
978 *       sparsity_pattern.copy_from(dsp, fe.n_dofs_per_face());
980 *       system_matrix.reinit(sparsity_pattern);
988 * <a name="HDGPerTaskData"></a>
989 * <h4>HDG::PerTaskData</h4>
990 * Next comes the definition of the local data structures for the parallel
991 * assembly. The first structure @p PerTaskData contains the local vector
992 * and matrix that are written into the global matrix, whereas the
993 * ScratchData contains all data that we need for the local assembly. There
994 * is one variable worth noting here, namely the boolean variable @p
995 * trace_reconstruct. As mentioned in the introduction, we solve the HDG
996 * system in two steps. First, we create a linear system for the skeleton
997 * system where we condense the local part into it via the Schur complement
998 * @f$D-CA^{-1}B@f$. Then, we solve for the local part using the skeleton
999 * solution. For these two steps, we need the same matrices on the elements
1000 * twice, which we want to compute by two assembly steps. Since most of the
1001 * code is similar, we do this with the same function but only switch
1002 * between the two based on a flag that we set when starting the
1003 * assembly. Since we need to pass this information on to the local worker
1004 * routines, we store it once in the task data.
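The two-step structure is easiest to see on a tiny block system. The sketch below uses scalar "blocks" and made-up numbers purely to illustrate the algebra of the Schur complement @f$D - CA^{-1}B@f$ and the subsequent local reconstruction; it is not the tutorial's implementation.

@code
// Two-step solve for the block system [A B; C D] [U; Lambda] = [F; G]:
// step 1 solves (D - C A^{-1} B) Lambda = G - C A^{-1} F for the skeleton unknowns,
// step 2 recovers the local solution U = A^{-1} (F - B Lambda) element by element.
#include <cassert>
#include <cmath>

int main()
{
  const double A = 4., B = 1., C = 2., D = 3.; // local-local, local-face, face-local, face-face
  const double F = 8., G = 5.;                 // local and skeleton right hand sides

  const double schur  = D - C * (1. / A) * B;       // Schur complement
  const double rhs    = G - C * (1. / A) * F;       // condensed right hand side
  const double Lambda = rhs / schur;                // step 1: skeleton solution
  const double U      = (1. / A) * (F - B * Lambda); // step 2: local reconstruction

  // (U, Lambda) indeed solves the original coupled system.
  assert(std::abs(A * U + B * Lambda - F) < 1e-12);
  assert(std::abs(C * U + D * Lambda - G) < 1e-12);
  return 0;
}
@endcode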
1007 *   template <int dim>
1008 *   struct HDG<dim>::PerTaskData
1012 *     std::vector<types::global_dof_index> dof_indices;
1014 *     bool trace_reconstruct;
1016 *     PerTaskData(const unsigned int n_dofs, const bool trace_reconstruct)
1018 *       , cell_vector(n_dofs)
1019 *       , dof_indices(n_dofs)
1020 *       , trace_reconstruct(trace_reconstruct)
1029 * <a name="HDGScratchData"></a>
1030 * <h4>HDG::ScratchData</h4>
1031 * @p ScratchData contains persistent data for each
1033 * and vector objects should be familiar by now. There are two objects that
1034 * need to be discussed: `std::vector<std::vector<unsigned int> >
1035 * fe_local_support_on_face` and `std::vector<std::vector<unsigned int> >
1036 * fe_support_on_face`. These are used to indicate whether or not the finite
1037 * elements chosen have support (non-zero values) on a given face of the
1038 * reference cell for the local part associated to @p fe_local and the
1039 * skeleton part @p fe. We extract this information in the
1040 * constructor and store it once for all cells that we work on. Had we not
1041 * stored this information, we would be forced to assemble a large number of
1042 * zero terms on each cell, which would significantly slow the program.
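The idea of this precomputation can be written down in a few lines of generic C++. The predicate below is a stand-in for the finite element's has_support_on_face() query; the function name and signature are illustrative only.

@code
// For every face, store the indices of the shape functions that are nonzero there,
// so the face loops of the assembly only visit those.
#include <functional>
#include <vector>

std::vector<std::vector<unsigned int>> collect_support_on_face(
  const unsigned int                                     n_faces,
  const unsigned int                                     dofs_per_cell,
  const std::function<bool(unsigned int, unsigned int)> &has_support_on_face)
{
  std::vector<std::vector<unsigned int>> result(n_faces);
  for (unsigned int face = 0; face < n_faces; ++face)
    for (unsigned int i = 0; i < dofs_per_cell; ++i)
      if (has_support_on_face(i, face))
        result[face].push_back(i);
  return result;
}
@endcode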
1045 *   template <int dim>
1046 *   struct HDG<dim>::ScratchData
1059 * std::vector<Tensor<1, dim>> q_phi;
1060 * std::vector<double> q_phi_div;
1061 * std::vector<double> u_phi;
1062 * std::vector<Tensor<1, dim>> u_phi_grad;
1063 * std::vector<double> tr_phi;
1064 * std::vector<double> trace_values;
1066 * std::vector<std::vector<unsigned int>> fe_local_support_on_face;
1067 * std::vector<std::vector<unsigned int>> fe_support_on_face;
1069 * ConvectionVelocity<dim> convection_velocity;
1070 * RightHandSide<dim> right_hand_side;
1071 *     const Solution<dim> exact_solution;
1080 * : fe_values_local(fe_local, quadrature_formula, local_flags)
1081 * , fe_face_values_local(fe_local,
1082 * face_quadrature_formula,
1084 * , fe_face_values(fe, face_quadrature_formula, flags)
1085 * , ll_matrix(fe_local.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1086 * , lf_matrix(fe_local.n_dofs_per_cell(), fe.n_dofs_per_cell())
1087 * , fl_matrix(fe.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1088 * , tmp_matrix(fe.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1089 * , l_rhs(fe_local.n_dofs_per_cell())
1090 * , tmp_rhs(fe_local.n_dofs_per_cell())
1091 * , q_phi(fe_local.n_dofs_per_cell())
1092 * , q_phi_div(fe_local.n_dofs_per_cell())
1093 * , u_phi(fe_local.n_dofs_per_cell())
1094 * , u_phi_grad(fe_local.n_dofs_per_cell())
1095 * , tr_phi(fe.n_dofs_per_cell())
1096 * , trace_values(face_quadrature_formula.size())
1099 * , exact_solution()
1101 *       for (const unsigned int face_no : GeometryInfo<dim>::face_indices())
1102 *         for (unsigned int i = 0; i < fe_local.n_dofs_per_cell(); ++i)
1104 *             if (fe_local.has_support_on_face(i, face_no))
1105 *               fe_local_support_on_face[face_no].push_back(i);
1108 *       for (const unsigned int face_no : GeometryInfo<dim>::face_indices())
1109 *         for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
1111 *             if (fe.has_support_on_face(i, face_no))
1112 *               fe_support_on_face[face_no].push_back(i);
1116 *     ScratchData(const ScratchData &sd)
1117 * : fe_values_local(sd.fe_values_local.get_fe(),
1118 * sd.fe_values_local.get_quadrature(),
1119 * sd.fe_values_local.get_update_flags())
1120 * , fe_face_values_local(sd.fe_face_values_local.get_fe(),
1121 * sd.fe_face_values_local.get_quadrature(),
1122 * sd.fe_face_values_local.get_update_flags())
1123 * , fe_face_values(sd.fe_face_values.get_fe(),
1124 * sd.fe_face_values.get_quadrature(),
1125 * sd.fe_face_values.get_update_flags())
1126 * , ll_matrix(sd.ll_matrix)
1127 * , lf_matrix(sd.lf_matrix)
1128 * , fl_matrix(sd.fl_matrix)
1129 * , tmp_matrix(sd.tmp_matrix)
1131 * , tmp_rhs(sd.tmp_rhs)
1133 * , q_phi_div(sd.q_phi_div)
1135 * , u_phi_grad(sd.u_phi_grad)
1136 * , tr_phi(sd.tr_phi)
1137 * , trace_values(sd.trace_values)
1138 * , fe_local_support_on_face(sd.fe_local_support_on_face)
1139 * , fe_support_on_face(sd.fe_support_on_face)
1140 * , exact_solution()
1149 * <a name="HDGPostProcessScratchData"></a>
1150 * <h4>HDG::PostProcessScratchData</h4>
1151 * @p PostProcessScratchData contains the data used by WorkStream
1152 * when post-processing the local solution @f$u^*@f$. It is similar to, but much
1153 * simpler than, @p ScratchData.
1156 *   template <int dim>
1157 *   struct HDG<dim>::PostProcessScratchData
1162 * std::vector<double> u_values;
1163 * std::vector<Tensor<1, dim>> u_gradients;
1174 * : fe_values_local(fe_local, quadrature_formula, local_flags)
1175 * , fe_values(fe, quadrature_formula, flags)
1176 * , u_values(quadrature_formula.size())
1177 * , u_gradients(quadrature_formula.size())
1178 *       , cell_matrix(fe.n_dofs_per_cell(), fe.n_dofs_per_cell())
1179 * , cell_rhs(fe.n_dofs_per_cell())
1180 * , cell_sol(fe.n_dofs_per_cell())
1183 *     PostProcessScratchData(const PostProcessScratchData &sd)
1184 * : fe_values_local(sd.fe_values_local.get_fe(),
1185 * sd.fe_values_local.get_quadrature(),
1186 * sd.fe_values_local.get_update_flags())
1187 * , fe_values(sd.fe_values.get_fe(),
1188 * sd.fe_values.get_quadrature(),
1189 * sd.fe_values.get_update_flags())
1190 * , u_values(sd.u_values)
1191 * , u_gradients(sd.u_gradients)
1193 * , cell_rhs(sd.cell_rhs)
1194 * , cell_sol(sd.cell_sol)
1203 * <a name="HDGassemble_system"></a>
1204 * <h4>HDG::assemble_system</h4>
1205 * The @p assemble_system function is similar to the one in @ref step_32 "step-32", where
1206 * the quadrature formula and the update flags are set up, and then
1207 * <code>WorkStream</code> is used to do the work in a multi-threaded
1208 * manner. The @p trace_reconstruct input parameter is used to decide
1209 * whether we are solving for the global skeleton solution (false) or the
1210 * local solution (true).
1214 * One thing worth noting for the multi-threaded execution of assembly is
1215 * the fact that the local computations in `assemble_system_one_cell()` call
1216 * into BLAS and LAPACK functions if those are available in deal.II. Thus,
1217 * the underlying BLAS/LAPACK library must support calls from multiple
1218 * threads at the same time. Most implementations do support this, but some
1219 * libraries need to be built in a specific way to avoid problems. For
1220 * example, OpenBLAS compiled without multithreading inside the BLAS/LAPACK
1221 * calls needs to be built with a flag called `USE_LOCKING` set to true.
1224 *   template <int dim>
1225 *   void HDG<dim>::assemble_system(const bool trace_reconstruct)
1227 *     const QGauss<dim> quadrature_formula(fe.degree + 1);
1228 *     const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
1238 *     PerTaskData task_data(fe.n_dofs_per_cell(), trace_reconstruct);
1239 *     ScratchData scratch(fe,
1241 *                         quadrature_formula,
1242 *                         face_quadrature_formula,
1248 *                           dof_handler.end(),
1250 *                           &HDG<dim>::assemble_system_one_cell,
1251 *                           &HDG<dim>::copy_local_to_global,
1261 * <a name="HDGassemble_system_one_cell"></a>
1262 * <h4>HDG::assemble_system_one_cell</h4>
1263 * The real work of the HDG program is done by @p assemble_system_one_cell.
1264 * Assembling the local matrices @f$A, B, C@f$ is done here, along with the
1265 * local contributions of the global matrix @f$D@f$.
1268 *   template <int dim>
1269 *   void HDG<dim>::assemble_system_one_cell(
1271 *     ScratchData &scratch,
1272 *     PerTaskData &task_data)
1276 * Construct iterator for dof_handler_local for FEValues reinit function.
1280 *       cell->as_dof_handler_iterator(dof_handler_local);
1282 *     const unsigned int n_q_points =
1283 *       scratch.fe_values_local.get_quadrature().size();
1284 *     const unsigned int n_face_q_points =
1285 *       scratch.fe_face_values_local.get_quadrature().size();
1287 *     const unsigned int loc_dofs_per_cell =
1288 *       scratch.fe_values_local.get_fe().n_dofs_per_cell();
1293 * scratch.ll_matrix = 0;
1294 * scratch.l_rhs = 0;
1295 *     if (!task_data.trace_reconstruct)
1297 * scratch.lf_matrix = 0;
1298 * scratch.fl_matrix = 0;
1299 * task_data.cell_matrix = 0;
1300 * task_data.cell_vector = 0;
1302 * scratch.fe_values_local.reinit(loc_cell);
1306 * We first compute the cell-interior contribution to the @p ll_matrix matrix
1307 * (referred to as matrix @f$A@f$ in the introduction) corresponding to
1308 * local-local coupling, as well as the local right-hand-side vector. We
1309 * store the values at each quadrature point for the basis functions, the
1310 * right-hand-side value, and the convection velocity, in order to have
1311 * quick access to these fields.
1314 *     for (unsigned int q = 0; q < n_q_points; ++q)
1316 *         const double rhs_value = scratch.right_hand_side.value(
1317 *           scratch.fe_values_local.quadrature_point(q));
1318 *         const Tensor<1, dim> convection = scratch.convection_velocity.value(
1319 *           scratch.fe_values_local.quadrature_point(q));
1320 *         const double JxW = scratch.fe_values_local.JxW(q);
1321 *         for (unsigned int k = 0; k < loc_dofs_per_cell; ++k)
1323 *             scratch.q_phi[k] = scratch.fe_values_local[fluxes].value(k, q);
1324 *             scratch.q_phi_div[k] =
1325 *               scratch.fe_values_local[fluxes].divergence(k, q);
1326 *             scratch.u_phi[k] = scratch.fe_values_local[scalar].value(k, q);
1327 *             scratch.u_phi_grad[k] =
1328 *               scratch.fe_values_local[scalar].gradient(k, q);
1330 *         for (unsigned int i = 0; i < loc_dofs_per_cell; ++i)
1332 *             for (unsigned int j = 0; j < loc_dofs_per_cell; ++j)
1333 *               scratch.ll_matrix(i, j) +=
1334 *                 (scratch.q_phi[i] * scratch.q_phi[j] -
1335 *                  scratch.q_phi_div[i] * scratch.u_phi[j] +
1336 *                  scratch.u_phi[i] * scratch.q_phi_div[j] -
1337 *                  (scratch.u_phi_grad[i] * convection) * scratch.u_phi[j]) *
1339 *             scratch.l_rhs(i) += scratch.u_phi[i] * rhs_value * JxW;
1345 * Face terms are assembled on all faces of all elements. This is in
1346 * contrast to more traditional DG methods, where each face is only visited
1347 * once in the assembly procedure.
1350 *     for (const auto face_no : cell->face_indices())
1352 *         scratch.fe_face_values_local.reinit(loc_cell, face_no);
1353 *         scratch.fe_face_values.reinit(cell, face_no);
1357 * The already obtained @f$\hat{u}@f$ values are needed when solving for the
1361 *         if (task_data.trace_reconstruct)
1362 *           scratch.fe_face_values.get_function_values(solution,
1363 *                                                       scratch.trace_values);
1365 *         for (unsigned int q = 0; q < n_face_q_points; ++q)
1367 *             const double JxW = scratch.fe_face_values.JxW(q);
1369 *               scratch.fe_face_values.quadrature_point(q);
1371 *               scratch.fe_face_values.normal_vector(q);
1373 *               scratch.convection_velocity.value(quadrature_point);
1377 * Here we compute the stabilization parameter discussed in the
1378 * introduction: since the diffusion is one and the diffusion
1379 * length scale is set to 1/5, it simply results in a contribution
1380 * of 5 for the diffusion part and the magnitude of convection
1381 * through the element boundary in a centered scheme for the
1385 *             const double tau_stab = (5. + std::abs(convection * normal));
1389 * We store the non-zero flux and scalar values, making use of the
1390 * support_on_face information we created in @p ScratchData.
1393 *             for (unsigned int k = 0;
1394 *                  k < scratch.fe_local_support_on_face[face_no].size();
1397 *                 const unsigned int kk =
1398 *                   scratch.fe_local_support_on_face[face_no][k];
1399 *                 scratch.q_phi[k] =
1400 *                   scratch.fe_face_values_local[fluxes].value(kk, q);
1401 *                 scratch.u_phi[k] =
1402 *                   scratch.fe_face_values_local[scalar].value(kk, q);
1407 * When @p trace_reconstruct=false, we are preparing to assemble the
1408 * system for the skeleton variable @f$\hat{u}@f$. If this is the case,
1409 * we must assemble all local matrices associated with the problem:
1410 * local-local, local-face, face-local, and face-face. The
1411 * face-face matrix is stored as @p TaskData::cell_matrix, so that
1412 * it can be assembled into the global system by @p
1413 * copy_local_to_global.
1416 *             if (!task_data.trace_reconstruct)
1418 *                 for (unsigned int k = 0;
1419 *                      k < scratch.fe_support_on_face[face_no].size();
1421 *                   scratch.tr_phi[k] = scratch.fe_face_values.shape_value(
1422 *                     scratch.fe_support_on_face[face_no][k], q);
1423 *                 for (unsigned int i = 0;
1424 *                      i < scratch.fe_local_support_on_face[face_no].size();
1426 *                   for (unsigned int j = 0;
1427 *                        j < scratch.fe_support_on_face[face_no].size();
1430 *                       const unsigned int ii =
1431 *                         scratch.fe_local_support_on_face[face_no][i];
1432 *                       const unsigned int jj =
1433 *                         scratch.fe_support_on_face[face_no][j];
1434 *                       scratch.lf_matrix(ii, jj) +=
1435 *                         ((scratch.q_phi[i] * normal +
1436 *                           (convection * normal - tau_stab) * scratch.u_phi[i]) *
1437 *                          scratch.tr_phi[j]) *
1442 * Note the sign of the face_no-local matrix. We negate
1443 * the sign during assembly here so that we can use the
1448 *                       scratch.fl_matrix(jj, ii) -=
1449 *                         ((scratch.q_phi[i] * normal +
1450 *                           tau_stab * scratch.u_phi[i]) *
1451 *                          scratch.tr_phi[j]) *
1455 *                 for (unsigned int i = 0;
1456 *                      i < scratch.fe_support_on_face[face_no].size();
1458 *                   for (unsigned int j = 0;
1459 *                        j < scratch.fe_support_on_face[face_no].size();
1462 *                       const unsigned int ii =
1463 *                         scratch.fe_support_on_face[face_no][i];
1464 *                       const unsigned int jj =
1465 *                         scratch.fe_support_on_face[face_no][j];
1466 *                       task_data.cell_matrix(ii, jj) +=
1467 *                         ((convection * normal - tau_stab) * scratch.tr_phi[i] *
1468 *                          scratch.tr_phi[j]) *
1472 *                 if (cell->face(face_no)->at_boundary() &&
1473 *                     (cell->face(face_no)->boundary_id() == 1))
1475 *                     const double neumann_value =
1476 *                       -scratch.exact_solution.gradient(quadrature_point) *
1478 *                       convection * normal *
1479 *                       scratch.exact_solution.value(quadrature_point);
1480 *                     for (unsigned int i = 0;
1481 *                          i < scratch.fe_support_on_face[face_no].size();
1484 *                         const unsigned int ii =
1485 *                           scratch.fe_support_on_face[face_no][i];
1486 *                         task_data.cell_vector(ii) +=
1487 *                           scratch.tr_phi[i] * neumann_value * JxW;
1494 * This last term adds the contribution of the term @f$\left<w,\tau
1495 * u_h\right>_{\partial \mathcal T}@f$ to the local matrix. As opposed
1496 * to the face matrices above, we need it in both assembly stages.
1499 *             for (unsigned int i = 0;
1500 *                  i < scratch.fe_local_support_on_face[face_no].size();
1502 *               for (unsigned int j = 0;
1503 *                    j < scratch.fe_local_support_on_face[face_no].size();
1506 *                   const unsigned int ii =
1507 *                     scratch.fe_local_support_on_face[face_no][i];
1508 *                   const unsigned int jj =
1509 *                     scratch.fe_local_support_on_face[face_no][j];
1510 *                   scratch.ll_matrix(ii, jj) +=
1511 *                     tau_stab * scratch.u_phi[i] * scratch.u_phi[j] * JxW;
1516 * When @p trace_reconstruct=true, we are solving for the local
1517 * solutions on an element by element basis. The local
1518 * right-hand-side is calculated by replacing the basis functions @p
1519 * tr_phi in the @p lf_matrix computation by the computed values @p
1520 * trace_values. Of course, the sign of the matrix is now minus
1521 * since we have moved everything to the other side of the equation.
1524 *             if (task_data.trace_reconstruct)
1525 *               for (unsigned int i = 0;
1526 *                    i < scratch.fe_local_support_on_face[face_no].size();
1529 *                   const unsigned int ii =
1530 *                     scratch.fe_local_support_on_face[face_no][i];
1531 *                   scratch.l_rhs(ii) -=
1532 *                     (scratch.q_phi[i] * normal +
1533 *                      scratch.u_phi[i] * (convection * normal - tau_stab)) *
1534 *                     scratch.trace_values[q] * JxW;
1541 * Once assembly of all of the local contributions is complete, we must
1542 * either: (1) assemble the global system, or (2) compute the local solution
1547 *     scratch.ll_matrix.gauss_jordan();
1551 * For (1), we compute the Schur complement and add it to the @p
1555 *     if (task_data.trace_reconstruct == false)
1557 *         scratch.fl_matrix.mmult(scratch.tmp_matrix, scratch.ll_matrix);
1558 *         scratch.tmp_matrix.vmult_add(task_data.cell_vector, scratch.l_rhs);
1559 *         scratch.tmp_matrix.mmult(task_data.cell_matrix,
1560 *                                  scratch.lf_matrix,
1562 *         cell->get_dof_indices(task_data.dof_indices);
1566 * For (2), we are simply solving (ll_matrix).(solution_local) = (l_rhs).
1567 * Hence, we multiply @p l_rhs by our already inverted local-local matrix
1568 * and store the result using the <code>set_dof_values</code> function.
1573 *     scratch.ll_matrix.vmult(scratch.tmp_rhs, scratch.l_rhs);
1574 *     loc_cell->set_dof_values(scratch.tmp_rhs, solution_local);
1583 * <a name="HDGcopy_local_to_global"></a>
1584 * <h4>HDG::copy_local_to_global</h4>
1585 * If we are in the first step of the solution, i.e. @p trace_reconstruct=false,
1586 * then we assemble the local matrices into the global system.
1589 *   template <int dim>
1590 *   void HDG<dim>::copy_local_to_global(const PerTaskData &data)
1592 *     if (data.trace_reconstruct == false)
1593 *       constraints.distribute_local_to_global(data.cell_matrix,
1605 * <a name="HDGsolve"></a>
1606 * <h4>HDG::solve</h4>
1607 * The skeleton solution is solved for by using a BiCGStab solver with
1611 *   template <int dim>
1612 *   void HDG<dim>::solve()
1615 *                                  1e-11 * system_rhs.l2_norm());
1619 *     std::cout << " Number of BiCGStab iterations: "
1620 *               << solver_control.last_step() << std::endl;
1622 *     system_matrix.clear();
1623 *     sparsity_pattern.reinit(0, 0, 0, 1);
1625 *     constraints.distribute(solution);
1629 * Once we have solved for the skeleton solution,
1630 * we can solve for the local solutions in an element-by-element
1631 * fashion. We do this by re-using the same @p assemble_system function
1632 * but switching @p trace_reconstruct to true.
1635 *     assemble_system(true);
1643 * <a name="HDGpostprocess"></a>
1644 * <h4>HDG::postprocess</h4>
1648 * The postprocess method serves two purposes. First, we want to construct a
1649 * post-processed scalar variable in the element space of degree @f$p+1@f$ that
1650 * we hope will converge at order @f$p+2@f$. This is again an element-by-element
1651 * process and only involves the scalar solution as well as the gradient on
1652 * the local cell. To do this, we introduce the already defined scratch data
1653 * together with some update flags and run the work stream to do this in
1658 * Secondly, we want to compute discretization errors just as we did in
1659 * @ref step_7 "step-7". The overall procedure is similar with calls to
1661 * the errors for the scalar variable and the gradient variable. In @ref step_7 "step-7",
1663 * contributions. Here, we have a DoFHandler with these two contributions
1664 * computed and sorted by their vector component, <code>[0, dim)</code> for the
1666 * gradient and @p dim for the scalar. To compute their value, we hence use the
1668 * SolutionAndGradient class introduced above that contains the analytic
1669 * parts of either of them. Eventually, we also compute the L2-error of the
1670 * post-processed solution and add the results into the convergence table.
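Conceptually, selecting components when measuring errors amounts to a weighted L2 norm whose weight masks out the components one is not interested in. The sketch below illustrates this idea in plain C++; it is not the library call itself, and all names are invented for illustration.

@code
// Weighted (masked) L2 norm of a vector-valued error field with dim gradient
// components followed by one scalar component, evaluated at quadrature points.
#include <cmath>
#include <vector>

double masked_l2_norm(const std::vector<std::vector<double>> &error_at_q, // [q][component]
                      const std::vector<double>              &JxW,        // quadrature weights
                      const std::vector<double>              &mask)       // 1 = selected component
{
  double sum = 0.;
  for (std::size_t q = 0; q < error_at_q.size(); ++q)
    for (std::size_t c = 0; c < mask.size(); ++c)
      sum += mask[c] * error_at_q[q][c] * error_at_q[q][c] * JxW[q];
  return std::sqrt(sum);
}
// For dim = 2: the mask {1, 1, 0} selects the gradient part, {0, 0, 1} the scalar part.
@endcode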
1673 *   template <int dim>
1674 *   void HDG<dim>::postprocess()
1677 *     const QGauss<dim> quadrature_formula(fe_u_post.degree + 1);
1682 *       PostProcessScratchData scratch(
1683 *         fe_u_post, fe_local, quadrature_formula, local_flags, flags);
1686 *         dof_handler_u_post.begin_active(),
1687 *         dof_handler_u_post.end(),
1689 *             PostProcessScratchData &scratch,
1690 *             unsigned int &data) {
1691 *           this->postprocess_one_cell(cell, scratch, data);
1693 *         std::function<void(const unsigned int &)>(),
1703 *                                       SolutionAndGradient<dim>(),
1704 *                                       difference_per_cell,
1708 *     const double L2_error =
1710 *                                         difference_per_cell,
1714 *       std::pair<unsigned int, unsigned int>(0, dim), dim + 1);
1717 *                                       SolutionAndGradient<dim>(),
1718 *                                       difference_per_cell,
1721 *                                       &gradient_select);
1722 *     const double grad_error =
1724 *                                         difference_per_cell,
1730 *                                       difference_per_cell,
1733 *     const double post_error =
1735 *                                         difference_per_cell,
1738 *     convergence_table.add_value("cells", triangulation.n_active_cells());
1739 *     convergence_table.add_value("dofs", dof_handler.n_dofs());
1741 *     convergence_table.add_value("val L2", L2_error);
1742 *     convergence_table.set_scientific("val L2", true);
1743 *     convergence_table.set_precision("val L2", 3);
1745 *     convergence_table.add_value("grad L2", grad_error);
1746 *     convergence_table.set_scientific("grad L2", true);
1747 *     convergence_table.set_precision("grad L2", 3);
1749 *     convergence_table.add_value("val L2-post", post_error);
1750 *     convergence_table.set_scientific("val L2-post", true);
1751 *     convergence_table.set_precision("val L2-post", 3);
1759 * <a name="HDGpostprocess_one_cell"></a>
1760 * <h4>HDG::postprocess_one_cell</h4>
1764 * This is the actual work done for the postprocessing. According to the
1765 * discussion in the introduction, we need to set up a system that projects
1767 * post-processed variable. Moreover, we need to set the average of the new
1768 * post-processed variable to equal the average of the scalar DG solution
1773 * More technically speaking, the projection of the gradient is a system
1774 * that would potentially fill our @p dofs_per_cell times @p dofs_per_cell
1775 * matrix but is singular (the sum of all rows would be zero because the
1776 * constant function has zero gradient). Therefore, we take one row away and
1778 * row for the scalar part, even though we could pick any row for @f$\mathcal
1779 * Q_{-p}@f$ elements. However, had we used FE_DGP elements instead, the first
1780 * row would correspond to the constant part already and deleting e.g. the
1781 * last row would give us a singular system. This way, our program can also
1782 * be used for those elements.
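The same local system can be written down without any deal.II types. The sketch below assembles it in index notation for one cell, given quadrature-point data: rows 1 to n-1 carry the gradient projection, and row 0 is replaced by the mean-value constraint. The array layout and function name are illustrative; solving the resulting dense system (for example by LU factorization) is omitted.

@code
// Local post-processing system: (grad phi_i, grad u*) = -(grad phi_i, q_h) for i >= 1,
// and (1, u*) = (1, u_h) in row 0.
#include <vector>

struct LocalSystem
{
  std::vector<std::vector<double>> matrix;
  std::vector<double>              rhs;
};

LocalSystem build_postprocess_system(
  const std::vector<std::vector<double>>              &shape_value, // [i][q]
  const std::vector<std::vector<std::vector<double>>> &shape_grad,  // [i][q][d]
  const std::vector<std::vector<double>>              &q_h,         // gradient variable [q][d]
  const std::vector<double>                           &u_h,         // scalar DG solution [q]
  const std::vector<double>                           &JxW)
{
  const std::size_t n = shape_value.size(), n_q = JxW.size(), dim = q_h[0].size();
  LocalSystem s{std::vector<std::vector<double>>(n, std::vector<double>(n, 0.)),
                std::vector<double>(n, 0.)};

  for (std::size_t i = 1; i < n; ++i) // gradient projection, skipping the redundant row 0
    {
      for (std::size_t j = 0; j < n; ++j)
        for (std::size_t q = 0; q < n_q; ++q)
          for (std::size_t d = 0; d < dim; ++d)
            s.matrix[i][j] += shape_grad[i][q][d] * shape_grad[j][q][d] * JxW[q];
      for (std::size_t q = 0; q < n_q; ++q)
        for (std::size_t d = 0; d < dim; ++d)
          s.rhs[i] -= shape_grad[i][q][d] * q_h[q][d] * JxW[q];
    }

  for (std::size_t j = 0; j < n; ++j) // row 0: mean value of u* ...
    for (std::size_t q = 0; q < n_q; ++q)
      s.matrix[0][j] += shape_value[j][q] * JxW[q];
  for (std::size_t q = 0; q < n_q; ++q) // ... equals the mean value of u_h
    s.rhs[0] += u_h[q] * JxW[q];

  return s;
}
@endcode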
1785 *   template <int dim>
1786 *   void HDG<dim>::postprocess_one_cell(
1788 *     PostProcessScratchData &scratch,
1792 *       cell->as_dof_handler_iterator(dof_handler_local);
1794 *     scratch.fe_values_local.reinit(loc_cell);
1795 *     scratch.fe_values.reinit(cell);
1800 *     const unsigned int n_q_points = scratch.fe_values.get_quadrature().size();
1801 *     const unsigned int dofs_per_cell = scratch.fe_values.dofs_per_cell;
1803 *     scratch.fe_values_local[scalar].get_function_values(solution_local,
1804 *                                                         scratch.u_values);
1805 *     scratch.fe_values_local[fluxes].get_function_values(solution_local,
1806 *                                                         scratch.u_gradients);
1809 *     for (unsigned int i = 1; i < dofs_per_cell; ++i)
1811 *         for (unsigned int j = 0; j < dofs_per_cell; ++j)
1814 *             for (unsigned int q = 0; q < n_q_points; ++q)
1815 *               sum += (scratch.fe_values.shape_grad(i, q) *
1816 *                       scratch.fe_values.shape_grad(j, q)) *
1817 *                      scratch.fe_values.JxW(q);
1818 *             scratch.cell_matrix(i, j) = sum;
1822 *         for (unsigned int q = 0; q < n_q_points; ++q)
1823 *           sum -= (scratch.fe_values.shape_grad(i, q) * scratch.u_gradients[q]) *
1824 *                  scratch.fe_values.JxW(q);
1825 *         scratch.cell_rhs(i) = sum;
1827 *     for (unsigned int j = 0; j < dofs_per_cell; ++j)
1830 *         for (unsigned int q = 0; q < n_q_points; ++q)
1831 *           sum += scratch.fe_values.shape_value(j, q) * scratch.fe_values.JxW(q);
1832 *         scratch.cell_matrix(0, j) = sum;
1836 *     for (unsigned int q = 0; q < n_q_points; ++q)
1837 *       sum += scratch.u_values[q] * scratch.fe_values.JxW(q);
1838 *     scratch.cell_rhs(0) = sum;
1843 * Having assembled all terms, we can again go on and solve the linear
1844 * system. We invert the matrix and then multiply the inverse by the
1845 * right hand side. An alternative (and more numerically stable) method
1846 * would have been to only factorize the matrix and apply the factorization.
1849 *     scratch.cell_matrix.gauss_jordan();
1850 *     scratch.cell_matrix.vmult(scratch.cell_sol, scratch.cell_rhs);
1851 *     cell->distribute_local_to_global(scratch.cell_sol, solution_u_post);
1859 * <a name="HDGoutput_results"></a>
1860 * <h4>HDG::output_results</h4>
1861 * We have 3 sets of results that we would like to output: the local
1862 * solution, the post-processed local solution, and the skeleton solution. The
1863 * former 2 both 'live' on element volumes, whereas the latter lives on
1864 * codimension-1 surfaces
1865 * of the triangulation. Our @p output_results function writes all local solutions
1866 * to the same vtk file, even though they correspond to different
1867 * DoFHandler objects. The graphical output for the skeleton
1868 * variable is done through use of the DataOutFaces class.
1871 *   template <int dim>
1872 *   void HDG<dim>::output_results(const unsigned int cycle)
1874 *     std::string filename;
1875 *     switch (refinement_mode)
1877 *         case global_refinement:
1878 *           filename = "solution-global";
1880 *         case adaptive_refinement:
1881 *           filename = "solution-adaptive";
1884 *           Assert(false, ExcNotImplemented());
1887 *     std::string face_out(filename);
1888 *     face_out += "-face";
1892 *     filename += ".vtk";
1893 *     std::ofstream output(filename);
1899 * We first define the names and types of the local solution,
1900 * and add the data to @p data_out.
1903 *     std::vector<std::string> names(dim, "gradient");
1904 *     names.emplace_back("solution");
1905 *     std::vector<DataComponentInterpretation::DataComponentInterpretation>
1906 *       component_interpretation(
1908 *     component_interpretation[dim] =
1910 *     data_out.add_data_vector(dof_handler_local,
1913 *                              component_interpretation);
1917 * The second data item we add is the post-processed solution.
1918 * In this case, it is a single scalar variable belonging to
1922 *     std::vector<std::string> post_name(1, "u_post");
1923 *     std::vector<DataComponentInterpretation::DataComponentInterpretation>
1925 *     data_out.add_data_vector(dof_handler_u_post,
1930 *     data_out.build_patches(fe.degree);
1931 *     data_out.write_vtk(output);
1935 *     face_out += ".vtk";
1936 *     std::ofstream face_output(face_out);
1940 * The <code>DataOutFaces</code> class works analogously to the
1942 * defines the solution on the skeleton of the triangulation. We treat it
1943 * as such here, and the code is similar to that above.
1947 *     std::vector<std::string> face_name(1, "u_hat");
1948 *     std::vector<DataComponentInterpretation::DataComponentInterpretation>
1951 *     data_out_face.add_data_vector(dof_handler,
1954 *                                   face_component_type);
1956 *     data_out_face.build_patches(fe.degree);
1957 *     data_out_face.write_vtk(face_output);
1963 * <a name="HDGrefine_grid"></a>
1964 * <h4>HDG::refine_grid</h4>
1968 * We implement two different refinement cases for HDG, just as in
1969 * <code>@ref step_7 "step-7"</code>: adaptive_refinement and global_refinement. The
1970 * global_refinement option recreates the entire triangulation every
1971 * time. This is because we want to use a finer sequence of meshes than what
1972 * we would get with one refinement step, namely 2, 3, 4, 6, 8, 12, 16, ...
1973 * elements per direction.
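One way to produce exactly this sequence of mesh sizes is sketched below: alternate between 2 and 3 initial subdivisions and apply cycle/2 global refinements. Whether the tutorial uses precisely this formula is an assumption; the printed sequence matches the one quoted above.

@code
#include <iostream>

int main()
{
  for (unsigned int cycle = 0; cycle < 10; ++cycle)
    {
      const unsigned int initial_subdivisions = 2 + (cycle % 2); // 2, 3, 2, 3, ...
      const unsigned int n_global_refinements = cycle / 2;       // 0, 0, 1, 1, 2, ...
      const unsigned int cells_per_direction  = initial_subdivisions << n_global_refinements;
      std::cout << cells_per_direction << ' '; // 2 3 4 6 8 12 16 24 32 48
    }
  std::cout << std::endl;
  return 0;
}
@endcode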
1978 * give a decent indication of the non-regular regions in the scalar local
1982 *   template <int dim>
1983 *   void HDG<dim>::refine_grid(const unsigned int cycle)
1991 *     switch (refinement_mode)
1993 *         case global_refinement:
2004 *         case adaptive_refinement:
2010 *             std::map<types::boundary_id, const Function<dim> *>
2016 *               estimated_error_per_cell,
2017 *               fe_local.component_mask(
2030 *             Assert(false, ExcNotImplemented());
2036 * Just as in @ref step_7 "step-7", we set the boundary indicator of two of the faces to 1
2037 * where we want to specify Neumann boundary conditions instead of Dirichlet
2038 * conditions. Since we re-create the triangulation every time for global
2039 * refinement, the flags are set in every refinement step, not just at the
2044 *     for (const auto &face : cell->face_iterators())
2045 *       if (face->at_boundary())
2048 *         face->set_boundary_id(1);
2054 * <a name="HDGrun"></a>
2056 * The functionality here is basically the same as <code>@ref step_7 "step-7"</code>.
2057 * We loop over 10 cycles, refining the grid on each one. At the end,
2058 * convergence tables are created.
2061 *   template <int dim>
2062 *   void HDG<dim>::run()
2064 *     for (unsigned int cycle = 0; cycle < 10; ++cycle)
2066 *         std::cout << "Cycle " << cycle << ':' << std::endl;
2068 *         refine_grid(cycle);
2070 *         assemble_system(false);
2073 *         output_results(cycle);
2078 * There is one minor change for the convergence table compared to @ref step_7 "step-7":
2079 * Since we did not refine our mesh by a factor of two in each cycle (but
2080 * rather used the sequence 2, 3, 4, 6, 8, 12, ...), we need to tell the
2081 * convergence rate evaluation about this. We do this by setting the
2082 * number of cells as a reference column and additionally specifying the
2083 * dimension of the problem, which gives the necessary information for the
2084 * relation between number of cells and mesh size.
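The rate computation behind this choice is simple: with @f$N@f$ cells in @f$d@f$ space dimensions the mesh size scales like @f$h \sim N^{-1/d}@f$, so the observed order between two cycles is @f$d\,\log(e_\text{old}/e_\text{new})/\log(N_\text{new}/N_\text{old})@f$. The sketch below applies this formula to two rows of the 2D Q1 global-refinement table shown in the results section and reproduces its final "val L2" rate of about 1.97.

@code
#include <cmath>
#include <iostream>

int main()
{
  const int    dim   = 2;
  const double N_old = 4096., e_old = 1.043e-02;
  const double N_new = 9216., e_new = 4.690e-03;
  const double rate  = dim * std::log(e_old / e_new) / std::log(N_new / N_old);
  std::cout << "observed order: " << rate << std::endl; // approximately 1.97
  return 0;
}
@endcode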
2087 *     if (refinement_mode == global_refinement)
2089 *         convergence_table.evaluate_convergence_rates(
2091 *         convergence_table.evaluate_convergence_rates(
2093 *         convergence_table.evaluate_convergence_rates(
2096 *     convergence_table.write_text(std::cout);
2105 *       const unsigned int dim = 2;
2111 * Now for the three calls to the main class in complete analogy to
2112 * @ref step_7 "step-7".
2116 *         std::cout << "Solving with Q1 elements, adaptive refinement"
2118 *                   << "============================================="
2122 *         Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::adaptive_refinement);
2123 *         hdg_problem.run();
2125 *         std::cout << std::endl;
2129 *         std::cout << "Solving with Q1 elements, global refinement" << std::endl
2130 *                   << "===========================================" << std::endl
2133 *         Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::global_refinement);
2134 *         hdg_problem.run();
2136 *         std::cout << std::endl;
2140 *         std::cout << "Solving with Q3 elements, global refinement" << std::endl
2141 *                   << "===========================================" << std::endl
2144 *         Step51::HDG<dim> hdg_problem(3, Step51::HDG<dim>::global_refinement);
2145 *         hdg_problem.run();
2147 *         std::cout << std::endl;
2150 *   catch (std::exception &exc)
2152 *       std::cerr << std::endl
2154 *                 << "----------------------------------------------------"
2156 *       std::cerr << "Exception on processing: " << std::endl
2157 *                 << exc.what() << std::endl
2158 *                 << "Aborting!" << std::endl
2159 *                 << "----------------------------------------------------"
2165 *       std::cerr << std::endl
2167 *                 << "----------------------------------------------------"
2169 *       std::cerr << "Unknown exception!" << std::endl
2170 *                 << "Aborting!" << std::endl
2171 *                 << "----------------------------------------------------"
2179 <a name="Results"></a><h1>Results</h1>
2182 <a name="Programoutput"></a><h3>Program output</h3>
2185 We first have a look at the output generated by the program when run in 2D. In
2186 the four images below, we show the solution for polynomial degree @f$p=1@f$
2187 and cycles 2, 3, 4, and 8 of the program. In the plots, we overlay the data
2188 generated from the internal data (DG part) with the skeleton part (@f$\hat{u}@f$)
2189 into the same plot. We had to generate two different data sets because cells
2190 and faces represent different geometric entities, the combination of which (in
2191 the same file) is not supported in the VTK output of deal.II.
2193 The images show the distinctive features of HDG: The cell solution (colored
2194 surfaces) is discontinuous between the cells. The solution on the skeleton
2195 variable sits on the faces and ties together the local parts. The skeleton
2196 solution is not continuous on the vertices where the faces meet, even though
2197 its values are quite close along lines in the same coordinate direction. The
2198 skeleton solution can be interpreted as a rubber spring between the two sides
2199 that balances the jumps in the solution (or rather, the flux @f$\kappa \nabla u
2200 + \mathbf{c} u@f$). From the picture at the top left, it is clear that
2201 the bulk solution frequently over- and undershoots and that the
2202 skeleton variable is indeed a better approximation to the exact
2203 solution; this explains why we can get a better solution using a
2206 As the mesh is refined, the jumps between the cells get
2207 smaller (we represent a smooth solution), and the skeleton solution approaches
2208 the interior parts. For cycle 8, there is no visible difference in the two
2209 variables. We also see how boundary conditions are implemented weakly and that
2210 the interior variables do not exactly satisfy boundary conditions. On the
2211 lower and left boundaries, we set Neumann boundary conditions, whereas we set
2212 Dirichlet conditions on the right and top boundaries.
2214 <table align="center">
2216   <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_2.png" alt=""></td>
2217   <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_3.png" alt=""></td>
2220   <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_4.png" alt=""></td>
2221   <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_8.png" alt=""></td>
2225 Next, we have a look at the post-processed solution, again at cycles 2, 3, 4,
2226 and 8. This is a discontinuous solution that is locally described by second
2227 order polynomials. While the solution does not look very good on the mesh of
2228 cycle two, it looks much better for cycles three and four. As shown by the
2229 convergence table below, we find that it also converges more quickly to the
2232 <table align="center">
2234   <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_2.png" alt=""></td>
2235   <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_3.png" alt=""></td>
2238   <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_4.png" alt=""></td>
2239   <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_8.png" alt=""></td>
2243 Finally, we look at the solution for @f$p=3@f$ at cycle 2. Despite the coarse
2244 mesh with only 64 cells, the post-processed solution is similar in quality
2245 to the linear solution (not post-processed) at cycle 8 with 4,096
2246 cells. This clearly shows the superiority of high order methods for smooth
2249 <table align="center">
2251   <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_q3_2.png" alt=""></td>
2252   <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_q3_2.png" alt=""></td>
2256 <a name="Convergencetables"></a><h4>Convergence tables</h4>
2259 When the program is run, it also outputs information about the respective
2260 steps and convergence tables with errors in the various components at the
2261 end. In 2D, the convergence tables look as follows:
2264 Q1 elements, adaptive refinement:
2265 cells  dofs    val L2     grad L2    val L2-post
2266 16 80 1.804e+01 2.207e+01 1.798e+01
2267 31 170 9.874e+00 1.322e+01 9.798e+00
2268 61 314 7.452e-01 3.793e+00 4.891e-01
2269 121 634 3.240e-01 1.511e+00 2.616e-01
2270 238 1198 8.585e-02 8.212e-01 1.808e-02
2271 454 2290 4.802e-02 5.178e-01 2.195e-02
2272 898 4378 2.561e-02 2.947e-01 4.318e-03
2273 1720 7864 1.306e-02 1.664e-01 2.978e-03
2274 3271 14638 7.025e-03 9.815e-02 1.075e-03
2275 6217 27214 4.119e-03 6.407e-02 9.975e-04
2277 Q1 elements, global refinement:
2278 cells  dofs    val L2     grad L2    val L2-post
2279 16 80 1.804e+01 - 2.207e+01 - 1.798e+01 -
2280 36 168 6.125e+00 2.66 9.472e+00 2.09 6.084e+00 2.67
2281 64 288 9.785e-01 6.38 4.260e+00 2.78 7.102e-01 7.47
2282 144 624 2.730e-01 3.15 1.866e+00 2.04 6.115e-02 6.05
2283 256 1088 1.493e-01 2.10 1.046e+00 2.01 2.880e-02 2.62
2284 576 2400 6.965e-02 1.88 4.846e-01 1.90 9.204e-03 2.81
2285 1024 4224 4.018e-02 1.91 2.784e-01 1.93 4.027e-03 2.87
2286 2304 9408 1.831e-02 1.94 1.264e-01 1.95 1.236e-03 2.91
2287 4096 16640 1.043e-02 1.96 7.185e-02 1.96 5.306e-04 2.94
2288 9216 37248 4.690e-03 1.97 3.228e-02 1.97 1.599e-04 2.96
2290 Q3 elements, global refinement:
2291 cells  dofs    val L2     grad L2    val L2-post
2292 16 160 3.613e-01 - 1.891e+00 - 3.020e-01 -
2293 36 336 6.411e-02 4.26 5.081e-01 3.24 3.238e-02 5.51
2294 64 576 3.480e-02 2.12 2.533e-01 2.42 5.277e-03 6.31
2295 144 1248 8.297e-03 3.54 5.924e-02 3.58 6.330e-04 5.23
2296 256 2176 2.254e-03 4.53 1.636e-02 4.47 1.403e-04 5.24
2297 576 4800 4.558e-04 3.94 3.277e-03 3.96 1.844e-05 5.01
2298 1024 8448 1.471e-04 3.93 1.052e-03 3.95 4.378e-06 5.00
2299 2304 18816 2.956e-05 3.96 2.104e-04 3.97 5.750e-07 5.01
2300 4096 33280 9.428e-06 3.97 6.697e-05 3.98 1.362e-07 5.01
2301 9216 74496 1.876e-06 3.98 1.330e-05 3.99 1.788e-08 5.01
2305 One can see the error reduction upon grid refinement, and for the cases where
2306 global refinement was performed, also the convergence rates. The quadratic
2307 convergence rates of Q1 elements in the @f$L_2@f$ norm for both the scalar
2308 variable and the gradient variable are apparent, as is the cubic rate for the
2309 postprocessed scalar variable in the @f$L_2@f$ norm. Note this distinctive
2310 feature of an HDG solution. In typical continuous finite elements, the
2311 gradient of the solution of order @f$p@f$ converges at rate @f$p@f$ only, as
2312 opposed to @f$p+1@f$ for the actual solution. Even though superconvergence
2313 results for finite elements are also available (e.g. the superconvergent patch
2314 recovery first introduced by Zienkiewicz and Zhu), these are typically limited
2315 to structured meshes and other special cases. For Q3 HDG variables, the scalar
2316 variable and gradient converge at fourth order and the postprocessed scalar
2317 variable at fifth order.
2319 The same convergence rates are observed in 3d.
2321 Q1 elements, adaptive refinement:
2322 cells   dofs    val L2     grad L2    val L2-post
2323 8 144 7.122e+00 1.941e+01 6.102e+00
2324 29 500 3.309e+00 1.023e+01 2.145e+00
2325 113 1792 2.204e+00 1.023e+01 1.912e+00
2326 379 5732 6.085e-01 5.008e+00 2.233e-01
2327 1317 19412 1.543e-01 1.464e+00 4.196e-02
2328 4579 64768 5.058e-02 5.611e-01 9.521e-03
2329 14596 199552 2.129e-02 3.122e-01 4.569e-03
2330 46180 611400 1.033e-02 1.622e-01 1.684e-03
2331144859 1864212 5.007e-03 8.371e-02 7.364e-04
2332451060 5684508 2.518e-03 4.562e-02 3.070e-04
2334 Q1 elements, global refinement:
2335 cells   dofs    val L2     grad L2    val L2-post
2336 8 144 7.122e+00 - 1.941e+01 - 6.102e+00 -
2337 27 432 5.491e+00 0.64 2.184e+01 -0.29 4.448e+00 0.78
2338 64 960 3.646e+00 1.42 1.299e+01 1.81 3.306e+00 1.03
2339 216 3024 1.595e+00 2.04 8.550e+00 1.03 1.441e+00 2.05
2340 512 6912 6.922e-01 2.90 5.306e+00 1.66 2.511e-01 6.07
2341 1728 22464 2.915e-01 2.13 2.490e+00 1.87 8.588e-02 2.65
2342 4096 52224 1.684e-01 1.91 1.453e+00 1.87 4.055e-02 2.61
2343 13824 172800 7.972e-02 1.84 6.861e-01 1.85 1.335e-02 2.74
2344 32768 405504 4.637e-02 1.88 3.984e-01 1.89 5.932e-03 2.82
2345110592 1354752 2.133e-02 1.92 1.830e-01 1.92 1.851e-03 2.87
2347 Q3 elements, global refinement:
2348 cells   dofs    val L2     grad L2    val L2-post
2349 8 576 5.670e+00 - 1.868e+01 - 5.462e+00 -
2350 27 1728 1.048e+00 4.16 6.988e+00 2.42 8.011e-01 4.73
2351 64 3840 2.831e-01 4.55 2.710e+00 3.29 1.363e-01 6.16
2352 216 12096 7.883e-02 3.15 7.721e-01 3.10 2.158e-02 4.55
2353 512 27648 3.642e-02 2.68 3.305e-01 2.95 5.231e-03 4.93
2354 1728 89856 8.546e-03 3.58 7.581e-02 3.63 7.640e-04 4.74
2355 4096 208896 2.598e-03 4.14 2.313e-02 4.13 1.783e-04 5.06
2356 13824 691200 5.314e-04 3.91 4.697e-03 3.93 2.355e-05 4.99
2357 32768 1622016 1.723e-04 3.91 1.517e-03 3.93 5.602e-06 4.99
2358110592 5419008 3.482e-05 3.94 3.055e-04 3.95 7.374e-07 5.00
2361 <a name="Comparisonwithcontinuousfiniteelements"></a><h3>Comparison with continuous finite elements</h3>
2364 <a name="Resultsfor2D"></a><h4>Results for 2D</h4>
2367 The convergence tables verify the expected convergence rates stated in the
2368 introduction. Now, we want to show a quick comparison of the computational
2369 efficiency of the HDG method compared to a usual finite element (continuous
2370 Galerkin) method on the problem of this tutorial. Of course, stability aspects
2371 of the HDG method compared to continuous finite elements for
2372 transport-dominated problems are also important in practice, which is an
2373 aspect not seen on a problem with smooth analytic solution. In the picture
2374 below, we compare the @f$L_2@f$ error as a function of the number of degrees of
2375 freedom (left) and of the computing time spent in the linear solver (right)
2376 for two space dimensions of continuous finite elements (CG) and the hybridized
2377 discontinuous Galerkin method presented in this tutorial. As opposed to the
2378 tutorial where we only use unpreconditioned BiCGStab, the times shown in the
2379 figures below use the Trilinos algebraic multigrid preconditioner in
2384 <table align="center">
2386   <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_plain.png" width="400" alt=""></td>
2387   <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_plain.png" width="400" alt=""></td>
2391 The results in the graphs show that the HDG method is slower than continuous
2392 finite elements at @f$p=1@f$, about equally fast for cubic elements and
2393 faster for sixth order elements. However, we have seen above that the HDG
2394 method actually produces solutions which are more accurate than what is
2395 represented in the original variables. Therefore, in the next two plots below
2396 we instead display the error of the post-processed solution for HDG (denoted
2397 by @f$p=1^*@f$ for example). We now see a clear advantage of HDG for the same
2398 amount of work for both @f$p=3@f$ and @f$p=6@f$, and about the same quality
2401 <table align="center">
2403   <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_post.png" width="400" alt=""></td>
2404   <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_post.png" width="400" alt=""></td>
2408 Since the HDG method actually produces results converging as
2409 @f$h^{p+2}@f$, we should compare it to a continuous Galerkin
2410 solution with the same asymptotic convergence behavior, i.e., FE_Q with degree
2411 @f$p+1@f$. If we do this, we get the convergence curves below. We see that
2412 CG with second order polynomials is again clearly better than HDG with
2413 linears. However, the advantage of HDG for higher orders remains.
2415 <table align="center">
2417   <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_postb.png" width="400" alt=""></td>
2418   <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_postb.png" width="400" alt=""></td>
2422 The results are in line with properties of DG methods in general: Best
2423 performance is typically not achieved for linear elements, but rather at
2424 somewhat higher order, usually around @f$p=3@f$. This is because of a
2425 volume-to-surface effect for discontinuous solutions with too much of the
2426 solution living on the surfaces and hence duplicating work when the elements
2427 are linear. Put in other words, DG methods are often most efficient when used
2428 at relatively high order, despite their focus on a discontinuous (and hence,
2429 seemingly less accurate) representation of solutions.
2431 <a name="Resultsfor3D"></a><h4>Results for 3D</h4>
2434 We now show the same figures in 3D: The first row shows the number of degrees
2435 of freedom and computing time versus the @f$L_2@f$ error in the scalar variable
2436 @f$u@f$ for CG and HDG at order @f$p@f$, the second row shows the
2437 post-processed HDG solution instead of the original one, and the third row
2438 compares the post-processed HDG solution with CG at order @f$p+1@f$. In 3D,
2439 the volume-to-surface effect makes the cost of HDG somewhat higher and the CG
2440 solution is clearly better than HDG for linears by any metric. For cubics, HDG
2441 and CG are of similar quality, whereas HDG is again more efficient for sixth
2442 order polynomials. One can alternatively also use the combination of FE_DGP
2444 polynomials of degree @f$p@f$ but Legendre polynomials of <i>complete</i>
2445 degree @f$p@f$. There are fewer degrees of freedom on the skeleton variable
2446 for FE_FaceP for a given mesh size, but the solution quality (error vs. number
2447 of DoFs) is very similar to the results for FE_FaceQ.
2449 <table align="center">
2451   <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_plain.png" width="400" alt=""></td>
2452   <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_plain.png" width="400" alt=""></td>
2455   <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_post.png" width="400" alt=""></td>
2456   <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_post.png" width="400" alt=""></td>
2459   <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_postb.png" width="400" alt=""></td>
2460   <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_postb.png" width="400" alt=""></td>
2464 One final note on the efficiency comparison: We tried to use general-purpose
2465 sparse matrix structures and similar solvers (optimal AMG preconditioners for
2466 both without particular tuning of the AMG parameters on any of them) to give a
2467 fair picture of the cost versus accuracy of two methods, on a toy example. It
2468 should be noted however that geometric multigrid (GMG) for continuous finite
2469 elements is about a factor four to five faster for @f$p=3@f$ and @f$p=6@f$. As of
2470 2019, optimal-complexity iterative solvers for HDG are still under development
2471 in the research community. Also, there are other implementation aspects for CG
2472 available such as fast matrix-free approaches as shown in @ref step_37 "step-37" that make
2473 higher order continuous elements more competitive. Again, it is not clear to
2474 the authors of the tutorial whether similar improvements could be made for
2475 HDG. We refer to <a href="https://dx.doi.org/10.1137/16M110455X">Kronbichler
2476 and Wall (2018)</a> for a recent efficiency evaluation.
2479 <a name="Possibilitiesforimprovements"></a><h3>Possibilities for improvements</h3>
2482 As already mentioned in the introduction, one possibility is to implement
2483 another post-processing technique as discussed in the literature.
2485 A second item that is not done optimally relates to the performance of this
2486 program, which is of course an issue in practical applications (weighing in
2487 also the better solution quality of (H)DG methods for transport-dominated
2488 problems). Let us look at
2489 the computing time of the tutorial program and the share of the individual
2492 <table align="center" class="doxtable">
2499 <th>Trace reconstruct</th>
2500 <th>Post-processing</th>
2506 <th colspan=
"6">Relative share</th>
2509     <td align="left">2D, Q1, cycle 9, 37,248 dofs</td>
2510     <td align="center">5.34s</td>
2511     <td align="center">0.7%</td>
2512     <td align="center">1.2%</td>
2513     <td align="center">89.5%</td>
2514     <td align="center">0.9%</td>
2515     <td align="center">2.3%</td>
2516     <td align="center">5.4%</td>
2519     <td align="left">2D, Q3, cycle 9, 74,496 dofs</td>
2520     <td align="center">22.2s</td>
2521     <td align="center">0.4%</td>
2522     <td align="center">4.3%</td>
2523     <td align="center">84.1%</td>
2524     <td align="center">4.1%</td>
2525     <td align="center">3.5%</td>
2526     <td align="center">3.6%</td>
2529     <td align="left">3D, Q1, cycle 7, 172,800 dofs</td>
2530     <td align="center">9.06s</td>
2531     <td align="center">3.1%</td>
2532     <td align="center">8.9%</td>
2533     <td align="center">42.7%</td>
2534     <td align="center">7.0%</td>
2535     <td align="center">20.6%</td>
2536     <td align="center">17.7%</td>
2539     <td align="left">3D, Q3, cycle 7, 691,200 dofs</td>
2540     <td align="center">516s</td>
2541     <td align="center">0.6%</td>
2542     <td align="center">34.5%</td>
2543     <td align="center">13.4%</td>
2544     <td align="center">32.8%</td>
2545     <td align="center">17.1%</td>
2546     <td align="center">1.5%</td>
2550 As can be seen from the table, the solver and assembly calls dominate the
2551 runtime of the program. This also gives a clear indication of where
2552 improvements would make the most sense:
2555   <li> Better linear solvers: We use a BiCGStab iterative solver without
2556   preconditioner, where the number of iterations increases with increasing
2557   problem size (the number of iterations for Q1 elements and global
2558   refinement starts at 35 for the small sizes but increases up to 701 for the
2559   largest size). To do better, one could for example use an algebraic
2560   multigrid preconditioner from Trilinos, or some more advanced variants as
2561   the one discussed in <a
2562   href="https://dx.doi.org/10.1137/16M110455X">Kronbichler and Wall
2563   (2018)</a>. For diffusion-dominated problems such as the problem at hand
2564   with finer meshes, such a solver can be designed that uses the matrix-vector
2566   long as we are not working in parallel with MPI. For MPI-parallelized
2569   <li> Speed up assembly by pre-assembling parts that do not change from one
2570   cell to another (those that neither contain variable coefficients nor
2571   mapping-dependent terms).
2575 <a name="PlainProg"></a>
2576 <h1> The plain program</h1>
2577 @include "step-51.cc"