Reference documentation for deal.II version 9.2.0
step-51.h
597  * virtual double value(const Point<dim> &p,
598  * const unsigned int /*component*/ = 0) const override
599  * {
600  * double sum = 0;
601  * for (unsigned int i = 0; i < this->n_source_centers; ++i)
602  * {
603  * const Tensor<1, dim> x_minus_xi = p - this->source_centers[i];
604  * sum +=
605  * std::exp(-x_minus_xi.norm_square() / (this->width * this->width));
606  * }
607  *
608  * return sum /
609  * std::pow(2. * numbers::PI * this->width * this->width, dim / 2.);
610  * }
611  *
612  * virtual Tensor<1, dim>
613  * gradient(const Point<dim> &p,
614  * const unsigned int /*component*/ = 0) const override
615  * {
616  * Tensor<1, dim> sum;
617  * for (unsigned int i = 0; i < this->n_source_centers; ++i)
618  * {
619  * const Tensor<1, dim> x_minus_xi = p - this->source_centers[i];
620  *
621  * sum +=
622  * (-2 / (this->width * this->width) *
623  * std::exp(-x_minus_xi.norm_square() / (this->width * this->width)) *
624  * x_minus_xi);
625  * }
626  *
627  * return sum /
628  * std::pow(2. * numbers::PI * this->width * this->width, dim / 2.);
629  * }
630  * };
631  *
632  *
633  *
634  * @endcode
635  *
636  * This class implements a function where the scalar solution and its negative
637  * gradient are collected together. This function is used when computing the
638  * error of the HDG approximation, and its implementation simply calls the
639  * value and gradient functions of the Solution class.
640  *
641  * @code
642  * template <int dim>
643  * class SolutionAndGradient : public Function<dim>, protected SolutionBase<dim>
644  * {
645  * public:
646  * SolutionAndGradient()
647  * : Function<dim>(dim + 1)
648  * {}
649  *
650  * virtual void vector_value(const Point<dim> &p,
651  * Vector<double> & v) const override
652  * {
653  * AssertDimension(v.size(), dim + 1);
654  * Solution<dim> solution;
655  * Tensor<1, dim> grad = solution.gradient(p);
656  * for (unsigned int d = 0; d < dim; ++d)
657  * v[d] = -grad[d];
658  * v[dim] = solution.value(p);
659  * }
660  * };
661  *
662  *
663  *
664  * @endcode
665  *
666  * Next comes the implementation of the convection velocity. As described in
667  * the introduction, we choose a velocity field that is @f$(y, -x)@f$ in 2D and
668  * @f$(y, -x, 1)@f$ in 3D. This gives a divergence-free velocity field.
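 * Indeed, @f$\nabla \cdot \mathbf{c} = \partial_x y + \partial_y(-x) = 0@f$
 * in 2D, and the additional constant third component in 3D does not change
 * this.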
669  *
670  * @code
671  * template <int dim>
672  * class ConvectionVelocity : public TensorFunction<1, dim>
673  * {
674  * public:
675  * ConvectionVelocity()
676  * : TensorFunction<1, dim>()
677  * {}
678  *
679  * virtual Tensor<1, dim> value(const Point<dim> &p) const override
680  * {
681  * Tensor<1, dim> convection;
682  * switch (dim)
683  * {
684  * case 1:
685  * convection[0] = 1;
686  * break;
687  * case 2:
688  * convection[0] = p[1];
689  * convection[1] = -p[0];
690  * break;
691  * case 3:
692  * convection[0] = p[1];
693  * convection[1] = -p[0];
694  * convection[2] = 1;
695  * break;
696  * default:
697  * Assert(false, ExcNotImplemented());
698  * }
699  * return convection;
700  * }
701  * };
702  *
703  *
704  *
705  * @endcode
706  *
707  * The last function we implement is the right hand side for the
708  * manufactured solution. It is very similar to @ref step_7 "step-7", with the exception
709  * that we now have a convection term instead of the reaction term. Since
710  * the velocity field is incompressible, i.e., @f$\nabla \cdot \mathbf{c} =
711  * 0@f$, the advection term simply reads @f$\mathbf{c} \cdot \nabla u@f$.
712  *
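 * To make the expression assembled in the code below easier to follow: up to
 * the common normalization factor, the exact solution is the sum of Gaussians
 * @f$\sum_i \exp(-\|\mathbf{x}-\mathbf{x}_i\|^2/w^2)@f$, and a short
 * computation of @f$f = -\Delta u + \mathbf{c}\cdot\nabla u@f$ gives
 * @f[
 * f(\mathbf{x}) = \sum_i
 * \frac{2\,\mathrm{dim} - 2\,\mathbf{c}\cdot(\mathbf{x}-\mathbf{x}_i)
 *       - 4\|\mathbf{x}-\mathbf{x}_i\|^2/w^2}{w^2}
 * \,\exp\!\left(-\frac{\|\mathbf{x}-\mathbf{x}_i\|^2}{w^2}\right),
 * @f]
 * which is exactly the term accumulated in the loop of <code>value()</code>
 * below.
 *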
713  * @code
714  * template <int dim>
715  * class RightHandSide : public Function<dim>, protected SolutionBase<dim>
716  * {
717  * public:
718  * virtual double value(const Point<dim> &p,
719  * const unsigned int /*component*/ = 0) const override
720  * {
721  * ConvectionVelocity<dim> convection_velocity;
722  * Tensor<1, dim> convection = convection_velocity.value(p);
723  * double sum = 0;
724  * for (unsigned int i = 0; i < this->n_source_centers; ++i)
725  * {
726  * const Tensor<1, dim> x_minus_xi = p - this->source_centers[i];
727  *
728  * sum +=
729  * ((2 * dim - 2 * convection * x_minus_xi -
730  * 4 * x_minus_xi.norm_square() / (this->width * this->width)) /
731  * (this->width * this->width) *
732  * std::exp(-x_minus_xi.norm_square() / (this->width * this->width)));
733  * }
734  *
735  * return sum /
736  * std::pow(2. * numbers::PI * this->width * this->width, dim / 2.);
737  * }
738  * };
739  *
740  *
741  *
742  * @endcode
743  *
744  *
745  * <a name="TheHDGsolverclass"></a>
746  * <h3>The HDG solver class</h3>
747  *
748 
749  *
750  * The HDG solution procedure follows closely that of @ref step_7 "step-7". The major
751  * difference is the use of three different sets of DoFHandler and FE
752  * objects, along with the ChunkSparseMatrix and the corresponding solution
753  * vectors. We also use WorkStream to enable a multithreaded local solution
754  * process which exploits the embarrassingly parallel nature of the local
755  * solver. For WorkStream, we define the local operations on a cell and a
756  * copy function into the global matrix and vector. We do this both for the
757  * assembly (which is run twice, once when we generate the system matrix and
758  * once when we compute the element-interior solutions from the skeleton
759  * values) and for the postprocessing where we extract a solution that
760  * converges at higher order.
761  *
762  * @code
763  * template <int dim>
764  * class HDG
765  * {
766  * public:
767  * enum RefinementMode
768  * {
769  * global_refinement,
770  * adaptive_refinement
771  * };
772  *
773  * HDG(const unsigned int degree, const RefinementMode refinement_mode);
774  * void run();
775  *
776  * private:
777  * void setup_system();
778  * void assemble_system(const bool reconstruct_trace = false);
779  * void solve();
780  * void postprocess();
781  * void refine_grid(const unsigned int cycle);
782  * void output_results(const unsigned int cycle);
783  *
784  * @endcode
785  *
786  * Data for the assembly and solution of the primal variables.
787  *
788  * @code
789  * struct PerTaskData;
790  * struct ScratchData;
791  *
792  * @endcode
793  *
794  * Post-processing the solution to obtain @f$u^*@f$ is an element-by-element
795  * procedure; as such, we do not need to assemble any global data and do
796  * not declare any 'task data' for WorkStream to use.
797  *
798  * @code
799  * struct PostProcessScratchData;
800  *
801  * @endcode
802  *
803  * The following three functions are used by WorkStream to do the actual
804  * work of the program.
805  *
806  * @code
807  * void assemble_system_one_cell(
808  * const typename DoFHandler<dim>::active_cell_iterator &cell,
809  * ScratchData & scratch,
810  * PerTaskData & task_data);
811  *
812  * void copy_local_to_global(const PerTaskData &data);
813  *
814  * void postprocess_one_cell(
815  * const typename DoFHandler<dim>::active_cell_iterator &cell,
816  * PostProcessScratchData & scratch,
817  * unsigned int & empty_data);
818  *
819  *
820  * Triangulation<dim> triangulation;
821  *
822  * @endcode
823  *
824  * The 'local' solutions are interior to each element. These
825  * represent the primal solution field @f$u@f$ as well as the auxiliary
826  * field @f$\mathbf{q}@f$.
827  *
828  * @code
829  * FESystem<dim> fe_local;
830  * DoFHandler<dim> dof_handler_local;
831  * Vector<double> solution_local;
832  *
833  * @endcode
834  *
835  * The new finite element type and corresponding <code>DoFHandler</code> are
836  * used for the global skeleton solution that couples the element-level
837  * local solutions.
838  *
839  * @code
840  * FE_FaceQ<dim> fe;
841  * DoFHandler<dim> dof_handler;
842  * Vector<double> solution;
843  * Vector<double> system_rhs;
844  *
845  * @endcode
846  *
847  * As stated in the introduction, HDG solutions can be post-processed to
848  * attain superconvergence rates of @f$\mathcal{O}(h^{p+2})@f$. The
849  * post-processed solution is a discontinuous finite element solution
850  * representing the primal variable on the interior of each cell. We define
851  * a FE type of degree @f$p+1@f$ to represent this post-processed solution,
852  * which we only use for output after constructing it.
853  *
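 * For reference, the convergence rates one expects from the HDG analysis for
 * this setup (the post-processed rate is the one quoted above) are
 * @f[
 * \|u-u_h\|_{L^2} = \mathcal O(h^{p+1}), \qquad
 * \|\mathbf{q}-\mathbf{q}_h\|_{L^2} = \mathcal O(h^{p+1}), \qquad
 * \|u-u_h^*\|_{L^2} = \mathcal O(h^{p+2}).
 * @f]
 *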
854  * @code
855  * FE_DGQ<dim> fe_u_post;
856  * DoFHandler<dim> dof_handler_u_post;
857  * Vector<double> solution_u_post;
858  *
859  * @endcode
860  *
861  * The degrees of freedom corresponding to the skeleton strongly enforce
862  * Dirichlet boundary conditions, just as in a continuous Galerkin finite
863  * element method. We can enforce the boundary conditions in an analogous
864  * manner via an AffineConstraints object. In addition, hanging nodes are
865  * handled in the same way as for continuous finite elements: For the face
866  * elements which only define degrees of freedom on the face, this process
867  * sets the solution on the refined side to coincide with the
868  * representation on the coarse side.
869  *
870 
871  *
872  * Note that for HDG, the elimination of hanging nodes is not the only
873  * possibility &mdash; in terms of the HDG theory, one could also use the
874  * unknowns from the refined side and express the local solution on the
875  * coarse side through the trace values on the refined side. However, such
876  * a setup is not as easily implemented in terms of deal.II loops and is not
877  * analyzed further here.
878  *
879  * @code
880  * AffineConstraints<double> constraints;
881  *
882  * @endcode
883  *
884  * The usage of the ChunkSparseMatrix class is similar to the usual sparse
885  * matrices: You need a sparsity pattern of type ChunkSparsityPattern and
886  * the actual matrix object. When creating the sparsity pattern, we just
887  * have to additionally pass the size of local blocks.
888  *
889  * @code
890  * ChunkSparsityPattern sparsity_pattern;
891  * ChunkSparseMatrix<double> system_matrix;
892  *
893  * @endcode
894  *
895  * Same as @ref step_7 "step-7":
896  *
897  * @code
898  * const RefinementMode refinement_mode;
899  * ConvergenceTable convergence_table;
900  * };
901  *
902  * @endcode
903  *
904  *
905  * <a name="TheHDGclassimplementation"></a>
906  * <h3>The HDG class implementation</h3>
907  *
908 
909  *
910  *
911  * <a name="Constructor"></a>
912  * <h4>Constructor</h4>
913  * The constructor is similar to those in other examples, with the exception
914  * of handling multiple DoFHandler and FiniteElement objects. Note that we
915  * create a system of finite elements for the local DG part, including the
916  * gradient/flux part and the scalar part.
917  *
918  * @code
919  * template <int dim>
920  * HDG<dim>::HDG(const unsigned int degree, const RefinementMode refinement_mode)
921  * : fe_local(FE_DGQ<dim>(degree), dim, FE_DGQ<dim>(degree), 1)
922  * , dof_handler_local(triangulation)
923  * , fe(degree)
924  * , dof_handler(triangulation)
925  * , fe_u_post(degree + 1)
926  * , dof_handler_u_post(triangulation)
927  * , refinement_mode(refinement_mode)
928  * {}
929  *
930  *
931  *
932  * @endcode
933  *
934  *
935  * <a name="HDGsetup_system"></a>
936  * <h4>HDG::setup_system</h4>
937  * The system for an HDG solution is set up in an analogous manner to most
938  * of the other tutorial programs. We are careful to distribute dofs with
939  * all of our DoFHandler objects. The @p solution and @p system_matrix
940  * objects go with the global skeleton solution.
941  *
942  * @code
943  * template <int dim>
944  * void HDG<dim>::setup_system()
945  * {
946  * dof_handler_local.distribute_dofs(fe_local);
947  * dof_handler.distribute_dofs(fe);
948  * dof_handler_u_post.distribute_dofs(fe_u_post);
949  *
950  * std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
951  * << std::endl;
952  *
953  * solution.reinit(dof_handler.n_dofs());
954  * system_rhs.reinit(dof_handler.n_dofs());
955  *
956  * solution_local.reinit(dof_handler_local.n_dofs());
957  * solution_u_post.reinit(dof_handler_u_post.n_dofs());
958  *
959  * constraints.clear();
960  * DoFTools::make_hanging_node_constraints(dof_handler, constraints);
961  * std::map<types::boundary_id, const Function<dim> *> boundary_functions;
962  * Solution<dim> solution_function;
963  * boundary_functions[0] = &solution_function;
964  * VectorTools::project_boundary_values(dof_handler,
965  * boundary_functions,
966  * QGauss<dim - 1>(fe.degree + 1),
967  * constraints);
968  * constraints.close();
969  *
970  * @endcode
971  *
972  * When creating the chunk sparsity pattern, we first create the usual
973  * dynamic sparsity pattern and then set the chunk size, which is equal
974  * to the number of dofs on a face, when copying this into the final
975  * sparsity pattern.
976  *
977  * @code
978  * {
979  * DynamicSparsityPattern dsp(dof_handler.n_dofs());
980  * DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
981  * sparsity_pattern.copy_from(dsp, fe.dofs_per_face);
982  * }
983  * system_matrix.reinit(sparsity_pattern);
984  * }
985  *
986  *
987  *
988  * @endcode
989  *
990  *
991  * <a name="HDGPerTaskData"></a>
992  * <h4>HDG::PerTaskData</h4>
993  * Next comes the definition of the local data structures for the parallel
994  * assembly. The first structure @p PerTaskData contains the local vector
995  * and matrix that are written into the global matrix, whereas the
996  * ScratchData contains all data that we need for the local assembly. There
997  * is one variable worth noting here, namely the boolean variable @p
998  * trace_reconstruct. As mentioned in the introduction, we solve the HDG
999  * system in two steps. First, we create a linear system for the skeleton
1000  * system where we condense the local part into it via the Schur complement
1001  * @f$D-CA^{-1}B@f$. Then, we solve for the local part using the skeleton
1002  * solution. For these two steps, we need the same matrices on the elements
1003  * twice, which we want to compute by two assembly steps. Since most of the
1004  * code is similar, we do this with the same function but only switch
1005  * between the two based on a flag that we set when starting the
1006  * assembly. Since we need to pass this information on to the local worker
1007  * routines, we store it once in the task data.
1008  *
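 * Schematically, writing the coupled system in block form with the interior
 * unknowns @f$U@f$ and the skeleton unknowns @f$\Lambda@f$ (this notation is
 * only for illustration),
 * @f[
 * \begin{pmatrix} A & B \\ C & D \end{pmatrix}
 * \begin{pmatrix} U \\ \Lambda \end{pmatrix}
 * =
 * \begin{pmatrix} F \\ G \end{pmatrix},
 * @f]
 * eliminating @f$U = A^{-1}(F - B\Lambda)@f$ yields the condensed system
 * @f$(D - CA^{-1}B)\,\Lambda = G - CA^{-1}F@f$, which is what the first
 * assembly pass builds; the second pass then recovers @f$U@f$ element by
 * element once @f$\Lambda@f$ is known.
 *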
1009  * @code
1010  * template <int dim>
1011  * struct HDG<dim>::PerTaskData
1012  * {
1013  * FullMatrix<double> cell_matrix;
1014  * Vector<double> cell_vector;
1015  * std::vector<types::global_dof_index> dof_indices;
1016  *
1017  * bool trace_reconstruct;
1018  *
1019  * PerTaskData(const unsigned int n_dofs, const bool trace_reconstruct)
1020  * : cell_matrix(n_dofs, n_dofs)
1021  * , cell_vector(n_dofs)
1022  * , dof_indices(n_dofs)
1023  * , trace_reconstruct(trace_reconstruct)
1024  * {}
1025  * };
1026  *
1027  *
1028  *
1029  * @endcode
1030  *
1031  *
1032  * <a name="HDGScratchData"></a>
1033  * <h4>HDG::ScratchData</h4>
1034  * @p ScratchData contains persistent data for each
1035  * thread within WorkStream. The FEValues, matrix,
1036  * and vector objects should be familiar by now. There are two objects that
1037  * need to be discussed: `std::vector<std::vector<unsigned int> >
1038  * fe_local_support_on_face` and `std::vector<std::vector<unsigned int> >
1039  * fe_support_on_face`. These are used to indicate whether or not the finite
1040  * elements chosen have support (non-zero values) on a given face of the
1041  * reference cell for the local part associated to @p fe_local and the
1042  * skeleton part @p fe. We extract this information in the
1043  * constructor and store it once for all cells that we work on. Had we not
1044  * stored this information, we would be forced to assemble a large number of
1045  * zero terms on each cell, which would significantly slow the program.
1046  *
1047  * @code
1048  * template <int dim>
1049  * struct HDG<dim>::ScratchData
1050  * {
1051  * FEValues<dim> fe_values_local;
1052  * FEFaceValues<dim> fe_face_values_local;
1053  * FEFaceValues<dim> fe_face_values;
1054  *
1055  * FullMatrix<double> ll_matrix;
1056  * FullMatrix<double> lf_matrix;
1057  * FullMatrix<double> fl_matrix;
1058  * FullMatrix<double> tmp_matrix;
1059  * Vector<double> l_rhs;
1060  * Vector<double> tmp_rhs;
1061  *
1062  * std::vector<Tensor<1, dim>> q_phi;
1063  * std::vector<double> q_phi_div;
1064  * std::vector<double> u_phi;
1065  * std::vector<Tensor<1, dim>> u_phi_grad;
1066  * std::vector<double> tr_phi;
1067  * std::vector<double> trace_values;
1068  *
1069  * std::vector<std::vector<unsigned int>> fe_local_support_on_face;
1070  * std::vector<std::vector<unsigned int>> fe_support_on_face;
1071  *
1072  * ConvectionVelocity<dim> convection_velocity;
1073  * RightHandSide<dim> right_hand_side;
1074  * const Solution<dim> exact_solution;
1075  *
1076  * ScratchData(const FiniteElement<dim> &fe,
1077  * const FiniteElement<dim> &fe_local,
1078  * const QGauss<dim> & quadrature_formula,
1079  * const QGauss<dim - 1> & face_quadrature_formula,
1080  * const UpdateFlags local_flags,
1081  * const UpdateFlags local_face_flags,
1082  * const UpdateFlags flags)
1083  * : fe_values_local(fe_local, quadrature_formula, local_flags)
1084  * , fe_face_values_local(fe_local,
1085  * face_quadrature_formula,
1086  * local_face_flags)
1087  * , fe_face_values(fe, face_quadrature_formula, flags)
1088  * , ll_matrix(fe_local.dofs_per_cell, fe_local.dofs_per_cell)
1089  * , lf_matrix(fe_local.dofs_per_cell, fe.dofs_per_cell)
1090  * , fl_matrix(fe.dofs_per_cell, fe_local.dofs_per_cell)
1091  * , tmp_matrix(fe.dofs_per_cell, fe_local.dofs_per_cell)
1092  * , l_rhs(fe_local.dofs_per_cell)
1093  * , tmp_rhs(fe_local.dofs_per_cell)
1094  * , q_phi(fe_local.dofs_per_cell)
1095  * , q_phi_div(fe_local.dofs_per_cell)
1096  * , u_phi(fe_local.dofs_per_cell)
1097  * , u_phi_grad(fe_local.dofs_per_cell)
1098  * , tr_phi(fe.dofs_per_cell)
1099  * , trace_values(face_quadrature_formula.size())
1100  * , fe_local_support_on_face(GeometryInfo<dim>::faces_per_cell)
1101  * , fe_support_on_face(GeometryInfo<dim>::faces_per_cell)
1102  * , exact_solution()
1103  * {
1104  * for (unsigned int face_no : GeometryInfo<dim>::face_indices())
1105  * for (unsigned int i = 0; i < fe_local.dofs_per_cell; ++i)
1106  * {
1107  * if (fe_local.has_support_on_face(i, face_no))
1108  * fe_local_support_on_face[face_no].push_back(i);
1109  * }
1110  *
1111  * for (unsigned int face_no : GeometryInfo<dim>::face_indices())
1112  * for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
1113  * {
1114  * if (fe.has_support_on_face(i, face_no))
1115  * fe_support_on_face[face_no].push_back(i);
1116  * }
1117  * }
1118  *
1119  * ScratchData(const ScratchData &sd)
1120  * : fe_values_local(sd.fe_values_local.get_fe(),
1121  * sd.fe_values_local.get_quadrature(),
1122  * sd.fe_values_local.get_update_flags())
1123  * , fe_face_values_local(sd.fe_face_values_local.get_fe(),
1124  * sd.fe_face_values_local.get_quadrature(),
1125  * sd.fe_face_values_local.get_update_flags())
1126  * , fe_face_values(sd.fe_face_values.get_fe(),
1127  * sd.fe_face_values.get_quadrature(),
1128  * sd.fe_face_values.get_update_flags())
1129  * , ll_matrix(sd.ll_matrix)
1130  * , lf_matrix(sd.lf_matrix)
1131  * , fl_matrix(sd.fl_matrix)
1132  * , tmp_matrix(sd.tmp_matrix)
1133  * , l_rhs(sd.l_rhs)
1134  * , tmp_rhs(sd.tmp_rhs)
1135  * , q_phi(sd.q_phi)
1136  * , q_phi_div(sd.q_phi_div)
1137  * , u_phi(sd.u_phi)
1138  * , u_phi_grad(sd.u_phi_grad)
1139  * , tr_phi(sd.tr_phi)
1140  * , trace_values(sd.trace_values)
1141  * , fe_local_support_on_face(sd.fe_local_support_on_face)
1142  * , fe_support_on_face(sd.fe_support_on_face)
1143  * , exact_solution()
1144  * {}
1145  * };
1146  *
1147  *
1148  *
1149  * @endcode
1150  *
1151  *
1152  * <a name="HDGPostProcessScratchData"></a>
1153  * <h4>HDG::PostProcessScratchData</h4>
1154  * @p PostProcessScratchData contains the data used by WorkStream
1155  * when post-processing the local solution @f$u^*@f$. It is similar to, but
1156  * much simpler than, @p ScratchData.
1157  *
1158  * @code
1159  * template <int dim>
1160  * struct HDG<dim>::PostProcessScratchData
1161  * {
1162  * FEValues<dim> fe_values_local;
1163  * FEValues<dim> fe_values;
1164  *
1165  * std::vector<double> u_values;
1166  * std::vector<Tensor<1, dim>> u_gradients;
1167  * FullMatrix<double> cell_matrix;
1168  *
1169  * Vector<double> cell_rhs;
1170  * Vector<double> cell_sol;
1171  *
1172  * PostProcessScratchData(const FiniteElement<dim> &fe,
1173  * const FiniteElement<dim> &fe_local,
1174  * const QGauss<dim> & quadrature_formula,
1175  * const UpdateFlags local_flags,
1176  * const UpdateFlags flags)
1177  * : fe_values_local(fe_local, quadrature_formula, local_flags)
1178  * , fe_values(fe, quadrature_formula, flags)
1179  * , u_values(quadrature_formula.size())
1180  * , u_gradients(quadrature_formula.size())
1181  * , cell_matrix(fe.dofs_per_cell, fe.dofs_per_cell)
1182  * , cell_rhs(fe.dofs_per_cell)
1183  * , cell_sol(fe.dofs_per_cell)
1184  * {}
1185  *
1186  * PostProcessScratchData(const PostProcessScratchData &sd)
1187  * : fe_values_local(sd.fe_values_local.get_fe(),
1188  * sd.fe_values_local.get_quadrature(),
1189  * sd.fe_values_local.get_update_flags())
1190  * , fe_values(sd.fe_values.get_fe(),
1191  * sd.fe_values.get_quadrature(),
1192  * sd.fe_values.get_update_flags())
1193  * , u_values(sd.u_values)
1194  * , u_gradients(sd.u_gradients)
1195  * , cell_matrix(sd.cell_matrix)
1196  * , cell_rhs(sd.cell_rhs)
1197  * , cell_sol(sd.cell_sol)
1198  * {}
1199  * };
1200  *
1201  *
1202  *
1203  * @endcode
1204  *
1205  *
1206  * <a name="HDGassemble_system"></a>
1207  * <h4>HDG::assemble_system</h4>
1208  * The @p assemble_system function is similar to the one in @ref step_32 "step-32", where
1209  * the quadrature formula and the update flags are set up, and then
1210  * <code>WorkStream</code> is used to do the work in a multi-threaded
1211  * manner. The @p trace_reconstruct input parameter is used to decide
1212  * whether we are solving for the global skeleton solution (false) or the
1213  * local solution (true).
1214  *
1215 
1216  *
1217  * One thing worth noting for the multi-threaded execution of assembly is
1218  * the fact that the local computations in `assemble_system_one_cell()` call
1219  * into BLAS and LAPACK functions if those are available in deal.II. Thus,
1220  * the underlying BLAS/LAPACK library must support calls from multiple
1221  * threads at the same time. Most implementations do support this, but some
1222  * libraries need to be built in a specific way to avoid problems. For
1223  * example, OpenBLAS compiled without multithreading inside the BLAS/LAPACK
1224  * calls needs to be built with a flag called `USE_LOCKING` set to true.
1225  *
1226  * @code
1227  * template <int dim>
1228  * void HDG<dim>::assemble_system(const bool trace_reconstruct)
1229  * {
1230  * const QGauss<dim> quadrature_formula(fe.degree + 1);
1231  * const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
1232  *
1233  * const UpdateFlags local_flags(update_values | update_gradients |
1234  * update_JxW_values | update_quadrature_points);
1235  *
1236  * const UpdateFlags local_face_flags(update_values);
1237  *
1238  * const UpdateFlags flags(update_values | update_normal_vectors |
1239  * update_quadrature_points | update_JxW_values);
1240  *
1241  * PerTaskData task_data(fe.dofs_per_cell, trace_reconstruct);
1242  * ScratchData scratch(fe,
1243  * fe_local,
1244  * quadrature_formula,
1245  * face_quadrature_formula,
1246  * local_flags,
1247  * local_face_flags,
1248  * flags);
1249  *
1250  * WorkStream::run(dof_handler.begin_active(),
1251  * dof_handler.end(),
1252  * *this,
1253  * &HDG<dim>::assemble_system_one_cell,
1254  * &HDG<dim>::copy_local_to_global,
1255  * scratch,
1256  * task_data);
1257  * }
1258  *
1259  *
1260  *
1261  * @endcode
1262  *
1263  *
1264  * <a name="HDGassemble_system_one_cell"></a>
1265  * <h4>HDG::assemble_system_one_cell</h4>
1266  * The real work of the HDG program is done by @p assemble_system_one_cell.
1267  * Assembling the local matrices @f$A, B, C@f$ is done here, along with the
1268  * local contributions of the global matrix @f$D@f$.
1269  *
1270  * @code
1271  * template <int dim>
1272  * void HDG<dim>::assemble_system_one_cell(
1273  * const typename DoFHandler<dim>::active_cell_iterator &cell,
1274  * ScratchData & scratch,
1275  * PerTaskData & task_data)
1276  * {
1277  * @endcode
1278  *
1279  * Construct iterator for dof_handler_local for FEValues reinit function.
1280  *
1281  * @code
1282  * typename DoFHandler<dim>::active_cell_iterator loc_cell(&triangulation,
1283  * cell->level(),
1284  * cell->index(),
1285  * &dof_handler_local);
1286  *
1287  * const unsigned int n_q_points =
1288  * scratch.fe_values_local.get_quadrature().size();
1289  * const unsigned int n_face_q_points =
1290  * scratch.fe_face_values_local.get_quadrature().size();
1291  *
1292  * const unsigned int loc_dofs_per_cell =
1293  * scratch.fe_values_local.get_fe().dofs_per_cell;
1294  *
1295  * const FEValuesExtractors::Vector fluxes(0);
1296  * const FEValuesExtractors::Scalar scalar(dim);
1297  *
1298  * scratch.ll_matrix = 0;
1299  * scratch.l_rhs = 0;
1300  * if (!task_data.trace_reconstruct)
1301  * {
1302  * scratch.lf_matrix = 0;
1303  * scratch.fl_matrix = 0;
1304  * task_data.cell_matrix = 0;
1305  * task_data.cell_vector = 0;
1306  * }
1307  * scratch.fe_values_local.reinit(loc_cell);
1308  *
1309  * @endcode
1310  *
1311  * We first compute the cell-interior contribution to @p ll_matrix matrix
1312  * (referred to as matrix @f$A@f$ in the introduction) corresponding to
1313  * local-local coupling, as well as the local right-hand-side vector. We
1314  * store the values at each quadrature point for the basis functions, the
1315  * right-hand-side value, and the convection velocity, in order to have
1316  * quick access to these fields.
1317  *
1318  * @code
1319  * for (unsigned int q = 0; q < n_q_points; ++q)
1320  * {
1321  * const double rhs_value = scratch.right_hand_side.value(
1322  * scratch.fe_values_local.quadrature_point(q));
1323  * const Tensor<1, dim> convection = scratch.convection_velocity.value(
1324  * scratch.fe_values_local.quadrature_point(q));
1325  * const double JxW = scratch.fe_values_local.JxW(q);
1326  * for (unsigned int k = 0; k < loc_dofs_per_cell; ++k)
1327  * {
1328  * scratch.q_phi[k] = scratch.fe_values_local[fluxes].value(k, q);
1329  * scratch.q_phi_div[k] =
1330  * scratch.fe_values_local[fluxes].divergence(k, q);
1331  * scratch.u_phi[k] = scratch.fe_values_local[scalar].value(k, q);
1332  * scratch.u_phi_grad[k] =
1333  * scratch.fe_values_local[scalar].gradient(k, q);
1334  * }
1335  * for (unsigned int i = 0; i < loc_dofs_per_cell; ++i)
1336  * {
1337  * for (unsigned int j = 0; j < loc_dofs_per_cell; ++j)
1338  * scratch.ll_matrix(i, j) +=
1339  * (scratch.q_phi[i] * scratch.q_phi[j] -
1340  * scratch.q_phi_div[i] * scratch.u_phi[j] +
1341  * scratch.u_phi[i] * scratch.q_phi_div[j] -
1342  * (scratch.u_phi_grad[i] * convection) * scratch.u_phi[j]) *
1343  * JxW;
1344  * scratch.l_rhs(i) += scratch.u_phi[i] * rhs_value * JxW;
1345  * }
1346  * }
1347  *
1348  * @endcode
1349  *
1350  * Face terms are assembled on all faces of all elements. This is in
1351  * contrast to more traditional DG methods, where each face is only visited
1352  * once in the assembly procedure.
1353  *
1354  * @code
1355  * for (unsigned int face_no : GeometryInfo<dim>::face_indices())
1356  * {
1357  * scratch.fe_face_values_local.reinit(loc_cell, face_no);
1358  * scratch.fe_face_values.reinit(cell, face_no);
1359  *
1360  * @endcode
1361  *
1362  * The already obtained @f$\hat{u}@f$ values are needed when solving for the
1363  * local variables.
1364  *
1365  * @code
1366  * if (task_data.trace_reconstruct)
1367  * scratch.fe_face_values.get_function_values(solution,
1368  * scratch.trace_values);
1369  *
1370  * for (unsigned int q = 0; q < n_face_q_points; ++q)
1371  * {
1372  * const double JxW = scratch.fe_face_values.JxW(q);
1373  * const Point<dim> quadrature_point =
1374  * scratch.fe_face_values.quadrature_point(q);
1375  * const Tensor<1, dim> normal =
1376  * scratch.fe_face_values.normal_vector(q);
1377  * const Tensor<1, dim> convection =
1378  * scratch.convection_velocity.value(quadrature_point);
1379  *
1380  * @endcode
1381  *
1382  * Here we compute the stabilization parameter discussed in the
1383  * introduction: since the diffusion is one and the diffusion
1384  * length scale is set to 1/5, it simply results in a contribution
1385  * of 5 for the diffusion part and the magnitude of convection
1386  * through the element boundary in a centered scheme for the
1387  * convection part.
1388  *
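 * In formulas, with the diffusion coefficient @f$\kappa = 1@f$ and the
 * diffusion length scale @f$\ell = \frac{1}{5}@f$, this amounts to
 * @f[
 * \tau = \frac{\kappa}{\ell} + |\mathbf{c} \cdot \mathbf{n}|
 *      = 5 + |\mathbf{c} \cdot \mathbf{n}|,
 * @f]
 * which is the expression evaluated in the next line of code.
 *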
1389  * @code
1390  * const double tau_stab = (5. + std::abs(convection * normal));
1391  *
1392  * @endcode
1393  *
1394  * We store the non-zero flux and scalar values, making use of the
1395  * support_on_face information we created in @p ScratchData.
1396  *
1397  * @code
1398  * for (unsigned int k = 0;
1399  * k < scratch.fe_local_support_on_face[face_no].size();
1400  * ++k)
1401  * {
1402  * const unsigned int kk =
1403  * scratch.fe_local_support_on_face[face_no][k];
1404  * scratch.q_phi[k] =
1405  * scratch.fe_face_values_local[fluxes].value(kk, q);
1406  * scratch.u_phi[k] =
1407  * scratch.fe_face_values_local[scalar].value(kk, q);
1408  * }
1409  *
1410  * @endcode
1411  *
1412  * When @p trace_reconstruct=false, we are preparing to assemble the
1413  * system for the skeleton variable @f$\hat{u}@f$. If this is the case,
1414  * we must assemble all local matrices associated with the problem:
1415  * local-local, local-face, face-local, and face-face. The
1416  * face-face matrix is stored as @p PerTaskData::cell_matrix, so that
1417  * it can be assembled into the global system by @p
1418  * copy_local_to_global.
1419  *
1420  * @code
1421  * if (!task_data.trace_reconstruct)
1422  * {
1423  * for (unsigned int k = 0;
1424  * k < scratch.fe_support_on_face[face_no].size();
1425  * ++k)
1426  * scratch.tr_phi[k] = scratch.fe_face_values.shape_value(
1427  * scratch.fe_support_on_face[face_no][k], q);
1428  * for (unsigned int i = 0;
1429  * i < scratch.fe_local_support_on_face[face_no].size();
1430  * ++i)
1431  * for (unsigned int j = 0;
1432  * j < scratch.fe_support_on_face[face_no].size();
1433  * ++j)
1434  * {
1435  * const unsigned int ii =
1436  * scratch.fe_local_support_on_face[face_no][i];
1437  * const unsigned int jj =
1438  * scratch.fe_support_on_face[face_no][j];
1439  * scratch.lf_matrix(ii, jj) +=
1440  * ((scratch.q_phi[i] * normal +
1441  * (convection * normal - tau_stab) * scratch.u_phi[i]) *
1442  * scratch.tr_phi[j]) *
1443  * JxW;
1444  *
1445  * @endcode
1446  *
1447  * Note the sign of the face-local matrix @p fl_matrix. We negate
1448  * the sign during assembly here so that we can use the
1449  * FullMatrix::mmult with addition when computing the
1450  * Schur complement.
1451  *
1452  * @code
1453  * scratch.fl_matrix(jj, ii) -=
1454  * ((scratch.q_phi[i] * normal +
1455  * tau_stab * scratch.u_phi[i]) *
1456  * scratch.tr_phi[j]) *
1457  * JxW;
1458  * }
1459  *
1460  * for (unsigned int i = 0;
1461  * i < scratch.fe_support_on_face[face_no].size();
1462  * ++i)
1463  * for (unsigned int j = 0;
1464  * j < scratch.fe_support_on_face[face_no].size();
1465  * ++j)
1466  * {
1467  * const unsigned int ii =
1468  * scratch.fe_support_on_face[face_no][i];
1469  * const unsigned int jj =
1470  * scratch.fe_support_on_face[face_no][j];
1471  * task_data.cell_matrix(ii, jj) +=
1472  * ((convection * normal - tau_stab) * scratch.tr_phi[i] *
1473  * scratch.tr_phi[j]) *
1474  * JxW;
1475  * }
1476  *
1477  * if (cell->face(face_no)->at_boundary() &&
1478  * (cell->face(face_no)->boundary_id() == 1))
1479  * {
1480  * const double neumann_value =
1481  * -scratch.exact_solution.gradient(quadrature_point) *
1482  * normal +
1483  * convection * normal *
1484  * scratch.exact_solution.value(quadrature_point);
1485  * for (unsigned int i = 0;
1486  * i < scratch.fe_support_on_face[face_no].size();
1487  * ++i)
1488  * {
1489  * const unsigned int ii =
1490  * scratch.fe_support_on_face[face_no][i];
1491  * task_data.cell_vector(ii) +=
1492  * scratch.tr_phi[i] * neumann_value * JxW;
1493  * }
1494  * }
1495  * }
1496  *
1497  * @endcode
1498  *
1499  * This last term adds the contribution of the term @f$\left<w,\tau
1500  * u_h\right>_{\partial \mathcal T}@f$ to the local matrix. As opposed
1501  * to the face matrices above, we need it in both assembly stages.
1502  *
1503  * @code
1504  * for (unsigned int i = 0;
1505  * i < scratch.fe_local_support_on_face[face_no].size();
1506  * ++i)
1507  * for (unsigned int j = 0;
1508  * j < scratch.fe_local_support_on_face[face_no].size();
1509  * ++j)
1510  * {
1511  * const unsigned int ii =
1512  * scratch.fe_local_support_on_face[face_no][i];
1513  * const unsigned int jj =
1514  * scratch.fe_local_support_on_face[face_no][j];
1515  * scratch.ll_matrix(ii, jj) +=
1516  * tau_stab * scratch.u_phi[i] * scratch.u_phi[j] * JxW;
1517  * }
1518  *
1519  * @endcode
1520  *
1521  * When @p trace_reconstruct=true, we are solving for the local
1522  * solutions on an element by element basis. The local
1523  * right-hand-side is calculated by replacing the basis functions @p
1524  * tr_phi in the @p lf_matrix computation by the computed values @p
1525  * trace_values. Of course, the sign of the matrix is now minus
1526  * since we have moved everything to the other side of the equation.
1527  *
1528  * @code
1529  * if (task_data.trace_reconstruct)
1530  * for (unsigned int i = 0;
1531  * i < scratch.fe_local_support_on_face[face_no].size();
1532  * ++i)
1533  * {
1534  * const unsigned int ii =
1535  * scratch.fe_local_support_on_face[face_no][i];
1536  * scratch.l_rhs(ii) -=
1537  * (scratch.q_phi[i] * normal +
1538  * scratch.u_phi[i] * (convection * normal - tau_stab)) *
1539  * scratch.trace_values[q] * JxW;
1540  * }
1541  * }
1542  * }
1543  *
1544  * @endcode
1545  *
1546  * Once assembly of all of the local contributions is complete, we must
1547  * either: (1) assemble the global system, or (2) compute the local solution
1548  * values and save them. In either case, the first step is to invert the
1549  * local-local matrix.
1550  *
1551  * @code
1552  * scratch.ll_matrix.gauss_jordan();
1553  *
1554  * @endcode
1555  *
1556  * For (1), we compute the Schur complement and add it to the @p
1557  * cell_matrix, matrix @f$D@f$ in the introduction.
1558  *
1559  * @code
1560  * if (task_data.trace_reconstruct == false)
1561  * {
1562  * scratch.fl_matrix.mmult(scratch.tmp_matrix, scratch.ll_matrix);
1563  * scratch.tmp_matrix.vmult_add(task_data.cell_vector, scratch.l_rhs);
1564  * scratch.tmp_matrix.mmult(task_data.cell_matrix,
1565  * scratch.lf_matrix,
1566  * true);
1567  * cell->get_dof_indices(task_data.dof_indices);
1568  * }
1569  * @endcode
1570  *
1571  * For (2), we are simply solving (ll_matrix).(solution_local) = (l_rhs).
1572  * Hence, we multiply @p l_rhs by our already inverted local-local matrix
1573  * and store the result using the <code>set_dof_values</code> function.
1574  *
1575  * @code
1576  * else
1577  * {
1578  * scratch.ll_matrix.vmult(scratch.tmp_rhs, scratch.l_rhs);
1579  * loc_cell->set_dof_values(scratch.tmp_rhs, solution_local);
1580  * }
1581  * }
1582  *
1583  *
1584  *
1585  * @endcode
1586  *
1587  *
1588  * <a name="HDGcopy_local_to_global"></a>
1589  * <h4>HDG::copy_local_to_global</h4>
1590  * If we are in the first step of the solution, i.e. @p trace_reconstruct=false,
1591  * then we assemble the local matrices into the global system.
1592  *
1593  * @code
1594  * template <int dim>
1595  * void HDG<dim>::copy_local_to_global(const PerTaskData &data)
1596  * {
1597  * if (data.trace_reconstruct == false)
1598  * constraints.distribute_local_to_global(data.cell_matrix,
1599  * data.cell_vector,
1600  * data.dof_indices,
1601  * system_matrix,
1602  * system_rhs);
1603  * }
1604  *
1605  *
1606  *
1607  * @endcode
1608  *
1609  *
1610  * <a name="HDGsolve"></a>
1611  * <h4>HDG::solve</h4>
1612  * The skeleton solution is solved for by using a BiCGStab solver with
1613  * the identity preconditioner.
1614  *
1615  * @code
1616  * template <int dim>
1617  * void HDG<dim>::solve()
1618  * {
1619  * SolverControl solver_control(system_matrix.m() * 10,
1620  * 1e-11 * system_rhs.l2_norm());
1621  * SolverBicgstab<Vector<double>> solver(solver_control);
1622  * solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());
1623  *
1624  * std::cout << " Number of BiCGStab iterations: "
1625  * << solver_control.last_step() << std::endl;
1626  *
1627  * system_matrix.clear();
1628  * sparsity_pattern.reinit(0, 0, 0, 1);
1629  *
1630  * constraints.distribute(solution);
1631  *
1632  * @endcode
1633  *
1634  * Once we have solved for the skeleton solution,
1635  * we can solve for the local solutions in an element-by-element
1636  * fashion. We do this by re-using the same @p assemble_system function
1637  * but switching @p trace_reconstruct to true.
1638  *
1639  * @code
1640  * assemble_system(true);
1641  * }
1642  *
1643  *
1644  *
1645  * @endcode
1646  *
1647  *
1648  * <a name="HDGpostprocess"></a>
1649  * <h4>HDG::postprocess</h4>
1650  *
1651 
1652  *
1653  * The postprocess method serves two purposes. First, we want to construct a
1654  * post-processed scalar variable in the element space of degree @f$p+1@f$ that
1655  * we hope will converge at order @f$p+2@f$. This is again an element-by-element
1656  * process and only involves the scalar solution as well as the gradient on
1657  * the local cell. To do this, we introduce the already defined scratch data
1658  * together with some update flags and run the work stream to do this in
1659  * parallel.
1660  *
1661 
1662  *
1663  * Secondly, we want to compute discretization errors just as we did in
1664  * @ref step_7 "step-7". The overall procedure is similar with calls to
1665  * VectorTools::integrate_difference. The difference is in how we compute
1666  * the errors for the scalar variable and the gradient variable. In @ref step_7 "step-7",
1667  * we did this by computing @p L2_norm or @p H1_seminorm
1668  * contributions. Here, we have a DoFHandler with these two contributions
1669  * computed and sorted by their vector component, <code>[0, dim)</code> for
1670  * the
1671  * gradient and @p dim for the scalar. To compute their value, we hence use
1672  * a ComponentSelectFunction with either of them, together with the @p
1673  * SolutionAndGradient class introduced above that contains the analytic
1674  * parts of either of them. Eventually, we also compute the L2-error of the
1675  * post-processed solution and add the results into the convergence table.
1676  *
1677  * @code
1678  * template <int dim>
1679  * void HDG<dim>::postprocess()
1680  * {
1681  * {
1682  * const QGauss<dim> quadrature_formula(fe_u_post.degree + 1);
1683  * const UpdateFlags local_flags(update_values);
1684  * const UpdateFlags flags(update_values | update_gradients |
1685  * update_JxW_values);
1686  *
1687  * PostProcessScratchData scratch(
1688  * fe_u_post, fe_local, quadrature_formula, local_flags, flags);
1689  *
1690  * WorkStream::run(
1691  * dof_handler_u_post.begin_active(),
1692  * dof_handler_u_post.end(),
1693  * [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
1694  * PostProcessScratchData & scratch,
1695  * unsigned int & data) {
1696  * this->postprocess_one_cell(cell, scratch, data);
1697  * },
1698  * std::function<void(const unsigned int &)>(),
1699  * scratch,
1700  * 0U);
1701  * }
1702  *
1703  * Vector<float> difference_per_cell(triangulation.n_active_cells());
1704  *
1705  * ComponentSelectFunction<dim> value_select(dim, dim + 1);
1706  * VectorTools::integrate_difference(dof_handler_local,
1707  * solution_local,
1708  * SolutionAndGradient<dim>(),
1709  * difference_per_cell,
1710  * QGauss<dim>(fe.degree + 2),
1711  * VectorTools::L2_norm,
1712  * &value_select);
1713  * const double L2_error =
1714  * VectorTools::compute_global_error(triangulation,
1715  * difference_per_cell,
1716  * VectorTools::L2_norm);
1717  *
1718  * ComponentSelectFunction<dim> gradient_select(
1719  * std::pair<unsigned int, unsigned int>(0, dim), dim + 1);
1720  * VectorTools::integrate_difference(dof_handler_local,
1721  * solution_local,
1722  * SolutionAndGradient<dim>(),
1723  * difference_per_cell,
1724  * QGauss<dim>(fe.degree + 2),
1725  * VectorTools::L2_norm,
1726  * &gradient_select);
1727  * const double grad_error =
1728  * VectorTools::compute_global_error(triangulation,
1729  * difference_per_cell,
1730  * VectorTools::L2_norm);
1731  *
1732  * VectorTools::integrate_difference(dof_handler_u_post,
1733  * solution_u_post,
1734  * Solution<dim>(),
1735  * difference_per_cell,
1736  * QGauss<dim>(fe.degree + 3),
1737  * VectorTools::L2_norm);
1738  * const double post_error =
1739  * VectorTools::compute_global_error(triangulation,
1740  * difference_per_cell,
1741  * VectorTools::L2_norm);
1742  *
1743  * convergence_table.add_value("cells", triangulation.n_active_cells());
1744  * convergence_table.add_value("dofs", dof_handler.n_dofs());
1745  *
1746  * convergence_table.add_value("val L2", L2_error);
1747  * convergence_table.set_scientific("val L2", true);
1748  * convergence_table.set_precision("val L2", 3);
1749  *
1750  * convergence_table.add_value("grad L2", grad_error);
1751  * convergence_table.set_scientific("grad L2", true);
1752  * convergence_table.set_precision("grad L2", 3);
1753  *
1754  * convergence_table.add_value("val L2-post", post_error);
1755  * convergence_table.set_scientific("val L2-post", true);
1756  * convergence_table.set_precision("val L2-post", 3);
1757  * }
1758  *
1759  *
1760  *
1761  * @endcode
1762  *
1763  *
1764  * <a name="HDGpostprocess_one_cell"></a>
1765  * <h4>HDG::postprocess_one_cell</h4>
1766  *
1767 
1768  *
1769  * This is the actual work done for the postprocessing. According to the
1770  * discussion in the introduction, we need to set up a system that projects
1771  * the gradient part of the DG solution onto the gradient of the
1772  * post-processed variable. Moreover, we need to set the average of the new
1773  * post-processed variable to equal the average of the scalar DG solution
1774  * on the cell.
1775  *
1776 
1777  *
1778  * More technically speaking, the projection of the gradient is a system
1779  * that would potentially fill our @p dofs_per_cell times @p dofs_per_cell
1780  * matrix but is singular (the sum of all rows would be zero because the
1781  * constant function has zero gradient). Therefore, we take one row away and
1782  * use it for imposing the average of the scalar value. We pick the first
1783  * row for the scalar part, even though we could pick any row for @f$\mathcal
1784  * Q_{-p}@f$ elements. However, had we used FE_DGP elements instead, the first
1785  * row would correspond to the constant part already and deleting e.g. the
1786  * last row would give us a singular system. This way, our program can also
1787  * be used for those elements.
1788  *
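 * In other words, on each cell @f$K@f$ the system we solve reads
 * @f[
 * (\nabla \varphi_i, \nabla u_h^*)_K = -(\nabla \varphi_i, \mathbf{q}_h)_K
 * \quad \text{for all basis functions } \varphi_i, \; i \ge 1,
 * \qquad
 * (1, u_h^*)_K = (1, u_h)_K,
 * @f]
 * where the second equation takes the place of the first matrix row and
 * fixes the cell average of @f$u_h^*@f$.
 *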
1789  * @code
1790  * template <int dim>
1791  * void HDG<dim>::postprocess_one_cell(
1792  * const typename DoFHandler<dim>::active_cell_iterator &cell,
1793  * PostProcessScratchData & scratch,
1794  * unsigned int &)
1795  * {
1796  * typename DoFHandler<dim>::active_cell_iterator loc_cell(&triangulation,
1797  * cell->level(),
1798  * cell->index(),
1799  * &dof_handler_local);
1800  *
1801  * scratch.fe_values_local.reinit(loc_cell);
1802  * scratch.fe_values.reinit(cell);
1803  *
1804  * FEValuesExtractors::Vector fluxes(0);
1805  * FEValuesExtractors::Scalar scalar(dim);
1806  *
1807  * const unsigned int n_q_points = scratch.fe_values.get_quadrature().size();
1808  * const unsigned int dofs_per_cell = scratch.fe_values.dofs_per_cell;
1809  *
1810  * scratch.fe_values_local[scalar].get_function_values(solution_local,
1811  * scratch.u_values);
1812  * scratch.fe_values_local[fluxes].get_function_values(solution_local,
1813  * scratch.u_gradients);
1814  *
1815  * double sum = 0;
1816  * for (unsigned int i = 1; i < dofs_per_cell; ++i)
1817  * {
1818  * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1819  * {
1820  * sum = 0;
1821  * for (unsigned int q = 0; q < n_q_points; ++q)
1822  * sum += (scratch.fe_values.shape_grad(i, q) *
1823  * scratch.fe_values.shape_grad(j, q)) *
1824  * scratch.fe_values.JxW(q);
1825  * scratch.cell_matrix(i, j) = sum;
1826  * }
1827  *
1828  * sum = 0;
1829  * for (unsigned int q = 0; q < n_q_points; ++q)
1830  * sum -= (scratch.fe_values.shape_grad(i, q) * scratch.u_gradients[q]) *
1831  * scratch.fe_values.JxW(q);
1832  * scratch.cell_rhs(i) = sum;
1833  * }
1834  * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1835  * {
1836  * sum = 0;
1837  * for (unsigned int q = 0; q < n_q_points; ++q)
1838  * sum += scratch.fe_values.shape_value(j, q) * scratch.fe_values.JxW(q);
1839  * scratch.cell_matrix(0, j) = sum;
1840  * }
1841  * {
1842  * sum = 0;
1843  * for (unsigned int q = 0; q < n_q_points; ++q)
1844  * sum += scratch.u_values[q] * scratch.fe_values.JxW(q);
1845  * scratch.cell_rhs(0) = sum;
1846  * }
1847  *
1848  * @endcode
1849  *
1850  * Having assembled all terms, we can again go on and solve the linear
1851  * system. We invert the matrix and then multiply the inverse by the
1852  * right hand side. An alternative (and more numerically stable) method
1853  * would have been to only factorize the matrix and apply the factorization.
1854  *
1855  * @code
1856  * scratch.cell_matrix.gauss_jordan();
1857  * scratch.cell_matrix.vmult(scratch.cell_sol, scratch.cell_rhs);
1858  * cell->distribute_local_to_global(scratch.cell_sol, solution_u_post);
1859  * }
1860  *
1861  *
1862  *
1863  * @endcode
1864  *
1865  *
1866  * <a name="HDGoutput_results"></a>
1867  * <h4>HDG::output_results</h4>
1868  * We have three sets of results that we would like to output: the local
1869  * solution, the post-processed local solution, and the skeleton solution. The
1870  * first two 'live' on element volumes, whereas the third lives on
1871  * codimension-1 surfaces
1872  * of the triangulation. Our @p output_results function writes all local solutions
1873  * to the same vtk file, even though they correspond to different
1874  * DoFHandler objects. The graphical output for the skeleton
1875  * variable is done through use of the DataOutFaces class.
1876  *
1877  * @code
1878  * template <int dim>
1879  * void HDG<dim>::output_results(const unsigned int cycle)
1880  * {
1881  * std::string filename;
1882  * switch (refinement_mode)
1883  * {
1884  * case global_refinement:
1885  * filename = "solution-global";
1886  * break;
1887  * case adaptive_refinement:
1888  * filename = "solution-adaptive";
1889  * break;
1890  * default:
1891  * Assert(false, ExcNotImplemented());
1892  * }
1893  *
1894  * std::string face_out(filename);
1895  * face_out += "-face";
1896  *
1897  * filename += "-q" + Utilities::int_to_string(fe.degree, 1);
1898  * filename += "-" + Utilities::int_to_string(cycle, 2);
1899  * filename += ".vtk";
1900  * std::ofstream output(filename);
1901  *
1902  * DataOut<dim> data_out;
1903  *
1904  * @endcode
1905  *
1906  * We first define the names and types of the local solution,
1907  * and add the data to @p data_out.
1908  *
1909  * @code
1910  * std::vector<std::string> names(dim, "gradient");
1911  * names.emplace_back("solution");
1912  * std::vector<DataComponentInterpretation::DataComponentInterpretation>
1913  * component_interpretation(
1914  * dim + 1, DataComponentInterpretation::component_is_part_of_vector);
1915  * component_interpretation[dim] =
1916  * DataComponentInterpretation::component_is_scalar;
1917  * data_out.add_data_vector(dof_handler_local,
1918  * solution_local,
1919  * names,
1920  * component_interpretation);
1921  *
1922  * @endcode
1923  *
1924  * The second data item we add is the post-processed solution.
1925  * In this case, it is a single scalar variable belonging to
1926  * a different DoFHandler.
1927  *
1928  * @code
1929  * std::vector<std::string> post_name(1, "u_post");
1930  * std::vector<DataComponentInterpretation::DataComponentInterpretation>
1931  * post_comp_type(1, DataComponentInterpretation::component_is_scalar);
1932  * data_out.add_data_vector(dof_handler_u_post,
1933  * solution_u_post,
1934  * post_name,
1935  * post_comp_type);
1936  *
1937  * data_out.build_patches(fe.degree);
1938  * data_out.write_vtk(output);
1939  *
1940  * face_out += "-q" + Utilities::int_to_string(fe.degree, 1);
1941  * face_out += "-" + Utilities::int_to_string(cycle, 2);
1942  * face_out += ".vtk";
1943  * std::ofstream face_output(face_out);
1944  *
1945  * @endcode
1946  *
1947  * The <code>DataOutFaces</code> class works analogously to the
1948  * <code>DataOut</code> class when we have a <code>DoFHandler</code> that
1949  * defines the solution on the skeleton of the triangulation. We treat it
1950  * as such here, and the code is similar to that above.
1951  *
1952  * @code
1953  * DataOutFaces<dim> data_out_face(false);
1954  * std::vector<std::string> face_name(1, "u_hat");
1955  * std::vector<DataComponentInterpretation::DataComponentInterpretation>
1956  * face_component_type(1, DataComponentInterpretation::component_is_scalar);
1957  *
1958  * data_out_face.add_data_vector(dof_handler,
1959  * solution,
1960  * face_name,
1961  * face_component_type);
1962  *
1963  * data_out_face.build_patches(fe.degree);
1964  * data_out_face.write_vtk(face_output);
1965  * }
1966  *
1967  * @endcode
1968  *
1969  *
1970  * <a name="HDGrefine_grid"></a>
1971  * <h4>HDG::refine_grid</h4>
1972  *
1973 
1974  *
1975  * We implement two different refinement cases for HDG, just as in
1976  * <code>@ref step_7 "step-7"</code>: adaptive_refinement and global_refinement. The
1977  * global_refinement option recreates the entire triangulation every
1978  * time. This is because we want to use a finer sequence of meshes than what
1979  * we would get with one refinement step, namely 2, 3, 4, 6, 8, 12, 16, ...
1980  * elements per direction.
1981  *
1982 
1983  *
1984  * The adaptive_refinement mode uses the <code>KellyErrorEstimator</code> to
1985  * give a decent indication of the non-regular regions in the scalar local
1986  * solutions.
1987  *
1988  * @code
1989  * template <int dim>
1990  * void HDG<dim>::refine_grid(const unsigned int cycle)
1991  * {
1992  * if (cycle == 0)
1993  * {
1994  * GridGenerator::subdivided_hyper_cube(triangulation, 2, -1, 1);
1995  * triangulation.refine_global(3 - dim);
1996  * }
1997  * else
1998  * switch (refinement_mode)
1999  * {
2000  * case global_refinement:
2001  * {
2002  * triangulation.clear();
2003  * GridGenerator::subdivided_hyper_cube(triangulation,
2004  * 2 + (cycle % 2),
2005  * -1,
2006  * 1);
2007  * triangulation.refine_global(3 - dim + cycle / 2);
2008  * break;
2009  * }
2010  *
2011  * case adaptive_refinement:
2012  * {
2013  * Vector<float> estimated_error_per_cell(
2014  * triangulation.n_active_cells());
2015  *
2016  * FEValuesExtractors::Scalar scalar(dim);
2017  * std::map<types::boundary_id, const Function<dim> *>
2018  * neumann_boundary;
2019  * KellyErrorEstimator<dim>::estimate(dof_handler_local,
2020  * QGauss<dim - 1>(fe.degree + 1),
2021  * neumann_boundary,
2022  * solution_local,
2023  * estimated_error_per_cell,
2024  * fe_local.component_mask(
2025  * scalar));
2026  *
2027  * GridRefinement::refine_and_coarsen_fixed_number(
2028  * triangulation, estimated_error_per_cell, 0.3, 0.);
2029  *
2030  * triangulation.execute_coarsening_and_refinement();
2031  *
2032  * break;
2033  * }
2034  *
2035  * default:
2036  * {
2037  * Assert(false, ExcNotImplemented());
2038  * }
2039  * }
2040  *
2041  * @endcode
2042  *
2043  * Just as in @ref step_7 "step-7", we set the boundary indicator of two of the faces to 1
2044  * where we want to specify Neumann boundary conditions instead of Dirichlet
2045  * conditions. Since we re-create the triangulation every time for global
2046  * refinement, the flags are set in every refinement step, not just at the
2047  * beginning.
2048  *
2049  * @code
2050  * for (const auto &cell : triangulation.cell_iterators())
2051  * for (const auto &face : cell->face_iterators())
2052  * if (face->at_boundary())
2053  * if ((std::fabs(face->center()(0) - (-1)) < 1e-12) ||
2054  * (std::fabs(face->center()(1) - (-1)) < 1e-12))
2055  * face->set_boundary_id(1);
2056  * }
2057  *
2058  * @endcode
2059  *
2060  *
2061  * <a name="HDGrun"></a>
2062  * <h4>HDG::run</h4>
2063  * The functionality here is basically the same as <code>@ref step_7 "step-7"</code>.
2064  * We loop over 10 cycles, refining the grid on each one. At the end,
2065  * convergence tables are created.
2066  *
2067  * @code
2068  * template <int dim>
2069  * void HDG<dim>::run()
2070  * {
2071  * for (unsigned int cycle = 0; cycle < 10; ++cycle)
2072  * {
2073  * std::cout << "Cycle " << cycle << ':' << std::endl;
2074  *
2075  * refine_grid(cycle);
2076  * setup_system();
2077  * assemble_system(false);
2078  * solve();
2079  * postprocess();
2080  * output_results(cycle);
2081  * }
2082  *
2083  * @endcode
2084  *
2085  * There is one minor change for the convergence table compared to @ref step_7 "step-7":
2086  * Since we did not refine our mesh by a factor two in each cycle (but
2087  * rather used the sequence 2, 3, 4, 6, 8, 12, ...), we need to tell the
2088  * convergence rate evaluation about this. We do this by setting the
2089  * number of cells as a reference column and additionally specifying the
2090  * dimension of the problem, which gives the necessary information for the
2091  * relation between number of cells and mesh size.
2092  *
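 * Concretely (assuming the documented behavior of
 * ConvergenceTable::reduction_rate_log2 with a reference column), the rate
 * between two consecutive rows with errors @f$e_{k-1}, e_k@f$ and cell counts
 * @f$N_{k-1}, N_k@f$ is computed as
 * @f[
 * \text{rate} = \mathrm{dim}\,\frac{\log(e_{k-1}/e_k)}{\log(N_k/N_{k-1})},
 * @f]
 * which reduces to the usual @f$\log_2@f$ ratio whenever the number of cells
 * per direction doubles from one row to the next.
 *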
2093  * @code
2094  * if (refinement_mode == global_refinement)
2095  * {
2096  * convergence_table.evaluate_convergence_rates(
2097  * "val L2", "cells", ConvergenceTable::reduction_rate_log2, dim);
2098  * convergence_table.evaluate_convergence_rates(
2099  * "grad L2", "cells", ConvergenceTable::reduction_rate_log2, dim);
2100  * convergence_table.evaluate_convergence_rates(
2101  * "val L2-post", "cells", ConvergenceTable::reduction_rate_log2, dim);
2102  * }
2103  * convergence_table.write_text(std::cout);
2104  * }
2105  *
2106  * } // end of namespace Step51
2107  *
2108  *
2109  *
2110  * int main()
2111  * {
2112  * const unsigned int dim = 2;
2113  *
2114  * try
2115  * {
2116  * @endcode
2117  *
2118  * Now for the three calls to the main class in complete analogy to
2119  * @ref step_7 "step-7".
2120  *
2121  * @code
2122  * {
2123  * std::cout << "Solving with Q1 elements, adaptive refinement"
2124  * << std::endl
2125  * << "============================================="
2126  * << std::endl
2127  * << std::endl;
2128  *
2129  * Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::adaptive_refinement);
2130  * hdg_problem.run();
2131  *
2132  * std::cout << std::endl;
2133  * }
2134  *
2135  * {
2136  * std::cout << "Solving with Q1 elements, global refinement" << std::endl
2137  * << "===========================================" << std::endl
2138  * << std::endl;
2139  *
2140  * Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::global_refinement);
2141  * hdg_problem.run();
2142  *
2143  * std::cout << std::endl;
2144  * }
2145  *
2146  * {
2147  * std::cout << "Solving with Q3 elements, global refinement" << std::endl
2148  * << "===========================================" << std::endl
2149  * << std::endl;
2150  *
2151  * Step51::HDG<dim> hdg_problem(3, Step51::HDG<dim>::global_refinement);
2152  * hdg_problem.run();
2153  *
2154  * std::cout << std::endl;
2155  * }
2156  * }
2157  * catch (std::exception &exc)
2158  * {
2159  * std::cerr << std::endl
2160  * << std::endl
2161  * << "----------------------------------------------------"
2162  * << std::endl;
2163  * std::cerr << "Exception on processing: " << std::endl
2164  * << exc.what() << std::endl
2165  * << "Aborting!" << std::endl
2166  * << "----------------------------------------------------"
2167  * << std::endl;
2168  * return 1;
2169  * }
2170  * catch (...)
2171  * {
2172  * std::cerr << std::endl
2173  * << std::endl
2174  * << "----------------------------------------------------"
2175  * << std::endl;
2176  * std::cerr << "Unknown exception!" << std::endl
2177  * << "Aborting!" << std::endl
2178  * << "----------------------------------------------------"
2179  * << std::endl;
2180  * return 1;
2181  * }
2182  *
2183  * return 0;
2184  * }
2185  * @endcode
2186 <a name="Results"></a><h1>Results</h1>
2187 
2188 
2189 <a name="Programoutput"></a><h3>Program output</h3>
2190 
2191 
2192 We first have a look at the output generated by the program when run in 2D. In
2193 the four images below, we show the solution for polynomial degree @f$p=1@f$
2194 and cycles 2, 3, 4, and 8 of the program. In the plots, we overlay the data
2195 generated from the internal data (DG part) with the skeleton part (@f$\hat{u}@f$)
2196 into the same plot. We had to generate two different data sets because cells
2197 and faces represent different geometric entities, the combination of which (in
2198 the same file) is not supported in the VTK output of deal.II.
2199 
2200 The images show the distinctive features of HDG: The cell solution (colored
2201 surfaces) is discontinuous between the cells. The solution on the skeleton
2202 variable sits on the faces and ties together the local parts. The skeleton
2203 solution is not continuous on the vertices where the faces meet, even though
2204 its values are quite close along lines in the same coordinate direction. The
2205 skeleton solution can be interpreted as a rubber spring between the two sides
2206 that balances the jumps in the solution (or rather, the flux @f$\kappa \nabla u
2207 + \mathbf{c} u@f$). From the picture at the top left, it is clear that
2208 the bulk solution frequently over- and undershoots and that the
2209 skeleton variable is indeed a better approximation to the exact
2210 solution; this explains why we can get a better solution using a
2211 postprocessing step.
2212 
2213 As the mesh is refined, the jumps between the cells get
2214 smaller (since we approximate a smooth solution), and the skeleton solution approaches
2215 the interior parts. For cycle 8, there is no visible difference between the two
2216 variables. We also see how boundary conditions are implemented weakly and that
2217 the interior variables do not exactly satisfy boundary conditions. On the
2218 lower and left boundaries, we set Neumann boundary conditions, whereas we set
2219 Dirichlet conditions on the right and top boundaries.
2220 
2221 <table align="center">
2222  <tr>
2223  <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_2.png" alt=""></td>
2224  <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_3.png" alt=""></td>
2225  </tr>
2226  <tr>
2227  <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_4.png" alt=""></td>
2228  <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_8.png" alt=""></td>
2229  </tr>
2230 </table>
2231 
2232 Next, we have a look at the post-processed solution, again at cycles 2, 3, 4,
2233 and 8. This is a discontinuous solution that is locally described by second
2234 order polynomials. While the solution does not look very good on the mesh of
2235 cycle two, it looks much better for cycles three and four. As shown by the
2236 convergence table below, we find that it also converges more quickly to the
2237 analytical solution.
2238 
2239 <table align="center">
2240  <tr>
2241  <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_2.png" alt=""></td>
2242  <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_3.png" alt=""></td>
2243  </tr>
2244  <tr>
2245  <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_4.png" alt=""></td>
2246  <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_8.png" alt=""></td>
2247  </tr>
2248 </table>
2249 
2250 Finally, we look at the solution for @f$p=3@f$ at cycle 2. Despite the coarse
2251 mesh with only 64 cells, the post-processed solution is similar in quality
2252 to the linear solution (not post-processed) at cycle 8 with 4,096
2253 cells. This clearly shows the superiority of high order methods for smooth
2254 solutions.
2255 
2256 <table align="center">
2257  <tr>
2258  <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_q3_2.png" alt=""></td>
2259  <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_q3_2.png" alt=""></td>
2260  </tr>
2261 </table>
2262 
2263 <a name="Convergencetables"></a><h4>Convergence tables</h4>
2264 
2265 
2266 When the program is run, it also outputs information about the individual
2267 steps and, at the end, convergence tables with the errors in the various
2268 components. In 2D, the convergence tables look as follows:
2269 
2270 @code
2271 Q1 elements, adaptive refinement:
2272 cells dofs val L2 grad L2 val L2-post
2273  16 80 1.804e+01 2.207e+01 1.798e+01
2274  31 170 9.874e+00 1.322e+01 9.798e+00
2275  61 314 7.452e-01 3.793e+00 4.891e-01
2276  121 634 3.240e-01 1.511e+00 2.616e-01
2277  238 1198 8.585e-02 8.212e-01 1.808e-02
2278  454 2290 4.802e-02 5.178e-01 2.195e-02
2279  898 4378 2.561e-02 2.947e-01 4.318e-03
2280  1720 7864 1.306e-02 1.664e-01 2.978e-03
2281  3271 14638 7.025e-03 9.815e-02 1.075e-03
2282  6217 27214 4.119e-03 6.407e-02 9.975e-04
2283 
2284 Q1 elements, global refinement:
2285 cells dofs val L2 grad L2 val L2-post
2286  16 80 1.804e+01 - 2.207e+01 - 1.798e+01 -
2287  36 168 6.125e+00 2.66 9.472e+00 2.09 6.084e+00 2.67
2288  64 288 9.785e-01 6.38 4.260e+00 2.78 7.102e-01 7.47
2289  144 624 2.730e-01 3.15 1.866e+00 2.04 6.115e-02 6.05
2290  256 1088 1.493e-01 2.10 1.046e+00 2.01 2.880e-02 2.62
2291  576 2400 6.965e-02 1.88 4.846e-01 1.90 9.204e-03 2.81
2292  1024 4224 4.018e-02 1.91 2.784e-01 1.93 4.027e-03 2.87
2293  2304 9408 1.831e-02 1.94 1.264e-01 1.95 1.236e-03 2.91
2294  4096 16640 1.043e-02 1.96 7.185e-02 1.96 5.306e-04 2.94
2295  9216 37248 4.690e-03 1.97 3.228e-02 1.97 1.599e-04 2.96
2296 
2297 Q3 elements, global refinement:
2298 cells dofs val L2 grad L2 val L2-post
2299  16 160 3.613e-01 - 1.891e+00 - 3.020e-01 -
2300  36 336 6.411e-02 4.26 5.081e-01 3.24 3.238e-02 5.51
2301  64 576 3.480e-02 2.12 2.533e-01 2.42 5.277e-03 6.31
2302  144 1248 8.297e-03 3.54 5.924e-02 3.58 6.330e-04 5.23
2303  256 2176 2.254e-03 4.53 1.636e-02 4.47 1.403e-04 5.24
2304  576 4800 4.558e-04 3.94 3.277e-03 3.96 1.844e-05 5.01
2305  1024 8448 1.471e-04 3.93 1.052e-03 3.95 4.378e-06 5.00
2306  2304 18816 2.956e-05 3.96 2.104e-04 3.97 5.750e-07 5.01
2307  4096 33280 9.428e-06 3.97 6.697e-05 3.98 1.362e-07 5.01
2308  9216 74496 1.876e-06 3.98 1.330e-05 3.99 1.788e-08 5.01
2309 @endcode
2310 
2311 
2312 One can see the error reduction upon grid refinement, and for the cases where
2313 global refinement was performed, also the convergence rates. The quadratic
2314 convergence rates of Q1 elements in the @f$L_2@f$ norm for both the scalar
2315 variable and the gradient variable are apparent, as is the cubic rate for the
2316 postprocessed scalar variable in the @f$L_2@f$ norm. Note this distinctive
2317 feature of an HDG solution. In typical continuous finite elements, the
2318 gradient of the solution of order @f$p@f$ converges at rate @f$p@f$ only, as
2319 opposed to @f$p+1@f$ for the actual solution. Even though superconvergence
2320 results for finite elements are also available (e.g. superconvergent patch
2321 recovery first introduced by Zienkiewicz and Zhu), these are typically limited
2322 to structured meshes and other special cases. For Q3 HDG variables, the scalar
2323 variable and gradient converge at fourth order and the postprocessed scalar
2324 variable at fifth order.
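To make the rate columns concrete: they follow the usual estimate
@f$\text{rate} = \ln(e_{\text{coarse}}/e_{\text{fine}})/\ln(h_{\text{coarse}}/h_{\text{fine}})@f$.
Between the last two rows of the Q1 global-refinement table above, the mesh size
decreases by a factor @f$\sqrt{9216/4096} = 1.5@f$, so
@f[
\frac{\ln\left(1.043\cdot 10^{-2} / 4.690\cdot 10^{-3}\right)}{\ln 1.5} \approx 1.97,
\qquad
\frac{\ln\left(5.306\cdot 10^{-4} / 1.599\cdot 10^{-4}\right)}{\ln 1.5} \approx 2.96,
@f]
which are exactly the printed rates for the scalar variable and its postprocessed
counterpart.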
2325 
2326 The same convergence rates are observed in 3D:
2327 @code
2328 Q1 elements, adaptive refinement:
2329 cells dofs val L2 grad L2 val L2-post
2330  8 144 7.122e+00 1.941e+01 6.102e+00
2331  29 500 3.309e+00 1.023e+01 2.145e+00
2332  113 1792 2.204e+00 1.023e+01 1.912e+00
2333  379 5732 6.085e-01 5.008e+00 2.233e-01
2334  1317 19412 1.543e-01 1.464e+00 4.196e-02
2335  4579 64768 5.058e-02 5.611e-01 9.521e-03
2336  14596 199552 2.129e-02 3.122e-01 4.569e-03
2337  46180 611400 1.033e-02 1.622e-01 1.684e-03
2338 144859 1864212 5.007e-03 8.371e-02 7.364e-04
2339 451060 5684508 2.518e-03 4.562e-02 3.070e-04
2340 
2341 Q1 elements, global refinement:
2342 cells dofs val L2 grad L2 val L2-post
2343  8 144 7.122e+00 - 1.941e+01 - 6.102e+00 -
2344  27 432 5.491e+00 0.64 2.184e+01 -0.29 4.448e+00 0.78
2345  64 960 3.646e+00 1.42 1.299e+01 1.81 3.306e+00 1.03
2346  216 3024 1.595e+00 2.04 8.550e+00 1.03 1.441e+00 2.05
2347  512 6912 6.922e-01 2.90 5.306e+00 1.66 2.511e-01 6.07
2348  1728 22464 2.915e-01 2.13 2.490e+00 1.87 8.588e-02 2.65
2349  4096 52224 1.684e-01 1.91 1.453e+00 1.87 4.055e-02 2.61
2350  13824 172800 7.972e-02 1.84 6.861e-01 1.85 1.335e-02 2.74
2351  32768 405504 4.637e-02 1.88 3.984e-01 1.89 5.932e-03 2.82
2352 110592 1354752 2.133e-02 1.92 1.830e-01 1.92 1.851e-03 2.87
2353 
2354 Q3 elements, global refinement:
2355 cells dofs val L2 grad L2 val L2-post
2356  8 576 5.670e+00 - 1.868e+01 - 5.462e+00 -
2357  27 1728 1.048e+00 4.16 6.988e+00 2.42 8.011e-01 4.73
2358  64 3840 2.831e-01 4.55 2.710e+00 3.29 1.363e-01 6.16
2359  216 12096 7.883e-02 3.15 7.721e-01 3.10 2.158e-02 4.55
2360  512 27648 3.642e-02 2.68 3.305e-01 2.95 5.231e-03 4.93
2361  1728 89856 8.546e-03 3.58 7.581e-02 3.63 7.640e-04 4.74
2362  4096 208896 2.598e-03 4.14 2.313e-02 4.13 1.783e-04 5.06
2363  13824 691200 5.314e-04 3.91 4.697e-03 3.93 2.355e-05 4.99
2364  32768 1622016 1.723e-04 3.91 1.517e-03 3.93 5.602e-06 4.99
2365 110592 5419008 3.482e-05 3.94 3.055e-04 3.95 7.374e-07 5.00
2366 @endcode
2367 
2368 <a name="Comparisonwithcontinuousfiniteelements"></a><h3>Comparison with continuous finite elements</h3>
2369 
2370 
2371 <a name="Resultsfor2D"></a><h4>Results for 2D</h4>
2372 
2373 
2374 The convergence tables verify the expected convergence rates stated in the
2375 introduction. Now, we want to show a quick comparison of the computational
2376 efficiency of the HDG method and a standard finite element (continuous
2377 Galerkin) method on the problem of this tutorial. Of course, stability aspects
2378 of the HDG method compared to continuous finite elements for
2379 transport-dominated problems are also important in practice, but that
2380 aspect does not show up for a problem with a smooth analytic solution. In the picture
2381 below, we compare the @f$L_2@f$ error as a function of the number of degrees of
2382 freedom (left) and of the computing time spent in the linear solver (right)
2383 in two space dimensions for continuous finite elements (CG) and the hybridized
2384 discontinuous Galerkin method presented in this tutorial. As opposed to the
2385 tutorial where we only use unpreconditioned BiCGStab, the times shown in the
2386 figures below use the Trilinos algebraic multigrid preconditioner in
2387 TrilinosWrappers::PreconditionAMG. For the HDG part, a wrapper around
2388 ChunkSparseMatrix for the trace variable has been used in order to utilize the
2389 block structure in the matrix on the finest level.
2390 
2391 <table align="center">
2392  <tr>
2393  <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_plain.png" width="400" alt=""></td>
2394  <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_plain.png" width="400" alt=""></td>
2395  </tr>
2396 </table>
2397 
2398 The results in the graphs show that the HDG method is slower than continuous
2399 finite elements at @f$p=1@f$, about equally fast for cubic elements and
2400 faster for sixth order elements. However, we have seen above that the HDG
2401 method actually produces solutions which are more accurate than what is
2402 represented in the original variables. Therefore, in the next two plots below
2403 we instead display the error of the post-processed solution for HDG (denoted
2404 by @f$p=1^*@f$ for example). We now see a clear advantage of HDG for the same
2405 amount of work for both @f$p=3@f$ and @f$p=6@f$, and about the same quality
2406 for @f$p=1@f$.
2407 
2408 <table align="center">
2409  <tr>
2410  <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_post.png" width="400" alt=""></td>
2411  <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_post.png" width="400" alt=""></td>
2412  </tr>
2413 </table>
2414 
2415 Since the HDG method actually produces results converging as
2416 @f$h^{p+2}@f$, we should compare it to a continuous Galerkin
2417 solution with the same asymptotic convergence behavior, i.e., FE_Q with degree
2418 @f$p+1@f$. If we do this, we get the convergence curves below. We see that
2419 CG with second order polynomials is again clearly better than HDG with
2420 linears. However, the advantage of HDG for higher orders remains.
2421 
2422 <table align="center">
2423  <tr>
2424  <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_postb.png" width="400" alt=""></td>
2425  <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_postb.png" width="400" alt=""></td>
2426  </tr>
2427 </table>
2428 
2429 The results are in line with properties of DG methods in general: Best
2430 performance is typically not achieved for linear elements, but rather at
2431 somewhat higher order, usually around @f$p=3@f$. This is because of a
2432 volume-to-surface effect for discontinuous solutions with too much of the
2433 solution living on the surfaces and hence duplicating work when the elements
2434 are linear. Put in other words, DG methods are often most efficient when used
2435 at relatively high order, despite their focus on a discontinuous (and hence,
2436 seemingly low-accuracy) representation of solutions.
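A rough count illustrates this (ignoring boundary faces and counting only the
scalar cell space): a @f$Q_p@f$ cell in @f$d@f$ dimensions carries
@f$(p+1)^d@f$ unknowns, while its share of the skeleton is
@f$d\,(p+1)^{d-1}@f$ unknowns, since each of its @f$2d@f$ faces holds
@f$(p+1)^{d-1}@f$ unknowns and is shared between two cells. The ratio of
skeleton to cell unknowns is thus about
@f[
\frac{d\,(p+1)^{d-1}}{(p+1)^{d}} = \frac{d}{p+1},
@f]
i.e., equal to one for @f$d=2@f$, @f$p=1@f$ but only one half for @f$p=3@f$:
the relative overhead of the duplicated face data shrinks as the polynomial
degree grows.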
2437 
2438 <a name="Resultsfor3D"></a><h4>Results for 3D</h4>
2439 
2440 
2441 We now show the same figures in 3D: The first row shows the number of degrees
2442 of freedom and computing time versus the @f$L_2@f$ error in the scalar variable
2443 @f$u@f$ for CG and HDG at order @f$p@f$, the second row shows the
2444 post-processed HDG solution instead of the original one, and the third row
2445 compares the post-processed HDG solution with CG at order @f$p+1@f$. In 3D,
2446 the volume-to-surface effect makes the cost of HDG somewhat higher and the CG
2447 solution is clearly better than HDG for linears by any metric. For cubics, HDG
2448 and CG are of similar quality, whereas HDG is again more efficient for sixth
2449 order polynomials. One can alternatively also use the combination of FE_DGP
2450 and FE_FaceP instead of (FE_DGQ, FE_FaceQ), which do not use tensor product
2451 polynomials of degree @f$p@f$ but Legendre polynomials of <i>complete</i>
2452 degree @f$p@f$. There are fewer degrees of freedom on the skeleton variable
2453 for FE_FaceP for a given mesh size, but the solution quality (error vs. number
2454 of DoFs) is very similar to the results for FE_FaceQ.
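In code, this change would only require exchanging the element objects used for
the local and trace spaces; a sketch (with degree denoting the polynomial degree
as in the tutorial class, and dim assumed to be available):

@code
// Hypothetical replacement of the tensor-product spaces by complete-degree
// (Legendre-type) spaces.
FESystem<dim> fe_local(FE_DGP<dim>(degree), dim + 1); // instead of FE_DGQ
FE_FaceP<dim> fe(degree);                             // instead of FE_FaceQ
@endcode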
2455 
2456 <table align="center">
2457  <tr>
2458  <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_plain.png" width="400" alt=""></td>
2459  <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_plain.png" width="400" alt=""></td>
2460  </tr>
2461  <tr>
2462  <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_post.png" width="400" alt=""></td>
2463  <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_post.png" width="400" alt=""></td>
2464  </tr>
2465  <tr>
2466  <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_postb.png" width="400" alt=""></td>
2467  <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_postb.png" width="400" alt=""></td>
2468  </tr>
2469 </table>
2470 
2471 One final note on the efficiency comparison: We tried to use general-purpose
2472 sparse matrix structures and similar solvers (optimal AMG preconditioners for
2473 both without particular tuning of the AMG parameters on any of them) to give a
2474 fair picture of the cost versus accuracy of the two methods on a toy example. It
2475 should be noted, however, that geometric multigrid (GMG) for continuous finite
2476 elements is about a factor of four to five faster for @f$p=3@f$ and @f$p=6@f$. As of
2477 2019, optimal-complexity iterative solvers for HDG are still under development
2478 in the research community. Also, there are further implementation techniques
2479 available for CG, such as the fast matrix-free approach shown in @ref step_37 "step-37", that make
2480 higher order continuous elements more competitive. Again, it is not clear to
2481 the authors of the tutorial whether similar improvements could be made for
2482 HDG. We refer to <a href="https://dx.doi.org/10.1137/16M110455X">Kronbichler
2483 and Wall (2018)</a> for a recent efficiency evaluation.
2484 
2485 
2486 <a name="Possibilitiesforimprovements"></a><h3>Possibilities for improvements</h3>
2487 
2488 
2489 As already mentioned in the introduction, one possibility is to implement
2490 another post-processing technique as discussed in the literature.
2491 
2492 A second item that is not done optimally relates to the performance of this
2493 program, which is of course an issue in practical applications (to be weighed
2494 against the better solution quality of (H)DG methods for transport-dominated
2495 problems). Let us look at
2496 the computing time of the tutorial program and the share of the individual
2497 components:
2498 
2499 <table align="center" class="doxtable">
2500  <tr>
2501  <th>&nbsp;</th>
2502  <th>&nbsp;</th>
2503  <th>Setup</th>
2504  <th>Assemble</th>
2505  <th>Solve</th>
2506  <th>Trace reconstruct</th>
2507  <th>Post-processing</th>
2508  <th>Output</th>
2509  </tr>
2510  <tr>
2511  <th>&nbsp;</th>
2512  <th>Total time</th>
2513  <th colspan="6">Relative share</th>
2514  </tr>
2515  <tr>
2516  <td align="left">2D, Q1, cycle 9, 37,248 dofs</td>
2517  <td align="center">5.34s</td>
2518  <td align="center">0.7%</td>
2519  <td align="center">1.2%</td>
2520  <td align="center">89.5%</td>
2521  <td align="center">0.9%</td>
2522  <td align="center">2.3%</td>
2523  <td align="center">5.4%</td>
2524  </tr>
2525  <tr>
2526  <td align="left">2D, Q3, cycle 9, 74,496 dofs</td>
2527  <td align="center">22.2s</td>
2528  <td align="center">0.4%</td>
2529  <td align="center">4.3%</td>
2530  <td align="center">84.1%</td>
2531  <td align="center">4.1%</td>
2532  <td align="center">3.5%</td>
2533  <td align="center">3.6%</td>
2534  </tr>
2535  <tr>
2536  <td align="left">3D, Q1, cycle 7, 172,800 dofs</td>
2537  <td align="center">9.06s</td>
2538  <td align="center">3.1%</td>
2539  <td align="center">8.9%</td>
2540  <td align="center">42.7%</td>
2541  <td align="center">7.0%</td>
2542  <td align="center">20.6%</td>
2543  <td align="center">17.7%</td>
2544  </tr>
2545  <tr>
2546  <td align="left">3D, Q3, cycle 7, 691,200 dofs</td>
2547  <td align="center">516s</td>
2548  <td align="center">0.6%</td>
2549  <td align="center">34.5%</td>
2550  <td align="center">13.4%</td>
2551  <td align="center">32.8%</td>
2552  <td align="center">17.1%</td>
2553  <td align="center">1.5%</td>
2554  </tr>
2555 </table>
2556 
2557 As can be seen from the table, the solver and assembly calls dominate the
2558 runtime of the program. This also gives a clear indication of where
2559 improvements would make the most sense:
2560 
2561 <ol>
2562  <li> Better linear solvers: We use a BiCGStab iterative solver without
2563  preconditioner, for which the number of iterations increases with increasing
2564  problem size (for Q1 elements and global refinement, it starts at 35 for the
2565  small sizes but increases up to 701 for the largest size). To do better, one
2566  could for example use an algebraic multigrid preconditioner from Trilinos
2567  (see the sketch after this list), or more advanced variants such as
2568  the one discussed in <a
2569  href="https://dx.doi.org/10.1137/16M110455X">Kronbichler and Wall
2570  (2018)</a>. For diffusion-dominated problems such as the one at hand on
2571  finer meshes, such a solver can be designed to use the matrix-vector
2572  products from the more efficient ChunkSparseMatrix on the finest level, as
2573  long as we are not working in parallel with MPI. For MPI-parallelized
2574  computations, a standard TrilinosWrappers::SparseMatrix can be used.
2575 
2576  <li> Speed up assembly by pre-assembling parts that do not change from one
2577  cell to another (those that contain neither variable coefficients nor
2578  mapping-dependent terms).
2579 </ol>
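As an illustration of the first item, the following sketch shows how an
AMG-preconditioned BiCGStab solve could look. It assumes, hypothetically, that
the trace system has been copied into a plain SparseMatrix<double> called
system_matrix with right hand side system_rhs and solution vector solution,
a format TrilinosWrappers::PreconditionAMG can be initialized from; the names
are placeholders, not the tutorial's data structures.

@code
// Sketch only: set up a Trilinos algebraic multigrid preconditioner for the
// trace system and use it inside the BiCGStab solver.
TrilinosWrappers::PreconditionAMG                 amg;
TrilinosWrappers::PreconditionAMG::AdditionalData amg_data;
amg_data.smoother_sweeps       = 2;    // untuned, generic settings
amg_data.aggregation_threshold = 0.02;
amg.initialize(system_matrix, amg_data);

SolverControl                  solver_control(system_matrix.m() * 10,
                                              1e-11 * system_rhs.l2_norm());
SolverBicgstab<Vector<double>> solver(solver_control);
solver.solve(system_matrix, solution, system_rhs, amg);
@endcode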
2580  *
2581  *
2582 <a name="PlainProg"></a>
2583 <h1> The plain program</h1>
2584 @include "step-51.cc"
2585 */