step-51.h
Go to the documentation of this file.
591 *   virtual double value(const Point<dim> &p,
592 *   const unsigned int /*component*/ = 0) const override
593 *   {
594 *   double sum = 0;
595 *   for (unsigned int i = 0; i < this->n_source_centers; ++i)
596 *   {
597 *   const Tensor<1, dim> x_minus_xi = p - this->source_centers[i];
598 *   sum +=
599 *   std::exp(-x_minus_xi.norm_square() / (this->width * this->width));
600 *   }
601 *  
602 *   return sum /
603 *   std::pow(2. * numbers::PI * this->width * this->width, dim / 2.);
604 *   }
605 *  
606 *   virtual Tensor<1, dim>
607 *   gradient(const Point<dim> &p,
608 *   const unsigned int /*component*/ = 0) const override
609 *   {
610 *   Tensor<1, dim> sum;
611 *   for (unsigned int i = 0; i < this->n_source_centers; ++i)
612 *   {
613 *   const Tensor<1, dim> x_minus_xi = p - this->source_centers[i];
614 *  
615 *   sum +=
616 *   (-2 / (this->width * this->width) *
617 *   std::exp(-x_minus_xi.norm_square() / (this->width * this->width)) *
618 *   x_minus_xi);
619 *   }
620 *  
621 *   return sum /
622 *   std::pow(2. * numbers::PI * this->width * this->width, dim / 2.);
623 *   }
624 *   };
625 *  
626 *  
627 *  
628 * @endcode
629 *
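For reference, the value() and gradient() functions above evaluate a superposition of Gaussians centered at the source points @f$\mathbf x_i@f$ with width @f$w@f$ defined in SolutionBase, normalized by @f$(2\pi w^2)^{-d/2}@f$:
@f[
  u(\mathbf x) = \frac{1}{(2\pi w^2)^{d/2}} \sum_i \exp\left(-\frac{\|\mathbf x-\mathbf x_i\|^2}{w^2}\right),
  \qquad
  \nabla u(\mathbf x) = -\frac{2}{w^2\,(2\pi w^2)^{d/2}} \sum_i \exp\left(-\frac{\|\mathbf x-\mathbf x_i\|^2}{w^2}\right)(\mathbf x-\mathbf x_i).
@f]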
630 * This class implements a function in which the scalar solution and its
631 * negative gradient are collected together. It is used when computing the
632 * error of the HDG approximation; its implementation simply calls the
633 * value() and gradient() functions of the Solution class.
634 *
635 * @code
636 *   template <int dim>
637 *   class SolutionAndGradient : public Function<dim>, protected SolutionBase<dim>
638 *   {
639 *   public:
640 *   SolutionAndGradient()
641 *   : Function<dim>(dim + 1)
642 *   {}
643 *  
644 *   virtual void vector_value(const Point<dim> &p,
645 *   Vector<double> &v) const override
646 *   {
647 *   AssertDimension(v.size(), dim + 1);
648 *   Solution<dim> solution;
649 *   Tensor<1, dim> grad = solution.gradient(p);
650 *   for (unsigned int d = 0; d < dim; ++d)
651 *   v[d] = -grad[d];
652 *   v[dim] = solution.value(p);
653 *   }
654 *   };
655 *  
656 *  
657 *  
658 * @endcode
659 *
660 * Next comes the implementation of the convection velocity. As described in
661 * the introduction, we choose a velocity field that is @f$(y, -x)@f$ in 2d and
662 * @f$(y, -x, 1)@f$ in 3d. This gives a divergence-free velocity field.
663 *
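As a quick sanity check that this velocity field is indeed divergence free, note that in 2d
@f[
  \nabla\cdot\mathbf c = \frac{\partial}{\partial x}\, y + \frac{\partial}{\partial y}\,(-x) = 0,
@f]
and the constant third component added in 3d does not contribute to the divergence either.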
664 * @code
665 *   template <int dim>
666 *   class ConvectionVelocity : public TensorFunction<1, dim>
667 *   {
668 *   public:
669 *   ConvectionVelocity()
670 *   : TensorFunction<1, dim>()
671 *   {}
672 *  
673 *   virtual Tensor<1, dim> value(const Point<dim> &p) const override
674 *   {
675 *   Tensor<1, dim> convection;
676 *   switch (dim)
677 *   {
678 *   case 1:
679 *   convection[0] = 1;
680 *   break;
681 *   case 2:
682 *   convection[0] = p[1];
683 *   convection[1] = -p[0];
684 *   break;
685 *   case 3:
686 *   convection[0] = p[1];
687 *   convection[1] = -p[0];
688 *   convection[2] = 1;
689 *   break;
690 *   default:
691 *   DEAL_II_NOT_IMPLEMENTED();
692 *   }
693 *   return convection;
694 *   }
695 *   };
696 *  
697 *  
698 *  
699 * @endcode
700 *
701 * The last function we implement is the right hand side for the
702 * manufactured solution. It is very similar to @ref step_7 "step-7", with the exception
703 * that we now have a convection term instead of the reaction term. Since
704 * the velocity field is incompressible, i.e., @f$\nabla \cdot \mathbf{c} =
705 * 0@f$, the advection term simply reads @f$\mathbf{c} \cdot \nabla u@f$.
706 *
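To see where the formula in the code below comes from, apply the operator @f$-\Delta + \mathbf c\cdot\nabla@f$ to a single Gaussian term @f$g_i(\mathbf x) = \exp(-\|\mathbf x-\mathbf x_i\|^2/w^2)@f$ of the solution above, which gives
@f[
  -\Delta g_i + \mathbf c\cdot\nabla g_i
  = \frac{2\,\mathrm{dim} - 2\,\mathbf c\cdot(\mathbf x-\mathbf x_i) - 4\,\|\mathbf x-\mathbf x_i\|^2/w^2}{w^2}\; g_i.
@f]
Summing over the source centers and applying the same normalization as before yields exactly the expression evaluated in value().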
707 * @code
708 *   template <int dim>
709 *   class RightHandSide : public Function<dim>, protected SolutionBase<dim>
710 *   {
711 *   public:
712 *   virtual double value(const Point<dim> &p,
713 *   const unsigned int /*component*/ = 0) const override
714 *   {
715 *   ConvectionVelocity<dim> convection_velocity;
716 *   Tensor<1, dim> convection = convection_velocity.value(p);
717 *   double sum = 0;
718 *   for (unsigned int i = 0; i < this->n_source_centers; ++i)
719 *   {
720 *   const Tensor<1, dim> x_minus_xi = p - this->source_centers[i];
721 *  
722 *   sum +=
723 *   ((2 * dim - 2 * convection * x_minus_xi -
724 *   4 * x_minus_xi.norm_square() / (this->width * this->width)) /
725 *   (this->width * this->width) *
726 *   std::exp(-x_minus_xi.norm_square() / (this->width * this->width)));
727 *   }
728 *  
729 *   return sum /
730 *   std::pow(2. * numbers::PI * this->width * this->width, dim / 2.);
731 *   }
732 *   };
733 *  
734 *  
735 *  
736 * @endcode
737 *
738 *
739 * <a name="step_51-TheHDGsolverclass"></a>
740 * <h3>The HDG solver class</h3>
741 *
742
743 *
744 * The HDG solution procedure follows closely that of @ref step_7 "step-7". The major
745 * difference is the use of three different sets of DoFHandler and FE
746 * objects, along with the ChunkSparseMatrix and the corresponding solution
747 * vectors. We also use WorkStream to enable a multithreaded local solution
748 * process which exploits the embarrassingly parallel nature of the local
749 * solver. For WorkStream, we define the local operations on a cell and a
750 * copy function into the global matrix and vector. We do this both for the
751 * assembly (which is run twice, once when we generate the system matrix and
752 * once when we compute the element-interior solutions from the skeleton
753 * values) and for the postprocessing where we extract a solution that
754 * converges at higher order.
755 *
756 * @code
757 *   template <int dim>
758 *   class HDG
759 *   {
760 *   public:
761 *   enum RefinementMode
762 *   {
763 *   global_refinement,
764 *   adaptive_refinement
765 *   };
766 *  
767 *   HDG(const unsigned int degree, const RefinementMode refinement_mode);
768 *   void run();
769 *  
770 *   private:
771 *   void setup_system();
772 *   void assemble_system(const bool reconstruct_trace = false);
773 *   void solve();
774 *   void postprocess();
775 *   void refine_grid(const unsigned int cycle);
776 *   void output_results(const unsigned int cycle);
777 *  
778 * @endcode
779 *
780 * Data for the assembly and solution of the primal variables.
781 *
782 * @code
783 *   struct PerTaskData;
784 *   struct ScratchData;
785 *  
786 * @endcode
787 *
788 * Post-processing the solution to obtain @f$u^*@f$ is an element-by-element
789 * procedure; as such, we do not need to assemble any global data and do
790 * not declare any 'task data' for WorkStream to use.
791 *
792 * @code
793 *   struct PostProcessScratchData;
794 *  
795 * @endcode
796 *
797 * The following three functions are used by WorkStream to do the actual
798 * work of the program.
799 *
800 * @code
801 *   void assemble_system_one_cell(
802 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
803 *   ScratchData &scratch,
804 *   PerTaskData &task_data);
805 *  
806 *   void copy_local_to_global(const PerTaskData &data);
807 *  
808 *   void postprocess_one_cell(
809 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
810 *   PostProcessScratchData &scratch,
811 *   unsigned int &empty_data);
812 *  
813 *  
814 *   Triangulation<dim> triangulation;
815 *  
816 * @endcode
817 *
818 * The 'local' solutions are interior to each element. These
819 * represent the primal solution field @f$u@f$ as well as the auxiliary
820 * field @f$\mathbf{q}@f$.
821 *
822 * @code
823 *   const FESystem<dim> fe_local;
824 *   DoFHandler<dim> dof_handler_local;
825 *   Vector<double> solution_local;
826 *  
827 * @endcode
828 *
829 * The new finite element type and corresponding <code>DoFHandler</code> are
830 * used for the global skeleton solution that couples the element-level
831 * local solutions.
832 *
833 * @code
834 *   const FE_FaceQ<dim> fe;
835 *   DoFHandler<dim> dof_handler;
836 *   Vector<double> solution;
837 *   Vector<double> system_rhs;
838 *  
839 * @endcode
840 *
841 * As stated in the introduction, HDG solutions can be post-processed to
842 * attain superconvergence rates of @f$\mathcal{O}(h^{p+2})@f$. The
843 * post-processed solution is a discontinuous finite element solution
844 * representing the primal variable on the interior of each cell. We define
845 * a FE type of degree @f$p+1@f$ to represent this post-processed solution,
846 * which we only use for output after constructing it.
847 *
848 * @code
849 *   const FE_DGQ<dim> fe_u_post;
850 *   DoFHandler<dim> dof_handler_u_post;
851 *   Vector<double> solution_u_post;
852 *  
853 * @endcode
854 *
855 * The degrees of freedom corresponding to the skeleton strongly enforce
856 * Dirichlet boundary conditions, just as in a continuous Galerkin finite
857 * element method. We can enforce the boundary conditions in an analogous
858 * manner via an AffineConstraints object. In addition, hanging nodes are
859 * handled in the same way as for continuous finite elements: For the face
860 * elements which only define degrees of freedom on the face, this process
861 * sets the solution on the refined side to coincide with the
862 * representation on the coarse side.
863 *
864
865 *
866 * Note that for HDG, the elimination of hanging nodes is not the only
867 * possibility &mdash; in terms of the HDG theory, one could also use the
868 * unknowns from the refined side and express the local solution on the
869 * coarse side through the trace values on the refined side. However, such
870 * a setup is not as easily implemented in terms of deal.II loops and is
871 * not analyzed further here.
872 *
873 * @code
874 *   AffineConstraints<double> constraints;
875 *  
876 * @endcode
877 *
878 * The usage of the ChunkSparseMatrix class is similar to the usual sparse
879 * matrices: You need a sparsity pattern of type ChunkSparsityPattern and
880 * the actual matrix object. When creating the sparsity pattern, we just
881 * have to additionally pass the size of local blocks.
882 *
883 * @code
884 *   ChunkSparsityPattern sparsity_pattern;
885 *   ChunkSparseMatrix<double> system_matrix;
886 *  
887 * @endcode
888 *
889 * Same as @ref step_7 "step-7":
890 *
891 * @code
892 *   const RefinementMode refinement_mode;
893 *   ConvergenceTable convergence_table;
894 *   };
895 *  
896 * @endcode
897 *
898 *
899 * <a name="step_51-TheHDGclassimplementation"></a>
900 * <h3>The HDG class implementation</h3>
901 *
902
903 *
904 *
905 * <a name="step_51-Constructor"></a>
906 * <h4>Constructor</h4>
907 * The constructor is similar to those in other examples, with the exception
908 * of handling multiple DoFHandler and FiniteElement objects. Note that we
909 * create a system of finite elements for the local DG part, including the
910 * gradient/flux part and the scalar part.
911 *
912 * @code
913 *   template <int dim>
914 *   HDG<dim>::HDG(const unsigned int degree, const RefinementMode refinement_mode)
915 *   : fe_local(FE_DGQ<dim>(degree) ^ dim, FE_DGQ<dim>(degree))
916 *   , dof_handler_local(triangulation)
917 *   , fe(degree)
918 *   , dof_handler(triangulation)
919 *   , fe_u_post(degree + 1)
920 *   , dof_handler_u_post(triangulation)
921 *   , refinement_mode(refinement_mode)
922 *   {}
923 *  
924 *  
925 *  
926 * @endcode
927 *
928 *
929 * <a name="step_51-HDGsetup_system"></a>
930 * <h4>HDG::setup_system</h4>
931 * The system for an HDG solution is set up in a manner analogous to most
932 * of the other tutorial programs. We are careful to distribute dofs with
933 * all of our DoFHandler objects. The @p solution and @p system_matrix
934 * objects go with the global skeleton solution.
935 *
936 * @code
937 *   template <int dim>
938 *   void HDG<dim>::setup_system()
939 *   {
940 *   dof_handler_local.distribute_dofs(fe_local);
941 *   dof_handler.distribute_dofs(fe);
942 *   dof_handler_u_post.distribute_dofs(fe_u_post);
943 *  
944 *   std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
945 *   << std::endl;
946 *  
947 *   solution.reinit(dof_handler.n_dofs());
948 *   system_rhs.reinit(dof_handler.n_dofs());
949 *  
950 *   solution_local.reinit(dof_handler_local.n_dofs());
951 *   solution_u_post.reinit(dof_handler_u_post.n_dofs());
952 *  
953 *   constraints.clear();
954 *   DoFTools::make_hanging_node_constraints(dof_handler, constraints);
955 *   std::map<types::boundary_id, const Function<dim> *> boundary_functions;
956 *   Solution<dim> solution_function;
957 *   boundary_functions[0] = &solution_function;
958 *   VectorTools::project_boundary_values(dof_handler,
959 *   boundary_functions,
960 *   QGauss<dim - 1>(fe.degree + 1),
961 *   constraints);
962 *   constraints.close();
963 *  
964 * @endcode
965 *
966 * When creating the chunk sparsity pattern, we first create the usual
967 * dynamic sparsity pattern and then set the chunk size, which is equal
968 * to the number of dofs on a face, when copying this into the final
969 * sparsity pattern.
970 *
971 * @code
972 *   {
973 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
974 *   DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
975 *   sparsity_pattern.copy_from(dsp, fe.n_dofs_per_face());
976 *   }
977 *   system_matrix.reinit(sparsity_pattern);
978 *   }
979 *  
980 *  
981 *  
982 * @endcode
983 *
984 *
985 * <a name="step_51-HDGPerTaskData"></a>
986 * <h4>HDG::PerTaskData</h4>
987 * Next comes the definition of the local data structures for the parallel
988 * assembly. The first structure @p PerTaskData contains the local vector
989 * and matrix that are written into the global matrix, whereas the
990 * ScratchData contains all data that we need for the local assembly. There
991 * is one variable worth noting here, namely the boolean variable @p
992 * trace_reconstruct. As mentioned in the introduction, we solve the HDG
993 * system in two steps. First, we create a linear system for the skeleton
994 * system where we condense the local part into it via the Schur complement
995 * @f$D-CA^{-1}B@f$. Then, we solve for the local part using the skeleton
996 * solution. For these two steps, we need the same matrices on the elements
997 * twice, which we want to compute by two assembly steps. Since most of the
998 * code is similar, we do this with the same function but only switch
999 * between the two based on a flag that we set when starting the
1000 * assembly. Since we need to pass this information on to the local worker
1001 * routines, we store it once in the task data.
1002 *
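Schematically, and using the notation of the introduction with @f$U@f$ denoting the element-interior unknowns and @f$\Lambda@f$ the skeleton unknowns, the two assembly passes correspond to
@f[
  \begin{pmatrix} A & B \\ C & D \end{pmatrix}
  \begin{pmatrix} U \\ \Lambda \end{pmatrix}
  =
  \begin{pmatrix} F \\ G \end{pmatrix}
  \quad\Longrightarrow\quad
  (D - C A^{-1} B)\,\Lambda = G - C A^{-1} F,
  \qquad
  U = A^{-1}\,(F - B\,\Lambda).
@f]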
1003 * @code
1004 *   template <int dim>
1005 *   struct HDG<dim>::PerTaskData
1006 *   {
1007 *   FullMatrix<double> cell_matrix;
1008 *   Vector<double> cell_vector;
1009 *   std::vector<types::global_dof_index> dof_indices;
1010 *  
1011 *   bool trace_reconstruct;
1012 *  
1013 *   PerTaskData(const unsigned int n_dofs, const bool trace_reconstruct)
1014 *   : cell_matrix(n_dofs, n_dofs)
1015 *   , cell_vector(n_dofs)
1016 *   , dof_indices(n_dofs)
1017 *   , trace_reconstruct(trace_reconstruct)
1018 *   {}
1019 *   };
1020 *  
1021 *  
1022 *  
1023 * @endcode
1024 *
1025 *
1026 * <a name="step_51-HDGScratchData"></a>
1027 * <h4>HDG::ScratchData</h4>
1028 * @p ScratchData contains persistent data for each
1029 * thread within WorkStream. The FEValues, matrix,
1030 * and vector objects should be familiar by now. There are two objects that
1031 * need to be discussed: `std::vector<std::vector<unsigned int> >
1032 * fe_local_support_on_face` and `std::vector<std::vector<unsigned int> >
1033 * fe_support_on_face`. These are used to indicate whether or not the finite
1034 * elements chosen have support (non-zero values) on a given face of the
1035 * reference cell for the local part associated to @p fe_local and the
1036 * skeleton part @p fe. We extract this information in the
1037 * constructor and store it once for all cells that we work on. Had we not
1038 * stored this information, we would be forced to assemble a large number of
1039 * zero terms on each cell, which would significantly slow the program.
1040 *
1041 * @code
1042 *   template <int dim>
1043 *   struct HDG<dim>::ScratchData
1044 *   {
1045 *   FEValues<dim> fe_values_local;
1046 *   FEFaceValues<dim> fe_face_values_local;
1047 *   FEFaceValues<dim> fe_face_values;
1048 *  
1049 *   FullMatrix<double> ll_matrix;
1050 *   FullMatrix<double> lf_matrix;
1051 *   FullMatrix<double> fl_matrix;
1052 *   FullMatrix<double> tmp_matrix;
1053 *   Vector<double> l_rhs;
1054 *   Vector<double> tmp_rhs;
1055 *  
1056 *   std::vector<Tensor<1, dim>> q_phi;
1057 *   std::vector<double> q_phi_div;
1058 *   std::vector<double> u_phi;
1059 *   std::vector<Tensor<1, dim>> u_phi_grad;
1060 *   std::vector<double> tr_phi;
1061 *   std::vector<double> trace_values;
1062 *  
1063 *   std::vector<std::vector<unsigned int>> fe_local_support_on_face;
1064 *   std::vector<std::vector<unsigned int>> fe_support_on_face;
1065 *  
1066 *   ConvectionVelocity<dim> convection_velocity;
1067 *   RightHandSide<dim> right_hand_side;
1068 *   const Solution<dim> exact_solution;
1069 *  
1070 *   ScratchData(const FiniteElement<dim> &fe,
1071 *   const FiniteElement<dim> &fe_local,
1072 *   const QGauss<dim> &quadrature_formula,
1073 *   const QGauss<dim - 1> &face_quadrature_formula,
1074 *   const UpdateFlags local_flags,
1075 *   const UpdateFlags local_face_flags,
1076 *   const UpdateFlags flags)
1077 *   : fe_values_local(fe_local, quadrature_formula, local_flags)
1078 *   , fe_face_values_local(fe_local,
1079 *   face_quadrature_formula,
1080 *   local_face_flags)
1081 *   , fe_face_values(fe, face_quadrature_formula, flags)
1082 *   , ll_matrix(fe_local.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1083 *   , lf_matrix(fe_local.n_dofs_per_cell(), fe.n_dofs_per_cell())
1084 *   , fl_matrix(fe.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1085 *   , tmp_matrix(fe.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1086 *   , l_rhs(fe_local.n_dofs_per_cell())
1087 *   , tmp_rhs(fe_local.n_dofs_per_cell())
1088 *   , q_phi(fe_local.n_dofs_per_cell())
1089 *   , q_phi_div(fe_local.n_dofs_per_cell())
1090 *   , u_phi(fe_local.n_dofs_per_cell())
1091 *   , u_phi_grad(fe_local.n_dofs_per_cell())
1092 *   , tr_phi(fe.n_dofs_per_cell())
1093 *   , trace_values(face_quadrature_formula.size())
1094 *   , fe_local_support_on_face(GeometryInfo<dim>::faces_per_cell)
1095 *   , fe_support_on_face(GeometryInfo<dim>::faces_per_cell)
1096 *   , exact_solution()
1097 *   {
1098 *   for (const unsigned int face_no : GeometryInfo<dim>::face_indices())
1099 *   for (unsigned int i = 0; i < fe_local.n_dofs_per_cell(); ++i)
1100 *   {
1101 *   if (fe_local.has_support_on_face(i, face_no))
1102 *   fe_local_support_on_face[face_no].push_back(i);
1103 *   }
1104 *  
1105 *   for (const unsigned int face_no : GeometryInfo<dim>::face_indices())
1106 *   for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
1107 *   {
1108 *   if (fe.has_support_on_face(i, face_no))
1109 *   fe_support_on_face[face_no].push_back(i);
1110 *   }
1111 *   }
1112 *  
1113 *   ScratchData(const ScratchData &sd)
1114 *   : fe_values_local(sd.fe_values_local.get_fe(),
1115 *   sd.fe_values_local.get_quadrature(),
1116 *   sd.fe_values_local.get_update_flags())
1117 *   , fe_face_values_local(sd.fe_face_values_local.get_fe(),
1118 *   sd.fe_face_values_local.get_quadrature(),
1119 *   sd.fe_face_values_local.get_update_flags())
1120 *   , fe_face_values(sd.fe_face_values.get_fe(),
1121 *   sd.fe_face_values.get_quadrature(),
1122 *   sd.fe_face_values.get_update_flags())
1123 *   , ll_matrix(sd.ll_matrix)
1124 *   , lf_matrix(sd.lf_matrix)
1125 *   , fl_matrix(sd.fl_matrix)
1126 *   , tmp_matrix(sd.tmp_matrix)
1127 *   , l_rhs(sd.l_rhs)
1128 *   , tmp_rhs(sd.tmp_rhs)
1129 *   , q_phi(sd.q_phi)
1130 *   , q_phi_div(sd.q_phi_div)
1131 *   , u_phi(sd.u_phi)
1132 *   , u_phi_grad(sd.u_phi_grad)
1133 *   , tr_phi(sd.tr_phi)
1134 *   , trace_values(sd.trace_values)
1135 *   , fe_local_support_on_face(sd.fe_local_support_on_face)
1136 *   , fe_support_on_face(sd.fe_support_on_face)
1137 *   , exact_solution()
1138 *   {}
1139 *   };
1140 *  
1141 *  
1142 *  
1143 * @endcode
1144 *
1145 *
1146 * <a name="step_51-HDGPostProcessScratchData"></a>
1147 * <h4>HDG::PostProcessScratchData</h4>
1148 * @p PostProcessScratchData contains the data used by WorkStream
1149 * when post-processing the local solution @f$u^*@f$. It is similar to, but much
1150 * simpler than, @p ScratchData.
1151 *
1152 * @code
1153 *   template <int dim>
1154 *   struct HDG<dim>::PostProcessScratchData
1155 *   {
1156 *   FEValues<dim> fe_values_local;
1157 *   FEValues<dim> fe_values;
1158 *  
1159 *   std::vector<double> u_values;
1160 *   std::vector<Tensor<1, dim>> u_gradients;
1161 *   FullMatrix<double> cell_matrix;
1162 *  
1163 *   Vector<double> cell_rhs;
1164 *   Vector<double> cell_sol;
1165 *  
1166 *   PostProcessScratchData(const FiniteElement<dim> &fe,
1167 *   const FiniteElement<dim> &fe_local,
1168 *   const QGauss<dim> &quadrature_formula,
1169 *   const UpdateFlags local_flags,
1170 *   const UpdateFlags flags)
1171 *   : fe_values_local(fe_local, quadrature_formula, local_flags)
1172 *   , fe_values(fe, quadrature_formula, flags)
1173 *   , u_values(quadrature_formula.size())
1174 *   , u_gradients(quadrature_formula.size())
1175 *   , cell_matrix(fe.n_dofs_per_cell(), fe.n_dofs_per_cell())
1176 *   , cell_rhs(fe.n_dofs_per_cell())
1177 *   , cell_sol(fe.n_dofs_per_cell())
1178 *   {}
1179 *  
1180 *   PostProcessScratchData(const PostProcessScratchData &sd)
1181 *   : fe_values_local(sd.fe_values_local.get_fe(),
1182 *   sd.fe_values_local.get_quadrature(),
1183 *   sd.fe_values_local.get_update_flags())
1184 *   , fe_values(sd.fe_values.get_fe(),
1185 *   sd.fe_values.get_quadrature(),
1186 *   sd.fe_values.get_update_flags())
1187 *   , u_values(sd.u_values)
1188 *   , u_gradients(sd.u_gradients)
1189 *   , cell_matrix(sd.cell_matrix)
1190 *   , cell_rhs(sd.cell_rhs)
1191 *   , cell_sol(sd.cell_sol)
1192 *   {}
1193 *   };
1194 *  
1195 *  
1196 *  
1197 * @endcode
1198 *
1199 *
1200 * <a name="step_51-HDGassemble_system"></a>
1201 * <h4>HDG::assemble_system</h4>
1202 * The @p assemble_system function is similar to the one in @ref step_32 "step-32", where
1203 * the quadrature formula and the update flags are set up, and then
1204 * <code>WorkStream</code> is used to do the work in a multi-threaded
1205 * manner. The @p trace_reconstruct input parameter is used to decide
1206 * whether we are solving for the global skeleton solution (false) or the
1207 * local solution (true).
1208 *
1209
1210 *
1211 * One thing worth noting for the multi-threaded execution of assembly is
1212 * the fact that the local computations in `assemble_system_one_cell()` call
1213 * into BLAS and LAPACK functions if those are available in deal.II. Thus,
1214 * the underlying BLAS/LAPACK library must support calls from multiple
1215 * threads at the same time. Most implementations do support this, but some
1216 * libraries need to be built in a specific way to avoid problems. For
1217 * example, OpenBLAS compiled without multithreading inside the BLAS/LAPACK
1218 * calls needs to be built with the flag `USE_LOCKING` set to true.
1219 *
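As an aside that is not part of this program: if the BLAS/LAPACK library in use cannot be rebuilt with such a flag, a pragmatic (if slower) workaround is to restrict deal.II's thread pool so that the local solvers never issue concurrent BLAS/LAPACK calls. A minimal sketch of this workaround could look as follows:
@code
#include <deal.II/base/multithread_info.h>

int main()
{
  // Workaround sketch (not part of step-51): limit deal.II to a single
  // thread so that WorkStream runs the cell workers serially and no
  // concurrent BLAS/LAPACK calls are made.
  dealii::MultithreadInfo::set_thread_limit(1);

  // ... set up and run the tutorial program as usual ...
  return 0;
}
@endcode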
1220 * @code
1221 *   template <int dim>
1222 *   void HDG<dim>::assemble_system(const bool trace_reconstruct)
1223 *   {
1224 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1225 *   const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
1226 *  
1227 *   const UpdateFlags local_flags(update_values | update_gradients |
1228 *   update_JxW_values | update_quadrature_points);
1229 *  
1230 *   const UpdateFlags local_face_flags(update_values);
1231 *  
1232 *   const UpdateFlags flags(update_values | update_normal_vectors |
1233 *   update_quadrature_points | update_JxW_values);
1234 *  
1235 *   PerTaskData task_data(fe.n_dofs_per_cell(), trace_reconstruct);
1236 *   ScratchData scratch(fe,
1237 *   fe_local,
1238 *   quadrature_formula,
1239 *   face_quadrature_formula,
1240 *   local_flags,
1241 *   local_face_flags,
1242 *   flags);
1243 *  
1244 *   WorkStream::run(dof_handler.begin_active(),
1245 *   dof_handler.end(),
1246 *   *this,
1247 *   &HDG<dim>::assemble_system_one_cell,
1248 *   &HDG<dim>::copy_local_to_global,
1249 *   scratch,
1250 *   task_data);
1251 *   }
1252 *  
1253 *  
1254 *  
1255 * @endcode
1256 *
1257 *
1258 * <a name="step_51-HDGassemble_system_one_cell"></a>
1259 * <h4>HDG::assemble_system_one_cell</h4>
1260 * The real work of the HDG program is done by @p assemble_system_one_cell.
1261 * Assembling the local matrices @f$A, B, C@f$ is done here, along with the
1262 * local contributions of the global matrix @f$D@f$.
1263 *
1264 * @code
1265 *   template <int dim>
1266 *   void HDG<dim>::assemble_system_one_cell(
1267 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
1268 *   ScratchData &scratch,
1269 *   PerTaskData &task_data)
1270 *   {
1271 * @endcode
1272 *
1273 * Construct iterator for dof_handler_local for FEValues reinit function.
1274 *
1275 * @code
1276 *   const typename DoFHandler<dim>::active_cell_iterator loc_cell =
1277 *   cell->as_dof_handler_iterator(dof_handler_local);
1278 *  
1279 *   const unsigned int n_q_points =
1280 *   scratch.fe_values_local.get_quadrature().size();
1281 *   const unsigned int n_face_q_points =
1282 *   scratch.fe_face_values_local.get_quadrature().size();
1283 *  
1284 *   const unsigned int loc_dofs_per_cell =
1285 *   scratch.fe_values_local.get_fe().n_dofs_per_cell();
1286 *  
1287 *   const FEValuesExtractors::Vector fluxes(0);
1288 *   const FEValuesExtractors::Scalar scalar(dim);
1289 *  
1290 *   scratch.ll_matrix = 0;
1291 *   scratch.l_rhs = 0;
1292 *   if (!task_data.trace_reconstruct)
1293 *   {
1294 *   scratch.lf_matrix = 0;
1295 *   scratch.fl_matrix = 0;
1296 *   task_data.cell_matrix = 0;
1297 *   task_data.cell_vector = 0;
1298 *   }
1299 *   scratch.fe_values_local.reinit(loc_cell);
1300 *  
1301 * @endcode
1302 *
1303 * We first compute the cell-interior contribution to the @p ll_matrix
1304 * (referred to as matrix @f$A@f$ in the introduction) corresponding to
1305 * local-local coupling, as well as the local right-hand-side vector. We
1306 * store the values at each quadrature point for the basis functions, the
1307 * right-hand-side value, and the convection velocity, in order to have
1308 * quick access to these fields.
1309 *
1310 * @code
1311 *   for (unsigned int q = 0; q < n_q_points; ++q)
1312 *   {
1313 *   const double rhs_value = scratch.right_hand_side.value(
1314 *   scratch.fe_values_local.quadrature_point(q));
1315 *   const Tensor<1, dim> convection = scratch.convection_velocity.value(
1316 *   scratch.fe_values_local.quadrature_point(q));
1317 *   const double JxW = scratch.fe_values_local.JxW(q);
1318 *   for (unsigned int k = 0; k < loc_dofs_per_cell; ++k)
1319 *   {
1320 *   scratch.q_phi[k] = scratch.fe_values_local[fluxes].value(k, q);
1321 *   scratch.q_phi_div[k] =
1322 *   scratch.fe_values_local[fluxes].divergence(k, q);
1323 *   scratch.u_phi[k] = scratch.fe_values_local[scalar].value(k, q);
1324 *   scratch.u_phi_grad[k] =
1325 *   scratch.fe_values_local[scalar].gradient(k, q);
1326 *   }
1327 *   for (unsigned int i = 0; i < loc_dofs_per_cell; ++i)
1328 *   {
1329 *   for (unsigned int j = 0; j < loc_dofs_per_cell; ++j)
1330 *   scratch.ll_matrix(i, j) +=
1331 *   (scratch.q_phi[i] * scratch.q_phi[j] -
1332 *   scratch.q_phi_div[i] * scratch.u_phi[j] +
1333 *   scratch.u_phi[i] * scratch.q_phi_div[j] -
1334 *   (scratch.u_phi_grad[i] * convection) * scratch.u_phi[j]) *
1335 *   JxW;
1336 *   scratch.l_rhs(i) += scratch.u_phi[i] * rhs_value * JxW;
1337 *   }
1338 *   }
1339 *  
1340 * @endcode
1341 *
1342 * Face terms are assembled on all faces of all elements. This is in
1343 * contrast to more traditional DG methods, where each face is only visited
1344 * once in the assembly procedure.
1345 *
1346 * @code
1347 *   for (const auto face_no : cell->face_indices())
1348 *   {
1349 *   scratch.fe_face_values_local.reinit(loc_cell, face_no);
1350 *   scratch.fe_face_values.reinit(cell, face_no);
1351 *  
1352 * @endcode
1353 *
1354 * The already obtained @f$\hat{u}@f$ values are needed when solving for the
1355 * local variables.
1356 *
1357 * @code
1358 *   if (task_data.trace_reconstruct)
1359 *   scratch.fe_face_values.get_function_values(solution,
1360 *   scratch.trace_values);
1361 *  
1362 *   for (unsigned int q = 0; q < n_face_q_points; ++q)
1363 *   {
1364 *   const double JxW = scratch.fe_face_values.JxW(q);
1365 *   const Point<dim> quadrature_point =
1366 *   scratch.fe_face_values.quadrature_point(q);
1367 *   const Tensor<1, dim> normal =
1368 *   scratch.fe_face_values.normal_vector(q);
1369 *   const Tensor<1, dim> convection =
1370 *   scratch.convection_velocity.value(quadrature_point);
1371 *  
1372 * @endcode
1373 *
1374 * Here we compute the stabilization parameter discussed in the
1375 * introduction: since the diffusion is one and the diffusion
1376 * length scale is set to 1/5, it simply results in a contribution
1377 * of 5 for the diffusion part and the magnitude of convection
1378 * through the element boundary in a centered scheme for the
1379 * convection part.
1380 *
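In formulas, with the diffusion coefficient @f$\kappa = 1@f$ and the diffusion length scale @f$\ell = \frac 15@f$, the parameter computed below is
@f[
  \tau = \frac{\kappa}{\ell} + |\mathbf c\cdot\mathbf n| = 5 + |\mathbf c\cdot\mathbf n|.
@f]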
1381 * @code
1382 *   const double tau_stab = (5. + std::abs(convection * normal));
1383 *  
1384 * @endcode
1385 *
1386 * We store the non-zero flux and scalar values, making use of the
1387 * support_on_face information we created in @p ScratchData.
1388 *
1389 * @code
1390 *   for (unsigned int k = 0;
1391 *   k < scratch.fe_local_support_on_face[face_no].size();
1392 *   ++k)
1393 *   {
1394 *   const unsigned int kk =
1395 *   scratch.fe_local_support_on_face[face_no][k];
1396 *   scratch.q_phi[k] =
1397 *   scratch.fe_face_values_local[fluxes].value(kk, q);
1398 *   scratch.u_phi[k] =
1399 *   scratch.fe_face_values_local[scalar].value(kk, q);
1400 *   }
1401 *  
1402 * @endcode
1403 *
1404 * When @p trace_reconstruct=false, we are preparing to assemble the
1405 * system for the skeleton variable @f$\hat{u}@f$. If this is the case,
1406 * we must assemble all local matrices associated with the problem:
1407 * local-local, local-face, face-local, and face-face. The
1408 * face-face matrix is stored as @p TaskData::cell_matrix, so that
1409 * it can be assembled into the global system by @p
1410 * copy_local_to_global.
1411 *
1412 * @code
1413 *   if (!task_data.trace_reconstruct)
1414 *   {
1415 *   for (unsigned int k = 0;
1416 *   k < scratch.fe_support_on_face[face_no].size();
1417 *   ++k)
1418 *   scratch.tr_phi[k] = scratch.fe_face_values.shape_value(
1419 *   scratch.fe_support_on_face[face_no][k], q);
1420 *   for (unsigned int i = 0;
1421 *   i < scratch.fe_local_support_on_face[face_no].size();
1422 *   ++i)
1423 *   for (unsigned int j = 0;
1424 *   j < scratch.fe_support_on_face[face_no].size();
1425 *   ++j)
1426 *   {
1427 *   const unsigned int ii =
1428 *   scratch.fe_local_support_on_face[face_no][i];
1429 *   const unsigned int jj =
1430 *   scratch.fe_support_on_face[face_no][j];
1431 *   scratch.lf_matrix(ii, jj) +=
1432 *   ((scratch.q_phi[i] * normal +
1433 *   (convection * normal - tau_stab) * scratch.u_phi[i]) *
1434 *   scratch.tr_phi[j]) *
1435 *   JxW;
1436 *  
1437 * @endcode
1438 *
1439 * Note the sign of the face-local matrix @p fl_matrix. We negate
1440 * the sign during assembly here so that we can use the
1441 * FullMatrix::mmult with addition when computing the
1442 * Schur complement.
1443 *
1444 * @code
1445 *   scratch.fl_matrix(jj, ii) -=
1446 *   ((scratch.q_phi[i] * normal +
1447 *   tau_stab * scratch.u_phi[i]) *
1448 *   scratch.tr_phi[j]) *
1449 *   JxW;
1450 *   }
1451 *  
1452 *   for (unsigned int i = 0;
1453 *   i < scratch.fe_support_on_face[face_no].size();
1454 *   ++i)
1455 *   for (unsigned int j = 0;
1456 *   j < scratch.fe_support_on_face[face_no].size();
1457 *   ++j)
1458 *   {
1459 *   const unsigned int ii =
1460 *   scratch.fe_support_on_face[face_no][i];
1461 *   const unsigned int jj =
1462 *   scratch.fe_support_on_face[face_no][j];
1463 *   task_data.cell_matrix(ii, jj) +=
1464 *   ((convection * normal - tau_stab) * scratch.tr_phi[i] *
1465 *   scratch.tr_phi[j]) *
1466 *   JxW;
1467 *   }
1468 *  
1469 *   if (cell->face(face_no)->at_boundary() &&
1470 *   (cell->face(face_no)->boundary_id() == 1))
1471 *   {
1472 *   const double neumann_value =
1473 *   -scratch.exact_solution.gradient(quadrature_point) *
1474 *   normal +
1475 *   convection * normal *
1476 *   scratch.exact_solution.value(quadrature_point);
1477 *   for (unsigned int i = 0;
1478 *   i < scratch.fe_support_on_face[face_no].size();
1479 *   ++i)
1480 *   {
1481 *   const unsigned int ii =
1482 *   scratch.fe_support_on_face[face_no][i];
1483 *   task_data.cell_vector(ii) +=
1484 *   scratch.tr_phi[i] * neumann_value * JxW;
1485 *   }
1486 *   }
1487 *   }
1488 *  
1489 * @endcode
1490 *
1491 * This last term adds the contribution of the term @f$\left<w,\tau
1492 * u_h\right>_{\partial \mathcal T}@f$ to the local matrix. As opposed
1493 * to the face matrices above, we need it in both assembly stages.
1494 *
1495 * @code
1496 *   for (unsigned int i = 0;
1497 *   i < scratch.fe_local_support_on_face[face_no].size();
1498 *   ++i)
1499 *   for (unsigned int j = 0;
1500 *   j < scratch.fe_local_support_on_face[face_no].size();
1501 *   ++j)
1502 *   {
1503 *   const unsigned int ii =
1504 *   scratch.fe_local_support_on_face[face_no][i];
1505 *   const unsigned int jj =
1506 *   scratch.fe_local_support_on_face[face_no][j];
1507 *   scratch.ll_matrix(ii, jj) +=
1508 *   tau_stab * scratch.u_phi[i] * scratch.u_phi[j] * JxW;
1509 *   }
1510 *  
1511 * @endcode
1512 *
1513 * When @p trace_reconstruct=true, we are solving for the local
1514 * solutions on an element by element basis. The local
1515 * right-hand-side is calculated by replacing the basis functions @p
1516 * tr_phi in the @p lf_matrix computation by the computed values @p
1517 * trace_values. Of course, the sign of the matrix is now minus
1518 * since we have moved everything to the other side of the equation.
1519 *
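In terms of the block system written out above, this second stage solves
@f[
  A\,U = F - B\,\Lambda
@f]
element by element: the already known trace values @f$\Lambda@f$ have been moved to the right-hand side, which is why they enter with the opposite sign of the corresponding matrix entries.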
1520 * @code
1521 *   if (task_data.trace_reconstruct)
1522 *   for (unsigned int i = 0;
1523 *   i < scratch.fe_local_support_on_face[face_no].size();
1524 *   ++i)
1525 *   {
1526 *   const unsigned int ii =
1527 *   scratch.fe_local_support_on_face[face_no][i];
1528 *   scratch.l_rhs(ii) -=
1529 *   (scratch.q_phi[i] * normal +
1530 *   scratch.u_phi[i] * (convection * normal - tau_stab)) *
1531 *   scratch.trace_values[q] * JxW;
1532 *   }
1533 *   }
1534 *   }
1535 *  
1536 * @endcode
1537 *
1538 * Once assembly of all of the local contributions is complete, we must
1539 * either: (1) assemble the global system, or (2) compute the local solution
1540 * values and save them. In either case, the first step is to invert the
1541 * local-local matrix.
1542 *
1543 * @code
1544 *   scratch.ll_matrix.gauss_jordan();
1545 *  
1546 * @endcode
1547 *
1548 * For (1), we compute the Schur complement and add it to the @p
1549 * cell_matrix, matrix @f$D@f$ in the introduction.
1550 *
1551 * @code
1552 *   if (task_data.trace_reconstruct == false)
1553 *   {
1554 *   scratch.fl_matrix.mmult(scratch.tmp_matrix, scratch.ll_matrix);
1555 *   scratch.tmp_matrix.vmult_add(task_data.cell_vector, scratch.l_rhs);
1556 *   scratch.tmp_matrix.mmult(task_data.cell_matrix,
1557 *   scratch.lf_matrix,
1558 *   true);
1559 *   cell->get_dof_indices(task_data.dof_indices);
1560 *   }
1561 * @endcode
1562 *
1563 * For (2), we are simply solving (ll_matrix).(solution_local) = (l_rhs).
1564 * Hence, we multiply @p l_rhs by our already inverted local-local matrix
1565 * and store the result using the <code>set_dof_values</code> function.
1566 *
1567 * @code
1568 *   else
1569 *   {
1570 *   scratch.ll_matrix.vmult(scratch.tmp_rhs, scratch.l_rhs);
1571 *   loc_cell->set_dof_values(scratch.tmp_rhs, solution_local);
1572 *   }
1573 *   }
1574 *  
1575 *  
1576 *  
1577 * @endcode
1578 *
1579 *
1580 * <a name="step_51-HDGcopy_local_to_global"></a>
1581 * <h4>HDG::copy_local_to_global</h4>
1582 * If we are in the first step of the solution, i.e. @p trace_reconstruct=false,
1583 * then we assemble the local matrices into the global system.
1584 *
1585 * @code
1586 *   template <int dim>
1587 *   void HDG<dim>::copy_local_to_global(const PerTaskData &data)
1588 *   {
1589 *   if (data.trace_reconstruct == false)
1590 *   constraints.distribute_local_to_global(data.cell_matrix,
1591 *   data.cell_vector,
1592 *   data.dof_indices,
1593 *   system_matrix,
1594 *   system_rhs);
1595 *   }
1596 *  
1597 *  
1598 *  
1599 * @endcode
1600 *
1601 *
1602 * <a name="step_51-HDGsolve"></a>
1603 * <h4>HDG::solve</h4>
1604 * The skeleton solution is solved for by using a BiCGStab solver with
1605 * an identity preconditioner.
1606 *
1607 * @code
1608 *   template <int dim>
1609 *   void HDG<dim>::solve()
1610 *   {
1611 *   SolverControl solver_control(system_matrix.m() * 10,
1612 *   1e-11 * system_rhs.l2_norm());
1613 *   SolverBicgstab<Vector<double>> solver(solver_control);
1614 *   solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());
1615 *  
1616 *   std::cout << " Number of BiCGStab iterations: "
1617 *   << solver_control.last_step() << std::endl;
1618 *  
1619 *   system_matrix.clear();
1620 *   sparsity_pattern.reinit(0, 0, 0, 1);
1621 *  
1622 *   constraints.distribute(solution);
1623 *  
1624 * @endcode
1625 *
1626 * Once we have solved for the skeleton solution,
1627 * we can solve for the local solutions in an element-by-element
1628 * fashion. We do this by re-using the same @p assemble_system function
1629 * but switching @p trace_reconstruct to true.
1630 *
1631 * @code
1632 *   assemble_system(true);
1633 *   }
1634 *  
1635 *  
1636 *  
1637 * @endcode
1638 *
1639 *
1640 * <a name="step_51-HDGpostprocess"></a>
1641 * <h4>HDG::postprocess</h4>
1642 *
1643
1644 *
1645 * The postprocess method serves two purposes. First, we want to construct a
1646 * post-processed scalar variable in the element space of degree @f$p+1@f$ that
1647 * we hope will converge at order @f$p+2@f$. This is again an element-by-element
1648 * process and only involves the scalar solution as well as the gradient on
1649 * the local cell. To do this, we introduce the already defined scratch data
1650 * together with some update flags and run the work stream to do this in
1651 * parallel.
1652 *
1653
1654 *
1655 * Secondly, we want to compute discretization errors just as we did in
1656 * @ref step_7 "step-7". The overall procedure is similar with calls to
1657 * VectorTools::integrate_difference. The difference is in how we compute
1658 * the errors for the scalar variable and the gradient variable. In @ref step_7 "step-7",
1659 * we did this by computing @p L2_norm or @p H1_seminorm
1660 * contributions. Here, the local DoFHandler stores both quantities in
1661 * separate vector components: components <code>[0, dim)</code> for the
1662 * gradient and component @p dim for the scalar. To compute the error in
1663 * either of them, we hence use a ComponentSelectFunction that picks the
1664 * respective components, together with the @p SolutionAndGradient class
1665 * introduced above that provides the analytic values of both. Eventually,
1666 * we also compute the L2-error of the
1667 * post-processed solution and add the results into the convergence table.
1668 *
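For clarity, the three errors added to the convergence table below are, with @f$u_h@f$ the scalar part of the local solution, @f$\mathbf q_h@f$ the discrete flux, and @f$u_h^\ast@f$ the post-processed solution,
@f[
  e_u = \|u - u_h\|_{L^2(\Omega)}, \qquad
  e_{\mathbf q} = \|\mathbf q_h + \nabla u\|_{L^2(\Omega)}, \qquad
  e_{u^\ast} = \|u - u_h^\ast\|_{L^2(\Omega)}.
@f]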
1669 * @code
1670 *   template <int dim>
1671 *   void HDG<dim>::postprocess()
1672 *   {
1673 *   {
1674 *   const QGauss<dim> quadrature_formula(fe_u_post.degree + 1);
1675 *   const UpdateFlags local_flags(update_values);
1676 *   const UpdateFlags flags(update_values | update_gradients |
1677 *   update_JxW_values);
1678 *  
1679 *   PostProcessScratchData scratch(
1680 *   fe_u_post, fe_local, quadrature_formula, local_flags, flags);
1681 *  
1682 *   WorkStream::run(
1683 *   dof_handler_u_post.begin_active(),
1684 *   dof_handler_u_post.end(),
1685 *   [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
1686 *   PostProcessScratchData &scratch,
1687 *   unsigned int &data) {
1688 *   this->postprocess_one_cell(cell, scratch, data);
1689 *   },
1690 *   std::function<void(const unsigned int &)>(),
1691 *   scratch,
1692 *   0U);
1693 *   }
1694 *  
1695 *   Vector<float> difference_per_cell(triangulation.n_active_cells());
1696 *  
1697 *   ComponentSelectFunction<dim> value_select(dim, dim + 1);
1698 *   VectorTools::integrate_difference(dof_handler_local,
1699 *   solution_local,
1700 *   SolutionAndGradient<dim>(),
1701 *   difference_per_cell,
1702 *   QGauss<dim>(fe.degree + 2),
1703 *   VectorTools::L2_norm,
1704 *   &value_select);
1705 *   const double L2_error =
1706 *   VectorTools::compute_global_error(triangulation,
1707 *   difference_per_cell,
1708 *   VectorTools::L2_norm);
1709 *  
1710 *   ComponentSelectFunction<dim> gradient_select(
1711 *   std::pair<unsigned int, unsigned int>(0, dim), dim + 1);
1712 *   VectorTools::integrate_difference(dof_handler_local,
1713 *   solution_local,
1714 *   SolutionAndGradient<dim>(),
1715 *   difference_per_cell,
1716 *   QGauss<dim>(fe.degree + 2),
1717 *   VectorTools::L2_norm,
1718 *   &gradient_select);
1719 *   const double grad_error =
1720 *   VectorTools::compute_global_error(triangulation,
1721 *   difference_per_cell,
1722 *   VectorTools::L2_norm);
1723 *  
1724 *   VectorTools::integrate_difference(dof_handler_u_post,
1725 *   solution_u_post,
1726 *   Solution<dim>(),
1727 *   difference_per_cell,
1728 *   QGauss<dim>(fe.degree + 3),
1729 *   VectorTools::L2_norm);
1730 *   const double post_error =
1731 *   VectorTools::compute_global_error(triangulation,
1732 *   difference_per_cell,
1733 *   VectorTools::L2_norm);
1734 *  
1735 *   convergence_table.add_value("cells", triangulation.n_active_cells());
1736 *   convergence_table.add_value("dofs", dof_handler.n_dofs());
1737 *  
1738 *   convergence_table.add_value("val L2", L2_error);
1739 *   convergence_table.set_scientific("val L2", true);
1740 *   convergence_table.set_precision("val L2", 3);
1741 *  
1742 *   convergence_table.add_value("grad L2", grad_error);
1743 *   convergence_table.set_scientific("grad L2", true);
1744 *   convergence_table.set_precision("grad L2", 3);
1745 *  
1746 *   convergence_table.add_value("val L2-post", post_error);
1747 *   convergence_table.set_scientific("val L2-post", true);
1748 *   convergence_table.set_precision("val L2-post", 3);
1749 *   }
1750 *  
1751 *  
1752 *  
1753 * @endcode
1754 *
1755 *
1756 * <a name="step_51-HDGpostprocess_one_cell"></a>
1757 * <h4>HDG::postprocess_one_cell</h4>
1758 *
1759
1760 *
1761 * This is the actual work done for the postprocessing. According to the
1762 * discussion in the introduction, we need to set up a system that projects
1763 * the gradient part of the DG solution onto the gradient of the
1764 * post-processed variable. Moreover, we need to set the average of the new
1765 * post-processed variable to equal the average of the scalar DG solution
1766 * on the cell.
1767 *
1768
1769 *
1770 * More technically speaking, the projection of the gradient is a system
1771 * that would potentially fill our @p dofs_per_cell times @p dofs_per_cell
1772 * matrix but is singular (the sum of all rows would be zero because the
1773 * constant function has zero gradient). Therefore, we take one row away and
1774 * use it for imposing the average of the scalar value. We pick the first
1775 * row for the scalar part, even though we could pick any row for @f$\mathcal
1776 * Q_{-p}@f$ elements. However, had we used FE_DGP elements instead, the first
1777 * row would correspond to the constant part already and deleting e.g. the
1778 * last row would give us a singular system. This way, our program can also
1779 * be used for those elements.
1780 *
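In formulas, the local problem assembled and solved below reads: find @f$u_h^\ast \in \mathcal Q_{p+1}(K)@f$ such that
@f[
  (\nabla w_h, \nabla u_h^\ast)_K = -(\nabla w_h, \mathbf q_h)_K
  \quad \text{for all test functions } w_h,
  \qquad
  (1, u_h^\ast)_K = (1, u_h)_K,
@f]
where the mean-value condition takes the place of the row we removed from the singular projection matrix.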
1781 * @code
1782 *   template <int dim>
1783 *   void HDG<dim>::postprocess_one_cell(
1784 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
1785 *   PostProcessScratchData &scratch,
1786 *   unsigned int &)
1787 *   {
1788 *   const typename DoFHandler<dim>::active_cell_iterator loc_cell =
1789 *   cell->as_dof_handler_iterator(dof_handler_local);
1790 *  
1791 *   scratch.fe_values_local.reinit(loc_cell);
1792 *   scratch.fe_values.reinit(cell);
1793 *  
1794 *   const FEValuesExtractors::Vector fluxes(0);
1795 *   const FEValuesExtractors::Scalar scalar(dim);
1796 *  
1797 *   const unsigned int n_q_points = scratch.fe_values.get_quadrature().size();
1798 *   const unsigned int dofs_per_cell = scratch.fe_values.dofs_per_cell;
1799 *  
1800 *   scratch.fe_values_local[scalar].get_function_values(solution_local,
1801 *   scratch.u_values);
1802 *   scratch.fe_values_local[fluxes].get_function_values(solution_local,
1803 *   scratch.u_gradients);
1804 *  
1805 *   double sum = 0;
1806 *   for (unsigned int i = 1; i < dofs_per_cell; ++i)
1807 *   {
1808 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1809 *   {
1810 *   sum = 0;
1811 *   for (unsigned int q = 0; q < n_q_points; ++q)
1812 *   sum += (scratch.fe_values.shape_grad(i, q) *
1813 *   scratch.fe_values.shape_grad(j, q)) *
1814 *   scratch.fe_values.JxW(q);
1815 *   scratch.cell_matrix(i, j) = sum;
1816 *   }
1817 *  
1818 *   sum = 0;
1819 *   for (unsigned int q = 0; q < n_q_points; ++q)
1820 *   sum -= (scratch.fe_values.shape_grad(i, q) * scratch.u_gradients[q]) *
1821 *   scratch.fe_values.JxW(q);
1822 *   scratch.cell_rhs(i) = sum;
1823 *   }
1824 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1825 *   {
1826 *   sum = 0;
1827 *   for (unsigned int q = 0; q < n_q_points; ++q)
1828 *   sum += scratch.fe_values.shape_value(j, q) * scratch.fe_values.JxW(q);
1829 *   scratch.cell_matrix(0, j) = sum;
1830 *   }
1831 *   {
1832 *   sum = 0;
1833 *   for (unsigned int q = 0; q < n_q_points; ++q)
1834 *   sum += scratch.u_values[q] * scratch.fe_values.JxW(q);
1835 *   scratch.cell_rhs(0) = sum;
1836 *   }
1837 *  
1838 * @endcode
1839 *
1840 * Having assembled all terms, we can again go on and solve the linear
1841 * system. We invert the matrix and then multiply the inverse by the
1842 * right hand side. An alternative (and more numerically stable) method
1843 * would have been to only factorize the matrix and apply the factorization.
1844 *
1845 * @code
1846 *   scratch.cell_matrix.gauss_jordan();
1847 *   scratch.cell_matrix.vmult(scratch.cell_sol, scratch.cell_rhs);
1848 *   cell->distribute_local_to_global(scratch.cell_sol, solution_u_post);
1849 *   }
1850 *  
1851 *  
1852 *  
1853 * @endcode
1854 *
1855 *
1856 * <a name="step_51-HDGoutput_results"></a>
1857 * <h4>HDG::output_results</h4>
1858 * We have three sets of results that we would like to output: the local
1859 * solution, the post-processed local solution, and the skeleton solution. The
1860 * former two both 'live' on element volumes, whereas the latter lives on
1861 * codimension-1 surfaces
1862 * of the triangulation. Our @p output_results function writes all local solutions
1863 * to the same vtk file, even though they correspond to different
1864 * DoFHandler objects. The graphical output for the skeleton
1865 * variable is done through use of the DataOutFaces class.
1866 *
1867 * @code
1868 *   template <int dim>
1869 *   void HDG<dim>::output_results(const unsigned int cycle)
1870 *   {
1871 *   std::string filename;
1872 *   switch (refinement_mode)
1873 *   {
1874 *   case global_refinement:
1875 *   filename = "solution-global";
1876 *   break;
1877 *   case adaptive_refinement:
1878 *   filename = "solution-adaptive";
1879 *   break;
1880 *   default:
1881 *   DEAL_II_NOT_IMPLEMENTED();
1882 *   }
1883 *  
1884 *   std::string face_out(filename);
1885 *   face_out += "-face";
1886 *  
1887 *   filename += "-q" + Utilities::int_to_string(fe.degree, 1);
1888 *   filename += "-" + Utilities::int_to_string(cycle, 2);
1889 *   filename += ".vtk";
1890 *   std::ofstream output(filename);
1891 *  
1892 *   DataOut<dim> data_out;
1893 *  
1894 * @endcode
1895 *
1896 * We first define the names and types of the local solution,
1897 * and add the data to @p data_out.
1898 *
1899 * @code
1900 *   std::vector<std::string> names(dim, "gradient");
1901 *   names.emplace_back("solution");
1902 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
1903 *   component_interpretation(
1904 *   dim + 1, DataComponentInterpretation::component_is_part_of_vector);
1905 *   component_interpretation[dim] =
1906 *   DataComponentInterpretation::component_is_scalar;
1907 *   data_out.add_data_vector(dof_handler_local,
1908 *   solution_local,
1909 *   names,
1910 *   component_interpretation);
1911 *  
1912 * @endcode
1913 *
1914 * The second data item we add is the post-processed solution.
1915 * In this case, it is a single scalar variable belonging to
1916 * a different DoFHandler.
1917 *
1918 * @code
1919 *   std::vector<std::string> post_name(1, "u_post");
1920 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
1921 *   post_comp_type(1, DataComponentInterpretation::component_is_scalar);
1922 *   data_out.add_data_vector(dof_handler_u_post,
1923 *   solution_u_post,
1924 *   post_name,
1925 *   post_comp_type);
1926 *  
1927 *   data_out.build_patches(fe.degree);
1928 *   data_out.write_vtk(output);
1929 *  
1930 *   face_out += "-q" + Utilities::int_to_string(fe.degree, 1);
1931 *   face_out += "-" + Utilities::int_to_string(cycle, 2);
1932 *   face_out += ".vtk";
1933 *   std::ofstream face_output(face_out);
1934 *  
1935 * @endcode
1936 *
1937 * The <code>DataOutFaces</code> class works analogously to the
1938 * <code>DataOut</code> class when we have a <code>DoFHandler</code> that
1939 * defines the solution on the skeleton of the triangulation. We treat it
1940 * as such here, and the code is similar to that above.
1941 *
1942 * @code
1943 *   DataOutFaces<dim> data_out_face(false);
1944 *   std::vector<std::string> face_name(1, "u_hat");
1945 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
1946 *   face_component_type(1, DataComponentInterpretation::component_is_scalar);
1947 *  
1948 *   data_out_face.add_data_vector(dof_handler,
1949 *   solution,
1950 *   face_name,
1951 *   face_component_type);
1952 *  
1953 *   data_out_face.build_patches(fe.degree);
1954 *   data_out_face.write_vtk(face_output);
1955 *   }
1956 *  
1957 * @endcode
1958 *
1959 *
1960 * <a name="step_51-HDGrefine_grid"></a>
1961 * <h4>HDG::refine_grid</h4>
1962 *
1963
1964 *
1965 * We implement two different refinement cases for HDG, just as in
1966 * <code>@ref step_7 "step-7"</code>: adaptive_refinement and global_refinement. The
1967 * global_refinement option recreates the entire triangulation every
1968 * time. This is because we want to use a finer-grained sequence of meshes than
1969 * we would get by simply doubling each time, namely 2, 3, 4, 6, 8, 12, 16, ...
1970 * elements per direction.
1971 *
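Concretely, combining the number of initial subdivisions with the number of global refinement steps used below, the mesh in cycle @f$k@f$ has
@f[
  \bigl(2 + (k \bmod 2)\bigr)\, 2^{\,3-\mathrm{dim}+\lfloor k/2\rfloor}
@f]
elements per direction; for dim=3 this is exactly the sequence 2, 3, 4, 6, 8, 12, 16, ... quoted above, while in 2d every entry is twice as large.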
1972
1973 *
1974 * The adaptive_refinement mode uses the <code>KellyErrorEstimator</code> to
1975 * give a decent indication of the non-regular regions in the scalar local
1976 * solutions.
1977 *
1978 * @code
1979 *   template <int dim>
1980 *   void HDG<dim>::refine_grid(const unsigned int cycle)
1981 *   {
1982 *   if (cycle == 0)
1983 *   {
1984 *   GridGenerator::subdivided_hyper_cube(triangulation, 2, -1, 1);
1985 *   triangulation.refine_global(3 - dim);
1986 *   }
1987 *   else
1988 *   switch (refinement_mode)
1989 *   {
1990 *   case global_refinement:
1991 *   {
1992 *   triangulation.clear();
1993 *   GridGenerator::subdivided_hyper_cube(triangulation,
1994 *   2 + (cycle % 2),
1995 *   -1,
1996 *   1);
1997 *   triangulation.refine_global(3 - dim + cycle / 2);
1998 *   break;
1999 *   }
2000 *  
2001 *   case adaptive_refinement:
2002 *   {
2003 *   Vector<float> estimated_error_per_cell(
2004 *   triangulation.n_active_cells());
2005 *  
2006 *   const FEValuesExtractors::Scalar scalar(dim);
2007 *   std::map<types::boundary_id, const Function<dim> *>
2008 *   neumann_boundary;
2009 *   KellyErrorEstimator<dim>::estimate(dof_handler_local,
2010 *   QGauss<dim - 1>(fe.degree + 1),
2011 *   neumann_boundary,
2012 *   solution_local,
2013 *   estimated_error_per_cell,
2014 *   fe_local.component_mask(
2015 *   scalar));
2016 *  
2017 *   GridRefinement::refine_and_coarsen_fixed_number(
2018 *   triangulation, estimated_error_per_cell, 0.3, 0.);
2019 *  
2020 *   triangulation.execute_coarsening_and_refinement();
2021 *  
2022 *   break;
2023 *   }
2024 *  
2025 *   default:
2026 *   {
2027 *   DEAL_II_NOT_IMPLEMENTED();
2028 *   }
2029 *   }
2030 *  
2031 * @endcode
2032 *
2033 * Just as in @ref step_7 "step-7", we set the boundary indicator of two of the faces to 1
2034 * where we want to specify Neumann boundary conditions instead of Dirichlet
2035 * conditions. Since we re-create the triangulation every time for global
2036 * refinement, the flags are set in every refinement step, not just at the
2037 * beginning.
2038 *
2039 * @code
2040 *   for (const auto &cell : triangulation.cell_iterators())
2041 *   for (const auto &face : cell->face_iterators())
2042 *   if (face->at_boundary())
2043 *   if ((std::fabs(face->center()[0] - (-1)) < 1e-12) ||
2044 *   (std::fabs(face->center()[1] - (-1)) < 1e-12))
2045 *   face->set_boundary_id(1);
2046 *   }
2047 *  
2048 * @endcode
2049 *
2050 *
2051 * <a name="step_51-HDGrun"></a>
2052 * <h4>HDG::run</h4>
2053 * The functionality here is basically the same as <code>@ref step_7 "step-7"</code>.
2054 * We loop over 10 cycles, refining the grid on each one. At the end,
2055 * convergence tables are created.
2056 *
2057 * @code
2058 *   template <int dim>
2059 *   void HDG<dim>::run()
2060 *   {
2061 *   for (unsigned int cycle = 0; cycle < 10; ++cycle)
2062 *   {
2063 *   std::cout << "Cycle " << cycle << ':' << std::endl;
2064 *  
2065 *   refine_grid(cycle);
2066 *   setup_system();
2067 *   assemble_system(false);
2068 *   solve();
2069 *   postprocess();
2070 *   output_results(cycle);
2071 *   }
2072 *  
2073 * @endcode
2074 *
2075 * There is one minor change for the convergence table compared to @ref step_7 "step-7":
2076 * Since we did not refine our mesh by a factor of two in each cycle (but
2077 * rather used the sequence 2, 3, 4, 6, 8, 12, ...), we need to tell the
2078 * convergence rate evaluation about this. We do this by setting the
2079 * number of cells as a reference column and additionally specifying the
2080 * dimension of the problem, which gives the necessary information for the
2081 * relation between number of cells and mesh size.
2082 *
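The reasoning behind this is that for @f$N_k@f$ cells in @f$d@f$ space dimensions the mesh size scales as @f$h_k \propto N_k^{-1/d}@f$, so that (up to the exact convention implemented by ConvergenceTable) the reported rate between two consecutive cycles is
@f[
  p \approx d\, \frac{\log(e_{k-1}/e_k)}{\log(N_k/N_{k-1})}.
@f]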
2083 * @code
2084 *   if (refinement_mode == global_refinement)
2085 *   {
2086 *   convergence_table.evaluate_convergence_rates(
2087 *   "val L2", "cells", ConvergenceTable::reduction_rate_log2, dim);
2088 *   convergence_table.evaluate_convergence_rates(
2089 *   "grad L2", "cells", ConvergenceTable::reduction_rate_log2, dim);
2090 *   convergence_table.evaluate_convergence_rates(
2091 *   "val L2-post", "cells", ConvergenceTable::reduction_rate_log2, dim);
2092 *   }
2093 *   convergence_table.write_text(std::cout);
2094 *   }
2095 *  
2096 *   } // end of namespace Step51
2097 *  
2098 *  
2099 *  
2100 *   int main()
2101 *   {
2102 *   const unsigned int dim = 2;
2103 *  
2104 *   try
2105 *   {
2106 * @endcode
2107 *
2108 * Now for the three calls to the main class in complete analogy to
2109 * @ref step_7 "step-7".
2110 *
2111 * @code
2112 *   {
2113 *   std::cout << "Solving with Q1 elements, adaptive refinement"
2114 *   << std::endl
2115 *   << "============================================="
2116 *   << std::endl
2117 *   << std::endl;
2118 *  
2119 *   Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::adaptive_refinement);
2120 *   hdg_problem.run();
2121 *  
2122 *   std::cout << std::endl;
2123 *   }
2124 *  
2125 *   {
2126 *   std::cout << "Solving with Q1 elements, global refinement" << std::endl
2127 *   << "===========================================" << std::endl
2128 *   << std::endl;
2129 *  
2130 *   Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::global_refinement);
2131 *   hdg_problem.run();
2132 *  
2133 *   std::cout << std::endl;
2134 *   }
2135 *  
2136 *   {
2137 *   std::cout << "Solving with Q3 elements, global refinement" << std::endl
2138 *   << "===========================================" << std::endl
2139 *   << std::endl;
2140 *  
2141 *   Step51::HDG<dim> hdg_problem(3, Step51::HDG<dim>::global_refinement);
2142 *   hdg_problem.run();
2143 *  
2144 *   std::cout << std::endl;
2145 *   }
2146 *   }
2147 *   catch (std::exception &exc)
2148 *   {
2149 *   std::cerr << std::endl
2150 *   << std::endl
2151 *   << "----------------------------------------------------"
2152 *   << std::endl;
2153 *   std::cerr << "Exception on processing: " << std::endl
2154 *   << exc.what() << std::endl
2155 *   << "Aborting!" << std::endl
2156 *   << "----------------------------------------------------"
2157 *   << std::endl;
2158 *   return 1;
2159 *   }
2160 *   catch (...)
2161 *   {
2162 *   std::cerr << std::endl
2163 *   << std::endl
2164 *   << "----------------------------------------------------"
2165 *   << std::endl;
2166 *   std::cerr << "Unknown exception!" << std::endl
2167 *   << "Aborting!" << std::endl
2168 *   << "----------------------------------------------------"
2169 *   << std::endl;
2170 *   return 1;
2171 *   }
2172 *  
2173 *   return 0;
2174 *   }
2175 * @endcode
2176<a name="step_51-Results"></a><h1>Results</h1>
2177
2178
2179<a name="step_51-Programoutput"></a><h3>Program output</h3>
2180
2181
2182We first have a look at the output generated by the program when run in 2D. In
2183the four images below, we show the solution for polynomial degree @f$p=1@f$
2184and cycles 2, 3, 4, and 8 of the program. In the plots, we overlay the data
2185generated from the element interiors (the DG part) with the skeleton part (@f$\hat{u}@f$)
2186into the same plot. We had to generate two different data sets because cells
2187and faces represent different geometric entities, the combination of which (in
2188the same file) is not supported in the VTK output of deal.II.
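
In case you want to write such files yourself, the two data sets can be
generated along the following lines. This is only a condensed sketch of what
the program's output routine does; the DoFHandler and vector names
(dof_handler_local/solution_local for the interior field, dof_handler/solution
for the trace) are those of the tutorial's HDG class, whereas the component
and file names are chosen here purely for illustration:

@code
#include <deal.II/numerics/data_component_interpretation.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/data_out_faces.h>

#include <fstream>

// Cell (DG) part: the interior field has dim+1 components (the gradient and
// the scalar value), so we provide one name per component plus an
// interpretation that groups the first dim components into a vector.
std::vector<std::string> names(dim, "gradient");
names.emplace_back("u");
std::vector<DataComponentInterpretation::DataComponentInterpretation>
  interpretation(dim, DataComponentInterpretation::component_is_part_of_vector);
interpretation.push_back(DataComponentInterpretation::component_is_scalar);

DataOut<dim> data_out;
data_out.attach_dof_handler(dof_handler_local);
data_out.add_data_vector(solution_local,
                         names,
                         DataOut<dim>::type_dof_data,
                         interpretation);
data_out.build_patches();
std::ofstream cell_output("solution_cells.vtk");
data_out.write_vtk(cell_output);

// Skeleton part: the trace unknown lives only on the faces, so it has to go
// through DataOutFaces (with surface_only=false, so that interior faces are
// included) and into a second file.
DataOutFaces<dim> data_out_faces(/*surface_only=*/false);
data_out_faces.attach_dof_handler(dof_handler);
data_out_faces.add_data_vector(solution, "u_hat");
data_out_faces.build_patches();
std::ofstream face_output("solution_faces.vtk");
data_out_faces.write_vtk(face_output);
@endcode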
2189
2190The images show the distinctive features of HDG: The cell solution (colored
2191surfaces) is discontinuous between the cells. The solution of the skeleton
2192variable sits on the faces and ties together the local parts. The skeleton
2193solution is not continuous on the vertices where the faces meet, even though
2194its values are quite close along lines in the same coordinate direction. The
2195skeleton solution can be interpreted as a rubber spring between the two sides
2196that balances the jumps in the solution (or rather, the flux @f$\kappa \nabla u
2197+ \mathbf{c} u@f$). From the picture at the top left, it is clear that
2198the bulk solution frequently over- and undershoots and that the
2199skeleton variable is indeed a better approximation to the exact
2200solution; this explains why we can get a better solution using a
2201postprocessing step.
2202
2203As the mesh is refined, the jumps between the cells get
2204smaller (we are approximating a smooth solution), and the skeleton solution approaches
2205the interior parts. For cycle 8, there is no visible difference between the two
2206variables. We also see how boundary conditions are implemented weakly and that
2207the interior variables do not exactly satisfy boundary conditions. On the
2208lower and left boundaries, we set Neumann boundary conditions, whereas we set
2209Dirichlet conditions on the right and top boundaries.
2210
2211<table align="center">
2212 <tr>
2213 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_2.png" alt=""></td>
2214 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_3.png" alt=""></td>
2215 </tr>
2216 <tr>
2217 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_4.png" alt=""></td>
2218 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_8.png" alt=""></td>
2219 </tr>
2220</table>
2221
2222Next, we have a look at the post-processed solution, again at cycles 2, 3, 4,
2223and 8. This is a discontinuous solution that is locally described by second
2224order polynomials. While the solution does not look very good on the mesh of
2225cycle 2, it looks much better for cycles 3 and 4. As shown by the
2226convergence table below, we find that it also converges more quickly to the
2227analytical solution.
2228
2229<table align="center">
2230 <tr>
2231 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_2.png" alt=""></td>
2232 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_3.png" alt=""></td>
2233 </tr>
2234 <tr>
2235 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_4.png" alt=""></td>
2236 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_8.png" alt=""></td>
2237 </tr>
2238</table>
2239
2240Finally, we look at the solution for @f$p=3@f$ at cycle 2. Despite the coarse
2241mesh with only 64 cells, the post-processed solution is similar in quality
2242to the linear solution (not post-processed) at cycle 8 with 4,096
2243cells. This clearly shows the superiority of high order methods for smooth
2244solutions.
2245
2246<table align="center">
2247 <tr>
2248 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_q3_2.png" alt=""></td>
2249 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_q3_2.png" alt=""></td>
2250 </tr>
2251</table>
2252
2253<a name="step_51-Convergencetables"></a><h4>Convergence tables</h4>
2254
2255
2256When the program is run, it also outputs information about the respective
2257steps and, at the end, convergence tables with the errors in the various
2258components. In 2D, the convergence tables look as follows:
2259
2260@code
2261Q1 elements, adaptive refinement:
2262cells dofs val L2 grad L2 val L2-post
2263 16 80 1.804e+01 2.207e+01 1.798e+01
2264 31 170 9.874e+00 1.322e+01 9.798e+00
2265 61 314 7.452e-01 3.793e+00 4.891e-01
2266 121 634 3.240e-01 1.511e+00 2.616e-01
2267 238 1198 8.585e-02 8.212e-01 1.808e-02
2268 454 2290 4.802e-02 5.178e-01 2.195e-02
2269 898 4378 2.561e-02 2.947e-01 4.318e-03
2270 1720 7864 1.306e-02 1.664e-01 2.978e-03
2271 3271 14638 7.025e-03 9.815e-02 1.075e-03
2272 6217 27214 4.119e-03 6.407e-02 9.975e-04
2273
2274Q1 elements, global refinement:
2275cells dofs val L2 grad L2 val L2-post
2276 16 80 1.804e+01 - 2.207e+01 - 1.798e+01 -
2277 36 168 6.125e+00 2.66 9.472e+00 2.09 6.084e+00 2.67
2278 64 288 9.785e-01 6.38 4.260e+00 2.78 7.102e-01 7.47
2279 144 624 2.730e-01 3.15 1.866e+00 2.04 6.115e-02 6.05
2280 256 1088 1.493e-01 2.10 1.046e+00 2.01 2.880e-02 2.62
2281 576 2400 6.965e-02 1.88 4.846e-01 1.90 9.204e-03 2.81
2282 1024 4224 4.018e-02 1.91 2.784e-01 1.93 4.027e-03 2.87
2283 2304 9408 1.831e-02 1.94 1.264e-01 1.95 1.236e-03 2.91
2284 4096 16640 1.043e-02 1.96 7.185e-02 1.96 5.306e-04 2.94
2285 9216 37248 4.690e-03 1.97 3.228e-02 1.97 1.599e-04 2.96
2286
2287Q3 elements, global refinement:
2288cells dofs val L2 grad L2 val L2-post
2289 16 160 3.613e-01 - 1.891e+00 - 3.020e-01 -
2290 36 336 6.411e-02 4.26 5.081e-01 3.24 3.238e-02 5.51
2291 64 576 3.480e-02 2.12 2.533e-01 2.42 5.277e-03 6.31
2292 144 1248 8.297e-03 3.54 5.924e-02 3.58 6.330e-04 5.23
2293 256 2176 2.254e-03 4.53 1.636e-02 4.47 1.403e-04 5.24
2294 576 4800 4.558e-04 3.94 3.277e-03 3.96 1.844e-05 5.01
2295 1024 8448 1.471e-04 3.93 1.052e-03 3.95 4.378e-06 5.00
2296 2304 18816 2.956e-05 3.96 2.104e-04 3.97 5.750e-07 5.01
2297 4096 33280 9.428e-06 3.97 6.697e-05 3.98 1.362e-07 5.01
2298 9216 74496 1.876e-06 3.98 1.330e-05 3.99 1.788e-08 5.01
2299@endcode
2300
2301
2302One can see the error reduction upon grid refinement, and for the cases where
2303global refinement was performed, also the convergence rates. The quadratic
2304convergence rates of Q1 elements in the @f$L_2@f$ norm for both the scalar
2305variable and the gradient variable are apparent, as is the cubic rate for the
2306postprocessed scalar variable in the @f$L_2@f$ norm. Note this distinctive
2307feature of an HDG solution. In typical continuous finite elements, the
2308gradient of the solution of order @f$p@f$ converges at rate @f$p@f$ only, as
2309opposed to @f$p+1@f$ for the actual solution. Even though superconvergence
2310results for finite elements are also available (e.g. superconvergent patch
2311recovery first introduced by Zienkiewicz and Zhu), these are typically limited
2312to structured meshes and other special cases. For Q3 HDG variables, the scalar
2313variable and gradient converge at fourth order and the postprocessed scalar
2314variable at fifth order.
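
For reference, the printed rates can be reproduced from the table entries
themselves. Under global refinement the mesh size scales like
@f$h \propto N_\text{cells}^{-1/d}@f$, so the call to
ConvergenceTable::evaluate_convergence_rates() with the number of cells as
reference column and the dimension @f$d@f$ (using the reduction_rate_log2
mode) reports
@f[
  \text{rate}_k = d\,\frac{\log\left(e_{k-1}/e_k\right)}
                          {\log\left(N_k/N_{k-1}\right)},
@f]
i.e., the exponent @f$q@f$ in @f$e \propto h^q@f$. As a spot check for the Q1
table with global refinement in 2D (@f$d=2@f$): going from @f$N=4096@f$ to
@f$N=9216@f$ cells, the scalar error drops from @f$1.043\cdot 10^{-2}@f$ to
@f$4.690\cdot 10^{-3}@f$, which gives
@f$2\,\log(2.224)/\log(2.25)\approx 1.97@f$ and indeed reproduces the printed
value.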
2315
2316The same convergence rates are observed in 3d.
2317@code
2318Q1 elements, adaptive refinement:
2319cells dofs val L2 grad L2 val L2-post
2320 8 144 7.122e+00 1.941e+01 6.102e+00
2321 29 500 3.309e+00 1.023e+01 2.145e+00
2322 113 1792 2.204e+00 1.023e+01 1.912e+00
2323 379 5732 6.085e-01 5.008e+00 2.233e-01
2324 1317 19412 1.543e-01 1.464e+00 4.196e-02
2325 4579 64768 5.058e-02 5.611e-01 9.521e-03
2326 14596 199552 2.129e-02 3.122e-01 4.569e-03
2327 46180 611400 1.033e-02 1.622e-01 1.684e-03
2328144859 1864212 5.007e-03 8.371e-02 7.364e-04
2329451060 5684508 2.518e-03 4.562e-02 3.070e-04
2330
2331Q1 elements, global refinement:
2332cells dofs val L2 grad L2 val L2-post
2333 8 144 7.122e+00 - 1.941e+01 - 6.102e+00 -
2334 27 432 5.491e+00 0.64 2.184e+01 -0.29 4.448e+00 0.78
2335 64 960 3.646e+00 1.42 1.299e+01 1.81 3.306e+00 1.03
2336 216 3024 1.595e+00 2.04 8.550e+00 1.03 1.441e+00 2.05
2337 512 6912 6.922e-01 2.90 5.306e+00 1.66 2.511e-01 6.07
2338 1728 22464 2.915e-01 2.13 2.490e+00 1.87 8.588e-02 2.65
2339 4096 52224 1.684e-01 1.91 1.453e+00 1.87 4.055e-02 2.61
2340 13824 172800 7.972e-02 1.84 6.861e-01 1.85 1.335e-02 2.74
2341 32768 405504 4.637e-02 1.88 3.984e-01 1.89 5.932e-03 2.82
2342110592 1354752 2.133e-02 1.92 1.830e-01 1.92 1.851e-03 2.87
2343
2344Q3 elements, global refinement:
2345cells dofs val L2 grad L2 val L2-post
2346 8 576 5.670e+00 - 1.868e+01 - 5.462e+00 -
2347 27 1728 1.048e+00 4.16 6.988e+00 2.42 8.011e-01 4.73
2348 64 3840 2.831e-01 4.55 2.710e+00 3.29 1.363e-01 6.16
2349 216 12096 7.883e-02 3.15 7.721e-01 3.10 2.158e-02 4.55
2350 512 27648 3.642e-02 2.68 3.305e-01 2.95 5.231e-03 4.93
2351 1728 89856 8.546e-03 3.58 7.581e-02 3.63 7.640e-04 4.74
2352 4096 208896 2.598e-03 4.14 2.313e-02 4.13 1.783e-04 5.06
2353 13824 691200 5.314e-04 3.91 4.697e-03 3.93 2.355e-05 4.99
2354 32768 1622016 1.723e-04 3.91 1.517e-03 3.93 5.602e-06 4.99
2355110592 5419008 3.482e-05 3.94 3.055e-04 3.95 7.374e-07 5.00
2356@endcode
2357
2358<a name="step_51-Comparisonwithcontinuousfiniteelements"></a><h3>Comparison with continuous finite elements</h3>
2359
2360
2361<a name="step_51-Resultsfor2D"></a><h4>Results for 2D</h4>
2362
2363
2364The convergence tables verify the expected convergence rates stated in the
2365introduction. Now, we want to show a quick comparison of the computational
2366efficiency of the HDG method compared to a usual finite element (continuous
2367Galerkin) method on the problem of this tutorial. Of course, stability aspects
2368of the HDG method compared to continuous finite elements for
2369transport-dominated problems are also important in practice, an aspect that
2370is not visible for a problem with a smooth analytic solution. In the picture
2371below, we compare the @f$L_2@f$ error as a function of the number of degrees of
2372freedom (left) and of the computing time spent in the linear solver (right)
2373for two space dimensions of continuous finite elements (CG) and the hybridized
2374discontinuous Galerkin method presented in this tutorial. As opposed to the
2375tutorial, where we only use an unpreconditioned BiCGStab solver, the times shown in the
2376figures below use the Trilinos algebraic multigrid preconditioner in
2377TrilinosWrappers::PreconditionAMG. For the HDG part, a wrapper around
2378ChunkSparseMatrix for the trace variable has been used in order to utilize the
2379block structure in the matrix on the finest level.
2380
2381<table align="center">
2382 <tr>
2383 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_plain.png" width="400" alt=""></td>
2384 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_plain.png" width="400" alt=""></td>
2385 </tr>
2386</table>
2387
2388The results in the graphs show that the HDG method is slower than continuous
2389finite elements at @f$p=1@f$, about equally fast for cubic elements and
2390faster for sixth order elements. However, we have seen above that the HDG
2391method actually produces solutions which are more accurate than what is
2392represented in the original variables. Therefore, in the next two plots below
2393we instead display the error of the post-processed solution for HDG (denoted
2394by @f$p=1^*@f$ for example). We now see a clear advantage of HDG for the same
2395amount of work for both @f$p=3@f$ and @f$p=6@f$, and about the same quality
2396for @f$p=1@f$.
2397
2398<table align="center">
2399 <tr>
2400 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_post.png" width="400" alt=""></td>
2401 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_post.png" width="400" alt=""></td>
2402 </tr>
2403</table>
2404
2405Since the HDG method actually produces results converging as
2406@f$h^{p+2}@f$, we should compare it to a continuous Galerkin
2407solution with the same asymptotic convergence behavior, i.e., FE_Q with degree
2408@f$p+1@f$. If we do this, we get the convergence curves below. We see that
2409CG with second order polynomials is again clearly better than HDG with
2410linears. However, the advantage of HDG for higher orders remains.
2411
2412<table align="center">
2413 <tr>
2414 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_postb.png" width="400" alt=""></td>
2415 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_postb.png" width="400" alt=""></td>
2416 </tr>
2417</table>
2418
2419The results are in line with properties of DG methods in general: Best
2420performance is typically not achieved for linear elements, but rather at
2421somewhat higher order, usually around @f$p=3@f$. This is because of a
2422volume-to-surface effect for discontinuous solutions: when the elements are
2423linear, too much of the solution lives on the element surfaces and hence
2424duplicates work. In other words, DG methods are often most efficient when used
2425at relatively high order, despite their focus on a discontinuous (and hence,
2426seemingly low-accuracy) representation of solutions.
2427
2428<a name="step_51-Resultsfor3D"></a><h4>Results for 3D</h4>
2429
2430
2431We now show the same figures in 3D: The first row shows the number of degrees
2432of freedom and computing time versus the @f$L_2@f$ error in the scalar variable
2433@f$u@f$ for CG and HDG at order @f$p@f$, the second row shows the
2434post-processed HDG solution instead of the original one, and the third row
2435compares the post-processed HDG solution with CG at order @f$p+1@f$. In 3D,
2436the volume-to-surface effect makes the cost of HDG somewhat higher and the CG
2437solution is clearly better than HDG for linears by any metric. For cubics, HDG
2438and CG are of similar quality, whereas HDG is again more efficient for sixth
2439order polynomials. One can alternatively also use the combination of FE_DGP
2440and FE_FaceP instead of (FE_DGQ, FE_FaceQ), which do not use tensor product
2441polynomials of degree @f$p@f$ but Legendre polynomials of <i>complete</i>
2442degree @f$p@f$. There are fewer degrees of freedom on the skeleton variable
2443for FE_FaceP for a given mesh size, but the solution quality (error vs. number
2444of DoFs) is very similar to the results for FE_FaceQ.
2445
2446<table align="center">
2447 <tr>
2448 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_plain.png" width="400" alt=""></td>
2449 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_plain.png" width="400" alt=""></td>
2450 </tr>
2451 <tr>
2452 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_post.png" width="400" alt=""></td>
2453 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_post.png" width="400" alt=""></td>
2454 </tr>
2455 <tr>
2456 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_postb.png" width="400" alt=""></td>
2457 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_postb.png" width="400" alt=""></td>
2458 </tr>
2459</table>
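
Regarding the FE_DGP/FE_FaceP combination mentioned above: switching to it
only requires exchanging the element types held by the HDG class. The
following is a sketch (member names as in the tutorial; the declared member
types have to change along with the constructor calls), not a tested variant
of the program:

@code
#include <deal.II/fe/fe_dgp.h>
#include <deal.II/fe/fe_face.h>
#include <deal.II/fe/fe_system.h>

// Member declarations inside class HDG<dim>, with complete-degree elements:
FESystem<dim> fe_local;  // interior element, now built from FE_DGP
FE_FaceP<dim> fe;        // trace element on the faces, instead of FE_FaceQ
FE_DGP<dim>   fe_u_post; // post-processing element, instead of FE_DGQ

// ...and the corresponding entries of the constructor's initializer list:
//   : fe_local(FE_DGP<dim>(degree), dim + 1)
//   , fe(degree)
//   , fe_u_post(degree + 1)
@endcode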
2460
2461One final note on the efficiency comparison: We tried to use general-purpose
2462sparse matrix structures and similar solvers (optimal AMG preconditioners for
2463both without particular tuning of the AMG parameters on any of them) to give a
2464fair picture of the cost versus accuracy of the two methods on a toy example. It
2465should be noted, however, that geometric multigrid (GMG) for continuous finite
2466elements is about a factor of four to five faster for @f$p=3@f$ and @f$p=6@f$. As of
24672019, optimal-complexity iterative solvers for HDG are still under development
2468in the research community. Also, there are other implementation techniques
2469available for CG, such as the fast matrix-free approach shown in @ref step_37 "step-37", that make
2470higher order continuous elements more competitive. Again, it is not clear to
2471the authors of the tutorial whether similar improvements could be made for
2472HDG. We refer to <a href="https://dx.doi.org/10.1137/16M110455X">Kronbichler
2473and Wall (2018)</a> for a recent efficiency evaluation.
2474
2475
2476<a name="step_51-Possibilitiesforimprovements"></a><h3>Possibilities for improvements</h3>
2477
2478
2479As already mentioned in the introduction, one possibility is to implement
2480another post-processing technique as discussed in the literature.
2481
2482A second item that is not handled optimally relates to the performance of this
2483program, which is of course an issue in practical applications (also weighing
2484in the better solution quality of (H)DG methods for transport-dominated
2485problems). Let us look at
2486the computing time of the tutorial program and the share of the individual
2487components:
2488
2489<table align="center" class="doxtable">
2490 <tr>
2491 <th>&nbsp;</th>
2492 <th>&nbsp;</th>
2493 <th>Setup</th>
2494 <th>Assemble</th>
2495 <th>Solve</th>
2496 <th>Trace reconstruct</th>
2497 <th>Post-processing</th>
2498 <th>Output</th>
2499 </tr>
2500 <tr>
2501 <th>&nbsp;</th>
2502 <th>Total time</th>
2503 <th colspan="6">Relative share</th>
2504 </tr>
2505 <tr>
2506 <td align="left">2D, Q1, cycle 9, 37,248 dofs</td>
2507 <td align="center">5.34s</td>
2508 <td align="center">0.7%</td>
2509 <td align="center">1.2%</td>
2510 <td align="center">89.5%</td>
2511 <td align="center">0.9%</td>
2512 <td align="center">2.3%</td>
2513 <td align="center">5.4%</td>
2514 </tr>
2515 <tr>
2516 <td align="left">2D, Q3, cycle 9, 74,496 dofs</td>
2517 <td align="center">22.2s</td>
2518 <td align="center">0.4%</td>
2519 <td align="center">4.3%</td>
2520 <td align="center">84.1%</td>
2521 <td align="center">4.1%</td>
2522 <td align="center">3.5%</td>
2523 <td align="center">3.6%</td>
2524 </tr>
2525 <tr>
2526 <td align="left">3D, Q1, cycle 7, 172,800 dofs</td>
2527 <td align="center">9.06s</td>
2528 <td align="center">3.1%</td>
2529 <td align="center">8.9%</td>
2530 <td align="center">42.7%</td>
2531 <td align="center">7.0%</td>
2532 <td align="center">20.6%</td>
2533 <td align="center">17.7%</td>
2534 </tr>
2535 <tr>
2536 <td align="left">3D, Q3, cycle 7, 691,200 dofs</td>
2537 <td align="center">516s</td>
2538 <td align="center">0.6%</td>
2539 <td align="center">34.5%</td>
2540 <td align="center">13.4%</td>
2541 <td align="center">32.8%</td>
2542 <td align="center">17.1%</td>
2543 <td align="center">1.5%</td>
2544 </tr>
2545</table>
2546
2547As can be seen from the table, the solver and assembly calls dominate the
2548runtime of the program. This also gives a clear indication of where
2549improvements would make the most sense:
2550
2551<ol>
2552 <li> Better linear solvers: We use a BiCGStab iterative solver without a
2553 preconditioner, where the number of iterations increases with the problem
2554 size (for Q1 elements and global refinement, the iteration count starts at
2555 35 for the smallest size but grows to 701 for the largest size). To do
2556 better, one could for example use an algebraic multigrid preconditioner
2557 from Trilinos (see the sketch after this list), or more advanced variants
2558 such as the one discussed in <a
2559 href="https://dx.doi.org/10.1137/16M110455X">Kronbichler and Wall
2560 (2018)</a>. For diffusion-dominated problems such as the problem at hand
2561 with finer meshes, such a solver can be designed to use the matrix-vector
2562 products of the more efficient ChunkSparseMatrix on the finest level, as
2563 long as we are not working in parallel with MPI. For MPI-parallelized
2564 computations, a standard TrilinosWrappers::SparseMatrix can be used.
2565
2566 <li> Speed up assembly by pre-assembling parts that do not change from one
2567 cell to another (those that contain neither variable coefficients nor
2568 mapping-dependent terms).
2569</ol>
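
As a starting point for the first item, the solver call could be changed
along the following lines. This is only a sketch: it assumes the trace
system has been assembled into an ordinary SparseMatrix&lt;double&gt; named
system_matrix (the program as written uses a ChunkSparseMatrix, which
TrilinosWrappers::PreconditionAMG::initialize() does not accept directly),
and it reuses the system_rhs and solution vectors of the tutorial's HDG
class:

@code
#include <deal.II/lac/solver_bicgstab.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/vector.h>

// Set up an algebraic multigrid preconditioner for the trace system. The
// default parameters are kept, except that we explicitly mark the problem as
// elliptic, a reasonable choice for the diffusion-dominated regime discussed
// in the list above.
TrilinosWrappers::PreconditionAMG                 preconditioner;
TrilinosWrappers::PreconditionAMG::AdditionalData amg_data;
amg_data.elliptic = true;
preconditioner.initialize(system_matrix, amg_data);

// BiCGStab with a tolerance relative to the right hand side, now applied
// with the AMG preconditioner instead of PreconditionIdentity.
SolverControl solver_control(system_matrix.m() * 10,
                             1e-11 * system_rhs.l2_norm());
SolverBicgstab<Vector<double>> solver(solver_control);
solver.solve(system_matrix, solution, system_rhs, preconditioner);
@endcode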
2570 *
2571 *
2572<a name="step_51-PlainProg"></a>
2573<h1> The plain program</h1>
2574@include "step-51.cc"
2575*/