Reference documentation for deal.II version 9.5.0
step-51.h
Go to the documentation of this file.
595 *   const unsigned int /*component*/ = 0) const override
596 *   {
597 *   double sum = 0;
598 *   for (unsigned int i = 0; i < this->n_source_centers; ++i)
599 *   {
600 *   const Tensor<1, dim> x_minus_xi = p - this->source_centers[i];
601 *   sum +=
602 *   std::exp(-x_minus_xi.norm_square() / (this->width * this->width));
603 *   }
604 *  
605 *   return sum /
606 *   std::pow(2. * numbers::PI * this->width * this->width, dim / 2.);
607 *   }
608 *  
609 *   virtual Tensor<1, dim>
610 *   gradient(const Point<dim> &p,
611 *   const unsigned int /*component*/ = 0) const override
612 *   {
613 *   Tensor<1, dim> sum;
614 *   for (unsigned int i = 0; i < this->n_source_centers; ++i)
615 *   {
616 *   const Tensor<1, dim> x_minus_xi = p - this->source_centers[i];
617 *  
618 *   sum +=
619 *   (-2 / (this->width * this->width) *
620 *   std::exp(-x_minus_xi.norm_square() / (this->width * this->width)) *
621 *   x_minus_xi);
622 *   }
623 *  
624 *   return sum /
625 *   std::pow(2. * numbers::PI * this->width * this->width, dim / 2.);
626 *   }
627 *   };
628 *  
629 *  
630 *  
631 * @endcode
632 *
633 * This class implements a function in which the scalar solution and its
634 * negative gradient are collected together. It is used when computing the
635 * error of the HDG approximation, and its implementation simply calls the
636 * value and gradient functions of the Solution class.
637 *
638 * @code
639 *   template <int dim>
640 *   class SolutionAndGradient : public Function<dim>, protected SolutionBase<dim>
641 *   {
642 *   public:
643 *   SolutionAndGradient()
644 *   : Function<dim>(dim + 1)
645 *   {}
646 *  
647 *   virtual void vector_value(const Point<dim> &p,
648 *   Vector<double> & v) const override
649 *   {
650 *   AssertDimension(v.size(), dim + 1);
651 *   Solution<dim> solution;
652 *   Tensor<1, dim> grad = solution.gradient(p);
653 *   for (unsigned int d = 0; d < dim; ++d)
654 *   v[d] = -grad[d];
655 *   v[dim] = solution.value(p);
656 *   }
657 *   };
658 *  
659 *  
660 *  
661 * @endcode
662 *
663 * Next comes the implementation of the convection velocity. As described in
664 * the introduction, we choose a velocity field that is @f$(y, -x)@f$ in 2d and
665 * @f$(y, -x, 1)@f$ in 3d. This gives a divergence-free velocity field.
666 *
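* Indeed, @f$\nabla \cdot \mathbf{c} = \partial_x\, y + \partial_y\,(-x) = 0@f$
* in 2d, and the constant third component in 3d contributes nothing either.
*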
667 * @code
668 *   template <int dim>
669 *   class ConvectionVelocity : public TensorFunction<1, dim>
670 *   {
671 *   public:
672 *   ConvectionVelocity()
673 *   : TensorFunction<1, dim>()
674 *   {}
675 *  
676 *   virtual Tensor<1, dim> value(const Point<dim> &p) const override
677 *   {
678 *   Tensor<1, dim> convection;
679 *   switch (dim)
680 *   {
681 *   case 1:
682 *   convection[0] = 1;
683 *   break;
684 *   case 2:
685 *   convection[0] = p[1];
686 *   convection[1] = -p[0];
687 *   break;
688 *   case 3:
689 *   convection[0] = p[1];
690 *   convection[1] = -p[0];
691 *   convection[2] = 1;
692 *   break;
693 *   default:
694 *   Assert(false, ExcNotImplemented());
695 *   }
696 *   return convection;
697 *   }
698 *   };
699 *  
700 *  
701 *  
702 * @endcode
703 *
704 * The last function we implement is the right hand side for the
705 * manufactured solution. It is very similar to @ref step_7 "step-7", with the exception
706 * that we now have a convection term instead of the reaction term. Since
707 * the velocity field is incompressible, i.e., @f$\nabla \cdot \mathbf{c} =
708 * 0@f$, the advection term simply reads @f$\mathbf{c} \cdot \nabla u@f$.
709 *
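* In other words, with @f$\kappa = 1@f$ and @f$\nabla \cdot \mathbf{c} = 0@f$,
* the class below returns
* @f[ f = -\Delta u + \mathbf{c} \cdot \nabla u, @f]
* evaluated analytically for the sum of Gaussians defined in @p SolutionBase.
*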
710 * @code
711 *   template <int dim>
712 *   class RightHandSide : public Function<dim>, protected SolutionBase<dim>
713 *   {
714 *   public:
715 *   virtual double value(const Point<dim> &p,
716 *   const unsigned int /*component*/ = 0) const override
717 *   {
718 *   ConvectionVelocity<dim> convection_velocity;
719 *   Tensor<1, dim> convection = convection_velocity.value(p);
720 *   double sum = 0;
721 *   for (unsigned int i = 0; i < this->n_source_centers; ++i)
722 *   {
723 *   const Tensor<1, dim> x_minus_xi = p - this->source_centers[i];
724 *  
725 *   sum +=
726 *   ((2 * dim - 2 * convection * x_minus_xi -
727 *   4 * x_minus_xi.norm_square() / (this->width * this->width)) /
728 *   (this->width * this->width) *
729 *   std::exp(-x_minus_xi.norm_square() / (this->width * this->width)));
730 *   }
731 *  
732 *   return sum /
733 *   std::pow(2. * numbers::PI * this->width * this->width, dim / 2.);
734 *   }
735 *   };
736 *  
737 *  
738 *  
739 * @endcode
740 *
741 *
742 * <a name="TheHDGsolverclass"></a>
743 * <h3>The HDG solver class</h3>
744 *
745
746 *
747 * The HDG solution procedure follows closely that of @ref step_7 "step-7". The major
748 * difference is the use of three different sets of DoFHandler and FE
749 * objects, along with the ChunkSparseMatrix and the corresponding solution
750 * vectors. We also use WorkStream to enable a multithreaded local solution
751 * process which exploits the embarrassingly parallel nature of the local
752 * solver. For WorkStream, we define the local operations on a cell and a
753 * copy function into the global matrix and vector. We do this both for the
754 * assembly (which is run twice, once when we generate the system matrix and
755 * once when we compute the element-interior solutions from the skeleton
756 * values) and for the postprocessing where we extract a solution that
757 * converges at higher order.
758 *
759 * @code
760 *   template <int dim>
761 *   class HDG
762 *   {
763 *   public:
764 *   enum RefinementMode
765 *   {
766 *   global_refinement,
767 *   adaptive_refinement
768 *   };
769 *  
770 *   HDG(const unsigned int degree, const RefinementMode refinement_mode);
771 *   void run();
772 *  
773 *   private:
774 *   void setup_system();
775 *   void assemble_system(const bool reconstruct_trace = false);
776 *   void solve();
777 *   void postprocess();
778 *   void refine_grid(const unsigned int cycle);
779 *   void output_results(const unsigned int cycle);
780 *  
781 * @endcode
782 *
783 * Data for the assembly and solution of the primal variables.
784 *
785 * @code
786 *   struct PerTaskData;
787 *   struct ScratchData;
788 *  
789 * @endcode
790 *
791 * Post-processing the solution to obtain @f$u^*@f$ is an element-by-element
792 * procedure; as such, we do not need to assemble any global data and do
793 * not declare any 'task data' for WorkStream to use.
794 *
795 * @code
796 *   struct PostProcessScratchData;
797 *  
798 * @endcode
799 *
800 * The following three functions are used by WorkStream to do the actual
801 * work of the program.
802 *
803 * @code
804 *   void assemble_system_one_cell(
805 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
806 *   ScratchData & scratch,
807 *   PerTaskData & task_data);
808 *  
809 *   void copy_local_to_global(const PerTaskData &data);
810 *  
811 *   void postprocess_one_cell(
812 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
813 *   PostProcessScratchData & scratch,
814 *   unsigned int & empty_data);
815 *  
816 *  
817 *   Triangulation<dim> triangulation;
818 *  
819 * @endcode
820 *
821 * The 'local' solutions are interior to each element. These
822 * represent the primal solution field @f$u@f$ as well as the auxiliary
823 * field @f$\mathbf{q}@f$.
824 *
825 * @code
826 *   FESystem<dim> fe_local;
827 *   DoFHandler<dim> dof_handler_local;
828 *   Vector<double> solution_local;
829 *  
830 * @endcode
831 *
832 * The new finite element type and corresponding <code>DoFHandler</code> are
833 * used for the global skeleton solution that couples the element-level
834 * local solutions.
835 *
836 * @code
837 *   FE_FaceQ<dim> fe;
838 *   DoFHandler<dim> dof_handler;
839 *   Vector<double> solution;
840 *   Vector<double> system_rhs;
841 *  
842 * @endcode
843 *
844 * As stated in the introduction, HDG solutions can be post-processed to
845 * attain superconvergence rates of @f$\mathcal{O}(h^{p+2})@f$. The
846 * post-processed solution is a discontinuous finite element solution
847 * representing the primal variable on the interior of each cell. We define
848 * a FE type of degree @f$p+1@f$ to represent this post-processed solution,
849 * which we only use for output after constructing it.
850 *
851 * @code
852 *   FE_DGQ<dim> fe_u_post;
853 *   DoFHandler<dim> dof_handler_u_post;
854 *   Vector<double> solution_u_post;
855 *  
856 * @endcode
857 *
858 * The degrees of freedom corresponding to the skeleton strongly enforce
859 * Dirichlet boundary conditions, just as in a continuous Galerkin finite
860 * element method. We can enforce the boundary conditions in an analogous
861 * manner via an AffineConstraints object. In addition, hanging nodes are
862 * handled in the same way as for continuous finite elements: For the face
863 * elements which only define degrees of freedom on the face, this process
864 * sets the solution on the refined side to coincide with the
865 * representation on the coarse side.
866 *
867
868 *
869 * Note that for HDG, the elimination of hanging nodes is not the only
870 * possibility &mdash; in terms of the HDG theory, one could also use the
871 * unknowns from the refined side and express the local solution on the
872 * coarse side through the trace values on the refined side. However, such
873 * a setup is not as easily implemented in terms of deal.II loops and not
874 * further analyzed.
875 *
876 * @code
877 *   AffineConstraints<double> constraints;
878 *  
879 * @endcode
880 *
881 * The usage of the ChunkSparseMatrix class is similar to the usual sparse
882 * matrices: You need a sparsity pattern of type ChunkSparsityPattern and
883 * the actual matrix object. When creating the sparsity pattern, we just
884 * have to additionally pass the size of local blocks.
885 *
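* As a minimal sketch of that usage (it mirrors what <code>setup_system()</code>
* does further down and uses no calls beyond the ones shown there):
*
* @code
*   // Build the connectivity as usual, then compress it into a chunk pattern
*   // whose block size equals the number of unknowns per face.
*   DynamicSparsityPattern dsp(dof_handler.n_dofs());
*   DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
*   sparsity_pattern.copy_from(dsp, fe.n_dofs_per_face());
*   system_matrix.reinit(sparsity_pattern);
* @endcode
*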
886 * @code
887 *   ChunkSparsityPattern sparsity_pattern;
888 *   ChunkSparseMatrix<double> system_matrix;
889 *  
890 * @endcode
891 *
892 * Same as @ref step_7 "step-7":
893 *
894 * @code
895 *   const RefinementMode refinement_mode;
896 *   ConvergenceTable convergence_table;
897 *   };
898 *  
899 * @endcode
900 *
901 *
902 * <a name="TheHDGclassimplementation"></a>
903 * <h3>The HDG class implementation</h3>
904 *
905
906 *
907 *
908 * <a name="Constructor"></a>
909 * <h4>Constructor</h4>
910 * The constructor is similar to those in other examples, with the exception
911 * of handling multiple DoFHandler and FiniteElement objects. Note that we
912 * create a system of finite elements for the local DG part, including the
913 * gradient/flux part and the scalar part.
914 *
915 * @code
916 *   template <int dim>
917 *   HDG<dim>::HDG(const unsigned int degree, const RefinementMode refinement_mode)
918 *   : fe_local(FE_DGQ<dim>(degree), dim, FE_DGQ<dim>(degree), 1)
919 *   , dof_handler_local(triangulation)
920 *   , fe(degree)
921 *   , dof_handler(triangulation)
922 *   , fe_u_post(degree + 1)
923 *   , dof_handler_u_post(triangulation)
924 *   , refinement_mode(refinement_mode)
925 *   {}
926 *  
927 *  
928 *  
929 * @endcode
930 *
931 *
932 * <a name="HDGsetup_system"></a>
933 * <h4>HDG::setup_system</h4>
934 * The system for an HDG solution is set up in an analogous manner to most
935 * of the other tutorial programs. We are careful to distribute dofs for
936 * all of our DoFHandler objects. The @p solution and @p system_matrix
937 * objects go with the global skeleton solution.
938 *
939 * @code
940 *   template <int dim>
941 *   void HDG<dim>::setup_system()
942 *   {
943 *   dof_handler_local.distribute_dofs(fe_local);
944 *   dof_handler.distribute_dofs(fe);
945 *   dof_handler_u_post.distribute_dofs(fe_u_post);
946 *  
947 *   std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
948 *   << std::endl;
949 *  
950 *   solution.reinit(dof_handler.n_dofs());
951 *   system_rhs.reinit(dof_handler.n_dofs());
952 *  
953 *   solution_local.reinit(dof_handler_local.n_dofs());
954 *   solution_u_post.reinit(dof_handler_u_post.n_dofs());
955 *  
956 *   constraints.clear();
957 *   DoFTools::make_hanging_node_constraints(dof_handler, constraints);
958 *   std::map<types::boundary_id, const Function<dim> *> boundary_functions;
959 *   Solution<dim> solution_function;
960 *   boundary_functions[0] = &solution_function;
961 *   VectorTools::project_boundary_values(dof_handler,
962 *   boundary_functions,
963 *   QGauss<dim - 1>(fe.degree + 1),
964 *   constraints);
965 *   constraints.close();
966 *  
967 * @endcode
968 *
969 * When creating the chunk sparsity pattern, we first create the usual
970 * dynamic sparsity pattern and then set the chunk size, which is equal
971 * to the number of dofs on a face, when copying this into the final
972 * sparsity pattern.
973 *
974 * @code
975 *   {
976 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
977 *   DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
978 *   sparsity_pattern.copy_from(dsp, fe.n_dofs_per_face());
979 *   }
980 *   system_matrix.reinit(sparsity_pattern);
981 *   }
982 *  
983 *  
984 *  
985 * @endcode
986 *
987 *
988 * <a name="HDGPerTaskData"></a>
989 * <h4>HDG::PerTaskData</h4>
990 * Next comes the definition of the local data structures for the parallel
991 * assembly. The first structure @p PerTaskData contains the local vector
992 * and matrix that are written into the global matrix, whereas the
993 * ScratchData contains all data that we need for the local assembly. There
994 * is one variable worth noting here, namely the boolean variable @p
995 * trace_reconstruct. As mentioned in the introduction, we solve the HDG
996 * system in two steps. First, we create a linear system for the skeleton
997 * system where we condense the local part into it via the Schur complement
998 * @f$D-CA^{-1}B@f$. Then, we solve for the local part using the skeleton
999 * solution. For these two steps, we need the same matrices on the elements
1000 * twice, which we want to compute by two assembly steps. Since most of the
1001 * code is similar, we do this with the same function but only switch
1002 * between the two based on a flag that we set when starting the
1003 * assembly. Since we need to pass this information on to the local worker
1004 * routines, we store it once in the task data.
1005 *
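* For orientation, and assuming the block notation of the introduction, the
* two passes correspond to the system
* @f[ \begin{pmatrix} A & B \\ C & D \end{pmatrix}
*     \begin{pmatrix} U \\ \Lambda \end{pmatrix} =
*     \begin{pmatrix} F \\ G \end{pmatrix}: @f]
* the first pass assembles and solves the condensed skeleton problem
* @f$(D - C A^{-1} B)\,\Lambda = G - C A^{-1} F@f$, while the second pass
* recovers the element-interior unknowns via @f$U = A^{-1}(F - B \Lambda)@f$.
*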
1006 * @code
1007 *   template <int dim>
1008 *   struct HDG<dim>::PerTaskData
1009 *   {
1010 *   FullMatrix<double> cell_matrix;
1011 *   Vector<double> cell_vector;
1012 *   std::vector<types::global_dof_index> dof_indices;
1013 *  
1014 *   bool trace_reconstruct;
1015 *  
1016 *   PerTaskData(const unsigned int n_dofs, const bool trace_reconstruct)
1017 *   : cell_matrix(n_dofs, n_dofs)
1018 *   , cell_vector(n_dofs)
1019 *   , dof_indices(n_dofs)
1020 *   , trace_reconstruct(trace_reconstruct)
1021 *   {}
1022 *   };
1023 *  
1024 *  
1025 *  
1026 * @endcode
1027 *
1028 *
1029 * <a name="HDGScratchData"></a>
1030 * <h4>HDG::ScratchData</h4>
1031 * @p ScratchData contains persistent data for each
1032 * thread within WorkStream. The FEValues, matrix,
1033 * and vector objects should be familiar by now. There are two objects that
1034 * need to be discussed: `std::vector<std::vector<unsigned int> >
1035 * fe_local_support_on_face` and `std::vector<std::vector<unsigned int> >
1036 * fe_support_on_face`. These are used to indicate whether or not the finite
1037 * elements chosen have support (non-zero values) on a given face of the
1038 * reference cell for the local part associated to @p fe_local and the
1039 * skeleton part @p fe. We extract this information in the
1040 * constructor and store it once for all cells that we work on. Had we not
1041 * stored this information, we would be forced to assemble a large number of
1042 * zero terms on each cell, which would significantly slow the program.
1043 *
1044 * @code
1045 *   template <int dim>
1046 *   struct HDG<dim>::ScratchData
1047 *   {
1048 *   FEValues<dim> fe_values_local;
1049 *   FEFaceValues<dim> fe_face_values_local;
1050 *   FEFaceValues<dim> fe_face_values;
1051 *  
1052 *   FullMatrix<double> ll_matrix;
1053 *   FullMatrix<double> lf_matrix;
1054 *   FullMatrix<double> fl_matrix;
1055 *   FullMatrix<double> tmp_matrix;
1056 *   Vector<double> l_rhs;
1057 *   Vector<double> tmp_rhs;
1058 *  
1059 *   std::vector<Tensor<1, dim>> q_phi;
1060 *   std::vector<double> q_phi_div;
1061 *   std::vector<double> u_phi;
1062 *   std::vector<Tensor<1, dim>> u_phi_grad;
1063 *   std::vector<double> tr_phi;
1064 *   std::vector<double> trace_values;
1065 *  
1066 *   std::vector<std::vector<unsigned int>> fe_local_support_on_face;
1067 *   std::vector<std::vector<unsigned int>> fe_support_on_face;
1068 *  
1069 *   ConvectionVelocity<dim> convection_velocity;
1070 *   RightHandSide<dim> right_hand_side;
1071 *   const Solution<dim> exact_solution;
1072 *  
1073 *   ScratchData(const FiniteElement<dim> &fe,
1074 *   const FiniteElement<dim> &fe_local,
1075 *   const QGauss<dim> & quadrature_formula,
1076 *   const QGauss<dim - 1> & face_quadrature_formula,
1077 *   const UpdateFlags local_flags,
1078 *   const UpdateFlags local_face_flags,
1079 *   const UpdateFlags flags)
1080 *   : fe_values_local(fe_local, quadrature_formula, local_flags)
1081 *   , fe_face_values_local(fe_local,
1082 *   face_quadrature_formula,
1083 *   local_face_flags)
1084 *   , fe_face_values(fe, face_quadrature_formula, flags)
1085 *   , ll_matrix(fe_local.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1086 *   , lf_matrix(fe_local.n_dofs_per_cell(), fe.n_dofs_per_cell())
1087 *   , fl_matrix(fe.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1088 *   , tmp_matrix(fe.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1089 *   , l_rhs(fe_local.n_dofs_per_cell())
1090 *   , tmp_rhs(fe_local.n_dofs_per_cell())
1091 *   , q_phi(fe_local.n_dofs_per_cell())
1092 *   , q_phi_div(fe_local.n_dofs_per_cell())
1093 *   , u_phi(fe_local.n_dofs_per_cell())
1094 *   , u_phi_grad(fe_local.n_dofs_per_cell())
1095 *   , tr_phi(fe.n_dofs_per_cell())
1096 *   , trace_values(face_quadrature_formula.size())
1097 *   , fe_local_support_on_face(GeometryInfo<dim>::faces_per_cell)
1098 *   , fe_support_on_face(GeometryInfo<dim>::faces_per_cell)
1099 *   , exact_solution()
1100 *   {
1101 *   for (const unsigned int face_no : GeometryInfo<dim>::face_indices())
1102 *   for (unsigned int i = 0; i < fe_local.n_dofs_per_cell(); ++i)
1103 *   {
1104 *   if (fe_local.has_support_on_face(i, face_no))
1105 *   fe_local_support_on_face[face_no].push_back(i);
1106 *   }
1107 *  
1108 *   for (const unsigned int face_no : GeometryInfo<dim>::face_indices())
1109 *   for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
1110 *   {
1111 *   if (fe.has_support_on_face(i, face_no))
1112 *   fe_support_on_face[face_no].push_back(i);
1113 *   }
1114 *   }
1115 *  
1116 *   ScratchData(const ScratchData &sd)
1117 *   : fe_values_local(sd.fe_values_local.get_fe(),
1118 *   sd.fe_values_local.get_quadrature(),
1119 *   sd.fe_values_local.get_update_flags())
1120 *   , fe_face_values_local(sd.fe_face_values_local.get_fe(),
1121 *   sd.fe_face_values_local.get_quadrature(),
1122 *   sd.fe_face_values_local.get_update_flags())
1123 *   , fe_face_values(sd.fe_face_values.get_fe(),
1124 *   sd.fe_face_values.get_quadrature(),
1125 *   sd.fe_face_values.get_update_flags())
1126 *   , ll_matrix(sd.ll_matrix)
1127 *   , lf_matrix(sd.lf_matrix)
1128 *   , fl_matrix(sd.fl_matrix)
1129 *   , tmp_matrix(sd.tmp_matrix)
1130 *   , l_rhs(sd.l_rhs)
1131 *   , tmp_rhs(sd.tmp_rhs)
1132 *   , q_phi(sd.q_phi)
1133 *   , q_phi_div(sd.q_phi_div)
1134 *   , u_phi(sd.u_phi)
1135 *   , u_phi_grad(sd.u_phi_grad)
1136 *   , tr_phi(sd.tr_phi)
1137 *   , trace_values(sd.trace_values)
1138 *   , fe_local_support_on_face(sd.fe_local_support_on_face)
1139 *   , fe_support_on_face(sd.fe_support_on_face)
1140 *   , exact_solution()
1141 *   {}
1142 *   };
1143 *  
1144 *  
1145 *  
1146 * @endcode
1147 *
1148 *
1149 * <a name="HDGPostProcessScratchData"></a>
1150 * <h4>HDG::PostProcessScratchData</h4>
1151 * @p PostProcessScratchData contains the data used by WorkStream
1152 * when post-processing the local solution @f$u^*@f$. It is similar to, but
1153 * much simpler than, @p ScratchData.
1154 *
1155 * @code
1156 *   template <int dim>
1157 *   struct HDG<dim>::PostProcessScratchData
1158 *   {
1159 *   FEValues<dim> fe_values_local;
1160 *   FEValues<dim> fe_values;
1161 *  
1162 *   std::vector<double> u_values;
1163 *   std::vector<Tensor<1, dim>> u_gradients;
1164 *   FullMatrix<double> cell_matrix;
1165 *  
1166 *   Vector<double> cell_rhs;
1167 *   Vector<double> cell_sol;
1168 *  
1169 *   PostProcessScratchData(const FiniteElement<dim> &fe,
1170 *   const FiniteElement<dim> &fe_local,
1171 *   const QGauss<dim> & quadrature_formula,
1172 *   const UpdateFlags local_flags,
1173 *   const UpdateFlags flags)
1174 *   : fe_values_local(fe_local, quadrature_formula, local_flags)
1175 *   , fe_values(fe, quadrature_formula, flags)
1176 *   , u_values(quadrature_formula.size())
1177 *   , u_gradients(quadrature_formula.size())
1178 *   , cell_matrix(fe.n_dofs_per_cell(), fe.n_dofs_per_cell())
1179 *   , cell_rhs(fe.n_dofs_per_cell())
1180 *   , cell_sol(fe.n_dofs_per_cell())
1181 *   {}
1182 *  
1183 *   PostProcessScratchData(const PostProcessScratchData &sd)
1184 *   : fe_values_local(sd.fe_values_local.get_fe(),
1185 *   sd.fe_values_local.get_quadrature(),
1186 *   sd.fe_values_local.get_update_flags())
1187 *   , fe_values(sd.fe_values.get_fe(),
1188 *   sd.fe_values.get_quadrature(),
1189 *   sd.fe_values.get_update_flags())
1190 *   , u_values(sd.u_values)
1191 *   , u_gradients(sd.u_gradients)
1192 *   , cell_matrix(sd.cell_matrix)
1193 *   , cell_rhs(sd.cell_rhs)
1194 *   , cell_sol(sd.cell_sol)
1195 *   {}
1196 *   };
1197 *  
1198 *  
1199 *  
1200 * @endcode
1201 *
1202 *
1203 * <a name="HDGassemble_system"></a>
1204 * <h4>HDG::assemble_system</h4>
1205 * The @p assemble_system function is similar to the one in @ref step_32 "step-32", where
1206 * the quadrature formula and the update flags are set up, and then
1207 * <code>WorkStream</code> is used to do the work in a multi-threaded
1208 * manner. The @p trace_reconstruct input parameter is used to decide
1209 * whether we are solving for the global skeleton solution (false) or the
1210 * local solution (true).
1211 *
1212
1213 *
1214 * One thing worth noting for the multi-threaded execution of assembly is
1215 * the fact that the local computations in `assemble_system_one_cell()` call
1216 * into BLAS and LAPACK functions if those are available in deal.II. Thus,
1217 * the underlying BLAS/LAPACK library must support calls from multiple
1218 * threads at the same time. Most implementations do support this, but some
1219 * libraries need to be built in a specific way to avoid problems. For
1220 * example, OpenBLAS compiled without multithreading inside the BLAS/LAPACK
1221 * calls needs to be built with a flag called `USE_LOCKING` set to true.
1222 *
1223 * @code
1224 *   template <int dim>
1225 *   void HDG<dim>::assemble_system(const bool trace_reconstruct)
1226 *   {
1227 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1228 *   const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
1229 *  
1230 *   const UpdateFlags local_flags(update_values | update_gradients |
1231 *   update_JxW_values | update_quadrature_points);
1232 *  
1233 *   const UpdateFlags local_face_flags(update_values);
1234 *  
1235 *   const UpdateFlags flags(update_values | update_normal_vectors |
1236 *   update_quadrature_points | update_JxW_values);
1237 *  
1238 *   PerTaskData task_data(fe.n_dofs_per_cell(), trace_reconstruct);
1239 *   ScratchData scratch(fe,
1240 *   fe_local,
1241 *   quadrature_formula,
1242 *   face_quadrature_formula,
1243 *   local_flags,
1244 *   local_face_flags,
1245 *   flags);
1246 *  
1247 *   WorkStream::run(dof_handler.begin_active(),
1248 *   dof_handler.end(),
1249 *   *this,
1250 *   &HDG<dim>::assemble_system_one_cell,
1251 *   &HDG<dim>::copy_local_to_global,
1252 *   scratch,
1253 *   task_data);
1254 *   }
1255 *  
1256 *  
1257 *  
1258 * @endcode
1259 *
1260 *
1261 * <a name="HDGassemble_system_one_cell"></a>
1262 * <h4>HDG::assemble_system_one_cell</h4>
1263 * The real work of the HDG program is done by @p assemble_system_one_cell.
1264 * Assembling the local matrices @f$A, B, C@f$ is done here, along with the
1265 * local contributions of the global matrix @f$D@f$.
1266 *
1267 * @code
1268 *   template <int dim>
1269 *   void HDG<dim>::assemble_system_one_cell(
1270 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
1271 *   ScratchData & scratch,
1272 *   PerTaskData & task_data)
1273 *   {
1274 * @endcode
1275 *
1276 * Construct iterator for dof_handler_local for FEValues reinit function.
1277 *
1278 * @code
1279 *   const typename DoFHandler<dim>::active_cell_iterator loc_cell =
1280 *   cell->as_dof_handler_iterator(dof_handler_local);
1281 *  
1282 *   const unsigned int n_q_points =
1283 *   scratch.fe_values_local.get_quadrature().size();
1284 *   const unsigned int n_face_q_points =
1285 *   scratch.fe_face_values_local.get_quadrature().size();
1286 *  
1287 *   const unsigned int loc_dofs_per_cell =
1288 *   scratch.fe_values_local.get_fe().n_dofs_per_cell();
1289 *  
1290 *   const FEValuesExtractors::Vector fluxes(0);
1291 *   const FEValuesExtractors::Scalar scalar(dim);
1292 *  
1293 *   scratch.ll_matrix = 0;
1294 *   scratch.l_rhs = 0;
1295 *   if (!task_data.trace_reconstruct)
1296 *   {
1297 *   scratch.lf_matrix = 0;
1298 *   scratch.fl_matrix = 0;
1299 *   task_data.cell_matrix = 0;
1300 *   task_data.cell_vector = 0;
1301 *   }
1302 *   scratch.fe_values_local.reinit(loc_cell);
1303 *  
1304 * @endcode
1305 *
1306 * We first compute the cell-interior contribution to @p ll_matrix matrix
1307 * (referred to as matrix @f$A@f$ in the introduction) corresponding to
1308 * local-local coupling, as well as the local right-hand-side vector. We
1309 * store the values at each quadrature point for the basis functions, the
1310 * right-hand-side value, and the convection velocity, in order to have
1311 * quick access to these fields.
1312 *
1313 * @code
1314 *   for (unsigned int q = 0; q < n_q_points; ++q)
1315 *   {
1316 *   const double rhs_value = scratch.right_hand_side.value(
1317 *   scratch.fe_values_local.quadrature_point(q));
1318 *   const Tensor<1, dim> convection = scratch.convection_velocity.value(
1319 *   scratch.fe_values_local.quadrature_point(q));
1320 *   const double JxW = scratch.fe_values_local.JxW(q);
1321 *   for (unsigned int k = 0; k < loc_dofs_per_cell; ++k)
1322 *   {
1323 *   scratch.q_phi[k] = scratch.fe_values_local[fluxes].value(k, q);
1324 *   scratch.q_phi_div[k] =
1325 *   scratch.fe_values_local[fluxes].divergence(k, q);
1326 *   scratch.u_phi[k] = scratch.fe_values_local[scalar].value(k, q);
1327 *   scratch.u_phi_grad[k] =
1328 *   scratch.fe_values_local[scalar].gradient(k, q);
1329 *   }
1330 *   for (unsigned int i = 0; i < loc_dofs_per_cell; ++i)
1331 *   {
1332 *   for (unsigned int j = 0; j < loc_dofs_per_cell; ++j)
1333 *   scratch.ll_matrix(i, j) +=
1334 *   (scratch.q_phi[i] * scratch.q_phi[j] -
1335 *   scratch.q_phi_div[i] * scratch.u_phi[j] +
1336 *   scratch.u_phi[i] * scratch.q_phi_div[j] -
1337 *   (scratch.u_phi_grad[i] * convection) * scratch.u_phi[j]) *
1338 *   JxW;
1339 *   scratch.l_rhs(i) += scratch.u_phi[i] * rhs_value * JxW;
1340 *   }
1341 *   }
1342 *  
1343 * @endcode
1344 *
1345 * Face terms are assembled on all faces of all elements. This is in
1346 * contrast to more traditional DG methods, where each face is only visited
1347 * once in the assembly procedure.
1348 *
1349 * @code
1350 *   for (const auto face_no : cell->face_indices())
1351 *   {
1352 *   scratch.fe_face_values_local.reinit(loc_cell, face_no);
1353 *   scratch.fe_face_values.reinit(cell, face_no);
1354 *  
1355 * @endcode
1356 *
1357 * The already obtained @f$\hat{u}@f$ values are needed when solving for the
1358 * local variables.
1359 *
1360 * @code
1361 *   if (task_data.trace_reconstruct)
1362 *   scratch.fe_face_values.get_function_values(solution,
1363 *   scratch.trace_values);
1364 *  
1365 *   for (unsigned int q = 0; q < n_face_q_points; ++q)
1366 *   {
1367 *   const double JxW = scratch.fe_face_values.JxW(q);
1368 *   const Point<dim> quadrature_point =
1369 *   scratch.fe_face_values.quadrature_point(q);
1370 *   const Tensor<1, dim> normal =
1371 *   scratch.fe_face_values.normal_vector(q);
1372 *   const Tensor<1, dim> convection =
1373 *   scratch.convection_velocity.value(quadrature_point);
1374 *  
1375 * @endcode
1376 *
1377 * Here we compute the stabilization parameter discussed in the
1378 * introduction: since the diffusion is one and the diffusion
1379 * length scale is set to 1/5, it simply results in a contribution
1380 * of 5 for the diffusion part and the magnitude of convection
1381 * through the element boundary in a centered scheme for the
1382 * convection part.
1383 *
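* Written out (a recap, assuming the notation of the introduction), the
* parameter computed in the line below is
* @f[ \tau = \frac{\kappa}{\ell} + |\mathbf{c} \cdot \mathbf{n}|
*          = 5 + |\mathbf{c} \cdot \mathbf{n}|, @f]
* since @f$\kappa = 1@f$ and @f$\ell = \tfrac{1}{5}@f$.
*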
1384 * @code
1385 *   const double tau_stab = (5. + std::abs(convection * normal));
1386 *  
1387 * @endcode
1388 *
1389 * We store the non-zero flux and scalar values, making use of the
1390 * support_on_face information we created in @p ScratchData.
1391 *
1392 * @code
1393 *   for (unsigned int k = 0;
1394 *   k < scratch.fe_local_support_on_face[face_no].size();
1395 *   ++k)
1396 *   {
1397 *   const unsigned int kk =
1398 *   scratch.fe_local_support_on_face[face_no][k];
1399 *   scratch.q_phi[k] =
1400 *   scratch.fe_face_values_local[fluxes].value(kk, q);
1401 *   scratch.u_phi[k] =
1402 *   scratch.fe_face_values_local[scalar].value(kk, q);
1403 *   }
1404 *  
1405 * @endcode
1406 *
1407 * When @p trace_reconstruct=false, we are preparing to assemble the
1408 * system for the skeleton variable @f$\hat{u}@f$. If this is the case,
1409 * we must assemble all local matrices associated with the problem:
1410 * local-local, local-face, face-local, and face-face. The
1411 * face-face matrix is stored as @p TaskData::cell_matrix, so that
1412 * it can be assembled into the global system by @p
1413 * copy_local_to_global.
1414 *
1415 * @code
1416 *   if (!task_data.trace_reconstruct)
1417 *   {
1418 *   for (unsigned int k = 0;
1419 *   k < scratch.fe_support_on_face[face_no].size();
1420 *   ++k)
1421 *   scratch.tr_phi[k] = scratch.fe_face_values.shape_value(
1422 *   scratch.fe_support_on_face[face_no][k], q);
1423 *   for (unsigned int i = 0;
1424 *   i < scratch.fe_local_support_on_face[face_no].size();
1425 *   ++i)
1426 *   for (unsigned int j = 0;
1427 *   j < scratch.fe_support_on_face[face_no].size();
1428 *   ++j)
1429 *   {
1430 *   const unsigned int ii =
1431 *   scratch.fe_local_support_on_face[face_no][i];
1432 *   const unsigned int jj =
1433 *   scratch.fe_support_on_face[face_no][j];
1434 *   scratch.lf_matrix(ii, jj) +=
1435 *   ((scratch.q_phi[i] * normal +
1436 *   (convection * normal - tau_stab) * scratch.u_phi[i]) *
1437 *   scratch.tr_phi[j]) *
1438 *   JxW;
1439 *  
1440 * @endcode
1441 *
1442 * Note the sign of the face_no-local matrix. We negate
1443 * the sign during assembly here so that we can use the
1444 * FullMatrix::mmult with addition when computing the
1445 * Schur complement.
1446 *
1447 * @code
1448 *   scratch.fl_matrix(jj, ii) -=
1449 *   ((scratch.q_phi[i] * normal +
1450 *   tau_stab * scratch.u_phi[i]) *
1451 *   scratch.tr_phi[j]) *
1452 *   JxW;
1453 *   }
1454 *  
1455 *   for (unsigned int i = 0;
1456 *   i < scratch.fe_support_on_face[face_no].size();
1457 *   ++i)
1458 *   for (unsigned int j = 0;
1459 *   j < scratch.fe_support_on_face[face_no].size();
1460 *   ++j)
1461 *   {
1462 *   const unsigned int ii =
1463 *   scratch.fe_support_on_face[face_no][i];
1464 *   const unsigned int jj =
1465 *   scratch.fe_support_on_face[face_no][j];
1466 *   task_data.cell_matrix(ii, jj) +=
1467 *   ((convection * normal - tau_stab) * scratch.tr_phi[i] *
1468 *   scratch.tr_phi[j]) *
1469 *   JxW;
1470 *   }
1471 *  
1472 *   if (cell->face(face_no)->at_boundary() &&
1473 *   (cell->face(face_no)->boundary_id() == 1))
1474 *   {
1475 *   const double neumann_value =
1476 *   -scratch.exact_solution.gradient(quadrature_point) *
1477 *   normal +
1478 *   convection * normal *
1479 *   scratch.exact_solution.value(quadrature_point);
1480 *   for (unsigned int i = 0;
1481 *   i < scratch.fe_support_on_face[face_no].size();
1482 *   ++i)
1483 *   {
1484 *   const unsigned int ii =
1485 *   scratch.fe_support_on_face[face_no][i];
1486 *   task_data.cell_vector(ii) +=
1487 *   scratch.tr_phi[i] * neumann_value * JxW;
1488 *   }
1489 *   }
1490 *   }
1491 *  
1492 * @endcode
1493 *
1494 * This last term adds the contribution of the term @f$\left<w,\tau
1495 * u_h\right>_{\partial \mathcal T}@f$ to the local matrix. As opposed
1496 * to the face matrices above, we need it in both assembly stages.
1497 *
1498 * @code
1499 *   for (unsigned int i = 0;
1500 *   i < scratch.fe_local_support_on_face[face_no].size();
1501 *   ++i)
1502 *   for (unsigned int j = 0;
1503 *   j < scratch.fe_local_support_on_face[face_no].size();
1504 *   ++j)
1505 *   {
1506 *   const unsigned int ii =
1507 *   scratch.fe_local_support_on_face[face_no][i];
1508 *   const unsigned int jj =
1509 *   scratch.fe_local_support_on_face[face_no][j];
1510 *   scratch.ll_matrix(ii, jj) +=
1511 *   tau_stab * scratch.u_phi[i] * scratch.u_phi[j] * JxW;
1512 *   }
1513 *  
1514 * @endcode
1515 *
1516 * When @p trace_reconstruct=true, we are solving for the local
1517 * solutions on an element by element basis. The local
1518 * right-hand-side is calculated by replacing the basis functions @p
1519 * tr_phi in the @p lf_matrix computation by the computed values @p
1520 * trace_values. Of course, the sign of the matrix is now minus
1521 * since we have moved everything to the other side of the equation.
1522 *
1523 * @code
1524 *   if (task_data.trace_reconstruct)
1525 *   for (unsigned int i = 0;
1526 *   i < scratch.fe_local_support_on_face[face_no].size();
1527 *   ++i)
1528 *   {
1529 *   const unsigned int ii =
1530 *   scratch.fe_local_support_on_face[face_no][i];
1531 *   scratch.l_rhs(ii) -=
1532 *   (scratch.q_phi[i] * normal +
1533 *   scratch.u_phi[i] * (convection * normal - tau_stab)) *
1534 *   scratch.trace_values[q] * JxW;
1535 *   }
1536 *   }
1537 *   }
1538 *  
1539 * @endcode
1540 *
1541 * Once assembly of all of the local contributions is complete, we must
1542 * either: (1) assemble the global system, or (2) compute the local solution
1543 * values and save them. In either case, the first step is to invert the
1544 * local-local matrix.
1545 *
1546 * @code
1547 *   scratch.ll_matrix.gauss_jordan();
1548 *  
1549 * @endcode
1550 *
1551 * For (1), we compute the Schur complement and add it to the @p
1552 * cell_matrix, matrix @f$D@f$ in the introduction.
1553 *
1554 * @code
1555 *   if (task_data.trace_reconstruct == false)
1556 *   {
1557 *   scratch.fl_matrix.mmult(scratch.tmp_matrix, scratch.ll_matrix);
1558 *   scratch.tmp_matrix.vmult_add(task_data.cell_vector, scratch.l_rhs);
1559 *   scratch.tmp_matrix.mmult(task_data.cell_matrix,
1560 *   scratch.lf_matrix,
1561 *   true);
1562 *   cell->get_dof_indices(task_data.dof_indices);
1563 *   }
1564 * @endcode
1565 *
1566 * For (2), we are simply solving (ll_matrix).(solution_local) = (l_rhs).
1567 * Hence, we multiply @p l_rhs by our already inverted local-local matrix
1568 * and store the result using the <code>set_dof_values</code> function.
1569 *
1570 * @code
1571 *   else
1572 *   {
1573 *   scratch.ll_matrix.vmult(scratch.tmp_rhs, scratch.l_rhs);
1574 *   loc_cell->set_dof_values(scratch.tmp_rhs, solution_local);
1575 *   }
1576 *   }
1577 *  
1578 *  
1579 *  
1580 * @endcode
1581 *
1582 *
1583 * <a name="HDGcopy_local_to_global"></a>
1584 * <h4>HDG::copy_local_to_global</h4>
1585 * If we are in the first step of the solution, i.e. @p trace_reconstruct=false,
1586 * then we assemble the local matrices into the global system.
1587 *
1588 * @code
1589 *   template <int dim>
1590 *   void HDG<dim>::copy_local_to_global(const PerTaskData &data)
1591 *   {
1592 *   if (data.trace_reconstruct == false)
1593 *   constraints.distribute_local_to_global(data.cell_matrix,
1594 *   data.cell_vector,
1595 *   data.dof_indices,
1596 *   system_matrix,
1597 *   system_rhs);
1598 *   }
1599 *  
1600 *  
1601 *  
1602 * @endcode
1603 *
1604 *
1605 * <a name="HDGsolve"></a>
1606 * <h4>HDG::solve</h4>
1607 * The skeleton solution is solved for by using a BiCGStab solver with
1608 * the identity preconditioner.
1609 *
1610 * @code
1611 *   template <int dim>
1612 *   void HDG<dim>::solve()
1613 *   {
1614 *   SolverControl solver_control(system_matrix.m() * 10,
1615 *   1e-11 * system_rhs.l2_norm());
1616 *   SolverBicgstab<Vector<double>> solver(solver_control);
1617 *   solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());
1618 *  
1619 *   std::cout << " Number of BiCGStab iterations: "
1620 *   << solver_control.last_step() << std::endl;
1621 *  
1622 *   system_matrix.clear();
1623 *   sparsity_pattern.reinit(0, 0, 0, 1);
1624 *  
1625 *   constraints.distribute(solution);
1626 *  
1627 * @endcode
1628 *
1629 * Once we have solved for the skeleton solution,
1630 * we can solve for the local solutions in an element-by-element
1631 * fashion. We do this by re-using the same @p assemble_system function
1632 * but switching @p trace_reconstruct to true.
1633 *
1634 * @code
1635 *   assemble_system(true);
1636 *   }
1637 *  
1638 *  
1639 *  
1640 * @endcode
1641 *
1642 *
1643 * <a name="HDGpostprocess"></a>
1644 * <h4>HDG::postprocess</h4>
1645 *
1646
1647 *
1648 * The postprocess method serves two purposes. First, we want to construct a
1649 * post-processed scalar variable in the element space of degree @f$p+1@f$ that
1650 * we hope will converge at order @f$p+2@f$. This is again an element-by-element
1651 * process and only involves the scalar solution as well as the gradient on
1652 * the local cell. To do this, we introduce the already defined scratch data
1653 * together with some update flags and run the work stream to do this in
1654 * parallel.
1655 *
1656
1657 *
1658 * Secondly, we want to compute discretization errors just as we did in
1659 * @ref step_7 "step-7". The overall procedure is similar with calls to
1660 * VectorTools::integrate_difference. The difference is in how we compute
1661 * the errors for the scalar variable and the gradient variable. In @ref step_7 "step-7",
1662 * we did this by computing @p L2_norm or @p H1_seminorm
1663 * contributions. Here, we have a DoFHandler with these two contributions
1664 * computed and sorted by their vector component, <code>[0, dim)</code> for
1665 * the
1666 * gradient and @p dim for the scalar. To compute their value, we hence use
1667 * a ComponentSelectFunction with either of them, together with the @p
1668 * SolutionAndGradient class introduced above that contains the analytic
1669 * parts of either of them. Eventually, we also compute the L2-error of the
1670 * post-processed solution and add the results into the convergence table.
1671 *
1672 * @code
1673 *   template <int dim>
1674 *   void HDG<dim>::postprocess()
1675 *   {
1676 *   {
1677 *   const QGauss<dim> quadrature_formula(fe_u_post.degree + 1);
1678 *   const UpdateFlags local_flags(update_values);
1679 *   const UpdateFlags flags(update_values | update_gradients |
1680 *   update_JxW_values);
1681 *  
1682 *   PostProcessScratchData scratch(
1683 *   fe_u_post, fe_local, quadrature_formula, local_flags, flags);
1684 *  
1685 *   WorkStream::run(
1686 *   dof_handler_u_post.begin_active(),
1687 *   dof_handler_u_post.end(),
1688 *   [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
1689 *   PostProcessScratchData & scratch,
1690 *   unsigned int & data) {
1691 *   this->postprocess_one_cell(cell, scratch, data);
1692 *   },
1693 *   std::function<void(const unsigned int &)>(),
1694 *   scratch,
1695 *   0U);
1696 *   }
1697 *  
1698 *   Vector<float> difference_per_cell(triangulation.n_active_cells());
1699 *  
1700 *   ComponentSelectFunction<dim> value_select(dim, dim + 1);
1701 *   VectorTools::integrate_difference(dof_handler_local,
1702 *   solution_local,
1703 *   SolutionAndGradient<dim>(),
1704 *   difference_per_cell,
1705 *   QGauss<dim>(fe.degree + 2),
1706 *   VectorTools::L2_norm,
1707 *   &value_select);
1708 *   const double L2_error =
1709 *   VectorTools::compute_global_error(triangulation,
1710 *   difference_per_cell,
1711 *   VectorTools::L2_norm);
1712 *  
1713 *   ComponentSelectFunction<dim> gradient_select(
1714 *   std::pair<unsigned int, unsigned int>(0, dim), dim + 1);
1715 *   VectorTools::integrate_difference(dof_handler_local,
1716 *   solution_local,
1717 *   SolutionAndGradient<dim>(),
1718 *   difference_per_cell,
1719 *   QGauss<dim>(fe.degree + 2),
1720 *   VectorTools::L2_norm,
1721 *   &gradient_select);
1722 *   const double grad_error =
1723 *   VectorTools::compute_global_error(triangulation,
1724 *   difference_per_cell,
1725 *   VectorTools::L2_norm);
1726 *  
1727 *   VectorTools::integrate_difference(dof_handler_u_post,
1728 *   solution_u_post,
1729 *   Solution<dim>(),
1730 *   difference_per_cell,
1731 *   QGauss<dim>(fe.degree + 3),
1732 *   VectorTools::L2_norm);
1733 *   const double post_error =
1734 *   VectorTools::compute_global_error(triangulation,
1735 *   difference_per_cell,
1736 *   VectorTools::L2_norm);
1737 *  
1738 *   convergence_table.add_value("cells", triangulation.n_active_cells());
1739 *   convergence_table.add_value("dofs", dof_handler.n_dofs());
1740 *  
1741 *   convergence_table.add_value("val L2", L2_error);
1742 *   convergence_table.set_scientific("val L2", true);
1743 *   convergence_table.set_precision("val L2", 3);
1744 *  
1745 *   convergence_table.add_value("grad L2", grad_error);
1746 *   convergence_table.set_scientific("grad L2", true);
1747 *   convergence_table.set_precision("grad L2", 3);
1748 *  
1749 *   convergence_table.add_value("val L2-post", post_error);
1750 *   convergence_table.set_scientific("val L2-post", true);
1751 *   convergence_table.set_precision("val L2-post", 3);
1752 *   }
1753 *  
1754 *  
1755 *  
1756 * @endcode
1757 *
1758 *
1759 * <a name="HDGpostprocess_one_cell"></a>
1760 * <h4>HDG::postprocess_one_cell</h4>
1761 *
1762
1763 *
1764 * This is the actual work done for the postprocessing. According to the
1765 * discussion in the introduction, we need to set up a system that projects
1766 * the gradient part of the DG solution onto the gradient of the
1767 * post-processed variable. Moreover, we need to set the average of the new
1768 * post-processed variable to equal the average of the scalar DG solution
1769 * on the cell.
1770 *
1771
1772 *
1773 * More technically speaking, the projection of the gradient is a system
1774 * that would potentially fill our @p dofs_per_cell times @p dofs_per_cell
1775 * matrix but is singular (the sum of all rows would be zero because the
1776 * constant function has zero gradient). Therefore, we take one row away and
1777 * use it for imposing the average of the scalar value. We pick the first
1778 * row for the scalar part, even though we could pick any row for @f$\mathcal
1779 * Q_{-p}@f$ elements. However, had we used FE_DGP elements instead, the first
1780 * row would correspond to the constant part already and deleting e.g. the
1781 * last row would give us a singular system. This way, our program can also
1782 * be used for those elements.
1783 *
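* Spelled out, on each cell @f$K@f$ we seek @f$u_h^* \in \mathcal Q_{p+1}(K)@f$
* such that
* @f[ (\nabla u_h^*, \nabla w_h)_K = -(\mathbf{q}_h, \nabla w_h)_K
*     \quad\text{and}\quad (u_h^*, 1)_K = (u_h, 1)_K @f]
* for all test functions @f$w_h@f$; in the code below, the first matrix row
* imposes the mean-value condition and the remaining rows the gradient
* projection.
*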
1784 * @code
1785 *   template <int dim>
1786 *   void HDG<dim>::postprocess_one_cell(
1787 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
1788 *   PostProcessScratchData & scratch,
1789 *   unsigned int &)
1790 *   {
1791 *   const typename DoFHandler<dim>::active_cell_iterator loc_cell =
1792 *   cell->as_dof_handler_iterator(dof_handler_local);
1793 *  
1794 *   scratch.fe_values_local.reinit(loc_cell);
1795 *   scratch.fe_values.reinit(cell);
1796 *  
1797 *   const FEValuesExtractors::Vector fluxes(0);
1798 *   const FEValuesExtractors::Scalar scalar(dim);
1799 *  
1800 *   const unsigned int n_q_points = scratch.fe_values.get_quadrature().size();
1801 *   const unsigned int dofs_per_cell = scratch.fe_values.dofs_per_cell;
1802 *  
1803 *   scratch.fe_values_local[scalar].get_function_values(solution_local,
1804 *   scratch.u_values);
1805 *   scratch.fe_values_local[fluxes].get_function_values(solution_local,
1806 *   scratch.u_gradients);
1807 *  
1808 *   double sum = 0;
1809 *   for (unsigned int i = 1; i < dofs_per_cell; ++i)
1810 *   {
1811 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1812 *   {
1813 *   sum = 0;
1814 *   for (unsigned int q = 0; q < n_q_points; ++q)
1815 *   sum += (scratch.fe_values.shape_grad(i, q) *
1816 *   scratch.fe_values.shape_grad(j, q)) *
1817 *   scratch.fe_values.JxW(q);
1818 *   scratch.cell_matrix(i, j) = sum;
1819 *   }
1820 *  
1821 *   sum = 0;
1822 *   for (unsigned int q = 0; q < n_q_points; ++q)
1823 *   sum -= (scratch.fe_values.shape_grad(i, q) * scratch.u_gradients[q]) *
1824 *   scratch.fe_values.JxW(q);
1825 *   scratch.cell_rhs(i) = sum;
1826 *   }
1827 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1828 *   {
1829 *   sum = 0;
1830 *   for (unsigned int q = 0; q < n_q_points; ++q)
1831 *   sum += scratch.fe_values.shape_value(j, q) * scratch.fe_values.JxW(q);
1832 *   scratch.cell_matrix(0, j) = sum;
1833 *   }
1834 *   {
1835 *   sum = 0;
1836 *   for (unsigned int q = 0; q < n_q_points; ++q)
1837 *   sum += scratch.u_values[q] * scratch.fe_values.JxW(q);
1838 *   scratch.cell_rhs(0) = sum;
1839 *   }
1840 *  
1841 * @endcode
1842 *
1843 * Having assembled all terms, we can again go on and solve the linear
1844 * system. We invert the matrix and then multiply the inverse by the
1845 * right hand side. An alternative (and more numerically stable) method
1846 * would have been to only factorize the matrix and apply the factorization.
1847 *
1848 * @code
1849 *   scratch.cell_matrix.gauss_jordan();
1850 *   scratch.cell_matrix.vmult(scratch.cell_sol, scratch.cell_rhs);
1851 *   cell->distribute_local_to_global(scratch.cell_sol, solution_u_post);
1852 *   }
1853 *  
1854 *  
1855 *  
1856 * @endcode
1857 *
1858 *
1859 * <a name="HDGoutput_results"></a>
1860 * <h4>HDG::output_results</h4>
1861 * We have three sets of results that we would like to output: the local
1862 * solution, the post-processed local solution, and the skeleton solution. The
1863 * first two both 'live' on element volumes, whereas the latter lives on
1864 * codimension-1 surfaces
1865 * of the triangulation. Our @p output_results function writes all local solutions
1866 * to the same vtk file, even though they correspond to different
1867 * DoFHandler objects. The graphical output for the skeleton
1868 * variable is done through use of the DataOutFaces class.
1869 *
1870 * @code
1871 *   template <int dim>
1872 *   void HDG<dim>::output_results(const unsigned int cycle)
1873 *   {
1874 *   std::string filename;
1875 *   switch (refinement_mode)
1876 *   {
1877 *   case global_refinement:
1878 *   filename = "solution-global";
1879 *   break;
1880 *   case adaptive_refinement:
1881 *   filename = "solution-adaptive";
1882 *   break;
1883 *   default:
1884 *   Assert(false, ExcNotImplemented());
1885 *   }
1886 *  
1887 *   std::string face_out(filename);
1888 *   face_out += "-face";
1889 *  
1890 *   filename += "-q" + Utilities::int_to_string(fe.degree, 1);
1891 *   filename += "-" + Utilities::int_to_string(cycle, 2);
1892 *   filename += ".vtk";
1893 *   std::ofstream output(filename);
1894 *  
1895 *   DataOut<dim> data_out;
1896 *  
1897 * @endcode
1898 *
1899 * We first define the names and types of the local solution,
1900 * and add the data to @p data_out.
1901 *
1902 * @code
1903 *   std::vector<std::string> names(dim, "gradient");
1904 *   names.emplace_back("solution");
1905 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
1906 *   component_interpretation(
1907 *   dim + 1, DataComponentInterpretation::component_is_part_of_vector);
1908 *   component_interpretation[dim] =
1909 *   DataComponentInterpretation::component_is_scalar;
1910 *   data_out.add_data_vector(dof_handler_local,
1911 *   solution_local,
1912 *   names,
1913 *   component_interpretation);
1914 *  
1915 * @endcode
1916 *
1917 * The second data item we add is the post-processed solution.
1918 * In this case, it is a single scalar variable belonging to
1919 * a different DoFHandler.
1920 *
1921 * @code
1922 *   std::vector<std::string> post_name(1, "u_post");
1923 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
1924 *   post_comp_type(1, DataComponentInterpretation::component_is_scalar);
1925 *   data_out.add_data_vector(dof_handler_u_post,
1926 *   solution_u_post,
1927 *   post_name,
1928 *   post_comp_type);
1929 *  
1930 *   data_out.build_patches(fe.degree);
1931 *   data_out.write_vtk(output);
1932 *  
1933 *   face_out += "-q" + Utilities::int_to_string(fe.degree, 1);
1934 *   face_out += "-" + Utilities::int_to_string(cycle, 2);
1935 *   face_out += ".vtk";
1936 *   std::ofstream face_output(face_out);
1937 *  
1938 * @endcode
1939 *
1940 * The <code>DataOutFaces</code> class works analogously to the
1941 * <code>DataOut</code> class when we have a <code>DoFHandler</code> that
1942 * defines the solution on the skeleton of the triangulation. We treat it
1943 * as such here, and the code is similar to that above.
1944 *
1945 * @code
1946 *   DataOutFaces<dim> data_out_face(false);
1947 *   std::vector<std::string> face_name(1, "u_hat");
1948 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
1949 *   face_component_type(1, DataComponentInterpretation::component_is_scalar);
1950 *  
1951 *   data_out_face.add_data_vector(dof_handler,
1952 *   solution,
1953 *   face_name,
1954 *   face_component_type);
1955 *  
1956 *   data_out_face.build_patches(fe.degree);
1957 *   data_out_face.write_vtk(face_output);
1958 *   }
1959 *  
1960 * @endcode
1961 *
1962 *
1963 * <a name="HDGrefine_grid"></a>
1964 * <h4>HDG::refine_grid</h4>
1965 *
1966
1967 *
1968 * We implement two different refinement cases for HDG, just as in
1969 * <code>@ref step_7 "step-7"</code>: adaptive_refinement and global_refinement. The
1970 * global_refinement option recreates the entire triangulation every
1971 * time. This is because we want to use a finer sequence of meshes than what
1972 * we would get with one refinement step, namely 2, 3, 4, 6, 8, 12, 16, ...
1973 * elements per direction.
1974 *
1975
1976 *
1977 * The adaptive_refinement mode uses the <code>KellyErrorEstimator</code> to
1978 * give a decent indication of the non-regular regions in the scalar local
1979 * solutions.
1980 *
1981 * @code
1982 *   template <int dim>
1983 *   void HDG<dim>::refine_grid(const unsigned int cycle)
1984 *   {
1985 *   if (cycle == 0)
1986 *   {
1987 *   GridGenerator::subdivided_hyper_cube(triangulation, 2, -1, 1);
1988 *   triangulation.refine_global(3 - dim);
1989 *   }
1990 *   else
1991 *   switch (refinement_mode)
1992 *   {
1993 *   case global_refinement:
1994 *   {
1995 *   triangulation.clear();
1996 *   GridGenerator::subdivided_hyper_cube(triangulation,
1997 *   2 + (cycle % 2),
1998 *   -1,
1999 *   1);
2000 *   triangulation.refine_global(3 - dim + cycle / 2);
2001 *   break;
2002 *   }
2003 *  
2004 *   case adaptive_refinement:
2005 *   {
2006 *   Vector<float> estimated_error_per_cell(
2007 *   triangulation.n_active_cells());
2008 *  
2009 *   const FEValuesExtractors::Scalar scalar(dim);
2010 *   std::map<types::boundary_id, const Function<dim> *>
2011 *   neumann_boundary;
2012 *   KellyErrorEstimator<dim>::estimate(dof_handler_local,
2013 *   QGauss<dim - 1>(fe.degree + 1),
2014 *   neumann_boundary,
2015 *   solution_local,
2016 *   estimated_error_per_cell,
2017 *   fe_local.component_mask(
2018 *   scalar));
2019 *  
2020 *   GridRefinement::refine_and_coarsen_fixed_number(
2021 *   triangulation, estimated_error_per_cell, 0.3, 0.);
2022 *  
2023 *   triangulation.execute_coarsening_and_refinement();
2024 *  
2025 *   break;
2026 *   }
2027 *  
2028 *   default:
2029 *   {
2030 *   Assert(false, ExcNotImplemented());
2031 *   }
2032 *   }
2033 *  
2034 * @endcode
2035 *
2036 * Just as in @ref step_7 "step-7", we set the boundary indicator of two of the faces to 1
2037 * where we want to specify Neumann boundary conditions instead of Dirichlet
2038 * conditions. Since we re-create the triangulation every time for global
2039 * refinement, the flags are set in every refinement step, not just at the
2040 * beginning.
2041 *
2042 * @code
2043 *   for (const auto &cell : triangulation.cell_iterators())
2044 *   for (const auto &face : cell->face_iterators())
2045 *   if (face->at_boundary())
2046 *   if ((std::fabs(face->center()(0) - (-1)) < 1e-12) ||
2047 *   (std::fabs(face->center()(1) - (-1)) < 1e-12))
2048 *   face->set_boundary_id(1);
2049 *   }
2050 *  
2051 * @endcode
2052 *
2053 *
2054 * <a name="HDGrun"></a>
2055 * <h4>HDG::run</h4>
2056 * The functionality here is basically the same as <code>@ref step_7 "step-7"</code>.
2057 * We loop over 10 cycles, refining the grid on each one. At the end,
2058 * convergence tables are created.
2059 *
2060 * @code
2061 *   template <int dim>
2062 *   void HDG<dim>::run()
2063 *   {
2064 *   for (unsigned int cycle = 0; cycle < 10; ++cycle)
2065 *   {
2066 *   std::cout << "Cycle " << cycle << ':' << std::endl;
2067 *  
2068 *   refine_grid(cycle);
2069 *   setup_system();
2070 *   assemble_system(false);
2071 *   solve();
2072 *   postprocess();
2073 *   output_results(cycle);
2074 *   }
2075 *  
2076 * @endcode
2077 *
2078 * There is one minor change for the convergence table compared to @ref step_7 "step-7":
2079 * Since we did not refine our mesh by a factor two in each cycle (but
2080 * rather used the sequence 2, 3, 4, 6, 8, 12, ...), we need to tell the
2081 * convergence rate evaluation about this. We do this by setting the
2082 * number of cells as a reference column and additionally specifying the
2083 * dimension of the problem, which gives the necessary information for the
2084 * relation between number of cells and mesh size.
2085 *
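* With the number of cells as the reference column and @p dim passed as the
* last argument, the reported rate is (up to the exact convention used by
* ConvergenceTable) computed as
* @f[ \text{rate}_k \approx \text{dim}\,
*     \frac{\log(e_{k-1}/e_k)}{\log(N_k/N_{k-1})}, @f]
* where @f$N_k@f$ is the number of cells in cycle @f$k@f$; this reduces to the
* familiar @f$\log_2(e_{k-1}/e_k)@f$ when the mesh size is halved.
*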
2086 * @code
2087 *   if (refinement_mode == global_refinement)
2088 *   {
2089 *   convergence_table.evaluate_convergence_rates(
2090 *   "val L2", "cells", ConvergenceTable::reduction_rate_log2, dim);
2091 *   convergence_table.evaluate_convergence_rates(
2092 *   "grad L2", "cells", ConvergenceTable::reduction_rate_log2, dim);
2093 *   convergence_table.evaluate_convergence_rates(
2094 *   "val L2-post", "cells", ConvergenceTable::reduction_rate_log2, dim);
2095 *   }
2096 *   convergence_table.write_text(std::cout);
2097 *   }
2098 *  
2099 *   } // end of namespace Step51
2100 *  
2101 *  
2102 *  
2103 *   int main()
2104 *   {
2105 *   const unsigned int dim = 2;
2106 *  
2107 *   try
2108 *   {
2109 * @endcode
2110 *
2111 * Now for the three calls to the main class in complete analogy to
2112 * @ref step_7 "step-7".
2113 *
2114 * @code
2115 *   {
2116 *   std::cout << "Solving with Q1 elements, adaptive refinement"
2117 *   << std::endl
2118 *   << "============================================="
2119 *   << std::endl
2120 *   << std::endl;
2121 *  
2122 *   Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::adaptive_refinement);
2123 *   hdg_problem.run();
2124 *  
2125 *   std::cout << std::endl;
2126 *   }
2127 *  
2128 *   {
2129 *   std::cout << "Solving with Q1 elements, global refinement" << std::endl
2130 *   << "===========================================" << std::endl
2131 *   << std::endl;
2132 *  
2133 *   Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::global_refinement);
2134 *   hdg_problem.run();
2135 *  
2136 *   std::cout << std::endl;
2137 *   }
2138 *  
2139 *   {
2140 *   std::cout << "Solving with Q3 elements, global refinement" << std::endl
2141 *   << "===========================================" << std::endl
2142 *   << std::endl;
2143 *  
2144 *   Step51::HDG<dim> hdg_problem(3, Step51::HDG<dim>::global_refinement);
2145 *   hdg_problem.run();
2146 *  
2147 *   std::cout << std::endl;
2148 *   }
2149 *   }
2150 *   catch (std::exception &exc)
2151 *   {
2152 *   std::cerr << std::endl
2153 *   << std::endl
2154 *   << "----------------------------------------------------"
2155 *   << std::endl;
2156 *   std::cerr << "Exception on processing: " << std::endl
2157 *   << exc.what() << std::endl
2158 *   << "Aborting!" << std::endl
2159 *   << "----------------------------------------------------"
2160 *   << std::endl;
2161 *   return 1;
2162 *   }
2163 *   catch (...)
2164 *   {
2165 *   std::cerr << std::endl
2166 *   << std::endl
2167 *   << "----------------------------------------------------"
2168 *   << std::endl;
2169 *   std::cerr << "Unknown exception!" << std::endl
2170 *   << "Aborting!" << std::endl
2171 *   << "----------------------------------------------------"
2172 *   << std::endl;
2173 *   return 1;
2174 *   }
2175 *  
2176 *   return 0;
2177 *   }
2178 * @endcode
2179<a name="Results"></a><h1>Results</h1>
2180
2181
2182<a name="Programoutput"></a><h3>Program output</h3>
2183
2184
2185We first have a look at the output generated by the program when run in 2D. In
2186the four images below, we show the solution for polynomial degree @f$p=1@f$
2187and cycles 2, 3, 4, and 8 of the program. In the plots, we overlay the data
2188of the interior (DG) part and the skeleton part (@f$\hat{u}@f$)
2189in the same plot. We had to generate two different data sets because cells
2190and faces represent different geometric entities, the combination of which (in
2191the same file) is not supported in the VTK output of deal.II.
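
The following is a condensed, hypothetical sketch of how two such files can be
written; the function and vector names are for illustration only, and the
program's own output routine does the same in more generality:

@code
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/data_out_faces.h>

#include <fstream>

using namespace dealii;

template <int dim>
void write_cell_and_face_data(const DoFHandler<dim> &dof_handler_local,
                              const Vector<double> & u_cell,
                              const DoFHandler<dim> &dof_handler,
                              const Vector<double> & u_hat)
{
  // Cell (DG) data goes into one file ...
  DataOut<dim> data_out;
  data_out.add_data_vector(dof_handler_local, u_cell, "u");
  data_out.build_patches();
  std::ofstream cell_file("solution_cells.vtk");
  data_out.write_vtk(cell_file);

  // ... and the skeleton variable, which lives on the faces, into a second
  // one, written by DataOutFaces instead of DataOut.
  DataOutFaces<dim> data_out_faces(/*surface_only=*/false);
  data_out_faces.add_data_vector(dof_handler, u_hat, "u_hat");
  data_out_faces.build_patches();
  std::ofstream face_file("solution_faces.vtk");
  data_out_faces.write_vtk(face_file);
}
@endcode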
2192
2193The images show the distinctive features of HDG: The cell solution (colored
2194surfaces) is discontinuous between the cells. The solution on the skeleton
2195variable sits on the faces and ties together the local parts. The skeleton
2196solution is not continuous on the vertices where the faces meet, even though
2197its values are quite close along lines in the same coordinate direction. The
2198skeleton solution can be interpreted as a rubber spring between the two sides
2199that balances the jumps in the solution (or rather, the flux @f$\kappa \nabla u
2200+ \mathbf{c} u@f$). From the picture at the top left, it is clear that
2201the bulk solution frequently over- and undershoots and that the
2202skeleton variable is indeed a better approximation to the exact
2203solution; this explains why we can get a better solution using a
2204postprocessing step.
2205
2206As the mesh is refined, the jumps between the cells get
2207smaller (since we approximate a smooth solution), and the skeleton solution approaches
2208the interior parts. For cycle 8, there is no visible difference in the two
2209variables. We also see how boundary conditions are implemented weakly and that
2210the interior variables do not exactly satisfy boundary conditions. On the
2211lower and left boundaries, we set Neumann boundary conditions, whereas we set
2212Dirichlet conditions on the right and top boundaries.
2213
2214<table align="center">
2215 <tr>
2216 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_2.png" alt=""></td>
2217 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_3.png" alt=""></td>
2218 </tr>
2219 <tr>
2220 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_4.png" alt=""></td>
2221 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_8.png" alt=""></td>
2222 </tr>
2223</table>
2224
2225Next, we have a look at the post-processed solution, again at cycles 2, 3, 4,
2226and 8. This is a discontinuous solution that is locally described by second
2227order polynomials. While the solution does not look very good on the mesh of
2228cycle two, it looks much better for cycles three and four. As shown by the
2229convergence table below, we find that it also converges more quickly to the
2230analytical solution.
2231
2232<table align="center">
2233 <tr>
2234 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_2.png" alt=""></td>
2235 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_3.png" alt=""></td>
2236 </tr>
2237 <tr>
2238 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_4.png" alt=""></td>
2239 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_8.png" alt=""></td>
2240 </tr>
2241</table>
2242
2243Finally, we look at the solution for @f$p=3@f$ at cycle 2. Despite the coarse
2244mesh with only 64 cells, the post-processed solution is similar in quality
2245to the linear solution (not post-processed) at cycle 8 with 4,096
2246cells. This clearly shows the superiority of high order methods for smooth
2247solutions.
2248
2249<table align="center">
2250 <tr>
2251 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_q3_2.png" alt=""></td>
2252 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_q3_2.png" alt=""></td>
2253 </tr>
2254</table>
2255
2256<a name="Convergencetables"></a><h4>Convergence tables</h4>
2257
2258
2259When the program is run, it also outputs information about the respective
2260steps and, at the end, convergence tables with the errors in the various
2261components. In 2D, the convergence tables look as follows:
2262
2263@code
2264Q1 elements, adaptive refinement:
2265cells dofs val L2 grad L2 val L2-post
2266 16 80 1.804e+01 2.207e+01 1.798e+01
2267 31 170 9.874e+00 1.322e+01 9.798e+00
2268 61 314 7.452e-01 3.793e+00 4.891e-01
2269 121 634 3.240e-01 1.511e+00 2.616e-01
2270 238 1198 8.585e-02 8.212e-01 1.808e-02
2271 454 2290 4.802e-02 5.178e-01 2.195e-02
2272 898 4378 2.561e-02 2.947e-01 4.318e-03
2273 1720 7864 1.306e-02 1.664e-01 2.978e-03
2274 3271 14638 7.025e-03 9.815e-02 1.075e-03
2275 6217 27214 4.119e-03 6.407e-02 9.975e-04
2276
2277Q1 elements, global refinement:
2278cells dofs val L2 grad L2 val L2-post
2279 16 80 1.804e+01 - 2.207e+01 - 1.798e+01 -
2280 36 168 6.125e+00 2.66 9.472e+00 2.09 6.084e+00 2.67
2281 64 288 9.785e-01 6.38 4.260e+00 2.78 7.102e-01 7.47
2282 144 624 2.730e-01 3.15 1.866e+00 2.04 6.115e-02 6.05
2283 256 1088 1.493e-01 2.10 1.046e+00 2.01 2.880e-02 2.62
2284 576 2400 6.965e-02 1.88 4.846e-01 1.90 9.204e-03 2.81
2285 1024 4224 4.018e-02 1.91 2.784e-01 1.93 4.027e-03 2.87
2286 2304 9408 1.831e-02 1.94 1.264e-01 1.95 1.236e-03 2.91
2287 4096 16640 1.043e-02 1.96 7.185e-02 1.96 5.306e-04 2.94
2288 9216 37248 4.690e-03 1.97 3.228e-02 1.97 1.599e-04 2.96
2289
2290Q3 elements, global refinement:
2291cells dofs val L2 grad L2 val L2-post
2292 16 160 3.613e-01 - 1.891e+00 - 3.020e-01 -
2293 36 336 6.411e-02 4.26 5.081e-01 3.24 3.238e-02 5.51
2294 64 576 3.480e-02 2.12 2.533e-01 2.42 5.277e-03 6.31
2295 144 1248 8.297e-03 3.54 5.924e-02 3.58 6.330e-04 5.23
2296 256 2176 2.254e-03 4.53 1.636e-02 4.47 1.403e-04 5.24
2297 576 4800 4.558e-04 3.94 3.277e-03 3.96 1.844e-05 5.01
2298 1024 8448 1.471e-04 3.93 1.052e-03 3.95 4.378e-06 5.00
2299 2304 18816 2.956e-05 3.96 2.104e-04 3.97 5.750e-07 5.01
2300 4096 33280 9.428e-06 3.97 6.697e-05 3.98 1.362e-07 5.01
2301 9216 74496 1.876e-06 3.98 1.330e-05 3.99 1.788e-08 5.01
2302@endcode
2303
2304
2305One can see the error reduction upon grid refinement, and for the cases where
2306global refinement was performed, also the convergence rates. The quadratic
2307convergence rates of Q1 elements in the @f$L_2@f$ norm for both the scalar
2308variable and the gradient variable are apparent, as is the cubic rate for the
2309postprocessed scalar variable in the @f$L_2@f$ norm. Note this distinctive
2310feature of an HDG solution. In typical continuous finite elements, the
2311gradient of the solution of order @f$p@f$ converges at rate @f$p@f$ only, as
2312opposed to @f$p+1@f$ for the actual solution. Even though superconvergence
2313results for finite elements are also available (e.g. superconvergent patch
2314recovery first introduced by Zienkiewicz and Zhu), these are typically limited
2315to structured meshes and other special cases. For Q3 HDG variables, the scalar
2316variable and gradient converge at fourth order and the postprocessed scalar
2317variable at fifth order.
2318
2319The same convergence rates are observed in 3D.
2320@code
2321Q1 elements, adaptive refinement:
2322cells dofs val L2 grad L2 val L2-post
2323 8 144 7.122e+00 1.941e+01 6.102e+00
2324 29 500 3.309e+00 1.023e+01 2.145e+00
2325 113 1792 2.204e+00 1.023e+01 1.912e+00
2326 379 5732 6.085e-01 5.008e+00 2.233e-01
2327 1317 19412 1.543e-01 1.464e+00 4.196e-02
2328 4579 64768 5.058e-02 5.611e-01 9.521e-03
2329 14596 199552 2.129e-02 3.122e-01 4.569e-03
2330 46180 611400 1.033e-02 1.622e-01 1.684e-03
2331144859 1864212 5.007e-03 8.371e-02 7.364e-04
2332451060 5684508 2.518e-03 4.562e-02 3.070e-04
2333
2334Q1 elements, global refinement:
2335cells dofs val L2 grad L2 val L2-post
2336 8 144 7.122e+00 - 1.941e+01 - 6.102e+00 -
2337 27 432 5.491e+00 0.64 2.184e+01 -0.29 4.448e+00 0.78
2338 64 960 3.646e+00 1.42 1.299e+01 1.81 3.306e+00 1.03
2339 216 3024 1.595e+00 2.04 8.550e+00 1.03 1.441e+00 2.05
2340 512 6912 6.922e-01 2.90 5.306e+00 1.66 2.511e-01 6.07
2341 1728 22464 2.915e-01 2.13 2.490e+00 1.87 8.588e-02 2.65
2342 4096 52224 1.684e-01 1.91 1.453e+00 1.87 4.055e-02 2.61
2343 13824 172800 7.972e-02 1.84 6.861e-01 1.85 1.335e-02 2.74
2344 32768 405504 4.637e-02 1.88 3.984e-01 1.89 5.932e-03 2.82
2345110592 1354752 2.133e-02 1.92 1.830e-01 1.92 1.851e-03 2.87
2346
2347Q3 elements, global refinement:
2348cells dofs val L2 grad L2 val L2-post
2349 8 576 5.670e+00 - 1.868e+01 - 5.462e+00 -
2350 27 1728 1.048e+00 4.16 6.988e+00 2.42 8.011e-01 4.73
2351 64 3840 2.831e-01 4.55 2.710e+00 3.29 1.363e-01 6.16
2352 216 12096 7.883e-02 3.15 7.721e-01 3.10 2.158e-02 4.55
2353 512 27648 3.642e-02 2.68 3.305e-01 2.95 5.231e-03 4.93
2354 1728 89856 8.546e-03 3.58 7.581e-02 3.63 7.640e-04 4.74
2355 4096 208896 2.598e-03 4.14 2.313e-02 4.13 1.783e-04 5.06
2356 13824 691200 5.314e-04 3.91 4.697e-03 3.93 2.355e-05 4.99
2357 32768 1622016 1.723e-04 3.91 1.517e-03 3.93 5.602e-06 4.99
2358110592 5419008 3.482e-05 3.94 3.055e-04 3.95 7.374e-07 5.00
2359@endcode
2360
2361<a name="Comparisonwithcontinuousfiniteelements"></a><h3>Comparison with continuous finite elements</h3>
2362
2363
2364<a name="Resultsfor2D"></a><h4>Results for 2D</h4>
2365
2366
2367The convergence tables verify the expected convergence rates stated in the
2368introduction. Now, we want to show a quick comparison of the computational
2369efficiency of the HDG method compared to a usual finite element (continuous
2370Galerkin) method on the problem of this tutorial. Of course, stability aspects
2371of the HDG method compared to continuous finite elements for
2372transport-dominated problems are also important in practice, which is an
2373aspect not seen on a problem with smooth analytic solution. In the picture
2374below, we compare the @f$L_2@f$ error as a function of the number of degrees of
2375freedom (left) and of the computing time spent in the linear solver (right)
2376in two space dimensions for continuous finite elements (CG) and the hybridized
2377discontinuous Galerkin method presented in this tutorial. As opposed to the
2378tutorial where we only use unpreconditioned BiCGStab, the times shown in the
2379figures below use the Trilinos algebraic multigrid preconditioner in
2380TrilinosWrappers::PreconditionAMG. For the HDG part, a wrapper around
2381ChunkSparseMatrix for the trace variable has been used in order to utilize the
2382block structure in the matrix on the finest level.
2383
2384<table align="center">
2385 <tr>
2386 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_plain.png" width="400" alt=""></td>
2387 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_plain.png" width="400" alt=""></td>
2388 </tr>
2389</table>
2390
2391The results in the graphs show that the HDG method is slower than continuous
2392finite elements at @f$p=1@f$, about equally fast for cubic elements and
2393faster for sixth order elements. However, we have seen above that the HDG
2394method actually produces solutions which are more accurate than what is
2395represented in the original variables. Therefore, in the next two plots below
2396we instead display the error of the post-processed solution for HDG (denoted
2397by @f$p=1^*@f$ for example). We now see a clear advantage of HDG for the same
2398amount of work for both @f$p=3@f$ and @f$p=6@f$, and about the same quality
2399for @f$p=1@f$.
2400
2401<table align="center">
2402 <tr>
2403 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_post.png" width="400" alt=""></td>
2404 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_post.png" width="400" alt=""></td>
2405 </tr>
2406</table>
2407
2408Since the HDG method actually produces results converging as
2409@f$h^{p+2}@f$, we should compare it to a continuous Galerkin
2410solution with the same asymptotic convergence behavior, i.e., FE_Q with degree
2411@f$p+1@f$. If we do this, we get the convergence curves below. We see that
2412CG with second order polynomials is again clearly better than HDG with
2413linears. However, the advantage of HDG for higher orders remains.
2414
2415<table align="center">
2416 <tr>
2417 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_postb.png" width="400" alt=""></td>
2418 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_postb.png" width="400" alt=""></td>
2419 </tr>
2420</table>
2421
2422The results are in line with properties of DG methods in general: Best
2423performance is typically not achieved for linear elements, but rather at
2424somewhat higher order, usually around @f$p=3@f$. This is because of a
2425volume-to-surface effect for discontinuous solutions with too much of the
2426solution living on the surfaces and hence duplicating work when the elements
2427are linear. Put differently, DG methods are often most efficient when used
2428at relatively high order, despite their focus on a discontinuous (and hence,
2429seemingly less accurate) representation of solutions.
2430
2431<a name="Resultsfor3D"></a><h4>Results for 3D</h4>
2432
2433
2434We now show the same figures in 3D: The first row shows the number of degrees
2435of freedom and computing time versus the @f$L_2@f$ error in the scalar variable
2436@f$u@f$ for CG and HDG at order @f$p@f$, the second row shows the
2437post-processed HDG solution instead of the original one, and the third row
2438compares the post-processed HDG solution with CG at order @f$p+1@f$. In 3D,
2439the volume-to-surface effect makes the cost of HDG somewhat higher and the CG
2440solution is clearly better than HDG for linears by any metric. For cubics, HDG
2441and CG are of similar quality, whereas HDG is again more efficient for sixth
2442order polynomials. One can alternatively use the combination of FE_DGP
2443and FE_FaceP instead of (FE_DGQ, FE_FaceQ), which do not use tensor product
2444polynomials of degree @f$p@f$ but Legendre polynomials of <i>complete</i>
2445degree @f$p@f$. There are fewer degrees of freedom on the skeleton variable
2446for FE_FaceP for a given mesh size, but the solution quality (error vs. number
2447of DoFs) is very similar to the results for FE_FaceQ.
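
A hypothetical minimal sketch of that swap is given below; the function and
variable names are for illustration only, and the local field has dim+1
components (the flux plus the scalar), as in this tutorial:

@code
#include <deal.II/fe/fe_dgp.h>
#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_face.h>
#include <deal.II/fe/fe_system.h>

using namespace dealii;

template <int dim>
void make_element_pairs(const unsigned int degree)
{
  // Tensor-product pair used in this tutorial: dim+1 local components
  // (gradient plus scalar) and the trace variable on the faces.
  FESystem<dim> fe_local_q(FE_DGQ<dim>(degree), dim + 1);
  FE_FaceQ<dim> fe_skeleton_q(degree);

  // Complete-degree (Legendre) alternative; the rest of the program --
  // DoFHandlers, assembly, post-processing -- would stay unchanged.
  FESystem<dim> fe_local_p(FE_DGP<dim>(degree), dim + 1);
  FE_FaceP<dim> fe_skeleton_p(degree);
}
@endcode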
2448
2449<table align="center">
2450 <tr>
2451 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_plain.png" width="400" alt=""></td>
2452 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_plain.png" width="400" alt=""></td>
2453 </tr>
2454 <tr>
2455 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_post.png" width="400" alt=""></td>
2456 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_post.png" width="400" alt=""></td>
2457 </tr>
2458 <tr>
2459 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_postb.png" width="400" alt=""></td>
2460 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_postb.png" width="400" alt=""></td>
2461 </tr>
2462</table>
2463
2464One final note on the efficiency comparison: We tried to use general-purpose
2465sparse matrix structures and similar solvers (optimal AMG preconditioners for
2466both without particular tuning of the AMG parameters on any of them) to give a
2467fair picture of the cost versus accuracy of the two methods on a toy example. It
2468should be noted however that geometric multigrid (GMG) for continuous finite
2469elements is about a factor four to five faster for @f$p=3@f$ and @f$p=6@f$. As of
24702019, optimal-complexity iterative solvers for HDG are still under development
2471in the research community. Also, there are other implementation aspects for CG
2472available such as fast matrix-free approaches as shown in @ref step_37 "step-37" that make
2473higher order continuous elements more competitive. Again, it is not clear to
2474the authors of the tutorial whether similar improvements could be made for
2475HDG. We refer to <a href="https://dx.doi.org/10.1137/16M110455X">Kronbichler
2476and Wall (2018)</a> for a recent efficiency evaluation.
2477
2478
2479<a name="Possibilitiesforimprovements"></a><h3>Possibilities for improvements</h3>
2480
2481
2482As already mentioned in the introduction, one possibility is to implement
2483another post-processing technique as discussed in the literature.
2484
2485A second item that is not done optimally relates to the performance of this
2486program, which is of course an issue in practical applications (weighing in
2487also the better solution quality of (H)DG methods for transport-dominated
2488problems). Let us look at
2489the computing time of the tutorial program and the share of the individual
2490components:
2491
2492<table align="center" class="doxtable">
2493 <tr>
2494 <th>&nbsp;</th>
2495 <th>&nbsp;</th>
2496 <th>Setup</th>
2497 <th>Assemble</th>
2498 <th>Solve</th>
2499 <th>Trace reconstruct</th>
2500 <th>Post-processing</th>
2501 <th>Output</th>
2502 </tr>
2503 <tr>
2504 <th>&nbsp;</th>
2505 <th>Total time</th>
2506 <th colspan="6">Relative share</th>
2507 </tr>
2508 <tr>
2509 <td align="left">2D, Q1, cycle 9, 37,248 dofs</td>
2510 <td align="center">5.34s</td>
2511 <td align="center">0.7%</td>
2512 <td align="center">1.2%</td>
2513 <td align="center">89.5%</td>
2514 <td align="center">0.9%</td>
2515 <td align="center">2.3%</td>
2516 <td align="center">5.4%</td>
2517 </tr>
2518 <tr>
2519 <td align="left">2D, Q3, cycle 9, 74,496 dofs</td>
2520 <td align="center">22.2s</td>
2521 <td align="center">0.4%</td>
2522 <td align="center">4.3%</td>
2523 <td align="center">84.1%</td>
2524 <td align="center">4.1%</td>
2525 <td align="center">3.5%</td>
2526 <td align="center">3.6%</td>
2527 </tr>
2528 <tr>
2529 <td align="left">3D, Q1, cycle 7, 172,800 dofs</td>
2530 <td align="center">9.06s</td>
2531 <td align="center">3.1%</td>
2532 <td align="center">8.9%</td>
2533 <td align="center">42.7%</td>
2534 <td align="center">7.0%</td>
2535 <td align="center">20.6%</td>
2536 <td align="center">17.7%</td>
2537 </tr>
2538 <tr>
2539 <td align="left">3D, Q3, cycle 7, 691,200 dofs</td>
2540 <td align="center">516s</td>
2541 <td align="center">0.6%</td>
2542 <td align="center">34.5%</td>
2543 <td align="center">13.4%</td>
2544 <td align="center">32.8%</td>
2545 <td align="center">17.1%</td>
2546 <td align="center">1.5%</td>
2547 </tr>
2548</table>
2549
2550As can be seen from the table, the solver and assembly calls dominate the
2551runtime of the program. This also gives a clear indication of where
2552improvements would make the most sense:
2553
2554<ol>
2555 <li> Better linear solvers: We use a BiCGStab iterative solver without
2556 preconditioner, where the number of iterations increases with increasing
2557 problem size (the number of iterations for Q1 elements and global
2558 refinement starts at 35 for the small sizes but increases to 701 for the
2559 largest size). To do better, one could for example use an algebraic multigrid
2560 preconditioner from Trilinos (see the sketch after this list), or some more advanced variants as
2561 the one discussed in <a
2562 href="https://dx.doi.org/10.1137/16M110455X">Kronbichler and Wall
2563 (2018)</a>. For diffusion-dominated problems such as the problem at hand
2564 with finer meshes, such a solver can be designed that uses the matrix-vector
2565 products from the more efficient ChunkSparseMatrix on the finest level, as
2566 long as we are not working in parallel with MPI. For MPI-parallelized
2567 computations, a standard TrilinosWrappers::SparseMatrix can be used.
2568
2569 <li> Speed up assembly by pre-assembling parts that do not change from one
2570 cell to another (those that contain neither variable coefficients nor
2571 mapping-dependent terms).
2572</ol>
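
To illustrate the first point, the following is a minimal, hypothetical sketch
of a preconditioned solve. It assumes that deal.II is configured with Trilinos
and that the trace system is stored in a plain SparseMatrix<double> (rather
than the ChunkSparseMatrix mentioned above); the argument names mirror the
corresponding members of the HDG class:

@code
#include <deal.II/lac/solver_bicgstab.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

void solve_with_amg(const SparseMatrix<double> &system_matrix,
                    Vector<double> &            solution,
                    const Vector<double> &      system_rhs)
{
  SolverControl solver_control(system_matrix.m() * 10,
                               1e-11 * system_rhs.l2_norm());
  SolverBicgstab<Vector<double>> solver(solver_control);

  // Algebraic multigrid from Trilinos ML. Marking the problem as elliptic is
  // a guess that fits the diffusion-dominated regime on fine meshes.
  TrilinosWrappers::PreconditionAMG                 preconditioner;
  TrilinosWrappers::PreconditionAMG::AdditionalData amg_data;
  amg_data.elliptic = true;
  preconditioner.initialize(system_matrix, amg_data);

  solver.solve(system_matrix, solution, system_rhs, preconditioner);
}
@endcode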
2573 *
2574 *
2575<a name="PlainProg"></a>
2576<h1> The plain program</h1>
2577@include "step-51.cc"
2578*/