step-51.h
Go to the documentation of this file.
592 *   virtual double value(const Point<dim> &p,
593 *   const unsigned int /*component*/ = 0) const override
594 *   {
595 *   double sum = 0;
596 *   for (unsigned int i = 0; i < this->n_source_centers; ++i)
597 *   {
598 *   const Tensor<1, dim> x_minus_xi = p - this->source_centers[i];
599 *   sum +=
600 *   std::exp(-x_minus_xi.norm_square() / (this->width * this->width));
601 *   }
602 *  
603 *   return sum /
604 *   std::pow(2. * numbers::PI * this->width * this->width, dim / 2.);
605 *   }
606 *  
607 *   virtual Tensor<1, dim>
608 *   gradient(const Point<dim> &p,
609 *   const unsigned int /*component*/ = 0) const override
610 *   {
611 *   Tensor<1, dim> sum;
612 *   for (unsigned int i = 0; i < this->n_source_centers; ++i)
613 *   {
614 *   const Tensor<1, dim> x_minus_xi = p - this->source_centers[i];
615 *  
616 *   sum +=
617 *   (-2 / (this->width * this->width) *
618 *   std::exp(-x_minus_xi.norm_square() / (this->width * this->width)) *
619 *   x_minus_xi);
620 *   }
621 *  
622 *   return sum /
623 *   std::pow(2. * numbers::PI * this->width * this->width, dim / 2.);
624 *   }
625 *   };
626 *  
627 *  
628 *  
629 * @endcode
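 *
 * In formulas, with the width @f$\sigma@f$ (the member variable @p width) and the
 * source centers @f$\mathbf x_i@f$, the two member functions above evaluate
 * @f[
 *   \bar u(\mathbf x) = \frac{1}{(2\pi\sigma^2)^{d/2}} \sum_i
 *     \exp\left(-\frac{\|\mathbf x-\mathbf x_i\|^2}{\sigma^2}\right),
 *   \qquad
 *   \nabla \bar u(\mathbf x) = -\frac{2}{\sigma^2\,(2\pi\sigma^2)^{d/2}} \sum_i
 *     \exp\left(-\frac{\|\mathbf x-\mathbf x_i\|^2}{\sigma^2}\right)
 *     \left(\mathbf x-\mathbf x_i\right).
 * @f]
 *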
630 *
631 * This class implements a function where the scalar solution and its negative
632 * gradient are collected together. This function is used when computing the
633 * error of the HDG approximation and its implementation is to simply call the
634 * value and gradient functions of the Solution class.
635 *
636 * @code
637 *   template <int dim>
638 *   class SolutionAndGradient : public Function<dim>, protected SolutionBase<dim>
639 *   {
640 *   public:
641 *   SolutionAndGradient()
642 *   : Function<dim>(dim + 1)
643 *   {}
644 *  
645 *   virtual void vector_value(const Point<dim> &p,
646 *   Vector<double> &v) const override
647 *   {
648 *   AssertDimension(v.size(), dim + 1);
649 *   Solution<dim> solution;
650 *   Tensor<1, dim> grad = solution.gradient(p);
651 *   for (unsigned int d = 0; d < dim; ++d)
652 *   v[d] = -grad[d];
653 *   v[dim] = solution.value(p);
654 *   }
655 *   };
656 *  
657 *  
658 *  
659 * @endcode
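 *
 * As a small usage sketch (hypothetical driver code, not part of the tutorial
 * itself), the component ordering of this vector-valued function is: components
 * 0 to dim-1 hold the negative gradient, and component dim holds the scalar value.
 * @code
 *   // Hypothetical illustration of the component ordering (dim = 2 assumed):
 *   SolutionAndGradient<2> exact;
 *   Vector<double>         values(2 + 1);
 *   exact.vector_value(Point<2>(0.25, 0.5), values);
 *   // values[0], values[1] now hold -grad u at p, values[2] holds u(p).
 * @endcode
 *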
660 *
661 * Next comes the implementation of the convection velocity. As described in
662 * the introduction, we choose a velocity field that is @f$(y, -x)@f$ in 2d and
663 * @f$(y, -x, 1)@f$ in 3d. This gives a divergence-free velocity field.
664 *
665 * @code
666 *   template <int dim>
667 *   class ConvectionVelocity : public TensorFunction<1, dim>
668 *   {
669 *   public:
670 *   ConvectionVelocity()
671 *   : TensorFunction<1, dim>()
672 *   {}
673 *  
674 *   virtual Tensor<1, dim> value(const Point<dim> &p) const override
675 *   {
676 *   Tensor<1, dim> convection;
677 *   switch (dim)
678 *   {
679 *   case 1:
680 *   convection[0] = 1;
681 *   break;
682 *   case 2:
683 *   convection[0] = p[1];
684 *   convection[1] = -p[0];
685 *   break;
686 *   case 3:
687 *   convection[0] = p[1];
688 *   convection[1] = -p[0];
689 *   convection[2] = 1;
690 *   break;
691 *   default:
692 *   DEAL_II_NOT_IMPLEMENTED();
693 *   }
694 *   return convection;
695 *   }
696 *   };
697 *  
698 *  
699 *  
700 * @endcode
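 *
 * A one-line check confirms that this field is indeed divergence free: in 2d,
 * @f[
 *   \nabla\cdot\mathbf c = \frac{\partial}{\partial x}\,(y)
 *   + \frac{\partial}{\partial y}\,(-x) = 0,
 * @f]
 * and in 3d the additional constant third component contributes nothing either.
 *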
701 *
702 * The last function we implement is the right hand side for the
703 * manufactured solution. It is very similar to @ref step_7 "step-7", with the exception
704 * that we now have a convection term instead of the reaction term. Since
705 * the velocity field is incompressible, i.e., @f$\nabla \cdot \mathbf{c} = 0@f$,
706 * the advection term simply reads @f$\mathbf{c} \nabla u@f$.
707 *
708 * @code
709 *   template <int dim>
710 *   class RightHandSide : public Function<dim>, protected SolutionBase<dim>
711 *   {
712 *   public:
713 *   virtual double value(const Point<dim> &p,
714 *   const unsigned int /*component*/ = 0) const override
715 *   {
716 *   ConvectionVelocity<dim> convection_velocity;
717 *   const Tensor<1, dim> convection = convection_velocity.value(p);
718 *   double sum = 0;
719 *   for (unsigned int i = 0; i < this->n_source_centers; ++i)
720 *   {
721 *   const Tensor<1, dim> x_minus_xi = p - this->source_centers[i];
722 *  
723 *   sum +=
724 *   ((2 * dim - 2 * convection * x_minus_xi -
725 *   4 * x_minus_xi.norm_square() / (this->width * this->width)) /
726 *   (this->width * this->width) *
727 *   std::exp(-x_minus_xi.norm_square() / (this->width * this->width)));
728 *   }
729 *  
730 *   return sum /
731 *   std::pow(2. * numbers::PI * this->width * this->width, dim / 2.);
732 *   }
733 *   };
734 *  
735 *  
736 *  
737 * @endcode
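 *
 * To see where the expression inside the loop comes from, apply the operator
 * @f$-\Delta + \mathbf c\cdot\nabla@f$ to a single Gaussian
 * @f$\bar u_i = \exp(-\|\mathbf x-\mathbf x_i\|^2/\sigma^2)@f$ (the prefactor
 * @f$(2\pi\sigma^2)^{-d/2}@f$ is common to all terms and applied at the end):
 * @f[
 *   -\Delta \bar u_i + \mathbf c\cdot\nabla \bar u_i
 *   = \frac{2d - 2\,\mathbf c\cdot(\mathbf x-\mathbf x_i)
 *           - 4\|\mathbf x-\mathbf x_i\|^2/\sigma^2}{\sigma^2}\,\bar u_i,
 * @f]
 * which is exactly the factor computed in the code above, with @f$\sigma@f$
 * given by @p width.
 *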
738 *
739 *
740 * <a name="step_51-TheHDGsolverclass"></a>
741 * <h3>The HDG solver class</h3>
742 *
743
744 *
745 * The HDG solution procedure follows closely that of @ref step_7 "step-7". The major
746 * difference is the use of three different sets of DoFHandler and FE
747 * objects, along with the ChunkSparseMatrix and the corresponding solution
748 * vectors. We also use WorkStream to enable a multithreaded local solution
749 * process which exploits the embarrassingly parallel nature of the local
750 * solver. For WorkStream, we define the local operations on a cell and a
751 * copy function into the global matrix and vector. We do this both for the
752 * assembly (which is run twice, once when we generate the system matrix and
753 * once when we compute the element-interior solutions from the skeleton
754 * values) and for the postprocessing where we extract a solution that
755 * converges at higher order.
756 *
757 * @code
758 *   template <int dim>
759 *   class HDG
760 *   {
761 *   public:
762 *   enum RefinementMode
763 *   {
764 *   global_refinement,
765 *   adaptive_refinement
766 *   };
767 *  
768 *   HDG(const unsigned int degree, const RefinementMode refinement_mode);
769 *   void run();
770 *  
771 *   private:
772 *   void setup_system();
773 *   void assemble_system(const bool reconstruct_trace = false);
774 *   void solve();
775 *   void postprocess();
776 *   void refine_grid(const unsigned int cycle);
777 *   void output_results(const unsigned int cycle);
778 *  
779 * @endcode
780 *
781 * Data for the assembly and solution of the primal variables.
782 *
783 * @code
784 *   struct PerTaskData;
785 *   struct ScratchData;
786 *  
787 * @endcode
788 *
789 * Post-processing the solution to obtain @f$u^*@f$ is an element-by-element
790 * procedure; as such, we do not need to assemble any global data and do
791 * not declare any 'task data' for WorkStream to use.
792 *
793 * @code
794 *   struct PostProcessScratchData;
795 *  
796 * @endcode
797 *
798 * The following three functions are used by WorkStream to do the actual
799 * work of the program.
800 *
801 * @code
802 *   void assemble_system_one_cell(
803 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
804 *   ScratchData &scratch,
805 *   PerTaskData &task_data);
806 *  
807 *   void copy_local_to_global(const PerTaskData &data);
808 *  
809 *   void postprocess_one_cell(
810 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
811 *   PostProcessScratchData &scratch,
812 *   unsigned int &empty_data);
813 *  
814 *  
815 *   Triangulation<dim> triangulation;
816 *  
817 * @endcode
818 *
819 * The 'local' solutions are interior to each element. These
820 * represent the primal solution field @f$u@f$ as well as the auxiliary
821 * field @f$\mathbf{q}@f$.
822 *
823 * @code
824 *   const FESystem<dim> fe_local;
825 *   DoFHandler<dim> dof_handler_local;
826 *   Vector<double> solution_local;
827 *  
828 * @endcode
829 *
830 * The new finite element type FE_FaceQ defines its degrees of freedom only on
831 * the element faces; it is used for the global skeleton solution that couples the element-level
832 * local solutions.
833 *
834 * @code
835 *   const FE_FaceQ<dim> fe;
836 *   DoFHandler<dim> dof_handler;
837 *   Vector<double> solution;
838 *   Vector<double> system_rhs;
839 *  
840 * @endcode
841 *
842 * As stated in the introduction, the HDG solution can be post-processed to
843 * attain superconvergence rates of @f$\mathcal{O}(h^{p+2})@f$. The
844 * post-processed solution is a discontinuous finite element solution
845 * representing the primal variable on the interior of each cell. We define
846 * a FE type of degree @f$p+1@f$ to represent this post-processed solution,
847 * which we only use for output after constructing it.
848 *
849 * @code
850 *   const FE_DGQ<dim> fe_u_post;
851 *   DoFHandler<dim> dof_handler_u_post;
852 *   Vector<double> solution_u_post;
853 *  
854 * @endcode
855 *
856 * Dirichlet boundary conditions on the skeleton variable are imposed strongly,
857 * just as in a continuous Galerkin finite
858 * element method. We can enforce the boundary conditions in an analogous
859 * manner through an AffineConstraints object. In addition, hanging nodes are
860 * handled in the same way as for continuous finite elements: For the face
861 * elements which only define degrees of freedom on the face, this process
862 * sets the solution on the refined side to coincide with the
863 * representation on the coarse side.
864 *
865
866 *
867 * Alternatively, one could also use the
868 * unknowns from the refined side and express the local solution on the
869 * coarse side through the trace values on the refined side. However, such
870 * a setup is not as easily implemented in terms of deal.II loops and not
871 * pursued further here.
873 *
874 * @code
875 *   AffineConstraints<double> constraints;
876 *  
877 * @endcode
878 *
879 * The usage of the ChunkSparseMatrix class is similar to the usual sparse
880 * matrices: You need a sparsity pattern of type ChunkSparsityPattern and
881 * the actual matrix object. When creating the sparsity pattern, we just
882 * have to additionally pass the size of local blocks.
883 *
884 * @code
885 *   ChunkSparsityPattern sparsity_pattern;
886 *   ChunkSparseMatrix<double> system_matrix;
887 *  
888 * @endcode
889 *
890 * Same as @ref step_7 "step-7":
891 *
892 * @code
893 *   const RefinementMode refinement_mode;
894 *   ConvergenceTable convergence_table;
895 *   };
896 *  
897 * @endcode
898 *
899 *
900 * <a name="step_51-TheHDGclassimplementation"></a>
901 * <h3>The HDG class implementation</h3>
902 *
903
904 *
905 *
906 * <a name="step_51-Constructor"></a>
907 * <h4>Constructor</h4>
908 * The constructor is similar to those in other examples, with the exception
909 * of handling multiple DoFHandler and FiniteElement objects. Note that we
910 * create a system of finite elements for the local DG part, including the
911 * gradient/flux part and the scalar part.
912 *
913 * @code
914 *   template <int dim>
915 *   HDG<dim>::HDG(const unsigned int degree, const RefinementMode refinement_mode)
916 *   : fe_local(FE_DGQ<dim>(degree) ^ dim, FE_DGQ<dim>(degree))
917 *   , dof_handler_local(triangulation)
918 *   , fe(degree)
919 *   , dof_handler(triangulation)
920 *   , fe_u_post(degree + 1)
921 *   , dof_handler_u_post(triangulation)
922 *   , refinement_mode(refinement_mode)
923 *   {}
924 *  
925 *  
926 *  
927 * @endcode
928 *
929 *
930 * <a name="step_51-HDGsetup_system"></a>
931 * <h4>HDG::setup_system</h4>
932 * The system for an HDG solution is set up in an analogous manner to most
933 * of the other tutorial programs. We are careful to distribute dofs with
934 * all of our DoFHandler objects. The @p solution and @p system_matrix
935 * objects go with the global skeleton solution.
936 *
937 * @code
938 *   template <int dim>
939 *   void HDG<dim>::setup_system()
940 *   {
941 *   dof_handler_local.distribute_dofs(fe_local);
942 *   dof_handler.distribute_dofs(fe);
943 *   dof_handler_u_post.distribute_dofs(fe_u_post);
944 *  
945 *   std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
946 *   << std::endl;
947 *  
948 *   solution.reinit(dof_handler.n_dofs());
949 *   system_rhs.reinit(dof_handler.n_dofs());
950 *  
951 *   solution_local.reinit(dof_handler_local.n_dofs());
952 *   solution_u_post.reinit(dof_handler_u_post.n_dofs());
953 *  
954 *   constraints.clear();
955 *   DoFTools::make_hanging_node_constraints(dof_handler, constraints);
956 *   std::map<types::boundary_id, const Function<dim> *> boundary_functions;
957 *   Solution<dim> solution_function;
958 *   boundary_functions[0] = &solution_function;
959 *   VectorTools::project_boundary_values(dof_handler,
960 *   boundary_functions,
961 *   QGauss<dim - 1>(fe.degree + 1),
962 *   constraints);
963 *   constraints.close();
964 *  
965 * @endcode
966 *
967 * When creating the chunk sparsity pattern, we first create the usual
968 * dynamic sparsity pattern and then set the chunk size, which is equal
969 * to the number of dofs on a face, when copying this into the final
970 * sparsity pattern.
971 *
972 * @code
973 *   {
974 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
975 *   DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
976 *   sparsity_pattern.copy_from(dsp, fe.n_dofs_per_face());
977 *   }
978 *   system_matrix.reinit(sparsity_pattern);
979 *   }
980 *  
981 *  
982 *  
983 * @endcode
984 *
985 *
986 * <a name="step_51-HDGPerTaskData"></a>
987 * <h4>HDG::PerTaskData</h4>
988 * Next comes the definition of the local data structures for the parallel
989 * assembly. The first structure @p PerTaskData contains the local vector
990 * and matrix that are written into the global matrix, whereas the
991 * ScratchData contains all data that we need for the local assembly. There
992 * is one variable worth noting here, namely the boolean variable @p
993 * trace_reconstruct. As mentioned in the introduction, we solve the HDG
994 * system in two steps. First, we create a linear system for the skeleton
995 * system where we condense the local part into it via the Schur complement
996 * @f$D-CA^{-1}B@f$. Then, we solve for the local part using the skeleton
997 * solution. For these two steps, we need the same matrices on the elements
998 * twice, which we want to compute by two assembly steps. Since most of the
999 * code is similar, we do this with the same function but only switch
1000 * between the two based on a flag that we set when starting the
1001 * assembly. Since we need to pass this information on to the local worker
1002 * routines, we store it once in the task data.
1003 *
1004 * @code
1005 *   template <int dim>
1006 *   struct HDG<dim>::PerTaskData
1007 *   {
1008 *   FullMatrix<double> cell_matrix;
1009 *   Vector<double> cell_vector;
1010 *   std::vector<types::global_dof_index> dof_indices;
1011 *  
1012 *   bool trace_reconstruct;
1013 *  
1014 *   PerTaskData(const unsigned int n_dofs, const bool trace_reconstruct)
1015 *   : cell_matrix(n_dofs, n_dofs)
1016 *   , cell_vector(n_dofs)
1017 *   , dof_indices(n_dofs)
1018 *   , trace_reconstruct(trace_reconstruct)
1019 *   {}
1020 *   };
1021 *  
1022 *  
1023 *  
1024 * @endcode
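 *
 * Written out, the two solution steps just described are the following. If we
 * collect the element-interior unknowns in a vector @f$U@f$ and the skeleton
 * unknowns in a vector @f$\Lambda@f$ (these symbols are introduced here only for
 * the sake of this sketch, with local and skeleton right hand sides @f$F@f$ and
 * @f$G@f$), the coupled system and its condensation read
 * @f[
 *   \begin{pmatrix} A & B \\ C & D \end{pmatrix}
 *   \begin{pmatrix} U \\ \Lambda \end{pmatrix}
 *   =
 *   \begin{pmatrix} F \\ G \end{pmatrix}
 *   \quad\Longrightarrow\quad
 *   (D - C A^{-1} B)\,\Lambda = G - C A^{-1} F,
 *   \qquad
 *   U = A^{-1}(F - B\,\Lambda).
 * @f]
 * The first assembly pass builds the condensed system on the left, which the
 * linear solver then inverts; the second pass, with @p trace_reconstruct set to
 * true, evaluates the element-by-element reconstruction on the right.
 *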
1025 *
1026 *
1027 * <a name="step_51-HDGScratchData"></a>
1028 * <h4>HDG::ScratchData</h4>
1029 * @p ScratchData contains persistent data for each
1030 * thread within WorkStream. The FEValues, matrix,
1031 * and vector objects should be familiar by now. There are two objects that
1032 * need to be discussed: `std::vector<std::vector<unsigned int> >
1033 * fe_local_support_on_face` and `std::vector<std::vector<unsigned int> >
1034 * fe_support_on_face`. These are used to indicate whether or not the finite
1035 * elements chosen have support (non-zero values) on a given face of the
1036 * reference cell for the local part associated to @p fe_local and the
1037 * skeleton part @p fe. We extract this information in the
1038 * constructor and store it once for all cells that we work on. Had we not
1039 * stored this information, we would be forced to assemble a large number
1040 * of zero terms on each cell, which would significantly slow the program.
1041 *
1042 * @code
1043 *   template <int dim>
1044 *   struct HDG<dim>::ScratchData
1045 *   {
1046 *   FEValues<dim> fe_values_local;
1047 *   FEFaceValues<dim> fe_face_values_local;
1048 *   FEFaceValues<dim> fe_face_values;
1049 *  
1050 *   FullMatrix<double> ll_matrix;
1051 *   FullMatrix<double> lf_matrix;
1052 *   FullMatrix<double> fl_matrix;
1053 *   FullMatrix<double> tmp_matrix;
1054 *   Vector<double> l_rhs;
1055 *   Vector<double> tmp_rhs;
1056 *  
1057 *   std::vector<Tensor<1, dim>> q_phi;
1058 *   std::vector<double> q_phi_div;
1059 *   std::vector<double> u_phi;
1060 *   std::vector<Tensor<1, dim>> u_phi_grad;
1061 *   std::vector<double> tr_phi;
1062 *   std::vector<double> trace_values;
1063 *  
1064 *   std::vector<std::vector<unsigned int>> fe_local_support_on_face;
1065 *   std::vector<std::vector<unsigned int>> fe_support_on_face;
1066 *  
1067 *   ConvectionVelocity<dim> convection_velocity;
1068 *   RightHandSide<dim> right_hand_side;
1069 *   const Solution<dim> exact_solution;
1070 *  
1071 *   ScratchData(const FiniteElement<dim> &fe,
1072 *   const FiniteElement<dim> &fe_local,
1073 *   const QGauss<dim> &quadrature_formula,
1074 *   const QGauss<dim - 1> &face_quadrature_formula,
1075 *   const UpdateFlags local_flags,
1076 *   const UpdateFlags local_face_flags,
1077 *   const UpdateFlags flags)
1078 *   : fe_values_local(fe_local, quadrature_formula, local_flags)
1079 *   , fe_face_values_local(fe_local,
1080 *   face_quadrature_formula,
1081 *   local_face_flags)
1082 *   , fe_face_values(fe, face_quadrature_formula, flags)
1083 *   , ll_matrix(fe_local.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1084 *   , lf_matrix(fe_local.n_dofs_per_cell(), fe.n_dofs_per_cell())
1085 *   , fl_matrix(fe.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1086 *   , tmp_matrix(fe.n_dofs_per_cell(), fe_local.n_dofs_per_cell())
1087 *   , l_rhs(fe_local.n_dofs_per_cell())
1088 *   , tmp_rhs(fe_local.n_dofs_per_cell())
1089 *   , q_phi(fe_local.n_dofs_per_cell())
1090 *   , q_phi_div(fe_local.n_dofs_per_cell())
1091 *   , u_phi(fe_local.n_dofs_per_cell())
1092 *   , u_phi_grad(fe_local.n_dofs_per_cell())
1093 *   , tr_phi(fe.n_dofs_per_cell())
1094 *   , trace_values(face_quadrature_formula.size())
1095 *   , fe_local_support_on_face(GeometryInfo<dim>::faces_per_cell)
1096 *   , fe_support_on_face(GeometryInfo<dim>::faces_per_cell)
1097 *   , exact_solution()
1098 *   {
1099 *   for (const unsigned int face_no : GeometryInfo<dim>::face_indices())
1100 *   for (unsigned int i = 0; i < fe_local.n_dofs_per_cell(); ++i)
1101 *   {
1102 *   if (fe_local.has_support_on_face(i, face_no))
1103 *   fe_local_support_on_face[face_no].push_back(i);
1104 *   }
1105 *  
1106 *   for (const unsigned int face_no : GeometryInfo<dim>::face_indices())
1107 *   for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
1108 *   {
1109 *   if (fe.has_support_on_face(i, face_no))
1110 *   fe_support_on_face[face_no].push_back(i);
1111 *   }
1112 *   }
1113 *  
1114 *   ScratchData(const ScratchData &sd)
1115 *   : fe_values_local(sd.fe_values_local.get_fe(),
1116 *   sd.fe_values_local.get_quadrature(),
1117 *   sd.fe_values_local.get_update_flags())
1118 *   , fe_face_values_local(sd.fe_face_values_local.get_fe(),
1119 *   sd.fe_face_values_local.get_quadrature(),
1120 *   sd.fe_face_values_local.get_update_flags())
1121 *   , fe_face_values(sd.fe_face_values.get_fe(),
1122 *   sd.fe_face_values.get_quadrature(),
1123 *   sd.fe_face_values.get_update_flags())
1124 *   , ll_matrix(sd.ll_matrix)
1125 *   , lf_matrix(sd.lf_matrix)
1126 *   , fl_matrix(sd.fl_matrix)
1127 *   , tmp_matrix(sd.tmp_matrix)
1128 *   , l_rhs(sd.l_rhs)
1129 *   , tmp_rhs(sd.tmp_rhs)
1130 *   , q_phi(sd.q_phi)
1131 *   , q_phi_div(sd.q_phi_div)
1132 *   , u_phi(sd.u_phi)
1133 *   , u_phi_grad(sd.u_phi_grad)
1134 *   , tr_phi(sd.tr_phi)
1135 *   , trace_values(sd.trace_values)
1136 *   , fe_local_support_on_face(sd.fe_local_support_on_face)
1137 *   , fe_support_on_face(sd.fe_support_on_face)
1138 *   , exact_solution()
1139 *   {}
1140 *   };
1141 *  
1142 *  
1143 *  
1144 * @endcode
1145 *
1146 *
1147 * <a name="step_51-HDGPostProcessScratchData"></a>
1148 * <h4>HDG::PostProcessScratchData</h4>
1149 * @p PostProcessScratchData contains the data used by WorkStream
1150 * when post-processing the local solution @f$u^*@f$. It is similar, but much
1151 * simpler, than @p ScratchData.
1152 *
1153 * @code
1154 *   template <int dim>
1155 *   struct HDG<dim>::PostProcessScratchData
1156 *   {
1157 *   FEValues<dim> fe_values_local;
1158 *   FEValues<dim> fe_values;
1159 *  
1160 *   std::vector<double> u_values;
1161 *   std::vector<Tensor<1, dim>> u_gradients;
1162 *   FullMatrix<double> cell_matrix;
1163 *  
1164 *   Vector<double> cell_rhs;
1165 *   Vector<double> cell_sol;
1166 *  
1167 *   PostProcessScratchData(const FiniteElement<dim> &fe,
1168 *   const FiniteElement<dim> &fe_local,
1169 *   const QGauss<dim> &quadrature_formula,
1170 *   const UpdateFlags local_flags,
1171 *   const UpdateFlags flags)
1172 *   : fe_values_local(fe_local, quadrature_formula, local_flags)
1173 *   , fe_values(fe, quadrature_formula, flags)
1174 *   , u_values(quadrature_formula.size())
1175 *   , u_gradients(quadrature_formula.size())
1176 *   , cell_matrix(fe.n_dofs_per_cell(), fe.n_dofs_per_cell())
1177 *   , cell_rhs(fe.n_dofs_per_cell())
1178 *   , cell_sol(fe.n_dofs_per_cell())
1179 *   {}
1180 *  
1181 *   PostProcessScratchData(const PostProcessScratchData &sd)
1182 *   : fe_values_local(sd.fe_values_local.get_fe(),
1183 *   sd.fe_values_local.get_quadrature(),
1184 *   sd.fe_values_local.get_update_flags())
1185 *   , fe_values(sd.fe_values.get_fe(),
1186 *   sd.fe_values.get_quadrature(),
1187 *   sd.fe_values.get_update_flags())
1188 *   , u_values(sd.u_values)
1189 *   , u_gradients(sd.u_gradients)
1190 *   , cell_matrix(sd.cell_matrix)
1191 *   , cell_rhs(sd.cell_rhs)
1192 *   , cell_sol(sd.cell_sol)
1193 *   {}
1194 *   };
1195 *  
1196 *  
1197 *  
1198 * @endcode
1199 *
1200 *
1201 * <a name="step_51-HDGassemble_system"></a>
1202 * <h4>HDG::assemble_system</h4>
1203 * The @p assemble_system function is similar to the one on @ref step_32 "step-32", where
1204 * the quadrature formula and the update flags are set up, and then
1205 * <code>WorkStream</code> is used to do the work in a multi-threaded
1206 * manner. The @p trace_reconstruct input parameter is used to decide
1207 * whether we are solving for the global skeleton solution (false) or the
1208 * local solution (true).
1209 *
1210
1211 *
1212 * One thing worth noting for the multithreaded execution of assembly is the
1213 * fact that the local computations in `assemble_system_one_cell()` call
1214 * into BLAS and LAPACK functions if those are available in deal.II. Thus,
1215 * the underlying BLAS/LAPACK library must support calls from multiple
1216 * threads at the same time. Most implementations do support this, but some
1217 * libraries need to be built in a specific way to avoid problems. For
1218 * example, OpenBLAS compiled without multithreading inside the BLAS/LAPACK
1219 * calls needs to be built with a flag called `USE_LOCKING` set to true.
1220 *
1221 * @code
1222 *   template <int dim>
1223 *   void HDG<dim>::assemble_system(const bool trace_reconstruct)
1224 *   {
1225 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1226 *   const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
1227 *  
1228 *   const UpdateFlags local_flags(update_values | update_gradients |
1229 *   update_JxW_values | update_quadrature_points);
1230 *  
1231 *   const UpdateFlags local_face_flags(update_values);
1232 *  
1233 *   const UpdateFlags flags(update_values | update_normal_vectors |
1234 *   update_quadrature_points | update_JxW_values);
1235 *  
1236 *   PerTaskData task_data(fe.n_dofs_per_cell(), trace_reconstruct);
1237 *   ScratchData scratch(fe,
1238 *   fe_local,
1239 *   quadrature_formula,
1240 *   face_quadrature_formula,
1241 *   local_flags,
1242 *   local_face_flags,
1243 *   flags);
1244 *  
1245 *   WorkStream::run(dof_handler.begin_active(),
1246 *   dof_handler.end(),
1247 *   *this,
1248 *   &HDG<dim>::assemble_system_one_cell,
1249 *   &HDG<dim>::copy_local_to_global,
1250 *   scratch,
1251 *   task_data);
1252 *   }
1253 *  
1254 *  
1255 *  
1256 * @endcode
1257 *
1258 *
1259 * <a name="step_51-HDGassemble_system_one_cell"></a>
1260 * <h4>HDG::assemble_system_one_cell</h4>
1261 * The real work of the HDG program is done by @p assemble_system_one_cell.
1262 * Assembling the local matrices @f$A, B, C@f$ is done here, along with the
1263 * local contributions of the global matrix @f$D@f$.
1264 *
1265 * @code
1266 *   template <int dim>
1267 *   void HDG<dim>::assemble_system_one_cell(
1268 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
1269 *   ScratchData &scratch,
1270 *   PerTaskData &task_data)
1271 *   {
1272 * @endcode
1273 *
1274 * Construct iterator for dof_handler_local for FEValues reinit function.
1275 *
1276 * @code
1277 *   const typename DoFHandler<dim>::active_cell_iterator loc_cell =
1278 *   cell->as_dof_handler_iterator(dof_handler_local);
1279 *  
1280 *   const unsigned int n_q_points =
1281 *   scratch.fe_values_local.get_quadrature().size();
1282 *   const unsigned int n_face_q_points =
1283 *   scratch.fe_face_values_local.get_quadrature().size();
1284 *  
1285 *   const unsigned int loc_dofs_per_cell =
1286 *   scratch.fe_values_local.get_fe().n_dofs_per_cell();
1287 *  
1288 *   const FEValuesExtractors::Vector fluxes(0);
1289 *   const FEValuesExtractors::Scalar scalar(dim);
1290 *  
1291 *   scratch.ll_matrix = 0;
1292 *   scratch.l_rhs = 0;
1293 *   if (!task_data.trace_reconstruct)
1294 *   {
1295 *   scratch.lf_matrix = 0;
1296 *   scratch.fl_matrix = 0;
1297 *   task_data.cell_matrix = 0;
1298 *   task_data.cell_vector = 0;
1299 *   }
1300 *   scratch.fe_values_local.reinit(loc_cell);
1301 *  
1302 * @endcode
1303 *
1304 * We first compute the cell-interior contribution to the @p ll_matrix
1305 * (referred to as matrix @f$A@f$ in the introduction) corresponding to the
1306 * local-local coupling, as well as the local right-hand-side vector. We
1307 * store the values at each quadrature point for the basis functions, the
1308 * right-hand-side value, and the convection velocity, in order to have
1309 * quick access to these fields.
1310 *
1311 * @code
1312 *   for (unsigned int q = 0; q < n_q_points; ++q)
1313 *   {
1314 *   const double rhs_value = scratch.right_hand_side.value(
1315 *   scratch.fe_values_local.quadrature_point(q));
1316 *   const Tensor<1, dim> convection = scratch.convection_velocity.value(
1317 *   scratch.fe_values_local.quadrature_point(q));
1318 *   const double JxW = scratch.fe_values_local.JxW(q);
1319 *   for (unsigned int k = 0; k < loc_dofs_per_cell; ++k)
1320 *   {
1321 *   scratch.q_phi[k] = scratch.fe_values_local[fluxes].value(k, q);
1322 *   scratch.q_phi_div[k] =
1323 *   scratch.fe_values_local[fluxes].divergence(k, q);
1324 *   scratch.u_phi[k] = scratch.fe_values_local[scalar].value(k, q);
1325 *   scratch.u_phi_grad[k] =
1326 *   scratch.fe_values_local[scalar].gradient(k, q);
1327 *   }
1328 *   for (unsigned int i = 0; i < loc_dofs_per_cell; ++i)
1329 *   {
1330 *   for (unsigned int j = 0; j < loc_dofs_per_cell; ++j)
1331 *   scratch.ll_matrix(i, j) +=
1332 *   (scratch.q_phi[i] * scratch.q_phi[j] -
1333 *   scratch.q_phi_div[i] * scratch.u_phi[j] +
1334 *   scratch.u_phi[i] * scratch.q_phi_div[j] -
1335 *   (scratch.u_phi_grad[i] * convection) * scratch.u_phi[j]) *
1336 *   JxW;
1337 *   scratch.l_rhs(i) += scratch.u_phi[i] * rhs_value * JxW;
1338 *   }
1339 *   }
1340 *  
1341 * @endcode
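 *
 * In other words, for each pair of a local test function @f$(\mathbf v_i, w_i)@f$
 * and a local trial function @f$(\mathbf q_j, u_j)@f$, the loop above accumulates
 * the cell integrals
 * @f[
 *   \left(\mathbf v_i, \mathbf q_j\right)_K
 *   - \left(\nabla\cdot \mathbf v_i,\, u_j\right)_K
 *   + \left(w_i,\, \nabla\cdot \mathbf q_j\right)_K
 *   - \left(\mathbf c\cdot\nabla w_i,\, u_j\right)_K
 *   \qquad\text{and}\qquad
 *   \left(w_i,\, f\right)_K
 * @f]
 * into the matrix @f$A@f$ and the local right hand side, respectively.
 *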
1342 *
1343 * Face terms are assembled on all faces of all elements. This is in
1344 * contrast to more traditional DG methods, where each face is only visited
1345 * once in the assembly procedure.
1346 *
1347 * @code
1348 *   for (const auto face_no : cell->face_indices())
1349 *   {
1350 *   scratch.fe_face_values_local.reinit(loc_cell, face_no);
1351 *   scratch.fe_face_values.reinit(cell, face_no);
1352 *  
1353 * @endcode
1354 *
1355 * The already obtained skeleton solution values are needed when solving for the
1356 * local variables.
1357 *
1358 * @code
1359 *   if (task_data.trace_reconstruct)
1360 *   scratch.fe_face_values.get_function_values(solution,
1361 *   scratch.trace_values);
1362 *  
1363 *   for (unsigned int q = 0; q < n_face_q_points; ++q)
1364 *   {
1365 *   const double JxW = scratch.fe_face_values.JxW(q);
1366 *   const Point<dim> quadrature_point =
1367 *   scratch.fe_face_values.quadrature_point(q);
1368 *   const Tensor<1, dim> normal =
1369 *   scratch.fe_face_values.normal_vector(q);
1370 *   const Tensor<1, dim> convection =
1371 *   scratch.convection_velocity.value(quadrature_point);
1372 *  
1373 * @endcode
1374 *
1375 * Here we compute the stabilization parameter discussed in the
1376 * introduction: since the diffusion is one and the diffusion
1377 * length scale is set to 1/5, it simply results in a contribution
1378 * of 5 for the diffusion part and the magnitude of the convection
1379 * through the element boundary in a centered scheme for the
1380 * convection part.
1381 *
1382 * @code
1383 *   const double tau_stab = (5. + std::abs(convection * normal));
1384 *  
1385 * @endcode
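 *
 * With a unit diffusion coefficient @f$\kappa=1@f$ and the diffusion length
 * scale @f$\ell = \tfrac15@f$ mentioned above, this line implements
 * @f[
 *   \tau = \frac{\kappa}{\ell} + \left|\mathbf c\cdot\mathbf n\right|
 *        = 5 + \left|\mathbf c\cdot\mathbf n\right| .
 * @f]
 *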
1386 *
1387 * We store the non-zero flux and scalar values, making use of the
1388 * support_on_face information we created in @p ScratchData.
1389 *
1390 * @code
1391 *   for (unsigned int k = 0;
1392 *   k < scratch.fe_local_support_on_face[face_no].size();
1393 *   ++k)
1394 *   {
1395 *   const unsigned int kk =
1396 *   scratch.fe_local_support_on_face[face_no][k];
1397 *   scratch.q_phi[k] =
1398 *   scratch.fe_face_values_local[fluxes].value(kk, q);
1399 *   scratch.u_phi[k] =
1400 *   scratch.fe_face_values_local[scalar].value(kk, q);
1401 *   }
1402 *  
1403 * @endcode
1404 *
1405 * When @p trace_reconstruct is false, we are constructing the global
1406 * system for the skeleton variable @f$\hat{u}@f$. In this case,
1407 * we must assemble all local matrices associated with the problem:
1408 * local-local, local-face, face-local, and face-face. The
1409 * face-face matrix is stored as @p TaskData::cell_matrix, so that
1410 * it can be assembled into the global system by @p
1411 * copy_local_to_global.
1412 *
1413 * @code
1414 *   if (!task_data.trace_reconstruct)
1415 *   {
1416 *   for (unsigned int k = 0;
1417 *   k < scratch.fe_support_on_face[face_no].size();
1418 *   ++k)
1419 *   scratch.tr_phi[k] = scratch.fe_face_values.shape_value(
1420 *   scratch.fe_support_on_face[face_no][k], q);
1421 *   for (unsigned int i = 0;
1422 *   i < scratch.fe_local_support_on_face[face_no].size();
1423 *   ++i)
1424 *   for (unsigned int j = 0;
1425 *   j < scratch.fe_support_on_face[face_no].size();
1426 *   ++j)
1427 *   {
1428 *   const unsigned int ii =
1429 *   scratch.fe_local_support_on_face[face_no][i];
1430 *   const unsigned int jj =
1431 *   scratch.fe_support_on_face[face_no][j];
1432 *   scratch.lf_matrix(ii, jj) +=
1433 *   ((scratch.q_phi[i] * normal +
1434 *   (convection * normal - tau_stab) * scratch.u_phi[i]) *
1435 *   scratch.tr_phi[j]) *
1436 *   JxW;
1437 *  
1438 * @endcode
1439 *
1440 * Note the negative sign of the @p fl_matrix entries: we negate the sign
1441 * during assembly here so that we can use FullMatrix::mmult with
1442 * addition when computing the
1443 * Schur complement.
1444 *
1445 * @code
1446 *   scratch.fl_matrix(jj, ii) -=
1447 *   ((scratch.q_phi[i] * normal +
1448 *   tau_stab * scratch.u_phi[i]) *
1449 *   scratch.tr_phi[j]) *
1450 *   JxW;
1451 *   }
1452 *  
1453 *   for (unsigned int i = 0;
1454 *   i < scratch.fe_support_on_face[face_no].size();
1455 *   ++i)
1456 *   for (unsigned int j = 0;
1457 *   j < scratch.fe_support_on_face[face_no].size();
1458 *   ++j)
1459 *   {
1460 *   const unsigned int ii =
1461 *   scratch.fe_support_on_face[face_no][i];
1462 *   const unsigned int jj =
1463 *   scratch.fe_support_on_face[face_no][j];
1464 *   task_data.cell_matrix(ii, jj) +=
1465 *   ((convection * normal - tau_stab) * scratch.tr_phi[i] *
1466 *   scratch.tr_phi[j]) *
1467 *   JxW;
1468 *   }
1469 *  
1470 *   if (cell->face(face_no)->at_boundary() &&
1471 *   (cell->face(face_no)->boundary_id() == 1))
1472 *   {
1473 *   const double neumann_value =
1474 *   -scratch.exact_solution.gradient(quadrature_point) *
1475 *   normal +
1476 *   convection * normal *
1477 *   scratch.exact_solution.value(quadrature_point);
1478 *   for (unsigned int i = 0;
1479 *   i < scratch.fe_support_on_face[face_no].size();
1480 *   ++i)
1481 *   {
1482 *   const unsigned int ii =
1483 *   scratch.fe_support_on_face[face_no][i];
1484 *   task_data.cell_vector(ii) +=
1485 *   scratch.tr_phi[i] * neumann_value * JxW;
1486 *   }
1487 *   }
1488 *   }
1489 *  
1490 * @endcode
1491 *
1492 * This last term adds the contribution of the term
1493 * @f$\left<\tau u_h, \varphi\right>_{\partial \mathcal T}@f$ to the local matrix. As opposed
1494 * to the face matrices above, we need it in both assembly stages.
1495 *
1496 * @code
1497 *   for (unsigned int i = 0;
1498 *   i < scratch.fe_local_support_on_face[face_no].size();
1499 *   ++i)
1500 *   for (unsigned int j = 0;
1501 *   j < scratch.fe_local_support_on_face[face_no].size();
1502 *   ++j)
1503 *   {
1504 *   const unsigned int ii =
1505 *   scratch.fe_local_support_on_face[face_no][i];
1506 *   const unsigned int jj =
1507 *   scratch.fe_local_support_on_face[face_no][j];
1508 *   scratch.ll_matrix(ii, jj) +=
1509 *   tau_stab * scratch.u_phi[i] * scratch.u_phi[j] * JxW;
1510 *   }
1511 *  
1512 * @endcode
1513 *
1514 * When @p trace_reconstruct=true, we are solving for the local
1515 * solutions on an element by element basis. The local
1516 * right-hand-side is calculated by replacing the basis functions @p
1517 * tr_phi in the @p lf_matrix computation by the computed values @p
1518 * trace_values. Of course, the sign of the matrix is now minus since we
1519 * have moved everything to the other side of the equation.
1520 *
1521 * @code
1522 *   if (task_data.trace_reconstruct)
1523 *   for (unsigned int i = 0;
1524 *   i < scratch.fe_local_support_on_face[face_no].size();
1525 *   ++i)
1526 *   {
1527 *   const unsigned int ii =
1528 *   scratch.fe_local_support_on_face[face_no][i];
1529 *   scratch.l_rhs(ii) -=
1530 *   (scratch.q_phi[i] * normal +
1531 *   scratch.u_phi[i] * (convection * normal - tau_stab)) *
1532 *   scratch.trace_values[q] * JxW;
1533 *   }
1534 *   }
1535 *   }
1536 *  
1537 * @endcode
1538 *
1539 * Once assembly of all of the local contributions is complete, we must
1540 * either: (1) assemble the global system, or (2) compute the local solution
1541 * values and save them. In either case, the first step is to invert the
1542 * local-local matrix.
1543 *
1544 * @code
1545 *   scratch.ll_matrix.gauss_jordan();
1546 *  
1547 * @endcode
1548 *
1549 * For (1), we compute the Schur complement and add it to the @p
1550 * cell_matrix, matrix @f$D@f$ in the introduction.
1551 *
1552 * @code
1553 *   if (task_data.trace_reconstruct == false)
1554 *   {
1555 *   scratch.fl_matrix.mmult(scratch.tmp_matrix, scratch.ll_matrix);
1556 *   scratch.tmp_matrix.vmult_add(task_data.cell_vector, scratch.l_rhs);
1557 *   scratch.tmp_matrix.mmult(task_data.cell_matrix,
1558 *   scratch.lf_matrix,
1559 *   true);
1560 *   cell->get_dof_indices(task_data.dof_indices);
1561 *   }
1562 * @endcode
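 *
 * Spelled out in the matrix notation from above (and recalling that
 * @p fl_matrix was assembled with a negative sign, i.e., it holds @f$-C@f$,
 * while @p ll_matrix now holds @f$A^{-1}@f$): the first call forms
 * @f$-CA^{-1}@f$, the second adds @f$-CA^{-1}F@f$ to the right hand side
 * vector, and the third adds @f$-CA^{-1}B@f$ to the face-face matrix, so that
 * @p cell_matrix and @p cell_vector end up holding the condensed system
 * @f[
 *   (D - CA^{-1}B)\,\Lambda = G - CA^{-1}F .
 * @f]
 *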
1563 *
1564 * For (2), we are simply solving (ll_matrix).(solution_local) = (l_rhs).
1565 * Hence, we multiply @p l_rhs by our already inverted local-local matrix
1566 * and store the result using the <code>set_dof_values</code> function.
1567 *
1568 * @code
1569 *   else
1570 *   {
1571 *   scratch.ll_matrix.vmult(scratch.tmp_rhs, scratch.l_rhs);
1572 *   loc_cell->set_dof_values(scratch.tmp_rhs, solution_local);
1573 *   }
1574 *   }
1575 *  
1576 *  
1577 *  
1578 * @endcode
1579 *
1580 *
1581 * <a name="step_51-HDGcopy_local_to_global"></a>
1582 * <h4>HDG::copy_local_to_global</h4>
1583 * If we are in the first step of the solution, i.e. @p trace_reconstruct=false,
1584 * then we assemble the local matrices into the global system.
1585 *
1586 * @code
1587 *   template <int dim>
1588 *   void HDG<dim>::copy_local_to_global(const PerTaskData &data)
1589 *   {
1590 *   if (data.trace_reconstruct == false)
1591 *   constraints.distribute_local_to_global(data.cell_matrix,
1592 *   data.cell_vector,
1593 *   data.dof_indices,
1594 *   system_matrix,
1595 *   system_rhs);
1596 *   }
1597 *  
1598 *  
1599 *  
1600 * @endcode
1601 *
1602 *
1603 * <a name="step_51-HDGsolve"></a>
1604 * <h4>HDG::solve</h4>
1605 * The skeleton solution is solved for by using a BiCGStab solver with
1606 * identity preconditioner.
1607 *
1608 * @code
1609 *   template <int dim>
1610 *   void HDG<dim>::solve()
1611 *   {
1612 *   SolverControl solver_control(system_matrix.m() * 10,
1613 *   1e-11 * system_rhs.l2_norm());
1614 *   SolverBicgstab<Vector<double>> solver(solver_control);
1615 *   solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());
1616 *  
1617 *   std::cout << " Number of BiCGStab iterations: "
1618 *   << solver_control.last_step() << std::endl;
1619 *  
1620 *   system_matrix.clear();
1621 *   sparsity_pattern.reinit(0, 0, 0, 1);
1622 *  
1623 *   constraints.distribute(solution);
1624 *  
1625 * @endcode
1626 *
1627 * Once we have solved for the skeleton solution,
1628 * we can solve for the local solutions in an element-by-element
1629 * fashion. We do this by re-using the same @p assemble_system function
1630 * but switching @p trace_reconstruct to true.
1631 *
1632 * @code
1633 *   assemble_system(true);
1634 *   }
1635 *  
1636 *  
1637 *  
1638 * @endcode
1639 *
1640 *
1641 * <a name="step_51-HDGpostprocess"></a>
1642 * <h4>HDG::postprocess</h4>
1643 *
1644
1645 *
1646 * The postprocess method serves two purposes. First, we want to construct a
1647 * post-processed scalar variable in the element space of degree @f$p+1@f$ that
1648 * we hope will converge at order @f$p+2@f$. This is again an element-by-element
1649 * process and only involves the scalar solution as well as the gradient on
1650 * the local cell. To do this, we introduce the already defined scratch data
1651 * together with some update flags and run the work stream to do this in
1652 * parallel.
1653 *
1654
1655 *
1656 * Secondly, we want to compute the discretization errors just as we did in
1657 * @ref step_7 "step-7". The overall procedure is similar with calls to
1658 * VectorTools::integrate_difference. The difference is in how we compute
1659 * the errors for the scalar variable and the gradient variable. In @ref step_7 "step-7",
1660 * we did this by computing @p L2_norm or @p H1_seminorm
1661 * contributions. Here, the DoFHandler contains both the scalar and the
1662 * gradient variable, computed and sorted by their vector component, <code>[0, dim)</code> for
1663 * the
1664 * gradient and @p dim for the scalar. To compute their value, we hence use
1665 * a ComponentSelectFunction for either of them, together with the @p
1666 * SolutionAndGradient class introduced above that contains the analytic
1667 * parts of either of them. Eventually, we also compute the L2-error of the
1668 * post-processed solution and add the results into the convergence table.
1669 *
1670 * @code
1671 *   template <int dim>
1672 *   void HDG<dim>::postprocess()
1673 *   {
1674 *   {
1675 *   const QGauss<dim> quadrature_formula(fe_u_post.degree + 1);
1676 *   const UpdateFlags local_flags(update_values);
1677 *   const UpdateFlags flags(update_values | update_gradients |
1678 *   update_JxW_values);
1679 *  
1680 *   PostProcessScratchData scratch(
1681 *   fe_u_post, fe_local, quadrature_formula, local_flags, flags);
1682 *  
1683 *   WorkStream::run(
1684 *   dof_handler_u_post.begin_active(),
1685 *   dof_handler_u_post.end(),
1686 *   [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
1687 *   PostProcessScratchData &scratch,
1688 *   unsigned int &data) {
1689 *   this->postprocess_one_cell(cell, scratch, data);
1690 *   },
1691 *   std::function<void(const unsigned int &)>(),
1692 *   scratch,
1693 *   0U);
1694 *   }
1695 *  
1697 *  
1703 *   QGauss<dim>(fe.degree + 2),
1705 *   &value_select);
1706 *   const double L2_error =
1710 *  
1712 *   std::pair<unsigned int, unsigned int>(0, dim), dim + 1);
1717 *   QGauss<dim>(fe.degree + 2),
1719 *   &gradient_select);
1720 *   const double grad_error =
1724 *  
1727 *   Solution<dim>(),
1729 *   QGauss<dim>(fe.degree + 3),
1731 *   const double post_error =
1735 *  
1736 *   convergence_table.add_value("cells", triangulation.n_active_cells());
1737 *   convergence_table.add_value("dofs", dof_handler.n_dofs());
1738 *  
1739 *   convergence_table.add_value("val L2", L2_error);
1740 *   convergence_table.set_scientific("val L2", true);
1741 *   convergence_table.set_precision("val L2", 3);
1742 *  
1743 *   convergence_table.add_value("grad L2", grad_error);
1744 *   convergence_table.set_scientific("grad L2", true);
1745 *   convergence_table.set_precision("grad L2", 3);
1746 *  
1747 *   convergence_table.add_value("val L2-post", post_error);
1748 *   convergence_table.set_scientific("val L2-post", true);
1749 *   convergence_table.set_precision("val L2-post", 3);
1750 *   }
1751 *  
1752 *  
1753 *  
1754 * @endcode
1755 *
1756 *
1757 * <a name="step_51-HDGpostprocess_one_cell"></a>
1758 * <h4>HDG::postprocess_one_cell</h4>
1759 *
1760
1761 *
1762 * This is the actual work done for the postprocessing. According to the
1763 * discussion in the introduction, we need to set up a system that projects
1764 * the gradient part of the DG solution onto the gradient of the
1765 * post-processed variable. Moreover, we need to set the average of the new
1766 * post-processed variable to equal the average of the scalar DG solution
1767 * on the cell.
1768 *
1769
1770 *
1771 * More technically speaking, the projection of the gradient is a system
1772 * that would potentially fill our @p dofs_per_cell times @p dofs_per_cell
1773 * matrix but is singular (the sum of all rows would be zero because the
1774 * constant function has zero gradient). Therefore, we take one row away and
1775 * use it for imposing the average of the scalar value. We pick the first
1776 * row for the scalar part, even though we could pick any row for @f$\mathcal
1777 * Q_{-p}@f$ elements. However, had we used FE_DGP elements instead, the first
1778 * row would correspond to the constant part already and deleting e.g. the
1779 * last row would give us a singular system. This way, our program can also
1780 * be used for those elements.
1781 *
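 *
 * Concretely, the element-local system assembled below determines @f$u_h^*@f$
 * on each cell @f$K@f$ from
 * @f[
 *   \left(\nabla u_h^*, \nabla w_h\right)_K = -\left(\mathbf q_h, \nabla w_h\right)_K
 *   \quad\text{for all test functions } w_h,
 *   \qquad
 *   \left(u_h^*, 1\right)_K = \left(u_h, 1\right)_K,
 * @f]
 * where the first row of the matrix is replaced by the mean value constraint
 * as just described.
 *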
1782 * @code
1783 *   template <int dim>
1784 *   void HDG<dim>::postprocess_one_cell(
1785 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
1786 *   PostProcessScratchData &scratch,
1787 *   unsigned int &)
1788 *   {
1789 *   const typename DoFHandler<dim>::active_cell_iterator loc_cell =
1790 *   cell->as_dof_handler_iterator(dof_handler_local);
1791 *  
1792 *   scratch.fe_values_local.reinit(loc_cell);
1793 *   scratch.fe_values.reinit(cell);
1794 *  
1795 *   const FEValuesExtractors::Vector fluxes(0);
1796 *   const FEValuesExtractors::Scalar scalar(dim);
1797 *  
1798 *   const unsigned int n_q_points = scratch.fe_values.get_quadrature().size();
1799 *   const unsigned int dofs_per_cell = scratch.fe_values.dofs_per_cell;
1800 *  
1801 *   scratch.fe_values_local[scalar].get_function_values(solution_local,
1802 *   scratch.u_values);
1803 *   scratch.fe_values_local[fluxes].get_function_values(solution_local,
1804 *   scratch.u_gradients);
1805 *  
1806 *   double sum = 0;
1807 *   for (unsigned int i = 1; i < dofs_per_cell; ++i)
1808 *   {
1809 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1810 *   {
1811 *   sum = 0;
1812 *   for (unsigned int q = 0; q < n_q_points; ++q)
1813 *   sum += (scratch.fe_values.shape_grad(i, q) *
1814 *   scratch.fe_values.shape_grad(j, q)) *
1815 *   scratch.fe_values.JxW(q);
1816 *   scratch.cell_matrix(i, j) = sum;
1817 *   }
1818 *  
1819 *   sum = 0;
1820 *   for (unsigned int q = 0; q < n_q_points; ++q)
1821 *   sum -= (scratch.fe_values.shape_grad(i, q) * scratch.u_gradients[q]) *
1822 *   scratch.fe_values.JxW(q);
1823 *   scratch.cell_rhs(i) = sum;
1824 *   }
1825 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1826 *   {
1827 *   sum = 0;
1828 *   for (unsigned int q = 0; q < n_q_points; ++q)
1829 *   sum += scratch.fe_values.shape_value(j, q) * scratch.fe_values.JxW(q);
1830 *   scratch.cell_matrix(0, j) = sum;
1831 *   }
1832 *   {
1833 *   sum = 0;
1834 *   for (unsigned int q = 0; q < n_q_points; ++q)
1835 *   sum += scratch.u_values[q] * scratch.fe_values.JxW(q);
1836 *   scratch.cell_rhs(0) = sum;
1837 *   }
1838 *  
1839 * @endcode
1840 *
1841 * Having assembled all terms, we can again go on and solve the linear
1842 * system. We invert the matrix and then multiply the inverse by the
1843 * right hand side. An alternative (and more numerically stable) method
1844 * would have been to only factorize the matrix and apply the factorization.
1845 *
1846 * @code
1847 *   scratch.cell_matrix.gauss_jordan();
1848 *   scratch.cell_matrix.vmult(scratch.cell_sol, scratch.cell_rhs);
1849 *   cell->distribute_local_to_global(scratch.cell_sol, solution_u_post);
1850 *   }
1851 *  
1852 *  
1853 *  
1854 * @endcode
1855 *
1856 *
1857 * <a name="step_51-HDGoutput_results"></a>
1858 * <h4>HDG::output_results</h4>
1859 * We have 3 sets of results that we would like to output: the local
1860 * solution, the post-processed local solution, and the skeleton solution. The
1861 * former two both 'live' on element volumes, whereas the latter lives on
1862 * codimension-1 surfaces
1863 * of the triangulation. Our @p output_results function writes all local solutions
1864 * to the same vtk file, even though they correspond to different
1865 * DoFHandler objects. The graphical output for the skeleton
1866 * variable is done through use of the DataOutFaces class.
1867 *
1868 * @code
1869 *   template <int dim>
1870 *   void HDG<dim>::output_results(const unsigned int cycle)
1871 *   {
1872 *   std::string filename;
1873 *   switch (refinement_mode)
1874 *   {
1875 *   case global_refinement:
1876 *   filename = "solution-global";
1877 *   break;
1878 *   case adaptive_refinement:
1879 *   filename = "solution-adaptive";
1880 *   break;
1881 *   default:
1882 *   DEAL_II_NOT_IMPLEMENTED();
1883 *   }
1884 *  
1885 *   std::string face_out(filename);
1886 *   face_out += "-face";
1887 *  
1888 *   filename += "-q" + Utilities::int_to_string(fe.degree, 1);
1889 *   filename += "-" + Utilities::int_to_string(cycle, 2);
1890 *   filename += ".vtk";
1891 *   std::ofstream output(filename);
1892 *  
1893 *   DataOut<dim> data_out;
1894 *  
1895 * @endcode
1896 *
1897 * We first define the names and types of the local solution,
1898 * and add the data to @p data_out.
1899 *
1900 * @code
1901 *   std::vector<std::string> names(dim, "gradient");
1902 *   names.emplace_back("solution");
1903 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
1904 *   component_interpretation(
1905 *   dim + 1, DataComponentInterpretation::component_is_part_of_vector);
1906 *   component_interpretation[dim] =
1907 *   DataComponentInterpretation::component_is_scalar;
1908 *   data_out.add_data_vector(dof_handler_local,
1909 *   solution_local,
1910 *   names,
1911 *   component_interpretation);
1912 *  
1913 * @endcode
1914 *
1915 * The second data item we add is the post-processed solution.
1916 * In this case, it is a single scalar variable belonging to
1917 * a different DoFHandler.
1918 *
1919 * @code
1920 *   std::vector<std::string> post_name(1, "u_post");
1921 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
1922 *   post_comp_type(1, DataComponentInterpretation::component_is_scalar);
1923 *   data_out.add_data_vector(dof_handler_u_post,
1924 *   solution_u_post,
1925 *   post_name,
1926 *   post_comp_type);
1927 *  
1928 *   data_out.build_patches(fe.degree);
1929 *   data_out.write_vtk(output);
1930 *  
1931 *   face_out += "-q" + Utilities::int_to_string(fe.degree, 1);
1932 *   face_out += "-" + Utilities::int_to_string(cycle, 2);
1933 *   face_out += ".vtk";
1934 *   std::ofstream face_output(face_out);
1935 *  
1936 * @endcode
1937 *
1938 * The DataOutFaces class works analogously to the DataOut class when the data
1939 * vector lives on a DoFHandler that defines its solution on the skeleton of
1940 * the triangulation. We treat it as such here, and the code is similar to
1941 * that above.
1942 *
1943 * @code
1944 *   DataOutFaces<dim> data_out_face(false);
1945 *   std::vector<std::string> face_name(1, "u_hat");
1946 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
1947 *   face_component_type(1, DataComponentInterpretation::component_is_scalar);
1948 *  
1949 *   data_out_face.add_data_vector(dof_handler,
1950 *   solution,
1951 *   face_name,
1952 *   face_component_type);
1953 *  
1954 *   data_out_face.build_patches(fe.degree);
1955 *   data_out_face.write_vtk(face_output);
1956 *   }
1957 *  
1958 * @endcode
1959 *
1960 *
1961 * <a name="step_51-HDGrefine_grid"></a>
1962 * <h4>HDG::refine_grid</h4>
1963 *
1964
1965 *
1966 * We implement two different refinement cases for HDG, just as in
1967 * <code>@ref step_7 "step-7"</code>: adaptive_refinement and global_refinement. The
1968 * global_refinement case recreates the entire triangulation every
1969 * time. This is because we want to use a finer sequence of meshes than what
1970 * we would get with one refinement step, namely 2, 3, 4, 6, 8, 12, 16, ...
1971 * elements per direction.
1972 *
1973
1974 *
1975 * The adaptive_refinement mode uses the <code>KellyErrorEstimator</code> to
1976 * give a decent indication of the non-regular regions in the scalar local
1977 * solutions.
1978 *
1979 * @code
1980 *   template <int dim>
1981 *   void HDG<dim>::refine_grid(const unsigned int cycle)
1982 *   {
1983 *   if (cycle == 0)
1984 *   {
1985 *   GridGenerator::subdivided_hyper_cube(triangulation, 2, -1, 1);
1986 *   triangulation.refine_global(3 - dim);
1987 *   }
1988 *   else
1989 *   switch (refinement_mode)
1990 *   {
1991 *   case global_refinement:
1992 *   {
1993 *   triangulation.clear();
1994 *   GridGenerator::subdivided_hyper_cube(triangulation,
1995 *   2 + (cycle % 2),
1996 *   -1,
1997 *   1);
1998 *   triangulation.refine_global(3 - dim + cycle / 2);
1999 *   break;
2000 *   }
2001 *  
2002 *   case adaptive_refinement:
2003 *   {
2004 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
2005 *  
2006 *   const FEValuesExtractors::Scalar scalar(dim);
2007 *   std::map<types::boundary_id, const Function<dim> *>
2008 *   neumann_boundary;
2009 *   KellyErrorEstimator<dim>::estimate(dof_handler_local,
2010 *   QGauss<dim - 1>(fe.degree + 1),
2011 *   neumann_boundary,
2012 *   solution_local,
2013 *   estimated_error_per_cell,
2014 *   fe_local.component_mask(scalar));
2015 *  
2016 *   GridRefinement::refine_and_coarsen_fixed_number(
2017 *   triangulation, estimated_error_per_cell, 0.3, 0.);
2018 *  
2021 *   triangulation.execute_coarsening_and_refinement();
2022 *  
2023 *   break;
2024 *   }
2025 *  
2026 *   default:
2027 *   {
2028 *   DEAL_II_NOT_IMPLEMENTED();
2029 *   }
2030 *   }
2031 *  
2032 * @endcode
2033 *
2034 * Just as in @ref step_7 "step-7", we set the boundary indicator of two of the faces to 1
2035 * where we want to specify Neumann boundary conditions instead of Dirichlet
2036 * conditions. Since we re-create the triangulation every time for global
2037 * refinement, the flags are set in every refinement step, not just at the
2038 * beginning.
2039 *
2040 * @code
2041 *   for (const auto &cell : triangulation.cell_iterators())
2042 *   for (const auto &face : cell->face_iterators())
2043 *   if (face->at_boundary())
2044 *   if ((std::fabs(face->center()[0] - (-1)) < 1e-12) ||
2045 *   (std::fabs(face->center()[1] - (-1)) < 1e-12))
2046 *   face->set_boundary_id(1);
2047 *   }
2048 *  
2049 * @endcode
2050 *
2051 *
2052 * <a name="step_51-HDGrun"></a>
2053 * <h4>HDG::run</h4>
2054 * The functionality here is basically the same as <code>@ref step_7 "step-7"</code>.
2055 * We loop over 10 cycles, refining the grid on each one. At the end,
2056 * the convergence tables are evaluated and printed.
2057 *
2058 * @code
2059 *   template <int dim>
2060 *   void HDG<dim>::run()
2061 *   {
2062 *   for (unsigned int cycle = 0; cycle < 10; ++cycle)
2063 *   {
2064 *   std::cout << "Cycle " << cycle << ':' << std::endl;
2065 *  
2066 *   refine_grid(cycle);
2067 *   setup_system();
2068 *   assemble_system(false);
2069 *   solve();
2070 *   postprocess();
2071 *   output_results(cycle);
2072 *   }
2073 *  
2074 * @endcode
2075 *
2076 * There is one minor change for the convergence table compared to @ref step_7 "step-7":
2077 * Since we did not refine our mesh by a factor two in each cycle (but
2078 * rather used the sequence 2, 3, 4, 6, 8, 12, ...), we need to tell the
2079 * convergence rate evaluation about this. We do so by setting the
2080 * number of cells as a reference column and additionally specifying the
2081 * dimension of the problem, which gives the necessary
2082 * relation between number of cells and mesh size.
2083 *
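 *
 * With the number of cells as the reference column and the dimension @f$d@f$
 * passed as the last argument, the rate reported between two consecutive
 * cycles @f$k-1@f$ and @f$k@f$ is (up to the base of the logarithm used
 * internally)
 * @f[
 *   \text{rate} = d\,\frac{\log\left(e_{k-1}/e_{k}\right)}
 *                         {\log\left(N_{k}/N_{k-1}\right)},
 * @f]
 * where @f$e_k@f$ and @f$N_k@f$ denote the error and the number of cells in
 * cycle @f$k@f$. This reduces to the familiar @f$\log_2(e_{k-1}/e_k)@f$ when
 * the mesh is uniformly refined by a factor of two in each direction.
 *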
2084 * @code
2085 *   if (refinement_mode == global_refinement)
2086 *   {
2087 *   convergence_table.evaluate_convergence_rates(
2088 *   "val L2", "cells", ConvergenceTable::reduction_rate_log2, dim);
2089 *   convergence_table.evaluate_convergence_rates(
2090 *   "grad L2", "cells", ConvergenceTable::reduction_rate_log2, dim);
2091 *   convergence_table.evaluate_convergence_rates(
2092 *   "val L2-post", "cells", ConvergenceTable::reduction_rate_log2, dim);
2093 *   }
2094 *   convergence_table.write_text(std::cout);
2095 *   }
2096 *  
2097 *   } // end of namespace Step51
2098 *  
2099 *  
2100 *  
2101 *   int main()
2102 *   {
2103 *   const unsigned int dim = 2;
2104 *  
2105 *   try
2106 *   {
2107 * @endcode
2108 *
2109 * Now for the three calls to the main class in complete analogy to
2110 * @ref step_7 "step-7".
2111 *
2112 * @code
2113 *   {
2114 *   std::cout << "Solving with Q1 elements, adaptive refinement"
2115 *   << std::endl
2116 *   << "============================================="
2117 *   << std::endl
2118 *   << std::endl;
2119 *  
2120 *   Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::adaptive_refinement);
2121 *   hdg_problem.run();
2122 *  
2123 *   std::cout << std::endl;
2124 *   }
2125 *  
2126 *   {
2127 *   std::cout << "Solving with Q1 elements, global refinement" << std::endl
2128 *   << "===========================================" << std::endl
2129 *   << std::endl;
2130 *  
2131 *   Step51::HDG<dim> hdg_problem(1, Step51::HDG<dim>::global_refinement);
2132 *   hdg_problem.run();
2133 *  
2134 *   std::cout << std::endl;
2135 *   }
2136 *  
2137 *   {
2138 *   std::cout << "Solving with Q3 elements, global refinement" << std::endl
2139 *   << "===========================================" << std::endl
2140 *   << std::endl;
2141 *  
2142 *   Step51::HDG<dim> hdg_problem(3, Step51::HDG<dim>::global_refinement);
2143 *   hdg_problem.run();
2144 *  
2145 *   std::cout << std::endl;
2146 *   }
2147 *   }
2148 *   catch (std::exception &exc)
2149 *   {
2150 *   std::cerr << std::endl
2151 *   << std::endl
2152 *   << "----------------------------------------------------"
2153 *   << std::endl;
2154 *   std::cerr << "Exception on processing: " << std::endl
2155 *   << exc.what() << std::endl
2156 *   << "Aborting!" << std::endl
2157 *   << "----------------------------------------------------"
2158 *   << std::endl;
2159 *   return 1;
2160 *   }
2161 *   catch (...)
2162 *   {
2163 *   std::cerr << std::endl
2164 *   << std::endl
2165 *   << "----------------------------------------------------"
2166 *   << std::endl;
2167 *   std::cerr << "Unknown exception!" << std::endl
2168 *   << "Aborting!" << std::endl
2169 *   << "----------------------------------------------------"
2170 *   << std::endl;
2171 *   return 1;
2172 *   }
2173 *  
2174 *   return 0;
2175 *   }
2176 * @endcode
2177<a name="step_51-Results"></a><h1>Results</h1>
2178
2179
2180<a name="step_51-Programoutput"></a><h3>Program output</h3>
2181
2182
2183We first have a look at the output generated by the program when run in 2D. In
2184the four images below, we show the solution for polynomial degree @f$p=1@f$
2185and cycles 2, 3, 4, and 8 of the program. In the plots, we overlay the data
2186generated from the internal data (DG part) with the skeleton part
2187(@f$\hat{u}@f$) into the same plot. We had to generate two different data sets
2188because cells and faces represent different geometric entities, the combination of which (in
2189the same file) is not supported in the VTK output of deal.II.
2190
2191The images show the distinctive features of HDG: The cell solution (colored
2192surfaces) is discontinuous between the cells. The solution on the skeleton
2193variable sits on the faces and ties together the local parts. The skeleton
2194solution is not continuous on the vertices where the faces meet, even though
2195its values are quite close along lines in the same coordinate direction. The
2196skeleton solution can be interpreted as a rubber spring between the two sides
2197that balances the jumps in the solution (or rather, the flux @f$-\nabla u
2198+ \mathbf{c} u@f$). From the picture at the top left, it is clear that
2199the bulk solution frequently over- and undershoots and that the skeleton
2200variable is indeed a better approximation to the exact
2201solution; this explains why we can get a better solution using a
2202postprocessing step.
2203
2204As the mesh is refined, the jumps between the cells get
2205small (we represent a smooth solution), and the skeleton solution approaches
2206the interior parts. For cycle 8, there is no visible difference in the two
2207variables. We also see how boundary conditions are implemented weakly and that
2208the interior variables do not exactly satisfy boundary conditions. On the
2209lower and left boundaries, we set Neumann boundary conditions, whereas we set
2210Dirichlet conditions on the right and top boundaries.
2211
2212<table align="center">
2213 <tr>
2214 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_2.png" alt=""></td>
2215 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_3.png" alt=""></td>
2216 </tr>
2217 <tr>
2218 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_4.png" alt=""></td>
2219 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_8.png" alt=""></td>
2220 </tr>
2221</table>
2222
2223Next, we have a look at the post-processed solution, again at cycles 2, 3, 4,
2224and 8. This is a discontinuous solution that is locally described by second
2225order polynomials. While the solution does not look very good on the mesh of
2226cycle two, it looks much better for cycles three and four. As the convergence
2227table below shows, it also converges more quickly towards the
2228analytical solution.
2229
2230<table align="center">
2231 <tr>
2232 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_2.png" alt=""></td>
2233 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_3.png" alt=""></td>
2234 </tr>
2235 <tr>
2236 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_4.png" alt=""></td>
2237 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_8.png" alt=""></td>
2238 </tr>
2239</table>
2240
2241Finally, we look at the solution for @f$p=3@f$ at cycle 2. Despite the coarse
2242mesh with only 64 cells, the post-processed solution is similar in quality
2243to the linear solution (not post-processed) at cycle 8 with 4,096
2244cells. This clearly shows the superiority of high order methods for smooth
2245solutions.
2246
2247<table align="center">
2248 <tr>
2249 <td><img src="https://www.dealii.org/images/steps/developer/step-51.sol_q3_2.png" alt=""></td>
2250 <td><img src="https://www.dealii.org/images/steps/developer/step-51.post_q3_2.png" alt=""></td>
2251 </tr>
2252</table>
2253
2254<a name="step_51-Convergencetables"></a><h4>Convergence tables</h4>
2255
2256
2257When the program is run, it also outputs information about the respective
2258steps and convergence tables with errors in the various components in the
2259end. In 2D, the convergence tables look the following:
2260
2261@code
2262Q1 elements, adaptive refinement:
2263cells dofs val L2 grad L2 val L2-post
2264 16 80 1.804e+01 2.207e+01 1.798e+01
2265 31 170 9.874e+00 1.322e+01 9.798e+00
2266 61 314 7.452e-01 3.793e+00 4.891e-01
2267 121 634 3.240e-01 1.511e+00 2.616e-01
2268 238 1198 8.585e-02 8.212e-01 1.808e-02
2269 454 2290 4.802e-02 5.178e-01 2.195e-02
2270 898 4378 2.561e-02 2.947e-01 4.318e-03
2271 1720 7864 1.306e-02 1.664e-01 2.978e-03
2272 3271 14638 7.025e-03 9.815e-02 1.075e-03
2273 6217 27214 4.119e-03 6.407e-02 9.975e-04
2274
2275Q1 elements, global refinement:
2276cells dofs val L2 grad L2 val L2-post
2277 16 80 1.804e+01 - 2.207e+01 - 1.798e+01 -
2278 36 168 6.125e+00 2.66 9.472e+00 2.09 6.084e+00 2.67
2279 64 288 9.785e-01 6.38 4.260e+00 2.78 7.102e-01 7.47
2280 144 624 2.730e-01 3.15 1.866e+00 2.04 6.115e-02 6.05
2281 256 1088 1.493e-01 2.10 1.046e+00 2.01 2.880e-02 2.62
2282 576 2400 6.965e-02 1.88 4.846e-01 1.90 9.204e-03 2.81
2283 1024 4224 4.018e-02 1.91 2.784e-01 1.93 4.027e-03 2.87
2284 2304 9408 1.831e-02 1.94 1.264e-01 1.95 1.236e-03 2.91
2285 4096 16640 1.043e-02 1.96 7.185e-02 1.96 5.306e-04 2.94
2286 9216 37248 4.690e-03 1.97 3.228e-02 1.97 1.599e-04 2.96
2287
2288Q3 elements, global refinement:
2289cells dofs val L2 grad L2 val L2-post
2290 16 160 3.613e-01 - 1.891e+00 - 3.020e-01 -
2291 36 336 6.411e-02 4.26 5.081e-01 3.24 3.238e-02 5.51
2292 64 576 3.480e-02 2.12 2.533e-01 2.42 5.277e-03 6.31
2293 144 1248 8.297e-03 3.54 5.924e-02 3.58 6.330e-04 5.23
2294 256 2176 2.254e-03 4.53 1.636e-02 4.47 1.403e-04 5.24
2295 576 4800 4.558e-04 3.94 3.277e-03 3.96 1.844e-05 5.01
2296 1024 8448 1.471e-04 3.93 1.052e-03 3.95 4.378e-06 5.00
2297 2304 18816 2.956e-05 3.96 2.104e-04 3.97 5.750e-07 5.01
2298 4096 33280 9.428e-06 3.97 6.697e-05 3.98 1.362e-07 5.01
2299 9216 74496 1.876e-06 3.98 1.330e-05 3.99 1.788e-08 5.01
2300@endcode
2301
2302
2303One can see the error reduction upon grid refinement, and for the cases where
2304global refinement was performed, also the convergence rates. The quadratic
2305convergence rates of Q1 elements in the @f$L_2@f$ norm for both the scalar
2306variable and the gradient variable are apparent, as is the cubic rate for the
2307postprocessed scalar variable in the @f$L_2@f$ norm. Note this distinctive
2308feature of an HDG solution. In typical continuous finite elements, the
2309gradient of the solution of order @f$p@f$ converges at rate @f$p@f$ only, as
2310opposed to @f$p+1@f$ for the actual solution. Even though superconvergence
2311results for finite elements are also available (e.g. superconvergent patch
2312recovery first introduced by Zienkiewicz and Zhu), these are typically limited
2313to structured meshes and other special cases. For Q3 HDG variables, the scalar
2314variable and gradient converge at fourth order and the postprocessed scalar
2315variable at fifth order.
2316
2317The same convergence rates are observed in 3d:
2318@code
2319Q1 elements, adaptive refinement:
2320cells dofs val L2 grad L2 val L2-post
2321 8 144 7.122e+00 1.941e+01 6.102e+00
2322 29 500 3.309e+00 1.023e+01 2.145e+00
2323 113 1792 2.204e+00 1.023e+01 1.912e+00
2324 379 5732 6.085e-01 5.008e+00 2.233e-01
2325 1317 19412 1.543e-01 1.464e+00 4.196e-02
2326 4579 64768 5.058e-02 5.611e-01 9.521e-03
2327 14596 199552 2.129e-02 3.122e-01 4.569e-03
2328 46180 611400 1.033e-02 1.622e-01 1.684e-03
2329144859 1864212 5.007e-03 8.371e-02 7.364e-04
2330451060 5684508 2.518e-03 4.562e-02 3.070e-04
2331
2332Q1 elements, global refinement:
2333cells dofs val L2 grad L2 val L2-post
2334 8 144 7.122e+00 - 1.941e+01 - 6.102e+00 -
2335 27 432 5.491e+00 0.64 2.184e+01 -0.29 4.448e+00 0.78
2336 64 960 3.646e+00 1.42 1.299e+01 1.81 3.306e+00 1.03
2337 216 3024 1.595e+00 2.04 8.550e+00 1.03 1.441e+00 2.05
2338 512 6912 6.922e-01 2.90 5.306e+00 1.66 2.511e-01 6.07
2339 1728 22464 2.915e-01 2.13 2.490e+00 1.87 8.588e-02 2.65
2340 4096 52224 1.684e-01 1.91 1.453e+00 1.87 4.055e-02 2.61
2341 13824 172800 7.972e-02 1.84 6.861e-01 1.85 1.335e-02 2.74
2342 32768 405504 4.637e-02 1.88 3.984e-01 1.89 5.932e-03 2.82
2343110592 1354752 2.133e-02 1.92 1.830e-01 1.92 1.851e-03 2.87
2344
2345Q3 elements, global refinement:
2346cells dofs val L2 grad L2 val L2-post
2347 8 576 5.670e+00 - 1.868e+01 - 5.462e+00 -
2348 27 1728 1.048e+00 4.16 6.988e+00 2.42 8.011e-01 4.73
2349 64 3840 2.831e-01 4.55 2.710e+00 3.29 1.363e-01 6.16
2350 216 12096 7.883e-02 3.15 7.721e-01 3.10 2.158e-02 4.55
2351 512 27648 3.642e-02 2.68 3.305e-01 2.95 5.231e-03 4.93
2352 1728 89856 8.546e-03 3.58 7.581e-02 3.63 7.640e-04 4.74
2353 4096 208896 2.598e-03 4.14 2.313e-02 4.13 1.783e-04 5.06
2354 13824 691200 5.314e-04 3.91 4.697e-03 3.93 2.355e-05 4.99
2355 32768 1622016 1.723e-04 3.91 1.517e-03 3.93 5.602e-06 4.99
2356110592 5419008 3.482e-05 3.94 3.055e-04 3.95 7.374e-07 5.00
2357@endcode
2358
2359<a name="step_51-Comparisonwithcontinuousfiniteelements"></a><h3>Comparison with continuous finite elements</h3>
2360
2361
2362<a name="step_51-Resultsfor2D"></a><h4>Results for 2D</h4>
2363
2364
2365The convergence tables verify the expected convergence rates stated in the
2366introduction. Now, we want to show a quick comparison of the computational
2367efficiency of the HDG method compared to a usual finite element (continuous
2368Galerkin) method on the problem of this tutorial. Of course, stability aspects
2369of the HDG method compared to continuous finite elements for
2370transport-dominated problems are also important in practice, which is an
2371aspect not seen on a problem with smooth analytic solution. In the picture
2372below, we compare the @f$L_2@f$ error as a function of the number of degrees of
2373freedom (left) and of the computing time spent in the linear solver (right)
2374for two space dimensions of continuous finite elements (CG) and the hybridized
2375discontinuous Galerkin method presented in this tutorial. As opposed to the
2376tutorial where we only use an unpreconditioned BiCGStab solver, the times in the
2377figures below use the Trilinos algebraic multigrid preconditioner in
2378TrilinosWrappers::PreconditionAMG. For the HDG part, a wrapper around
2379ChunkSparseMatrix for the trace variable has been used in order to utilize the
2380block structure in the matrix on the finest level.
2381
2382<table align="center">
2383 <tr>
2384 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_plain.png" width="400" alt=""></td>
2385 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_plain.png" width="400" alt=""></td>
2386 </tr>
2387</table>
2388
2389The results in the graphs show that the HDG method is slower than continuous
2390finite elements at @f$p=1@f$, about equally fast for cubic elements and
2391faster for sixth order elements. However, we have seen above that the HDG
2392method actually produces solutions which converge as @f$h^{p+2}@f$, i.e., they
2393are of similar quality as continuous finite elements of one degree higher in
2394terms of the error per degree of freedom. In the plots below we therefore show the error of the post-processed solution for HDG (denoted
2395by @f$p=1^*@f$ for example). We now see a clear advantage of HDG for the same
2396amount of work for both @f$p=3@f$ and @f$p=6@f$, and about the same quality
2397for @f$p=1@f$.
2398
2399<table align="center">
2400 <tr>
2401 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_post.png" width="400" alt=""></td>
2402 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_post.png" width="400" alt=""></td>
2403 </tr>
2404</table>
2405
2406Since the HDG method actually produces results converging as
2407@f$h^{p+2}@f$, we should compare it to a continuous Galerkin
2408solution with the same asymptotic convergence behavior, i.e., FE_Q with degree
2409@f$p+1@f$. If we do this, we get the convergence curves below. We see that
2410CG with second order polynomials is again clearly better than HDG with
2411linears, whereas the advantage of HDG remains for the higher orders.
2412
2413<table align="center">
2414 <tr>
2415 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2d_postb.png" width="400" alt=""></td>
2416 <td><img src="https://www.dealii.org/images/steps/developer/step-51.2dt_postb.png" width="400" alt=""></td>
2417 </tr>
2418</table>
2419
2420The results are in line with properties of DG methods in general: Best
2421performance is typically not achieved for linear elements, but rather at
2422somewhat higher order, usually around @f$p=3@f$. This is because of a
2423volume-to-surface effect for discontinuous solutions with too much of the
2424solution living on the surfaces and hence duplicating work when the elements
2425are linear. Put in other words, DG methods are often most efficient when used
2426at relatively high order, despite their focus on a discontinuous (and hence,
2427seemingly low accurate) representation of solutions.
2428
2429<a name="step_51-Resultsfor3D"></a><h4>Results for 3D</h4>
2430
2431
2432We now show the same figures in 3D: The first row shows the number of degrees
2433of freedom and computing time versus the @f$L_2@f$ error in the scalar variable
2434@f$u@f$ for CG and HDG at order @f$p@f$, the second row shows the
2435post-processed HDG solution instead of the original one, and the third row
2436compares the post-processed HDG solution with CG at order @f$p+1@f$. In 3D,
2437the volume-to-surface effect makes the cost of HDG somewhat higher and the CG
2438solution is clearly better than HDG for linears by any metric. For cubics, HDG
2439and CG are of similar quality, whereas HDG is again more efficient for sixth
2440order polynomials. One can alternatively also use the combination of FE_DGP
2441and FE_FaceP instead of (FE_DGQ, FE_FaceQ), which do not use tensor product
2442polynomials of degree @f$p@f$ but Legendre polynomials of <i>complete</i>
2443degree @f$p@f$. There are fewer degrees of freedom on the skeleton variable
2444for FE_FaceP for a given mesh size, but the solution quality (error vs. number
2445of DoFs) is very similar to the results for FE_FaceQ.
2446
<table align="center">
 <tr>
 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_plain.png" width="400" alt=""></td>
 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_plain.png" width="400" alt=""></td>
 </tr>
 <tr>
 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_post.png" width="400" alt=""></td>
 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_post.png" width="400" alt=""></td>
 </tr>
 <tr>
 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3d_postb.png" width="400" alt=""></td>
 <td><img src="https://www.dealii.org/images/steps/developer/step-51.3dt_postb.png" width="400" alt=""></td>
 </tr>
</table>

One final note on the efficiency comparison: We tried to use general-purpose
sparse matrix structures and comparable solvers (optimal AMG preconditioners
for both without particular tuning of the AMG parameters on any of them) to
give a fair picture of the cost versus accuracy of the two methods on a toy
example. It should be noted, however, that geometric multigrid (GMG) for
continuous finite
elements is about a factor four to five faster for @f$p=3@f$ and @f$p=6@f$. As of
this writing, optimal-complexity iterative solvers for HDG are still under
construction in the research community. Also, there are other options for CG
available such as fast matrix-free approaches as shown in @ref step_37 "step-37" that make
higher order continuous elements more competitive. Again, it is not clear to
what extent similar improvements could be made for
HDG. We refer to <a href="https://dx.doi.org/10.1137/16M110455X">Kronbichler
and Wall (2018)</a> for a recent efficiency evaluation.


<a name="step_51-Possibilitiesforimprovements"></a><h3>Possibilities for improvements</h3>


One item that is not done optimally relates to the performance of this
program, which is of course an issue in practical applications (weighing in
also the better solution quality of (H)DG methods for transport-dominated
problems). Let us look at
the computing time of the tutorial program and the share of the individual
components:

<table align="center" class="doxtable">
 <tr>
 <th>&nbsp;</th>
 <th>&nbsp;</th>
 <th>Setup</th>
 <th>Assemble</th>
 <th>Solve</th>
 <th>Trace reconstruct</th>
 <th>Post-processing</th>
 <th>Output</th>
 </tr>
 <tr>
 <th>&nbsp;</th>
 <th>Total time</th>
 <th colspan="6">Relative share</th>
 </tr>
 <tr>
 <td align="left">2D, Q1, cycle 9, 37,248 dofs</td>
 <td align="center">5.34s</td>
 <td align="center">0.7%</td>
 <td align="center">1.2%</td>
 <td align="center">89.5%</td>
 <td align="center">0.9%</td>
 <td align="center">2.3%</td>
 <td align="center">5.4%</td>
 </tr>
 <tr>
 <td align="left">2D, Q3, cycle 9, 74,496 dofs</td>
 <td align="center">22.2s</td>
 <td align="center">0.4%</td>
 <td align="center">4.3%</td>
 <td align="center">84.1%</td>
 <td align="center">4.1%</td>
 <td align="center">3.5%</td>
 <td align="center">3.6%</td>
 </tr>
 <tr>
 <td align="left">3D, Q1, cycle 7, 172,800 dofs</td>
 <td align="center">9.06s</td>
 <td align="center">3.1%</td>
 <td align="center">8.9%</td>
 <td align="center">42.7%</td>
 <td align="center">7.0%</td>
 <td align="center">20.6%</td>
 <td align="center">17.7%</td>
 </tr>
 <tr>
 <td align="left">3D, Q3, cycle 7, 691,200 dofs</td>
 <td align="center">516s</td>
 <td align="center">0.6%</td>
 <td align="center">34.5%</td>
 <td align="center">13.4%</td>
 <td align="center">32.8%</td>
 <td align="center">17.1%</td>
 <td align="center">1.5%</td>
 </tr>
</table>

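As an aside on how a breakdown like the one above can be obtained: wrapping
the individual phases of a refinement cycle into scoped sections of the
TimerOutput class produces exactly this kind of per-section summary. The
following is a minimal sketch; the section names and the commented-out calls
are illustrative and the snippet is not part of the tutorial program:

@code
#include <deal.II/base/timer.h>

#include <iostream>

using namespace dealii;

void run_one_cycle()
{
  // Prints a wall-time summary per section when the object goes out of scope.
  TimerOutput timer(std::cout, TimerOutput::summary, TimerOutput::wall_times);

  {
    TimerOutput::Scope t(timer, "Setup");
    // setup_system();
  }
  {
    TimerOutput::Scope t(timer, "Assemble");
    // assemble_system(false);
  }
  {
    TimerOutput::Scope t(timer, "Solve");
    // solve();
  }
  // ...analogous sections for trace reconstruction, post-processing, output.
}
@endcode
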
As can be seen from the table, the solver and assembly calls dominate the
runtime of the program. This also gives a clear indication of where
improvements would make the most sense:

<ol>
 <li> Better linear solvers: We use a BiCGStab iterative solver without
 preconditioner, where the number of iterations increases with increasing
 problem size (the number of iterations for Q1 elements and global
 refinements starts at 35 for the small sizes but increases up to 701 for
 the largest size). To do better, one could for example use an algebraic
 multigrid preconditioner from Trilinos, or some more advanced variant such as
 the one discussed in <a
 href="https://dx.doi.org/10.1137/16M110455X">Kronbichler and Wall
 (2018)</a>; see also the sketch after this list. For diffusion-dominated
 problems such as the problem at hand
 with finer meshes, such a solver can be designed that uses the matrix-vector
 products from the more efficient ChunkSparseMatrix on the finest level.

 <li> Speed up assembly by pre-assembling parts of the local matrices that do
 not change from one
 cell to another (those that contain neither variable coefficients nor
 mapping-dependent terms).
</ol>
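
To make the first item concrete, the following sketch hands the statically
condensed trace system to a BiCGStab solver preconditioned by Trilinos'
algebraic multigrid. It assumes a deal.II installation configured with
Trilinos and that the trace system has been assembled into an ordinary
SparseMatrix<double> (the tutorial itself uses a ChunkSparseMatrix, which
would first have to be copied or assembled into such a format); all names are
placeholders rather than the tutorial's members:

@code
#include <deal.II/lac/solver_bicgstab.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

void solve_trace_system(const SparseMatrix<double> &system_matrix,
                        Vector<double>             &solution,
                        const Vector<double>       &system_rhs)
{
  SolverControl solver_control(system_matrix.m() * 10,
                               1e-11 * system_rhs.l2_norm());
  SolverBicgstab<Vector<double>> solver(solver_control);

  // Algebraic multigrid preconditioner built from the assembled matrix;
  // parameters are left at their defaults apart from marking the problem
  // as elliptic.
  TrilinosWrappers::PreconditionAMG                 preconditioner;
  TrilinosWrappers::PreconditionAMG::AdditionalData amg_data;
  amg_data.elliptic = true;
  preconditioner.initialize(system_matrix, amg_data);

  solver.solve(system_matrix, solution, system_rhs, preconditioner);
}
@endcode

With such a preconditioner one would expect the iteration counts for the
diffusion-dominated setup of this program to stay roughly constant under mesh
refinement, instead of growing into the hundreds as reported in the first
item above.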
 *
 *
<a name="step_51-PlainProg"></a>
<h1> The plain program</h1>
@include "step-51.cc"
*/