deal.II version GIT relicensing-2659-g040196caa3 2025-02-18 14:20:01+00:00
step-75.h
467 *   const unsigned int /*component*/) const override
468 *   {
469 *   const std::array<double, 2> polar =
470 *   GeometricUtilities::Coordinates::to_spherical(Point<2>(p[0], p[1]));
471 *  
472 *   constexpr const double alpha = 2. / 3.;
473 *   return std::pow(polar[0], alpha) * std::sin(alpha * polar[1]);
474 *   }
475 *   };
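 *
 * (For reference: this implements the re-entrant corner solution
 * @f$u(r,\varphi) = r^{2/3} \sin(2\varphi/3)@f$ of the Laplace equation,
 * which is smooth everywhere on the L-shaped domain except at the origin.)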
476 *  
477 *  
478 *  
479 * @endcode
480 *
481 *
482 * <a name="step_75-Parameters"></a>
483 * <h3>Parameters</h3>
484 *
485
486 *
487 * For this tutorial, we will use a simplified set of parameters. It is also
488 * possible to use a ParameterHandler class here, but to keep this tutorial
489 * short we decided on using simple structs. The actual intention of all these
490 * parameters will be described in the upcoming classes at their respective
491 * location where they are used.
492 *
493
494 *
495 * The following parameter set controls the coarse-grid solver, the smoothers,
496 * and the inter-grid transfer scheme of the multigrid mechanism.
497 * We populate it with default parameters.
498 *
499 * @code
500 *   struct MultigridParameters
501 *   {
502 *   struct
503 *   {
504 *   std::string type = "cg_with_amg";
505 *   unsigned int maxiter = 10000;
506 *   double abstol = 1e-20;
507 *   double reltol = 1e-4;
508 *   unsigned int smoother_sweeps = 1;
509 *   unsigned int n_cycles = 1;
510 *   std::string smoother_type = "ILU";
511 *   } coarse_solver;
512 *  
513 *   struct
514 *   {
515 *   std::string type = "chebyshev";
516 *   double smoothing_range = 20;
517 *   unsigned int degree = 5;
518 *   unsigned int eig_cg_n_iterations = 20;
519 *   } smoother;
520 *  
521 *   struct
522 *   {
523 *   MGTransferGlobalCoarseningTools::PolynomialCoarseningSequenceType
524 *   p_sequence = MGTransferGlobalCoarseningTools::
525 *   PolynomialCoarseningSequenceType::decrease_by_one;
526 *   bool perform_h_transfer = true;
527 *   } transfer;
528 *   };
529 *  
530 *  
531 *  
532 * @endcode
533 *
534 * This is the general parameter struct for the problem class. You will find
535 * this struct divided into several categories, including general runtime
536 * parameters, level limits, refine and coarsen fractions, as well as
537 * parameters for cell weighting. It also contains an instance of the above
538 * struct for multigrid parameters which will be passed to the multigrid
539 * algorithm.
540 *
541 * @code
542 *   struct Parameters
543 *   {
544 *   unsigned int n_cycles = 8;
545 *   double tolerance_factor = 1e-12;
546 *  
547 *   MultigridParameters mg_data;
548 *  
549 *   unsigned int min_h_level = 5;
550 *   unsigned int max_h_level = 12;
551 *   unsigned int min_p_degree = 2;
552 *   unsigned int max_p_degree = 6;
553 *   unsigned int max_p_level_difference = 1;
554 *  
555 *   double refine_fraction = 0.3;
556 *   double coarsen_fraction = 0.03;
557 *   double p_refine_fraction = 0.9;
558 *   double p_coarsen_fraction = 0.9;
559 *  
560 *   double weighting_factor = 1.;
561 *   double weighting_exponent = 1.;
562 *   };
563 *  
564 *  
565 *  
566 * @endcode
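 *
 * Such a plain struct is consumed directly; a minimal usage sketch (the
 * overridden values here are arbitrary picks for illustration):
 * @code
 *   Parameters prm;
 *   prm.n_cycles     = 4; // fewer adaptation cycles
 *   prm.max_p_degree = 4; // narrower polynomial degree range
 *   // ... and pass prm to the problem class declared later in this program
 * @endcode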
567 *
568 *
569 * <a name="step_75-MatrixfreeLaplaceoperator"></a>
570 * <h3>Matrix-free Laplace operator</h3>
571 *
572
573 *
574 * This is a matrix-free implementation of the Laplace operator that will
575 * basically take over the part of the `assemble_system()` function from other
576 * tutorials. The meaning of all member functions will be explained at their
577 * definition later.
578 *
579
580 *
581 * We will use the FEEvaluation class to evaluate the solution vector
582 * at the quadrature points and to perform the integration. In contrast to
583 * other tutorials, the template argument `degree` is set to @f$-1@f$ and
584 * `number of quadrature in 1d` to @f$0@f$. In this case, FEEvaluation selects
585 * dynamically the correct polynomial degree and number of quadrature
586 * points. Here, we introduce an alias to FEEvaluation with the correct
587 * template parameters so that we do not have to worry about them later on.
588 *
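 * For comparison, a sketch of the two ways to spell such an integrator
 * (the fixed-degree variant is how non-hp matrix-free tutorials do it):
 * @code
 *   // degree and number of 1d quadrature points fixed at compile time:
 *   using FixedIntegrator   = FEEvaluation<dim, 2, 3, 1, number>;
 *   // both selected dynamically at run time, as in this tutorial:
 *   using DynamicIntegrator = FEEvaluation<dim, -1, 0, 1, number>;
 * @endcode
 *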
589 * @code
590 *   template <int dim, typename number>
591 *   class LaplaceOperator : public EnableObserverPointer
592 *   {
593 *   public:
594 *   using VectorType = LinearAlgebra::distributed::Vector<number>;
595 *  
596 *   using FECellIntegrator = FEEvaluation<dim, -1, 0, 1, number>;
597 *  
598 *   LaplaceOperator() = default;
599 *  
600 *   LaplaceOperator(const hp::MappingCollection<dim> &mapping,
601 *   const DoFHandler<dim> &dof_handler,
602 *   const hp::QCollection<dim> &quad,
603 *   const AffineConstraints<number> &constraints,
604 *   VectorType &system_rhs);
605 *  
606 *   void reinit(const hp::MappingCollection<dim> &mapping,
607 *   const DoFHandler<dim> &dof_handler,
608 *   const hp::QCollection<dim> &quad,
609 *   const AffineConstraints<number> &constraints,
610 *   VectorType &system_rhs);
611 *  
612 *   types::global_dof_index m() const;
613 *  
614 *   number el(unsigned int, unsigned int) const;
615 *  
616 *   void initialize_dof_vector(VectorType &vec) const;
617 *  
618 *   void vmult(VectorType &dst, const VectorType &src) const;
619 *  
620 *   void Tvmult(VectorType &dst, const VectorType &src) const;
621 *  
622 *   const TrilinosWrappers::SparseMatrix &get_system_matrix() const;
623 *  
624 *   void compute_inverse_diagonal(VectorType &diagonal) const;
625 *  
626 *   private:
627 *   void do_cell_integral_local(FECellIntegrator &integrator) const;
628 *  
629 *   void do_cell_integral_global(FECellIntegrator &integrator,
630 *   VectorType &dst,
631 *   const VectorType &src) const;
632 *  
633 *  
634 *   void do_cell_integral_range(
635 *   const MatrixFree<dim, number> &matrix_free,
636 *   VectorType &dst,
637 *   const VectorType &src,
638 *   const std::pair<unsigned int, unsigned int> &range) const;
639 *  
640 *   MatrixFree<dim, number> matrix_free;
641 *  
642 * @endcode
643 *
644 * To solve the equation system on the coarsest level with an AMG
645 * preconditioner, we need an actual system matrix on the coarsest level.
646 * For this purpose, we provide a mechanism that optionally computes a
647 * matrix from the matrix-free formulation, for which we introduce a
648 * dedicated SparseMatrix object. In the default case, this matrix stays
649 * empty. Once `get_system_matrix()` is called, this matrix is filled (lazy
650 * allocation). Since this is a `const` function, we need the "mutable"
651 * keyword here. We also need the constraints object to build the matrix.
652 *
653 * @code
654 *   AffineConstraints<number> constraints;
655 *   mutable TrilinosWrappers::SparseMatrix system_matrix;
656 *   };
657 *  
658 *  
659 *  
660 * @endcode
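 *
 * To summarize how this interface will be used later on, a condensed
 * sketch (argument names abbreviated; `VectorType` is the alias from
 * above):
 * @code
 *   LaplaceOperator<dim, double> op;
 *   VectorType                   system_rhs, u, v;
 *   op.reinit(mapping, dof_handler, quadrature, constraints, system_rhs);
 *   op.initialize_dof_vector(u);
 *   op.initialize_dof_vector(v);
 *   op.vmult(v, u); // v = A u, evaluated without an assembled matrix
 * @endcode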
661 *
662 * The following section contains functions to initialize and reinitialize
663 * the class. In particular, these functions initialize the internal
664 * MatrixFree instance and compute the system
665 * right-hand-side vector.
666 *
667 * @code
668 *   template <int dim, typename number>
669 *   LaplaceOperator<dim, number>::LaplaceOperator(
670 *   const hp::MappingCollection<dim> &mapping,
671 *   const DoFHandler<dim> &dof_handler,
672 *   const hp::QCollection<dim> &quad,
673 *   const AffineConstraints<number> &constraints,
674 *   VectorType &system_rhs)
675 *   {
676 *   this->reinit(mapping, dof_handler, quad, constraints, system_rhs);
677 *   }
678 *  
679 *  
680 *  
681 *   template <int dim, typename number>
682 *   void LaplaceOperator<dim, number>::reinit(
683 *   const hp::MappingCollection<dim> &mapping,
684 *   const DoFHandler<dim> &dof_handler,
685 *   const hp::QCollection<dim> &quad,
686 *   const AffineConstraints<number> &constraints,
687 *   VectorType &system_rhs)
688 *   {
689 * @endcode
690 *
691 * Clear internal data structures (in the case that the operator is reused).
692 *
693 * @code
694 *   this->system_matrix.clear();
695 *  
696 * @endcode
697 *
698 * Copy the constraints, since they might be needed for computation of the
699 * system matrix later on.
700 *
701 * @code
702 *   this->constraints.copy_from(constraints);
703 *  
704 * @endcode
705 *
706 * Set up MatrixFree. At the quadrature points, we only need to evaluate
707 * the gradient of the solution and test with the gradient of the shape
708 * functions so that we only need to set the flag `update_gradients`.
709 *
710 * @code
711 *   typename MatrixFree<dim, number>::AdditionalData data;
712 *   data.mapping_update_flags = update_gradients;
713 *  
714 *   matrix_free.reinit(mapping, dof_handler, constraints, quad, data);
715 *  
716 * @endcode
717 *
718 * Compute the right-hand side vector. For this purpose, we set up a second
719 * MatrixFree instance that uses a modified AffineConstraints not containing
720 * the constraints due to Dirichlet-boundary conditions. This modified
721 * operator is applied to a vector with only the Dirichlet values set. The
722 * result is the negative right-hand-side vector.
723 *
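 * In formulas: splitting the solution as @f$u = u_0 + u_D@f$, where
 * @f$u_D@f$ carries the inhomogeneous Dirichlet values and @f$u_0@f$
 * satisfies homogeneous constraints, the system to solve reads
 * @f[
 *   A u_0 = f - A u_D.
 * @f]
 * Applying the unconstrained operator to a vector that holds only the
 * Dirichlet values and subtracting the result from the right-hand side is
 * exactly what the following block implements.
 *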
724 * @code
725 *   {
726 *   AffineConstraints<number> constraints_without_dbc(
727 *   dof_handler.locally_owned_dofs(),
728 *   DoFTools::extract_locally_relevant_dofs(dof_handler));
729 *  
730 *   DoFTools::make_hanging_node_constraints(dof_handler,
731 *   constraints_without_dbc);
732 *   constraints_without_dbc.close();
733 *  
734 *   VectorType b, x;
735 *  
736 *   this->initialize_dof_vector(system_rhs);
737 *  
738 *   MatrixFree<dim, number> matrix_free;
739 *   matrix_free.reinit(
740 *   mapping, dof_handler, constraints_without_dbc, quad, data);
741 *  
742 *   matrix_free.initialize_dof_vector(b);
743 *   matrix_free.initialize_dof_vector(x);
744 *  
745 *   constraints.distribute(x);
746 *  
747 *   matrix_free.cell_loop(&LaplaceOperator::do_cell_integral_range,
748 *   this,
749 *   b,
750 *   x);
751 *  
752 *   constraints.set_zero(b);
753 *  
754 *   system_rhs -= b;
755 *   }
756 *   }
757 *  
758 *  
759 *  
760 * @endcode
761 *
762 * The following functions are implicitly used by the multigrid algorithm,
763 * including the smoothers.
764 *
765
766 *
767 * Since we do not have a matrix, query the DoFHandler for the number of
768 * degrees of freedom.
769 *
770 * @code
771 *   template <int dim, typename number>
772 *   types::global_dof_index LaplaceOperator<dim, number>::m() const
773 *   {
774 *   return matrix_free.get_dof_handler().n_dofs();
775 *   }
776 *  
777 *  
778 *  
779 * @endcode
780 *
781 * Access a particular element in the matrix. This function is neither
782 * needed nor implemented, however, is required to compile the program.
783 *
784 * @code
785 *   template <int dim, typename number>
786 *   number LaplaceOperator<dim, number>::el(unsigned int, unsigned int) const
787 *   {
788 *   DEAL_II_NOT_IMPLEMENTED();
789 *   return 0;
790 *   }
791 *  
792 *  
793 *  
794 * @endcode
795 *
796 * Initialize the given vector. We simply delegate the task to the
797 * MatrixFree function with the same name.
798 *
799 * @code
800 *   template <int dim, typename number>
801 *   void
802 *   LaplaceOperator<dim, number>::initialize_dof_vector(VectorType &vec) const
803 *   {
804 *   matrix_free.initialize_dof_vector(vec);
805 *   }
806 *  
807 *  
808 *  
809 * @endcode
810 *
811 * Perform an operator evaluation by looping with the help of MatrixFree
812 * over all cells and evaluating the effect of the cell integrals (see also:
813 * do_cell_integral_local() and do_cell_integral_global()).
814 *
815 * @code
816 *   template <int dim, typename number>
817 *   void LaplaceOperator<dim, number>::vmult(VectorType &dst,
818 *   const VectorType &src) const
819 *   {
820 *   this->matrix_free.cell_loop(
821 *   &LaplaceOperator::do_cell_integral_range, this, dst, src, true);
822 *   }
823 *  
824 *  
825 *  
826 * @endcode
827 *
828 * Perform the transposed operator evaluation. Since we are considering
829 * symmetric "matrices", this function can simply delegate its task to vmult().
830 *
831 * @code
832 *   template <int dim, typename number>
833 *   void LaplaceOperator<dim, number>::Tvmult(VectorType &dst,
834 *   const VectorType &src) const
835 *   {
836 *   this->vmult(dst, src);
837 *   }
838 *  
839 *  
840 *  
841 * @endcode
842 *
843 * Since we do not have a system matrix, we cannot directly access the
844 * diagonal entries of the matrix. Instead, we compute the diagonal by
845 * performing a sequence of operator evaluations to unit basis vectors.
846 * For this purpose, an optimized function from the MatrixFreeTools
847 * namespace is used. The inversion is performed manually afterwards.
848 *
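 * Conceptually, the result equals what the following naive (and far
 * slower) loop would produce for a generic operator `op`; a sketch for
 * illustration only, ignoring parallel vector layouts:
 * @code
 *   // d_i = e_i^T A e_i: one operator application per unit vector
 *   for (types::global_dof_index i = 0; i < op.m(); ++i)
 *     {
 *       VectorType e_i, column;
 *       op.initialize_dof_vector(e_i);
 *       op.initialize_dof_vector(column);
 *       e_i[i] = 1.;
 *       op.vmult(column, e_i);
 *       diagonal[i] = column[i];
 *     }
 * @endcode
 *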
849 * @code
850 *   template <int dim, typename number>
851 *   void LaplaceOperator<dim, number>::compute_inverse_diagonal(
852 *   VectorType &diagonal) const
853 *   {
854 *   this->matrix_free.initialize_dof_vector(diagonal);
855 *   MatrixFreeTools::compute_diagonal(matrix_free,
856 *   diagonal,
857 *   &LaplaceOperator::do_cell_integral_local,
858 *   this);
859 *  
860 *   for (auto &i : diagonal)
861 *   i = (std::abs(i) > 1.0e-10) ? (1.0 / i) : 1.0;
862 *   }
863 *  
864 *  
865 *  
866 * @endcode
867 *
868 * In the matrix-free context, no system matrix is set up during
869 * initialization of this class. As a consequence, it has to be computed
870 * here if it should be requested. Since the matrix is only computed in
871 * this tutorial for linear elements (on the coarse grid), this is
872 * acceptable.
873 * The matrix entries are obtained via a sequence of operator evaluations.
874 * For this purpose, the optimized function MatrixFreeTools::compute_matrix()
875 * is used. The matrix will only be computed if it has not been set up yet
876 * (lazy allocation).
877 *
878 * @code
879 *   template <int dim, typename number>
880 *   const TrilinosWrappers::SparseMatrix &
881 *   LaplaceOperator<dim, number>::get_system_matrix() const
882 *   {
883 *   if (system_matrix.m() == 0 && system_matrix.n() == 0)
884 *   {
885 *   const auto &dof_handler = this->matrix_free.get_dof_handler();
886 *  
887 *   TrilinosWrappers::SparsityPattern dsp(
888 *   dof_handler.locally_owned_dofs(),
889 *   dof_handler.get_triangulation().get_mpi_communicator());
890 *  
891 *   DoFTools::make_sparsity_pattern(dof_handler, dsp, this->constraints);
892 *  
893 *   dsp.compress();
894 *   system_matrix.reinit(dsp);
895 *  
896 *   MatrixFreeTools::compute_matrix(
897 *   matrix_free,
898 *   constraints,
899 *   system_matrix,
900 *   &LaplaceOperator::do_cell_integral_local,
901 *   this);
902 *   }
903 *  
904 *   return this->system_matrix;
905 *   }
906 *  
907 *  
908 *  
909 * @endcode
910 *
911 * Perform cell integral on a cell batch without gathering and scattering
912 * the values. This function is needed for the MatrixFreeTools functions
913 * since these functions operate directly on the buffers of FEEvaluation.
914 *
915 * @code
916 *   template <int dim, typename number>
917 *   void LaplaceOperator<dim, number>::do_cell_integral_local(
918 *   FECellIntegrator &integrator) const
919 *   {
920 *   integrator.evaluate(EvaluationFlags::gradients);
921 *  
922 *   for (const unsigned int q : integrator.quadrature_point_indices())
923 *   integrator.submit_gradient(integrator.get_gradient(q), q);
924 *  
925 *   integrator.integrate(EvaluationFlags::gradients);
926 *   }
927 *  
928 *  
929 *  
930 * @endcode
931 *
932 * Same as above but with access to the global vectors.
933 *
934 * @code
935 *   template <int dim, typename number>
936 *   void LaplaceOperator<dim, number>::do_cell_integral_global(
937 *   FECellIntegrator &integrator,
938 *   VectorType &dst,
939 *   const VectorType &src) const
940 *   {
941 *   integrator.gather_evaluate(src, EvaluationFlags::gradients);
942 *  
943 *   for (const unsigned int q : integrator.quadrature_point_indices())
944 *   integrator.submit_gradient(integrator.get_gradient(q), q);
945 *  
946 *   integrator.integrate_scatter(EvaluationFlags::gradients, dst);
947 *   }
948 *  
949 *  
950 *  
951 * @endcode
952 *
953 * This function loops over all cell batches within a cell-batch range and
954 * calls the above function.
955 *
956 * @code
957 *   template <int dim, typename number>
958 *   void LaplaceOperator<dim, number>::do_cell_integral_range(
959 *   const MatrixFree<dim, number> &matrix_free,
960 *   VectorType &dst,
961 *   const VectorType &src,
962 *   const std::pair<unsigned int, unsigned int> &range) const
963 *   {
964 *   FECellIntegrator integrator(matrix_free, range);
965 *  
966 *   for (unsigned int cell = range.first; cell < range.second; ++cell)
967 *   {
968 *   integrator.reinit(cell);
969 *  
970 *   do_cell_integral_global(integrator, dst, src);
971 *   }
972 *   }
973 *  
974 *  
975 *  
976 * @endcode
977 *
978 *
979 * <a name="step_75-Solverandpreconditioner"></a>
980 * <h3>Solver and preconditioner</h3>
981 *
982
983 *
984 *
985 * <a name="step_75-Conjugategradientsolverwithmultigridpreconditioner"></a>
986 * <h4>Conjugate-gradient solver with multigrid preconditioner</h4>
987 *
988
989 *
990 * This function solves the equation system with a sequence of provided
991 * multigrid objects. It is meant to be kept as general as possible, hence
992 * the multitude of template parameters.
993 *
994 * @code
995 *   template <typename VectorType,
996 *   int dim,
997 *   typename SystemMatrixType,
998 *   typename LevelMatrixType,
999 *   typename MGTransferType>
1000 *   static void
1001 *   mg_solve(SolverControl &solver_control,
1002 *   VectorType &dst,
1003 *   const VectorType &src,
1004 *   const MultigridParameters &mg_data,
1005 *   const DoFHandler<dim> &dof,
1006 *   const SystemMatrixType &fine_matrix,
1007 *   const MGLevelObject<std::unique_ptr<LevelMatrixType>> &mg_matrices,
1008 *   const MGTransferType &mg_transfer)
1009 *   {
1010 *   AssertThrow(mg_data.coarse_solver.type == "cg_with_amg",
1011 *   ExcNotImplemented());
1012 *   AssertThrow(mg_data.smoother.type == "chebyshev", ExcNotImplemented());
1013 *  
1014 *   const unsigned int min_level = mg_matrices.min_level();
1015 *   const unsigned int max_level = mg_matrices.max_level();
1016 *  
1017 *   using SmootherPreconditionerType = DiagonalMatrix<VectorType>;
1018 *   using SmootherType = PreconditionChebyshev<LevelMatrixType,
1019 *   VectorType,
1020 *   SmootherPreconditionerType>;
1021 *   using PreconditionerType = PreconditionMG<dim, VectorType, MGTransferType>;
1022 *  
1023 * @endcode
1024 *
1025 * We initialize level operators and Chebyshev smoothers here.
1026 *
1027 * @code
1028 *   mg::Matrix<VectorType> mg_matrix(mg_matrices);
1029 *  
1030 *   MGLevelObject<typename SmootherType::AdditionalData> smoother_data(
1031 *   min_level, max_level);
1032 *  
1033 *   for (unsigned int level = min_level; level <= max_level; ++level)
1034 *   {
1035 *   smoother_data[level].preconditioner =
1036 *   std::make_shared<SmootherPreconditionerType>();
1037 *   mg_matrices[level]->compute_inverse_diagonal(
1038 *   smoother_data[level].preconditioner->get_vector());
1039 *   smoother_data[level].smoothing_range = mg_data.smoother.smoothing_range;
1040 *   smoother_data[level].degree = mg_data.smoother.degree;
1041 *   smoother_data[level].eig_cg_n_iterations =
1042 *   mg_data.smoother.eig_cg_n_iterations;
1043 *   }
1044 *  
1045 *   MGSmootherPrecondition<LevelMatrixType, SmootherType, VectorType>
1046 *   mg_smoother;
1047 *   mg_smoother.initialize(mg_matrices, smoother_data);
1048 *  
1049 * @endcode
1050 *
1051 * Next, we initialize the coarse-grid solver. We use the conjugate-gradient
1052 * method with AMG as preconditioner.
1053 *
1054 * @code
1055 *   ReductionControl coarse_grid_solver_control(mg_data.coarse_solver.maxiter,
1056 *   mg_data.coarse_solver.abstol,
1057 *   mg_data.coarse_solver.reltol,
1058 *   false,
1059 *   false);
1060 *   SolverCG<VectorType> coarse_grid_solver(coarse_grid_solver_control);
1061 *  
1062 *   std::unique_ptr<MGCoarseGridBase<VectorType>> mg_coarse;
1063 *  
1064 *   TrilinosWrappers::PreconditionAMG precondition_amg;
1065 *   TrilinosWrappers::PreconditionAMG::AdditionalData amg_data;
1066 *   amg_data.smoother_sweeps = mg_data.coarse_solver.smoother_sweeps;
1067 *   amg_data.n_cycles = mg_data.coarse_solver.n_cycles;
1068 *   amg_data.smoother_type = mg_data.coarse_solver.smoother_type.c_str();
1069 *  
1070 *   precondition_amg.initialize(mg_matrices[min_level]->get_system_matrix(),
1071 *   amg_data);
1072 *  
1073 *   mg_coarse =
1074 *   std::make_unique<MGCoarseGridIterativeSolver<VectorType,
1075 *   SolverCG<VectorType>,
1076 *   LevelMatrixType,
1077 *   decltype(precondition_amg)>>(
1078 *   coarse_grid_solver, *mg_matrices[min_level], precondition_amg);
1079 *  
1080 * @endcode
1081 *
1082 * Finally, we create the Multigrid object, convert it to a preconditioner,
1083 * and use it inside of a conjugate-gradient solver to solve the linear
1084 * system of equations.
1085 *
1086 * @code
1087 *   Multigrid<VectorType> mg(
1088 *   mg_matrix, *mg_coarse, mg_transfer, mg_smoother, mg_smoother);
1089 *  
1090 *   PreconditionerType preconditioner(dof, mg, mg_transfer);
1091 *  
1092 *   SolverCG<VectorType>(solver_control)
1093 *   .solve(fine_matrix, dst, src, preconditioner);
1094 *   }
1095 *  
1096 *  
1097 *  
1098 * @endcode
1099 *
1100 *
1101 * <a name="step_75-Hybridpolynomialgeometricglobalcoarseningmultigridpreconditioner"></a>
1102 * <h4>Hybrid polynomial/geometric-global-coarsening multigrid preconditioner</h4>
1103 *
1104
1105 *
1106 * The above function deals with the actual solution for a given sequence of
1107 * multigrid objects. This function creates the actual multigrid levels, in
1108 * particular the operators, and the transfer operator as a
1109 * MGTransferGlobalCoarsening object.
1110 *
1111 * @code
1112 *   template <typename VectorType, typename OperatorType, int dim>
1113 *   void solve_with_gmg(SolverControl &solver_control,
1114 *   const OperatorType &system_matrix,
1115 *   VectorType &dst,
1116 *   const VectorType &src,
1117 *   const MultigridParameters &mg_data,
1118 *   const hp::MappingCollection<dim> &mapping_collection,
1119 *   const DoFHandler<dim> &dof_handler,
1120 *   const hp::QCollection<dim> &quadrature_collection)
1121 *   {
1122 * @endcode
1123 *
1124 * Create a DoFHandler and operator for each multigrid level,
1125 * as well as create transfer operators. To be able to
1126 * set up the operators, we need a set of DoFHandler that we create
1127 * via global coarsening of p or h. For the latter, we also need a sequence
1128 * of Triangulation objects that are obtained by
1129 * Triangulation::coarsen_global().
1130 *
1131
1132 *
1133 * In case no h-transfer is requested, we provide an empty deleter for the
1134 * `emplace_back()` function, since the Triangulation of our DoFHandler is
1135 * an external field and its destructor is called somewhere else.
1136 *
1137 * @code
1138 *   MGLevelObject<DoFHandler<dim>> dof_handlers;
1139 *   MGLevelObject<std::unique_ptr<OperatorType>> operators;
1140 *   MGLevelObject<MGTwoLevelTransfer<dim, VectorType>> transfers;
1141 *  
1142 *   std::vector<std::shared_ptr<const Triangulation<dim>>>
1143 *   coarse_grid_triangulations;
1144 *   if (mg_data.transfer.perform_h_transfer)
1145 *   coarse_grid_triangulations =
1146 *   MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence(
1147 *   dof_handler.get_triangulation());
1148 *   else
1149 *   coarse_grid_triangulations.emplace_back(
1150 *   &(dof_handler.get_triangulation()), [](auto *) {});
1151 *  
1152 * @endcode
1153 *
1154 * Determine the total number of levels for the multigrid operation and
1155 * allocate sufficient memory for all levels.
1156 *
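 * As a concrete example of the bookkeeping below: with three
 * triangulations in the geometric coarsening sequence (i.e.,
 * `n_h_levels = 2`) and a polynomial sequence @f$\{1,2,3\}@f$ on the
 * finest mesh (`n_p_levels = 3`), the hierarchy consists of levels
 * @f$0,\dots,4@f$: two coarser meshes at the bottom, and three levels on
 * the finest mesh on which only the polynomial degree changes.
 *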
1157 * @code
1158 *   const unsigned int n_h_levels = coarse_grid_triangulations.size() - 1;
1159 *  
1160 *   const auto get_max_active_fe_degree = [&](const auto &dof_handler) {
1161 *   unsigned int max = 0;
1162 *  
1163 *   for (auto &cell : dof_handler.active_cell_iterators())
1164 *   if (cell->is_locally_owned())
1165 *   max =
1166 *   std::max(max, dof_handler.get_fe(cell->active_fe_index()).degree);
1167 *  
1168 *   return Utilities::MPI::max(max, MPI_COMM_WORLD);
1169 *   };
1170 *  
1171 *   const unsigned int n_p_levels =
1172 *   MGTransferGlobalCoarseningTools::create_polynomial_coarsening_sequence(
1173 *   get_max_active_fe_degree(dof_handler), mg_data.transfer.p_sequence)
1174 *   .size();
1175 *  
1176 *   std::map<unsigned int, unsigned int> fe_index_for_degree;
1177 *   for (unsigned int i = 0; i < dof_handler.get_fe_collection().size(); ++i)
1178 *   {
1179 *   const unsigned int degree = dof_handler.get_fe(i).degree;
1180 *   Assert(fe_index_for_degree.find(degree) == fe_index_for_degree.end(),
1181 *   ExcMessage("FECollection does not contain unique degrees."));
1182 *   fe_index_for_degree[degree] = i;
1183 *   }
1184 *  
1185 *   unsigned int minlevel = 0;
1186 *   unsigned int maxlevel = n_h_levels + n_p_levels - 1;
1187 *  
1188 *   dof_handlers.resize(minlevel, maxlevel);
1189 *   operators.resize(minlevel, maxlevel);
1190 *   transfers.resize(minlevel, maxlevel);
1191 *  
1192 * @endcode
1193 *
1194 * Loop from the minimum (coarsest) to the maximum (finest) level and set up the
1195 * DoFHandler accordingly. We start with the h-levels, where we distribute
1196 * linear elements on increasingly finer meshes.
1197 *
1198 * @code
1199 *   for (unsigned int l = 0; l < n_h_levels; ++l)
1200 *   {
1201 *   dof_handlers[l].reinit(*coarse_grid_triangulations[l]);
1202 *   dof_handlers[l].distribute_dofs(dof_handler.get_fe_collection());
1203 *   }
1204 *  
1205 * @endcode
1206 *
1207 * After we have reached the finest mesh, we will adjust the polynomial degrees
1208 * on each level. We reverse-iterate over our data structure and start at
1209 * the finest mesh that contains all information about the active FE
1210 * indices. We then lower the polynomial degree of each cell level by level.
1211 *
1212 * @code
1213 *   for (unsigned int i = 0, l = maxlevel; i < n_p_levels; ++i, --l)
1214 *   {
1215 *   dof_handlers[l].reinit(dof_handler.get_triangulation());
1216 *  
1217 *   if (l == maxlevel) // finest level
1218 *   {
1219 *   auto &dof_handler_mg = dof_handlers[l];
1220 *  
1221 *   auto cell_other = dof_handler.begin_active();
1222 *   for (auto &cell : dof_handler_mg.active_cell_iterators())
1223 *   {
1224 *   if (cell->is_locally_owned())
1225 *   cell->set_active_fe_index(cell_other->active_fe_index());
1226 *   ++cell_other;
1227 *   }
1228 *   }
1229 *   else // coarse level
1230 *   {
1231 *   auto &dof_handler_fine = dof_handlers[l + 1];
1232 *   auto &dof_handler_coarse = dof_handlers[l + 0];
1233 *  
1234 *   auto cell_other = dof_handler_fine.begin_active();
1235 *   for (auto &cell : dof_handler_coarse.active_cell_iterators())
1236 *   {
1237 *   if (cell->is_locally_owned())
1238 *   {
1239 *   const unsigned int next_degree =
1240 *   MGTransferGlobalCoarseningTools::
1241 *   create_next_polynomial_coarsening_degree(
1242 *   cell_other->get_fe().degree,
1243 *   mg_data.transfer.p_sequence);
1244 *   Assert(fe_index_for_degree.find(next_degree) !=
1245 *   fe_index_for_degree.end(),
1246 *   ExcMessage("Next polynomial degree in sequence "
1247 *   "does not exist in FECollection."));
1248 *  
1249 *   cell->set_active_fe_index(fe_index_for_degree[next_degree]);
1250 *   }
1251 *   ++cell_other;
1252 *   }
1253 *   }
1254 *  
1255 *   dof_handlers[l].distribute_dofs(dof_handler.get_fe_collection());
1256 *   }
1257 *  
1258 * @endcode
1259 *
1260 * Next, we will create all data structures additionally needed on each
1261 * multigrid level. This involves determining constraints with homogeneous
1262 * Dirichlet boundary conditions, and building the operator just like on the
1263 * active level.
1264 *
1265 * @code
1266 *   MGLevelObject<AffineConstraints<typename VectorType::value_type>>
1267 *   constraints(minlevel, maxlevel);
1268 *  
1269 *   for (unsigned int level = minlevel; level <= maxlevel; ++level)
1270 *   {
1271 *   const auto &dof_handler = dof_handlers[level];
1272 *   auto &constraint = constraints[level];
1273 *  
1274 *   constraint.reinit(dof_handler.locally_owned_dofs(),
1275 *   DoFTools::extract_locally_relevant_dofs(dof_handler));
1276 *  
1278 *   VectorTools::interpolate_boundary_values(mapping_collection,
1279 *   dof_handler,
1280 *   0,
1281 *   Functions::ZeroFunction<dim>(),
1282 *   constraint);
1283 *   constraint.close();
1284 *  
1285 *   VectorType dummy;
1286 *  
1287 *   operators[level] = std::make_unique<OperatorType>(mapping_collection,
1288 *   dof_handler,
1289 *   quadrature_collection,
1290 *   constraint,
1291 *   dummy);
1292 *   }
1293 *  
1294 * @endcode
1295 *
1296 * Set up intergrid operators and collect transfer operators within a single
1297 * operator as needed by the Multigrid solver class.
1298 *
1299 * @code
1300 *   for (unsigned int level = minlevel; level < maxlevel; ++level)
1301 *   transfers[level + 1].reinit(dof_handlers[level + 1],
1302 *   dof_handlers[level],
1303 *   constraints[level + 1],
1304 *   constraints[level]);
1305 *  
1306 *   MGTransferGlobalCoarsening<dim, VectorType> transfer(
1307 *   transfers, [&](const auto l, auto &vec) {
1308 *   operators[l]->initialize_dof_vector(vec);
1309 *   });
1310 *  
1311 * @endcode
1312 *
1313 * Finally, proceed to solve the problem with multigrid.
1314 *
1315 * @code
1316 *   mg_solve(solver_control,
1317 *   dst,
1318 *   src,
1319 *   mg_data,
1320 *   dof_handler,
1321 *   system_matrix,
1322 *   operators,
1323 *   transfer);
1324 *   }
1325 *  
1326 *  
1327 *  
1328 * @endcode
1329 *
1330 *
1331 * <a name="step_75-ThecodeLaplaceProblemcodeclasstemplate"></a>
1332 * <h3>The <code>LaplaceProblem</code> class template</h3>
1333 *
1334
1335 *
1336 * Now we will finally declare the main class of this program, which solves
1337 * the Laplace equation on subsequently refined function spaces. Its structure
1338 * will look familiar as it is similar to the main classes of @ref step_27 "step-27" and
1339 * @ref step_40 "step-40". There are basically just two additions:
1340 * - The SparseMatrix object that would hold the system matrix has been
1341 * replaced by an object of the LaplaceOperator class for the MatrixFree
1342 * formulation.
1343 * - An object of parallel::CellWeights, which will help us with load
1344 * balancing, has been added.
1345 *
1346 * @code
1347 *   template <int dim>
1348 *   class LaplaceProblem
1349 *   {
1350 *   public:
1351 *   LaplaceProblem(const Parameters &parameters);
1352 *  
1353 *   void run();
1354 *  
1355 *   private:
1356 *   void initialize_grid();
1357 *   void setup_system();
1358 *   void print_diagnostics();
1359 *   void solve_system();
1360 *   void compute_indicators();
1361 *   void adapt_resolution();
1362 *   void output_results(const unsigned int cycle);
1363 *  
1364 *   MPI_Comm mpi_communicator;
1365 *  
1366 *   const Parameters prm;
1367 *  
1368 *   parallel::distributed::Triangulation<dim> triangulation;
1369 *   DoFHandler<dim> dof_handler;
1370 *  
1371 *   hp::MappingCollection<dim> mapping_collection;
1372 *   hp::FECollection<dim> fe_collection;
1373 *   hp::QCollection<dim> quadrature_collection;
1374 *   hp::QCollection<dim - 1> face_quadrature_collection;
1375 *  
1376 *   IndexSet locally_owned_dofs;
1377 *   IndexSet locally_relevant_dofs;
1378 *  
1379 *   AffineConstraints<double> constraints;
1380 *  
1381 *   LaplaceOperator<dim, double> laplace_operator;
1382 *   LinearAlgebra::distributed::Vector<double> locally_relevant_solution;
1383 *   LinearAlgebra::distributed::Vector<double> system_rhs;
1384 *  
1385 *   std::unique_ptr<FESeries::Legendre<dim>> legendre;
1386 *   std::unique_ptr<parallel::CellWeights<dim>> cell_weights;
1387 *  
1388 *   Vector<float> estimated_error_per_cell;
1389 *   Vector<float> hp_decision_indicators;
1390 *  
1391 *   ConditionalOStream pcout;
1392 *   mutable TimerOutput computing_timer;
1393 *   };
1394 *  
1395 *  
1396 *  
1397 * @endcode
1398 *
1399 *
1400 * <a name="step_75-ThecodeLaplaceProblemcodeclassimplementation"></a>
1401 * <h3>The <code>LaplaceProblem</code> class implementation</h3>
1402 *
1403
1404 *
1405 *
1406 * <a name="step_75-Constructor"></a>
1407 * <h4>Constructor</h4>
1408 *
1409
1410 *
1411 * The constructor starts with an initializer list that looks similar to the
1412 * one of @ref step_40 "step-40". We again prepare the ConditionalOStream object to allow
1413 * only the first process to output anything over the console, and initialize
1414 * the computing timer properly.
1415 *
1416 * @code
1417 *   template <int dim>
1418 *   LaplaceProblem<dim>::LaplaceProblem(const Parameters &parameters)
1419 *   : mpi_communicator(MPI_COMM_WORLD)
1420 *   , prm(parameters)
1421 *   , triangulation(mpi_communicator)
1422 *   , dof_handler(triangulation)
1423 *   , pcout(std::cout,
1424 *   (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
1425 *   , computing_timer(mpi_communicator,
1426 *   pcout,
1427 *   TimerOutput::never,
1428 *   TimerOutput::wall_times)
1429 *   {
1430 *   Assert(prm.min_h_level <= prm.max_h_level,
1431 *   ExcMessage(
1432 *   "Triangulation level limits have been incorrectly set up."));
1433 *   Assert(prm.min_p_degree <= prm.max_p_degree,
1434 *   ExcMessage("FECollection degrees have been incorrectly set up."));
1435 *  
1436 * @endcode
1437 *
1438 * We need to prepare the data structures for the hp-functionality in the
1439 * actual body of the constructor, and create corresponding objects for
1440 * every degree in the specified range from the parameter struct. As we are
1441 * only dealing with non-distorted rectangular cells, a linear mapping
1442 * object is sufficient in this context.
1443 *
1444
1445 *
1446 * In the Parameters struct, we provide ranges for levels on which the
1447 * function space is operating with a reasonable resolution. The multigrid
1448 * algorithm requires linear elements on the coarsest possible level. So we
1449 * start with the lowest polynomial degree and fill the collection with
1450 * consecutively higher degrees until the maximum specified in the parameters is
1451 * reached.
1452 *
1453 * @code
1454 *   mapping_collection.push_back(MappingQ1<dim>());
1455 *  
1456 *   for (unsigned int degree = 1; degree <= prm.max_p_degree; ++degree)
1457 *   {
1458 *   fe_collection.push_back(FE_Q<dim>(degree));
1459 *   quadrature_collection.push_back(QGauss<dim>(degree + 1));
1460 *   face_quadrature_collection.push_back(QGauss<dim - 1>(degree + 1));
1461 *   }
1462 *  
1463 * @endcode
1464 *
1465 * As our FECollection contains more finite elements than we want to use for
1466 * the finite element approximation of our solution, we would like to limit
1467 * the range on which active FE indices can operate. For this, the
1468 * FECollection class allows to register a hierarchy that determines the
1469 * succeeding and preceding finite element in case of p-refinement and
1470 * p-coarsening, respectively. All functions in the hp-adaptive machinery will
1471 * consult this hierarchy to determine future FE indices. We will register
1472 * such a hierarchy that only works on finite elements with polynomial
1473 * degrees in the proposed range <code>[min_p_degree, max_p_degree]</code>.
1474 *
1475 * @code
1476 *   const unsigned int min_fe_index = prm.min_p_degree - 1;
1477 *   fe_collection.set_hierarchy(
1478 *   /*next_index=*/
1479 *   [](const typename hp::FECollection<dim> &fe_collection,
1480 *   const unsigned int fe_index) -> unsigned int {
1481 *   return ((fe_index + 1) < fe_collection.size()) ? fe_index + 1 :
1482 *   fe_index;
1483 *   },
1484 *   /*previous_index=*/
1485 *   [min_fe_index](const typename hp::FECollection<dim> &,
1486 *   const unsigned int fe_index) -> unsigned int {
1487 *   Assert(fe_index >= min_fe_index,
1488 *   ExcMessage("Finite element is not part of hierarchy!"));
1489 *   return (fe_index > min_fe_index) ? fe_index - 1 : fe_index;
1490 *   });
1491 *  
1492 * @endcode
1493 *
1494 * We initialize the FESeries::Legendre object in the default configuration
1495 * for smoothness estimation.
1496 *
1497 * @code
1498 *   legendre = std::make_unique<FESeries::Legendre<dim>>(
1499 *   SmoothnessEstimator::Legendre::default_fe_series(fe_collection));
1500 *  
1501 * @endcode
1502 *
1503 * The next part is going to be tricky. During execution of refinement, a
1504 * few hp-algorithms need to interfere with the actual refinement process on
1505 * the Triangulation object. We do this by connecting several functions to
1506 * Triangulation::Signals: signals will be called at different stages during
1507 * the actual refinement process and trigger all connected functions. We
1508 * require this functionality for load balancing and to limit the polynomial
1509 * degrees of neighboring cells.
1510 *
1511
1512 *
1513 * For the former, we would like to assign a weight to every cell that is
1514 * proportional to the number of degrees of freedom of its future finite
1515 * element. The library offers a class parallel::CellWeights that allows to
1516 * easily attach individual weights at the right place during the refinement
1517 * process, i.e., after all refine and coarsen flags have been set correctly
1518 * for hp-adaptation and right before repartitioning for load balancing is
1519 * about to happen. Functions can be registered that will attach weights in
1520 * the form @f$a (n_\text{dofs})^b@f$ with a provided pair of parameters
1521 * @f$(a,b)@f$. We register such a function in the following.
1522 *
1523
1524 *
1525 * In this tutorial, we assume that the workload on every cell scales
1526 * linearly with the number of degrees of freedom owned. We set the
1527 * parameters for cell weighting correspondingly: A weighting factor of @f$1@f$
1528 * and an exponent of @f$1@f$ (see the definitions of `weighting_factor` and
1529 * `weighting_exponent` above).
1530 *
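 * To give concrete numbers: with @f$(a,b) = (1,1)@f$, a @f$Q_2@f$ cell in
 * 2d (9 DoFs) receives weight @f$9@f$ and a @f$Q_6@f$ cell (49 DoFs)
 * receives weight @f$49@f$, so the partitioner treats one high-order cell
 * like several low-order ones.
 *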
1531 * @code
1532 *   cell_weights = std::make_unique<parallel::CellWeights<dim>>(
1533 *   dof_handler,
1534 *   parallel::CellWeights<dim>::ndofs_weighting(
1535 *   {prm.weighting_factor, prm.weighting_exponent}));
1536 *  
1537 * @endcode
1538 *
1539 * In h-adaptive applications, we ensure a 2:1 mesh balance by limiting the
1540 * difference of refinement levels of neighboring cells to one. With the
1541 * second tool in hand, we are able to do the same for the
1542 * p-levels on neighboring cells: levels of future finite elements are not
1543 * allowed to differ by more than a specified difference. The function
1544 * hp::Refinement::limit_p_level_difference() takes care of this, but needs
1545 * to be connected to a very specific signal in the parallel context. The
1546 * issue is that we need to know how the mesh will be actually refined to set
1547 * future FE indices accordingly. As we ask the p4est oracle to perform
1548 * refinement, we need to ensure that the Triangulation has been updated
1549 * with the adaptation flags of the oracle first. An instantiation of
1550 * parallel::distributed::TemporarilyMatchRefineFlags does exactly
1551 * that for the duration of its life. Thus, we will create an object of this
1552 * class right before limiting the p-level difference, and connect the
1553 * corresponding lambda function to the signal
1554 * Triangulation::Signals::post_p4est_refinement, which will be triggered
1555 * after the oracle got refined, but before the Triangulation is refined.
1556 * Furthermore, we specify that this function will be connected to the front
1557 * of the signal, to ensure that the modification is performed before any
1558 * other function connected to the same signal.
1559 *
1560 * @code
1561 *   triangulation.signals.post_p4est_refinement.connect(
1562 *   [&, min_fe_index]() {
1563 *   const parallel::distributed::TemporarilyMatchRefineFlags<dim>
1564 *   refine_modifier(triangulation);
1565 *   hp::Refinement::limit_p_level_difference(dof_handler,
1566 *   prm.max_p_level_difference,
1567 *   /*contains=*/min_fe_index);
1568 *   },
1569 *   boost::signals2::at_front);
1570 *   }
1571 *  
1572 *  
1573 *  
1574 * @endcode
1575 *
1576 *
1577 * <a name="step_75-LaplaceProbleminitialize_grid"></a>
1578 * <h4>LaplaceProblem::initialize_grid</h4>
1579 *
1580
1581 *
1582 * For an L-shaped domain, we could use the function GridGenerator::hyper_L()
1583 * as demonstrated in @ref step_50 "step-50". However in the 2d case, that particular
1584 * function removes the first quadrant, while we need the fourth quadrant
1585 * removed in our scenario. Thus, we will use a different function
1586 * GridGenerator::subdivided_hyper_L() which gives us more options to create
1587 * the mesh. Furthermore, we formulate that function in a way that it also
1588 * generates a 3d mesh; the 2d L-shaped domain will simply be extruded
1589 * in the positive z-direction.
1590 *
1591
1592 *
1593 * We first pretend to build a GridGenerator::subdivided_hyper_rectangle().
1594 * The parameters that we need to provide are Point objects for the lower left
1595 * and top right corners, as well as the number of repetitions that the base
1596 * mesh will have in each direction. We provide them for the first two
1597 * dimensions and treat the higher third dimension separately.
1598 *
1599
1600 *
1601 * To create a L-shaped domain, we need to remove the excess cells. For this,
1602 * we specify the `cells_to_remove` accordingly. We would like to
1603 * remove one cell in every dimension from the negative direction, but remove one
1604 * from the positive x-direction.
1605 *
1606
1607 *
1608 * On the coarse grid, we set the initial active FE indices and distribute the
1609 * degrees of freedom once. We do that in order to assign the hp::FECollection
1610 * to the DoFHandler, so that all cells know how many DoFs they are going to
1611 * have. This step is mandatory for the weighted load balancing algorithm,
1612 * which will be called implicitly in
1613 * parallel::distributed::Triangulation::refine_global().
1614 *
1615 * @code
1616 *   template <int dim>
1617 *   void LaplaceProblem<dim>::initialize_grid()
1618 *   {
1619 *   TimerOutput::Scope t(computing_timer, "initialize grid");
1620 *  
1621 *   std::vector<unsigned int> repetitions(dim);
1622 *   Point<dim> bottom_left, top_right;
1623 *   for (unsigned int d = 0; d < dim; ++d)
1624 *   if (d < 2)
1625 *   {
1626 *   repetitions[d] = 2;
1627 *   bottom_left[d] = -1.;
1628 *   top_right[d] = 1.;
1629 *   }
1630 *   else
1631 *   {
1632 *   repetitions[d] = 1;
1633 *   bottom_left[d] = 0.;
1634 *   top_right[d] = 1.;
1635 *   }
1636 *  
1637 *   std::vector<int> cells_to_remove(dim, 1);
1638 *   cells_to_remove[0] = -1;
1639 *  
1640 *   GridGenerator::subdivided_hyper_L(
1641 *   triangulation, repetitions, bottom_left, top_right, cells_to_remove);
1642 *  
1643 *   const unsigned int min_fe_index = prm.min_p_degree - 1;
1644 *   for (const auto &cell : dof_handler.active_cell_iterators())
1645 *   if (cell->is_locally_owned())
1646 *   cell->set_active_fe_index(min_fe_index);
1647 *  
1648 *   dof_handler.distribute_dofs(fe_collection);
1649 *  
1650 *   triangulation.refine_global(prm.min_h_level);
1651 *   }
1652 *  
1653 *  
1654 *  
1655 * @endcode
1656 *
1657 *
1658 * <a name="step_75-LaplaceProblemsetup_system"></a>
1659 * <h4>LaplaceProblem::setup_system</h4>
1660 *
1661
1662 *
1663 * This function looks exactly the same as the one of @ref step_40 "step-40", but you will
1664 * notice the absence of the system matrix as well as the scaffold that
1665 * surrounds it. Instead, we will initialize the matrix-free LaplaceOperator
1666 * here. For boundary conditions, we will use the Solution class introduced
1667 * earlier in this tutorial.
1668 *
1669 * @code
1670 *   template <int dim>
1671 *   void LaplaceProblem<dim>::setup_system()
1672 *   {
1673 *   TimerOutput::Scope t(computing_timer, "setup system");
1674 *  
1675 *   dof_handler.distribute_dofs(fe_collection);
1676 *  
1677 *   locally_owned_dofs = dof_handler.locally_owned_dofs();
1678 *   locally_relevant_dofs =
1679 *   DoFTools::extract_locally_relevant_dofs(dof_handler);
1680 *  
1681 *   locally_relevant_solution.reinit(locally_owned_dofs,
1682 *   locally_relevant_dofs,
1683 *   mpi_communicator);
1684 *   system_rhs.reinit(locally_owned_dofs, mpi_communicator);
1685 *  
1686 *   constraints.clear();
1687 *   constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
1688 *   DoFTools::make_hanging_node_constraints(dof_handler, constraints);
1689 *   VectorTools::interpolate_boundary_values(
1690 *   mapping_collection, dof_handler, 0, Solution<dim>(), constraints);
1691 *   constraints.close();
1692 *  
1693 *   laplace_operator.reinit(mapping_collection,
1694 *   dof_handler,
1695 *   quadrature_collection,
1696 *   constraints,
1697 *   system_rhs);
1698 *   }
1699 *  
1700 *  
1701 *  
1702 * @endcode
1703 *
1704 *
1705 * <a name="step_75-LaplaceProblemprint_diagnostics"></a>
1706 * <h4>LaplaceProblem::print_diagnostics</h4>
1707 *
1708
1709 *
1710 * This is a function that prints additional diagnostics about the equation
1711 * system and its partitioning. In addition to the usual global number of
1712 * active cells and degrees of freedom, we also output their local
1713 * equivalents. For a regulated output, we will communicate the local
1714 * quantities with a Utilities::MPI::gather operation to the first process
1715 * which will then output all information. Output of local quantities is
1716 * limited to the first 8 processes to avoid cluttering the terminal.
1717 *
1718
1719 *
1721 * To ensure that we do not access invalid memory with the insertion operator
1722 * (`<<`) on these processes, we need to check that the containers are not
1723 * empty.
1724 *
1725
1726 *
1727 * Furthermore, we would like to print the frequencies of the polynomial
1728 * degrees in the numerical discretization. Since this information is only
1729 * stored locally, we will count the finite elements on locally owned cells
1730 * and later communicate them via Utilities::MPI::sum().
1731 *
1732 * @code
1733 *   template <int dim>
1734 *   void LaplaceProblem<dim>::print_diagnostics()
1735 *   {
1736 *   const unsigned int first_n_processes =
1737 *   std::min<unsigned int>(8,
1738 *   Utilities::MPI::n_mpi_processes(mpi_communicator));
1739 *   const bool output_cropped =
1740 *   first_n_processes < Utilities::MPI::n_mpi_processes(mpi_communicator);
1741 *  
1742 *   {
1743 *   pcout << " Number of active cells: "
1744 *   << triangulation.n_global_active_cells() << std::endl
1745 *   << " by partition: ";
1746 *  
1747 *   std::vector<unsigned int> n_active_cells_per_subdomain =
1748 *   Utilities::MPI::gather(mpi_communicator,
1749 *   triangulation.n_locally_owned_active_cells());
1750 *   for (unsigned int i = 0; i < first_n_processes; ++i)
1751 *   if (n_active_cells_per_subdomain.size() > 0)
1752 *   pcout << ' ' << n_active_cells_per_subdomain[i];
1753 *   if (output_cropped)
1754 *   pcout << " ...";
1755 *   pcout << std::endl;
1756 *   }
1757 *  
1758 *   {
1759 *   pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
1760 *   << std::endl
1761 *   << " by partition: ";
1762 *  
1763 *   std::vector<types::global_dof_index> n_dofs_per_subdomain =
1764 *   Utilities::MPI::gather(mpi_communicator,
1765 *   dof_handler.n_locally_owned_dofs());
1766 *   for (unsigned int i = 0; i < first_n_processes; ++i)
1767 *   if (n_dofs_per_subdomain.size() > 0)
1768 *   pcout << ' ' << n_dofs_per_subdomain[i];
1769 *   if (output_cropped)
1770 *   pcout << " ...";
1771 *   pcout << std::endl;
1772 *   }
1773 *  
1774 *   {
1775 *   std::vector<types::global_dof_index> n_constraints_per_subdomain =
1776 *   Utilities::MPI::gather(mpi_communicator, constraints.n_constraints());
1777 *  
1778 *   pcout << " Number of constraints: "
1779 *   << std::accumulate(n_constraints_per_subdomain.begin(),
1780 *   n_constraints_per_subdomain.end(),
1781 *   0)
1782 *   << std::endl
1783 *   << " by partition: ";
1784 *   for (unsigned int i = 0; i < first_n_processes; ++i)
1785 *   if (n_constraints_per_subdomain.size() > 0)
1786 *   pcout << ' ' << n_constraints_per_subdomain[i];
1787 *   if (output_cropped)
1788 *   pcout << " ...";
1789 *   pcout << std::endl;
1790 *   }
1791 *  
1792 *   {
1793 *   std::vector<unsigned int> n_fe_indices(fe_collection.size(), 0);
1794 *   for (const auto &cell : dof_handler.active_cell_iterators())
1795 *   if (cell->is_locally_owned())
1796 *   n_fe_indices[cell->active_fe_index()]++;
1797 *  
1798 *   Utilities::MPI::sum(n_fe_indices, mpi_communicator, n_fe_indices);
1799 *  
1800 *   pcout << " Frequencies of poly. degrees:";
1801 *   for (unsigned int i = 0; i < fe_collection.size(); ++i)
1802 *   if (n_fe_indices[i] > 0)
1803 *   pcout << ' ' << fe_collection[i].degree << ':' << n_fe_indices[i];
1804 *   pcout << std::endl;
1805 *   }
1806 *   }
1807 *  
1808 *  
1809 *  
1810 * @endcode
1811 *
1812 *
1813 * <a name="step_75-LaplaceProblemsolve_system"></a>
1814 * <h4>LaplaceProblem::solve_system</h4>
1815 *
1816
1817 *
1818 * The scaffold around the solution is similar to the one of @ref step_40 "step-40". We
1819 * prepare a vector that matches the requirements of MatrixFree and collect
1820 * the locally-relevant degrees of freedom after we solved the equation system. The
1821 * solution happens with the function introduced earlier.
1822 *
1823 * @code
1824 *   template <int dim>
1825 *   void LaplaceProblem<dim>::solve_system()
1826 *   {
1827 *   TimerOutput::Scope t(computing_timer, "solve system");
1828 *  
1829 *   LinearAlgebra::distributed::Vector<double> completely_distributed_solution;
1830 *   laplace_operator.initialize_dof_vector(completely_distributed_solution);
1831 *  
1832 *   SolverControl solver_control(system_rhs.size(),
1833 *   prm.tolerance_factor * system_rhs.l2_norm());
1834 *  
1835 *   solve_with_gmg(solver_control,
1836 *   laplace_operator,
1837 *   completely_distributed_solution,
1838 *   system_rhs,
1839 *   prm.mg_data,
1840 *   mapping_collection,
1841 *   dof_handler,
1842 *   quadrature_collection);
1843 *  
1844 *   pcout << " Solved in " << solver_control.last_step() << " iterations."
1845 *   << std::endl;
1846 *  
1847 *   constraints.distribute(completely_distributed_solution);
1848 *  
1849 *   locally_relevant_solution.copy_locally_owned_data_from(
1850 *   completely_distributed_solution);
1851 *   locally_relevant_solution.update_ghost_values();
1852 *   }
1853 *  
1854 *  
1855 *  
1856 * @endcode
1857 *
1858 *
1859 * <a name="step_75-LaplaceProblemcompute_indicators"></a>
1860 * <h4>LaplaceProblem::compute_indicators</h4>
1861 *
1862
1863 *
1864 * This function contains only a part of the typical <code>refine_grid</code>
1865 * function from other tutorials and is new in that sense. Here, we will only
1866 * calculate all indicators for adaptation without actually refining the grid. We
1867 * do this for the purpose of writing all indicators to the file system, so we
1868 * store them for later.
1869 *
1870
1871 *
1872 * For the error estimate, we use the familiar KellyErrorEstimator. Setting the
1873 * scaling factor of the underlying face integrals to be dependent on the
1875 * actual polynomial degree of the neighboring elements is favorable in
1876 * hp-adaptive applications @cite davydov2017hp. We can do this by specifying
1877 * the very last parameter from the additional ones you notice. The others
1878 * are actually just the defaults.
1879 *
1880
1881 *
1882 * For the hp-decision indicators, we estimate the smoothness of the solution
1883 * with the FESeries::Legendre object prepared in the constructor. This is
1884 * also one of the reasons why
1885 * we set the minimal polynomial degree to 2 as it seems that the smoothness
1886 * estimation algorithms have trouble with linear elements.
1887 *
1888 * @code
1889 *   template <int dim>
1890 *   void LaplaceProblem<dim>::compute_indicators()
1891 *   {
1892 *   TimerOutput::Scope t(computing_timer, "compute indicators");
1893 *  
1894 *   estimated_error_per_cell.grow_or_shrink(triangulation.n_active_cells());
1895 *   KellyErrorEstimator<dim>::estimate(
1896 *   dof_handler,
1897 *   face_quadrature_collection,
1898 *   std::map<types::boundary_id, const Function<dim> *>(),
1899 *   locally_relevant_solution,
1900 *   estimated_error_per_cell,
1901 *   /*component_mask=*/ComponentMask(),
1902 *   /*coefficients=*/nullptr,
1903 *   /*n_threads=*/numbers::invalid_unsigned_int,
1904 *   /*subdomain_id=*/numbers::invalid_subdomain_id,
1905 *   /*material_id=*/numbers::invalid_material_id,
1906 *   /*strategy=*/
1907 *   KellyErrorEstimator<dim>::Strategy::face_diameter_over_twice_max_degree);
1908 *  
1909 *   hp_decision_indicators.grow_or_shrink(triangulation.n_active_cells());
1910 *   SmoothnessEstimator::Legendre::coefficient_decay(*legendre,
1911 *   dof_handler,
1912 *   locally_relevant_solution,
1913 *   hp_decision_indicators);
1914 *   }
1915 *  
1916 *  
1917 *  
1918 * @endcode
1919 *
1920 *
1921 * <a name="step_75-LaplaceProblemadapt_resolution"></a>
1922 * <h4>LaplaceProblem::adapt_resolution</h4>
1923 *
1924
1925 *
1926 * With the previously calculated indicators, we will finally flag all cells
1927 * for adaptation and also execute refinement in this function. As in previous
1928 * tutorials, we will use the "fixed number" strategy, but now for
1929 * hp-adaptation.
1930 *
1931 * @code
1932 *   template <int dim>
1933 *   void LaplaceProblem<dim>::adapt_resolution()
1934 *   {
1935 *   TimerOutput::Scope t(computing_timer, "adapt resolution");
1936 *  
1937 * @endcode
1938 *
1939 * First, we will set refine and coarsen flags based on the error estimates
1940 * on each cell. There is nothing new here.
1941 *
1942
1943 *
1944 * We will use general refine and coarsen fractions that have been
1945 * elaborated in the other deal.II tutorials: using the fixed number
1946 * strategy, we will flag 30% of all cells for refinement and 3% for
1947 * coarsening, as provided in the Parameters struct.
1948 *
1949 * @code
1950 *   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
1951 *   triangulation,
1952 *   estimated_error_per_cell,
1953 *   prm.refine_fraction,
1954 *   prm.coarsen_fraction);
1955 *  
1956 * @endcode
1957 *
1958 * Next, we will make all adjustments for hp-adaptation. We want to refine
1959 * and coarsen those cells flagged in the previous step, but need to decide
1960 * how we perform that: adjusting the grid resolution or adjusting the
1961 * polynomial degree.
1962 *
1963
1964 *
1965 * The next function call sets future FE indices according to the previously
1966 * calculated smoothness indicators as p-adaptation indicators. These
1967 * indices will only be set on those cells that have refine or coarsen flags
1968 * assigned.
1969 *
1970
1971 *
1972 * For the p-adaptation fractions, we will take an educated guess. Since we
1973 * only expect a single singularity in our scenario, i.e., in the origin of
1974 * the domain, and a smooth solution anywhere else, we would like to
1975 * strongly prefer p-adaptation over h-adaptation. This reflects in
1976 * our choice of a fraction of 90% for both p-refinement and p-coarsening.
1977 *
1978 * @code
1979 *   hp::Refinement::p_adaptivity_fixed_number(dof_handler,
1980 *   hp_decision_indicators,
1981 *   prm.p_refine_fraction,
1982 *   prm.p_coarsen_fraction);
1983 *  
1984 * @endcode
1985 *
1986 * After setting all indicators, we will remove those that exceed the
1987 * specified limits of the provided level ranges in the Parameters struct.
1988 * This limitation is already in place for p-adaptation, since we registered
1989 * a custom
1990 * hierarchy for p-adaptation in the constructor. Now, we need to do this
1991 * manually in the h-adaptive context like in @ref step_31 "step-31".
1992 *
1993
1994 *
1995 * We will iterate over all cells on the designated min and max levels and
1996 * remove the corresponding flags. As an alternative, we could also flag
1997 * these cells for p-adaptation by setting future FE indices accordingly
1998 * instead of simply clearing the refine and coarsen flags.
1999 *
2000 * @code
2001 *   Assert(triangulation.n_levels() >= prm.min_h_level + 1 &&
2002 *   triangulation.n_levels() <= prm.max_h_level + 1,
2003 *   ExcInternalError());
2004 *  
2005 *   if (triangulation.n_levels() > prm.max_h_level)
2006 *   for (const auto &cell :
2007 *   triangulation.active_cell_iterators_on_level(prm.max_h_level))
2008 *   cell->clear_refine_flag();
2009 *  
2010 *   for (const auto &cell :
2011 *   triangulation.active_cell_iterators_on_level(prm.min_h_level))
2012 *   cell->clear_coarsen_flag();
2013 *  
2014 * @endcode
2015 *
2016 * At this stage, we have both the future FE indices and the classic refine
2017 * and coarsen flags set. The latter will be interpreted by
2018 * Triangulation::execute_coarsening_and_refinement() for h-adaptation, and
2019 * our previous modification ensures that the resulting Triangulation stays
2020 * within the limits of the provided level ranges.
2021 *
2022
2023 *
2024 * Now, we would like to only impose one type of adaptation on cells, which
2025 * is what the next function will sort out for us. In short, on cells which
2026 * have both types of adaptation assigned, we will favor the p-adaptation
2027 * one and remove the h-adaptation one.
2028 *
2029 * @code
2030 *   hp::Refinement::choose_p_over_h(dof_handler);
2031 *  
2032 * @endcode
2033 *
2034 * In the end, we are left to execute coarsening and refinement. Here, not
2035 * only the grid will be updated, but also all previous future FE indices
2036 * will become active.
2037 *
2038
2039 *
2040 * Remember that the functions we have attached to triangulation signals in
2041 * the constructor will be triggered in this function call. So there is even
2042 * more happening: weighted repartitioning will be performed to ensure load
2043 * balancing, and we will limit the difference of p-levels between
2044 * neighboring cells.
2045 *
2046 * @code
2047 *   triangulation.execute_coarsening_and_refinement();
2048 *   }
2049 *  
2050 *  
2051 *  
2052 * @endcode
2053 *
2054 *
2055 * <a name="step_75-LaplaceProblemoutput_results"></a>
2056 * <h4>LaplaceProblem::output_results</h4>
2057 *
2058
2059 *
2060 * Writing results to the file system in parallel applications works exactly
2061 * like in @ref step_40 "step-40". In addition to the data containers that we prepared
2062 * throughout the tutorial, we would also like to write out the polynomial
2063 * degree of each finite element on the grid as well as the subdomain each
2064 * cell belongs to. We prepare the necessary containers in the scope of
2065 * this function.
2066 *
2067 * @code
2068 *   template <int dim>
2069 *   void LaplaceProblem<dim>::output_results(const unsigned int cycle)
2070 *   {
2071 *   TimerOutput::Scope t(computing_timer, "output results");
2072 *  
2073 *   Vector<float> fe_degrees(triangulation.n_active_cells());
2074 *   for (const auto &cell : dof_handler.active_cell_iterators())
2075 *   if (cell->is_locally_owned())
2076 *   fe_degrees(cell->active_cell_index()) = cell->get_fe().degree;
2077 *  
2078 *   Vector<float> subdomain(triangulation.n_active_cells());
2079 *   for (auto &subd : subdomain)
2080 *   subd = triangulation.locally_owned_subdomain();
2081 *  
2082 *   DataOut<dim> data_out;
2083 *   data_out.attach_dof_handler(dof_handler);
2084 *   data_out.add_data_vector(locally_relevant_solution, "solution");
2085 *   data_out.add_data_vector(fe_degrees, "fe_degree");
2086 *   data_out.add_data_vector(subdomain, "subdomain");
2087 *   data_out.add_data_vector(estimated_error_per_cell, "error");
2088 *   data_out.add_data_vector(hp_decision_indicators, "hp_indicator");
2089 *   data_out.build_patches(mapping_collection);
2090 *  
2091 *   data_out.write_vtu_with_pvtu_record(
2092 *   "./", "solution", cycle, mpi_communicator, 2, 1);
2093 *   }
2094 *  
2095 *  
2096 *  
2097 * @endcode
2098 *
2099 *
2100 * <a name="step_75-LaplaceProblemrun"></a>
2101 * <h4>LaplaceProblem::run</h4>
2102 *
2103
2104 *
2105 * The actual run function again looks very similar to @ref step_40 "step-40". The only
2106 * addition is the bracketed section that precedes the actual cycle loop.
2107 * Here, we will pre-calculate the Legendre transformation matrices. In
2108 * general, these will be calculated on the fly via lazy allocation whenever
2109 * a certain matrix is needed. For timing purposes however, we would like to
2110 * calculate them all at once before the actual time measurement begins. We
2111 * will thus assign their calculation a designated section in the timer.
2112 *
2113 * @code
2114 *   template <int dim>
2115 *   void LaplaceProblem<dim>::run()
2116 *   {
2117 *   pcout << "Running with Trilinos on "
2118 *   << Utilities::MPI::n_mpi_processes(mpi_communicator)
2119 *   << " MPI rank(s)..." << std::endl;
2120 *  
2121 *   {
2122 *   pcout << "Calculating transformation matrices..." << std::endl;
2123 *   TimerOutput::Scope t(computing_timer, "calculate transformation");
2124 *   legendre->precalculate_all_transformation_matrices();
2125 *   }
2126 *  
2127 *   for (unsigned int cycle = 0; cycle < prm.n_cycles; ++cycle)
2128 *   {
2129 *   pcout << "Cycle " << cycle << ':' << std::endl;
2130 *  
2131 *   if (cycle == 0)
2132 *   initialize_grid();
2133 *   else
2134 *   adapt_resolution();
2135 *  
2136 *   setup_system();
2137 *  
2138 *   print_diagnostics();
2139 *  
2140 *   solve_system();
2141 *  
2142 *   compute_indicators();
2143 *  
2144 *   if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 32)
2145 *   output_results(cycle);
2146 *  
2147 *   computing_timer.print_summary();
2148 *   computing_timer.reset();
2149 *  
2150 *   pcout << std::endl;
2151 *   }
2152 *   }
2153 *   } // namespace Step75
2154 *  
2155 *  
2156 *  
2157 * @endcode
2158 *
2159 *
2160 * <a name="step_75-main"></a>
2161 * <h4>main()</h4>
2162 *
2163
2164 *
2165 * The final function is the <code>main</code> function that will ultimately
2166 * create and run a LaplaceProblem instantiation. Its structure is similar to
2167 * most other tutorial programs.
2168 *
2169 * @code
2170 *   int main(int argc, char *argv[])
2171 *   {
2172 *   try
2173 *   {
2174 *   using namespace dealii;
2175 *   using namespace Step75;
2176 *  
2177 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
2178 *  
2179 *   Parameters prm;
2180 *   LaplaceProblem<2> laplace_problem(prm);
2181 *   laplace_problem.run();
2182 *   }
2183 *   catch (std::exception &exc)
2184 *   {
2185 *   std::cerr << std::endl
2186 *   << std::endl
2187 *   << "----------------------------------------------------"
2188 *   << std::endl;
2189 *   std::cerr << "Exception on processing: " << std::endl
2190 *   << exc.what() << std::endl
2191 *   << "Aborting!" << std::endl
2192 *   << "----------------------------------------------------"
2193 *   << std::endl;
2194 *  
2195 *   return 1;
2196 *   }
2197 *   catch (...)
2198 *   {
2199 *   std::cerr << std::endl
2200 *   << std::endl
2201 *   << "----------------------------------------------------"
2202 *   << std::endl;
2203 *   std::cerr << "Unknown exception!" << std::endl
2204 *   << "Aborting!" << std::endl
2205 *   << "----------------------------------------------------"
2206 *   << std::endl;
2207 *   return 1;
2208 *   }
2209 *  
2210 *   return 0;
2211 *   }
2212 * @endcode
2213<a name="step_75-Results"></a><h1>Results</h1>
2214
2215
2216When you run the program with the given parameters on four processes in
2217release mode, your terminal output should look like this:
2218@code
2219Running with Trilinos on 4 MPI rank(s)...
2220Calculating transformation matrices...
2221Cycle 0:
2222 Number of active cells: 3072
2223 by partition: 768 768 768 768
2224 Number of degrees of freedom: 12545
2225 by partition: 3201 3104 3136 3104
2226 Number of constraints: 542
2227 by partition: 165 74 138 165
2228 Frequencies of poly. degrees: 2:3072
2229 Solved in 7 iterations.
2230
2231
2232+---------------------------------------------+------------+------------+
2233| Total wallclock time elapsed since start | 0.172s | |
2234| | | |
2235| Section | no. calls | wall time | % of total |
2236+---------------------------------+-----------+------------+------------+
2237| calculate transformation | 1 | 0.0194s | 11% |
2238| compute indicators | 1 | 0.00676s | 3.9% |
2239| initialize grid | 1 | 0.011s | 6.4% |
2240| output results | 1 | 0.0343s | 20% |
2241| setup system | 1 | 0.00839s | 4.9% |
2242| solve system | 1 | 0.0896s | 52% |
2243+---------------------------------+-----------+------------+------------+
2244
2245
2246Cycle 1:
2247 Number of active cells: 3351
2248 by partition: 875 761 843 872
2249 Number of degrees of freedom: 18228
2250 by partition: 4535 4735 4543 4415
2251 Number of constraints: 1202
2252 by partition: 303 290 326 283
2253 Frequencies of poly. degrees: 2:2522 3:829
2254 Solved in 7 iterations.
2255
2256
2257+---------------------------------------------+------------+------------+
2258| Total wallclock time elapsed since start | 0.165s | |
2259| | | |
2260| Section | no. calls | wall time | % of total |
2261+---------------------------------+-----------+------------+------------+
2262| adapt resolution | 1 | 0.00473s | 2.9% |
2263| compute indicators | 1 | 0.00764s | 4.6% |
2264| output results | 1 | 0.0243s | 15% |
2265| setup system | 1 | 0.00662s | 4% |
2266| solve system | 1 | 0.121s | 74% |
2267+---------------------------------+-----------+------------+------------+
2268
2269
2270...
2271
2272
2273Cycle 7:
2274 Number of active cells: 5610
2275 by partition: 1324 1483 1482 1321
2276 Number of degrees of freedom: 82047
2277 by partition: 21098 19960 20111 20878
2278 Number of constraints: 14357
2279 by partition: 3807 3229 3554 3767
2280 Frequencies of poly. degrees: 2:1126 3:1289 4:2725 5:465 6:5
2281 Solved in 7 iterations.
2282
2283
2284+---------------------------------------------+------------+------------+
2285| Total wallclock time elapsed since start | 1.83s | |
2286| | | |
2287| Section | no. calls | wall time | % of total |
2288+---------------------------------+-----------+------------+------------+
2289| adapt resolution | 1 | 0.00834s | 0.46% |
2290| compute indicators | 1 | 0.0178s | 0.97% |
2291| output results | 1 | 0.0434s | 2.4% |
2292| setup system | 1 | 0.0165s | 0.9% |
2293| solve system | 1 | 1.74s | 95% |
2294+---------------------------------+-----------+------------+------------+
2295@endcode
2296
2297When running the program with more processes, you will notice slight
2298differences in the number of active cells and degrees of freedom. This
2299is due to the fact that solver and preconditioner depend on the
2300partitioning of the problem, which might yield tiny differences of
2301the solution in the last digits and ultimately yield different
2302adaptation behavior.
2303
2304Furthermore, the number of iterations for the solver stays about the
2305same in all cycles despite hp-adaptation, indicating the robustness
2306of the proposed algorithms and promising good scalability for even
2307larger problem sizes and on more processes.
2308
2309Let us have a look at the graphical output of the program. After all
2310refinement cycles in the given parameter configuration, the actual
2311discretized function space looks as follows. We compare the
2312partitioning on twelve processes on the left and the polynomial degrees
2313of finite elements on the right. In the left picture, each color
2314represents a unique subdomain. In the right picture, the lightest color
2315corresponds to the polynomial degree two and the darkest one corresponds
2316to degree six:
2317
2318<div class="twocolumn" style="width: 80%; text-align: center;">
2319 <div>
2320 <img src="https://www.dealii.org/images/steps/developer/step-75.subdomains-07.svg"
2321 alt="Partitioning after seven refinements.">
2322 </div>
2323 <div>
2324 <img src="https://www.dealii.org/images/steps/developer/step-75.fedegrees-07.svg"
2325 alt="Local approximation degrees after seven refinements.">
2326 </div>
2327</div>
2328
2329
2330
2331<a name="step-75-extensions"></a>
2332<a name="step_75-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
2333
2334
2335This tutorial shows only one particular way how to use parallel
2336hp-adaptive finite element methods. In the following paragraphs, you
2337will find a number of possible improvements and extensions. Most of these
2338extensions are already part of https://github.com/marcfehling/hpbox/,
2339which provides you with implementation examples that you can play
2340around with.
2341
2342
2343<a name="step_75-Differenthpdecisionstrategies"></a><h4>Different hp-decision strategies</h4>
2344
2345
2346The deal.II library offers multiple strategies to decide which type of
2347adaptation to impose on cells: either adjust the grid resolution or
2348change the polynomial degree. We only presented the <i>Legendre
2349coefficient decay</i> strategy in this tutorial, while @ref step_27 "step-27"
2350demonstrated the <i>Fourier</i> equivalent of the same idea.
2351
2352See the "possibilities for extensions" section of @ref step_27 "step-27" for an
2353overview over these strategies, or the corresponding documentation
2354for a detailed description.
2355
2356There, another strategy is mentioned that has not been shown in any
2357tutorial so far, namely the one based on <i>refinement history</i>. The
2358usage of this method for parallel distributed applications is more
2359tricky than the others, so we will highlight the challenges that come
2360along with it: we need information about the final state of refinement
2361flags, and we need to transfer the solution across refined meshes. For
2362the former, we would attach the hp::Refinement::predict_error()
2363function to the Triangulation::Signals::post_p4est_refinement signal in
2364a way that it will be called <i>after</i> the
2365hp::Refinement::limit_p_level_difference() function. At this stage, all
2366refinement flags and future FE indices are terminally set and a reliable
2367prediction of the error is possible. The predicted error then needs to
2368be transferred across refined meshes with the aid of
2369parallel::distributed::CellDataTransfer.
2370
2377
2378
2379<a name="step_75-Solvewithmatrixbasedmethods"></a><h4>Solve with matrix-based methods</h4>
2380
2381
2382This tutorial focuses solely on matrix-free strategies. All hp-adaptive
2383algorithms however also work with matrix-based approaches in the
2384parallel distributed context.
2385
2386To create a system matrix, you can either use the
2387LaplaceOperator::get_system_matrix() function, or use an
2388<code>assemble_system()</code> function similar to the one of @ref step_27 "step-27".
2389You can then pass the system matrix to the solver as usual.
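As a rough sketch of the first option (reusing the names of this
tutorial; a hypothetical usage, not code from the program):
@code
  const TrilinosWrappers::SparseMatrix &system_matrix =
    laplace_operator.get_system_matrix();

  TrilinosWrappers::PreconditionAMG preconditioner;
  preconditioner.initialize(system_matrix);

  SolverCG<LinearAlgebra::distributed::Vector<double>> cg(solver_control);
  cg.solve(system_matrix,
           completely_distributed_solution,
           system_rhs,
           preconditioner);
@endcode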
2390
2391You can time the results of both matrix-based and matrix-free
2392implementations, quantify the speed-up, and convince yourself which
2393variant is faster.
2394
2395
2396<a name="step_75-Multigridvariants"></a><h4>Multigrid variants</h4>
2397
2398
2399In this tutorial, we presented one particular combination of
2400coarse-grid solver (CG with AMG), smoother (Chebyshev smoother with
2401point Jacobi preconditioner), and geometric-coarsening scheme (global
2403coarsening) within the multigrid algorithm. Feel free to try out
alternative combinations and investigate their behavior and performance.
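A first experiment could stay entirely within the parameter structs
defined at the top of this program, e.g. (values picked arbitrarily):
@code
  MultigridParameters mg_data;
  mg_data.smoother.degree             = 3;           // weaker smoother
  mg_data.coarse_solver.smoother_type = "Chebyshev"; // different AMG smoother
@endcode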
2404 *
2405 *
2406<a name="step_75-PlainProg"></a>
2407<h1> The plain program</h1>
2408@include "step-75.cc"
2409*/
SymmetricTensor< 2, dim, Number > e(const Tensor< 2, dim, Number > &F)
Tensor< 2, dim, Number > l(const Tensor< 2, dim, Number > &F, const Tensor< 2, dim, Number > &dF_dt)
SymmetricTensor< 2, dim, Number > b(const Tensor< 2, dim, Number > &F)
SymmetricTensor< 2, dim, Number > d(const Tensor< 2, dim, Number > &F, const Tensor< 2, dim, Number > &dF_dt)
FESeries::Legendre< dim, spacedim > default_fe_series(const hp::FECollection< dim, spacedim > &fe_collection, const unsigned int component=numbers::invalid_unsigned_int)
void coefficient_decay(FESeries::Legendre< dim, spacedim > &fe_legendre, const DoFHandler< dim, spacedim > &dof_handler, const VectorType &solution, Vector< float > &smoothness_indicators, const VectorTools::NormType regression_strategy=VectorTools::Linfty_norm, const double smallest_abs_coefficient=1e-10, const bool only_flagged_cells=false)
void partition(const SparsityPattern &sparsity_pattern, const unsigned int n_partitions, std::vector< unsigned int > &partition_indices, const Partitioner partitioner=Partitioner::metis)
T sum(const T &t, const MPI_Comm mpi_communicator)
unsigned int n_mpi_processes(const MPI_Comm mpi_communicator)
Definition mpi.cc:97
T max(const T &t, const MPI_Comm mpi_communicator)
unsigned int this_mpi_process(const MPI_Comm mpi_communicator)
Definition mpi.cc:112
std::vector< T > gather(const MPI_Comm comm, const T &object_to_send, const unsigned int root_process=0)
void interpolate_boundary_values(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > * > &function_map, std::map< types::global_dof_index, number > &boundary_values, const ComponentMask &component_mask={})
void run(const Iterator &begin, const std_cxx20::type_identity_t< Iterator > &end, Worker worker, Copier copier, const ScratchData &sample_scratch_data, const CopyData &sample_copy_data, const unsigned int queue_length, const unsigned int chunk_size)
bool limit_p_level_difference(const DoFHandler< dim, spacedim > &dof_handler, const unsigned int max_difference=1, const unsigned int contains_fe_index=0)
void predict_error(const DoFHandler< dim, spacedim > &dof_handler, const Vector< Number > &error_indicators, Vector< Number > &predicted_errors, const double gamma_p=std::sqrt(0.4), const double gamma_h=2., const double gamma_n=1.)
void p_adaptivity_fixed_number(const DoFHandler< dim, spacedim > &dof_handler, const Vector< Number > &criteria, const double p_refine_fraction=0.5, const double p_coarsen_fraction=0.5, const ComparisonFunction< std_cxx20::type_identity_t< Number > > &compare_refine=std::greater_equal< Number >(), const ComparisonFunction< std_cxx20::type_identity_t< Number > > &compare_coarsen=std::less_equal< Number >())
Definition hp.h:117
int(&) functions(const void *v1, const void *v2)
void reinit(MatrixBlock< MatrixType > &v, const BlockSparsityPattern &p)
Definition mg.h:81
constexpr unsigned int invalid_unsigned_int
Definition types.h:232
constexpr types::material_id invalid_material_id
Definition types.h:288
constexpr types::subdomain_id invalid_subdomain_id
Definition types.h:375
void refine_and_coarsen_fixed_number(::Triangulation< dim, spacedim > &tria, const ::Vector< Number > &criteria, const double top_fraction_of_cells, const double bottom_fraction_of_cells, const types::global_cell_index max_n_cells=std::numeric_limits< types::global_cell_index >::max())
double legendre(unsigned int l, double x)
Definition cmath.h:65
STL namespace.
::VectorizedArray< Number, width > sin(const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &, const Number p)
::VectorizedArray< Number, width > abs(const ::VectorizedArray< Number, width > &)
Definition types.h:32
unsigned short int fe_index
Definition types.h:68
const ::parallel::distributed::Triangulation< dim, spacedim > * triangulation
boost::signals2::signal< void()> post_p4est_refinement
Definition tria.h:2462