 Reference documentation for deal.II version 9.3.3
step-75.h
461 * virtual double value(const Point<dim> &p,
462 * const unsigned int /*component*/) const override
463 * {
464 * const std::array<double, dim> p_sphere =
465 * GeometricUtilities::Coordinates::to_spherical(p);
466 *
467 * constexpr const double alpha = 2. / 3.;
468 * return std::pow(p_sphere[0], alpha) * std::sin(alpha * p_sphere[1]);
469 * }
470 * };
471 *
472 *
473 *
474 * @endcode
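 *
 * Written in polar coordinates @f$(r,\varphi)@f$, the function implemented
 * above is @f$u(r,\varphi) = r^{2/3} \sin(2\varphi/3)@f$, i.e., the exponent
 * is the @f$\alpha = 2/3@f$ from the code. This function is harmonic away
 * from the origin, but its gradient is singular at the reentrant corner
 * @f$r = 0@f$. It will later be used as Dirichlet boundary data in
 * <code>setup_system()</code>.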
475 *
476 *
477 * <a name="Parameters"></a>
478 * <h3>Parameters</h3>
479 *
480
481 *
482 * For this tutorial, we will use a simplified set of parameters. It is also
483 * possible to use a ParameterHandler class here, but to keep this tutorial
484 * short we decided on using simple structs. The purpose of all these
485 * parameters will be described in the upcoming classes at the respective
486 * locations where they are used.
487 *
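 *
 * Just to illustrate what the ParameterHandler alternative would look like
 * (this is only a sketch and not part of the program; the file name
 * <code>step-75.prm</code> and the entry names are made up for this example),
 * two of the run-time parameters of the Parameters struct defined below could
 * be made configurable as follows:
 *
 * @code
 * ParameterHandler prm_handler;
 *
 * // Declare entries together with default values and admissible patterns.
 * prm_handler.declare_entry("n cycles", "8", Patterns::Integer(1));
 * prm_handler.declare_entry("tolerance factor",
 *                           "1e-12",
 *                           Patterns::Double(0.));
 *
 * // Read a parameter file and copy the values into the struct.
 * prm_handler.parse_input("step-75.prm");
 *
 * Parameters prm;
 * prm.n_cycles         = prm_handler.get_integer("n cycles");
 * prm.tolerance_factor = prm_handler.get_double("tolerance factor");
 * @endcode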
488
489 *
490 * The following parameter set controls the coarse-grid solver, the smoothers,
491 * and the inter-grid transfer scheme of the multigrid mechanism.
492 * We populate it with default parameters.
493 *
494 * @code
495 * struct MultigridParameters
496 * {
497 * struct
498 * {
499 * std::string type = "cg_with_amg";
500 * unsigned int maxiter = 10000;
501 * double abstol = 1e-20;
502 * double reltol = 1e-4;
503 * unsigned int smoother_sweeps = 1;
504 * unsigned int n_cycles = 1;
505 * std::string smoother_type = "ILU";
506 * } coarse_solver;
507 *
508 * struct
509 * {
510 * std::string type = "chebyshev";
511 * double smoothing_range = 20;
512 * unsigned int degree = 5;
513 * unsigned int eig_cg_n_iterations = 20;
514 * } smoother;
515 *
516 * struct
517 * {
518 * MGTransferGlobalCoarseningTools::PolynomialCoarseningSequenceType
519 * p_sequence = MGTransferGlobalCoarseningTools::
520 * PolynomialCoarseningSequenceType::decrease_by_one;
521 * bool perform_h_transfer = true;
522 * } transfer;
523 * };
524 *
525 *
526 *
527 * @endcode
528 *
529 * This is the general parameter struct for the problem class. You will find
530 * this struct divided into several categories, including general runtime
531 * parameters, level limits, refine and coarsen fractions, as well as
532 * parameters for cell weighting. It also contains an instance of the above
533 * struct for multigrid parameters which will be passed to the multigrid
534 * algorithm.
535 *
536 * @code
537 * struct Parameters
538 * {
539 * unsigned int n_cycles = 8;
540 * double tolerance_factor = 1e-12;
541 *
542 * MultigridParameters mg_data;
543 *
544 * unsigned int min_h_level = 5;
545 * unsigned int max_h_level = 12;
546 * unsigned int min_p_degree = 2;
547 * unsigned int max_p_degree = 6;
548 * unsigned int max_p_level_difference = 1;
549 *
550 * double refine_fraction = 0.3;
551 * double coarsen_fraction = 0.03;
552 * double p_refine_fraction = 0.9;
553 * double p_coarsen_fraction = 0.9;
554 *
555 * double weighting_factor = 1e6;
556 * double weighting_exponent = 1.;
557 * };
558 *
559 *
560 *
561 * @endcode
562 *
563 *
564 * <a name="MatrixfreeLaplaceoperator"></a>
565 * <h3>Matrix-free Laplace operator</h3>
566 *
567
568 *
569 * This is a matrix-free implementation of the Laplace operator that will
570 * basically take over the part of the `assemble_system()` function from other
571 * tutorials. The meaning of all member functions will be explained at their
572 * definition later.
573 *
574
575 *
576 * We will use the FEEvaluation class to evaluate the solution vector
577 * at the quadrature points and to perform the integration. In contrast to
578 * other tutorials, the template argument `degree` is set to @f$-1@f$ and
579 * `number of quadrature points in 1D` to @f$0@f$. In this case, FEEvaluation selects
580 * dynamically the correct polynomial degree and number of quadrature
581 * points. Here, we introduce an alias to FEEvaluation with the correct
582 * template parameters so that we do not have to worry about them later on.
583 *
584 * @code
585 * template <int dim, typename number>
586 * class LaplaceOperator : public Subscriptor
587 * {
588 * public:
589 * using VectorType = LinearAlgebra::distributed::Vector<number>;
590 *
591 * using FECellIntegrator = FEEvaluation<dim, -1, 0, 1, number>;
592 *
593 * LaplaceOperator() = default;
594 *
595 * LaplaceOperator(const hp::MappingCollection<dim> &mapping,
596 * const DoFHandler<dim> & dof_handler,
597 * const hp::QCollection<dim> & quad,
598 * const AffineConstraints<number> & constraints,
599 * VectorType & system_rhs);
600 *
601 * void reinit(const hp::MappingCollection<dim> &mapping,
602 * const DoFHandler<dim> & dof_handler,
603 * const hp::QCollection<dim> & quad,
604 * const AffineConstraints<number> & constraints,
605 * VectorType & system_rhs);
606 *
607 * types::global_dof_index m() const;
608 *
609 * number el(unsigned int, unsigned int) const;
610 *
611 * void initialize_dof_vector(VectorType &vec) const;
612 *
613 * void vmult(VectorType &dst, const VectorType &src) const;
614 *
615 * void Tvmult(VectorType &dst, const VectorType &src) const;
616 *
617 * const TrilinosWrappers::SparseMatrix &get_system_matrix() const;
618 *
619 * void compute_inverse_diagonal(VectorType &diagonal) const;
620 *
621 * private:
622 * void do_cell_integral_local(FECellIntegrator &integrator) const;
623 *
624 * void do_cell_integral_global(FECellIntegrator &integrator,
625 * VectorType & dst,
626 * const VectorType &src) const;
627 *
628 *
629 * void do_cell_integral_range(
630 * const MatrixFree<dim, number> & matrix_free,
631 * VectorType & dst,
632 * const VectorType & src,
633 * const std::pair<unsigned int, unsigned int> &range) const;
634 *
635 * MatrixFree<dim, number> matrix_free;
636 *
637 * @endcode
638 *
639 * To solve the equation system on the coarsest level with an AMG
640 * preconditioner, we need an actual system matrix on the coarsest level.
641 * For this purpose, we provide a mechanism that optionally computes a
642 * matrix from the matrix-free formulation, for which we introduce a
643 * dedicated SparseMatrix object. In the default case, this matrix stays
644 * empty. Once `get_system_matrix()` is called, this matrix is filled (lazy
645 * allocation). Since this is a `const` function, we need the "mutable"
646 * keyword here. We also need the constraints object to build the matrix.
647 *
648 * @code
649 * AffineConstraints<number> constraints;
650 * mutable TrilinosWrappers::SparseMatrix system_matrix;
651 * };
652 *
653 *
654 *
655 * @endcode
656 *
657 * The following section contains functions to initialize and reinitialize
658 * the class. In particular, these functions initialize the internal
659 * MatrixFree instance. For the sake of simplicity, we also compute the system
660 * right-hand-side vector.
661 *
662 * @code
663 * template <int dim, typename number>
664 * LaplaceOperator<dim, number>::LaplaceOperator(
665 * const hp::MappingCollection<dim> &mapping,
666 * const DoFHandler<dim> & dof_handler,
667 * const hp::QCollection<dim> & quad,
668 * const AffineConstraints<number> & constraints,
669 * VectorType & system_rhs)
670 * {
671 * this->reinit(mapping, dof_handler, quad, constraints, system_rhs);
672 * }
673 *
674 *
675 *
676 * template <int dim, typename number>
677 * void LaplaceOperator<dim, number>::reinit(
678 * const hp::MappingCollection<dim> &mapping,
679 * const DoFHandler<dim> & dof_handler,
680 * const hp::QCollection<dim> & quad,
681 * const AffineConstraints<number> & constraints,
682 * VectorType & system_rhs)
683 * {
684 * @endcode
685 *
686 * Clear internal data structures (in the case that the operator is reused).
687 *
688 * @code
689 * this->system_matrix.clear();
690 *
691 * @endcode
692 *
693 * Copy the constraints, since they might be needed for computation of the
694 * system matrix later on.
695 *
696 * @code
697 * this->constraints.copy_from(constraints);
698 *
699 * @endcode
700 *
701 * Set up MatrixFree. At the quadrature points, we only need to evaluate
702 * the gradient of the solution and test with the gradient of the shape
703 * functions so that we only need to set the flag `update_gradients`.
704 *
705 * @code
706 * typename MatrixFree<dim, number>::AdditionalData data;
707 * data.mapping_update_flags = update_gradients;
708 *
709 * matrix_free.reinit(mapping, dof_handler, constraints, quad, data);
710 *
711 * @endcode
712 *
713 * Compute the right-hand side vector. For this purpose, we set up a second
714 * MatrixFree instance that uses a modified AffineConstraints not containing
715 * the constraints due to Dirichlet-boundary conditions. This modified
716 * operator is applied to a vector with only the Dirichlet values set. The
717 * result is the negative right-hand-side vector.
718 *
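 *
 * In formulas: if we split the solution as @f$u = u_0 + g@f$, where @f$g@f$
 * carries the Dirichlet values and @f$u_0@f$ vanishes on the Dirichlet
 * boundary, the constrained system reads @f$A u_0 = f - A g@f$. Since this
 * program assembles no volume source term, only the @f$-A g@f$ contribution
 * remains: the vector @f$x@f$ below holds just the Dirichlet values @f$g@f$
 * (set via <code>constraints.distribute(x)</code>), the unconstrained
 * operator is applied to it, the constrained rows are zeroed out, and the
 * result is subtracted from <code>system_rhs</code>.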
719 * @code
720 * {
721 * AffineConstraints<number> constraints_without_dbc;
722 *
723 * IndexSet locally_relevant_dofs;
724 * DoFTools::extract_locally_relevant_dofs(dof_handler,
725 * locally_relevant_dofs);
726 * constraints_without_dbc.reinit(locally_relevant_dofs);
727 *
728 * DoFTools::make_hanging_node_constraints(dof_handler,
729 * constraints_without_dbc);
730 * constraints_without_dbc.close();
731 *
732 * VectorType b, x;
733 *
734 * this->initialize_dof_vector(system_rhs);
735 *
736 * MatrixFree<dim, number> matrix_free;
737 * matrix_free.reinit(
738 * mapping, dof_handler, constraints_without_dbc, quad, data);
739 *
740 * matrix_free.initialize_dof_vector(b);
741 * matrix_free.initialize_dof_vector(x);
742 *
743 * constraints.distribute(x);
744 *
745 * matrix_free.cell_loop(&LaplaceOperator::do_cell_integral_range,
746 * this,
747 * b,
748 * x);
749 *
750 * constraints.set_zero(b);
751 *
752 * system_rhs -= b;
753 * }
754 * }
755 *
756 *
757 *
758 * @endcode
759 *
760 * The following functions are implicitly needed by the multigrid algorithm,
761 * including the smoothers.
762 *
763
764 *
765 * Since we do not have a matrix, query the DoFHandler for the number of
766 * degrees of freedom.
767 *
768 * @code
769 * template <int dim, typename number>
770 * types::global_dof_index LaplaceOperator<dim, number>::m() const
771 * {
772 * return matrix_free.get_dof_handler().n_dofs();
773 * }
774 *
775 *
776 *
777 * @endcode
778 *
779 * Access a particular element in the matrix. This function is neither
780 * needed nor implemented; however, it is required to compile the program.
781 *
782 * @code
783 * template <int dim, typename number>
784 * number LaplaceOperator<dim, number>::el(unsigned int, unsigned int) const
785 * {
786 * Assert(false, ExcNotImplemented());
787 * return 0;
788 * }
789 *
790 *
791 *
792 * @endcode
793 *
794 * Initialize the given vector. We simply delegate the task to the
795 * MatrixFree function with the same name.
796 *
797 * @code
798 * template <int dim, typename number>
799 * void
800 * LaplaceOperator<dim, number>::initialize_dof_vector(VectorType &vec) const
801 * {
802 * matrix_free.initialize_dof_vector(vec);
803 * }
804 *
805 *
806 *
807 * @endcode
808 *
809 * Perform an operator evaluation by looping with the help of MatrixFree
810 * over all cells and evaluating the effect of the cell integrals (see also:
811 * `do_cell_integral_local()` and `do_cell_integral_global()`).
812 *
813 * @code
814 * template <int dim, typename number>
815 * void LaplaceOperator<dim, number>::vmult(VectorType & dst,
816 * const VectorType &src) const
817 * {
818 * this->matrix_free.cell_loop(
819 * &LaplaceOperator::do_cell_integral_range, this, dst, src, true);
820 * }
821 *
822 *
823 *
824 * @endcode
825 *
826 * Perform the transposed operator evaluation. Since we are considering
827 * symmetric "matrices", this function can simply delegate its task to vmult().
828 *
829 * @code
830 * template <int dim, typename number>
831 * void LaplaceOperator<dim, number>::Tvmult(VectorType & dst,
832 * const VectorType &src) const
833 * {
834 * this->vmult(dst, src);
835 * }
836 *
837 *
838 *
839 * @endcode
840 *
841 * Since we do not have a system matrix, we cannot loop over the
842 * diagonal entries of the matrix. Instead, we compute the diagonal by
843 * performing a sequence of operator evaluations to unit basis vectors.
844 * For this purpose, an optimized function from the MatrixFreeTools
845 * namespace is used. The inversion is performed manually afterwards.
846 *
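 *
 * Conceptually, the @f$i@f$-th diagonal entry is @f$d_i = e_i^T A e_i@f$,
 * i.e., the @f$i@f$-th component of the operator applied to the @f$i@f$-th
 * unit vector @f$e_i@f$; MatrixFreeTools::compute_diagonal() assembles all of
 * these entries cell by cell without ever forming the matrix. The loop below
 * then stores @f$1/d_i@f$, guarding against (nearly) zero diagonal entries.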
847 * @code
848 * template <int dim, typename number>
849 * void LaplaceOperator<dim, number>::compute_inverse_diagonal(
850 * VectorType &diagonal) const
851 * {
852 * MatrixFreeTools::compute_diagonal(matrix_free,
853 * diagonal,
854 * &LaplaceOperator::do_cell_integral_local,
855 * this);
856 *
857 * for (auto &i : diagonal)
858 * i = (std::abs(i) > 1.0e-10) ? (1.0 / i) : 1.0;
859 * }
860 *
861 *
862 *
863 * @endcode
864 *
865 * In the matrix-free context, no system matrix is set up during
866 * initialization of this class. As a consequence, it has to be computed
867 * here if it should be requested. Since the matrix is only computed in
868 * this tutorial for linear elements (on the coarse grid), this is
869 * acceptable.
870 * The matrix entries are obtained via a sequence of operator evaluations.
871 * For this purpose, the optimized function MatrixFreeTools::compute_matrix()
872 * is used. The matrix will only be computed if it has not been set up yet
873 * (lazy allocation).
874 *
875 * @code
876 * template <int dim, typename number>
877 * const TrilinosWrappers::SparseMatrix &
878 * LaplaceOperator<dim, number>::get_system_matrix() const
879 * {
880 * if (system_matrix.m() == 0 && system_matrix.n() == 0)
881 * {
882 * const auto &dof_handler = this->matrix_free.get_dof_handler();
883 *
884 * TrilinosWrappers::SparsityPattern dsp(
885 * dof_handler.locally_owned_dofs(),
886 * dof_handler.get_triangulation().get_communicator());
887 *
888 * DoFTools::make_sparsity_pattern(dof_handler, dsp, this->constraints);
889 *
890 * dsp.compress();
891 * system_matrix.reinit(dsp);
892 *
893 * MatrixFreeTools::compute_matrix(
894 * matrix_free,
895 * constraints,
896 * system_matrix,
897 * &LaplaceOperator::do_cell_integral_local,
898 * this);
899 * }
900 *
901 * return this->system_matrix;
902 * }
903 *
904 *
905 *
906 * @endcode
907 *
908 * Perform cell integral on a cell batch without gathering and scattering
909 * the values. This function is needed for the MatrixFreeTools functions
910 * since these functions operate directly on the buffers of FEEvaluation.
911 *
912 * @code
913 * template <int dim, typename number>
914 * void LaplaceOperator<dim, number>::do_cell_integral_local(
915 * FECellIntegrator &integrator) const
916 * {
917 * integrator.evaluate(EvaluationFlags::gradients);
918 *
919 * for (unsigned int q = 0; q < integrator.n_q_points; ++q)
920 * integrator.submit_gradient(integrator.get_gradient(q), q);
921 *
922 * integrator.integrate(EvaluationFlags::gradients);
923 * }
924 *
925 *
926 *
927 * @endcode
928 *
929 * Same as above but with access to the global vectors.
930 *
931 * @code
932 * template <int dim, typename number>
933 * void LaplaceOperator<dim, number>::do_cell_integral_global(
934 * FECellIntegrator &integrator,
935 * VectorType & dst,
936 * const VectorType &src) const
937 * {
938 * integrator.gather_evaluate(src, EvaluationFlags::gradients);
939 *
940 * for (unsigned int q = 0; q < integrator.n_q_points; ++q)
941 * integrator.submit_gradient(integrator.get_gradient(q), q);
942 *
943 * integrator.integrate_scatter(EvaluationFlags::gradients, dst);
944 * }
945 *
946 *
947 *
948 * @endcode
949 *
950 * This function loops over all cell batches within a cell-batch range and
951 * calls the above function.
952 *
953 * @code
954 * template <int dim, typename number>
955 * void LaplaceOperator<dim, number>::do_cell_integral_range(
956 * const MatrixFree<dim, number> & matrix_free,
957 * VectorType & dst,
958 * const VectorType & src,
959 * const std::pair<unsigned int, unsigned int> &range) const
960 * {
961 * FECellIntegrator integrator(matrix_free, range);
962 *
963 * for (unsigned int cell = range.first; cell < range.second; ++cell)
964 * {
965 * integrator.reinit(cell);
966 *
967 * do_cell_integral_global(integrator, dst, src);
968 * }
969 * }
970 *
971 *
972 *
973 * @endcode
974 *
975 *
976 * <a name="Solverandpreconditioner"></a>
977 * <h3>Solver and preconditioner</h3>
978 *
979
980 *
981 *
982 * <a name="Conjugategradientsolverwithmultigridpreconditioner"></a>
983 * <h4>Conjugate-gradient solver with multigrid preconditioner</h4>
984 *
985
986 *
987 * This function solves the equation system with a sequence of provided
988 * multigrid objects. It is meant to be kept as general as possible, hence
989 * the multitude of template parameters.
990 *
991 * @code
992 * template <typename VectorType,
993 * int dim,
994 * typename SystemMatrixType,
995 * typename LevelMatrixType,
996 * typename MGTransferType>
997 * static void
998 * mg_solve(SolverControl & solver_control,
999 * VectorType & dst,
1000 * const VectorType & src,
1001 * const MultigridParameters &mg_data,
1002 * const DoFHandler<dim> & dof,
1003 * const SystemMatrixType & fine_matrix,
1004 * const MGLevelObject<std::unique_ptr<LevelMatrixType>> &mg_matrices,
1005 * const MGTransferType & mg_transfer)
1006 * {
1007 * AssertThrow(mg_data.coarse_solver.type == "cg_with_amg",
1008 * ExcNotImplemented());
1009 * AssertThrow(mg_data.smoother.type == "chebyshev", ExcNotImplemented());
1010 *
1011 * const unsigned int min_level = mg_matrices.min_level();
1012 * const unsigned int max_level = mg_matrices.max_level();
1013 *
1014 * using SmootherPreconditionerType = DiagonalMatrix<VectorType>;
1015 * using SmootherType = PreconditionChebyshev<LevelMatrixType,
1016 * VectorType,
1017 * SmootherPreconditionerType>;
1018 * using PreconditionerType = PreconditionMG<dim, VectorType, MGTransferType>;
1019 *
1020 * @endcode
1021 *
1022 * We initialize level operators and Chebyshev smoothers here.
1023 *
1024 * @code
1025 * mg::Matrix<VectorType> mg_matrix(mg_matrices);
1026 *
1027 * MGLevelObject<typename SmootherType::AdditionalData> smoother_data(
1028 * min_level, max_level);
1029 *
1030 * for (unsigned int level = min_level; level <= max_level; level++)
1031 * {
1032 * smoother_data[level].preconditioner =
1033 * std::make_shared<SmootherPreconditionerType>();
1034 * mg_matrices[level]->compute_inverse_diagonal(
1035 * smoother_data[level].preconditioner->get_vector());
1036 * smoother_data[level].smoothing_range = mg_data.smoother.smoothing_range;
1037 * smoother_data[level].degree = mg_data.smoother.degree;
1038 * smoother_data[level].eig_cg_n_iterations =
1039 * mg_data.smoother.eig_cg_n_iterations;
1040 * }
1041 *
1042 * MGSmootherPrecondition<LevelMatrixType, SmootherType, VectorType>
1043 * mg_smoother;
1044 * mg_smoother.initialize(mg_matrices, smoother_data);
1045 *
1046 * @endcode
1047 *
1048 * Next, we initialize the coarse-grid solver. We use the conjugate-gradient
1049 * method with AMG as preconditioner.
1050 *
1051 * @code
1052 * ReductionControl coarse_grid_solver_control(mg_data.coarse_solver.maxiter,
1053 * mg_data.coarse_solver.abstol,
1054 * mg_data.coarse_solver.reltol,
1055 * false,
1056 * false);
1057 * SolverCG<VectorType> coarse_grid_solver(coarse_grid_solver_control);
1058 *
1059 * std::unique_ptr<MGCoarseGridBase<VectorType>> mg_coarse;
1060 *
1061 * TrilinosWrappers::PreconditionAMG precondition_amg;
1062 * TrilinosWrappers::PreconditionAMG::AdditionalData amg_data;
1063 * amg_data.smoother_sweeps = mg_data.coarse_solver.smoother_sweeps;
1064 * amg_data.n_cycles = mg_data.coarse_solver.n_cycles;
1065 * amg_data.smoother_type = mg_data.coarse_solver.smoother_type.c_str();
1066 *
1067 * precondition_amg.initialize(mg_matrices[min_level]->get_system_matrix(),
1068 * amg_data);
1069 *
1070 * mg_coarse =
1071 * std::make_unique<MGCoarseGridIterativeSolver<VectorType,
1072 * SolverCG<VectorType>,
1073 * LevelMatrixType,
1074 * decltype(precondition_amg)>>(
1075 * coarse_grid_solver, *mg_matrices[min_level], precondition_amg);
1076 *
1077 * @endcode
1078 *
1079 * Finally, we create the Multigrid object, convert it to a preconditioner,
1080 * and use it inside of a conjugate-gradient solver to solve the linear
1081 * system of equations.
1082 *
1083 * @code
1084 * Multigrid<VectorType> mg(
1085 * mg_matrix, *mg_coarse, mg_transfer, mg_smoother, mg_smoother);
1086 *
1087 * PreconditionerType preconditioner(dof, mg, mg_transfer);
1088 *
1089 * SolverCG<VectorType>(solver_control)
1090 * .solve(fine_matrix, dst, src, preconditioner);
1091 * }
1092 *
1093 *
1094 *
1095 * @endcode
1096 *
1097 *
1098 * <a name="Hybridpolynomialgeometricglobalcoarseningmultigridpreconditioner"></a>
1099 * <h4>Hybrid polynomial/geometric-global-coarsening multigrid preconditioner</h4>
1100 *
1101
1102 *
1103 * The above function deals with the actual solution for a given sequence of
1104 * multigrid objects. This function creates the actual multigrid levels, in
1105 * particular the operators, and the transfer operator as a
1106 * MGTransferGlobalCoarsening object.
1107 *
1108 * @code
1109 * template <typename VectorType, typename OperatorType, int dim>
1110 * void solve_with_gmg(SolverControl & solver_control,
1111 * const OperatorType & system_matrix,
1112 * VectorType & dst,
1113 * const VectorType & src,
1114 * const MultigridParameters & mg_data,
1115 * const hp::MappingCollection<dim> mapping_collection,
1116 * const DoFHandler<dim> & dof_handler,
1117 * const hp::QCollection<dim> & quadrature_collection)
1118 * {
1119 * @endcode
1120 *
1121 * Create a DoFHandler and operator for each multigrid level,
1122 * as well as transfer operators. To be able to
1123 * set up the operators, we need a set of DoFHandler objects that we create
1124 * via global coarsening of p or h. For the latter, we also need a sequence
1125 * of Triangulation objects that are obtained by
1126 * MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence().
1127 *
1128
1129 *
1130 * In case no h-transfer is requested, we provide an empty deleter for the
1131 * `emplace_back()` function, since the Triangulation of our DoFHandler is
1132 * an external field and its destructor is called somewhere else.
1133 *
1134 * @code
1135 * MGLevelObject<DoFHandler<dim>> dof_handlers;
1136 * MGLevelObject<std::unique_ptr<OperatorType>> operators;
1137 * MGLevelObject<MGTwoLevelTransfer<dim, VectorType>> transfers;
1138 *
1139 * std::vector<std::shared_ptr<const Triangulation<dim>>>
1140 * coarse_grid_triangulations;
1141 * if (mg_data.transfer.perform_h_transfer)
1142 * coarse_grid_triangulations =
1143 * MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence(
1144 * dof_handler.get_triangulation());
1145 * else
1146 * coarse_grid_triangulations.emplace_back(
1147 * const_cast<Triangulation<dim> *>(&(dof_handler.get_triangulation())),
1148 * [](auto &) {});
1149 *
1150 * @endcode
1151 *
1152 * Determine the total number of levels for the multigrid operation and
1153 * allocate sufficient memory for all levels.
1154 *
1155 * @code
1156 * const unsigned int n_h_levels = coarse_grid_triangulations.size() - 1;
1157 *
1158 * const auto get_max_active_fe_degree = [&](const auto &dof_handler) {
1159 * unsigned int max = 0;
1160 *
1161 * for (auto &cell : dof_handler.active_cell_iterators())
1162 * if (cell->is_locally_owned())
1163 * max =
1164 * std::max(max, dof_handler.get_fe(cell->active_fe_index()).degree);
1165 *
1166 * return Utilities::MPI::max(max, MPI_COMM_WORLD);
1167 * };
1168 *
1169 * const unsigned int n_p_levels =
1170 * MGTransferGlobalCoarseningTools::create_polynomial_coarsening_sequence(
1171 * get_max_active_fe_degree(dof_handler), mg_data.transfer.p_sequence)
1172 * .size();
1173 *
1174 * std::map<unsigned int, unsigned int> fe_index_for_degree;
1175 * for (unsigned int i = 0; i < dof_handler.get_fe_collection().size(); ++i)
1176 * {
1177 * const unsigned int degree = dof_handler.get_fe(i).degree;
1178 * Assert(fe_index_for_degree.find(degree) == fe_index_for_degree.end(),
1179 * ExcMessage("FECollection does not contain unique degrees."));
1180 * fe_index_for_degree[degree] = i;
1181 * }
1182 *
1183 * unsigned int minlevel = 0;
1184 * unsigned int minlevel_p = n_h_levels;
1185 * unsigned int maxlevel = n_h_levels + n_p_levels - 1;
1186 *
1187 * dof_handlers.resize(minlevel, maxlevel);
1188 * operators.resize(minlevel, maxlevel);
1189 * transfers.resize(minlevel, maxlevel);
1190 *
1191 * @endcode
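 *
 * To give a concrete picture of this bookkeeping (numbers chosen only for
 * illustration): if the geometric coarsening sequence contains four
 * triangulations, then <code>n_h_levels = 3</code>; if the finest active
 * polynomial degree is 3 and the <code>decrease_by_one</code> sequence is
 * used, then <code>n_p_levels = 3</code> with degrees 1, 2, 3. This yields
 * <code>minlevel = 0</code>, <code>minlevel_p = 3</code>, and
 * <code>maxlevel = 5</code>: levels 0 to 2 are the h-levels on the three
 * coarser meshes, while levels 3 to 5 all live on the finest mesh and only
 * differ in their polynomial degrees, level 5 being the actual
 * discretization.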
1192 *
1193 * Loop from the minimum (coarsest) to the maximum (finest) level and set up
1194 * DoFHandler accordingly. We start with the h-levels, where we distribute
1195 * linear elements on increasingly finer meshes.
1196 *
1197 * @code
1198 * for (unsigned int l = 0; l < n_h_levels; ++l)
1199 * {
1200 * dof_handlers[l].reinit(*coarse_grid_triangulations[l]);
1201 * dof_handlers[l].distribute_dofs(dof_handler.get_fe_collection());
1202 * }
1203 *
1204 * @endcode
1205 *
1206 * After we have reached the finest mesh, we will adjust the polynomial degrees
1207 * on each level. We reverse iterate over our data structure and start at
1208 * the finest mesh that contains all information about the active FE
1209 * indices. We then lower the polynomial degree of each cell level by level.
1210 *
1211 * @code
1212 * for (unsigned int i = 0, l = maxlevel; i < n_p_levels; ++i, --l)
1213 * {
1214 * dof_handlers[l].reinit(dof_handler.get_triangulation());
1215 *
1216 * if (l == maxlevel) // finest level
1217 * {
1218 * auto &dof_handler_mg = dof_handlers[l];
1219 *
1220 * auto cell_other = dof_handler.begin_active();
1221 * for (auto &cell : dof_handler_mg.active_cell_iterators())
1222 * {
1223 * if (cell->is_locally_owned())
1224 * cell->set_active_fe_index(cell_other->active_fe_index());
1225 * cell_other++;
1226 * }
1227 * }
1228 * else // coarse level
1229 * {
1230 * auto &dof_handler_fine = dof_handlers[l + 1];
1231 * auto &dof_handler_coarse = dof_handlers[l + 0];
1232 *
1233 * auto cell_other = dof_handler_fine.begin_active();
1234 * for (auto &cell : dof_handler_coarse.active_cell_iterators())
1235 * {
1236 * if (cell->is_locally_owned())
1237 * {
1238 * const unsigned int next_degree =
1239 * MGTransferGlobalCoarseningTools::
1240 * create_next_polynomial_coarsening_degree(
1241 * cell_other->get_fe().degree,
1242 * mg_data.transfer.p_sequence);
1243 * Assert(fe_index_for_degree.find(next_degree) !=
1244 * fe_index_for_degree.end(),
1245 * ExcMessage("Next polynomial degree in sequence "
1246 * "does not exist in FECollection."));
1247 *
1248 * cell->set_active_fe_index(fe_index_for_degree[next_degree]);
1249 * }
1250 * cell_other++;
1251 * }
1252 * }
1253 *
1254 * dof_handlers[l].distribute_dofs(dof_handler.get_fe_collection());
1255 * }
1256 *
1257 * @endcode
1258 *
1259 * Next, we will create all data structures additionally needed on each
1260 * multigrid level. This involves determining constraints with homogeneous
1261 * Dirichlet boundary conditions, and building the operator just like on the
1262 * active level.
1263 *
1264 * @code
1265 * MGLevelObject<AffineConstraints<typename VectorType::value_type>>
1266 * constraints(minlevel, maxlevel);
1267 *
1268 * for (unsigned int level = minlevel; level <= maxlevel; ++level)
1269 * {
1270 * const auto &dof_handler = dof_handlers[level];
1271 * auto & constraint = constraints[level];
1272 *
1273 * IndexSet locally_relevant_dofs;
1274 * DoFTools::extract_locally_relevant_dofs(dof_handler,
1275 * locally_relevant_dofs);
1276 * constraint.reinit(locally_relevant_dofs);
1277 *
1278 * DoFTools::make_hanging_node_constraints(dof_handler, constraint);
1279 * VectorTools::interpolate_boundary_values(mapping_collection,
1280 * dof_handler,
1281 * 0,
1282 * Functions::ZeroFunction<dim>(),
1283 * constraint);
1284 * constraint.close();
1285 *
1286 * VectorType dummy;
1287 *
1288 * operators[level] = std::make_unique<OperatorType>(mapping_collection,
1289 * dof_handler,
1290 * quadrature_collection,
1291 * constraint,
1292 * dummy);
1293 * }
1294 *
1295 * @endcode
1296 *
1297 * Set up intergrid operators and collect transfer operators within a single
1298 * operator as needed by the Multigrid solver class.
1299 *
1300 * @code
1301 * for (unsigned int level = minlevel; level < minlevel_p; ++level)
1302 * transfers[level + 1].reinit_geometric_transfer(dof_handlers[level + 1],
1303 * dof_handlers[level],
1304 * constraints[level + 1],
1305 * constraints[level]);
1306 *
1307 * for (unsigned int level = minlevel_p; level < maxlevel; ++level)
1308 * transfers[level + 1].reinit_polynomial_transfer(dof_handlers[level + 1],
1309 * dof_handlers[level],
1310 * constraints[level + 1],
1311 * constraints[level]);
1312 *
1313 * MGTransferGlobalCoarsening<dim, VectorType> transfer(
1314 * transfers, [&](const auto l, auto &vec) {
1315 * operators[l]->initialize_dof_vector(vec);
1316 * });
1317 *
1318 * @endcode
1319 *
1320 * Finally, proceed to solve the problem with multigrid.
1321 *
1322 * @code
1323 * mg_solve(solver_control,
1324 * dst,
1325 * src,
1326 * mg_data,
1327 * dof_handler,
1328 * system_matrix,
1329 * operators,
1330 * transfer);
1331 * }
1332 *
1333 *
1334 *
1335 * @endcode
1336 *
1337 *
1338 * <a name="ThecodeLaplaceProblemcodeclasstemplate"></a>
1339 * <h3>The <code>LaplaceProblem</code> class template</h3>
1340 *
1341
1342 *
1343 * Now we will finally declare the main class of this program, which solves
1344 * the Laplace equation on subsequently refined function spaces. Its structure
1345 * will look familiar as it is similar to the main classes of @ref step_27 "step-27" and
1346 * @ref step_40 "step-40". There are basically just two additions:
1347 * - The SparseMatrix object that would hold the system matrix has been
1348 * replaced by an object of the LaplaceOperator class for the MatrixFree
1349 * formulation.
1350 * - An object of parallel::CellWeights, which will help us with load
1351 * balancing, has been added.
1352 *
1353 * @code
1354 * template <int dim>
1355 * class LaplaceProblem
1356 * {
1357 * public:
1358 * LaplaceProblem(const Parameters &parameters);
1359 *
1360 * void run();
1361 *
1362 * private:
1363 * void initialize_grid();
1364 * void setup_system();
1365 * void print_diagnostics();
1366 * void solve_system();
1367 * void compute_indicators();
1368 * void adapt_resolution();
1369 * void output_results(const unsigned int cycle);
1370 *
1371 * MPI_Comm mpi_communicator;
1372 *
1373 * const Parameters prm;
1374 *
1375 * parallel::distributed::Triangulation<dim> triangulation;
1376 * DoFHandler<dim> dof_handler;
1377 *
1378 * hp::MappingCollection<dim> mapping_collection;
1379 * hp::FECollection<dim> fe_collection;
1380 * hp::QCollection<dim> quadrature_collection;
1381 * hp::QCollection<dim - 1> face_quadrature_collection;
1382 *
1383 * IndexSet locally_owned_dofs;
1384 * IndexSet locally_relevant_dofs;
1385 *
1386 * AffineConstraints<double> constraints;
1387 *
1388 * LaplaceOperator<dim, double> laplace_operator;
1389 * LinearAlgebra::distributed::Vector<double> locally_relevant_solution;
1390 * LinearAlgebra::distributed::Vector<double> system_rhs;
1391 *
1392 * std::unique_ptr<FESeries::Legendre<dim>> legendre;
1393 * std::unique_ptr<parallel::CellWeights<dim>> cell_weights;
1394 *
1395 * Vector<float> estimated_error_per_cell;
1396 * Vector<float> hp_decision_indicators;
1397 *
1398 * ConditionalOStream pcout;
1399 * TimerOutput computing_timer;
1400 * };
1401 *
1402 *
1403 *
1404 * @endcode
1405 *
1406 *
1407 * <a name="ThecodeLaplaceProblemcodeclassimplementation"></a>
1408 * <h3>The <code>LaplaceProblem</code> class implementation</h3>
1409 *
1410
1411 *
1412 *
1413 * <a name="Constructor"></a>
1414 * <h4>Constructor</h4>
1415 *
1416
1417 *
1418 * The constructor starts with an initializer list that looks similar to the
1419 * one of @ref step_40 "step-40". We again prepare the ConditionalOStream object to allow
1420 * only the first process to output anything over the console, and initialize
1421 * the computing timer properly.
1422 *
1423 * @code
1424 * template <int dim>
1425 * LaplaceProblem<dim>::LaplaceProblem(const Parameters &parameters)
1426 * : mpi_communicator(MPI_COMM_WORLD)
1427 * , prm(parameters)
1428 * , triangulation(mpi_communicator)
1429 * , dof_handler(triangulation)
1430 * , pcout(std::cout,
1431 * (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
1432 * , computing_timer(mpi_communicator,
1433 * pcout,
1434 * TimerOutput::never,
1435 * TimerOutput::wall_times)
1436 * {
1437 * Assert(prm.min_h_level <= prm.max_h_level,
1438 * ExcMessage(
1439 * "Triangulation level limits have been incorrectly set up."));
1440 * Assert(prm.min_p_degree <= prm.max_p_degree,
1441 * ExcMessage("FECollection degrees have been incorrectly set up."));
1442 *
1443 * @endcode
1444 *
1445 * We need to prepare the data structures for the hp-functionality in the
1446 * actual body of the constructor, and create corresponding objects for
1447 * every degree in the specified range from the parameter struct. As we are
1448 * only dealing with non-distorted rectangular cells, a linear mapping
1449 * object is sufficient in this context.
1450 *
1451
1452 *
1453 * In the Parameters struct, we provide ranges for levels on which the
1454 * function space is operating with a reasonable resolution. The multigrid
1455 * algorithm requires linear elements on the coarsest possible level. So we
1456 * start with the lowest polynomial degree and fill the collection with
1457 * consecutively higher degrees until the user-specified maximum is
1458 * reached.
1459 *
1460 * @code
1461 * mapping_collection.push_back(MappingQ1<dim>());
1462 *
1463 * for (unsigned int degree = 1; degree <= prm.max_p_degree; ++degree)
1464 * {
1465 * fe_collection.push_back(FE_Q<dim>(degree));
1466 * quadrature_collection.push_back(QGauss<dim>(degree + 1));
1467 * face_quadrature_collection.push_back(QGauss<dim - 1>(degree + 1));
1468 * }
1469 *
1470 * @endcode
1471 *
1472 * As our FECollection contains more finite elements than we want to use for
1473 * the finite element approximation of our solution, we would like to limit
1474 * the range on which active FE indices can operate. For this, the
1475 * FECollection class allows us to register a hierarchy that determines the
1476 * succeeding and preceding finite element in case of p-refinement and
1477 * p-coarsening, respectively. All functions in the hp::Refinement namespace
1478 * consult this hierarchy to determine future FE indices. We will register
1479 * such a hierarchy that only works on finite elements with polynomial
1480 * degrees in the proposed range <code>[min_p_degree, max_p_degree]</code>.
1481 *
1482 * @code
1483 * const unsigned int min_fe_index = prm.min_p_degree - 1;
1484 * fe_collection.set_hierarchy(
1485 * /*next_index=*/
1486 * [](const typename hp::FECollection<dim> &fe_collection,
1487 * const unsigned int fe_index) -> unsigned int {
1488 * return ((fe_index + 1) < fe_collection.size()) ? fe_index + 1 :
1489 * fe_index;
1490 * },
1491 * /*previous_index=*/
1492 * [min_fe_index](const typename hp::FECollection<dim> &,
1493 * const unsigned int fe_index) -> unsigned int {
1494 * Assert(fe_index >= min_fe_index,
1495 * ExcMessage("Finite element is not part of hierarchy!"));
1496 * return (fe_index > min_fe_index) ? fe_index - 1 : fe_index;
1497 * });
1498 *
1499 * @endcode
1500 *
1501 * We initialize the FESeries::Legendre object in the default configuration
1502 * for smoothness estimation.
1503 *
1504 * @code
1505 * legendre = std::make_unique<FESeries::Legendre<dim>>(
1506 * SmoothnessEstimator::Legendre::default_fe_series(fe_collection));
1507 *
1508 * @endcode
1509 *
1510 * The next part is going to be tricky. During execution of refinement, a
1511 * few hp-algorithms need to interfere with the actual refinement process on
1512 * the Triangulation object. We do this by connecting several functions to
1513 * Triangulation::Signals: signals will be called at different stages during
1514 * the actual refinement process and trigger all connected functions. We
1515 * require this functionality for load balancing and to limit the polynomial
1516 * degrees of neighboring cells.
1517 *
1518
1519 *
1520 * For the former, we would like to assign a weight to every cell that is
1521 * proportional to the number of degrees of freedom of its future finite
1522 * element. The library offers a class parallel::CellWeights that allows to
1523 * easily attach individual weights at the right place during the refinement
1524 * process, i.e., after all refine and coarsen flags have been set correctly
1525 * for hp-adaptation and right before repartitioning for load balancing is
1526 * about to happen. Functions can be registered that will attach weights of
1527 * the form @f$a (n_\text{dofs})^b@f$ with a provided pair of parameters
1528 * @f$(a,b)@f$. We register such a function in the following. Every cell will be
1529 * charged with a constant weight at creation, which is a value of 1000 (see
1530 * Triangulation::Signals::cell_weight).
1531 *
1532
1533 *
1534 * For load balancing, efficient solvers like the one we use should scale
1535 * linearly with the number of degrees of freedom owned. Further, to
1536 * increase the impact of the weights we would like to attach, make sure
1537 * that the individual weight will exceed this base weight by orders of
1538 * magnitude. We set the parameters for cell weighting correspondingly: A
1539 * large weighting factor of @f$10^6@f$ and an exponent of @f$1@f$.
1540 *
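 *
 * As a rough calculation (only meant as an illustration): a locally owned
 * cell carrying a quadratic element in 2D has @f$(2+1)^2 = 9@f$ degrees of
 * freedom, so with these parameters it receives a weight of
 * @f$10^6 \cdot 9^1 = 9 \cdot 10^6@f$, which indeed exceeds the constant
 * base weight of 1000 by several orders of magnitude.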
1541 * @code
1542 * cell_weights = std::make_unique<parallel::CellWeights<dim>>(
1543 * dof_handler,
1544 * parallel::CellWeights<dim>::ndofs_weighting(
1545 * {prm.weighting_factor, prm.weighting_exponent}));
1546 *
1547 * @endcode
1548 *
1549 * In h-adaptive applications, we ensure a 2:1 mesh balance by limiting the
1550 * difference of refinement levels of neighboring cells to one. With the
1551 * second call in the following code snippet, we will ensure the same for
1552 * p-levels on neighboring cells: levels of future finite elements are not
1553 * allowed to differ by more than a specified difference. The function
1554 * hp::Refinement::limit_p_level_difference takes care of this, but needs to
1555 * be connected to a very specific signal in the parallel context. The issue
1556 * is that we need to know how the mesh will be actually refined to set
1557 * future FE indices accordingly. As we ask the p4est oracle to perform
1558 * refinement, we need to ensure that the Triangulation has been updated
1559 * with the adaptation flags of the oracle first. An instantiation of
1560 * the class parallel::distributed::TemporarilyMatchRefineFlags does exactly
1561 * that for the duration of its life. Thus, we will create an object of this
1562 * class right before limiting the p-level difference, and connect the
1563 * corresponding lambda function to the signal
1564 * Triangulation::Signals::post_p4est_refinement, which will be triggered
1565 * after the oracle got refined, but before the Triangulation is refined.
1566 * Furthermore, we specify that this function will be connected to the front
1567 * of the signal, to ensure that the modification is performed before any
1568 * other function connected to the same signal.
1569 *
1570 * @code
1571 * triangulation.signals.post_p4est_refinement.connect(
1572 * [&, min_fe_index]() {
1573 * const parallel::distributed::TemporarilyMatchRefineFlags<dim>
1574 * refine_modifier(triangulation);
1575 * hp::Refinement::limit_p_level_difference(dof_handler,
1576 * prm.max_p_level_difference,
1577 * /*contains=*/min_fe_index);
1578 * },
1579 * boost::signals2::at_front);
1580 * }
1581 *
1582 *
1583 *
1584 * @endcode
1585 *
1586 *
1587 * <a name="LaplaceProbleminitialize_grid"></a>
1588 * <h4>LaplaceProblem::initialize_grid</h4>
1589 *
1590
1591 *
1592 * For an L-shaped domain, we could use the function GridGenerator::hyper_L()
1593 * as demonstrated in @ref step_50 "step-50". However in the 2D case, that particular
1594 * function removes the first quadrant, while we need the fourth quadrant
1595 * removed in our scenario. Thus, we will use a different function
1596 * GridGenerator::subdivided_hyper_L() which gives us more options to create
1597 * the mesh. Furthermore, we formulate that function in a way that it also
1598 * generates a 3D mesh: the 2D L-shaped domain will basically be elongated by 1
1599 * in the positive z-direction.
1600 *
1601
1602 *
1603 * We first pretend to build a GridGenerator::subdivided_hyper_rectangle().
1604 * The parameters that we need to provide are Point objects for the lower left
1605 * and top right corners, as well as the number of repetitions that the base
1606 * mesh will have in each direction. We provide them for the first two
1607 * dimensions and treat the higher third dimension separately.
1608 *
1609
1610 *
1611 * To create an L-shaped domain, we need to remove the excess cells. For this,
1612 * we specify the <code>cells_to_remove</code> accordingly. We would like to
1613 * remove one cell in every direction counted from the negative end, but one
1614 * from the positive end in the x-direction.
1615 *
1616
1617 *
1618 * In the end, we supply the number of initial refinements that corresponds to
1619 * the supplied minimal grid refinement level. Further, we set the initial
1620 * active FE indices accordingly.
1621 *
1622 * @code
1623 * template <int dim>
1624 * void LaplaceProblem<dim>::initialize_grid()
1625 * {
1626 * TimerOutput::Scope t(computing_timer, "initialize grid");
1627 *
1628 * std::vector<unsigned int> repetitions(dim);
1629 * Point<dim> bottom_left, top_right;
1630 * for (unsigned int d = 0; d < dim; ++d)
1631 * if (d < 2)
1632 * {
1633 * repetitions[d] = 2;
1634 * bottom_left[d] = -1.;
1635 * top_right[d] = 1.;
1636 * }
1637 * else
1638 * {
1639 * repetitions[d] = 1;
1640 * bottom_left[d] = 0.;
1641 * top_right[d] = 1.;
1642 * }
1643 *
1644 * std::vector<int> cells_to_remove(dim, 1);
1645 * cells_to_remove[0] = -1;
1646 *
1647 * GridGenerator::subdivided_hyper_L(
1648 * triangulation, repetitions, bottom_left, top_right, cells_to_remove);
1649 *
1650 * triangulation.refine_global(prm.min_h_level);
1651 *
1652 * const unsigned int min_fe_index = prm.min_p_degree - 1;
1653 * for (const auto &cell : dof_handler.active_cell_iterators())
1654 * if (cell->is_locally_owned())
1655 * cell->set_active_fe_index(min_fe_index);
1656 * }
1657 *
1658 *
1659 *
1660 * @endcode
1661 *
1662 *
1663 * <a name="LaplaceProblemsetup_system"></a>
1664 * <h4>LaplaceProblem::setup_system</h4>
1665 *
1666
1667 *
1668 * This function looks exactly like the one of @ref step_40 "step-40", but you will
1669 * notice the absence of the system matrix as well as the scaffold that
1670 * surrounds it. Instead, we will initialize the MatrixFree formulation of the
1671 * <code>laplace_operator</code> here. For boundary conditions, we will use
1672 * the Solution class introduced earlier in this tutorial.
1673 *
1674 * @code
1675 * template <int dim>
1676 * void LaplaceProblem<dim>::setup_system()
1677 * {
1678 * TimerOutput::Scope t(computing_timer, "setup system");
1679 *
1680 * dof_handler.distribute_dofs(fe_collection);
1681 *
1682 * locally_owned_dofs = dof_handler.locally_owned_dofs();
1683 * DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
1684 *
1685 * locally_relevant_solution.reinit(locally_owned_dofs,
1686 * locally_relevant_dofs,
1687 * mpi_communicator);
1688 * system_rhs.reinit(locally_owned_dofs, mpi_communicator);
1689 *
1690 * constraints.clear();
1691 * constraints.reinit(locally_relevant_dofs);
1692 * DoFTools::make_hanging_node_constraints(dof_handler, constraints);
1693 * VectorTools::interpolate_boundary_values(
1694 * mapping_collection, dof_handler, 0, Solution<dim>(), constraints);
1695 * constraints.close();
1696 *
1697 * laplace_operator.reinit(mapping_collection,
1698 * dof_handler,
1699 * quadrature_collection,
1700 * constraints,
1701 * system_rhs);
1702 * }
1703 *
1704 *
1705 *
1706 * @endcode
1707 *
1708 *
1709 * <a name="LaplaceProblemprint_diagnostics"></a>
1710 * <h4>LaplaceProblem::print_diagnostics</h4>
1711 *
1712
1713 *
1714 * This is a function that prints additional diagnostics about the equation
1715 * system and its partitioning. In addition to the usual global number of
1716 * active cells and degrees of freedom, we also output their local
1717 * equivalents. For a regulated output, we will communicate the local
1718 * quantities with a Utilities::MPI::gather operation to the first process
1719 * which will then output all information. Output of local quantities is
1720 * limited to the first 8 processes to avoid cluttering the terminal.
1721 *
1722
1723 *
1724 * Furthermore, we would like to print the frequencies of the polynomial
1725 * degrees in the numerical discretization. Since this information is only
1726 * stored locally, we will count the finite elements on locally owned cells
1727 * and later communicate them via Utilities::MPI::sum.
1728 *
1729 * @code
1730 * template <int dim>
1731 * void LaplaceProblem<dim>::print_diagnostics()
1732 * {
1733 * const unsigned int first_n_processes =
1734 * std::min<unsigned int>(8,
1735 * Utilities::MPI::n_mpi_processes(mpi_communicator));
1736 * const bool output_cropped =
1737 * first_n_processes < Utilities::MPI::n_mpi_processes(mpi_communicator);
1738 *
1739 * {
1740 * pcout << " Number of active cells: "
1741 * << triangulation.n_global_active_cells() << std::endl
1742 * << " by partition: ";
1743 *
1744 * std::vector<unsigned int> n_active_cells_per_subdomain =
1745 * Utilities::MPI::gather(mpi_communicator,
1746 * triangulation.n_locally_owned_active_cells());
1747 * for (unsigned int i = 0; i < first_n_processes; ++i)
1748 * pcout << ' ' << n_active_cells_per_subdomain[i];
1749 * if (output_cropped)
1750 * pcout << " ...";
1751 * pcout << std::endl;
1752 * }
1753 *
1754 * {
1755 * pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
1756 * << std::endl
1757 * << " by partition: ";
1758 *
1759 * std::vector<types::global_dof_index> n_dofs_per_subdomain =
1760 * Utilities::MPI::gather(mpi_communicator,
1761 * dof_handler.n_locally_owned_dofs());
1762 * for (unsigned int i = 0; i < first_n_processes; ++i)
1763 * pcout << ' ' << n_dofs_per_subdomain[i];
1764 * if (output_cropped)
1765 * pcout << " ...";
1766 * pcout << std::endl;
1767 * }
1768 *
1769 * {
1770 * std::vector<types::global_dof_index> n_constraints_per_subdomain =
1771 * Utilities::MPI::gather(mpi_communicator, constraints.n_constraints());
1772 *
1773 * pcout << " Number of constraints: "
1774 * << std::accumulate(n_constraints_per_subdomain.begin(),
1775 * n_constraints_per_subdomain.end(),
1776 * 0)
1777 * << std::endl
1778 * << " by partition: ";
1779 * for (unsigned int i = 0; i < first_n_processes; ++i)
1780 * pcout << ' ' << n_constraints_per_subdomain[i];
1781 * if (output_cropped)
1782 * pcout << " ...";
1783 * pcout << std::endl;
1784 * }
1785 *
1786 * {
1787 * std::vector<unsigned int> n_fe_indices(fe_collection.size(), 0);
1788 * for (const auto &cell : dof_handler.active_cell_iterators())
1789 * if (cell->is_locally_owned())
1790 * n_fe_indices[cell->active_fe_index()]++;
1791 *
1792 * Utilities::MPI::sum(n_fe_indices, mpi_communicator, n_fe_indices);
1793 *
1794 * pcout << " Frequencies of poly. degrees:";
1795 * for (unsigned int i = 0; i < fe_collection.size(); ++i)
1796 * if (n_fe_indices[i] > 0)
1797 * pcout << ' ' << fe_collection[i].degree << ":" << n_fe_indices[i];
1798 * pcout << std::endl;
1799 * }
1800 * }
1801 *
1802 *
1803 *
1804 * @endcode
1805 *
1806 *
1807 * <a name="LaplaceProblemsolve_system"></a>
1808 * <h4>LaplaceProblem::solve_system</h4>
1809 *
1810
1811 *
1812 * The scaffold around the solution is similar to the one of @ref step_40 "step-40". We
1813 * prepare a vector that matches the requirements of MatrixFree and collect
1814 * the locally-relevant degrees of freedom after we solved the equation system. The
1815 * solution happens with the function introduced earlier.
1816 *
1817 * @code
1818 * template <int dim>
1819 * void LaplaceProblem<dim>::solve_system()
1820 * {
1821 * TimerOutput::Scope t(computing_timer, "solve system");
1822 *
1823 * LinearAlgebra::distributed::Vector<double> completely_distributed_solution;
1824 * laplace_operator.initialize_dof_vector(completely_distributed_solution);
1825 *
1826 * SolverControl solver_control(system_rhs.size(),
1827 * prm.tolerance_factor * system_rhs.l2_norm());
1828 *
1829 * solve_with_gmg(solver_control,
1830 * laplace_operator,
1831 * completely_distributed_solution,
1832 * system_rhs,
1833 * prm.mg_data,
1834 * mapping_collection,
1835 * dof_handler,
1836 * quadrature_collection);
1837 *
1838 * pcout << " Solved in " << solver_control.last_step() << " iterations."
1839 * << std::endl;
1840 *
1841 * constraints.distribute(completely_distributed_solution);
1842 *
1843 * locally_relevant_solution.copy_locally_owned_data_from(
1844 * completely_distributed_solution);
1845 * locally_relevant_solution.update_ghost_values();
1846 * }
1847 *
1848 *
1849 *
1850 * @endcode
1851 *
1852 *
1853 * <a name="LaplaceProblemcompute_indicators"></a>
1854 * <h4>LaplaceProblem::compute_indicators</h4>
1855 *
1856
1857 *
1858 * This function contains only a part of the typical <code>refine_grid</code>
1859 * function from other tutorials and is new in that sense. Here, we will only
1860 * calculate all indicators for adaptation without actually refining the grid. We
1861 * do this for the purpose of writing all indicators to the file system, so we
1862 * store them for later.
1863 *
1864
1865 *
1866 * Since we are dealing with an elliptic problem, we will make use of the
1867 * KellyErrorEstimator again, but with a slight difference. Modifying the
1868 * scaling factor of the underlying face integrals to be dependent on the
1869 * actual polynomial degree of the neighboring elements is favorable in
1870 * hp-adaptive applications @cite davydov2017hp. We can do this by specifying
1871 * the very last parameter from the additional ones you notice. The others
1872 * are actually just the defaults.
1873 *
1874
1875 *
1876 * For the purpose of hp-adaptation, we will calculate smoothness estimates
1877 * with the strategy presented in the tutorial introduction and use the
1878 * implementation in SmoothnessEstimator::Legendre. In the Parameters struct,
1879 * we set the minimal polynomial degree to 2 as it seems that the smoothness
1880 * estimation algorithms have trouble with linear elements.
1881 *
1882 * @code
1883 * template <int dim>
1884 * void LaplaceProblem<dim>::compute_indicators()
1885 * {
1886 * TimerOutput::Scope t(computing_timer, "compute indicators");
1887 *
1888 * estimated_error_per_cell.grow_or_shrink(triangulation.n_active_cells());
1889 * KellyErrorEstimator<dim>::estimate(
1890 * dof_handler,
1891 * face_quadrature_collection,
1892 * std::map<types::boundary_id, const Function<dim> *>(),
1893 * locally_relevant_solution,
1894 * estimated_error_per_cell,
1895 * /*component_mask=*/ComponentMask(),
1896 * /*coefficients=*/nullptr,
1897 * /*n_threads=*/numbers::invalid_unsigned_int,
1898 * /*subdomain_id=*/numbers::invalid_subdomain_id,
1899 * /*material_id=*/numbers::invalid_material_id,
1900 * /*strategy=*/
1901 * KellyErrorEstimator<dim>::Strategy::face_diameter_over_twice_max_degree);
1902 *
1903 * hp_decision_indicators.grow_or_shrink(triangulation.n_active_cells());
1904 * SmoothnessEstimator::Legendre::coefficient_decay(*legendre,
1905 * dof_handler,
1906 * locally_relevant_solution,
1907 * hp_decision_indicators);
1908 * }
1909 *
1910 *
1911 *
1912 * @endcode
1913 *
1914 *
1915 * <a name="LaplaceProblemadapt_resolution"></a>
1916 * <h4>LaplaceProblem::adapt_resolution</h4>
1917 *
1918
1919 *
1920 * With the previously calculated indicators, we will finally flag all cells
1921 * for adaptation and also execute refinement in this function. As in previous
1922 * tutorials, we will use the "fixed number" strategy, but now for
1923 * hp-adaptation.
1924 *
1925 * @code
1926 * template <int dim>
1927 * void LaplaceProblem<dim>::adapt_resolution()
1928 * {
1929 * TimerOutput::Scope t(computing_timer, "adapt resolution");
1930 *
1931 * @endcode
1932 *
1933 * First, we will set refine and coarsen flags based on the error estimates
1934 * on each cell. There is nothing new here.
1935 *
1936
1937 *
1938 * We will use general refine and coarsen fractions that have been
1939 * elaborated in the other deal.II tutorials: using the fixed number
1940 * strategy, we will flag 30% of all cells for refinement and 3% for
1941 * coarsening, as provided in the Parameters struct.
1942 *
1943 * @code
1944 * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
1945 * triangulation,
1946 * estimated_error_per_cell,
1947 * prm.refine_fraction,
1948 * prm.coarsen_fraction);
1949 *
1950 * @endcode
1951 *
1952 * Next, we will make all adjustments for hp-adaptation. We want to refine
1953 * and coarsen those cells flagged in the previous step, but need to decide
1954 * if we would like to do it by adjusting the grid resolution or the
1955 * polynomial degree.
1956 *
1957
1958 *
1959 * The next function call sets future FE indices according to the previously
1960 * calculated smoothness indicators as p-adaptation indicators. These
1961 * indices will only be set on those cells that have refine or coarsen flags
1962 * assigned.
1963 *
1964
1965 *
1966 * For the p-adaptation fractions, we will take an educated guess. Since we
1967 * only expect a single singularity in our scenario, i.e., in the origin of
1968 * the domain, and a smooth solution anywhere else, we would like to
1969 * strongly prefer to use p-adaptation over h-adaptation. This reflects in
1970 * our choice of a fraction of 90% for both p-refinement and p-coarsening.
1971 *
1972 * @code
1973 * hp::Refinement::p_adaptivity_fixed_number(dof_handler,
1974 * hp_decision_indicators,
1975 * prm.p_refine_fraction,
1976 * prm.p_coarsen_fraction);
1977 *
1978 * @endcode
1979 *
1980 * At this stage, we have both the future FE indices and the classic refine
1981 * and coarsen flags set, from which the latter will be interpreted by
1982 * Triangulation::execute_coarsening_and_refinement() for h-adaptation.
1983 * We would like to only impose one type of adaptation on cells, which is
1984 * what the next function will sort out for us. In short, on cells which
1985 * have both types of indicators assigned, we will favor the p-adaptation
1986 * one and remove the h-adaptation one.
1987 *
1988 * @code
1989 * hp::Refinement::choose_p_over_h(dof_handler);
1990 *
1991 * @endcode
1992 *
1993 * After setting all indicators, we will remove those that exceed the
1994 * specified limits of the provided level ranges in the Parameters struct.
1995 * This limitation naturally arises for p-adaptation as the number of
1996 * supplied finite elements is limited. In addition, we registered a custom
1997 * hierarchy for p-adaptation in the constructor. Now, we need to do this
1998 * manually in the h-adaptive context like in @ref step_31 "step-31".
1999 *
2000
2001 *
2002 * We will iterate over all cells on the designated min and max levels and
2003 * remove the corresponding flags. As an alternative, we could also flag
2004 * these cells for p-adaptation by setting future FE indices accordingly
2005 * instead of simply clearing the refine and coarsen flags.
2006 *
2007 * @code
2008 * Assert(triangulation.n_levels() >= prm.min_h_level + 1 &&
2009 * triangulation.n_levels() <= prm.max_h_level + 1,
2010 * ExcInternalError());
2011 *
2012 * if (triangulation.n_levels() > prm.max_h_level)
2013 * for (const auto &cell :
2014 * triangulation.active_cell_iterators_on_level(prm.max_h_level))
2015 * cell->clear_refine_flag();
2016 *
2017 * for (const auto &cell :
2018 * triangulation.active_cell_iterators_on_level(prm.min_h_level))
2019 * cell->clear_coarsen_flag();
2020 *
2021 * @endcode
2022 *
2023 * In the end, we are left to execute coarsening and refinement. Here, not
2024 * only the grid will be updated, but also all previous future FE indices
2025 * will become active.
2026 *
2027
2028 *
2029 * Remember that the functions we have attached to triangulation signals in the
2030 * constructor will be triggered in this function call. So there is even
2031 * more happening: weighted repartitioning will be performed to ensure load
2032 * balancing, and the difference of p-levels between neighboring cells will
2033 * be limited.
2034 *
2035 * @code
2036 * triangulation.execute_coarsening_and_refinement();
2037 * }
2038 *
2039 *
2040 *
2041 * @endcode
2042 *
2043 *
2044 * <a name="LaplaceProblemoutput_results"></a>
2045 * <h4>LaplaceProblem::output_results</h4>
2046 *
2047
2048 *
2049 * Writing results to the file system in parallel applications works exactly
2050 * like in @ref step_40 "step-40". In addition to the data containers that we prepared
2051 * throughout the tutorial, we would also like to write out the polynomial
2052 * degree of each finite element on the grid as well as the subdomain each
2053 * cell belongs to. We prepare necessary containers for this in the scope of
2054 * this function.
2055 *
2056 * @code
2057 * template <int dim>
2058 * void LaplaceProblem<dim>::output_results(const unsigned int cycle)
2059 * {
2060 * TimerOutput::Scope t(computing_timer, "output results");
2061 *
2062 * Vector<float> fe_degrees(triangulation.n_active_cells());
2063 * for (const auto &cell : dof_handler.active_cell_iterators())
2064 * if (cell->is_locally_owned())
2065 * fe_degrees(cell->active_cell_index()) = cell->get_fe().degree;
2066 *
2067 * Vector<float> subdomain(triangulation.n_active_cells());
2068 * for (auto &subd : subdomain)
2069 * subd = triangulation.locally_owned_subdomain();
2070 *
2071 * DataOut<dim> data_out;
2072 * data_out.attach_dof_handler(dof_handler);
2073 * data_out.add_data_vector(locally_relevant_solution, "solution");
2074 * data_out.add_data_vector(fe_degrees, "fe_degree");
2075 * data_out.add_data_vector(subdomain, "subdomain");
2076 * data_out.add_data_vector(estimated_error_per_cell, "error");
2077 * data_out.add_data_vector(hp_decision_indicators, "hp_indicator");
2078 * data_out.build_patches(mapping_collection);
2079 *
2080 * data_out.write_vtu_with_pvtu_record(
2081 * "./", "solution", cycle, mpi_communicator, 2, 1);
2082 * }
2083 *
2084 *
2085 *
2086 * @endcode
2087 *
2088 *
2089 * <a name="LaplaceProblemrun"></a>
2090 * <h4>LaplaceProblem::run</h4>
2091 *
2092
2093 *
2094 * The actual run function again looks very familiar to @ref step_40 "step-40". The only
2095 * addition is the bracketed section that precedes the actual cycle loop.
2096 * Here, we will pre-calculate the Legendre transformation matrices. In
2097 * general, these will be calculated on the fly via lazy allocation whenever a
2098 * certain matrix is needed. For timing purposes however, we would like to
2099 * calculate them all at once before the actual time measurement begins. We
2100 * will thus designate their calculation to their own scope.
2101 *
2102 * @code
2103 * template <int dim>
2104 * void LaplaceProblem<dim>::run()
2105 * {
2106 * pcout << "Running with Trilinos on "
2107 * << Utilities::MPI::n_mpi_processes(mpi_communicator)
2108 * << " MPI rank(s)..." << std::endl;
2109 *
2110 * {
2111 * pcout << "Calculating transformation matrices..." << std::endl;
2112 * TimerOutput::Scope t(computing_timer, "calculate transformation");
2113 * legendre->precalculate_all_transformation_matrices();
2114 * }
2115 *
2116 * for (unsigned int cycle = 0; cycle < prm.n_cycles; ++cycle)
2117 * {
2118 * pcout << "Cycle " << cycle << ':' << std::endl;
2119 *
2120 * if (cycle == 0)
2121 * initialize_grid();
2122 * else
2123 * adapt_resolution();
2124 *
2125 * setup_system();
2126 *
2127 * print_diagnostics();
2128 *
2129 * solve_system();
2130 *
2131 * compute_indicators();
2132 *
2133 * if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 32)
2134 * output_results(cycle);
2135 *
2136 * computing_timer.print_summary();
2137 * computing_timer.reset();
2138 *
2139 * pcout << std::endl;
2140 * }
2141 * }
2142 * } // namespace Step75
2143 *
2144 *
2145 *
2146 * @endcode
2147 *
2148 *
2149 * <a name="main"></a>
2150 * <h4>main()</h4>
2151 *
2152
2153 *
2154 * The final function is the <code>main</code> function that will ultimately
2155 * create and run a LaplaceProblem instantiation. Its structure is similar to
2156 * that of most other tutorial programs.
2157 *
2158 * @code
2159 * int main(int argc, char *argv[])
2160 * {
2161 * try
2162 * {
2163 * using namespace dealii;
2164 * using namespace Step75;
2165 *
2166 * Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
2167 *
2168 * Parameters prm;
2169 * LaplaceProblem<2> laplace_problem(prm);
2170 * laplace_problem.run();
2171 * }
2172 * catch (std::exception &exc)
2173 * {
2174 * std::cerr << std::endl
2175 * << std::endl
2176 * << "----------------------------------------------------"
2177 * << std::endl;
2178 * std::cerr << "Exception on processing: " << std::endl
2179 * << exc.what() << std::endl
2180 * << "Aborting!" << std::endl
2181 * << "----------------------------------------------------"
2182 * << std::endl;
2183 *
2184 * return 1;
2185 * }
2186 * catch (...)
2187 * {
2188 * std::cerr << std::endl
2189 * << std::endl
2190 * << "----------------------------------------------------"
2191 * << std::endl;
2192 * std::cerr << "Unknown exception!" << std::endl
2193 * << "Aborting!" << std::endl
2194 * << "----------------------------------------------------"
2195 * << std::endl;
2196 * return 1;
2197 * }
2198 *
2199 * return 0;
2200 * }
2201 * @endcode
2202<a name="Results"></a><h1>Results</h1>
2203
2204
2205When you run the program with the given parameters on four processes in
2206release mode, your terminal output should look like this:
2207@code
2208Running with Trilinos on 4 MPI rank(s)...
2209Calculating transformation matrices...
2210Cycle 0:
2211 Number of active cells: 3072
2212 by partition: 768 768 768 768
2213 Number of degrees of freedom: 12545
2214 by partition: 3201 3104 3136 3104
2215 Number of constraints: 542
2216 by partition: 165 74 138 165
2217 Frequencies of poly. degrees: 2:3072
2218 Solved in 7 iterations.
2219
2220
2221+---------------------------------------------+------------+------------+
2222| Total wallclock time elapsed since start | 0.598s | |
2223| | | |
2224| Section | no. calls | wall time | % of total |
2225+---------------------------------+-----------+------------+------------+
2226| calculate transformation | 1 | 0.0533s | 8.9% |
2227| compute indicators | 1 | 0.0177s | 3% |
2228| initialize grid | 1 | 0.0397s | 6.6% |
2229| output results | 1 | 0.0844s | 14% |
2230| setup system | 1 | 0.0351s | 5.9% |
2231| solve system | 1 | 0.362s | 61% |
2232+---------------------------------+-----------+------------+------------+
2233
2234
2235Cycle 1:
2236 Number of active cells: 3351
2237 by partition: 875 761 843 872
2238 Number of degrees of freedom: 18223
2239 by partition: 4535 4735 4543 4410
2240 Number of constraints: 1202
2241 by partition: 303 290 326 283
2242 Frequencies of poly. degrees: 2:2523 3:828
2243 Solved in 7 iterations.
2244
2245
2246+---------------------------------------------+------------+------------+
2247| Total wallclock time elapsed since start | 0.442s | |
2248| | | |
2249| Section | no. calls | wall time | % of total |
2250+---------------------------------+-----------+------------+------------+
2251| adapt resolution | 1 | 0.0189s | 4.3% |
2252| compute indicators | 1 | 0.0135s | 3% |
2253| output results | 1 | 0.064s | 14% |
2254| setup system | 1 | 0.0232s | 5.2% |
2255| solve system | 1 | 0.322s | 73% |
2256+---------------------------------+-----------+------------+------------+
2257
2258
2259...
2260
2261
2262Cycle 7:
2263 Number of active cells: 5610
2264 by partition: 1324 1483 1482 1321
2265 Number of degrees of freedom: 82062
2266 by partition: 21116 19951 20113 20882
2267 Number of constraints: 14383
2268 by partition: 3825 3225 3557 3776
2269 Frequencies of poly. degrees: 2:1130 3:1283 4:2727 5:465 6:5
2270 Solved in 7 iterations.
2271
2272
2273+---------------------------------------------+------------+------------+
2274| Total wallclock time elapsed since start | 0.932s | |
2275| | | |
2276| Section | no. calls | wall time | % of total |
2277+---------------------------------+-----------+------------+------------+
2278| adapt resolution | 1 | 0.0182s | 1.9% |
2279| compute indicators | 1 | 0.0173s | 1.9% |
2280| output results | 1 | 0.0572s | 6.1% |
2281| setup system | 1 | 0.0252s | 2.7% |
2282| solve system | 1 | 0.813s | 87% |
2283+---------------------------------+-----------+------------+------------+
2284@endcode
2285
2286When running the code with more processes, you will notice slight
2287differences in the number of active cells and degrees of freedom. This
2288is due to the fact that solver and preconditioner depend on the
2289partitioning of the problem, which may lead to slight differences in
2290the last digits of the solution and ultimately to different
2291adaptation behavior.
2292
2293Furthermore, the number of iterations for the solver stays about the
2294same in all cycles despite hp-adaptation, indicating the robustness of
2295the proposed algorithms and promising good scalability for even larger
2296problem sizes and on more processes.
2297
2298Let us have a look at the graphical output of the program. After all
2299refinement cycles in the given parameter configuration, the actual
2300discretized function space looks as follows: its partitioning on twelve
2301processes is shown on the left, and the polynomial degrees of the
2302finite elements on the right. In the left picture, each color
2303represents a unique subdomain. In the right picture, the lightest color
2304corresponds to polynomial degree two and the darkest one corresponds
2305to degree six:
2306
2307<div class="twocolumn" style="width: 80%; text-align: center;">
2308 <div>
2309 <img src="https://www.dealii.org/images/steps/developer/step-75.subdomains-07.svg"
2310 alt="Partitioning after seven refinements.">
2311 </div>
2312 <div>
2313 <img src="https://www.dealii.org/images/steps/developer/step-75.fedegrees-07.svg"
2314 alt="Local approximation degrees after seven refinements.">
2315 </div>
2316</div>
2317
2318
2319
2320<a name="extensions"></a>
2321<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
2322
2323
2324<a name="Differenthpdecisionstrategies"></a><h4>Different hp-decision strategies</h4>
2325
2326
2327The deal.II library offers multiple strategies to decide which type of
2328adaptation to impose on cells: either adjust the grid resolution or
2329change the polynomial degree. We only presented the <i>Legendre
2330coefficient decay</i> strategy in this tutorial, while @ref step_27 "step-27"
2331demonstrated the <i>Fourier</i> equivalent of the same idea.
2332
2333See the "possibilities for extensions" section of @ref step_27 "step-27" for an
2334overview of these strategies, or the corresponding documentation
2335for a detailed description.
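
For example, switching from the Legendre to the Fourier variant of the
coefficient decay strategy roughly amounts to exchanging the series expansion
object and the corresponding decay function when computing the hp-indicators.
The following is only a sketch and assumes that the member names of this
program are reused as-is:

@code
// Use the Fourier counterpart of the FESeries::Legendre member, e.g.:
FESeries::Fourier<dim> fourier =
  SmoothnessEstimator::Fourier::default_fe_series(fe_collection);

// ...and call the matching decay estimator when computing hp-indicators:
SmoothnessEstimator::Fourier::coefficient_decay(fourier,
                                                dof_handler,
                                                locally_relevant_solution,
                                                hp_decision_indicators);
@endcode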
2336
2337There, another strategy is mentioned that has not been shown in any
2338tutorial so far: the strategy based on <i>refinement history</i>. The
2339usage of this method for parallel distributed applications is trickier
2340than for the others, so we will highlight the challenges that come
2341along with it. We need information about the final state of the refinement
2342flags, and we need to transfer the solution across refined meshes. For
2343the former, we need to attach the hp::Refinement::predict_error()
2344function to the Triangulation::Signals::post_p4est_refinement signal in
2345such a way that it will be called <i>after</i> the
2346hp::Refinement::limit_p_level_difference() function. At this stage, all
2347refinement flags and future FE indices are set to their final state and a
2348reliable prediction of the error is possible. The predicted error then
2349needs to be transferred across refined meshes with the aid of
2350parallel::distributed::CellDataTransfer.
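
As a rough illustration of the first part, a possible starting point might
look like the following sketch. It reuses the member names of this program,
assumes an additional Vector<float> member called predicted_errors, and needs
to be connected <i>after</i> the lambda that calls
hp::Refinement::limit_p_level_difference() in the constructor, since signals
are triggered in the order in which they were connected:

@code
triangulation.signals.post_p4est_refinement.connect([&]() {
  // Temporarily match the refine and coarsen flags to the decisions made by
  // p4est, so that the prediction sees the final state of all flags.
  const parallel::distributed::TemporarilyMatchRefineFlags<dim>
    refine_modifier(triangulation);

  // Predict errors on the still-active cells of the current mesh.
  predicted_errors.reinit(triangulation.n_active_cells());
  hp::Refinement::predict_error(dof_handler,
                                estimated_error_per_cell,
                                predicted_errors);
});
@endcode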
2351
2352Try implementing one of these strategies into this tutorial and observe
2353the subtle changes to the results. You will notice that all strategies
2354are capable of identifying the singularities near the reentrant corners
2355and will perform @f$h@f$-refinement in these regions, while preferring
2356@f$p@f$-refinement in the bulk domain. A detailed comparison of these
2357strategies is presented in @cite fehling2020 .
2358
2359
2360<a name="Solvewithmatrixbasedmethods"></a><h4>Solve with matrix-based methods</h4>
2361
2362
2363This tutorial focuses solely on matrix-free strategies. All hp-adaptive
2364algorithms, however, also work with matrix-based approaches in the
2365parallel distributed context.
2366
2367To create a system matrix, you can either use the
2368LaplaceOperator::get_system_matrix() function, or use an
2369<code>assemble_system()</code> function similar to the one in @ref step_27 "step-27".
2370You can then pass the system matrix to the solver as usual.
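
A minimal sketch of this route, using the same names as in the rest of the
program where possible (the solver control, solution vector, right-hand side
vector, and constraints are assumed to be set up as in the solve function),
could look like this:

@code
// Let the matrix-free operator assemble an equivalent Trilinos sparse matrix.
const TrilinosWrappers::SparseMatrix &system_matrix =
  laplace_operator.get_system_matrix();

// Precondition and solve the linear system in a matrix-based fashion.
TrilinosWrappers::PreconditionAMG preconditioner;
preconditioner.initialize(system_matrix);

SolverCG<LinearAlgebra::distributed::Vector<double>> cg(solver_control);
cg.solve(system_matrix,
         completely_distributed_solution,
         system_rhs,
         preconditioner);
constraints.distribute(completely_distributed_solution);
@endcode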
2371
2372You can time the results of both matrix-based and matrix-free
2373implementations, quantify the speed-up, and convince yourself which
2374variant is faster.
2375
2376
2377<a name="Multigridvariants"></a><h4>Multigrid variants</h4>
2378
2379
2380For the sake of simplicity, we have restricted ourselves to a single type of
2381coarse-grid solver (CG with AMG), smoother (Chebyshev smoother with
2382point Jacobi preconditioner), and geometric-coarsening scheme (global
2383coarsening) within the multigrid algorithm. Feel free to try out
2384alternatives and investigate their performance and robustness.
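
Many of these choices can be made through the MultigridParameters struct from
the beginning of the program without touching the solver code itself. The
following values are purely illustrative:

@code
Parameters prm;

// Use a different smoother inside the algebraic-multigrid coarse-grid solver.
prm.mg_data.coarse_solver.smoother_type = "Jacobi";

// Use a higher-degree Chebyshev smoother on the multigrid levels.
prm.mg_data.smoother.degree = 8;

// Coarsen polynomial degrees more aggressively and skip the h-coarsening
// part entirely, turning the preconditioner into a pure p-multigrid method.
prm.mg_data.transfer.p_sequence = MGTransferGlobalCoarseningTools::
  PolynomialCoarseningSequenceType::bisect;
prm.mg_data.transfer.perform_h_transfer = false;

LaplaceProblem<2> laplace_problem(prm);
laplace_problem.run();
@endcode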
2385 *
2386 *
2387<a name="PlainProg"></a>
2388<h1> The plain program</h1>
2389@include "step-75.cc"
2390*/