Reference documentation for deal.II version GIT relicensing-224-gc660c0d696 2024-03-28 18:40:02+00:00
\(\newcommand{\dealvcentcolon}{\mathrel{\mathop{:}}}\) \(\newcommand{\dealcoloneq}{\dealvcentcolon\mathrel{\mkern-1.2mu}=}\) \(\newcommand{\jump}[1]{\left[\!\left[ #1 \right]\!\right]}\) \(\newcommand{\average}[1]{\left\{\!\left\{ #1 \right\}\!\right\}}\)
step-50.h
481 *   const unsigned int /*component*/ = 0) const override
482 *   {
483 *   return 1.0;
484 *   }
485 *  
486 *  
487 *   template <typename number>
 488 *   VectorizedArray<number>
 489 *   value(const Point<dim, VectorizedArray<number>> & /*p*/,
490 *   const unsigned int /*component*/ = 0) const
491 *   {
492 *   return VectorizedArray<number>(1.0);
493 *   }
494 *   };
495 *  
496 *  
497 * @endcode
498 *
 499 * This next class represents the diffusion coefficient. It is a variable
 500 * coefficient that equals 100.0 at any point where at least one coordinate is
 501 * less than -0.5, and 1.0 at all other points. As above, a separate value()
 502 * overload returning a VectorizedArray is used for the matrix-free code. An
 503 * @p average_value() function computes the arithmetic average over a set of points.
504 *
505 * @code
506 *   template <int dim>
507 *   class Coefficient : public Function<dim>
508 *   {
509 *   public:
510 *   virtual double value(const Point<dim> &p,
511 *   const unsigned int /*component*/ = 0) const override;
512 *  
513 *   template <typename number>
 514 *   VectorizedArray<number> value(const Point<dim, VectorizedArray<number>> &p,
 515 *   const unsigned int /*component*/ = 0) const;
516 *  
517 *   template <typename number>
518 *   number average_value(const std::vector<Point<dim, number>> &points) const;
519 *  
520 * @endcode
521 *
 522 * When using a coefficient in the MatrixFree framework, we also
 523 * need a function that creates a Table of coefficient values for the
 524 * set of cells provided by the MatrixFree object passed in as argument here.
525 *
526 * @code
527 *   template <typename number>
528 *   std::shared_ptr<Table<2, VectorizedArray<number>>> make_coefficient_table(
529 *   const MatrixFree<dim, number, VectorizedArray<number>> &mf_storage) const;
530 *   };
531 *  
532 *  
533 *  
534 *   template <int dim>
535 *   double Coefficient<dim>::value(const Point<dim> &p, const unsigned int) const
536 *   {
537 *   for (int d = 0; d < dim; ++d)
538 *   {
539 *   if (p[d] < -0.5)
540 *   return 100.0;
541 *   }
542 *   return 1.0;
543 *   }
544 *  
545 *  
546 *  
547 *   template <int dim>
548 *   template <typename number>
 549 *   VectorizedArray<number>
 550 *   Coefficient<dim>::value(const Point<dim, VectorizedArray<number>> &p,
551 *   const unsigned int) const
552 *   {
553 *   VectorizedArray<number> return_value = VectorizedArray<number>(1.0);
554 *   for (unsigned int i = 0; i < VectorizedArray<number>::size(); ++i)
555 *   {
556 *   for (int d = 0; d < dim; ++d)
557 *   if (p[d][i] < -0.5)
558 *   {
559 *   return_value[i] = 100.0;
560 *   break;
561 *   }
562 *   }
563 *  
564 *   return return_value;
565 *   }
566 *  
567 *  
568 *  
569 *   template <int dim>
570 *   template <typename number>
571 *   number Coefficient<dim>::average_value(
572 *   const std::vector<Point<dim, number>> &points) const
573 *   {
574 *   number average(0);
575 *   for (unsigned int i = 0; i < points.size(); ++i)
576 *   average += value(points[i]);
577 *   average /= points.size();
578 *  
579 *   return average;
580 *   }
581 *  
582 *  
583 *  
584 *   template <int dim>
585 *   template <typename number>
586 *   std::shared_ptr<Table<2, VectorizedArray<number>>>
587 *   Coefficient<dim>::make_coefficient_table(
588 *   const MatrixFree<dim, number, VectorizedArray<number>> &mf_storage) const
589 *   {
590 *   auto coefficient_table =
591 *   std::make_shared<Table<2, VectorizedArray<number>>>();
592 *  
593 *   FEEvaluation<dim, -1, 0, 1, number> fe_eval(mf_storage);
594 *  
595 *   const unsigned int n_cells = mf_storage.n_cell_batches();
596 *  
597 *   coefficient_table->reinit(n_cells, 1);
598 *  
599 *   for (unsigned int cell = 0; cell < n_cells; ++cell)
600 *   {
601 *   fe_eval.reinit(cell);
602 *  
603 *   VectorizedArray<number> average_value = 0.;
604 *   for (const unsigned int q : fe_eval.quadrature_point_indices())
605 *   average_value += value(fe_eval.quadrature_point(q));
606 *   average_value /= fe_eval.n_q_points;
607 *  
608 *   (*coefficient_table)(cell, 0) = average_value;
609 *   }
610 *  
611 *   return coefficient_table;
612 *   }
613 *  
614 *  
615 *  
616 * @endcode
617 *
618 *
619 * <a name="step_50-Runtimeparameters"></a>
620 * <h3>Run time parameters</h3>
621 *
622
623 *
624 * We will use ParameterHandler to pass in parameters at runtime. The
625 * structure @p Settings parses and stores these parameters to be queried
626 * throughout the program.
627 *
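As a concrete illustration (a minimal sketch only; the parameter names and default
values below are taken from the declare_entry() calls that follow, and the .prm files
shipped with the tutorial set these same entries), an input file for the matrix-free
GMG solver in 2d could look like this:
@code
set dim             = 2
set n_steps         = 10
set output          = false
set smoother dampen = 1.0
set smoother steps  = 1
set solver          = MF
@endcode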
628 * @code
629 *   struct Settings
630 *   {
631 *   bool try_parse(const std::string &prm_filename);
632 *  
633 *   enum SolverType
634 *   {
635 *   gmg_mb,
636 *   gmg_mf,
637 *   amg
638 *   };
639 *  
640 *   SolverType solver;
641 *  
642 *   int dimension;
643 *   double smoother_dampen;
644 *   unsigned int smoother_steps;
645 *   unsigned int n_steps;
646 *   bool output;
647 *   };
648 *  
649 *  
650 *  
651 *   bool Settings::try_parse(const std::string &prm_filename)
652 *   {
653 *   ParameterHandler prm;
654 *   prm.declare_entry("dim", "2", Patterns::Integer(), "The problem dimension.");
655 *   prm.declare_entry("n_steps",
656 *   "10",
657 *   Patterns::Integer(0),
658 *   "Number of adaptive refinement steps.");
659 *   prm.declare_entry("smoother dampen",
660 *   "1.0",
661 *   Patterns::Double(0.0),
662 *   "Dampen factor for the smoother.");
663 *   prm.declare_entry("smoother steps",
664 *   "1",
665 *   Patterns::Integer(1),
666 *   "Number of smoother steps.");
667 *   prm.declare_entry("solver",
668 *   "MF",
669 *   Patterns::Selection("MF|MB|AMG"),
670 *   "Switch between matrix-free GMG, "
671 *   "matrix-based GMG, and AMG.");
672 *   prm.declare_entry("output",
673 *   "false",
674 *   Patterns::Bool(),
675 *   "Output graphical results.");
676 *  
677 *   if (prm_filename.empty())
678 *   {
679 *   std::cout << "**** Error: No input file provided!\n"
680 *   << "**** Error: Call this program as './step-50 input.prm\n"
681 *   << '\n'
682 *   << "**** You may want to use one of the input files in this\n"
683 *   << "**** directory, or use the following default values\n"
684 *   << "**** to create an input file:\n";
685 *   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
686 *   prm.print_parameters(std::cout, ParameterHandler::Text);
687 *   return false;
688 *   }
689 *  
690 *   try
691 *   {
692 *   prm.parse_input(prm_filename);
693 *   }
694 *   catch (std::exception &e)
695 *   {
696 *   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
697 *   std::cerr << e.what() << std::endl;
698 *   return false;
699 *   }
700 *  
701 *   if (prm.get("solver") == "MF")
702 *   this->solver = gmg_mf;
703 *   else if (prm.get("solver") == "MB")
704 *   this->solver = gmg_mb;
705 *   else if (prm.get("solver") == "AMG")
706 *   this->solver = amg;
707 *   else
708 *   AssertThrow(false, ExcNotImplemented());
709 *  
710 *   this->dimension = prm.get_integer("dim");
711 *   this->n_steps = prm.get_integer("n_steps");
712 *   this->smoother_dampen = prm.get_double("smoother dampen");
713 *   this->smoother_steps = prm.get_integer("smoother steps");
714 *   this->output = prm.get_bool("output");
715 *  
716 *   return true;
717 *   }
718 *  
719 *  
720 *  
721 * @endcode
722 *
723 *
724 * <a name="step_50-LaplaceProblemclass"></a>
725 * <h3>LaplaceProblem class</h3>
726 *
727
728 *
729 * This is the main class of the program. It looks very similar to
730 * @ref step_16 "step-16", @ref step_37 "step-37", and @ref step_40 "step-40". For the MatrixFree setup, we use the
731 * MatrixFreeOperators::LaplaceOperator class which defines `local_apply()`,
732 * `compute_diagonal()`, and `set_coefficient()` functions internally. Note that
733 * the polynomial degree is a template parameter of this class. This is
734 * necessary for the matrix-free code.
735 *
736 * @code
737 *   template <int dim, int degree>
738 *   class LaplaceProblem
739 *   {
740 *   public:
741 *   LaplaceProblem(const Settings &settings);
742 *   void run();
743 *  
744 *   private:
745 * @endcode
746 *
 747 * We will use the following types throughout the program. First come the
 748 * matrix-based types, followed by the matrix-free classes. For the
 749 * matrix-free implementation, we use @p float for the level operators.
750 *
751 * @code
752 *   using MatrixType = LA::MPI::SparseMatrix;
753 *   using VectorType = LA::MPI::Vector;
754 *   using PreconditionAMG = LA::MPI::PreconditionAMG;
755 *  
756 *   using MatrixFreeLevelMatrix = MatrixFreeOperators::LaplaceOperator<
757 *   dim,
758 *   degree,
759 *   degree + 1,
 760 *   1,
 761 *   LinearAlgebra::distributed::Vector<float>>;
 762 *   using MatrixFreeActiveMatrix = MatrixFreeOperators::LaplaceOperator<
763 *   dim,
764 *   degree,
765 *   degree + 1,
 766 *   1,
 767 *   LinearAlgebra::distributed::Vector<double>>;
 768 *  
769 *   using MatrixFreeLevelVector = LinearAlgebra::distributed::Vector<float>;
770 *   using MatrixFreeActiveVector = LinearAlgebra::distributed::Vector<double>;
771 *  
772 *   void setup_system();
773 *   void setup_multigrid();
774 *   void assemble_system();
775 *   void assemble_multigrid();
776 *   void assemble_rhs();
777 *   void solve();
778 *   void estimate();
779 *   void refine_grid();
780 *   void output_results(const unsigned int cycle);
781 *  
782 *   Settings settings;
783 *  
784 *   MPI_Comm mpi_communicator;
785 *   ConditionalOStream pcout;
786 *  
 787 *   parallel::distributed::Triangulation<dim> triangulation;
 788 *   const MappingQ1<dim> mapping;
789 *   FE_Q<dim> fe;
790 *  
791 *   DoFHandler<dim> dof_handler;
792 *  
793 *   IndexSet locally_owned_dofs;
794 *   IndexSet locally_relevant_dofs;
795 *   AffineConstraints<double> constraints;
796 *  
797 *   MatrixType system_matrix;
798 *   MatrixFreeActiveMatrix mf_system_matrix;
799 *   VectorType solution;
800 *   VectorType right_hand_side;
801 *   Vector<double> estimated_error_square_per_cell;
802 *  
803 *   MGLevelObject<MatrixType> mg_matrix;
804 *   MGLevelObject<MatrixType> mg_interface_in;
805 *   MGConstrainedDoFs mg_constrained_dofs;
806 *  
807 *   MGLevelObject<MatrixFreeLevelMatrix> mf_mg_matrix;
808 *  
809 *   TimerOutput computing_timer;
810 *   };
811 *  
812 *  
813 * @endcode
814 *
815 * The only interesting part about the constructor is that we construct the
816 * multigrid hierarchy unless we use AMG. For that, we need to parse the
817 * run time parameters before this constructor completes.
818 *
819 * @code
820 *   template <int dim, int degree>
821 *   LaplaceProblem<dim, degree>::LaplaceProblem(const Settings &settings)
822 *   : settings(settings)
823 *   , mpi_communicator(MPI_COMM_WORLD)
824 *   , pcout(std::cout, (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
 825 *   , triangulation(mpi_communicator,
 826 *   Triangulation<dim>::limit_level_difference_at_vertices,
 827 *   (settings.solver == Settings::amg) ?
 828 *   parallel::distributed::Triangulation<dim>::default_setting :
 829 *   parallel::distributed::Triangulation<
 830 *   dim>::construct_multigrid_hierarchy)
 831 *   , mapping()
832 *   , fe(degree)
833 *   , dof_handler(triangulation)
834 *   , computing_timer(pcout, TimerOutput::never, TimerOutput::wall_times)
835 *   {
836 *   GridGenerator::hyper_L(triangulation, -1., 1., /*colorize*/ false);
837 *   triangulation.refine_global(1);
838 *   }
839 *  
840 *  
841 *  
842 * @endcode
843 *
844 *
845 * <a name="step_50-LaplaceProblemsetup_system"></a>
846 * <h4>LaplaceProblem::setup_system()</h4>
847 *
848
849 *
 850 * Unlike @ref step_16 "step-16" and @ref step_37 "step-37", we split the setup into two parts,
851 * setup_system() and setup_multigrid(). Here is the typical setup_system()
852 * function for the active mesh found in most tutorials. For matrix-free, the
853 * active mesh set up is similar to @ref step_37 "step-37"; for matrix-based (GMG and AMG
854 * solvers), the setup is similar to @ref step_40 "step-40".
855 *
856 * @code
857 *   template <int dim, int degree>
858 *   void LaplaceProblem<dim, degree>::setup_system()
859 *   {
860 *   TimerOutput::Scope timing(computing_timer, "Setup");
861 *  
862 *   dof_handler.distribute_dofs(fe);
863 *  
864 *   locally_relevant_dofs = DoFTools::extract_locally_relevant_dofs(dof_handler);
865 *   locally_owned_dofs = dof_handler.locally_owned_dofs();
866 *  
867 *   solution.reinit(locally_owned_dofs, mpi_communicator);
868 *   right_hand_side.reinit(locally_owned_dofs, mpi_communicator);
869 *   constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
870 *   DoFTools::make_hanging_node_constraints(dof_handler, constraints);
871 *  
 872 *   VectorTools::interpolate_boundary_values(
 873 *   mapping, dof_handler, 0, Functions::ZeroFunction<dim>(), constraints);
874 *   constraints.close();
875 *  
876 *   switch (settings.solver)
877 *   {
878 *   case Settings::gmg_mf:
879 *   {
880 *   typename MatrixFree<dim, double>::AdditionalData additional_data;
881 *   additional_data.tasks_parallel_scheme =
 882 *   MatrixFree<dim, double>::AdditionalData::none;
 883 *   additional_data.mapping_update_flags =
 884 *   (update_gradients | update_JxW_values | update_quadrature_points);
 885 *   std::shared_ptr<MatrixFree<dim, double>> mf_storage =
886 *   std::make_shared<MatrixFree<dim, double>>();
887 *   mf_storage->reinit(mapping,
888 *   dof_handler,
889 *   constraints,
890 *   QGauss<1>(degree + 1),
891 *   additional_data);
892 *  
893 *   mf_system_matrix.initialize(mf_storage);
894 *  
895 *   const Coefficient<dim> coefficient;
896 *   mf_system_matrix.set_coefficient(
897 *   coefficient.make_coefficient_table(*mf_storage));
898 *  
899 *   break;
900 *   }
901 *  
902 *   case Settings::gmg_mb:
903 *   case Settings::amg:
904 *   {
905 *   #ifdef USE_PETSC_LA
906 *   DynamicSparsityPattern dsp(locally_relevant_dofs);
907 *   DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
908 *  
 909 *   SparsityTools::distribute_sparsity_pattern(dsp,
 910 *   locally_owned_dofs,
911 *   mpi_communicator,
912 *   locally_relevant_dofs);
913 *  
914 *   system_matrix.reinit(locally_owned_dofs,
915 *   locally_owned_dofs,
916 *   dsp,
917 *   mpi_communicator);
918 *   #else
919 *   TrilinosWrappers::SparsityPattern dsp(locally_owned_dofs,
920 *   locally_owned_dofs,
921 *   locally_relevant_dofs,
922 *   mpi_communicator);
923 *   DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
924 *   dsp.compress();
925 *   system_matrix.reinit(dsp);
926 *   #endif
927 *  
928 *   break;
929 *   }
930 *  
931 *   default:
 932 *   DEAL_II_ASSERT_UNREACHABLE();
 933 *   }
934 *   }
935 *  
936 * @endcode
937 *
938 *
939 * <a name="step_50-LaplaceProblemsetup_multigrid"></a>
940 * <h4>LaplaceProblem::setup_multigrid()</h4>
941 *
942
943 *
944 * This function does the multilevel setup for both matrix-free and
945 * matrix-based GMG. The matrix-free setup is similar to that of @ref step_37 "step-37", and
946 * the matrix-based is similar to @ref step_16 "step-16", except we must use appropriate
947 * distributed sparsity patterns.
948 *
949
950 *
951 * The function is not called for the AMG approach, but to err on the
952 * safe side, the main `switch` statement of this function
953 * nevertheless makes sure that the function only operates on known
954 * multigrid settings by throwing an assertion if the function were
955 * called for anything other than the two geometric multigrid methods.
956 *
957 * @code
958 *   template <int dim, int degree>
959 *   void LaplaceProblem<dim, degree>::setup_multigrid()
960 *   {
961 *   TimerOutput::Scope timing(computing_timer, "Setup multigrid");
962 *  
963 *   dof_handler.distribute_mg_dofs();
964 *  
965 *   mg_constrained_dofs.clear();
966 *   mg_constrained_dofs.initialize(dof_handler);
967 *  
968 *   const std::set<types::boundary_id> boundary_ids = {types::boundary_id(0)};
969 *   mg_constrained_dofs.make_zero_boundary_constraints(dof_handler, boundary_ids);
970 *  
971 *   const unsigned int n_levels = triangulation.n_global_levels();
972 *  
973 *   switch (settings.solver)
974 *   {
975 *   case Settings::gmg_mf:
976 *   {
977 *   mf_mg_matrix.resize(0, n_levels - 1);
978 *  
979 *   for (unsigned int level = 0; level < n_levels; ++level)
980 *   {
981 *   AffineConstraints<double> level_constraints(
982 *   dof_handler.locally_owned_mg_dofs(level),
 983 *   DoFTools::extract_locally_relevant_level_dofs(dof_handler,
 984 *   level));
985 *   for (const types::global_dof_index dof_index :
986 *   mg_constrained_dofs.get_boundary_indices(level))
987 *   level_constraints.constrain_dof_to_zero(dof_index);
988 *   level_constraints.close();
989 *  
990 *   typename MatrixFree<dim, float>::AdditionalData additional_data;
991 *   additional_data.tasks_parallel_scheme =
 992 *   MatrixFree<dim, float>::AdditionalData::none;
 993 *   additional_data.mapping_update_flags =
 994 *   (update_gradients | update_JxW_values |
 995 *   update_quadrature_points);
 996 *   additional_data.mg_level = level;
997 *   std::shared_ptr<MatrixFree<dim, float>> mf_storage_level(
998 *   new MatrixFree<dim, float>());
999 *   mf_storage_level->reinit(mapping,
1000 *   dof_handler,
1001 *   level_constraints,
1002 *   QGauss<1>(degree + 1),
1003 *   additional_data);
1004 *  
1005 *   mf_mg_matrix[level].initialize(mf_storage_level,
1006 *   mg_constrained_dofs,
1007 *   level);
1008 *  
1009 *   const Coefficient<dim> coefficient;
1010 *   mf_mg_matrix[level].set_coefficient(
1011 *   coefficient.make_coefficient_table(*mf_storage_level));
1012 *  
1013 *   mf_mg_matrix[level].compute_diagonal();
1014 *   }
1015 *  
1016 *   break;
1017 *   }
1018 *  
1019 *   case Settings::gmg_mb:
1020 *   {
1021 *   mg_matrix.resize(0, n_levels - 1);
1022 *   mg_matrix.clear_elements();
1023 *   mg_interface_in.resize(0, n_levels - 1);
1024 *   mg_interface_in.clear_elements();
1025 *  
1026 *   for (unsigned int level = 0; level < n_levels; ++level)
1027 *   {
1028 *   const IndexSet dof_set =
 1029 *   DoFTools::extract_locally_relevant_level_dofs(dof_handler,
 1030 *   level);
1031 *  
1032 *   {
1033 *   #ifdef USE_PETSC_LA
1034 *   DynamicSparsityPattern dsp(dof_set);
1035 *   MGTools::make_sparsity_pattern(dof_handler, dsp, level);
1036 *   dsp.compress();
 1037 *   SparsityTools::distribute_sparsity_pattern(
 1038 *   dsp,
1039 *   dof_handler.locally_owned_mg_dofs(level),
1040 *   mpi_communicator,
1041 *   dof_set);
1042 *  
1043 *   mg_matrix[level].reinit(
1044 *   dof_handler.locally_owned_mg_dofs(level),
1045 *   dof_handler.locally_owned_mg_dofs(level),
1046 *   dsp,
1047 *   mpi_communicator);
1048 *   #else
 1049 *   TrilinosWrappers::SparsityPattern dsp(
 1050 *   dof_handler.locally_owned_mg_dofs(level),
1051 *   dof_handler.locally_owned_mg_dofs(level),
1052 *   dof_set,
1053 *   mpi_communicator);
1054 *   MGTools::make_sparsity_pattern(dof_handler, dsp, level);
1055 *  
1056 *   dsp.compress();
1057 *   mg_matrix[level].reinit(dsp);
1058 *   #endif
1059 *   }
1060 *  
1061 *   {
1062 *   #ifdef USE_PETSC_LA
1063 *   DynamicSparsityPattern dsp(dof_set);
 1064 *   MGTools::make_interface_sparsity_pattern(dof_handler,
 1065 *   mg_constrained_dofs,
1066 *   dsp,
1067 *   level);
1068 *   dsp.compress();
 1069 *   SparsityTools::distribute_sparsity_pattern(
 1070 *   dsp,
1071 *   dof_handler.locally_owned_mg_dofs(level),
1072 *   mpi_communicator,
1073 *   dof_set);
1074 *  
1075 *   mg_interface_in[level].reinit(
1076 *   dof_handler.locally_owned_mg_dofs(level),
1077 *   dof_handler.locally_owned_mg_dofs(level),
1078 *   dsp,
1079 *   mpi_communicator);
1080 *   #else
 1081 *   TrilinosWrappers::SparsityPattern dsp(
 1082 *   dof_handler.locally_owned_mg_dofs(level),
1083 *   dof_handler.locally_owned_mg_dofs(level),
1084 *   dof_set,
1085 *   mpi_communicator);
1086 *  
 1087 *   MGTools::make_interface_sparsity_pattern(dof_handler,
 1088 *   mg_constrained_dofs,
1089 *   dsp,
1090 *   level);
1091 *   dsp.compress();
1092 *   mg_interface_in[level].reinit(dsp);
1093 *   #endif
1094 *   }
1095 *   }
1096 *   break;
1097 *   }
1098 *  
1099 *   default:
 1100 *   DEAL_II_ASSERT_UNREACHABLE();
 1101 *   }
1102 *   }
1103 *  
1104 *  
1105 * @endcode
1106 *
1107 *
1108 * <a name="step_50-LaplaceProblemassemble_system"></a>
1109 * <h4>LaplaceProblem::assemble_system()</h4>
1110 *
1111
1112 *
1113 * The assembly is split into three parts: `assemble_system()`,
1114 * `assemble_multigrid()`, and `assemble_rhs()`. The
1115 * `assemble_system()` function here assembles and stores the (global)
1116 * system matrix and the right-hand side for the matrix-based
1117 * methods. It is similar to the assembly in @ref step_40 "step-40".
1118 *
1119
1120 *
1121 * Note that the matrix-free method does not execute this function as it does
1122 * not need to assemble a matrix, and it will instead assemble the right-hand
1123 * side in assemble_rhs().
1124 *
1125 * @code
1126 *   template <int dim, int degree>
1127 *   void LaplaceProblem<dim, degree>::assemble_system()
1128 *   {
1129 *   TimerOutput::Scope timing(computing_timer, "Assemble");
1130 *  
1131 *   const QGauss<dim> quadrature_formula(degree + 1);
1132 *  
1133 *   FEValues<dim> fe_values(fe,
 1134 *   quadrature_formula,
 1135 *   update_values | update_gradients |
 1136 *   update_quadrature_points | update_JxW_values);
 1137 *  
1138 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1139 *   const unsigned int n_q_points = quadrature_formula.size();
1140 *  
1141 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1142 *   Vector<double> cell_rhs(dofs_per_cell);
1143 *  
1144 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1145 *  
1146 *   const Coefficient<dim> coefficient;
1147 *   RightHandSide<dim> rhs;
1148 *   std::vector<double> rhs_values(n_q_points);
1149 *  
1150 *   for (const auto &cell : dof_handler.active_cell_iterators())
1151 *   if (cell->is_locally_owned())
1152 *   {
1153 *   cell_matrix = 0;
1154 *   cell_rhs = 0;
1155 *  
1156 *   fe_values.reinit(cell);
1157 *  
1158 *   const double coefficient_value =
1159 *   coefficient.average_value(fe_values.get_quadrature_points());
1160 *   rhs.value_list(fe_values.get_quadrature_points(), rhs_values);
1161 *  
1162 *   for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1163 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1164 *   {
1165 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1166 *   cell_matrix(i, j) +=
1167 *   coefficient_value * // epsilon(x)
1168 *   fe_values.shape_grad(i, q_point) * // * grad phi_i(x)
1169 *   fe_values.shape_grad(j, q_point) * // * grad phi_j(x)
1170 *   fe_values.JxW(q_point); // * dx
1171 *  
1172 *   cell_rhs(i) +=
 1173 *   fe_values.shape_value(i, q_point) * // phi_i(x)
1174 *   rhs_values[q_point] * // * f(x)
1175 *   fe_values.JxW(q_point); // * dx
1176 *   }
1177 *  
1178 *   cell->get_dof_indices(local_dof_indices);
1179 *   constraints.distribute_local_to_global(cell_matrix,
1180 *   cell_rhs,
1181 *   local_dof_indices,
1182 *   system_matrix,
1183 *   right_hand_side);
1184 *   }
1185 *  
1186 *   system_matrix.compress(VectorOperation::add);
1187 *   right_hand_side.compress(VectorOperation::add);
1188 *   }
1189 *  
1190 *  
1191 * @endcode
1192 *
1193 *
1194 * <a name="step_50-LaplaceProblemassemble_multigrid"></a>
1195 * <h4>LaplaceProblem::assemble_multigrid()</h4>
1196 *
1197
1198 *
1199 * The following function assembles and stores the multilevel matrices for the
1200 * matrix-based GMG method. This function is similar to the one found in
1201 * @ref step_16 "step-16", only here it works for distributed meshes. This difference amounts
1202 * to adding a condition that we only assemble on locally owned level cells and
1203 * a call to compress() for each matrix that is built.
1204 *
1205 * @code
1206 *   template <int dim, int degree>
1207 *   void LaplaceProblem<dim, degree>::assemble_multigrid()
1208 *   {
1209 *   TimerOutput::Scope timing(computing_timer, "Assemble multigrid");
1210 *  
1211 *   QGauss<dim> quadrature_formula(degree + 1);
1212 *  
1213 *   FEValues<dim> fe_values(fe,
 1214 *   quadrature_formula,
 1215 *   update_values | update_gradients |
 1216 *   update_quadrature_points | update_JxW_values);
 1217 *  
1218 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1219 *   const unsigned int n_q_points = quadrature_formula.size();
1220 *  
1221 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1222 *  
1223 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1224 *  
1225 *   const Coefficient<dim> coefficient;
1226 *  
1227 *   std::vector<AffineConstraints<double>> boundary_constraints(
1228 *   triangulation.n_global_levels());
1229 *   for (unsigned int level = 0; level < triangulation.n_global_levels(); ++level)
1230 *   {
1231 *   boundary_constraints[level].reinit(
 1232 *   dof_handler.locally_owned_mg_dofs(level),
 1233 *   DoFTools::extract_locally_relevant_level_dofs(dof_handler, level));
 1234 *  
1235 *   for (const types::global_dof_index dof_index :
1236 *   mg_constrained_dofs.get_refinement_edge_indices(level))
1237 *   boundary_constraints[level].constrain_dof_to_zero(dof_index);
1238 *   for (const types::global_dof_index dof_index :
1239 *   mg_constrained_dofs.get_boundary_indices(level))
1240 *   boundary_constraints[level].constrain_dof_to_zero(dof_index);
1241 *   boundary_constraints[level].close();
1242 *   }
1243 *  
1244 *   for (const auto &cell : dof_handler.cell_iterators())
1245 *   if (cell->level_subdomain_id() == triangulation.locally_owned_subdomain())
1246 *   {
1247 *   cell_matrix = 0;
1248 *   fe_values.reinit(cell);
1249 *  
1250 *   const double coefficient_value =
1251 *   coefficient.average_value(fe_values.get_quadrature_points());
1252 *  
1253 *   for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1254 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1255 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1256 *   cell_matrix(i, j) +=
1257 *   coefficient_value * fe_values.shape_grad(i, q_point) *
1258 *   fe_values.shape_grad(j, q_point) * fe_values.JxW(q_point);
1259 *  
1260 *   cell->get_mg_dof_indices(local_dof_indices);
1261 *  
1262 *   boundary_constraints[cell->level()].distribute_local_to_global(
1263 *   cell_matrix, local_dof_indices, mg_matrix[cell->level()]);
1264 *  
1265 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1266 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1267 *   if (mg_constrained_dofs.is_interface_matrix_entry(
1268 *   cell->level(), local_dof_indices[i], local_dof_indices[j]))
1269 *   mg_interface_in[cell->level()].add(local_dof_indices[i],
1270 *   local_dof_indices[j],
1271 *   cell_matrix(i, j));
1272 *   }
1273 *  
1274 *   for (unsigned int i = 0; i < triangulation.n_global_levels(); ++i)
1275 *   {
1276 *   mg_matrix[i].compress(VectorOperation::add);
1277 *   mg_interface_in[i].compress(VectorOperation::add);
1278 *   }
1279 *   }
1280 *  
1281 *  
1282 *  
1283 * @endcode
1284 *
1285 *
1286 * <a name="step_50-LaplaceProblemassemble_rhs"></a>
1287 * <h4>LaplaceProblem::assemble_rhs()</h4>
1288 *
1289
1290 *
1291 * The final function in this triptych assembles the right-hand side
1292 * vector for the matrix-free method -- because in the matrix-free
1293 * framework, we don't have to assemble the matrix and can get away
 1294 * with only assembling the right-hand side. We could do this by extracting the
 1295 * code from the `assemble_system()` function above that deals with the right-hand
 1296 * side, but we decide instead to go all in on the matrix-free approach and
 1297 * do this assembly in a matrix-free way as well.
1298 *
1299
1300 *
1301 * The result is a function that is similar
1302 * to the one found in the "Use FEEvaluation::read_dof_values_plain()
1303 * to avoid resolving constraints" subsection in the "Possibilities
1304 * for extensions" section of @ref step_37 "step-37".
1305 *
1306
1307 *
1308 * The reason for this function is that the MatrixFree operators do not take
1309 * into account non-homogeneous Dirichlet constraints, instead treating all
1310 * Dirichlet constraints as homogeneous. To account for this, the right-hand
1311 * side here is assembled as the residual @f$r_0 = f-Au_0@f$, where @f$u_0@f$ is a
1312 * zero vector except in the Dirichlet values. Then when solving, we have that
1313 * the solution is @f$u = u_0 + A^{-1}r_0@f$. This can be seen as a Newton
1314 * iteration on a linear system with initial guess @f$u_0@f$. The CG solve in the
1315 * `solve()` function below computes @f$A^{-1}r_0@f$ and the call to
1316 * `constraints.distribute()` (which directly follows) adds the @f$u_0@f$.
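Spelled out (this only restates the relations above in one place): writing
@f$u = u_0 + w@f$ with @f$w@f$ vanishing on the constrained degrees of freedom, the
system @f$Au = f@f$ turns into
@f[
  A w \;=\; f - A u_0 \;=\; r_0 ,
@f]
so the homogeneously constrained CG solve returns @f$w = A^{-1}r_0@f$, and adding
@f$u_0@f$ back yields @f$u = u_0 + A^{-1}r_0@f$ as stated above.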
1317 *
1318
1319 *
 1320 * Obviously, since we are considering a problem with zero Dirichlet boundary values,
1321 * we could have taken a similar approach to @ref step_37 "step-37" `assemble_rhs()`, but this
1322 * additional work allows us to change the problem declaration if we so
1323 * choose.
1324 *
1325
1326 *
1327 * This function has two parts in the integration loop: applying the negative
1328 * of matrix @f$A@f$ to @f$u_0@f$ by submitting the negative of the gradient, and adding
1329 * the right-hand side contribution by submitting the value @f$f@f$. We must be sure
1330 * to use `read_dof_values_plain()` for evaluating @f$u_0@f$ as `read_dof_values()`
1331 * would set all Dirichlet values to zero.
1332 *
1333
1334 *
 1335 * Finally, the right_hand_side vector is of type LA::MPI::Vector, but the
 1336 * MatrixFree class only works with
 1337 * LinearAlgebra::distributed::Vector. Therefore we must
 1338 * compute the right-hand side using MatrixFree functionality and then
 1339 * use the functions in the `ChangeVectorTypes` namespace to copy it to
 1340 * the correct type.
1341 *
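The actual `ChangeVectorTypes` namespace is defined earlier in step-50 and is not part
of the excerpt shown here. Purely as an illustration, a minimal sketch of such a
conversion helper, assuming the Trilinos backend and a deal.II version that provides
the import_elements() interface, could route the transfer through a
LinearAlgebra::ReadWriteVector on the locally owned index range:
@code
namespace ChangeVectorTypesSketch // hypothetical name, for illustration only
{
  void copy(LinearAlgebra::distributed::Vector<double> &out,
            const TrilinosWrappers::MPI::Vector        &in)
  {
    // Gather the locally owned entries of the Trilinos vector ...
    LinearAlgebra::ReadWriteVector<double> rwv(in.locally_owned_elements());
    rwv.import_elements(in, VectorOperation::insert);
    // ... and write them into the matrix-free vector.
    out.import_elements(rwv, VectorOperation::insert);
  }

  void copy(TrilinosWrappers::MPI::Vector                    &out,
            const LinearAlgebra::distributed::Vector<double> &in)
  {
    // Same idea in the other direction.
    LinearAlgebra::ReadWriteVector<double> rwv(out.locally_owned_elements());
    rwv.import_elements(in, VectorOperation::insert);
    out.import_elements(rwv, VectorOperation::insert);
  }
} // namespace ChangeVectorTypesSketch
@endcode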
1342 * @code
1343 *   template <int dim, int degree>
1344 *   void LaplaceProblem<dim, degree>::assemble_rhs()
1345 *   {
1346 *   TimerOutput::Scope timing(computing_timer, "Assemble right-hand side");
1347 *  
1348 *   MatrixFreeActiveVector solution_copy;
1349 *   MatrixFreeActiveVector right_hand_side_copy;
1350 *   mf_system_matrix.initialize_dof_vector(solution_copy);
1351 *   mf_system_matrix.initialize_dof_vector(right_hand_side_copy);
1352 *  
1353 *   solution_copy = 0.;
1354 *   constraints.distribute(solution_copy);
1355 *   solution_copy.update_ghost_values();
1356 *   right_hand_side_copy = 0;
1357 *   const Table<2, VectorizedArray<double>> &coefficient =
1358 *   *(mf_system_matrix.get_coefficient());
1359 *  
1360 *   RightHandSide<dim> right_hand_side_function;
1361 *  
1362 *   FEEvaluation<dim, degree, degree + 1, 1, double> phi(
1363 *   *mf_system_matrix.get_matrix_free());
1364 *  
1365 *   for (unsigned int cell = 0;
1366 *   cell < mf_system_matrix.get_matrix_free()->n_cell_batches();
1367 *   ++cell)
1368 *   {
1369 *   phi.reinit(cell);
1370 *   phi.read_dof_values_plain(solution_copy);
1371 *   phi.evaluate(EvaluationFlags::gradients);
1372 *  
1373 *   for (const unsigned int q : phi.quadrature_point_indices())
1374 *   {
1375 *   phi.submit_gradient(-1.0 *
1376 *   (coefficient(cell, 0) * phi.get_gradient(q)),
1377 *   q);
1378 *   phi.submit_value(
1379 *   right_hand_side_function.value(phi.quadrature_point(q)), q);
1380 *   }
1381 *  
1382 *   phi.integrate_scatter(EvaluationFlags::values |
1383 *   EvaluationFlags::gradients,
1384 *   right_hand_side_copy);
1385 *   }
1386 *  
1387 *   right_hand_side_copy.compress(VectorOperation::add);
1388 *  
1389 *   ChangeVectorTypes::copy(right_hand_side, right_hand_side_copy);
1390 *   }
1391 *  
1392 *  
1393 *  
1394 * @endcode
1395 *
1396 *
1397 * <a name="step_50-LaplaceProblemsolve"></a>
1398 * <h4>LaplaceProblem::solve()</h4>
1399 *
1400
1401 *
1402 * Here we set up the multigrid preconditioner, test the timing of a single
1403 * V-cycle, and solve the linear system. Unsurprisingly, this is one of the
1404 * places where the three methods differ the most.
1405 *
1406 * @code
1407 *   template <int dim, int degree>
1408 *   void LaplaceProblem<dim, degree>::solve()
1409 *   {
1410 *   TimerOutput::Scope timing(computing_timer, "Solve");
1411 *  
1412 *   SolverControl solver_control(1000, 1.e-10 * right_hand_side.l2_norm());
1413 *   solver_control.enable_history_data();
1414 *  
1415 *   solution = 0.;
1416 *  
1417 * @endcode
1418 *
1419 * The solver for the matrix-free GMG method is similar to @ref step_37 "step-37", apart
1420 * from adding some interface matrices in complete analogy to @ref step_16 "step-16".
1421 *
1422 * @code
1423 *   switch (settings.solver)
1424 *   {
1425 *   case Settings::gmg_mf:
1426 *   {
1427 *   computing_timer.enter_subsection("Solve: Preconditioner setup");
1428 *  
1429 *   MGTransferMatrixFree<dim, float> mg_transfer(mg_constrained_dofs);
1430 *   mg_transfer.build(dof_handler);
1431 *  
1432 *   SolverControl coarse_solver_control(1000, 1e-12, false, false);
1433 *   SolverCG<MatrixFreeLevelVector> coarse_solver(coarse_solver_control);
1434 *   PreconditionIdentity identity;
1435 *   MGCoarseGridIterativeSolver<MatrixFreeLevelVector,
1436 *   SolverCG<MatrixFreeLevelVector>,
1437 *   MatrixFreeLevelMatrix,
1438 *   PreconditionIdentity>
1439 *   coarse_grid_solver(coarse_solver, mf_mg_matrix[0], identity);
1440 *  
1441 *   using Smoother = PreconditionJacobi<MatrixFreeLevelMatrix>;
1442 *   MGSmootherPrecondition<MatrixFreeLevelMatrix,
1443 *   Smoother,
1444 *   MatrixFreeLevelVector>
1445 *   smoother;
1446 *   smoother.initialize(mf_mg_matrix,
1447 *   typename Smoother::AdditionalData(
1448 *   settings.smoother_dampen));
1449 *   smoother.set_steps(settings.smoother_steps);
1450 *  
1451 *   mg::Matrix<MatrixFreeLevelVector> mg_m(mf_mg_matrix);
1452 *  
1453 *   MGLevelObject<
1454 *   MatrixFreeOperators::MGInterfaceOperator<MatrixFreeLevelMatrix>>
1455 *   mg_interface_matrices;
1456 *   mg_interface_matrices.resize(0, triangulation.n_global_levels() - 1);
1457 *   for (unsigned int level = 0; level < triangulation.n_global_levels();
1458 *   ++level)
1459 *   mg_interface_matrices[level].initialize(mf_mg_matrix[level]);
1460 *   mg::Matrix<MatrixFreeLevelVector> mg_interface(mg_interface_matrices);
1461 *  
1462 *   Multigrid<MatrixFreeLevelVector> mg(
1463 *   mg_m, coarse_grid_solver, mg_transfer, smoother, smoother);
1464 *   mg.set_edge_matrices(mg_interface, mg_interface);
1465 *  
1466 *   PreconditionMG<dim,
1467 *   MatrixFreeLevelVector,
1468 *   MGTransferMatrixFree<dim, float>>
1469 *   preconditioner(dof_handler, mg, mg_transfer);
1470 *  
1471 * @endcode
1472 *
1473 * Copy the solution vector and right-hand side from LA::MPI::Vector
1474 * to LinearAlgebra::distributed::Vector so that we can solve.
1475 *
1476 * @code
1477 *   MatrixFreeActiveVector solution_copy;
1478 *   MatrixFreeActiveVector right_hand_side_copy;
1479 *   mf_system_matrix.initialize_dof_vector(solution_copy);
1480 *   mf_system_matrix.initialize_dof_vector(right_hand_side_copy);
1481 *  
1482 *   ChangeVectorTypes::copy(solution_copy, solution);
1483 *   ChangeVectorTypes::copy(right_hand_side_copy, right_hand_side);
1484 *   computing_timer.leave_subsection("Solve: Preconditioner setup");
1485 *  
1486 * @endcode
1487 *
1488 * Timing for 1 V-cycle.
1489 *
1490 * @code
1491 *   {
1492 *   TimerOutput::Scope timing(computing_timer,
1493 *   "Solve: 1 multigrid V-cycle");
1494 *   preconditioner.vmult(solution_copy, right_hand_side_copy);
1495 *   }
1496 *   solution_copy = 0.;
1497 *  
1498 * @endcode
1499 *
1500 * Solve the linear system, update the ghost values of the solution,
1501 * copy back to LA::MPI::Vector and distribute constraints.
1502 *
1503 * @code
1504 *   {
1505 *   SolverCG<MatrixFreeActiveVector> solver(solver_control);
1506 *  
1507 *   TimerOutput::Scope timing(computing_timer, "Solve: CG");
1508 *   solver.solve(mf_system_matrix,
1509 *   solution_copy,
1510 *   right_hand_side_copy,
1511 *   preconditioner);
1512 *   }
1513 *  
1514 *   solution_copy.update_ghost_values();
1515 *   ChangeVectorTypes::copy(solution, solution_copy);
1516 *   constraints.distribute(solution);
1517 *  
1518 *   break;
1519 *   }
1520 *  
1521 * @endcode
1522 *
1523 * Solver for the matrix-based GMG method, similar to @ref step_16 "step-16", only
1524 * using a Jacobi smoother instead of a SOR smoother (which is not
1525 * implemented in parallel).
1526 *
1527 * @code
1528 *   case Settings::gmg_mb:
1529 *   {
1530 *   computing_timer.enter_subsection("Solve: Preconditioner setup");
1531 *  
1532 *   MGTransferPrebuilt<VectorType> mg_transfer(mg_constrained_dofs);
1533 *   mg_transfer.build(dof_handler);
1534 *  
1535 *   SolverControl coarse_solver_control(1000, 1e-12, false, false);
1536 *   SolverCG<VectorType> coarse_solver(coarse_solver_control);
1537 *   PreconditionIdentity identity;
1538 *   MGCoarseGridIterativeSolver<VectorType,
1539 *   SolverCG<VectorType>,
1540 *   MatrixType,
1541 *   PreconditionIdentity>
1542 *   coarse_grid_solver(coarse_solver, mg_matrix[0], identity);
1543 *  
1544 *   using Smoother = LA::MPI::PreconditionJacobi;
1545 *   MGSmootherPrecondition<MatrixType, Smoother, VectorType> smoother;
1546 *  
1547 *   #ifdef USE_PETSC_LA
1548 *   smoother.initialize(mg_matrix);
1549 *   Assert(
1550 *   settings.smoother_dampen == 1.0,
1551 *   ExcNotImplemented(
1552 *   "PETSc's PreconditionJacobi has no support for a damping parameter."));
1553 *   #else
1554 *   smoother.initialize(mg_matrix, settings.smoother_dampen);
1555 *   #endif
1556 *  
1557 *   smoother.set_steps(settings.smoother_steps);
1558 *  
1559 *   mg::Matrix<VectorType> mg_m(mg_matrix);
1560 *   mg::Matrix<VectorType> mg_in(mg_interface_in);
1561 *   mg::Matrix<VectorType> mg_out(mg_interface_in);
1562 *  
1563 *   Multigrid<VectorType> mg(
1564 *   mg_m, coarse_grid_solver, mg_transfer, smoother, smoother);
1565 *   mg.set_edge_matrices(mg_out, mg_in);
1566 *  
1567 *  
1568 *   PreconditionMG<dim, VectorType, MGTransferPrebuilt<VectorType>>
1569 *   preconditioner(dof_handler, mg, mg_transfer);
1570 *  
1571 *   computing_timer.leave_subsection("Solve: Preconditioner setup");
1572 *  
1573 * @endcode
1574 *
1575 * Timing for 1 V-cycle.
1576 *
1577 * @code
1578 *   {
1579 *   TimerOutput::Scope timing(computing_timer,
1580 *   "Solve: 1 multigrid V-cycle");
1581 *   preconditioner.vmult(solution, right_hand_side);
1582 *   }
1583 *   solution = 0.;
1584 *  
1585 * @endcode
1586 *
1587 * Solve the linear system and distribute constraints.
1588 *
1589 * @code
1590 *   {
1591 *   SolverCG<VectorType> solver(solver_control);
1592 *  
1593 *   TimerOutput::Scope timing(computing_timer, "Solve: CG");
1594 *   solver.solve(system_matrix,
1595 *   solution,
1596 *   right_hand_side,
1597 *   preconditioner);
1598 *   }
1599 *  
1600 *   constraints.distribute(solution);
1601 *  
1602 *   break;
1603 *   }
1604 *  
1605 * @endcode
1606 *
1607 * Solver for the AMG method, similar to @ref step_40 "step-40".
1608 *
1609 * @code
1610 *   case Settings::amg:
1611 *   {
1612 *   computing_timer.enter_subsection("Solve: Preconditioner setup");
1613 *  
1614 *   PreconditionAMG preconditioner;
1615 *   PreconditionAMG::AdditionalData Amg_data;
1616 *  
1617 *   #ifdef USE_PETSC_LA
1618 *   Amg_data.symmetric_operator = true;
1619 *   #else
1620 *   Amg_data.elliptic = true;
1621 *   Amg_data.smoother_type = "Jacobi";
1622 *   Amg_data.higher_order_elements = true;
1623 *   Amg_data.smoother_sweeps = settings.smoother_steps;
1624 *   Amg_data.aggregation_threshold = 0.02;
1625 *   #endif
1626 *  
1627 *   Amg_data.output_details = false;
1628 *  
1629 *   preconditioner.initialize(system_matrix, Amg_data);
1630 *   computing_timer.leave_subsection("Solve: Preconditioner setup");
1631 *  
1632 * @endcode
1633 *
1634 * Timing for 1 V-cycle.
1635 *
1636 * @code
1637 *   {
1638 *   TimerOutput::Scope timing(computing_timer,
1639 *   "Solve: 1 multigrid V-cycle");
1640 *   preconditioner.vmult(solution, right_hand_side);
1641 *   }
1642 *   solution = 0.;
1643 *  
1644 * @endcode
1645 *
1646 * Solve the linear system and distribute constraints.
1647 *
1648 * @code
1649 *   {
1650 *   SolverCG<VectorType> solver(solver_control);
1651 *  
1652 *   TimerOutput::Scope timing(computing_timer, "Solve: CG");
1653 *   solver.solve(system_matrix,
1654 *   solution,
1655 *   right_hand_side,
1656 *   preconditioner);
1657 *   }
1658 *   constraints.distribute(solution);
1659 *  
1660 *   break;
1661 *   }
1662 *  
1663 *   default:
1664 *   DEAL_II_ASSERT_UNREACHABLE();
1665 *   }
1666 *  
1667 *   pcout << " Number of CG iterations: " << solver_control.last_step()
1668 *   << std::endl;
1669 *   }
1670 *  
1671 *  
1672 * @endcode
1673 *
1674 *
1675 * <a name="step_50-Theerrorestimator"></a>
1676 * <h3>The error estimator</h3>
1677 *
1678
1679 *
1680 * We use the FEInterfaceValues class to assemble an error estimator to decide
1681 * which cells to refine. See the exact definition of the cell and face
1682 * integrals in the introduction. To use the method, we define Scratch and
 1683 * Copy objects for MeshWorker::mesh_loop(); much of the following code is
 1684 * in essence the same as what was already set up in @ref step_12 "step-12" (or is at least similar in
 1685 * spirit).
1686 *
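Collected into a single formula (this merely combines the cell and face terms that the
workers below accumulate; @f$h_K@f$ denotes the cell diameter and @f$h_F@f$ the face
measure), the squared indicator associated with a cell @f$K@f$ is
@f[
  \eta_K^2 \;=\; h_K^2 \,\bigl\| f + \epsilon \triangle u_h \bigr\|_K^2
  \;+\; \sum_{F \subset \partial K} \frac{h_F}{2}\,
        \bigl\| \jump{\epsilon \nabla u_h \cdot n} \bigr\|_F^2 ,
@f]
where the factor @f$1/2@f$ reflects that each interior face contribution is split
evenly between the two adjacent cells.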
1687 * @code
1688 *   template <int dim>
1689 *   struct ScratchData
1690 *   {
1691 *   ScratchData(const Mapping<dim> &mapping,
1692 *   const FiniteElement<dim> &fe,
1693 *   const unsigned int quadrature_degree,
1694 *   const UpdateFlags update_flags,
1695 *   const UpdateFlags interface_update_flags)
1696 *   : fe_values(mapping, fe, QGauss<dim>(quadrature_degree), update_flags)
1697 *   , fe_interface_values(mapping,
1698 *   fe,
1699 *   QGauss<dim - 1>(quadrature_degree),
1700 *   interface_update_flags)
1701 *   {}
1702 *  
1703 *  
1704 *   ScratchData(const ScratchData<dim> &scratch_data)
1705 *   : fe_values(scratch_data.fe_values.get_mapping(),
1706 *   scratch_data.fe_values.get_fe(),
1707 *   scratch_data.fe_values.get_quadrature(),
1708 *   scratch_data.fe_values.get_update_flags())
1709 *   , fe_interface_values(scratch_data.fe_values.get_mapping(),
1710 *   scratch_data.fe_values.get_fe(),
1711 *   scratch_data.fe_interface_values.get_quadrature(),
1712 *   scratch_data.fe_interface_values.get_update_flags())
1713 *   {}
1714 *  
1715 *   FEValues<dim> fe_values;
1716 *   FEInterfaceValues<dim> fe_interface_values;
1717 *   };
1718 *  
1719 *  
1720 *  
1721 *   struct CopyData
1722 *   {
1723 *   CopyData()
1724 *   : cell_index(numbers::invalid_unsigned_int)
1725 *   , value(0.)
1726 *   {}
1727 *  
1728 *   struct FaceData
1729 *   {
1730 *   unsigned int cell_indices[2];
1731 *   double values[2];
1732 *   };
1733 *  
1734 *   unsigned int cell_index;
1735 *   double value;
1736 *   std::vector<FaceData> face_data;
1737 *   };
1738 *  
1739 *  
1740 *   template <int dim, int degree>
1741 *   void LaplaceProblem<dim, degree>::estimate()
1742 *   {
1743 *   TimerOutput::Scope timing(computing_timer, "Estimate");
1744 *  
1745 *   VectorType temp_solution;
1746 *   temp_solution.reinit(locally_owned_dofs,
1747 *   locally_relevant_dofs,
1748 *   mpi_communicator);
1749 *   temp_solution = solution;
1750 *  
1751 *   const Coefficient<dim> coefficient;
1752 *  
1753 *   estimated_error_square_per_cell.reinit(triangulation.n_active_cells());
1754 *  
1755 *   using Iterator = typename DoFHandler<dim>::active_cell_iterator;
1756 *  
1757 * @endcode
1758 *
1759 * Assembler for cell residual @f$h^2 \| f + \epsilon \triangle u \|_K^2@f$
1760 *
1761 * @code
1762 *   auto cell_worker = [&](const Iterator &cell,
1763 *   ScratchData<dim> &scratch_data,
1764 *   CopyData &copy_data) {
1765 *   FEValues<dim> &fe_values = scratch_data.fe_values;
1766 *   fe_values.reinit(cell);
1767 *  
1768 *   RightHandSide<dim> rhs;
1769 *   const double rhs_value = rhs.value(cell->center());
1770 *  
1771 *   const double nu = coefficient.value(cell->center());
1772 *  
1773 *   std::vector<Tensor<2, dim>> hessians(fe_values.n_quadrature_points);
1774 *   fe_values.get_function_hessians(temp_solution, hessians);
1775 *  
1776 *   copy_data.cell_index = cell->active_cell_index();
1777 *  
1778 *   double residual_norm_square = 0.;
1779 *   for (unsigned k = 0; k < fe_values.n_quadrature_points; ++k)
1780 *   {
1781 *   const double residual = (rhs_value + nu * trace(hessians[k]));
1782 *   residual_norm_square += residual * residual * fe_values.JxW(k);
1783 *   }
1784 *  
1785 *   copy_data.value =
1786 *   cell->diameter() * cell->diameter() * residual_norm_square;
1787 *   };
1788 *  
1789 * @endcode
1790 *
1791 * Assembler for face term @f$\sum_F h_F \| \jump{\epsilon \nabla u \cdot n}
1792 * \|_F^2@f$
1793 *
1794 * @code
1795 *   auto face_worker = [&](const Iterator &cell,
1796 *   const unsigned int &f,
1797 *   const unsigned int &sf,
1798 *   const Iterator &ncell,
1799 *   const unsigned int &nf,
1800 *   const unsigned int &nsf,
1801 *   ScratchData<dim> &scratch_data,
1802 *   CopyData &copy_data) {
1803 *   FEInterfaceValues<dim> &fe_interface_values =
1804 *   scratch_data.fe_interface_values;
1805 *   fe_interface_values.reinit(cell, f, sf, ncell, nf, nsf);
1806 *  
1807 *   copy_data.face_data.emplace_back();
1808 *   CopyData::FaceData &copy_data_face = copy_data.face_data.back();
1809 *  
1810 *   copy_data_face.cell_indices[0] = cell->active_cell_index();
1811 *   copy_data_face.cell_indices[1] = ncell->active_cell_index();
1812 *  
1813 *   const double coeff1 = coefficient.value(cell->center());
1814 *   const double coeff2 = coefficient.value(ncell->center());
1815 *  
1816 *   std::vector<Tensor<1, dim>> grad_u[2];
1817 *  
1818 *   for (unsigned int i = 0; i < 2; ++i)
1819 *   {
1820 *   grad_u[i].resize(fe_interface_values.n_quadrature_points);
1821 *   fe_interface_values.get_fe_face_values(i).get_function_gradients(
1822 *   temp_solution, grad_u[i]);
1823 *   }
1824 *  
1825 *   double jump_norm_square = 0.;
1826 *  
1827 *   for (unsigned int qpoint = 0;
1828 *   qpoint < fe_interface_values.n_quadrature_points;
1829 *   ++qpoint)
1830 *   {
1831 *   const double jump =
1832 *   coeff1 * grad_u[0][qpoint] * fe_interface_values.normal(qpoint) -
1833 *   coeff2 * grad_u[1][qpoint] * fe_interface_values.normal(qpoint);
1834 *  
1835 *   jump_norm_square += jump * jump * fe_interface_values.JxW(qpoint);
1836 *   }
1837 *  
1838 *   const double h = cell->face(f)->measure();
1839 *   copy_data_face.values[0] = 0.5 * h * jump_norm_square;
1840 *   copy_data_face.values[1] = copy_data_face.values[0];
1841 *   };
1842 *  
1843 *   auto copier = [&](const CopyData &copy_data) {
1844 *   if (copy_data.cell_index != numbers::invalid_unsigned_int)
1845 *   estimated_error_square_per_cell[copy_data.cell_index] += copy_data.value;
1846 *  
1847 *   for (const auto &cdf : copy_data.face_data)
1848 *   for (unsigned int j = 0; j < 2; ++j)
1849 *   estimated_error_square_per_cell[cdf.cell_indices[j]] += cdf.values[j];
1850 *   };
1851 *  
1852 *   const unsigned int n_gauss_points = degree + 1;
1853 *   ScratchData<dim> scratch_data(mapping,
1854 *   fe,
1855 *   n_gauss_points,
1856 *   update_hessians | update_quadrature_points |
1857 *   update_JxW_values,
1858 *   update_values | update_gradients |
1859 *   update_JxW_values | update_normal_vectors);
1860 *   CopyData copy_data;
1861 *  
1862 * @endcode
1863 *
 1864 * We need to assemble each interior face once, but we also need to make sure that
 1865 * both adjacent processes assemble the face term between a locally owned and a ghost
1866 * cell. This is achieved by setting the
1867 * MeshWorker::assemble_ghost_faces_both flag. We need to do this, because
1868 * we do not communicate the error estimator contributions here.
1869 *
1870 * @code
1871 *   MeshWorker::mesh_loop(dof_handler.begin_active(),
1872 *   dof_handler.end(),
1873 *   cell_worker,
1874 *   copier,
1875 *   scratch_data,
1876 *   copy_data,
1877 *   MeshWorker::assemble_own_cells |
1878 *   MeshWorker::assemble_ghost_faces_both |
1879 *   MeshWorker::assemble_own_interior_faces_once,
1880 *   /*boundary_worker=*/nullptr,
1881 *   face_worker);
1882 *  
1883 *   const double global_error_estimate =
1884 *   std::sqrt(Utilities::MPI::sum(estimated_error_square_per_cell.l1_norm(),
1885 *   mpi_communicator));
1886 *   pcout << " Global error estimate: " << global_error_estimate
1887 *   << std::endl;
1888 *   }
1889 *  
1890 *  
1891 * @endcode
1892 *
1893 *
1894 * <a name="step_50-LaplaceProblemrefine_grid"></a>
1895 * <h4>LaplaceProblem::refine_grid()</h4>
1896 *
1897
1898 *
 1899 * We use the cell-wise estimator stored in the vector @p estimated_error_square_per_cell and
 1900 * refine a fixed fraction of cells (chosen here so as to roughly double the number of
 1901 * DoFs in each step).
1902 *
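The arithmetic behind the fraction used below (ignoring coarsening and hanging-node
constraints) is simple: if a fraction @f$r@f$ of the @f$N@f$ active cells is refined,
each such cell is replaced by @f$2^d@f$ children in @f$d@f$ space dimensions, so
@f[
  N_\text{new} \approx N(1-r) + r\,2^d N = N\bigl(1 + r\,(2^d-1)\bigr),
@f]
and choosing @f$r = 1/(2^d-1)@f$ gives @f$N_\text{new}\approx 2N@f$, i.e., roughly
twice as many cells, and hence roughly twice as many degrees of freedom, per step.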
1903 * @code
1904 *   template <int dim, int degree>
1905 *   void LaplaceProblem<dim, degree>::refine_grid()
1906 *   {
1907 *   TimerOutput::Scope timing(computing_timer, "Refine grid");
1908 *  
1909 *   const double refinement_fraction = 1. / (std::pow(2.0, dim) - 1.);
1910 *   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
1911 *   triangulation, estimated_error_square_per_cell, refinement_fraction, 0.0);
1912 *  
1913 *   triangulation.execute_coarsening_and_refinement();
1914 *   }
1915 *  
1916 *  
1917 * @endcode
1918 *
1919 *
1920 * <a name="step_50-LaplaceProblemoutput_results"></a>
1921 * <h4>LaplaceProblem::output_results()</h4>
1922 *
1923
1924 *
1925 * The output_results() function is similar to the ones found in many of the
1926 * tutorials (see @ref step_40 "step-40" for example).
1927 *
1928 * @code
1929 *   template <int dim, int degree>
1930 *   void LaplaceProblem<dim, degree>::output_results(const unsigned int cycle)
1931 *   {
1932 *   TimerOutput::Scope timing(computing_timer, "Output results");
1933 *  
1934 *   VectorType temp_solution;
1935 *   temp_solution.reinit(locally_owned_dofs,
1936 *   locally_relevant_dofs,
1937 *   mpi_communicator);
1938 *   temp_solution = solution;
1939 *  
1940 *   DataOut<dim> data_out;
1941 *   data_out.attach_dof_handler(dof_handler);
1942 *   data_out.add_data_vector(temp_solution, "solution");
1943 *  
1944 *   Vector<float> subdomain(triangulation.n_active_cells());
1945 *   for (unsigned int i = 0; i < subdomain.size(); ++i)
1946 *   subdomain(i) = triangulation.locally_owned_subdomain();
1947 *   data_out.add_data_vector(subdomain, "subdomain");
1948 *  
1949 *   Vector<float> level(triangulation.n_active_cells());
1950 *   for (const auto &cell : triangulation.active_cell_iterators())
1951 *   level(cell->active_cell_index()) = cell->level();
1952 *   data_out.add_data_vector(level, "level");
1953 *  
1954 *   if (estimated_error_square_per_cell.size() > 0)
1955 *   data_out.add_data_vector(estimated_error_square_per_cell,
1956 *   "estimated_error_square_per_cell");
1957 *  
1958 *   data_out.build_patches();
1959 *  
1960 *   const std::string pvtu_filename = data_out.write_vtu_with_pvtu_record(
1961 *   "", "solution", cycle, mpi_communicator, 2 /*n_digits*/, 1 /*n_groups*/);
1962 *  
1963 *   pcout << " Wrote " << pvtu_filename << std::endl;
1964 *   }
1965 *  
1966 *  
1967 * @endcode
1968 *
1969 *
1970 * <a name="step_50-LaplaceProblemrun"></a>
1971 * <h4>LaplaceProblem::run()</h4>
1972 *
1973
1974 *
1975 * As in most tutorials, this function calls the various functions defined
1976 * above to set up, assemble, solve, and output the results.
1977 *
1978 * @code
1979 *   template <int dim, int degree>
1980 *   void LaplaceProblem<dim, degree>::run()
1981 *   {
1982 *   for (unsigned int cycle = 0; cycle < settings.n_steps; ++cycle)
1983 *   {
1984 *   pcout << "Cycle " << cycle << ':' << std::endl;
1985 *   if (cycle > 0)
1986 *   refine_grid();
1987 *  
1988 *   pcout << " Number of active cells: "
1989 *   << triangulation.n_global_active_cells();
1990 *  
1991 * @endcode
1992 *
1993 * We only output level cell data for the GMG methods (same with DoF
1994 * data below). Note that the partition efficiency is irrelevant for AMG
1995 * since the level hierarchy is not distributed or used during the
1996 * computation.
1997 *
1998 * @code
1999 *   if (settings.solver == Settings::gmg_mf ||
2000 *   settings.solver == Settings::gmg_mb)
2001 *   pcout << " (" << triangulation.n_global_levels() << " global levels)"
2002 *   << std::endl
2003 *   << " Partition efficiency: "
2004 *   << 1.0 / MGTools::workload_imbalance(triangulation);
2005 *   pcout << std::endl;
2006 *  
2007 *   setup_system();
2008 *  
2009 * @endcode
2010 *
2011 * Only set up the multilevel hierarchy for GMG.
2012 *
2013 * @code
2014 *   if (settings.solver == Settings::gmg_mf ||
2015 *   settings.solver == Settings::gmg_mb)
2016 *   setup_multigrid();
2017 *  
2018 *   pcout << " Number of degrees of freedom: " << dof_handler.n_dofs();
2019 *   if (settings.solver == Settings::gmg_mf ||
2020 *   settings.solver == Settings::gmg_mb)
2021 *   {
2022 *   pcout << " (by level: ";
2023 *   for (unsigned int level = 0; level < triangulation.n_global_levels();
2024 *   ++level)
2025 *   pcout << dof_handler.n_dofs(level)
2026 *   << (level == triangulation.n_global_levels() - 1 ? ")" :
2027 *   ", ");
2028 *   }
2029 *   pcout << std::endl;
2030 *  
2031 * @endcode
2032 *
2033 * For the matrix-free method, we only assemble the right-hand side.
2034 * For both matrix-based methods, we assemble both active matrix and
2035 * right-hand side, and only assemble the multigrid matrices for
2036 * matrix-based GMG.
2037 *
2038 * @code
2039 *   if (settings.solver == Settings::gmg_mf)
2040 *   assemble_rhs();
2041 *   else /*gmg_mb or amg*/
2042 *   {
2043 *   assemble_system();
2044 *   if (settings.solver == Settings::gmg_mb)
2045 *   assemble_multigrid();
2046 *   }
2047 *  
2048 *   solve();
2049 *   estimate();
2050 *  
2051 *   if (settings.output)
2052 *   output_results(cycle);
2053 *  
2054 *   computing_timer.print_summary();
2055 *   computing_timer.reset();
2056 *   }
2057 *   }
2058 *  
2059 *  
2060 * @endcode
2061 *
2062 *
2063 * <a name="step_50-Themainfunction"></a>
2064 * <h3>The main() function</h3>
2065 *
2066
2067 *
2068 * This is a similar main function to @ref step_40 "step-40", with the exception that
 2069 * we require the user to pass a .prm file as the sole command-line
2070 * argument (see @ref step_29 "step-29" and the documentation of the ParameterHandler
2071 * class for a complete discussion of parameter files).
2072 *
2073 * @code
2074 *   int main(int argc, char *argv[])
2075 *   {
2076 *   using namespace dealii;
2077 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
2078 *  
2079 *   Settings settings;
2080 *   if (!settings.try_parse((argc > 1) ? (argv[1]) : ""))
2081 *   return 0;
2082 *  
2083 *   try
2084 *   {
2085 *   constexpr unsigned int fe_degree = 2;
2086 *  
2087 *   switch (settings.dimension)
2088 *   {
2089 *   case 2:
2090 *   {
2091 *   LaplaceProblem<2, fe_degree> test(settings);
2092 *   test.run();
2093 *  
2094 *   break;
2095 *   }
2096 *  
2097 *   case 3:
2098 *   {
2099 *   LaplaceProblem<3, fe_degree> test(settings);
2100 *   test.run();
2101 *  
2102 *   break;
2103 *   }
2104 *  
2105 *   default:
2106 *   DEAL_II_NOT_IMPLEMENTED();
2107 *   }
2108 *   }
2109 *   catch (std::exception &exc)
2110 *   {
2111 *   std::cerr << std::endl
2112 *   << std::endl
2113 *   << "----------------------------------------------------"
2114 *   << std::endl;
2115 *   std::cerr << "Exception on processing: " << std::endl
2116 *   << exc.what() << std::endl
2117 *   << "Aborting!" << std::endl
2118 *   << "----------------------------------------------------"
2119 *   << std::endl;
2120 *   MPI_Abort(MPI_COMM_WORLD, 1);
2121 *   return 1;
2122 *   }
2123 *   catch (...)
2124 *   {
2125 *   std::cerr << std::endl
2126 *   << std::endl
2127 *   << "----------------------------------------------------"
2128 *   << std::endl;
2129 *   std::cerr << "Unknown exception!" << std::endl
2130 *   << "Aborting!" << std::endl
2131 *   << "----------------------------------------------------"
2132 *   << std::endl;
2133 *   MPI_Abort(MPI_COMM_WORLD, 2);
2134 *   return 1;
2135 *   }
2136 *  
2137 *   return 0;
2138 *   }
2139 * @endcode
<a name="step_50-Results"></a><h1>Results</h1>


When you run the program using the following command
@code
mpirun -np 16 ./step-50 gmg_mf_2d.prm
@endcode
the screen output should look like the following:
@code
Cycle 0:
 Number of active cells: 12 (2 global levels)
 Partition efficiency: 0.1875
 Number of degrees of freedom: 65 (by level: 21, 65)
 Number of CG iterations: 10
 Global error estimate: 0.355373
 Wrote solution_00.pvtu


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start | 0.0163s | |
| | | |
| Section | no. calls | wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble right-hand side | 1 | 0.000374s | 2.3% |
| Estimate | 1 | 0.000724s | 4.4% |
| Output results | 1 | 0.00277s | 17% |
| Setup | 1 | 0.00225s | 14% |
| Setup multigrid | 1 | 0.00181s | 11% |
| Solve | 1 | 0.00364s | 22% |
| Solve: 1 multigrid V-cycle | 1 | 0.000354s | 2.2% |
| Solve: CG | 1 | 0.00151s | 9.3% |
| Solve: Preconditioner setup | 1 | 0.00125s | 7.7% |
+---------------------------------+-----------+------------+------------+

Cycle 1:
 Number of active cells: 24 (3 global levels)
 Partition efficiency: 0.276786
 Number of degrees of freedom: 139 (by level: 21, 65, 99)
 Number of CG iterations: 10
 Global error estimate: 0.216726
 Wrote solution_01.pvtu


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start | 0.0169s | |
| | | |
| Section | no. calls | wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble right-hand side | 1 | 0.000309s | 1.8% |
| Estimate | 1 | 0.00156s | 9.2% |
| Output results | 1 | 0.00222s | 13% |
| Refine grid | 1 | 0.00278s | 16% |
| Setup | 1 | 0.00196s | 12% |
| Setup multigrid | 1 | 0.0023s | 14% |
| Solve | 1 | 0.00565s | 33% |
| Solve: 1 multigrid V-cycle | 1 | 0.000349s | 2.1% |
| Solve: CG | 1 | 0.00285s | 17% |
| Solve: Preconditioner setup | 1 | 0.00195s | 12% |
+---------------------------------+-----------+------------+------------+

Cycle 2:
 Number of active cells: 51 (4 global levels)
 Partition efficiency: 0.41875
 Number of degrees of freedom: 245 (by level: 21, 65, 225, 25)
 Number of CG iterations: 11
 Global error estimate: 0.112098
 Wrote solution_02.pvtu


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start | 0.0183s | |
| | | |
| Section | no. calls | wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble right-hand side | 1 | 0.000274s | 1.5% |
| Estimate | 1 | 0.00127s | 6.9% |
| Output results | 1 | 0.00227s | 12% |
| Refine grid | 1 | 0.0024s | 13% |
| Setup | 1 | 0.00191s | 10% |
| Setup multigrid | 1 | 0.00295s | 16% |
| Solve | 1 | 0.00702s | 38% |
| Solve: 1 multigrid V-cycle | 1 | 0.000398s | 2.2% |
| Solve: CG | 1 | 0.00376s | 21% |
| Solve: Preconditioner setup | 1 | 0.00238s | 13% |
+---------------------------------+-----------+------------+------------+
.
.
.
@endcode
2229Here, the timing of the `solve()` function is split into three parts: setting
2230up the multigrid preconditioner, execution of a single multigrid V-cycle, and
2231the CG solver. The V-cycle that is timed is not necessary for the overall solve
2232and is only meant to give insight into the relative costs of AMG and GMG.
2233It should also be noted that when using the AMG solver, the "Workload imbalance"
2234is not included in the output since no hierarchy of coarse meshes is
2235required.
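
Timing sections like these are typically produced with nested TimerOutput::Scope
objects. The following is only a minimal sketch of the pattern, assuming a member
`TimerOutput computing_timer` as used for the tables above; the placeholder comments
stand in for the actual preconditioner setup and solver calls of the program:
@code
{
  TimerOutput::Scope timing(computing_timer, "Solve");

  {
    TimerOutput::Scope inner(computing_timer, "Solve: Preconditioner setup");
    // ... set up the GMG or AMG preconditioner ...
  }

  {
    // Time one V-cycle by applying the preconditioner once; this is not
    // needed for the actual solve and is done purely for the statistics.
    TimerOutput::Scope inner(computing_timer, "Solve: 1 multigrid V-cycle");
    // preconditioner.vmult(dst, src);
  }

  {
    TimerOutput::Scope inner(computing_timer, "Solve: CG");
    // cg.solve(system_matrix, solution, right_hand_side, preconditioner);
  }
}
@endcode
Each `TimerOutput::Scope` starts its section on construction and stops it on
destruction, which is what yields the nested entries in the tables above.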
2236
2237All results in this section are gathered on Intel Xeon Platinum 8280 (Cascade
2238Lake) nodes, which have 56 cores and 192GB of memory per node and support AVX-512 instructions,
2239allowing for vectorization over 8 doubles (vectorization is used only in the matrix-free
2240computations). The code is compiled using gcc 7.1.0 with intel-mpi 17.0.3. Trilinos
224112.10.1 is used for the matrix-based GMG/AMG computations.
2242
2243We can then gather a variety of information by calling the program
2244with the input files that are provided in the directory in which
2245@ref step_50 "step-50" is located. Using these, and adjusting the number of mesh
2246refinement steps, we can produce information about how well the
2247program scales.
2248
2249The following table gives weak scaling timings for this program on up to 256M DoFs
2250and 7,168 processors. (Recall that weak scaling keeps the number of
2251degrees of freedom per processor constant while increasing the number of
2252processors; i.e., it considers larger and larger problems.)
2253Here, @f$\mathbb{E}@f$ is the partition efficiency from the
2254 introduction (also equal to 1.0/workload imbalance), "Setup" is a combination
2255of setup, setup multigrid, assemble, and assemble multigrid from the timing blocks,
2256and "Prec" is the preconditioner setup. Ideally all times would stay constant
2257over each problem size for the individual solvers, but since the partition
2258efficiency decreases from 0.371 to 0.161 from largest to smallest problem size,
2259we expect to see an approximately @f$0.371/0.161=2.3@f$ times increase in timings
2260for GMG. This is, in fact, pretty close to what we really get:
2261
2262<table align="center" class="doxtable">
2263<tr>
2264 <th colspan="4"></th>
2265 <th></th>
2266 <th colspan="4">MF-GMG</th>
2267 <th></th>
2268 <th colspan="4">MB-GMG</th>
2269 <th></th>
2270 <th colspan="4">AMG</th>
2271</tr>
2272<tr>
2273 <th align="right">Procs</th>
2274 <th align="right">Cycle</th>
2275 <th align="right">DoFs</th>
2276 <th align="right">@f$\mathbb{E}@f$</th>
2277 <th></th>
2278 <th align="right">Setup</th>
2279 <th align="right">Prec</th>
2280 <th align="right">Solve</th>
2281 <th align="right">Total</th>
2282 <th></th>
2283 <th align="right">Setup</th>
2284 <th align="right">Prec</th>
2285 <th align="right">Solve</th>
2286 <th align="right">Total</th>
2287 <th></th>
2288 <th align="right">Setup</th>
2289 <th align="right">Prec</th>
2290 <th align="right">Solve</th>
2291 <th align="right">Total</th>
2292</tr>
2293<tr>
2294 <td align="right">112</td>
2295 <td align="right">13</td>
2296 <td align="right">4M</td>
2297 <td align="right">0.37</td>
2298 <td></td>
2299 <td align="right">0.742</td>
2300 <td align="right">0.393</td>
2301 <td align="right">0.200</td>
2302 <td align="right">1.335</td>
2303 <td></td>
2304 <td align="right">1.714</td>
2305 <td align="right">2.934</td>
2306 <td align="right">0.716</td>
2307 <td align="right">5.364</td>
2308 <td></td>
2309 <td align="right">1.544</td>
2310 <td align="right">0.456</td>
2311 <td align="right">1.150</td>
2312 <td align="right">3.150</td>
2313</tr>
2314<tr>
2315 <td align="right">448</td>
2316 <td align="right">15</td>
2317 <td align="right">16M</td>
2318 <td align="right">0.29</td>
2319 <td></td>
2320 <td align="right">0.884</td>
2321 <td align="right">0.535</td>
2322 <td align="right">0.253</td>
2323 <td align="right">1.672</td>
2324 <td></td>
2325 <td align="right">1.927</td>
2326 <td align="right">3.776</td>
2327 <td align="right">1.190</td>
2328 <td align="right">6.893</td>
2329 <td></td>
2330 <td align="right">1.544</td>
2331 <td align="right">0.456</td>
2332 <td align="right">1.150</td>
2333 <td align="right">3.150</td>
2334</tr>
2335<tr>
2336 <td align="right">1,792</td>
2337 <td align="right">17</td>
2338 <td align="right">65M</td>
2339 <td align="right">0.22</td>
2340 <td></td>
2341 <td align="right">1.122</td>
2342 <td align="right">0.686</td>
2343 <td align="right">0.309</td>
2344 <td align="right">2.117</td>
2345 <td></td>
2346 <td align="right">2.171</td>
2347 <td align="right">4.862</td>
2348 <td align="right">1.660</td>
2349 <td align="right">8.693</td>
2350 <td></td>
2351 <td align="right">1.654</td>
2352 <td align="right">0.546</td>
2353 <td align="right">1.460</td>
2354 <td align="right">3.660</td>
2355</tr>
2356<tr>
2357 <td align="right">7,168</td>
2358 <td align="right">19</td>
2359 <td align="right">256M</td>
2360 <td align="right">0.16</td>
2361 <td></td>
2362 <td align="right">1.214</td>
2363 <td align="right">0.893</td>
2364 <td align="right">0.521</td>
2365 <td align="right">2.628</td>
2366 <td></td>
2367 <td align="right">2.386</td>
2368 <td align="right">7.260</td>
2369 <td align="right">2.560</td>
2370 <td align="right">12.206</td>
2371 <td></td>
2372 <td align="right">1.844</td>
2373 <td align="right">1.010</td>
2374 <td align="right">1.890</td>
2375 <td align="right">4.744</td>
2376</tr>
2377</table>
2378
2379On the other hand, the algebraic multigrid in the last set of columns
2380is relatively unaffected by the increasing imbalance of the mesh
2381hierarchy (because it doesn't use the mesh hierarchy) and the growth
2382in time is rather driven by other factors that are well documented in
2383the literature (most notably that the algorithmic complexity of
2384some parts of algebraic multigrid methods appears to be @f${\cal O}(N
2385\log N)@f$ instead of @f${\cal O}(N)@f$ for geometric multigrid).
2386
2387The upshot of the table above is that the matrix-free geometric multigrid
2388method appears to be the fastest approach to solving this equation, even if
2389not by a huge margin. Matrix-based methods, on the other hand, are
2390consistently the worst.
2391
2392The following figure provides strong scaling results for each method, i.e.,
2393we solve the same problem on more and more processors. Specifically,
2394we consider the problems after 16 mesh refinement cycles
2395(32M DoFs) and 19 cycles (256M DoFs), on between 56 and 28,672 processors:
2396
2397<img width="600px" src="https://www.dealii.org/images/steps/developer/step-50-strong-scaling.png" alt="">
2398
2399While the matrix-based GMG solver and AMG scale similarly and have a
2400similar time to solution (at least as long as there is a substantial
2401number of unknowns per processor -- say, several tens of thousands), the
2402matrix-free GMG solver scales much better and solves the finer problem
2403in roughly the same time as the AMG solver for the coarser mesh with
2404only an eighth of the number of processors. Conversely, it can solve the
2405same problem on the same number of processors in about one eighth the
2406time.
2407
2408
2409<a name="step_50-Possibilitiesforextensions"></a><h3> Possibilities for extensions </h3>
2410
2411
2412<a name="step_50-Testingconvergenceandhigherorderelements"></a><h4> Testing convergence and higher order elements </h4>
2413
2414
2415The finite element degree is currently hard-coded as 2; see the template
2416arguments of the main class, set in `main()`. It is easy to change. To test this, it
2417would be interesting to switch to a test problem with a known reference solution
2418so that you can compare convergence rates for different polynomial degrees.
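
One way to make the degree a run-time choice is to dispatch to different template
instantiations from `main()`. The following is only a sketch: the `degree` member of
the `Settings` class is hypothetical and would first have to be added and parsed,
and `main()` would then call this helper instead of instantiating `LaplaceProblem`
with the hard-coded `fe_degree`:
@code
// Hypothetical helper: dispatch on a run-time polynomial degree. The
// `settings.degree` member does not exist in the original program.
template <int dim>
void run_with_degree(const Settings &settings)
{
  switch (settings.degree)
    {
      case 1:
        {
          LaplaceProblem<dim, 1> problem(settings);
          problem.run();
          break;
        }
      case 2:
        {
          LaplaceProblem<dim, 2> problem(settings);
          problem.run();
          break;
        }
      case 3:
        {
          LaplaceProblem<dim, 3> problem(settings);
          problem.run();
          break;
        }
      default:
        DEAL_II_NOT_IMPLEMENTED();
    }
}
@endcode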
2419
2420<a name="step_50-Coarsesolver"></a><h4> Coarse solver </h4>
2421
2422
2423A more interesting example would involve a more complicated coarse mesh (see
2424@ref step_49 "step-49" for inspiration). The issue in that case is that the coarsest
2425level of the mesh hierarchy is actually quite large, and one would
2426have to think about ways to solve the coarse level problem
2427efficiently. (This is not an issue for algebraic multigrid methods
2428because they would just continue to build coarser and coarser levels
2429of the matrix, regardless of their geometric origin.)
2430
2431In the program here, we simply solve the coarse level problem with a
2432Conjugate Gradient method without any preconditioner. That is acceptable
2433if the coarse problem is really small -- for example, if the coarse
2434mesh had a single cell, then the coarse mesh problem has a @f$9\times 9@f$
2435matrix in 2d, and a @f$27\times 27@f$ matrix in 3d; for the coarse mesh we
2436use on the @f$L@f$-shaped domain of the current program, these sizes are
2437@f$21\times 21@f$ in 2d and @f$117\times 117@f$ in 3d. But if the coarse mesh
2438consists of hundreds or thousands of cells, this approach will no longer
2439be efficient, and the coarse level solve might start to dominate the overall run-time of each V-cycle.
2440A common approach is then to solve the coarse mesh problem using an
2441algebraic multigrid preconditioner; this would, however, require
2442assembling the coarse matrix (even for the matrix-free version) as
2443input to the AMG implementation.
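
To give an idea of what this could look like, the following sketch replaces the
unpreconditioned coarse-grid CG solver by one preconditioned with Trilinos' AMG.
The names `VectorType`, `MatrixType`, and `coarse_matrix` are placeholders for the
linear algebra types used in the program and for an explicitly assembled
coarse-level matrix; this is not a drop-in change, in particular for the
matrix-free variant:
@code
// Sketch only: `coarse_matrix` is assumed to be an assembled coarse-level
// matrix of a type that TrilinosWrappers::PreconditionAMG understands and
// that provides vmult() for VectorType.
SolverControl coarse_solver_control(1000, 1e-12, false, false);
SolverCG<VectorType> coarse_solver(coarse_solver_control);

TrilinosWrappers::PreconditionAMG coarse_amg;
coarse_amg.initialize(coarse_matrix);

MGCoarseGridIterativeSolver<VectorType,
                            SolverCG<VectorType>,
                            MatrixType,
                            TrilinosWrappers::PreconditionAMG>
  coarse_grid_solver(coarse_solver, coarse_matrix, coarse_amg);

// The `coarse_grid_solver` object would then be handed to the Multigrid
// object in the same way as the unpreconditioned coarse solver is now.
@endcode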
2444 *
2445 *
2446<a name="step_50-PlainProg"></a>
2447<h1> The plain program</h1>
2448@include "step-50.cc"
2449*/