Reference documentation for deal.II version 9.2.0
\(\newcommand{\dealvcentcolon}{\mathrel{\mathop{:}}}\) \(\newcommand{\dealcoloneq}{\dealvcentcolon\mathrel{\mkern-1.2mu}=}\) \(\newcommand{\jump}[1]{\left[\!\left[ #1 \right]\!\right]}\) \(\newcommand{\average}[1]{\left\{\!\left\{ #1 \right\}\!\right\}}\)
step-50.h
477  * const unsigned int /*component*/ = 0) const override
478  * {
479  * return 1.0;
480  * }
481  *
482  *
483  * template <typename number>
484  * VectorizedArray<number>
485  * value(const Point<dim, VectorizedArray<number>> & /*p*/,
486  * const unsigned int /*component*/ = 0) const
487  * {
488  * return VectorizedArray<number>(1.0);
489  * }
490  * };
491  *
492  *
493  * @endcode
494  *
495  * This next class represents the diffusion coefficient. We use a variable
496  * coefficient which is 100.0 at any point where at least one coordinate is
497  * less than -0.5, and 1.0 at all other points. As above, a separate value()
498  * returning a VectorizedArray is used for the matrix-free code. An @p
499  * average_value() function computes the arithmetic average over a set of points.
500  *
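 * For example, in 2d the point @f$(-0.75, 0.25)@f$ has one coordinate less than
 * @f$-0.5@f$ and therefore gets the coefficient value 100.0, whereas
 * @f$(0.25, 0.25)@f$ gets the value 1.0.
 *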
501  * @code
502  * template <int dim>
503  * class Coefficient : public Function<dim>
504  * {
505  * public:
506  * virtual double value(const Point<dim> &p,
507  * const unsigned int /*component*/ = 0) const override;
508  *
509  * template <typename number>
510  * VectorizedArray<number> value(const Point<dim, VectorizedArray<number>> &p,
511  * const unsigned int /*component*/ = 0) const;
512  *
513  * template <typename number>
514  * number average_value(const std::vector<Point<dim, number>> &points) const;
515  *
516  * @endcode
517  *
518  * When using a coefficient in the MatrixFree framework, we also
519  * need a function that creates a Table of coefficient values for the
520  * set of cells stored in the MatrixFree object that is given as the argument.
521  *
522  * @code
523  * template <typename number>
524  * std::shared_ptr<Table<2, VectorizedArray<number>>> make_coefficient_table(
525  * const MatrixFree<dim, number, VectorizedArray<number>> &mf_storage) const;
526  * };
527  *
528  *
529  *
530  * template <int dim>
531  * double Coefficient<dim>::value(const Point<dim> &p, const unsigned int) const
532  * {
533  * for (int d = 0; d < dim; ++d)
534  * {
535  * if (p[d] < -0.5)
536  * return 100.0;
537  * }
538  * return 1.0;
539  * }
540  *
541  *
542  *
543  * template <int dim>
544  * template <typename number>
545  * VectorizedArray<number>
546  * Coefficient<dim>::value(const Point<dim, VectorizedArray<number>> &p,
547  * const unsigned int) const
548  * {
549  * VectorizedArray<number> return_value = VectorizedArray<number>(1.0);
550  * for (unsigned int i = 0; i < VectorizedArray<number>::size(); ++i)
551  * {
552  * for (int d = 0; d < dim; ++d)
553  * if (p[d][i] < -0.5)
554  * {
555  * return_value[i] = 100.0;
556  * break;
557  * }
558  * }
559  *
560  * return return_value;
561  * }
562  *
563  *
564  *
565  * template <int dim>
566  * template <typename number>
567  * number Coefficient<dim>::average_value(
568  * const std::vector<Point<dim, number>> &points) const
569  * {
570  * number average(0);
571  * for (unsigned int i = 0; i < points.size(); ++i)
572  * average += value(points[i]);
573  * average /= points.size();
574  *
575  * return average;
576  * }
577  *
578  *
579  *
580  * template <int dim>
581  * template <typename number>
582  * std::shared_ptr<Table<2, VectorizedArray<number>>>
583  * Coefficient<dim>::make_coefficient_table(
584  * const MatrixFree<dim, number, VectorizedArray<number>> &mf_storage) const
585  * {
586  * auto coefficient_table =
587  * std::make_shared<Table<2, VectorizedArray<number>>>();
588  *
589  * FEEvaluation<dim, -1, 0, 1, number> fe_eval(mf_storage);
590  *
591  * const unsigned int n_cells = mf_storage.n_macro_cells();
592  * const unsigned int n_q_points = fe_eval.n_q_points;
593  *
594  * coefficient_table->reinit(n_cells, 1);
595  *
596  * for (unsigned int cell = 0; cell < n_cells; ++cell)
597  * {
598  * fe_eval.reinit(cell);
599  *
600  * VectorizedArray<number> average_value = 0.;
601  * for (unsigned int q = 0; q < n_q_points; ++q)
602  * average_value += value(fe_eval.quadrature_point(q));
603  * average_value /= n_q_points;
604  *
605  * (*coefficient_table)(cell, 0) = average_value;
606  * }
607  *
608  * return coefficient_table;
609  * }
610  *
611  *
612  *
613  * @endcode
614  *
615  *
616  * <a name="Runtimeparameters"></a>
617  * <h3>Run time parameters</h3>
618  *
619 
620  *
621  * We will use ParameterHandler to pass in parameters at runtime. The
622  * structure @p Settings parses and stores these parameters to be queried
623  * throughout the program.
624  *
625  * @code
626  * struct Settings
627  * {
628  * bool try_parse(const std::string &prm_filename);
629  *
630  * enum SolverType
631  * {
632  * gmg_mb,
633  * gmg_mf,
634  * amg
635  * };
636  *
637  * SolverType solver;
638  *
639  * int dimension;
640  * double smoother_dampen;
641  * unsigned int smoother_steps;
642  * unsigned int n_steps;
643  * bool output;
644  * };
645  *
646  *
647  *
648  * bool Settings::try_parse(const std::string &prm_filename)
649  * {
650  * ParameterHandler prm;
651  * prm.declare_entry("dim", "2", Patterns::Integer(), "The problem dimension.");
652  * prm.declare_entry("n_steps",
653  * "10",
654  * Patterns::Integer(0),
655  * "Number of adaptive refinement steps.");
656  * prm.declare_entry("smoother dampen",
657  * "1.0",
658  * Patterns::Double(0.0),
659  * "Dampen factor for the smoother.");
660  * prm.declare_entry("smoother steps",
661  * "1",
662  * Patterns::Integer(1),
663  * "Number of smoother steps.");
664  * prm.declare_entry("solver",
665  * "MF",
666  * Patterns::Selection("MF|MB|AMG"),
667  * "Switch between matrix-free GMG, "
668  * "matrix-based GMG, and AMG.");
669  * prm.declare_entry("output",
670  * "false",
671  * Patterns::Bool(),
672  * "Output graphical results.");
673  *
674  * if (prm_filename.size() == 0)
675  * {
676  * std::cout << "**** Error: No input file provided!\n"
677  * << "**** Error: Call this program as './step-50 input.prm\n"
678  * << "\n"
679  * << "**** You may want to use one of the input files in this\n"
680  * << "**** directory, or use the following default values\n"
681  * << "**** to create an input file:\n";
682  * if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
683  * prm.print_parameters(std::cout, ParameterHandler::Text);
684  * return false;
685  * }
686  *
687  * try
688  * {
689  * prm.parse_input(prm_filename);
690  * }
691  * catch (std::exception &e)
692  * {
693  * if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
694  * std::cerr << e.what() << std::endl;
695  * return false;
696  * }
697  *
698  * if (prm.get("solver") == "MF")
699  * this->solver = gmg_mf;
700  * else if (prm.get("solver") == "MB")
701  * this->solver = gmg_mb;
702  * else if (prm.get("solver") == "AMG")
703  * this->solver = amg;
704  * else
705  * AssertThrow(false, ExcNotImplemented());
706  *
707  * this->dimension = prm.get_integer("dim");
708  * this->n_steps = prm.get_integer("n_steps");
709  * this->smoother_dampen = prm.get_double("smoother dampen");
710  * this->smoother_steps = prm.get_integer("smoother steps");
711  * this->output = prm.get_bool("output");
712  *
713  * return true;
714  * }
715  *
716  *
717  *
718  * @endcode
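 *
 * As a concrete illustration (a minimal sketch based only on the entries
 * declared above, not necessarily one of the input files shipped with the
 * tutorial), a parameter file for this program could look as follows:
 *
 * @code
 * set dim             = 2
 * set n_steps         = 10
 * set smoother dampen = 1.0
 * set smoother steps  = 1
 * set solver          = MF
 * set output          = true
 * @endcode
 *
 * Entries that are omitted simply keep the default values given in the
 * corresponding declare_entry() calls above.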
719  *
720  *
721  * <a name="LaplaceProblemclass"></a>
722  * <h3>LaplaceProblem class</h3>
723  *
724 
725  *
726  * This is the main class of the program. It looks very similar to
727  * @ref step_16 "step-16", @ref step_37 "step-37", and @ref step_40 "step-40". For the MatrixFree setup, we use the
728  * MatrixFreeOperators::LaplaceOperator class which defines `local_apply()`,
729  * `compute_diagonal()`, and `set_coefficient()` functions internally. Note that
730  * the polynomial degree is a template parameter of this class. This is
731  * necessary for the matrix-free code.
732  *
733  * @code
734  * template <int dim, int degree>
735  * class LaplaceProblem
736  * {
737  * public:
738  * LaplaceProblem(const Settings &settings);
739  * void run();
740  *
741  * private:
742  * @endcode
743  *
744  * We will use the following types throughout the program. First the
745  * matrix-based types, after that the matrix-free classes. For the
746  * matrix-free implementation, we use @p float for the level operators.
747  *
748  * @code
749  * using MatrixType = LA::MPI::SparseMatrix;
750  * using VectorType = LA::MPI::Vector;
751  * using PreconditionAMG = LA::MPI::PreconditionAMG;
752  * using PreconditionJacobi = LA::MPI::PreconditionJacobi;
753  *
754  * using MatrixFreeLevelMatrix = MatrixFreeOperators::LaplaceOperator<
755  * dim,
756  * degree,
757  * degree + 1,
758  * 1,
759  * LinearAlgebra::distributed::Vector<float>>;
760  * using MatrixFreeActiveMatrix = MatrixFreeOperators::LaplaceOperator<
761  * dim,
762  * degree,
763  * degree + 1,
764  * 1,
765  * LinearAlgebra::distributed::Vector<double>>;
766  *
767  * using MatrixFreeLevelVector = LinearAlgebra::distributed::Vector<float>;
768  * using MatrixFreeActiveVector = LinearAlgebra::distributed::Vector<double>;
769  *
770  * void setup_system();
771  * void setup_multigrid();
772  * void assemble_system();
773  * void assemble_multigrid();
774  * void assemble_rhs();
775  * void solve();
776  * void estimate();
777  * void refine_grid();
778  * void output_results(const unsigned int cycle);
779  *
780  * Settings settings;
781  *
782  * MPI_Comm mpi_communicator;
783  * ConditionalOStream pcout;
784  *
785  * parallel::distributed::Triangulation<dim> triangulation;
786  * const MappingQ1<dim> mapping;
787  * FE_Q<dim> fe;
788  *
789  * DoFHandler<dim> dof_handler;
790  *
791  * IndexSet locally_owned_dofs;
792  * IndexSet locally_relevant_dofs;
793  * AffineConstraints<double> constraints;
794  *
795  * MatrixType system_matrix;
796  * MatrixFreeActiveMatrix mf_system_matrix;
797  * VectorType solution;
798  * VectorType right_hand_side;
799  * Vector<double> estimated_error_per_cell;
800  *
801  * MGLevelObject<MatrixType> mg_matrix;
802  * MGLevelObject<MatrixType> mg_interface_in;
803  * MGConstrainedDoFs mg_constrained_dofs;
804  *
805  * MGLevelObject<MatrixFreeLevelMatrix> mf_mg_matrix;
806  *
807  * TimerOutput computing_timer;
808  * };
809  *
810  *
811  * @endcode
812  *
813  * The only interesting part about the constructor is that we construct the
814  * multigrid hierarchy unless we use AMG. For that, we need to parse the
815  * run time parameters before this constructor is called.
816  *
817  * @code
818  * template <int dim, int degree>
819  * LaplaceProblem<dim, degree>::LaplaceProblem(const Settings &settings)
820  * : settings(settings)
821  * , mpi_communicator(MPI_COMM_WORLD)
822  * , pcout(std::cout, (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
823  * , triangulation(mpi_communicator,
824  * Triangulation<dim>::limit_level_difference_at_vertices,
825  * (settings.solver == Settings::amg) ?
826  * parallel::distributed::Triangulation<dim>::default_setting :
827  * parallel::distributed::Triangulation<dim>::
828  * construct_multigrid_hierarchy)
829  * , mapping()
830  * , fe(degree)
831  * , dof_handler(triangulation)
832  * , computing_timer(pcout, TimerOutput::never, TimerOutput::wall_times)
833  * {
834  * GridGenerator::hyper_L(triangulation, -1., 1., /*colorize*/ false);
835  * triangulation.refine_global(1);
836  * }
837  *
838  *
839  *
840  * @endcode
841  *
842  *
843  * <a name="LaplaceProblemsetup_system"></a>
844  * <h4>LaplaceProblem::setup_system()</h4>
845  *
846 
847  *
848  * Unlike @ref step_16 "step-16" and @ref step_37 "step-37", we split the set up into two parts,
849  * setup_system() and setup_multigrid(). Here is the typical setup_system()
850  * function for the active mesh found in most tutorials. For matrix-free, the
851  * active mesh set up is similar to @ref step_37 "step-37"; for matrix-based (GMG and AMG
852  * solvers), the setup is similar to @ref step_40 "step-40".
853  *
854  * @code
855  * template <int dim, int degree>
856  * void LaplaceProblem<dim, degree>::setup_system()
857  * {
858  * TimerOutput::Scope timing(computing_timer, "Setup");
859  *
860  * dof_handler.distribute_dofs(fe);
861  *
862  * DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
863  * locally_owned_dofs = dof_handler.locally_owned_dofs();
864  *
865  * solution.reinit(locally_owned_dofs, mpi_communicator);
866  * right_hand_side.reinit(locally_owned_dofs, mpi_communicator);
867  * constraints.reinit(locally_relevant_dofs);
868  * DoFTools::make_hanging_node_constraints(dof_handler, constraints);
869  *
870  * VectorTools::interpolate_boundary_values(
871  * mapping, dof_handler, 0, Functions::ZeroFunction<dim>(), constraints);
872  * constraints.close();
873  *
874  * switch (settings.solver)
875  * {
876  * case Settings::gmg_mf:
877  * {
878  * typename MatrixFree<dim, double>::AdditionalData additional_data;
879  * additional_data.tasks_parallel_scheme =
880  * MatrixFree<dim, double>::AdditionalData::none;
881  * additional_data.mapping_update_flags =
882  * (update_gradients | update_JxW_values | update_quadrature_points);
883  * std::shared_ptr<MatrixFree<dim, double>> mf_storage =
884  * std::make_shared<MatrixFree<dim, double>>();
885  * mf_storage->reinit(dof_handler,
886  * constraints,
887  * QGauss<1>(degree + 1),
888  * additional_data);
889  *
890  * mf_system_matrix.initialize(mf_storage);
891  *
892  * const Coefficient<dim> coefficient;
893  * mf_system_matrix.set_coefficient(
894  * coefficient.make_coefficient_table(*mf_storage));
895  *
896  * break;
897  * }
898  *
899  * case Settings::gmg_mb:
900  * case Settings::amg:
901  * {
902  * #ifdef USE_PETSC_LA
903  * DynamicSparsityPattern dsp(locally_relevant_dofs);
904  * DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
905  *
906  * SparsityTools::distribute_sparsity_pattern(dsp,
907  * locally_owned_dofs,
908  * mpi_communicator,
909  * locally_relevant_dofs);
910  *
911  * system_matrix.reinit(locally_owned_dofs,
912  * locally_owned_dofs,
913  * dsp,
914  * mpi_communicator);
915  * #else
916  * TrilinosWrappers::SparsityPattern dsp(locally_owned_dofs,
917  * locally_owned_dofs,
918  * locally_relevant_dofs,
919  * MPI_COMM_WORLD);
920  * DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
921  * dsp.compress();
922  * system_matrix.reinit(dsp);
923  * #endif
924  *
925  * break;
926  * }
927  *
928  * default:
929  * Assert(false, ExcNotImplemented());
930  * }
931  * }
932  *
933  * @endcode
934  *
935  *
936  * <a name="LaplaceProblemsetup_multigrid"></a>
937  * <h4>LaplaceProblem::setup_multigrid()</h4>
938  *
939 
940  *
941  * This function does the multilevel setup for both matrix-free and
942  * matrix-based GMG. The matrix-free setup is similar to that of @ref step_37 "step-37", and
943  * the matrix-based is similar to @ref step_16 "step-16", except we must use appropriate
944  * distributed sparsity patterns.
945  *
946 
947  *
948  * The function is not called for the AMG approach, but to err on the
949  * safe side, the main `switch` statement of this function
950  * nevertheless makes sure that the function only operates on known
951  * multigrid settings by throwing an assertion if the function were
952  * called for anything other than the two geometric multigrid methods.
953  *
954  * @code
955  * template <int dim, int degree>
956  * void LaplaceProblem<dim, degree>::setup_multigrid()
957  * {
958  * TimerOutput::Scope timing(computing_timer, "Setup multigrid");
959  *
960  * dof_handler.distribute_mg_dofs();
961  *
962  * mg_constrained_dofs.clear();
963  * mg_constrained_dofs.initialize(dof_handler);
964  *
965  * const std::set<types::boundary_id> boundary_ids = {types::boundary_id(0)};
966  * mg_constrained_dofs.make_zero_boundary_constraints(dof_handler, boundary_ids);
967  *
968  * const unsigned int n_levels = triangulation.n_global_levels();
969  *
970  * switch (settings.solver)
971  * {
972  * case Settings::gmg_mf:
973  * {
974  * mf_mg_matrix.resize(0, n_levels - 1);
975  *
976  * for (unsigned int level = 0; level < n_levels; ++level)
977  * {
978  * IndexSet relevant_dofs;
979  * DoFTools::extract_locally_relevant_level_dofs(dof_handler,
980  * level,
981  * relevant_dofs);
982  * AffineConstraints<double> level_constraints;
983  * level_constraints.reinit(relevant_dofs);
984  * level_constraints.add_lines(
985  * mg_constrained_dofs.get_boundary_indices(level));
986  * level_constraints.close();
987  *
988  * typename MatrixFree<dim, float>::AdditionalData additional_data;
989  * additional_data.tasks_parallel_scheme =
990  * MatrixFree<dim, float>::AdditionalData::none;
991  * additional_data.mapping_update_flags =
992  * (update_gradients | update_JxW_values |
993  * update_quadrature_points);
994  * additional_data.mg_level = level;
995  * std::shared_ptr<MatrixFree<dim, float>> mf_storage_level(
996  * new MatrixFree<dim, float>());
997  * mf_storage_level->reinit(dof_handler,
998  * level_constraints,
999  * QGauss<1>(degree + 1),
1000  * additional_data);
1001  *
1002  * mf_mg_matrix[level].initialize(mf_storage_level,
1003  * mg_constrained_dofs,
1004  * level);
1005  *
1006  * const Coefficient<dim> coefficient;
1007  * mf_mg_matrix[level].set_coefficient(
1008  * coefficient.make_coefficient_table(*mf_storage_level));
1009  *
1010  * mf_mg_matrix[level].compute_diagonal();
1011  * }
1012  *
1013  * break;
1014  * }
1015  *
1016  * case Settings::gmg_mb:
1017  * {
1018  * mg_matrix.resize(0, n_levels - 1);
1019  * mg_matrix.clear_elements();
1020  * mg_interface_in.resize(0, n_levels - 1);
1021  * mg_interface_in.clear_elements();
1022  *
1023  * for (unsigned int level = 0; level < n_levels; ++level)
1024  * {
1025  * IndexSet dof_set;
1026  * DoFTools::extract_locally_relevant_level_dofs(dof_handler,
1027  * level,
1028  * dof_set);
1029  *
1030  * {
1031  * #ifdef USE_PETSC_LA
1032  * DynamicSparsityPattern dsp(dof_set);
1033  * MGTools::make_sparsity_pattern(dof_handler, dsp, level);
1034  * dsp.compress();
1035  * SparsityTools::distribute_sparsity_pattern(
1036  * dsp,
1037  * dof_handler.locally_owned_mg_dofs(level),
1038  * mpi_communicator,
1039  * dof_set);
1040  *
1041  * mg_matrix[level].reinit(
1042  * dof_handler.locally_owned_mg_dofs(level),
1043  * dof_handler.locally_owned_mg_dofs(level),
1044  * dsp,
1045  * mpi_communicator);
1046  * #else
1047  * TrilinosWrappers::SparsityPattern dsp(
1048  * dof_handler.locally_owned_mg_dofs(level),
1049  * dof_handler.locally_owned_mg_dofs(level),
1050  * dof_set,
1051  * mpi_communicator);
1052  * MGTools::make_sparsity_pattern(dof_handler, dsp, level);
1053  *
1054  * dsp.compress();
1055  * mg_matrix[level].reinit(dsp);
1056  * #endif
1057  * }
1058  *
1059  * {
1060  * #ifdef USE_PETSC_LA
1061  * DynamicSparsityPattern dsp(dof_set);
1062  * MGTools::make_interface_sparsity_pattern(dof_handler,
1063  * mg_constrained_dofs,
1064  * dsp,
1065  * level);
1066  * dsp.compress();
1067  * SparsityTools::distribute_sparsity_pattern(
1068  * dsp,
1069  * dof_handler.locally_owned_mg_dofs(level),
1070  * mpi_communicator,
1071  * dof_set);
1072  *
1073  * mg_interface_in[level].reinit(
1074  * dof_handler.locally_owned_mg_dofs(level),
1075  * dof_handler.locally_owned_mg_dofs(level),
1076  * dsp,
1077  * mpi_communicator);
1078  * #else
1079  * TrilinosWrappers::SparsityPattern dsp(
1080  * dof_handler.locally_owned_mg_dofs(level),
1081  * dof_handler.locally_owned_mg_dofs(level),
1082  * dof_set,
1083  * mpi_communicator);
1084  *
1085  * MGTools::make_interface_sparsity_pattern(dof_handler,
1086  * mg_constrained_dofs,
1087  * dsp,
1088  * level);
1089  * dsp.compress();
1090  * mg_interface_in[level].reinit(dsp);
1091  * #endif
1092  * }
1093  * }
1094  * break;
1095  * }
1096  *
1097  * default:
1098  * Assert(false, ExcNotImplemented());
1099  * }
1100  * }
1101  *
1102  *
1103  * @endcode
1104  *
1105  *
1106  * <a name="LaplaceProblemassemble_system"></a>
1107  * <h4>LaplaceProblem::assemble_system()</h4>
1108  *
1109 
1110  *
1111  * The assembly is split into three parts: `assemble_system()`,
1112  * `assemble_multigrid()`, and `assemble_rhs()`. The
1113  * `assemble_system()` function here assembles and stores the (global)
1114  * system matrix and the right-hand side for the matrix-based
1115  * methods. It is similar to the assembly in @ref step_40 "step-40".
1116  *
1117 
1118  *
1119  * Note that the matrix-free method does not execute this function as it does
1120  * not need to assemble a matrix, and it will instead assemble the right-hand
1121  * side in assemble_rhs().
1122  *
1123  * @code
1124  * template <int dim, int degree>
1125  * void LaplaceProblem<dim, degree>::assemble_system()
1126  * {
1127  * TimerOutput::Scope timing(computing_timer, "Assemble");
1128  *
1129  * const QGauss<dim> quadrature_formula(degree + 1);
1130  *
1131  * FEValues<dim> fe_values(fe,
1132  * quadrature_formula,
1133  * update_values | update_gradients |
1134  * update_quadrature_points | update_JxW_values);
1135  *
1136  * const unsigned int dofs_per_cell = fe.dofs_per_cell;
1137  * const unsigned int n_q_points = quadrature_formula.size();
1138  *
1139  * FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1140  * Vector<double> cell_rhs(dofs_per_cell);
1141  *
1142  * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1143  *
1144  * const Coefficient<dim> coefficient;
1145  * RightHandSide<dim> rhs;
1146  * std::vector<double> rhs_values(n_q_points);
1147  *
1148  * for (const auto &cell : dof_handler.active_cell_iterators())
1149  * if (cell->is_locally_owned())
1150  * {
1151  * cell_matrix = 0;
1152  * cell_rhs = 0;
1153  *
1154  * fe_values.reinit(cell);
1155  *
1156  * const double coefficient_value =
1157  * coefficient.average_value(fe_values.get_quadrature_points());
1158  * rhs.value_list(fe_values.get_quadrature_points(), rhs_values);
1159  *
1160  * for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1161  * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1162  * {
1163  * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1164  * cell_matrix(i, j) +=
1165  * coefficient_value * // epsilon(x)
1166  * fe_values.shape_grad(i, q_point) * // * grad phi_i(x)
1167  * fe_values.shape_grad(j, q_point) * // * grad phi_j(x)
1168  * fe_values.JxW(q_point); // * dx
1169  *
1170  * cell_rhs(i) +=
1171  * fe_values.shape_value(i, q_point) * // phi_i(x)
1172  * rhs_values[q_point] * // * f(x)
1173  * fe_values.JxW(q_point); // * dx
1174  * }
1175  *
1176  * cell->get_dof_indices(local_dof_indices);
1177  * constraints.distribute_local_to_global(cell_matrix,
1178  * cell_rhs,
1179  * local_dof_indices,
1180  * system_matrix,
1181  * right_hand_side);
1182  * }
1183  *
1184  * system_matrix.compress(VectorOperation::add);
1185  * right_hand_side.compress(VectorOperation::add);
1186  * }
1187  *
1188  *
1189  * @endcode
1190  *
1191  *
1192  * <a name="LaplaceProblemassemble_multigrid"></a>
1193  * <h4>LaplaceProblem::assemble_multigrid()</h4>
1194  *
1195 
1196  *
1197  * The following function assembles and stores the multilevel matrices for the
1198  * matrix-based GMG method. This function is similar to the one found in
1199  * @ref step_16 "step-16", only here it works for distributed meshes. This difference amounts
1200  * to adding a condition that we only assemble on locally owned level cells and
1201  * a call to compress() for each matrix that is built.
1202  *
1203  * @code
1204  * template <int dim, int degree>
1205  * void LaplaceProblem<dim, degree>::assemble_multigrid()
1206  * {
1207  * TimerOutput::Scope timing(computing_timer, "Assemble multigrid");
1208  *
1209  * QGauss<dim> quadrature_formula(degree + 1);
1210  *
1211  * FEValues<dim> fe_values(fe,
1212  * quadrature_formula,
1213  * update_values | update_gradients |
1214  * update_quadrature_points | update_JxW_values);
1215  *
1216  * const unsigned int dofs_per_cell = fe.dofs_per_cell;
1217  * const unsigned int n_q_points = quadrature_formula.size();
1218  *
1219  * FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1220  *
1221  * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1222  *
1223  * const Coefficient<dim> coefficient;
1224  *
1225  * std::vector<AffineConstraints<double>> boundary_constraints(
1226  * triangulation.n_global_levels());
1227  * for (unsigned int level = 0; level < triangulation.n_global_levels(); ++level)
1228  * {
1229  * IndexSet dof_set;
1230  * DoFTools::extract_locally_relevant_level_dofs(dof_handler,
1231  * level,
1232  * dof_set);
1233  * boundary_constraints[level].reinit(dof_set);
1234  * boundary_constraints[level].add_lines(
1235  * mg_constrained_dofs.get_refinement_edge_indices(level));
1236  * boundary_constraints[level].add_lines(
1237  * mg_constrained_dofs.get_boundary_indices(level));
1238  *
1239  * boundary_constraints[level].close();
1240  * }
1241  *
1242  * for (const auto &cell : dof_handler.cell_iterators())
1243  * if (cell->level_subdomain_id() == triangulation.locally_owned_subdomain())
1244  * {
1245  * cell_matrix = 0;
1246  * fe_values.reinit(cell);
1247  *
1248  * const double coefficient_value =
1249  * coefficient.average_value(fe_values.get_quadrature_points());
1250  *
1251  * for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1252  * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1253  * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1254  * cell_matrix(i, j) +=
1255  * coefficient_value * fe_values.shape_grad(i, q_point) *
1256  * fe_values.shape_grad(j, q_point) * fe_values.JxW(q_point);
1257  *
1258  * cell->get_mg_dof_indices(local_dof_indices);
1259  *
1260  * boundary_constraints[cell->level()].distribute_local_to_global(
1261  * cell_matrix, local_dof_indices, mg_matrix[cell->level()]);
1262  *
1263  * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1264  * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1265  * if (mg_constrained_dofs.is_interface_matrix_entry(
1266  * cell->level(), local_dof_indices[i], local_dof_indices[j]))
1267  * mg_interface_in[cell->level()].add(local_dof_indices[i],
1268  * local_dof_indices[j],
1269  * cell_matrix(i, j));
1270  * }
1271  *
1272  * for (unsigned int i = 0; i < triangulation.n_global_levels(); ++i)
1273  * {
1274  * mg_matrix[i].compress(VectorOperation::add);
1275  * mg_interface_in[i].compress(VectorOperation::add);
1276  * }
1277  * }
1278  *
1279  *
1280  *
1281  * @endcode
1282  *
1283  *
1284  * <a name="LaplaceProblemassemble_rhs"></a>
1285  * <h4>LaplaceProblem::assemble_rhs()</h4>
1286  *
1287 
1288  *
1289  * The final function in this triptych assembles the right-hand side
1290  * vector for the matrix-free method -- because in the matrix-free
1291  * framework, we don't have to assemble the matrix and can get away
1292  * with only assembling the right hand side. We could do this by extracting the
1293  * code from the `assemble_system()` function above that deals with the right
1294  * hand side, but we decide instead to go all in on the matrix-free approach and
1295  * do this assembly with the matrix-free framework as well.
1296  *
1297 
1298  *
1299  * The result is a function that is similar
1300  * to the one found in the "Use FEEvaluation::read_dof_values_plain()
1301  * to avoid resolving constraints" subsection in the "Possibilities
1302  * for extensions" section of @ref step_37 "step-37".
1303  *
1304 
1305  *
1306  * The reason for this function is that the MatrixFree operators do not take
1307  * into account non-homogeneous Dirichlet constraints, instead treating all
1308  * Dirichlet constraints as homogeneous. To account for this, the right-hand
1309  * side here is assembled as the residual @f$r_0 = f-Au_0@f$, where @f$u_0@f$ is a
1310  * zero vector except in the Dirichlet values. Then when solving, we have that
1311  * the solution is @f$u = u_0 + A^{-1}r_0@f$. This can be seen as a Newton
1312  * iteration on a linear system with initial guess @f$u_0@f$. The CG solve in the
1313  * `solve()` function below computes @f$A^{-1}r_0@f$ and the call to
1314  * `constraints.distribute()` (which directly follows) adds the @f$u_0@f$.
1315  *
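 * Written out as formulas (this merely restates the relations used above), the
 * splitting reads
 * @f[
 *   A u = f, \qquad u = u_0 + \delta u
 *   \quad\Longrightarrow\quad
 *   A\,\delta u = f - A u_0 = r_0, \qquad u = u_0 + A^{-1} r_0.
 * @f]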
1316 
1317  *
1318  * Obviously, since we are considering a problem with zero Dirichlet boundary,
1319  * we could have taken a similar approach to @ref step_37 "step-37" `assemble_rhs()`, but this
1320  * additional work allows us to change the problem declaration if we so
1321  * choose.
1322  *
1323 
1324  *
1325  * This function has two parts in the integration loop: applying the negative
1326  * of matrix @f$A@f$ to @f$u_0@f$ by submitting the negative of the gradient, and adding
1327  * the right-hand side contribution by submitting the value @f$f@f$. We must be sure
1328  * to use `read_dof_values_plain()` for evaluating @f$u_0@f$ as `read_dof_values()`
1329  * would set all Dirichlet values to zero.
1330  *
1331 
1332  *
1333  * Finally, the right_hand_side vector is of type LA::MPI::Vector, but the
1334  * MatrixFree class only works with
1335  * dealii::LinearAlgebra::distributed::Vector. Therefore we must
1336  * compute the right-hand side using MatrixFree functionality and then
1337  * use the functions in the `ChangeVectorTypes` namespace to copy it to
1338  * the correct type.
1339  *
1340  * @code
1341  * template <int dim, int degree>
1342  * void LaplaceProblem<dim, degree>::assemble_rhs()
1343  * {
1344  * TimerOutput::Scope timing(computing_timer, "Assemble right-hand side");
1345  *
1346  * MatrixFreeActiveVector solution_copy;
1347  * MatrixFreeActiveVector right_hand_side_copy;
1348  * mf_system_matrix.initialize_dof_vector(solution_copy);
1349  * mf_system_matrix.initialize_dof_vector(right_hand_side_copy);
1350  *
1351  * solution_copy = 0.;
1352  * constraints.distribute(solution_copy);
1353  * solution_copy.update_ghost_values();
1354  * right_hand_side_copy = 0;
1355  * const Table<2, VectorizedArray<double>> &coefficient =
1356  * *(mf_system_matrix.get_coefficient());
1357  *
1358  * RightHandSide<dim> right_hand_side_function;
1359  *
1360  * FEEvaluation<dim, degree, degree + 1, 1, double> phi(
1361  * *mf_system_matrix.get_matrix_free());
1362  *
1363  * for (unsigned int cell = 0;
1364  * cell < mf_system_matrix.get_matrix_free()->n_macro_cells();
1365  * ++cell)
1366  * {
1367  * phi.reinit(cell);
1368  * phi.read_dof_values_plain(solution_copy);
1369  * phi.evaluate(false, true, false);
1370  *
1371  * for (unsigned int q = 0; q < phi.n_q_points; ++q)
1372  * {
1373  * phi.submit_gradient(-1.0 *
1374  * (coefficient(cell, 0) * phi.get_gradient(q)),
1375  * q);
1376  * phi.submit_value(
1377  * right_hand_side_function.value(phi.quadrature_point(q)), q);
1378  * }
1379  *
1380  * phi.integrate_scatter(true, true, right_hand_side_copy);
1381  * }
1382  *
1383  * right_hand_side_copy.compress(VectorOperation::add);
1384  *
1385  * ChangeVectorTypes::copy(right_hand_side, right_hand_side_copy);
1386  * }
1387  *
1388  *
1389  *
1390  * @endcode
1391  *
1392  *
1393  * <a name="LaplaceProblemsolve"></a>
1394  * <h4>LaplaceProblem::solve()</h4>
1395  *
1396 
1397  *
1398  * Here we set up the multigrid preconditioner, test the timing of a single
1399  * V-cycle, and solve the linear system. Unsurprisingly, this is one of the
1400  * places where the three methods differ the most.
1401  *
1402  * @code
1403  * template <int dim, int degree>
1404  * void LaplaceProblem<dim, degree>::solve()
1405  * {
1406  * TimerOutput::Scope timing(computing_timer, "Solve");
1407  *
1408  * SolverControl solver_control(1000, 1.e-10 * right_hand_side.l2_norm());
1409  * solver_control.enable_history_data();
1410  *
1411  * solution = 0.;
1412  *
1413  * @endcode
1414  *
1415  * The solver for the matrix-free GMG method is similar to @ref step_37 "step-37", apart
1416  * from adding some interface matrices in complete analogy to @ref step_16 "step-16".
1417  *
1418  * @code
1419  * switch (settings.solver)
1420  * {
1421  * case Settings::gmg_mf:
1422  * {
1423  * computing_timer.enter_subsection("Solve: Preconditioner setup");
1424  *
1425  * MGTransferMatrixFree<dim, float> mg_transfer(mg_constrained_dofs);
1426  * mg_transfer.build(dof_handler);
1427  *
1428  * SolverControl coarse_solver_control(1000, 1e-12, false, false);
1429  * SolverCG<MatrixFreeLevelVector> coarse_solver(coarse_solver_control);
1430  * PreconditionIdentity identity;
1431  * MGCoarseGridIterativeSolver<MatrixFreeLevelVector,
1432  * SolverCG<MatrixFreeLevelVector>,
1433  * MatrixFreeLevelMatrix,
1434  * PreconditionIdentity>
1435  * coarse_grid_solver(coarse_solver, mf_mg_matrix[0], identity);
1436  *
1437  * using Smoother = dealii::PreconditionJacobi<MatrixFreeLevelMatrix>;
1438  * MGSmootherPrecondition<MatrixFreeLevelMatrix,
1439  * Smoother,
1440  * MatrixFreeLevelVector>
1441  * smoother;
1442  * smoother.initialize(mf_mg_matrix,
1443  * typename Smoother::AdditionalData(
1444  * settings.smoother_dampen));
1445  * smoother.set_steps(settings.smoother_steps);
1446  *
1447  * mg::Matrix<MatrixFreeLevelVector> mg_m(mf_mg_matrix);
1448  *
1449  * MGLevelObject<
1450  * MatrixFreeOperators::MGInterfaceOperator<MatrixFreeLevelMatrix>>
1451  * mg_interface_matrices;
1452  * mg_interface_matrices.resize(0, triangulation.n_global_levels() - 1);
1453  * for (unsigned int level = 0; level < triangulation.n_global_levels();
1454  * ++level)
1455  * mg_interface_matrices[level].initialize(mf_mg_matrix[level]);
1456  * mg::Matrix<MatrixFreeLevelVector> mg_interface(mg_interface_matrices);
1457  *
1458  * Multigrid<MatrixFreeLevelVector> mg(
1459  * mg_m, coarse_grid_solver, mg_transfer, smoother, smoother);
1460  * mg.set_edge_matrices(mg_interface, mg_interface);
1461  *
1462  * PreconditionMG<dim,
1463  * MatrixFreeLevelVector,
1464  * MGTransferMatrixFree<dim, float>>
1465  * preconditioner(dof_handler, mg, mg_transfer);
1466  *
1467  * @endcode
1468  *
1469  * Copy the solution vector and right-hand side from LA::MPI::Vector
1470  * to dealii::LinearAlgebra::distributed::Vector so that we can solve.
1471  *
1472  * @code
1473  * MatrixFreeActiveVector solution_copy;
1474  * MatrixFreeActiveVector right_hand_side_copy;
1475  * mf_system_matrix.initialize_dof_vector(solution_copy);
1476  * mf_system_matrix.initialize_dof_vector(right_hand_side_copy);
1477  *
1478  * ChangeVectorTypes::copy(solution_copy, solution);
1479  * ChangeVectorTypes::copy(right_hand_side_copy, right_hand_side);
1480  * computing_timer.leave_subsection("Solve: Preconditioner setup");
1481  *
1482  * @endcode
1483  *
1484  * Timing for 1 V-cycle.
1485  *
1486  * @code
1487  * {
1488  * TimerOutput::Scope timing(computing_timer,
1489  * "Solve: 1 multigrid V-cycle");
1490  * preconditioner.vmult(solution_copy, right_hand_side_copy);
1491  * }
1492  * solution_copy = 0.;
1493  *
1494  * @endcode
1495  *
1496  * Solve the linear system, update the ghost values of the solution,
1497  * copy back to LA::MPI::Vector and distribute constraints.
1498  *
1499  * @code
1500  * {
1501  * SolverCG<MatrixFreeActiveVector> solver(solver_control);
1502  *
1503  * TimerOutput::Scope timing(computing_timer, "Solve: CG");
1504  * solver.solve(mf_system_matrix,
1505  * solution_copy,
1506  * right_hand_side_copy,
1507  * preconditioner);
1508  * }
1509  *
1510  * solution_copy.update_ghost_values();
1511  * ChangeVectorTypes::copy(solution, solution_copy);
1512  * constraints.distribute(solution);
1513  *
1514  * break;
1515  * }
1516  *
1517  * @endcode
1518  *
1519  * Solver for the matrix-based GMG method, similar to @ref step_16 "step-16", only
1520  * using a Jacobi smoother instead of a SOR smoother (which is not
1521  * implemented in parallel).
1522  *
1523  * @code
1524  * case Settings::gmg_mb:
1525  * {
1526  * computing_timer.enter_subsection("Solve: Preconditioner setup");
1527  *
1528  * MGTransferPrebuilt<VectorType> mg_transfer(mg_constrained_dofs);
1529  * mg_transfer.build(dof_handler);
1530  *
1531  * SolverControl coarse_solver_control(1000, 1e-12, false, false);
1532  * SolverCG<VectorType> coarse_solver(coarse_solver_control);
1533  * PreconditionIdentity identity;
1534  * MGCoarseGridIterativeSolver<VectorType,
1535  * SolverCG<VectorType>,
1536  * MatrixType,
1537  * PreconditionIdentity>
1538  * coarse_grid_solver(coarse_solver, mg_matrix[0], identity);
1539  *
1540  * using Smoother = LA::MPI::PreconditionJacobi;
1541  * MGSmootherPrecondition<MatrixType, Smoother, VectorType> smoother;
1542  *
1543  * #ifdef USE_PETSC_LA
1544  * smoother.initialize(mg_matrix);
1545  * Assert(
1546  * settings.smoother_dampen == 1.0,
1547  * ExcNotImplemented(
1548  * "PETSc's PreconditionJacobi has no support for a damping parameter."));
1549  * #else
1550  * smoother.initialize(mg_matrix, settings.smoother_dampen);
1551  * #endif
1552  *
1553  * smoother.set_steps(settings.smoother_steps);
1554  *
1555  * mg::Matrix<VectorType> mg_m(mg_matrix);
1556  * mg::Matrix<VectorType> mg_in(mg_interface_in);
1557  * mg::Matrix<VectorType> mg_out(mg_interface_in);
1558  *
1559  * Multigrid<VectorType> mg(
1560  * mg_m, coarse_grid_solver, mg_transfer, smoother, smoother);
1561  * mg.set_edge_matrices(mg_out, mg_in);
1562  *
1563  *
1564  * PreconditionMG<dim, VectorType, MGTransferPrebuilt<VectorType>>
1565  * preconditioner(dof_handler, mg, mg_transfer);
1566  *
1567  * computing_timer.leave_subsection("Solve: Preconditioner setup");
1568  *
1569  * @endcode
1570  *
1571  * Timing for 1 V-cycle.
1572  *
1573  * @code
1574  * {
1575  * TimerOutput::Scope timing(computing_timer,
1576  * "Solve: 1 multigrid V-cycle");
1577  * preconditioner.vmult(solution, right_hand_side);
1578  * }
1579  * solution = 0.;
1580  *
1581  * @endcode
1582  *
1583  * Solve the linear system and distribute constraints.
1584  *
1585  * @code
1586  * {
1587  * SolverCG<VectorType> solver(solver_control);
1588  *
1589  * TimerOutput::Scope timing(computing_timer, "Solve: CG");
1590  * solver.solve(system_matrix,
1591  * solution,
1592  * right_hand_side,
1593  * preconditioner);
1594  * }
1595  *
1596  * constraints.distribute(solution);
1597  *
1598  * break;
1599  * }
1600  *
1601  * @endcode
1602  *
1603  * Solver for the AMG method, similar to @ref step_40 "step-40".
1604  *
1605  * @code
1606  * case Settings::amg:
1607  * {
1608  * computing_timer.enter_subsection("Solve: Preconditioner setup");
1609  *
1610  * PreconditionAMG preconditioner;
1611  * PreconditionAMG::AdditionalData Amg_data;
1612  *
1613  * #ifdef USE_PETSC_LA
1614  * Amg_data.symmetric_operator = true;
1615  * #else
1616  * Amg_data.elliptic = true;
1617  * Amg_data.smoother_type = "Jacobi";
1618  * Amg_data.higher_order_elements = true;
1619  * Amg_data.smoother_sweeps = settings.smoother_steps;
1620  * Amg_data.aggregation_threshold = 0.02;
1621  * #endif
1622  *
1623  * Amg_data.output_details = false;
1624  *
1625  * preconditioner.initialize(system_matrix, Amg_data);
1626  * computing_timer.leave_subsection("Solve: Preconditioner setup");
1627  *
1628  * @endcode
1629  *
1630  * Timing for 1 V-cycle.
1631  *
1632  * @code
1633  * {
1634  * TimerOutput::Scope timing(computing_timer,
1635  * "Solve: 1 multigrid V-cycle");
1636  * preconditioner.vmult(solution, right_hand_side);
1637  * }
1638  * solution = 0.;
1639  *
1640  * @endcode
1641  *
1642  * Solve the linear system and distribute constraints.
1643  *
1644  * @code
1645  * {
1646  * SolverCG<VectorType> solver(solver_control);
1647  *
1648  * TimerOutput::Scope timing(computing_timer, "Solve: CG");
1649  * solver.solve(system_matrix,
1650  * solution,
1651  * right_hand_side,
1652  * preconditioner);
1653  * }
1654  * constraints.distribute(solution);
1655  *
1656  * break;
1657  * }
1658  *
1659  * default:
1660  * Assert(false, ExcInternalError());
1661  * }
1662  *
1663  * pcout << " Number of CG iterations: " << solver_control.last_step()
1664  * << std::endl;
1665  * }
1666  *
1667  *
1668  * @endcode
1669  *
1670  *
1671  * <a name="Theerrorestimator"></a>
1672  * <h3>The error estimator</h3>
1673  *
1674 
1675  *
1676  * We use the FEInterfaceValues class to assemble an error estimator to decide
1677  * which cells to refine. See the exact definition of the cell and face
1678  * integrals in the introduction. To use the method, we define Scratch and
1679  * Copy objects for the MeshWorker::mesh_loop() with much of the following code
1680  * being in essence as was set up in @ref step_12 "step-12" already (or at least similar in
1681  * spirit).
1682  *
1683  * @code
1684  * template <int dim>
1685  * struct ScratchData
1686  * {
1687  * ScratchData(const Mapping<dim> & mapping,
1688  * const FiniteElement<dim> &fe,
1689  * const unsigned int quadrature_degree,
1690  * const UpdateFlags update_flags,
1691  * const UpdateFlags interface_update_flags)
1692  * : fe_values(mapping, fe, QGauss<dim>(quadrature_degree), update_flags)
1693  * , fe_interface_values(mapping,
1694  * fe,
1695  * QGauss<dim - 1>(quadrature_degree),
1696  * interface_update_flags)
1697  * {}
1698  *
1699  *
1700  * ScratchData(const ScratchData<dim> &scratch_data)
1701  * : fe_values(scratch_data.fe_values.get_mapping(),
1702  * scratch_data.fe_values.get_fe(),
1703  * scratch_data.fe_values.get_quadrature(),
1704  * scratch_data.fe_values.get_update_flags())
1705  * , fe_interface_values(scratch_data.fe_values.get_mapping(),
1706  * scratch_data.fe_values.get_fe(),
1707  * scratch_data.fe_interface_values.get_quadrature(),
1708  * scratch_data.fe_interface_values.get_update_flags())
1709  * {}
1710  *
1711  * FEValues<dim> fe_values;
1712  * FEInterfaceValues<dim> fe_interface_values;
1713  * };
1714  *
1715  *
1716  *
1717  * struct CopyData
1718  * {
1719  * CopyData()
1720  * : cell_index(numbers::invalid_unsigned_int)
1721  * , value(0.)
1722  * {}
1723  *
1724  * CopyData(const CopyData &) = default;
1725  *
1726  * struct FaceData
1727  * {
1728  * unsigned int cell_indices[2];
1729  * double values[2];
1730  * };
1731  *
1732  * unsigned int cell_index;
1733  * double value;
1734  * std::vector<FaceData> face_data;
1735  * };
1736  *
1737  *
1738  * template <int dim, int degree>
1739  * void LaplaceProblem<dim, degree>::estimate()
1740  * {
1741  * TimerOutput::Scope timing(computing_timer, "Estimate");
1742  *
1743  * VectorType temp_solution;
1744  * temp_solution.reinit(locally_owned_dofs,
1745  * locally_relevant_dofs,
1746  * mpi_communicator);
1747  * temp_solution = solution;
1748  *
1749  * const Coefficient<dim> coefficient;
1750  *
1751  * estimated_error_per_cell.reinit(triangulation.n_active_cells());
1752  *
1753  * using Iterator = typename DoFHandler<dim>::active_cell_iterator;
1754  *
1755  * @endcode
1756  *
1757  * Assembler for cell residual @f$h \| f + \epsilon \triangle u \|_K@f$
1758  *
1759  * @code
1760  * auto cell_worker = [&](const Iterator & cell,
1761  * ScratchData<dim> &scratch_data,
1762  * CopyData & copy_data) {
1763  * FEValues<dim> &fe_values = scratch_data.fe_values;
1764  * fe_values.reinit(cell);
1765  *
1766  * RightHandSide<dim> rhs;
1767  * const double rhs_value = rhs.value(cell->center());
1768  *
1769  * const double nu = coefficient.value(cell->center());
1770  *
1771  * std::vector<Tensor<2, dim>> hessians(fe_values.n_quadrature_points);
1772  * fe_values.get_function_hessians(temp_solution, hessians);
1773  *
1774  * copy_data.cell_index = cell->active_cell_index();
1775  *
1776  * double residual_norm_square = 0.;
1777  * for (unsigned k = 0; k < fe_values.n_quadrature_points; ++k)
1778  * {
1779  * const double residual = (rhs_value + nu * trace(hessians[k]));
1780  * residual_norm_square += residual * residual * fe_values.JxW(k);
1781  * }
1782  *
1783  * copy_data.value = cell->diameter() * std::sqrt(residual_norm_square);
1784  * };
1785  *
1786  * @endcode
1787  *
1788  * Assembler for face term @f$\sum_F h_F \| \jump{\epsilon \nabla u \cdot n}
1789  * \|_F^2@f$
1790  *
1791  * @code
1792  * auto face_worker = [&](const Iterator & cell,
1793  * const unsigned int &f,
1794  * const unsigned int &sf,
1795  * const Iterator & ncell,
1796  * const unsigned int &nf,
1797  * const unsigned int &nsf,
1798  * ScratchData<dim> & scratch_data,
1799  * CopyData & copy_data) {
1800  * FEInterfaceValues<dim> &fe_interface_values =
1801  * scratch_data.fe_interface_values;
1802  * fe_interface_values.reinit(cell, f, sf, ncell, nf, nsf);
1803  *
1804  * copy_data.face_data.emplace_back();
1805  * CopyData::FaceData &copy_data_face = copy_data.face_data.back();
1806  *
1807  * copy_data_face.cell_indices[0] = cell->active_cell_index();
1808  * copy_data_face.cell_indices[1] = ncell->active_cell_index();
1809  *
1810  * const double coeff1 = coefficient.value(cell->center());
1811  * const double coeff2 = coefficient.value(ncell->center());
1812  *
1813  * std::vector<Tensor<1, dim>> grad_u[2];
1814  *
1815  * for (unsigned int i = 0; i < 2; ++i)
1816  * {
1817  * grad_u[i].resize(fe_interface_values.n_quadrature_points);
1818  * fe_interface_values.get_fe_face_values(i).get_function_gradients(
1819  * temp_solution, grad_u[i]);
1820  * }
1821  *
1822  * double jump_norm_square = 0.;
1823  *
1824  * for (unsigned int qpoint = 0;
1825  * qpoint < fe_interface_values.n_quadrature_points;
1826  * ++qpoint)
1827  * {
1828  * const double jump =
1829  * coeff1 * grad_u[0][qpoint] * fe_interface_values.normal(qpoint) -
1830  * coeff2 * grad_u[1][qpoint] * fe_interface_values.normal(qpoint);
1831  *
1832  * jump_norm_square += jump * jump * fe_interface_values.JxW(qpoint);
1833  * }
1834  *
1835  * const double h = cell->face(f)->measure();
1836  * copy_data_face.values[0] = 0.5 * h * std::sqrt(jump_norm_square);
1837  * copy_data_face.values[1] = copy_data_face.values[0];
1838  * };
1839  *
1840  * auto copier = [&](const CopyData &copy_data) {
1841  * if (copy_data.cell_index != numbers::invalid_unsigned_int)
1842  * estimated_error_per_cell[copy_data.cell_index] += copy_data.value;
1843  *
1844  * for (auto &cdf : copy_data.face_data)
1845  * for (unsigned int j = 0; j < 2; ++j)
1846  * estimated_error_per_cell[cdf.cell_indices[j]] += cdf.values[j];
1847  * };
1848  *
1849  * const unsigned int n_gauss_points = degree + 1;
1850  * ScratchData<dim> scratch_data(mapping,
1851  * fe,
1852  * n_gauss_points,
1853  * update_hessians | update_quadrature_points |
1854  * update_JxW_values,
1855  * update_values | update_gradients |
1856  * update_JxW_values | update_normal_vectors);
1857  * CopyData copy_data;
1858  *
1859  * @endcode
1860  *
1861  * We need to assemble each interior face once but we need to make sure that
1862  * both processes assemble the face term between a locally owned and a ghost
1863  * cell. This is achieved by setting the
1864  * MeshWorker::assemble_ghost_faces_both flag. We need to do this, because
1865  * we do not communicate the error estimator contributions here.
1866  *
1867  * @code
1868  * MeshWorker::mesh_loop(dof_handler.begin_active(),
1869  * dof_handler.end(),
1870  * cell_worker,
1871  * copier,
1872  * scratch_data,
1873  * copy_data,
1874  * MeshWorker::assemble_own_cells |
1875  * MeshWorker::assemble_ghost_faces_both |
1876  * MeshWorker::assemble_own_interior_faces_once,
1877  * /*boundary_worker=*/nullptr,
1878  * face_worker);
1879  * }
1880  *
1881  *
1882  * @endcode
1883  *
1884  *
1885  * <a name="LaplaceProblemrefine_grid"></a>
1886  * <h4>LaplaceProblem::refine_grid()</h4>
1887  *
1888 
1889  *
1890  * We use the cell-wise estimator stored in the vector @p estimated_error_per_cell and
1891  * refine a fixed number of cells (chosen here to roughly double the number of
1892  * DoFs in each step).
1893  *
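 * The fraction used below can be motivated as follows: refining a fraction
 * @f$\alpha@f$ of the @f$N@f$ cells replaces each refined cell by @f$2^{dim}@f$
 * children, so the mesh afterwards has about
 * @f[
 *   (1-\alpha)\,N + \alpha\,2^{dim}\,N = \bigl(1 + \alpha\,(2^{dim}-1)\bigr)\,N
 * @f]
 * cells, which equals @f$2N@f$ for @f$\alpha = 1/(2^{dim}-1)@f$, the choice
 * made in the code below.
 *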
1894  * @code
1895  * template <int dim, int degree>
1896  * void LaplaceProblem<dim, degree>::refine_grid()
1897  * {
1898  * TimerOutput::Scope timing(computing_timer, "Refine grid");
1899  *
1900  * const double refinement_fraction = 1. / (std::pow(2.0, dim) - 1.);
1901  * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
1902  * triangulation, estimated_error_per_cell, refinement_fraction, 0.0);
1903  *
1904  * triangulation.execute_coarsening_and_refinement();
1905  * }
1906  *
1907  *
1908  * @endcode
1909  *
1910  *
1911  * <a name="LaplaceProblemoutput_results"></a>
1912  * <h4>LaplaceProblem::output_results()</h4>
1913  *
1914 
1915  *
1916  * The output_results() function is similar to the ones found in many of the
1917  * tutorials (see @ref step_40 "step-40" for example).
1918  *
1919  * @code
1920  * template <int dim, int degree>
1921  * void LaplaceProblem<dim, degree>::output_results(const unsigned int cycle)
1922  * {
1923  * TimerOutput::Scope timing(computing_timer, "Output results");
1924  *
1925  * VectorType temp_solution;
1926  * temp_solution.reinit(locally_owned_dofs,
1927  * locally_relevant_dofs,
1928  * mpi_communicator);
1929  * temp_solution = solution;
1930  *
1931  * DataOut<dim> data_out;
1932  * data_out.attach_dof_handler(dof_handler);
1933  * data_out.add_data_vector(temp_solution, "solution");
1934  *
1935  * Vector<float> subdomain(triangulation.n_active_cells());
1936  * for (unsigned int i = 0; i < subdomain.size(); ++i)
1937  * subdomain(i) = triangulation.locally_owned_subdomain();
1938  * data_out.add_data_vector(subdomain, "subdomain");
1939  *
1940  * Vector<float> level(triangulation.n_active_cells());
1941  * for (const auto &cell : triangulation.active_cell_iterators())
1942  * level(cell->active_cell_index()) = cell->level();
1943  * data_out.add_data_vector(level, "level");
1944  *
1945  * if (estimated_error_per_cell.size() > 0)
1946  * data_out.add_data_vector(estimated_error_per_cell,
1947  * "estimated_error_per_cell");
1948  *
1949  * data_out.build_patches();
1950  *
1951  * const std::string master = data_out.write_vtu_with_pvtu_record(
1952  * "", "solution", cycle, mpi_communicator, 2 /*n_digits*/, 1 /*n_groups*/);
1953  *
1954  * pcout << " Wrote " << master << std::endl;
1955  * }
1956  *
1957  *
1958  * @endcode
1959  *
1960  *
1961  * <a name="LaplaceProblemrun"></a>
1962  * <h4>LaplaceProblem::run()</h4>
1963  *
1964 
1965  *
1966  * As in most tutorials, this function calls the various functions defined
1967  * above to set up, assemble, solve, and output the results.
1968  *
1969  * @code
1970  * template <int dim, int degree>
1971  * void LaplaceProblem<dim, degree>::run()
1972  * {
1973  * for (unsigned int cycle = 0; cycle < settings.n_steps; ++cycle)
1974  * {
1975  * pcout << "Cycle " << cycle << ':' << std::endl;
1976  * if (cycle > 0)
1977  * refine_grid();
1978  *
1979  * pcout << " Number of active cells: "
1980  * << triangulation.n_global_active_cells();
1981  *
1982  * @endcode
1983  *
1984  * We only output level cell data for the GMG methods (same with DoF
1985  * data below). Note that the partition efficiency is irrelevant for AMG
1986  * since the level hierarchy is not distributed or used during the
1987  * computation.
1988  *
1989  * @code
1990  * if (settings.solver == Settings::gmg_mf ||
1991  * settings.solver == Settings::gmg_mb)
1992  * pcout << " (" << triangulation.n_global_levels() << " global levels)"
1993  * << std::endl
1994  * << " Partition efficiency: "
1995  * << 1.0 / MGTools::workload_imbalance(triangulation);
1996  * pcout << std::endl;
1997  *
1998  * setup_system();
1999  *
2000  * @endcode
2001  *
2002  * Only set up the multilevel hierarchy for GMG.
2003  *
2004  * @code
2005  * if (settings.solver == Settings::gmg_mf ||
2006  * settings.solver == Settings::gmg_mb)
2007  * setup_multigrid();
2008  *
2009  * pcout << " Number of degrees of freedom: " << dof_handler.n_dofs();
2010  * if (settings.solver == Settings::gmg_mf ||
2011  * settings.solver == Settings::gmg_mb)
2012  * {
2013  * pcout << " (by level: ";
2014  * for (unsigned int level = 0; level < triangulation.n_global_levels();
2015  * ++level)
2016  * pcout << dof_handler.n_dofs(level)
2017  * << (level == triangulation.n_global_levels() - 1 ? ")" :
2018  * ", ");
2019  * }
2020  * pcout << std::endl;
2021  *
2022  * @endcode
2023  *
2024  * For the matrix-free method, we only assemble the right-hand side.
2025  * For both matrix-based methods, we assemble both active matrix and
2026  * right-hand side, and only assemble the multigrid matrices for
2027  * matrix-based GMG.
2028  *
2029  * @code
2030  * if (settings.solver == Settings::gmg_mf)
2031  * assemble_rhs();
2032  * else /*gmg_mb or amg*/
2033  * {
2034  * assemble_system();
2035  * if (settings.solver == Settings::gmg_mb)
2036  * assemble_multigrid();
2037  * }
2038  *
2039  * solve();
2040  * estimate();
2041  *
2042  * if (settings.output)
2043  * output_results(cycle);
2044  *
2045  * computing_timer.print_summary();
2046  * computing_timer.reset();
2047  * }
2048  * }
2049  *
2050  *
2051  * @endcode
2052  *
2053  *
2054  * <a name="Themainfunction"></a>
2055  * <h3>The main() function</h3>
2056  *
2057 
2058  *
2059  * This is a similar main function to @ref step_40 "step-40", with the exception that
2060  * we require the user to pass a .prm file as the sole command line
2061  * argument (see @ref step_29 "step-29" and the documentation of the ParameterHandler
2062  * class for a complete discussion of parameter files).
2063  *
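 * For example, a parallel run could be started like this (the file name is
 * purely illustrative; any parameter file following the format discussed
 * above works):
 *
 * @code
 * mpirun -np 4 ./step-50 gmg_mf_2d.prm
 * @endcode
 *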
2064  * @code
2065  * int main(int argc, char *argv[])
2066  * {
2067  * using namespace dealii;
2068  * Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
2069  *
2070  * Settings settings;
2071  * if (!settings.try_parse((argc > 1) ? (argv[1]) : ""))
2072  * return 0;
2073  *
2074  * try
2075  * {
2076  * constexpr unsigned int fe_degree = 2;
2077  *
2078  * switch (settings.dimension)
2079  * {
2080  * case 2:
2081  * {
2082  * LaplaceProblem<2, fe_degree> test(settings);
2083  * test.run();
2084  *
2085  * break;
2086  * }
2087  *
2088  * case 3:
2089  * {
2090  * LaplaceProblem<3, fe_degree> test(settings);
2091  * test.run();
2092  *
2093  * break;
2094  * }
2095  *
2096  * default:
2097  * Assert(false, ExcMessage("This program only works in 2d and 3d."));
2098  * }
2099  * }
2100  * catch (std::exception &exc)
2101  * {
2102  * std::cerr << std::endl
2103  * << std::endl
2104  * << "----------------------------------------------------"
2105  * << std::endl;
2106  * std::cerr << "Exception on processing: " << std::endl
2107  * << exc.what() << std::endl
2108  * << "Aborting!" << std::endl
2109  * << "----------------------------------------------------"
2110  * << std::endl;
2111  * MPI_Abort(MPI_COMM_WORLD, 1);
2112  * return 1;
2113  * }
2114  * catch (...)
2115  * {
2116  * std::cerr << std::endl
2117  * << std::endl
2118  * << "----------------------------------------------------"
2119  * << std::endl;
2120  * std::cerr << "Unknown exception!" << std::endl
2121  * << "Aborting!" << std::endl
2122  * << "----------------------------------------------------"
2123  * << std::endl;
2124  * MPI_Abort(MPI_COMM_WORLD, 2);
2125  * return 1;
2126  * }
2127  *
2128  * return 0;
2129  * }
2130  * @endcode
2131 <a name="Results"></a><h1>Results</h1>
2132 
2133 
2134 When you run the program, the screen output should look like the following:
2135 @code
2136 Cycle 0:
2137  Number of active cells: 56 (2 global levels)
2138  Workload imbalance: 1.14286
2139  Number of degrees of freedom: 665 (by level: 117, 665)
2140  Number of CG iterations: 10
2141  Wrote solution_00.pvtu
2142 
2143 
2144 +---------------------------------------------+------------+------------+
2145 | Total wallclock time elapsed since start | 0.0457s | |
2146 | | | |
2147 | Section | no. calls | wall time | % of total |
2148 +---------------------------------+-----------+------------+------------+
2149 | Assemble right hand side | 1 | 0.000241s | 0.53% |
2150 | Estimate | 1 | 0.0288s | 63% |
2151 | Output results | 1 | 0.00219s | 4.8% |
2152 | Setup | 1 | 0.00264s | 5.8% |
2153 | Setup multigrid | 1 | 0.00261s | 5.7% |
2154 | Solve | 1 | 0.00355s | 7.8% |
2155 | Solve: 1 multigrid V-cycle | 1 | 0.000315s | 0.69% |
2156 | Solve: CG | 1 | 0.00186s | 4.1% |
2157 | Solve: Preconditioner setup | 1 | 0.000968s | 2.1% |
2158 +---------------------------------+-----------+------------+------------+
2159 
2160 Cycle 1:
2161  Number of active cells: 126 (3 global levels)
2162  Workload imbalance: 1.17483
2163  Number of degrees of freedom: 1672 (by level: 117, 665, 1100)
2164  Number of CG iterations: 11
2165  Wrote solution_01.pvtu
2166 
2167 
2168 +---------------------------------------------+------------+------------+
2169 | Total wallclock time elapsed since start | 0.0433s | |
2170 | | | |
2171 | Section | no. calls | wall time | % of total |
2172 +---------------------------------+-----------+------------+------------+
2173 | Assemble right hand side | 1 | 0.000286s | 0.66% |
2174 | Estimate | 1 | 0.0272s | 63% |
2175 | Output results | 1 | 0.00333s | 7.7% |
2176 | Refine grid | 1 | 0.00196s | 4.5% |
2177 | Setup | 1 | 0.0023s | 5.3% |
2178 | Setup multigrid | 1 | 0.00262s | 6% |
2179 | Solve | 1 | 0.00549s | 13% |
2180 | Solve: 1 multigrid V-cycle | 1 | 0.000343s | 0.79% |
2181 | Solve: CG | 1 | 0.00293s | 6.8% |
2182 | Solve: Preconditioner setup | 1 | 0.00174s | 4% |
2183 +---------------------------------+-----------+------------+------------+
2184 
2185 Cycle 2:
2186 .
2187 .
2188 .
2189 @endcode
2190 Here, the timing of the `solve()` function is split up into 3 parts: setting
2191 up the multigrid preconditioner, execution of a single multigrid V-cycle, and
2192 the CG solver. The V-cycle that is timed is unnecessary for the overall solve
2193 and is only meant to give an insight into the different costs of AMG and GMG.
2194 Note also that when using the AMG solver, the "Workload imbalance" line is
2195 not included in the output since the hierarchy of coarse meshes is not
2196 required.
2197 
2198 All results in this section are gathered on Intel Xeon Platinum 8280 (Cascade
2199 Lake) nodes with 56 cores and 192 GB of memory each. The nodes support AVX-512
2200 instructions, allowing for vectorization over 8 doubles (vectorization is used only
2201 in the matrix-free computations). The code is compiled using gcc 7.1.0 with
2202 intel-mpi 17.0.3. Trilinos 12.10.1 is used for the matrix-based GMG/AMG computations.
2203 
2204 We can then gather a variety of information by calling the program
2205 with the input files that are provided in the directory in which
2206 @ref step_50 "step-50" is located. Using these, and adjusting the number of mesh
2207 refinement steps, we can produce information about how well the
2208 program scales.
2209 
2210 The following table gives weak scaling timings for this program on up to 256M DoFs
2211 and 7,168 processors. (Recall that weak scaling keeps the number of
2212 degrees of freedom per processor constant while increasing the number of
2213 processors; i.e., it considers larger and larger problems.)
2214 Here, @f$\mathbb{E}@f$ is the partition efficiency from the
2215  introduction (also equal to 1.0/workload imbalance), "Setup" is a combination
2216 of setup, setup multigrid, assemble, and assemble multigrid from the timing blocks,
2217 and "Prec" is the preconditioner setup. Ideally all times would stay constant
2218 over each problem size for the individual solvers, but since the partition
2219 efficiency decreases from 0.371 to 0.161 from the smallest to the largest problem size,
2220 we expect to see an approximately @f$0.371/0.161=2.3@f$ times increase in timings
2221 for GMG. This is, in fact, pretty close to what we really get:
2222 
2223 <table align="center" class="doxtable">
2224 <tr>
2225  <th colspan="4"></th>
2226  <th></th>
2227  <th colspan="4">MF-GMG</th>
2228  <th></th>
2229  <th colspan="4">MB-GMG</th>
2230  <th></th>
2231  <th colspan="4">AMG</th>
2232 </tr>
2233 <tr>
2234  <th align="right">Procs</th>
2235  <th align="right">Cycle</th>
2236  <th align="right">DoFs</th>
2237  <th align="right">@f$\mathbb{E}@f$</th>
2238  <th></th>
2239  <th align="right">Setup</th>
2240  <th align="right">Prec</th>
2241  <th align="right">Solve</th>
2242  <th align="right">Total</th>
2243  <th></th>
2244  <th align="right">Setup</th>
2245  <th align="right">Prec</th>
2246  <th align="right">Solve</th>
2247  <th align="right">Total</th>
2248  <th></th>
2249  <th align="right">Setup</th>
2250  <th align="right">Prec</th>
2251  <th align="right">Solve</th>
2252  <th align="right">Total</th>
2253 </tr>
2254 <tr>
2255  <td align="right">112</td>
2256  <td align="right">13</td>
2257  <td align="right">4M</td>
2258  <td align="right">0.37</td>
2259  <td></td>
2260  <td align="right">0.742</td>
2261  <td align="right">0.393</td>
2262  <td align="right">0.200</td>
2263  <td align="right">1.335</td>
2264  <td></td>
2265  <td align="right">1.714</td>
2266  <td align="right">2.934</td>
2267  <td align="right">0.716</td>
2268  <td align="right">5.364</td>
2269  <td></td>
2270  <td align="right">1.544</td>
2271  <td align="right">0.456</td>
2272  <td align="right">1.150</td>
2273  <td align="right">3.150</td>
2274 </tr>
2275 <tr>
2276  <td align="right">448</td>
2277  <td align="right">15</td>
2278  <td align="right">16M</td>
2279  <td align="right">0.29</td>
2280  <td></td>
2281  <td align="right">0.884</td>
2282  <td align="right">0.535</td>
2283  <td align="right">0.253</td>
2284  <td align="right">1.672</td>
2285  <td></td>
2286  <td align="right">1.927</td>
2287  <td align="right">3.776</td>
2288  <td align="right">1.190</td>
2289  <td align="right">6.893</td>
2290  <td></td>
2291  <td align="right">1.544</td>
2292  <td align="right">0.456</td>
2293  <td align="right">1.150</td>
2294  <td align="right">3.150</td>
2295 </tr>
2296 <tr>
2297  <td align="right">1,792</td>
2298  <td align="right">17</td>
2299  <td align="right">65M</td>
2300  <td align="right">0.22</td>
2301  <td></td>
2302  <td align="right">1.122</td>
2303  <td align="right">0.686</td>
2304  <td align="right">0.309</td>
2305  <td align="right">2.117</td>
2306  <td></td>
2307  <td align="right">2.171</td>
2308  <td align="right">4.862</td>
2309  <td align="right">1.660</td>
2310  <td align="right">8.693</td>
2311  <td></td>
2312  <td align="right">1.654</td>
2313  <td align="right">0.546</td>
2314  <td align="right">1.460</td>
2315  <td align="right">3.660</td>
2316 </tr>
2317 <tr>
2318  <td align="right">7,168</td>
2319  <td align="right">19</td>
2320  <td align="right">256M</td>
2321  <td align="right">0.16</td>
2322  <td></td>
2323  <td align="right">1.214</td>
2324  <td align="right">0.893</td>
2325  <td align="right">0.521</td>
2326  <td align="right">2.628</td>
2327  <td></td>
2328  <td align="right">2.386</td>
2329  <td align="right">7.260</td>
2330  <td align="right">2.560</td>
2331  <td align="right">12.206</td>
2332  <td></td>
2333  <td align="right">1.844</td>
2334  <td align="right">1.010</td>
2335  <td align="right">1.890</td>
2336  <td align="right">4.744</td>
2337 </tr>
2338 </table>
2339 
2340 On the other hand, the algebraic multigrid in the last set of columns
2341 is relatively unaffected by the increasing imbalance of the mesh
2342 hierarchy (because it doesn't use the mesh hierarchy), and the growth
2343 in time is instead driven by other factors that are well documented in
2344 the literature (most notably that the algorithmic complexity of
2345 some parts of algebraic multigrid methods appears to be @f${\cal O}(N
2346 \log N)@f$, compared to @f${\cal O}(N)@f$ for geometric multigrid).
2347 
2348 The upshot of the table above is that the matrix-free geometric multigrid
2349 method appears to be the fastest approach to solving this equation, even if
2350 not by a huge margin. Matrix-based methods, on the other hand, are
2351 consistently the slowest.
2352 
2353 The following figure provides strong scaling results for each method, i.e.,
2354 we solve the same problem on more and more processors. Specifically,
2355 we consider the problems after 16 mesh refinement cycles
2356 (32M DoFs) and 19 cycles (256M DoFs), on between 56 and 28,672 processors:
2357 
2358 <img width="600px" src="https://www.dealii.org/images/steps/developer/step-50-strong-scaling.png" alt="">
2359 
2360 While the matrix-based GMG solver and AMG scale similarly and have a
2361 similar time to solution (at least as long as there is a substantial
2362 number of unknowns per processor -- say, several tens of thousands), the
2363 matrix-free GMG solver scales much better and solves the finer problem
2364 in roughly the same time as the AMG solver for the coarser mesh with
2365 only an eighth of the number of processors. Conversely, it can solve the
2366 same problem on the same number of processors in about one eighth the
2367 time.
2368 
2369 
2370 <a name="Possibleextensions"></a><h3> Possible extensions </h3>
2371 
2372 
2373 <a name="Testingconvergenceandhigherorderelements"></a><h4> Testing convergence and higher order elements </h4>
2374 
2375 
2376 The finite element degree is currently hard-coded as 2; see the template
2377 arguments of the main class. It is easy to change (see the sketch below). To test
2378 higher-order elements, it would be interesting to switch to a test problem with a
2379 known reference solution so that convergence rates can be compared.
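A minimal sketch of what this change looks like, assuming the solver class is the
`LaplaceProblem<dim, degree>` class template of this program, constructed from the
run-time `Settings` object as in `main()`:

@code
// Hypothetical change in main(): polynomial degree 3 instead of 2 in 2d.
// The matrix-free operators and the multigrid transfer adapt to the new
// degree automatically.
LaplaceProblem<2, 3> test(settings);
test.run();
@endcode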
2380 
2381 <a name="Coarsesolver"></a><h4> Coarse solver </h4>
2382 
2383 
2384 A more interesting example would involve a more complicated coarse mesh (see
2385 @ref step_49 "step-49" for inspiration). The issue in that case is that the coarsest
2386 level of the mesh hierarchy is actually quite large, and one would
2387 have to think about ways to solve the coarse level problem
2388 efficiently. (This is not an issue for algebraic multigrid methods
2389 because they would just continue to build coarser and coarser levels
2390 of the matrix, regardless of their geometric origin.)
2391 
2392 In the program here, we simply solve the coarse level problem with a
2393 Conjugate Gradient method without any preconditioner. That is acceptable
2394 if the coarse problem is really small -- for example, if the coarse
2395 mesh had a single cell, then the coarse mesh problem has a @f$9\times 9@f$
2396 matrix in 2d, and a @f$27\times 27@f$ matrix in 3d; for the coarse mesh we
2397 use on the @f$L@f$-shaped domain of the current program, these sizes are
2398 @f$21\times 21@f$ in 2d and @f$117\times 117@f$ in 3d. But if the coarse mesh
2399 consists of hundreds or thousands of cells, this approach will no
2400 longer work and the coarse solve might start to dominate the overall run time
2401 of each V-cycle. A common approach is then to solve the coarse mesh problem
2402 using an algebraic multigrid preconditioner; this would then, however, require
2403 assembling the coarse matrix (even for the matrix-free version) as
2404 input to the AMG implementation. A possible setup is sketched below.
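The following is a minimal sketch of such a coarse solver, not part of the actual
program: a CG iteration preconditioned by Trilinos' AMG, wrapped into the
MGCoarseGridIterativeSolver class so that it can be handed to the Multigrid object.
It assumes that a coarse-level matrix `coarse_matrix` of some Trilinos sparse matrix
type `MatrixType` has already been assembled, and that `VectorType` is the vector
type used by the multigrid objects:

@code
// Sketch only; `coarse_matrix`, `MatrixType`, and `VectorType` are assumed
// to be provided by the surrounding program. Requires
// <deal.II/lac/solver_cg.h>, <deal.II/lac/trilinos_precondition.h>, and
// <deal.II/multigrid/mg_coarse.h>.
SolverControl        coarse_solver_control(1000, 1e-12, false, false);
SolverCG<VectorType> coarse_solver(coarse_solver_control);

TrilinosWrappers::PreconditionAMG                 coarse_amg;
TrilinosWrappers::PreconditionAMG::AdditionalData amg_data;
amg_data.elliptic = true; // the Laplace operator is elliptic
coarse_amg.initialize(coarse_matrix, amg_data);

// Wrap solver, matrix, and preconditioner into the object the Multigrid
// class expects as its coarse grid solver:
MGCoarseGridIterativeSolver<VectorType,
                            SolverCG<VectorType>,
                            MatrixType,
                            TrilinosWrappers::PreconditionAMG>
  coarse_grid_solver(coarse_solver, coarse_matrix, coarse_amg);
@endcode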
2405  *
2406  *
2407 <a name="PlainProg"></a>
2408 <h1> The plain program</h1>
2409 @include "step-50.cc"
2410 */