enter_subsection("Time stepper");
enter_my_subsection(this->prm);
leave_my_subsection(this->prm);
leave_subsection();

add_parameter("Initial global refinement",
              "Number of times the mesh is refined globally before "
              "starting the time stepping.");
add_parameter("Maximum delta refinement level",
              "Maximum number of local refinement levels.");
add_parameter("Mesh adaptation frequency",
              "When to adapt the mesh.");
<a name="step_86-TheHeatEquationsetup_systemfunction"></a>
<h4>The HeatEquation::setup_system() function</h4>
dof_handler.distribute_dofs(fe);

      << "Number of active cells: " << triangulation.n_active_cells()
      << "Number of degrees of freedom: " << dof_handler.n_dofs()

locally_owned_dofs = dof_handler.locally_owned_dofs();

  locally_owned_dofs,

  locally_owned_dofs,
<a name="step_86-TheHeatEquationoutput_resultsfunction"></a>
<h4>The HeatEquation::output_results() function</h4>
data_out.attach_dof_handler(dof_handler);
data_out.add_data_vector(y, "U");
data_out.build_patches();

data_out.write_vtu_in_parallel(filename, mpi_communicator);
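Because one VTU file is written per output step, it can be convenient for visualization to also keep a record that maps simulation times to file names. The following is a minimal sketch of how one could do that with DataOutBase::write_pvd_record(); the container `times_and_names` is a hypothetical member variable (not part of the class as shown), and we assume the current `time` and the `filename` used above are available here:

@code
// Hypothetical member: std::vector<std::pair<double, std::string>> times_and_names;
// (needs <fstream> for std::ofstream)
times_and_names.emplace_back(time, filename);
if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
  {
    std::ofstream pvd_output("solution.pvd");
    DataOutBase::write_pvd_record(pvd_output, times_and_names);
  }
@endcode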
<a name="step_86-TheHeatEquationimplicit_functionfunction"></a>
<h4>The HeatEquation::implicit_function() function</h4>
in the documentation of the PETScWrappers::TimeStepper class.

At the top of the function, we do the usual set up when computing
integrals. We face two minor difficulties here: the first is that
the `y` and `y_dot` vectors we get as input are read only, but we
need to make sure they satisfy the correct boundary conditions and so
have to set elements in these vectors. The second is that we need to
compute the residual, and therefore in general we need to evaluate solution
values and gradients inside locally owned cells, and for this we need access
to degrees of freedom which may be owned by neighboring processors. To
address these issues, we create (non-ghosted) writable copies of the input
vectors, apply boundary conditions and hanging node constraints, and then
copy these vectors to ghosted vectors before we can do anything sensible
with them.
template <int dim>
void HeatEquation<dim>::implicit_function(const double                      time,
                                          const PETScWrappers::MPI::Vector &y,
                                          const PETScWrappers::MPI::Vector &y_dot,
                                          PETScWrappers::MPI::Vector       &residual)
{
  TimerOutput::Scope t(computing_timer, "implicit function");

  PETScWrappers::MPI::Vector tmp_solution(locally_owned_dofs,
                                          mpi_communicator);
  PETScWrappers::MPI::Vector tmp_solution_dot(locally_owned_dofs,
                                              mpi_communicator);
  tmp_solution     = y;
  tmp_solution_dot = y_dot;

  update_current_constraints(time);
  current_constraints.distribute(tmp_solution);
  homogeneous_constraints.distribute(tmp_solution_dot);

  PETScWrappers::MPI::Vector locally_relevant_solution(locally_owned_dofs,
                                                       locally_relevant_dofs,
                                                       mpi_communicator);
  PETScWrappers::MPI::Vector locally_relevant_solution_dot(
    locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
  locally_relevant_solution     = tmp_solution;
  locally_relevant_solution_dot = tmp_solution_dot;

  const QGauss<dim> quadrature_formula(fe.degree + 1);
  FEValues<dim>     fe_values(fe,
                              quadrature_formula,
                              update_values | update_gradients |
                                update_quadrature_points | update_JxW_values);

  const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
  const unsigned int n_q_points    = quadrature_formula.size();

  std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

  std::vector<Tensor<1, dim>> solution_gradients(n_q_points);
  std::vector<double>         solution_dot_values(n_q_points);

  Vector<double> cell_residual(dofs_per_cell);

  right_hand_side_function.set_time(time);
Now for computing the actual residual. Recall that we want to compute the
vector
@f[
  R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + A U(t) - F(t).
@f]
We could do that by actually forming the matrices @f$M@f$ and @f$A@f$, but this
is not efficient. Instead, recall (by writing out how the elements of
@f$M@f$ and @f$A@f$ are defined, and exchanging integrals and sums) that the
@f$i@f$th element of the residual vector is given by
@f{align*}{
  R(t,U,\dot U)_i
  &= \sum_j \int_\Omega \varphi_i(\mathbf x, t) \varphi_j(\mathbf x, t)
     \frac{\partial U_j(t)}{\partial t}
   + \sum_j \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla
     \varphi_j(\mathbf x, t)\, U_j(t)
   - \int_\Omega \varphi_i(\mathbf x, t) f(\mathbf x, t)
  \\
  &= \int_\Omega \varphi_i(\mathbf x, t)
     \frac{\partial u_h(\mathbf x, t)}{\partial t}
   + \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla
     u_h(\mathbf x, t)
   - \int_\Omega \varphi_i(\mathbf x, t) f(\mathbf x, t).
@f}
We can compute these integrals efficiently by breaking them up into
a sum over all cells and then applying quadrature. For the integrand,
we need to evaluate the solution and its gradient at the quadrature
points within each locally owned cell, and for this we also need
access to degrees of freedom that may be owned by neighboring
processors. We therefore use the locally_relevant_solution and
locally_relevant_solution_dot vectors.
  for (const auto &cell : dof_handler.active_cell_iterators())
    if (cell->is_locally_owned())
      {
        fe_values.reinit(cell);

        fe_values.get_function_gradients(locally_relevant_solution,
                                         solution_gradients);
        fe_values.get_function_values(locally_relevant_solution_dot,
                                      solution_dot_values);

        cell->get_dof_indices(local_dof_indices);

        cell_residual = 0;
        for (const unsigned int q : fe_values.quadrature_point_indices())
          for (const unsigned int i : fe_values.dof_indices())
            cell_residual(i) +=
              (fe_values.shape_value(i, q) *       // [phi_i(x_q) *
                 solution_dot_values[q]            //  du/dt(x_q)
               +                                   // +
               fe_values.shape_grad(i, q) *        //  grad phi_i(x_q) *
                 solution_gradients[q]             //  grad u(x_q)
               -                                   // -
               fe_values.shape_value(i, q) *       //  phi_i(x_q) *
                 right_hand_side_function.value(
                   fe_values.quadrature_point(q))) //  f(x_q)]
              * fe_values.JxW(q);                  // * dx

        current_constraints.distribute_local_to_global(cell_residual,
                                                       local_dof_indices,
                                                       residual);
      }
  residual.compress(VectorOperation::add);
The end result of the operations above is a vector that contains the
residual vector, having taken into account the constraints due to
hanging nodes and Dirichlet boundary conditions (by virtue of having
used `current_constraints.distribute_local_to_global()` to add the
local contributions to the global vector). At the end of the day, the
residual vector @f$r@f$ will be used in the solution of linear systems
of the form @f$J z = r@f$ with the "Jacobian" matrix that we define
below. We want to ensure that the algebraic components of @f$z@f$ have
predictable values that achieve the purposes discussed in the following.
We do this by ensuring that the entries corresponding to algebraic
components in the residual @f$r@f$ have specific values, and then we will
do the same in the next function for the matrix; for this, you will have
to know that the rows and columns of the matrix corresponding to
constrained entries are zero with the exception of the diagonal entries.
We will manually set that diagonal entry to one, and so @f$z_i=r_i@f$ for
algebraic components.
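To spell this out: if @f$i@f$ is such an algebraic (constrained) component, then the @f$i@f$th row of @f$J@f$ contains only the diagonal entry, which we set to one, and so the @f$i@f$th equation of @f$Jz=r@f$ decouples into
@f[
  1 \cdot z_i = r_i
  \qquad\Longrightarrow\qquad
  z_i = r_i.
@f]
This is why prescribing the values @f$r_i@f$ for these components, as we do next, directly controls the corresponding solution updates @f$z_i@f$.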
From the point of view of the residual vector, if the input `y`
vector does not contain the correct values on constrained degrees of
freedom (hanging nodes or boundary conditions), we need to communicate
this to the time stepper, and we do so by setting the residual to the
actual difference between the input `y` vector and our local copy of
it, in which we have applied the constraints (see the top of the
function where we called `current_constraints.distribute(tmp_solution)`
and a similar operation on the time derivative). Since we have made a
copy of the input vector for this purpose, we use it to compute the
residual value. However, there is a difference between hanging node
constraints and boundary conditions: we do not want to make hanging node
constraints actually depend on their dependent degrees of freedom, since
this would imply that we are actually solving for the dependent degrees
of freedom. This is not what we are actually doing, however, since
hanging nodes are not actually solved for. They are eliminated from the
system by the call to AffineConstraints::distribute_local_to_global()
above. From the point of view of the Jacobian matrix, we are effectively
setting hanging nodes to an artificial value (usually zero), and we
simply want to make sure that we solve for those degrees of freedom a
posteriori, by calling the function AffineConstraints::distribute().

Here we therefore check that the residual is equal to the input value on
the constrained dofs corresponding to hanging nodes (i.e., those for
which the lines of the `current_constraints` contain at least one other
entry), and to the difference between the input vector and the actual
solution on those constraints that correspond to boundary conditions.
  for (const auto &c : current_constraints.get_lines())
    if (locally_owned_dofs.is_element(c.index))
      {
        if (c.entries.empty()) /* no dependencies -> a Dirichlet node */
          residual[c.index] = y[c.index] - tmp_solution[c.index];
        else /* has dependencies -> a hanging node */
          residual[c.index] = y[c.index];
      }
  residual.compress(VectorOperation::insert);
<a name="step_86-TheHeatEquationassemble_implicit_jacobianfunction"></a>
<h4>The HeatEquation::assemble_implicit_jacobian() function</h4>

The next operation is to compute the "Jacobian", which PETSc TS defines
as
@f[
  J_\alpha = \dfrac{\partial R}{\partial y}
             + \alpha \dfrac{\partial R}{\partial \dot y}
@f]
which, for the current linear problem, is simply
@f[
  J_\alpha = A + \alpha M
@f]
and which is in particular independent of time and the current solution
vectors @f$y@f$ and @f$\dot y@f$.
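Since @f$J_\alpha = A + \alpha M@f$ depends on neither the time nor the current solution, one could in principle skip reassembly whenever PETSc TS calls back with the same @f$\alpha@f$ and the mesh has not changed. The following is only a sketch of that idea, not something the program does; both the function `setup_jacobian_cached` and the member variable `last_assembled_alpha` are hypothetical additions:

@code
// Hypothetical member of HeatEquation<dim>:
//   std::optional<double> last_assembled_alpha;   // requires <optional>

template <int dim>
void HeatEquation<dim>::setup_jacobian_cached(
  const double                      time,
  const PETScWrappers::MPI::Vector &y,
  const PETScWrappers::MPI::Vector &y_dot,
  const double                      alpha)
{
  // Rebuild A + alpha*M only if alpha differs from the previous assembly
  // (std::abs requires <cmath>).
  if (!last_assembled_alpha.has_value() ||
      std::abs(last_assembled_alpha.value() - alpha) > 1e-12)
    {
      assemble_implicit_jacobian(time, y, y_dot, alpha);
      last_assembled_alpha = alpha;
    }
}
@endcode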
Having seen the assembly of matrices before, there is little that should
surprise you in the actual assembly here:
template <int dim>
void HeatEquation<dim>::assemble_implicit_jacobian(
  const double /* time */,
  const PETScWrappers::MPI::Vector & /* y */,
  const PETScWrappers::MPI::Vector & /* y_dot */,
  const double alpha)
{
  TimerOutput::Scope t(computing_timer, "assemble implicit Jacobian");

  const QGauss<dim> quadrature_formula(fe.degree + 1);
  FEValues<dim>     fe_values(fe,
                              quadrature_formula,
                              update_values | update_gradients |
                                update_quadrature_points | update_JxW_values);

  const unsigned int dofs_per_cell = fe.n_dofs_per_cell();

  std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

  FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);

  jacobian_matrix = 0;
  for (const auto &cell : dof_handler.active_cell_iterators())
    if (cell->is_locally_owned())
      {
        fe_values.reinit(cell);

        cell->get_dof_indices(local_dof_indices);

        cell_matrix = 0;
        for (const unsigned int q : fe_values.quadrature_point_indices())
          for (const unsigned int i : fe_values.dof_indices())
            for (const unsigned int j : fe_values.dof_indices())
              cell_matrix(i, j) +=
                (fe_values.shape_grad(i, q) *    // grad phi_i(x_q) *
                   fe_values.shape_grad(j, q)    // grad phi_j(x_q)
                 +                               // +
                 alpha *                         // alpha *
                   fe_values.shape_value(i, q) * // phi_i(x_q) *
                   fe_values.shape_value(j, q))  // phi_j(x_q)
                * fe_values.JxW(q);              // * dx

        current_constraints.distribute_local_to_global(cell_matrix,
                                                       local_dof_indices,
                                                       jacobian_matrix);
      }
  jacobian_matrix.compress(VectorOperation::add);
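What follows in the program (and what the next paragraph alludes to) is the fix-up of the rows belonging to constrained degrees of freedom. A minimal sketch consistent with the strategy described earlier -- put a one on the diagonal of every constrained row -- might look like the following; it is not necessarily a verbatim copy of the program's own code:

@code
for (const auto &c : current_constraints.get_lines())
  if (locally_owned_dofs.is_element(c.index))
    jacobian_matrix.set(c.index, c.index, 1.0);
jacobian_matrix.compress(VectorOperation::insert);
@endcode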
The only interesting part is the following. Recall that we modified the
<a name="step_86-TheHeatEquationsolve_with_jacobianfunction"></a>
<h4>The HeatEquation::solve_with_jacobian() function</h4>
preconditioner.initialize(

cg.set_prefix("user_");

pcout << " " << solver_control.last_step() << " linear iterations."
<a name="step_86-TheHeatEquationprepare_for_coarsening_and_refinementfunction"></a>
<h4>The HeatEquation::prepare_for_coarsening_and_refinement() function</h4>
void HeatEquation<dim>::prepare_for_coarsening_and_refinement(

  mpi_communicator);

for (const auto &cell :
  cell->clear_refine_flag();
for (const auto &cell :
  cell->clear_coarsen_flag();
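The part of the function not reproduced above follows the usual estimate-and-mark pattern for adaptive meshes. As orientation, a condensed sketch of that pattern is shown below; the ghosted solution vector `locally_relevant_solution` and the refinement fractions (0.6/0.4) are illustrative assumptions, not necessarily the program's exact choices:

@code
Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
KellyErrorEstimator<dim>::estimate(dof_handler,
                                   QGauss<dim - 1>(fe.degree + 1),
                                   {},
                                   locally_relevant_solution,
                                   estimated_error_per_cell);

parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
  triangulation, estimated_error_per_cell, 0.6, 0.4);
@endcode

The flag-clearing loops shown above then make sure that cells are neither refined beyond the maximum nor coarsened below the minimum allowed level.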
<a name="step_86-TheHeatEquationtransfer_solution_vectors_to_new_meshfunction"></a>
<h4>The HeatEquation::transfer_solution_vectors_to_new_mesh() function</h4>
the vectors we transfer.

void HeatEquation<dim>::transfer_solution_vectors_to_new_mesh(
  const double time,

  mpi_communicator);

for (unsigned int i = 0; i < all_in.size(); ++i)

  mpi_communicator);

for (unsigned int i = 0; i < all_in.size(); ++i)
  all_out[i].reinit(locally_owned_dofs, mpi_communicator);
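The overall structure of this function is the standard parallel::distributed::SolutionTransfer workflow: register the incoming (ghosted) vectors, adapt the mesh, rebuild the DoF and vector layout, and interpolate onto the outgoing vectors. The sketch below shows that skeleton under the assumption that `all_in` holds ghosted vectors; it is meant as orientation, not as the literal function body:

@code
parallel::distributed::SolutionTransfer<dim, PETScWrappers::MPI::Vector>
  solution_transfer(dof_handler);

// Register the (ghosted) input vectors with the transfer object.
std::vector<const PETScWrappers::MPI::Vector *> all_in_ptr;
for (const auto &v : all_in)
  all_in_ptr.push_back(&v);

triangulation.prepare_coarsening_and_refinement();
solution_transfer.prepare_for_coarsening_and_refinement(all_in_ptr);

triangulation.execute_coarsening_and_refinement();
// ... re-distribute DoFs, constraints, and index sets for the new mesh
//     here (the program's own setup code, omitted in this sketch) ...

// Interpolate onto vectors partitioned according to the new mesh.
all_out.resize(all_in.size());
for (unsigned int i = 0; i < all_in.size(); ++i)
  all_out[i].reinit(locally_owned_dofs, mpi_communicator);

std::vector<PETScWrappers::MPI::Vector *> all_out_ptr;
for (auto &v : all_out)
  all_out_ptr.push_back(&v);
solution_transfer.interpolate(all_out_ptr);
@endcode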
<a name="step_86-TheHeatEquationupdate_current_constraintsfunction"></a>
<h4>The HeatEquation::update_current_constraints() function</h4>
  mpi_communicator);
<a name="step_86-TheHeatEquationrunfunction"></a>
<h4>The HeatEquation::run() function</h4>
the "implicit function" (i.e., that part of the right hand side of the
ODE system that we want to treat implicitly -- which in our case is

  const PETScWrappers::MPI::Vector &y,
  const PETScWrappers::MPI::Vector &y_dot,
  PETScWrappers::MPI::Vector       &res) {

petsc_ts.setup_jacobian = [&](const double time,

                              const double alpha) {

  this->solve_with_jacobian(src, dst);

petsc_ts.algebraic_components = [&]() {

petsc_ts.update_constrained_components =

calling the `prepare_for_coarsening_and_refinement` function.

(`petsc_ts.transfer_solution_vectors_to_new_mesh`) with a

petsc_ts.decide_and_prepare_for_remeshing =
  [&](const double,

    this->prepare_for_coarsening_and_refinement(y);

petsc_ts.transfer_solution_vectors_to_new_mesh =
  [&](const double                                   time,
      const std::vector<PETScWrappers::MPI::Vector> &all_in,
      std::vector<PETScWrappers::MPI::Vector>       &all_out) {
    this->transfer_solution_vectors_to_new_mesh(time, all_in, all_out);

petsc_ts.monitor = [&](const double time,

                       const unsigned int step_number) {
  pcout << "Time step " << step_number << " at t=" << time << std::endl;

petsc_ts.solve(solution);
<a name="step_86-Themainfunction"></a>
<h3>The main function</h3>
using namespace Step86;

(argc > 1 ? argv[1] : "heat_equation.prm");

catch (std::exception &exc)

  std::cerr << std::endl
            << "----------------------------------------------------"
  std::cerr << "Exception on processing: " << std::endl
            << exc.what() << std::endl
            << "Aborting!" << std::endl
            << "----------------------------------------------------"

  std::cerr << std::endl
            << "----------------------------------------------------"
  std::cerr << "Unknown exception!" << std::endl
            << "Aborting!" << std::endl
            << "----------------------------------------------------"
Time step 1 at t=0.025
Time step 2 at t=0.05
Time step 3 at t=0.075
Time step 5 at t=0.125
Time step 6 at t=0.15
Time step 7 at t=0.175
Time step 9 at t=0.225
Time step 10 at t=0.25
Time step 11 at t=0.275

Time step 195 at t=4.875
Time step 196 at t=4.9
Time step 197 at t=4.925
Time step 198 at t=4.95
Time step 199 at t=4.975
+---------------------------------------------+------------+------------+

+---------------------------------+-----------+------------+------------+
| implicit function               |       426 |      16.2s |        37% |
| output results                  |       201 |      9.74s |        23% |
| set algebraic components        |       200 |    0.0496s |      0.11% |
| setup system                    |        21 |     0.799s |       1.8% |
+---------------------------------+-----------+------------+------------+
<iframe width="560" height="315" src="https://www.youtube.com/embed/fhJVkcdRksM"
        allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
  set match final time = false

  set match final time = false
  set solver type      = bdf
===========================================
Time step 1 at t=0.01
Time step 2 at t=0.02
Time step 3 at t=0.03
Time step 4 at t=0.042574
Time step 5 at t=0.0588392
Time step 6 at t=0.0783573
Time step 7 at t=0.100456
Time step 8 at t=0.124982
Time step 9 at t=0.153156

Time step 206 at t=4.96911
Time step 207 at t=4.99398
Time step 208 at t=5.01723
+---------------------------------------------+------------+------------+

+---------------------------------+-----------+------------+------------+
| implicit function               |      1101 |      56.3s |        48% |
| output results                  |       209 |      12.5s |        11% |
| set algebraic components        |       508 |     0.156s |      0.13% |
| setup system                    |        21 |      1.11s |      0.95% |
+---------------------------------+-----------+------------+------------+
ourselves to blame for that by setting

  set match final time = false

in the input file. If hitting the end time exactly is important to us,
setting the flag to `true` resolves this issue.
We can even reason why PETSc eventually chooses a time step of around
0.025: The boundary values undergo a complete cosine cycle within 0.5
time units; we should expect that it takes around ten or twenty time
steps to resolve each period of a cycle to reasonable accuracy, and
this leads to the time step choice PETSc finds.
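In numbers: resolving one 0.5-time-unit cosine period with about twenty steps gives
@f[
  \Delta t \approx \frac{0.5}{20} = 0.025,
@f]
which matches the step size the runs above settle on.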
Not all combinations of methods, time step adaptation algorithms, and
other parameters are valid, but the main messages from the experiment
above that you should take away are:
- It would undoubtedly be quite time consuming to implement many of the
  methods that PETSc TS offers for time stepping -- but with a program

  have to: We can rely on a library written by experts in that area.
<a name="step-86-extensions"></a>
<a name="step_86-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

The program actually runs in parallel, even though we have not used
that above. Specifically, if you have configured deal.II to use MPI,
then you can do `mpirun -np 8 ./step-86 heat_equation.prm` to run the
program with 8 processes.
For the program as currently written (and in debug mode), this makes
little difference: It will run about twice as fast, but take about 8
times as much CPU time. That is because the problem is just so small:
Generally between 1000 and 2000 degrees of freedom. A good rule of
thumb is that programs really only benefit from parallel computing if
you have somewhere in the range of 50,000 to 100,000 unknowns *per MPI
process*. But it is not difficult to adapt the program at hand here to
run with a much finer mesh, or perhaps in 3d, so that one is beyond
that limit and sees the benefits of parallel computing.
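To make that rule of thumb concrete for the present program:
@f[
  \frac{2000 \text{ unknowns}}{8 \text{ MPI processes}}
  = 250 \text{ unknowns per process},
@f]
a factor of 200 or more below the point at which distributing the work typically starts to pay off.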
<a name="step_86-PlainProg"></a>
<h1> The plain program</h1>
@include "step-86.cc"