961  * /*keep_constrained_dofs =*/ false);
962  * sparsity_pattern.copy_from(dsp);
963  *
964  * system_matrix.reinit(sparsity_pattern);
965  *
966  * solution.reinit(dof_handler.n_dofs());
967  * system_rhs.reinit(dof_handler.n_dofs());
968  * }
969  *
970  *
971  *
972  * @endcode
973  *
974  * In the following function, the matrix and right hand side are
975  * assembled. As stated in the documentation of the main class above, it
976  * does not do this itself, but rather delegates to the function following
977  * next, utilizing the WorkStream concept discussed in @ref threads .
978  *
979 
980  *
981  * If you have looked through the @ref threads module, you will have
982  * seen that assembling in parallel does not take an incredible
983  * amount of extra code as long as you diligently describe what the
984  * scratch and copy data objects are, and as long as you define suitable
985  * functions for the local assembly and the copy operation from local
986  * contributions to global objects. This done, the following will do
987  * all the heavy lifting to get these operations done on multiple
988  * threads on as many cores as you have in your system:
989  *
990  * @code
991  * template <int dim>
992  * void AdvectionProblem<dim>::assemble_system()
993  * {
994  * WorkStream::run(dof_handler.begin_active(),
995  * dof_handler.end(),
996  * *this,
997  * &AdvectionProblem::local_assemble_system,
998  * &AdvectionProblem::copy_local_to_global,
999  * AssemblyScratchData(fe),
1000  * AssemblyCopyData());
1001  * }
1002  *
1003  *
1004  *
1005  * @endcode
1006  *
1007  * As already mentioned above, we need to have scratch objects for
1008  * the parallel computation of local contributions. These objects
1009  * contain FEValues and FEFaceValues objects (as well as some arrays), and so
1010  * we will need to have constructors and copy constructors that allow us to
1011  * create them. For the cell terms we need the values
1012  * and gradients of the shape functions, the quadrature points in
1013  * order to determine the source density and the advection field at
1014  * a given point, and the weights of the quadrature points times the
1015  * determinant of the Jacobian at these points. In contrast, for the
1016  * boundary integrals, we don't need the gradients, but rather the
1017  * normal vectors to the cells. This determines which update flags
1018  * we will have to pass to the constructors of the members of the
1019  * class:
1020  *
1021  * @code
1022  * template <int dim>
1023  * AdvectionProblem<dim>::AssemblyScratchData::AssemblyScratchData(
1024  * const FiniteElement<dim> &fe)
1025  * : fe_values(fe,
1026  * QGauss<dim>(fe.degree + 1),
1027  * update_values | update_gradients | update_quadrature_points |
1028  * update_JxW_values)
1029  * , fe_face_values(fe,
1030  * QGauss<dim - 1>(fe.degree + 1),
1031  * update_values | update_quadrature_points |
1032  * update_JxW_values | update_normal_vectors)
1033  * , rhs_values(fe_values.get_quadrature().size())
1034  * , advection_directions(fe_values.get_quadrature().size())
1035  * , face_boundary_values(fe_face_values.get_quadrature().size())
1036  * , face_advection_directions(fe_face_values.get_quadrature().size())
1037  * {}
1038  *
1039  *
1040  *
1041  * template <int dim>
1042  * AdvectionProblem<dim>::AssemblyScratchData::AssemblyScratchData(
1043  * const AssemblyScratchData &scratch_data)
1044  * : fe_values(scratch_data.fe_values.get_fe(),
1045  * scratch_data.fe_values.get_quadrature(),
1046  * update_values | update_gradients | update_quadrature_points |
1047  * update_JxW_values)
1048  * , fe_face_values(scratch_data.fe_face_values.get_fe(),
1049  * scratch_data.fe_face_values.get_quadrature(),
1050  * update_values | update_quadrature_points |
1051  * update_JxW_values | update_normal_vectors)
1052  * , rhs_values(scratch_data.rhs_values.size())
1053  * , advection_directions(scratch_data.advection_directions.size())
1054  * , face_boundary_values(scratch_data.face_boundary_values.size())
1055  * , face_advection_directions(scratch_data.face_advection_directions.size())
1056  * {}
1057  *
1058  *
1059  *
1060  * @endcode
1061  *
1062  * Now, this is the function that does the actual work. It is not very
1063  * different from the <code>assemble_system</code> functions of previous
1064  * example programs, so we will again only comment on the differences. The
1065  * mathematical stuff closely follows what we have said in the introduction.
1066  *
1067 
1068  *
1069  * There are a number of points worth mentioning here, though. The
1070  * first one is that we have moved the FEValues and FEFaceValues
1071  * objects into the ScratchData object. We have done so because the
1072  * alternative would have been to simply create one every time we
1073  * get into this function -- i.e., on every cell. It now turns out
1074  * that the FEValues classes were written with the explicit goal of
1075  * moving everything that remains the same from cell to cell into
1076  * the construction of the object, and of doing only as little work as
1077  * possible in FEValues::reinit() whenever we move to a new
1078  * cell. What this means is that it would be very expensive to
1079  * create a new object of this kind in this function as we would
1080  * have to do it for every cell -- exactly the thing we wanted to
1081  * avoid with the FEValues class. Instead, what we do is create it
1082  * only once (or a small number of times) in the scratch objects and
1083  * then re-use it as often as we can.
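 *
 * As a reminder, such a scratch object is little more than a struct that
 * bundles the expensive-to-create FEValues/FEFaceValues objects with the
 * work arrays used below. Its declaration appears in the class definitions
 * earlier in this program and looks roughly like this (a sketch, with
 * member names matching the ones used in the code below):
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * struct AssemblyScratchData
 * {
 *   AssemblyScratchData(const FiniteElement<dim> &fe);
 *   AssemblyScratchData(const AssemblyScratchData &scratch_data);
 *
 *   // Created once, then re-used via reinit() on every cell and face:
 *   FEValues<dim>     fe_values;
 *   FEFaceValues<dim> fe_face_values;
 *
 *   // Work arrays, sized once to the number of quadrature points:
 *   std::vector<double>         rhs_values;
 *   std::vector<Tensor<1, dim>> advection_directions;
 *   std::vector<double>         face_boundary_values;
 *   std::vector<Tensor<1, dim>> face_advection_directions;
 *
 *   // The coefficient and data functions evaluated on each cell:
 *   AdvectionField<dim> advection_field;
 *   RightHandSide<dim>  right_hand_side;
 *   BoundaryValues<dim> boundary_values;
 * };
 * @endcode
 * </div>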
1084  *
1085 
1086  *
1087  * This begs the question of whether there are other objects used in
1088  * this function whose creation is expensive compared to
1089  * their use. The <code>AdvectionField</code>,
1090  * <code>RightHandSide</code> and <code>BoundaryValues</code> objects do not
1091  * cost much to create, so nothing would be lost by creating them wherever
1092  * convenient. However,
1093  * allocating memory for the <code>rhs_values</code> and
1094  * similar arrays typically costs a significant amount of
1095  * time, compared to just accessing the (temporary) values we store
1096  * in them. This is why these arrays are also members of
1097  * the <code>AssemblyScratchData</code> class: they are allocated only a
1098  * small number of times, rather than once per cell.
1099  *
1100  * @code
1101  * template <int dim>
1102  * void AdvectionProblem<dim>::local_assemble_system(
1103  * const typename DoFHandler<dim>::active_cell_iterator &cell,
1104  * AssemblyScratchData & scratch_data,
1105  * AssemblyCopyData & copy_data)
1106  * {
1107  * @endcode
1108  *
1109  * We define some abbreviations to avoid unnecessarily long lines:
1110  *
1111  * @code
1112  * const unsigned int dofs_per_cell = fe.dofs_per_cell;
1113  * const unsigned int n_q_points =
1114  * scratch_data.fe_values.get_quadrature().size();
1115  * const unsigned int n_face_q_points =
1116  * scratch_data.fe_face_values.get_quadrature().size();
1117  *
1118  * @endcode
1119  *
1120  * We declare cell matrix and cell right hand side...
1121  *
1122  * @code
1123  * copy_data.cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
1124  * copy_data.cell_rhs.reinit(dofs_per_cell);
1125  *
1126  * @endcode
1127  *
1128  * ... an array to hold the global indices of the degrees of freedom of
1129  * the cell on which we are presently working...
1130  *
1131  * @code
1132  * copy_data.local_dof_indices.resize(dofs_per_cell);
1133  *
1134  * @endcode
1135  *
1136  * ... then initialize the <code>FEValues</code> object...
1137  *
1138  * @code
1139  * scratch_data.fe_values.reinit(cell);
1140  *
1141  * @endcode
1142  *
1143  * ... obtain the values of right hand side and advection directions
1144  * at the quadrature points...
1145  *
1146  * @code
1147  * scratch_data.advection_field.value_list(
1148  * scratch_data.fe_values.get_quadrature_points(),
1149  * scratch_data.advection_directions);
1150  * scratch_data.right_hand_side.value_list(
1151  * scratch_data.fe_values.get_quadrature_points(), scratch_data.rhs_values);
1152  *
1153  * @endcode
1154  *
1155  * ... set the value of the streamline diffusion parameter as
1156  * described in the introduction...
1157  *
1158  * @code
1159  * const double delta = 0.1 * cell->diameter();
1160  *
1161  * @endcode
1162  *
1163  * ... and assemble the local contributions to the system matrix and
1164  * right hand side as also discussed above:
1165  *
1166  * @code
1167  * for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1168  * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1169  * {
1170  * @endcode
1171  *
1172  * Alias the AssemblyScratchData object to keep the lines from
1173  * getting too long:
1174  *
1175  * @code
1176  * const auto &sd = scratch_data;
1177  * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1178  * copy_data.cell_matrix(i, j) +=
1179  * ((sd.fe_values.shape_value(i, q_point) + // (phi_i +
1180  * delta * (sd.advection_directions[q_point] * // delta beta
1181  * sd.fe_values.shape_grad(i, q_point))) * // grad phi_i)
1182  * sd.advection_directions[q_point] * // beta
1183  * sd.fe_values.shape_grad(j, q_point)) * // grad phi_j
1184  * sd.fe_values.JxW(q_point); // dx
1185  *
1186  * copy_data.cell_rhs(i) +=
1187  * (sd.fe_values.shape_value(i, q_point) + // (phi_i +
1188  * delta * (sd.advection_directions[q_point] * // delta beta
1189  * sd.fe_values.shape_grad(i, q_point))) * // grad phi_i)
1190  * sd.rhs_values[q_point] * // f
1191  * sd.fe_values.JxW(q_point); // dx
1192  * }
1193  *
1194  * @endcode
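 *
 * Written out, what the two statements above accumulate for each cell
 * @f$K@f$ is (in the notation of the introduction, with @f$\beta@f$ the
 * advection direction, @f$\delta@f$ the stabilization parameter, and
 * @f$w_q@f$ the JxW value at quadrature point @f$\mathbf x_q@f$):
 * @f[
 *   A^K_{ij} = \sum_q
 *     \bigl(\varphi_i(\mathbf x_q)
 *           + \delta\, \beta(\mathbf x_q)\cdot\nabla\varphi_i(\mathbf x_q)\bigr)
 *     \bigl(\beta(\mathbf x_q)\cdot\nabla\varphi_j(\mathbf x_q)\bigr)\, w_q,
 *   \qquad
 *   F^K_i = \sum_q
 *     \bigl(\varphi_i(\mathbf x_q)
 *           + \delta\, \beta(\mathbf x_q)\cdot\nabla\varphi_i(\mathbf x_q)\bigr)
 *     f(\mathbf x_q)\, w_q.
 * @f]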
1195  *
1196  * Besides the cell terms which we have built up now, the bilinear
1197  * form of the present problem also contains terms on the boundary of
1198  * the domain. Therefore, we have to check whether any of the faces of
1199  * this cell are on the boundary of the domain, and if so assemble the
1200  * contributions of this face as well. Of course, the bilinear form
1201  * only contains contributions from the <code>inflow</code> part of
1202  * the boundary, but to find out whether a certain part of a face of
1203  * the present cell is part of the inflow boundary, we have to have
1204  * information on the exact location of the quadrature points and on
1205  * the direction of flow at this point; we obtain this information
1206  * using the FEFaceValues object and only decide within the main loop
1207  * whether a quadrature point is on the inflow boundary.
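 *
 * As a reminder of the notation from the introduction: with @f$\beta@f$ the
 * advection direction and @f$\mathbf n@f$ the outward normal, the inflow
 * part of the boundary is
 * @f[
 *   \partial\Omega_- = \{\mathbf x \in \partial\Omega :
 *                        \beta(\mathbf x)\cdot\mathbf n(\mathbf x) < 0\},
 * @f]
 * and it is exactly this condition that is checked for every quadrature
 * point in the loop below.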
1208  *
1209  * @code
1210  * for (const auto &face : cell->face_iterators())
1211  * if (face->at_boundary())
1212  * {
1213  * @endcode
1214  *
1215  * Ok, this face of the present cell is on the boundary of the
1216  * domain. Just as for the usual FEValues object which we have
1217  * used in previous examples and also above, we have to
1218  * reinitialize the FEFaceValues object for the present face:
1219  *
1220  * @code
1221  * scratch_data.fe_face_values.reinit(cell, face);
1222  *
1223  * @endcode
1224  *
1225  * For the quadrature points at hand, we ask for the values of
1226  * the inflow function and for the direction of flow:
1227  *
1228  * @code
1229  * scratch_data.boundary_values.value_list(
1230  * scratch_data.fe_face_values.get_quadrature_points(),
1231  * scratch_data.face_boundary_values);
1232  * scratch_data.advection_field.value_list(
1233  * scratch_data.fe_face_values.get_quadrature_points(),
1234  * scratch_data.face_advection_directions);
1235  *
1236  * @endcode
1237  *
1238  * Now loop over all quadrature points and see whether this face is on
1239  * the inflow or outflow part of the boundary. The normal
1240  * vector points out of the cell: since the face is at
1241  * the boundary, the normal vector points out of the domain,
1242  * so if the advection direction points into the domain, its
1243  * scalar product with the normal vector must be negative (to see why
1244  * this is true, consider the scalar product definition that uses a
1245  * cosine):
1246  *
1247  * @code
1248  * for (unsigned int q_point = 0; q_point < n_face_q_points; ++q_point)
1249  * if (scratch_data.fe_face_values.normal_vector(q_point) *
1250  * scratch_data.face_advection_directions[q_point] <
1251  * 0.)
1252  * @endcode
1253  *
1254  * If the face is part of the inflow boundary, then compute the
1255  * contributions of this face to the global matrix and right
1256  * hand side, using the values obtained from the
1257  * FEFaceValues object and the formulae discussed in the
1258  * introduction:
1259  *
1260  * @code
1261  * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1262  * {
1263  * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1264  * copy_data.cell_matrix(i, j) -=
1265  * (scratch_data.face_advection_directions[q_point] *
1266  * scratch_data.fe_face_values.normal_vector(q_point) *
1267  * scratch_data.fe_face_values.shape_value(i, q_point) *
1268  * scratch_data.fe_face_values.shape_value(j, q_point) *
1269  * scratch_data.fe_face_values.JxW(q_point));
1270  *
1271  * copy_data.cell_rhs(i) -=
1272  * (scratch_data.face_advection_directions[q_point] *
1273  * scratch_data.fe_face_values.normal_vector(q_point) *
1274  * scratch_data.face_boundary_values[q_point] *
1275  * scratch_data.fe_face_values.shape_value(i, q_point) *
1276  * scratch_data.fe_face_values.JxW(q_point));
1277  * }
1278  * }
1279  *
1280  * @endcode
1281  *
1282  * The final piece of information the copy routine needs is the global
1283  * indices of the degrees of freedom on this cell, so we end by writing
1284  * them to the local array:
1285  *
1286  * @code
1287  * cell->get_dof_indices(copy_data.local_dof_indices);
1288  * }
1289  *
1290  *
1291  *
1292  * @endcode
1293  *
1294  * The second function we needed to write was the one that copies
1295  * the local contributions the previous function computed (and
1296  * put into the AssemblyCopyData object) into the global matrix and right
1297  * hand side vector objects. This is essentially what we always had
1298  * as the last block of code when assembling something on every
1299  * cell. The following should therefore be pretty obvious:
1300  *
1301  * @code
1302  * template <int dim>
1303  * void
1304  * AdvectionProblem<dim>::copy_local_to_global(const AssemblyCopyData &copy_data)
1305  * {
1306  * hanging_node_constraints.distribute_local_to_global(
1307  * copy_data.cell_matrix,
1308  * copy_data.cell_rhs,
1309  * copy_data.local_dof_indices,
1310  * system_matrix,
1311  * system_rhs);
1312  * }
1313  *
1314  * @endcode
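 *
 * As an aside: in the absence of hanging node constraints, the call above
 * would be equivalent to the familiar element-by-element copy loop of the
 * earlier tutorial programs. A sketch of that loop (not used here, because
 * AffineConstraints::distribute_local_to_global() also eliminates
 * constrained degrees of freedom on the fly):
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * for (unsigned int i = 0; i < copy_data.local_dof_indices.size(); ++i)
 *   {
 *     for (unsigned int j = 0; j < copy_data.local_dof_indices.size(); ++j)
 *       system_matrix.add(copy_data.local_dof_indices[i],
 *                         copy_data.local_dof_indices[j],
 *                         copy_data.cell_matrix(i, j));
 *
 *     system_rhs(copy_data.local_dof_indices[i]) += copy_data.cell_rhs(i);
 *   }
 * @endcode
 * </div>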
1315  *
1316  * Here comes the linear solver routine. As the system is no longer
1317  * symmetric positive definite as in all the previous examples, we cannot
1318  * use the Conjugate Gradient method anymore. Rather, we use a solver that
1319  * is more general and does not rely on any special properties of the
1320  * matrix: the GMRES method. GMRES, like the conjugate gradient method,
1321  * requires a decent preconditioner: we use a Jacobi preconditioner here,
1322  * which works well enough for this problem.
1323  *
1324  * @code
1325  * template <int dim>
1326  * void AdvectionProblem<dim>::solve()
1327  * {
1328  * SolverControl solver_control(std::max<std::size_t>(1000,
1329  * system_rhs.size() / 10),
1330  * 1e-10 * system_rhs.l2_norm());
1331  * SolverGMRES<Vector<double>> solver(solver_control);
1332  * PreconditionJacobi<SparseMatrix<double>> preconditioner;
1333  * preconditioner.initialize(system_matrix, 1.0);
1334  * solver.solve(system_matrix, solution, system_rhs, preconditioner);
1335  *
1336  * Vector<double> residual(dof_handler.n_dofs());
1337  *
1338  * system_matrix.vmult(residual, solution);
1339  * residual -= system_rhs;
1340  * std::cout << " Iterations required for convergence: "
1341  * << solver_control.last_step() << '\n'
1342  * << " Max norm of residual: "
1343  * << residual.linfty_norm() << '\n';
1344  *
1345  * hanging_node_constraints.distribute(solution);
1346  * }
1347  *
1348  * @endcode
1349  *
1350  * The following function refines the grid according to the quantity
1351  * described in the introduction. The respective computations are made in
1352  * the class <code>GradientEstimation</code>.
1353  *
1354  * @code
1355  * template <int dim>
1356  * void AdvectionProblem<dim>::refine_grid()
1357  * {
1358  * Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
1359  *
1360  * GradientEstimation::estimate(dof_handler,
1361  * solution,
1362  * estimated_error_per_cell);
1363  *
1364  * GridRefinement::refine_and_coarsen_fixed_number(triangulation,
1365  * estimated_error_per_cell,
1366  * 0.3,
1367  * 0.03);
1368  *
1369  * triangulation.execute_coarsening_and_refinement();
1370  * }
1371  *
1372  * @endcode
1373  *
1374  * This function is similar to the one in step-6, but since we use a higher
1375  * degree finite element we save the solution in a different
1376  * way. Visualization programs like VisIt and Paraview typically only
1377  * understand data that is associated with nodes: they cannot plot
1378  * fifth-degree basis functions, which results in a very inaccurate picture
1379  * of the solution we computed. To get around this we save multiple
1380  * <em>patches</em> per cell: in 2D we save 64 bilinear `cells' to the VTU
1381  * file for each cell, and in 3D we save 512. The end result is that the
1382  * visualization program will use a piecewise linear interpolation of the
1383  * fifth-degree basis functions: this captures the solution detail and, with most
1384  * screen resolutions, looks smooth. We save the grid in a separate step
1385  * with no extra patches so that we have a visual representation of the cell
1386  * faces.
1387  *
1388 
1389  *
1390  * Version 9.1 of deal.II gained the ability to write higher degree
1391  * polynomial data in its VTK and VTU output (i.e., to write piecewise
1392  * polynomial visualization data of the same degree as our solution): however, not all recent
1393  * versions of ParaView and VisIt (as of 2018) can read this format, so we
1394  * use the older, more general (but less efficient) approach here.
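 *
 * If your visualization software does understand the newer format, the
 * higher order path could be enabled with something along the following
 * lines (a sketch, not what this program does):
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * DataOutBase::VtkFlags vtk_flags;
 * vtk_flags.write_higher_order_cells = true;
 * data_out.set_flags(vtk_flags);
 * data_out.build_patches(fe.degree);
 * @endcode
 * </div>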
1395  *
1396  * @code
1397  * template <int dim>
1398  * void AdvectionProblem<dim>::output_results(const unsigned int cycle) const
1399  * {
1400  * {
1401  * GridOut grid_out;
1402  * std::ofstream output("grid-" + std::to_string(cycle) + ".vtu");
1403  * grid_out.write_vtu(triangulation, output);
1404  * }
1405  *
1406  * {
1407  * DataOut<dim> data_out;
1408  * data_out.attach_dof_handler(dof_handler);
1409  * data_out.add_data_vector(solution, "solution");
1410  * data_out.build_patches(8);
1411  *
1412  * @endcode
1413  *
1414  * VTU output can be expensive, both to compute and to write to
1415  * disk. Here we ask ZLib, a compression library, to compress the data
1416  * in a way that maximizes throughput.
1417  *
1418  * @code
1419  * DataOutBase::VtkFlags vtk_flags;
1420  * vtk_flags.compression_level =
1421  * DataOutBase::VtkFlags::ZlibCompressionLevel::best_speed;
1422  * data_out.set_flags(vtk_flags);
1423  *
1424  * std::ofstream output("solution-" + std::to_string(cycle) + ".vtu");
1425  * data_out.write_vtu(output);
1426  * }
1427  * }
1428  *
1429  *
1430  * @endcode
1431  *
1432  * ... as is the main loop (setup -- solve -- refine), aside from the number
1433  * of cycles and the initial grid:
1434  *
1435  * @code
1436  * template <int dim>
1437  * void AdvectionProblem<dim>::run()
1438  * {
1439  * for (unsigned int cycle = 0; cycle < 10; ++cycle)
1440  * {
1441  * std::cout << "Cycle " << cycle << ':' << std::endl;
1442  *
1443  * if (cycle == 0)
1444  * {
1445  * GridGenerator::hyper_cube(triangulation, -1, 1);
1446  * triangulation.refine_global(3);
1447  * }
1448  * else
1449  * {
1450  * refine_grid();
1451  * }
1452  *
1453  *
1454  * std::cout << " Number of active cells: "
1455  * << triangulation.n_active_cells() << std::endl;
1456  *
1457  * setup_system();
1458  *
1459  * std::cout << " Number of degrees of freedom: "
1460  * << dof_handler.n_dofs() << std::endl;
1461  *
1462  * assemble_system();
1463  * solve();
1464  * output_results(cycle);
1465  * }
1466  * }
1467  *
1468  *
1469  *
1470  * @endcode
1471  *
1472  *
1473  * <a name="GradientEstimationclassimplementation"></a>
1474  * <h3>GradientEstimation class implementation</h3>
1475  *
1476 
1477  *
1478  * Now for the implementation of the <code>GradientEstimation</code> class.
1479  * Let us start by defining constructors for the
1480  * <code>EstimateScratchData</code> class used by the
1481  * <code>estimate_cell()</code> function:
1482  *
1483  * @code
1484  * template <int dim>
1485  * GradientEstimation::EstimateScratchData<dim>::EstimateScratchData(
1486  * const FiniteElement<dim> &fe,
1487  * const Vector<double> & solution,
1488  * Vector<float> & error_per_cell)
1489  * : fe_midpoint_value(fe,
1490  * QMidpoint<dim>(),
1491  * update_values | update_quadrature_points)
1492  * , solution(solution)
1493  * , error_per_cell(error_per_cell)
1494  * , cell_midpoint_value(1)
1495  * , neighbor_midpoint_value(1)
1496  * {
1497  * @endcode
1498  *
1499  * We allocate a vector to hold iterators to all active neighbors of
1500  * a cell. We reserve the maximal number of active neighbors in order to
1501  * avoid later reallocations. Note how this maximal number of active
1502  * neighbors is computed here.
1503  *
1504  * @code
1505  * active_neighbors.reserve(GeometryInfo<dim>::faces_per_cell *
1506  * GeometryInfo<dim>::max_children_per_face);
1507  * }
1508  *
1509  *
1510  * template <int dim>
1511  * GradientEstimation::EstimateScratchData<dim>::EstimateScratchData(
1512  * const EstimateScratchData &scratch_data)
1513  * : fe_midpoint_value(scratch_data.fe_midpoint_value.get_fe(),
1514  * scratch_data.fe_midpoint_value.get_quadrature(),
1515  * update_values | update_quadrature_points)
1516  * , solution(scratch_data.solution)
1517  * , error_per_cell(scratch_data.error_per_cell)
1518  * , cell_midpoint_value(1)
1519  * , neighbor_midpoint_value(1)
1520  * {}
1521  *
1522  *
1523  * @endcode
1524  *
1525  * Next comes the implementation of the <code>GradientEstimation</code>
1526  * class. The first function does not do much except delegate work to the
1527  * other function, but there is a bit of setup at the top.
1528  *
1529 
1530  *
1531  * Before starting with the work, we check that the vector into
1532  * which the results are written has the right size. Programming
1533  * mistakes in which one forgets to size arguments correctly at the
1534  * calling site are quite common. Because the resulting damage from
1535  * not catching such errors is often subtle (e.g., corruption of
1536  * data somewhere in memory, or non-reproducible results), it is
1537  * well worth the effort to check for such things.
1538  *
1539  * @code
1540  * template <int dim>
1541  * void GradientEstimation::estimate(const DoFHandler<dim> &dof_handler,
1542  * const Vector<double> & solution,
1543  * Vector<float> & error_per_cell)
1544  * {
1545  * Assert(
1546  * error_per_cell.size() == dof_handler.get_triangulation().n_active_cells(),
1547  * ExcInvalidVectorLength(error_per_cell.size(),
1548  * dof_handler.get_triangulation().n_active_cells()));
1549  *
1550  * WorkStream::run(dof_handler.begin_active(),
1551  * dof_handler.end(),
1552  * &GradientEstimation::template estimate_cell<dim>,
1553  * std::function<void(const EstimateCopyData &)>(),
1554  * EstimateScratchData<dim>(dof_handler.get_fe(),
1555  * solution,
1556  * error_per_cell),
1557  * EstimateCopyData());
1558  * }
1559  *
1560  *
1561  * @endcode
1562  *
1563  * Here comes the function that estimates the local error by computing the
1564  * finite difference approximation of the gradient. The function first
1565  * computes the list of active neighbors of the present cell and then
1566  * computes the quantities described in the introduction for each of
1567  * the neighbors. The reason for this order is that it is not a one-liner
1568  * to find a given neighbor with locally refined meshes. In principle, an
1569  * optimized implementation would find neighbors and the quantities
1570  * depending on them in one step, rather than first building a list of
1571  * neighbors and then, in a second step, computing their contributions; but we will
1572  * gladly leave this as an exercise. As discussed before, the worker function
1573  * passed to WorkStream::run works on "scratch" objects that keep all
1574  * temporary objects. This way, we do not need to create and initialize
1575  * objects that are expensive to initialize within the function that does
1576  * the work every time it is called for a given cell. Such an argument is
1577  * passed as the second argument. The third argument would be a "copy-data"
1578  * object (see @ref threads for more information) but we do not actually use
1579  * any of these here. Since WorkStream::run() insists on passing three
1580  * arguments, we declare this function with three arguments, but simply
1581  * ignore the last one.
1582  *
1583 
1584  *
1585  * (This is unsatisfactory from an aesthetic perspective. It can be avoided
1586  * by using an anonymous (lambda) function. If you allow, let us here show
1587  * how. First, assume that we had declared this function to only take two
1588  * arguments by omitting the unused last one. Now, WorkStream::run still
1589  * wants to call this function with three arguments, so we need to find a
1590  * way to "forget" the third argument in the call. Simply passing
1591  * WorkStream::run the pointer to the function as we do above will not do
1592  * this -- the compiler will complain that a function declared to have two
1593  * arguments is called with three arguments. However, we can do this by
1594  * passing the following as the third argument to WorkStream::run():
1595  * <div class=CodeFragmentInTutorialComment>
1596  * @code
1597  * [](const typename DoFHandler<dim>::active_cell_iterator &cell,
1598  * EstimateScratchData<dim> & scratch_data,
1599  * EstimateCopyData &)
1600  * {
1601  * GradientEstimation::estimate_cell<dim>(cell, scratch_data);
1602  * }
1603  * @endcode
1604  * </div>
1605  * This is not much better than the solution implemented below: either the
1606  * routine itself must take three arguments or it must be wrapped by
1607  * something that takes three arguments. We don't use this since adding the
1608  * unused argument at the end is simpler.)
1609  *
1610 
1611  *
1612  * Now for the details:
1613  *
1614  * @code
1615  * template <int dim>
1616  * void GradientEstimation::estimate_cell(
1617  * const typename DoFHandler<dim>::active_cell_iterator &cell,
1618  * EstimateScratchData<dim> & scratch_data,
1619  * const EstimateCopyData &)
1620  * {
1621  * @endcode
1622  *
1623  * We need space for the tensor <code>Y</code>, which is the sum of
1624  * outer products of the y-vectors.
1625  *
1626  * @code
1627  * Tensor<2, dim> Y;
1628  *
1629  * @endcode
1630  *
1631  * First initialize the <code>FEValues</code> object, as well as the
1632  * <code>Y</code> tensor:
1633  *
1634  * @code
1635  * scratch_data.fe_midpoint_value.reinit(cell);
1636  *
1637  * @endcode
1638  *
1639  * Now, before we go on, we first compute a list of all active neighbors
1640  * of the present cell. We do so by first looping over all faces and see
1641  * whether the neighbor there is active, which would be the case if it
1642  * is on the same level as the present cell or one level coarser (note
1643  * that a neighbor can only be once coarser than the present cell, as
1644  * we only allow a maximal difference of one refinement over a face in
1645  * deal.II). Alternatively, the neighbor could be on the same level
1646  * and be further refined; then we have to find which of its children
1647  * are next to the present cell and select these (note that a child
1648  * of a neighbor that is adjacent to this active cell is
1649  * necessarily active itself, due to the one-refinement rule
1650  * cited above).
1651  *
1652 
1653  *
1654  * Things are slightly different in one space dimension, as there the
1655  * one-refinement rule does not exist: neighboring active cells may
1656  * differ in as many refinement levels as they like. In this case, the
1657  * computation becomes a little more difficult, but we will explain
1658  * this below.
1659  *
1660 
1661  *
1662  * Before starting the loop over all neighbors of the present cell, we
1663  * have to clear the array storing the iterators to the active
1664  * neighbors, of course.
1665  *
1666  * @code
1667  * scratch_data.active_neighbors.clear();
1668  * for (unsigned int face_n : GeometryInfo<dim>::face_indices())
1669  * if (!cell->at_boundary(face_n))
1670  * {
1671  * @endcode
1672  *
1673  * First define an abbreviation for the iterator to the face and
1674  * the neighbor
1675  *
1676  * @code
1677  * const auto face = cell->face(face_n);
1678  * const auto neighbor = cell->neighbor(face_n);
1679  *
1680  * @endcode
1681  *
1682  * Then check whether the neighbor is active. If it is, then it
1683  * is on the same level or one level coarser (if we are not in
1684  * 1D), and we are interested in it in any case.
1685  *
1686  * @code
1687  * if (neighbor->is_active())
1688  * scratch_data.active_neighbors.push_back(neighbor);
1689  * else
1690  * {
1691  * @endcode
1692  *
1693  * If the neighbor is not active, then check its children.
1694  *
1695  * @code
1696  * if (dim == 1)
1697  * {
1698  * @endcode
1699  *
1700  * To find the child of the neighbor which borders the
1701  * present cell, successively go to its right child if
1702  * we are left of the present cell (n==0), or go to the
1703  * left child if we are on the right (n==1), until we
1704  * find an active cell.
1705  *
1706  * @code
1707  * auto neighbor_child = neighbor;
1708  * while (neighbor_child->has_children())
1709  * neighbor_child = neighbor_child->child(face_n == 0 ? 1 : 0);
1710  *
1711  * @endcode
1712  *
1713  * As this used some non-trivial geometrical intuition,
1714  * we might want to check whether we did it right,
1715  * i.e., check whether the neighbor of the cell we found
1716  * is indeed the cell we are presently working
1717  * on. Checks like this are often useful and have
1718  * frequently uncovered errors both in algorithms like
1719  * the line above (where it is simple to involuntarily
1720  * exchange <code>n==1</code> for <code>n==0</code> or
1721  * the like) and in the library (the assumptions
1722  * underlying the algorithm above could either be wrong,
1723  * wrongly documented, or are violated due to an error
1724  * in the library). One could in principle remove such
1725  * checks after the program works for some time, but it
1726  * might be a good thing to leave it in anyway to check
1727  * for changes in the library or in the algorithm above.
1728  *
1729 
1730  *
1731  * Note that if this check fails, then this is certainly
1732  * an error that is irrecoverable and probably qualifies
1733  * as an internal error. We therefore use a predefined
1734  * exception class to throw here.
1735  *
1736  * @code
1737  * Assert(neighbor_child->neighbor(face_n == 0 ? 1 : 0) == cell,
1738  * ExcInternalError());
1739  *
1740  * @endcode
1741  *
1742  * If the check succeeded, we push the active neighbor
1743  * we just found to the stack we keep:
1744  *
1745  * @code
1746  * scratch_data.active_neighbors.push_back(neighbor_child);
1747  * }
1748  * else
1749  * @endcode
1750  *
1751  * If we are not in 1d, we collect all neighbor children
1752  * `behind' the subfaces of the current face and move on:
1753  *
1754  * @code
1755  * for (unsigned int subface_n = 0; subface_n < face->n_children();
1756  * ++subface_n)
1757  * scratch_data.active_neighbors.push_back(
1758  * cell->neighbor_child_on_subface(face_n, subface_n));
1759  * }
1760  * }
1761  *
1762  * @endcode
1763  *
1764  * OK, now that we have all the neighbors, let us start the computation
1765  * on each of them. First we do some preliminaries: find out about the
1766  * center of the present cell and the solution at this point. The
1767  * latter is obtained as a vector of function values at the quadrature
1768  * points, of which there is only one, of course. Likewise, the
1769  * position of the center is the position of the first (and only)
1770  * quadrature point in real space.
1771  *
1772  * @code
1773  * const Point<dim> this_center =
1774  * scratch_data.fe_midpoint_value.quadrature_point(0);
1775  *
1776  * scratch_data.fe_midpoint_value.get_function_values(
1777  * scratch_data.solution, scratch_data.cell_midpoint_value);
1778  *
1779  * @endcode
1780  *
1781  * Now loop over all active neighbors and collect the data we
1782  * need.
1783  *
1784  * @code
1785  * Tensor<1, dim> projected_gradient;
1786  * for (const auto &neighbor : scratch_data.active_neighbors)
1787  * {
1788  * @endcode
1789  *
1790  * Then get the center of the neighbor cell and the value of the
1791  * finite element function at that point. Note that for this
1792  * information we have to reinitialize the <code>FEValues</code>
1793  * object for the neighbor cell.
1794  *
1795  * @code
1796  * scratch_data.fe_midpoint_value.reinit(neighbor);
1797  * const Point<dim> neighbor_center =
1798  * scratch_data.fe_midpoint_value.quadrature_point(0);
1799  *
1800  * scratch_data.fe_midpoint_value.get_function_values(
1801  * scratch_data.solution, scratch_data.neighbor_midpoint_value);
1802  *
1803  * @endcode
1804  *
1805  * Compute the vector <code>y</code> connecting the centers of the
1806  * two cells. Note that as opposed to the introduction, we denote
1807  * by <code>y</code> the normalized difference vector, as this is
1808  * the quantity used everywhere in the computations.
1809  *
1810  * @code
1811  * Tensor<1, dim> y = neighbor_center - this_center;
1812  * const double distance = y.norm();
1813  * y /= distance;
1814  *
1815  * @endcode
1816  *
1817  * Then add up the contribution of this cell to the Y matrix...
1818  *
1819  * @code
1820  * for (unsigned int i = 0; i < dim; ++i)
1821  * for (unsigned int j = 0; j < dim; ++j)
1822  * Y[i][j] += y[i] * y[j];
1823  *
1824  * @endcode
1825  *
1826  * ... and update the sum of difference quotients:
1827  *
1828  * @code
1829  * projected_gradient += (scratch_data.neighbor_midpoint_value[0] -
1830  * scratch_data.cell_midpoint_value[0]) /
1831  * distance * y;
1832  * }
1833  *
1834  * @endcode
1835  *
1836  * In order to be able to determine an approximation of the gradient
1837  * for the present cell from the information collected from the
1838  * neighbors, the vectors <code>y</code> we have passed over must
1839  * span the whole space; otherwise we would not have all components of
1840  * the gradient. Whether they do is indicated by the invertibility of the matrix.
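 *
 * In formulas: with @f$\mathbf y_{KK'}@f$ the normalized direction vectors
 * computed in the loop above and @f$u_K, u_{K'}@f$ the midpoint values of
 * the solution on the present cell and its neighbors, the loop has
 * accumulated
 * @f[
 *   Y = \sum_{K'} \mathbf y_{KK'} \mathbf y_{KK'}^T,
 *   \qquad
 *   \mathbf r = \sum_{K'} \mathbf y_{KK'}\,
 *     \frac{u_{K'}-u_K}{\|\mathbf x_{K'}-\mathbf x_K\|},
 * @f]
 * and the approximation computed below is
 * @f$\nabla u(\mathbf x_K) \approx Y^{-1}\mathbf r@f$. The refinement
 * indicator finally stored for this cell is
 * @f$h_K^{1+d/2}\,\|Y^{-1}\mathbf r\|@f$ with @f$h_K@f$ the cell diameter.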
1841  *
1842 
1843  *
1844  * If the matrix is not invertible, then the present
1845  * cell had an insufficient number of active neighbors. In contrast to
1846  * all previous cases (where we raised exceptions) this is, however,
1847  * not a programming error: it is a runtime error that can happen in
1848  * optimized mode even if it ran well in debug mode, so it is
1849  * reasonable to try to catch this error also in optimized mode. For
1850  * this case, there is the <code>AssertThrow</code> macro: it checks
1851  * the condition like the <code>Assert</code> macro, but not only in
1852  * debug mode; it then outputs an error message, but instead of
1853  * aborting the program as in the case of the <code>Assert</code>
1854  * macro, the exception is thrown using the <code>throw</code> command
1855  * of C++. This way, one has the possibility to catch this error and
1856  * take reasonable countermeasures. One such measure would be to
1857  * refine the grid globally, as the case of insufficient directions
1858  * cannot occur if every cell of the initial grid has been refined at
1859  * least once.
1860  *
1861  * @code
1862  * AssertThrow(determinant(Y) != 0, ExcInsufficientDirections());
1863  *
1864  * @endcode
1865  *
1866  * If, on the other hand, the matrix is invertible, then invert it,
1867  * multiply the other quantity with it, and compute the estimated error
1868  * using this quantity and the correct powers of the mesh width:
1869  *
1870  * @code
1871  * const Tensor<2, dim> Y_inverse = invert(Y);
1872  *
1873  * const Tensor<1, dim> gradient = Y_inverse * projected_gradient;
1874  *
1875  * @endcode
1876  *
1877  * The last part of this function is the one where we write into
1878  * the element of the output vector what we have just
1879  * computed. The address of this vector has been stored in the
1880  * scratch data object, and all we have to do is know how to get
1881  * at the correct element inside this vector -- for this, we can simply
1882  * ask the cell we are on for its active cell index:
1883  *
1884  * @code
1885  * scratch_data.error_per_cell(cell->active_cell_index()) =
1886  * (std::pow(cell->diameter(), 1 + 1.0 * dim / 2) * gradient.norm());
1887  * }
1888  * } // namespace Step9
1889  *
1890  *
1891  * @endcode
1892  *
1893  *
1894  * <a name="Mainfunction"></a>
1895  * <h3>Main function</h3>
1896  *
1897 
1898  *
1899  * The <code>main</code> function is similar to the previous examples. The
1900  * primary difference is that we use MultithreadInfo to set the maximum
1901  * number of threads (see the documentation module @ref threads
1902  * "Parallel computing with multiple processors accessing shared memory"
1903  * for more information). The number of threads used is the minimum of the
1904  * environment variable DEAL_II_NUM_THREADS and the parameter of
1905  * <code>set_thread_limit</code>. If no value is given to
1906  * <code>set_thread_limit</code>, the default value from the Intel Threading
1907  * Building Blocks (TBB) library is used. If the call to
1908  * <code>set_thread_limit</code> is omitted, the number of threads will be
1909  * chosen by TBB independently of DEAL_II_NUM_THREADS.
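 *
 * For example, to limit the program to at most four worker threads, one
 * could either pass that number explicitly (a sketch, not what this
 * program does):
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * MultithreadInfo::set_thread_limit(4);
 * @endcode
 * </div>
 * or keep the argument-less call used below and instead run the program as
 * <code>DEAL_II_NUM_THREADS=4 ./step-9</code>.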
1910  *
1911  * @code
1912  * int main()
1913  * {
1914  * using namespace dealii;
1915  * try
1916  * {
1917  * MultithreadInfo::set_thread_limit();
1918  *
1919  * Step9::AdvectionProblem<2> advection_problem_2d;
1920  * advection_problem_2d.run();
1921  * }
1922  * catch (std::exception &exc)
1923  * {
1924  * std::cerr << std::endl
1925  * << std::endl
1926  * << "----------------------------------------------------"
1927  * << std::endl;
1928  * std::cerr << "Exception on processing: " << std::endl
1929  * << exc.what() << std::endl
1930  * << "Aborting!" << std::endl
1931  * << "----------------------------------------------------"
1932  * << std::endl;
1933  * return 1;
1934  * }
1935  * catch (...)
1936  * {
1937  * std::cerr << std::endl
1938  * << std::endl
1939  * << "----------------------------------------------------"
1940  * << std::endl;
1941  * std::cerr << "Unknown exception!" << std::endl
1942  * << "Aborting!" << std::endl
1943  * << "----------------------------------------------------"
1944  * << std::endl;
1945  * return 1;
1946  * }
1947  *
1948  * return 0;
1949  * }
1950  * @endcode
1951 <a name="Results"></a><h1>Results</h1>
1952 
1953 
1954 
1955 The results of this program are not particularly spectacular. They
1956 consist of the console output, some grid files, and the solution on
1957 each of these grids. First for the console output:
1958 @code
1959 Cycle 0:
1960  Number of active cells: 64
1961  Number of degrees of freedom: 1681
1962  Iterations required for convergence: 298
1963  Max norm of residual: 3.60316e-12
1964 Cycle 1:
1965  Number of active cells: 124
1966  Number of degrees of freedom: 3537
1967  Iterations required for convergence: 415
1968  Max norm of residual: 3.70682e-12
1969 Cycle 2:
1970  Number of active cells: 247
1971  Number of degrees of freedom: 6734
1972  Iterations required for convergence: 543
1973  Max norm of residual: 7.19716e-13
1974 Cycle 3:
1975  Number of active cells: 502
1976  Number of degrees of freedom: 14105
1977  Iterations required for convergence: 666
1978  Max norm of residual: 3.45628e-13
1979 Cycle 4:
1980  Number of active cells: 1003
1981  Number of degrees of freedom: 27462
1982  Iterations required for convergence: 1064
1983  Max norm of residual: 1.86495e-13
1984 Cycle 5:
1985  Number of active cells: 1993
1986  Number of degrees of freedom: 55044
1987  Iterations required for convergence: 1251
1988  Max norm of residual: 1.28765e-13
1989 Cycle 6:
1990  Number of active cells: 3985
1991  Number of degrees of freedom: 108492
1992  Iterations required for convergence: 2035
1993  Max norm of residual: 6.78085e-14
1994 Cycle 7:
1995  Number of active cells: 7747
1996  Number of degrees of freedom: 210612
1997  Iterations required for convergence: 2187
1998  Max norm of residual: 2.61457e-14
1999 Cycle 8:
2000  Number of active cells: 15067
2001  Number of degrees of freedom: 406907
2002  Iterations required for convergence: 3079
2003  Max norm of residual: 2.9932e-14
2004 Cycle 9:
2005  Number of active cells: 29341
2006  Number of degrees of freedom: 780591
2007  Iterations required for convergence: 3913
2008  Max norm of residual: 8.15689e-15
2009 @endcode
2010 
2011 Quite a number of cells are used on the finest level to resolve the features of
2012 the solution. Here are the fourth and tenth grids:
2013 <div class="twocolumn" style="width: 80%">
2014  <div>
2015  <img src="https://www.dealii.org/images/steps/developer/step-9-grid-3.png"
2016  alt="Fourth grid in the refinement cycle, showing some adaptivity to features."
2017  width="400" height="400">
2018  </div>
2019  <div>
2020  <img src="https://www.dealii.org/images/steps/developer/step-9-grid-9.png"
2021  alt="Tenth grid in the refinement cycle, showing that the waves are fully captured."
2022  width="400" height="400">
2023  </div>
2024 </div>
2025 and the fourth and tenth solutions:
2026 <div class="twocolumn" style="width: 80%">
2027  <div>
2028  <img src="https://www.dealii.org/images/steps/developer/step-9-solution-3.png"
2029  alt="Fourth solution, showing that we resolve most features but some
2030  are still unresolved and appear blurry."
2031  width="400" height="400">
2032  </div>
2033  <div>
2034  <img src="https://www.dealii.org/images/steps/developer/step-9-solution-9.png"
2035  alt="Tenth solution, showing a fully resolved flow."
2036  width="400" height="400">
2037  </div>
2038 </div>
2039 and both the grid and solution zoomed in:
2040 <div class="twocolumn" style="width: 80%">
2041  <div>
2042  <img src="https://www.dealii.org/images/steps/developer/step-9-solution-3-zoom.png"
2043  alt="Detail of the fourth solution, showing that we resolve most
2044  features but some are still unresolved and appear blurry. In particular,
2045  the larger cells need to be refined."
2046  width="400" height="400">
2047  </div>
2048  <div>
2049  <img src="https://www.dealii.org/images/steps/developer/step-9-solution-9-zoom.png"
2050  alt="Detail of the tenth solution, showing that we needed a lot more
2051  cells than were present in the fourth solution."
2052  width="400" height="400">
2053  </div>
2054 </div>
2055 
2056 The solution consists of a part that is transported along the wiggly
2057 advection field from the left and lower boundaries to the top right, and of a
2058 part that is created by the source in the lower left corner and is then
2059 transported along as well. The grid shown above is well-adapted to
2060 resolve these features. The comparison between plots shows that, even though we
2061 are using a high-order approximation, we still need adaptive mesh refinement to
2062 fully resolve the wiggles.
2063  *
2064  *
2065 <a name="PlainProg"></a>
2066 <h1> The plain program</h1>
2067 @include "step-9.cc"
2068 */