step-9.h
1070 *   false);
1071 *   sparsity_pattern.copy_from(dsp);
1072 *  
1073 *   system_matrix.reinit(sparsity_pattern);
1074 *  
1075 *   solution.reinit(dof_handler.n_dofs());
1076 *   system_rhs.reinit(dof_handler.n_dofs());
1077 *   }
1078 *  
1079 *  
1080 *  
1081 * @endcode
1082 *
1083 * In the following function, the matrix and right hand side are
1084 * assembled. As stated in the documentation of the main class above, it
1085 * does not do this itself, but rather delegates to the function following
1086 * next, utilizing the WorkStream concept discussed in @ref threads .
1087 *
1088
1089 *
1090 * If you have looked through the @ref threads module, you will have
1091 * seen that assembling in parallel does not take an incredible
1092 * amount of extra code as long as you diligently describe what the
1093 * scratch and copy data objects are, and if you define suitable
1094 * functions for the local assembly and the copy operation from local
1095 * contributions to global objects. Once this is done, the following does
1096 * all the heavy lifting of running these operations on multiple
1097 * threads on as many cores as you have in your system:
1098 *
1099 * @code
1100 *   template <int dim>
1101 *   void AdvectionProblem<dim>::assemble_system()
1102 *   {
1103 *   WorkStream::run(dof_handler.begin_active(),
1104 *   dof_handler.end(),
1105 *   *this,
1106 *   &AdvectionProblem::local_assemble_system,
1107 *   &AdvectionProblem::copy_local_to_global,
1108 *   AssemblyScratchData(fe),
1109 *   AssemblyCopyData());
1110 *   }
1111 *  
1112 *  
1113 *  
1114 * @endcode
1115 *
1116 * As already mentioned above, we need to have scratch objects for
1117 * the parallel computation of local contributions. These objects
1118 * contain FEValues and FEFaceValues objects (as well as some arrays), and so
1119 * we will need to have constructors and copy constructors that allow us to
1120 * create them. For the cell terms we need the values
1121 * and gradients of the shape functions, the quadrature points in
1122 * order to determine the source density and the advection field at
1123 * a given point, and the weights of the quadrature points times the
1124 * determinant of the Jacobian at these points. In contrast, for the
1125 * boundary integrals, we don't need the gradients, but rather the
1126 * normal vectors to the cells. This determines which update flags
1127 * we will have to pass to the constructors of the members of the
1128 * class:
1129 *
1130 * @code
1131 *   template <int dim>
1132 *   AdvectionProblem<dim>::AssemblyScratchData::AssemblyScratchData(
1133 *   const FiniteElement<dim> &fe)
1134 *   : fe_values(fe,
1135 *   QGauss<dim>(fe.degree + 1),
1136 *   update_values | update_gradients | update_quadrature_points |
1137 *   update_JxW_values)
1138 *   , fe_face_values(fe,
1139 *   QGauss<dim - 1>(fe.degree + 1),
1140 *   update_values | update_quadrature_points |
1141 *   update_JxW_values | update_normal_vectors)
1142 *   , rhs_values(fe_values.get_quadrature().size())
1143 *   , advection_directions(fe_values.get_quadrature().size())
1144 *   , face_boundary_values(fe_face_values.get_quadrature().size())
1145 *   , face_advection_directions(fe_face_values.get_quadrature().size())
1146 *   {}
1147 *  
1148 *  
1149 *  
1150 *   template <int dim>
1151 *   AdvectionProblem<dim>::AssemblyScratchData::AssemblyScratchData(
1152 *   const AssemblyScratchData &scratch_data)
1153 *   : fe_values(scratch_data.fe_values.get_fe(),
1154 *   scratch_data.fe_values.get_quadrature(),
1155 *   update_values | update_gradients | update_quadrature_points |
1156 *   update_JxW_values)
1157 *   , fe_face_values(scratch_data.fe_face_values.get_fe(),
1158 *   scratch_data.fe_face_values.get_quadrature(),
1159 *   update_values | update_quadrature_points |
1160 *   update_JxW_values | update_normal_vectors)
1161 *   , rhs_values(scratch_data.rhs_values.size())
1162 *   , advection_directions(scratch_data.advection_directions.size())
1163 *   , face_boundary_values(scratch_data.face_boundary_values.size())
1164 *   , face_advection_directions(scratch_data.face_advection_directions.size())
1165 *   {}
1166 *  
1167 *  
1168 *  
1169 * @endcode
1170 *
1171 * Now, this is the function that does the actual work. It is not very
1172 * different from the <code>assemble_system</code> functions of previous
1173 * example programs, so we will again only comment on the differences. The
1174 * mathematical stuff closely follows what we have said in the introduction.
1175 *
1176
1177 *
1178 * There are a number of points worth mentioning here, though. The
1179 * first one is that we have moved the FEValues and FEFaceValues
1180 * objects into the ScratchData object. We have done so because the
1181 * alternative would have been to simply create one every time we
1182 * get into this function -- i.e., on every cell. It now turns out
1183 * that the FEValues classes were written with the explicit goal of
1184 * moving everything that remains the same from cell to cell into
1185 * the construction of the object, and only do as little work as
1186 * possible in FEValues::reinit() whenever we move to a new
1187 * cell. What this means is that it would be very expensive to
1188 * create a new object of this kind in this function as we would
1189 * have to do it for every cell -- exactly the thing we wanted to
1190 * avoid with the FEValues class. Instead, what we do is create it
1191 * only once (or a small number of times) in the scratch objects and
1192 * then re-use it as often as we can.
1193 *
1194
1195 *
1196 * This raises the question of whether there are other objects this
1197 * function needs whose creation is expensive compared to their
1198 * use. The <code>AdvectionField</code>,
1199 * <code>RightHandSide</code> and <code>BoundaryValues</code> objects do
1200 * not cost much to create, so there would be little harm in creating
1201 * them right here. However, allocating memory for the
1202 * <code>rhs_values</code> array and similar variables typically costs a
1203 * significant amount of time, compared to just accessing the (temporary)
1204 * values we store in them. Consequently, these arrays (along with the
1205 * function objects mentioned above) are also members of the
1206 * <code>AssemblyScratchData</code> class, so that they are allocated only
1207 * once per scratch object and then reused from cell to cell.
1208 *
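 * For reference, here is a minimal sketch of what the scratch data
 * structure looks like, reconstructed from the constructors above and from
 * the way its members are used below; the actual declaration appears
 * earlier in the program and is not repeated in this excerpt:
 *
 * @code
 *   // Sketch only -- member names inferred from their use in this file.
 *   struct AssemblyScratchData
 *   {
 *     AssemblyScratchData(const FiniteElement<dim> &fe);
 *     AssemblyScratchData(const AssemblyScratchData &scratch_data);
 *
 *     FEValues<dim>     fe_values;      // for cell integrals
 *     FEFaceValues<dim> fe_face_values; // for boundary face integrals
 *
 *     std::vector<double>         rhs_values;
 *     std::vector<Tensor<1, dim>> advection_directions;
 *     std::vector<double>         face_boundary_values;
 *     std::vector<Tensor<1, dim>> face_advection_directions;
 *
 *     AdvectionField<dim> advection_field;
 *     RightHandSide<dim>  right_hand_side;
 *     BoundaryValues<dim> boundary_values;
 *   };
 * @endcode
 *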
1209 * @code
1210 *   template <int dim>
1211 *   void AdvectionProblem<dim>::local_assemble_system(
1212 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
1213 *   AssemblyScratchData &scratch_data,
1214 *   AssemblyCopyData &copy_data)
1215 *   {
1216 * @endcode
1217 *
1218 * We define some abbreviations to avoid unnecessarily long lines:
1219 *
1220 * @code
1221 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1222 *   const unsigned int n_q_points =
1223 *   scratch_data.fe_values.get_quadrature().size();
1224 *   const unsigned int n_face_q_points =
1225 *   scratch_data.fe_face_values.get_quadrature().size();
1226 *  
1227 * @endcode
1228 *
1229 * We declare cell matrix and cell right hand side...
1230 *
1231 * @code
1232 *   copy_data.cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
1233 *   copy_data.cell_rhs.reinit(dofs_per_cell);
1234 *  
1235 * @endcode
1236 *
1237 * ... an array to hold the global indices of the degrees of freedom of
1238 * the cell on which we are presently working...
1239 *
1240 * @code
1241 *   copy_data.local_dof_indices.resize(dofs_per_cell);
1242 *  
1243 * @endcode
1244 *
1245 * ... then initialize the <code>FEValues</code> object...
1246 *
1247 * @code
1248 *   scratch_data.fe_values.reinit(cell);
1249 *  
1250 * @endcode
1251 *
1252 * ... obtain the values of right hand side and advection directions
1253 * at the quadrature points...
1254 *
1255 * @code
1256 *   scratch_data.advection_field.value_list(
1257 *   scratch_data.fe_values.get_quadrature_points(),
1258 *   scratch_data.advection_directions);
1259 *   scratch_data.right_hand_side.value_list(
1260 *   scratch_data.fe_values.get_quadrature_points(), scratch_data.rhs_values);
1261 *  
1262 * @endcode
1263 *
1264 * ... set the value of the streamline diffusion parameter as
1265 * described in the introduction...
1266 *
1267 * @code
1268 *   const double delta = 0.1 * cell->diameter();
1269 *  
1270 * @endcode
1271 *
1272 * ... and assemble the local contributions to the system matrix and
1273 * right hand side as also discussed above:
1274 *
1275 * @code
1276 *   for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1277 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1278 *   {
1279 * @endcode
1280 *
1281 * Alias the AssemblyScratchData object to keep the lines from
1282 * getting too long:
1283 *
1284 * @code
1285 *   const auto &sd = scratch_data;
1286 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1287 *   copy_data.cell_matrix(i, j) +=
1288 *   ((sd.fe_values.shape_value(i, q_point) + // (phi_i +
1289 *   delta * (sd.advection_directions[q_point] * // delta beta
1290 *   sd.fe_values.shape_grad(i, q_point))) * // grad phi_i)
1291 *   sd.advection_directions[q_point] * // beta
1292 *   sd.fe_values.shape_grad(j, q_point)) * // grad phi_j
1293 *   sd.fe_values.JxW(q_point); // dx
1294 *  
1295 *   copy_data.cell_rhs(i) +=
1296 *   (sd.fe_values.shape_value(i, q_point) + // (phi_i +
1297 *   delta * (sd.advection_directions[q_point] * // delta beta
1298 *   sd.fe_values.shape_grad(i, q_point))) * // grad phi_i)
1299 *   sd.rhs_values[q_point] * // f
1300 *   sd.fe_values.JxW(q_point); // dx
1301 *   }
1302 *  
1303 * @endcode
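 *
 * Spelled out, and merely restating what the loop above accumulates (with
 * @f$\varphi_i@f$ the shape functions, @f$\beta@f$ the advection field,
 * @f$f@f$ the right hand side, @f$\delta@f$ the streamline-diffusion
 * parameter, and @f$w_q@f$ the JxW value at quadrature point @f$x_q@f$),
 * the cell contributions are
 * @f[
 *   A^K_{ij} = \sum_q \bigl(\varphi_i(x_q)
 *              + \delta\,\beta(x_q)\cdot\nabla\varphi_i(x_q)\bigr)\,
 *              \bigl(\beta(x_q)\cdot\nabla\varphi_j(x_q)\bigr)\, w_q,
 *   \qquad
 *   F^K_i = \sum_q \bigl(\varphi_i(x_q)
 *           + \delta\,\beta(x_q)\cdot\nabla\varphi_i(x_q)\bigr)\, f(x_q)\, w_q.
 * @f]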
1304 *
1305 * Besides the cell terms which we have built up now, the bilinear
1306 * form of the present problem also contains terms on the boundary of
1307 * the domain. Therefore, we have to check whether any of the faces of
1308 * this cell are on the boundary of the domain, and if so assemble the
1309 * contributions of this face as well. Of course, the bilinear form
1310 * only contains contributions from the <code>inflow</code> part of
1311 * the boundary, but to find out whether a certain part of a face of
1312 * the present cell is part of the inflow boundary, we have to have
1313 * information on the exact location of the quadrature points and on
1314 * the direction of flow at this point; we obtain this information
1315 * using the FEFaceValues object and only decide within the main loop
1316 * whether a quadrature point is on the inflow boundary.
1317 *
1318 * @code
1319 *   for (const auto &face : cell->face_iterators())
1320 *   if (face->at_boundary())
1321 *   {
1322 * @endcode
1323 *
1324 * Ok, this face of the present cell is on the boundary of the
1325 * domain. Just as for the usual FEValues object which we have
1326 * used in previous examples and also above, we have to
1327 * reinitialize the FEFaceValues object for the present face:
1328 *
1329 * @code
1330 *   scratch_data.fe_face_values.reinit(cell, face);
1331 *  
1332 * @endcode
1333 *
1334 * For the quadrature points at hand, we ask for the values of
1335 * the inflow function and for the direction of flow:
1336 *
1337 * @code
1338 *   scratch_data.boundary_values.value_list(
1339 *   scratch_data.fe_face_values.get_quadrature_points(),
1340 *   scratch_data.face_boundary_values);
1341 *   scratch_data.advection_field.value_list(
1342 *   scratch_data.fe_face_values.get_quadrature_points(),
1343 *   scratch_data.face_advection_directions);
1344 *  
1345 * @endcode
1346 *
1347 * Now loop over all quadrature points and see whether this face is on
1348 * the inflow or outflow part of the boundary. The normal
1349 * vector points out of the cell: since the face is at
1350 * the boundary, the normal vector points out of the domain,
1351 * so if the advection direction points into the domain, its
1352 * scalar product with the normal vector must be negative (to see why
1353 * this is true, consider the scalar product definition that uses a
1354 * cosine):
1355 *
1356 * @code
1357 *   for (unsigned int q_point = 0; q_point < n_face_q_points; ++q_point)
1358 *   if (scratch_data.fe_face_values.normal_vector(q_point) *
1359 *   scratch_data.face_advection_directions[q_point] <
1360 *   0.)
1361 * @endcode
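 *
 * In formulas, the test just shown picks out exactly those quadrature
 * points that lie on the inflow part of the boundary,
 * @f[
 *   \partial\Omega_- = \{ x \in \partial\Omega \,:\, \beta(x)\cdot n(x) < 0 \},
 * @f]
 * where @f$n(x)@f$ is the outward normal vector and @f$\beta(x)@f$ the
 * advection direction.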
1362 *
1363 * If the face is part of the inflow boundary, then compute the
1364 * contributions of this face to the global matrix and right
1365 * hand side, using the values obtained from the
1366 * FEFaceValues object and the formulae discussed in the
1367 * introduction:
1368 *
1369 * @code
1370 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1371 *   {
1372 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1373 *   copy_data.cell_matrix(i, j) -=
1374 *   (scratch_data.face_advection_directions[q_point] *
1375 *   scratch_data.fe_face_values.normal_vector(q_point) *
1376 *   scratch_data.fe_face_values.shape_value(i, q_point) *
1377 *   scratch_data.fe_face_values.shape_value(j, q_point) *
1378 *   scratch_data.fe_face_values.JxW(q_point));
1379 *  
1380 *   copy_data.cell_rhs(i) -=
1381 *   (scratch_data.face_advection_directions[q_point] *
1382 *   scratch_data.fe_face_values.normal_vector(q_point) *
1383 *   scratch_data.face_boundary_values[q_point] *
1384 *   scratch_data.fe_face_values.shape_value(i, q_point) *
1385 *   scratch_data.fe_face_values.JxW(q_point));
1386 *   }
1387 *   }
1388 *  
1389 * @endcode
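 *
 * Restated from the loop above (with @f$g@f$ the boundary values
 * prescribed on the inflow boundary and the same notation as before), the
 * contributions subtracted at each such quadrature point are
 * @f[
 *   A^K_{ij} \,\leftarrow\, A^K_{ij}
 *     - \bigl(\beta(x_q)\cdot n(x_q)\bigr)\,\varphi_i(x_q)\,\varphi_j(x_q)\, w_q,
 *   \qquad
 *   F^K_i \,\leftarrow\, F^K_i
 *     - \bigl(\beta(x_q)\cdot n(x_q)\bigr)\, g(x_q)\,\varphi_i(x_q)\, w_q.
 * @f]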
1390 *
1391 * The final piece of information the copy routine needs is the global
1392 * indices of the degrees of freedom on this cell, so we end by writing
1393 * them to the local array:
1394 *
1395 * @code
1396 *   cell->get_dof_indices(copy_data.local_dof_indices);
1397 *   }
1398 *  
1399 *  
1400 *  
1401 * @endcode
1402 *
1403 * The second function we needed to write was the one that copies
1404 * the local contributions the previous function computed (and
1405 * put into the AssemblyCopyData object) into the global matrix and right
1406 * hand side vector objects. This is essentially what we always had
1407 * as the last block of code when assembling something on every
1408 * cell. The following should therefore be pretty obvious:
1409 *
1410 * @code
1411 *   template <int dim>
1412 *   void
1413 *   AdvectionProblem<dim>::copy_local_to_global(const AssemblyCopyData &copy_data)
1414 *   {
1415 *   hanging_node_constraints.distribute_local_to_global(
1416 *   copy_data.cell_matrix,
1417 *   copy_data.cell_rhs,
1418 *   copy_data.local_dof_indices,
1419 *   system_matrix,
1420 *   system_rhs);
1421 *   }
1422 *  
1423 * @endcode
1424 *
1425 * Here comes the linear solver routine. As the system is no longer
1426 * symmetric positive definite as in all the previous examples, we cannot
1427 * use the Conjugate Gradient method anymore. Rather, we use a solver that
1428 * is more general and does not rely on any special properties of the
1429 * matrix: the GMRES method. GMRES, like the conjugate gradient method,
1430 * requires a decent preconditioner: we use a Jacobi preconditioner here,
1431 * which works well enough for this problem.
1432 *
1433 * @code
1434 *   template <int dim>
1435 *   void AdvectionProblem<dim>::solve()
1436 *   {
1437 *   SolverControl solver_control(std::max<std::size_t>(1000,
1438 *   system_rhs.size() / 10),
1439 *   1e-10 * system_rhs.l2_norm());
1440 *   SolverGMRES<Vector<double>> solver(solver_control);
1441 *   PreconditionJacobi<SparseMatrix<double>> preconditioner;
1442 *   preconditioner.initialize(system_matrix, 1.0);
1443 *   solver.solve(system_matrix, solution, system_rhs, preconditioner);
1444 *  
1445 *   Vector<double> residual(dof_handler.n_dofs());
1446 *  
1447 *   system_matrix.vmult(residual, solution);
1448 *   residual -= system_rhs;
1449 *   std::cout << " Iterations required for convergence: "
1450 *   << solver_control.last_step() << '\n'
1451 *   << " Max norm of residual: "
1452 *   << residual.linfty_norm() << '\n';
1453 *  
1454 *   hanging_node_constraints.distribute(solution);
1455 *   }
1456 *  
1457 * @endcode
1458 *
1459 * The following function refines the grid according to the quantity
1460 * described in the introduction. The respective computations are made in
1461 * the class <code>GradientEstimation</code>.
1462 *
1463 * @code
1464 *   template <int dim>
1465 *   void AdvectionProblem<dim>::refine_grid()
1466 *   {
1467 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
1468 *  
1469 *   GradientEstimation::estimate(dof_handler,
1470 *   solution,
1471 *   estimated_error_per_cell);
1472 *  
1473 *   GridRefinement::refine_and_coarsen_fixed_number(triangulation,
1474 *   estimated_error_per_cell,
1475 *   0.3,
1476 *   0.03);
1477 *  
1478 *   triangulation.execute_coarsening_and_refinement();
1479 *   }
1480 *  
1481 * @endcode
1482 *
1483 * This function is similar to the one in step-6, but since we use a higher
1484 * degree finite element we save the solution in a different
1485 * way. Visualization programs like VisIt and Paraview typically only
1486 * understand data that is associated with nodes: they cannot plot
1487 * fifth-degree basis functions, which results in a very inaccurate picture
1488 * of the solution we computed. To get around this we save multiple
1489 * <em>patches</em> per cell: in 2d we save 64 bilinear `cells' to the VTU
1490 * file for each cell, and in 3d we save 512. The end result is that the
1491 * visualization program will use a piecewise linear interpolation of the
1492 * quintic basis functions: this captures the solution detail and, with most
1493 * screen resolutions, looks smooth. We save the grid in a separate step
1494 * with no extra patches so that we have a visual representation of the cell
1495 * faces.
1496 *
1497
1498 *
1499 * Version 9.1 of deal.II gained the ability to write higher-degree
1500 * polynomials (i.e., piecewise biquintic visualization data for our
1501 * piecewise biquintic solution) to its VTK and VTU output; however, not
1502 * all versions of ParaView and VisIt in common use (as of 2018) can read
1503 * this format, so we use the older, more general (but less efficient) approach here.
1504 *
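 * As a rough sketch of that higher-order alternative (not used by this
 * program, and assuming the <code>write_higher_order_cells</code> flag of
 * DataOutBase::VtkFlags that became available around release 9.1), one
 * would replace the plain <code>build_patches(8)</code> call below by
 * something along these lines:
 *
 * @code
 *   // Sketch: emit one higher-order Lagrange cell per mesh cell instead
 *   // of many subdivided (bi-/tri-)linear patches.
 *   DataOutBase::VtkFlags flags;
 *   flags.write_higher_order_cells = true;
 *   data_out.set_flags(flags);
 *   data_out.build_patches(fe.degree);
 * @endcode
 *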
1505 * @code
1506 *   template <int dim>
1507 *   void AdvectionProblem<dim>::output_results(const unsigned int cycle) const
1508 *   {
1509 *   {
1510 *   GridOut grid_out;
1511 *   const std::string filename = "grid-" + std::to_string(cycle) + ".vtu";
1512 *   std::ofstream output(filename);
1513 *   grid_out.write_vtu(triangulation, output);
1514 *   std::cout << "Grid written to " << filename << std::endl;
1515 *   }
1516 *  
1517 *   {
1518 *   DataOut<dim> data_out;
1519 *   data_out.attach_dof_handler(dof_handler);
1520 *   data_out.add_data_vector(solution, "solution");
1521 *   data_out.build_patches(8);
1522 *  
1523 * @endcode
1524 *
1525 * VTU output can be expensive, both to compute and to write to
1526 * disk. Here we ask ZLib, a compression library, to compress the data
1527 * in a way that maximizes throughput.
1528 *
1529 * @code
1530 *   DataOutBase::VtkFlags vtk_flags;
1531 *   vtk_flags.compression_level = DataOutBase::CompressionLevel::best_speed;
1532 *   data_out.set_flags(vtk_flags);
1533 *  
1534 *   const std::string filename = "solution-" + std::to_string(cycle) + ".vtu";
1535 *   std::ofstream output(filename);
1536 *   data_out.write_vtu(output);
1537 *   std::cout << "Solution written to " << filename << std::endl;
1538 *   }
1539 *   }
1540 *  
1541 *  
1542 * @endcode
1543 *
1544 * ... as is the main loop (setup -- solve -- refine), aside from the number
1545 * of cycles and the initial grid:
1546 *
1547 * @code
1548 *   template <int dim>
1549 *   void AdvectionProblem<dim>::run()
1550 *   {
1551 *   for (unsigned int cycle = 0; cycle < 10; ++cycle)
1552 *   {
1553 *   std::cout << "Cycle " << cycle << ':' << std::endl;
1554 *  
1555 *   if (cycle == 0)
1556 *   {
1557 *   GridGenerator::hyper_cube(triangulation, -1, 1);
1558 *   triangulation.refine_global(3);
1559 *   }
1560 *   else
1561 *   {
1562 *   refine_grid();
1563 *   }
1564 *  
1565 *  
1566 *   std::cout << " Number of active cells: "
1567 *   << triangulation.n_active_cells() << std::endl;
1568 *  
1569 *   setup_system();
1570 *  
1571 *   std::cout << " Number of degrees of freedom: "
1572 *   << dof_handler.n_dofs() << std::endl;
1573 *  
1574 *   assemble_system();
1575 *   solve();
1576 *   output_results(cycle);
1577 *   }
1578 *   }
1579 *  
1580 *  
1581 *  
1582 * @endcode
1583 *
1584 *
1585 * <a name="step_9-GradientEstimationclassimplementation"></a>
1586 * <h3>GradientEstimation class implementation</h3>
1587 *
1588
1589 *
1590 * Now for the implementation of the <code>GradientEstimation</code> class.
1591 * Let us start by defining constructors for the
1592 * <code>EstimateScratchData</code> class used by the
1593 * <code>estimate_cell()</code> function:
1594 *
1595 * @code
1596 *   template <int dim>
1597 *   GradientEstimation::EstimateScratchData<dim>::EstimateScratchData(
1598 *   const FiniteElement<dim> &fe,
1599 *   const Vector<double> &solution,
1600 *   Vector<float> &error_per_cell)
1601 *   : fe_midpoint_value(fe,
1602 *   QMidpoint<dim>(),
1603 *   update_values | update_quadrature_points)
1604 *   , solution(solution)
1605 *   , error_per_cell(error_per_cell)
1606 *   , cell_midpoint_value(1)
1607 *   , neighbor_midpoint_value(1)
1608 *   {
1609 * @endcode
1610 *
1611 * We allocate a vector to hold iterators to all active neighbors of
1612 * a cell. We reserve the maximal number of active neighbors in order to
1613 * avoid later reallocations. Note how this maximal number is computed here:
1614 * faces per cell times children per face, i.e., 4*2=8 in 2d and 6*4=24 in 3d.
1615 *
1616 * @code
1617 *   active_neighbors.reserve(GeometryInfo<dim>::faces_per_cell *
1618 *   GeometryInfo<dim>::max_children_per_face);
1619 *   }
1620 *  
1621 *  
1622 *   template <int dim>
1623 *   GradientEstimation::EstimateScratchData<dim>::EstimateScratchData(
1624 *   const EstimateScratchData &scratch_data)
1625 *   : fe_midpoint_value(scratch_data.fe_midpoint_value.get_fe(),
1626 *   scratch_data.fe_midpoint_value.get_quadrature(),
1627 *   update_values | update_quadrature_points)
1628 *   , solution(scratch_data.solution)
1629 *   , error_per_cell(scratch_data.error_per_cell)
1630 *   , cell_midpoint_value(1)
1631 *   , neighbor_midpoint_value(1)
1632 *   {}
1633 *  
1634 *  
1635 * @endcode
1636 *
1637 * Next comes the implementation of the <code>GradientEstimation</code>
1638 * class. The first function does not do much except delegate work to the
1639 * other function, but there is a bit of setup at the top.
1640 *
1641
1642 *
1643 * Before starting with the work, we check that the vector into
1644 * which the results are written has the right size, using the `Assert`
1645 * macro and the exception class we declared above. Programming
1646 * mistakes in which one forgets to size arguments correctly at the
1647 * calling site are quite common. Because the resulting damage from
1648 * not catching such errors is often subtle (e.g., corruption of
1649 * data somewhere in memory, or non-reproducible results), it is
1650 * well worth the effort to check for such things.
1651 *
1652 * @code
1653 *   template <int dim>
1654 *   void GradientEstimation::estimate(const DoFHandler<dim> &dof_handler,
1655 *   const Vector<double> &solution,
1656 *   Vector<float> &error_per_cell)
1657 *   {
1658 *   Assert(
1659 *   error_per_cell.size() == dof_handler.get_triangulation().n_active_cells(),
1660 *   ExcInvalidVectorLength(error_per_cell.size(),
1661 *   dof_handler.get_triangulation().n_active_cells()));
1662 *  
1663 *   WorkStream::run(dof_handler.begin_active(),
1664 *   dof_handler.end(),
1665 *   &GradientEstimation::template estimate_cell<dim>,
1666 *   std::function<void(const EstimateCopyData &)>(),
1667 *   EstimateScratchData<dim>(dof_handler.get_fe(),
1668 *   solution,
1669 *   error_per_cell),
1670 *   EstimateCopyData());
1671 *   }
1672 *  
1673 *  
1674 * @endcode
1675 *
1676 * Here comes the function that estimates the local error by computing the
1677 * finite difference approximation of the gradient. The function first
1678 * computes the list of active neighbors of the present cell and then
1679 * computes the quantities described in the introduction for each of
1680 * the neighbors. The reason for this order is that it is not a one-liner
1681 * to find a given neighbor with locally refined meshes. In principle, an
1682 * optimized implementation would find neighbors and the quantities
1683 * depending on them in one step, rather than first building a list of
1684 * neighbors and, in a second step, computing their contributions, but we will gladly
1685 * leave this as an exercise. As discussed before, the worker function
1686 * passed to WorkStream::run works on "scratch" objects that keep all
1687 * temporary objects. This way, we do not need to create and initialize
1688 * objects that are expensive to initialize within the function that does
1689 * the work every time it is called for a given cell. Such an argument is
1690 * passed as the second argument. The third argument would be a "copy-data"
1691 * object (see @ref threads for more information) but we do not actually use
1692 * any of these here. Since WorkStream::run() insists on passing three
1693 * arguments, we declare this function with three arguments, but simply
1694 * ignore the last one.
1695 *
1696
1697 *
1698 * (This is unsatisfactory from an aesthetic perspective. It can be avoided
1699 * by using an anonymous (lambda) function. If you allow, let us here show
1700 * how. First, assume that we had declared this function to only take two
1701 * arguments by omitting the unused last one. Now, WorkStream::run still
1702 * wants to call this function with three arguments, so we need to find a
1703 * way to "forget" the third argument in the call. Simply passing
1704 * WorkStream::run the pointer to the function as we do above will not do
1705 * this -- the compiler will complain that a function declared to have two
1706 * arguments is called with three arguments. However, we can do this by
1707 * passing the following as the third argument to WorkStream::run():
1708 * <div class=CodeFragmentInTutorialComment>
1709 * @code
1710 * [](const typename DoFHandler<dim>::active_cell_iterator &cell,
1711 * EstimateScratchData<dim> & scratch_data,
1712 * EstimateCopyData &)
1713 * {
1714 * GradientEstimation::estimate_cell<dim>(cell, scratch_data);
1715 * }
1716 * @endcode
1717 * </div>
1718 * This is not much better than the solution implemented below: either the
1719 * routine itself must take three arguments or it must be wrapped by
1720 * something that takes three arguments. We don't use this since adding the
1721 * unused argument at the end of the declaration is simpler.
1722 *
1723
1724 *
1725 * Now for the details:
1726 *
1727 * @code
1728 *   template <int dim>
1729 *   void GradientEstimation::estimate_cell(
1730 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
1731 *   EstimateScratchData<dim> &scratch_data,
1732 *   const EstimateCopyData &)
1733 *   {
1734 * @endcode
1735 *
1736 * We need space for the tensor <code>Y</code>, which is the sum of
1737 * outer products of the y-vectors.
1738 *
1739 * @code
1740 *   Tensor<2, dim> Y;
1741 *  
1742 * @endcode
1743 *
1744 * First initialize the <code>FEValues</code> object, as well as the
1745 * <code>Y</code> tensor:
1746 *
1747 * @code
1748 *   scratch_data.fe_midpoint_value.reinit(cell);
1749 *  
1750 * @endcode
1751 *
1752 * Now, before we go on, we first compute a list of all active neighbors
1753 * of the present cell. We do so by first looping over all faces and seeing
1754 * whether the neighbor there is active, which would be the case if it
1755 * is on the same level as the present cell or one level coarser (note
1756 * that a neighbor can be at most one level coarser than the present cell,
1757 * as we only allow a maximal difference of one refinement level across a
1758 * face in deal.II). Alternatively, the neighbor could be on the same level
1759 * and be further refined; then we have to find which of its children
1760 * are next to the present cell and select these (note that a child
1761 * of a neighbor that is adjacent to the present active cell is
1762 * necessarily active itself, due to the one-refinement rule
1763 * cited above).
1764 *
1765
1766 *
1767 * Things are slightly different in one space dimension, as there the
1768 * one-refinement rule does not exist: neighboring active cells may
1769 * differ in as many refinement levels as they like. In this case, the
1770 * computation becomes a little more difficult, but we will explain
1771 * this below.
1772 *
1773
1774 *
1775 * Before starting the loop over all neighbors of the present cell, we
1776 * have to clear the array storing the iterators to the active
1777 * neighbors, of course.
1778 *
1779 * @code
1780 *   scratch_data.active_neighbors.clear();
1781 *   for (const auto face_n : cell->face_indices())
1782 *   if (!cell->at_boundary(face_n))
1783 *   {
1784 * @endcode
1785 *
1786 * First define an abbreviation for the iterator to the face and
1787 * the neighbor
1788 *
1789 * @code
1790 *   const auto face = cell->face(face_n);
1791 *   const auto neighbor = cell->neighbor(face_n);
1792 *  
1793 * @endcode
1794 *
1795 * Then check whether the neighbor is active. If it is, then it
1796 * is on the same level or one level coarser (if we are not in
1797 * 1d), and we are interested in it in any case.
1798 *
1799 * @code
1800 *   if (neighbor->is_active())
1801 *   scratch_data.active_neighbors.push_back(neighbor);
1802 *   else
1803 *   {
1804 * @endcode
1805 *
1806 * If the neighbor is not active, then check its children.
1807 *
1808 * @code
1809 *   if (dim == 1)
1810 *   {
1811 * @endcode
1812 *
1813 * To find the child of the neighbor which borders on the
1814 * present cell, successively go to its right child if the
1815 * neighbor lies to the left of the present cell (face_n==0), or to
1816 * its left child if the neighbor lies to the right (face_n==1),
1817 * until we find an active cell.
1818 *
1819 * @code
1820 *   auto neighbor_child = neighbor;
1821 *   while (neighbor_child->has_children())
1822 *   neighbor_child = neighbor_child->child(face_n == 0 ? 1 : 0);
1823 *  
1824 * @endcode
1825 *
1826 * As this used some non-trivial geometrical intuition,
1827 * we might want to check whether we did it right,
1828 * i.e., check whether the neighbor of the cell we found
1829 * is indeed the cell we are presently working
1830 * on. Checks like this are often useful and have
1831 * frequently uncovered errors both in algorithms like
1832 * the line above (where it is simple to involuntarily
1833 * exchange <code>face_n==1</code> for <code>face_n==0</code> or
1834 * the like) and in the library (the assumptions
1835 * underlying the algorithm above could be wrong,
1836 * wrongly documented, or violated due to an error
1837 * in the library). One could in principle remove such
1838 * checks after the program has worked for some time, but it
1839 * might be a good thing to leave them in anyway to catch
1840 * changes in the library or in the algorithm above.
1841 *
1842
1843 *
1844 * Note that if this check fails, then this is certainly
1845 * an error that is irrecoverable and probably qualifies
1846 * as an internal error. We therefore use a predefined
1847 * exception class to throw here.
1848 *
1849 * @code
1850 *   Assert(neighbor_child->neighbor(face_n == 0 ? 1 : 0) == cell,
1851 *   ExcInternalError());
1852 *  
1853 * @endcode
1854 *
1855 * If the check succeeded, we push the active neighbor
1856 * we just found to the stack we keep:
1857 *
1858 * @code
1859 *   scratch_data.active_neighbors.push_back(neighbor_child);
1860 *   }
1861 *   else
1862 * @endcode
1863 *
1864 * If we are not in 1d, we collect all neighbor children
1865 * `behind' the subfaces of the current face and move on:
1866 *
1867 * @code
1868 *   for (unsigned int subface_n = 0; subface_n < face->n_children();
1869 *   ++subface_n)
1870 *   scratch_data.active_neighbors.push_back(
1871 *   cell->neighbor_child_on_subface(face_n, subface_n));
1872 *   }
1873 *   }
1874 *  
1875 * @endcode
1876 *
1877 * OK, now that we have all the neighbors, let's start the computation
1878 * on each of them. First we do some preliminaries: find out about the
1879 * center of the present cell and the solution at this point. The
1880 * latter is obtained as a vector of function values at the quadrature
1881 * points, of which there is only one, of course. Likewise, the
1882 * position of the center is the position of the first (and only)
1883 * quadrature point in real space.
1884 *
1885 * @code
1886 *   const Point<dim> this_center =
1887 *   scratch_data.fe_midpoint_value.quadrature_point(0);
1888 *  
1889 *   scratch_data.fe_midpoint_value.get_function_values(
1890 *   scratch_data.solution, scratch_data.cell_midpoint_value);
1891 *  
1892 * @endcode
1893 *
1894 * Now loop over all active neighbors and collect the data we
1895 * need.
1896 *
1897 * @code
1898 *   Tensor<1, dim> projected_gradient;
1899 *   for (const auto &neighbor : scratch_data.active_neighbors)
1900 *   {
1901 * @endcode
1902 *
1903 * Then get the center of the neighbor cell and the value of the
1904 * finite element function at that point. Note that for this
1905 * information we have to reinitialize the <code>FEValues</code>
1906 * object for the neighbor cell.
1907 *
1908 * @code
1909 *   scratch_data.fe_midpoint_value.reinit(neighbor);
1910 *   const Point<dim> neighbor_center =
1911 *   scratch_data.fe_midpoint_value.quadrature_point(0);
1912 *  
1913 *   scratch_data.fe_midpoint_value.get_function_values(
1914 *   scratch_data.solution, scratch_data.neighbor_midpoint_value);
1915 *  
1916 * @endcode
1917 *
1918 * Compute the vector <code>y</code> connecting the centers of the
1919 * two cells. Note that as opposed to the introduction, we denote
1920 * by <code>y</code> the normalized difference vector, as this is
1921 * the quantity used everywhere in the computations.
1922 *
1923 * @code
1924 *   Tensor<1, dim> y = neighbor_center - this_center;
1925 *   const double distance = y.norm();
1926 *   y /= distance;
1927 *  
1928 * @endcode
1929 *
1930 * Then add up the contribution of this cell to the Y matrix...
1931 *
1932 * @code
1933 *   for (unsigned int i = 0; i < dim; ++i)
1934 *   for (unsigned int j = 0; j < dim; ++j)
1935 *   Y[i][j] += y[i] * y[j];
1936 *  
1937 * @endcode
1938 *
1939 * ... and update the sum of difference quotients:
1940 *
1941 * @code
1942 *   projected_gradient += (scratch_data.neighbor_midpoint_value[0] -
1943 *   scratch_data.cell_midpoint_value[0]) /
1944 *   distance * y;
1945 *   }
1946 *  
1947 * @endcode
1948 *
1949 * Now, after collecting all the information from the neighbors: to be
1950 * able to determine an approximation of the gradient for the present
1951 * cell, the vectors <code>y</code> we have passed over must
1952 * span the whole space, as otherwise we would not have all components of
1953 * the gradient. Whether they do is indicated by the invertibility of the matrix.
1954 *
1955
1956 *
1957 * If the matrix is not invertible, then the present
1958 * cell had an insufficient number of active neighbors. In contrast to
1959 * all previous cases (where we raised exceptions) this is, however,
1960 * not a programming error: it is a runtime error that can happen in
1961 * optimized mode even if it ran well in debug mode, so it is
1962 * reasonable to try to catch this error also in optimized mode. For
1963 * this case, there is the <code>AssertThrow</code> macro: it checks
1964 * the condition like the <code>Assert</code> macro, but not only in
1965 * debug mode; it then outputs an error message, but instead of
1966 * aborting the program as in the case of the <code>Assert</code>
1967 * macro, the exception is thrown using the <code>throw</code> command
1968 * of C++. This way, one has the possibility to catch this error and
1969 * take reasonable countermeasures. One such measure would be to
1970 * refine the grid globally, as the case of insufficient directions
1971 * cannot occur if every cell of the initial grid has been refined at
1972 * least once.
1973 *
1974 * @code
1975 *   AssertThrow(determinant(Y) != 0, ExcInsufficientDirections());
1976 *  
1977 * @endcode
1978 *
1979 * If, on the other hand, the matrix is invertible, then invert it,
1980 * multiply the other quantity with it, and compute the estimated error
1981 * using this quantity and the correct powers of the mesh width:
1982 *
1983 * @code
1984 *   const Tensor<2, dim> Y_inverse = invert(Y);
1985 *  
1986 *   const Tensor<1, dim> gradient = Y_inverse * projected_gradient;
1987 *  
1988 * @endcode
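 *
 * In formulas, and merely restating what the code above and the final
 * statement of this function compute, the approximate gradient and the
 * error indicator for cell @f$K@f$ are
 * @f[
 *   \nabla_h u(x_K) = Y^{-1} \sum_{K'}
 *     \frac{u_h(x_{K'}) - u_h(x_K)}{\|x_{K'} - x_K\|}\, y_{K'},
 *   \qquad
 *   \eta_K = h_K^{1+d/2}\, \|\nabla_h u(x_K)\|,
 * @f]
 * where the sum runs over the active neighbors @f$K'@f$, @f$y_{K'}@f$ is
 * the normalized direction vector computed above, and @f$h_K@f$ is the
 * diameter of cell @f$K@f$.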
1989 *
1990 * The last part of this function is the one where we write into
1991 * the element of the output vector what we have just
1992 * computed. The address of this vector has been stored in the
1993 * scratch data object, and all we have to do is know how to get
1994 * at the correct element inside this vector -- for this, we can simply
1995 * ask the cell we are working on for its active cell index:
1996 *
1997 * @code
1998 *   scratch_data.error_per_cell(cell->active_cell_index()) =
1999 *   (std::pow(cell->diameter(), 1 + 1.0 * dim / 2) * gradient.norm());
2000 *   }
2001 *   } // namespace Step9
2002 *  
2003 *  
2004 * @endcode
2005 *
2006 *
2007 * <a name="step_9-Mainfunction"></a>
2008 * <h3>Main function</h3>
2009 *
2010
2011 *
2012 * The <code>main</code> function is similar to the previous examples. The
2013 * primary difference is that we use MultithreadInfo to set the maximum
2014 * number of threads (see the documentation module @ref threads
2015 * "Parallel computing with multiple processors accessing shared memory"
2016 * for more information). The number of threads used is the minimum of the
2017 * environment variable DEAL_II_NUM_THREADS and the parameter of
2018 * <code>set_thread_limit</code>. If no value is given to
2019 * <code>set_thread_limit</code>, the default value from the Intel Threading
2020 * Building Blocks (TBB) library is used. If the call to
2021 * <code>set_thread_limit</code> is omitted, the number of threads will be
2022 * chosen by TBB independently of DEAL_II_NUM_THREADS.
2023 *
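 * As a small illustrative variation (not what the program below does), one
 * could cap the number of threads at, say, four regardless of the
 * environment by passing an explicit limit:
 *
 * @code
 *   // Hypothetical alternative: never use more than four threads.
 *   MultithreadInfo::set_thread_limit(4);
 * @endcode
 *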
2024 * @code
2025 *   int main()
2026 *   {
2027 *   using namespace dealii;
2028 *   try
2029 *   {
2030 *   MultithreadInfo::set_thread_limit();
2031 *  
2032 *   Step9::AdvectionProblem<2> advection_problem_2d;
2033 *   advection_problem_2d.run();
2034 *   }
2035 *   catch (std::exception &exc)
2036 *   {
2037 *   std::cerr << std::endl
2038 *   << std::endl
2039 *   << "----------------------------------------------------"
2040 *   << std::endl;
2041 *   std::cerr << "Exception on processing: " << std::endl
2042 *   << exc.what() << std::endl
2043 *   << "Aborting!" << std::endl
2044 *   << "----------------------------------------------------"
2045 *   << std::endl;
2046 *   return 1;
2047 *   }
2048 *   catch (...)
2049 *   {
2050 *   std::cerr << std::endl
2051 *   << std::endl
2052 *   << "----------------------------------------------------"
2053 *   << std::endl;
2054 *   std::cerr << "Unknown exception!" << std::endl
2055 *   << "Aborting!" << std::endl
2056 *   << "----------------------------------------------------"
2057 *   << std::endl;
2058 *   return 1;
2059 *   }
2060 *  
2061 *   return 0;
2062 *   }
2063 * @endcode
2064 <a name="step_9-Results"></a><h1>Results</h1>
2065
2066
2067
2068 The results of this program are not particularly spectacular. They
2069 consist of the console output, some grid files, and the solution on
2070 each of these grids. First for the console output:
2071 @code
2072Cycle 0:
2073 Number of active cells: 64
2074 Number of degrees of freedom: 1681
2075 Iterations required for convergence: 298
2076 Max norm of residual: 3.60316e-12
2077Cycle 1:
2078 Number of active cells: 124
2079 Number of degrees of freedom: 3537
2080 Iterations required for convergence: 415
2081 Max norm of residual: 3.70682e-12
2082Cycle 2:
2083 Number of active cells: 247
2084 Number of degrees of freedom: 6734
2085 Iterations required for convergence: 543
2086 Max norm of residual: 7.19716e-13
2087Cycle 3:
2088 Number of active cells: 502
2089 Number of degrees of freedom: 14105
2090 Iterations required for convergence: 666
2091 Max norm of residual: 3.45628e-13
2092Cycle 4:
2093 Number of active cells: 1003
2094 Number of degrees of freedom: 27462
2095 Iterations required for convergence: 1064
2096 Max norm of residual: 1.86495e-13
2097Cycle 5:
2098 Number of active cells: 1993
2099 Number of degrees of freedom: 55044
2100 Iterations required for convergence: 1251
2101 Max norm of residual: 1.28765e-13
2102Cycle 6:
2103 Number of active cells: 3985
2104 Number of degrees of freedom: 108492
2105 Iterations required for convergence: 2035
2106 Max norm of residual: 6.78085e-14
2107Cycle 7:
2108 Number of active cells: 7747
2109 Number of degrees of freedom: 210612
2110 Iterations required for convergence: 2187
2111 Max norm of residual: 2.61457e-14
2112Cycle 8:
2113 Number of active cells: 15067
2114 Number of degrees of freedom: 406907
2115 Iterations required for convergence: 3079
2116 Max norm of residual: 2.9932e-14
2117Cycle 9:
2118 Number of active cells: 29341
2119 Number of degrees of freedom: 780591
2120 Iterations required for convergence: 3913
2121 Max norm of residual: 8.15689e-15
2122 @endcode
2123
2124 Quite a number of cells are used on the finest level to resolve the features of
2125 the solution. Here are the fourth and tenth grids:
2126 <div class="twocolumn" style="width: 80%">
2127 <div>
2128 <img src="https://www.dealii.org/images/steps/developer/step-9-grid-3.png"
2129 alt="Fourth grid in the refinement cycle, showing some adaptivity to features."
2130 width="400" height="400">
2131 </div>
2132 <div>
2133 <img src="https://www.dealii.org/images/steps/developer/step-9-grid-9.png"
2134 alt="Tenth grid in the refinement cycle, showing that the waves are fully captured."
2135 width="400" height="400">
2136 </div>
2137 </div>
2138 and the fourth and tenth solutions:
2139 <div class="twocolumn" style="width: 80%">
2140 <div>
2141 <img src="https://www.dealii.org/images/steps/developer/step-9-solution-3.png"
2142 alt="Fourth solution, showing that we resolve most features but some
 are still unresolved and appear blurry."
2144 width="400" height="400">
2145 </div>
2146 <div>
2147 <img src="https://www.dealii.org/images/steps/developer/step-9-solution-9.png"
2148 alt="Tenth solution, showing a fully resolved flow."
2149 width="400" height="400">
2150 </div>
2151</div>
2152 and both the grid and solution zoomed in:
2153 <div class="twocolumn" style="width: 80%">
2154 <div>
2155 <img src="https://www.dealii.org/images/steps/developer/step-9-solution-3-zoom.png"
2156 alt="Detail of the fourth solution, showing that we resolve most
 features but some are still unresolved and appear blurry. In particular,
2158 the larger cells need to be refined."
2159 width="400" height="400">
2160 </div>
2161 <div>
2162 <img src="https://www.dealii.org/images/steps/developer/step-9-solution-9-zoom.png"
2163 alt="Detail of the tenth solution, showing that we needed a lot more
2164 cells than were present in the fourth solution."
2165 width="400" height="400">
2166 </div>
2167 </div>
2168
2169 The solution consists of a part that is transported along the wiggly
2170 advection field from the left and lower boundaries to the top right, and of
2171 a part that is created by the source in the lower left corner and whose
2172 contribution is then transported along as well. The grid shown above is
2173 well adapted to resolve these features. The comparison between the plots
2174 shows that, even though we are using a high-order approximation, we still
2175 need adaptive mesh refinement to fully resolve the wiggles.
2176 *
2177 *
2178 <a name="step_9-PlainProg"></a>
2179 <h1> The plain program</h1>
2180 @include "step-9.cc"
2181 */