step-67.h
 *   enum LowStorageRungeKuttaScheme
 *   {
 *   stage_3_order_3, /* Kennedy, Carpenter, Lewis, 2000 */
 *   stage_5_order_4, /* Kennedy, Carpenter, Lewis, 2000 */
 *   stage_7_order_4, /* Tselios, Simos, 2007 */
 *   stage_9_order_5, /* Kennedy, Carpenter, Lewis, 2000 */
 *   };
 *   constexpr LowStorageRungeKuttaScheme lsrk_scheme = stage_5_order_4;
 *  
902 * @endcode
903 *
 * Eventually, we select a detail of the spatial discretization, namely the
 * numerical flux (Riemann solver) at the faces between cells. For this
 * program, we have implemented a modified variant of the Lax--Friedrichs
 * flux and the Harten--Lax--van Leer (HLL) flux.
 *
 * @code
 *   enum EulerNumericalFlux
 *   {
 *   lax_friedrichs_modified,
 *   harten_lax_vanleer,
 *   };
 *   constexpr EulerNumericalFlux numerical_flux_type = lax_friedrichs_modified;
916 *  
917 *  
918 *  
919 * @endcode
920 *
921 *
922 * <a name="step_67-Equationdata"></a>
923 * <h3>Equation data</h3>
924 *
925
926 *
 * We now define a class with the exact solution for test case 0 and one
 * with a background flow field for test case 1 (the channel flow). Given that
929 * the Euler equations are a problem with @f$d+2@f$ equations in @f$d@f$ dimensions,
930 * we need to tell the Function base class about the correct number of
931 * components.
932 *
933 * @code
934 *   template <int dim>
935 *   class ExactSolution : public Function<dim>
936 *   {
937 *   public:
938 *   ExactSolution(const double time)
939 *   : Function<dim>(dim + 2, time)
940 *   {}
941 *  
942 *   virtual double value(const Point<dim> &p,
943 *   const unsigned int component = 0) const override;
944 *   };
945 *  
946 *  
947 *  
948 * @endcode
 *
 * As far as the actual function implemented is concerned, the analytical
 * test case is an isentropic vortex case (see e.g. the book by Hesthaven
 * and Warburton, Example 6.1 in Section 6.6 on page 209) which fulfills the
 * Euler equations with zero force term on the right hand side. Given that
 * definition, we return either the density, the momentum, or the energy
 * depending on which component is requested. Note that the original
 * definition of the density involves the @f$\frac{1}{\gamma -1}@f$-th power of
 * some expression. Since `std::pow()` has pretty slow implementations on
 * some systems, we replace it by logarithm followed by exponentiation (of
 * base 2), which is mathematically equivalent but usually much better
 * optimized. This formula might lose accuracy in the last digits
 * for very small numbers compared to `std::pow()`, but we are happy with
 * it anyway, since small numbers map to data close to 1.
963 *
964
965 *
966 * For the channel test case, we simply select a density of 1, a velocity of
967 * 0.4 in @f$x@f$ direction and zero in the other directions, and an energy that
968 * corresponds to a speed of sound of 1.3 measured against the background
969 * velocity field, computed from the relation @f$E = \frac{c^2}{\gamma (\gamma
970 * -1)} + \frac 12 \rho \|u\|^2@f$.
971 *
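 * As a quick sanity check of the magic constant 3.097857142857143 returned
 * for the energy below, one can evaluate the formula above with the stated
 * numbers (a verification of our own, not part of the original program; it
 * relies on the value @f$\gamma = 1.4@f$ used by this program):
 *
 * @code
 *   // using the program's constant gamma = 1.4:
 *   constexpr double c   = 1.3; // speed of sound
 *   constexpr double rho = 1.;  // density
 *   constexpr double u   = 0.4; // velocity in x direction
 *
 *   // E = c^2 / (gamma (gamma-1)) + 1/2 rho |u|^2
 *   //   = 1.69 / 0.56 + 0.08 = 3.097857142857143
 *   constexpr double E = c * c / (gamma * (gamma - 1.)) + 0.5 * rho * u * u;
 * @endcode
 *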
972 * @code
 *   template <int dim>
 *   double ExactSolution<dim>::value(const Point<dim> &x,
 *   const unsigned int component) const
976 *   {
977 *   const double t = this->get_time();
978 *  
979 *   switch (testcase)
980 *   {
981 *   case 0:
982 *   {
983 *   Assert(dim == 2, ExcNotImplemented());
984 *   const double beta = 5;
985 *  
986 *   Point<dim> x0;
987 *   x0[0] = 5.;
988 *   const double radius_sqr =
989 *   (x - x0).norm_square() - 2. * (x[0] - x0[0]) * t + t * t;
990 *   const double factor =
991 *   beta / (numbers::PI * 2) * std::exp(1. - radius_sqr);
992 *   const double density_log = std::log2(
993 *   std::abs(1. - (gamma - 1.) / gamma * 0.25 * factor * factor));
994 *   const double density = std::exp2(density_log * (1. / (gamma - 1.)));
995 *   const double u = 1. - factor * (x[1] - x0[1]);
996 *   const double v = factor * (x[0] - t - x0[0]);
997 *  
998 *   if (component == 0)
999 *   return density;
1000 *   else if (component == 1)
1001 *   return density * u;
1002 *   else if (component == 2)
1003 *   return density * v;
1004 *   else
1005 *   {
1006 *   const double pressure =
1007 *   std::exp2(density_log * (gamma / (gamma - 1.)));
1008 *   return pressure / (gamma - 1.) +
1009 *   0.5 * (density * u * u + density * v * v);
1010 *   }
1011 *   }
1012 *  
1013 *   case 1:
1014 *   {
1015 *   if (component == 0)
1016 *   return 1.;
1017 *   else if (component == 1)
1018 *   return 0.4;
1019 *   else if (component == dim + 1)
1020 *   return 3.097857142857143;
1021 *   else
1022 *   return 0.;
1023 *   }
1024 *  
 *   default:
 *   Assert(false, ExcNotImplemented());
 *   return 0.;
1028 *   }
1029 *   }
1030 *  
1031 *  
1032 *  
1033 * @endcode
1034 *
1035 *
1036 * <a name="step_67-LowstorageexplicitRungeKuttatimeintegrators"></a>
1037 * <h3>Low-storage explicit Runge--Kutta time integrators</h3>
1038 *
1039
1040 *
 * The next few lines implement a few low-storage variants of Runge--Kutta
 * methods. These methods have specific Butcher tableaux with coefficients
 * @f$b_i@f$ and @f$a_i@f$ as shown in the introduction. As usual for Runge--Kutta
 * methods, we can deduce time steps, @f$c_i = \sum_{j=1}^{i-2} b_j + a_{i-1}@f$
 * from those coefficients. The main advantage of this kind of scheme is the
 * fact that only two vectors are needed per stage, namely the accumulated
 * part of the solution @f$\mathbf{w}@f$ (that will hold the solution
 * @f$\mathbf{w}^{n+1}@f$ at the new time @f$t^{n+1}@f$ after the last stage), the
 * update vector @f$\mathbf{r}_i@f$ that gets evaluated during the stages, plus
 * one vector @f$\mathbf{k}_i@f$ to hold the evaluation of the operator. Such a
 * Runge--Kutta setup reduces the memory storage and memory access. As the
 * memory bandwidth is often the performance-limiting factor on modern
 * hardware when the evaluation of the differential operator is
 * well-optimized, performance can be improved over standard time
 * integrators. This is true also when taking into account that a
 * conventional Runge--Kutta scheme might allow for slightly larger time
 * steps as more free parameters allow for better stability properties.
1058 *
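 * To make the @f$c_i@f$ formula concrete, these coefficients could be computed
 * from given @f$a_i@f$ and @f$b_i@f$ by a small helper like the following (an
 * illustrative sketch of our own; the class below instead obtains all three
 * coefficient vectors readily from deal.II's TimeStepping facilities):
 *
 * @code
 *   // 1-based: c_1 = 0 and c_i = sum_{j=1}^{i-2} b_j + a_{i-1};
 *   // 0-based arrays below, with `ai` holding one entry less than `bi`.
 *   std::vector<double> compute_ci(const std::vector<double> &ai,
 *   const std::vector<double> &bi)
 *   {
 *   std::vector<double> ci(bi.size(), 0.);
 *   for (unsigned int i = 1; i < bi.size(); ++i)
 *   {
 *   double sum = ai[i - 1];
 *   for (unsigned int j = 0; j + 1 < i; ++j)
 *   sum += bi[j];
 *   ci[i] = sum;
 *   }
 *   return ci;
 *   }
 * @endcode
 *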
1059
1060 *
 * In this tutorial program, we concentrate on a few variants of
 * low-storage schemes defined in the article by Kennedy, Carpenter, and
 * Lewis (2000), as well as one variant described by Tselios and Simos
 * (2007). There is a large series of other schemes available, which could
 * be addressed by additional sets of coefficients or slightly different
 * update formulas.
1067 *
1068
1069 *
 * We define a single class for the four integrators, distinguished by the
 * enum described above. To each scheme, we then fill the vectors for the
 * @f$b_i@f$ and @f$a_i@f$ to the given variables in the class.
1073 *
 * @code
 *   class LowStorageRungeKuttaIntegrator
 *   {
 *   public:
 *   LowStorageRungeKuttaIntegrator(const LowStorageRungeKuttaScheme scheme)
 *   {
 *   TimeStepping::runge_kutta_method lsrk;
 * @endcode
1082 *
 * First comes the three-stage scheme of order three by Kennedy et al.
 * (2000). While its stability region is significantly smaller than for
 * the other schemes, it only involves three stages, so it is very
 * competitive in terms of the work per stage.
1087 *
1088 * @code
1089 *   switch (scheme)
1090 *   {
 *   case stage_3_order_3:
 *   {
 *   lsrk = TimeStepping::LOW_STORAGE_RK_STAGE3_ORDER3;
 *   break;
 *   }
1096 *  
1097 * @endcode
1098 *
1099 * The next scheme is a five-stage scheme of order four, again
1100 * defined in the paper by Kennedy et al. (2000).
1101 *
1102 * @code
 *   case stage_5_order_4:
 *   {
 *   lsrk = TimeStepping::LOW_STORAGE_RK_STAGE5_ORDER4;
 *   break;
 *   }
1108 *  
1109 * @endcode
1110 *
 * The following scheme of seven stages and order four has been
 * explicitly derived for acoustics problems. It is a balance of
 * accuracy for imaginary eigenvalues among fourth order schemes,
 * combined with a large stability region. Since DG schemes are
 * dissipative among the highest frequencies, this does not
 * necessarily translate to the highest possible time step per
 * stage. In the context of the present tutorial program, the
 * numerical flux plays a crucial role in the dissipation and thus
 * also the maximal stable time step size. For the modified
 * Lax--Friedrichs flux, this scheme is similar to the
 * `stage_5_order_4` scheme in terms of step size per stage if only
 * stability is considered, but somewhat less efficient for the HLL
 * flux.
1124 *
1125 * @code
 *   case stage_7_order_4:
 *   {
 *   lsrk = TimeStepping::LOW_STORAGE_RK_STAGE7_ORDER4;
 *   break;
 *   }
1131 *  
1132 * @endcode
1133 *
 * The last scheme included here is the nine-stage scheme of order
 * five by Kennedy et al. (2000). It is the most accurate among
 * the schemes used here, but the higher order of accuracy
 * sacrifices some stability, so the step length normalized per
 * stage is less than for the fourth order schemes.
1139 *
1140 * @code
 *   case stage_9_order_5:
 *   {
 *   lsrk = TimeStepping::LOW_STORAGE_RK_STAGE9_ORDER5;
 *   break;
 *   }
1146 *  
1147 *   default:
1148 *   AssertThrow(false, ExcNotImplemented());
 *   }
 *  
 *   TimeStepping::LowStorageRungeKutta<
 *   LinearAlgebra::distributed::Vector<Number>>
 *   rk_integrator(lsrk);
 *   rk_integrator.get_coefficients(ai, bi, ci);
 *   }
1155 *  
1156 *   unsigned int n_stages() const
1157 *   {
1158 *   return bi.size();
1159 *   }
1160 *  
1161 * @endcode
1162 *
1163 * The main function of the time integrator is to go through the stages,
1164 * evaluate the operator, prepare the @f$\mathbf{r}_i@f$ vector for the next
 * evaluation, and update the solution vector @f$\mathbf{w}@f$. We hand off
 * the work to the `pde_operator` involved in order to be able to merge
 * the vector operations of the Runge--Kutta setup with the evaluation of
 * the differential operator for better performance, so all we do here is
 * to delegate the vectors and coefficients.
1170 *
1171
1172 *
1173 * We separately call the operator for the first stage because we need
1174 * slightly modified arguments there: We evaluate the solution from
1175 * the old solution @f$\mathbf{w}^n@f$ rather than a @f$\mathbf r_i@f$ vector, so
 * the first argument is `solution`. We here let the stage vector
 * @f$\mathbf{r}_i@f$ hold the intermediate result as it
 * is not used otherwise. For all subsequent stages, we use the vector
 * `vec_ki` as the second vector argument to store the result of the
 * operator evaluation. Finally, when we are at the last stage, we must
 * skip the computation of the vector @f$\mathbf{r}_{s+1}@f$ as there is no
 * coefficient @f$a_s@f$ available (nor will it be used).
 *
1184 * @code
 *   template <typename VectorType, typename Operator>
 *   void perform_time_step(const Operator &pde_operator,
 *   const double current_time,
1188 *   const double time_step,
1189 *   VectorType &solution,
1190 *   VectorType &vec_ri,
1191 *   VectorType &vec_ki) const
1192 *   {
1193 *   AssertDimension(ai.size() + 1, bi.size());
1194 *  
1195 *   pde_operator.perform_stage(current_time,
1196 *   bi[0] * time_step,
1197 *   ai[0] * time_step,
1198 *   solution,
1199 *   vec_ri,
1200 *   solution,
1201 *   vec_ri);
1202 *  
1203 *   for (unsigned int stage = 1; stage < bi.size(); ++stage)
1204 *   {
1205 *   const double c_i = ci[stage];
1206 *   pde_operator.perform_stage(current_time + c_i * time_step,
1207 *   bi[stage] * time_step,
1208 *   (stage == bi.size() - 1 ?
1209 *   0 :
1210 *   ai[stage] * time_step),
1211 *   vec_ri,
1212 *   vec_ki,
1213 *   solution,
1214 *   vec_ri);
1215 *   }
1216 *   }
1217 *  
1218 *   private:
1219 *   std::vector<double> bi;
1220 *   std::vector<double> ai;
1221 *   std::vector<double> ci;
1222 *   };
1223 *  
1224 *  
1225 *  
1226 * @endcode
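 *
 * To illustrate how this class is meant to be driven (a sketch assuming a
 * fixed, precomputed `time_step` and an already set up `euler_operator`;
 * the actual time loop with its adaptive step size selection lives in
 * EulerProblem::run() further down, and `final_time` is a placeholder):
 *
 * @code
 *   const LowStorageRungeKuttaIntegrator integrator(lsrk_scheme);
 *
 *   LinearAlgebra::distributed::Vector<Number> solution, vec_ri, vec_ki;
 *   euler_operator.initialize_vector(solution);
 *   euler_operator.initialize_vector(vec_ri);
 *   euler_operator.initialize_vector(vec_ki);
 *
 *   double time = 0.;
 *   while (time < final_time) // final_time: hypothetical end time
 *   {
 *   integrator.perform_time_step(
 *   euler_operator, time, time_step, solution, vec_ri, vec_ki);
 *   time += time_step;
 *   }
 * @endcode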
1227 *
1228 *
1229 * <a name="step_67-ImplementationofpointwiseoperationsoftheEulerequations"></a>
1230 * <h3>Implementation of point-wise operations of the Euler equations</h3>
1231 *
1232
1233 *
 * In the following functions, we implement the various problem-specific
 * operators pertaining to the Euler equations. Each function acts on the
 * vector of conserved variables @f$[\rho, \rho\mathbf{u}, E]@f$ that we hold in
 * the solution vectors, and computes various derived quantities.
1238 *
1239
1240 *
 * In the following functions and evaluators, we decorate the small helper
 * functions with the keyword `DEAL_II_ALWAYS_INLINE`. This macro maps to a
 * compiler-specific keyword that tells the compiler to never create a
 * function call for any of those functions, and instead move the
 * implementation <a
 * href="https://en.wikipedia.org/wiki/Inline_function">inline</a> to where
 * they are called. This is critical for performance because we call into
 * some of these functions millions or billions of times: For example, we
 * use the velocity both for the computation of the flux further down and
 * for the computation of the pressure, and both of these places are
 * evaluated at every quadrature point of every cell. Making sure these
 * functions are inlined ensures not only that the processor does not have
 * to execute a jump instruction into the function (and the corresponding
 * return jump), but also that the compiler can re-use intermediate
 * information from one function's
 * context in code that comes after the place where the function was called.
1258 * (We note that compilers are generally quite good at figuring out which
1259 * functions to inline by themselves. Here is a place where compilers may or
1260 * may not have figured it out by themselves but where we know for sure that
1261 * inlining is a win.)
1262 *
1263
1264 *
1265 * Another trick we apply is a separate variable for the inverse density
1266 * @f$\frac{1}{\rho}@f$. This enables the compiler to only perform a single
1267 * division for the flux, despite the division being used at several
1268 * places. As divisions are around ten to twenty times as expensive as
1269 * multiplications or additions, avoiding redundant divisions is crucial for
1270 * performance. We note that taking the inverse first and later multiplying
1271 * with it is not equivalent to a division in floating point arithmetic due
 * to roundoff effects, so the compiler is not allowed to exchange the one
 * for the other with standard optimization flags. However, it is also not
1274 * particularly difficult to write the code in the right way.
1275 *
1276
1277 *
 * To summarize, the chosen strategy of always inlining and careful
 * definition of expensive arithmetic operations allows us to write compact
 * code without passing all intermediate results around, while making sure
 * that the code maps to excellent machine code.
1282 *
1283 * @code
1284 *   template <int dim, typename Number>
1285 *   inline DEAL_II_ALWAYS_INLINE
1286 *   Tensor<1, dim, Number>
1287 *   euler_velocity(const Tensor<1, dim + 2, Number> &conserved_variables)
1288 *   {
1289 *   const Number inverse_density = Number(1.) / conserved_variables[0];
1290 *  
1291 *   Tensor<1, dim, Number> velocity;
1292 *   for (unsigned int d = 0; d < dim; ++d)
1293 *   velocity[d] = conserved_variables[1 + d] * inverse_density;
1294 *  
1295 *   return velocity;
1296 *   }
1297 *  
1298 * @endcode
1299 *
1300 * The next function computes the pressure from the vector of conserved
1301 * variables, using the formula @f$p = (\gamma - 1) \left(E - \frac 12 \rho
1302 * \mathbf{u}\cdot \mathbf{u}\right)@f$. As explained above, we use the
1303 * velocity from the `euler_velocity()` function. Note that we need to
1304 * specify the first template argument `dim` here because the compiler is
1305 * not able to deduce it from the arguments of the tensor, whereas the
1306 * second argument (number type) can be automatically deduced.
1307 *
1308 * @code
1309 *   template <int dim, typename Number>
1310 *   inline DEAL_II_ALWAYS_INLINE
1311 *   Number
1312 *   euler_pressure(const Tensor<1, dim + 2, Number> &conserved_variables)
1313 *   {
1314 *   const Tensor<1, dim, Number> velocity =
1315 *   euler_velocity<dim>(conserved_variables);
1316 *  
1317 *   Number rho_u_dot_u = conserved_variables[1] * velocity[0];
1318 *   for (unsigned int d = 1; d < dim; ++d)
1319 *   rho_u_dot_u += conserved_variables[1 + d] * velocity[d];
1320 *  
1321 *   return (gamma - 1.) * (conserved_variables[dim + 1] - 0.5 * rho_u_dot_u);
1322 *   }
1323 *  
1324 * @endcode
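 *
 * As a small plausibility check connecting these helpers to the channel
 * test case above (our own example, not part of the original program):
 * for the 2d state @f$[\rho, \rho u, \rho v, E] = [1, 0.4, 0,
 * 3.097857142857143]@f$ with plain `double` numbers,
 *
 * @code
 *   Tensor<1, 4, double> w;
 *   w[0] = 1.;                // rho
 *   w[1] = 0.4;               // rho u
 *   w[2] = 0.;                // rho v
 *   w[3] = 3.097857142857143; // E
 *
 *   const Tensor<1, 2, double> velocity = euler_velocity<2>(w); // (0.4, 0)
 *   const double pressure = euler_pressure<2>(w); // 1.2071428571428573
 * @endcode
 *
 * and the speed of sound @f$\sqrt{\gamma p/\rho} = \sqrt{1.69} = 1.3@f$ indeed
 * matches the value quoted for test case 1.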
1325 *
1326 * Here is the definition of the Euler flux function, i.e., the definition
1327 * of the actual equation. Given the velocity and pressure (that the
1328 * compiler optimization will make sure are done only once), this is
1329 * straight-forward given the equation stated in the introduction.
1330 *
1331 * @code
1332 *   template <int dim, typename Number>
1333 *   inline DEAL_II_ALWAYS_INLINE
1334 *   Tensor<1, dim + 2, Tensor<1, dim, Number>>
1335 *   euler_flux(const Tensor<1, dim + 2, Number> &conserved_variables)
1336 *   {
1337 *   const Tensor<1, dim, Number> velocity =
1338 *   euler_velocity<dim>(conserved_variables);
1339 *   const Number pressure = euler_pressure<dim>(conserved_variables);
1340 *  
1341 *   Tensor<1, dim + 2, Tensor<1, dim, Number>> flux;
1342 *   for (unsigned int d = 0; d < dim; ++d)
1343 *   {
1344 *   flux[0][d] = conserved_variables[1 + d];
1345 *   for (unsigned int e = 0; e < dim; ++e)
1346 *   flux[e + 1][d] = conserved_variables[e + 1] * velocity[d];
1347 *   flux[d + 1][d] += pressure;
1348 *   flux[dim + 1][d] =
1349 *   velocity[d] * (conserved_variables[dim + 1] + pressure);
1350 *   }
1351 *  
1352 *   return flux;
1353 *   }
1354 *  
1355 * @endcode
1356 *
1357 * This next function is a helper to simplify the implementation of the
1358 * numerical flux, implementing the action of a tensor of tensors (with
1359 * non-standard outer dimension of size `dim + 2`, so the standard overloads
1360 * provided by deal.II's tensor classes do not apply here) with another
1361 * tensor of the same inner dimension, i.e., a matrix-vector product.
1362 *
1363 * @code
 *   template <int n_components, int dim, typename Number>
 *   inline DEAL_II_ALWAYS_INLINE
 *   Tensor<1, n_components, Number>
 *   operator*(const Tensor<1, n_components, Tensor<1, dim, Number>> &matrix,
 *   const Tensor<1, dim, Number> &vector)
 *   {
 *   Tensor<1, n_components, Number> result;
 *   for (unsigned int d = 0; d < n_components; ++d)
1372 *   result[d] = matrix[d] * vector;
1373 *   return result;
1374 *   }
1375 *  
1376 * @endcode
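 *
 * With this overload, the numerical flux function below can compute the
 * normal flux in a single expression; schematically (names as used in the
 * functions that follow):
 *
 * @code
 *   const auto flux_m = euler_flux<dim>(u_m);   // dim+2 rows of dim entries
 *   const auto n      = phi_m.normal_vector(q); // Tensor<1, dim, Number>
 *   const auto flux_m_times_n = flux_m * n;     // Tensor<1, dim+2, Number>
 * @endcode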
1377 *
 * This function implements the numerical flux (Riemann solver). It gets the
 * state from the two sides of an interface and the normal vector, oriented
 * from the side of the solution @f$\mathbf{w}^-@f$ towards the solution
 * @f$\mathbf{w}^+@f$. In finite volume methods which rely on piece-wise
 * constant data, the numerical flux is the central ingredient as it is the
 * only place where the physical information is exchanged. In DG methods, the
 * numerical flux is less central due to the polynomials within the elements
 * and the physical flux used there. As a result of higher-degree
 * interpolation with consistent values from both sides in the limit of a
 * continuous solution, the numerical flux can be seen as a control of the
 * jump of the solution from both sides to weakly impose continuity. It is
 * important to realize that a numerical flux alone cannot stabilize a
 * high-order DG method in the presence of shocks, and thus any DG method
 * must be combined with further shock-capturing techniques to handle those
 * cases. In this tutorial, we focus on wave-like solutions of the Euler
 * equations in the subsonic regime without strong discontinuities where our
 * basic scheme is sufficient.
1395 *
1396
1397 *
 * Apart from its role for spatial accuracy, the numerical flux also
 * determines the numerical dissipation of the scheme and thus the time step
 * size with explicit Runge--Kutta methods. We consider two choices, a
 * modified variant of the local Lax--Friedrichs flux and the widely used
 * Harten--Lax--van Leer (HLL) flux, both of which approximately solve the
 * Riemann problem posed at the interface by the two states and the
 * Euler flux.
1405 *
1406
1407 *
 * For the local Lax--Friedrichs flux, the definition is @f$\hat{\mathbf{F}}
 * =\frac{\mathbf{F}(\mathbf{w}^-)+\mathbf{F}(\mathbf{w}^+)}{2} +
 * \frac{\lambda}{2}\left[\mathbf{w}^--\mathbf{w}^+\right]\otimes
 * \mathbf{n^-}@f$, where the factor @f$\lambda =
 * \max\left(\|\mathbf{u}^-\|+c^-, \|\mathbf{u}^+\|+c^+\right)@f$ gives the
 * maximal wave speed and @f$c = \sqrt{\gamma p / \rho}@f$ is the speed of
 * sound. Here, we choose two modifications of that expression for reasons
 * of computational efficiency, given the small impact of the flux on the
 * solution. For the above definition of the factor @f$\lambda@f$, we would need
 * to take four square roots, two for the two velocity norms and two for the
 * speed of sound on either side. The first modification is hence to rather
 * use @f$\sqrt{\|\mathbf{u}\|^2+c^2}@f$ as an estimate of the maximal speed
 * (which is at most a factor of 2 away from the actual maximum, as shown in
 * the introduction). This allows us to pull the square root out of the
 * maximum and get away with a single square root computation. The second
 * modification is to further relax on the parameter @f$\lambda@f$---the smaller
 * it is, the smaller the dissipation factor (which is multiplied by the
 * jump in @f$\mathbf{w}@f$, which might result in a smaller or bigger
 * dissipation in the end). This allows us to fit the spectrum into the
 * stability region of the explicit Runge--Kutta integrator with bigger time
 * steps. However, we cannot make dissipation too small because otherwise
 * imaginary eigenvalues grow larger. Finally, the current conservative
 * formulation is not energy-stable in the limit of @f$\lambda\to 0@f$ as it is
 * not skew-symmetric, and would need additional measures such as split-form
 * DG schemes in that case.
1433 *
1434
1435 *
 * For the HLL flux, we follow the formula from the literature, introducing
 * an additional weighting of the two states from the Lax--Friedrichs flux
 * by two signal velocities @f$s^-@f$ and @f$s^+@f$. These are estimated from
 * the wave speeds of
 * the Euler equations in terms of the current direction of velocity and
 * sound speed, rather than being additional free
 * parameters.
1443 *
1444
1445 *
 * Since the numerical flux is multiplied by the normal vector in the weak
 * form, we multiply the result by the normal vector for all terms in the
 * equation. In these multiplications, the `operator*` defined above enables
 * a compact notation similar to the mathematical definition.
 *
1451
1452 *
 * In this and the following functions, we use variable suffixes `_m` and
 * `_p` to indicate quantities derived from @f$\mathbf{w}^-@f$ and
 * @f$\mathbf{w}^+@f$, i.e., values "here" and "there" relative to the current
 * cell when looking at a neighbor cell.
1457 *
1458 * @code
 *   template <int dim, typename Number>
 *   inline DEAL_II_ALWAYS_INLINE
 *   Tensor<1, dim + 2, Number>
 *   euler_numerical_flux(const Tensor<1, dim + 2, Number> &u_m,
 *   const Tensor<1, dim + 2, Number> &u_p,
 *   const Tensor<1, dim, Number> &normal)
 *   {
1466 *   const auto velocity_m = euler_velocity<dim>(u_m);
1467 *   const auto velocity_p = euler_velocity<dim>(u_p);
1468 *  
1469 *   const auto pressure_m = euler_pressure<dim>(u_m);
1470 *   const auto pressure_p = euler_pressure<dim>(u_p);
1471 *  
1472 *   const auto flux_m = euler_flux<dim>(u_m);
1473 *   const auto flux_p = euler_flux<dim>(u_p);
1474 *  
1475 *   switch (numerical_flux_type)
1476 *   {
 *   case lax_friedrichs_modified:
 *   {
 *   const auto lambda =
 *   0.5 * std::sqrt(std::max(velocity_p.norm_square() +
 *   gamma * pressure_p * (1. / u_p[0]),
 *   velocity_m.norm_square() +
 *   gamma * pressure_m * (1. / u_m[0])));
1484 *  
1485 *   return 0.5 * (flux_m * normal + flux_p * normal) +
1486 *   0.5 * lambda * (u_m - u_p);
1487 *   }
1488 *  
1489 *   case harten_lax_vanleer:
1490 *   {
1491 *   const auto avg_velocity_normal =
1492 *   0.5 * ((velocity_m + velocity_p) * normal);
1493 *   const auto avg_c = std::sqrt(std::abs(
1494 *   0.5 * gamma *
1495 *   (pressure_p * (1. / u_p[0]) + pressure_m * (1. / u_m[0]))));
1496 *   const Number s_pos =
1497 *   std::max(Number(), avg_velocity_normal + avg_c);
1498 *   const Number s_neg =
1499 *   std::min(Number(), avg_velocity_normal - avg_c);
1500 *   const Number inverse_s = Number(1.) / (s_pos - s_neg);
1501 *  
1502 *   return inverse_s *
1503 *   ((s_pos * (flux_m * normal) - s_neg * (flux_p * normal)) -
1504 *   s_pos * s_neg * (u_m - u_p));
1505 *   }
1506 *  
 *   default:
 *   {
 *   Assert(false, ExcNotImplemented());
 *   return {};
 *   }
1512 *   }
1513 *   }
1514 *  
1515 *  
1516 *  
1517 * @endcode
1518 *
 * This and the next function are helper functions to provide compact
 * evaluation calls as multiple points get batched together via a
 * VectorizedArray argument (see the @ref step_37 "step-37" tutorial for details). This
1522 * function is used for the subsonic outflow boundary conditions where we
1523 * need to set the energy component to a prescribed value. The next one
1524 * requests the solution on all components and is used for inflow boundaries
1525 * where all components of the solution are set.
1526 *
1527 * @code
 *   template <int dim, typename Number>
 *   VectorizedArray<Number>
 *   evaluate_function(const Function<dim> &function,
 *   const Point<dim, VectorizedArray<Number>> &p_vectorized,
 *   const unsigned int component)
 *   {
 *   VectorizedArray<Number> result;
 *   for (unsigned int v = 0; v < VectorizedArray<Number>::size(); ++v)
1536 *   {
1537 *   Point<dim> p;
1538 *   for (unsigned int d = 0; d < dim; ++d)
1539 *   p[d] = p_vectorized[d][v];
1540 *   result[v] = function.value(p, component);
1541 *   }
1542 *   return result;
1543 *   }
1544 *  
1545 *  
 *   template <int dim, typename Number, int n_components = dim + 2>
 *   Tensor<1, n_components, VectorizedArray<Number>>
 *   evaluate_function(const Function<dim> &function,
 *   const Point<dim, VectorizedArray<Number>> &p_vectorized)
 *   {
 *   AssertDimension(function.n_components, n_components);
 *   Tensor<1, n_components, VectorizedArray<Number>> result;
 *   for (unsigned int v = 0; v < VectorizedArray<Number>::size(); ++v)
1554 *   {
1555 *   Point<dim> p;
1556 *   for (unsigned int d = 0; d < dim; ++d)
1557 *   p[d] = p_vectorized[d][v];
1558 *   for (unsigned int d = 0; d < n_components; ++d)
1559 *   result[d][v] = function.value(p, d);
1560 *   }
1561 *   return result;
1562 *   }
1563 *  
1564 *  
1565 *  
1566 * @endcode
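 *
 * Both helpers are used in the boundary-face evaluation further down;
 * schematically (with `phi` an FEFaceEvaluation object and `q` a quadrature
 * point index):
 *
 * @code
 *   // all dim+2 solution components, e.g. at an inflow boundary:
 *   const auto w_dirichlet =
 *   evaluate_function(*inflow_function, phi.quadrature_point(q));
 *
 *   // a single component, e.g. the energy at a subsonic outflow boundary:
 *   const auto energy =
 *   evaluate_function(*outflow_energy, phi.quadrature_point(q), dim + 1);
 * @endcode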
1567 *
1568 *
 * <a name="step_67-TheEulerOperationclass"></a>
 * <h3>The EulerOperator class</h3>
1571 *
1572
1573 *
1575 * the `LaplaceOperator` class of @ref step_37 "step-37" or @ref step_59 "step-59". Since the present
1576 * operator is non-linear and does not require a matrix interface (to be
1579 * function as well as the combination of `apply` with the required vector
1580 * updates for the low-storage Runge--Kutta time integrator mentioned above
1582 * functions involving matrix-free routines, namely one to compute an
1583 * estimate of the time step scaling (that is combined with the Courant
1584 * number for the actual time step size) based on the velocity and speed of
1585 * sound in the elements, one for the projection of solutions (specializing
1586 * VectorTools::project() for the DG case), and one to compute the errors
1587 * against a possible analytical solution or norms against some background
1588 * state.
1589 *
1590
1591 *
 * The rest of the class is similar to other matrix-free tutorials. As
 * discussed in the introduction, we provide a few functions to allow a user
 * to pass in various forms of boundary conditions on different parts of the
 * domain boundary marked by types::boundary_id variables, as well as
 * possible body forces.
 *
1598 * @code
 *   template <int dim, int degree, int n_points_1d>
 *   class EulerOperator
 *   {
 *   public:
 *   static constexpr unsigned int n_quadrature_points_1d = n_points_1d;
 *  
 *   EulerOperator(TimerOutput &timer_output);
 *  
 *   void reinit(const Mapping<dim> &mapping,
 *   const DoFHandler<dim> &dof_handler);
 *  
 *   void set_inflow_boundary(const types::boundary_id boundary_id,
 *   std::unique_ptr<Function<dim>> inflow_function);
 *  
 *   void set_subsonic_outflow_boundary(
 *   const types::boundary_id boundary_id,
 *   std::unique_ptr<Function<dim>> outflow_energy);
 *  
 *   void set_wall_boundary(const types::boundary_id boundary_id);
 *  
 *   void set_body_force(std::unique_ptr<Function<dim>> body_force);
 *  
 *   void apply(const double current_time,
 *   const LinearAlgebra::distributed::Vector<Number> &src,
 *   LinearAlgebra::distributed::Vector<Number> &dst) const;
 *  
 *   void
 *   perform_stage(const Number cur_time,
 *   const Number factor_solution,
 *   const Number factor_ai,
 *   const LinearAlgebra::distributed::Vector<Number> &current_ri,
 *   LinearAlgebra::distributed::Vector<Number> &vec_ki,
 *   LinearAlgebra::distributed::Vector<Number> &solution,
 *   LinearAlgebra::distributed::Vector<Number> &next_ri) const;
 *  
 *   void project(const Function<dim> &function,
 *   LinearAlgebra::distributed::Vector<Number> &solution) const;
 *  
 *   std::array<double, 3> compute_errors(
 *   const Function<dim> &function,
 *   const LinearAlgebra::distributed::Vector<Number> &solution) const;
 *  
 *   double compute_cell_transport_speed(
 *   const LinearAlgebra::distributed::Vector<Number> &solution) const;
 *  
 *   void
 *   initialize_vector(LinearAlgebra::distributed::Vector<Number> &vector) const;
 *  
 *   private:
 *   MatrixFree<dim, Number> data;
 *  
 *   TimerOutput &timer;
 *  
 *   std::map<types::boundary_id, std::unique_ptr<Function<dim>>>
 *   inflow_boundaries;
 *   std::map<types::boundary_id, std::unique_ptr<Function<dim>>>
 *   subsonic_outflow_boundaries;
 *   std::set<types::boundary_id> wall_boundaries;
 *   std::unique_ptr<Function<dim>> body_force;
 *  
 *   void local_apply_inverse_mass_matrix(
 *   const MatrixFree<dim, Number> &data,
 *   LinearAlgebra::distributed::Vector<Number> &dst,
 *   const LinearAlgebra::distributed::Vector<Number> &src,
 *   const std::pair<unsigned int, unsigned int> &cell_range) const;
 *  
 *   void local_apply_cell(
 *   const MatrixFree<dim, Number> &data,
 *   LinearAlgebra::distributed::Vector<Number> &dst,
 *   const LinearAlgebra::distributed::Vector<Number> &src,
 *   const std::pair<unsigned int, unsigned int> &cell_range) const;
 *  
 *   void local_apply_face(
 *   const MatrixFree<dim, Number> &data,
 *   LinearAlgebra::distributed::Vector<Number> &dst,
 *   const LinearAlgebra::distributed::Vector<Number> &src,
 *   const std::pair<unsigned int, unsigned int> &face_range) const;
 *  
 *   void local_apply_boundary_face(
 *   const MatrixFree<dim, Number> &data,
 *   LinearAlgebra::distributed::Vector<Number> &dst,
 *   const LinearAlgebra::distributed::Vector<Number> &src,
 *   const std::pair<unsigned int, unsigned int> &face_range) const;
 *   };
1683 *  
1684 *  
1685 *  
 *   template <int dim, int degree, int n_points_1d>
 *   EulerOperator<dim, degree, n_points_1d>::EulerOperator(TimerOutput &timer)
 *   : timer(timer)
 *   {}
1690 *  
1691 *  
1692 *  
1693 * @endcode
1694 *
 * For the initialization of the Euler operator, we set up the MatrixFree
 * variable contained in the class. This can be done given a mapping to
 * describe possible curved boundaries as well as a DoFHandler object
 * describing the degrees of freedom. Since we use a discontinuous Galerkin
 * discretization in this tutorial program where no constraints are imposed
 * strongly on the solution field, we do not need to pass in an
 * AffineConstraints object and rather use a dummy for the
 * construction. With respect to quadrature, we want to select two different
 * ways of computing. The first is a flexible one,
 * based on a template parameter `n_points_1d` (that will be assigned the
 * `n_q_points_1d` value specified at the top of this file). More accurate
 * integration is necessary to avoid the aliasing problem due to the
 * variable coefficients in the Euler operator. The second less accurate
 * quadrature formula is a tight one based on `fe_degree+1` and needed for
 * the inverse mass matrix. While that formula provides an exact inverse
 * only on affine element shapes and not on deformed elements, it enables
 * the fast inversion of the mass matrix by tensor product techniques,
 * necessary to ensure optimal computational efficiency overall.
1714 * @code
 *   template <int dim, int degree, int n_points_1d>
 *   void EulerOperator<dim, degree, n_points_1d>::reinit(
 *   const Mapping<dim> &mapping,
1718 *   const DoFHandler<dim> &dof_handler)
1719 *   {
1720 *   const std::vector<const DoFHandler<dim> *> dof_handlers = {&dof_handler};
1721 *   const AffineConstraints<double> dummy;
1722 *   const std::vector<const AffineConstraints<double> *> constraints = {&dummy};
1723 *   const std::vector<Quadrature<1>> quadratures = {QGauss<1>(n_q_points_1d),
1724 *   QGauss<1>(fe_degree + 1)};
1725 *  
1726 *   typename MatrixFree<dim, Number>::AdditionalData additional_data;
 *   additional_data.mapping_update_flags =
 *   (update_gradients | update_JxW_values | update_quadrature_points |
 *   update_values);
 *   additional_data.mapping_update_flags_inner_faces =
 *   (update_JxW_values | update_quadrature_points | update_normal_vectors |
 *   update_values);
 *   additional_data.mapping_update_flags_boundary_faces =
 *   (update_JxW_values | update_quadrature_points | update_normal_vectors |
 *   update_values);
 *   additional_data.tasks_parallel_scheme =
 *   MatrixFree<dim, Number>::AdditionalData::none;
1738 *  
1739 *   data.reinit(
1740 *   mapping, dof_handlers, constraints, quadratures, additional_data);
1741 *   }
1742 *  
1743 *  
1744 *  
 *   template <int dim, int degree, int n_points_1d>
 *   void EulerOperator<dim, degree, n_points_1d>::initialize_vector(
 *   LinearAlgebra::distributed::Vector<Number> &vector) const
 *   {
1749 *   data.initialize_dof_vector(vector);
1750 *   }
1751 *  
1752 *  
1753 *  
1754 * @endcode
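 *
 * In schematic form, the expected initialization order from user code is
 * thus (a sketch; the actual calls appear in
 * EulerProblem::make_grid_and_dofs() further down):
 *
 * @code
 *   EulerOperator<dim, fe_degree, n_q_points_1d> euler_operator(timer);
 *   euler_operator.reinit(mapping, dof_handler);
 *   euler_operator.initialize_vector(solution);
 * @endcode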
 *
 * The subsequent four member functions are the ones that must be called
 * from `main()` to specify the specific form of the boundary conditions. As
 * mentioned in the introduction, for inflow boundaries
 * we must specify all components in terms of density @f$\rho@f$, momentum @f$\rho
 * \mathbf{u}@f$ and energy @f$E@f$. Given this information, we then store the
 * function alongside the respective boundary id in a map member variable of
 * this class. Likewise, we proceed for the subsonic outflow boundaries (where
 * we request a function as well, which we use to retrieve the energy) and for
 * wall (no-penetration) boundaries where we impose zero normal velocity (no
 * function necessary, so we only request the boundary id). For the present
 * DG code where boundary conditions are solely applied as part of the weak
 * form (during time integration), the call to set the boundary conditions
 * can appear both before or after the `reinit()` call to this class. This
 * is different from continuous finite element codes where the boundary
 * conditions determine the content of the AffineConstraints object that is
 * sent into MatrixFree for initialization, thus requiring it to be set
 * before the initialization of the matrix-free data structures.
 *
1773
1774 *
 * The checks added in each of the four functions are used to
 * ensure that boundary conditions are mutually exclusive on the various
 * parts of the boundary, i.e., that a user does not accidentally designate a
 * boundary as both an inflow and say a subsonic outflow boundary.
1779 *
1780 * @code
 *   template <int dim, int degree, int n_points_1d>
 *   void EulerOperator<dim, degree, n_points_1d>::set_inflow_boundary(
 *   const types::boundary_id boundary_id,
1784 *   std::unique_ptr<Function<dim>> inflow_function)
1785 *   {
 *   AssertThrow(subsonic_outflow_boundaries.find(boundary_id) ==
 *   subsonic_outflow_boundaries.end() &&
 *   wall_boundaries.find(boundary_id) == wall_boundaries.end(),
1789 *   ExcMessage("You already set the boundary with id " +
1790 *   std::to_string(static_cast<int>(boundary_id)) +
1791 *   " to another type of boundary before now setting " +
1792 *   "it as inflow"));
1793 *   AssertThrow(inflow_function->n_components == dim + 2,
1794 *   ExcMessage("Expected function with dim+2 components"));
1795 *  
1797 *   }
1798 *  
1799 *  
 *   template <int dim, int degree, int n_points_1d>
 *   void EulerOperator<dim, degree, n_points_1d>::set_subsonic_outflow_boundary(
 *   const types::boundary_id boundary_id,
1803 *   std::unique_ptr<Function<dim>> outflow_function)
1804 *   {
1805 *   AssertThrow(inflow_boundaries.find(boundary_id) ==
1806 *   inflow_boundaries.end() &&
1807 *   wall_boundaries.find(boundary_id) == wall_boundaries.end(),
1808 *   ExcMessage("You already set the boundary with id " +
1809 *   std::to_string(static_cast<int>(boundary_id)) +
1810 *   " to another type of boundary before now setting " +
1811 *   "it as subsonic outflow"));
1812 *   AssertThrow(outflow_function->n_components == dim + 2,
1813 *   ExcMessage("Expected function with dim+2 components"));
1814 *  
 *   subsonic_outflow_boundaries[boundary_id] = std::move(outflow_function);
 *   }
1817 *  
1818 *  
 *   template <int dim, int degree, int n_points_1d>
 *   void EulerOperator<dim, degree, n_points_1d>::set_wall_boundary(
 *   const types::boundary_id boundary_id)
1822 *   {
1823 *   AssertThrow(inflow_boundaries.find(boundary_id) ==
1824 *   inflow_boundaries.end() &&
 *   subsonic_outflow_boundaries.find(boundary_id) ==
 *   subsonic_outflow_boundaries.end(),
 *   ExcMessage("You already set the boundary with id " +
1828 *   std::to_string(static_cast<int>(boundary_id)) +
1829 *   " to another type of boundary before now setting " +
1830 *   "it as wall boundary"));
1831 *  
1832 *   wall_boundaries.insert(boundary_id);
1833 *   }
1834 *  
1835 *  
 *   template <int dim, int degree, int n_points_1d>
 *   void EulerOperator<dim, degree, n_points_1d>::set_body_force(
 *   std::unique_ptr<Function<dim>> body_force)
1839 *   {
1840 *   AssertDimension(body_force->n_components, dim);
1841 *  
1842 *   this->body_force = std::move(body_force);
1843 *   }
1844 *  
1845 *  
1846 *  
1847 * @endcode
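 *
 * As an illustration of these interfaces (a sketch with made-up boundary
 * ids; the actual assignments for the two test cases happen in
 * EulerProblem::make_grid_and_dofs() below):
 *
 * @code
 *   euler_operator.set_inflow_boundary(
 *   0, std::make_unique<ExactSolution<dim>>(0));
 *   euler_operator.set_subsonic_outflow_boundary(
 *   1, std::make_unique<ExactSolution<dim>>(0));
 *   euler_operator.set_wall_boundary(2);
 * @endcode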
1848 *
1849 *
1850 * <a name="step_67-Localevaluators"></a>
1851 * <h4>Local evaluators</h4>
1852 *
1853
1854 *
 * Now we proceed to the local evaluators for the Euler problem. The
 * evaluators are relatively simple and follow what has been presented in
 * @ref step_37 "step-37", @ref step_48 "step-48", or @ref step_59 "step-59". The first notable difference is the fact
 * that we use an FEEvaluation with a non-standard number of quadrature
 * points. Whereas we previously always set the number of quadrature points
 * to equal the polynomial degree plus one (ensuring exact integration on
 * affine element shapes), we now set the number of quadrature points as a
 * separate variable (e.g. the polynomial degree plus two or three halves of
 * the polynomial degree) to more accurately handle nonlinear terms. Since
 * the evaluator is fed with the appropriate loop lengths via the template
 * argument and keeps the number of quadrature points in the whole cell in
 * the variable FEEvaluation::n_q_points, we now automatically operate on
 * the more accurate formula without further changes.
 *
1869
1870 *
 * The second difference is due to the fact that we are now evaluating a
 * multi-component system, as opposed to the scalar systems considered
 * previously. The matrix-free framework provides several ways to handle the
 * multi-component case. The variant shown here utilizes an FEEvaluation
 * object with multiple components embedded into it, specified by the fourth
 * template argument `dim + 2` for the components in the Euler system. As a
 * consequence, the return type of FEEvaluation::get_value() is not a scalar
 * any more (which would return a VectorizedArray type, collecting data from
 * several elements), but a Tensor of `dim+2` components. The functionality
 * is otherwise similar to the scalar case; it is handled by a template
 * specialization of a base class, called FEEvaluationAccess. An alternative
 * variant would have been to use several FEEvaluation objects, a scalar one
 * for the density, a vector-valued one with `dim` components for the
 * momentum, and another scalar evaluator for the energy. To ensure that
 * those components point to the correct part of the solution, the
 * constructor of FEEvaluation takes three optional integer arguments after
 * the required MatrixFree field, namely the number of the DoFHandler for
 * multi-DoFHandler systems (taking the first by default), the number of the
 * quadrature point in case there are multiple Quadrature objects (see more
 * below), and as a third argument the component within a vector system. As
 * we have a single vector for all components, we would go with the third
 * argument, and set it to `0` for the density, `1` for the vector-valued
 * momentum, and `dim+1` for the energy slot. FEEvaluation then picks the
 * appropriate subrange of the solution vector during
 * FEEvaluationBase::read_dof_values() and
 * FEEvaluationBase::distribute_local_to_global() or the more compact
 * FEEvaluation::gather_evaluate() and FEEvaluation::integrate_scatter()
 * calls.
1899 *
1900
1901 *
 * When it comes to the evaluation of the body force vector, we distinguish
 * between two cases for efficiency reasons: In case we have a constant
 * function (derived from Functions::ConstantFunction), we can precompute
 * the value outside the loop over quadrature points and simply use the
 * value everywhere. For a more general function, we instead need to call
 * the `evaluate_function()` method we provided above; this path is more
 * expensive because we need to access the memory associated with the
 * quadrature point data.
1910 *
1911
1912 *
 * The rest follows the other tutorial programs. Since we have implemented
 * all physics for the Euler equations in the separate `euler_flux()`
1915 * function, all we have to do here is to call this function
1916 * given the current solution evaluated at quadrature points, returned by
1917 * `phi.get_value(q)`, and tell the FEEvaluation object to queue the flux
1918 * for testing it by the gradients of the shape functions (which is a Tensor
1919 * of outer `dim+2` components, each holding a tensor of `dim` components
1920 * for the @f$x,y,z@f$ component of the Euler flux). One final thing worth
1921 * mentioning is the order in which we queue the data for testing by the
1922 * value of the test function, `phi.submit_value()`, in case we are given an
1923 * external function: We must do this after calling `phi.get_value(q)`,
1924 * because `get_value()` (reading the solution) and `submit_value()`
1925 * (queuing the value for multiplication by the test function and summation
 * over quadrature points) access the same underlying data field. Here it
 * would be easy to achieve also without the temporary variable `w_q` since
 * there is no mixing between `get_value()` and `submit_value()`. For more
 * complicated setups, one has to first copy out e.g. both the value and gradient at a
1930 * quadrature point and then queue results again by
1931 * FEEvaluationBase::submit_value() and FEEvaluationBase::submit_gradient().
1932 *
1933
1934 *
 * Lastly, we note that we do not use the first MatrixFree
 * argument of this function, which is a call-back from MatrixFree::loop().
 * The interface imposes the present list of arguments, but since we are in
 * a member function where the MatrixFree object is already available as the
 * `data` variable, we stick with that to avoid confusion.
1940 *
1941 * @code
1942 *   template <int dim, int degree, int n_points_1d>
1943 *   void EulerOperator<dim, degree, n_points_1d>::local_apply_cell(
1944 *   const MatrixFree<dim, Number> &,
1945 *   LinearAlgebra::distributed::Vector<Number> &dst,
1946 *   const LinearAlgebra::distributed::Vector<Number> &src,
1947 *   const std::pair<unsigned int, unsigned int> &cell_range) const
 *   {
 *   FEEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi(data);
 *  
 *   Tensor<1, dim, VectorizedArray<Number>> constant_body_force;
 *   const Functions::ConstantFunction<dim> *constant_function =
 *   dynamic_cast<Functions::ConstantFunction<dim> *>(body_force.get());
 *  
 *   if (constant_function)
 *   constant_body_force = evaluate_function<dim, Number, dim>(
 *   *constant_function, Point<dim, VectorizedArray<Number>>());
 *  
1959 *   for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
1960 *   {
1961 *   phi.reinit(cell);
1962 *   phi.gather_evaluate(src, EvaluationFlags::values);
1963 *  
1964 *   for (const unsigned int q : phi.quadrature_point_indices())
1965 *   {
1966 *   const auto w_q = phi.get_value(q);
1967 *   phi.submit_gradient(euler_flux<dim>(w_q), q);
1968 *   if (body_force.get() != nullptr)
 *   {
 *   const Tensor<1, dim, VectorizedArray<Number>> force =
 *   constant_function ? constant_body_force :
 *   evaluate_function<dim, Number, dim>(
 *   *body_force, phi.quadrature_point(q));
 *  
 *   Tensor<1, dim + 2, VectorizedArray<Number>> forcing;
 *   for (unsigned int d = 0; d < dim; ++d)
1977 *   forcing[d + 1] = w_q[0] * force[d];
1978 *   for (unsigned int d = 0; d < dim; ++d)
1979 *   forcing[dim + 1] += force[d] * w_q[d + 1];
1980 *  
1981 *   phi.submit_value(forcing, q);
1982 *   }
1983 *   }
1984 *  
 *   phi.integrate_scatter(((body_force.get() != nullptr) ?
 *   EvaluationFlags::values :
 *   EvaluationFlags::nothing) |
 *   EvaluationFlags::gradients,
 *   dst);
1990 *   }
1991 *   }
1992 *  
1993 *  
1994 *  
1995 * @endcode
1996 *
1997 * The next function concerns the computation of integrals on interior
1998 * faces, where we need evaluators from both cells adjacent to the face. We
1999 * associate the variable `phi_m` with the solution component @f$\mathbf{w}^-@f$
 * and the variable `phi_p` with the solution component @f$\mathbf{w}^+@f$. We
 * distinguish the two sides in the constructor of FEFaceEvaluation by the
 * second argument, with `true` for the interior side and `false` for the
2003 * exterior side, with interior and exterior denoting the orientation with
2004 * respect to the normal vector.
2005 *
2006
 *
 * The functions FEFaceEvaluation::gather_evaluate() and
 * FEFaceEvaluation::integrate_scatter() combine the access to the vectors
 * with the interpolation to or from the face. Compared to a separate read
 * of the vector followed by the face evaluation, this allows the evaluator
 * to skip data that does not contribute to the face values: Since we
 * use a nodal basis in terms of the Lagrange polynomials in the points of
 * the Gauss-Lobatto quadrature formula, only @f$(p+1)^{d-1}@f$ out of the
 * @f$(p+1)^d@f$ basis functions evaluate to non-zero on each face. Thus, the
 * evaluator only accesses the necessary data in the vector and skips the
 * parts which are multiplied by zero. If we had first read the vector, we
 * would have needed to load all data from the vector, as the call in
 * isolation would not know what data is required in subsequent
 * operations. If we were to evaluate both the solution
 * values and derivatives, indeed all @f$(p+1)^d@f$ vector entries for each
 * component are needed, as the normal derivative is nonzero for all basis
 * functions.
2023 *
2024
2025 *
 * Regarding the evaluation flags, we only need the function values of the
 * solution on the two sides of a face, indicated by the
 * EvaluationFlags::values
 * argument in the list. At the quadrature points, we then go to our
 * free-standing function for the numerical flux. It receives the solution
 * evaluated at quadrature points from both sides (i.e., @f$\mathbf{w}^-@f$ and
 * @f$\mathbf{w}^+@f$), as well as the normal vector onto the minus side. As
 * explained above, the numerical flux is already multiplied by the normal
 * vector from the minus side. We need to switch the sign because the
 * boundary term comes with a minus sign in the weak form derived in the
 * introduction. The flux is then queued for testing both on the minus side
 * and on the plus side, with switched sign as the normal vector from the
 * plus side is exactly opposed to the one from the minus side.
2039 *
2040 * @code
 *   template <int dim, int degree, int n_points_1d>
 *   void EulerOperator<dim, degree, n_points_1d>::local_apply_face(
 *   const MatrixFree<dim, Number> &,
 *   LinearAlgebra::distributed::Vector<Number> &dst,
 *   const LinearAlgebra::distributed::Vector<Number> &src,
 *   const std::pair<unsigned int, unsigned int> &face_range) const
 *   {
 *   FEFaceEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi_m(data,
 *   true);
 *   FEFaceEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi_p(data,
 *   false);
2052 *  
2053 *   for (unsigned int face = face_range.first; face < face_range.second; ++face)
2054 *   {
2055 *   phi_p.reinit(face);
2056 *   phi_p.gather_evaluate(src, EvaluationFlags::values);
2057 *  
2058 *   phi_m.reinit(face);
2059 *   phi_m.gather_evaluate(src, EvaluationFlags::values);
2060 *  
2061 *   for (const unsigned int q : phi_m.quadrature_point_indices())
 *   {
 *   const auto numerical_flux =
 *   euler_numerical_flux<dim>(phi_m.get_value(q),
2065 *   phi_p.get_value(q),
2066 *   phi_m.normal_vector(q));
2067 *   phi_m.submit_value(-numerical_flux, q);
2068 *   phi_p.submit_value(numerical_flux, q);
2069 *   }
2070 *  
2071 *   phi_p.integrate_scatter(EvaluationFlags::values, dst);
2072 *   phi_m.integrate_scatter(EvaluationFlags::values, dst);
2073 *   }
2074 *   }
2075 *  
2076 *  
2077 *  
2078 * @endcode
2079 *
 * For faces located at the boundary, we need to impose the appropriate
 * boundary conditions. In this tutorial program, we implement four cases as
 * mentioned above. (A fifth case, for supersonic outflow conditions, is
 * discussed in the "Results" section below.) The discontinuous Galerkin
 * method imposes boundary conditions not as constraints, but only
 * weakly. Thus, the various conditions are imposed by finding an appropriate
 * <i>exterior</i> quantity @f$\mathbf{w}^+@f$ that is then handed to the
 * numerical flux function also used for the interior faces. In essence,
 * we "pretend" a state on the outside of the domain in such a way that
 * if that were reality, the solution of the PDE would satisfy the boundary
 * conditions we want.
2091 *
2092
2093 *
 * For wall boundaries, we need to impose a no-normal-flux condition on the
 * momentum variable, whereas we use a Neumann condition for the density and
 * energy with @f$\rho^+ = \rho^-@f$ and @f$E^+ = E^-@f$. To achieve the no-normal
 * flux condition, we set the exterior values to the interior values and
 * subtract two times the velocity in wall-normal direction, i.e., in the
 * direction of the normal vector.
2100 *
2101
2102 *
 * For inflow boundaries, we simply set the given Dirichlet data
 * @f$\mathbf{w}_\mathrm{D}@f$ as a boundary value. An alternative would have been
 * to use @f$\mathbf{w}^+ = -\mathbf{w}^- + 2 \mathbf{w}_\mathrm{D}@f$, the
 * so-called mirror principle.
 *
2108
2109 *
 * The imposition of outflow is essentially a Neumann condition, i.e.,
 * setting @f$\mathbf{w}^+ = \mathbf{w}^-@f$. For the case of subsonic outflow,
 * we still need to impose a value for the energy, which we derive from the
 * respective function. A special step is needed for the case of
 * <i>backflow</i>, i.e., the case where there is a momentum flux into the
 * domain on the Neumann portion. According to the literature (a fact that
 * can be derived by appropriate energy arguments), we must switch to another
 * variant of the flux on inflow parts, see Gravemeier, Comerford,
 * Yoshihara, Ismail, Wall, "A novel formulation for Neumann inflow
 * conditions in biomechanics", Int. J. Numer. Meth. Biomed. Eng., vol. 28
 * (2012). Here, the momentum term needs to be added once again, which
 * corresponds to removing the flux contribution on the momentum
 * variables. We do this in a post-processing step, and only for the case
 * when we both are at an outflow boundary and the dot product between the
 * normal vector and the momentum (or, equivalently, velocity) is
 * negative. As we work on data of several quadrature points at once for
 * SIMD vectorizations, we here need to explicitly loop over the array
 * entries of the SIMD array.
2128 *
2129
2130 *
 * In the implementation below, we check for the various types
 * of boundaries at the level of quadrature points. Of course, we could also
 * have moved the decision out of the quadrature point loop and treat entire
 * faces as of the same kind, which avoids some map/set lookups in the inner
 * loop over quadrature points. However, the loss of efficiency is hardly
 * noticeable, so we opt for the simpler code here. Also note that the final
 * `else` clause will catch the case when some part of the boundary was not
 * assigned any boundary condition via `EulerOperator::set_..._boundary(...)`.
2139 *
2140 * @code
 *   template <int dim, int degree, int n_points_1d>
 *   void EulerOperator<dim, degree, n_points_1d>::local_apply_boundary_face(
 *   const MatrixFree<dim, Number> &,
 *   LinearAlgebra::distributed::Vector<Number> &dst,
 *   const LinearAlgebra::distributed::Vector<Number> &src,
 *   const std::pair<unsigned int, unsigned int> &face_range) const
 *   {
 *   FEFaceEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi(data,
 *   true);
2150 *   for (unsigned int face = face_range.first; face < face_range.second; ++face)
2151 *   {
2152 *   phi.reinit(face);
2153 *   phi.gather_evaluate(src, EvaluationFlags::values);
2154 *  
2155 *   for (const unsigned int q : phi.quadrature_point_indices())
2156 *   {
2157 *   const auto w_m = phi.get_value(q);
2158 *   const auto normal = phi.normal_vector(q);
2159 *  
2160 *   auto rho_u_dot_n = w_m[1] * normal[0];
2161 *   for (unsigned int d = 1; d < dim; ++d)
2162 *   rho_u_dot_n += w_m[1 + d] * normal[d];
2163 *  
2164 *   bool at_outflow = false;
2165 *  
 *   Tensor<1, dim + 2, VectorizedArray<Number>> w_p;
 *   const auto boundary_id = data.get_boundary_id(face);
2168 *   if (wall_boundaries.find(boundary_id) != wall_boundaries.end())
2169 *   {
2170 *   w_p[0] = w_m[0];
2171 *   for (unsigned int d = 0; d < dim; ++d)
2172 *   w_p[d + 1] = w_m[d + 1] - 2. * rho_u_dot_n * normal[d];
2173 *   w_p[dim + 1] = w_m[dim + 1];
2174 *   }
2175 *   else if (inflow_boundaries.find(boundary_id) !=
2176 *   inflow_boundaries.end())
2177 *   w_p =
2178 *   evaluate_function(*inflow_boundaries.find(boundary_id)->second,
2179 *   phi.quadrature_point(q));
 *   else if (subsonic_outflow_boundaries.find(boundary_id) !=
 *   subsonic_outflow_boundaries.end())
 *   {
2183 *   w_p = w_m;
2184 *   w_p[dim + 1] = evaluate_function(
2185 *   *subsonic_outflow_boundaries.find(boundary_id)->second,
2186 *   phi.quadrature_point(q),
2187 *   dim + 1);
2188 *   at_outflow = true;
2189 *   }
2190 *   else
2191 *   AssertThrow(false,
2192 *   ExcMessage("Unknown boundary id, did "
2193 *   "you set a boundary condition for "
2194 *   "this part of the domain boundary?"));
2195 *  
2196 *   auto flux = euler_numerical_flux<dim>(w_m, w_p, normal);
2197 *  
2198 *   if (at_outflow)
2199 *   for (unsigned int v = 0; v < VectorizedArray<Number>::size(); ++v)
2200 *   {
2201 *   if (rho_u_dot_n[v] < -1e-12)
2202 *   for (unsigned int d = 0; d < dim; ++d)
2203 *   flux[d + 1][v] = 0.;
2204 *   }
2205 *  
2206 *   phi.submit_value(-flux, q);
2207 *   }
2208 *  
2209 *   phi.integrate_scatter(EvaluationFlags::values, dst);
2210 *   }
2211 *   }
2212 *  
2213 *  
2214 *  
2215 * @endcode
2216 *
 * The next function implements the inverse mass matrix operation. The
 * algorithms and rationale have been discussed extensively in the
 * introduction, so we here only comment on the specifics of the
 * MatrixFreeOperators::CellwiseInverseMassMatrix class. It does similar
 * operations as the forward evaluation of the mass matrix, except with a
 * different interpolation matrix, representing the inverse @f$S^{-1}@f$
 * factors. These represent a change of basis from the specified basis (in
2224 * this case, the Lagrange basis in the points of the Gauss--Lobatto
2225 * quadrature formula) to the Lagrange basis in the points of the Gauss
2226 * quadrature formula. In the latter basis, we can apply the inverse of the
2227 * point-wise `JxW` factor, i.e., the quadrature weight times the
2228 * determinant of the Jacobian of the mapping from reference to real
2229 * coordinates. Once this is done, the basis is changed back to the nodal
2230 * Gauss-Lobatto basis again. All of these operations are done by the
2231 * `apply()` function below. What we need to provide is the local fields to
2232 * operate on (which we extract from the global vector by an FEEvaluation
2233 * object) and write the results back to the destination vector of the mass
2234 * matrix operation.
2235 *
2236
2237 *
 * One thing to note is that we added two integer arguments (that are
 * optional) to the constructor of FEEvaluation, the first being 0
 * (selecting among the DoFHandler objects in multi-DoFHandler systems;
 * here, we
 * only have one) and the second being 1 to make the quadrature formula
 * selection. As we use the quadrature formula 0 for the over-integration of
 * nonlinear terms, we use the formula 1 with the default @f$p+1@f$ (or
 * `fe_degree+1` in terms of the variable name) points for the mass
 * matrix inversion, the choice that, as discussed above, yields the exact
 * inverse on affine cell shapes.
 *
2248 * @code
 *   template <int dim, int degree, int n_points_1d>
 *   void EulerOperator<dim, degree, n_points_1d>::local_apply_inverse_mass_matrix(
 *   const MatrixFree<dim, Number> &,
 *   LinearAlgebra::distributed::Vector<Number> &dst,
 *   const LinearAlgebra::distributed::Vector<Number> &src,
 *   const std::pair<unsigned int, unsigned int> &cell_range) const
 *   {
 *   FEEvaluation<dim, degree, degree + 1, dim + 2, Number> phi(data, 0, 1);
 *   MatrixFreeOperators::CellwiseInverseMassMatrix<dim, degree, dim + 2, Number>
 *   inverse(phi);
2259 *  
2260 *   for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
2261 *   {
2262 *   phi.reinit(cell);
2263 *   phi.read_dof_values(src);
2264 *  
2265 *   inverse.apply(phi.begin_dof_values(), phi.begin_dof_values());
2266 *  
2267 *   phi.set_dof_values(dst);
2268 *   }
2269 *   }
2270 *  
2271 *  
2272 *  
2273 * @endcode
2274 *
2275 *
2276 * <a name="step_67-Theapplyandrelatedfunctions"></a>
2277 * <h4>The apply() and related functions</h4>
2278 *
2279
2280 *
 * We now come to the function which implements the evaluation of the Euler
 * operator as a whole, i.e., @f$\mathcal M^{-1} \mathcal L(t, \mathbf{w})@f$,
 * calling into the local evaluators presented above. The steps should be
 * clear from the previous code. One thing to note is that we need to adjust
 * the time in the functions we have associated with the various parts of
 * the boundary, in order to be consistent with the equation in case the
 * boundary data is time-dependent. Then, we call MatrixFree::loop() to
 * perform the cell and face integrals, including the necessary ghost data
 * exchange in the `src` vector. The seventh argument to the function,
 * `true`, specifies that we want to zero the `dst` vector as part of the
 * loop, before we start accumulating integrals into it. This variant is
 * preferred over explicitly calling `dst = 0.;` before the loop as the
 * zeroing operation is done on a subrange of the vector in parts that are
 * written by the integrals nearby. This enhances data locality and allows
 * for caching, saving one roundtrip of vector data to main memory and
 * enhancing performance. The last two arguments to the loop determine which
 * data is exchanged: Since we only access the values of the shape functions
 * on the faces, typical of first-order hyperbolic problems, and since we use
 * a nodal basis with nodes at the reference element surface, we only need
 * to exchange those parts. This again saves precious memory bandwidth.
2301 *
2302
2303 *
 * Once the spatial operator @f$\mathcal L@f$ is applied, we need to make a
 * second round and apply the inverse mass matrix. Here, we call
 * MatrixFree::cell_loop() rather than the full loop() since only cell
 * integrals appear. The cell loop
 * is cheaper than the full loop as access only goes to the degrees of
 * freedom associated with the locally owned cells, which is simply the
 * locally owned degrees of freedom for DG discretizations. Thus, no ghost
 * exchange is needed here.
 *
2312
2313 *
 * Around all these functions, we put timer scopes to record the
 * computational time spent in each of them, allowing us to print the cost
 * of the various parts.
2317 *
2318 * @code
2319 *   template <int dim, int degree, int n_points_1d>
2320 *   void EulerOperator<dim, degree, n_points_1d>::apply(
2321 *   const double current_time,
2322 *   const LinearAlgebra::distributed::Vector<Number> &src,
2323 *   LinearAlgebra::distributed::Vector<Number> &dst) const
2324 *   {
2325 *   {
2326 *   TimerOutput::Scope t(timer, "apply - integrals");
2327 *  
2328 *   for (auto &i : inflow_boundaries)
2329 *   i.second->set_time(current_time);
2330 *   for (auto &i : subsonic_outflow_boundaries)
2331 *   i.second->set_time(current_time);
2332 *  
 *   data.loop(&EulerOperator::local_apply_cell,
 *   &EulerOperator::local_apply_face,
 *   &EulerOperator::local_apply_boundary_face,
 *   this,
 *   dst,
 *   src,
 *   true,
 *   MatrixFree<dim, Number>::DataAccessOnFaces::values,
 *   MatrixFree<dim, Number>::DataAccessOnFaces::values);
 *   }
2343 *  
2344 *   {
2345 *   TimerOutput::Scope t(timer, "apply - inverse mass");
2346 *  
 *   data.cell_loop(&EulerOperator::local_apply_inverse_mass_matrix,
 *   this,
2349 *   dst,
2350 *   dst);
2351 *   }
2352 *   }
2353 *  
2354 *  
2355 *  
2356 * @endcode
2357 *
2358 * Let us move to the function that does an entire stage of a Runge--Kutta
2359 * update. It calls EulerOperator::apply() followed by some updates
2360 * to the vectors, namely `next_ri = solution + factor_ai * k_i` and
 * `solution += factor_solution * k_i`. Rather than performing these
 * operations through the vector interfaces, we here present an alternative
 * strategy that is faster on cache-based architectures. As the memory
 * consumed by the vectors is often much larger than what fits into caches,
 * the data has to effectively come from the slow RAM memory. The situation
 * can be improved by loop fusion, i.e., performing both the updates to
 * `next_ri` and `solution` within a single sweep. In that case, we would
 * read the two vectors `vec_ki` and `solution` and write into `next_ri` and
 * `solution`, compared to at least 4 reads and two writes in the baseline
2370 * case. Here, we go one step further and perform the loop immediately when
2371 * the mass matrix inversion has finished on a part of the
2372 * vector. MatrixFree::cell_loop() provides a mechanism to attach an
2373 * `std::function` both before the loop over cells first touches a vector
2374 * entry (which we do not use here, but is e.g. used for zeroing the vector)
2375 * and a second `std::function` to be called after the loop last touches
2376 * an entry. The callback is in form of a range over the given vector (in
2377 * terms of the local index numbering in the MPI universe) that can be
2378 * addressed by `local_element()` functions.
2379 *
2380
2381 *
 * For this second callback, we create a lambda that works on a range and
 * writes the respective update on this range. Ideally, we would add a
 * `DEAL_II_OPENMP_SIMD_PRAGMA` before the local loop to suggest to the
 * compiler to SIMD parallelize this loop (which means in practice that we
 * ensure that there is no overlap, also called aliasing, between the index
 * ranges of the pointers we use inside the loops). It turns out that at the
 * time of this writing, GCC 7.2 fails to compile an OpenMP pragma inside a
 * lambda function, so we comment this pragma out below. If your compiler is
 * newer, you should be able to uncomment it again.
 *
2392
2393 *
 * Note that we select a different code path for the last
 * stage when we do not need to update the @f$\mathbf{r}_i@f$
 * vector. This strategy gives a considerable speedup. Whereas the inverse
2397 * mass matrix and vector updates take more than 60% of the computational
2398 * time with default vector updates on a 40-core machine, the percentage is
2399 * around 35% with the more optimized variant. In other words, this is a
2400 * speedup of around a third.
2401 *
2402 * @code
2403 *   template <int dim, int degree, int n_points_1d>
2404 *   void EulerOperator<dim, degree, n_points_1d>::perform_stage(
2405 *   const Number current_time,
2406 *   const Number factor_solution,
2407 *   const Number factor_ai,
2408 *   const LinearAlgebra::distributed::Vector<Number> &current_ri,
2409 *   LinearAlgebra::distributed::Vector<Number> &vec_ki,
2410 *   LinearAlgebra::distributed::Vector<Number> &solution,
2411 *   LinearAlgebra::distributed::Vector<Number> &next_ri) const
2412 *   {
2413 *   {
2414 *   TimerOutput::Scope t(timer, "rk_stage - integrals L_h");
2415 *  
2416 *   for (auto &i : inflow_boundaries)
2417 *   i.second->set_time(current_time);
2418 *   for (auto &i : subsonic_outflow_boundaries)
2419 *   i.second->set_time(current_time);
2420 *  
 *   data.loop(&EulerOperator::local_apply_cell,
 *   &EulerOperator::local_apply_face,
 *   &EulerOperator::local_apply_boundary_face,
 *   this,
 *   vec_ki,
 *   current_ri,
 *   true,
 *   MatrixFree<dim, Number>::DataAccessOnFaces::values,
 *   MatrixFree<dim, Number>::DataAccessOnFaces::values);
 *   }
2431 *  
2432 *  
2433 *   {
2434 *   TimerOutput::Scope t(timer, "rk_stage - inv mass + vec upd");
 *   data.cell_loop(
 *   &EulerOperator::local_apply_inverse_mass_matrix,
 *   this,
2438 *   next_ri,
2439 *   vec_ki,
2440 *   std::function<void(const unsigned int, const unsigned int)>(),
2441 *   [&](const unsigned int start_range, const unsigned int end_range) {
2442 *   const Number ai = factor_ai;
2443 *   const Number bi = factor_solution;
2444 *   if (ai == Number())
2445 *   {
2446 *   /* DEAL_II_OPENMP_SIMD_PRAGMA */
2447 *   for (unsigned int i = start_range; i < end_range; ++i)
2448 *   {
2449 *   const Number k_i = next_ri.local_element(i);
2450 *   const Number sol_i = solution.local_element(i);
2451 *   solution.local_element(i) = sol_i + bi * k_i;
2452 *   }
2453 *   }
2454 *   else
2455 *   {
2456 *   /* DEAL_II_OPENMP_SIMD_PRAGMA */
2457 *   for (unsigned int i = start_range; i < end_range; ++i)
2458 *   {
2459 *   const Number k_i = next_ri.local_element(i);
2460 *   const Number sol_i = solution.local_element(i);
2461 *   solution.local_element(i) = sol_i + bi * k_i;
2462 *   next_ri.local_element(i) = sol_i + ai * k_i;
2463 *   }
2464 *   }
2465 *   });
2466 *   }
2467 *   }
2468 *  
2469 *  
2470 *  
2471 * @endcode
2472 *
 * Having discussed the implementation of the functions that deal with
 * advancing the solution by one time step, let us now move to functions
 * that implement other, ancillary operations. Specifically, these are
 * functions that compute projections, evaluate errors, and compute the speed
 * of information transport on a cell.
2478 *
2479
2480 *
 * The first of these functions is essentially equivalent to
 * VectorTools::project(), just much faster because it is specialized for DG
 * elements where there is no need to set up and solve a linear system, as
 * each element has independent basis functions. The reason why we show the
 * code here, besides a small speedup of this non-critical operation, is
 * that it shows additional functionality provided by
 * MatrixFreeOperators::CellwiseInverseMassMatrix.
 *
2489
2490 *
 * The projection operation works as follows: If we denote the matrix of
 * shape functions evaluated at quadrature points by @f$S@f$, the projection on
 * cell @f$K@f$ is an operation of the form @f$\underbrace{S J^K S^\mathrm
 * T}_{\mathcal M^K} \mathbf{w}^K = S J^K
 * \tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}@f$, where @f$J^K@f$ is the diagonal
 * matrix containing the determinant of the Jacobian times the quadrature
 * weight (JxW), @f$\mathcal M^K@f$ is the cell-wise mass matrix, and
 * @f$\tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}@f$ is the evaluation of the
 * field to be projected onto quadrature points. (In reality the matrix @f$S@f$
 * has additional structure through the tensor product, as explained in the
 * introduction.) This system can now be equivalently written as
 * @f$\mathbf{w}^K = \left(S J^K S^\mathrm T\right)^{-1} S J^K
 * \tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q} = S^{-\mathrm T}
 * \left(J^K\right)^{-1} S^{-1} S J^K
 * \tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}@f$. Now, the term @f$S^{-1} S@f$ and
 * then @f$\left(J^K\right)^{-1} J^K@f$ cancel, resulting in the final
 * expression @f$\mathbf{w}^K = S^{-\mathrm T}
 * \tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}@f$. This operation is
 * implemented by
 * MatrixFreeOperators::CellwiseInverseMassMatrix::transform_from_q_points_to_basis().
 * The name is derived from the fact that this projection is simply
 * the multiplication by @f$S^{-\mathrm T}@f$, a basis change from the
 * nodal basis in the points of the Gaussian quadrature to the given finite
 * element basis. Note that we call FEEvaluation::set_dof_values() to write
 * the result into the vector, overwriting previous content, rather than
 * accumulating the results as typical in integration tasks -- we can do
 * this because every vector entry has contributions from only a single
 * cell for discontinuous Galerkin discretizations.
2519 *
2520 * @code
2521 *   template <int dim, int degree, int n_points_1d>
2522 *   void EulerOperator<dim, degree, n_points_1d>::project(
2523 *   const Function<dim> &function,
2524 *   LinearAlgebra::distributed::Vector<Number> &solution) const
 *   {
 *   FEEvaluation<dim, degree, degree + 1, dim + 2, Number> phi(data, 0, 1);
 *   MatrixFreeOperators::CellwiseInverseMassMatrix<dim, degree, dim + 2, Number>
 *   inverse(phi);
2529 *   solution.zero_out_ghost_values();
2530 *   for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
2531 *   {
2532 *   phi.reinit(cell);
 *   for (const unsigned int q : phi.quadrature_point_indices())
 *   phi.submit_dof_value(evaluate_function(function,
 *   phi.quadrature_point(q)),
 *   q);
2537 *   inverse.transform_from_q_points_to_basis(dim + 2,
2538 *   phi.begin_dof_values(),
2539 *   phi.begin_dof_values());
2540 *   phi.set_dof_values(solution);
2541 *   }
2542 *   }
2543 *  
2544 *  
2545 *  
2546 * @endcode
2547 *
 * The next function again repeats functionality also provided by the
 * deal.II library, namely VectorTools::integrate_difference(). We here show
 * the explicit code to highlight how the vectorization across several cells
 * works and how to accumulate results via that interface: Recall that each
 * <i>lane</i> of the vectorized array holds data from a different cell. By
 * the loop over all cell batches that are owned by the current MPI process,
 * we could then fill a VectorizedArray of results; to obtain a global sum,
 * we would need to further go on and sum across the entries in the SIMD
 * array. However, such a procedure is not stable as the SIMD array could in
 * fact not hold valid data for all its lanes. This happens when the number
 * of locally owned cells is not a multiple of the SIMD width. To avoid
 * invalid data, we must explicitly skip those invalid lanes when accessing
 * the array. While one could imagine that we could make it work by simply
 * setting the empty lanes to zero (and thus, not contribute to a sum), the
 * situation is more complicated than that: What if we were to compute a
 * velocity out of the momentum? Then, we would need to divide by the
 * density, which is zero -- the result would consequently be NaN and
 * contaminate the result. This trap is avoided by accumulating the results
 * from the valid lanes only, using the function
 * MatrixFree::n_active_entries_per_cell_batch() to give us the
 * number of lanes with valid data. It equals VectorizedArray::size() on
 * most cells, but can be less on the last cell batch if the number of cells
 * has a remainder compared to the SIMD width.
2571 *
2572 * @code
2573 *   template <int dim, int degree, int n_points_1d>
2574 *   std::array<double, 3> EulerOperator<dim, degree, n_points_1d>::compute_errors(
2575 *   const Function<dim> &function,
2576 *   const LinearAlgebra::distributed::Vector<Number> &solution) const
2577 *   {
2578 *   TimerOutput::Scope t(timer, "compute errors");
 *   double errors_squared[3] = {};
 *   FEEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi(data, 0, 0);
 *  
2582 *   for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
2583 *   {
2584 *   phi.reinit(cell);
 *   phi.gather_evaluate(solution, EvaluationFlags::values);
 *   VectorizedArray<Number> local_errors_squared[3] = {};
 *   for (const unsigned int q : phi.quadrature_point_indices())
2588 *   {
2589 *   const auto error =
2590 *   evaluate_function(function, phi.quadrature_point(q)) -
2591 *   phi.get_value(q);
2592 *   const auto JxW = phi.JxW(q);
2593 *  
2594 *   local_errors_squared[0] += error[0] * error[0] * JxW;
2595 *   for (unsigned int d = 0; d < dim; ++d)
2596 *   local_errors_squared[1] += (error[d + 1] * error[d + 1]) * JxW;
2597 *   local_errors_squared[2] += (error[dim + 1] * error[dim + 1]) * JxW;
2598 *   }
2599 *   for (unsigned int v = 0; v < data.n_active_entries_per_cell_batch(cell);
2600 *   ++v)
2601 *   for (unsigned int d = 0; d < 3; ++d)
 *   errors_squared[d] += local_errors_squared[d][v];
 *   }
2604 *  
 *   Utilities::MPI::sum(errors_squared, MPI_COMM_WORLD, errors_squared);
 *  
2607 *   std::array<double, 3> errors;
2608 *   for (unsigned int d = 0; d < 3; ++d)
2609 *   errors[d] = std::sqrt(errors_squared[d]);
2610 *  
2611 *   return errors;
2612 *   }
2613 *  
2614 *  
2615 *  
2616 * @endcode
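 *
 * A typical use of this function compares the discrete solution against the
 * analytic solution of test case 0; as a sketch:
 *
 * @code
 *   const std::array<double, 3> errors =
 *   euler_operator.compute_errors(ExactSolution<dim>(time), solution);
 *   // errors[0]: density, errors[1]: momentum, errors[2]: energy,
 *   // each measured in the (global) L2 norm
 * @endcode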
2617 *
 * This final function of the EulerOperator class is used to estimate the
 * transport speed, scaled by the mesh size, that is relevant for setting
 * the time step size in the explicit time integrator. In the Euler
 * equations, there are two speeds of transport, namely the convective
 * velocity @f$\mathbf{u}@f$ and the propagation of sound waves with sound
 * speed @f$c = \sqrt{\gamma p/\rho}@f$ relative to the medium moving at
 * velocity @f$\mathbf u@f$.
 *
 * In the formula for the time step size, we are interested not in these
 * absolute speeds, but in the amount of time it takes for
 * information to cross a single cell. For information transported along
 * with the medium, @f$\mathbf u@f$ is scaled by the mesh size, so an estimate
 * of the maximal velocity can be obtained by computing @f$\|J^{-\mathrm T}
 * \mathbf{u}\|_\infty@f$, where @f$J@f$ is the Jacobian of the transformation
 * from real to the reference domain. Note that
 * FEEvaluationBase::inverse_jacobian() returns the inverse and transposed
 * Jacobian, representing the metric term from real to reference
 * coordinates, so we do not need to transpose it again. We store this limit
 * in the variable `convective_limit` in the code below.
2639 *
2640
2641 *
2642 * The sound propagation is isotropic, so we need to take mesh sizes in any
2643 * direction into account. The appropriate mesh size scaling is then given
2644 * by the minimal singular value of @f$J@f$ or, equivalently, the maximal
2645 * singular value of @f$J^{-1}@f$. Note that one could approximate this quantity
2646 * by the minimal distance between vertices of a cell when ignoring curved
2647 * cells. To get the maximal singular value of the Jacobian, the general
2648 * strategy would be some LAPACK function. Since all we need here is an
2649 * estimate, we can avoid the hassle of decomposing a tensor of
2650 * VectorizedArray numbers into several matrices and calling into an
2651 * eigenvalue function without vectorization, and instead use a few
2652 * iterations (five in the code below) of the power method applied to
2653 * @f$J^{-1}J^{-\mathrm T}@f$. The speed of convergence of this method depends
2654 * on the ratio of the largest to the next largest eigenvalue and the
2655 * initial guess, which is the vector of all ones. This might suggest that
2656 * we get slow convergence on cells close to a cube shape where all
2657 * lengths are almost the same. However, this slow convergence means that
2658 * the result will sit between the two largest singular values, which both
2659 * are close to the maximal value anyway. In all other cases, convergence
2660 * will be quick, so we can merely hardcode five iterations here and be
2661 * confident that the result is good.
2662 *
2663 * @code
2664 *   template <int dim, int degree, int n_points_1d>
2665 *   double EulerOperator<dim, degree, n_points_1d>::compute_cell_transport_speed(
2666 *   const LinearAlgebra::distributed::Vector<Number> &solution) const
2667 *   {
2668 *   TimerOutput::Scope t(timer, "compute transport speed");
2669 *   Number max_transport = 0;
2670 *   FEEvaluation<dim, degree, degree + 1, dim + 2, Number> phi(data, 0, 1);
2671 *  
2672 *   for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
2673 *   {
2674 *   phi.reinit(cell);
2675 *   phi.gather_evaluate(solution, EvaluationFlags::values);
2676 *   VectorizedArray<Number> local_max = 0.;
2677 *   for (const unsigned int q : phi.quadrature_point_indices())
2678 *   {
2679 *   const auto solution = phi.get_value(q);
2680 *   const auto velocity = euler_velocity<dim>(solution);
2681 *   const auto pressure = euler_pressure<dim>(solution);
2682 *  
2683 *   const auto inverse_jacobian = phi.inverse_jacobian(q);
2684 *   const auto convective_speed = inverse_jacobian * velocity;
2685 *   VectorizedArray<Number> convective_limit = 0.;
2686 *   for (unsigned int d = 0; d < dim; ++d)
2687 *   convective_limit =
2688 *   std::max(convective_limit, std::abs(convective_speed[d]));
2689 *  
2690 *   const auto speed_of_sound =
2691 *   std::sqrt(gamma * pressure * (1. / solution[0]));
2692 *  
2693 *   Tensor<1, dim, VectorizedArray<Number>> eigenvector;
2694 *   for (unsigned int d = 0; d < dim; ++d)
2695 *   eigenvector[d] = 1.;
2696 *   for (unsigned int i = 0; i < 5; ++i)
2697 *   {
2698 *   eigenvector = transpose(inverse_jacobian) *
2699 *   (inverse_jacobian * eigenvector);
2700 *   VectorizedArray<Number> eigenvector_norm = 0.;
2701 *   for (unsigned int d = 0; d < dim; ++d)
2702 *   eigenvector_norm =
2703 *   std::max(eigenvector_norm, std::abs(eigenvector[d]));
2704 *   eigenvector /= eigenvector_norm;
2705 *   }
2706 *   const auto jac_times_ev = inverse_jacobian * eigenvector;
2707 *   const auto max_eigenvalue = std::sqrt(
2708 *   (jac_times_ev * jac_times_ev) / (eigenvector * eigenvector));
2709 *   local_max =
2710 *   std::max(local_max,
2711 *   max_eigenvalue * speed_of_sound + convective_limit);
2712 *   }
2713 *  
2714 * @endcode
2715 *
2716 * Similarly to the previous function, we must make sure to accumulate the
2717 * speed only on the valid cells of a cell batch.
2718 *
2719 * @code
2720 *   for (unsigned int v = 0; v < data.n_active_entries_per_cell_batch(cell);
2721 *   ++v)
2722 *   max_transport = std::max(max_transport, local_max[v]);
2723 *   }
2724 *  
2725 *   max_transport = Utilities::MPI::max(max_transport, MPI_COMM_WORLD);
2726 *  
2727 *   return max_transport;
2728 *   }
2729 *  
2730 *  
2731 *  
2732 * @endcode
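 *
 * To see the power iteration in isolation, here is a standalone sketch
 * (plain C++ without deal.II types or vectorization; the inverse Jacobian
 * of a stretched cell is a made-up example value):
 *
 * @code
 *   #include <algorithm>
 *   #include <array>
 *   #include <cmath>
 *   #include <iostream>
 *  
 *   int main()
 *   {
 *     // Hypothetical inverse Jacobian J^{-1} of a stretched cell.
 *     const double Jinv[2][2] = {{2.0, 0.1}, {0.0, 1.0}};
 *  
 *     const auto apply = [](const double m[2][2],
 *                           const std::array<double, 2> &x) {
 *       return std::array<double, 2>{{m[0][0] * x[0] + m[0][1] * x[1],
 *                                     m[1][0] * x[0] + m[1][1] * x[1]}};
 *     };
 *     const auto apply_transpose = [](const double m[2][2],
 *                                     const std::array<double, 2> &x) {
 *       return std::array<double, 2>{{m[0][0] * x[0] + m[1][0] * x[1],
 *                                     m[0][1] * x[0] + m[1][1] * x[1]}};
 *     };
 *  
 *     // Five power iterations on J^{-T} J^{-1}, starting from the vector
 *     // of all ones and rescaling by the infinity norm, as above.
 *     std::array<double, 2> v = {{1., 1.}};
 *     for (unsigned int i = 0; i < 5; ++i)
 *       {
 *         v = apply_transpose(Jinv, apply(Jinv, v));
 *         const double norm = std::max(std::abs(v[0]), std::abs(v[1]));
 *         v[0] /= norm;
 *         v[1] /= norm;
 *       }
 *  
 *     // Estimate of the maximal singular value: ||J^{-1} v|| / ||v||.
 *     const auto   jv    = apply(Jinv, v);
 *     const double sigma = std::sqrt((jv[0] * jv[0] + jv[1] * jv[1]) /
 *                                    (v[0] * v[0] + v[1] * v[1]));
 *     std::cout << "max singular value of J^{-1} ~ " << sigma << std::endl;
 *   }
 * @endcode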
2733 *
2734 *
2735 * <a name="step_67-TheEulerProblemclass"></a>
2736 * <h3>The EulerProblem class</h3>
2737 *
2738
2739 *
2740 * This class combines the EulerOperator class with the time integrator and
2741 * the usual global data structures such as FiniteElement and DoFHandler, to
2742 * actually run the simulations of the Euler problem.
2743 *
2744
2745 *
2746 * The member variables are a triangulation, a finite element, a mapping (to
2747 * create high-order curved surfaces, see e.g. @ref step_10 "step-10"), and a DoFHandler to
2748 * describe the degrees of freedom. In addition, we keep an instance of the
2749 * EulerOperator described above around, which will do all heavy lifting in
2750 * terms of integrals, and some parameters for time integration like the
2751 * current time or the time step size.
2752 *
2753
2754 *
2755 * Furthermore, we use a PostProcessor instance to write some additional
2756 * information to the output file, in similarity to what was done in
2757 * @ref step_33 "step-33". The interface of the DataPostprocessor class is intuitive,
2758 * requiring us to provide information about what needs to be evaluated
2759 * (typically only the values of the solution, except for the Schlieren plot
2760 * that we only enable in 2d where it makes sense), and the names of what
2761 * gets evaluated. Note that it would also be possible to extract most
2762 * information by calculator tools within visualization programs such as
2763 * ParaView, but it is so much more convenient to do it already when writing
2764 * the output.
2765 *
2766 * @code
2767 *   template <int dim>
2768 *   class EulerProblem
2769 *   {
2770 *   public:
2771 *   EulerProblem();
2772 *  
2773 *   void run();
2774 *  
2775 *   private:
2776 *   void make_grid_and_dofs();
2777 *  
2778 *   void output_results(const unsigned int result_number);
2779 *  
2780 *   LinearAlgebra::distributed::Vector<Number> solution;
2781 *  
2782 *   ConditionalOStream pcout;
2783 *  
2784 *   #ifdef DEAL_II_WITH_P4EST
2785 *   parallel::distributed::Triangulation<dim> triangulation;
2786 *   #else
2787 *   Triangulation<dim> triangulation;
2788 *   #endif
2789 *  
2790 *   const FESystem<dim> fe;
2791 *   const MappingQ<dim> mapping;
2792 *   DoFHandler<dim> dof_handler;
2793 *  
2794 *   TimerOutput timer;
2795 *  
2796 *   EulerOperator<dim, fe_degree, n_q_points_1d> euler_operator;
2797 *  
2798 *   double time, time_step;
2799 *  
2800 *   class Postprocessor : public DataPostprocessor<dim>
2801 *   {
2802 *   public:
2803 *   Postprocessor();
2804 *  
2805 *   virtual void evaluate_vector_field(
2806 *   const DataPostprocessorInputs::Vector<dim> &inputs,
2807 *   std::vector<Vector<double>> &computed_quantities) const override;
2808 *  
2809 *   virtual std::vector<std::string> get_names() const override;
2810 *  
2811 *   virtual std::vector<
2812 *   DataComponentInterpretation::DataComponentInterpretation>
2813 *   get_data_component_interpretation() const override;
2814 *  
2815 *   virtual UpdateFlags get_needed_update_flags() const override;
2816 *  
2817 *   private:
2818 *   const bool do_schlieren_plot;
2819 *   };
2820 *   };
2821 *  
2822 *  
2823 *  
2824 *   template <int dim>
2825 *   EulerProblem<dim>::Postprocessor::Postprocessor()
2826 *   : do_schlieren_plot(dim == 2)
2827 *   {}
2828 *  
2829 *  
2830 *  
2831 * @endcode
2832 *
2833 * For the main evaluation of the field variables, we first check that the
2834 * lengths of the arrays equal the expected values (the lengths `dim+2` or
2835 * `dim+3` are derived from the sizes of the names we specify in the
2836 * get_names() function below). Then we loop over all evaluation points and
2837 * fill the respective information: First we extract the primal solution
2838 * variables of density @f$\rho@f$, momentum @f$\rho \mathbf{u}@f$ and energy @f$E@f$,
2839 * then we compute the derived velocity @f$\mathbf u@f$, the pressure @f$p@f$, the
2840 * speed of sound @f$c=\sqrt{\gamma p / \rho}@f$, as well as the Schlieren plot
2841 * showing @f$s = |\nabla \rho|^2@f$ in case it is enabled. (See @ref step_69 "step-69" for
2842 * another example where we create a Schlieren plot.)
2843 *
2844 * @code
2845 *   template <int dim>
2846 *   void EulerProblem<dim>::Postprocessor::evaluate_vector_field(
2847 *   const DataPostprocessorInputs::Vector<dim> &inputs,
2848 *   std::vector<Vector<double>> &computed_quantities) const
2849 *   {
2850 *   const unsigned int n_evaluation_points = inputs.solution_values.size();
2851 *  
2852 *   if (do_schlieren_plot == true)
2853 *   Assert(inputs.solution_gradients.size() == n_evaluation_points,
2854 *   ExcInternalError());
2855 *  
2855 *  
2856 *   Assert(computed_quantities.size() == n_evaluation_points,
2857 *   ExcInternalError());
2858 *   Assert(inputs.solution_values[0].size() == dim + 2, ExcInternalError());
2859 *   Assert(computed_quantities[0].size() ==
2860 *   dim + 2 + (do_schlieren_plot == true ? 1 : 0),
2861 *   ExcInternalError());
2862 *  
2863 *   for (unsigned int p = 0; p < n_evaluation_points; ++p)
2864 *   {
2865 *   Tensor<1, dim + 2> solution;
2866 *   for (unsigned int d = 0; d < dim + 2; ++d)
2867 *   solution[d] = inputs.solution_values[p](d);
2868 *  
2869 *   const double density = solution[0];
2870 *   const Tensor<1, dim> velocity = euler_velocity<dim>(solution);
2871 *   const double pressure = euler_pressure<dim>(solution);
2872 *  
2873 *   for (unsigned int d = 0; d < dim; ++d)
2874 *   computed_quantities[p](d) = velocity[d];
2875 *   computed_quantities[p](dim) = pressure;
2876 *   computed_quantities[p](dim + 1) = std::sqrt(gamma * pressure / density);
2877 *  
2878 *   if (do_schlieren_plot == true)
2879 *   computed_quantities[p](dim + 2) =
2880 *   inputs.solution_gradients[p][0] * inputs.solution_gradients[p][0];
2881 *   }
2882 *   }
2883 *  
2884 *  
2885 *  
2886 *   template <int dim>
2887 *   std::vector<std::string> EulerProblem<dim>::Postprocessor::get_names() const
2888 *   {
2889 *   std::vector<std::string> names;
2890 *   for (unsigned int d = 0; d < dim; ++d)
2891 *   names.emplace_back("velocity");
2892 *   names.emplace_back("pressure");
2893 *   names.emplace_back("speed_of_sound");
2894 *  
2895 *   if (do_schlieren_plot == true)
2896 *   names.emplace_back("schlieren_plot");
2897 *  
2898 *   return names;
2899 *   }
2900 *  
2901 *  
2902 *  
2903 * @endcode
2904 *
2905 * For the interpretation of quantities, we have scalar
2906 * pressure, speed of sound, and the Schlieren plot, and vectors for the
2907 * velocity.
2908 *
2909 * @code
2910 *   template <int dim>
2911 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
2912 *   EulerProblem<dim>::Postprocessor::get_data_component_interpretation() const
2913 *   {
2914 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
2915 *   interpretation;
2916 *   for (unsigned int d = 0; d < dim; ++d)
2917 *   interpretation.push_back(
2918 *   DataComponentInterpretation::component_is_part_of_vector);
2919 *   interpretation.push_back(DataComponentInterpretation::component_is_scalar);
2920 *   interpretation.push_back(DataComponentInterpretation::component_is_scalar);
2921 *  
2922 *   if (do_schlieren_plot == true)
2923 *   interpretation.push_back(
2924 *   DataComponentInterpretation::component_is_scalar);
2925 *  
2926 *   return interpretation;
2927 *   }
2928 *  
2929 *  
2930 *  
2931 * @endcode
2932 *
2933 * With respect to the necessary update flags, we only need the values for
2934 * all quantities but the Schlieren plot, which is based on the density
2935 * gradient.
2936 *
2937 * @code
2938 *   template <int dim>
2939 *   UpdateFlags EulerProblem<dim>::Postprocessor::get_needed_update_flags() const
2940 *   {
2941 *   if (do_schlieren_plot == true)
2942 *   return update_values | update_gradients;
2943 *   else
2944 *   return update_values;
2945 *   }
2946 *  
2947 *  
2948 *  
2949 * @endcode
2950 *
2951 * The constructor for this class is unsurprising: We set up a parallel
2952 * triangulation based on the `MPI_COMM_WORLD` communicator, a vector finite
2953 * element with `dim+2` components for density, momentum, and energy, a
2954 * high-order mapping of the same degree as the underlying finite element,
2955 * and initialize the time and time step to zero.
2956 *
2957 * @code
2958 *   template <int dim>
2959 *   EulerProblem<dim>::EulerProblem()
2960 *   : pcout(std::cout, Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
2961 *   #ifdef DEAL_II_WITH_P4EST
2962 *   , triangulation(MPI_COMM_WORLD)
2963 *   #endif
2964 *   , fe(FE_DGQ<dim>(fe_degree) ^ (dim + 2))
2965 *   , mapping(fe_degree)
2966 *   , dof_handler(triangulation)
2967 *   , timer(pcout, TimerOutput::never, TimerOutput::wall_times)
2968 *   , euler_operator(timer)
2969 *   , time(0)
2970 *   , time_step(0)
2971 *   {}
2972 *  
2973 *  
2974 *  
2975 * @endcode
2976 *
2977 * As a mesh, this tutorial program implements two options, depending on the
2978 * global variable `testcase`: For the analytical variant (`testcase==0`),
2979 * the domain is @f$(0, 10) \times (-5, 5)@f$, with Dirichlet boundary
2980 * conditions (inflow) all around the domain. For `testcase==1`, we set the
2981 * domain to a cylinder in a rectangular box, derived from the flow past
2982 * cylinder testcase for incompressible viscous flow by Sch&auml;fer and
2983 * Turek (1996). Here, we have a larger variety of boundaries. The inflow
2984 * part at the left of the channel is given the inflow type, for which we
2985 * choose a constant inflow profile, whereas we set a subsonic outflow at
2986 * the right. For the boundary around the cylinder (boundary id equal to 2)
2987 * as well as the channel walls (boundary id equal to 3) we use the wall
2988 * boundary type, which is no-normal flow. Furthermore, for the 3d cylinder
2989 * we also add a gravity force in vertical direction. Having the base mesh
2990 * in place (including the manifolds set by
2991 * GridGenerator::channel_with_cylinder()), we can then perform the
2992 * specified number of global refinements, create the unknown numbering from
2993 * the DoFHandler, and hand the DoFHandler and Mapping objects to the
2994 * initialization of the EulerOperator.
2995 *
2996 * @code
2997 *   template <int dim>
2998 *   void EulerProblem<dim>::make_grid_and_dofs()
2999 *   {
3000 *   switch (testcase)
3001 *   {
3002 *   case 0:
3003 *   {
3004 *   Point<dim> lower_left;
3005 *   for (unsigned int d = 1; d < dim; ++d)
3006 *   lower_left[d] = -5;
3007 *  
3008 *   Point<dim> upper_right;
3009 *   upper_right[0] = 10;
3010 *   for (unsigned int d = 1; d < dim; ++d)
3011 *   upper_right[d] = 5;
3012 *  
3013 *   GridGenerator::hyper_rectangle(triangulation,
3014 *   lower_left,
3015 *   upper_right);
3016 *   triangulation.refine_global(2);
3017 *  
3018 *   euler_operator.set_inflow_boundary(
3019 *   0, std::make_unique<ExactSolution<dim>>(0));
3020 *  
3021 *   break;
3022 *   }
3023 *  
3024 *   case 1:
3025 *   {
3026 *   GridGenerator::channel_with_cylinder(
3027 *   triangulation, 0.03, 1, 0, true);
3028 *  
3029 *   euler_operator.set_inflow_boundary(
3030 *   0, std::make_unique<ExactSolution<dim>>(0));
3031 *   euler_operator.set_subsonic_outflow_boundary(
3032 *   1, std::make_unique<ExactSolution<dim>>(0));
3033 *  
3034 *   euler_operator.set_wall_boundary(2);
3035 *   euler_operator.set_wall_boundary(3);
3036 *  
3037 *   if (dim == 3)
3038 *   euler_operator.set_body_force(
3039 *   std::make_unique<Functions::ConstantFunction<dim>>(
3040 *   std::vector<double>({0., 0., -0.2})));
3041 *  
3042 *   break;
3043 *   }
3044 *  
3045 *   default:
3046 *   DEAL_II_NOT_IMPLEMENTED();
3047 *   }
3048 *  
3049 *   triangulation.refine_global(n_global_refinements);
3050 *  
3051 *   dof_handler.distribute_dofs(fe);
3052 *  
3053 *   euler_operator.reinit(mapping, dof_handler);
3054 *   euler_operator.initialize_vector(solution);
3055 *  
3056 * @endcode
3057 *
3058 * In the following, we output some statistics about the problem. Because we
3059 * often end up with quite large numbers of cells or degrees of freedom, we
3060 * would like to print them with a comma to separate each set of three
3061 * digits. This can be done via "locales", although the way this works is
3062 * not particularly intuitive. @ref step_32 "step-32" explains this in slightly more
3063 * detail.
3064 *
3065 * @code
3066 *   std::locale s = pcout.get_stream().getloc();
3067 *   pcout.get_stream().imbue(std::locale(""));
3068 *   pcout << "Number of degrees of freedom: " << dof_handler.n_dofs()
3069 *   << " ( = " << (dim + 2) << " [vars] x "
3070 *   << triangulation.n_global_active_cells() << " [cells] x "
3071 *   << Utilities::pow(fe_degree + 1, dim) << " [dofs/cell/var] )"
3072 *   << std::endl;
3073 *   pcout.get_stream().imbue(s);
3074 *   }
3075 *  
3076 *  
3077 *  
3078 * @endcode
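 *
 * As a minimal standalone sketch of the locale mechanism (independent of
 * the deal.II stream classes; the number printed is arbitrary):
 *
 * @code
 *   #include <iostream>
 *   #include <locale>
 *  
 *   int main()
 *   {
 *     const std::locale previous = std::cout.getloc();
 *     std::cout.imbue(std::locale("")); // user default; often groups digits
 *     std::cout << 147456 << std::endl; // e.g. "147,456" in an en_US locale
 *     std::cout.imbue(previous);        // restore the original formatting
 *   }
 * @endcode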
3079 *
3080 * For output, we first let the Euler operator compute the errors of the
3081 * numerical results. More precisely, we compute the error against the
3082 * analytical result for the analytical solution case, whereas we compute
3083 * the deviation against the background field with constant density and
3084 * energy and constant velocity in @f$x@f$ direction for the second test case.
3085 *
3086
3087 *
3088 * The next step is to create output. This is similar to what is done in
3089 * @ref step_33 "step-33": We let the postprocessor defined above control most of the
3090 * output, except for the primal field that we write directly. For the
3091 * analytical solution test case, we also perform another projection of the
3092 * analytical solution and print the difference between that field and the
3093 * numerical solution. Once we have defined all quantities to be written, we
3094 * build the patches for output. Similarly to @ref step_65 "step-65", we create a
3095 * high-order VTK output by setting the appropriate flag, which enables us
3096 * to visualize fields of high polynomial degrees. Finally, we call the
3097 * `DataOutInterface::write_vtu_in_parallel()` function to write the result
3098 * to the given file name. This function uses special MPI parallel write
3099 * facilities, which are typically more optimized for parallel file systems
3100 * than the standard library's `std::ofstream` variants used in most other
3101 * tutorial programs. A particularly nice feature of the
3102 * `write_vtu_in_parallel()` function is the fact that it can combine output
3103 * from all MPI ranks into a single file, making it unnecessary to have a
3104 * central record of all such files (namely, the "pvtu" file).
3105 *
3106
3107 *
3108 * For parallel programs, it is often instructive to look at the partitioning
3109 * of cells among processors. To this end, one can pass a vector of numbers
3110 * to DataOut::add_data_vector() that contains as many entries as the
3111 * current processor has active cells; these numbers should then be the
3112 * rank of the processor that owns each of these cells. Such a vector
3113 * could, for example, be obtained from
3114 * GridTools::get_subdomain_association(). On the other hand, on each MPI
3115 * process, DataOut will only read those entries that correspond to locally
3116 * owned cells, and these of course all have the same value: namely, the rank
3117 * of the current process. What is in the remaining entries of the vector
3118 * doesn't actually matter, and so we can just get away with a cheap trick: We
3119 * just fill *all* values of the vector we give to DataOut::add_data_vector()
3120 * with the rank of the current MPI process. The key is that on each process,
3121 * only the entries corresponding to the locally owned cells will be read,
3122 * ignoring the (wrong) values in other entries. The fact that every process
3123 * submits a vector in which the correct subset of entries is correct is all
3124 * that is necessary.
3125 *
3126
3127 *
3128 * @note As of 2023, Visit 3.3.3 can still not deal with higher-order cells.
3129 * Rather, it simply reports that there is no data to show. To view the
3130 * results of this program with Visit, you will want to comment out the
3131 * line that sets `flags.write_higher_order_cells = true;`. On the other
3132 * hand, Paraview is able to understand VTU files with higher order cells
3133 * just fine.
3134 *
3135 * @code
3136 *   template <int dim>
3137 *   void EulerProblem<dim>::output_results(const unsigned int result_number)
3138 *   {
3139 *   const std::array<double, 3> errors =
3140 *   euler_operator.compute_errors(ExactSolution<dim>(time), solution);
3141 *   const std::string quantity_name = testcase == 0 ? "error" : "norm";
3142 *  
3143 *   pcout << "Time:" << std::setw(8) << std::setprecision(3) << time
3144 *   << ", dt: " << std::setw(8) << std::setprecision(2) << time_step
3145 *   << ", " << quantity_name << " rho: " << std::setprecision(4)
3146 *   << std::setw(10) << errors[0] << ", rho * u: " << std::setprecision(4)
3147 *   << std::setw(10) << errors[1] << ", energy:" << std::setprecision(4)
3148 *   << std::setw(10) << errors[2] << std::endl;
3149 *  
3150 *   {
3151 *   TimerOutput::Scope t(timer, "output");
3152 *  
3153 *   Postprocessor postprocessor;
3154 *   DataOut<dim> data_out;
3155 *  
3156 *   DataOutBase::VtkFlags flags;
3157 *   flags.write_higher_order_cells = true;
3158 *   data_out.set_flags(flags);
3159 *  
3160 *   data_out.attach_dof_handler(dof_handler);
3161 *   {
3162 *   std::vector<std::string> names;
3163 *   names.emplace_back("density");
3164 *   for (unsigned int d = 0; d < dim; ++d)
3165 *   names.emplace_back("momentum");
3166 *   names.emplace_back("energy");
3167 *  
3168 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
3169 *   interpretation;
3170 *   interpretation.push_back(
3171 *   DataComponentInterpretation::component_is_scalar);
3172 *   for (unsigned int d = 0; d < dim; ++d)
3173 *   interpretation.push_back(
3174 *   DataComponentInterpretation::component_is_part_of_vector);
3175 *   interpretation.push_back(
3176 *   DataComponentInterpretation::component_is_scalar);
3177 *  
3178 *   data_out.add_data_vector(dof_handler, solution, names, interpretation);
3179 *   }
3180 *   data_out.add_data_vector(solution, postprocessor);
3181 *  
3182 *   LinearAlgebra::distributed::Vector<Number> reference;
3183 *   if (testcase == 0 && dim == 2)
3184 *   {
3185 *   reference.reinit(solution);
3186 *   euler_operator.project(ExactSolution<dim>(time), reference);
3187 *   reference.sadd(-1., 1, solution);
3188 *   std::vector<std::string> names;
3189 *   names.emplace_back("error_density");
3190 *   for (unsigned int d = 0; d < dim; ++d)
3191 *   names.emplace_back("error_momentum");
3192 *   names.emplace_back("error_energy");
3193 *  
3194 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
3195 *   interpretation;
3196 *   interpretation.push_back(
3197 *   DataComponentInterpretation::component_is_scalar);
3198 *   for (unsigned int d = 0; d < dim; ++d)
3199 *   interpretation.push_back(
3200 *   DataComponentInterpretation::component_is_part_of_vector);
3201 *   interpretation.push_back(
3202 *   DataComponentInterpretation::component_is_scalar);
3203 *  
3204 *   data_out.add_data_vector(dof_handler,
3205 *   reference,
3206 *   names,
3207 *   interpretation);
3208 *   }
3209 *  
3210 *   Vector<double> mpi_owner(triangulation.n_active_cells());
3211 *   mpi_owner = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
3212 *   data_out.add_data_vector(mpi_owner, "owner");
3213 *  
3214 *   data_out.build_patches(mapping,
3215 *   fe.degree,
3216 *   DataOut<dim>::curved_inner_cells);
3217 *  
3218 *   const std::string filename =
3219 *   "solution_" + Utilities::int_to_string(result_number, 3) + ".vtu";
3220 *   data_out.write_vtu_in_parallel(filename, MPI_COMM_WORLD);
3221 *   }
3222 *   }
3223 *  
3224 *  
3225 *  
3226 * @endcode
3227 *
3228 * The EulerProblem::run() function puts all pieces together. It starts off
3229 * by calling the function that creates the mesh and sets up data structures,
3230 * and then initializing the time integrator and the two temporary vectors of
3231 * the low-storage integrator. We call these vectors `rk_register_1` and
3232 * `rk_register_2`, and use the first vector to represent the quantity
3233 * @f$\mathbf{r}_i@f$ and the second one for @f$\mathbf{k}_i@f$ in the formulas for
3234 * the Runge--Kutta scheme outlined in the introduction. Before we start the
3235 * time loop, we compute the time step size by the
3236 * `EulerOperator::compute_cell_transport_speed()` function. For reasons of
3237 * comparison, we compare the result obtained there with the minimal mesh
3238 * size and print them to screen. For velocities and speeds of sound close
3239 * to unity as in this tutorial program, the predicted effective mesh size
3240 * will be close, but they could vary if scaling were different.
3241 *
3242 * @code
3243 *   template <int dim>
3244 *   void EulerProblem<dim>::run()
3245 *   {
3246 *   {
3247 *   const unsigned int n_vect_number = VectorizedArray<Number>::size();
3248 *   const unsigned int n_vect_bits = 8 * sizeof(Number) * n_vect_number;
3249 *  
3250 *   pcout << "Running with "
3251 *   << Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD)
3252 *   << " MPI processes" << std::endl;
3253 *   pcout << "Vectorization over " << n_vect_number << ' '
3254 *   << (std::is_same_v<Number, double> ? "doubles" : "floats") << " = "
3255 *   << n_vect_bits << " bits ("
3256 *   << Utilities::System::get_current_vectorization_level() << ')'
3257 *   << std::endl;
3258 *   }
3259 *  
3260 *   make_grid_and_dofs();
3261 *  
3262 *   const LowStorageRungeKuttaIntegrator integrator(lsrk_scheme);
3263 *  
3264 *   LinearAlgebra::distributed::Vector<Number> rk_register_1;
3265 *   LinearAlgebra::distributed::Vector<Number> rk_register_2;
3266 *   rk_register_1.reinit(solution);
3267 *   rk_register_2.reinit(solution);
3268 *  
3269 *   euler_operator.project(ExactSolution<dim>(time), solution);
3270 *  
3271 *   double min_vertex_distance = std::numeric_limits<double>::max();
3272 *   for (const auto &cell : triangulation.active_cell_iterators())
3273 *   if (cell->is_locally_owned())
3274 *   min_vertex_distance =
3275 *   std::min(min_vertex_distance, cell->minimum_vertex_distance());
3276 *   min_vertex_distance =
3277 *   Utilities::MPI::min(min_vertex_distance, MPI_COMM_WORLD);
3278 *  
3279 *   time_step = courant_number * integrator.n_stages() /
3280 *   euler_operator.compute_cell_transport_speed(solution);
3281 *   pcout << "Time step size: " << time_step
3282 *   << ", minimal h: " << min_vertex_distance
3283 *   << ", initial transport scaling: "
3284 *   << 1. / euler_operator.compute_cell_transport_speed(solution)
3285 *   << std::endl
3286 *   << std::endl;
3287 *  
3288 *   output_results(0);
3289 *  
3290 * @endcode
3291 *
3292 * Now we are ready to start the time loop, which we run until the time
3293 * has reached the desired end time. Every 5 time steps, we compute a new
3294 * estimate for the time step -- since the solution is nonlinear, it is
3295 * most effective to adapt the value during the course of the simulation.
3296 * In case the Courant number was chosen too aggressively, the simulation
3297 * will typically blow up with time step NaN, so that is easy to detect
3298 * here. One thing to note is that roundoff errors might propagate to the
3299 * leading digits due to an interaction of slightly different time step
3300 * selections that in turn lead to slightly different solutions. To
3301 * decrease this sensitivity, it is common practice to round
3302 * or truncate the time step size to a few digits, e.g. 3 in this case. In
3303 * case the current time is near the prescribed 'tick' value for output
3304 * (e.g. 0.02), we also write the output. After the end of the time loop,
3305 * we summarize the computation by printing some statistics, which is
3306 * mostly done by the TimerOutput::print_wall_time_statistics() function.
3307 *
3308 * @code
3309 *   unsigned int timestep_number = 0;
3310 *  
3311 *   while (time < final_time - 1e-12)
3312 *   {
3313 *   ++timestep_number;
3314 *   if (timestep_number % 5 == 0)
3315 *   time_step =
3316 *   courant_number * integrator.n_stages() /
3317 *   Utilities::truncate_to_n_digits(
3318 *   euler_operator.compute_cell_transport_speed(solution), 3);
3319 *  
3320 *   {
3321 *   TimerOutput::Scope t(timer, "rk time stepping total");
3322 *   integrator.perform_time_step(euler_operator,
3323 *   time,
3324 *   time_step,
3325 *   solution,
3326 *   rk_register_1,
3327 *   rk_register_2);
3328 *   }
3329 *  
3330 *   time += time_step;
3331 *  
3332 *   if (static_cast<int>(time / output_tick) !=
3333 *   static_cast<int>((time - time_step) / output_tick) ||
3334 *   time >= final_time - 1e-12)
3335 *   output_results(
3336 *   static_cast<unsigned int>(std::round(time / output_tick)));
3337 *   }
3338 *  
3339 *   timer.print_wall_time_statistics(MPI_COMM_WORLD);
3340 *   pcout << std::endl;
3341 *   }
3342 *  
3343 *   } // namespace Euler_DG
3344 *  
3345 *  
3346 *  
3347 * @endcode
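 *
 * As a side note on the time step truncation used in the loop above: the
 * program calls Utilities::truncate_to_n_digits(); the following is merely
 * a hypothetical standalone re-implementation to illustrate the effect:
 *
 * @code
 *   #include <cmath>
 *   #include <iostream>
 *  
 *   // Truncate 'x' to 'n' significant digits (illustration only).
 *   double truncate_to_n_digits(const double x, const unsigned int n)
 *   {
 *     if (x == 0.)
 *       return 0.;
 *     const int magnitude =
 *       static_cast<int>(std::floor(std::log10(std::abs(x))));
 *     const double factor =
 *       std::pow(10., static_cast<int>(n) - 1 - magnitude);
 *     return std::trunc(x * factor) / factor;
 *   }
 *  
 *   int main()
 *   {
 *     std::cout << truncate_to_n_digits(0.0096734, 3) << std::endl; // 0.00967
 *   }
 * @endcode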
3348 *
3349 * The main() function is not surprising and follows what was done in all
3350 * previous MPI programs: As we run an MPI program, we need to call `MPI_Init()`
3351 * and `MPI_Finalize()`, which we do via the
3352 * Utilities::MPI::MPI_InitFinalize data structure. Note that we run the program
3353 * only with MPI, and set the thread count to 1.
3354 *
3355 * @code
3356 *   int main(int argc, char **argv)
3357 *   {
3358 *   using namespace Euler_DG;
3359 *   using namespace dealii;
3360 *  
3361 *   Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);
3362 *  
3363 *   try
3364 *   {
3365 *   EulerProblem<dimension> euler_problem;
3366 *   euler_problem.run();
3367 *   }
3368 *   catch (std::exception &exc)
3369 *   {
3370 *   std::cerr << std::endl
3371 *   << std::endl
3372 *   << "----------------------------------------------------"
3373 *   << std::endl;
3374 *   std::cerr << "Exception on processing: " << std::endl
3375 *   << exc.what() << std::endl
3376 *   << "Aborting!" << std::endl
3377 *   << "----------------------------------------------------"
3378 *   << std::endl;
3379 *  
3380 *   return 1;
3381 *   }
3382 *   catch (...)
3383 *   {
3384 *   std::cerr << std::endl
3385 *   << std::endl
3386 *   << "----------------------------------------------------"
3387 *   << std::endl;
3388 *   std::cerr << "Unknown exception!" << std::endl
3389 *   << "Aborting!" << std::endl
3390 *   << "----------------------------------------------------"
3391 *   << std::endl;
3392 *   return 1;
3393 *   }
3394 *  
3395 *   return 0;
3396 *   }
3397 * @endcode
3398<a name="step_67-Results"></a><h1>Results</h1>
3399
3400
3401<a name="step_67-Programoutput"></a><h3>Program output</h3>
3402
3403
3404Running the program with the default settings on a machine with 40 processes
3405produces the following output:
3406@code
3407Running with 40 MPI processes
3408Vectorization over 8 doubles = 512 bits (AVX512)
3409Number of degrees of freedom: 147,456 ( = 4 [vars] x 1,024 [cells] x 36 [dofs/cell/var] )
3410Time step size: 0.00689325, minimal h: 0.3125, initial transport scaling: 0.102759
3411
3412Time: 0, dt: 0.0069, error rho: 2.76e-07, rho * u: 1.259e-06, energy: 2.987e-06
3413Time: 1.01, dt: 0.0069, error rho: 1.37e-06, rho * u: 2.252e-06, energy: 4.153e-06
3414Time: 2.01, dt: 0.0069, error rho: 1.561e-06, rho * u: 2.43e-06, energy: 4.493e-06
3415Time: 3.01, dt: 0.0069, error rho: 1.714e-06, rho * u: 2.591e-06, energy: 4.762e-06
3416Time: 4.01, dt: 0.0069, error rho: 1.843e-06, rho * u: 2.625e-06, energy: 4.985e-06
3417Time: 5.01, dt: 0.0069, error rho: 1.496e-06, rho * u: 1.961e-06, energy: 4.142e-06
3418Time: 6, dt: 0.0083, error rho: 1.007e-06, rho * u: 7.119e-07, energy: 2.972e-06
3419Time: 7, dt: 0.0095, error rho: 9.096e-07, rho * u: 3.786e-07, energy: 2.626e-06
3420Time: 8, dt: 0.0096, error rho: 8.439e-07, rho * u: 3.338e-07, energy: 2.43e-06
3421Time: 9, dt: 0.0096, error rho: 7.822e-07, rho * u: 2.984e-07, energy: 2.248e-06
3422Time: 10, dt: 0.0096, error rho: 7.231e-07, rho * u: 2.666e-07, energy: 2.074e-06
3423
3424+-------------------------------------------+------------------+------------+------------------+
3425| Total wallclock time elapsed | 2.249s 30 | 2.249s | 2.249s 8 |
3426| | | |
3427| Section | no. calls | min time rank | avg time | max time rank |
3428+-------------------------------------------+------------------+------------+------------------+
3429| compute errors | 11 | 0.008066s 13 | 0.00952s | 0.01041s 20 |
3430| compute transport speed | 258 | 0.01012s 13 | 0.05392s | 0.08574s 25 |
3431| output | 11 | 0.9597s 13 | 0.9613s | 0.9623s 6 |
3432| rk time stepping total | 1283 | 0.9827s 25 | 1.015s | 1.06s 13 |
3433| rk_stage - integrals L_h | 6415 | 0.8803s 26 | 0.9198s | 0.9619s 14 |
3434| rk_stage - inv mass + vec upd | 6415 | 0.05677s 15 | 0.06487s | 0.07597s 13 |
3435+-------------------------------------------+------------------+------------+------------------+
3436@endcode
3437
3438The program output shows that all errors are small. This is due to the fact
3439that we use a relatively fine mesh of @f$32^2@f$ cells with polynomials of degree
34405 for a solution that is smooth. An interesting pattern shows for the time
3441step size: whereas it is 0.0069 up to time 5, it increases to 0.0096 for later
3442times. The step size increases once the vortex with some motion on top of
3443the speed of sound (and thus faster propagation) leaves the computational
3444domain between times 5 and 6.5. After that point, the flow is simply uniform
3445in the same direction, and the maximum velocity of the gas is reduced
3446compared to the previous state where the uniform velocity was overlaid
3447by the vortex. Our time step formula recognizes this effect.
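
As a quick cross-check of these numbers (assuming the default Courant number
@f$\mathrm{Cr} = 0.15/p^{1.5}@f$ set near the top of the program), the initial
time step can be recomputed from the printed "initial transport scaling":
@f[
\Delta t = \mathrm{Cr}\, n_\mathrm{stages} \cdot 0.102759
         = \frac{0.15}{5^{1.5}} \cdot 5 \cdot 0.102759 \approx 0.0069 ,
@f]
in agreement with the reported time step size of 0.00689325.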
3448
3449The final block of output shows detailed information about the timing
3450of individual parts of the program; it breaks this down by showing the time
3451taken by the fastest and the slowest processor, and the average -- this is
3452often useful in very large computations to find whether there are processors
3453that are consistently overheated (and thus are throttling their clock speed)
3454or consistently
3455slow for other reasons.
3456The summary shows that 1283 time steps have been performed
3457in 1.02 seconds (looking at the average time among all MPI processes), while
3458the output of 11 files has taken additional 0.96 seconds. Broken down per time
3459step and into the five Runge--Kutta stages, the compute time per evaluation is
34600.16 milliseconds. This high performance is typical of matrix-free evaluators
3461and a reason why explicit time integration is very competitive against
3462implicit solvers, especially for large-scale simulations. The breakdown of
3463computational times shows that the evaluation of the integrals in
3464@f$\mathcal L_h@f$ contributes with around 0.92 seconds and the application
3465of the inverse mass matrix and the vector updates with 0.065 seconds.
3466Furthermore, the estimation of the transport speed for the time step size
3467contributes with another 0.05 seconds of compute time.
3468
3469If we use three more levels of global refinement and 9.4 million DoFs in total,
3470the final statistics are as follows (again with the modified Lax--Friedrichs
3471flux, @f$p=5@f$, and the same machine):
3472@code
3473+-------------------------------------------+------------------+------------+------------------+
3474| Total wallclock time elapsed | 244.9s 12 | 244.9s | 244.9s 34 |
3475| | | |
3476| Section | no. calls | min time rank | avg time | max time rank |
3477+-------------------------------------------+------------------+------------+------------------+
3478| compute errors | 11 | 0.4239s 12 | 0.4318s | 0.4408s 9 |
3479| compute transport speed | 2053 | 3.962s 12 | 6.727s | 10.12s 7 |
3480| output | 11 | 30.35s 12 | 30.36s | 30.37s 9 |
3481| rk time stepping total | 10258 | 201.7s 7 | 205.1s | 207.8s 12 |
3482| rk_stage - integrals L_h | 51290 | 121.3s 6 | 126.6s | 136.3s 16 |
3483| rk_stage - inv mass + vec upd | 51290 | 66.19s 16 | 77.52s | 81.84s 10 |
3484+-------------------------------------------+------------------+------------+------------------+
3485@endcode
3486
3487Per time step, the solver now takes 0.02 seconds, about 25 times as long as
3488before. Given that the problem involves 64 times as many unknowns, the
3489increase is not
3490surprising. Since we also do 8 times as many time steps, the compute time
3491should in theory increase by a factor of 512. The actual increase is 205 s /
34921.02 s = 202, i.e., slightly below the theoretical factor. The increase is
3493not uniform over the parts of the program, though: the time spent in the
3494evaluation of the integrals in @f$\mathcal L_h@f$ went up
3496from 0.92 seconds to 127 seconds, i.e., it increases with a factor of 138. On
3497the other hand, the cost for the application of the inverse mass matrix
3498and the vector updates, which went from 0.065 seconds to 77.5 seconds,
3499has increased by a factor of 1195. The increase is more than the
3500theoretical factor of 512 because the operation is limited by the bandwidth
3501from RAM memory for the larger size while for the smaller size, all vectors
3502fit into at least the outer-level caches. Since the inverse mass matrix and
3503vector updates perform very little arithmetic per byte loaded, their
3504performance is dictated by how quickly the vectors can be streamed from
3505memory, and the share of run time of this memory-limited
3508part can also reach 70%. If we compute a throughput number in terms of DoFs
3509processed per second and Runge--Kutta stage, we obtain
3510@f[ \text{throughput} = \frac{n_\mathrm{time steps} n_\mathrm{stages}
3511n_\mathrm{dofs}}{t_\mathrm{compute}} = \frac{10258 \cdot 5 \cdot
35129.4\,\text{MDoFs}}{205s} = 2360\, \text{MDoFs/s} @f] This throughput number is
3513very high, given that simply copying one vector to another one runs at
3514only around 10,000 MDoFs/s.
3515
3516With one more level of global refinement (37.7 million DoFs), the overall
3517simulation time is 2196 seconds, with 1978 seconds spent in the time
3518stepping. The increase in run time is a factor of 9.3 for the L_h operator
3519(1179 versus 127 seconds) and a factor of 10.3 for the inverse mass matrix and
3520vector updates (797 vs 77.5 seconds). The reason for this non-optimal increase
3521in run time can be traced back to cache effects on the given hardware (with 40
3522MB of L2 cache and 55 MB of L3 cache): While not all of the relevant data fits
3523into caches for 9.4 million DoFs (one vector takes 75 MB and we hold three
3524vectors plus some additional data in MatrixFree), there is capacity for one and
3525a half vectors nonetheless. Given that modern caches are more sophisticated
3526than a naive least-recently-used strategy (where we would have little re-use
3527as the data is used in a streaming fashion), we can assume that a sizeable
3528fraction of data can indeed be delivered from caches at 9.4 million DoFs.
3529For the larger case, even with optimal caching, hardly more than 10 percent
3530of the data would fit into caches, with an associated loss in performance.
3531
3532
3533<a name="step_67-Convergenceratesfortheanalyticaltestcase"></a><h3>Convergence rates for the analytical test case</h3>
3534
3535
3536If we run the analytical test case on a sequence of globally refined meshes
3537and measure the error in the momentum variable, we obtain the following
3538results for the modified Lax--Friedrichs flux:
3539
3540<table align="center" class="doxtable">
3541 <tr>
3542 <th>&nbsp;</th>
3543 <th colspan="3"><i>p</i>=2</th>
3544 <th colspan="3"><i>p</i>=3</th>
3545 <th colspan="3"><i>p</i>=5</th>
3546 </tr>
3547 <tr>
3548 <th>n_cells</th>
3549 <th>n_dofs</th>
3550 <th>error mom</th>
3551 <th>rate</th>
3552 <th>n_dofs</th>
3553 <th>error mom</th>
3554 <th>rate</th>
3555 <th>n_dofs</th>
3556 <th>error mom</th>
3557 <th>rate</th>
3558 </tr>
3559 <tr>
3560 <td align="right">16</td>
3561 <td>&nbsp;</td>
3562 <td>&nbsp;</td>
3563 <td>&nbsp;</td>
3564 <td>&nbsp;</td>
3565 <td>&nbsp;</td>
3566 <td>&nbsp;</td>
3567 <td align="right">2,304</td>
3568 <td align="center">1.373e-01</td>
3569 <td>&nbsp;</td>
3570 </tr>
3571 <tr>
3572 <td align="right">64</td>
3573 <td>&nbsp;</td>
3574 <td>&nbsp;</td>
3575 <td>&nbsp;</td>
3576 <td align="right">4,096</td>
3577 <td align="center">9.130e-02</td>
3578 <td>&nbsp;</td>
3579 <td align="right">9,216</td>
3580 <td align="center">8.899e-03</td>
3581 <td>3.94</td>
3582 </tr>
3583 <tr>
3584 <td align="right">256</td>
3585 <td align="right">9,216</td>
3586 <td align="center">5.577e-02</td>
3587 <td>&nbsp;</td>
3588 <td align="right">16,384</td>
3589 <td align="center">7.381e-03</td>
3590 <td>3.64</td>
3591 <td align="right">36,864</td>
3592 <td align="center">2.082e-04</td>
3593 <td>5.42</td>
3594 </tr>
3595 <tr>
3596 <td align="right">1024</td>
3597 <td align="right">36,864</td>
3598 <td align="center">4.724e-03</td>
3599 <td>3.56</td>
3600 <td align="right">65,536</td>
3601 <td align="center">3.072e-04</td>
3602 <td>4.59</td>
3603 <td align="right">147,456</td>
3604 <td align="center">2.625e-06</td>
3605 <td>6.31</td>
3606 </tr>
3607 <tr>
3608 <td align="right">4096</td>
3609 <td align="right">147,456</td>
3610 <td align="center">6.205e-04</td>
3611 <td>2.92</td>
3612 <td align="right">262,144</td>
3613 <td align="center">1.880e-05</td>
3614 <td>4.03</td>
3615 <td align="right">589,824</td>
3616 <td align="center">3.268e-08</td>
3617 <td>6.33</td>
3618 </tr>
3619 <tr>
3620 <td align="right">16,384</td>
3621 <td align="right">589,824</td>
3622 <td align="center">8.279e-05</td>
3623 <td>2.91</td>
3624 <td align="right">1,048,576</td>
3625 <td align="center">1.224e-06</td>
3626 <td>3.94</td>
3627 <td align="right">2,359,296</td>
3628 <td align="center">9.252e-10</td>
3629 <td>5.14</td>
3630 </tr>
3631 <tr>
3632 <td align="right">65,536</td>
3633 <td align="right">2,359,296</td>
3634 <td align="center">1.105e-05</td>
3635 <td>2.91</td>
3636 <td align="right">4,194,304</td>
3637 <td align="center">7.871e-08</td>
3638 <td>3.96</td>
3639 <td align="right">9,437,184</td>
3640 <td align="center">1.369e-10</td>
3641 <td>2.77</td>
3642 </tr>
3643 <tr>
3644 <td align="right">262,144</td>
3645 <td align="right">9,437,184</td>
3646 <td align="center">1.615e-06</td>
3647 <td>2.77</td>
3648 <td align="right">16,777,216</td>
3649 <td align="center">4.961e-09</td>
3650 <td>3.99</td>
3651 <td align="right">37,748,736</td>
3652 <td align="center">7.091e-11</td>
3653 <td>0.95</td>
3654 </tr>
3655</table>
3656
3657If we switch to the Harten-Lax-van Leer flux, the results are as follows:
3658<table align="center" class="doxtable">
3659 <tr>
3660 <th>&nbsp;</th>
3661 <th colspan="3"><i>p</i>=2</th>
3662 <th colspan="3"><i>p</i>=3</th>
3663 <th colspan="3"><i>p</i>=5</th>
3664 </tr>
3665 <tr>
3666 <th>n_cells</th>
3667 <th>n_dofs</th>
3668 <th>error mom</th>
3669 <th>rate</th>
3670 <th>n_dofs</th>
3671 <th>error mom</th>
3672 <th>rate</th>
3673 <th>n_dofs</th>
3674 <th>error mom</th>
3675 <th>rate</th>
3676 </tr>
3677 <tr>
3678 <td align="right">16</td>
3679 <td>&nbsp;</td>
3680 <td>&nbsp;</td>
3681 <td>&nbsp;</td>
3682 <td>&nbsp;</td>
3683 <td>&nbsp;</td>
3684 <td>&nbsp;</td>
3685 <td align="right">2,304</td>
3686 <td align="center">1.339e-01</td>
3687 <td>&nbsp;</td>
3688 </tr>
3689 <tr>
3690 <td align="right">64</td>
3691 <td>&nbsp;</td>
3692 <td>&nbsp;</td>
3693 <td>&nbsp;</td>
3694 <td align="right">4,096</td>
3695 <td align="center">9.037e-02</td>
3696 <td>&nbsp;</td>
3697 <td align="right">9,216</td>
3698 <td align="center">8.849e-03</td>
3699 <td>3.92</td>
3700 </tr>
3701 <tr>
3702 <td align="right">256</td>
3703 <td align="right">9,216</td>
3704 <td align="center">4.204e-02</td>
3705 <td>&nbsp;</td>
3706 <td align="right">16,384</td>
3707 <td align="center">9.143e-03</td>
3708 <td>3.31</td>
3709 <td align="right">36,864</td>
3710 <td align="center">2.501e-04</td>
3711 <td>5.14</td>
3712 </tr>
3713 <tr>
3714 <td align="right">1024</td>
3715 <td align="right">36,864</td>
3716 <td align="center">4.913e-03</td>
3717 <td>3.09</td>
3718 <td align="right">65,536</td>
3719 <td align="center">3.257e-04</td>
3720 <td>4.81</td>
3721 <td align="right">147,456</td>
3722 <td align="center">3.260e-06</td>
3723 <td>6.26</td>
3724 </tr>
3725 <tr>
3726 <td align="right">4096</td>
3727 <td align="right">147,456</td>
3728 <td align="center">7.862e-04</td>
3729 <td>2.64</td>
3730 <td align="right">262,144</td>
3731 <td align="center">1.588e-05</td>
3732 <td>4.36</td>
3733 <td align="right">589,824</td>
3734 <td align="center">2.953e-08</td>
3735 <td>6.79</td>
3736 </tr>
3737 <tr>
3738 <td align="right">16,384</td>
3739 <td align="right">589,824</td>
3740 <td align="center">1.137e-04</td>
3741 <td>2.79</td>
3742 <td align="right">1,048,576</td>
3743 <td align="center">9.400e-07</td>
3744 <td>4.08</td>
3745 <td align="right">2,359,296</td>
3746 <td align="center">4.286e-10</td>
3747 <td>6.11</td>
3748 </tr>
3749 <tr>
3750 <td align="right">65,536</td>
3751 <td align="right">2,359,296</td>
3752 <td align="center">1.476e-05</td>
3753 <td>2.95</td>
3754 <td align="right">4,194,304</td>
3755 <td align="center">5.799e-08</td>
3756 <td>4.02</td>
3757 <td align="right">9,437,184</td>
3758 <td align="center">2.789e-11</td>
3759 <td>3.94</td>
3760 </tr>
3761 <tr>
3762 <td align="right">262,144</td>
3763 <td align="right">9,437,184</td>
3764 <td align="center">2.038e-06</td>
3765 <td>2.86</td>
3766 <td align="right">16,777,216</td>
3767 <td align="center">3.609e-09</td>
3768 <td>4.01</td>
3769 <td align="right">37,748,736</td>
3770 <td align="center">5.730e-11</td>
3771 <td>-1.04</td>
3772 </tr>
3773</table>
3774
3775The tables show that we get optimal @f$\mathcal O\left(h^{p+1}\right)@f$
3776convergence rates for both numerical fluxes. The errors are slightly smaller
3777for the Lax--Friedrichs flux for @f$p=2@f$, but the picture is reversed for
3778@f$p=3@f$; in any case, the differences on this testcase are relatively
3779small.
3780
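
The rates in these tables are obtained by comparing the errors on two
consecutive meshes, whose mesh size @f$h@f$ differs by a factor of two; for
example, the entry 5.42 in the @f$p=5@f$ column of the first table derives
from
@f[
\text{rate} = \log_2\frac{e_{2h}}{e_{h}}
            = \log_2\frac{8.899\cdot 10^{-3}}{2.082\cdot 10^{-4}}
            \approx 5.42 .
@f]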
3781For @f$p=5@f$, we reach the roundoff accuracy of @f$10^{-11}@f$ with both
3782fluxes on the finest grids. Note that the errors are absolute, with a
3783domain length of @f$10^2@f$, so relative errors are below @f$10^{-12}@f$. The HLL flux
3784is somewhat better for the highest degree, which is due to a slight
3785inaccuracy of the Lax--Friedrichs flux: It imposes a Dirichlet-like
3786condition on the solution that leaves the domain, which results in a small
3787artificial reflection at the outflow boundary. Apart from that, we see that
3788the influence of the numerical flux is
3789minor, as the polynomial part inside elements is the main driver of the
3790accuracy. The picture changes once the solution is under-resolved: If we run with, for
3792example the parameters and grid of @ref step_33 "step-33", we get oscillations (which in turn
3793make density negative and make the solution explode) with both fluxes once the
3794high-mass part comes near the boundary, as opposed to the low-order finite
3795volume case (@f$p=0@f$). Thus, any case that leads to shocks in the solution
3796necessitates some form of limiting or artificial dissipation. For another
3797alternative, see the @ref step_69 "step-69" tutorial program.
3798
3799
3800<a name="step_67-Resultsforflowinchannelaroundcylinderin2D"></a><h3>Results for flow in channel around cylinder in 2D</h3>
3801
3802
3803For the test case of the flow around a cylinder in a channel, we need to
3804change the first code line to
3805@code
3806 constexpr unsigned int testcase = 1;
3807@endcode
3808This test case starts with a background field of a constant velocity
3809at Mach number 0.31 in the @f$x@f$ direction. Since the initial condition
3810does not fulfill the no-penetration condition on the cylinder and the
3811channel walls, a pressure wave is emitted at the start of the simulation.
3812The following pictures show the pressure at
3814times 0.1, 0.25, 0.5, and 1.0 (top left to bottom right) for the 2D case with
38155 levels of global refinement, using 102,400 cells with polynomial degree of
38165 and 14.7 million degrees of freedom over all 4 solution variables.
3817We clearly see the discontinuity that
3818propagates slowly in the upstream direction and more quickly in downstream
3819direction in the first snapshot at time 0.1. At time 0.25, the sound wave has
3820reached the top and bottom walls and has been reflected back into the
3821interior. From the different distances of the reflected waves from the
3822lower and upper walls we can see the slight asymmetry of the geometry
3823created by GridGenerator::channel_with_cylinder(), with somewhat more space
3824above the cylinder compared to below. At later times, the picture becomes
3825more chaotic, with many sound waves all over the place.
3826
3827<table align="center" class="doxtable" style="width:85%">
3828 <tr>
3829 <td>
3830 <img src="https://www.dealii.org/images/steps/developer/step-67.pressure_010.png" alt="" width="100%">
3831 </td>
3832 <td>
3833 <img src="https://www.dealii.org/images/steps/developer/step-67.pressure_025.png" alt="" width="100%">
3834 </td>
3835 </tr>
3836 <tr>
3837 <td>
3838 <img src="https://www.dealii.org/images/steps/developer/step-67.pressure_050.png" alt="" width="100%">
3839 </td>
3840 <td>
3841 <img src="https://www.dealii.org/images/steps/developer/step-67.pressure_100.png" alt="" width="100%">
3842 </td>
3843 </tr>
3844</table>
3845
3846In the elevated-surface visualization of the pressure at time 1.0 shown
3847below,
3848we can see the large number
3849of sound waves present in the domain, including the reflections from the
3850walls and the low-pressure region around the cylinder. One can also observe
3851small wiggles in the elevation plot: They appear whenever a sharp wave front
3854travels through elements with high-order polynomials. This effect can be cured
3855by increasing the mesh resolution and is not a sign of instability, but rather
3856the result of the transport accuracy of the high-order DG method.
3857
3858<img src="https://www.dealii.org/images/steps/developer/step-67.pressure_elevated.jpg" alt="" width="40%">
3859
3860If we run the program with degree 2 and 6 levels of global refinements (410k
3861cells, 14.7M unknowns), we get the following evolution of the pressure
3862field over time:
3863
3864@htmlonly
3865<p align="center">
3866 <iframe width="560" height="315" src="https://www.youtube.com/embed/ivxCLiSdQpc"
3867 frameborder="0"
3868 allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
3869 allowfullscreen></iframe>
3870 </p>
3871@endhtmlonly
3872
3873
3874With 2 levels of global refinement with 1,600 cells, the mesh and its
3875partitioning onto 40 MPI processes looks as follows:
3876
3877<img src="https://www.dealii.org/images/steps/developer/step-67.grid-owner.png" alt="" width="70%">
3878
3879When we run the code with 4 levels of global refinements on 40 cores, we get
3880the following output:
3881@code
3882Running with 40 MPI processes
3883Vectorization over 8 doubles = 512 bits (AVX512)
3884Number of degrees of freedom: 3,686,400 ( = 4 [vars] x 25,600 [cells] x 36 [dofs/cell/var] )
3885Time step size: 7.39876e-05, minimal h: 0.001875, initial transport scaling: 0.00110294
3886
3887Time: 0, dt: 7.4e-05, norm rho: 4.17e-16, rho * u: 1.629e-16, energy: 1.381e-15
3888Time: 0.05, dt: 6.3e-05, norm rho: 0.02075, rho * u: 0.03801, energy: 0.08772
3889Time: 0.1, dt: 5.9e-05, norm rho: 0.02211, rho * u: 0.04515, energy: 0.08953
3890Time: 0.15, dt: 5.7e-05, norm rho: 0.02261, rho * u: 0.04592, energy: 0.08967
3891Time: 0.2, dt: 5.8e-05, norm rho: 0.02058, rho * u: 0.04361, energy: 0.08222
3892Time: 0.25, dt: 5.9e-05, norm rho: 0.01695, rho * u: 0.04203, energy: 0.06873
3893Time: 0.3, dt: 5.9e-05, norm rho: 0.01653, rho * u: 0.0401, energy: 0.06604
3894Time: 0.35, dt: 5.7e-05, norm rho: 0.01774, rho * u: 0.04264, energy: 0.0706
3895
3896...
3897
3898Time: 1.95, dt: 5.8e-05, norm rho: 0.01488, rho * u: 0.03923, energy: 0.05185
3899Time: 2, dt: 5.7e-05, norm rho: 0.01432, rho * u: 0.03969, energy: 0.04889
3900
3901+-------------------------------------------+------------------+------------+------------------+
3902| Total wallclock time elapsed | 273.6s 13 | 273.6s | 273.6s 0 |
3903| | | |
3904| Section | no. calls | min time rank | avg time | max time rank |
3905+-------------------------------------------+------------------+------------+------------------+
3906| compute errors | 41 | 0.01112s 35 | 0.0672s | 0.1337s 0 |
3907| compute transport speed | 6914 | 5.422s 35 | 15.96s | 29.99s 1 |
3908| output | 41 | 37.24s 35 | 37.3s | 37.37s 0 |
3909| rk time stepping total | 34564 | 205.4s 1 | 219.5s | 230.1s 35 |
3910| rk_stage - integrals L_h | 172820 | 153.6s 1 | 164.9s | 175.6s 27 |
3911| rk_stage - inv mass + vec upd | 172820 | 47.13s 13 | 53.09s | 64.05s 33 |
3912+-------------------------------------------+------------------+------------+------------------+
3913@endcode
3914
3915The norms shown here are the deviations of the solution variables
3916@f$\rho'@f$, @f$(\rho u)'@f$, and @f$E'@f$ against the background field (namely, the
3917initial condition). The distribution of run time is overall similar as in the
3918previous test case, except for the larger proportion of
3919time spent in @f$\mathcal L_h@f$ as compared to the inverse mass matrix and vector
3920updates. This is because the geometry is deformed and the matrix-free
3921framework needs to load additional arrays for the geometry from memory that
3922are compressed in the affine mesh case.
3923
3924Increasing the number of global refinements to 5, the output becomes:
3925@code
3926Running with 40 MPI processes
3927Vectorization over 8 doubles = 512 bits (AVX512)
3928Number of degrees of freedom: 14,745,600 ( = 4 [vars] x 102,400 [cells] x 36 [dofs/cell/var] )
3929
3930...
3931
3932+-------------------------------------------+------------------+------------+------------------+
3933| Total wallclock time elapsed | 2693s 32 | 2693s | 2693s 23 |
3934| | | |
3935| Section | no. calls | min time rank | avg time | max time rank |
3936+-------------------------------------------+------------------+------------+------------------+
3937| compute errors | 41 | 0.04537s 32 | 0.173s | 0.3489s 0 |
3938| compute transport speed | 13858 | 40.75s 32 | 85.99s | 149.8s 0 |
3939| output | 41 | 153.8s 32 | 153.9s | 154.1s 0 |
3940| rk time stepping total | 69284 | 2386s 0 | 2450s | 2496s 32 |
3941| rk_stage - integrals L_h | 346420 | 1365s 32 | 1574s | 1718s 19 |
3942| rk_stage - inv mass + vec upd | 346420 | 722.5s 10 | 870.7s | 1125s 32 |
3943+-------------------------------------------+------------------+------------+------------------+
3944@endcode
3945
3946The effect on performance is similar to the analytical test case -- in
3947theory, computation times should increase by a factor of 8, but we actually
3948see an increase by a factor of 11 for the time steps (219.5 seconds versus
39492450 seconds). This can again be traced back to caches, with the smaller case
3950mostly fitting into fast cache memory. An interesting effect, typical of
3951programs with a mix of local communication (the integrals @f$\mathcal L_h@f$)
3952and global communication (the computation of the transport speed) with some
3953load imbalance, can be observed by looking at the MPI ranks that encounter the
3954minimal and maximal time of the different phases, respectively: Rank 0 reports the fastest throughput for the "rk time stepping
3955total" part. At the same time, it appears to be slowest for the "compute
3956transport speed" part, almost a factor of 2 slower than the
3957average and almost a factor of 4 compared to the faster rank.
3958Since the latter involves global communication, we can attribute the
3959slowness in this part to the fact that the local Runge--Kutta stages on this
3960rank complete more quickly and it then has to wait until the other ranks
3961catch up. At this point, one can wonder about the reason for this imbalance:
3962The number of cells is almost the same on all MPI processes.
3963However, the matrix-free framework is faster on affine and Cartesian
3964cells, which dominate on the ranks to which the outer parts of the channel
3965are assigned. On the other hand, rank 32, which reports the highest run time
3966for the Runge--Kutta stages, owns the curved cells near the cylinder, for
3967which no data compression is possible. To improve the throughput, we could
3968assign different weights to different cell types when partitioning the
3969parallel::distributed::Triangulation object, or even measure the run time for a
3970few time steps and try to rebalance then.
3971
3972If we increase the number of global refinements to 6 while keeping the
3973polynomial degree at 5, we obtain 409,600 cells and 58.9 million degrees
3974of freedom, with the following output:
3975
3977@code
3980Number of degrees of freedom: 58,982,400 ( = 4 [vars] x 409,600 [cells] x 36 [dofs/cell/var] )
3981
3982...
3983
3984Time: 1.95, dt: 1.4e-05, norm rho: 0.01488, rho * u: 0.03923, energy: 0.05183
3985Time: 2, dt: 1.4e-05, norm rho: 0.01431, rho * u: 0.03969, energy: 0.04887
3986
3987+-------------------------------------------+------------------+------------+------------------+
3988| Total wallclock time elapsed | 2.166e+04s 26 | 2.166e+04s | 2.166e+04s 24 |
3989| | | |
3990| Section | no. calls | min time rank | avg time | max time rank |
3991+-------------------------------------------+------------------+------------+------------------+
3992| compute errors | 41 | 0.1758s 30 | 0.672s | 1.376s 1 |
3993| compute transport speed | 27748 | 321.3s 34 | 678.8s | 1202s 1 |
3994| output | 41 | 616.3s 32 | 616.4s | 616.4s 34 |
3995| rk time stepping total | 138733 | 1.983e+04s 1 | 2.036e+04s | 2.072e+04s 34 |
3996| rk_stage - integrals L_h | 693665 | 1.052e+04s 32 | 1.248e+04s | 1.387e+04s 19 |
3997| rk_stage - inv mass + vec upd | 693665 | 6404s 10 | 7868s | 1.018e+04s 32 |
3998+-------------------------------------------+------------------+------------+------------------+
3999@endcode
4000
4001The norms of the deviations are nearly unchanged compared to before; the
4002overall run time to perform 139k time steps is 20k seconds (5.7 hours) or 7
4003time steps per second -- not so bad for having nearly 60 million
4004unknowns. More throughput could likely be obtained by adding further
4005compute nodes to the simulation.
4006
4007
4008<a name="step_67-Resultsforflowinchannelaroundcylinderin3D"></a><h3>Results for flow in channel around cylinder in 3D</h3>
4009
4010
4011Switching the channel test case to 3D with 3 global refinements, the output is
4012@code
4013Running with 40 MPI processes
4014Vectorization over 8 doubles = 512 bits (AVX512)
4015Number of degrees of freedom: 221,184,000 ( = 5 [vars] x 204,800 [cells] x 216 [dofs/cell/var] )
4016
4017...
4018
4019Time: 1.95, dt: 0.00011, norm rho: 0.01131, rho * u: 0.03056, energy: 0.04091
4020Time: 2, dt: 0.00011, norm rho: 0.0119, rho * u: 0.03142, energy: 0.04425
4021
4022+-------------------------------------------+------------------+------------+------------------+
4023| Total wallclock time elapsed | 1.734e+04s 4 | 1.734e+04s | 1.734e+04s 38 |
4024| | | |
4025| Section | no. calls | min time rank | avg time | max time rank |
4026+-------------------------------------------+------------------+------------+------------------+
4027| compute errors | 41 | 0.6551s 34 | 3.216s | 7.281s 0 |
4028| compute transport speed | 3546 | 160s 34 | 393.2s | 776.9s 0 |
4029| output | 41 | 1350s 34 | 1353s | 1357s 0 |
4030| rk time stepping total | 17723 | 1.519e+04s 0 | 1.558e+04s | 1.582e+04s 34 |
4031| rk_stage - integrals L_h | 88615 | 1.005e+04s 32 | 1.126e+04s | 1.23e+04s 11 |
4032| rk_stage - inv mass + vec upd | 88615 | 3056s 11 | 4322s | 5759s 32 |
4033+-------------------------------------------+------------------+------------+------------------+
4034@endcode
4035
4036The physics are similar to the 2D case, with a slight motion in the z
4037direction due to the gravitational force. The throughput per Runge--Kutta
4038stage in this case is
4039@f[
4040\text{throughput} = \frac{n_\mathrm{time steps} n_\mathrm{stages}
4041n_\mathrm{dofs}}{t_\mathrm{compute}} =
4042\frac{17723 \cdot 5 \cdot 221.2\,\text{M}}{15580s} = 1258\, \text{MDoFs/s}.
4043@f]
4044
4045The throughput is lower than in 2D because each operator evaluation in 3D
4046is more expensive. This is due to over-integration with `degree+2` points and
4047the larger fraction of face integrals (worse volume-to-surface ratio) with
4048more quadrature points per face. If we only consider the inverse mass matrix
4049and vector update part, we record a throughput of 4857 MDoFs/s for the 2D case
4050versus a very similar number for the 3D case, despite the different dimension
4051and polynomial degree, which indicates that this operation is in
4052fact limited by the memory bandwidth.
4053
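
As a rough plausibility check of the bandwidth argument (the access count
is an assumption for illustration): if the inverse mass matrix and vector
update part streams on the order of ten double-precision numbers per
unknown and stage, a rate of 4857 MDoFs/s corresponds to roughly
@f[
4857\cdot 10^{6}\,\tfrac{\text{DoFs}}{\text{s}} \times 10 \times 8\,\text{B}
\approx 390\,\text{GB/s},
@f]
which is on the order of the memory bandwidth of a dual-socket server node.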
4054If we go to four levels of global refinement, we need to increase the number
4055of processes, as the data no longer fits into the RAM memory of a single
4056machine. Also, the time it takes to complete 35k time
4057steps grows accordingly, so we run this case on more nodes (the rank numbers
4058in the table below go up to 193):
4059@code
4062Number of degrees of freedom: 1,769,472,000 ( = 5 [vars] x 1,638,400 [cells] x 216 [dofs/cell/var] )
4063
4064...
4065
4066Time: 1.95, dt: 5.6e-05, norm rho: 0.01129, rho * u: 0.0306, energy: 0.04086
4067Time: 2, dt: 5.6e-05, norm rho: 0.01189, rho * u: 0.03145, energy: 0.04417
4068
4069+-------------------------------------------+------------------+------------+------------------+
4070| Total wallclock time elapsed | 5.396e+04s 151 | 5.396e+04s | 5.396e+04s 0 |
4071| | | |
4072| Section | no. calls | min time rank | avg time | max time rank |
4073+-------------------------------------------+------------------+------------+------------------+
4074| compute errors | 41 | 2.632s 178 | 7.221s | 16.56s 0 |
4075| compute transport speed | 7072 | 714s 193 | 1553s | 3351s 0 |
4076| output | 41 | 8065s 176 | 8070s | 8079s 0 |
4077| rk time stepping total | 35350 | 4.25e+04s 0 | 4.43e+04s | 4.515e+04s 193 |
4078| rk_stage - integrals L_h | 176750 | 2.936e+04s 134 | 3.222e+04s | 3.67e+04s 99 |
4079| rk_stage - inv mass + vec upd | 176750 | 7004s 99 | 1.207e+04s | 1.55e+04s 132 |
4080+-------------------------------------------+------------------+------------+------------------+
4081@endcode
4082Judging from the "rk time stepping total" row of the table, the solver now
4083spends on average 4.43e4 s / 35,350 = 1.25 seconds per time
4084step.
4085
4086
4087<a name="step_67-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
4088
4089
4090A high-performance implementation of a closely related explicit
4091discontinuous Galerkin solver for the acoustic wave equation
4093has been achieved by the <a href="https://github.com/kronbichler/exwave">exwave
4094project</a>.
4099
4110
4111
4112<a name="step_67-Moreadvancednumericalfluxfunctionsandskewsymmetricformulations"></a><h4>More advanced numerical flux functions and skew-symmetric formulations</h4>
4113
4114
4115The modified Lax--Friedrichs flux and the HLL flux used in this program are
4116only two examples of a large body of numerical fluxes proposed for the Euler
4117equations in the literature. Besides the choice of the flux, the accuracy of
4118the integrals deserves attention: The quadrature with `degree+2` points used
4119in this program does not integrate all terms exactly, since the integrands
4120combine
4131nonlinearities and higher-degree content from curved cells. A way out of this
4132dilemma are skew-symmetric formulations, of which we only sketch a
4133simple variant. Skew symmetry means that switching the role of the solution
4134@f$\mathbf{w}@f$ and the test function @f$\mathbf{v}@f$ in the weak form
4135changes the sign of the operator up to boundary terms; a discrete operator
4136with this property remains stable even when the integrals are only computed
4137approximately, an idea that is also behind the stability proofs obtained
4144by special skew-symmetric finite difference schemes.
4145
4146A related implementation is available at
4147https://github.com/kronbichler/advection_miniapp, where a
4148skew-symmetric discretization is demonstrated for the advection
4149equation.
4150
4151<a name="step_67-Equippingthecodeforsupersoniccalculations"></a><h4>Equipping the code for supersonic calculations</h4>
4152
4153
4154The boundary conditions implemented in this tutorial assume subsonic flow,
4155i.e., Mach numbers below one. At a supersonic outflow boundary, all
4156characteristics leave the domain, so no exterior information may be imposed
4157on the solution.
4158
4163In terms of the notation of the introduction, such a boundary is of Neumann
4164type: The exterior state @f$\mathbf{w}^+@f$ is simply set to the interior
4167data,
4168@f[
4169\mathbf{w}^+ = \mathbf{w}^- = \begin{pmatrix} \rho^-\\
4170(\rho \mathbf u)^- \\ E^-\end{pmatrix} \quad
4171 \text{(Neumann)}.
4172@f]
4173
4174In the code, this amounts to the additional branch
4175
@code
4176 else if (supersonic_outflow)
4178 	{
4179 w_p = w_m;
4180 at_outflow = true;
4181 }
4182@endcode
4183in the `local_apply_boundary_face()` function.
4184
4185<a name="step_67-ExtensiontothelinearizedEulerequations"></a><h4>Extension to the linearized Euler equations</h4>
4186
4187
4188A next natural step is to extend the solver to the linearized Euler
4189equations, which describe the evolution of small perturbations around a
4190background state, i.e., a given density, velocity and energy (or pressure)
4191field. The background state could be given analytically (evaluated at the
4198quadrature points) or by a vector similar to the solution. Based on that
4199vector, we would create an additional FEEvaluation object to read from it and
4200provide the values of the field at quadrature points. If the background
4201velocity is zero and the density is constant, the linearized Euler equations
4202further simplify to the acoustic wave equation.
4203
4205In the setting of wave propagation, absorbing boundary conditions that let
4206waves leave the computational domain with minimal artificial reflection --
4207for example the
4212<a
4213href="https://en.wikipedia.org/wiki/Perfectly_matched_layer">perfectly
4214matched layer</a>
4215-- are common.
4216
4217
4218<a name="step_67-ExtensiontothecompressibleNavierStokesequations"></a><h4>Extension to the compressible Navier-Stokes equations</h4>
4219
4220
4221The solver presented here can also be extended to the compressible
4222Navier--Stokes equations by adding viscous terms. To keep most of the
4223performance obtained
4224here despite the additional cost of elliptic terms, e.g. via an interior
4225penalty method, one can switch the basis from FE_DGQ to FE_DGQHermite like in
4226the @ref step_59 "step-59" tutorial program.
4227
4228
4229<a name="step_67-Usingcellcentricloopsandsharedmemory"></a><h4>Using cell-centric loops and shared memory</h4>
4230
4231
4232In this tutorial, we used face-centric loops. Here, cell and face integrals
4233are treated in separate loops, resulting in multiple writing accesses into
4234the result vector, which is relatively expensive on modern hardware since
4235writing operations generally result also in an implicit read operation.
4236Element-centric loops, on the other hand, process a cell and in direct succession
4237all its 2d faces. Although this kind of loop implies that fluxes have
4238to be computed twice (for each side of an interior face), the fact that the
4239result vector has to be touched only once -- and the fact that the resulting
4240algorithm is free of race conditions and as such perfectly suitable for
4241shared memory - already give a performance boost. If you are interested in these
4242advanced topics, you can take a look at @ref step_76 "step-76" where we take the present
4243tutorial and modify it so that we can use these features.
4244 *
4245 *
4246<a name="step_67-PlainProg"></a>
4247<h1> The plain program</h1>
4248@include "step-67.cc"
4249*/