deal.II version GIT relicensing-1721-g8100761196 2024-08-31 12:30:00+00:00
step-67.h
895 *   stage_5_order_4, /* Kennedy, Carpenter, Lewis, 2000 */
896 *   stage_7_order_4, /* Tselios, Simos, 2007 */
897 *   stage_9_order_5, /* Kennedy, Carpenter, Lewis, 2000 */
898 *   };
899 *   constexpr LowStorageRungeKuttaScheme lsrk_scheme = stage_5_order_4;
900 *  
901 * @endcode
902 *
903 * Eventually, we select a detail of the spatial discretization, namely the
904 * numerical flux (Riemann solver) at the faces between cells. For this
905 * program, we have implemented a modified variant of the Lax--Friedrichs
906 * flux and the Harten--Lax--van Leer (HLL) flux.
907 *
908 * @code
909 *   enum EulerNumericalFlux
910 *   {
911 *   lax_friedrichs_modified,
912 *   harten_lax_vanleer,
913 *   };
914 *   constexpr EulerNumericalFlux numerical_flux_type = lax_friedrichs_modified;
915 *  
916 *  
917 *  
918 * @endcode
919 *
920 *
921 * <a name="step_67-Equationdata"></a>
922 * <h3>Equation data</h3>
923 *
924
925 *
926 * We now define a class with the exact solution for the test case 0 and one
927 * with a background flow field for test case 1 of the channel. Given that
928 * the Euler equations are a problem with @f$d+2@f$ equations in @f$d@f$ dimensions,
929 * we need to tell the Function base class about the correct number of
930 * components.
931 *
932 * @code
933 *   template <int dim>
934 *   class ExactSolution : public Function<dim>
935 *   {
936 *   public:
937 *   ExactSolution(const double time)
938 *   : Function<dim>(dim + 2, time)
939 *   {}
940 *  
941 *   virtual double value(const Point<dim> &p,
942 *   const unsigned int component = 0) const override;
943 *   };
944 *  
945 *  
946 *  
947 * @endcode
948 *
949 * As far as the actual function implemented is concerned, the analytical
950 * test case is an isentropic vortex case (see e.g. the book by Hesthaven
951 * and Warburton, Example 6.1 in Section 6.6 on page 209) which fulfills the
952 * Euler equations with zero force term on the right hand side. Given that
953 * definition, we return either the density, the momentum, or the energy
954 * depending on which component is requested. Note that the original
955 * definition of the density involves the @f$\frac{1}{\gamma -1}@f$-th power of
956 * some expression. Since `std::pow()` has pretty slow implementations on
957 * some systems, we replace it by logarithm followed by exponentiation (of
958 * base 2), which is mathematically equivalent but usually much better
959 * optimized. This formula might lose accuracy in the last digits
960 * for very small numbers compared to `std::pow()`, but we are happy with
961 * it anyway, since small numbers map to data close to 1.
962 *
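*
* As a small standalone illustration of this trick (not part of the actual
* step-67 code; the function name `pow_via_exp2` is made up for this sketch),
* the identity used below is simply
*
* @code
*   #include <cmath>
*
*   // Compute x^a for x > 0 as 2^(a * log2(x)), mirroring the combination of
*   // std::log2() and std::exp2() used in ExactSolution::value() below.
*   double pow_via_exp2(const double x, const double a)
*   {
*     return std::exp2(a * std::log2(x));
*   }
* @endcode
*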
963
964 *
965 * For the channel test case, we simply select a density of 1, a velocity of
966 * 0.4 in @f$x@f$ direction and zero in the other directions, and an energy that
967 * corresponds to a speed of sound of 1.3 measured against the background
968 * velocity field, computed from the relation @f$E = \frac{c^2}{\gamma (\gamma
969 * -1)} + \frac 12 \rho \|u\|^2@f$.
970 *
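*
* As a quick check of the constant that appears for the energy in the code
* below, and assuming the value @f$\gamma = 1.4@f$ set earlier in this program,
* this relation gives @f$E = \frac{1.3^2}{1.4 \cdot 0.4} + \frac 12 \cdot 1
* \cdot 0.4^2 = 3.017857\ldots + 0.08 = 3.097857142857\ldots@f$, which is
* exactly the value returned for the last component in test case 1.
*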
971 * @code
972 *   template <int dim>
973 *   double ExactSolution<dim>::value(const Point<dim> &x,
974 *   const unsigned int component) const
975 *   {
976 *   const double t = this->get_time();
977 *  
978 *   switch (testcase)
979 *   {
980 *   case 0:
981 *   {
982 *   Assert(dim == 2, ExcNotImplemented());
983 *   const double beta = 5;
984 *  
985 *   Point<dim> x0;
986 *   x0[0] = 5.;
987 *   const double radius_sqr =
988 *   (x - x0).norm_square() - 2. * (x[0] - x0[0]) * t + t * t;
989 *   const double factor =
990 *   beta / (numbers::PI * 2) * std::exp(1. - radius_sqr);
991 *   const double density_log = std::log2(
992 *   std::abs(1. - (gamma - 1.) / gamma * 0.25 * factor * factor));
993 *   const double density = std::exp2(density_log * (1. / (gamma - 1.)));
994 *   const double u = 1. - factor * (x[1] - x0[1]);
995 *   const double v = factor * (x[0] - t - x0[0]);
996 *  
997 *   if (component == 0)
998 *   return density;
999 *   else if (component == 1)
1000 *   return density * u;
1001 *   else if (component == 2)
1002 *   return density * v;
1003 *   else
1004 *   {
1005 *   const double pressure =
1006 *   std::exp2(density_log * (gamma / (gamma - 1.)));
1007 *   return pressure / (gamma - 1.) +
1008 *   0.5 * (density * u * u + density * v * v);
1009 *   }
1010 *   }
1011 *  
1012 *   case 1:
1013 *   {
1014 *   if (component == 0)
1015 *   return 1.;
1016 *   else if (component == 1)
1017 *   return 0.4;
1018 *   else if (component == dim + 1)
1019 *   return 3.097857142857143;
1020 *   else
1021 *   return 0.;
1022 *   }
1023 *  
1024 *   default:
1026 *   return 0.;
1027 *   }
1028 *   }
1029 *  
1030 *  
1031 *  
1032 * @endcode
1033 *
1034 *
1035 * <a name="step_67-LowstorageexplicitRungeKuttatimeintegrators"></a>
1036 * <h3>Low-storage explicit Runge--Kutta time integrators</h3>
1037 *
1038
1039 *
1040 * The next few lines implement a few low-storage variants of Runge--Kutta
1041 * methods. These methods have specific Butcher tableaux with coefficients
1042 * @f$b_i@f$ and @f$a_i@f$ as shown in the introduction. As usual in Runge--Kutta
1043 * methods, we can deduce the stage times @f$c_i = \sum_{j=1}^{i-2} b_j + a_{i-1}@f$
1044 * from those coefficients. The main advantage of this kind of scheme is the
1045 * fact that only two solution-sized vectors need to be updated per stage, namely
1046 * the accumulated part of the solution @f$\mathbf{w}@f$ (that will hold the solution
1047 * @f$\mathbf{w}^{n+1}@f$ at the new time @f$t^{n+1}@f$ after the last stage) and the
1048 * update vector @f$\mathbf{r}_i@f$ that gets evaluated during the stages, plus
1049 * one vector @f$\mathbf{k}_i@f$ to hold the evaluation of the operator. Such a
1050 * Runge--Kutta setup reduces the memory storage and memory access. As the
1051 * memory bandwidth is often the performance-limiting factor on modern
1052 * hardware when the evaluation of the differential operator is
1053 * well-optimized, performance can be improved over standard time
1054 * integrators. This remains true even when taking into account that a
1055 * conventional Runge--Kutta scheme might allow for slightly larger time
1056 * steps, as its additional free parameters allow for better stability properties.
1057 *
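*
* To illustrate the update structure only (this is a minimal sketch for a
* scalar ODE @f$y' = f(t,y)@f$, not the code used in this program, and the
* coefficient vectors `ai`, `bi`, `ci` are assumed to be provided, e.g., as
* filled by `rk_integrator.get_coefficients(ai, bi, ci)` in the class below),
* one stage loop of such a scheme could be written as follows:
*
* @code
*   #include <functional>
*   #include <vector>
*
*   // Advance y from time t to t + dt with a low-storage Runge--Kutta scheme
*   // defined by the coefficients ai (one entry fewer than bi), bi, and ci.
*   double low_storage_rk_step(const std::function<double(double, double)> &f,
*                              const std::vector<double> &ai,
*                              const std::vector<double> &bi,
*                              const std::vector<double> &ci,
*                              const double t,
*                              const double dt,
*                              double y)
*   {
*     double r = y; // the vector r_i of the text, initialized with w^n
*     for (unsigned int stage = 0; stage < bi.size(); ++stage)
*       {
*         const double k = f(t + ci[stage] * dt, r); // k_i
*         if (stage + 1 < bi.size())
*           r = y + ai[stage] * dt * k; // r_{i+1}, not needed in the last stage
*         y += bi[stage] * dt * k;      // accumulate the solution w
*       }
*     return y;
*   }
* @endcode
*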
1058
1059 *
1060 * In this tutorial program, we concentrate on a few variants of
1061 * low-storage schemes defined in the article by Kennedy, Carpenter, and
1062 * Lewis (2000), as well as one variant described by Tselios and Simos
1063 * (2007). There is a large number of other schemes available, which could
1064 * be addressed by additional sets of coefficients or slightly different
1065 * update formulas.
1066 *
1067
1068 *
1069 * We define a single class for the four integrators, distinguished by the
1070 * enum described above. For each scheme, we then fill the vectors for the
1071 * @f$b_i@f$ and @f$a_i@f$ coefficients into the respective member variables of the class.
1072 *
1073 * @code
1074 *   class LowStorageRungeKuttaIntegrator
1075 *   {
1076 *   public:
1077 *   LowStorageRungeKuttaIntegrator(const LowStorageRungeKuttaScheme scheme)
1078 *   {
1079 *   TimeStepping::runge_kutta_method lsrk;
1080 * @endcode
1081 *
1082 * First comes the three-stage scheme of order three by Kennedy et al.
1083 * (2000). While its stability region is significantly smaller than for
1084 * the other schemes, it only involves three stages, so it is very
1085 * competitive in terms of the work per stage.
1086 *
1087 * @code
1088 *   switch (scheme)
1089 *   {
1090 *   case stage_3_order_3:
1091 *   {
1092 *   lsrk = TimeStepping::LOW_STORAGE_RK_STAGE3_ORDER3;
1093 *   break;
1094 *   }
1095 *  
1096 * @endcode
1097 *
1098 * The next scheme is a five-stage scheme of order four, again
1099 * defined in the paper by Kennedy et al. (2000).
1100 *
1101 * @code
1102 *   case stage_5_order_4:
1103 *   {
1104 *   lsrk = TimeStepping::LOW_STORAGE_RK_STAGE5_ORDER4;
1105 *   break;
1106 *   }
1107 *  
1108 * @endcode
1109 *
1110 * The following scheme of seven stages and order four has been
1111 * explicitly derived for acoustics problems. Among fourth-order schemes,
1112 * it balances accuracy for imaginary eigenvalues with a large stability
1113 * region. Since DG schemes are
1114 * dissipative at the highest frequencies, this does not
1115 * necessarily translate to the highest possible time step per
1116 * stage. In the context of the present tutorial program, the
1117 * numerical flux plays a crucial role in the dissipation and thus
1118 * also the maximal stable time step size. For the modified
1119 * Lax--Friedrichs flux, this scheme is similar to the
1120 * `stage_5_order_4` scheme in terms of step size per stage if only
1121 * stability is considered, but somewhat less efficient for the HLL
1122 * flux.
1123 *
1124 * @code
1125 *   case stage_7_order_4:
1126 *   {
1127 *   lsrk = TimeStepping::LOW_STORAGE_RK_STAGE7_ORDER4;
1128 *   break;
1129 *   }
1130 *  
1131 * @endcode
1132 *
1133 * The last scheme included here is the nine-stage scheme of order
1134 * five from Kennedy et al. (2000). It is the most accurate among
1135 * the schemes used here, but the higher order of accuracy
1136 * sacrifices some stability, so the step length normalized per
1137 * stage is less than for the fourth order schemes.
1138 *
1139 * @code
1140 *   case stage_9_order_5:
1141 *   {
1142 *   lsrk = TimeStepping::LOW_STORAGE_RK_STAGE9_ORDER5;
1143 *   break;
1144 *   }
1145 *  
1146 *   default:
1147 *   AssertThrow(false, ExcNotImplemented());
1148 *   }
1149 *   TimeStepping::LowStorageRungeKutta<
1150 *   LinearAlgebra::distributed::Vector<Number>>
1151 *   rk_integrator(lsrk);
1152 *   rk_integrator.get_coefficients(ai, bi, ci);
1153 *   }
1154 *  
1155 *   unsigned int n_stages() const
1156 *   {
1157 *   return bi.size();
1158 *   }
1159 *  
1160 * @endcode
1161 *
1162 * The main function of the time integrator is to go through the stages,
1163 * evaluate the operator, prepare the @f$\mathbf{r}_i@f$ vector for the next
1164 * evaluation, and update the solution vector @f$\mathbf{w}@f$. We hand off
1165 * the work to the `pde_operator` so that the vector operations of the
1166 * Runge--Kutta update can be merged with the evaluation of
1167 * the differential operator for better performance; all we do here is
1168 * to pass on the vectors and coefficients.
1169 *
1170
1171 *
1172 * We separately call the operator for the first stage because we need
1173 * slightly modified arguments there: We evaluate the operator at
1174 * the old solution @f$\mathbf{w}^n@f$ rather than at an @f$\mathbf r_i@f$ vector, so
1175 * the first argument is `solution`. We here let the stage vector
1176 * @f$\mathbf{r}_i@f$ also hold the temporary result of the evaluation, as it
1177 * is not used otherwise. For all subsequent stages, we use the vector
1178 * `vec_ki` as the second vector argument to store the result of the
1179 * operator evaluation. Finally, when we are at the last stage, we must
1180 * skip the computation of the vector @f$\mathbf{r}_{s+1}@f$ as there is no
1181 * coefficient @f$a_s@f$ available (nor will it be used).
1182 *
1183 * @code
1184 *   template <typename VectorType, typename Operator>
1185 *   void perform_time_step(const Operator &pde_operator,
1186 *   const double current_time,
1187 *   const double time_step,
1188 *   VectorType &solution,
1189 *   VectorType &vec_ri,
1190 *   VectorType &vec_ki) const
1191 *   {
1192 *   AssertDimension(ai.size() + 1, bi.size());
1193 *  
1194 *   pde_operator.perform_stage(current_time,
1195 *   bi[0] * time_step,
1196 *   ai[0] * time_step,
1197 *   solution,
1198 *   vec_ri,
1199 *   solution,
1200 *   vec_ri);
1201 *  
1202 *   for (unsigned int stage = 1; stage < bi.size(); ++stage)
1203 *   {
1204 *   const double c_i = ci[stage];
1205 *   pde_operator.perform_stage(current_time + c_i * time_step,
1206 *   bi[stage] * time_step,
1207 *   (stage == bi.size() - 1 ?
1208 *   0 :
1209 *   ai[stage] * time_step),
1210 *   vec_ri,
1211 *   vec_ki,
1212 *   solution,
1213 *   vec_ri);
1214 *   }
1215 *   }
1216 *  
1217 *   private:
1218 *   std::vector<double> bi;
1219 *   std::vector<double> ai;
1220 *   std::vector<double> ci;
1221 *   };
1222 *  
1223 *  
1224 *  
1225 * @endcode
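*
* For orientation, a typical use of this class later in the program looks
* roughly as follows (variable names such as `euler_operator`, `solution`,
* `rk_register_1`, and `rk_register_2` are placeholders here; the actual
* driver code appears further down in step-67):
*
* @code
*   const LowStorageRungeKuttaIntegrator integrator(lsrk_scheme);
*   double time = 0.;
*   while (time < final_time)
*     {
*       integrator.perform_time_step(euler_operator,
*                                    time,
*                                    time_step,
*                                    solution,
*                                    rk_register_1,
*                                    rk_register_2);
*       time += time_step;
*     }
* @endcode
*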
1226 *
1227 *
1228 * <a name="step_67-ImplementationofpointwiseoperationsoftheEulerequations"></a>
1229 * <h3>Implementation of point-wise operations of the Euler equations</h3>
1230 *
1231
1232 *
1233 * In the following functions, we implement the various problem-specific
1234 * operators pertaining to the Euler equations. Each function acts on the
1235 * vector of conserved variables @f$[\rho, \rho\mathbf{u}, E]@f$ that we hold in
1236 * the solution vectors, and computes various derived quantities.
1237 *
1238
1239 *
1240 * First up is the computation of the velocity, which we derive from the
1241 * momentum variable @f$\rho \mathbf{u}@f$ by division by @f$\rho@f$. One thing to
1242 * note here is that we decorate all those functions with the keyword
1243 * `DEAL_II_ALWAYS_INLINE`. This is a special macro that maps to a
1244 * compiler-specific keyword that tells the compiler to never create a
1245 * function call for any of those functions, and instead move the
1246 * implementation <a
1247 * href="https://en.wikipedia.org/wiki/Inline_function">inline</a> to where
1248 * they are called. This is critical for performance because we call into some
1249 * of those functions millions or billions of times: For example, we use
1250 * the velocity both for the computation of the flux further down and for the
1251 * computation of the pressure, and both of these places are evaluated at
1252 * every quadrature point of every cell. Making sure these functions are
1253 * inlined ensures not only that the processor does not have to execute a jump
1254 * instruction into the function (and the corresponding return jump), but also
1255 * that the compiler can re-use intermediate information from one function's
1256 * context in code that comes after the place where the function was called.
1257 * (We note that compilers are generally quite good at figuring out which
1258 * functions to inline by themselves. Here is a place where compilers may or
1259 * may not have figured it out by themselves but where we know for sure that
1260 * inlining is a win.)
1261 *
1262
1263 *
1264 * Another trick we apply is a separate variable for the inverse density
1265 * @f$\frac{1}{\rho}@f$. This enables the compiler to only perform a single
1266 * division for the flux, despite the division being used at several
1267 * places. As divisions are around ten to twenty times as expensive as
1268 * multiplications or additions, avoiding redundant divisions is crucial for
1269 * performance. We note that taking the inverse first and later multiplying
1270 * with it is not equivalent to a division in floating point arithmetic due
1271 * to roundoff effects, so the compiler is not allowed to replace one by
1272 * the other with standard optimization flags. However, it is also not
1273 * particularly difficult to write the code in the right way.
1274 *
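*
* In a simplified, self-contained form (a sketch with made-up names, not
* taken from the code below), the transformation looks like this:
*
* @code
*   #include <array>
*
*   // Compute the velocity (u, v) from the momentum (rho_u, rho_v): one
*   // division and two (much cheaper) multiplications instead of two divisions.
*   std::array<double, 2> velocity_2d(const double rho,
*                                     const double rho_u,
*                                     const double rho_v)
*   {
*     const double inverse_rho = 1. / rho;
*     return {{rho_u * inverse_rho, rho_v * inverse_rho}};
*   }
* @endcode
*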
1275
1276 *
1277 * To summarize, the chosen strategy of always inlining and careful
1278 * definition of expensive arithmetic operations allows us to write compact
1279 * code without passing all intermediate results around, while still making sure
1280 * that the code maps to excellent machine code.
1281 *
1282 * @code
1283 *   template <int dim, typename Number>
1284 *   inline DEAL_II_ALWAYS_INLINE
1285 *   Tensor<1, dim, Number>
1286 *   euler_velocity(const Tensor<1, dim + 2, Number> &conserved_variables)
1287 *   {
1288 *   const Number inverse_density = Number(1.) / conserved_variables[0];
1289 *  
1290 *   Tensor<1, dim, Number> velocity;
1291 *   for (unsigned int d = 0; d < dim; ++d)
1292 *   velocity[d] = conserved_variables[1 + d] * inverse_density;
1293 *  
1294 *   return velocity;
1295 *   }
1296 *  
1297 * @endcode
1298 *
1299 * The next function computes the pressure from the vector of conserved
1300 * variables, using the formula @f$p = (\gamma - 1) \left(E - \frac 12 \rho
1301 * \mathbf{u}\cdot \mathbf{u}\right)@f$. As explained above, we use the
1302 * velocity from the `euler_velocity()` function. Note that we need to
1303 * specify the first template argument `dim` here because the compiler is
1304 * not able to deduce it from the arguments of the tensor, whereas the
1305 * second argument (number type) can be automatically deduced.
1306 *
1307 * @code
1308 *   template <int dim, typename Number>
1309 *   inline DEAL_II_ALWAYS_INLINE
1310 *   Number
1311 *   euler_pressure(const Tensor<1, dim + 2, Number> &conserved_variables)
1312 *   {
1313 *   const Tensor<1, dim, Number> velocity =
1314 *   euler_velocity<dim>(conserved_variables);
1315 *  
1316 *   Number rho_u_dot_u = conserved_variables[1] * velocity[0];
1317 *   for (unsigned int d = 1; d < dim; ++d)
1318 *   rho_u_dot_u += conserved_variables[1 + d] * velocity[d];
1319 *  
1320 *   return (gamma - 1.) * (conserved_variables[dim + 1] - 0.5 * rho_u_dot_u);
1321 *   }
1322 *  
1323 * @endcode
1324 *
1325 * Here is the definition of the Euler flux function, i.e., the definition
1326 * of the actual equation. Given the velocity and pressure (whose computation
1327 * the compiler optimization will make sure is performed only once), this is
1328 * straightforward given the equation stated in the introduction.
1329 *
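*
* For reference, in terms of the conserved variables @f$\mathbf{w} = [\rho,
* \rho\mathbf{u}, E]@f$ the flux reads @f$\mathbf{F}(\mathbf{w}) =
* \left[\rho\mathbf{u},\; \rho\mathbf{u}\otimes\mathbf{u} + p\mathbb{I},\;
* \mathbf{u}(E+p)\right]@f$, and the loops below assemble exactly these rows,
* one spatial component @f$d@f$ at a time.
*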
1330 * @code
1331 *   template <int dim, typename Number>
1332 *   inline DEAL_II_ALWAYS_INLINE
1333 *   Tensor<1, dim + 2, Tensor<1, dim, Number>>
1334 *   euler_flux(const Tensor<1, dim + 2, Number> &conserved_variables)
1335 *   {
1336 *   const Tensor<1, dim, Number> velocity =
1337 *   euler_velocity<dim>(conserved_variables);
1338 *   const Number pressure = euler_pressure<dim>(conserved_variables);
1339 *  
1340 *   Tensor<1, dim + 2, Tensor<1, dim, Number>> flux;
1341 *   for (unsigned int d = 0; d < dim; ++d)
1342 *   {
1343 *   flux[0][d] = conserved_variables[1 + d];
1344 *   for (unsigned int e = 0; e < dim; ++e)
1345 *   flux[e + 1][d] = conserved_variables[e + 1] * velocity[d];
1346 *   flux[d + 1][d] += pressure;
1347 *   flux[dim + 1][d] =
1348 *   velocity[d] * (conserved_variables[dim + 1] + pressure);
1349 *   }
1350 *  
1351 *   return flux;
1352 *   }
1353 *  
1354 * @endcode
1355 *
1356 * This next function is a helper to simplify the implementation of the
1357 * numerical flux, implementing the action of a tensor of tensors (with
1358 * non-standard outer dimension of size `dim + 2`, so the standard overloads
1359 * provided by deal.II's tensor classes do not apply here) with another
1360 * tensor of the same inner dimension, i.e., a matrix-vector product.
1361 *
1362 * @code
1363 *   template <int n_components, int dim, typename Number>
1364 *   inline DEAL_II_ALWAYS_INLINE
1365 *   Tensor<1, n_components, Number>
1366 *   operator*(const Tensor<1, n_components, Tensor<1, dim, Number>> &matrix,
1367 *   const Tensor<1, dim, Number> &vector)
1368 *   {
1369 *   Tensor<1, n_components, Number> result;
1370 *   for (unsigned int d = 0; d < n_components; ++d)
1371 *   result[d] = matrix[d] * vector;
1372 *   return result;
1373 *   }
1374 *  
1375 * @endcode
1376 *
1377 * This function implements the numerical flux (Riemann solver). It gets the
1378 * state from the two sides of an interface and the normal vector, oriented
1379 * from the side of the solution @f$\mathbf{w}^-@f$ towards the solution
1380 * @f$\mathbf{w}^+@f$. In finite volume methods which rely on piece-wise
1381 * constant data, the numerical flux is the central ingredient as it is the
1382 * only place where the physical information is entered. In DG methods, the
1383 * numerical flux is less central due to the polynomials within the elements
1384 * and the physical flux used there. As a result of higher-degree
1385 * interpolation with consistent values from both sides in the limit of a
1386 * continuous solution, the numerical flux can be seen as a control of the
1387 * jump of the solution from both sides to weakly impose continuity. It is
1388 * important to realize that a numerical flux alone cannot stabilize a
1389 * high-order DG method in the presence of shocks, and thus any DG method
1390 * must be combined with further shock-capturing techniques to handle those
1391 * cases. In this tutorial, we focus on wave-like solutions of the Euler
1392 * equations in the subsonic regime without strong discontinuities where our
1393 * basic scheme is sufficient.
1394 *
1395
1396 *
1397 * Nonetheless, the numerical flux is decisive in terms of the numerical
1398 * dissipation of the overall scheme and influences the admissible time step
1399 * size with explicit Runge--Kutta methods. We consider two choices, a
1400 * modified Lax--Friedrichs scheme and the widely used Harten--Lax--van Leer
1401 * (HLL) flux. For both variants, we first need to get the velocities and
1402 * pressures from both sides of the interface and evaluate the physical
1403 * Euler flux.
1404 *
1405
1406 *
1407 * For the local Lax--Friedrichs flux, the definition is @f$\hat{\mathbf{F}}
1408 * =\frac{\mathbf{F}(\mathbf{w}^-)+\mathbf{F}(\mathbf{w}^+)}{2} +
1409 * \frac{\lambda}{2}\left[\mathbf{w}^--\mathbf{w}^+\right]\otimes
1410 * \mathbf{n^-}@f$, where the factor @f$\lambda =
1411 * \max\left(\|\mathbf{u}^-\|+c^-, \|\mathbf{u}^+\|+c^+\right)@f$ gives the
1412 * maximal wave speed and @f$c = \sqrt{\gamma p / \rho}@f$ is the speed of
1413 * sound. Here, we choose two modifications of that expression for reasons
1414 * of computational efficiency, given the small impact of the flux on the
1415 * solution. For the above definition of the factor @f$\lambda@f$, we would need
1416 * to take four square roots, two for the two velocity norms and two for the
1417 * speed of sound on either side. The first modification is hence to rather
1418 * use @f$\sqrt{\|\mathbf{u}\|^2+c^2}@f$ as an estimate of the maximal speed
1419 * (which is at most a factor of 2 away from the actual maximum, as shown in
1420 * the introduction). This allows us to pull the square root out of the
1421 * maximum and get away with a single square root computation. The second
1422 * modification is to further relax the parameter @f$\lambda@f$: the smaller
1423 * it is, the smaller the dissipation factor (which is multiplied by the
1424 * jump in @f$\mathbf{w}@f$, and might therefore result in a smaller or bigger
1425 * dissipation in the end). This allows us to fit the spectrum into the
1426 * stability region of the explicit Runge--Kutta integrator with bigger time
1427 * steps. However, we cannot make dissipation too small because otherwise
1428 * imaginary eigenvalues grow larger. Finally, the current conservative
1429 * formulation is not energy-stable in the limit of @f$\lambda\to 0@f$ as it is
1430 * not skew-symmetric, and would need additional measures such as split-form
1431 * DG schemes in that case.
1432 *
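*
* Written out, the variant implemented in `euler_numerical_flux()` below
* therefore uses @f$\lambda = \frac{1}{2}\sqrt{\max\left(\|\mathbf{u}^+\|^2 +
* (c^+)^2, \|\mathbf{u}^-\|^2 + (c^-)^2\right)}@f$, i.e., the single square
* root discussed above combined with an additional relaxation factor of one
* half.
*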
1433
1434 *
1435 * For the HLL flux, we follow the formula from literature, introducing an
1436 * additional weighting of the two states from Lax--Friedrichs by a
1437 * parameter @f$s@f$. It is derived from the physical transport directions of
1438 * the Euler equations in terms of the current direction of velocity and
1439 * sound speed. For the velocity, we here choose a simple arithmetic average
1440 * which is sufficient for DG scenarios and moderate jumps in material
1441 * parameters.
1442 *
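*
* Written out, the HLL flux as implemented below reads
* @f$\hat{\mathbf{F}} = \frac{s^+ \mathbf{F}(\mathbf{w}^-)\cdot\mathbf{n}^- -
* s^- \mathbf{F}(\mathbf{w}^+)\cdot\mathbf{n}^- + s^+ s^-
* \left(\mathbf{w}^+ - \mathbf{w}^-\right)}{s^+ - s^-}@f$ with the signal
* speeds @f$s^+ = \max\left(0, \bar{\mathbf{u}}\cdot\mathbf{n}^- +
* \bar{c}\right)@f$ and @f$s^- = \min\left(0,
* \bar{\mathbf{u}}\cdot\mathbf{n}^- - \bar{c}\right)@f$, where
* @f$\bar{\mathbf{u}}\cdot\mathbf{n}^-@f$ and @f$\bar{c}@f$ denote the
* averaged normal velocity and averaged speed of sound used in the code.
*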
1443
1444 *
1445 * Since the numerical flux is multiplied by the normal vector in the weak
1446 * form, we multiply the result by the normal vector for all terms in the
1447 * equation. In these multiplications, the `operator*` defined above enables
1448 * a compact notation similar to the mathematical definition.
1449 *
1450
1451 *
1452 * In this and the following functions, we use variable suffixes `_m` and
1453 * `_p` to indicate quantities derived from @f$\mathbf{w}^-@f$ and @f$\mathbf{w}^+@f$,
1454 * i.e., values "here" and "there" relative to the current cell when looking
1455 * at a neighbor cell.
1456 *
1457 * @code
1458 *   template <int dim, typename Number>
1459 *   inline DEAL_II_ALWAYS_INLINE
1460 *   Tensor<1, dim + 2, Number>
1461 *   euler_numerical_flux(const Tensor<1, dim + 2, Number> &u_m,
1462 *   const Tensor<1, dim + 2, Number> &u_p,
1463 *   const Tensor<1, dim, Number> &normal)
1464 *   {
1465 *   const auto velocity_m = euler_velocity<dim>(u_m);
1466 *   const auto velocity_p = euler_velocity<dim>(u_p);
1467 *  
1468 *   const auto pressure_m = euler_pressure<dim>(u_m);
1469 *   const auto pressure_p = euler_pressure<dim>(u_p);
1470 *  
1471 *   const auto flux_m = euler_flux<dim>(u_m);
1472 *   const auto flux_p = euler_flux<dim>(u_p);
1473 *  
1474 *   switch (numerical_flux_type)
1475 *   {
1476 *   case lax_friedrichs_modified:
1477 *   {
1478 *   const auto lambda =
1479 *   0.5 * std::sqrt(std::max(velocity_p.norm_square() +
1480 *   gamma * pressure_p * (1. / u_p[0]),
1481 *   velocity_m.norm_square() +
1482 *   gamma * pressure_m * (1. / u_m[0])));
1483 *  
1484 *   return 0.5 * (flux_m * normal + flux_p * normal) +
1485 *   0.5 * lambda * (u_m - u_p);
1486 *   }
1487 *  
1488 *   case harten_lax_vanleer:
1489 *   {
1490 *   const auto avg_velocity_normal =
1491 *   0.5 * ((velocity_m + velocity_p) * normal);
1492 *   const auto avg_c = std::sqrt(std::abs(
1493 *   0.5 * gamma *
1494 *   (pressure_p * (1. / u_p[0]) + pressure_m * (1. / u_m[0]))));
1495 *   const Number s_pos =
1496 *   std::max(Number(), avg_velocity_normal + avg_c);
1497 *   const Number s_neg =
1498 *   std::min(Number(), avg_velocity_normal - avg_c);
1499 *   const Number inverse_s = Number(1.) / (s_pos - s_neg);
1500 *  
1501 *   return inverse_s *
1502 *   ((s_pos * (flux_m * normal) - s_neg * (flux_p * normal)) -
1503 *   s_pos * s_neg * (u_m - u_p));
1504 *   }
1505 *  
1506 *   default:
1507 *   {
1509 *   return {};
1510 *   }
1511 *   }
1512 *   }
1513 *  
1514 *  
1515 *  
1516 * @endcode
1517 *
1518 * This and the next function are helper functions to provide compact
1519 * evaluation calls as multiple points get batched together via a
1520 * VectorizedArray argument (see the @ref step_37 "step-37" tutorial for details). This
1521 * function is used for the subsonic outflow boundary conditions where we
1522 * need to set the energy component to a prescribed value. The next one
1523 * requests the solution on all components and is used for inflow boundaries
1524 * where all components of the solution are set.
1525 *
1526 * @code
1527 *   template <int dim, typename Number>
1528 *   VectorizedArray<Number>
1529 *   evaluate_function(const Function<dim> &function,
1530 *   const Point<dim, VectorizedArray<Number>> &p_vectorized,
1531 *   const unsigned int component)
1532 *   {
1533 *   VectorizedArray<Number> result;
1534 *   for (unsigned int v = 0; v < VectorizedArray<Number>::size(); ++v)
1535 *   {
1536 *   Point<dim> p;
1537 *   for (unsigned int d = 0; d < dim; ++d)
1538 *   p[d] = p_vectorized[d][v];
1539 *   result[v] = function.value(p, component);
1540 *   }
1541 *   return result;
1542 *   }
1543 *  
1544 *  
1545 *   template <int dim, typename Number, int n_components = dim + 2>
1546 *   Tensor<1, n_components, VectorizedArray<Number>>
1547 *   evaluate_function(const Function<dim> &function,
1548 *   const Point<dim, VectorizedArray<Number>> &p_vectorized)
1549 *   {
1550 *   AssertDimension(function.n_components, n_components);
1551 *   Tensor<1, n_components, VectorizedArray<Number>> result;
1552 *   for (unsigned int v = 0; v < VectorizedArray<Number>::size(); ++v)
1553 *   {
1554 *   Point<dim> p;
1555 *   for (unsigned int d = 0; d < dim; ++d)
1556 *   p[d] = p_vectorized[d][v];
1557 *   for (unsigned int d = 0; d < n_components; ++d)
1558 *   result[d][v] = function.value(p, d);
1559 *   }
1560 *   return result;
1561 *   }
1562 *  
1563 *  
1564 *  
1565 * @endcode
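*
* As a brief usage sketch (with hypothetical variables `my_function` and
* `q_point`, and inside a context where `dim` and `Number` are known), these
* helpers are later called along the lines of
*
* @code
*   // Single component, e.g. the energy slot of a boundary function:
*   const VectorizedArray<Number> energy =
*     evaluate_function(my_function, q_point, dim + 1);
*
*   // All dim + 2 components at once, e.g. for inflow data:
*   const Tensor<1, dim + 2, VectorizedArray<Number>> w_boundary =
*     evaluate_function(my_function, q_point);
* @endcode
*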
1566 *
1567 *
1568 * <a name="step_67-TheEulerOperationclass"></a>
1569 * <h3>The EulerOperator class</h3>
1570 *
1571
1572 *
1573 * This class implements the evaluators for the Euler problem, in analogy to
1574 * the `LaplaceOperator` class of @ref step_37 "step-37" or @ref step_59 "step-59". Since the present
1575 * operator is non-linear and does not require a matrix interface (to be
1576 * handed over to preconditioners), we skip the various `vmult` functions
1577 * otherwise present in matrix-free operators and only implement an `apply`
1578 * function as well as the combination of `apply` with the required vector
1579 * updates for the low-storage Runge--Kutta time integrator mentioned above
1580 * (called `perform_stage`). Furthermore, we have added three additional
1581 * functions involving matrix-free routines, namely one to compute an
1582 * estimate of the time step scaling (that is combined with the Courant
1583 * number for the actual time step size) based on the velocity and speed of
1584 * sound in the elements, one for the projection of solutions (specializing
1585 * VectorTools::project() for the DG case), and one to compute the errors
1586 * against a possible analytical solution or norms against some background
1587 * state.
1588 *
1589
1590 *
1591 * The rest of the class is similar to other matrix-free tutorials. As
1592 * discussed in the introduction, we provide a few functions to allow a user
1593 * to pass in various forms of boundary conditions on different parts of the
1594 * domain boundary marked by types::boundary_id variables, as well as
1595 * possible body forces.
1596 *
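*
* As an illustration of how these functions are meant to be used (the
* boundary ids here are made up; the actual assignments happen later in the
* program), a user would write something like
*
* @code
*   euler_operator.set_inflow_boundary(0,
*                                      std::make_unique<ExactSolution<dim>>(0.));
*   euler_operator.set_wall_boundary(1);
* @endcode
*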
1597 * @code
1598 *   template <int dim, int degree, int n_points_1d>
1599 *   class EulerOperator
1600 *   {
1601 *   public:
1602 *   static constexpr unsigned int n_quadrature_points_1d = n_points_1d;
1603 *  
1604 *   EulerOperator(TimerOutput &timer_output);
1605 *  
1606 *   void reinit(const Mapping<dim> &mapping,
1607 *   const DoFHandler<dim> &dof_handler);
1608 *  
1609 *   void set_inflow_boundary(const types::boundary_id boundary_id,
1610 *   std::unique_ptr<Function<dim>> inflow_function);
1611 *  
1612 *   void set_subsonic_outflow_boundary(
1613 *   const types::boundary_id boundary_id,
1614 *   std::unique_ptr<Function<dim>> outflow_energy);
1615 *  
1616 *   void set_wall_boundary(const types::boundary_id boundary_id);
1617 *  
1618 *   void set_body_force(std::unique_ptr<Function<dim>> body_force);
1619 *  
1620 *   void apply(const double current_time,
1621 *   const LinearAlgebra::distributed::Vector<Number> &src,
1622 *   LinearAlgebra::distributed::Vector<Number> &dst) const;
1623 *  
1624 *   void
1625 *   perform_stage(const Number cur_time,
1626 *   const Number factor_solution,
1627 *   const Number factor_ai,
1628 *   const LinearAlgebra::distributed::Vector<Number> &current_ri,
1629 *   LinearAlgebra::distributed::Vector<Number> &vec_ki,
1630 *   LinearAlgebra::distributed::Vector<Number> &solution,
1631 *   LinearAlgebra::distributed::Vector<Number> &next_ri) const;
1632 *  
1633 *   void project(const Function<dim> &function,
1634 *   LinearAlgebra::distributed::Vector<Number> &solution) const;
1635 *  
1636 *   std::array<double, 3> compute_errors(
1637 *   const Function<dim> &function,
1638 *   const LinearAlgebra::distributed::Vector<Number> &solution) const;
1639 *  
1640 *   double compute_cell_transport_speed(
1641 *   const LinearAlgebra::distributed::Vector<Number> &solution) const;
1642 *  
1643 *   void
1644 *   initialize_vector(LinearAlgebra::distributed::Vector<Number> &vector) const;
1645 *  
1646 *   private:
1647 *   MatrixFree<dim, Number> data;
1648 *  
1649 *   TimerOutput &timer;
1650 *  
1651 *   std::map<types::boundary_id, std::unique_ptr<Function<dim>>>
1652 *   inflow_boundaries;
1653 *   std::map<types::boundary_id, std::unique_ptr<Function<dim>>>
1654 *   subsonic_outflow_boundaries;
1655 *   std::set<types::boundary_id> wall_boundaries;
1656 *   std::unique_ptr<Function<dim>> body_force;
1657 *  
1658 *   void local_apply_inverse_mass_matrix(
1659 *   const MatrixFree<dim, Number> &data,
1660 *   LinearAlgebra::distributed::Vector<Number> &dst,
1661 *   const LinearAlgebra::distributed::Vector<Number> &src,
1662 *   const std::pair<unsigned int, unsigned int> &cell_range) const;
1663 *  
1664 *   void local_apply_cell(
1665 *   const MatrixFree<dim, Number> &data,
1666 *   LinearAlgebra::distributed::Vector<Number> &dst,
1667 *   const LinearAlgebra::distributed::Vector<Number> &src,
1668 *   const std::pair<unsigned int, unsigned int> &cell_range) const;
1669 *  
1670 *   void local_apply_face(
1671 *   const MatrixFree<dim, Number> &data,
1672 *   LinearAlgebra::distributed::Vector<Number> &dst,
1673 *   const LinearAlgebra::distributed::Vector<Number> &src,
1674 *   const std::pair<unsigned int, unsigned int> &face_range) const;
1675 *  
1676 *   void local_apply_boundary_face(
1677 *   const MatrixFree<dim, Number> &data,
1678 *   LinearAlgebra::distributed::Vector<Number> &dst,
1679 *   const LinearAlgebra::distributed::Vector<Number> &src,
1680 *   const std::pair<unsigned int, unsigned int> &face_range) const;
1681 *   };
1682 *  
1683 *  
1684 *  
1685 *   template <int dim, int degree, int n_points_1d>
1686 *   EulerOperator<dim, degree, n_points_1d>::EulerOperator(TimerOutput &timer)
1687 *   : timer(timer)
1688 *   {}
1689 *  
1690 *  
1691 *  
1692 * @endcode
1693 *
1694 * For the initialization of the Euler operator, we set up the MatrixFree
1695 * variable contained in the class. This can be done given a mapping to
1696 * describe possible curved boundaries as well as a DoFHandler object
1697 * describing the degrees of freedom. Since we use a discontinuous Galerkin
1698 * discretization in this tutorial program where no constraints are imposed
1699 * strongly on the solution field, we do not need to pass in an
1700 * AffineConstraints object and rather use a dummy for the
1701 * construction. With respect to quadrature, we want to select two different
1702 * ways of computing the underlying integrals: The first is a flexible one,
1703 * based on a template parameter `n_points_1d` (that will be assigned the
1704 * `n_q_points_1d` value specified at the top of this file). More accurate
1705 * integration is necessary to avoid the aliasing problem due to the
1706 * variable coefficients in the Euler operator. The second, less accurate,
1707 * quadrature formula is a tight one based on `fe_degree+1` points and is needed for
1708 * the inverse mass matrix. While that formula provides an exact inverse
1709 * only on affine element shapes and not on deformed elements, it enables
1710 * the fast inversion of the mass matrix by tensor product techniques,
1711 * necessary to ensure optimal computational efficiency overall.
1712 *
1713 * @code
1714 *   template <int dim, int degree, int n_points_1d>
1715 *   void EulerOperator<dim, degree, n_points_1d>::reinit(
1716 *   const Mapping<dim> &mapping,
1717 *   const DoFHandler<dim> &dof_handler)
1718 *   {
1719 *   const std::vector<const DoFHandler<dim> *> dof_handlers = {&dof_handler};
1720 *   const AffineConstraints<double> dummy;
1721 *   const std::vector<const AffineConstraints<double> *> constraints = {&dummy};
1722 *   const std::vector<Quadrature<1>> quadratures = {QGauss<1>(n_q_points_1d),
1723 *   QGauss<1>(fe_degree + 1)};
1724 *  
1725 *   typename MatrixFree<dim, Number>::AdditionalData additional_data;
1726 *   additional_data.mapping_update_flags =
1728 *   update_values);
1729 *   additional_data.mapping_update_flags_inner_faces =
1731 *   update_values);
1732 *   additional_data.mapping_update_flags_boundary_faces =
1734 *   update_values);
1735 *   additional_data.tasks_parallel_scheme =
1737 *  
1738 *   data.reinit(
1739 *   mapping, dof_handlers, constraints, quadratures, additional_data);
1740 *   }
1741 *  
1742 *  
1743 *  
1744 *   template <int dim, int degree, int n_points_1d>
1745 *   void EulerOperator<dim, degree, n_points_1d>::initialize_vector(
1746 *   LinearAlgebra::distributed::Vector<Number> &vector) const
1747 *   {
1748 *   data.initialize_dof_vector(vector);
1749 *   }
1750 *  
1751 *  
1752 *  
1753 * @endcode
1754 *
1755 * The subsequent four member functions are the ones that must be called from
1756 * outside to specify the various types of boundaries. For an inflow boundary,
1757 * we must specify all components in terms of density @f$\rho@f$, momentum @f$\rho
1758 * \mathbf{u}@f$ and energy @f$E@f$. Given this information, we then store the
1759 * function alongside the respective boundary id in a map member variable of
1760 * this class. Likewise, we proceed for the subsonic outflow boundaries (where
1761 * we request a function as well, which we use to retrieve the energy) and for
1762 * wall (no-penetration) boundaries where we impose zero normal velocity (no
1763 * function necessary, so we only request the boundary id). For the present
1764 * DG code where boundary conditions are solely applied as part of the weak
1765 * form (during time integration), the call to set the boundary conditions
1766 * can appear either before or after the `reinit()` call to this class. This
1767 * is different from continuous finite element codes where the boundary
1768 * conditions determine the content of the AffineConstraints object that is
1769 * sent into MatrixFree for initialization, thus requiring to be set before
1770 * the initialization of the matrix-free data structures.
1771 *
1772
1773 *
1774 * The checks added in each of the four functions are used to
1775 * ensure that boundary conditions are mutually exclusive on the various
1776 * parts of the boundary, i.e., that a user does not accidentally designate a
1777 * boundary as both an inflow and, say, a subsonic outflow boundary.
1778 *
1779 * @code
1780 *   template <int dim, int degree, int n_points_1d>
1781 *   void EulerOperator<dim, degree, n_points_1d>::set_inflow_boundary(
1782 *   const types::boundary_id boundary_id,
1783 *   std::unique_ptr<Function<dim>> inflow_function)
1784 *   {
1785 *   AssertThrow(subsonic_outflow_boundaries.find(boundary_id) ==
1786 *   subsonic_outflow_boundaries.end() &&
1787 *   wall_boundaries.find(boundary_id) == wall_boundaries.end(),
1788 *   ExcMessage("You already set the boundary with id " +
1789 *   std::to_string(static_cast<int>(boundary_id)) +
1790 *   " to another type of boundary before now setting " +
1791 *   "it as inflow"));
1792 *   AssertThrow(inflow_function->n_components == dim + 2,
1793 *   ExcMessage("Expected function with dim+2 components"));
1794 *  
1795 *   inflow_boundaries[boundary_id] = std::move(inflow_function);
1796 *   }
1797 *  
1798 *  
1799 *   template <int dim, int degree, int n_points_1d>
1800 *   void EulerOperator<dim, degree, n_points_1d>::set_subsonic_outflow_boundary(
1801 *   const types::boundary_id boundary_id,
1802 *   std::unique_ptr<Function<dim>> outflow_function)
1803 *   {
1804 *   AssertThrow(inflow_boundaries.find(boundary_id) ==
1805 *   inflow_boundaries.end() &&
1806 *   wall_boundaries.find(boundary_id) == wall_boundaries.end(),
1807 *   ExcMessage("You already set the boundary with id " +
1808 *   std::to_string(static_cast<int>(boundary_id)) +
1809 *   " to another type of boundary before now setting " +
1810 *   "it as subsonic outflow"));
1811 *   AssertThrow(outflow_function->n_components == dim + 2,
1812 *   ExcMessage("Expected function with dim+2 components"));
1813 *  
1814 *   subsonic_outflow_boundaries[boundary_id] = std::move(outflow_function);
1815 *   }
1816 *  
1817 *  
1818 *   template <int dim, int degree, int n_points_1d>
1819 *   void EulerOperator<dim, degree, n_points_1d>::set_wall_boundary(
1820 *   const types::boundary_id boundary_id)
1821 *   {
1822 *   AssertThrow(inflow_boundaries.find(boundary_id) ==
1823 *   inflow_boundaries.end() &&
1824 *   subsonic_outflow_boundaries.find(boundary_id) ==
1825 *   subsonic_outflow_boundaries.end(),
1826 *   ExcMessage("You already set the boundary with id " +
1827 *   std::to_string(static_cast<int>(boundary_id)) +
1828 *   " to another type of boundary before now setting " +
1829 *   "it as wall boundary"));
1830 *  
1831 *   wall_boundaries.insert(boundary_id);
1832 *   }
1833 *  
1834 *  
1835 *   template <int dim, int degree, int n_points_1d>
1836 *   void EulerOperator<dim, degree, n_points_1d>::set_body_force(
1837 *   std::unique_ptr<Function<dim>> body_force)
1838 *   {
1839 *   AssertDimension(body_force->n_components, dim);
1840 *  
1841 *   this->body_force = std::move(body_force);
1842 *   }
1843 *  
1844 *  
1845 *  
1846 * @endcode
1847 *
1848 *
1849 * <a name="step_67-Localevaluators"></a>
1850 * <h4>Local evaluators</h4>
1851 *
1852
1853 *
1854 * Now we proceed to the local evaluators for the Euler problem. The
1855 * evaluators are relatively simple and follow what has been presented in
1856 * @ref step_37 "step-37", @ref step_48 "step-48", or @ref step_59 "step-59". The first notable difference is the fact
1857 * that we use an FEEvaluation with a non-standard number of quadrature
1858 * points. Whereas we previously always set the number of quadrature points
1859 * to equal the polynomial degree plus one (ensuring exact integration on
1860 * affine element shapes), we now set the number of quadrature points as a
1861 * separate variable (e.g. the polynomial degree plus two or three halves of
1862 * the polynomial degree) to more accurately handle nonlinear terms. Since
1863 * the evaluator is fed with the appropriate loop lengths via the template
1864 * argument and keeps the number of quadrature points in the whole cell in
1865 * the variable FEEvaluation::n_q_points, we now automatically operate on
1866 * the more accurate formula without further changes.
1867 *
1868
1869 *
1870 * The second difference is due to the fact that we are now evaluating a
1871 * multi-component system, as opposed to the scalar systems considered
1872 * previously. The matrix-free framework provides several ways to handle the
1873 * multi-component case. The variant shown here utilizes an FEEvaluation
1874 * object with multiple components embedded into it, specified by the fourth
1875 * template argument `dim + 2` for the components in the Euler system. As a
1876 * consequence, the return type of FEEvaluation::get_value() is not a scalar
1877 * any more (that would return a VectorizedArray type, collecting data from
1878 * several elements), but a Tensor of `dim+2` components. The functionality
1879 * is otherwise similar to the scalar case; it is handled by a template
1880 * specialization of a base class, called FEEvaluationAccess. An alternative
1881 * variant would have been to use several FEEvaluation objects, a scalar one
1882 * for the density, a vector-valued one with `dim` components for the
1883 * momentum, and another scalar evaluator for the energy. To ensure that
1884 * those components point to the correct part of the solution, the
1885 * constructor of FEEvaluation takes three optional integer arguments after
1886 * the required MatrixFree field, namely the number of the DoFHandler for
1887 * multi-DoFHandler systems (taking the first by default), the number of the
1888 * quadrature point in case there are multiple Quadrature objects (see more
1889 * below), and as a third argument the component within a vector system. As
1890 * we have a single vector for all components, we would go with the third
1891 * argument, and set it to `0` for the density, `1` for the vector-valued
1892 * momentum, and `dim+1` for the energy slot. FEEvaluation then picks the
1893 * appropriate subrange of the solution vector during
1894 * FEEvaluationBase::read_dof_values() and
1895 * FEEvaluation::distributed_local_to_global() or the more compact
1896 * FEEvaluation::gather_evaluate() and FEEvaluation::integrate_scatter()
1897 * calls.
1898 *
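*
* As a sketch of that alternative (not used in this program), the three
* evaluators viewing different components of the same solution vector would
* be constructed as
*
* @code
*   FEEvaluation<dim, degree, n_points_1d, 1, Number>   phi_density(data, 0, 0, 0);
*   FEEvaluation<dim, degree, n_points_1d, dim, Number> phi_momentum(data, 0, 0, 1);
*   FEEvaluation<dim, degree, n_points_1d, 1, Number>   phi_energy(data, 0, 0, dim + 1);
* @endcode
*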
1899
1900 *
1901 * When it comes to the evaluation of the body force vector, we distinguish
1902 * between two cases for efficiency reasons: In case we have a constant
1903 * function (derived from Functions::ConstantFunction), we can precompute
1904 * the value outside the loop over quadrature points and simply use the
1905 * value everywhere. For a more general function, we instead need to call
1906 * the `evaluate_function()` method we provided above; this path is more
1907 * expensive because we need to access the memory associated with the
1908 * quadrature point data.
1909 *
1910
1911 *
1912 * The rest follows the other tutorial programs. Since we have implemented
1913 * all physics for the Euler equations in the separate `euler_flux()`
1914 * function, all we have to do here is to call this function
1915 * given the current solution evaluated at quadrature points, returned by
1916 * `phi.get_value(q)`, and tell the FEEvaluation object to queue the flux
1917 * for testing it by the gradients of the shape functions (which is a Tensor
1918 * of outer `dim+2` components, each holding a tensor of `dim` components
1919 * for the @f$x,y,z@f$ component of the Euler flux). One final thing worth
1920 * mentioning is the order in which we queue the data for testing by the
1921 * value of the test function, `phi.submit_value()`, in case we are given an
1922 * external function: We must do this after calling `phi.get_value(q)`,
1923 * because `get_value()` (reading the solution) and `submit_value()`
1924 * (queuing the value for multiplication by the test function and summation
1925 * over quadrature points) access the same underlying data field. Here it
1926 * would be easy to achieve also without temporary variable `w_q` since
1927 * there is no mixing between values and gradients. For more complicated
1928 * setups, one has to first copy out e.g. both the value and gradient at a
1929 * quadrature point and then queue results again by
1930 * FEEvaluationBase::submit_value() and FEEvaluationBase::submit_gradient().
1931 *
1932
1933 *
1934 * As a final note, we mention that we do not use the first MatrixFree
1935 * argument of this function, which is a call-back from MatrixFree::loop().
1936 * The interface imposes the present list of arguments, but since we are in
1937 * a member function where the MatrixFree object is already available as the
1938 * `data` variable, we stick with that to avoid confusion.
1939 *
1940 * @code
1941 *   template <int dim, int degree, int n_points_1d>
1942 *   void EulerOperator<dim, degree, n_points_1d>::local_apply_cell(
1943 *   const MatrixFree<dim, Number> &,
1944 *   LinearAlgebra::distributed::Vector<Number> &dst,
1945 *   const LinearAlgebra::distributed::Vector<Number> &src,
1946 *   const std::pair<unsigned int, unsigned int> &cell_range) const
1947 *   {
1948 *   FEEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi(data);
1949 *  
1950 *   Tensor<1, dim, VectorizedArray<Number>> constant_body_force;
1951 *   const Functions::ConstantFunction<dim> *constant_function =
1952 *   dynamic_cast<Functions::ConstantFunction<dim> *>(body_force.get());
1953 *  
1954 *   if (constant_function)
1955 *   constant_body_force = evaluate_function<dim, Number, dim>(
1956 *   *constant_function, Point<dim, VectorizedArray<Number>>());
1957 *  
1958 *   for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
1959 *   {
1960 *   phi.reinit(cell);
1961 *   phi.gather_evaluate(src, EvaluationFlags::values);
1962 *  
1963 *   for (const unsigned int q : phi.quadrature_point_indices())
1964 *   {
1965 *   const auto w_q = phi.get_value(q);
1966 *   phi.submit_gradient(euler_flux<dim>(w_q), q);
1967 *   if (body_force.get() != nullptr)
1968 *   {
1969 *   const Tensor<1, dim, VectorizedArray<Number>> force =
1970 *   constant_function ? constant_body_force :
1971 *   evaluate_function<dim, Number, dim>(
1972 *   *body_force, phi.quadrature_point(q));
1973 *  
1974 *   Tensor<1, dim + 2, VectorizedArray<Number>> forcing;
1975 *   for (unsigned int d = 0; d < dim; ++d)
1976 *   forcing[d + 1] = w_q[0] * force[d];
1977 *   for (unsigned int d = 0; d < dim; ++d)
1978 *   forcing[dim + 1] += force[d] * w_q[d + 1];
1979 *  
1980 *   phi.submit_value(forcing, q);
1981 *   }
1982 *   }
1983 *  
1984 *   phi.integrate_scatter(((body_force.get() != nullptr) ?
1985 *   EvaluationFlags::values :
1986 *   EvaluationFlags::nothing) |
1987 *   EvaluationFlags::gradients,
1988 *   dst);
1989 *   }
1990 *   }
1991 *  
1992 *  
1993 *  
1994 * @endcode
1995 *
1996 * The next function concerns the computation of integrals on interior
1997 * faces, where we need evaluators from both cells adjacent to the face. We
1998 * associate the variable `phi_m` with the solution component @f$\mathbf{w}^-@f$
1999 * and the variable `phi_p` with the solution component @f$\mathbf{w}^+@f$. We
2000 * distinguish the two sides in the constructor of FEFaceEvaluation by the
2001 * second argument, with `true` for the interior side and `false` for the
2002 * exterior side, with interior and exterior denoting the orientation with
2003 * respect to the normal vector.
2004 *
2005
2006 *
2007 * Note that the calls FEFaceEvaluation::gather_evaluate() and
2008 * FEFaceEvaluation::integrate_scatter() combine the access to the vectors
2009 * and the sum factorization parts. This combined operation not only saves a
2010 * line of code, but also contains an important optimization: Given that we
2011 * use a nodal basis in terms of the Lagrange polynomials in the points of
2012 * the Gauss-Lobatto quadrature formula, only @f$(p+1)^{d-1}@f$ out of the
2013 * @f$(p+1)^d@f$ basis functions evaluate to non-zero on each face. Thus, the
2014 * evaluator only accesses the necessary data in the vector and skips the
2015 * parts which are multiplied by zero. If we had first read the vector, we
2016 * would have needed to load all data from the vector, as the call in
2017 * isolation would not know what data is required in subsequent
2018 * operations. If the subsequent FEFaceEvaluation::evaluate() call requests
2019 * values and derivatives, indeed all @f$(p+1)^d@f$ vector entries for each
2020 * component are needed, as the normal derivative is nonzero for all basis
2021 * functions.
2022 *
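*
* To put a number on this: for polynomial degree @f$p=4@f$ in 3d, only
* @f$(p+1)^{d-1} = 25@f$ of the @f$(p+1)^d = 125@f$ basis functions per
* component are nonzero on a given face, so the combined call reads only a
* fifth of the cell data.
*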
2023
2024 *
2025 * The arguments to the evaluators as well as the procedure is similar to
2026 * the cell evaluation. We again use the more accurate (over-)integration
2027 * scheme due to the nonlinear terms, specified as the third template
2028 * argument in the list. At the quadrature points, we then go to our
2029 * free-standing function for the numerical flux. It receives the solution
2030 * evaluated at quadrature points from both sides (i.e., @f$\mathbf{w}^-@f$ and
2031 * @f$\mathbf{w}^+@f$), as well as the normal vector onto the minus side. As
2032 * explained above, the numerical flux is already multiplied by the normal
2033 * vector from the minus side. We need to switch the sign because the
2034 * boundary term comes with a minus sign in the weak form derived in the
2035 * introduction. The flux is then queued for testing both on the minus side
2036 * and on the plus side, with opposite sign since the normal vector from the
2037 * plus side is exactly opposed to the one from the minus side.
2038 *
2039 * @code
2040 *   template <int dim, int degree, int n_points_1d>
2041 *   void EulerOperator<dim, degree, n_points_1d>::local_apply_face(
2042 *   const MatrixFree<dim, Number> &,
2043 *   LinearAlgebra::distributed::Vector<Number> &dst,
2044 *   const LinearAlgebra::distributed::Vector<Number> &src,
2045 *   const std::pair<unsigned int, unsigned int> &face_range) const
2046 *   {
2047 *   FEFaceEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi_m(data,
2048 *   true);
2049 *   FEFaceEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi_p(data,
2050 *   false);
2051 *  
2052 *   for (unsigned int face = face_range.first; face < face_range.second; ++face)
2053 *   {
2054 *   phi_p.reinit(face);
2055 *   phi_p.gather_evaluate(src, EvaluationFlags::values);
2056 *  
2057 *   phi_m.reinit(face);
2058 *   phi_m.gather_evaluate(src, EvaluationFlags::values);
2059 *  
2060 *   for (const unsigned int q : phi_m.quadrature_point_indices())
2061 *   {
2062 *   const auto numerical_flux =
2063 *   euler_numerical_flux<dim>(phi_m.get_value(q),
2064 *   phi_p.get_value(q),
2065 *   phi_m.normal_vector(q));
2066 *   phi_m.submit_value(-numerical_flux, q);
2067 *   phi_p.submit_value(numerical_flux, q);
2068 *   }
2069 *  
2070 *   phi_p.integrate_scatter(EvaluationFlags::values, dst);
2071 *   phi_m.integrate_scatter(EvaluationFlags::values, dst);
2072 *   }
2073 *   }
2074 *  
2075 *  
2076 *  
2077 * @endcode
2078 *
2079 * For faces located at the boundary, we need to impose the appropriate
2080 * boundary conditions. In this tutorial program, we implement four cases as
2081 * mentioned above. (A fifth case, for supersonic outflow conditions is
2082 * discussed in the "Results" section below.) The discontinuous Galerkin
2083 * method imposes boundary conditions not as constraints, but only
2084 * weakly. Thus, the various conditions are imposed by finding an appropriate
2085 * <i>exterior</i> quantity @f$\mathbf{w}^+@f$ that is then handed to the
2086 * numerical flux function also used for the interior faces. In essence,
2087 * we "pretend" a state on the outside of the domain in such a way that
2088 * if that were reality, the solution of the PDE would satisfy the boundary
2089 * conditions we want.
2090 *
2091
2092 *
2093 * For wall boundaries, we need to impose a no-normal-flux condition on the
2094 * momentum variable, whereas we use a Neumann condition for the density and
2095 * energy with @f$\rho^+ = \rho^-@f$ and @f$E^+ = E^-@f$. To achieve the no-normal-flux
2096 * condition, we set the exterior values to the interior values and
2097 * subtract two times the momentum in wall-normal direction, i.e., in the
2098 * direction of the normal vector.
2099 *
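*
* In formulas, the exterior momentum used below is @f$(\rho\mathbf{u})^+ =
* (\rho\mathbf{u})^- - 2 \left[(\rho\mathbf{u})^- \cdot \mathbf{n}\right]
* \mathbf{n}@f$, i.e., the mirror image of the interior momentum with respect
* to the wall.
*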
2100
2101 *
2102 * For inflow boundaries, we simply set the given Dirichlet data
2103 * @f$\mathbf{w}_\mathrm{D}@f$ as a boundary value. An alternative would have been
2104 * to use @f$\mathbf{w}^+ = -\mathbf{w}^- + 2 \mathbf{w}_\mathrm{D}@f$, the
2105 * so-called mirror principle.
2106 *
2107
2108 *
2109 * The imposition of outflow is essentially a Neumann condition, i.e.,
2110 * setting @f$\mathbf{w}^+ = \mathbf{w}^-@f$. For the case of subsonic outflow,
2111 * we still need to impose a value for the energy, which we derive from the
2112 * respective function. A special step is needed for the case of
2113 * <i>backflow</i>, i.e., the case where there is a momentum flux into the
2114 * domain on the Neumann portion. According to the literature (a fact that can
2115 * be derived by appropriate energy arguments), we must switch to another
2116 * variant of the flux on inflow parts, see Gravemeier, Comerford,
2117 * Yoshihara, Ismail, Wall, "A novel formulation for Neumann inflow
2118 * conditions in biomechanics", Int. J. Numer. Meth. Biomed. Eng., vol. 28
2119 * (2012). Here, the momentum term needs to be added once again, which
2120 * corresponds to removing the flux contribution on the momentum
2121 * variables. We do this in a post-processing step, and only for the case
2122 * when we are both at an outflow boundary and the dot product between the
2123 * normal vector and the momentum (or, equivalently, velocity) is
2124 * negative. As we work on data of several quadrature points at once for
2125 * SIMD vectorizations, we here need to explicitly loop over the array
2126 * entries of the SIMD array.
2127 *
2128
2129 *
2130 * In the implementation below, we check for the various types
2131 * of boundaries at the level of quadrature points. Of course, we could also
2132 * have moved the decision out of the quadrature point loop and treat entire
2133 * faces as of the same kind, which avoids some map/set lookups in the inner
2134 * loop over quadrature points. However, the loss of efficiency is hardly
2135 * noticeable, so we opt for the simpler code here. Also note that the final
2136 * `else` clause will catch the case when some part of the boundary was not
2137 * assigned any boundary condition via `EulerOperator::set_..._boundary(...)`.
2138 *
2139 * @code
2140 *   template <int dim, int degree, int n_points_1d>
2141 *   void EulerOperator<dim, degree, n_points_1d>::local_apply_boundary_face(
2142 *   const MatrixFree<dim, Number> &,
2143 *   LinearAlgebra::distributed::Vector<Number> &dst,
2144 *   const LinearAlgebra::distributed::Vector<Number> &src,
2145 *   const std::pair<unsigned int, unsigned int> &face_range) const
2146 *   {
2147 *   FEFaceEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi(data, true);
2148 *  
2149 *   for (unsigned int face = face_range.first; face < face_range.second; ++face)
2150 *   {
2151 *   phi.reinit(face);
2152 *   phi.gather_evaluate(src, EvaluationFlags::values);
2153 *  
2154 *   for (const unsigned int q : phi.quadrature_point_indices())
2155 *   {
2156 *   const auto w_m = phi.get_value(q);
2157 *   const auto normal = phi.normal_vector(q);
2158 *  
2159 *   auto rho_u_dot_n = w_m[1] * normal[0];
2160 *   for (unsigned int d = 1; d < dim; ++d)
2161 *   rho_u_dot_n += w_m[1 + d] * normal[d];
2162 *  
2163 *   bool at_outflow = false;
2164 *  
2165 *   Tensor<1, dim + 2, VectorizedArray<Number>> w_p;
2166 *   const auto boundary_id = data.get_boundary_id(face);
2167 *   if (wall_boundaries.find(boundary_id) != wall_boundaries.end())
2168 *   {
2169 *   w_p[0] = w_m[0];
2170 *   for (unsigned int d = 0; d < dim; ++d)
2171 *   w_p[d + 1] = w_m[d + 1] - 2. * rho_u_dot_n * normal[d];
2172 *   w_p[dim + 1] = w_m[dim + 1];
2173 *   }
2174 *   else if (inflow_boundaries.find(boundary_id) !=
2175 *   inflow_boundaries.end())
2176 *   w_p =
2177 *   evaluate_function(*inflow_boundaries.find(boundary_id)->second,
2178 *   phi.quadrature_point(q));
2179 *   else if (subsonic_outflow_boundaries.find(boundary_id) !=
2180 *   subsonic_outflow_boundaries.end())
2181 *   {
2182 *   w_p = w_m;
2183 *   w_p[dim + 1] = evaluate_function(
2184 *   *subsonic_outflow_boundaries.find(boundary_id)->second,
2185 *   phi.quadrature_point(q),
2186 *   dim + 1);
2187 *   at_outflow = true;
2188 *   }
2189 *   else
2190 *   AssertThrow(false,
2191 *   ExcMessage("Unknown boundary id, did "
2192 *   "you set a boundary condition for "
2193 *   "this part of the domain boundary?"));
2194 *  
2195 *   auto flux = euler_numerical_flux<dim>(w_m, w_p, normal);
2196 *  
2197 *   if (at_outflow)
2198 *   for (unsigned int v = 0; v < VectorizedArray<Number>::size(); ++v)
2199 *   {
2200 *   if (rho_u_dot_n[v] < -1e-12)
2201 *   for (unsigned int d = 0; d < dim; ++d)
2202 *   flux[d + 1][v] = 0.;
2203 *   }
2204 *  
2205 *   phi.submit_value(-flux, q);
2206 *   }
2207 *  
2208 *   phi.integrate_scatter(EvaluationFlags::values, dst);
2209 *   }
2210 *   }
2211 *  
2212 *  
2213 *  
2214 * @endcode
2215 *
2216 * The next function implements the inverse mass matrix operation. The
2217 * algorithms and rationale have been discussed extensively in the
2218 * introduction, so we here limit ourselves to the technicalities of the
2219 * MatrixFreeOperators::CellwiseInverseMassMatrix class. It performs
2220 * operations similar to the forward evaluation of the mass matrix, except with a
2221 * different interpolation matrix, representing the inverse @f$S^{-1}@f$
2222 * factors. These represent a change of basis from the specified basis (in
2223 * this case, the Lagrange basis in the points of the Gauss--Lobatto
2224 * quadrature formula) to the Lagrange basis in the points of the Gauss
2225 * quadrature formula. In the latter basis, we can apply the inverse of the
2226 * point-wise `JxW` factor, i.e., the quadrature weight times the
2227 * determinant of the Jacobian of the mapping from reference to real
2228 * coordinates. Once this is done, the basis is changed back to the nodal
2229 * Gauss--Lobatto basis. All of these operations are done by the
2230 * `apply()` function below. What we need to provide are the local fields to
2231 * operate on (which we extract from the global vector by an FEEvaluation
2232 * object), and we then write the results back to the destination vector of
2233 * the mass matrix operation.
2234 *
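 * Using the matrices that will also appear in the discussion of the
 * projection further below (@f$S@f$ collecting the shape function values in
 * the quadrature points and @f$J^K@f$ the diagonal matrix of `JxW` factors),
 * the cell-wise operation just described can be summarized as
 * @f[
 * \left(\mathcal M^K\right)^{-1}
 * = \left(S J^K S^\mathrm T\right)^{-1}
 * = S^{-\mathrm T} \left(J^K\right)^{-1} S^{-1},
 * @f]
 * read from right to left: a change of basis, a point-wise division by the
 * `JxW` factors, and the change of basis back.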
2235
2236 *
2237 * One thing to note is that we added two integer arguments (that are
2238 * optional) to the constructor of FEEvaluation, the first being 0
2239 * (selecting among the DoFHandler objects in multi-DoFHandler systems; here,
2240 * we only have one) and the second being 1 to select the quadrature
2241 * formula. As we use the quadrature formula 0 for the over-integration of
2242 * nonlinear terms, we use the formula 1 with the default @f$p+1@f$ (or
2243 * `fe_degree+1` in terms of the variable name) points for the mass
2244 * matrix. This leads to square contributions to the mass matrix and ensures
2245 * exact integration, as explained in the introduction.
2246 *
2247 * @code
2248 *   template <int dim, int degree, int n_points_1d>
2249 *   void EulerOperator<dim, degree, n_points_1d>::local_apply_inverse_mass_matrix(
2250 *   const MatrixFree<dim, Number> &,
2251 *   LinearAlgebra::distributed::Vector<Number> &dst,
2252 *   const LinearAlgebra::distributed::Vector<Number> &src,
2253 *   const std::pair<unsigned int, unsigned int> &cell_range) const
2254 *   {
2255 *   FEEvaluation<dim, degree, degree + 1, dim + 2, Number> phi(data, 0, 1);
2256 *   MatrixFreeOperators::CellwiseInverseMassMatrix<dim, degree, dim + 2, Number>
2257 *   inverse(phi);
2258 *  
2259 *   for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
2260 *   {
2261 *   phi.reinit(cell);
2262 *   phi.read_dof_values(src);
2263 *  
2264 *   inverse.apply(phi.begin_dof_values(), phi.begin_dof_values());
2265 *  
2266 *   phi.set_dof_values(dst);
2267 *   }
2268 *   }
2269 *  
2270 *  
2271 *  
2272 * @endcode
2273 *
2274 *
2275 * <a name="step_67-Theapplyandrelatedfunctions"></a>
2276 * <h4>The apply() and related functions</h4>
2277 *
2278
2279 *
2280 * We now come to the function which implements the evaluation of the Euler
2281 * operator as a whole, i.e., @f$\mathcal M^{-1} \mathcal L(t, \mathbf{w})@f$,
2282 * calling into the local evaluators presented above. The steps should be
2283 * clear from the previous code. One thing to note is that we need to adjust
2284 * the time in the functions we have associated with the various parts of
2285 * the boundary, in order to be consistent with the equation in case the
2286 * boundary data is time-dependent. Then, we call MatrixFree::loop() to
2287 * perform the cell and face integrals, including the necessary ghost data
2288 * exchange in the `src` vector. The seventh argument to the function,
2289 * `true`, specifies that we want to zero the `dst` vector as part of the
2290 * loop, before we start accumulating integrals into it. This variant is
2291 * preferred over explicitly calling `dst = 0.;` before the loop as the
2292 * zeroing operation is done on a subrange of the vector in parts that are
2293 * written by the integrals nearby. This enhances data locality and allows
2294 * for caching, saving one roundtrip of vector data to main memory and
2295 * improving performance. The last two arguments to the loop determine which
2296 * data is exchanged: Since we only access the values of the shape functions
2297 * on faces, typical of first-order hyperbolic problems, and since we have
2298 * a nodal basis with nodes at the reference element surface, we only need
2299 * to exchange those parts. This again saves precious memory bandwidth.
2300 *
2301
2302 *
2303 * Once the spatial operator @f$\mathcal L@f$ is applied, we need to make a
2304 * second round and apply the inverse mass matrix. Here, we call
2305 * MatrixFree::cell_loop() since only cell integrals appear. The cell loop
2306 * is cheaper than the full loop as access only goes to the degrees of
2307 * freedom associated with the locally owned cells, which is simply the
2308 * locally owned degrees of freedom for DG discretizations. Thus, no ghost
2309 * exchange is needed here.
2310 *
2311
2312 *
2313 * Around all these functions, we put timer scopes to record the
2314 * computational time for statistics about the contributions of the various
2315 * parts.
2316 *
2317 * @code
2318 *   template <int dim, int degree, int n_points_1d>
2319 *   void EulerOperator<dim, degree, n_points_1d>::apply(
2320 *   const double current_time,
2321 *   const LinearAlgebra::distributed::Vector<Number> &src,
2322 *   LinearAlgebra::distributed::Vector<Number> &dst) const
2323 *   {
2324 *   {
2325 *   TimerOutput::Scope t(timer, "apply - integrals");
2326 *  
2327 *   for (auto &i : inflow_boundaries)
2328 *   i.second->set_time(current_time);
2329 *   for (auto &i : subsonic_outflow_boundaries)
2330 *   i.second->set_time(current_time);
2331 *  
2332 *   data.loop(&EulerOperator::local_apply_cell,
2333 *   &EulerOperator::local_apply_face,
2334 *   &EulerOperator::local_apply_boundary_face,
2335 *   this,
2336 *   dst,
2337 *   src,
2338 *   true,
2339 *   MatrixFree<dim, Number>::DataAccessOnFaces::values,
2340 *   MatrixFree<dim, Number>::DataAccessOnFaces::values);
2341 *   }
2342 *  
2343 *   {
2344 *   TimerOutput::Scope t(timer, "apply - inverse mass");
2345 *  
2346 *   data.cell_loop(&EulerOperator::local_apply_inverse_mass_matrix,
2347 *   this,
2348 *   dst,
2349 *   dst);
2350 *   }
2351 *   }
2352 *  
2353 *  
2354 *  
2355 * @endcode
2356 *
2357 * Let us move to the function that does an entire stage of a Runge--Kutta
2358 * update. It calls EulerOperator::apply() followed by some updates
2359 * to the vectors, namely `next_ri = solution + factor_ai * k_i` and
2360 * `solution += factor_solution * k_i`. Rather than performing these
2361 * steps through the vector interfaces, we here present an alternative
2362 * strategy that is faster on cache-based architectures. As the memory
2363 * consumed by the vectors is often much larger than what fits into caches,
2364 * the data has to effectively come from the slow RAM memory. The situation
2365 * can be improved by loop fusion, i.e., performing both the updates to
2366 * `next_ri` and `solution` within a single sweep. In that case, we would
2367 * read the two vectors `k_i` and `solution` and write into `next_ri` and
2368 * `solution`, compared to at least four reads and two writes in the baseline
2369 * case. Here, we go one step further and perform the loop immediately when
2370 * the mass matrix inversion has finished on a part of the
2371 * vector. MatrixFree::cell_loop() provides a mechanism to attach an
2372 * `std::function` that is called before the loop over cells first touches a
2373 * vector entry (which we do not use here, but is e.g. used for zeroing the
2374 * vector) and a second `std::function` to be called after the loop last
2375 * touches an entry. The callback comes in the form of a range over the given
2376 * vector (in terms of the local index numbering in the MPI universe) that can
2377 * be addressed by `local_element()` functions.
2378 *
2379
2380 *
2381 * For this second callback, we create a lambda that works on a range and
2382 * writes the respective update on this range. Ideally, we would add the
2383 * `DEAL_II_OPENMP_SIMD_PRAGMA` before the local loop to suggest to the
2384 * compiler to SIMD parallelize this loop (which means in practice that we
2385 * ensure that there is no overlap, also called aliasing, between the index
2386 * ranges of the pointers we use inside the loops). It turns out that at the
2387 * time of this writing, GCC 7.2 fails to compile an OpenMP pragma inside a
2388 * lambda function, so we comment this pragma out below. If your compiler is
2389 * newer, you should be able to uncomment these lines again.
2390 *
2391
2392 *
2393 * Note that we select a different code path for the last
2394 * Runge--Kutta stage when we do not need to update the `next_ri`
2395 * vector. This strategy gives a considerable speedup. Whereas the inverse
2396 * mass matrix and vector updates take more than 60% of the computational
2397 * time with default vector updates on a 40-core machine, the percentage is
2398 * around 35% with the more optimized variant. In other words, this is a
2399 * speedup of around a third.
2400 *
2401 * @code
2402 *   template <int dim, int degree, int n_points_1d>
2403 *   void EulerOperator<dim, degree, n_points_1d>::perform_stage(
2404 *   const Number current_time,
2405 *   const Number factor_solution,
2406 *   const Number factor_ai,
2407 *   const LinearAlgebra::distributed::Vector<Number> &current_ri,
2408 *   LinearAlgebra::distributed::Vector<Number> &vec_ki,
2409 *   LinearAlgebra::distributed::Vector<Number> &solution,
2410 *   LinearAlgebra::distributed::Vector<Number> &next_ri) const
2411 *   {
2412 *   {
2413 *   TimerOutput::Scope t(timer, "rk_stage - integrals L_h");
2414 *  
2415 *   for (auto &i : inflow_boundaries)
2416 *   i.second->set_time(current_time);
2417 *   for (auto &i : subsonic_outflow_boundaries)
2418 *   i.second->set_time(current_time);
2419 *  
2420 *   data.loop(&EulerOperator::local_apply_cell,
2421 *   &EulerOperator::local_apply_face,
2422 *   &EulerOperator::local_apply_boundary_face,
2423 *   this,
2424 *   vec_ki,
2425 *   current_ri,
2426 *   true,
2427 *   MatrixFree<dim, Number>::DataAccessOnFaces::values,
2428 *   MatrixFree<dim, Number>::DataAccessOnFaces::values);
2429 *   }
2430 *  
2431 *  
2432 *   {
2433 *   TimerOutput::Scope t(timer, "rk_stage - inv mass + vec upd");
2434 *   data.cell_loop(
2435 *   &EulerOperator::local_apply_inverse_mass_matrix,
2436 *   this,
2437 *   next_ri,
2438 *   vec_ki,
2439 *   std::function<void(const unsigned int, const unsigned int)>(),
2440 *   [&](const unsigned int start_range, const unsigned int end_range) {
2441 *   const Number ai = factor_ai;
2442 *   const Number bi = factor_solution;
2443 *   if (ai == Number())
2444 *   {
2445 *   /* DEAL_II_OPENMP_SIMD_PRAGMA */
2446 *   for (unsigned int i = start_range; i < end_range; ++i)
2447 *   {
2448 *   const Number k_i = next_ri.local_element(i);
2449 *   const Number sol_i = solution.local_element(i);
2450 *   solution.local_element(i) = sol_i + bi * k_i;
2451 *   }
2452 *   }
2453 *   else
2454 *   {
2455 *   /* DEAL_II_OPENMP_SIMD_PRAGMA */
2456 *   for (unsigned int i = start_range; i < end_range; ++i)
2457 *   {
2458 *   const Number k_i = next_ri.local_element(i);
2459 *   const Number sol_i = solution.local_element(i);
2460 *   solution.local_element(i) = sol_i + bi * k_i;
2461 *   next_ri.local_element(i) = sol_i + ai * k_i;
2462 *   }
2463 *   }
2464 *   });
2465 *   }
2466 *   }
2467 *  
2468 *  
2469 *  
2470 * @endcode
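 *
 * As a usage illustration, the following sketch shows the call pattern with
 * which a low-storage Runge--Kutta loop drives perform_stage(). It is a
 * simplified stand-in for the LowStorageRungeKuttaIntegrator::perform_time_step()
 * function defined earlier in this program, with hypothetical coefficient
 * vectors `ai`, `bi`, `ci` (one entry per stage, the last entry of `ai`
 * unused) in place of the tabulated scheme data:
 *
 * @code
 *   #include <vector>
 *
 *   template <typename Operator, typename VectorType>
 *   void sketch_perform_time_step(const Operator            &pde_operator,
 *                                 const double               time,
 *                                 const double               time_step,
 *                                 const std::vector<double> &ai,
 *                                 const std::vector<double> &bi,
 *                                 const std::vector<double> &ci,
 *                                 VectorType                &solution,
 *                                 VectorType                &vec_ri,
 *                                 VectorType                &vec_ki)
 *   {
 *     // First stage: r_1 is the old solution itself, so we pass `solution`
 *     // as the current r_i and let `vec_ri` receive r_2.
 *     pde_operator.perform_stage(time,
 *                                bi[0] * time_step,
 *                                ai[0] * time_step,
 *                                solution,
 *                                vec_ki,
 *                                solution,
 *                                vec_ri);
 *
 *     // Remaining stages: read r_i from `vec_ri`, accumulate into
 *     // `solution`, and write r_{i+1} back into `vec_ri`. The last stage
 *     // does not need r_{i+1}, which we signal by a zero factor_ai.
 *     for (unsigned int stage = 1; stage < bi.size(); ++stage)
 *       pde_operator.perform_stage(time + ci[stage] * time_step,
 *                                  bi[stage] * time_step,
 *                                  (stage + 1 == bi.size() ?
 *                                     0. :
 *                                     ai[stage] * time_step),
 *                                  vec_ri,
 *                                  vec_ki,
 *                                  solution,
 *                                  vec_ri);
 *   }
 * @endcode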
2471 *
2472 * Having discussed the implementation of the functions that deal with
2473 * advancing the solution by one time step, let us now move to functions
2474 * that implement other, ancillary operations. Specifically, these are
2475 * functions that compute projections, evaluate errors, and compute the speed
2476 * of information transport on a cell.
2477 *
2478
2479 *
2480 * The first of these functions is essentially equivalent to
2481 * VectorTools::project(), just much faster because it is specialized for DG
2482 * elements where there is no need to set up and solve a linear system, as
2483 * each element has independent basis functions. The reason why we show the
2484 * code here, besides a small speedup of this non-critical operation, is that
2485 * it shows additional functionality provided by
2486 * MatrixFreeOperators::CellwiseInverseMassMatrix.
2487 *
2488
2489 *
2490 * The projection operation works as follows: If we denote the matrix of
2491 * shape functions evaluated at quadrature points by @f$S@f$, the projection on
2492 * cell @f$K@f$ is an operation of the form @f$\underbrace{S J^K S^\mathrm
2493 * T}_{\mathcal M^K} \mathbf{w}^K = S J^K
2494 * \tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}@f$, where @f$J^K@f$ is the diagonal
2495 * matrix containing the determinant of the Jacobian times the quadrature
2496 * weight (JxW), @f$\mathcal M^K@f$ is the cell-wise mass matrix, and
2497 * @f$\tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}@f$ is the evaluation of the
2498 * field to be projected onto quadrature points. (In reality the matrix @f$S@f$
2499 * has additional structure through the tensor product, as explained in the
2500 * introduction.) This system can now equivalently be written as
2501 * @f$\mathbf{w}^K = \left(S J^K S^\mathrm T\right)^{-1} S J^K
2502 * \tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q} = S^{-\mathrm T}
2503 * \left(J^K\right)^{-1} S^{-1} S J^K
2504 * \tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}@f$. Now, the term @f$S^{-1} S@f$ and
2505 * then @f$\left(J^K\right)^{-1} J^K@f$ cancel, resulting in the final
2506 * expression @f$\mathbf{w}^K = S^{-\mathrm T}
2507 * \tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}@f$. This operation is
2508 * implemented by
2509 * MatrixFreeOperators::CellwiseInverseMassMatrix::transform_from_q_points_to_basis().
2510 * The name is derived from the fact that this projection is simply
2511 * the multiplication by @f$S^{-\mathrm T}@f$, a basis change from the
2512 * nodal basis in the points of the Gaussian quadrature to the given finite
2513 * element basis. Note that we call FEEvaluation::set_dof_values() to write
2514 * the result into the vector, overwriting previous content, rather than
2515 * accumulating the results as typical in integration tasks -- we can do
2516 * this because every vector entry has contributions from only a single
2517 * cell for discontinuous Galerkin discretizations.
2518 *
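 * Collecting the identities from the previous paragraph into a single chain,
 * the projection on cell @f$K@f$ reads
 * @f[
 * \mathbf{w}^K
 * = \left(S J^K S^\mathrm T\right)^{-1} S J^K
 *   \tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}
 * = S^{-\mathrm T} \left(J^K\right)^{-1} S^{-1} S J^K
 *   \tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}
 * = S^{-\mathrm T} \tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q},
 * @f]
 * which is exactly the single matrix-vector product applied in the code below.
 *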
2519 * @code
2520 *   template <int dim, int degree, int n_points_1d>
2521 *   void EulerOperator<dim, degree, n_points_1d>::project(
2522 *   const Function<dim> &function,
2523 *   LinearAlgebra::distributed::Vector<Number> &solution) const
2524 *   {
2525 *   FEEvaluation<dim, degree, degree + 1, dim + 2, Number> phi(data, 0, 1);
2526 *   MatrixFreeOperators::CellwiseInverseMassMatrix<dim, degree, dim + 2, Number>
2527 *   inverse(phi);
2528 *   solution.zero_out_ghost_values();
2529 *   for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
2530 *   {
2531 *   phi.reinit(cell);
2532 *   for (const unsigned int q : phi.quadrature_point_indices())
2533 *   phi.submit_dof_value(evaluate_function(function,
2534 *   phi.quadrature_point(q)),
2535 *   q);
2536 *   inverse.transform_from_q_points_to_basis(dim + 2,
2537 *   phi.begin_dof_values(),
2538 *   phi.begin_dof_values());
2539 *   phi.set_dof_values(solution);
2540 *   }
2541 *   }
2542 *  
2543 *  
2544 *  
2545 * @endcode
2546 *
2547 * The next function again repeats functionality also provided by the
2548 * deal.II library, namely VectorTools::integrate_difference(). We here show
2549 * the explicit code to highlight how the vectorization across several cells
2550 * works and how to accumulate results via that interface: Recall that each
2551 * <i>lane</i> of the vectorized array holds data from a different cell. By
2552 * looping over all cell batches that are owned by the current MPI process,
2553 * we could then fill a VectorizedArray of results; to obtain a global sum,
2554 * we would need to further go on and sum across the entries in the SIMD
2555 * array. However, such a procedure is not stable as the SIMD array might
2556 * not hold valid data for all its lanes. This happens when the number
2557 * of locally owned cells is not a multiple of the SIMD width. To avoid
2558 * invalid data, we must explicitly skip those invalid lanes when accessing
2559 * the data. While one could imagine that we could make it work by simply
2560 * setting the empty lanes to zero (and thus, not contribute to a sum), the
2561 * situation is more complicated than that: What if we were to compute a
2562 * velocity out of the momentum? Then, we would need to divide by the
2563 * density, which is zero -- the result would consequently be NaN and
2564 * contaminate the result. This trap is avoided by accumulating the results
2565 * from the valid SIMD range as we loop through the cell batches, using the
2566 * function MatrixFree::n_active_entries_per_cell_batch() to give us the
2567 * number of lanes with valid data. It equals VectorizedArray::size() on
2568 * most cells, but can be less on the last cell batch if the number of cells
2569 * has a remainder compared to the SIMD width.
2570 *
2571 * @code
2572 *   template <int dim, int degree, int n_points_1d>
2573 *   std::array<double, 3> EulerOperator<dim, degree, n_points_1d>::compute_errors(
2574 *   const Function<dim> &function,
2575 *   const LinearAlgebra::distributed::Vector<Number> &solution) const
2576 *   {
2577 *   TimerOutput::Scope t(timer, "compute errors");
2578 *   double errors_squared[3] = {};
2579 *   FEEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi(data, 0, 0);
2580 *  
2581 *   for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
2582 *   {
2583 *   phi.reinit(cell);
2584 *   phi.gather_evaluate(solution, EvaluationFlags::values);
2585 *   VectorizedArray<Number> local_errors_squared[3] = {};
2586 *   for (const unsigned int q : phi.quadrature_point_indices())
2587 *   {
2588 *   const auto error =
2589 *   evaluate_function(function, phi.quadrature_point(q)) -
2590 *   phi.get_value(q);
2591 *   const auto JxW = phi.JxW(q);
2592 *  
2593 *   local_errors_squared[0] += error[0] * error[0] * JxW;
2594 *   for (unsigned int d = 0; d < dim; ++d)
2595 *   local_errors_squared[1] += (error[d + 1] * error[d + 1]) * JxW;
2596 *   local_errors_squared[2] += (error[dim + 1] * error[dim + 1]) * JxW;
2597 *   }
2598 *   for (unsigned int v = 0; v < data.n_active_entries_per_cell_batch(cell);
2599 *   ++v)
2600 *   for (unsigned int d = 0; d < 3; ++d)
2601 *   errors_squared[d] += local_errors_squared[d][v];
2602 *   }
2603 *  
2604 *   Utilities::MPI::sum(errors_squared, MPI_COMM_WORLD, errors_squared);
2605 *  
2606 *   std::array<double, 3> errors;
2607 *   for (unsigned int d = 0; d < 3; ++d)
2608 *   errors[d] = std::sqrt(errors_squared[d]);
2609 *  
2610 *   return errors;
2611 *   }
2612 *  
2613 *  
2614 *  
2615 * @endcode
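 *
 * To see the lane-masking pattern of the previous function in isolation,
 * here is a small, self-contained sketch that is independent of the finite
 * element machinery above; the number of valid lanes is made up for the
 * purpose of illustration (in the program it comes from
 * MatrixFree::n_active_entries_per_cell_batch()):
 *
 * @code
 *   #include <deal.II/base/vectorization.h>
 *
 *   #include <algorithm>
 *   #include <iostream>
 *
 *   int main()
 *   {
 *     using namespace dealii;
 *
 *     // Every lane of this SIMD array holds the value 1; pretend that only
 *     // some of the lanes correspond to actual cells.
 *     VectorizedArray<double> local_sum = 1.;
 *     const unsigned int      n_valid =
 *       std::min<unsigned int>(3, VectorizedArray<double>::size());
 *
 *     double sum = 0;
 *     for (unsigned int v = 0; v < n_valid; ++v) // skip the padded lanes
 *       sum += local_sum[v];
 *
 *     std::cout << "sum over " << n_valid << " valid lanes: " << sum
 *               << std::endl;
 *   }
 * @endcode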
2616 *
2617 * This final function of the EulerOperator class is used to estimate the
2618 * transport speed, scaled by the mesh size, that is relevant for setting
2619 * the time step size in the explicit time integrator. In the Euler
2620 * equations, there are two speeds of transport, namely the convective
2621 * velocity @f$\mathbf{u}@f$ and the propagation of sound waves with sound
2622 * speed @f$c = \sqrt{\gamma p/\rho}@f$ relative to the medium moving at
2623 * velocity @f$\mathbf u@f$.
2624 *
2625
2626 *
2627 * In the formula for the time step size, we are not interested in
2628 * these absolute speeds, but in the amount of time it takes for
2629 * information to cross a single cell. For information transported along with
2630 * the medium, @f$\mathbf u@f$ is scaled by the mesh size,
2631 * so an estimate of the maximal velocity can be obtained by computing
2632 * @f$\|J^{-\mathrm T} \mathbf{u}\|_\infty@f$, where @f$J@f$ is the Jacobian of the
2633 * transformation from real to the reference domain. Note that
2634 * FEEvaluationBase::inverse_jacobian() returns the inverse and transpose
2635 * Jacobian, representing the metric term from real to reference
2636 * coordinates, so we do not need to transpose it again. We store this limit
2637 * in the variable `convective_limit` in the code below.
2638 *
2639
2640 *
2641 * The sound propagation is isotropic, so we need to take mesh sizes in any
2642 * direction into account. The appropriate mesh size scaling is then given
2643 * by the minimal singular value of @f$J@f$ or, equivalently, the maximal
2644 * singular value of @f$J^{-1}@f$. Note that one could approximate this quantity
2645 * by the minimal distance between vertices of a cell when ignoring curved
2646 * cells. To get the maximal singular value of the Jacobian, the general
2647 * strategy would be some LAPACK function. Since all we need here is an
2648 * estimate, we can avoid the hassle of decomposing a tensor of
2649 * VectorizedArray numbers into several matrices and go into an (expensive)
2650 * eigenvalue function without vectorization, and instead use a few
2651 * iterations (five in the code below) of the power method applied to
2652 * @f$J^{-1}J^{-\mathrm T}@f$. The speed of convergence of this method depends
2653 * on the ratio of the largest to the next largest eigenvalue and the
2654 * initial guess, which is the vector of all ones. This might suggest that
2655 * we get slow convergence on cells close to a cube shape where all
2656 * lengths are almost the same. However, this slow convergence means that
2657 * the result will sit between the two largest singular values, which both
2658 * are close to the maximal value anyway. In all other cases, convergence
2659 * will be quick. Thus, we can merely hardcode 5 iterations here and be
2660 * confident that the result is good.
2661 *
2662 * @code
2663 *   template <int dim, int degree, int n_points_1d>
2664 *   double EulerOperator<dim, degree, n_points_1d>::compute_cell_transport_speed(
2665 *   const LinearAlgebra::distributed::Vector<Number> &solution) const
2666 *   {
2667 *   TimerOutput::Scope t(timer, "compute transport speed");
2668 *   Number max_transport = 0;
2669 *   FEEvaluation<dim, degree, degree + 1, dim + 2, Number> phi(data, 0, 1);
2670 *  
2671 *   for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
2672 *   {
2673 *   phi.reinit(cell);
2674 *   phi.gather_evaluate(solution, EvaluationFlags::values);
2675 *   VectorizedArray<Number> local_max = 0.;
2676 *   for (const unsigned int q : phi.quadrature_point_indices())
2677 *   {
2678 *   const auto solution = phi.get_value(q);
2679 *   const auto velocity = euler_velocity<dim>(solution);
2680 *   const auto pressure = euler_pressure<dim>(solution);
2681 *  
2682 *   const auto inverse_jacobian = phi.inverse_jacobian(q);
2683 *   const auto convective_speed = inverse_jacobian * velocity;
2684 *   VectorizedArray<Number> convective_limit = 0.;
2685 *   for (unsigned int d = 0; d < dim; ++d)
2686 *   convective_limit =
2687 *   std::max(convective_limit, std::abs(convective_speed[d]));
2688 *  
2689 *   const auto speed_of_sound =
2690 *   std::sqrt(gamma * pressure * (1. / solution[0]));
2691 *  
2692 *   Tensor<1, dim, VectorizedArray<Number>> eigenvector;
2693 *   for (unsigned int d = 0; d < dim; ++d)
2694 *   eigenvector[d] = 1.;
2695 *   for (unsigned int i = 0; i < 5; ++i)
2696 *   {
2697 *   eigenvector = transpose(inverse_jacobian) *
2698 *   (inverse_jacobian * eigenvector);
2699 *   VectorizedArray<Number> eigenvector_norm = 0.;
2700 *   for (unsigned int d = 0; d < dim; ++d)
2701 *   eigenvector_norm =
2702 *   std::max(eigenvector_norm, std::abs(eigenvector[d]));
2703 *   eigenvector /= eigenvector_norm;
2704 *   }
2705 *   const auto jac_times_ev = inverse_jacobian * eigenvector;
2706 *   const auto max_eigenvalue = std::sqrt(
2707 *   (jac_times_ev * jac_times_ev) / (eigenvector * eigenvector));
2708 *   local_max =
2709 *   std::max(local_max,
2710 *   max_eigenvalue * speed_of_sound + convective_limit);
2711 *   }
2712 *  
2713 * @endcode
2714 *
2715 * Similarly to the previous function, we must make sure to accumulate
2716 * speed only on the valid cells of a cell batch.
2717 *
2718 * @code
2719 *   for (unsigned int v = 0; v < data.n_active_entries_per_cell_batch(cell);
2720 *   ++v)
2721 *   max_transport = std::max(max_transport, local_max[v]);
2722 *   }
2723 *  
2724 *   max_transport = Utilities::MPI::max(max_transport, MPI_COMM_WORLD);
2725 *  
2726 *   return max_transport;
2727 *   }
2728 *  
2729 *  
2730 *  
2731 * @endcode
2732 *
2733 *
2734 * <a name="step_67-TheEulerProblemclass"></a>
2735 * <h3>The EulerProblem class</h3>
2736 *
2737
2738 *
2739 * This class combines the EulerOperator class with the time integrator and
2740 * the usual global data structures such as FiniteElement and DoFHandler, to
2741 * actually run the simulations of the Euler problem.
2742 *
2743
2744 *
2745 * The member variables are a triangulation, a finite element, a mapping (to
2746 * create high-order curved surfaces, see e.g. @ref step_10 "step-10"), and a DoFHandler to
2747 * describe the degrees of freedom. In addition, we keep an instance of the
2748 * EulerOperator described above around, which will do all heavy lifting in
2749 * terms of integrals, and some parameters for time integration like the
2750 * current time or the time step size.
2751 *
2752
2753 *
2754 * Furthermore, we use a Postprocessor instance to write some additional
2755 * information to the output file, similarly to what was done in
2756 * @ref step_33 "step-33". The interface of the DataPostprocessor class is intuitive,
2757 * requiring us to provide information about what needs to be evaluated
2758 * (typically only the values of the solution, except for the Schlieren plot
2759 * that we only enable in 2d where it makes sense), and the names of what
2760 * gets evaluated. Note that it would also be possible to extract most
2761 * information by calculator tools within visualization programs such as
2762 * ParaView, but it is so much more convenient to do it already when writing
2763 * the output.
2764 *
2765 * @code
2766 *   template <int dim>
2767 *   class EulerProblem
2768 *   {
2769 *   public:
2770 *   EulerProblem();
2771 *  
2772 *   void run();
2773 *  
2774 *   private:
2775 *   void make_grid_and_dofs();
2776 *  
2777 *   void output_results(const unsigned int result_number);
2778 *  
2780 *  
2781 *   ConditionalOStream pcout;
2782 *  
2783 *   #ifdef DEAL_II_WITH_P4EST
2784 *   parallel::distributed::Triangulation<dim> triangulation;
2785 *   #else
2786 *   Triangulation<dim> triangulation;
2787 *   #endif
2788 *  
2789 *   const FESystem<dim> fe;
2790 *   const MappingQ<dim> mapping;
2791 *   DoFHandler<dim> dof_handler;
2792 *  
2793 *   TimerOutput timer;
2794 *  
2795 *   EulerOperator<dim, fe_degree, n_q_points_1d> euler_operator;
2796 *  
2797 *   double time, time_step;
2798 *  
2799 *   class Postprocessor : public DataPostprocessor<dim>
2800 *   {
2801 *   public:
2802 *   Postprocessor();
2803 *  
2804 *   virtual void evaluate_vector_field(
2805 *   const DataPostprocessorInputs::Vector<dim> &inputs,
2806 *   std::vector<Vector<double>> &computed_quantities) const override;
2807 *  
2808 *   virtual std::vector<std::string> get_names() const override;
2809 *  
2810 *   virtual std::vector<
2811 *   DataComponentInterpretation::DataComponentInterpretation>
2812 *   get_data_component_interpretation() const override;
2813 *  
2814 *   virtual UpdateFlags get_needed_update_flags() const override;
2815 *  
2816 *   private:
2817 *   const bool do_schlieren_plot;
2818 *   };
2819 *   };
2820 *  
2821 *  
2822 *  
2823 *   template <int dim>
2824 *   EulerProblem<dim>::Postprocessor::Postprocessor()
2825 *   : do_schlieren_plot(dim == 2)
2826 *   {}
2827 *  
2828 *  
2829 *  
2830 * @endcode
2831 *
2832 * For the main evaluation of the field variables, we first check that the
2833 * lengths of the arrays equal the expected values: the input holds the
2834 * `dim+2` solution components, and we compute `dim+2` (or `dim+3` including
2835 * the Schlieren plot) quantities, matching the names we specify in the
2836 * get_names() function below. Then we loop over all evaluation points and
2837 * fill the respective information: From density @f$\rho@f$, momentum @f$\rho \mathbf{u}@f$
2838 * and energy @f$E@f$, we compute the derived velocity @f$\mathbf u@f$, the pressure @f$p@f$, the
2839 * speed of sound @f$c=\sqrt{\gamma p / \rho}@f$, as well as the Schlieren plot
2840 * showing @f$s = |\nabla \rho|^2@f$ in case it is enabled. (See @ref step_69 "step-69" for
2841 * another example where we create a Schlieren plot.)
2842 *
2843 * @code
2844 *   template <int dim>
2845 *   void EulerProblem<dim>::Postprocessor::evaluate_vector_field(
2846 *   const DataPostprocessorInputs::Vector<dim> &inputs,
2847 *   std::vector<Vector<double>> &computed_quantities) const
2848 *   {
2849 *   const unsigned int n_evaluation_points = inputs.solution_values.size();
2850 *  
2851 *   if (do_schlieren_plot == true)
2852 *   Assert(inputs.solution_gradients.size() == n_evaluation_points,
2853 *   ExcInternalError());
2854 *  
2855 *   Assert(computed_quantities.size() == n_evaluation_points,
2856 *   ExcInternalError());
2857 *   Assert(inputs.solution_values[0].size() == dim + 2, ExcInternalError());
2858 *   Assert(computed_quantities[0].size() ==
2859 *   dim + 2 + (do_schlieren_plot == true ? 1 : 0),
2860 *   ExcInternalError());
2861 *  
2862 *   for (unsigned int p = 0; p < n_evaluation_points; ++p)
2863 *   {
2864 *   Tensor<1, dim + 2> solution;
2865 *   for (unsigned int d = 0; d < dim + 2; ++d)
2866 *   solution[d] = inputs.solution_values[p](d);
2867 *  
2868 *   const double density = solution[0];
2869 *   const Tensor<1, dim> velocity = euler_velocity<dim>(solution);
2870 *   const double pressure = euler_pressure<dim>(solution);
2871 *  
2872 *   for (unsigned int d = 0; d < dim; ++d)
2873 *   computed_quantities[p](d) = velocity[d];
2874 *   computed_quantities[p](dim) = pressure;
2875 *   computed_quantities[p](dim + 1) = std::sqrt(gamma * pressure / density);
2876 *  
2877 *   if (do_schlieren_plot == true)
2878 *   computed_quantities[p](dim + 2) =
2879 *   inputs.solution_gradients[p][0] * inputs.solution_gradients[p][0];
2880 *   }
2881 *   }
2882 *  
2883 *  
2884 *  
2885 *   template <int dim>
2886 *   std::vector<std::string> EulerProblem<dim>::Postprocessor::get_names() const
2887 *   {
2888 *   std::vector<std::string> names;
2889 *   for (unsigned int d = 0; d < dim; ++d)
2890 *   names.emplace_back("velocity");
2891 *   names.emplace_back("pressure");
2892 *   names.emplace_back("speed_of_sound");
2893 *  
2894 *   if (do_schlieren_plot == true)
2895 *   names.emplace_back("schlieren_plot");
2896 *  
2897 *   return names;
2898 *   }
2899 *  
2900 *  
2901 *  
2902 * @endcode
2903 *
2904 * For the interpretation of quantities, we have a vector for the velocity,
2905 * and scalars for the pressure, the speed of sound, and (if enabled) the
2906 * Schlieren plot.
2907 *
2908 * @code
2909 *   template <int dim>
2910 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
2911 *   EulerProblem<dim>::Postprocessor::get_data_component_interpretation() const
2912 *   {
2913 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
2914 *   interpretation;
2915 *   for (unsigned int d = 0; d < dim; ++d)
2916 *   interpretation.push_back(
2917 *   DataComponentInterpretation::component_is_part_of_vector);
2918 *   interpretation.push_back(DataComponentInterpretation::component_is_scalar);
2919 *   interpretation.push_back(DataComponentInterpretation::component_is_scalar);
2920 *  
2921 *   if (do_schlieren_plot == true)
2922 *   interpretation.push_back(
2923 *   DataComponentInterpretation::component_is_scalar);
2924 *  
2925 *   return interpretation;
2926 *   }
2927 *  
2928 *  
2929 *  
2930 * @endcode
2931 *
2932 * With respect to the necessary update flags, we only need the values for
2933 * all quantities but the Schlieren plot, which is based on the density
2934 * gradient.
2935 *
2936 * @code
2937 *   template <int dim>
2938 *   UpdateFlags EulerProblem<dim>::Postprocessor::get_needed_update_flags() const
2939 *   {
2940 *   if (do_schlieren_plot == true)
2941 *   return update_values | update_gradients;
2942 *   else
2943 *   return update_values;
2944 *   }
2945 *  
2946 *  
2947 *  
2948 * @endcode
2949 *
2950 * The constructor for this class is unsurprising: We set up a parallel
2951 * triangulation based on the `MPI_COMM_WORLD` communicator, a vector finite
2952 * element with `dim+2` components for density, momentum, and energy, a
2953 * high-order mapping of the same degree as the underlying finite element,
2954 * and initialize the time and time step to zero.
2955 *
2956 * @code
2957 *   template <int dim>
2958 *   EulerProblem<dim>::EulerProblem()
2959 *   : pcout(std::cout, Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
2960 *   #ifdef DEAL_II_WITH_P4EST
2961 *   , triangulation(MPI_COMM_WORLD)
2962 *   #endif
2963 *   , fe(FE_DGQ<dim>(fe_degree) ^ (dim + 2))
2964 *   , mapping(fe_degree)
2965 *   , dof_handler(triangulation)
2966 *   , timer(pcout, TimerOutput::never, TimerOutput::wall_times)
2967 *   , euler_operator(timer)
2968 *   , time(0)
2969 *   , time_step(0)
2970 *   {}
2971 *  
2972 *  
2973 *  
2974 * @endcode
2975 *
2976 * As a mesh, this tutorial program implements two options, depending on the
2977 * global variable `testcase`: For the analytical variant (`testcase==0`),
2978 * the domain is @f$(0, 10) \times (-5, 5)@f$, with Dirichlet boundary
2979 * conditions (inflow) all around the domain. For `testcase==1`, we set the
2980 * domain to a cylinder in a rectangular box, derived from the flow past
2981 * cylinder testcase for incompressible viscous flow by Sch&auml;fer and
2982 * Turek (1996). Here, we have a larger variety of boundaries. The inflow
2983 * part at the left of the channel is given the inflow type, for which we
2984 * choose a constant inflow profile, whereas we set a subsonic outflow at
2985 * the right. For the boundary around the cylinder (boundary id equal to 2)
2986 * as well as the channel walls (boundary id equal to 3) we use the wall
2987 * boundary type, which is no-normal flow. Furthermore, for the 3d cylinder
2988 * we also add a gravity force in the vertical direction. Having the base mesh
2989 * in place (including the manifolds set by
2990 * GridGenerator::channel_with_cylinder()), we can then perform the
2991 * specified number of global refinements, create the unknown numbering from
2992 * the DoFHandler, and hand the DoFHandler and Mapping objects to the
2993 * initialization of the EulerOperator.
2994 *
2995 * @code
2996 *   template <int dim>
2997 *   void EulerProblem<dim>::make_grid_and_dofs()
2998 *   {
2999 *   switch (testcase)
3000 *   {
3001 *   case 0:
3002 *   {
3003 *   Point<dim> lower_left;
3004 *   for (unsigned int d = 1; d < dim; ++d)
3005 *   lower_left[d] = -5;
3006 *  
3007 *   Point<dim> upper_right;
3008 *   upper_right[0] = 10;
3009 *   for (unsigned int d = 1; d < dim; ++d)
3010 *   upper_right[d] = 5;
3011 *  
3012 *   GridGenerator::hyper_rectangle(triangulation,
3013 *   lower_left,
3014 *   upper_right);
3015 *   triangulation.refine_global(2);
3016 *  
3017 *   euler_operator.set_inflow_boundary(
3018 *   0, std::make_unique<ExactSolution<dim>>(0));
3019 *  
3020 *   break;
3021 *   }
3022 *  
3023 *   case 1:
3024 *   {
3025 *   GridGenerator::channel_with_cylinder(
3026 *   triangulation, 0.03, 1, 0, true);
3027 *  
3028 *   euler_operator.set_inflow_boundary(
3029 *   0, std::make_unique<ExactSolution<dim>>(0));
3030 *   euler_operator.set_subsonic_outflow_boundary(
3031 *   1, std::make_unique<ExactSolution<dim>>(0));
3032 *  
3033 *   euler_operator.set_wall_boundary(2);
3034 *   euler_operator.set_wall_boundary(3);
3035 *  
3036 *   if (dim == 3)
3037 *   euler_operator.set_body_force(
3038 *   std::make_unique<Functions::ConstantFunction<dim>>(
3039 *   std::vector<double>({0., 0., -0.2})));
3040 *  
3041 *   break;
3042 *   }
3043 *  
3044 *   default:
3045 *   DEAL_II_NOT_IMPLEMENTED();
3046 *   }
3047 *  
3048 *   triangulation.refine_global(n_global_refinements);
3049 *  
3050 *   dof_handler.distribute_dofs(fe);
3051 *  
3052 *   euler_operator.reinit(mapping, dof_handler);
3053 *   euler_operator.initialize_vector(solution);
3054 *  
3055 * @endcode
3056 *
3057 * In the following, we output some statistics about the problem. Because we
3058 * often end up with quite large numbers of cells or degrees of freedom, we
3059 * would like to print them with a comma to separate each set of three
3060 * digits. This can be done via "locales", although the way this works is
3061 * not particularly intuitive. @ref step_32 "step-32" explains this in slightly more
3062 * detail.
3063 *
3064 * @code
3065 *   std::locale s = pcout.get_stream().getloc();
3066 *   pcout.get_stream().imbue(std::locale(""));
3067 *   pcout << "Number of degrees of freedom: " << dof_handler.n_dofs()
3068 *   << " ( = " << (dim + 2) << " [vars] x "
3069 *   << triangulation.n_global_active_cells() << " [cells] x "
3070 *   << Utilities::pow(fe_degree + 1, dim) << " [dofs/cell/var] )"
3071 *   << std::endl;
3072 *   pcout.get_stream().imbue(s);
3073 *   }
3074 *  
3075 *  
3076 *  
3077 * @endcode
3078 *
3079 * For output, we first let the Euler operator compute the errors of the
3080 * numerical results. More precisely, we compute the error against the
3081 * analytical result for the analytical solution case, whereas we compute
3082 * the deviation against the background field with constant density and
3083 * energy and constant velocity in @f$x@f$ direction for the second test case.
3084 *
3085
3086 *
3087 * The next step is to create output. This is similar to what is done in
3088 * @ref step_33 "step-33": We let the postprocessor defined above control most of the
3089 * output, except for the primal field that we write directly. For the
3090 * analytical solution test case, we also perform another projection of the
3091 * analytical solution and print the difference between that field and the
3092 * numerical solution. Once we have defined all quantities to be written, we
3093 * build the patches for output. Similarly to @ref step_65 "step-65", we create a
3094 * high-order VTK output by setting the appropriate flag, which enables us
3095 * to visualize fields of high polynomial degrees. Finally, we call the
3096 * `DataOutInterface::write_vtu_in_parallel()` function to write the result
3097 * to the given file name. This function uses special MPI parallel write
3098 * facilities, which are typically more optimized for parallel file systems
3099 * than the standard library's `std::ofstream` variants used in most other
3100 * tutorial programs. A particularly nice feature of the
3101 * `write_vtu_in_parallel()` function is the fact that it can combine output
3102 * from all MPI ranks into a single file, making it unnecessary to have a
3103 * central record of all such files (namely, the "pvtu" file).
3104 *
3105
3106 *
3107 * For parallel programs, it is often instructive to look at the partitioning
3108 * of cells among processors. To this end, one can pass a vector of numbers
3109 * to DataOut::add_data_vector() that contains as many entries as the
3110 * current processor has active cells; these numbers should then be the
3111 * rank of the processor that owns each of these cells. Such a vector
3112 * could, for example, be obtained from
3113 * GridTools::get_subdomain_association(). On the other hand, on each MPI
3114 * process, DataOut will only read those entries that correspond to locally
3115 * owned cells, and these of course all have the same value: namely, the rank
3116 * of the current process. What is in the remaining entries of the vector
3117 * doesn't actually matter, and so we can just get away with a cheap trick: We
3118 * just fill *all* values of the vector we give to DataOut::add_data_vector()
3119 * with the rank of the current MPI process. The key is that on each process,
3120 * only the entries corresponding to the locally owned cells will be read,
3121 * ignoring the (wrong) values in other entries. The fact that every process
3122 * submits a vector in which the locally owned subset of entries is correct
3123 * is all that is necessary.
3124 *
3125
3126 *
3127 * @note As of 2023, VisIt 3.3.3 can still not deal with higher-order cells.
3128 * Rather, it simply reports that there is no data to show. To view the
3129 * results of this program with VisIt, you will want to comment out the
3130 * line that sets `flags.write_higher_order_cells = true;`. On the other
3131 * hand, ParaView is able to understand VTU files with higher-order cells
3132 * just fine.
3133 *
3134 * @code
3135 *   template <int dim>
3136 *   void EulerProblem<dim>::output_results(const unsigned int result_number)
3137 *   {
3138 *   const std::array<double, 3> errors =
3139 *   euler_operator.compute_errors(ExactSolution<dim>(time), solution);
3140 *   const std::string quantity_name = testcase == 0 ? "error" : "norm";
3141 *  
3142 *   pcout << "Time:" << std::setw(8) << std::setprecision(3) << time
3143 *   << ", dt: " << std::setw(8) << std::setprecision(2) << time_step
3144 *   << ", " << quantity_name << " rho: " << std::setprecision(4)
3145 *   << std::setw(10) << errors[0] << ", rho * u: " << std::setprecision(4)
3146 *   << std::setw(10) << errors[1] << ", energy:" << std::setprecision(4)
3147 *   << std::setw(10) << errors[2] << std::endl;
3148 *  
3149 *   {
3150 *   TimerOutput::Scope t(timer, "output");
3151 *  
3152 *   Postprocessor postprocessor;
3153 *   DataOut<dim> data_out;
3154 *  
3155 *   DataOutBase::VtkFlags flags;
3156 *   flags.write_higher_order_cells = true;
3157 *   data_out.set_flags(flags);
3158 *  
3159 *   data_out.attach_dof_handler(dof_handler);
3160 *   {
3161 *   std::vector<std::string> names;
3162 *   names.emplace_back("density");
3163 *   for (unsigned int d = 0; d < dim; ++d)
3164 *   names.emplace_back("momentum");
3165 *   names.emplace_back("energy");
3166 *  
3167 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
3168 *   interpretation;
3169 *   interpretation.push_back(
3170 *   DataComponentInterpretation::component_is_scalar);
3171 *   for (unsigned int d = 0; d < dim; ++d)
3172 *   interpretation.push_back(
3173 *   DataComponentInterpretation::component_is_part_of_vector);
3174 *   interpretation.push_back(
3175 *   DataComponentInterpretation::component_is_scalar);
3176 *  
3177 *   data_out.add_data_vector(dof_handler, solution, names, interpretation);
3178 *   }
3179 *   data_out.add_data_vector(solution, postprocessor);
3180 *  
3182 *   if (testcase == 0 && dim == 2)
3183 *   {
3184 *   reference.reinit(solution);
3185 *   euler_operator.project(ExactSolution<dim>(time), reference);
3186 *   reference.sadd(-1., 1, solution);
3187 *   std::vector<std::string> names;
3188 *   names.emplace_back("error_density");
3189 *   for (unsigned int d = 0; d < dim; ++d)
3190 *   names.emplace_back("error_momentum");
3191 *   names.emplace_back("error_energy");
3192 *  
3193 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
3194 *   interpretation;
3195 *   interpretation.push_back(
3196 *   DataComponentInterpretation::component_is_scalar);
3197 *   for (unsigned int d = 0; d < dim; ++d)
3198 *   interpretation.push_back(
3199 *   DataComponentInterpretation::component_is_part_of_vector);
3200 *   interpretation.push_back(
3201 *   DataComponentInterpretation::component_is_scalar);
3202 *  
3203 *   data_out.add_data_vector(dof_handler,
3204 *   reference,
3205 *   names,
3206 *   interpretation);
3207 *   }
3208 *  
3209 *   Vector<double> mpi_owner(triangulation.n_active_cells());
3210 *   mpi_owner = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
3211 *   data_out.add_data_vector(mpi_owner, "owner");
3212 *  
3213 *   data_out.build_patches(mapping,
3214 *   fe.degree,
3215 *   DataOut<dim>::curved_inner_cells);
3216 *  
3217 *   const std::string filename =
3218 *   "solution_" + Utilities::int_to_string(result_number, 3) + ".vtu";
3219 *   data_out.write_vtu_in_parallel(filename, MPI_COMM_WORLD);
3220 *   }
3221 *   }
3222 *  
3223 *  
3224 *  
3225 * @endcode
3226 *
3227 * The EulerProblem::run() function puts all pieces together. It starts off
3228 * by calling the function that creates the mesh and sets up data structures,
3229 * and then initializing the time integrator and the two temporary vectors of
3230 * the low-storage integrator. We call these vectors `rk_register_1` and
3231 * `rk_register_2`, and use the first vector to represent the quantity
3232 * @f$\mathbf{r}_i@f$ and the second one for @f$\mathbf{k}_i@f$ in the formulas for
3233 * the Runge--Kutta scheme outlined in the introduction. Before we start the
3234 * time loop, we compute the time step size by the
3235 * `EulerOperator::compute_cell_transport_speed()` function. For comparison,
3236 * we also compute the minimal mesh size and print both quantities to
3237 * screen. For velocities and speeds of sound close to unity as in this
3238 * tutorial program, the predicted effective mesh size will be close to the
3239 * minimal vertex distance, but the two could differ if the scaling were different.
3240 *
3241 * @code
3242 *   template <int dim>
3243 *   void EulerProblem<dim>::run()
3244 *   {
3245 *   {
3246 *   const unsigned int n_vect_number = VectorizedArray<Number>::size();
3247 *   const unsigned int n_vect_bits = 8 * sizeof(Number) * n_vect_number;
3248 *  
3249 *   pcout << "Running with "
3250 *   << Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD)
3251 *   << " MPI processes" << std::endl;
3252 *   pcout << "Vectorization over " << n_vect_number << ' '
3253 *   << (std::is_same_v<Number, double> ? "doubles" : "floats") << " = "
3254 *   << n_vect_bits << " bits ("
3255 *   << Utilities::System::get_current_vectorization_level() << ')'
3256 *   << std::endl;
3257 *   }
3258 *  
3259 *   make_grid_and_dofs();
3260 *  
3261 *   const LowStorageRungeKuttaIntegrator integrator(lsrk_scheme);
3262 *  
3263 *   LinearAlgebra::distributed::Vector<Number> rk_register_1;
3264 *   LinearAlgebra::distributed::Vector<Number> rk_register_2;
3265 *   rk_register_1.reinit(solution);
3266 *   rk_register_2.reinit(solution);
3267 *  
3268 *   euler_operator.project(ExactSolution<dim>(time), solution);
3269 *  
3270 *   double min_vertex_distance = std::numeric_limits<double>::max();
3271 *   for (const auto &cell : triangulation.active_cell_iterators())
3272 *   if (cell->is_locally_owned())
3273 *   min_vertex_distance =
3274 *   std::min(min_vertex_distance, cell->minimum_vertex_distance());
3275 *   min_vertex_distance =
3276 *   Utilities::MPI::min(min_vertex_distance, MPI_COMM_WORLD);
3277 *  
3278 *   time_step = courant_number * integrator.n_stages() /
3279 *   euler_operator.compute_cell_transport_speed(solution);
3280 *   pcout << "Time step size: " << time_step
3281 *   << ", minimal h: " << min_vertex_distance
3282 *   << ", initial transport scaling: "
3283 *   << 1. / euler_operator.compute_cell_transport_speed(solution)
3284 *   << std::endl
3285 *   << std::endl;
3286 *  
3287 *   output_results(0);
3288 *  
3289 * @endcode
3290 *
3291 * Now we are ready to start the time loop, which we run until the time
3292 * has reached the desired end time. Every 5 time steps, we compute a new
3293 * estimate for the time step -- since the solution is nonlinear, it is
3294 * most effective to adapt the value during the course of the
3295 * simulation. In case the Courant number was chosen too aggressively, the
3296 * simulation will typically blow up with time step NaN, so that is easy
3297 * to detect here. One thing to note is that roundoff errors might
3298 * propagate to the leading digits due to an interaction of slightly
3299 * different time step selections that in turn lead to slightly different
3300 * solutions. To decrease this sensitivity, it is common practice to round
3301 * or truncate the time step size to a few digits, e.g. 3 in this case. In
3302 * case the current time is near the prescribed 'tick' value for output
3303 * (e.g. 0.02), we also write the output. After the end of the time loop,
3304 * we summarize the computation by printing some statistics, which is
3305 * mostly done by the TimerOutput::print_wall_time_statistics() function.
3306 *
3307 * @code
3308 *   unsigned int timestep_number = 0;
3309 *  
3310 *   while (time < final_time - 1e-12)
3311 *   {
3312 *   ++timestep_number;
3313 *   if (timestep_number % 5 == 0)
3314 *   time_step =
3315 *   courant_number * integrator.n_stages() /
3316 *   Utilities::truncate_to_n_digits(
3317 *   euler_operator.compute_cell_transport_speed(solution), 3);
3318 *  
3319 *   {
3320 *   TimerOutput::Scope t(timer, "rk time stepping total");
3321 *   integrator.perform_time_step(euler_operator,
3322 *   time,
3323 *   time_step,
3324 *   solution,
3325 *   rk_register_1,
3326 *   rk_register_2);
3327 *   }
3328 *  
3329 *   time += time_step;
3330 *  
3331 *   if (static_cast<int>(time / output_tick) !=
3332 *   static_cast<int>((time - time_step) / output_tick) ||
3333 *   time >= final_time - 1e-12)
3334 *   output_results(
3335 *   static_cast<unsigned int>(std::round(time / output_tick)));
3336 *   }
3337 *  
3338 *   timer.print_wall_time_statistics(MPI_COMM_WORLD);
3339 *   pcout << std::endl;
3340 *   }
3341 *  
3342 *   } // namespace Euler_DG
3343 *  
3344 *  
3345 *  
3346 * @endcode
3347 *
3348 * The main() function is not surprising and follows what was done in all
3349 * previous MPI programs: As we run an MPI program, we need to call `MPI_Init()`
3350 * and `MPI_Finalize()`, which we do through the
3351 * Utilities::MPI::MPI_InitFinalize data structure. Note that we run the program
3352 * only with MPI, and set the thread count to 1.
3353 *
3354 * @code
3355 *   int main(int argc, char **argv)
3356 *   {
3357 *   using namespace Euler_DG;
3358 *   using namespace dealii;
3359 *  
3360 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
3361 *  
3362 *   try
3363 *   {
3364 *   EulerProblem<dimension> euler_problem;
3365 *   euler_problem.run();
3366 *   }
3367 *   catch (std::exception &exc)
3368 *   {
3369 *   std::cerr << std::endl
3370 *   << std::endl
3371 *   << "----------------------------------------------------"
3372 *   << std::endl;
3373 *   std::cerr << "Exception on processing: " << std::endl
3374 *   << exc.what() << std::endl
3375 *   << "Aborting!" << std::endl
3376 *   << "----------------------------------------------------"
3377 *   << std::endl;
3378 *  
3379 *   return 1;
3380 *   }
3381 *   catch (...)
3382 *   {
3383 *   std::cerr << std::endl
3384 *   << std::endl
3385 *   << "----------------------------------------------------"
3386 *   << std::endl;
3387 *   std::cerr << "Unknown exception!" << std::endl
3388 *   << "Aborting!" << std::endl
3389 *   << "----------------------------------------------------"
3390 *   << std::endl;
3391 *   return 1;
3392 *   }
3393 *  
3394 *   return 0;
3395 *   }
3396 * @endcode
3397<a name="step_67-Results"></a><h1>Results</h1>
3398
3399
3400<a name="step_67-Programoutput"></a><h3>Program output</h3>
3401
3402
3403Running the program with the default settings on a machine with 40 processes
3404produces the following output:
3405@code
3406Running with 40 MPI processes
3407Vectorization over 8 doubles = 512 bits (AVX512)
3408Number of degrees of freedom: 147,456 ( = 4 [vars] x 1,024 [cells] x 36 [dofs/cell/var] )
3409Time step size: 0.00689325, minimal h: 0.3125, initial transport scaling: 0.102759
3410
3411Time: 0, dt: 0.0069, error rho: 2.76e-07, rho * u: 1.259e-06, energy: 2.987e-06
3412Time: 1.01, dt: 0.0069, error rho: 1.37e-06, rho * u: 2.252e-06, energy: 4.153e-06
3413Time: 2.01, dt: 0.0069, error rho: 1.561e-06, rho * u: 2.43e-06, energy: 4.493e-06
3414Time: 3.01, dt: 0.0069, error rho: 1.714e-06, rho * u: 2.591e-06, energy: 4.762e-06
3415Time: 4.01, dt: 0.0069, error rho: 1.843e-06, rho * u: 2.625e-06, energy: 4.985e-06
3416Time: 5.01, dt: 0.0069, error rho: 1.496e-06, rho * u: 1.961e-06, energy: 4.142e-06
3417Time: 6, dt: 0.0083, error rho: 1.007e-06, rho * u: 7.119e-07, energy: 2.972e-06
3418Time: 7, dt: 0.0095, error rho: 9.096e-07, rho * u: 3.786e-07, energy: 2.626e-06
3419Time: 8, dt: 0.0096, error rho: 8.439e-07, rho * u: 3.338e-07, energy: 2.43e-06
3420Time: 9, dt: 0.0096, error rho: 7.822e-07, rho * u: 2.984e-07, energy: 2.248e-06
3421Time: 10, dt: 0.0096, error rho: 7.231e-07, rho * u: 2.666e-07, energy: 2.074e-06
3422
3423+-------------------------------------------+------------------+------------+------------------+
3424| Total wallclock time elapsed | 2.249s 30 | 2.249s | 2.249s 8 |
3425| | | |
3426| Section | no. calls | min time rank | avg time | max time rank |
3427+-------------------------------------------+------------------+------------+------------------+
3428| compute errors | 11 | 0.008066s 13 | 0.00952s | 0.01041s 20 |
3429| compute transport speed | 258 | 0.01012s 13 | 0.05392s | 0.08574s 25 |
3430| output | 11 | 0.9597s 13 | 0.9613s | 0.9623s 6 |
3431| rk time stepping total | 1283 | 0.9827s 25 | 1.015s | 1.06s 13 |
3432| rk_stage - integrals L_h | 6415 | 0.8803s 26 | 0.9198s | 0.9619s 14 |
3433| rk_stage - inv mass + vec upd | 6415 | 0.05677s 15 | 0.06487s | 0.07597s 13 |
3434+-------------------------------------------+------------------+------------+------------------+
3435@endcode
3436
3437The program output shows that all errors are small. This is due to the fact
3438that we use a relatively fine mesh of @f$32^2@f$ cells with polynomials of degree
34395 for a solution that is smooth. An interesting pattern shows up in the time
3440step size: whereas it is 0.0069 up to time 5, it increases to 0.0096 for later
3441times. The step size increases once the vortex with some motion on top of the
3442speed of sound (and thus faster propagation) leaves the computational domain
3443between times 5 and 6.5. After that point, the flow is simply uniform
3444in the same direction, and the maximum velocity of the gas is reduced
3445compared to the previous state where the uniform velocity was overlaid
3446by the vortex. Our time step formula recognizes this effect.
3447
3448The final block of output shows detailed information about the timing
3449of individual parts of the programs; it breaks this down by showing
3450the time taken by the fastest and the slowest processor, and the
3451average time -- this is often useful in very large computations to
3452find whether there are processors that are consistently overheated
3453(and consequently are throttling their clock speed) or consistently
3454slow for other reasons.
3455The summary shows that 1283 time steps have been performed
3456in 1.02 seconds (looking at the average time among all MPI processes), while
3457the output of 11 files has taken an additional 0.96 seconds. Broken down per time
3458step and into the five Runge--Kutta stages, the compute time per evaluation is
34590.16 milliseconds. This high performance is typical of matrix-free evaluators
3460and a reason why explicit time integration is very competitive against
3461implicit solvers, especially for large-scale simulations. The breakdown of
3462computational times at the end of the program run shows that the evaluation of
3463integrals in @f$\mathcal L_h@f$ contributes with around 0.92 seconds and the
3464application of the inverse mass matrix with 0.06 seconds. Furthermore, the
3465estimation of the transport speed for the time step size computation
3466contributes with another 0.05 seconds of compute time.
3467
3468If we use three more levels of global refinement and 9.4 million DoFs in total,
3469the final statistics are as follows (for the modified Lax--Friedrichs flux,
3470@f$p=5@f$, and the same system of 40 cores of dual-socket Intel Xeon Gold 6230):
3471@code
3472+-------------------------------------------+------------------+------------+------------------+
3473| Total wallclock time elapsed | 244.9s 12 | 244.9s | 244.9s 34 |
3474| | | |
3475| Section | no. calls | min time rank | avg time | max time rank |
3476+-------------------------------------------+------------------+------------+------------------+
3477| compute errors | 11 | 0.4239s 12 | 0.4318s | 0.4408s 9 |
3478| compute transport speed | 2053 | 3.962s 12 | 6.727s | 10.12s 7 |
3479| output | 11 | 30.35s 12 | 30.36s | 30.37s 9 |
3480| rk time stepping total | 10258 | 201.7s 7 | 205.1s | 207.8s 12 |
3481| rk_stage - integrals L_h | 51290 | 121.3s 6 | 126.6s | 136.3s 16 |
3482| rk_stage - inv mass + vec upd | 51290 | 66.19s 16 | 77.52s | 81.84s 10 |
3483+-------------------------------------------+------------------+------------+------------------+
3484@endcode
3485
3486Per time step, the solver now takes 0.02 seconds, about 25 times as long as
3487for the small problem with 147k unknowns. Given that the problem involves 64
3488times as many unknowns, the increase in computing time is not
3489surprising. Since we also do 8 times as many time steps, the compute time
3490should in theory increase by a factor of 512. The actual increase is 205 s /
34911.02 s = 202. This is because the small problem size cannot fully utilize the
349240 cores due to communication overhead. This becomes clear if we look into the
3493details of the operations done per time step. The evaluation of the
3494differential operator @f$\mathcal L_h@f$ with nearest neighbor communication goes
3495from 0.92 seconds to 127 seconds, i.e., it increases by a factor of 138. On
3496the other hand, the cost for application of the inverse mass matrix and the
3497vector updates, which do not need to communicate between the MPI processes at
3498all, has increased by a factor of 1195. The increase is more than the
3499theoretical factor of 512 because the operation is limited by the bandwidth
3500from RAM memory for the larger size while for the smaller size, all vectors
3501fit into the caches of the CPU. The numbers show that the mass matrix
3502evaluation and vector update part consume almost 40% of the time spent by the
3503Runge--Kutta stages -- despite using a low-storage Runge--Kutta integrator and
3504merging of vector operations! And despite using over-integration for the
3505@f$\mathcal L_h@f$ operator. For simpler differential operators and more expensive
3506time integrators, the proportion spent in the mass matrix and vector update
3507part can also reach 70%. If we compute a throughput number in terms of DoFs
3508processed per second and Runge--Kutta stage, we obtain @f[ \text{throughput} =
3509\frac{n_\mathrm{time steps} n_\mathrm{stages}
3510n_\mathrm{dofs}}{t_\mathrm{compute}} = \frac{10258 \cdot 5 \cdot
35119.4\,\text{MDoFs}}{205s} = 2360\, \text{MDoFs/s} @f] This throughput number is
3512very high, given that simply copying one vector to another one runs at
3513only around 10,000 MDoFs/s.
3514
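To put the copy throughput into perspective (a back-of-the-envelope estimate, not
a number measured by the program): a vector copy moves at least 16 bytes per DoF,
one read and one write of a double, so 10,000 MDoFs/s correspond to
@f[
10^{10}\,\frac{\text{DoFs}}{\text{s}} \times 16\,\frac{\text{Byte}}{\text{DoF}}
= 160\,\frac{\text{GB}}{\text{s}}
@f]
of memory traffic, i.e., the order of magnitude of the memory bandwidth of a
dual-socket server node.
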
If we go to the next-larger size with 37.7 million DoFs, the overall
simulation time is 2196 seconds, with 1978 seconds spent in the time
stepping. The increase in run time is a factor of 9.3 for the @f$\mathcal L_h@f$ operator
(1179 versus 127 seconds) and a factor of 10.3 for the inverse mass matrix and
vector updates (797 versus 77.5 seconds). The reason for this non-optimal increase
in run time can be traced back to cache effects on the given hardware (with 40
MB of L2 cache and 55 MB of L3 cache): While not all of the relevant data fits
into caches for 9.4 million DoFs (one vector takes 75 MB and we have three
vectors plus some additional data in MatrixFree), there is capacity for one and
a half vectors nonetheless. Given that modern caches are more sophisticated than
the naive least-recently-used strategy (where we would have little re-use as
the data is used in a streaming-like fashion), we can assume that a sizeable
fraction of the data can indeed be delivered from caches for the 9.4 million DoF
case. For the larger case, even with optimal caching less than 10 percent of the
data would fit into caches, with an associated loss in performance.
3530
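The sizes involved are easy to check (simple arithmetic, not program output):
with 8-byte doubles, one solution vector holds
@f[
9.4 \times 10^6 \,\text{DoFs} \times 8\,\frac{\text{Byte}}{\text{DoF}} \approx 75\,\text{MB},
@f]
compared to the 95 MB of combined L2 and L3 cache, whereas for 37.7 million DoFs
one vector already occupies roughly 300 MB.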
3531
3532<a name="step_67-Convergenceratesfortheanalyticaltestcase"></a><h3>Convergence rates for the analytical test case</h3>
3533
3534
3535For the modified Lax--Friedrichs flux and measuring the error in the momentum
3536variable, we obtain the following convergence table (the rates are very
3537similar for the density and energy variables):
3538
3539<table align="center" class="doxtable">
3540 <tr>
3541 <th>&nbsp;</th>
3542 <th colspan="3"><i>p</i>=2</th>
3543 <th colspan="3"><i>p</i>=3</th>
3544 <th colspan="3"><i>p</i>=5</th>
3545 </tr>
3546 <tr>
3547 <th>n_cells</th>
3548 <th>n_dofs</th>
3549 <th>error mom</th>
3550 <th>rate</th>
3551 <th>n_dofs</th>
3552 <th>error mom</th>
3553 <th>rate</th>
3554 <th>n_dofs</th>
3555 <th>error mom</th>
3556 <th>rate</th>
3557 </tr>
3558 <tr>
3559 <td align="right">16</td>
3560 <td>&nbsp;</td>
3561 <td>&nbsp;</td>
3562 <td>&nbsp;</td>
3563 <td>&nbsp;</td>
3564 <td>&nbsp;</td>
3565 <td>&nbsp;</td>
3566 <td align="right">2,304</td>
3567 <td align="center">1.373e-01</td>
3568 <td>&nbsp;</td>
3569 </tr>
3570 <tr>
3571 <td align="right">64</td>
3572 <td>&nbsp;</td>
3573 <td>&nbsp;</td>
3574 <td>&nbsp;</td>
3575 <td align="right">4,096</td>
3576 <td align="center">9.130e-02</td>
3577 <td>&nbsp;</td>
3578 <td align="right">9,216</td>
3579 <td align="center">8.899e-03</td>
3580 <td>3.94</td>
3581 </tr>
3582 <tr>
3583 <td align="right">256</td>
3584 <td align="right">9,216</td>
3585 <td align="center">5.577e-02</td>
3586 <td>&nbsp;</td>
3587 <td align="right">16,384</td>
3588 <td align="center">7.381e-03</td>
3589 <td>3.64</td>
3590 <td align="right">36,864</td>
3591 <td align="center">2.082e-04</td>
3592 <td>5.42</td>
3593 </tr>
3594 <tr>
3595 <td align="right">1024</td>
3596 <td align="right">36,864</td>
3597 <td align="center">4.724e-03</td>
3598 <td>3.56</td>
3599 <td align="right">65,536</td>
3600 <td align="center">3.072e-04</td>
3601 <td>4.59</td>
3602 <td align="right">147,456</td>
3603 <td align="center">2.625e-06</td>
3604 <td>6.31</td>
3605 </tr>
3606 <tr>
3607 <td align="right">4096</td>
3608 <td align="right">147,456</td>
3609 <td align="center">6.205e-04</td>
3610 <td>2.92</td>
3611 <td align="right">262,144</td>
3612 <td align="center">1.880e-05</td>
3613 <td>4.03</td>
3614 <td align="right">589,824</td>
3615 <td align="center">3.268e-08</td>
3616 <td>6.33</td>
3617 </tr>
3618 <tr>
3619 <td align="right">16,384</td>
3620 <td align="right">589,824</td>
3621 <td align="center">8.279e-05</td>
3622 <td>2.91</td>
3623 <td align="right">1,048,576</td>
3624 <td align="center">1.224e-06</td>
3625 <td>3.94</td>
3626 <td align="right">2,359,296</td>
3627 <td align="center">9.252e-10</td>
3628 <td>5.14</td>
3629 </tr>
3630 <tr>
3631 <td align="right">65,536</td>
3632 <td align="right">2,359,296</td>
3633 <td align="center">1.105e-05</td>
3634 <td>2.91</td>
3635 <td align="right">4,194,304</td>
3636 <td align="center">7.871e-08</td>
3637 <td>3.96</td>
3638 <td align="right">9,437,184</td>
3639 <td align="center">1.369e-10</td>
3640 <td>2.77</td>
3641 </tr>
3642 <tr>
3643 <td align="right">262,144</td>
3644 <td align="right">9,437,184</td>
3645 <td align="center">1.615e-06</td>
3646 <td>2.77</td>
3647 <td align="right">16,777,216</td>
3648 <td align="center">4.961e-09</td>
3649 <td>3.99</td>
3650 <td align="right">37,748,736</td>
3651 <td align="center">7.091e-11</td>
3652 <td>0.95</td>
3653 </tr>
3654</table>
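
For reference, the rates reported in the tables are the usual estimated orders of
convergence for uniform refinement: since every global refinement step halves the
mesh size @f$h@f$, two consecutive errors @f$e_{2h}@f$ and @f$e_h@f$ yield
@f[
\text{rate} = \log_2 \frac{e_{2h}}{e_h},
@f]
for example @f$\log_2\left(5.577\cdot 10^{-2} / 4.724\cdot 10^{-3}\right) \approx 3.56@f$
for the step from 256 to 1024 cells at @f$p=2@f$.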
3655
If we switch to the Harten--Lax--van Leer flux, the results are as follows:
3657<table align="center" class="doxtable">
3658 <tr>
3659 <th>&nbsp;</th>
3660 <th colspan="3"><i>p</i>=2</th>
3661 <th colspan="3"><i>p</i>=3</th>
3662 <th colspan="3"><i>p</i>=5</th>
3663 </tr>
3664 <tr>
3665 <th>n_cells</th>
3666 <th>n_dofs</th>
3667 <th>error mom</th>
3668 <th>rate</th>
3669 <th>n_dofs</th>
3670 <th>error mom</th>
3671 <th>rate</th>
3672 <th>n_dofs</th>
3673 <th>error mom</th>
3674 <th>rate</th>
3675 </tr>
3676 <tr>
3677 <td align="right">16</td>
3678 <td>&nbsp;</td>
3679 <td>&nbsp;</td>
3680 <td>&nbsp;</td>
3681 <td>&nbsp;</td>
3682 <td>&nbsp;</td>
3683 <td>&nbsp;</td>
3684 <td align="right">2,304</td>
3685 <td align="center">1.339e-01</td>
3686 <td>&nbsp;</td>
3687 </tr>
3688 <tr>
3689 <td align="right">64</td>
3690 <td>&nbsp;</td>
3691 <td>&nbsp;</td>
3692 <td>&nbsp;</td>
3693 <td align="right">4,096</td>
3694 <td align="center">9.037e-02</td>
3695 <td>&nbsp;</td>
3696 <td align="right">9,216</td>
3697 <td align="center">8.849e-03</td>
3698 <td>3.92</td>
3699 </tr>
3700 <tr>
3701 <td align="right">256</td>
3702 <td align="right">9,216</td>
3703 <td align="center">4.204e-02</td>
3704 <td>&nbsp;</td>
3705 <td align="right">16,384</td>
3706 <td align="center">9.143e-03</td>
3707 <td>3.31</td>
3708 <td align="right">36,864</td>
3709 <td align="center">2.501e-04</td>
3710 <td>5.14</td>
3711 </tr>
3712 <tr>
3713 <td align="right">1024</td>
3714 <td align="right">36,864</td>
3715 <td align="center">4.913e-03</td>
3716 <td>3.09</td>
3717 <td align="right">65,536</td>
3718 <td align="center">3.257e-04</td>
3719 <td>4.81</td>
3720 <td align="right">147,456</td>
3721 <td align="center">3.260e-06</td>
3722 <td>6.26</td>
3723 </tr>
3724 <tr>
3725 <td align="right">4096</td>
3726 <td align="right">147,456</td>
3727 <td align="center">7.862e-04</td>
3728 <td>2.64</td>
3729 <td align="right">262,144</td>
3730 <td align="center">1.588e-05</td>
3731 <td>4.36</td>
3732 <td align="right">589,824</td>
3733 <td align="center">2.953e-08</td>
3734 <td>6.79</td>
3735 </tr>
3736 <tr>
3737 <td align="right">16,384</td>
3738 <td align="right">589,824</td>
3739 <td align="center">1.137e-04</td>
3740 <td>2.79</td>
3741 <td align="right">1,048,576</td>
3742 <td align="center">9.400e-07</td>
3743 <td>4.08</td>
3744 <td align="right">2,359,296</td>
3745 <td align="center">4.286e-10</td>
3746 <td>6.11</td>
3747 </tr>
3748 <tr>
3749 <td align="right">65,536</td>
3750 <td align="right">2,359,296</td>
3751 <td align="center">1.476e-05</td>
3752 <td>2.95</td>
3753 <td align="right">4,194,304</td>
3754 <td align="center">5.799e-08</td>
3755 <td>4.02</td>
3756 <td align="right">9,437,184</td>
3757 <td align="center">2.789e-11</td>
3758 <td>3.94</td>
3759 </tr>
3760 <tr>
3761 <td align="right">262,144</td>
3762 <td align="right">9,437,184</td>
3763 <td align="center">2.038e-06</td>
3764 <td>2.86</td>
3765 <td align="right">16,777,216</td>
3766 <td align="center">3.609e-09</td>
3767 <td>4.01</td>
3768 <td align="right">37,748,736</td>
3769 <td align="center">5.730e-11</td>
3770 <td>-1.04</td>
3771 </tr>
3772</table>
3773
The tables show that we get optimal @f$\mathcal O\left(h^{p+1}\right)@f$
convergence rates for both numerical fluxes. The errors are slightly smaller
for the Lax--Friedrichs flux for @f$p=2@f$, but the picture is reversed for
@f$p=3@f$; in any case, the differences on this test case are relatively
small.

For @f$p=5@f$, we reach the roundoff accuracy of @f$10^{-11}@f$ with both
fluxes on the finest grids. Also note that the errors are absolute with a
domain length of @f$10^2@f$, so relative errors are below @f$10^{-12}@f$. The HLL flux
is somewhat better for the highest degree, which is due to a slight inaccuracy
of the Lax--Friedrichs flux: it effectively sets a Dirichlet condition on the
part of the solution that leaves the domain, which results in a small
artificial reflection. Apart from that, we see that the influence of the
numerical flux is minor, as the polynomial part inside the elements is the main
driver of the accuracy. The limited influence of the flux also has consequences
when trying to approach more challenging setups with this higher-order DG
discretization: Taking for example the parameters and grid of @ref step_33 "step-33",
we get oscillations (which in turn make the density negative and make the
solution explode) with both fluxes once the high-mass part comes near the
boundary, as opposed to the low-order finite volume case (@f$p=0@f$). Thus, any
case that leads to shocks in the solution necessitates some form of limiting or
artificial dissipation. For another alternative, see the @ref step_69 "step-69"
tutorial program.
3797
3798
3799<a name="step_67-Resultsforflowinchannelaroundcylinderin2D"></a><h3>Results for flow in channel around cylinder in 2D</h3>
3800
3801
For the test case of the flow around a cylinder in a channel, we need to
change the first code line to
@code
  constexpr unsigned int testcase = 1;
@endcode
This test case starts with a background field of a constant velocity
of Mach number 0.31 and a constant initial density; the flow will have
to go around an obstacle in the form of a cylinder. Since we impose a
no-penetration condition on the cylinder walls, the flow that
initially impinges head-on onto the cylinder has to rearrange,
which creates a strong sound wave. The following pictures show the pressure at
times 0.1, 0.25, 0.5, and 1.0 (top left to bottom right) for the 2D case with
5 levels of global refinement, using 102,400 cells with a polynomial degree of
5 and 14.7 million degrees of freedom over all 4 solution variables.
We clearly see the discontinuity that
propagates slowly in the upstream direction and more quickly in the downstream
direction in the first snapshot at time 0.1. At time 0.25, the sound wave has
reached the top and bottom walls and been reflected back into the interior. From
the different distances of the reflected waves from the lower and upper walls we
can see the slight asymmetry of the Sch&auml;fer--Turek test case represented by
GridGenerator::channel_with_cylinder() with somewhat more space above the
cylinder compared to below. At later times, the picture is more chaotic with
many sound waves all over the place.
3825
3826<table align="center" class="doxtable" style="width:85%">
3827 <tr>
3828 <td>
3829 <img src="https://www.dealii.org/images/steps/developer/step-67.pressure_010.png" alt="" width="100%">
3830 </td>
3831 <td>
3832 <img src="https://www.dealii.org/images/steps/developer/step-67.pressure_025.png" alt="" width="100%">
3833 </td>
3834 </tr>
3835 <tr>
3836 <td>
3837 <img src="https://www.dealii.org/images/steps/developer/step-67.pressure_050.png" alt="" width="100%">
3838 </td>
3839 <td>
3840 <img src="https://www.dealii.org/images/steps/developer/step-67.pressure_100.png" alt="" width="100%">
3841 </td>
3842 </tr>
3843</table>
3844
3845The next picture shows an elevation plot of the pressure at time 1.0 looking
3846from the channel inlet towards the outlet at the same resolution -- here,
3847we can see the large number
3848of reflections. In the figure, two types of waves are visible. The
3849larger-amplitude waves correspond to various reflections that happened as the
3850initial discontinuity hit the walls, whereas the small-amplitude waves of
3851size similar to the elements correspond to numerical artifacts. They have their
3852origin in the finite resolution of the scheme and appear as the discontinuity
3853travels through elements with high-order polynomials. This effect can be cured
3854by increasing resolution. Apart from this effect, the rich wave structure is
3855the result of the transport accuracy of the high-order DG method.
3856
3857<img src="https://www.dealii.org/images/steps/developer/step-67.pressure_elevated.jpg" alt="" width="40%">
3858
3859If we run the program with degree 2 and 6 levels of global refinements (410k
3860cells, 14.7M unknowns), we get the following evolution of the pressure
3861(elevation plot, colored by the value of the density):
3862
3863@htmlonly
3864<p align="center">
3865 <iframe width="560" height="315" src="https://www.youtube.com/embed/ivxCLiSdQpc"
3866 frameborder="0"
3867 allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
3868 allowfullscreen></iframe>
3869 </p>
3870@endhtmlonly
3871
3872
3873With 2 levels of global refinement with 1,600 cells, the mesh and its
3874partitioning on 40 MPI processes looks as follows:
3875
3876<img src="https://www.dealii.org/images/steps/developer/step-67.grid-owner.png" alt="" width="70%">
3877
3878When we run the code with 4 levels of global refinements on 40 cores, we get
3879the following output:
3880@code
3881Running with 40 MPI processes
3882Vectorization over 8 doubles = 512 bits (AVX512)
3883Number of degrees of freedom: 3,686,400 ( = 4 [vars] x 25,600 [cells] x 36 [dofs/cell/var] )
3884Time step size: 7.39876e-05, minimal h: 0.001875, initial transport scaling: 0.00110294
3885
3886Time: 0, dt: 7.4e-05, norm rho: 4.17e-16, rho * u: 1.629e-16, energy: 1.381e-15
3887Time: 0.05, dt: 6.3e-05, norm rho: 0.02075, rho * u: 0.03801, energy: 0.08772
3888Time: 0.1, dt: 5.9e-05, norm rho: 0.02211, rho * u: 0.04515, energy: 0.08953
3889Time: 0.15, dt: 5.7e-05, norm rho: 0.02261, rho * u: 0.04592, energy: 0.08967
3890Time: 0.2, dt: 5.8e-05, norm rho: 0.02058, rho * u: 0.04361, energy: 0.08222
3891Time: 0.25, dt: 5.9e-05, norm rho: 0.01695, rho * u: 0.04203, energy: 0.06873
3892Time: 0.3, dt: 5.9e-05, norm rho: 0.01653, rho * u: 0.0401, energy: 0.06604
3893Time: 0.35, dt: 5.7e-05, norm rho: 0.01774, rho * u: 0.04264, energy: 0.0706
3894
3895...
3896
3897Time: 1.95, dt: 5.8e-05, norm rho: 0.01488, rho * u: 0.03923, energy: 0.05185
3898Time: 2, dt: 5.7e-05, norm rho: 0.01432, rho * u: 0.03969, energy: 0.04889
3899
3900+-------------------------------------------+------------------+------------+------------------+
3901| Total wallclock time elapsed | 273.6s 13 | 273.6s | 273.6s 0 |
3902| | | |
3903| Section | no. calls | min time rank | avg time | max time rank |
3904+-------------------------------------------+------------------+------------+------------------+
3905| compute errors | 41 | 0.01112s 35 | 0.0672s | 0.1337s 0 |
3906| compute transport speed | 6914 | 5.422s 35 | 15.96s | 29.99s 1 |
3907| output | 41 | 37.24s 35 | 37.3s | 37.37s 0 |
3908| rk time stepping total | 34564 | 205.4s 1 | 219.5s | 230.1s 35 |
3909| rk_stage - integrals L_h | 172820 | 153.6s 1 | 164.9s | 175.6s 27 |
3910| rk_stage - inv mass + vec upd | 172820 | 47.13s 13 | 53.09s | 64.05s 33 |
3911+-------------------------------------------+------------------+------------+------------------+
3912@endcode
3913
3914The norms shown here for the various quantities are the deviations
3915@f$\rho'@f$, @f$(\rho u)'@f$, and @f$E'@f$ against the background field (namely, the
initial condition). The distribution of run time is overall similar to that of
the previous test case. The only slight difference is the larger proportion of
time spent in @f$\mathcal L_h@f$ as compared to the inverse mass matrix and vector
updates. This is because the geometry is deformed and the matrix-free
framework needs to load additional geometry arrays from memory, whereas this
information is compressed for the affine mesh of the previous case.
3922
3923Increasing the number of global refinements to 5, the output becomes:
3924@code
3925Running with 40 MPI processes
3926Vectorization over 8 doubles = 512 bits (AVX512)
3927Number of degrees of freedom: 14,745,600 ( = 4 [vars] x 102,400 [cells] x 36 [dofs/cell/var] )
3928
3929...
3930
3931+-------------------------------------------+------------------+------------+------------------+
3932| Total wallclock time elapsed | 2693s 32 | 2693s | 2693s 23 |
3933| | | |
3934| Section | no. calls | min time rank | avg time | max time rank |
3935+-------------------------------------------+------------------+------------+------------------+
3936| compute errors | 41 | 0.04537s 32 | 0.173s | 0.3489s 0 |
3937| compute transport speed | 13858 | 40.75s 32 | 85.99s | 149.8s 0 |
3938| output | 41 | 153.8s 32 | 153.9s | 154.1s 0 |
3939| rk time stepping total | 69284 | 2386s 0 | 2450s | 2496s 32 |
3940| rk_stage - integrals L_h | 346420 | 1365s 32 | 1574s | 1718s 19 |
3941| rk_stage - inv mass + vec upd | 346420 | 722.5s 10 | 870.7s | 1125s 32 |
3942+-------------------------------------------+------------------+------------+------------------+
3943@endcode
3944
3945The effect on performance is similar to the analytical test case -- in
3946theory, computation times should increase by a factor of 8, but we actually
3947see an increase by a factor of 11 for the time steps (219.5 seconds versus
39482450 seconds). This can be traced back to caches, with the small case mostly
3949fitting in caches. An interesting effect, typical of programs with a mix of
3950local communication (integrals @f$\mathcal L_h@f$) and global communication (computation of
3951transport speed) with some load imbalance, can be observed by looking at the
3952MPI ranks that encounter the minimal and maximal time of different phases,
3953respectively. Rank 0 reports the fastest throughput for the "rk time stepping
3954total" part. At the same time, it appears to be slowest for the "compute
transport speed" part, almost a factor of 2 slower than the
average and almost a factor of 4 slower than the fastest rank.
3957Since the latter involves global communication, we can attribute the
3958slowness in this part to the fact that the local Runge--Kutta stages have
3959advanced more quickly on this rank and need to wait until the other processors
3960catch up. At this point, one can wonder about the reason for this imbalance:
3961The number of cells is almost the same on all MPI processes.
3962However, the matrix-free framework is faster on affine and Cartesian
3963cells located towards the outlet of the channel, to which the lower MPI ranks
are assigned. On the other hand, rank 32, which reports the highest run time
for the Runge--Kutta stages, owns the curved cells near the cylinder, for
which no data compression is possible. To improve throughput, we could assign
different weights to different cell types when partitioning the
parallel::distributed::Triangulation object (see the sketch below), or even
measure the run time for a few time steps and rebalance based on those
measurements.
3970
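As a rough illustration of the first option (a sketch only, not part of this
program; the exact name and signature of the weighting signal differ between
deal.II versions, e.g. `signals.weight` in recent releases versus
`signals.cell_weight` in older ones), one could mark cells that carry a curved
manifold description as more expensive and then repartition:

@code
  // Give cells with a non-flat manifold (the curved cells around the
  // cylinder) a larger relative weight in the partitioning. The values
  // 1000/2000 are arbitrary relative weights chosen for illustration.
  triangulation.signals.weight.connect(
    [](const typename parallel::distributed::Triangulation<dim>::cell_iterator
         &cell,
       const CellStatus /*status*/) -> unsigned int {
      return cell->manifold_id() != numbers::flat_manifold_id ? 2000u : 1000u;
    });
  triangulation.repartition();
@endcode
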
The throughput per Runge--Kutta stage can be computed to be 2085 MDoFs/s for the
14.7 million DoF test case over the 346,420 Runge--Kutta stages, slightly slower
than the Cartesian-mesh throughput of 2360 MDoFs/s reported above.
3974
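For reference, this number results from the same formula as used for the
Cartesian case above, now with the average timing from the table for 5 levels of
refinement:
@f[
\text{throughput} = \frac{n_\mathrm{time steps} n_\mathrm{stages}
n_\mathrm{dofs}}{t_\mathrm{compute}} = \frac{69284 \cdot 5 \cdot
14.75\,\text{MDoFs}}{2450\,\text{s}} \approx 2085\, \text{MDoFs/s}.
@f]
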
3975Finally, if we add one additional refinement, we record the following output:
3976@code
3977Running with 40 MPI processes
3978Vectorization over 8 doubles = 512 bits (AVX512)
3979Number of degrees of freedom: 58,982,400 ( = 4 [vars] x 409,600 [cells] x 36 [dofs/cell/var] )
3980
3981...
3982
3983Time: 1.95, dt: 1.4e-05, norm rho: 0.01488, rho * u: 0.03923, energy: 0.05183
3984Time: 2, dt: 1.4e-05, norm rho: 0.01431, rho * u: 0.03969, energy: 0.04887
3985
3986+-------------------------------------------+------------------+------------+------------------+
3987| Total wallclock time elapsed | 2.166e+04s 26 | 2.166e+04s | 2.166e+04s 24 |
3988| | | |
3989| Section | no. calls | min time rank | avg time | max time rank |
3990+-------------------------------------------+------------------+------------+------------------+
3991| compute errors | 41 | 0.1758s 30 | 0.672s | 1.376s 1 |
3992| compute transport speed | 27748 | 321.3s 34 | 678.8s | 1202s 1 |
3993| output | 41 | 616.3s 32 | 616.4s | 616.4s 34 |
3994| rk time stepping total | 138733 | 1.983e+04s 1 | 2.036e+04s | 2.072e+04s 34 |
3995| rk_stage - integrals L_h | 693665 | 1.052e+04s 32 | 1.248e+04s | 1.387e+04s 19 |
3996| rk_stage - inv mass + vec upd | 693665 | 6404s 10 | 7868s | 1.018e+04s 32 |
3997+-------------------------------------------+------------------+------------+------------------+
3998@endcode
3999
4000The "rk time stepping total" part corresponds to a throughput of 2010 MDoFs/s. The
4001overall run time to perform 139k time steps is 20k seconds (5.7 hours) or 7
4002time steps per second -- not so bad for having nearly 60 million
4003unknowns. More throughput can be achieved by adding more cores to
4004the computation.
4005
4006
4007<a name="step_67-Resultsforflowinchannelaroundcylinderin3D"></a><h3>Results for flow in channel around cylinder in 3D</h3>
4008
4009
4010Switching the channel test case to 3D with 3 global refinements, the output is
4011@code
4012Running with 40 MPI processes
4013Vectorization over 8 doubles = 512 bits (AVX512)
4014Number of degrees of freedom: 221,184,000 ( = 5 [vars] x 204,800 [cells] x 216 [dofs/cell/var] )
4015
4016...
4017
4018Time: 1.95, dt: 0.00011, norm rho: 0.01131, rho * u: 0.03056, energy: 0.04091
4019Time: 2, dt: 0.00011, norm rho: 0.0119, rho * u: 0.03142, energy: 0.04425
4020
4021+-------------------------------------------+------------------+------------+------------------+
4022| Total wallclock time elapsed | 1.734e+04s 4 | 1.734e+04s | 1.734e+04s 38 |
4023| | | |
4024| Section | no. calls | min time rank | avg time | max time rank |
4025+-------------------------------------------+------------------+------------+------------------+
4026| compute errors | 41 | 0.6551s 34 | 3.216s | 7.281s 0 |
4027| compute transport speed | 3546 | 160s 34 | 393.2s | 776.9s 0 |
4028| output | 41 | 1350s 34 | 1353s | 1357s 0 |
4029| rk time stepping total | 17723 | 1.519e+04s 0 | 1.558e+04s | 1.582e+04s 34 |
4030| rk_stage - integrals L_h | 88615 | 1.005e+04s 32 | 1.126e+04s | 1.23e+04s 11 |
4031| rk_stage - inv mass + vec upd | 88615 | 3056s 11 | 4322s | 5759s 32 |
4032+-------------------------------------------+------------------+------------+------------------+
4033@endcode
4034
4035The physics are similar to the 2D case, with a slight motion in the z
4036direction due to the gravitational force. The throughput per Runge--Kutta
4037stage in this case is
4038@f[
4039\text{throughput} = \frac{n_\mathrm{time steps} n_\mathrm{stages}
4040n_\mathrm{dofs}}{t_\mathrm{compute}} =
4041\frac{17723 \cdot 5 \cdot 221.2\,\text{M}}{15580s} = 1258\, \text{MDoFs/s}.
4042@f]
4043
4044The throughput is lower than in 2D because the computation of the @f$\mathcal L_h@f$ term
4045is more expensive. This is due to over-integration with `degree+2` points and
4046the larger fraction of face integrals (worse volume-to-surface ratio) with
4047more expensive flux computations. If we only consider the inverse mass matrix
4048and vector update part, we record a throughput of 4857 MDoFs/s for the 2D case
4049of the isentropic vortex with 37.7 million unknowns, whereas the 3D case
runs at 4535 MDoFs/s. The performance is similar because both cases are in
fact limited by the memory bandwidth.
4052
If we go to four levels of global refinement, we need to increase the number
of processes to fit everything in memory -- the computation needs around 350
GB of RAM in this case. Also, the time it takes to complete the roughly 35k time
steps becomes more tolerable with additional resources. We therefore use
6 nodes with 40 cores each, resulting in a computation with 240 MPI processes:
4058@code
4059Running with 240 MPI processes
4060Vectorization over 8 doubles = 512 bits (AVX512)
4061Number of degrees of freedom: 1,769,472,000 ( = 5 [vars] x 1,638,400 [cells] x 216 [dofs/cell/var] )
4062
4063...
4064
4065Time: 1.95, dt: 5.6e-05, norm rho: 0.01129, rho * u: 0.0306, energy: 0.04086
4066Time: 2, dt: 5.6e-05, norm rho: 0.01189, rho * u: 0.03145, energy: 0.04417
4067
4068+-------------------------------------------+------------------+------------+------------------+
4069| Total wallclock time elapsed | 5.396e+04s 151 | 5.396e+04s | 5.396e+04s 0 |
4070| | | |
4071| Section | no. calls | min time rank | avg time | max time rank |
4072+-------------------------------------------+------------------+------------+------------------+
4073| compute errors | 41 | 2.632s 178 | 7.221s | 16.56s 0 |
4074| compute transport speed | 7072 | 714s 193 | 1553s | 3351s 0 |
4075| output | 41 | 8065s 176 | 8070s | 8079s 0 |
4076| rk time stepping total | 35350 | 4.25e+04s 0 | 4.43e+04s | 4.515e+04s 193 |
4077| rk_stage - integrals L_h | 176750 | 2.936e+04s 134 | 3.222e+04s | 3.67e+04s 99 |
4078| rk_stage - inv mass + vec upd | 176750 | 7004s 99 | 1.207e+04s | 1.55e+04s 132 |
4079+-------------------------------------------+------------------+------------+------------------+
4080@endcode
4081This simulation had nearly 2 billion unknowns -- quite a large
4082computation indeed, and still only needed around 1.5 seconds per time
4083step.
4084
4085
4086<a name="step_67-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
4087
4088
The code presented here straightforwardly extends to adaptive meshes, given
appropriate indicators for setting the refinement flags (see the sketch
below). Large-scale adaptivity of a similar solver in the context of the
acoustic wave equation has been achieved by the <a
href="https://github.com/kronbichler/exwave">exwave project</a>. However, in
the present context, the benefits of adaptivity are often limited to early
times and to effects close to the origin of the sound waves, as the waves
eventually reflect and diffract. This leads to steep gradients all over the
place, similar to turbulent flow, and to a more or less globally refined mesh.
4098
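As a starting point for experiments in this direction, a minimal sketch of the
mesh adaptation step could look as follows (the per-cell indicator is left
hypothetical here, and a complete implementation would additionally have to
transfer the solution to the new mesh and rebuild the MatrixFree data
structures):

@code
  // Requires <deal.II/distributed/grid_refinement.h>. `indicators` holds one
  // refinement criterion per locally owned active cell, e.g. based on density
  // gradients; how to fill it is left open in this sketch.
  Vector<float> indicators(triangulation.n_active_cells());
  // ... fill `indicators` ...

  parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
    triangulation, indicators, /*top fraction*/ 0.3, /*bottom fraction*/ 0.1);
  triangulation.execute_coarsening_and_refinement();
@endcode
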
Another topic that we did not discuss in the results section is a comparison
of different time integration schemes. The program provides four variants of
low-storage Runge--Kutta integrators that each have slightly different
accuracy and stability behavior. Among the schemes implemented here, the
higher-order ones provide additional accuracy but come with a slightly lower
efficiency in terms of the step size per stage before they violate the CFL
condition. An interesting extension would be to compare the low-storage
variants proposed here with standard Runge--Kutta integrators, or to use vector
operations that are run separately from the mass matrix operation, and to
compare performance.
4109
4110
4111<a name="step_67-Moreadvancednumericalfluxfunctionsandskewsymmetricformulations"></a><h4>More advanced numerical flux functions and skew-symmetric formulations</h4>
4112
4113
As mentioned in the introduction, the modified Lax--Friedrichs flux and the
HLL flux employed in this program are only two variants of a large body of
numerical fluxes available in the literature on the Euler equations. One
example is the HLLC (Harten--Lax--van Leer--Contact) flux, which restores the
contact wave that the HLL flux averages out; another is the Roe flux. As also
discussed in the introduction, the effect of the numerical flux on high-order
DG schemes is debatable (unlike for the case of low-order discretizations).
4121
4122A related improvement to increase the stability of the solver is to also
4123consider the spatial integral terms. A shortcoming in the rather naive
4124implementation used above is the fact that the energy conservation of the
4125original Euler equations (in the absence of shocks) only holds up to a
4126discretization error. If the solution is under-resolved, the discretization
4127error can give rise to an increase in the numerical energy and eventually
4128render the discretization unstable. This is because of the inexact numerical
4129integration of the terms in the Euler equations, which both contain rational
nonlinearities and higher-degree content from curved cells. A way out of this
dilemma is offered by so-called skew-symmetric formulations, see @cite Gassner2013 for a
4132simple variant. Skew symmetry means that switching the role of the solution
4133@f$\mathbf{w}@f$ and test functions @f$\mathbf{v}@f$ in the weak form produces the
4134exact negative of the original quantity, apart from some boundary terms. In
4135the discrete setting, the challenge is to keep this skew symmetry also when
4136the integrals are only computed approximately (in the continuous case,
4137skew-symmetry is a consequence of integration by parts). Skew-symmetric
4138numerical schemes balance spatial derivatives in the conservative form
4139@f$(\nabla \mathbf v, \mathbf{F}(\mathbf w))_{K}@f$ with contributions in the
4140convective form @f$(\mathbf v, \tilde{\mathbf{F}}(\mathbf w)\nabla
\mathbf{w})_{K}@f$ for some @f$\tilde{\mathbf{F}}@f$. The precise terms depend on
the equation and the integration formula, and can in some cases be understood
in terms of special skew-symmetric finite difference schemes.
4144
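To make the idea concrete with the simplest possible example (a scalar linear
advection equation with constant velocity @f$\mathbf c@f$ rather than the full
Euler system, so this is only an illustration of the mechanism), averaging the
conservative and the convective volume term yields
@f[
a_K(w, v) = -\frac{1}{2} \left(\nabla v, \mathbf c\, w\right)_{K}
+ \frac{1}{2} \left(v, \mathbf c \cdot \nabla w\right)_{K},
@f]
which fulfills @f$a_K(w, v) = -a_K(v, w)@f$ exactly, however inaccurate the
quadrature, as long as both terms are evaluated with the same quadrature
formula.
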
4145To get started, interested readers could take a look at
4146https://github.com/kronbichler/advection_miniapp, where a
4147skew-symmetric DG formulation is implemented with deal.II for a simple advection
4148equation.
4149
4150<a name="step_67-Equippingthecodeforsupersoniccalculations"></a><h4>Equipping the code for supersonic calculations</h4>
4151
4152
4153As mentioned in the introduction, the solution to the Euler equations develops
4154shocks as the Mach number increases, which require additional mechanisms to
4155stabilize the scheme, e.g. in the form of limiters. The main challenge besides
4156actually implementing the limiter or artificial viscosity approach would be to
4157load-balance the computations, as the additional computations involved for
4158limiting the oscillations in troubled cells would make them more expensive than the
4159plain DG cells without limiting. Furthermore, additional numerical fluxes that
4160better cope with the discontinuities would also be an option.
4161
4162One ingredient also necessary for supersonic flows are appropriate boundary
4163conditions. As opposed to the subsonic outflow boundaries discussed in the
4164introduction and implemented in the program, all characteristics are outgoing
4165for supersonic outflow boundaries, so we do not want to prescribe any external
4166data,
4167@f[
4168\mathbf{w}^+ = \mathbf{w}^- = \begin{pmatrix} \rho^-\\
4169(\rho \mathbf u)^- \\ E^-\end{pmatrix} \quad
4170 \text{(Neumann)}.
4171@f]
4172
4173In the code, we would simply add the additional statement
4174@code
4175 else if (supersonic_outflow_boundaries.find(boundary_id) !=
4176 supersonic_outflow_boundaries.end())
4177 {
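          // All characteristics leave the domain at a supersonic outflow, so
          // the exterior state is simply set to the interior state.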
4178 w_p = w_m;
4179 at_outflow = true;
4180 }
4181@endcode
4182in the `local_apply_boundary_face()` function.
4183
4184<a name="step_67-ExtensiontothelinearizedEulerequations"></a><h4>Extension to the linearized Euler equations</h4>
4185
4186
When the interest in an Euler solution is mostly the propagation of sound
waves, it often makes sense to linearize the Euler equations around a
4189background state, i.e., a given density, velocity and energy (or pressure)
4190field, and only compute the change against these fields. This is the setting
4191of the wide field of aeroacoustics. Even though the resolution requirements
4192are sometimes considerably reduced, implementation gets somewhat more
4193complicated as the linearization gives rise to additional terms. From a code
4194perspective, in the operator evaluation we also need to equip the code with
4195the state to linearize against. This information can be provided either by
4196analytical functions (that are evaluated in terms of the position of the
4197quadrature points) or by a vector similar to the solution. Based on that
4198vector, we would create an additional FEEvaluation object to read from it and
4199provide the values of the field at quadrature points. If the background
4200velocity is zero and the density is constant, the linearized Euler equations
4201further simplify and can equivalently be written in the form of the
4202acoustic wave equation.
4203
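As a sketch of what this could look like inside a cell evaluation routine (an
illustration only; the vector `background_solution` and the surrounding loop
structure are assumptions modeled on the cell loop of this program), one would
read the background state with a second evaluator:

@code
  FEEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi(data);
  FEEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi_background(data);
  for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
    {
      phi.reinit(cell);
      phi.gather_evaluate(src, EvaluationFlags::values);
      phi_background.reinit(cell);
      phi_background.gather_evaluate(background_solution,
                                     EvaluationFlags::values);
      for (const unsigned int q : phi.quadrature_point_indices())
        {
          const auto w_prime = phi.get_value(q);            // perturbation
          const auto w_bar   = phi_background.get_value(q); // linearization state
          // ... evaluate the linearized flux from w_prime and w_bar and
          // submit the result as in the nonlinear case ...
        }
      // ... integrate and distribute to the destination vector as usual ...
    }
@endcode
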
4204A challenge in the context of sound propagation is often the definition of
4205boundary conditions, as the computational domain needs to be of finite size,
4206whereas the actual simulation often spans an infinite (or at least much
4207larger) physical domain. Conventional Dirichlet or Neumann boundary conditions
4208give rise to reflections of the sound waves that eventually propagate back to
4209the region of interest and spoil the solution. Therefore, various variants of
4210non-reflecting boundary conditions or sponge layers, often in the form of
4211<a
4212href="https://en.wikipedia.org/wiki/Perfectly_matched_layer">perfectly
4213matched layers</a> -- where the solution is damped without reflection
4214-- are common.
4215
4216
4217<a name="step_67-ExtensiontothecompressibleNavierStokesequations"></a><h4>Extension to the compressible Navier-Stokes equations</h4>
4218
4219
4220The solver presented in this tutorial program can also be extended to the
4221compressible Navier--Stokes equations by adding viscous terms, as described in
@cite FehnWallKronbichler2019. To keep as much of the performance obtained
here as possible despite the additional cost of the elliptic terms, e.g.
discretized via an interior penalty method, one can switch the basis from
FE_DGQ to FE_DGQHermite as in the @ref step_59 "step-59" tutorial program.
4226
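In terms of code, this change roughly amounts to constructing the finite
element as follows (a sketch, keeping the vector-valued system of @f$d+2@f$
components used in this program and the `fe_degree` constant):

@code
  const FESystem<dim> fe(FE_DGQHermite<dim>(fe_degree), dim + 2);
@endcode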
4227
4228<a name="step_67-Usingcellcentricloopsandsharedmemory"></a><h4>Using cell-centric loops and shared memory</h4>
4229
4230
In this tutorial, we used face-centric loops. Here, cell and face integrals
are treated in separate loops, resulting in multiple write accesses into the
result vector, which is relatively expensive on modern hardware since write
operations generally also trigger an implicit read operation. Cell-centric
loops, on the other hand, process a cell and, in direct succession, all of
its @f$2d@f$ faces. Although this kind of loop implies that fluxes have to be
computed twice (once for each side of an interior face), the fact that the
result vector has to be accessed only once, and the fact that the resulting
algorithm is free of race conditions and as such perfectly suitable for
shared memory, might already give a performance boost. If you are interested
in these advanced topics, you can take a look at @ref step_76 "step-76" where
we take the present tutorial and modify it so that we can use these features.
4243 *
4244 *
4245<a name="step_67-PlainProg"></a>
4246<h1> The plain program</h1>
4247@include "step-67.cc"
4248*/