  constexpr LowStorageRungeKuttaScheme lsrk_scheme = stage_5_order_4;

Eventually, we select a detail of the spatial discretization, namely the
numerical flux (Riemann solver) at the faces between cells. For this
program, we have implemented a modified variant of the Lax--Friedrichs
flux and the Harten--Lax--van Leer (HLL) flux.

  enum EulerNumericalFlux
  {
    lax_friedrichs_modified,
    harten_lax_vanleer,
  };
  constexpr EulerNumericalFlux numerical_flux_type = lax_friedrichs_modified;
<a name="step_67-Equationdata"></a>
<h3>Equation data</h3>

We now define a class with the exact solution for the test case 0 and one
with a background flow field for test case 1 of the channel. Given that
the Euler equations are a problem with @f$d+2@f$ equations in @f$d@f$ dimensions,
we need to tell the Function base class about the correct number of
components.
  template <int dim>
  class ExactSolution : public Function<dim>
  {
  public:
    ExactSolution(const double time)
      : Function<dim>(dim + 2, time)
    {}

    virtual double value(const Point<dim>  &x,
                         const unsigned int component = 0) const override;
  };
As far as the actual function implemented is concerned, the analytical
test case is an isentropic vortex case (see e.g. the book by Hesthaven
and Warburton, Example 6.1 in Section 6.6 on page 209) which fulfills the
Euler equations with zero force term on the right hand side. Given that
definition, we return either the density, the momentum, or the energy
depending on which component is requested. Note that the original
definition of the density involves the @f$\frac{1}{\gamma-1}@f$-th power of
some expression. Since `std::pow()` has pretty slow implementations on
some systems, we replace it by logarithm followed by exponentiation (of
base 2), which is mathematically equivalent but usually much better
optimized. This formula might lose accuracy in the last digits
for very small numbers compared to `std::pow()`, but we are happy with
it anyway, since small numbers map to data close to 1.
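To spell out the transformation used here (a minimal sketch, not part of
the program itself, and valid only for a positive base): the identity
@f$a^b = 2^{b\log_2 a}@f$ turns a call to `std::pow()` into one logarithm and
one exponentiation, and the logarithm can be reused if several powers of
the same base are needed, as is the case for the density and the pressure
below.

  #include <cmath>

  // Sketch: a^b evaluated without std::pow(), assuming a > 0. The program
  // below stores density_log = std::log2(...) once and reuses it twice.
  double pow_via_exp2(const double a, const double b)
  {
    return std::exp2(b * std::log2(a));
  }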
For the channel test case, we simply select a density of 1, a velocity of
0.4 in @f$x@f$ direction and zero in the other directions, and an energy that
corresponds to a speed of sound of 1.3 measured against the background
velocity field, computed from the relation @f$E = \frac{c^2}{\gamma (\gamma
-1)} + \frac 12 \rho \|u\|^2@f$.
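To make the origin of the energy constant used below explicit (a quick
check, using the value @f$\gamma = 1.4@f$ defined earlier in the program):
with @f$\rho = 1@f$, @f$c = 1.3@f$ and @f$\|u\| = 0.4@f$ this relation gives
@f$E = \frac{1.69}{1.4\cdot 0.4} + \frac 12 \cdot 1 \cdot 0.16 =
3.017857\ldots + 0.08 = 3.097857142857143@f$, which is exactly the number
returned for the energy component in test case 1.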
  template <int dim>
  double ExactSolution<dim>::value(const Point<dim>  &x,
                                   const unsigned int component) const
  {
    const double t = this->get_time();

    switch (testcase)
      {
        case 0:
          {
            Assert(dim == 2, ExcNotImplemented());
            const double beta = 5;

            Point<dim> x0;
            x0[0] = 5.;
            const double radius_sqr =
              (x - x0).norm_square() - 2. * (x[0] - x0[0]) * t + t * t;
            const double factor =
              beta / (numbers::PI * 2) * std::exp(1. - radius_sqr);
            const double density_log = std::log2(
              std::abs(1. - (gamma - 1.) / gamma * 0.25 * factor * factor));
            const double density = std::exp2(density_log * (1. / (gamma - 1.)));
            const double u       = 1. - factor * (x[1] - x0[1]);
            const double v       = factor * (x[0] - t - x0[0]);

            if (component == 0)
              return density;
            else if (component == 1)
              return density * u;
            else if (component == 2)
              return density * v;
            else
              {
                const double pressure =
                  std::exp2(density_log * (gamma / (gamma - 1.)));
                return pressure / (gamma - 1.) +
                       0.5 * (density * u * u + density * v * v);
              }
          }

        case 1:
          {
            if (component == 0)
              return 1.;
            else if (component == 1)
              return 0.4;
            else if (component == dim + 1)
              return 3.097857142857143;
            else
              return 0.;
          }

        default:
          Assert(false, ExcNotImplemented());
          return 0.;
      }
  }
<a name="step_67-LowstorageexplicitRungeKuttatimeintegrators"></a>
<h3>Low-storage explicit Runge--Kutta time integrators</h3>

The next few lines implement a few low-storage variants of Runge--Kutta
methods. These methods have specific Butcher tableaux with coefficients
@f$b_i@f$ and @f$a_i@f$ as shown in the introduction. As usual in Runge--Kutta
methods, we can deduce the time steps, @f$c_i = \sum_{j=1}^{i-2} b_j +
a_{i-1}@f$, from those coefficients. The main advantage of this kind of
scheme is the fact that only two vectors are needed per stage, namely the
accumulated part of the solution @f$\mathbf{w}@f$ (that will hold the solution
@f$\mathbf{w}^{n+1}@f$ at the new time @f$t^{n+1}@f$ after the last stage), the
update vector @f$\mathbf{r}_i@f$ that gets evaluated during the stages, plus
one vector @f$\mathbf{k}_i@f$ to hold the evaluation of the operator. Such a
Runge--Kutta setup reduces the memory storage and memory access. As the
memory bandwidth is often the performance-limiting factor on modern
hardware when the evaluation of the differential operator is
well-optimized, performance can be improved over standard time
integrators. This is true also when taking into account that a
conventional Runge--Kutta scheme might allow for slightly larger time
steps as more free parameters allow for better stability properties.
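Written out with plain vector operations, one time step of such a scheme
reads as follows. This is only a sketch, assuming `pde_operator.apply()`
evaluates @f$\mathcal M^{-1}\mathcal L@f$ and that `ai`, `bi`, `ci` are the
coefficient vectors introduced below; the actual implementation fuses
these updates with the operator evaluation in `perform_stage()`.

  // Low-storage Runge-Kutta sketch: only the accumulated solution w
  // (`solution`), the stage vector r_i (`vec_ri`), and the operator
  // evaluation k_i (`vec_ki`) are kept in memory.
  vec_ri = solution;
  for (unsigned int stage = 0; stage < bi.size(); ++stage)
    {
      // k_i = M^{-1} L(t + c_i dt, r_i)
      pde_operator.apply(time + ci[stage] * time_step, vec_ri, vec_ki);

      // r_{i+1} = w + a_i dt k_i, formed from the not-yet-updated w;
      // skipped after the last stage since no a_i exists there.
      if (stage + 1 < bi.size())
        {
          vec_ri = solution;
          vec_ri.add(ai[stage] * time_step, vec_ki);
        }

      // w <- w + b_i dt k_i
      solution.add(bi[stage] * time_step, vec_ki);
    }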
In this tutorial program, we concentrate on a few variants of
low-storage schemes defined in the article by Kennedy, Carpenter, and
Lewis (2000), as well as one variant described by Tselios and Simos
(2007). There is a large series of other schemes available, which could
be addressed by additional sets of coefficients or slightly different
update formulas.

We define a single class for the four integrators, distinguished by the
enum described above. To each scheme, we then fill the vectors for the
@f$b_i@f$ and @f$a_i@f$ coefficients into the member variables of the class.

  class LowStorageRungeKuttaIntegrator
  {
  public:
    LowStorageRungeKuttaIntegrator(const LowStorageRungeKuttaScheme scheme)
    {
      TimeStepping::runge_kutta_method lsrk;
      switch (scheme)
        {
First comes the three-stage scheme of order three by Kennedy et al.
(2000). While its stability region is significantly smaller than for
the other schemes, it only involves three stages, so it is very
competitive in terms of the work per stage.

          case stage_3_order_3:
            {
              lsrk = TimeStepping::LOW_STORAGE_RK_STAGE3_ORDER3;
              break;
            }
The next scheme is a five-stage scheme of order four, again
defined in the paper by Kennedy et al. (2000).

          case stage_5_order_4:
            {
              lsrk = TimeStepping::LOW_STORAGE_RK_STAGE5_ORDER4;
              break;
            }
The following scheme of seven stages and order four has been
explicitly derived for acoustics problems. It is a balance of
accuracy for imaginary eigenvalues among fourth order schemes,
combined with a large stability region. Since DG schemes are
dissipative among the highest frequencies, this does not
necessarily translate to the highest possible time step per
stage. In the context of the present tutorial program, the
numerical flux plays a crucial role in the dissipation and thus
also the maximal stable time step size. For the modified
Lax--Friedrichs flux, this scheme is similar to the
`stage_5_order_4` scheme in terms of step size per stage if only
stability is considered, but somewhat less efficient for the HLL
flux.

          case stage_7_order_4:
            {
              lsrk = TimeStepping::LOW_STORAGE_RK_STAGE7_ORDER4;
              break;
            }
The last scheme included here is the nine-stage scheme of order
five from Kennedy et al. (2000). It is the most accurate among
the schemes used here, but the higher order of accuracy
sacrifices some stability, so the step length normalized per
stage is less than for the fourth order schemes.

          case stage_9_order_5:
            {
              lsrk = TimeStepping::LOW_STORAGE_RK_STAGE9_ORDER5;
              break;
            }
          default:
            AssertThrow(false, ExcNotImplemented());
        }

      TimeStepping::LowStorageRungeKutta<
        LinearAlgebra::distributed::Vector<Number>>
        rk_integrator(lsrk);
      rk_integrator.get_coefficients(ai, bi, ci);
    }

    unsigned int n_stages() const
    {
      return bi.size();
    }
The main function of the time integrator is to go through the stages,
evaluate the operator, prepare the @f$\mathbf{r}_i@f$ vector for the next
evaluation, and update the solution vector @f$\mathbf{w}@f$. We hand off
the work to the `pde_operator` involved in order to be able to merge
the vector operations of the Runge--Kutta setup with the evaluation of
the differential operator for better performance, so all we do here is
to delegate the vectors and coefficients.

We separately call the operator for the first stage because we need
slightly modified arguments there: We evaluate the solution from
the old solution @f$\mathbf{w}^n@f$ rather than a @f$\mathbf r_i@f$ vector, so
the first argument is `solution`. We here let the stage vector
@f$\mathbf{r}_i@f$ also hold the temporary result of the evaluation, as it
is not used otherwise. For all subsequent stages, we use the vector
`vec_ki` as the second vector argument to store the result of the
operator evaluation. Finally, when we are at the last stage, we must
skip the computation of the vector @f$\mathbf{r}_{s+1}@f$ as there is no
coefficient @f$a_s@f$ available (nor will it be used).
    template <typename VectorType, typename Operator>
    void perform_time_step(const Operator &pde_operator,
                           const double    current_time,
                           const double    time_step,
                           VectorType     &solution,
                           VectorType     &vec_ri,
                           VectorType     &vec_ki) const
    {
      pde_operator.perform_stage(current_time,
                                 bi[0] * time_step,
                                 ai[0] * time_step,
                                 solution,
                                 vec_ri,
                                 solution,
                                 vec_ri);

      for (unsigned int stage = 1; stage < bi.size(); ++stage)
        {
          const double c_i = ci[stage];
          pde_operator.perform_stage(current_time + c_i * time_step,
                                     bi[stage] * time_step,
                                     (stage == bi.size() - 1 ?
                                        0 :
                                        ai[stage] * time_step),
                                     vec_ri,
                                     vec_ki,
                                     solution,
                                     vec_ri);
        }
    }

  private:
    std::vector<double> bi;
    std::vector<double> ai;
    std::vector<double> ci;
  };
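Putting the class to use looks as follows. This is only a sketch;
`euler_operator`, the three vectors, `current_time` and `time_step` are
assumed to be set up as in the rest of the program.

  // Advance the solution by one time step with the scheme selected by
  // lsrk_scheme; vec_ri and vec_ki are scratch vectors with the same
  // parallel layout as solution.
  const LowStorageRungeKuttaIntegrator integrator(lsrk_scheme);
  integrator.perform_time_step(euler_operator,
                               current_time,
                               time_step,
                               solution,
                               vec_ri,
                               vec_ki);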
<a name="step_67-ImplementationofpointwiseoperationsoftheEulerequations"></a>
<h3>Implementation of point-wise operations of the Euler equations</h3>

In the following functions, we implement the various problem-specific
operators pertaining to the Euler equations. Each function acts on the
vector of conserved variables @f$[\rho, \rho\mathbf{u}, E]@f$ that we hold in
the solution vectors, and computes various derived quantities.

First out is the computation of the velocity, which we derive from the
momentum variable @f$\rho \mathbf{u}@f$ by division by @f$\rho@f$. One thing to
note here is that we decorate all those functions with the keyword
`DEAL_II_ALWAYS_INLINE`. This is a special macro that maps to a
compiler-specific keyword that tells the compiler to never create a
function call for any of those functions, and instead move the
implementation <a
href="https://en.wikipedia.org/wiki/Inline_function">inline</a> to where
they are called. This is critical for performance because we call into some
of those functions millions or billions of times: For example, we both use
the velocity for the computation of the flux further down, but also for the
computation of the pressure, and both of these places are evaluated at
every quadrature point of every cell. Making sure these functions are
inlined ensures not only that the processor does not have to execute a jump
instruction into the function (and the corresponding return jump), but also
that the compiler can re-use intermediate information from one function's
context in code that comes after the place where the function was called.
(We note that compilers are generally quite good at figuring out which
functions to inline by themselves. Here is a place where compilers may or
may not have figured it out by themselves but where we know for sure that
inlining is a win.)
Another trick we apply is a separate variable for the inverse density
@f$\frac{1}{\rho}@f$. This enables the compiler to only perform a single
division for the flux, despite the division being used at several
places. As divisions are around ten to twenty times as expensive as
multiplications or additions, avoiding redundant divisions is crucial for
performance. We note that taking the inverse first and later multiplying
with it is not equivalent to a division in floating point arithmetic due
to roundoff effects, so the compiler is not allowed to exchange one way by
the other with standard optimization flags. However, it is also not
particularly difficult to write the code in the right way.

To summarize, the chosen strategy of always inlining and careful
definition of expensive arithmetic operations allows us to write compact
code without passing all intermediate results around, while making sure
that the code maps to excellent machine code.
  template <int dim, typename Number>
  inline DEAL_II_ALWAYS_INLINE
  Tensor<1, dim, Number>
  euler_velocity(const Tensor<1, dim + 2, Number> &conserved_variables)
  {
    const Number inverse_density = Number(1.) / conserved_variables[0];

    Tensor<1, dim, Number> velocity;
    for (unsigned int d = 0; d < dim; ++d)
      velocity[d] = conserved_variables[1 + d] * inverse_density;

    return velocity;
  }
The next function computes the pressure from the vector of conserved
variables, using the formula @f$p = (\gamma - 1) \left(E - \frac 12 \rho
\mathbf{u}\cdot \mathbf{u}\right)@f$. As explained above, we use the
velocity from the `euler_velocity()` function. Note that we need to
specify the first template argument `dim` here because the compiler is
not able to deduce it from the arguments of the tensor, whereas the
second argument (number type) can be automatically deduced.

  template <int dim, typename Number>
  inline DEAL_II_ALWAYS_INLINE
  Number
  euler_pressure(const Tensor<1, dim + 2, Number> &conserved_variables)
  {
    const Tensor<1, dim, Number> velocity =
      euler_velocity<dim>(conserved_variables);

    Number rho_u_dot_u = conserved_variables[1] * velocity[0];
    for (unsigned int d = 1; d < dim; ++d)
      rho_u_dot_u += conserved_variables[1 + d] * velocity[d];

    return (gamma - 1.) * (conserved_variables[dim + 1] - 0.5 * rho_u_dot_u);
  }
Here is the definition of the Euler flux function, i.e., the definition
of the actual equation. Given the velocity and pressure (that the
compiler optimization will make sure are done only once), this is
straight-forward given the equation stated in the introduction.

  template <int dim, typename Number>
  inline DEAL_II_ALWAYS_INLINE
  Tensor<1, dim + 2, Tensor<1, dim, Number>>
  euler_flux(const Tensor<1, dim + 2, Number> &conserved_variables)
  {
    const Tensor<1, dim, Number> velocity =
      euler_velocity<dim>(conserved_variables);
    const Number pressure = euler_pressure<dim>(conserved_variables);

    Tensor<1, dim + 2, Tensor<1, dim, Number>> flux;
    for (unsigned int d = 0; d < dim; ++d)
      {
        flux[0][d] = conserved_variables[1 + d];
        for (unsigned int e = 0; e < dim; ++e)
          flux[e + 1][d] = conserved_variables[e + 1] * velocity[d];
        flux[d + 1][d] += pressure;
        flux[dim + 1][d] =
          velocity[d] * (conserved_variables[dim + 1] + pressure);
      }

    return flux;
  }
This next function is a helper to simplify the implementation of the
numerical flux, implementing the action of a tensor of tensors (with
non-standard outer dimension of size `dim + 2`, so the standard overloads
provided by deal.II's tensor classes do not apply here) with another
tensor of the same inner dimension, i.e., a matrix-vector product.

  template <int n_components, int dim, typename Number>
  inline DEAL_II_ALWAYS_INLINE
  Tensor<1, n_components, Number>
  operator*(const Tensor<1, n_components, Tensor<1, dim, Number>> &matrix,
            const Tensor<1, dim, Number>                          &vector)
  {
    Tensor<1, n_components, Number> result;
    for (unsigned int d = 0; d < n_components; ++d)
      result[d] = matrix[d] * vector;
    return result;
  }
This function implements the numerical flux (Riemann solver). It gets the
state from the two sides of an interface and the normal vector, oriented
from the side of the solution @f$\mathbf{w}^-@f$ towards the solution
@f$\mathbf{w}^+@f$. In finite volume methods which rely on piece-wise
constant data, the numerical flux is the central ingredient as it is the
only place where the physical information is entered. In DG methods, the
numerical flux is less central due to the polynomials within the elements
and the physical flux used there. As a result of higher-degree
interpolation with consistent values from both sides in the limit of a
continuous solution, the numerical flux can be seen as a control of the
jump of the solution from both sides to weakly impose continuity. It is
important to realize that a numerical flux alone cannot stabilize a
high-order DG method in the presence of shocks, and thus any DG method
must be combined with further shock-capturing techniques to handle those
cases. In this tutorial, we focus on wave-like solutions of the Euler
equations in the subsonic regime without strong discontinuities where our
basic scheme is sufficient.

Nonetheless, the numerical flux is decisive in terms of the numerical
dissipation of the overall scheme and influences the admissible time step
size with explicit Runge--Kutta methods. We consider two choices, a
modified Lax--Friedrichs scheme and the widely used Harten--Lax--van Leer
(HLL) flux. For both variants, we first need to get the velocities and
pressures from both sides of the interface and evaluate the physical
Euler flux.

For the local Lax--Friedrichs flux, the definition is @f$\hat{\mathbf{F}}
=\frac{\mathbf{F}(\mathbf{w}^-)+\mathbf{F}(\mathbf{w}^+)}{2} +
\frac{\lambda}{2}\left[\mathbf{w}^--\mathbf{w}^+\right]\otimes
\mathbf{n^-}@f$, where the factor @f$\lambda =
\max\left(\|\mathbf{u}^-\|+c^-, \|\mathbf{u}^+\|+c^+\right)@f$ gives the
maximal wave speed and @f$c = \sqrt{\gamma p / \rho}@f$ is the speed of
sound. Here, we choose two modifications of that expression for reasons
of computational efficiency, given the small impact of the flux on the
solution. For the above definition of the factor @f$\lambda@f$, we would need
to take four square roots, two for the two velocity norms and two for the
speed of sound on either side. The first modification is hence to rather
use @f$\sqrt{\|\mathbf{u}\|^2+c^2}@f$ as an estimate of the maximal speed
(which is at most a factor of 2 away from the actual maximum, as shown in
the introduction). This allows us to pull the square root out of the
maximum and get away with a single square root computation. The second
modification is to further relax on the parameter @f$\lambda@f$---the smaller
it is, the smaller the dissipation factor (which is multiplied by the
jump in @f$\mathbf{w}@f$, which might result in a smaller or bigger
dissipation in the end). This allows us to fit the spectrum into the
stability region of the explicit Runge--Kutta integrator with bigger time
steps. However, we cannot make dissipation too small because otherwise
imaginary eigenvalues grow larger. Finally, the current conservative
formulation is not energy-stable in the limit of @f$\lambda\to 0@f$ as it is
not skew-symmetric, and would need additional measures such as split-form
DG schemes in that case.

For the HLL flux, we follow the formula from literature, introducing an
additional weighting of the two states from Lax--Friedrichs by a
parameter @f$s@f$. It is derived from the physical transport directions of
the Euler equations in terms of the current direction of velocity and
sound speed. For the velocity, we here choose a simple arithmetic average
which is sufficient for DG scenarios and moderate jumps in material
parameters.

Since the numerical flux is multiplied by the normal vector in the weak
form, we multiply the result by the normal vector for all terms in the
equation. In these multiplications, the `operator*` defined above enables
a compact notation similar to the mathematical definition.

In this and the following functions, we use variable suffixes `_m` and
`_p` to indicate quantities derived from @f$\mathbf{w}^-@f$ and @f$\mathbf{w}^+@f$,
i.e., values "here" and "there" relative to the current cell when looking
at a neighbor cell.
  template <int dim, typename Number>
  inline DEAL_II_ALWAYS_INLINE
  Tensor<1, dim + 2, Number>
  euler_numerical_flux(const Tensor<1, dim + 2, Number> &u_m,
                       const Tensor<1, dim + 2, Number> &u_p,
                       const Tensor<1, dim, Number>     &normal)
  {
    const auto velocity_m = euler_velocity<dim>(u_m);
    const auto velocity_p = euler_velocity<dim>(u_p);

    const auto pressure_m = euler_pressure<dim>(u_m);
    const auto pressure_p = euler_pressure<dim>(u_p);

    const auto flux_m = euler_flux<dim>(u_m);
    const auto flux_p = euler_flux<dim>(u_p);

    switch (numerical_flux_type)
      {
        case lax_friedrichs_modified:
          {
            const auto lambda =
              0.5 * std::sqrt(std::max(velocity_p.norm_square() +
                                         gamma * pressure_p * (1. / u_p[0]),
                                       velocity_m.norm_square() +
                                         gamma * pressure_m * (1. / u_m[0])));

            return 0.5 * (flux_m * normal + flux_p * normal) +
                   0.5 * lambda * (u_m - u_p);
          }

        case harten_lax_vanleer:
          {
            const auto avg_velocity_normal =
              0.5 * ((velocity_m + velocity_p) * normal);
            const auto   avg_c = std::sqrt(std::abs(
              0.5 * gamma *
              (pressure_p * (1. / u_p[0]) + pressure_m * (1. / u_m[0]))));
            const Number s_pos =
              std::max(Number(), avg_velocity_normal + avg_c);
            const Number s_neg =
              std::min(Number(), avg_velocity_normal - avg_c);
            const Number inverse_s = Number(1.) / (s_pos - s_neg);

            return inverse_s *
                   ((s_pos * (flux_m * normal) - s_neg * (flux_p * normal)) -
                    s_pos * s_neg * (u_m - u_p));
          }

        default:
          {
            Assert(false, ExcNotImplemented());
            return {};
          }
      }
  }
This and the next function are helper functions to provide compact
evaluation calls as multiple points get batched together via a
VectorizedArray argument (see the @ref step_37 "step-37" tutorial for
details). This function is used for the subsonic outflow boundary
conditions where we need to set the energy component to a prescribed
value. The next one requests the solution on all components and is used
for inflow boundaries where all components of the solution are set.
  template <int dim, typename Number>
  VectorizedArray<Number>
  evaluate_function(const Function<dim>                       &function,
                    const Point<dim, VectorizedArray<Number>> &p_vectorized,
                    const unsigned int                         component)
  {
    VectorizedArray<Number> result;
    for (unsigned int v = 0; v < VectorizedArray<Number>::size(); ++v)
      {
        Point<dim> p;
        for (unsigned int d = 0; d < dim; ++d)
          p[d] = p_vectorized[d][v];
        result[v] = function.value(p, component);
      }
    return result;
  }


  template <int dim, typename Number, int n_components = dim + 2>
  Tensor<1, n_components, VectorizedArray<Number>>
  evaluate_function(const Function<dim>                       &function,
                    const Point<dim, VectorizedArray<Number>> &p_vectorized)
  {
    AssertDimension(function.n_components, n_components);
    Tensor<1, n_components, VectorizedArray<Number>> result;
    for (unsigned int v = 0; v < VectorizedArray<Number>::size(); ++v)
      {
        Point<dim> p;
        for (unsigned int d = 0; d < dim; ++d)
          p[d] = p_vectorized[d][v];
        for (unsigned int d = 0; d < n_components; ++d)
          result[d][v] = function.value(p, d);
      }
    return result;
  }
<a name="step_67-TheEulerOperationclass"></a>
<h3>The EulerOperator class</h3>

This class implements the evaluators for the Euler problem, in analogy to
the `LaplaceOperator` class of @ref step_37 "step-37" or @ref step_59
"step-59". Since the present operator is non-linear and does not require a
matrix interface (to be handed over to preconditioners), we skip the
various `vmult` functions otherwise present in matrix-free operators and
only implement an `apply` function as well as the combination of `apply`
with the required vector updates for the low-storage Runge--Kutta time
integrator mentioned above (called `perform_stage`). Furthermore, we have
added three additional functions involving matrix-free routines, namely
one to compute an estimate of the time step scaling (that is combined
with the Courant number for the actual time step size) based on the
velocity and speed of sound in the elements, one for the projection of
solutions (specializing VectorTools::project() for the DG case), and one
to compute the errors against a possible analytical solution or norms
against some background state.

The rest of the class is similar to other matrix-free tutorials. As
discussed in the introduction, we provide a few functions to allow a user
to pass in various forms of boundary conditions on different parts of the
domain boundary, and possible body forces.
  template <int dim, int degree, int n_points_1d>
  class EulerOperator
  {
  public:
    static constexpr unsigned int n_quadrature_points_1d = n_points_1d;

    EulerOperator(TimerOutput &timer);

    void reinit(const Mapping<dim>    &mapping,
                const DoFHandler<dim> &dof_handler);

    void set_inflow_boundary(const types::boundary_id       boundary_id,
                             std::unique_ptr<Function<dim>> inflow_function);

    void set_subsonic_outflow_boundary(
      const types::boundary_id       boundary_id,
      std::unique_ptr<Function<dim>> outflow_energy);

    void set_wall_boundary(const types::boundary_id boundary_id);

    void set_body_force(std::unique_ptr<Function<dim>> body_force);

    void apply(const double                                      current_time,
               const LinearAlgebra::distributed::Vector<Number> &src,
               LinearAlgebra::distributed::Vector<Number>       &dst) const;

    void
    perform_stage(const Number cur_time,
                  const Number factor_solution,
                  const Number factor_ai,
                  const LinearAlgebra::distributed::Vector<Number> &current_ri,
                  LinearAlgebra::distributed::Vector<Number>       &vec_ki,
                  LinearAlgebra::distributed::Vector<Number>       &solution,
                  LinearAlgebra::distributed::Vector<Number> &next_ri) const;

    void project(const Function<dim>                        &function,
                 LinearAlgebra::distributed::Vector<Number> &solution) const;

    std::array<double, 3> compute_errors(
      const Function<dim>                              &function,
      const LinearAlgebra::distributed::Vector<Number> &solution) const;

    double compute_cell_transport_speed(
      const LinearAlgebra::distributed::Vector<Number> &solution) const;

    void
    initialize_vector(LinearAlgebra::distributed::Vector<Number> &vector) const;

  private:
    MatrixFree<dim, Number> data;

    TimerOutput &timer;

    std::map<types::boundary_id, std::unique_ptr<Function<dim>>>
      inflow_boundaries;
    std::map<types::boundary_id, std::unique_ptr<Function<dim>>>
      subsonic_outflow_boundaries;
    std::set<types::boundary_id>   wall_boundaries;
    std::unique_ptr<Function<dim>> body_force;

    void local_apply_inverse_mass_matrix(
      const MatrixFree<dim, Number>                    &data,
      LinearAlgebra::distributed::Vector<Number>       &dst,
      const LinearAlgebra::distributed::Vector<Number> &src,
      const std::pair<unsigned int, unsigned int>      &cell_range) const;

    void local_apply_cell(
      const MatrixFree<dim, Number>                    &data,
      LinearAlgebra::distributed::Vector<Number>       &dst,
      const LinearAlgebra::distributed::Vector<Number> &src,
      const std::pair<unsigned int, unsigned int>      &cell_range) const;

    void local_apply_face(
      const MatrixFree<dim, Number>                    &data,
      LinearAlgebra::distributed::Vector<Number>       &dst,
      const LinearAlgebra::distributed::Vector<Number> &src,
      const std::pair<unsigned int, unsigned int>      &face_range) const;

    void local_apply_boundary_face(
      const MatrixFree<dim, Number>                    &data,
      LinearAlgebra::distributed::Vector<Number>       &dst,
      const LinearAlgebra::distributed::Vector<Number> &src,
      const std::pair<unsigned int, unsigned int>      &face_range) const;
  };
  template <int dim, int degree, int n_points_1d>
  EulerOperator<dim, degree, n_points_1d>::EulerOperator(TimerOutput &timer)
    : timer(timer)
  {}
For the initialization of the Euler operator, we set up the MatrixFree
variable contained in the class. This can be done given a mapping to
describe possible curved boundaries as well as a DoFHandler object
describing the degrees of freedom. Since we use a discontinuous Galerkin
discretization in this tutorial program where no constraints are imposed
strongly on the solution field, we do not need to pass in an
AffineConstraints object and instead use a dummy for the
construction. With respect to quadrature, we want to select two different
ways of computing the underlying integrals: The first is a flexible one,
based on a template parameter `n_points_1d` (that will be assigned the
`n_q_points_1d` value specified at the top of this file). More accurate
integration is necessary to avoid the aliasing problem due to the
variable coefficients in the Euler operator. The second less accurate
quadrature formula is a tight one based on `fe_degree+1` and needed for
the inverse mass matrix. While that formula provides an exact inverse
only on affine element shapes and not on deformed elements, it enables
the fast inversion of the mass matrix by tensor product techniques,
necessary to ensure optimal computational efficiency overall.
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::reinit(
    const Mapping<dim>    &mapping,
    const DoFHandler<dim> &dof_handler)
  {
    const std::vector<const DoFHandler<dim> *> dof_handlers = {&dof_handler};
    const AffineConstraints<double>            dummy;
    const std::vector<const AffineConstraints<double> *> constraints = {&dummy};
    const std::vector<Quadrature<1>> quadratures = {QGauss<1>(n_q_points_1d),
                                                    QGauss<1>(fe_degree + 1)};

    typename MatrixFree<dim, Number>::AdditionalData additional_data;
    additional_data.mapping_update_flags =
      (update_gradients | update_JxW_values | update_quadrature_points |
       update_values);
    additional_data.mapping_update_flags_inner_faces =
      (update_JxW_values | update_quadrature_points | update_normal_vectors |
       update_values);
    additional_data.mapping_update_flags_boundary_faces =
      (update_JxW_values | update_quadrature_points | update_normal_vectors |
       update_values);
    additional_data.tasks_parallel_scheme =
      MatrixFree<dim, Number>::AdditionalData::none;

    data.reinit(
      mapping, dof_handlers, constraints, quadratures, additional_data);
  }
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::initialize_vector(
    LinearAlgebra::distributed::Vector<Number> &vector) const
  {
    data.initialize_dof_vector(vector);
  }
The subsequent four member functions are the ones that must be called from
outside to specify the various types of boundaries. For an inflow boundary,
we must specify all components in terms of density @f$\rho@f$, momentum @f$\rho
\mathbf{u}@f$ and energy @f$E@f$. Given this information, we then store the
function alongside the respective boundary id in a map member variable of
this class. Likewise, we proceed for the subsonic outflow boundaries (where
we request a function as well, which we use to retrieve the energy) and for
wall (no-penetration) boundaries where we impose zero normal velocity (no
function necessary, so we only request the boundary id). For the present
DG code where boundary conditions are solely applied as part of the weak
form (during time integration), the call to set the boundary conditions
can appear both before or after the `reinit()` call to this class. This
is different from continuous finite element codes where the boundary
conditions determine the content of the AffineConstraints object that is
sent into MatrixFree for initialization, thus requiring to be set before
the initialization of the matrix-free data structures.

The checks added in each of the four functions are used to
ensure that boundary conditions are mutually exclusive on the various
parts of the boundary, i.e., that a user does not accidentally designate a
boundary as both an inflow and say a subsonic outflow boundary.
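As a usage sketch (the boundary ids chosen here and the `euler_operator`
object are placeholders; the actual assignment for the two test cases
happens later in the program):

  // Hypothetical example: id 0 is an inflow described by the analytic
  // solution, id 1 a subsonic outflow prescribing the energy, id 2 a wall.
  euler_operator.set_inflow_boundary(
    0, std::make_unique<ExactSolution<dim>>(0.));
  euler_operator.set_subsonic_outflow_boundary(
    1, std::make_unique<ExactSolution<dim>>(0.));
  euler_operator.set_wall_boundary(2);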
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::set_inflow_boundary(
    const types::boundary_id       boundary_id,
    std::unique_ptr<Function<dim>> inflow_function)
  {
    AssertThrow(subsonic_outflow_boundaries.find(boundary_id) ==
                    subsonic_outflow_boundaries.end() &&
                  wall_boundaries.find(boundary_id) == wall_boundaries.end(),
                ExcMessage("You already set the boundary with id " +
                           std::to_string(static_cast<int>(boundary_id)) +
                           " to another type of boundary before now setting " +
                           "it as inflow"));
    AssertThrow(inflow_function->n_components == dim + 2,
                ExcMessage("Expected function with dim+2 components"));

    inflow_boundaries[boundary_id] = std::move(inflow_function);
  }
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::set_subsonic_outflow_boundary(
    const types::boundary_id       boundary_id,
    std::unique_ptr<Function<dim>> outflow_function)
  {
    AssertThrow(inflow_boundaries.find(boundary_id) ==
                    inflow_boundaries.end() &&
                  wall_boundaries.find(boundary_id) == wall_boundaries.end(),
                ExcMessage("You already set the boundary with id " +
                           std::to_string(static_cast<int>(boundary_id)) +
                           " to another type of boundary before now setting " +
                           "it as subsonic outflow"));
    AssertThrow(outflow_function->n_components == dim + 2,
                ExcMessage("Expected function with dim+2 components"));

    subsonic_outflow_boundaries[boundary_id] = std::move(outflow_function);
  }
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::set_wall_boundary(
    const types::boundary_id boundary_id)
  {
    AssertThrow(inflow_boundaries.find(boundary_id) ==
                    inflow_boundaries.end() &&
                  subsonic_outflow_boundaries.find(boundary_id) ==
                    subsonic_outflow_boundaries.end(),
                ExcMessage("You already set the boundary with id " +
                           std::to_string(static_cast<int>(boundary_id)) +
                           " to another type of boundary before now setting " +
                           "it as wall boundary"));

    wall_boundaries.insert(boundary_id);
  }
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::set_body_force(
    std::unique_ptr<Function<dim>> body_force)
  {
    AssertDimension(body_force->n_components, dim);

    this->body_force = std::move(body_force);
  }
<a name="step_67-Localevaluators"></a>
<h4>Local evaluators</h4>

Now we proceed to the local evaluators for the Euler problem. The
evaluators are relatively simple and follow what has been presented in
@ref step_37 "step-37", @ref step_48 "step-48", or @ref step_59
"step-59". The first notable difference is the fact that we use an
FEEvaluation with a non-standard number of quadrature points. Whereas we
previously always set the number of quadrature points to equal the
polynomial degree plus one (ensuring exact integration on affine element
shapes), we now set the number of quadrature points as a separate
variable (e.g. the polynomial degree plus two or three halves of the
polynomial degree) to more accurately handle nonlinear terms. Since the
evaluator is fed with the appropriate loop lengths via the template
argument and keeps the number of quadrature points in the whole cell in
a member variable, we now automatically operate on the more accurate
formula without further changes.
The second difference is due to the fact that we are now evaluating a
multi-component system, as opposed to the scalar systems considered
previously. The matrix-free framework provides several ways to handle the
multi-component case. The variant shown here utilizes an FEEvaluation
object with multiple components embedded into it, specified by the fourth
template argument `dim + 2` for the components in the Euler system. As a
consequence, the return type of FEEvaluation::get_value() is not a scalar
any more (that would return a VectorizedArray type, collecting data from
several elements), but a Tensor of `dim+2` components. The functionality
is otherwise similar to the scalar case; it is handled internally by a
specialization of the evaluator for multiple components. An alternative
variant would have been to use several FEEvaluation objects, a scalar one
for the density, a vector-valued one with `dim` components for the
momentum, and another scalar evaluator for the energy. To ensure that
those components point to the correct part of the solution, the
constructor of FEEvaluation takes three optional integer arguments after
the required MatrixFree field, namely the number of the DoFHandler for
multi-DoFHandler systems (taking the first by default), the number of the
quadrature point in case there are multiple Quadrature objects (see more
below), and as a third argument the component within a vector system. As
we have a single vector for all components, we would go with the third
argument, and set it to `0` for the density, `1` for the vector-valued
momentum, and `dim+1` for the energy slot. FEEvaluation then picks the
appropriate subrange of the solution vector during
FEEvaluation::read_dof_values() and
FEEvaluation::distributed_local_to_global() or the more compact
FEEvaluation::gather_evaluate() and FEEvaluation::integrate_scatter()
calls.
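A minimal sketch of what this looks like in code (using the names `data`,
`src`, `dst`, and `cell` as they appear in this class; the complete
version is the `local_apply_cell()` function below):

  // An evaluator with dim + 2 components: get_value(q) returns a
  // Tensor<1, dim + 2, VectorizedArray<Number>> with all conserved
  // variables of the current cell batch at quadrature point q.
  FEEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi(data);
  phi.reinit(cell);
  phi.gather_evaluate(src, EvaluationFlags::values);
  for (const unsigned int q : phi.quadrature_point_indices())
    phi.submit_gradient(euler_flux<dim>(phi.get_value(q)), q);
  phi.integrate_scatter(EvaluationFlags::gradients, dst);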
When it comes to the evaluation of the body force vector, we distinguish
between two cases for efficiency reasons: In case we have a constant
function (derived from Functions::ConstantFunction), we can precompute
the value outside the loop over quadrature points and simply use the
value everywhere. For a more general function, we instead need to call
the `evaluate_function()` method we provided above; this path is more
expensive because we need to access the memory associated with the
quadrature point data.
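The distinction is made with a `dynamic_cast`, following the pattern at
the top of the next function (shown here in isolation for clarity):

  // A Functions::ConstantFunction does not depend on the evaluation point,
  // so its value can be queried once at an arbitrary (default) point.
  const Functions::ConstantFunction<dim> *constant_function =
    dynamic_cast<Functions::ConstantFunction<dim> *>(body_force.get());

  Tensor<1, dim, VectorizedArray<Number>> constant_body_force;
  if (constant_function)
    constant_body_force = evaluate_function<dim, Number, dim>(
      *constant_function, Point<dim, VectorizedArray<Number>>());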
The rest follows the other tutorial programs. Since we have implemented
all physics for the Euler equations in the separate `euler_flux()`
function, all we have to do here is to call this function
given the current solution evaluated at quadrature points, returned by
`phi.get_value(q)`, and tell the FEEvaluation object to queue the flux
for testing it by the gradients of the shape functions (which is a Tensor
of outer `dim+2` components, each holding a tensor of `dim` components
for the @f$x,y,z@f$ component of the Euler flux). One final thing worth
mentioning is the order in which we queue the data for testing by the
value of the test function, `phi.submit_value()`, in case we are given an
external function: We must do this after calling `phi.get_value(q)`,
because `get_value()` (reading the solution) and `submit_value()`
(queuing the value for multiplication by the test function and summation
over quadrature points) access the same underlying data field. Here it
would be easy to achieve also without a temporary variable `w_q` since
there is no mixing between values and gradients. For more complicated
setups, one has to first copy out e.g. both the value and gradient at a
quadrature point and then queue results again by
FEEvaluation::submit_value() and FEEvaluation::submit_gradient().

As a final note, we mention that we do not use the first MatrixFree
argument of this function, which is a call-back from MatrixFree::loop().
The interface imposes the present list of arguments, but since we are in
a member function where the MatrixFree object is already available as the
`data` variable, we stick with that to avoid confusion.
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::local_apply_cell(
    const MatrixFree<dim, Number>                    &,
    LinearAlgebra::distributed::Vector<Number>       &dst,
    const LinearAlgebra::distributed::Vector<Number> &src,
    const std::pair<unsigned int, unsigned int>      &cell_range) const
  {
    FEEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi(data);

    Tensor<1, dim, VectorizedArray<Number>> constant_body_force;
    const Functions::ConstantFunction<dim> *constant_function =
      dynamic_cast<Functions::ConstantFunction<dim> *>(body_force.get());

    if (constant_function)
      constant_body_force = evaluate_function<dim, Number, dim>(
        *constant_function, Point<dim, VectorizedArray<Number>>());

    for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
      {
        phi.reinit(cell);
        phi.gather_evaluate(src, EvaluationFlags::values);

        for (const unsigned int q : phi.quadrature_point_indices())
          {
            const auto w_q = phi.get_value(q);
            phi.submit_gradient(euler_flux<dim>(w_q), q);
            if (body_force.get() != nullptr)
              {
                const Tensor<1, dim, VectorizedArray<Number>> force =
                  constant_function ? constant_body_force :
                                      evaluate_function<dim, Number, dim>(
                                        *body_force, phi.quadrature_point(q));

                Tensor<1, dim + 2, VectorizedArray<Number>> forcing;
                for (unsigned int d = 0; d < dim; ++d)
                  forcing[d + 1] = w_q[0] * force[d];
                for (unsigned int d = 0; d < dim; ++d)
                  forcing[dim + 1] += force[d] * w_q[d + 1];

                phi.submit_value(forcing, q);
              }
          }

        phi.integrate_scatter(((body_force.get() != nullptr) ?
                                 EvaluationFlags::values :
                                 EvaluationFlags::nothing) |
                                EvaluationFlags::gradients,
                              dst);
      }
  }
The next function concerns the computation of integrals on interior
faces, where we need evaluators from both cells adjacent to the face. We
associate the variable `phi_m` with the solution component @f$\mathbf{w}^-@f$
and the variable `phi_p` with the solution component @f$\mathbf{w}^+@f$. We
distinguish the two sides in the constructor of FEFaceEvaluation by its
second argument, with `true` for the interior side and `false` for the
exterior side, with interior and exterior denoting the orientation with
respect to the normal vector.

Note that the calls FEFaceEvaluation::gather_evaluate() and
FEFaceEvaluation::integrate_scatter() used below combine the access to
the vectors and the sum factorization parts. This combined operation not
only saves a
line of code, but also contains an important optimization: Given that we
use a nodal basis in terms of the Lagrange polynomials in the points of
the Gauss--Lobatto quadrature formula, only @f$(p+1)^{d-1}@f$ out of the
@f$(p+1)^d@f$ basis functions evaluate to non-zero on each face. Thus, the
evaluator only accesses the necessary data in the vector and skips the
parts which are multiplied by zero. If we had first read the vector, we
would have needed to load all data from the vector, as the call in
isolation would not know what data is required in subsequent
operations. If we request both the values and the derivatives, indeed all
@f$(p+1)^d@f$ vector entries for each component are needed, as the normal
derivative is nonzero for all basis functions.

The arguments to the evaluators as well as the procedure are similar to
the cell evaluation. We again use the more accurate (over-)integration
scheme due to the nonlinear terms, specified as the third template
argument in the list. At the quadrature points, we then go to our
free-standing function for the numerical flux. It receives the solution
evaluated at quadrature points from both sides (i.e., @f$\mathbf{w}^-@f$ and
@f$\mathbf{w}^+@f$), as well as the normal vector onto the minus side. As
explained above, the numerical flux is already multiplied by the normal
vector from the minus side. We need to switch the sign because the
boundary term comes with a minus sign in the weak form derived in the
introduction. The flux is then queued for testing both on the minus side
and on the plus side, with switched sign as the normal vector from the
plus side is exactly opposed to the one from the minus side.
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::local_apply_face(
    const MatrixFree<dim, Number>                    &,
    LinearAlgebra::distributed::Vector<Number>       &dst,
    const LinearAlgebra::distributed::Vector<Number> &src,
    const std::pair<unsigned int, unsigned int>      &face_range) const
  {
    FEFaceEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi_m(data,
                                                                      true);
    FEFaceEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi_p(data,
                                                                      false);

    for (unsigned int face = face_range.first; face < face_range.second; ++face)
      {
        phi_p.reinit(face);
        phi_p.gather_evaluate(src, EvaluationFlags::values);

        phi_m.reinit(face);
        phi_m.gather_evaluate(src, EvaluationFlags::values);

        for (const unsigned int q : phi_m.quadrature_point_indices())
          {
            const auto numerical_flux =
              euler_numerical_flux<dim>(phi_m.get_value(q),
                                        phi_p.get_value(q),
                                        phi_m.normal_vector(q));
            phi_m.submit_value(-numerical_flux, q);
            phi_p.submit_value(numerical_flux, q);
          }

        phi_p.integrate_scatter(EvaluationFlags::values, dst);
        phi_m.integrate_scatter(EvaluationFlags::values, dst);
      }
  }
For faces located at the boundary, we need to impose the appropriate
boundary conditions. In this tutorial program, we implement four cases as
mentioned above. (A fifth case, for supersonic outflow conditions, is
discussed in the "Results" section below.) The discontinuous Galerkin
method imposes boundary conditions not as constraints, but only
weakly. Thus, the various conditions are imposed by finding an appropriate
<i>exterior</i> quantity @f$\mathbf{w}^+@f$ that is then handed to the
numerical flux function also used for the interior faces. In essence,
we "pretend" a state on the outside of the domain in such a way that
if that were reality, the solution of the PDE would satisfy the boundary
conditions we want.
For wall boundaries, we need to impose a no-normal-flux condition on the
momentum variable, whereas we use a Neumann condition for the density and
energy with @f$\rho^+ = \rho^-@f$ and @f$E^+ = E^-@f$. To achieve the no-normal
flux condition, we set the exterior values to the interior values and
subtract two times the velocity in wall-normal direction, i.e., in the
direction of the normal vector.

For inflow boundaries, we simply set the given Dirichlet data
@f$\mathbf{w}_\mathrm{D}@f$ as a boundary value. An alternative would have been
to use @f$\mathbf{w}^+ = -\mathbf{w}^- + 2 \mathbf{w}_\mathrm{D}@f$, the
so-called mirror principle.
The imposition of outflow is essentially a Neumann condition, i.e.,
setting @f$\mathbf{w}^+ = \mathbf{w}^-@f$. For the case of subsonic outflow,
we still need to impose a value for the energy, which we derive from the
respective function. A special step is needed for the case of
<i>backflow</i>, i.e., the case where there is a momentum flux into the
domain on the Neumann portion. According to the literature (a fact that can
be derived by appropriate energy arguments), we must switch to another
variant of the flux on inflow parts, see Gravemeier, Comerford,
Yoshihara, Ismail, Wall, "A novel formulation for Neumann inflow
conditions in biomechanics", Int. J. Numer. Meth. Biomed. Eng., vol. 28
(2012). Here, the momentum term needs to be added once again, which
corresponds to removing the flux contribution on the momentum
variables. We do this in a post-processing step, and only for the case
when we both are at an outflow boundary and the dot product between the
normal vector and the momentum (or, equivalently, velocity) is
negative. As we work on data of several quadrature points at once for
SIMD vectorizations, we here need to explicitly loop over the array
entries of the SIMD array.
In the implementation below, we check for the various types
of boundaries at the level of quadrature points. Of course, we could also
have moved the decision out of the quadrature point loop and treat entire
faces as of the same kind, which avoids some map/set lookups in the inner
loop over quadrature points. However, the loss of efficiency is hardly
noticeable, so we opt for the simpler code here. Also note that the final
`else` clause will catch the case when some part of the boundary was not
assigned any boundary condition via `EulerOperator::set_..._boundary(...)`.
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::local_apply_boundary_face(
    const MatrixFree<dim, Number>                    &,
    LinearAlgebra::distributed::Vector<Number>       &dst,
    const LinearAlgebra::distributed::Vector<Number> &src,
    const std::pair<unsigned int, unsigned int>      &face_range) const
  {
    FEFaceEvaluation<dim, degree, n_points_1d, dim + 2, Number> phi(data, true);

    for (unsigned int face = face_range.first; face < face_range.second; ++face)
      {
        phi.reinit(face);
        phi.gather_evaluate(src, EvaluationFlags::values);

        for (const unsigned int q : phi.quadrature_point_indices())
          {
            const auto w_m    = phi.get_value(q);
            const auto normal = phi.normal_vector(q);

            auto rho_u_dot_n = w_m[1] * normal[0];
            for (unsigned int d = 1; d < dim; ++d)
              rho_u_dot_n += w_m[1 + d] * normal[d];

            bool at_outflow = false;

            Tensor<1, dim + 2, VectorizedArray<Number>> w_p;
            const auto boundary_id = data.get_boundary_id(face);
            if (wall_boundaries.find(boundary_id) != wall_boundaries.end())
              {
                w_p[0] = w_m[0];
                for (unsigned int d = 0; d < dim; ++d)
                  w_p[d + 1] = w_m[d + 1] - 2. * rho_u_dot_n * normal[d];
                w_p[dim + 1] = w_m[dim + 1];
              }
            else if (inflow_boundaries.find(boundary_id) !=
                     inflow_boundaries.end())
              w_p =
                evaluate_function(*inflow_boundaries.find(boundary_id)->second,
                                  phi.quadrature_point(q));
            else if (subsonic_outflow_boundaries.find(boundary_id) !=
                     subsonic_outflow_boundaries.end())
              {
                w_p          = w_m;
                w_p[dim + 1] = evaluate_function(
                  *subsonic_outflow_boundaries.find(boundary_id)->second,
                  phi.quadrature_point(q),
                  dim + 1);
                at_outflow = true;
              }
            else
              AssertThrow(false,
                          ExcMessage("Unknown boundary id, did "
                                     "you set a boundary condition for "
                                     "this part of the domain boundary?"));

            auto flux = euler_numerical_flux<dim>(w_m, w_p, normal);

            if (at_outflow)
              for (unsigned int v = 0; v < VectorizedArray<Number>::size(); ++v)
                {
                  if (rho_u_dot_n[v] < -1e-12)
                    for (unsigned int d = 0; d < dim; ++d)
                      flux[d + 1][v] = 0.;
                }

            phi.submit_value(-flux, q);
          }

        phi.integrate_scatter(EvaluationFlags::values, dst);
      }
  }
The next function implements the inverse mass matrix operation. The
algorithms and rationale have been discussed extensively in the
introduction, so we here limit ourselves to the technicalities of the
MatrixFreeOperators::CellwiseInverseMassMatrix class. It does similar
operations as the forward evaluation of the mass matrix, except with a
different interpolation matrix, representing the inverse @f$S^{-1}@f$
factors. These represent a change of basis from the specified basis (in
this case, the Lagrange basis in the points of the Gauss--Lobatto
quadrature formula) to the Lagrange basis in the points of the Gauss
quadrature formula. In the latter basis, we can apply the inverse of the
point-wise `JxW` factor, i.e., the quadrature weight times the
determinant of the Jacobian of the mapping from reference to real
coordinates. Once this is done, the basis is changed back to the nodal
Gauss--Lobatto basis again. All of these operations are done by the
`apply()` function below. What we need to provide is the local fields to
operate on (which we extract from the global vector by an FEEvaluation
object) and write the results back to the destination vector of the mass
matrix operation.

One thing to note is that we added two integer arguments (that are
optional) to the constructor of FEEvaluation, the first being 0 and
selecting the DoFHandler (of which we only have one) and the second
being 1 to make the quadrature formula selection. As we use the
quadrature formula 0 for the over-integration of
nonlinear terms, we use the formula 1 with the default @f$p+1@f$ (or
`fe_degree+1` in terms of the variable name) points for the mass
matrix. This leads to square contributions to the mass matrix and ensures
exact integration, as explained in the introduction.
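Concretely, the two objects described in the previous paragraphs are set
up as follows in the function below:

  // DoFHandler index 0, quadrature formula index 1 (fe_degree + 1 points),
  // and the cell-wise inverse mass matrix acting on dim + 2 components.
  FEEvaluation<dim, degree, degree + 1, dim + 2, Number> phi(data, 0, 1);
  MatrixFreeOperators::CellwiseInverseMassMatrix<dim, degree, dim + 2, Number>
    inverse(phi);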
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::local_apply_inverse_mass_matrix(
    const MatrixFree<dim, Number>                    &,
    LinearAlgebra::distributed::Vector<Number>       &dst,
    const LinearAlgebra::distributed::Vector<Number> &src,
    const std::pair<unsigned int, unsigned int>      &cell_range) const
  {
    FEEvaluation<dim, degree, degree + 1, dim + 2, Number> phi(data, 0, 1);
    MatrixFreeOperators::CellwiseInverseMassMatrix<dim, degree, dim + 2, Number>
      inverse(phi);

    for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
      {
        phi.reinit(cell);
        phi.read_dof_values(src);

        inverse.apply(phi.begin_dof_values(), phi.begin_dof_values());

        phi.set_dof_values(dst);
      }
  }
<a name="step_67-Theapplyandrelatedfunctions"></a>
<h4>The apply() and related functions</h4>

We now come to the function which implements the evaluation of the Euler
operator as a whole, i.e., @f$\mathcal M^{-1} \mathcal L(t, \mathbf{w})@f$,
calling into the local evaluators presented above. The steps should be
clear from the previous code. One thing to note is that we need to adjust
the time in the functions we have associated with the various parts of
the boundary, in order to be consistent with the equation in case the
boundary data is time-dependent. Then, we call MatrixFree::loop() to
perform the cell and face integrals, including the necessary ghost data
exchange in the `src` vector. The seventh argument to the function,
`true`, specifies that we want to zero the `dst` vector as part of the
loop, before we start accumulating integrals into it. This variant is
preferred over explicitly calling `dst = 0.;` before the loop as the
zeroing operation is done on a subrange of the vector in parts that are
written by the integrals nearby. This enhances data locality and allows
for caching, saving one roundtrip of vector data to main memory and
enhancing performance. The last two arguments to the loop determine which
data is exchanged: Since we only access the values of the shape functions
on faces, typical of first-order hyperbolic problems, and since we have
a nodal basis with nodes at the reference element surface, we only need
to exchange those parts. This again saves precious memory bandwidth.
Once the spatial operator @f$\mathcal L@f$ is applied, we need to make a
second round and apply the inverse mass matrix. Here, we call
MatrixFree::cell_loop() since only cell integrals appear. The cell loop
is cheaper than the full loop as access only goes to the degrees of
freedom associated with the locally owned cells, which is simply the
locally owned degrees of freedom for DG discretizations. Thus, no ghost
exchange is needed here.

Around all these functions, we put timer scopes to record the
computational time for statistics about the contributions of the various
parts.
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::apply(
    const double                                      current_time,
    const LinearAlgebra::distributed::Vector<Number> &src,
    LinearAlgebra::distributed::Vector<Number>       &dst) const
  {
    {
      TimerOutput::Scope t(timer, "apply - integrals");

      for (auto &i : inflow_boundaries)
        i.second->set_time(current_time);
      for (auto &i : subsonic_outflow_boundaries)
        i.second->set_time(current_time);

      data.loop(&EulerOperator::local_apply_cell,
                &EulerOperator::local_apply_face,
                &EulerOperator::local_apply_boundary_face,
                this,
                dst,
                src,
                true,
                MatrixFree<dim, Number>::DataAccessOnFaces::values,
                MatrixFree<dim, Number>::DataAccessOnFaces::values);
    }

    {
      TimerOutput::Scope t(timer, "apply - inverse mass");

      data.cell_loop(&EulerOperator::local_apply_inverse_mass_matrix,
                     this,
                     dst,
                     dst);
    }
  }
Let us move to the function that does an entire stage of a Runge--Kutta
update. It calls EulerOperator::apply() followed by some updates
to the vectors, namely `next_ri = solution + factor_ai * k_i` and
`solution += factor_solution * k_i`. Rather than performing these
steps through the vector interfaces, we here present an alternative
strategy that is faster on cache-based architectures. As the memory
consumed by the vectors is often much larger than what fits into caches,
the data has to effectively come from the slow RAM memory. The situation
can be improved by loop fusion, i.e., performing both the updates to
`next_ri` and `solution` within a single sweep. In that case, we would
read the two vectors `vec_ki` and `solution` and write into `next_ri` and
`solution`, compared to at least 4 reads and two writes in the baseline
case. Here, we go one step further and perform the loop immediately when
the mass matrix inversion has finished on a part of the
vector. MatrixFree::cell_loop() provides a mechanism to attach an
`std::function` both before the loop over cells first touches a vector
entry (which we do not use here, but is e.g. used for zeroing the vector)
and a second `std::function` to be called after the loop last touches
an entry. The callback is in form of a range over the given vector (in
terms of the local index numbering in the MPI universe) that can be
addressed by `local_element()` functions.

For this second callback, we create a lambda that works on a range and
write the respective update on this range. Ideally, we would add the
`DEAL_II_OPENMP_SIMD_PRAGMA` before the local loop to suggest to the
compiler to SIMD parallelize this loop (which means in practice that we
ensure that there is no overlap, also called aliasing, between the index
ranges of the pointers we use inside the loops). It turns out that at the
time of this writing, GCC 7.2 fails to compile an OpenMP pragma inside a
lambda function, so we comment this pragma out below. If your compiler is
newer, you should be able to uncomment these lines again.

Note that we select a different code path for the last
Runge--Kutta stage when we do not need to update the `next_ri`
vector. This strategy gives a considerable speedup. Whereas the inverse
mass matrix and vector updates take more than 60% of the computational
time with default vector updates on a 40-core machine, the percentage is
around 35% with the more optimized variant. In other words, this is a
speedup of around a third.
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::perform_stage(
    const Number                                      current_time,
    const Number                                      factor_solution,
    const Number                                      factor_ai,
    const LinearAlgebra::distributed::Vector<Number> &current_ri,
    LinearAlgebra::distributed::Vector<Number>       &vec_ki,
    LinearAlgebra::distributed::Vector<Number>       &solution,
    LinearAlgebra::distributed::Vector<Number>       &next_ri) const
  {
    {
      TimerOutput::Scope t(timer, "rk_stage - integrals L_h");

      for (auto &i : inflow_boundaries)
        i.second->set_time(current_time);
      for (auto &i : subsonic_outflow_boundaries)
        i.second->set_time(current_time);

      data.loop(&EulerOperator::local_apply_cell,
                &EulerOperator::local_apply_face,
                &EulerOperator::local_apply_boundary_face,
                this,
                vec_ki,
                current_ri,
                true,
                MatrixFree<dim, Number>::DataAccessOnFaces::values,
                MatrixFree<dim, Number>::DataAccessOnFaces::values);
    }

    {
      TimerOutput::Scope t(timer, "rk_stage - inv mass + vec upd");
      data.cell_loop(
        &EulerOperator::local_apply_inverse_mass_matrix,
        this,
        next_ri,
        vec_ki,
        std::function<void(const unsigned int, const unsigned int)>(),
        [&](const unsigned int start_range, const unsigned int end_range) {
          const Number ai = factor_ai;
          const Number bi = factor_solution;
          if (ai == Number())
            {
              /* DEAL_II_OPENMP_SIMD_PRAGMA */
              for (unsigned int i = start_range; i < end_range; ++i)
                {
                  const Number k_i          = next_ri.local_element(i);
                  const Number sol_i        = solution.local_element(i);
                  solution.local_element(i) = sol_i + bi * k_i;
                }
            }
          else
            {
              /* DEAL_II_OPENMP_SIMD_PRAGMA */
              for (unsigned int i = start_range; i < end_range; ++i)
                {
                  const Number k_i          = next_ri.local_element(i);
                  const Number sol_i        = solution.local_element(i);
                  solution.local_element(i) = sol_i + bi * k_i;
                  next_ri.local_element(i)  = sol_i + ai * k_i;
                }
            }
        });
    }
  }
Having discussed the implementation of the functions that deal with
advancing the solution by one time step, let us now move to functions
that implement other, ancillary operations. Specifically, these are
functions that compute projections, evaluate errors, and compute the speed
of information transport on a cell.

The first of these functions is essentially a replacement for
VectorTools::project(), just much faster because it is specialized for DG
elements where there is no need to set up and solve a linear system, as
each element has independent basis functions. The reason why we show the
code here, besides a small speedup of this non-critical operation, is that
it shows additional functionality provided by
MatrixFreeOperators::CellwiseInverseMassMatrix.
The projection operation works as follows: If we denote the matrix of
shape functions evaluated at quadrature points by @f$S@f$, the projection on
cell @f$K@f$ is an operation of the form @f$\underbrace{S J^K S^\mathrm
T}_{\mathcal M^K} \mathbf{w}^K = S J^K
\tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}@f$, where @f$J^K@f$ is the diagonal
matrix containing the determinant of the Jacobian times the quadrature
weight (JxW), @f$\mathcal M^K@f$ is the cell-wise mass matrix, and
@f$\tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}@f$ is the evaluation of the
field to be projected onto quadrature points. (In reality the matrix @f$S@f$
has additional structure through the tensor product, as explained in the
introduction.) This system can now equivalently be written as
@f$\mathbf{w}^K = \left(S J^K S^\mathrm T\right)^{-1} S J^K
\tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q} = S^{-\mathrm T}
\left(J^K\right)^{-1} S^{-1} S J^K
\tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}@f$. Now, the term @f$S^{-1} S@f$ and
then @f$\left(J^K\right)^{-1} J^K@f$ cancel, resulting in the final
expression @f$\mathbf{w}^K = S^{-\mathrm T}
\tilde{\mathbf{w}}(\mathbf{x}_q)_{q=1:n_q}@f$. This operation is
implemented by
MatrixFreeOperators::CellwiseInverseMassMatrix::transform_from_q_points_to_basis().
The name is derived from the fact that this projection is simply
the multiplication by @f$S^{-\mathrm T}@f$, a basis change from the
nodal basis in the points of the Gaussian quadrature to the given finite
element basis. Note that we call FEEvaluation::set_dof_values() to write
the result into the vector, overwriting previous content, rather than
accumulating the results as typical in integration tasks -- we can do
this because every vector entry has contributions from only a single
cell for discontinuous Galerkin discretizations.
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::project(
    const Function<dim>                        &function,
    LinearAlgebra::distributed::Vector<Number> &solution) const
  {
    FEEvaluation<dim, degree, degree + 1, dim + 2, Number> phi(data, 0, 1);
    MatrixFreeOperators::CellwiseInverseMassMatrix<dim, degree, dim + 2, Number>
      inverse(phi);
    solution.zero_out_ghost_values();
    for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
      {
        phi.reinit(cell);
        for (const unsigned int q : phi.quadrature_point_indices())
          phi.submit_dof_value(evaluate_function(function,
                                                 phi.quadrature_point(q)),
                               q);
        inverse.transform_from_q_points_to_basis(dim + 2,
                                                 phi.begin_dof_values(),
                                                 phi.begin_dof_values());
        phi.set_dof_values(solution);
      }
  }
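A typical call, as it is used when the simulation is initialized (a
sketch; `euler_operator` and `solution` as in the rest of the program):

  // Fill the solution vector with the analytic state at t = 0.
  euler_operator.project(ExactSolution<dim>(0.), solution);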
 * The next function again repeats functionality also provided by the
 * VectorTools::integrate_difference() function, but we use the explicit code
 * to highlight how the vectorization across several cells works and how to
 * accumulate results via that interface: Recall that each <i>lane</i> of the
 * vectorized array holds data from a different cell. After the loop over all
 * cell batches that are owned by the current MPI process, we would still need
 * to sum across the entries in the SIMD array. However, such a procedure is
 * not stable, as the SIMD array could in fact not hold valid data for all its
 * lanes. This happens when the number of locally owned cells is not a
 * multiple of the SIMD width. To avoid invalid data, we must explicitly skip
 * those invalid lanes when accessing the data. While one could imagine that
 * we could make it work by simply setting the empty lanes to zero (and thus,
 * not contribute to a sum), the situation is more complicated than that: What
 * if we were to compute a velocity out of the momentum? Then, we would need
 * to divide by the density, which is zero -- the result would consequently be
 * NaN and contaminate the result. This trap is avoided by accumulating the
 * results from the valid SIMD range as we loop through the cell batches,
 * using MatrixFree::n_active_entries_per_cell_batch(), which gives us the
 * number of lanes with valid data. It equals VectorizedArray::size() on
 * most cells, but can be less on the last cell batch if the number of cells
 * has a remainder compared to the SIMD width.
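 *
 * To see the lane-masking logic in isolation, the following self-contained
 * sketch mimics one cell batch with an assumed SIMD width of 4 and made-up
 * per-lane data; only the idea of skipping the invalid lanes via the count of
 * valid entries mirrors what the function below does with
 * MatrixFree::n_active_entries_per_cell_batch():
 * @code
 * #include <algorithm>
 * #include <array>
 * #include <iostream>
 *
 * int main()
 * {
 *   constexpr unsigned int simd_width = 4; // assumed width for this sketch
 *
 *   // Pretend the last cell batch holds only 3 valid cells, so lane 3
 *   // contains junk (here: a zero density), as described above.
 *   std::array<double, simd_width> density  = {{1.0, 0.9, 1.1, 0.0}};
 *   std::array<double, simd_width> momentum = {{0.4, 0.5, 0.3, 0.2}};
 *   const unsigned int n_valid_lanes = 3;
 *
 *   double max_velocity = 0.;
 *   for (unsigned int v = 0; v < n_valid_lanes; ++v) // skip invalid lanes
 *     max_velocity = std::max(max_velocity, momentum[v] / density[v]);
 *
 *   std::cout << "max velocity over valid lanes: " << max_velocity << '\n';
 * }
 * @endcode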
 * template <int dim, int degree, int n_points_1d>
 * std::array<double, 3>
 * EulerOperator<dim, degree, n_points_1d>::compute_errors(
 *
 *   double errors_squared[3] = {};
 *
 *   for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
 *     {
 *       for (const unsigned int q : phi.quadrature_point_indices())
 *         {
 *           const auto error =
 *             evaluate_function(function, phi.quadrature_point(q)) -
 *             phi.get_value(q);
 *           const auto JxW = phi.JxW(q);
 *
 *           local_errors_squared[0] += error[0] * error[0] * JxW;
 *           for (unsigned int d = 0; d < dim; ++d)
 *             local_errors_squared[1] += (error[d + 1] * error[d + 1]) * JxW;
 *           local_errors_squared[2] += (error[dim + 1] * error[dim + 1]) * JxW;
 *         }
 *       for (unsigned int v = 0;
 *            v < data.n_active_entries_per_cell_batch(cell);
 *            ++v)
 *         for (unsigned int d = 0; d < 3; ++d)
 *           errors_squared[d] += local_errors_squared[d][v];
 *     }
 *
 *   std::array<double, 3> errors;
 *   for (unsigned int d = 0; d < 3; ++d)
 *     errors[d] = std::sqrt(errors_squared[d]);
 * This final function of the EulerOperator class is used to estimate the
 * transport speed, scaled by the mesh size, that is relevant for setting
 * the time step size in the explicit time integrator. In the Euler
 * equations, there are two speeds of transport, namely the convective
 * velocity @f$\mathbf{u}@f$ and the propagation of sound waves with sound
 * speed @f$c = \sqrt{\gamma p/\rho}@f$ relative to the medium moving at
 * velocity @f$\mathbf u@f$.
 *
 * In the formula for the time step size, we are interested not in
 * these absolute speeds, but in the amount of time it takes for
 * information to cross a single cell. For information transported along with
 * the medium, @f$\mathbf u@f$ is scaled by the mesh size,
 * so an estimate of the maximal velocity can be obtained by computing
 * @f$\|J^{-\mathrm T} \mathbf{u}\|_\infty@f$, where @f$J@f$ is the Jacobian of the
 * transformation from real to the reference domain. Note that
 * FEEvaluation::inverse_jacobian() already returns the inverse and transposed
 * Jacobian, representing the metric term from real to reference
 * coordinates, so we do not need to transpose it again. We store this limit
 * in the variable `convective_limit` in the code below.
 * The sound propagation is isotropic, so we need to take mesh sizes in any
 * direction into account. The appropriate mesh size scaling is then given
 * by the minimal singular value of @f$J@f$ or, equivalently, the maximal
 * singular value of @f$J^{-1}@f$. Note that one could approximate this quantity
 * by the minimal distance between vertices of a cell when ignoring curved
 * cells. To get the maximal singular value of the Jacobian, the general
 * strategy would be some LAPACK function. Since all we need here is an
 * estimate, we can avoid the hassle of decomposing a tensor of
 * VectorizedArray data into several matrices and calling into the LAPACK
 * eigenvalue function without vectorization, and instead use a few
 * iterations (five in the code below) of the power method applied to
 * @f$J^{-1}J^{-\mathrm T}@f$. The speed of convergence of this method depends
 * on the ratio of the largest to the next largest eigenvalue and the
 * initial guess, which is the vector of all ones. This might suggest that
 * we get slow convergence on cells close to a cube shape where all
 * lengths are almost the same. However, this slow convergence means that
 * the result will sit between the two largest singular values, which both
 * are close to the maximal value anyway. In all other cases, convergence
 * will be quick. Thus, we can merely hardcode 5 iterations here and be
 * confident that the result is good.
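 *
 * As a standalone illustration of this estimate, here is a minimal sketch of
 * the power iteration applied to @f$J^{-1}J^{-\mathrm T}@f$ for a hand-picked
 * 2x2 matrix; the matrix entries are invented for the example, and a real
 * implementation works on VectorizedArray data as in the function below:
 * @code
 * #include <algorithm>
 * #include <array>
 * #include <cmath>
 * #include <iostream>
 *
 * // Multiply a 2x2 matrix (or its transpose) with a vector.
 * std::array<double, 2> mat_vec(const double m[2][2],
 *                               const std::array<double, 2> &v,
 *                               const bool transpose)
 * {
 *   if (transpose)
 *     return {{m[0][0] * v[0] + m[1][0] * v[1],
 *              m[0][1] * v[0] + m[1][1] * v[1]}};
 *   return {{m[0][0] * v[0] + m[0][1] * v[1],
 *            m[1][0] * v[0] + m[1][1] * v[1]}};
 * }
 *
 * int main()
 * {
 *   // Invented inverse Jacobian of a strongly stretched cell.
 *   const double jinv[2][2] = {{4.0, 0.5}, {0.1, 1.0}};
 *
 *   // Five power iterations on jinv^T * jinv, starting from the vector of
 *   // ones and rescaling by the infinity norm in every step.
 *   std::array<double, 2> v = {{1., 1.}};
 *   for (unsigned int it = 0; it < 5; ++it)
 *     {
 *       v = mat_vec(jinv, mat_vec(jinv, v, false), true);
 *       const double norm = std::max(std::abs(v[0]), std::abs(v[1]));
 *       v[0] /= norm;
 *       v[1] /= norm;
 *     }
 *
 *   // Rayleigh-quotient-like estimate of the largest singular value of jinv.
 *   const std::array<double, 2> jv = mat_vec(jinv, v, false);
 *   const double max_sv = std::sqrt((jv[0] * jv[0] + jv[1] * jv[1]) /
 *                                   (v[0] * v[0] + v[1] * v[1]));
 *   std::cout << "estimated max singular value of J^{-1}: " << max_sv << '\n';
 * }
 * @endcode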
 * template <int dim, int degree, int n_points_1d>
 * double EulerOperator<dim, degree, n_points_1d>::compute_cell_transport_speed(
 *
 *   Number max_transport = 0;
 *
 *   for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
 *     {
 *       for (const unsigned int q : phi.quadrature_point_indices())
 *         {
 *           const auto solution = phi.get_value(q);
 *           const auto velocity = euler_velocity<dim>(solution);
 *           const auto pressure = euler_pressure<dim>(solution);
 *
 *           const auto inverse_jacobian  = phi.inverse_jacobian(q);
 *           const auto convective_speed  = inverse_jacobian * velocity;
 *           for (unsigned int d = 0; d < dim; ++d)
 *             convective_limit =
 *               std::max(convective_limit, std::abs(convective_speed[d]));
 *
 *           const auto speed_of_sound =
 *             std::sqrt(gamma * pressure * (1. / solution[0]));
 *
 *           for (unsigned int d = 0; d < dim; ++d)
 *             eigenvector[d] = 1.;
 *           for (unsigned int i = 0; i < 5; ++i)
 *             {
 *               eigenvector = transpose(inverse_jacobian) *
 *                             (inverse_jacobian * eigenvector);
 *               for (unsigned int d = 0; d < dim; ++d)
 *                 eigenvector_norm =
 *                   std::max(eigenvector_norm, std::abs(eigenvector[d]));
 *               eigenvector /= eigenvector_norm;
 *             }
 *           const auto jac_times_ev   = inverse_jacobian * eigenvector;
 *           const auto max_eigenvalue = std::sqrt(
 *             (jac_times_ev * jac_times_ev) / (eigenvector * eigenvector));
 *           local_max = std::max(local_max,
 *                                max_eigenvalue * speed_of_sound +
 *                                  convective_limit);
 *         }
 *
 *       Similarly to the previous function, we must make sure to accumulate
 *       speed only on the valid cells of a cell batch.
 *
 *       for (unsigned int v = 0;
 *            v < data.n_active_entries_per_cell_batch(cell);
 *            ++v)
 *         max_transport = std::max(max_transport, local_max[v]);
 *     }
 *
 *   return max_transport;
 * <a name="step_67-TheEulerProblemclass"></a>
 * <h3>The EulerProblem class</h3>
 *
 * This class combines the EulerOperator class with the time integrator and
 * the usual global data structures needed to actually run the simulations
 * of the Euler problem.
 *
 * The member variables are a triangulation, a finite element, a mapping (to
 * create high-order curved surfaces, see e.g. @ref step_10 "step-10"), and a
 * DoFHandler to describe the degrees of freedom. In addition, we keep an
 * instance of the EulerOperator described above around, which will do all
 * heavy lifting in terms of integrals, and some parameters for time
 * integration like the current time or the time step size.
 *
 * Furthermore, we use a PostProcessor instance to write some additional
 * information to the output file, in similarity to what was done in
 * @ref step_33 "step-33". The interface of the DataPostprocessor class is
 * intuitive, requiring us to provide information about what needs to be
 * evaluated (typically only the values of the solution, except for the
 * Schlieren plot that we only enable in 2d where it makes sense), and the
 * names of what gets evaluated. Note that it would also be possible to
 * extract most information by calculator tools within visualization programs
 * such as ParaView, but it is so much more convenient to do it already when
 * writing the output.
 * template <int dim>
 * class EulerProblem
 *
 *   void make_grid_and_dofs();
 *   void output_results(const unsigned int result_number);
 *
 * #ifdef DEAL_II_WITH_P4EST
 *
 *   EulerOperator<dim, fe_degree, n_q_points_1d> euler_operator;
 *
 *   double time, time_step;
 *
 *     std::vector<Vector<double>> &computed_quantities) const override;
 *
 *   virtual std::vector<std::string> get_names() const override;
 *
 *   virtual std::vector<DataComponentInterpretation::DataComponentInterpretation>
 *   get_data_component_interpretation() const override;
 *
 *   const bool do_schlieren_plot;
 *
 * template <int dim>
 * EulerProblem<dim>::Postprocessor::Postprocessor()
 *   : do_schlieren_plot(dim == 2)
 * For the main evaluation of the field variables, we first check that the
 * lengths of the arrays equal the expected values (the lengths `2*dim+4` or
 * `2*dim+5` are derived from the sizes of the names we specify in the
 * get_names() function below). Then we loop over all evaluation points and
 * fill the respective information: First we fill the primal solution
 * variables of density @f$\rho@f$, momentum @f$\rho \mathbf{u}@f$ and energy @f$E@f$,
 * then we compute the derived velocity @f$\mathbf u@f$, the pressure @f$p@f$, the
 * speed of sound @f$c=\sqrt{\gamma p / \rho}@f$, as well as the Schlieren plot
 * showing @f$s = |\nabla \rho|^2@f$ in case it is enabled. (See @ref step_69
 * "step-69" for another example where we create a Schlieren plot.)
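 *
 * For reference, the algebra behind the derived quantities can be spelled out
 * in a few self-contained lines. The conservative state below is made up and
 * @f$\gamma = 1.4@f$ is assumed; only the relations
 * @f$p = (\gamma-1)\bigl(E - \tfrac{1}{2}\rho \|\mathbf u\|^2\bigr)@f$ and
 * @f$c = \sqrt{\gamma p/\rho}@f$ are the ones evaluated here:
 * @code
 * #include <array>
 * #include <cmath>
 * #include <iostream>
 *
 * int main()
 * {
 *   constexpr double gamma = 1.4; // assumed ratio of specific heats
 *
 *   // Made-up conservative state in 2d: density, momentum, total energy.
 *   const double                rho      = 1.0;
 *   const std::array<double, 2> momentum = {{0.4, 0.0}};
 *   const double                energy   = 2.0;
 *
 *   const std::array<double, 2> velocity = {{momentum[0] / rho,
 *                                            momentum[1] / rho}};
 *   const double kinetic = 0.5 * (momentum[0] * velocity[0] +
 *                                 momentum[1] * velocity[1]);
 *   const double pressure       = (gamma - 1.) * (energy - kinetic);
 *   const double speed_of_sound = std::sqrt(gamma * pressure / rho);
 *
 *   std::cout << "p = " << pressure << ", c = " << speed_of_sound << '\n';
 * }
 * @endcode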
 * template <int dim>
 * void EulerProblem<dim>::Postprocessor::evaluate_vector_field(
 *
 *   const unsigned int n_evaluation_points = inputs.solution_values.size();
 *
 *   if (do_schlieren_plot == true)
 *     Assert(inputs.solution_gradients.size() == n_evaluation_points,
 *            ExcInternalError());
 *
 *   Assert(computed_quantities.size() == n_evaluation_points,
 *          ExcInternalError());
 *   Assert(inputs.solution_values[0].size() == dim + 2, ExcInternalError());
 *   Assert(computed_quantities[0].size() ==
 *            dim + 2 + (do_schlieren_plot == true ? 1 : 0),
 *          ExcInternalError());
 *
 *   for (unsigned int p = 0; p < n_evaluation_points; ++p)
 *     {
 *       for (unsigned int d = 0; d < dim + 2; ++d)
 *         solution[d] = inputs.solution_values[p](d);
 *
 *       const double density  = solution[0];
 *       const double pressure = euler_pressure<dim>(solution);
 *
 *       for (unsigned int d = 0; d < dim; ++d)
 *         computed_quantities[p](d) = velocity[d];
 *       computed_quantities[p](dim)     = pressure;
 *       computed_quantities[p](dim + 1) = std::sqrt(gamma * pressure / density);
 *
 *       if (do_schlieren_plot == true)
 *         computed_quantities[p](dim + 2) =
 *           inputs.solution_gradients[p][0] * inputs.solution_gradients[p][0];
 *     }
 *
 * template <int dim>
 * std::vector<std::string> EulerProblem<dim>::Postprocessor::get_names() const
 *
 *   std::vector<std::string> names;
 *   for (unsigned int d = 0; d < dim; ++d)
 *     names.emplace_back("velocity");
 *   names.emplace_back("pressure");
 *   names.emplace_back("speed_of_sound");
 *
 *   if (do_schlieren_plot == true)
 *     names.emplace_back("schlieren_plot");
 * For the interpretation of quantities, we have scalar density, energy,
 * pressure, speed of sound, and the Schlieren plot, and vectors for the
 * momentum and the velocity.
 * template <int dim>
 * std::vector<DataComponentInterpretation::DataComponentInterpretation>
 * EulerProblem<dim>::Postprocessor::get_data_component_interpretation() const
 *
 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
 *     interpretation;
 *   for (unsigned int d = 0; d < dim; ++d)
 *     interpretation.push_back(
 *       DataComponentInterpretation::component_is_part_of_vector);
 *
 *   if (do_schlieren_plot == true)
 *     interpretation.push_back(
 *       DataComponentInterpretation::component_is_scalar);
 *
 *   return interpretation;
 * With respect to the necessary update flags, we only need the values for
 * all quantities but the Schlieren plot, which is based on the density
 * gradient.
 *
 * template <int dim>
 * UpdateFlags EulerProblem<dim>::Postprocessor::get_needed_update_flags() const
 *
 *   if (do_schlieren_plot == true)
 * The constructor for this class is unsurprising: We set up a parallel
 * triangulation based on the `MPI_COMM_WORLD` communicator, a vector finite
 * element with `dim+2` components for density, momentum, and energy, a
 * high-order mapping of the same degree as the underlying finite element,
 * and initialize the time and time step to zero.
 *
 * template <int dim>
 * EulerProblem<dim>::EulerProblem()
 *   : pcout(std::cout, Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
 * #ifdef DEAL_II_WITH_P4EST
 *   , fe(FE_DGQ<dim>(fe_degree) ^ (dim + 2))
 *   , mapping(fe_degree)
 *   , dof_handler(triangulation)
 *   , timer(pcout, TimerOutput::never, TimerOutput::wall_times)
 *   , euler_operator(timer)
 * As a mesh, this tutorial program implements two options, depending on the
 * global variable `testcase`: For the analytical variant (`testcase==0`),
 * the domain is @f$(0, 10) \times (-5, 5)@f$, with Dirichlet boundary
 * conditions (inflow) all around the domain. For `testcase==1`, we set the
 * domain to a cylinder in a rectangular box, derived from the flow past
 * cylinder testcase for incompressible viscous flow by Schäfer and
 * Turek (1996). Here, we have a larger variety of boundaries. The inflow
 * part at the left of the channel is given the inflow type, for which we
 * choose a constant inflow profile, whereas we set a subsonic outflow at
 * the right. For the boundary around the cylinder (boundary id equal to 2)
 * as well as the channel walls (boundary id equal to 3) we use the wall
 * boundary type, which is no-normal flow. Furthermore, for the 3d cylinder
 * we also add a gravity force in vertical direction. Having the base mesh
 * in place (including the manifolds set by
 * GridGenerator::channel_with_cylinder()), we can then perform the
 * specified number of global refinements, create the unknown numbering from
 * the DoFHandler, and hand the resulting data structures to the
 * initialization of the EulerOperator.
 * template <int dim>
 * void EulerProblem<dim>::make_grid_and_dofs()
 *
 *       for (unsigned int d = 1; d < dim; ++d)
 *         lower_left[d] = -5;
 *
 *       upper_right[0] = 10;
 *       for (unsigned int d = 1; d < dim; ++d)
 *         upper_right[d] = 5;
 *
 *       euler_operator.set_inflow_boundary(
 *         0, std::make_unique<ExactSolution<dim>>(0));
 *
 *       euler_operator.set_inflow_boundary(
 *         0, std::make_unique<ExactSolution<dim>>(0));
 *       euler_operator.set_subsonic_outflow_boundary(
 *         1, std::make_unique<ExactSolution<dim>>(0));
 *
 *       euler_operator.set_wall_boundary(2);
 *       euler_operator.set_wall_boundary(3);
 *
 *         euler_operator.set_body_force(
 *           std::make_unique<Functions::ConstantFunction<dim>>(
 *             std::vector<double>({0., 0., -0.2})));
 *
 *   dof_handler.distribute_dofs(fe);
 *
 *   euler_operator.reinit(mapping, dof_handler);
 *   euler_operator.initialize_vector(solution);
 * In the following, we output some statistics about the problem. Because we
 * often end up with quite large numbers of cells or degrees of freedom, we
 * would like to print them with a comma to separate each set of three
 * digits. This can be done via "locales", although the way this works is
 * not particularly intuitive. @ref step_32 "step-32" explains this in
 * slightly more detail.
 *
 *   std::locale s = pcout.get_stream().getloc();
 *   pcout.get_stream().imbue(std::locale(""));
 *   pcout << "Number of degrees of freedom: " << dof_handler.n_dofs()
 *         << " ( = " << (dim + 2) << " [vars] x "
 *   pcout.get_stream().imbue(s);
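 *
 * The same save--imbue--restore pattern works for a plain `std::cout`, as in
 * the following self-contained sketch. Note that `std::locale("")` picks up
 * the user's environment locale, so whether (and how) a thousands separator
 * appears depends on the system:
 * @code
 * #include <iostream>
 * #include <locale>
 *
 * int main()
 * {
 *   const std::locale previous = std::cout.getloc(); // save current locale
 *   std::cout.imbue(std::locale(""));                // user-preferred locale
 *   std::cout << "Number of degrees of freedom: " << 147456 << '\n';
 *   std::cout.imbue(previous);                       // restore old locale
 * }
 * @endcode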
 * For output, we first let the Euler operator compute the errors of the
 * numerical results. More precisely, we compute the error against the
 * analytical result for the analytical solution case, whereas we compute
 * the deviation against the background field with constant density and
 * energy and constant velocity in @f$x@f$ direction for the second test case.
 *
 * The next step is to create output. This is similar to what is done in
 * @ref step_33 "step-33": We let the postprocessor defined above control most
 * of the output, except for the primal field that we write directly. For the
 * analytical solution test case, we also perform another projection of the
 * analytical solution and print the difference between that field and the
 * numerical solution. Once we have defined all quantities to be written, we
 * build the patches for output. Similarly to @ref step_65 "step-65", we
 * create a high-order VTK output by setting the appropriate flag, which
 * enables us to visualize fields of high polynomial degrees. Finally, we call
 * DataOutInterface::write_vtu_in_parallel() to write the result to the given
 * file name. This function uses special MPI parallel write facilities, which
 * are typically more optimized for parallel file systems than the standard
 * library's `std::ofstream` variants used in most other tutorial programs. A
 * particularly nice feature of the `write_vtu_in_parallel()` function is the
 * fact that it can combine output from all MPI ranks into a single file,
 * making it unnecessary to have a central record of all such files (namely,
 * the "pvtu" file).
 *
 * For parallel programs, it is often instructive to look at the partitioning
 * of cells among processors. To this end, one can pass a vector of numbers
 * to DataOut::add_data_vector() that contains as many entries as the
 * current processor has active cells; these numbers should then be the
 * rank of the processor that owns each of these cells. Such a vector
 * could, for example, be obtained from
 * GridTools::get_subdomain_association(). On the other hand, on each MPI
 * process, DataOut will only read those entries that correspond to locally
 * owned cells, and these of course all have the same value: namely, the rank
 * of the current process. What is in the remaining entries of the vector
 * doesn't actually matter, and so we can get away with a cheap trick: We
 * simply fill every entry of the vector with the rank of the current MPI
 * process. The key is that on each process, only the entries corresponding
 * to the locally owned cells will be read, ignoring the (wrong) values in
 * other entries. All that matters is that, on every process, the subset of
 * entries corresponding to locally owned cells carries the correct value.
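 *
 * A minimal sketch of this trick, written with the same deal.II calls that
 * appear further down in output_results() (the vector name `mpi_owner` is the
 * one this program uses; `data_out` and `triangulation` are assumed to be in
 * scope):
 * @code
 * // Fill a cell-data vector with this process's rank; DataOut only reads the
 * // entries of locally owned cells, so the values elsewhere do not matter.
 * Vector<double> mpi_owner(triangulation.n_active_cells());
 * mpi_owner = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
 * data_out.add_data_vector(mpi_owner, "owner");
 * @endcode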
 * @note As of 2023, VisIt 3.3.3 can still not deal with higher-order cells.
 * Rather, it simply reports that there is no data to show. To view the
 * results of this program with VisIt, you will want to comment out the
 * line that sets `flags.write_higher_order_cells = true;`. On the other
 * hand, ParaView is able to understand VTU files with higher order cells.
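 *
 * The essential calls for the high-order output discussed above are collected
 * in the following fragment (not a complete function; `data_out`, `mapping`,
 * and `filename` are assumed to exist as in the listing below, and the
 * subdivision and curvature arguments of build_patches() are an assumption of
 * this sketch):
 * @code
 * DataOutBase::VtkFlags flags;
 * flags.write_higher_order_cells = true; // comment out when using VisIt
 * data_out.set_flags(flags);
 *
 * // ... attach the DoFHandler, solution vectors, and postprocessor ...
 *
 * data_out.build_patches(mapping, fe_degree, DataOut<dim>::curved_inner_cells);
 * data_out.write_vtu_in_parallel(filename, MPI_COMM_WORLD);
 * @endcode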
 * template <int dim>
 * void EulerProblem<dim>::output_results(const unsigned int result_number)
 *
 *   const std::array<double, 3> errors =
 *     euler_operator.compute_errors(ExactSolution<dim>(time), solution);
 *   const std::string quantity_name = testcase == 0 ? "error" : "norm";
 *
 *   pcout << "Time:" << std::setw(8) << std::setprecision(3) << time
 *         << ", dt: " << std::setw(8) << std::setprecision(2) << time_step
 *         << ", " << quantity_name << " rho: " << std::setprecision(4)
 *         << std::setw(10) << errors[0] << ", rho * u: " << std::setprecision(4)
 *         << std::setw(10) << errors[1] << ", energy:" << std::setprecision(4)
 *         << std::setw(10) << errors[2] << std::endl;
 *   Postprocessor postprocessor;
 *
 *   data_out.set_flags(flags);
 *   data_out.attach_dof_handler(dof_handler);
 *
 *   std::vector<std::string> names;
 *   names.emplace_back("density");
 *   for (unsigned int d = 0; d < dim; ++d)
 *     names.emplace_back("momentum");
 *   names.emplace_back("energy");
 *
 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
 *     interpretation;
 *   interpretation.push_back(
 *     DataComponentInterpretation::component_is_scalar);
 *   for (unsigned int d = 0; d < dim; ++d)
 *     interpretation.push_back(
 *       DataComponentInterpretation::component_is_part_of_vector);
 *   interpretation.push_back(
 *     DataComponentInterpretation::component_is_scalar);
 *
 *   data_out.add_data_vector(dof_handler, solution, names, interpretation);
 *   data_out.add_data_vector(solution, postprocessor);
 *
 *   if (testcase == 0 && dim == 2)
 *     {
 *       reference.reinit(solution);
 *       euler_operator.project(ExactSolution<dim>(time), reference);
 *       reference.sadd(-1., 1, solution);
 *
 *       std::vector<std::string> names;
 *       names.emplace_back("error_density");
 *       for (unsigned int d = 0; d < dim; ++d)
 *         names.emplace_back("error_momentum");
 *       names.emplace_back("error_energy");
 *
 *       std::vector<DataComponentInterpretation::DataComponentInterpretation>
 *         interpretation;
 *       interpretation.push_back(
 *         DataComponentInterpretation::component_is_scalar);
 *       for (unsigned int d = 0; d < dim; ++d)
 *         interpretation.push_back(
 *           DataComponentInterpretation::component_is_part_of_vector);
 *       interpretation.push_back(
 *         DataComponentInterpretation::component_is_scalar);
 *
 *       data_out.add_data_vector(dof_handler,
 *     }
 *
 *   data_out.add_data_vector(mpi_owner, "owner");
 *
 *   data_out.build_patches(mapping,
 *
 *   const std::string filename =
 *   data_out.write_vtu_in_parallel(filename, MPI_COMM_WORLD);
 * The EulerProblem::run() function puts all pieces together. It starts off
 * by calling the function that creates the mesh and sets up data structures,
 * and then initializing the time integrator and the two temporary vectors of
 * the low-storage integrator. We call these vectors `rk_register_1` and
 * `rk_register_2`, and use the first vector to represent the quantity
 * @f$\mathbf{r}_i@f$ and the second one for @f$\mathbf{k}_i@f$ in the formulas
 * for the Runge--Kutta scheme outlined in the introduction. Before we start
 * the time loop, we compute the time step size by the
 * `EulerOperator::compute_cell_transport_speed()` function. For reasons of
 * comparison, we compare the result obtained there with the minimal mesh
 * size and print them to screen. For velocities and speeds of sound close
 * to unity as in this tutorial program, the predicted effective mesh size
 * will be close, but they could vary if scaling were different.
 * template <int dim>
 * void EulerProblem<dim>::run()
 *
 *   const unsigned int n_vect_bits = 8 * sizeof(Number) * n_vect_number;
 *
 *   pcout << "Running with "
 *         << " MPI processes" << std::endl;
 *   pcout << "Vectorization over " << n_vect_number << ' '
 *         << (std::is_same_v<Number, double> ? "doubles" : "floats") << " = "
 *         << n_vect_bits << " bits ("
 *
 *   make_grid_and_dofs();
 *
 *   const LowStorageRungeKuttaIntegrator integrator(lsrk_scheme);
 *
 *   rk_register_1.reinit(solution);
 *   rk_register_2.reinit(solution);
 *
 *   euler_operator.project(ExactSolution<dim>(time), solution);
 *
 *   double min_vertex_distance = std::numeric_limits<double>::max();
 *   for (const auto &cell : triangulation.active_cell_iterators())
 *     if (cell->is_locally_owned())
 *       min_vertex_distance =
 *         std::min(min_vertex_distance, cell->minimum_vertex_distance());
 *   min_vertex_distance =
 *     Utilities::MPI::min(min_vertex_distance, MPI_COMM_WORLD);
 *
 *   time_step = courant_number * integrator.n_stages() /
 *               euler_operator.compute_cell_transport_speed(solution);
 *   pcout << "Time step size: " << time_step
 *         << ", minimal h: " << min_vertex_distance
 *         << ", initial transport scaling: "
 *         << 1. / euler_operator.compute_cell_transport_speed(solution)
 *
 *   output_results(0);
 * Now we are ready to start the time loop, which we run until the time
 * has reached the desired end time. Every 5 time steps, we compute a new
 * estimate for the time step -- since the solution is nonlinear, it is
 * most effective to adapt the value during the course of the
 * simulation. In case the Courant number was chosen too aggressively, the
 * simulation will typically blow up with time step NaN, so that is easy
 * to detect here. One thing to note is that roundoff errors might
 * propagate to the leading digits due to an interaction of slightly
 * different time step selections that in turn lead to slightly different
 * solutions. To decrease this sensitivity, it is common practice to round
 * or truncate the time step size to a few digits, e.g. 3 in this case. In
 * case the current time is near the prescribed 'tick' value for output
 * (e.g. 0.02), we also write the output. After the end of the time loop,
 * we summarize the computation by printing some statistics, which is done
 * via TimerOutput::print_wall_time_statistics().
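 *
 * The time step truncation and the output 'tick' check can be seen in
 * isolation in the following self-contained sketch; the numbers are invented,
 * and the three-digit truncation is written out by hand here instead of
 * through the deal.II utility used in the code below:
 * @code
 * #include <cmath>
 * #include <iostream>
 *
 * // Truncate a positive number x to n significant digits.
 * double truncate_to_n_digits(const double x, const unsigned int n)
 * {
 *   const double scale = std::pow(10., std::floor(std::log10(x)) + 1. - n);
 *   return std::floor(x / scale) * scale;
 * }
 *
 * int main()
 * {
 *   const double raw_dt = 0.00689325;
 *   const double dt     = truncate_to_n_digits(raw_dt, 3); // -> 0.00689
 *
 *   // Write output whenever the interval [time - dt, time] crosses a tick.
 *   const double output_tick = 0.02;
 *   const double time        = 0.501;
 *   const bool   write_now   = static_cast<int>(time / output_tick) !=
 *                              static_cast<int>((time - dt) / output_tick);
 *   std::cout << "dt = " << dt << ", write output now: " << std::boolalpha
 *             << write_now << '\n';
 * }
 * @endcode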
 *   unsigned int timestep_number = 0;
 *
 *   while (time < final_time - 1e-12)
 *     {
 *       ++timestep_number;
 *       if (timestep_number % 5 == 0)
 *         time_step = Utilities::truncate_to_n_digits(
 *           courant_number * integrator.n_stages() /
 *             euler_operator.compute_cell_transport_speed(solution), 3);
 *
 *       integrator.perform_time_step(euler_operator,
 *
 *       time += time_step;
 *
 *       if (static_cast<int>(time / output_tick) !=
 *             static_cast<int>((time - time_step) / output_tick) ||
 *           time >= final_time - 1e-12)
 *         output_results(
 *           static_cast<unsigned int>(std::round(time / output_tick)));
 *     }
 *
 *   timer.print_wall_time_statistics(MPI_COMM_WORLD);
 *   pcout << std::endl;
 * The main() function is not surprising and follows what was done in all
 * previous MPI programs: As we run an MPI program, we need to call `MPI_Init()`
 * and `MPI_Finalize()`, which we do through the
 * Utilities::MPI::MPI_InitFinalize data structure. Note that we run the
 * program only with MPI, and set the thread count to 1.
 *
 * int main(int argc, char **argv)
 *
 *   using namespace Euler_DG;
 *   using namespace dealii;
 *
 *       EulerProblem<dimension> euler_problem;
 *       euler_problem.run();
 *   catch (std::exception &exc)
 *
 *       std::cerr << std::endl
 *                 << "----------------------------------------------------"
 *       std::cerr << "Exception on processing: " << std::endl
 *                 << exc.what() << std::endl
 *                 << "Aborting!" << std::endl
 *                 << "----------------------------------------------------"
 *
 *       std::cerr << std::endl
 *                 << "----------------------------------------------------"
 *       std::cerr << "Unknown exception!" << std::endl
 *                 << "Aborting!" << std::endl
 *                 << "----------------------------------------------------"
<a name="step_67-Results"></a><h1>Results</h1>

<a name="step_67-Programoutput"></a><h3>Program output</h3>

Running the program with the default settings on a machine with 40 processes
produces the following output:

Running with 40 MPI processes
Vectorization over 8 doubles = 512 bits (AVX512)
Number of degrees of freedom: 147,456 ( = 4 [vars] x 1,024 [cells] x 36 [dofs/cell/var] )
Time step size: 0.00689325, minimal h: 0.3125, initial transport scaling: 0.102759
3413Time: 0, dt: 0.0069, error rho: 2.76e-07, rho * u: 1.259e-06, energy: 2.987e-06
3414Time: 1.01, dt: 0.0069, error rho: 1.37e-06, rho * u: 2.252e-06, energy: 4.153e-06
3415Time: 2.01, dt: 0.0069, error rho: 1.561e-06, rho * u: 2.43e-06, energy: 4.493e-06
3416Time: 3.01, dt: 0.0069, error rho: 1.714e-06, rho * u: 2.591e-06, energy: 4.762e-06
3417Time: 4.01, dt: 0.0069, error rho: 1.843e-06, rho * u: 2.625e-06, energy: 4.985e-06
3418Time: 5.01, dt: 0.0069, error rho: 1.496e-06, rho * u: 1.961e-06, energy: 4.142e-06
3419Time: 6, dt: 0.0083, error rho: 1.007e-06, rho * u: 7.119e-07, energy: 2.972e-06
3420Time: 7, dt: 0.0095, error rho: 9.096e-07, rho * u: 3.786e-07, energy: 2.626e-06
3421Time: 8, dt: 0.0096, error rho: 8.439e-07, rho * u: 3.338e-07, energy: 2.43e-06
3422Time: 9, dt: 0.0096, error rho: 7.822e-07, rho * u: 2.984e-07, energy: 2.248e-06
3423Time: 10, dt: 0.0096, error rho: 7.231e-07, rho * u: 2.666e-07, energy: 2.074e-06
3425+-------------------------------------------+------------------+------------+------------------+
3426| Total wallclock time elapsed | 2.249s 30 | 2.249s | 2.249s 8 |
| Section | no. calls | min time rank | avg time | max time rank |
+-------------------------------------------+------------------+------------+------------------+
3430| compute errors | 11 | 0.008066s 13 | 0.00952s | 0.01041s 20 |
3431| compute transport speed | 258 | 0.01012s 13 | 0.05392s | 0.08574s 25 |
3432| output | 11 | 0.9597s 13 | 0.9613s | 0.9623s 6 |
3433| rk time stepping total | 1283 | 0.9827s 25 | 1.015s | 1.06s 13 |
3434| rk_stage - integrals L_h | 6415 | 0.8803s 26 | 0.9198s | 0.9619s 14 |
3435| rk_stage - inv mass + vec upd | 6415 | 0.05677s 15 | 0.06487s | 0.07597s 13 |
3436+-------------------------------------------+------------------+------------+------------------+
The program output shows that all errors are small. This is due to the fact
that we use a relatively fine mesh of @f$32^2@f$ cells with polynomials of degree
5 for a solution that is smooth. An interesting pattern shows up for the time
step size: whereas it is 0.0069 up to time 5, it increases to 0.0096 for later
times. The step size increases once the vortex with some motion on top of the
speed of sound (and thus faster propagation) leaves the computational domain
between times 5 and 6.5. After that point, the flow is simply uniform
in the same direction, and the maximum velocity of the gas is reduced
compared to the previous state where the uniform velocity was overlaid
by the vortex. Our time step formula recognizes this effect.
The final block of output shows detailed information about the timing
of individual parts of the program; it breaks this down by showing
the time taken by the fastest and the slowest processor, and the
average time -- this is often useful in very large computations to
find whether there are processors that are consistently overheated
(and consequently are throttling their clock speed) or consistently
slow for other reasons.
The summary shows that 1283 time steps have been performed
in 1.02 seconds (looking at the average time among all MPI processes), while
the output of 11 files has taken an additional 0.96 seconds. Broken down per
time step and into the five Runge--Kutta stages, the compute time per
evaluation is 0.16 milliseconds. This high performance is typical of
matrix-free evaluators and a reason why explicit time integration is very
competitive against implicit solvers, especially for large-scale simulations.
The breakdown of computational times at the end of the program run shows that
the evaluation of integrals in @f$\mathcal L_h@f$ contributes with around 0.92
seconds and the application of the inverse mass matrix with 0.06 seconds.
Furthermore, the estimation of the transport speed for the time step size
computation contributes with another 0.05 seconds of compute time.
If we use three more levels of global refinement and 9.4 million DoFs in total,
the final statistics are as follows (for the modified Lax--Friedrichs flux,
@f$p=5@f$, and the same system of 40 cores of dual-socket Intel Xeon Gold 6230):
3474+-------------------------------------------+------------------+------------+------------------+
3475| Total wallclock time elapsed | 244.9s 12 | 244.9s | 244.9s 34 |
| Section | no. calls | min time rank | avg time | max time rank |
3478+-------------------------------------------+------------------+------------+------------------+
3479| compute errors | 11 | 0.4239s 12 | 0.4318s | 0.4408s 9 |
3480| compute transport speed | 2053 | 3.962s 12 | 6.727s | 10.12s 7 |
3481| output | 11 | 30.35s 12 | 30.36s | 30.37s 9 |
3482| rk time stepping total | 10258 | 201.7s 7 | 205.1s | 207.8s 12 |
3483| rk_stage - integrals L_h | 51290 | 121.3s 6 | 126.6s | 136.3s 16 |
3484| rk_stage - inv mass + vec upd | 51290 | 66.19s 16 | 77.52s | 81.84s 10 |
3485+-------------------------------------------+------------------+------------+------------------+
Per time step, the solver now takes 0.02 seconds, about 25 times as long as
for the small problem with 147k unknowns. Given that the problem involves 64
times as many unknowns, the increase in computing time is not
surprising. Since we also do 8 times as many time steps, the compute time
should in theory increase by a factor of 512. The actual increase is 205 s /
1.02 s = 202. This is because the small problem size cannot fully utilize the
40 cores due to communication overhead. This becomes clear if we look into the
details of the operations done per time step. The evaluation of the
differential operator @f$\mathcal L_h@f$ with nearest neighbor communication goes
from 0.92 seconds to 127 seconds, i.e., it increases by a factor of 138. On
the other hand, the cost for application of the inverse mass matrix and the
vector updates, which do not need to communicate between the MPI processes at
all, has increased by a factor of 1195. The increase is more than the
theoretical factor of 512 because the operation is limited by the bandwidth
from RAM memory for the larger size while for the smaller size, all vectors
fit into the caches of the CPU. The numbers show that the mass matrix
evaluation and vector update part consume almost 40% of the time spent by the
Runge--Kutta stages -- despite using a low-storage Runge--Kutta integrator and
merging of vector operations! And despite using over-integration for the
@f$\mathcal L_h@f$ operator. For simpler differential operators and more expensive
time integrators, the proportion spent in the mass matrix and vector update
part can also reach 70%. If we compute a throughput number in terms of DoFs
processed per second and Runge--Kutta stage, we obtain @f[ \text{throughput} =
\frac{n_\mathrm{time steps} n_\mathrm{stages}
n_\mathrm{dofs}}{t_\mathrm{compute}} = \frac{10258 \cdot 5 \cdot
9.4\,\text{MDoFs}}{205s} = 2360\, \text{MDoFs/s} @f] This throughput number is
very high, given that simply copying one vector to another one runs at
only around 10,000 MDoFs/s.
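
The arithmetic behind this throughput number can be reproduced in a few lines
(the figures are the ones quoted above):
@code
#include <iostream>

int main()
{
  const double n_time_steps = 10258;
  const double n_stages     = 5;
  const double n_dofs       = 9437184; // 9.4 MDoFs
  const double t_compute    = 205.1;   // "rk time stepping total", average
  std::cout << n_time_steps * n_stages * n_dofs / t_compute / 1e6
            << " MDoFs/s\n";           // prints roughly 2360
}
@endcode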
If we go to the next-larger size with 37.7 million DoFs, the overall
simulation time is 2196 seconds, with 1978 seconds spent in the time
stepping. The increase in run time is a factor of 9.3 for the L_h operator
(1179 versus 127 seconds) and a factor of 10.3 for the inverse mass matrix and
vector updates (797 vs 77.5 seconds). The reason for this non-optimal increase
in run time can be traced back to cache effects on the given hardware (with 40
MB of L2 cache and 55 MB of L3 cache): While not all of the relevant data fits
into caches for 9.4 million DoFs (one vector takes 75 MB and we have three
vectors plus some additional data in MatrixFree), there is capacity for one and
a half vectors nonetheless. Given that modern caches are more sophisticated than
the naive least-recently-used strategy (where we would have little re-use as
the data is used in a streaming-like fashion), we can assume that a sizeable
fraction of data can indeed be delivered from caches for the 9.4 million DoFs
case. For the larger case, even with optimal caching, less than 10 percent of
the data would fit into caches, with an associated loss in performance.
<a name="step_67-Convergenceratesfortheanalyticaltestcase"></a><h3>Convergence rates for the analytical test case</h3>

For the modified Lax--Friedrichs flux and measuring the error in the momentum
variable, we obtain the following convergence table (the rates are very
similar for the density and energy variables):
3541<table align=
"center" class=
"doxtable">
3544 <th colspan=
"3"><i>p</i>=2</th>
3545 <th colspan=
"3"><i>p</i>=3</th>
3546 <th colspan=
"3"><i>p</i>=5</th>
3561 <td align=
"right">16</td>
3568 <td align=
"right">2,304</td>
3569 <td align=
"center">1.373e-01</td>
3573 <td align=
"right">64</td>
3577 <td align=
"right">4,096</td>
3578 <td align=
"center">9.130e-02</td>
3580 <td align=
"right">9,216</td>
3581 <td align=
"center">8.899e-03</td>
3585 <td align=
"right">256</td>
3586 <td align=
"right">9,216</td>
3587 <td align=
"center">5.577e-02</td>
3589 <td align=
"right">16,384</td>
3590 <td align=
"center">7.381e-03</td>
3592 <td align=
"right">36,864</td>
3593 <td align=
"center">2.082e-04</td>
3597 <td align=
"right">1024</td>
3598 <td align=
"right">36,864</td>
3599 <td align=
"center">4.724e-03</td>
3601 <td align=
"right">65,536</td>
3602 <td align=
"center">3.072e-04</td>
3604 <td align=
"right">147,456</td>
3605 <td align=
"center">2.625e-06</td>
3609 <td align=
"right">4096</td>
3610 <td align=
"right">147,456</td>
3611 <td align=
"center">6.205e-04</td>
3613 <td align=
"right">262,144</td>
3614 <td align=
"center">1.880e-05</td>
3616 <td align=
"right">589,824</td>
3617 <td align=
"center">3.268e-08</td>
3621 <td align=
"right">16,384</td>
3622 <td align=
"right">589,824</td>
3623 <td align=
"center">8.279e-05</td>
3625 <td align=
"right">1,048,576</td>
3626 <td align=
"center">1.224e-06</td>
3628 <td align=
"right">2,359,296</td>
3629 <td align=
"center">9.252e-10</td>
3633 <td align=
"right">65,536</td>
3634 <td align=
"right">2,359,296</td>
3635 <td align=
"center">1.105e-05</td>
3637 <td align=
"right">4,194,304</td>
3638 <td align=
"center">7.871e-08</td>
3640 <td align=
"right">9,437,184</td>
3641 <td align=
"center">1.369e-10</td>
3645 <td align=
"right">262,144</td>
3646 <td align=
"right">9,437,184</td>
3647 <td align=
"center">1.615e-06</td>
3649 <td align=
"right">16,777,216</td>
3650 <td align=
"center">4.961e-09</td>
3652 <td align=
"right">37,748,736</td>
3653 <td align=
"center">7.091e-11</td>
If we switch to the Harten-Lax-van Leer flux, the results are as follows:
3659<table align=
"center" class=
"doxtable">
3662 <th colspan=
"3"><i>p</i>=2</th>
3663 <th colspan=
"3"><i>p</i>=3</th>
3664 <th colspan=
"3"><i>p</i>=5</th>
3679 <td align=
"right">16</td>
3686 <td align=
"right">2,304</td>
3687 <td align=
"center">1.339e-01</td>
3691 <td align=
"right">64</td>
3695 <td align=
"right">4,096</td>
3696 <td align=
"center">9.037e-02</td>
3698 <td align=
"right">9,216</td>
3699 <td align=
"center">8.849e-03</td>
3703 <td align=
"right">256</td>
3704 <td align=
"right">9,216</td>
3705 <td align=
"center">4.204e-02</td>
3707 <td align=
"right">16,384</td>
3708 <td align=
"center">9.143e-03</td>
3710 <td align=
"right">36,864</td>
3711 <td align=
"center">2.501e-04</td>
3715 <td align=
"right">1024</td>
3716 <td align=
"right">36,864</td>
3717 <td align=
"center">4.913e-03</td>
3719 <td align=
"right">65,536</td>
3720 <td align=
"center">3.257e-04</td>
3722 <td align=
"right">147,456</td>
3723 <td align=
"center">3.260e-06</td>
3727 <td align=
"right">4096</td>
3728 <td align=
"right">147,456</td>
3729 <td align=
"center">7.862e-04</td>
3731 <td align=
"right">262,144</td>
3732 <td align=
"center">1.588e-05</td>
3734 <td align=
"right">589,824</td>
3735 <td align=
"center">2.953e-08</td>
3739 <td align=
"right">16,384</td>
3740 <td align=
"right">589,824</td>
3741 <td align=
"center">1.137e-04</td>
3743 <td align=
"right">1,048,576</td>
3744 <td align=
"center">9.400e-07</td>
3746 <td align=
"right">2,359,296</td>
3747 <td align=
"center">4.286e-10</td>
3751 <td align=
"right">65,536</td>
3752 <td align=
"right">2,359,296</td>
3753 <td align=
"center">1.476e-05</td>
3755 <td align=
"right">4,194,304</td>
3756 <td align=
"center">5.799e-08</td>
3758 <td align=
"right">9,437,184</td>
3759 <td align=
"center">2.789e-11</td>
3763 <td align=
"right">262,144</td>
3764 <td align=
"right">9,437,184</td>
3765 <td align=
"center">2.038e-06</td>
3767 <td align=
"right">16,777,216</td>
3768 <td align=
"center">3.609e-09</td>
3770 <td align=
"right">37,748,736</td>
3771 <td align=
"center">5.730e-11</td>
The tables show that we get optimal @f$\mathcal O\left(h^{p+1}\right)@f$
convergence rates for both numerical fluxes. The errors are slightly smaller
for the Lax--Friedrichs flux for @f$p=2@f$, but the picture is reversed for
@f$p=3@f$; in any case, the differences on this testcase are relatively small.

For @f$p=5@f$, we reach the roundoff accuracy of @f$10^{-11}@f$ with both
fluxes on the finest grids. Also note that the errors are absolute with a
domain length of @f$10^2@f$, so relative errors are below @f$10^{-12}@f$. The HLL flux
is somewhat better for the highest degree, which is due to a slight inaccuracy
of the Lax--Friedrichs flux: The Lax--Friedrichs flux sets a Dirichlet
condition on the solution that leaves the domain, which results in a small
artificial reflection, which is accentuated for the Lax--Friedrichs
flux. Apart from that, we see that the influence of the numerical flux is
minor, as the polynomial part inside elements is the main driver of the
accuracy. The limited influence of the flux also has consequences when trying
to approach more challenging setups with the higher-order DG setup: Taking for
example the parameters and grid of @ref step_33 "step-33", we get oscillations
(which in turn make density negative and make the solution explode) with both
fluxes once the high-mass part comes near the boundary, as opposed to the
low-order finite volume case (@f$p=0@f$). Thus, any case that leads to shocks in
the solution necessitates some form of limiting or artificial dissipation. For
another alternative, see the @ref step_69 "step-69" tutorial program.
<a name="step_67-Resultsforflowinchannelaroundcylinderin2D"></a><h3>Results for flow in channel around cylinder in 2D</h3>
For the test case of the flow around a cylinder in a channel, we need to
change the first code line to

  constexpr unsigned int testcase = 1;

This test case starts with a background field of a constant velocity; the flow
has to go around an obstacle in the form of a cylinder. Since we impose a
no-penetration condition on the cylinder walls, the flow that
initially impinges head-on onto the cylinder has to rearrange,
which creates a big sound wave. The following pictures show the pressure at
times 0.1, 0.25, 0.5, and 1.0 (top left to bottom right) for the 2D case with
5 levels of global refinement, using 102,400 cells with polynomial degree of
5 and 14.7 million degrees of freedom over all 4 solution variables.
We clearly see the discontinuity that
propagates slowly in the upstream direction and more quickly in the downstream
direction in the first snapshot at time 0.1. At time 0.25, the sound wave has
reached the top and bottom walls and reflected back to the interior. From the
different distances of the reflected waves from lower and upper walls we can
see the slight asymmetry of the Schäfer-Turek test case, which leaves slightly
more space above the cylinder compared to below. At later times, the picture
is more chaotic with many sound waves all over the place.

<table align="center" class="doxtable" style="width:85%">
The next picture shows an elevation plot of the pressure at time 1.0 looking
from the channel inlet towards the outlet at the same resolution -- here,
we can see the large number of reflections. In the figure, two types of waves
are visible. The larger-amplitude waves correspond to various reflections
that happened as the initial discontinuity hit the walls, whereas the
small-amplitude waves of size similar to the elements correspond to numerical
artifacts. They have their origin in the finite resolution of the scheme and
appear as the discontinuity travels through elements with high-order
polynomials. This effect can be cured by increasing resolution. Apart from
this effect, the rich wave structure is the result of the transport accuracy
of the high-order DG method.
If we run the program with degree 2 and 6 levels of global refinements (410k
cells, 14.7M unknowns), we get the following evolution of the pressure
(elevation plot, colored by the value of the density):
3867 <iframe width="560" height="315" src="https:
3869 allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
3870 allowfullscreen></iframe>
With 2 levels of global refinement with 1,600 cells, the mesh and its
partitioning on 40 MPI processes looks as follows:

When we run the code with 4 levels of global refinements on 40 cores, we get
the following output:

Running with 40 MPI processes
Vectorization over 8 doubles = 512 bits (AVX512)
Number of degrees of freedom: 3,686,400 ( = 4 [vars] x 25,600 [cells] x 36 [dofs/cell/var] )
Time step size: 7.39876e-05, minimal h: 0.001875, initial transport scaling: 0.00110294
3888Time: 0, dt: 7.4e-05, norm rho: 4.17e-16, rho * u: 1.629e-16, energy: 1.381e-15
3889Time: 0.05, dt: 6.3e-05, norm rho: 0.02075, rho * u: 0.03801, energy: 0.08772
3890Time: 0.1, dt: 5.9e-05, norm rho: 0.02211, rho * u: 0.04515, energy: 0.08953
3891Time: 0.15, dt: 5.7e-05, norm rho: 0.02261, rho * u: 0.04592, energy: 0.08967
3892Time: 0.2, dt: 5.8e-05, norm rho: 0.02058, rho * u: 0.04361, energy: 0.08222
3893Time: 0.25, dt: 5.9e-05, norm rho: 0.01695, rho * u: 0.04203, energy: 0.06873
3894Time: 0.3, dt: 5.9e-05, norm rho: 0.01653, rho * u: 0.0401, energy: 0.06604
3895Time: 0.35, dt: 5.7e-05, norm rho: 0.01774, rho * u: 0.04264, energy: 0.0706
3899Time: 1.95, dt: 5.8e-05, norm rho: 0.01488, rho * u: 0.03923, energy: 0.05185
3900Time: 2, dt: 5.7e-05, norm rho: 0.01432, rho * u: 0.03969, energy: 0.04889
3902+-------------------------------------------+------------------+------------+------------------+
3903| Total wallclock time elapsed | 273.6s 13 | 273.6s | 273.6s 0 |
3905| Section | no. calls | min time rank | avg time | max time rank |
3906+-------------------------------------------+------------------+------------+------------------+
3907| compute errors | 41 | 0.01112s 35 | 0.0672s | 0.1337s 0 |
3908| compute transport speed | 6914 | 5.422s 35 | 15.96s | 29.99s 1 |
3909| output | 41 | 37.24s 35 | 37.3s | 37.37s 0 |
3910| rk time stepping total | 34564 | 205.4s 1 | 219.5s | 230.1s 35 |
3911| rk_stage - integrals L_h | 172820 | 153.6s 1 | 164.9s | 175.6s 27 |
3912| rk_stage - inv mass + vec upd | 172820 | 47.13s 13 | 53.09s | 64.05s 33 |
3913+-------------------------------------------+------------------+------------+------------------+
3916The norms shown here for the various quantities are the deviations
3917@f$\rho'@f$, @f$(\rho u)'@f$, and @f$E'@f$ against the background field (namely, the
3918initial condition). The distribution of run time is overall similar as in the
3919previous test case. The only slight difference is the larger proportion of
3920time spent in @f$\mathcal L_h@f$ as compared to the inverse mass matrix and vector
3921updates. This is because the geometry is deformed and the matrix-free
3922framework needs to load additional arrays for the geometry from memory that
3923are compressed in the affine mesh case.
Increasing the number of global refinements to 5, the output becomes:

Running with 40 MPI processes
Vectorization over 8 doubles = 512 bits (AVX512)
Number of degrees of freedom: 14,745,600 ( = 4 [vars] x 102,400 [cells] x 36 [dofs/cell/var] )
3933+-------------------------------------------+------------------+------------+------------------+
3934| Total wallclock time elapsed | 2693s 32 | 2693s | 2693s 23 |
3936| Section | no. calls | min time rank | avg time | max time rank |
3937+-------------------------------------------+------------------+------------+------------------+
3938| compute errors | 41 | 0.04537s 32 | 0.173s | 0.3489s 0 |
3939| compute transport speed | 13858 | 40.75s 32 | 85.99s | 149.8s 0 |
3940| output | 41 | 153.8s 32 | 153.9s | 154.1s 0 |
3941| rk time stepping total | 69284 | 2386s 0 | 2450s | 2496s 32 |
3942| rk_stage - integrals L_h | 346420 | 1365s 32 | 1574s | 1718s 19 |
3943| rk_stage - inv mass + vec upd | 346420 | 722.5s 10 | 870.7s | 1125s 32 |
3944+-------------------------------------------+------------------+------------+------------------+
The effect on performance is similar to the analytical test case -- in
theory, computation times should increase by a factor of 8, but we actually
see an increase by a factor of 11 for the time steps (219.5 seconds versus
2450 seconds). This can be traced back to caches, with the small case mostly
fitting in caches. An interesting effect, typical of programs with a mix of
local communication (integrals @f$\mathcal L_h@f$) and global communication
(computation of transport speed) with some load imbalance, can be observed by
looking at the MPI ranks that encounter the minimal and maximal time of
different phases, respectively. Rank 0 reports the fastest throughput for the
"rk time stepping total" part. At the same time, it appears to be slowest for
the "compute transport speed" part, almost a factor of 2 slower than the
average and almost a factor of 4 compared to the faster rank.
Since the latter involves global communication, we can attribute the
slowness in this part to the fact that the local Runge--Kutta stages have
advanced more quickly on this rank and need to wait until the other processors
catch up. At this point, one can wonder about the reason for this imbalance:
The number of cells is almost the same on all MPI processes.
However, the matrix-free framework is faster on affine and Cartesian
cells located towards the outlet of the channel, to which the lower MPI ranks
are assigned. On the other hand, rank 32, which reports the highest run time
for the Runge--Kutta stages, owns the curved cells near the cylinder, for
which no data compression is possible. To improve throughput, we could assign
different weights to different cell types when partitioning the mesh, or we
could measure the run times of a few time steps and try to rebalance then.
The throughput per Runge--Kutta stage can be computed to 2085 MDoFs/s for the
14.7 million DoFs test case over the 346,000 Runge--Kutta stages, slightly
slower than the Cartesian mesh throughput of 2360 MDoFs/s reported above.

Finally, if we add one additional refinement, we record the following output:

Running with 40 MPI processes
3980Vectorization over 8 doubles = 512 bits (AVX512)
3981Number of degrees of freedom: 58,982,400 ( = 4 [vars] x 409,600 [cells] x 36 [dofs/cell/var] )
3985Time: 1.95, dt: 1.4e-05, norm rho: 0.01488, rho * u: 0.03923, energy: 0.05183
3986Time: 2, dt: 1.4e-05, norm rho: 0.01431, rho * u: 0.03969, energy: 0.04887
3988+-------------------------------------------+------------------+------------+------------------+
3989| Total wallclock time elapsed | 2.166e+04s 26 | 2.166e+04s | 2.166e+04s 24 |
3991| Section | no. calls | min time rank | avg time | max time rank |
3992+-------------------------------------------+------------------+------------+------------------+
3993| compute errors | 41 | 0.1758s 30 | 0.672s | 1.376s 1 |
3994| compute transport speed | 27748 | 321.3s 34 | 678.8s | 1202s 1 |
3995| output | 41 | 616.3s 32 | 616.4s | 616.4s 34 |
3996| rk time stepping total | 138733 | 1.983e+04s 1 | 2.036e+04s | 2.072e+04s 34 |
3997| rk_stage - integrals L_h | 693665 | 1.052e+04s 32 | 1.248e+04s | 1.387e+04s 19 |
3998| rk_stage - inv mass + vec upd | 693665 | 6404s 10 | 7868s | 1.018e+04s 32 |
3999+-------------------------------------------+------------------+------------+------------------+
4002The "rk time stepping total" part corresponds to a throughput of 2010 MDoFs/s. The
4003overall run time to perform 139k time steps is 20k seconds (5.7 hours) or 7
4004time steps per
second -- not so bad for having nearly 60 million
4005unknowns. More throughput can be achieved by adding more cores to
4009<a name="step_67-Resultsforflowinchannelaroundcylinderin3D"></a><h3>Results for flow in channel around cylinder in 3D</h3>
Switching the channel test case to 3D with 3 global refinements, the output is

Running with 40 MPI processes
4015Vectorization over 8 doubles = 512 bits (AVX512)
4016Number of degrees of freedom: 221,184,000 ( = 5 [vars] x 204,800 [cells] x 216 [dofs/cell/var] )
4020Time: 1.95, dt: 0.00011, norm rho: 0.01131, rho * u: 0.03056, energy: 0.04091
4021Time: 2, dt: 0.00011, norm rho: 0.0119, rho * u: 0.03142, energy: 0.04425
4023+-------------------------------------------+------------------+------------+------------------+
4024| Total wallclock time elapsed | 1.734e+04s 4 | 1.734e+04s | 1.734e+04s 38 |
4026| Section | no. calls | min time rank | avg time | max time rank |
4027+-------------------------------------------+------------------+------------+------------------+
4028| compute errors | 41 | 0.6551s 34 | 3.216s | 7.281s 0 |
4029| compute transport speed | 3546 | 160s 34 | 393.2s | 776.9s 0 |
4030| output | 41 | 1350s 34 | 1353s | 1357s 0 |
4031| rk time stepping total | 17723 | 1.519e+04s 0 | 1.558e+04s | 1.582e+04s 34 |
4032| rk_stage - integrals L_h | 88615 | 1.005e+04s 32 | 1.126e+04s | 1.23e+04s 11 |
4033| rk_stage - inv mass + vec upd | 88615 | 3056s 11 | 4322s | 5759s 32 |
4034+-------------------------------------------+------------------+------------+------------------+
The physics are similar to the 2D case, with a slight motion in the z
direction due to the gravitational force. The throughput per Runge--Kutta
stage in this case is
@f[
\text{throughput} = \frac{n_\mathrm{time steps} n_\mathrm{stages}
n_\mathrm{dofs}}{t_\mathrm{compute}} =
\frac{17723 \cdot 5 \cdot 221.2\,\text{M}}{15580s} = 1258\, \text{MDoFs/s}.
@f]

The throughput is lower than in 2D because the computation of the @f$\mathcal L_h@f$ term
is more expensive. This is due to over-integration with `degree+2` points and
the larger fraction of face integrals (worse volume-to-surface ratio) with
more expensive flux computations. If we only consider the inverse mass matrix
and vector update part, we record a throughput of 4857 MDoFs/s for the 2D case
of the isentropic vortex with 37.7 million unknowns, whereas the 3D case
runs with 4535 MDoFs/s. The performance is similar because both cases are in
fact limited by the memory bandwidth.
If we go to four levels of global refinement, we need to increase the number
of processes to fit everything in memory -- the computation needs around 350
GB of RAM memory in this case. Also, the time it takes to complete 35k time
steps becomes more tolerable by adding additional resources. We therefore use
6 nodes with 40 cores each, resulting in a computation with 240 MPI processes:

Running with 240 MPI processes
4062Vectorization over 8 doubles = 512 bits (AVX512)
4063Number of degrees of freedom: 1,769,472,000 ( = 5 [vars] x 1,638,400 [cells] x 216 [dofs/cell/var] )
Time: 1.95, dt: 5.6e-05, norm rho: 0.01129, rho * u: 0.0306, energy: 0.04086
Time: 2, dt: 5.6e-05, norm rho: 0.01189, rho * u: 0.03145, energy: 0.04417
4070+-------------------------------------------+------------------+------------+------------------+
4071| Total wallclock time elapsed | 5.396e+04s 151 | 5.396e+04s | 5.396e+04s 0 |
| Section | no. calls | min time rank | avg time | max time rank |
4074+-------------------------------------------+------------------+------------+------------------+
4075| compute errors | 41 | 2.632s 178 | 7.221s | 16.56s 0 |
4076| compute transport speed | 7072 | 714s 193 | 1553s | 3351s 0 |
4077| output | 41 | 8065s 176 | 8070s | 8079s 0 |
4078| rk time stepping total | 35350 | 4.25e+04s 0 | 4.43e+04s | 4.515e+04s 193 |
4079| rk_stage - integrals L_h | 176750 | 2.936e+04s 134 | 3.222e+04s | 3.67e+04s 99 |
4080| rk_stage - inv mass + vec upd | 176750 | 7004s 99 | 1.207e+04s | 1.55e+04s 132 |
4081+-------------------------------------------+------------------+------------+------------------+
This simulation had nearly 2 billion unknowns -- quite a large computation
indeed, and still only needed around 1.5 seconds per time step.

<a name="step_67-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
The code presented here straightforwardly extends to adaptive meshes, given
appropriate indicators for setting the refinement flags. Large-scale
adaptivity of a similar solver in the context of the acoustic wave equation
has been achieved by the <a href="https://github.com/kronbichler/exwave">exwave
project</a>. However, in the present context, the benefits of adaptivity are often
limited to early times and effects close to the origin of sound waves, as the
waves eventually reflect and diffract. This leads to steep gradients all over
the place, similar to turbulent flow, and a more or less globally refined mesh.
Another topic that we did not discuss in the results section is a comparison
of different time integration schemes. The program provides four variants of
low-storage Runge--Kutta integrators that each have slightly different
accuracy and stability behavior. Among the schemes implemented here, the
higher-order ones provide additional accuracy but come with slightly lower
efficiency in terms of step size per stage before they violate the CFL
condition. An interesting extension would be to compare the low-storage
variants proposed here with standard Runge--Kutta integrators, or to use vector
operations that are run separately from the mass matrix operation, and compare
the performance.
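To make the notion of "low storage" concrete, the following self-contained
sketch advances a generic ODE system @f$\mathrm{d}\mathbf w/\mathrm{d}t =
f(\mathbf w)@f$ with a two-register (2N) scheme. The three-stage coefficients
shown are the classical Williamson ones and are merely illustrative; they are
not the coefficients of the integrators provided by the program.

@code
#include <array>
#include <cstddef>
#include <vector>

// Sketch of a two-register (2N) low-storage Runge-Kutta step for
// dw/dt = f(w): besides the solution w, only one register r is carried
// across stages, independent of the number of stages. (A scratch vector k
// is used here for clarity; a tuned implementation would let the operator
// evaluation accumulate directly into r.)
template <typename F>
void lsrk_step(std::vector<double> &w, const double dt, const F &f)
{
  // Illustrative coefficients: Williamson's classical 3-stage, 3rd-order scheme.
  constexpr std::array<double, 3> a = {{0., -5. / 9., -153. / 128.}};
  constexpr std::array<double, 3> b = {{1. / 3., 15. / 16., 8. / 15.}};

  std::vector<double> r(w.size(), 0.), k(w.size());
  for (std::size_t stage = 0; stage < a.size(); ++stage)
    {
      f(w, k); // evaluate the right-hand side, the analogue of L_h(w)
      for (std::size_t i = 0; i < w.size(); ++i)
        {
          r[i] = a[stage] * r[i] + dt * k[i]; // update the register
          w[i] += b[stage] * r[i];            // update the solution
        }
    }
}
@endcode

Compared to a classical @f$s@f$-stage Runge--Kutta method that keeps @f$s@f$
stage vectors, only the two vectors `w` and `r` are stored here, which is what
makes such schemes attractive when the solution vector itself is very large.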
<a name="step_67-Moreadvancednumericalfluxfunctionsandskewsymmetricformulations"></a><h4>More advanced numerical flux functions and skew-symmetric formulations</h4>
As mentioned in the introduction, the modified Lax--Friedrichs flux and the
HLL flux employed in this program are only two variants of a large body of
numerical fluxes available in the literature on the Euler equations. One
example is the HLLC (Harten--Lax--van Leer--Contact) flux, which restores the
contact wave that the HLL flux smears out; another is the Roe flux. As
mentioned in the introduction, the effect of numerical fluxes on high-order DG
schemes is debatable (unlike for the case of low-order discretizations).
A related improvement to increase the stability of the solver is to also
consider the spatial integral terms. A shortcoming in the rather naive
implementation used above is the fact that the energy conservation of the
original Euler equations (in the absence of shocks) only holds up to a
discretization error. If the solution is under-resolved, the discretization
error can give rise to an increase in the numerical energy and eventually
render the discretization unstable. This is because of the inexact numerical
integration of the terms in the Euler equations, which both contain rational
nonlinearities and higher-degree content from curved cells. A way out of this
dilemma is offered by so-called skew-symmetric formulations, see @cite Gassner2013 for a
simple variant. Skew symmetry means that switching the role of the solution
@f$\mathbf{w}@f$ and test functions @f$\mathbf{v}@f$ in the weak form produces the
exact negative of the original quantity, apart from some boundary terms. In
the discrete setting, the challenge is to keep this skew symmetry also when
the integrals are only computed approximately (in the continuous case,
skew-symmetry is a consequence of integration by parts). Skew-symmetric
numerical schemes balance spatial derivatives in the conservative form
@f$(\nabla \mathbf v, \mathbf{F}(\mathbf w))_{K}@f$ with contributions in the
convective form @f$(\mathbf v, \tilde{\mathbf{F}}(\mathbf w)\nabla
\mathbf{w})_{K}@f$ for some @f$\tilde{\mathbf{F}}@f$. The precise terms depend on
the equation and the integration formula, and can in some cases be understood
by special skew-symmetric finite difference schemes.
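To give a flavor of what such a splitting looks like in the simplest possible
setting, consider a scalar Burgers-type nonlinearity rather than the full
Euler system. One common skew-symmetric split is
@f[
\frac{\partial}{\partial x}\left(\frac{u^2}{2}\right)
 = \frac{1}{3}\,\frac{\partial u^2}{\partial x}
 + \frac{1}{3}\,u\,\frac{\partial u}{\partial x},
@f]
i.e., a blend of the conservative and the convective form of the same term.
Discretizing the right-hand side rather than the left-hand side is what allows
discrete energy estimates to survive inexact quadrature; the splittings used
for the Euler equations follow the same idea with more involved algebra.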
To get started, interested readers could take a look at an example where a
skew-symmetric DG formulation is implemented with deal.II for a simple advection
equation.
<a name="step_67-Equippingthecodeforsupersoniccalculations"></a><h4>Equipping the code for supersonic calculations</h4>
As mentioned in the introduction, the solution to the Euler equations develops
shocks as the Mach number increases, which require additional mechanisms to
stabilize the scheme, e.g. in the form of limiters. The main challenge besides
actually implementing the limiter or artificial viscosity approach would be to
load-balance the computations, as the additional computations involved for
limiting the oscillations in troubled cells would make them more expensive than the
plain DG cells without limiting. Furthermore, additional numerical fluxes that
better cope with the discontinuities would also be an option.
One ingredient also necessary for supersonic flows is appropriate boundary
conditions. As opposed to the subsonic outflow boundaries discussed in the
introduction and implemented in the program, all characteristics are outgoing
for supersonic outflow boundaries, so we do not want to prescribe any external
data,
@f[
\mathbf{w}^+ = \mathbf{w}^- = \begin{pmatrix} \rho^- \\
(\rho \mathbf u)^- \\ E^- \end{pmatrix},
@f]
i.e., the exterior state is simply a copy of the interior state.
In the code, we would simply add the additional branch (completed here with
the `w_m`/`w_p` names used for the interior and exterior states in that
function)

   else if (supersonic_outflow_boundaries.find(boundary_id) !=
            supersonic_outflow_boundaries.end())
     w_p = w_m; // all characteristics leave the domain: copy the interior state

in the `local_apply_boundary_face()` function.
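For this to compile, the operator class would also need a container holding
the respective boundary ids. A minimal sketch, assuming a plain set and a
setter mirroring the existing boundary-registration functions (both names are
hypothetical here), could look like:

@code
// Hypothetical additions to the operator class (names are illustrative):
// a set of boundary ids suffices, since no exterior data needs to be stored.
std::set<types::boundary_id> supersonic_outflow_boundaries;

void set_supersonic_outflow_boundary(const types::boundary_id boundary_id)
{
  supersonic_outflow_boundaries.insert(boundary_id);
}
@endcode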
<a name="step_67-ExtensiontothelinearizedEulerequations"></a><h4>Extension to the linearized Euler equations</h4>
When the interest in an Euler solution is mostly in the propagation of sound
waves, it often makes sense to linearize the Euler equations around a
background state, i.e., a given density, velocity and energy (or pressure)
field, and only compute the change against these fields. This is the setting
of the wide field of aeroacoustics. Even though the resolution requirements
are sometimes considerably reduced, implementation gets somewhat more
complicated as the linearization gives rise to additional terms. From a code
perspective, in the operator evaluation we also need to equip the code with
the state to linearize against. This information can be provided either by
analytical functions (that are evaluated in terms of the position of the
quadrature points) or by a vector similar to the solution. Based on that
vector, we would create an additional FEEvaluation object to read from it and
provide the values of the field at quadrature points. If the background
velocity is zero and the density is constant, the linearized Euler equations
further simplify and can equivalently be written in the form of the
acoustic wave equation.
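As a rough sketch of the second option, the cell operator could gain a second
evaluator along the following lines; `background` and `background_solution`
are hypothetical names, and the surrounding loop structure and template
parameters are assumed to match those of the existing `phi` object:

@code
// Hypothetical sketch: a second FEEvaluation object that reads the
// linearization state from an extra vector at the same quadrature points
// as the main evaluator phi.
FEEvaluation<dim, degree, n_points_1d, dim + 2, Number> background(data);

for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
  {
    phi.reinit(cell);
    phi.gather_evaluate(src, EvaluationFlags::values);

    background.reinit(cell);
    background.gather_evaluate(background_solution, EvaluationFlags::values);

    for (const unsigned int q : phi.quadrature_point_indices())
      {
        const auto w     = phi.get_value(q);        // perturbation state
        const auto w_bar = background.get_value(q); // linearization state
        // ... assemble the linearized flux from w and w_bar ...
      }
    // ... integrate and distribute to the result vector as before ...
  }
@endcode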
A challenge in the context of sound propagation is often the definition of
boundary conditions, as the computational domain needs to be of finite size,
whereas the actual simulation often spans an infinite (or at least much
larger) physical domain. Conventional Dirichlet or Neumann boundary conditions
give rise to reflections of the sound waves that eventually propagate back to
the region of interest and spoil the solution. Therefore, various variants of
non-reflecting boundary conditions or sponge layers, often in the form of
<a href="https://en.wikipedia.org/wiki/Perfectly_matched_layer">perfectly
matched layers</a> -- where the solution is damped without reflection --
are commonly used.
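One of the simplest members of this family, and a natural first step before a
full PML, is a sponge (absorbing) layer: inside a buffer region adjacent to
the outer boundary, one adds a damping term to the equations, for instance
@f[
\frac{\partial \mathbf{w}}{\partial t} + \nabla \cdot \mathbf{F}(\mathbf{w})
 = -\sigma(\mathbf x)\left(\mathbf{w} - \bar{\mathbf{w}}\right),
@f]
where @f$\bar{\mathbf{w}}@f$ is the far-field (background) state and
@f$\sigma \ge 0@f$ vanishes in the region of interest and grows smoothly
towards the boundary. This is only a sketch of the idea, not a perfectly
matched layer in the strict sense, but it fits naturally into the present
operator evaluation as an additional cell term.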
<a name="step_67-ExtensiontothecompressibleNavierStokesequations"></a><h4>Extension to the compressible Navier-Stokes equations</h4>
The solver presented in this tutorial program can also be extended to the
compressible Navier--Stokes equations by adding viscous terms, as described in
@cite FehnWallKronbichler2019. To retain as much as possible of the performance
obtained here despite the additional cost of elliptic terms, discretized e.g.
via an interior penalty method, one can switch the basis as done in
the @ref step_59 "step-59" tutorial program.
<a name="step_67-Usingcellcentricloopsandsharedmemory"></a><h4>Using cell-centric loops and shared memory</h4>
In this tutorial, we used face-centric loops. Here, cell and face integrals
are treated in separate loops, resulting in multiple writing accesses into the
result vector, which is relatively expensive on modern hardware since writing
operations generally result also in an implicit read operation. Cell-centric
loops, on the other hand, process a cell and, in direct succession, all of its
@f$2d@f$ faces. Although this kind of loop implies that fluxes have
to be computed twice (for each side of an interior face), the fact that the
result vector has to be accessed only once -- and the fact that the resulting
algorithm is free of race conditions and as such perfectly suitable for
shared memory -- can already give a performance boost. If you are interested in these
advanced topics, you can take a look at @ref step_76 "step-76" where we take the present
tutorial and modify it so that we can use these features.
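In terms of the matrix-free infrastructure, the difference boils down to which
entry point the operator evaluation uses. The following sketch contrasts the
two; the callback names are placeholders, and the second call is the
cell-centric entry point that step-76 is built around:

@code
// Face-centric (this tutorial): three callbacks, cell and face integrals
// are performed in separate sweeps, so dst is written several times.
data.loop(cell_operation, inner_face_operation, boundary_face_operation,
          dst, src, /*zero_dst_vector=*/true);

// Cell-centric (cf. step-76): one callback that, for each cell, also
// evaluates the fluxes on all of its faces, so dst is written only once.
data.loop_cell_centric(cell_and_face_operation, dst, src,
                       /*zero_dst_vector=*/true);
@endcode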
<a name="step_67-PlainProg"></a>
<h1> The plain program</h1>
@include "step-67.cc"