  const double g   = 9.81;
  const double rho = 7700;

  values(dim - 1) = -rho * g;


template <int dim>
void BodyForce<dim>::vector_value_list(
  const std::vector<Point<dim>> &points,
  std::vector<Vector<double>>   &value_list) const
{
  const unsigned int n_points = points.size();

  for (unsigned int p = 0; p < n_points; ++p)
    BodyForce<dim>::vector_value(points[p], value_list[p]);
}
<a name="step_18-ThecodeIncrementalBoundaryValuecodeclass"></a>
<h3>The <code>IncrementalBoundaryValues</code> class</h3>
In addition to body forces, movement can be induced by boundary forces
and forced boundary displacement. The latter case is equivalent to forces
being chosen in such a way that they induce a certain displacement.
For quasistatic displacement, typical boundary forces would be pressure
on a body, or tangential friction against another body. We chose a
somewhat simpler case here: we prescribe a certain movement of (parts of)
the boundary, or at least of certain components of the displacement
vector. We describe this by another vector-valued function that, for a
given point on the boundary, returns the prescribed displacement.
Since we have a time-dependent problem, the displacement increment of the
boundary equals the displacement accumulated during the length of the
timestep. The class therefore has to know both the present time and the
length of the present time step, and can then approximate the incremental
displacement as the present velocity times the present timestep.
For the purposes of this program, we choose a simple form of boundary
displacement: we displace the top boundary with constant velocity
downwards. The rest of the boundary is either going to be fixed (and is
then described using an object of type <code>Functions::ZeroFunction</code>)
or free (Neumann-type, in which case nothing special has to be done). The
implementation of the class describing the constant downward motion should
then be obvious using the knowledge we gained through all the previous
example programs:
template <int dim>
class IncrementalBoundaryValues : public Function<dim>
{
public:
  IncrementalBoundaryValues(const double present_time,
                            const double present_timestep);

  virtual void vector_value(const Point<dim> &p,
                            Vector<double>   &values) const override;

  virtual void
  vector_value_list(const std::vector<Point<dim>> &points,
                    std::vector<Vector<double>>   &value_list) const override;

private:
  const double velocity;
  const double present_time;
  const double present_timestep;
};


template <int dim>
IncrementalBoundaryValues<dim>::IncrementalBoundaryValues(
  const double present_time,
  const double present_timestep)
  : Function<dim>(dim)
  , velocity(.08)
  , present_time(present_time)
  , present_timestep(present_timestep)
{}
template <int dim>
void IncrementalBoundaryValues<dim>::vector_value(const Point<dim> & /*p*/,
                                                  Vector<double> &values) const
{
  AssertDimension(values.size(), dim);

  values    = 0;
  values(2) = -present_timestep * velocity;
}
template <int dim>
void IncrementalBoundaryValues<dim>::vector_value_list(
  const std::vector<Point<dim>> &points,
  std::vector<Vector<double>>   &value_list) const
{
  const unsigned int n_points = points.size();

  for (unsigned int p = 0; p < n_points; ++p)
    IncrementalBoundaryValues<dim>::vector_value(points[p], value_list[p]);
}
<a name="step_18-ImplementationofthecodeTopLevelcodeclass"></a>
<h3>Implementation of the <code>TopLevel</code> class</h3>
Now for the implementation of the main class. First, we initialize the
stress-strain tensor, which we have declared as a static const
variable. We chose Lamé constants that are appropriate for steel:
template <int dim>
const SymmetricTensor<4, dim> TopLevel<dim>::stress_strain_tensor =
  get_stress_strain_tensor<dim>(/*lambda = */ 9.695e10,
                                /*mu     = */ 7.617e10);
<a name="step_18-Thepublicinterface"></a>
<h4>The public interface</h4>
The next step is the definition of constructors and destructors. There
are no surprises here: we choose linear and continuous finite elements
for each of the <code>dim</code> vector components of the solution, and a
Gaussian quadrature formula with 2 points in each coordinate
direction. The destructor should be obvious:
template <int dim>
TopLevel<dim>::TopLevel()
  : triangulation(MPI_COMM_WORLD)
  , fe(FE_Q<dim>(1) ^ dim)
  , dof_handler(triangulation)
  , quadrature_formula(fe.degree + 1)
  , present_time(0.0)
  , present_timestep(1.0)
  , end_time(10.0)
  , timestep_no(0)
  , mpi_communicator(MPI_COMM_WORLD)
  , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
  , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
  , pcout(std::cout, this_mpi_process == 0)
{}
template <int dim>
TopLevel<dim>::~TopLevel()
{
  dof_handler.clear();
}
The last of the public functions is the one that directs all the work,
<code>run()</code>. It initializes the variables that describe where in
time we presently are, then runs the first time step, then loops over all
the other time steps. Note that for simplicity we use a fixed time step,
whereas a more sophisticated program would of course have to choose it in
some more reasonable way adaptively:
template <int dim>
void TopLevel<dim>::run()
{
  do_initial_timestep();

  while (present_time < end_time)
    do_timestep();
}
<a name="step_18-TopLevelcreate_coarse_grid"></a>
<h4>TopLevel::create_coarse_grid</h4>
The next function in the order in which they were declared above is the
one that creates the coarse grid from which we start. For this example
program, we want to compute the deformation of a cylinder under axial
compression. The first step therefore is to generate a mesh for a
cylinder of length 3 and with inner and outer radii of 0.8 and 1,
respectively. Fortunately, there is a library function for such a mesh.
In a second step, we have to associate boundary conditions with the
upper and lower faces of the cylinder. We choose a boundary indicator of
0 for the boundary faces whose midpoints have a z-coordinate of 0 (the
bottom face), an indicator of 1 for z=3 (the top face); finally, we use
boundary indicator 2 for all faces on the inside of the cylinder shell,
and 3 for those on the outside.
template <int dim>
void TopLevel<dim>::create_coarse_grid()
{
  const double inner_radius = 0.8, outer_radius = 1;
  GridGenerator::cylinder_shell(triangulation, 3, inner_radius, outer_radius);
  for (const auto &cell : triangulation.active_cell_iterators())
    for (const auto &face : cell->face_iterators())
      if (face->at_boundary())
        {
          const Point<dim> face_center = face->center();

          if (face_center[2] == 0)
            face->set_boundary_id(0);
          else if (face_center[2] == 3)
            face->set_boundary_id(1);
          else if (std::sqrt(face_center[0] * face_center[0] +
                             face_center[1] * face_center[1]) <
                   (inner_radius + outer_radius) / 2)
            face->set_boundary_id(2);
          else
            face->set_boundary_id(3);
        }
Once all this is done, we can refine the mesh once globally:
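Spelled out, this is the familiar single call, a sketch using the
<code>triangulation</code> member variable as in the rest of this class:

@code
triangulation.refine_global(1);
@endcode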
As the final step, we need to set up a clean state of the data that we
store in the quadrature points on all cells that are treated on the
present processor:

  setup_quadrature_point_history();
<a name="step_18-TopLevelsetup_system"></a>
<h4>TopLevel::setup_system</h4>
The next function is the one that sets up the data structures for a given
mesh. This is done in much the same way as in @ref step_17 "step-17":
distribute the degrees of freedom, then sort these degrees of freedom in
such a way that each processor gets a contiguous chunk of them. Note that
the subdivision into chunks for each processor is handled in the functions
that create or refine grids, unlike in the previous example program (the
point where this happens is mostly a matter of taste; here, we chose to do
it when grids are created since in the <code>do_initial_timestep</code> and
<code>do_timestep</code> functions we want to output the number of cells
on each processor at a point where we haven't called the present function
yet).
template <int dim>
void TopLevel<dim>::setup_system()
{
  dof_handler.distribute_dofs(fe);
  locally_owned_dofs = dof_handler.locally_owned_dofs();
  locally_relevant_dofs =
    DoFTools::extract_locally_relevant_dofs(dof_handler);
The next step is to set up constraints due to hanging nodes. This has
been handled many times before:

  hanging_node_constraints.clear();
  DoFTools::make_hanging_node_constraints(dof_handler,
                                          hanging_node_constraints);
  hanging_node_constraints.close();
And then we have to set up the matrix. Here we deviate from
@ref step_17 "step-17", in which we simply used PETSc's ability to just
know about the size of the matrix and later allocate those nonzero
elements that are being written to. While this works just fine from a
correctness viewpoint, it is not at all efficient: if we don't give PETSc
a clue as to which elements are written to, it is (at least at the time of
this writing) unbearably slow when we set the elements in the matrix for
the first time (i.e. in the first timestep). Later on, when the elements
have been allocated, everything is much faster. In experiments we made,
the first timestep can be accelerated by almost two orders of magnitude if
we instruct PETSc which elements will be used and which are not.
To do so, we first generate the sparsity pattern of the matrix we are
going to work with, and make sure that the condensation of hanging node
constraints adds the necessary additional entries in the sparsity pattern:

  DynamicSparsityPattern sparsity_pattern(locally_relevant_dofs);
  DoFTools::make_sparsity_pattern(dof_handler,
                                  sparsity_pattern,
                                  hanging_node_constraints,
                                  /*keep constrained dofs*/ false);
  SparsityTools::distribute_sparsity_pattern(sparsity_pattern,
                                             locally_owned_dofs,
                                             mpi_communicator,
                                             locally_relevant_dofs);
Note that we have used the <code>DynamicSparsityPattern</code> class
here that was already introduced in @ref step_11 "step-11", rather than the
<code>SparsityPattern</code> class that we have used in all other
cases. The reason for this is that for the latter class to work we have
to give an initial upper bound for the number of entries in each row, a
task that is traditionally done by
<code>DoFHandler::max_couplings_between_dofs()</code>. However, this
function suffers from a serious problem: it has to compute an upper
bound to the number of nonzero entries in each row, and this is a
rather complicated task, in particular in 3d. In effect, while it is
quite accurate in 2d, it often comes up with much too large a number in
3d, and in that case the <code>SparsityPattern</code> allocates much
too much memory at first, often several 100 MBs. This is later
corrected when <code>DoFTools::make_sparsity_pattern</code> is called
and we realize that we don't need all that much memory, but at that time
it is already too late: for large problems, the temporary allocation of
too much memory can lead to out-of-memory situations.
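For illustration only, that traditional approach (not used in this
program) would look roughly like the following sketch:

@code
// Static sparsity pattern with an up-front per-row estimate; in 3d,
// max_couplings_between_dofs() tends to overestimate this badly.
SparsityPattern static_pattern;
static_pattern.reinit(dof_handler.n_dofs(),
                      dof_handler.n_dofs(),
                      dof_handler.max_couplings_between_dofs());
DoFTools::make_sparsity_pattern(dof_handler, static_pattern);
static_pattern.compress();
@endcode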
In order to avoid this, we resort to the
<code>DynamicSparsityPattern</code> class, which does not require any
up-front estimate on the number of nonzero entries per row. It therefore
only ever allocates as much memory as it needs at any given time, and we
can build it even for large 3d problems.
It is also worth noting that, due to the specifics of this program, the
sparsity pattern we construct is global, i.e. comprises all degrees of
freedom whether they will be owned by the processor we are on or another
one (in case this program is run in %parallel via MPI). This of course is
not optimal -- it limits the size of the problems we can solve, since
storing the entire sparsity pattern (even if only for a short time) on
each processor does not scale well. However, there are several more places
in the program in which we do this, for example we always keep the global
triangulation and DoF handler objects around, even if we only work on part
of them. At present, deal.II does not have the necessary facilities to
completely distribute these objects (a task that, indeed, is very hard to
achieve with adaptive meshes, since well-balanced subdivisions of a domain
tend to become unbalanced as the mesh is adaptively refined).
With this data structure, we can then go to the PETSc sparse matrix and
tell it to preallocate all the entries we will later want to write to:

  system_matrix.reinit(locally_owned_dofs,
                       locally_owned_dofs,
                       sparsity_pattern,
                       mpi_communicator);
After this point, no further explicit knowledge of the sparsity pattern
is required any more and we can let the <code>sparsity_pattern</code>
variable go out of scope without any problem.
The last task in this function is then only to reset the right hand
side vector as well as the solution vector to its correct size;
remember that the solution vector is a local one, unlike the right hand
side that is a distributed %parallel one and therefore needs to know
the MPI communicator over which it is supposed to transmit messages:

  system_rhs.reinit(locally_owned_dofs, mpi_communicator);
  incremental_displacement.reinit(dof_handler.n_dofs());
<a name="step_18-TopLevelassemble_system"></a>
<h4>TopLevel::assemble_system</h4>
Again, assembling the system matrix and right hand side follows the same
structure as in many example programs before. In particular, it is mostly
equivalent to @ref step_17 "step-17", except for the different right hand
side that now only has to take into account internal stresses. In
addition, assembling the matrix is made significantly more transparent by
using the <code>SymmetricTensor</code> class: note the elegance of forming
the scalar products of symmetric tensors of rank 2 and 4. The
implementation is also more general, since it would work just as well if
we were not using an isotropic elasticity tensor.

The first part of the assembly routine is as always:
template <int dim>
void TopLevel<dim>::assemble_system()
{
  system_rhs    = 0;
  system_matrix = 0;

  FEValues<dim> fe_values(fe,
                          quadrature_formula,
                          update_values | update_gradients |
                            update_quadrature_points | update_JxW_values);

  const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
  const unsigned int n_q_points    = quadrature_formula.size();

  FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
  Vector<double>     cell_rhs(dofs_per_cell);

  std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

  BodyForce<dim>              body_force;
  std::vector<Vector<double>> body_force_values(n_q_points,
                                                Vector<double>(dim));
As in @ref step_17 "step-17", we only need to loop over all cells that
belong to the present processor:
  for (const auto &cell : dof_handler.active_cell_iterators())
    if (cell->is_locally_owned())
      {
        cell_matrix = 0;
        cell_rhs    = 0;

        fe_values.reinit(cell);
Then loop over all indices i, j and quadrature points and assemble
the system matrix contributions from this cell. Note how we obtain the
symmetric gradient (strain) of a shape function at a given quadrature
point from the <code>FEValues</code> object, and the elegance with which
we form the triple contraction <code>eps_phi_i : C : eps_phi_j</code>;
the latter needs to be compared to the clumsy computations needed in
@ref step_17 "step-17", both in the introduction as well as in the
respective place in the program:
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          for (unsigned int j = 0; j < dofs_per_cell; ++j)
            for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
              {
                const SymmetricTensor<2, dim>
                  eps_phi_i = get_strain(fe_values, i, q_point),
                  eps_phi_j = get_strain(fe_values, j, q_point);

                cell_matrix(i, j) += (eps_phi_i *
                                      stress_strain_tensor *
                                      eps_phi_j) *
                                     fe_values.JxW(q_point);
              }
Then also assemble the local right hand side contributions. For
this, we need to access the prior stress value in this quadrature
point. To get it, we use the user pointer of this cell that
points into the global array to the quadrature point data
corresponding to the first quadrature point of the present cell,
and then add an offset corresponding to the index of the
quadrature point we presently consider:
        const PointHistory<dim> *local_quadrature_points_data =
          reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
In addition, we need the values of the external body forces at
the quadrature points on this cell:

        body_force.vector_value_list(fe_values.get_quadrature_points(),
                                     body_force_values);
Then we can loop over all degrees of freedom on this cell and
compute local contributions to the right hand side:
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          {
            const unsigned int component_i =
              fe.system_to_component_index(i).first;

            for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
              {
                const SymmetricTensor<2, dim> &old_stress =
                  local_quadrature_points_data[q_point].old_stress;

                cell_rhs(i) +=
                  (body_force_values[q_point](component_i) *
                     fe_values.shape_value(i, q_point) -
                   old_stress * get_strain(fe_values, i, q_point)) *
                  fe_values.JxW(q_point);
              }
          }
Now that we have the local contributions to the linear system, we
need to transfer it into the global objects. This is done exactly
as in @ref step_17 "step-17":

        cell->get_dof_indices(local_dof_indices);

        hanging_node_constraints.distribute_local_to_global(cell_matrix,
                                                            cell_rhs,
                                                            local_dof_indices,
                                                            system_matrix,
                                                            system_rhs);
The last step is to again fix up boundary values, just as we already
did in previous programs. A slight complication is that the
<code>apply_boundary_values</code> function wants to have a solution
vector compatible with the matrix and right hand side (i.e. here a
distributed %parallel vector, rather than the sequential vector we use
in this program) in order to preset the entries of the solution vector
with the correct boundary values. We provide such a compatible vector
in the form of a temporary vector which we then copy into the
sequential one.
We make up for this complication by showing how boundary values can be
used flexibly: following the way we create the triangulation, there are
three distinct boundary indicators used to describe the domain,
corresponding to the bottom and top faces, as well as the inner/outer
surfaces. We would like to impose boundary conditions of the following
type: The inner and outer cylinder surfaces are free of external
forces, a fact that corresponds to natural (Neumann-type) boundary
conditions for which we don't have to do anything. At the bottom, we
want no movement at all, corresponding to the cylinder being clamped or
cemented in at this part of the boundary. At the top, however, we want
a prescribed vertical downward motion compressing the cylinder; in
addition, we only want to restrict the vertical movement, but not the
horizontal ones -- one can think of this situation as a well-greased
plate sitting on top of the cylinder pushing it downwards: the atoms of
the cylinder are forced to move downward, but they are free to slide
horizontally along the plate.
The way to describe this is as follows: for boundary indicator zero
(bottom face) we use a dim-dimensional zero function representing no
motion in any coordinate direction. For the boundary with indicator 1
(top surface), we use the <code>IncrementalBoundaryValues</code> class,
but we specify an additional argument to the
<code>VectorTools::interpolate_boundary_values</code> function denoting
which vector components it should apply to; this is a vector of bools
for each vector component and because we only want to restrict vertical
motion, it has only its last component set:
  const FEValuesExtractors::Scalar z_component(dim - 1);
  std::map<types::global_dof_index, double> boundary_values;
  VectorTools::interpolate_boundary_values(dof_handler,
                                           0,
                                           Functions::ZeroFunction<dim>(dim),
                                           boundary_values);
  VectorTools::interpolate_boundary_values(
    dof_handler,
    1,
    IncrementalBoundaryValues<dim>(present_time, present_timestep),
    boundary_values,
    fe.component_mask(z_component));

  PETScWrappers::MPI::Vector tmp(locally_owned_dofs, mpi_communicator);
  MatrixTools::apply_boundary_values(
    boundary_values, system_matrix, tmp, system_rhs, false);
  incremental_displacement = tmp;
<a name="step_18-TopLevelsolve_timestep"></a>
<h4>TopLevel::solve_timestep</h4>
The next function is the one that controls what all has to happen within
a timestep. The order of things should be relatively self-explanatory
from the function names:

template <int dim>
void TopLevel<dim>::solve_timestep()
{
  pcout << "    Assembling system..." << std::flush;
  assemble_system();
  pcout << " norm of rhs is " << system_rhs.l2_norm() << std::endl;

  const unsigned int n_iterations = solve_linear_problem();

  pcout << "    Solver converged in " << n_iterations << " iterations."
        << std::endl;

  pcout << "    Updating quadrature point data..." << std::flush;
  update_quadrature_point_history();
  pcout << std::endl;
}
<a name="step_18-TopLevelsolve_linear_problem"></a>
<h4>TopLevel::solve_linear_problem</h4>
Solving the linear system again works mostly as before. The only
difference is that we want to only keep a complete local copy of the
solution vector instead of the distributed one that we get as output from
PETSc's solver routines. To this end, we declare a local temporary
variable for the distributed vector and initialize it with the contents
of the local variable (remember that the
<code>apply_boundary_values</code> function called in
<code>assemble_system</code> preset the values of boundary nodes in this
vector), solve with it, and at the end of the function copy it again into
the complete local vector that we declared as a member variable. Hanging
node constraints are then distributed only on the local copy,
i.e. independently of each other on each of the processors:
template <int dim>
unsigned int TopLevel<dim>::solve_linear_problem()
{
  PETScWrappers::MPI::Vector distributed_incremental_displacement(
    locally_owned_dofs, mpi_communicator);
  distributed_incremental_displacement = incremental_displacement;

  SolverControl solver_control(dof_handler.n_dofs(),
                               1e-16 * system_rhs.l2_norm());

  PETScWrappers::PreconditionBlockJacobi preconditioner(system_matrix);

  PETScWrappers::SolverCG cg(solver_control);
  cg.solve(system_matrix,
           distributed_incremental_displacement,
           system_rhs,
           preconditioner);

  incremental_displacement = distributed_incremental_displacement;

  hanging_node_constraints.distribute(incremental_displacement);

  return solver_control.last_step();
}
<a name="step_18-TopLeveloutput_results"></a>
<h4>TopLevel::output_results</h4>
This function generates the graphical output in .vtu format as explained
in the introduction. Each process will only work on the cells it owns,
and then write the result into a file of its own. Additionally, processor
0 will write the record files that reference all the .vtu files.

The crucial part of this function is to give the <code>DataOut</code>
class a way to only work on the cells that the present process owns.
template <int dim>
void TopLevel<dim>::output_results() const
{
  DataOut<dim> data_out;
  data_out.attach_dof_handler(dof_handler);
Then, just as in @ref step_17 "step-17", define the names of solution
variables (which here are the displacement increments) and queue the
solution vector for output. Note in the following switch how we make sure
that if the space dimension should be unhandled that we throw an exception
saying that we haven't implemented this case yet (another case of
defensive programming):
  std::vector<std::string> solution_names;
  switch (dim)
    {
      case 1:
        solution_names.emplace_back("delta_x");
        break;
      case 2:
        solution_names.emplace_back("delta_x");
        solution_names.emplace_back("delta_y");
        break;
      case 3:
        solution_names.emplace_back("delta_x");
        solution_names.emplace_back("delta_y");
        solution_names.emplace_back("delta_z");
        break;
      default:
        DEAL_II_NOT_IMPLEMENTED();
    }

  data_out.add_data_vector(incremental_displacement, solution_names);
The next thing is that we wanted to output something like the average
norm of the stresses that we have stored in each cell. This may seem
complicated, since on the present processor we only store the stresses
in quadrature points on those cells that actually belong to the present
process. In other words, it seems as if we can't compute the average
stresses for all cells. However, remember that our class derived from
<code>DataOut</code> only iterates over those cells that actually do
belong to the present processor, i.e. we don't have to compute anything
for all the other cells as this information would not be touched. The
following little loop does this. We enclose the entire block into a
pair of braces to make sure that the iterator variables do not remain
accidentally visible beyond the end of the block in which they are
declared:
  Vector<double> norm_of_stress(triangulation.n_active_cells());
  {
    // Loop over all the cells...
    for (auto &cell : triangulation.active_cell_iterators())
      if (cell->is_locally_owned())
        {
          // On these cells, add up the stresses over all quadrature
          // points...
          SymmetricTensor<2, dim> accumulated_stress;
          for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
            accumulated_stress +=
              reinterpret_cast<PointHistory<dim> *>(cell->user_pointer())[q]
                .old_stress;

          // ...then write the norm of the average to their destination:
          norm_of_stress(cell->active_cell_index()) =
            (accumulated_stress / quadrature_formula.size()).norm();
        }
      // And on the cells that we are not interested in, set the respective
      // value in the vector to a bogus value (norms must be positive, and a
      // large negative value should catch your eye) in order to make sure
      // that if we were somehow wrong about our assumption that these
      // elements would not appear in the output file, that we would find out
      // by looking at the graphical output:
      else
        norm_of_stress(cell->active_cell_index()) = -1e+20;
  }
Finally attach this vector as well to be treated for output:

  data_out.add_data_vector(norm_of_stress, "norm_of_stress");
As a last piece of data, let us also add the partitioning of the domain
into subdomains associated with the processors if this is a parallel
job. This works in the exact same way as in the @ref step_17 "step-17"
program:

  std::vector<types::subdomain_id> partition_int(
    triangulation.n_active_cells());
  GridTools::get_subdomain_association(triangulation, partition_int);
  const Vector<double> partitioning(partition_int.begin(),
                                    partition_int.end());
  data_out.add_data_vector(partitioning, "partitioning");
Finally, with all this data, we can instruct deal.II to munge the
information and produce some intermediate data structures that contain
all these solution and other data vectors:

  data_out.build_patches();
Let us call a function that opens the necessary output files and writes
the data we have generated into them. The function automatically
constructs the file names from the given directory name (the first
argument) and file name base (second argument). It augments the resulting
string by pieces that result from the time step number and a "piece
number" that corresponds to a part of the overall domain that can consist
of one or more subdomains.

The function also writes a record file (with suffix `.pvtu`) for Paraview
that describes how all of these output files combine into the data for
this single time step:

  const std::string pvtu_filename = data_out.write_vtu_with_pvtu_record(
    "./", "solution", timestep_no, mpi_communicator, 4);
The record files must be written only once and not by each processor,
so we do this on processor 0:

  if (this_mpi_process == 0)
    {
      // Finally, we write the paraview record, that references all .pvtu
      // files and their respective time. Note that the variable
      // times_and_names is declared static, so it will retain the entries
      // from the previous timesteps.
      static std::vector<std::pair<double, std::string>> times_and_names;
      times_and_names.emplace_back(present_time, pvtu_filename);
      std::ofstream pvd_output("solution.pvd");
      DataOutBase::write_pvd_record(pvd_output, times_and_names);
    }
}
<a name="step_18-TopLeveldo_initial_timestep"></a>
<h4>TopLevel::do_initial_timestep</h4>
This and the next function handle the overall structure of the first and
following timesteps, respectively. The first timestep is slightly more
involved because we want to compute it multiple times on successively
refined meshes, each time starting from a clean state. At the end of
these computations, in which we compute the incremental displacements
each time, we use the last results obtained for the incremental
displacements to compute the resulting stress updates and move the mesh
accordingly. On this new mesh, we then output the solution and any
additional data we consider important.

All this is interspersed by generating output to the console to update
the person watching the screen on what is going on. As in
@ref step_17 "step-17", the use of <code>pcout</code> instead of
<code>std::cout</code> makes sure that only one of the parallel processes
is actually writing to the console, without having to explicitly code an
if-statement in each place where we generate output:
template <int dim>
void TopLevel<dim>::do_initial_timestep()
{
  present_time += present_timestep;
  ++timestep_no;
  pcout << "Timestep " << timestep_no << " at time " << present_time
        << std::endl;

  for (unsigned int cycle = 0; cycle < 2; ++cycle)
    {
      pcout << "  Cycle " << cycle << ':' << std::endl;

      if (cycle == 0)
        create_coarse_grid();
      else
        refine_initial_grid();

      pcout << "    Number of active cells:       "
            << triangulation.n_active_cells() << " (by partition:";
      for (unsigned int p = 0; p < n_mpi_processes; ++p)
        pcout << (p == 0 ? ' ' : '+')
              << (GridTools::count_cells_with_subdomain_association(
                   triangulation, p));
      pcout << ')' << std::endl;

      setup_system();

      pcout << "    Number of degrees of freedom: " << dof_handler.n_dofs()
            << " (by partition:";
      for (unsigned int p = 0; p < n_mpi_processes; ++p)
        pcout << (p == 0 ? ' ' : '+')
              << (DoFTools::count_dofs_with_subdomain_association(dof_handler,
                                                                  p));
      pcout << ')' << std::endl;

      solve_timestep();
    }

  move_mesh();
  output_results();

  pcout << std::endl;
}
<a name="step_18-TopLeveldo_timestep"></a>
<h4>TopLevel::do_timestep</h4>
Subsequent timesteps are simpler, and probably do not require any more
documentation given the explanations for the previous function above:
template <int dim>
void TopLevel<dim>::do_timestep()
{
  present_time += present_timestep;
  ++timestep_no;
  pcout << "Timestep " << timestep_no << " at time " << present_time
        << std::endl;
  if (present_time > end_time)
    {
      present_timestep -= (present_time - end_time);
      present_time = end_time;
    }

  solve_timestep();

  move_mesh();
  output_results();

  pcout << std::endl;
}
<a name="step_18-TopLevelrefine_initial_grid"></a>
<h4>TopLevel::refine_initial_grid</h4>
The following function is called when solving the first time step on
successively refined meshes. After each iteration, it computes a
refinement criterion, refines the mesh, and sets up the history variables
in each quadrature point again to a clean state.
template <int dim>
void TopLevel<dim>::refine_initial_grid()
{

First, let each process compute error indicators for the cells it owns:

  Vector<float> error_per_cell(triangulation.n_active_cells());
  KellyErrorEstimator<dim>::estimate(
    dof_handler,
    QGauss<dim - 1>(fe.degree + 1),
    std::map<types::boundary_id, const Function<dim> *>(),
    incremental_displacement,
    error_per_cell,
    ComponentMask(),
    nullptr,
    MultithreadInfo::n_threads(),
    this_mpi_process);
Then set up a global vector into which we merge the local indicators
from each of the %parallel processes:

  const unsigned int n_local_cells =
    triangulation.n_locally_owned_active_cells();

  PETScWrappers::MPI::Vector distributed_error_per_cell(
    mpi_communicator, triangulation.n_active_cells(), n_local_cells);

  for (unsigned int i = 0; i < error_per_cell.size(); ++i)
    if (error_per_cell(i) != 0)
      distributed_error_per_cell(i) = error_per_cell(i);
  distributed_error_per_cell.compress(VectorOperation::insert);
Once we have that, copy it back into local copies on all processors and
refine the mesh accordingly:

  error_per_cell = distributed_error_per_cell;
  GridRefinement::refine_and_coarsen_fixed_number(triangulation,
                                                  error_per_cell,
                                                  0.35,
                                                  0.03);
  triangulation.execute_coarsening_and_refinement();
Finally, set up quadrature point data again on the new mesh, and only
on those cells that we have determined to be ours:

  setup_quadrature_point_history();
<a name="step_18-TopLevelmove_mesh"></a>
<h4>TopLevel::move_mesh</h4>
At the end of each time step, we move the nodes of the mesh according to
the incremental displacements computed in this time step. To do this, we
keep a vector of flags that indicate for each vertex whether we have
already moved it around, and then loop over all cells and move those
vertices of the cell that have not been moved yet. It is worth noting
that it does not matter from which of the cells adjacent to a vertex we
move this vertex: since we compute the displacement using a continuous
finite element, the displacement field is continuous as well and we can
compute the displacement of a given vertex from each of the adjacent
cells. We only have to make sure that we move each node exactly once,
which is why we keep the vector of flags.
There are two noteworthy things in this function. First, how we get the
displacement field at a given vertex using the
<code>cell-@>vertex_dof_index(v,d)</code> function that returns the index
of the <code>d</code>th degree of freedom at vertex <code>v</code> of the
given cell. In the present case, displacement in the k-th coordinate
direction corresponds to the k-th component of the finite element. Using a
function like this bears a certain risk, because it uses knowledge of the
order of elements that we have taken together for this program in the
<code>FESystem</code> element. If we decided to add an additional
variable, for example a pressure variable for stabilization, and happened
to insert it as the first variable of the element, then the computation
below will start to produce nonsensical results. In addition, this
computation rests on other assumptions: first, that the element we use
has, indeed, degrees of freedom that are associated with vertices. This
is indeed the case for the present Q1 element, as it would be for all Qp
elements of polynomial order <code>p</code>. However, it would not hold
for discontinuous elements, or elements for mixed formulations. Secondly,
it also rests on the assumption that the displacement at a vertex is
determined solely by the value of the degree of freedom associated with
this vertex; in other words, all shape functions corresponding to other
degrees of freedom are zero at this particular vertex. Again, this is the
case for the present element, but is not so for all elements that are
presently available in deal.II. Despite its risks, we choose to use this
way in order to present a way to query individual degrees of freedom
associated with vertices.
In this context, it is instructive to point out what a more general way
would be. For general finite elements, the way to go would be to take a
quadrature formula with the quadrature points in the vertices of a
cell. The <code>QTrapezoid</code> formula for the trapezoidal rule does
exactly this. With this quadrature formula, we would then initialize an
<code>FEValues</code> object in each cell, and use the
<code>FEValues::get_function_values</code> function to obtain the values
of the solution function in the quadrature points, i.e. the vertices of
the cell. These are the only values that we really need, i.e. we are not
at all interested in the weights (or the <code>JxW</code> values)
associated with this particular quadrature formula, and this can be
specified as the last argument in the constructor to
<code>FEValues</code>. The only point of minor inconvenience in this
scheme is that we have to figure out which quadrature point corresponds
to the vertex we consider at present, as they may or may not be ordered
in the same order.
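To make this concrete, here is a minimal sketch of that alternative,
assuming the usual member variables of this program (<code>fe</code>,
<code>dof_handler</code>, <code>incremental_displacement</code>); it is
not part of the actual program:

@code
// Evaluate the displacement field at the vertices of each cell by using a
// quadrature formula whose points are exactly the cell's vertices.
QTrapezoid<dim> vertex_quadrature;
FEValues<dim>   fe_vertex_values(fe, vertex_quadrature, update_values);

std::vector<Vector<double>> vertex_displacements(vertex_quadrature.size(),
                                                 Vector<double>(dim));

for (const auto &cell : dof_handler.active_cell_iterators())
  {
    fe_vertex_values.reinit(cell);
    fe_vertex_values.get_function_values(incremental_displacement,
                                         vertex_displacements);
    // vertex_displacements[q] now holds the dim displacement components
    // at the q-th quadrature point, i.e. at one of the cell's vertices --
    // possibly in a different order than cell->vertex(q), as noted above.
  }
@endcode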
This inconvenience could be avoided if finite elements have support
points on vertices (which the one here has; for the concept of support
points, see @ref GlossSupport "support points"). For such a case, one
could construct a custom quadrature rule using
FiniteElement::get_unit_support_points(). The first
<code>cell->n_vertices()*fe.dofs_per_vertex</code>
quadrature points will then correspond to the vertices of the cell and
are ordered consistent with <code>cell-@>vertex(i)</code>, taking into
account that support points for vector elements will be duplicated
<code>fe.dofs_per_vertex</code> times.
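A sketch of this variant, under the same assumptions as the previous one:

@code
// Build a (dummy) quadrature rule whose points are the unit support points
// of the element; for the FE_Q-based system used here, the first
// cell->n_vertices() * fe.dofs_per_vertex of them sit on the vertices,
// ordered consistently with cell->vertex(i).
Quadrature<dim> support_quadrature(fe.get_unit_support_points());
FEValues<dim>   fe_support_values(fe, support_quadrature, update_values);
@endcode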
Another point worth explaining about this short function is the way in
which the triangulation class exports information about its vertices:
through the <code>Triangulation::n_vertices</code> function, it
advertises how many vertices there are in the triangulation. Not all of
them are actually in use all the time -- some are left-overs from cells
that have been coarsened previously and remain in existence since deal.II
never changes the number of a vertex once it has come into existence,
even if vertices with lower number go away. Secondly, the location
returned by <code>cell-@>vertex(v)</code> is not only a read-only object
of type <code>Point@<dim@></code>, but in fact a reference that can be
written to. This allows moving around the nodes of a mesh with relative
ease, but it is worth pointing out that it is the responsibility of an
application program using this feature to make sure that the resulting
cells are still useful, i.e. are not distorted so much that the cell is
degenerated (indicated, for example, by negative Jacobians). Note that we
do not have any provisions in this function to actually ensure this, we
just have faith.
After this lengthy introduction, here are the full 20 or so lines of
this function:
template <int dim>
void TopLevel<dim>::move_mesh()
{
  pcout << "    Moving mesh..." << std::endl;

  std::vector<bool> vertex_touched(triangulation.n_vertices(), false);
  for (auto &cell : dof_handler.active_cell_iterators())
    for (const auto v : cell->vertex_indices())
      if (vertex_touched[cell->vertex_index(v)] == false)
        {
          vertex_touched[cell->vertex_index(v)] = true;

          Point<dim> vertex_displacement;
          for (unsigned int d = 0; d < dim; ++d)
            vertex_displacement[d] =
              incremental_displacement(cell->vertex_dof_index(v, d));

          cell->vertex(v) += vertex_displacement;
        }
}
<a name="step_18-TopLevelsetup_quadrature_point_history"></a>
<h4>TopLevel::setup_quadrature_point_history</h4>
At the beginning of our computations, we needed to set up initial values
of the history variables, such as the existing stresses in the material,
that we store in each quadrature point. As mentioned above, we use the
<code>user_pointer</code> for this that is available in each cell.
To put this into larger perspective, we note that if we had previously
available stresses in our model (which we assume do not exist for the
purpose of this program), then we would need to interpolate the field of
preexisting stresses to the quadrature points. Likewise, if we were to
simulate elasto-plastic materials with hardening/softening, then we would
have to store additional history variables like the present yield stress
or the accumulated plastic strains in each quadrature
point. Pre-existing hardening or weakening would then be implemented by
interpolating these variables in the present function as well.
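Purely as an illustration (this is not part of the program), such an
initialization from a hypothetical, user-provided field
<code>initial_stress</code> (a callable mapping a point to a
<code>SymmetricTensor@<2,dim@></code>) might look like the following; it
would have to run after the user pointers have been set up as described
below:

@code
FEValues<dim> fe_values(fe, quadrature_formula, update_quadrature_points);

for (auto &cell : dof_handler.active_cell_iterators())
  if (cell->is_locally_owned())
    {
      fe_values.reinit(cell);

      // initial_stress is a hypothetical user-provided function.
      PointHistory<dim> *local_qp_data =
        reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
      for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
        local_qp_data[q].old_stress =
          initial_stress(fe_values.quadrature_point(q));
    }
@endcode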
template <int dim>
void TopLevel<dim>::setup_quadrature_point_history()
{

For good measure, we set all user pointers of all cells, whether
ours or not, to the null pointer. This way, if we ever access the user
pointer of a cell which we should not have accessed, a segmentation
fault will let us know that this should not have happened:

  triangulation.clear_user_data();
Next, allocate the quadrature objects that are within the responsibility
of this processor. This, of course, equals the number of cells that
belong to this processor times the number of quadrature points our
quadrature formula has on each cell. Since the `resize()` function does
not actually shrink the amount of allocated memory if the requested new
size is smaller than the old size, we resort to a trick to first free all
memory, and then reallocate it: we declare an empty vector as a temporary
variable and then swap the contents of the old vector and this temporary
variable. This makes sure that the `quadrature_point_history` is now
really empty, and we can let the temporary variable that now holds the
previous contents of the vector go out of scope and be destroyed. In the
next step we can then re-allocate as many elements as we need, with the
vector default-initializing the `PointHistory` objects, which includes
setting the stress variables to zero.
  {
    std::vector<PointHistory<dim>> tmp;
    quadrature_point_history.swap(tmp);
  }
  quadrature_point_history.resize(
    triangulation.n_locally_owned_active_cells() * quadrature_formula.size());
Finally loop over all cells again and set the user pointers from the
cells that belong to the present processor to point to the first
quadrature point objects corresponding to this cell in the vector of
such objects:

  unsigned int history_index = 0;
  for (auto &cell : triangulation.active_cell_iterators())
    if (cell->is_locally_owned())
      {
        cell->set_user_pointer(&quadrature_point_history[history_index]);
        history_index += quadrature_formula.size();
      }
At the end, for good measure make sure that our count of elements was
correct and that we have both used up all objects we allocated
previously, and not point to any objects beyond the end of the
vector. Such defensive programming strategies are always good checks to
avoid accidental errors and to guard against future changes to this
function that forget to update all uses of a variable at the same
time. Recall that constructs using the <code>Assert</code> macro are
optimized away in optimized mode, so do not affect the run time of
optimized runs:

  Assert(history_index == quadrature_point_history.size(),
         ExcInternalError());
}
<a name="step_18-TopLevelupdate_quadrature_point_history"></a>
<h4>TopLevel::update_quadrature_point_history</h4>
At the end of each time step, we should have computed an incremental
displacement update so that the material in its new configuration
accommodates for the difference between the external body and boundary
forces applied during this time step minus the forces exerted through
preexisting internal stresses. In order to have the preexisting
stresses available at the next time step, we therefore have to update the
preexisting stresses with the stresses due to the incremental
displacement computed during the present time step. Ideally, the
resulting sum of internal stresses would exactly counter all external
forces. Indeed, a simple experiment can make sure that this is so: if we
choose boundary conditions and body forces to be time independent, then
the forcing terms (the sum of external forces and internal stresses)
should be exactly zero. If you make this experiment, you will realize
from the output of the norm of the right hand side in each time step that
this is almost the case: it is not exactly zero, since in the first time
step the incremental displacement and stress updates were computed
relative to the undeformed mesh, which was then deformed. In the second
time step, we again compute displacement and stress updates, but this
time in the deformed mesh -- there, the resulting updates are very small
but not quite zero. This can be iterated, and in each such iteration the
residual, i.e. the norm of the right hand side vector, is reduced; if one
makes this little experiment, one realizes that the norm of this residual
decays exponentially with the number of iterations, and after an initial
very rapid decline is reduced by roughly a factor of about 3.5 in each
iteration (for one testcase I looked at, other testcases, and other
numbers of unknowns change the factor, but not the exponential decay).
In a sense, this can then be considered as a quasi-timestepping scheme to
resolve the nonlinear problem of solving large-deformation elasticity on
a mesh that is moved along in a Lagrangian manner.
Another complication is that the existing (old) stresses are defined on
the old mesh, which we will move around after updating the stresses. If
this mesh update involves rotations of the cell, then we need to also
rotate the updated stress, since it was computed relative to the
coordinate system of the old cell.
Thus, what we need is the following: on each cell which the present
processor owns, we need to extract the old stress from the data stored
with each quadrature point, compute the stress update, add the two
together, and then rotate the result together with the incremental
rotation computed from the incremental displacement at the present
quadrature point. We will detail these steps below:
template <int dim>
void TopLevel<dim>::update_quadrature_point_history()
{

First, set up an <code>FEValues</code> object by which we will evaluate
the incremental displacements and the gradients thereof at the
quadrature points, together with a vector that will hold this
information:

  FEValues<dim> fe_values(fe,
                          quadrature_formula,
                          update_values | update_gradients);

  std::vector<std::vector<Tensor<1, dim>>> displacement_increment_grads(
    quadrature_formula.size(), std::vector<Tensor<1, dim>>(dim));
Then loop over all cells and do the job in the cells that belong to our
processor:

  for (auto &cell : dof_handler.active_cell_iterators())
    if (cell->is_locally_owned())
      {
Next, get a pointer to the quadrature point history data local to
the present cell, and, as a defensive measure, make sure that
this pointer is within the bounds of the global array:

        PointHistory<dim> *local_quadrature_points_history =
          reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
        Assert(local_quadrature_points_history >=
                 &quadrature_point_history.front(),
               ExcInternalError());
        Assert(local_quadrature_points_history <=
                 &quadrature_point_history.back(),
               ExcInternalError());
Then initialize the <code>FEValues</code> object on the present
cell, and extract the gradients of the displacement at the
quadrature points for later computation of the strains:

        fe_values.reinit(cell);
        fe_values.get_function_gradients(incremental_displacement,
                                         displacement_increment_grads);
Then loop over the quadrature points of this cell:

        for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
          {
On each quadrature point, compute the strain increment from
the gradients, and multiply it by the stress-strain tensor to
get the stress update. Then add this update to the already
existing stress at this point:

            const SymmetricTensor<2, dim> new_stress =
              (local_quadrature_points_history[q].old_stress +
               (stress_strain_tensor *
                get_strain(displacement_increment_grads[q])));
Finally, we have to rotate the result. For this, we first
have to compute a rotation matrix at the present quadrature
point from the incremental displacements. In fact, it can be
computed from the gradients, and we already have a function
for that purpose:

            const Tensor<2, dim> rotation =
              get_rotation_matrix(displacement_increment_grads[q]);
Note that the result, a rotation matrix, is in general not a
symmetric tensor of rank 2, so we must store it as a full tensor.

With this rotation matrix, we can compute the rotated tensor
by contraction from the left and right, after we expand the
symmetric tensor <code>new_stress</code> into a full tensor:

            const SymmetricTensor<2, dim> rotated_new_stress =
              symmetrize(transpose(rotation) *
                         static_cast<Tensor<2, dim>>(new_stress) * rotation);
Note that while the result of the multiplication of these
three matrices should be symmetric, it is not due to floating
point round off: we get an asymmetry on the order of 1e-16 of
the off-diagonal elements of the result. When assigning the
result to a <code>SymmetricTensor</code>, the constructor of
that class checks the symmetry and realizes that it isn't
exactly symmetric; it will then raise an exception. To avoid
that, we explicitly symmetrize the result to make it exactly
symmetric.
The result of all these operations is then written back into
the original place:

            local_quadrature_points_history[q].old_stress =
              rotated_new_stress;
This ends the project specific namespace <code>Step18</code>. The rest is
as usual and as already shown in @ref step_17 "step-17": A
<code>main()</code> function that initializes and terminates PETSc, calls
the classes that do the actual work, and makes sure that we catch all
exceptions that propagate up to this point:
int main(int argc, char **argv)
{
  try
    {
      using namespace dealii;
      using namespace Step18;

      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      TopLevel<3> elastic_problem;
      elastic_problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
<a name="step_18-Results"></a><h1>Results</h1>
Running the program takes a good while if one uses debug mode; it takes about
eleven minutes on my i7 desktop. Fortunately, the version compiled with
optimizations is much faster; the program only takes about a minute and a half
after recompiling with the command <tt>make release</tt> on the same machine, a
much more reasonable time.
If run, the program prints the following output, explaining what it is
doing during all that time:
[ 66%] Built target step-18
[100%] Run step-18 with Release configuration
Timestep 1 at time 1
  Cycle 0:
    Number of active cells:       3712 (by partition: 3712)
    Number of degrees of freedom: 17226 (by partition: 17226)
    Assembling system... norm of rhs is 1.88062e+10
    Solver converged in 103 iterations.
    Updating quadrature point data...
  Cycle 1:
    Number of active cells:       12812 (by partition: 12812)
    Number of degrees of freedom: 51738 (by partition: 51738)
    Assembling system... norm of rhs is 1.86145e+10
    Solver converged in 121 iterations.
    Updating quadrature point data...
    Moving mesh...

Timestep 2 at time 2
    Assembling system... norm of rhs is 1.84169e+10
    Solver converged in 122 iterations.
    Updating quadrature point data...
    Moving mesh...

Timestep 3 at time 3
    Assembling system... norm of rhs is 1.82355e+10
    Solver converged in 122 iterations.
    Updating quadrature point data...
    Moving mesh...

Timestep 4 at time 4
    Assembling system... norm of rhs is 1.80728e+10
    Solver converged in 117 iterations.
    Updating quadrature point data...
    Moving mesh...

Timestep 5 at time 5
    Assembling system... norm of rhs is 1.79318e+10
    Solver converged in 116 iterations.
    Updating quadrature point data...
    Moving mesh...

Timestep 6 at time 6
    Assembling system... norm of rhs is 1.78171e+10
    Solver converged in 115 iterations.
    Updating quadrature point data...
    Moving mesh...

Timestep 7 at time 7
    Assembling system... norm of rhs is 1.7737e+10
    Solver converged in 112 iterations.
    Updating quadrature point data...
    Moving mesh...

Timestep 8 at time 8
    Assembling system... norm of rhs is 1.77127e+10
    Solver converged in 111 iterations.
    Updating quadrature point data...
    Moving mesh...

Timestep 9 at time 9
    Assembling system... norm of rhs is 1.78207e+10
    Solver converged in 113 iterations.
    Updating quadrature point data...
    Moving mesh...

Timestep 10 at time 10
    Assembling system... norm of rhs is 1.83544e+10
    Solver converged in 115 iterations.
    Updating quadrature point data...
    Moving mesh...

[100%] Built target run
make run  176.82s user 0.15s system 198% cpu 1:28.94 total
In other words, it is computing on 12,000 cells and with some 52,000
unknowns. Not a whole lot, but enough for a coupled three-dimensional
problem to keep a computer busy for a while. At the end of the day,
this is what we have for output:
-rw-r--r-- 1 drwells users 1706059 Feb 13 19:36 solution-0010.000.vtu
-rw-r--r-- 1 drwells users     761 Feb 13 19:36 solution-0010.pvtu
-rw-r--r-- 1 drwells users      33 Feb 13 19:36 solution-0010.visit
-rw-r--r-- 1 drwells users 1707907 Feb 13 19:36 solution-0009.000.vtu
-rw-r--r-- 1 drwells users     761 Feb 13 19:36 solution-0009.pvtu
-rw-r--r-- 1 drwells users      33 Feb 13 19:36 solution-0009.visit
-rw-r--r-- 1 drwells users 1703771 Feb 13 19:35 solution-0008.000.vtu
-rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0008.pvtu
-rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0008.visit
-rw-r--r-- 1 drwells users 1693671 Feb 13 19:35 solution-0007.000.vtu
-rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0007.pvtu
-rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0007.visit
-rw-r--r-- 1 drwells users 1681847 Feb 13 19:35 solution-0006.000.vtu
-rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0006.pvtu
-rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0006.visit
-rw-r--r-- 1 drwells users 1670115 Feb 13 19:35 solution-0005.000.vtu
-rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0005.pvtu
-rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0005.visit
-rw-r--r-- 1 drwells users 1658559 Feb 13 19:35 solution-0004.000.vtu
-rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0004.pvtu
-rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0004.visit
-rw-r--r-- 1 drwells users 1639983 Feb 13 19:35 solution-0003.000.vtu
-rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0003.pvtu
-rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0003.visit
-rw-r--r-- 1 drwells users 1625851 Feb 13 19:35 solution-0002.000.vtu
-rw-r--r-- 1 drwells users     761 Feb 13 19:35 solution-0002.pvtu
-rw-r--r-- 1 drwells users      33 Feb 13 19:35 solution-0002.visit
-rw-r--r-- 1 drwells users 1616035 Feb 13 19:34 solution-0001.000.vtu
-rw-r--r-- 1 drwells users     761 Feb 13 19:34 solution-0001.pvtu
-rw-r--r-- 1 drwells users      33 Feb 13 19:34 solution-0001.visit
If we visualize these files with VisIt or Paraview, we get to see the full
picture of the disaster our forced compression wreaks on the cylinder
(colors in the images encode the norm of the stress in the material):
<div class="threecolumn" style="width: 80%">
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0002.0000.png">
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0005.0000.png">
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0007.0000.png">
    </div>
  </div>
</div>

<div class="threecolumn" style="width: 80%">
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0008.0000.png">
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0009.0000.png">
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0010.0000.png">
    </div>
  </div>
</div>

As is clearly visible, as we keep compressing the cylinder, it starts
to bow out near the fully constrained bottom surface and, after about eight
time units, buckle in an azimuthally symmetric manner.

Although the result appears plausible for the symmetric geometry and loading,
it remains to be established whether or not the computation is fully converged.
In order to see whether it is, we ran the program again with one more global
refinement at the beginning and with the time step halved. This would have
taken a very long time on a single machine, so we used a proper workstation and
ran it on 16 processors in parallel. The beginning of the output now looks
like this:
@code
Timestep 1 at time 0.5
  Cycle 0:
    Number of active cells: 29696 (by partition: 1808+1802+1894+1881+1870+1840+1884+1810+1876+1818+1870+1884+1854+1903+1816+1886)
    Number of degrees of freedom: 113100 (by partition: 6936+6930+7305+7116+7326+6869+7331+6786+7193+6829+7093+7162+6920+7280+6843+7181)
    Assembling system... norm of rhs is 1.10765e+10
    Solver converged in 209 iterations.
    Updating quadrature point data...
  Cycle 1:
    Number of active cells: 102034 (by partition: 6387+6202+6421+6341+6408+6201+6428+6428+6385+6294+6506+6244+6417+6527+6299+6546)
    Number of degrees of freedom: 359337 (by partition: 23255+21308+24774+24019+22304+21415+22430+22184+22298+21796+22396+21592+22325+22553+21977+22711)
    Assembling system... norm of rhs is 1.35759e+10
    Solver converged in 268 iterations.
    Updating quadrature point data...

Timestep 2 at time 1
  Assembling system... norm of rhs is 1.34674e+10
  Solver converged in 267 iterations.
  Updating quadrature point data...

Timestep 3 at time 1.5
  Assembling system... norm of rhs is 1.33607e+10
  Solver converged in 265 iterations.
  Updating quadrature point data...

Timestep 4 at time 2
  Assembling system... norm of rhs is 1.32558e+10
  Solver converged in 263 iterations.
  Updating quadrature point data...

[...]

Timestep 20 at time 10
  Assembling system... norm of rhs is 1.47755e+10
  Solver converged in 425 iterations.
  Updating quadrature point data...
@endcode

That's quite a good number of unknowns, given that we are in 3d. The output of
this program consists of 16 files for each time step:
@code
$ ls -l solution-0001*
-rw-r--r-- 1 wellsd2 user 761065 Feb 13 21:09 solution-0001.000.vtu
-rw-r--r-- 1 wellsd2 user 759277 Feb 13 21:09 solution-0001.001.vtu
-rw-r--r-- 1 wellsd2 user 761217 Feb 13 21:09 solution-0001.002.vtu
-rw-r--r-- 1 wellsd2 user 761605 Feb 13 21:09 solution-0001.003.vtu
-rw-r--r-- 1 wellsd2 user 756917 Feb 13 21:09 solution-0001.004.vtu
-rw-r--r-- 1 wellsd2 user 752669 Feb 13 21:09 solution-0001.005.vtu
-rw-r--r-- 1 wellsd2 user 735217 Feb 13 21:09 solution-0001.006.vtu
-rw-r--r-- 1 wellsd2 user 750065 Feb 13 21:09 solution-0001.007.vtu
-rw-r--r-- 1 wellsd2 user 760273 Feb 13 21:09 solution-0001.008.vtu
-rw-r--r-- 1 wellsd2 user 777265 Feb 13 21:09 solution-0001.009.vtu
-rw-r--r-- 1 wellsd2 user 772469 Feb 13 21:09 solution-0001.010.vtu
-rw-r--r-- 1 wellsd2 user 760833 Feb 13 21:09 solution-0001.011.vtu
-rw-r--r-- 1 wellsd2 user 782241 Feb 13 21:09 solution-0001.012.vtu
-rw-r--r-- 1 wellsd2 user 748905 Feb 13 21:09 solution-0001.013.vtu
-rw-r--r-- 1 wellsd2 user 738413 Feb 13 21:09 solution-0001.014.vtu
-rw-r--r-- 1 wellsd2 user 762133 Feb 13 21:09 solution-0001.015.vtu
-rw-r--r-- 1 wellsd2 user   1421 Feb 13 21:09 solution-0001.pvtu
-rw-r--r-- 1 wellsd2 user    364 Feb 13 21:09 solution-0001.visit
@endcode

Here are, first, the mesh on which we compute as well as the partitioning
among the 16 processors:

<div class="twocolumn" style="width: 80%">
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-000mesh.png"
           alt="Discretization">
    </div>
    <div class="text" align="center">
      Discretization
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0002.p.png"
           alt="Parallel partitioning">
    </div>
    <div class="text" align="center">
      Parallel partitioning
    </div>
  </div>
</div>

Finally, here is the same output as we have shown before for the much smaller
problem:

<div class="threecolumn" style="width: 80%">
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0002.s.png">
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0005.s.png">
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0007.s.png">
    </div>
  </div>
</div>
<div class="threecolumn" style="width: 80%">
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0008.s.png">
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0009.s.png">
    </div>
  </div>
  <div class="parent">
    <div class="img" align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0010.s.png">
    </div>
  </div>
</div>

As before, we observe that at high axial compression the cylinder begins
to buckle, but this time ultimately collapses on itself. In contrast to our
first run, towards the end of the simulation the deflection pattern becomes
nonsymmetric (the central bulge deflects laterally). The model clearly does not
provide for this (all our forces and boundary deflections are symmetric) but the
effect is probably physically correct anyway: in reality, small inhomogeneities
in the body's material properties would lead it to buckle to one side
to evade the forcing; in numerical simulations, small perturbations
such as numerical round-off or an inexact solution of a linear system
by an iterative solver could have the same effect. Another typical source for
asymmetries in adaptive computations is that only a certain fraction of cells
is refined in each step, which may lead to asymmetric meshes even if the
original coarse mesh was symmetric.

If one compares this with the previous run, the results are both qualitatively
and quantitatively different. The previous computation was
therefore certainly not converged, though we can't say for sure anything about
the present one. One would need an even finer computation to find out. However,
the point may be moot: looking at the last picture in detail, it is pretty
obvious that not only is the linear small deformation model we chose completely
inadequate, but for a realistic simulation we would also need to make sure that
the body does not intersect itself during deformation (if we continued
compressing the cylinder we would observe some self-intersection).
Without such a formulation we cannot expect anything to make physical sense,
even if it produces nice pictures!

<a name="step_18-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

The program as is does not really solve an equation that has many applications
in practice: quasi-static material deformation based on a purely elastic law
is almost boring. However, the program may serve as the starting point for
more interesting experiments, and that indeed was the initial motivation for
writing it. Here are some suggestions of what the program is missing and in
what direction it may be extended:

<a name="step_18-Plasticitymodels"></a><h5>Plasticity models</h5>

The most obvious extension is to use a more
realistic material model for large-scale quasistatic deformation. The natural
choice for this would be plasticity, in which a nonlinear relationship between
stress and strain replaces equation <a href="#step_18.stress-strain">[stress-strain]</a>. Plasticity
models are usually rather complicated to program since the stress-strain
dependence is generally non-smooth. The material can be thought of as being able
to withstand only a maximal stress (the yield stress) after which it starts to
“flow”. A mathematical description of this can be given in the form of a
variational inequality, which alternatively can be treated as minimizing the
energy
@f[
  (\varepsilon(\mathbf{u}), C\varepsilon(\mathbf{u}))_{\Omega}
  - (\mathbf{f}, \mathbf{u})_{\Omega} - (\mathbf{b}, \mathbf{u})_{\Gamma_N},
@f]
subject to the constraint
@f[
  f(\sigma(\mathbf{u})) \le 0
@f]
on the stress. This extension makes the problem to be solved in each time step
nonlinear, so we need another loop within each time step; a sketch of what this
loop could look like is shown below.
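
Here is a minimal, heavily simplified illustration of such an inner
Newton-type loop. This is not part of step-18; the helper functions, the
update vector, and the tolerance named here are hypothetical placeholders:

@code
    // Hypothetical sketch of the extra nonlinear loop a plasticity model
    // would add inside each time step.
    for (unsigned int newton_step = 0; newton_step < max_newton_steps; ++newton_step)
      {
        assemble_system ();                  // tangent matrix and residual
        if (system_rhs.l2_norm () < nonlinear_tolerance)
          break;                             // residual small enough: converged

        solve_linear_problem ();             // compute the Newton update
        incremental_displacement += newton_update;

        update_quadrature_point_data ();     // "return mapping": project the
                                             // stress back onto f(sigma) <= 0
      }
@endcode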

Without going into further details of this model, we refer to the excellent
book by Simo and Hughes on “Computational Inelasticity” for a
comprehensive overview of computational strategies for solving plasticity
models. Alternatively, a brief but concise description of an algorithm for
plasticity is given in an article by S. Commend, A. Truty, and Th. Zimmermann.

<a name="step_18-Stabilizationissues"></a><h5>Stabilization issues</h5>

The formulation we have chosen, i.e. using
piecewise (bi-, tri-)linear elements for all components of the displacement
vector, and treating the stress as a variable dependent on the displacement, is
appropriate for most materials. However, this so-called displacement-based
formulation becomes unstable and exhibits spurious modes for incompressible or
nearly-incompressible materials. While fluids are usually not elastic (in most
cases, the stress depends on velocity gradients, not displacement gradients,
although there are exceptions such as electro-rheologic fluids), there are a
few solids that are nearly incompressible, for example rubber. Another case is
that many plasticity models ultimately let the material become incompressible,
although this is outside the scope of the present program.

Incompressibility is characterized by Poisson's ratio
@f[
  \nu = \frac{\lambda}{2(\lambda+\mu)},
@f]
where @f$\lambda,\mu@f$ are the Lamé constants of the material.
Physical constraints indicate that @f$-1\le \nu\le \frac 12@f$ (the condition
also follows from mathematical stability considerations). If @f$\nu@f$
approaches @f$\frac 12@f$, then the material becomes incompressible. In that
case, pure displacement-based formulations are no longer appropriate for the
solution of such problems, and stabilization techniques have to be employed
for a stable and accurate solution. The book and paper cited above give
indications as to how to do this, but there is also a large volume of
literature on this subject; a good start to get an overview of the topic can
be found in the references of the paper by H.-Y. Duan and Q. Lin; @cite DL05.
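
As a quick sanity check, one can evaluate this formula for a given pair of
Lamé constants. The following self-contained snippet uses illustrative values
(placeholders, not the ones from this program) and simply prints @f$\nu@f$:

@code
    #include <iostream>

    int main()
    {
      // Illustrative Lamé constants in Pascal; placeholders only.
      const double lambda = 9.7e10;
      const double mu     = 7.6e10;

      // Poisson's ratio nu = lambda / (2 (lambda + mu)); values close to 1/2
      // signal (near-)incompressibility, where displacement-based
      // formulations become unstable.
      const double nu = lambda / (2 * (lambda + mu));
      std::cout << "nu = " << nu << std::endl; // about 0.28 here: safely compressible
    }
@endcode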

<a name="step_18-Refinementduringtimesteps"></a><h5>Refinement during timesteps</h5>

In the present form, the program
only refines the initial mesh a number of times, but then never again. For any
kind of realistic simulation, one would want to extend this so that the mesh
is refined and coarsened every few time steps instead. This is not hard to do,
in fact, but has been left for future tutorial programs or as an exercise, if
you wish.

The main complication one has to overcome is that one has to
transfer the data that is stored in the quadrature points of the cells of the
old mesh to the new mesh, preferably by some sort of projection scheme. The
general approach to this would go like this:

- At the beginning, the data is only available in the quadrature points of
  individual cells, not as a finite element field that is defined everywhere.

- So let us find a finite element field that <i>is</i> defined everywhere so
  that we can later interpolate it to the quadrature points of the new
  mesh. In general, it will be difficult to find a continuous finite element
  field that matches the values in the quadrature points exactly because the
  number of degrees of freedom of these fields does not match the number of
  quadrature points there are, and the nodal values of this global field will
  either be over- or underdetermined. But it is usually not very difficult to
  find a discontinuous field that matches the values in the quadrature points;
  for example, if you have a QGauss(2) quadrature formula (i.e. 4 points per
  cell in 2d, 8 points in 3d), then one would use a finite element of kind
  FE_DGQ(1), i.e. bi-/tri-linear functions, as these have 4 degrees of freedom
  per cell in 2d and 8 in 3d.

- There are functions that can make this conversion from individual points to
  a global field simpler. The following piece of pseudo-code should help if
  you use a QGauss(2) quadrature formula. Note that the multiplication by the
  projection matrix below takes a vector of scalar components, i.e., we can only
  convert one set of scalars at a time from the quadrature points to the degrees
  of freedom and vice versa. So we need to store each component of stress
  separately, which requires <code>dim*dim</code> vectors. We'll store this set
  of vectors in a 2D array to make it easier to read off components in the same
  way you would the stress tensor. Thus, we'll loop over the components of
  stress on each cell and store these values in the global history field. (The
  prefix <code>history_</code> indicates that we work with quantities related
  to the history variables defined in the quadrature points.)

  @code
      FE_DGQ<dim>     history_fe (1);
      DoFHandler<dim> history_dof_handler (triangulation);
      history_dof_handler.distribute_dofs (history_fe);

      std::vector< std::vector< Vector<double> > >
        history_field (dim, std::vector< Vector<double> >(dim)),
        local_history_values_at_qpoints (dim, std::vector< Vector<double> >(dim)),
        local_history_fe_values (dim, std::vector< Vector<double> >(dim));

      for (unsigned int i=0; i<dim; ++i)
        for (unsigned int j=0; j<dim; ++j)
          {
            history_field[i][j].reinit (history_dof_handler.n_dofs());
            local_history_values_at_qpoints[i][j].reinit (quadrature.size());
            local_history_fe_values[i][j].reinit (history_fe.n_dofs_per_cell());
          }

      FullMatrix<double> qpoint_to_dof_matrix (history_fe.n_dofs_per_cell(),
                                               quadrature.size());
      FETools::compute_projection_from_quadrature_points_matrix
        (history_fe,
         quadrature, quadrature,
         qpoint_to_dof_matrix);

      typename DoFHandler<dim>::active_cell_iterator
        cell    = dof_handler.begin_active(),
        endc    = dof_handler.end(),
        dg_cell = history_dof_handler.begin_active();

      for (; cell!=endc; ++cell, ++dg_cell)
        {
          PointHistory<dim> *local_quadrature_points_history
            = reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());

          Assert (local_quadrature_points_history >= &quadrature_point_history.front(),
                  ExcInternalError());
          Assert (local_quadrature_points_history < &quadrature_point_history.back(),
                  ExcInternalError());

          for (unsigned int i=0; i<dim; ++i)
            for (unsigned int j=0; j<dim; ++j)
              {
                // Extract the (i,j) stress component at each quadrature point...
                for (unsigned int q=0; q<quadrature.size(); ++q)
                  local_history_values_at_qpoints[i][j](q)
                    = local_quadrature_points_history[q].old_stress[i][j];

                // ...project it onto the local FE_DGQ(1) space...
                qpoint_to_dof_matrix.vmult (local_history_fe_values[i][j],
                                            local_history_values_at_qpoints[i][j]);

                // ...and write the result into the global history field.
                dg_cell->set_dof_values (local_history_fe_values[i][j],
                                         history_field[i][j]);
              }
        }
  @endcode

- Now that we have a global field, we can refine the mesh and transfer the
  <code>history_field</code> vector as usual, e.g. using the SolutionTransfer
  class to interpolate everything from the old to the new mesh (see the sketch
  below).
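
  A minimal sketch of this transfer, not taken from step-18 itself; it follows
  the classic SolutionTransfer pattern with the two-argument
  <code>interpolate()</code>, and exact signatures may differ between deal.II
  versions:

  @code
      // Transfer of a single component history_field[i][j]; in practice one
      // would loop over i and j or use the std::vector overloads of the
      // SolutionTransfer member functions.
      SolutionTransfer<dim> solution_transfer (history_dof_handler);

      triangulation.prepare_coarsening_and_refinement ();
      solution_transfer.prepare_for_coarsening_and_refinement (history_field[i][j]);
      triangulation.execute_coarsening_and_refinement ();

      history_dof_handler.distribute_dofs (history_fe);
      Vector<double> interpolated_history_field (history_dof_handler.n_dofs());
      solution_transfer.interpolate (history_field[i][j],
                                     interpolated_history_field);
  @endcode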

- In a final step, we have to get the data back from the now interpolated
  global field to the quadrature points on the new mesh. The following code
  will do that:

  @code
      FullMatrix<double> dof_to_qpoint_matrix (quadrature.size(),
                                               history_fe.n_dofs_per_cell());
      FETools::compute_interpolation_to_quadrature_points_matrix
        (history_fe,
         quadrature,
         dof_to_qpoint_matrix);

      typename DoFHandler<dim>::active_cell_iterator
        cell    = dof_handler.begin_active(),
        endc    = dof_handler.end(),
        dg_cell = history_dof_handler.begin_active();

      for (; cell != endc; ++cell, ++dg_cell)
        {
          PointHistory<dim> *local_quadrature_points_history
            = reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());

          Assert (local_quadrature_points_history >= &quadrature_point_history.front(),
                  ExcInternalError());
          Assert (local_quadrature_points_history < &quadrature_point_history.back(),
                  ExcInternalError());

          for (unsigned int i=0; i<dim; ++i)
            for (unsigned int j=0; j<dim; ++j)
              {
                // Read the interpolated field on the new cell...
                dg_cell->get_dof_values (history_field[i][j],
                                         local_history_fe_values[i][j]);

                // ...evaluate it at the quadrature points...
                dof_to_qpoint_matrix.vmult (local_history_values_at_qpoints[i][j],
                                            local_history_fe_values[i][j]);

                // ...and store the values back in the per-cell history data.
                for (unsigned int q=0; q<quadrature.size(); ++q)
                  local_quadrature_points_history[q].old_stress[i][j]
                    = local_history_values_at_qpoints[i][j](q);
              }
        }
  @endcode

It becomes a bit more complicated once we run the program in parallel, since
then each process only stores this data for the cells it owned on the old
mesh. That said, using a parallel vector for <code>history_field</code> will
do the trick if you put a call to <code>compress()</code> after the transfer
from quadrature points into the global vector.
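
For instance, assuming one used a distributed vector type such as
TrilinosWrappers::MPI::Vector for each component of
<code>history_field</code> (an assumption, not what the sketch above does),
the transfer loop would be followed by something like:

@code
    // After all locally owned cells have written their contributions into
    // the distributed vectors, exchange the data between processes. The
    // insert operation (rather than add) matches the semantics of the
    // set_dof_values() calls above.
    for (unsigned int i=0; i<dim; ++i)
      for (unsigned int j=0; j<dim; ++j)
        history_field[i][j].compress (VectorOperation::insert);
@endcode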

<a name="step_18-Ensuringmeshregularity"></a><h5>Ensuring mesh regularity</h5>

At present, the program makes no attempt
to make sure that a cell, after moving its vertices at the end of the time
step, still has a valid geometry (i.e., that its Jacobian determinant is
positive and bounded away from zero everywhere). It is, in fact, not very hard
to set boundary values and forcing terms in such a way that one gets distorted
and inverted cells rather quickly. Certainly, in some cases of large
deformation, this is unavoidable with a mesh of finite mesh size, but in some
other cases this should be preventable by appropriate mesh refinement and/or a
reduction of the time step size. The program does not do that, but a more
sophisticated version definitely should employ some sort of heuristic defining
what amount of deformation of cells is acceptable, and what isn't; one simple
building block for such a heuristic is sketched below.
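
A minimal sketch of such a check, not part of the program: after the vertices
have been moved, verify that no cell volume has (nearly) collapsed. The
threshold <code>smallest_acceptable_volume</code> is a hypothetical,
user-chosen quantity:

@code
    // Flag time steps that distort the mesh too much: a cell whose measure
    // drops below the (user-chosen) threshold is degenerate or inverted.
    for (const auto &cell : triangulation.active_cell_iterators())
      AssertThrow (cell->measure() > smallest_acceptable_volume,
                   ExcMessage ("Cell degenerated or inverted after mesh motion; "
                               "consider refining the mesh or reducing the "
                               "time step size."));
@endcode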

<a name="step_18-PlainProg"></a>
<h1> The plain program</h1>
@include "step-18.cc"