Reference documentation for deal.II version 9.2.0
step-18.h
1437  * Vector<double> &values) const
1438  * {
1439  * Assert(values.size() == dim, ExcDimensionMismatch(values.size(), dim));
1440  *
1441  * const double g = 9.81;
1442  * const double rho = 7700;
1443  *
1444  * values = 0;
1445  * values(dim - 1) = -rho * g;
1446  * }
1447  *
1448  *
1449  *
1450  * template <int dim>
1451  * void BodyForce<dim>::vector_value_list(
1452  * const std::vector<Point<dim>> &points,
1453  * std::vector<Vector<double>> & value_list) const
1454  * {
1455  * const unsigned int n_points = points.size();
1456  *
1457  * Assert(value_list.size() == n_points,
1458  * ExcDimensionMismatch(value_list.size(), n_points));
1459  *
1460  * for (unsigned int p = 0; p < n_points; ++p)
1461  * BodyForce<dim>::vector_value(points[p], value_list[p]);
1462  * }
1463  *
1464  *
1465  *
1466  * @endcode
1467  *
1468  *
1469  * <a name="ThecodeIncrementalBoundaryValuecodeclass"></a>
1470  * <h3>The <code>IncrementalBoundaryValue</code> class</h3>
1471  *
1472 
1473  *
1474  * In addition to body forces, movement can be induced by boundary forces
1475  * and forced boundary displacement. The latter case is equivalent to forces
1476  * being chosen in such a way that they induce a certain displacement.
1477  *
1478 
1479  *
1480  * For quasistatic displacement, typical boundary forces would be pressure
1481  * on a body, or tangential friction against another body. We chose a
1482  * somewhat simpler case here: we prescribe a certain movement of (parts of)
1483  * the boundary, or at least of certain components of the displacement
1484  * vector. We describe this by another vector-valued function that, for a
1485  * given point on the boundary, returns the prescribed displacement.
1486  *
1487 
1488  *
1489  * Since we have a time-dependent problem, the displacement increment of the
1490  * boundary equals the displacement accumulated during the length of the
1491  * timestep. The class therefore has to know both the present time and the
1492  * length of the present time step, and can then approximate the incremental
1493  * displacement as the present velocity times the present timestep.
1494  *
1495 
1496  *
1497  * For the purposes of this program, we choose a simple form of boundary
1498  * displacement: we displace the top boundary with constant velocity
1499  * downwards. The rest of the boundary is either going to be fixed (and is
1500  * then described using an object of type
1501  * <code>Functions::ZeroFunction</code>) or free (Neumann-type, in which case
1502  * nothing special has to be done). The implementation of the class
1503  * describing the constant downward motion should then be obvious using the
1504  * knowledge we gained through all the previous example programs:
1505  *
1506  * @code
1507  * template <int dim>
1508  * class IncrementalBoundaryValues : public Function<dim>
1509  * {
1510  * public:
1511  * IncrementalBoundaryValues(const double present_time,
1512  * const double present_timestep);
1513  *
1514  * virtual void vector_value(const Point<dim> &p,
1515  * Vector<double> & values) const override;
1516  *
1517  * virtual void
1518  * vector_value_list(const std::vector<Point<dim>> &points,
1519  * std::vector<Vector<double>> & value_list) const override;
1520  *
1521  * private:
1522  * const double velocity;
1523  * const double present_time;
1524  * const double present_timestep;
1525  * };
1526  *
1527  *
1528  * template <int dim>
1529  * IncrementalBoundaryValues<dim>::IncrementalBoundaryValues(
1530  * const double present_time,
1531  * const double present_timestep)
1532  * : Function<dim>(dim)
1533  * , velocity(.08)
1534  * , present_time(present_time)
1535  * , present_timestep(present_timestep)
1536  * {}
1537  *
1538  *
1539  * template <int dim>
1540  * void
1541  * IncrementalBoundaryValues<dim>::vector_value(const Point<dim> & /*p*/,
1542  * Vector<double> &values) const
1543  * {
1544  * Assert(values.size() == dim, ExcDimensionMismatch(values.size(), dim));
1545  *
1546  * values = 0;
1547  * values(2) = -present_timestep * velocity;
1548  * }
1549  *
1550  *
1551  *
1552  * template <int dim>
1553  * void IncrementalBoundaryValues<dim>::vector_value_list(
1554  * const std::vector<Point<dim>> &points,
1555  * std::vector<Vector<double>> & value_list) const
1556  * {
1557  * const unsigned int n_points = points.size();
1558  *
1559  * Assert(value_list.size() == n_points,
1560  * ExcDimensionMismatch(value_list.size(), n_points));
1561  *
1562  * for (unsigned int p = 0; p < n_points; ++p)
1563  * IncrementalBoundaryValues<dim>::vector_value(points[p], value_list[p]);
1564  * }
1565  *
1566  *
1567  *
1568  * @endcode
1569  *
1570  *
1571  * <a name="ImplementationofthecodeTopLevelcodeclass"></a>
1572  * <h3>Implementation of the <code>TopLevel</code> class</h3>
1573  *
1574 
1575  *
1576  * Now for the implementation of the main class. First, we initialize the
1577  * stress-strain tensor, which we have declared as a static const
1578  * variable. We chose Lamé constants that are appropriate for steel:
1579  *
1580  * @code
1581  * template <int dim>
1582  * const SymmetricTensor<4, dim> TopLevel<dim>::stress_strain_tensor =
1583  * get_stress_strain_tensor<dim>(/*lambda = */ 9.695e10,
1584  * /*mu = */ 7.617e10);
1585  *
1586  *
1587  *
1588  * @endcode
1589  *
1590  *
1591  * <a name="Thepublicinterface"></a>
1592  * <h4>The public interface</h4>
1593  *
1594 
1595  *
1596  * The next step is the definition of constructors and destructors. There
1597  * are no surprises here: we choose linear and continuous finite elements
1598  * for each of the <code>dim</code> vector components of the solution, and a
1599  * Gaussian quadrature formula with 2 points in each coordinate
1600  * direction. The destructor should be obvious:
1601  *
1602  * @code
1603  * template <int dim>
1604  * TopLevel<dim>::TopLevel()
1605  * : triangulation(MPI_COMM_WORLD)
1606  * , fe(FE_Q<dim>(1), dim)
1607  * , dof_handler(triangulation)
1608  * , quadrature_formula(fe.degree + 1)
1609  * , present_time(0.0)
1610  * , present_timestep(1.0)
1611  * , end_time(10.0)
1612  * , timestep_no(0)
1613  * , mpi_communicator(MPI_COMM_WORLD)
1614  * , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
1615  * , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
1616  * , pcout(std::cout, this_mpi_process == 0)
1617  * {}
1618  *
1619  *
1620  *
1621  * template <int dim>
1622  * TopLevel<dim>::~TopLevel()
1623  * {
1624  * dof_handler.clear();
1625  * }
1626  *
1627  *
1628  *
1629  * @endcode
1630  *
1631  * The last of the public functions is the one that directs all the work,
1632  * <code>run()</code>. It initializes the variables that describe where in
1633  * time we presently are, then runs the first time step, then loops over all
1634  * the other time steps. Note that for simplicity we use a fixed time step,
1635  * whereas a more sophisticated program would of course have to choose it
1636  * adaptively in some more reasonable way:
1637  *
1638  * @code
1639  * template <int dim>
1640  * void TopLevel<dim>::run()
1641  * {
1642  * do_initial_timestep();
1643  *
1644  * while (present_time < end_time)
1645  * do_timestep();
1646  * }
1647  *
1648  *
1649  * @endcode
1650  *
1651  *
1652  * <a name="TopLevelcreate_coarse_grid"></a>
1653  * <h4>TopLevel::create_coarse_grid</h4>
1654  *
1655 
1656  *
1657  * The next function in the order in which they were declared above is the
1658  * one that creates the coarse grid from which we start. For this example
1659  * program, we want to compute the deformation of a cylinder under axial
1660  * compression. The first step therefore is to generate a mesh for a
1661  * cylinder of length 3 and with inner and outer radii of 0.8 and 1,
1662  * respectively. Fortunately, there is a library function for such a mesh.
1663  *
1664 
1665  *
1666  * In a second step, we have to associate boundary conditions with the
1667  * upper and lower faces of the cylinder. We choose a boundary indicator of
1668  * 0 for the boundary faces that are characterized by their midpoints having
1669  * z-coordinates of 0 (bottom face), an indicator of 1 for z=3 (top
1670  * face); finally, we use boundary indicator 2 for all faces on the inside
1671  * of the cylinder shell, and 3 for the outside.
1672  *
1673  * @code
1674  * template <int dim>
1675  * void TopLevel<dim>::create_coarse_grid()
1676  * {
1677  * const double inner_radius = 0.8, outer_radius = 1;
1678  * GridGenerator::cylinder_shell(triangulation, 3, inner_radius, outer_radius);
1679  * for (const auto &cell : triangulation.active_cell_iterators())
1680  * for (const auto &face : cell->face_iterators())
1681  * if (face->at_boundary())
1682  * {
1683  * const Point<dim> face_center = face->center();
1684  *
1685  * if (face_center[2] == 0)
1686  * face->set_boundary_id(0);
1687  * else if (face_center[2] == 3)
1688  * face->set_boundary_id(1);
1689  * else if (std::sqrt(face_center[0] * face_center[0] +
1690  * face_center[1] * face_center[1]) <
1691  * (inner_radius + outer_radius) / 2)
1692  * face->set_boundary_id(2);
1693  * else
1694  * face->set_boundary_id(3);
1695  * }
1696  *
1697  * @endcode
1698  *
1699  * Once all this is done, we can refine the mesh once globally:
1700  *
1701  * @code
1702  * triangulation.refine_global(1);
1703  *
1704  * @endcode
1705  *
1706  * As the final step, we need to set up a clean state of the data that we
1707  * store in the quadrature points on all cells that are treated on the
1708  * present processor.
1709  *
1710  * @code
1711  * setup_quadrature_point_history();
1712  * }
1713  *
1714  *
1715  *
1716  * @endcode
1717  *
1718  *
1719  * <a name="TopLevelsetup_system"></a>
1720  * <h4>TopLevel::setup_system</h4>
1721  *
1722 
1723  *
1724  * The next function is the one that sets up the data structures for a given
1725  * mesh. This is done in much the same way as in @ref step_17 "step-17": distribute the
1726  * degrees of freedom, then sort these degrees of freedom in such a way that
1727  * each processor gets a contiguous chunk of them. Note that the subdivision into
1728  * chunks for each processor is handled in the functions that create or
1729  * refine grids, unlike in the previous example program (the point where
1730  * this happens is mostly a matter of taste; here, we chose to do it when
1731  * grids are created since in the <code>do_initial_timestep</code> and
1732  * <code>do_timestep</code> functions we want to output the number of cells
1733  * on each processor at a point where we haven't called the present function
1734  * yet).
1735  *
1736  * @code
1737  * template <int dim>
1738  * void TopLevel<dim>::setup_system()
1739  * {
1740  * dof_handler.distribute_dofs(fe);
1741  * locally_owned_dofs = dof_handler.locally_owned_dofs();
1742  * DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
1743  *
1744  * @endcode
1745  *
1746  * The next step is to set up constraints due to hanging nodes. This has
1747  * been handled many times before:
1748  *
1749  * @code
1750  * hanging_node_constraints.clear();
1751  * DoFTools::make_hanging_node_constraints(dof_handler,
1752  * hanging_node_constraints);
1753  * hanging_node_constraints.close();
1754  *
1755  * @endcode
1756  *
1757  * And then we have to set up the matrix. Here we deviate from @ref step_17 "step-17", in
1758  * which we simply used PETSc's ability to just know about the size of the
1759  * matrix and later allocate those nonzero elements that are being written
1760  * to. While this works just fine from a correctness viewpoint, it is not
1761  * at all efficient: if we don't give PETSc a clue as to which elements
1762  * are written to, it is (at least at the time of this writing) unbearably
1763  * slow when we set the elements in the matrix for the first time (i.e. in
1764  * the first timestep). Later on, when the elements have been allocated,
1765  * everything is much faster. In experiments we made, the first timestep
1766  * can be accelerated by almost two orders of magnitude if we instruct
1767  * PETSc which elements will be used and which are not.
1768  *
1769 
1770  *
1771  * To do so, we first generate the sparsity pattern of the matrix we are
1772  * going to work with, and make sure that the condensation of hanging node
1773  * constraints adds the necessary additional entries in the sparsity
1774  * pattern:
1775  *
1776  * @code
1777  * DynamicSparsityPattern sparsity_pattern(locally_relevant_dofs);
1778  * DoFTools::make_sparsity_pattern(dof_handler,
1779  * sparsity_pattern,
1780  * hanging_node_constraints,
1781  * /*keep constrained dofs*/ false);
1782  * SparsityTools::distribute_sparsity_pattern(sparsity_pattern,
1783  * locally_owned_dofs,
1784  * mpi_communicator,
1785  * locally_relevant_dofs);
1786  * @endcode
1787  *
1788  * Note that we have used the <code>DynamicSparsityPattern</code> class
1789  * here that was already introduced in @ref step_11 "step-11", rather than the
1790  * <code>SparsityPattern</code> class that we have used in all other
1791  * cases. The reason for this is that for the latter class to work we have
1792  * to give an initial upper bound for the number of entries in each row, a
1793  * task that is traditionally done by
1794  * <code>DoFHandler::max_couplings_between_dofs()</code>. However, this
1795  * function suffers from a serious problem: it has to compute an upper
1796  * bound to the number of nonzero entries in each row, and this is a
1797  * rather complicated task, in particular in 3d. In effect, while it is
1798  * quite accurate in 2d, it often comes up with much too large a number in
1799  * 3d, and in that case the <code>SparsityPattern</code> allocates much
1800  * too much memory at first, often several hundred megabytes. This is later
1801  * corrected when <code>DoFTools::make_sparsity_pattern</code> is called
1802  * and we realize that we don't need all that much memory, but at that time it
1803  * is already too late: for large problems, the temporary allocation of
1804  * too much memory can lead to out-of-memory situations.
1805  *
1806 
1807  *
1808  * In order to avoid this, we resort to the
1809  * <code>DynamicSparsityPattern</code> class that is slower but does
1810  * not require any up-front estimate on the number of nonzero entries per
1811  * row. It therefore only ever allocates as much memory as it needs at any
1812  * given time, and we can build it even for large 3d problems.
1813  *
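 *
 * For comparison, a rough sketch of the approach we just said we avoid might
 * look as follows. This is not part of the program; it only assumes the same
 * <code>dof_handler</code> and <code>hanging_node_constraints</code> member
 * variables used above:
 *
 * @code
 * // Hypothetical alternative, for illustration only: a static
 * // SparsityPattern sized with the per-row estimate from
 * // DoFHandler::max_couplings_between_dofs(), which tends to be far too
 * // large in 3d and leads to the over-allocation described above.
 * SparsityPattern fixed_sparsity_pattern;
 * fixed_sparsity_pattern.reinit(dof_handler.n_dofs(),
 * dof_handler.n_dofs(),
 * dof_handler.max_couplings_between_dofs());
 * DoFTools::make_sparsity_pattern(dof_handler, fixed_sparsity_pattern);
 * hanging_node_constraints.condense(fixed_sparsity_pattern);
 * fixed_sparsity_pattern.compress();
 * @endcode
 *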
1814 
1815  *
1816  * It is also worth noting that due to the specifics of
1817  * parallel::shared::Triangulation, the sparsity pattern we construct is
1818  * global, i.e. comprises all degrees of freedom whether they will be
1819  * owned by the processor we are on or another one (in case this program
1820  * is run in %parallel via MPI). This of course is not optimal -- it
1821  * limits the size of the problems we can solve, since storing the entire
1822  * sparsity pattern (even if only for a short time) on each processor does
1823  * not scale well. However, there are several more places in the program
1824  * in which we do this, for example we always keep the global
1825  * triangulation and DoF handler objects around, even if we only work on
1826  * part of them. At present, deal.II does not have the necessary
1827  * facilities to completely distribute these objects (a task that, indeed,
1828  * is very hard to achieve with adaptive meshes, since well-balanced
1829  * subdivisions of a domain tend to become unbalanced as the mesh is
1830  * adaptively refined).
1831  *
1832 
1833  *
1834  * With this data structure, we can then go to the PETSc sparse matrix and
1835  * tell it to preallocate all the entries we will later want to write to:
1836  *
1837  * @code
1838  * system_matrix.reinit(locally_owned_dofs,
1839  * locally_owned_dofs,
1840  * sparsity_pattern,
1841  * mpi_communicator);
1842  * @endcode
1843  *
1844  * After this point, no further explicit knowledge of the sparsity pattern
1845  * is required any more and we can let the <code>sparsity_pattern</code>
1846  * variable go out of scope without any problem.
1847  *
1848 
1849  *
1850  * The last task in this function is then only to reset the right hand
1851  * side vector as well as the solution vector to their correct sizes;
1852  * remember that the solution vector is a local one, unlike the right hand
1853  * side that is a distributed %parallel one and therefore needs to know
1854  * the MPI communicator over which it is supposed to transmit messages:
1855  *
1856  * @code
1857  * system_rhs.reinit(locally_owned_dofs, mpi_communicator);
1858  * incremental_displacement.reinit(dof_handler.n_dofs());
1859  * }
1860  *
1861  *
1862  *
1863  * @endcode
1864  *
1865  *
1866  * <a name="TopLevelassemble_system"></a>
1867  * <h4>TopLevel::assemble_system</h4>
1868  *
1869 
1870  *
1871  * Again, assembling the system matrix and right hand side follows the same
1872  * structure as in many example programs before. In particular, it is mostly
1873  * equivalent to @ref step_17 "step-17", except for the different right hand side that now
1874  * only has to take into account internal stresses. In addition, assembling
1875  * the matrix is made significantly more transparent by using the
1876  * <code>SymmetricTensor</code> class: note the elegance of forming the
1877  * scalar products of symmetric tensors of rank 2 and 4. The implementation
1878  * is also more general since it is independent of the fact that we may or
1879  * may not be using an isotropic elasticity tensor.
1880  *
1881 
1882  *
1883  * The first part of the assembly routine is as always:
1884  *
1885  * @code
1886  * template <int dim>
1887  * void TopLevel<dim>::assemble_system()
1888  * {
1889  * system_rhs = 0;
1890  * system_matrix = 0;
1891  *
1892  * FEValues<dim> fe_values(fe,
1893  * quadrature_formula,
1894  * update_values | update_gradients |
1895  * update_quadrature_points | update_JxW_values);
1896  *
1897  * const unsigned int dofs_per_cell = fe.dofs_per_cell;
1898  * const unsigned int n_q_points = quadrature_formula.size();
1899  *
1900  * FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1901  * Vector<double> cell_rhs(dofs_per_cell);
1902  *
1903  * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1904  *
1905  * BodyForce<dim> body_force;
1906  * std::vector<Vector<double>> body_force_values(n_q_points,
1907  * Vector<double>(dim));
1908  *
1909  * @endcode
1910  *
1911  * As in @ref step_17 "step-17", we only need to loop over all cells that belong to the
1912  * present processor:
1913  *
1914  * @code
1915  * for (const auto &cell : dof_handler.active_cell_iterators())
1916  * if (cell->is_locally_owned())
1917  * {
1918  * cell_matrix = 0;
1919  * cell_rhs = 0;
1920  *
1921  * fe_values.reinit(cell);
1922  *
1923  * @endcode
1924  *
1925  * Then loop over all indices i,j and quadrature points and assemble
1926  * the system matrix contributions from this cell. Note how we
1927  * extract the symmetric gradients (strains) of the shape functions
1928  * at a given quadrature point from the <code>FEValues</code>
1929  * object, and the elegance with which we form the triple
1930  * contraction <code>eps_phi_i : C : eps_phi_j</code>; the latter
1931  * needs to be compared to the clumsy computations needed in
1932  * @ref step_17 "step-17", both in the introduction as well as in the respective
1933  * place in the program:
1934  *
1935  * @code
1936  * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1937  * for (unsigned int j = 0; j < dofs_per_cell; ++j)
1938  * for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1939  * {
1940  * const SymmetricTensor<2, dim>
1941  * eps_phi_i = get_strain(fe_values, i, q_point),
1942  * eps_phi_j = get_strain(fe_values, j, q_point);
1943  *
1944  * cell_matrix(i, j) += (eps_phi_i *
1945  * stress_strain_tensor *
1946  * eps_phi_j
1947  * ) *
1948  * fe_values.JxW(q_point);
1949  * }
1950  *
1951  *
1952  * @endcode
1953  *
1954  * Then also assemble the local right hand side contributions. For
1955  * this, we need to access the prior stress value in this quadrature
1956  * point. To get it, we use the user pointer of this cell that
1957  * points into the global array to the quadrature point data
1958  * corresponding to the first quadrature point of the present cell,
1959  * and then add an offset corresponding to the index of the
1960  * quadrature point we presently consider:
1961  *
1962  * @code
1963  * const PointHistory<dim> *local_quadrature_points_data =
1964  * reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
1965  * @endcode
1966  *
1967  * In addition, we need the values of the external body forces at
1968  * the quadrature points on this cell:
1969  *
1970  * @code
1971  * body_force.vector_value_list(fe_values.get_quadrature_points(),
1972  * body_force_values);
1973  * @endcode
1974  *
1975  * Then we can loop over all degrees of freedom on this cell and
1976  * compute local contributions to the right hand side:
1977  *
1978  * @code
1979  * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1980  * {
1981  * const unsigned int component_i =
1982  * fe.system_to_component_index(i).first;
1983  *
1984  * for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1985  * {
1986  * const SymmetricTensor<2, dim> &old_stress =
1987  * local_quadrature_points_data[q_point].old_stress;
1988  *
1989  * cell_rhs(i) +=
1990  * (body_force_values[q_point](component_i) *
1991  * fe_values.shape_value(i, q_point) -
1992  * old_stress * get_strain(fe_values, i, q_point)) *
1993  * fe_values.JxW(q_point);
1994  * }
1995  * }
1996  *
1997  * @endcode
1998  *
1999  * Now that we have the local contributions to the linear system, we
2000  * need to transfer them into the global objects. This is done exactly
2001  * as in @ref step_17 "step-17":
2002  *
2003  * @code
2004  * cell->get_dof_indices(local_dof_indices);
2005  *
2006  * hanging_node_constraints.distribute_local_to_global(cell_matrix,
2007  * cell_rhs,
2008  * local_dof_indices,
2009  * system_matrix,
2010  * system_rhs);
2011  * }
2012  *
2013  * @endcode
2014  *
2015  * Now compress the vector and the system matrix:
2016  *
2017  * @code
2018  * system_matrix.compress(VectorOperation::add);
2019  * system_rhs.compress(VectorOperation::add);
2020  *
2021  *
2022  * @endcode
2023  *
2024  * The last step is to again fix up boundary values, just as we already
2025  * did in previous programs. A slight complication is that the
2026  * <code>apply_boundary_values</code> function wants to have a solution
2027  * vector compatible with the matrix and right hand side (i.e. here a
2028  * distributed %parallel vector, rather than the sequential vector we use
2029  * in this program) in order to preset the entries of the solution vector
2030  * with the correct boundary values. We provide such a compatible vector
2031  * in the form of a temporary vector which we then copy into the
2032  * sequential one.
2033  *
2034 
2035  *
2036  * We make up for this complication by showing how boundary values can be
2037  * used flexibly: following the way we create the triangulation, there are
2038  * three distinct boundary indicators used to describe the domain,
2039  * corresponding to the bottom and top faces, as well as the inner/outer
2040  * surfaces. We would like to impose boundary conditions of the following
2041  * type: The inner and outer cylinder surfaces are free of external
2042  * forces, a fact that corresponds to natural (Neumann-type) boundary
2043  * conditions for which we don't have to do anything. At the bottom, we
2044  * want no movement at all, corresponding to the cylinder being clamped or
2045  * cemented in at this part of the boundary. At the top, however, we want
2046  * a prescribed vertical downward motion compressing the cylinder; in
2047  * addition, we only want to restrict the vertical movement, but not the
2048  * horizontal ones -- one can think of this situation as a well-greased
2049  * plate sitting on top of the cylinder pushing it downwards: the atoms of
2050  * the cylinder are forced to move downward, but they are free to slide
2051  * horizontally along the plate.
2052  *
2053 
2054  *
2055  * The way to describe this is as follows: for boundary indicator zero
2056  * (bottom face) we use a dim-dimensional zero function representing no
2057  * motion in any coordinate direction. For the boundary with indicator 1
2058  * (top surface), we use the <code>IncrementalBoundaryValues</code> class,
2059  * but we specify an additional argument to the
2060  * <code>VectorTools::interpolate_boundary_values</code> function denoting
2061  * which vector components it should apply to; this is a vector of bools
2062  * for each vector component and because we only want to restrict vertical
2063  * motion, it has only its last component set:
2064  *
2065  * @code
2066  * FEValuesExtractors::Scalar z_component(dim - 1);
2067  * std::map<types::global_dof_index, double> boundary_values;
2068  * VectorTools::interpolate_boundary_values(dof_handler,
2069  * 0,
2070  * Functions::ZeroFunction<dim>(dim),
2071  * boundary_values);
2072  * VectorTools::interpolate_boundary_values(
2073  * dof_handler,
2074  * 1,
2075  * IncrementalBoundaryValues<dim>(present_time, present_timestep),
2076  * boundary_values,
2077  * fe.component_mask(z_component));
2078  *
2079  * PETScWrappers::MPI::Vector tmp(locally_owned_dofs, mpi_communicator);
2080  * MatrixTools::apply_boundary_values(
2081  * boundary_values, system_matrix, tmp, system_rhs, false);
2082  * incremental_displacement = tmp;
2083  * }
2084  *
2085  *
2086  *
2087  * @endcode
2088  *
2089  *
2090  * <a name="TopLevelsolve_timestep"></a>
2091  * <h4>TopLevel::solve_timestep</h4>
2092  *
2093 
2094  *
2095  * The next function is the one that controls what all has to happen within
2096  * a timestep. The order of things should be relatively self-explanatory
2097  * from the function names:
2098  *
2099  * @code
2100  * template <int dim>
2101  * void TopLevel<dim>::solve_timestep()
2102  * {
2103  * pcout << " Assembling system..." << std::flush;
2104  * assemble_system();
2105  * pcout << " norm of rhs is " << system_rhs.l2_norm() << std::endl;
2106  *
2107  * const unsigned int n_iterations = solve_linear_problem();
2108  *
2109  * pcout << " Solver converged in " << n_iterations << " iterations."
2110  * << std::endl;
2111  *
2112  * pcout << " Updating quadrature point data..." << std::flush;
2113  * update_quadrature_point_history();
2114  * pcout << std::endl;
2115  * }
2116  *
2117  *
2118  *
2119  * @endcode
2120  *
2121  *
2122  * <a name="TopLevelsolve_linear_problem"></a>
2123  * <h4>TopLevel::solve_linear_problem</h4>
2124  *
2125 
2126  *
2127  * Solving the linear system again works mostly as before. The only
2128  * difference is that we want to only keep a complete local copy of the
2129  * solution vector instead of the distributed one that we get as output from
2130  * PETSc's solver routines. To this end, we declare a local temporary
2131  * variable for the distributed vector and initialize it with the contents
2132  * of the local variable (remember that the
2133  * <code>apply_boundary_values</code> function called in
2134  * <code>assemble_system</code> preset the values of boundary nodes in this
2135  * vector), solve with it, and at the end of the function copy it again into
2136  * the complete local vector that we declared as a member variable. Hanging
2137  * node constraints are then distributed only on the local copy,
2138  * i.e. independently of each other on each of the processors:
2139  *
2140  * @code
2141  * template <int dim>
2142  * unsigned int TopLevel<dim>::solve_linear_problem()
2143  * {
2144  * PETScWrappers::MPI::Vector distributed_incremental_displacement(
2145  * locally_owned_dofs, mpi_communicator);
2146  * distributed_incremental_displacement = incremental_displacement;
2147  *
2148  * SolverControl solver_control(dof_handler.n_dofs(),
2149  * 1e-16 * system_rhs.l2_norm());
2150  *
2151  * PETScWrappers::SolverCG cg(solver_control, mpi_communicator);
2152  *
2153  * PETScWrappers::PreconditionBlockJacobi preconditioner(system_matrix);
2154  *
2155  * cg.solve(system_matrix,
2156  * distributed_incremental_displacement,
2157  * system_rhs,
2158  * preconditioner);
2159  *
2160  * incremental_displacement = distributed_incremental_displacement;
2161  *
2162  * hanging_node_constraints.distribute(incremental_displacement);
2163  *
2164  * return solver_control.last_step();
2165  * }
2166  *
2167  *
2168  *
2169  * @endcode
2170  *
2171  *
2172  * <a name="TopLeveloutput_results"></a>
2173  * <h4>TopLevel::output_results</h4>
2174  *
2175 
2176  *
2177  * This function generates the graphical output in .vtu format as explained
2178  * in the introduction. Each process will only work on the cells it owns,
2179  * and then write the result into a file of its own. Additionally, processor
2180  * 0 will write the record files that reference all the .vtu files.
2181  *
2182 
2183  *
2184  * The crucial part of this function is to give the <code>DataOut</code>
2185  * class a way to only work on the cells that the present process owns.
2186  *
2187 
2188  *
2189  *
2190  * @code
2191  * template <int dim>
2192  * void TopLevel<dim>::output_results() const
2193  * {
2194  * DataOut<dim> data_out;
2195  * data_out.attach_dof_handler(dof_handler);
2196  *
2197  * @endcode
2198  *
2199  * Then, just as in @ref step_17 "step-17", define the names of solution variables (which
2200  * here are the displacement increments) and queue the solution vector for
2201  * output. Note in the following switch how we make sure that we throw an
2202  * exception if the space dimension is not handled, saying that we
2203  * haven't implemented this case yet (another case of defensive
2204  * programming):
2205  *
2206  * @code
2207  * std::vector<std::string> solution_names;
2208  * switch (dim)
2209  * {
2210  * case 1:
2211  * solution_names.emplace_back("delta_x");
2212  * break;
2213  * case 2:
2214  * solution_names.emplace_back("delta_x");
2215  * solution_names.emplace_back("delta_y");
2216  * break;
2217  * case 3:
2218  * solution_names.emplace_back("delta_x");
2219  * solution_names.emplace_back("delta_y");
2220  * solution_names.emplace_back("delta_z");
2221  * break;
2222  * default:
2223  * Assert(false, ExcNotImplemented());
2224  * }
2225  *
2226  * data_out.add_data_vector(incremental_displacement, solution_names);
2227  *
2228  *
2229  * @endcode
2230  *
2231  * The next thing is that we wanted to output something like the average
2232  * norm of the stresses that we have stored in each cell. This may seem
2233  * complicated, since on the present processor we only store the stresses
2234  * in quadrature points on those cells that actually belong to the present
2235  * process. In other words, it seems as if we can't compute the average
2236  * stresses for all cells. However, remember that the <code>DataOut</code>
2237  * class only generates output for those cells that actually do
2238  * belong to the present processor, i.e. we don't have to compute anything
2239  * for all the other cells as this information would not be touched. The
2240  * following little loop does this. We enclose the entire block into a
2241  * pair of braces to make sure that the iterator variables do not remain
2242  * accidentally visible beyond the end of the block in which they are
2243  * used:
2244  *
2245  * @code
2246  * Vector<double> norm_of_stress(triangulation.n_active_cells());
2247  * {
2248  * @endcode
2249  *
2250  * Loop over all the cells...
2251  *
2252  * @code
2253  * for (auto &cell : triangulation.active_cell_iterators())
2254  * if (cell->is_locally_owned())
2255  * {
2256  * @endcode
2257  *
2258  * On these cells, add up the stresses over all quadrature
2259  * points...
2260  *
2261  * @code
2262  * SymmetricTensor<2, dim> accumulated_stress;
2263  * for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
2264  * accumulated_stress +=
2265  * reinterpret_cast<PointHistory<dim> *>(cell->user_pointer())[q]
2266  * .old_stress;
2267  *
2268  * @endcode
2269  *
2270  * ...then write the norm of the average to their destination:
2271  *
2272  * @code
2273  * norm_of_stress(cell->active_cell_index()) =
2274  * (accumulated_stress / quadrature_formula.size()).norm();
2275  * }
2276  * @endcode
2277  *
2278  * And on the cells that we are not interested in, set the respective
2279  * value in the vector to a bogus value (norms must be positive, and a
2280  * large negative value should catch your eye) in order to make sure
2281  * that, if we were somehow wrong about our assumption that these
2282  * elements would not appear in the output file, we would find out
2283  * by looking at the graphical output:
2284  *
2285  * @code
2286  * else
2287  * norm_of_stress(cell->active_cell_index()) = -1e+20;
2288  * }
2289  * @endcode
2290  *
2291  * Finally attach this vector as well to be treated for output:
2292  *
2293  * @code
2294  * data_out.add_data_vector(norm_of_stress, "norm_of_stress");
2295  *
2296  * @endcode
2297  *
2298  * As a last piece of data, let us also add the partitioning of the domain
2299  * into subdomains associated with the processors if this is a parallel
2300  * job. This works in the exact same way as in the @ref step_17 "step-17" program:
2301  *
2302  * @code
2303  * std::vector<types::subdomain_id> partition_int(
2304  * triangulation.n_active_cells());
2305  * GridTools::get_subdomain_association(triangulation, partition_int);
2306  * const Vector<double> partitioning(partition_int.begin(),
2307  * partition_int.end());
2308  * data_out.add_data_vector(partitioning, "partitioning");
2309  *
2310  * @endcode
2311  *
2312  * Finally, with all this data, we can instruct deal.II to munge the
2313  * information and produce some intermediate data structures that contain
2314  * all these solution and other data vectors:
2315  *
2316  * @code
2317  * data_out.build_patches();
2318  *
2319  * @endcode
2320  *
2321  * Let us call a function that opens the necessary output files and writes
2322  * the data we have generated into them. The function automatically
2323  * constructs the file names from the given directory name (the first
2324  * argument) and file name base (second argument). It augments the resulting
2325  * string by pieces that result from the time step number and a "piece
2326  * number" that corresponds to a part of the overall domain that can consist
2327  * of one or more subdomains.
2328  *
2329 
2330  *
2331  * The function also writes a record file (with suffix `.pvtu`) for ParaView
2332  * that describes how all of these output files combine into the data for
2333  * this single time step:
2334  *
2335  * @code
2336  * const std::string pvtu_master_filename =
2337  * data_out.write_vtu_with_pvtu_record(
2338  * "./", "solution", timestep_no, mpi_communicator, 4);
2339  *
2340  * @endcode
2341  *
2342  * The record files must be written only once and not by each processor,
2343  * so we do this on processor 0:
2344  *
2345  * @code
2346  * if (this_mpi_process == 0)
2347  * {
2348  * @endcode
2349  *
2350  * Finally, we write the ParaView record, which references all .pvtu
2351  * files and their respective times. Note that the variable
2352  * times_and_names is declared static, so it will retain the entries
2353  * from the previous timesteps.
2354  *
2355  * @code
2356  * static std::vector<std::pair<double, std::string>> times_and_names;
2357  * times_and_names.push_back(
2358  * std::pair<double, std::string>(present_time, pvtu_master_filename));
2359  * std::ofstream pvd_output("solution.pvd");
2360  * DataOutBase::write_pvd_record(pvd_output, times_and_names);
2361  * }
2362  * }
2363  *
2364  *
2365  *
2366  * @endcode
2367  *
2368  *
2369  * <a name="TopLeveldo_initial_timestep"></a>
2370  * <h4>TopLevel::do_initial_timestep</h4>
2371  *
2372 
2373  *
2374  * This and the next function handle the overall structure of the first and
2375  * following timesteps, respectively. The first timestep is slightly more
2376  * involved because we want to compute it multiple times on successively
2377  * refined meshes, each time starting from a clean state. At the end of
2378  * these computations, in which we compute the incremental displacements
2379  * each time, we use the last results obtained for the incremental
2380  * displacements to compute the resulting stress updates and move the mesh
2381  * accordingly. On this new mesh, we then output the solution and any
2382  * additional data we consider important.
2383  *
2384 
2385  *
2386  * All this is interspersed by generating output to the console to update
2387  * the person watching the screen on what is going on. As in @ref step_17 "step-17", the
2388  * use of <code>pcout</code> instead of <code>std::cout</code> makes sure
2389  * that only one of the parallel processes is actually writing to the
2390  * console, without having to explicitly code an if-statement in each place
2391  * where we generate output:
2392  *
2393  * @code
2394  * template <int dim>
2395  * void TopLevel<dim>::do_initial_timestep()
2396  * {
2397  * present_time += present_timestep;
2398  * ++timestep_no;
2399  * pcout << "Timestep " << timestep_no << " at time " << present_time
2400  * << std::endl;
2401  *
2402  * for (unsigned int cycle = 0; cycle < 2; ++cycle)
2403  * {
2404  * pcout << " Cycle " << cycle << ':' << std::endl;
2405  *
2406  * if (cycle == 0)
2407  * create_coarse_grid();
2408  * else
2409  * refine_initial_grid();
2410  *
2411  * pcout << " Number of active cells: "
2412  * << triangulation.n_active_cells() << " (by partition:";
2413  * for (unsigned int p = 0; p < n_mpi_processes; ++p)
2414  * pcout << (p == 0 ? ' ' : '+')
2415  * << (GridTools::count_cells_with_subdomain_association(
2416  * triangulation, p));
2417  * pcout << ")" << std::endl;
2418  *
2419  * setup_system();
2420  *
2421  * pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
2422  * << " (by partition:";
2423  * for (unsigned int p = 0; p < n_mpi_processes; ++p)
2424  * pcout << (p == 0 ? ' ' : '+')
2425  * << (DoFTools::count_dofs_with_subdomain_association(dof_handler,
2426  * p));
2427  * pcout << ")" << std::endl;
2428  *
2429  * solve_timestep();
2430  * }
2431  *
2432  * move_mesh();
2433  * output_results();
2434  *
2435  * pcout << std::endl;
2436  * }
2437  *
2438  *
2439  *
2440  * @endcode
2441  *
2442  *
2443  * <a name="TopLeveldo_timestep"></a>
2444  * <h4>TopLevel::do_timestep</h4>
2445  *
2446 
2447  *
2448  * Subsequent timesteps are simpler, and probably do not require any more
2449  * documentation given the explanations for the previous function above:
2450  *
2451  * @code
2452  * template <int dim>
2453  * void TopLevel<dim>::do_timestep()
2454  * {
2455  * present_time += present_timestep;
2456  * ++timestep_no;
2457  * pcout << "Timestep " << timestep_no << " at time " << present_time
2458  * << std::endl;
2459  * if (present_time > end_time)
2460  * {
2461  * present_timestep -= (present_time - end_time);
2462  * present_time = end_time;
2463  * }
2464  *
2465  *
2466  * solve_timestep();
2467  *
2468  * move_mesh();
2469  * output_results();
2470  *
2471  * pcout << std::endl;
2472  * }
2473  *
2474  *
2475  * @endcode
2476  *
2477  *
2478  * <a name="TopLevelrefine_initial_grid"></a>
2479  * <h4>TopLevel::refine_initial_grid</h4>
2480  *
2481 
2482  *
2483  * The following function is called when solving the first time step on
2484  * successively refined meshes. After each iteration, it computes a
2485  * refinement criterion, refines the mesh, and sets up the history variables
2486  * in each quadrature point again to a clean state.
2487  *
2488  * @code
2489  * template <int dim>
2490  * void TopLevel<dim>::refine_initial_grid()
2491  * {
2492  * @endcode
2493  *
2494  * First, let each process compute error indicators for the cells it owns:
2495  *
2496  * @code
2497  * Vector<float> error_per_cell(triangulation.n_active_cells());
2498  * KellyErrorEstimator<dim>::estimate(
2499  * dof_handler,
2500  * QGauss<dim - 1>(fe.degree + 1),
2501  * std::map<types::boundary_id, const Function<dim> *>(),
2502  * incremental_displacement,
2503  * error_per_cell,
2504  * ComponentMask(),
2505  * nullptr,
2506  * MultithreadInfo::n_threads(),
2507  * this_mpi_process);
2508  *
2509  * @endcode
2510  *
2511  * Then set up a global vector into which we merge the local indicators
2512  * from each of the %parallel processes:
2513  *
2514  * @code
2515  * const unsigned int n_local_cells =
2516  * triangulation.n_locally_owned_active_cells();
2517  *
2518  * PETScWrappers::MPI::Vector distributed_error_per_cell(
2519  * mpi_communicator, triangulation.n_active_cells(), n_local_cells);
2520  *
2521  * for (unsigned int i = 0; i < error_per_cell.size(); ++i)
2522  * if (error_per_cell(i) != 0)
2523  * distributed_error_per_cell(i) = error_per_cell(i);
2524  * distributed_error_per_cell.compress(VectorOperation::insert);
2525  *
2526  * @endcode
2527  *
2528  * Once we have that, copy it back into local copies on all processors and
2529  * refine the mesh accordingly:
2530  *
2531  * @code
2532  * error_per_cell = distributed_error_per_cell;
2533  * GridRefinement::refine_and_coarsen_fixed_number(triangulation,
2534  * error_per_cell,
2535  * 0.35,
2536  * 0.03);
2537  * triangulation.execute_coarsening_and_refinement();
2538  *
2539  * @endcode
2540  *
2541  * Finally, set up quadrature point data again on the new mesh, and only
2542  * on those cells that we have determined to be ours:
2543  *
2544  * @code
2545  * setup_quadrature_point_history();
2546  * }
2547  *
2548  *
2549  *
2550  * @endcode
2551  *
2552  *
2553  * <a name="TopLevelmove_mesh"></a>
2554  * <h4>TopLevel::move_mesh</h4>
2555  *
2556 
2557  *
2558  * At the end of each time step, we move the nodes of the mesh according to
2559  * the incremental displacements computed in this time step. To do this, we
2560  * keep a vector of flags that indicate for each vertex whether we have
2561  * already moved it around, and then loop over all cells and move those
2562  * vertices of the cell that have not been moved yet. It is worth noting
2563  * that it does not matter from which of the cells adjacent to a vertex we
2564  * move this vertex: since we compute the displacement using a continuous
2565  * finite element, the displacement field is continuous as well and we can
2566  * compute the displacement of a given vertex from each of the adjacent
2567  * cells. We only have to make sure that we move each node exactly once,
2568  * which is why we keep the vector of flags.
2569  *
2570 
2571  *
2572  * There are two noteworthy things in this function. First, how we get the
2573  * displacement field at a given vertex using the
2574  * <code>cell-@>vertex_dof_index(v,d)</code> function that returns the index
2575  * of the <code>d</code>th degree of freedom at vertex <code>v</code> of the
2576  * given cell. In the present case, displacement in the k-th coordinate
2577  * direction corresponds to the k-th component of the finite element. Using a
2578  * function like this bears a certain risk, because it uses knowledge of the
2579  * order of elements that we have taken together for this program in the
2580  * <code>FESystem</code> element. If we decided to add an additional
2581  * variable, for example a pressure variable for stabilization, and happened
2582  * to insert it as the first variable of the element, then the computation
2583  * below would start to produce nonsensical results. In addition, this
2584  * computation rests on other assumptions: first, that the element we use
2585  * has, indeed, degrees of freedom that are associated with vertices. This
2586  * is indeed the case for the present Q1 element, as it would be for all Qp
2587  * elements of polynomial order <code>p</code>. However, it would not hold
2588  * for discontinuous elements, or elements for mixed formulations. Secondly,
2589  * it also rests on the assumption that the displacement at a vertex is
2590  * determined solely by the value of the degree of freedom associated with
2591  * this vertex; in other words, all shape functions corresponding to other
2592  * degrees of freedom are zero at this particular vertex. Again, this is the
2593  * case for the present element, but is not so for all elements that are
2594  * presently available in deal.II. Despite its risks, we choose to use this
2595  * way in order to present a way to query individual degrees of freedom
2596  * associated with vertices.
2597  *
2598 
2599  *
2600  * In this context, it is instructive to point out what a more general way
2601  * would be. For general finite elements, the way to go would be to take a
2602  * quadrature formula with the quadrature points in the vertices of a
2603  * cell. The <code>QTrapez</code> formula for the trapezoidal rule does
2604  * exactly this. With this quadrature formula, we would then initialize an
2605  * <code>FEValues</code> object in each cell, and use the
2606  * <code>FEValues::get_function_values</code> function to obtain the values
2607  * of the solution function in the quadrature points, i.e. the vertices of
2608  * the cell. These are the only values that we really need, i.e. we are not
2609  * at all interested in the weights (or the <code>JxW</code> values)
2610  * associated with this particular quadrature formula, and this can be
2611  * specified as the last argument in the constructor to
2612  * <code>FEValues</code>. The only point of minor inconvenience in this
2613  * scheme is that we have to figure out which quadrature point corresponds
2614  * to the vertex we consider at present, as they may or may not be ordered
2615  * in the same way.
2616  *
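 *
 * As a concrete illustration, a minimal sketch of this alternative (not used
 * in this program, and assuming we are inside a member function of
 * <code>TopLevel</code> so that <code>fe</code>, <code>dof_handler</code>,
 * and <code>incremental_displacement</code> are available) could look like
 * this:
 *
 * @code
 * // Evaluate the displacement field at the vertices of each locally owned
 * // cell via a quadrature rule whose points coincide with the vertices.
 * // Only function values are needed, hence update_values as the last
 * // argument to FEValues.
 * const QTrapez<dim> vertex_quadrature;
 * FEValues<dim> fe_vertex_values(fe, vertex_quadrature, update_values);
 *
 * std::vector<Vector<double>> vertex_displacements(vertex_quadrature.size(),
 * Vector<double>(dim));
 * for (const auto &cell : dof_handler.active_cell_iterators())
 * if (cell->is_locally_owned())
 * {
 * fe_vertex_values.reinit(cell);
 * fe_vertex_values.get_function_values(incremental_displacement,
 * vertex_displacements);
 * // vertex_displacements[q] now holds the dim displacement components at
 * // the q-th quadrature point, i.e. at one of the cell's vertices --
 * // though possibly in a different order than cell->vertex(v).
 * }
 * @endcode
 *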
2617 
2618  *
2619  * This inconvenience could be avoided if finite elements have support
2620  * points on vertices (which the one here has; for the concept of support
2621  * points, see @ref GlossSupport "support points"). For such a case, one
2622  * could construct a custom quadrature rule using
2623  * FiniteElement::get_unit_support_points(). The first
2624  * <code>GeometryInfo@<dim@>::%vertices_per_cell*fe.dofs_per_vertex</code>
2625  * quadrature points will then correspond to the vertices of the cell and
2626  * are ordered consistent with <code>cell-@>vertex(i)</code>, taking into
2627  * account that support points for vector elements will be duplicated
2628  * <code>fe.dofs_per_vertex</code> times.
2629  *
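 *
 * Under that assumption, such a rule could be built roughly as follows
 * (again only a sketch, not something this program does):
 *
 * @code
 * // Build a quadrature rule from the unit support points of the element.
 * // For the FESystem of FE_Q(1) elements used here, the first
 * // GeometryInfo<dim>::vertices_per_cell * fe.dofs_per_vertex of these
 * // points lie on the cell's vertices, ordered like cell->vertex(i).
 * const Quadrature<dim> support_quadrature(fe.get_unit_support_points());
 * FEValues<dim> fe_support_values(fe, support_quadrature, update_values);
 * @endcode
 *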
2630 
2631  *
2632  * Another point worth explaining about this short function is the way in
2633  * which the triangulation class exports information about its vertices:
2634  * through the <code>Triangulation::n_vertices</code> function, it
2635  * advertises how many vertices there are in the triangulation. Not all of
2636  * them are actually in use all the time -- some are left-overs from cells
2637  * that have been coarsened previously and remain in existence since deal.II
2638  * never changes the number of a vertex once it has come into existence,
2639  * even if vertices with lower numbers go away. Secondly, the location
2640  * returned by <code>cell-@>vertex(v)</code> is not only a read-only object
2641  * of type <code>Point@<dim@></code>, but in fact a reference that can be
2642  * written to. This allows us to move the nodes of a mesh around with relative
2643  * ease, but it is worth pointing out that it is the responsibility of an
2644  * application program using this feature to make sure that the resulting
2645  * cells are still useful, i.e. are not distorted so much that the cell is
2646  * degenerated (indicated, for example, by negative Jacobians). Note that we
2647  * do not have any provisions in this function to actually ensure this: we
2648  * just have faith.
2649  *
2650 
2651  *
2652  * After this lengthy introduction, here are the full 20 or so lines of
2653  * code:
2654  *
2655  * @code
2656  * template <int dim>
2657  * void TopLevel<dim>::move_mesh()
2658  * {
2659  * pcout << " Moving mesh..." << std::endl;
2660  *
2661  * std::vector<bool> vertex_touched(triangulation.n_vertices(), false);
2662  * for (auto &cell : dof_handler.active_cell_iterators())
2663  * for (unsigned int v = 0; v < GeometryInfo<dim>::vertices_per_cell; ++v)
2664  * if (vertex_touched[cell->vertex_index(v)] == false)
2665  * {
2666  * vertex_touched[cell->vertex_index(v)] = true;
2667  *
2668  * Point<dim> vertex_displacement;
2669  * for (unsigned int d = 0; d < dim; ++d)
2670  * vertex_displacement[d] =
2671  * incremental_displacement(cell->vertex_dof_index(v, d));
2672  *
2673  * cell->vertex(v) += vertex_displacement;
2674  * }
2675  * }
2676  *
2677  *
2678  * @endcode
2679  *
2680  *
2681  * <a name="TopLevelsetup_quadrature_point_history"></a>
2682  * <h4>TopLevel::setup_quadrature_point_history</h4>
2683  *
2684 
2685  *
2686  * At the beginning of our computations, we needed to set up initial values
2687  * of the history variables, such as the existing stresses in the material,
2688  * that we store in each quadrature point. As mentioned above, we use the
2689  * <code>user_pointer</code> for this that is available in each cell.
2690  *
2691 
2692  *
2693  * To put this into larger perspective, we note that if we had previously
2694  * available stresses in our model (which we assume do not exist for the
2695  * purpose of this program), then we would need to interpolate the field of
2696  * preexisting stresses to the quadrature points. Likewise, if we were to
2697  * simulate elasto-plastic materials with hardening/softening, then we would
2698  * have to store additional history variables like the present yield stress
2699  * or the accumulated plastic strains in each quadrature
2700  * point. Pre-existing hardening or weakening would then be implemented by
2701  * interpolating these variables in the present function as well.
2702  *
2703  * @code
2704  * template <int dim>
2705  * void TopLevel<dim>::setup_quadrature_point_history()
2706  * {
2707  * @endcode
2708  *
2709  * For good measure, we set all user pointers of all cells, whether
2710  * ours or not, to the null pointer. This way, if we ever access the user
2711  * pointer of a cell which we should not have accessed, a segmentation
2712  * fault will let us know that this should not have happened:
2713  *
2714 
2715  *
2716  *
2717  * @code
2718  * triangulation.clear_user_data();
2719  *
2720  * @endcode
2721  *
2722  * Next, allocate the quadrature objects that are within the responsibility
2723  * of this processor. This, of course, equals the number of cells that
2724  * belong to this processor times the number of quadrature points our
2725  * quadrature formula has on each cell. Since the `resize()` function does
2726  * not actually shrink the amount of allocated memory if the requested new
2727  * size is smaller than the old size, we resort to a trick to first free all
2728  * memory, and then reallocate it: we declare an empty vector as a temporary
2729  * variable and then swap the contents of the old vector and this temporary
2730  * variable. This makes sure that the `quadrature_point_history` is now
2731  * really empty, and we can let the temporary variable that now holds the
2732  * previous contents of the vector go out of scope and be destroyed. In the
2733  * next step we can then re-allocate as many elements as we need, with the
2734  * vector default-initializing the `PointHistory` objects, which includes
2735  * setting the stress variables to zero.
2736  *
2737  * @code
2738  * {
2739  * std::vector<PointHistory<dim>> tmp;
2740  * quadrature_point_history.swap(tmp);
2741  * }
2742  * quadrature_point_history.resize(
2743  * triangulation.n_locally_owned_active_cells() * quadrature_formula.size());
2744  *
2745  * @endcode
2746  *
2747  * Finally loop over all cells again and set the user pointers from the
2748  * cells that belong to the present processor to point to the first
2749  * quadrature point objects corresponding to this cell in the vector of
2750  * such objects:
2751  *
2752  * @code
2753  * unsigned int history_index = 0;
2754  * for (auto &cell : triangulation.active_cell_iterators())
2755  * if (cell->is_locally_owned())
2756  * {
2757  * cell->set_user_pointer(&quadrature_point_history[history_index]);
2758  * history_index += quadrature_formula.size();
2759  * }
2760  *
2761  * @endcode
2762  *
2763  * At the end, for good measure make sure that our count of elements was
2764  * correct and that we have used up all objects we allocated
2765  * previously and do not point to any objects beyond the end of the
2766  * vector. Such defensive programming strategies are always good checks to
2767  * avoid accidental errors and to guard against future changes to this
2768  * function that forget to update all uses of a variable at the same
2769  * time. Recall that constructs using the <code>Assert</code> macro are
2770  * optimized away in optimized mode, so do not affect the run time of
2771  * optimized runs:
2772  *
2773  * @code
2774  * Assert(history_index == quadrature_point_history.size(),
2775  * ExcInternalError());
2776  * }
2777  *
2778  *
2779  *
2780  * @endcode
2781  *
2782  *
2783  * <a name="TopLevelupdate_quadrature_point_history"></a>
2784  * <h4>TopLevel::update_quadrature_point_history</h4>
2785  *
2786 
2787  *
2788  * At the end of each time step, we should have computed an incremental
2789  * displacement update so that the material in its new configuration
2790  * accommodates for the difference between the external body and boundary
2791  * accommodates the difference between the external body and boundary
2792  * forces applied during this time step and the forces exerted through
2793  * stresses available at the next time step, we therefore have to update the
2794  * preexisting stresses with the stresses due to the incremental
2795  * displacement computed during the present time step. Ideally, the
2796  * resulting sum of internal stresses would exactly counter all external
2797  * forces. Indeed, a simple experiment can make sure that this is so: if we
2798  * choose boundary conditions and body forces to be time independent, then
2799  * the forcing terms (the sum of external forces and internal stresses)
2800  * should be exactly zero. If you make this experiment, you will realize
2801  * from the output of the norm of the right hand side in each time step that
2802  * this is almost the case: it is not exactly zero, since in the first time
2803  * step the incremental displacement and stress updates were computed
2804  * relative to the undeformed mesh, which was then deformed. In the second
2805  * time step, we again compute displacement and stress updates, but this
2806  * time in the deformed mesh -- there, the resulting updates are very small
2807  * but not quite zero. This can be iterated, and in each such iteration the
2808  * residual, i.e. the norm of the right hand side vector, is reduced; if one
2809  * makes this little experiment, one realizes that the norm of this residual
2810  * decays exponentially with the number of iterations, and after an initial
2811  * very rapid decline is reduced by a factor of about 3.5 in each
2812  * iteration (for one testcase I looked at, other testcases, and other
2813  * numbers of unknowns change the factor, but not the exponential decay).
2814  *
2815 
2816  *
2817  * In a sense, this can then be considered as a quasi-timestepping scheme to
2818  * resolve the nonlinear problem of solving large-deformation elasticity on
2819  * a mesh that is moved along in a Lagrangian manner.
2820  *
2821 
2822  *
2823  * Another complication is that the existing (old) stresses are defined on
2824  * the old mesh, which we will move around after updating the stresses. If
2825  * this mesh update involves rotations of the cell, then we need to also
2826  * rotate the updated stress, since it was computed relative to the
2827  * coordinate system of the old cell.
2828  *
2829 
2830  *
2831  * Thus, what we need is the following: on each cell which the present
2832  * processor owns, we need to extract the old stress from the data stored
2833  * with each quadrature point, compute the stress update, add the two
2834  * together, and then rotate the result together with the incremental
2835  * rotation computed from the incremental displacement at the present
2836  * quadrature point. We will detail these steps below:
2837  *
2838  * @code
2839  * template <int dim>
2840  * void TopLevel<dim>::update_quadrature_point_history()
2841  * {
2842  * @endcode
2843  *
2844  * First, set up an <code>FEValues</code> object by which we will evaluate
2845  * the incremental displacements and the gradients thereof at the
2846  * quadrature points, together with a vector that will hold this
2847  * information:
2848  *
2849  * @code
2850  * FEValues<dim> fe_values(fe,
2851  * quadrature_formula,
2852  * update_values | update_gradients);
2853  *
2854  * std::vector<std::vector<Tensor<1, dim>>> displacement_increment_grads(
2855  * quadrature_formula.size(), std::vector<Tensor<1, dim>>(dim));
2856  *
2857  * @endcode
2858  *
2859  * Then loop over all cells and do the job in the cells that belong to our
2860  * subdomain:
2861  *
2862  * @code
2863  * for (auto &cell : dof_handler.active_cell_iterators())
2864  * if (cell->is_locally_owned())
2865  * {
2866  * @endcode
2867  *
2868  * Next, get a pointer to the quadrature point history data local to
2869  * the present cell, and, as a defensive measure, make sure that
2870  * this pointer is within the bounds of the global array:
2871  *
2872  * @code
2873  * PointHistory<dim> *local_quadrature_points_history =
2874  * reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
2875  * Assert(local_quadrature_points_history >=
2876  * &quadrature_point_history.front(),
2877  * ExcInternalError());
2878  * Assert(local_quadrature_points_history <=
2879  * &quadrature_point_history.back(),
2880  * ExcInternalError());
2881  *
2882  * @endcode
2883  *
2884  * Then initialize the <code>FEValues</code> object on the present
2885  * cell, and extract the gradients of the displacement at the
2886  * quadrature points for later computation of the strains
2887  * quadrature points for later computation of the strains:
2888  * @code
2889  * fe_values.reinit(cell);
2890  * fe_values.get_function_gradients(incremental_displacement,
2891  * displacement_increment_grads);
2892  *
2893  * @endcode
2894  *
2895  * Then loop over the quadrature points of this cell:
2896  *
2897  * @code
2898  * for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
2899  * {
2900  * @endcode
2901  *
2902  * At each quadrature point, compute the strain increment from
2903  * the gradients, and multiply it by the stress-strain tensor to
2904  * get the stress update. Then add this update to the already
2905  * existing stress at this point:
2906  *
2907  * @code
2908  * const SymmetricTensor<2, dim> new_stress =
2909  * (local_quadrature_points_history[q].old_stress +
2910  * (stress_strain_tensor *
2911  * get_strain(displacement_increment_grads[q])));
2912  *
2913  * @endcode
2914  *
2915  * Finally, we have to rotate the result. For this, we first
2916  * have to compute a rotation matrix at the present quadrature
2917  * point from the incremental displacements. In fact, it can be
2918  * computed from the gradients, and we already have a function
2919  * for that purpose:
2920  *
2921  * @code
2922  * const Tensor<2, dim> rotation =
2923  * get_rotation_matrix(displacement_increment_grads[q]);
2924  * @endcode
2925  *
2926  * Note that the result, a rotation matrix, is in general not a
2927  * symmetric tensor of rank 2, so we must store it as a full
2928  * tensor rather than a <code>SymmetricTensor</code>.
2929  *
2930 
2931  *
2932  * With this rotation matrix, we can compute the rotated tensor
2933  * by contraction from the left and right, after we expand the
2934  * symmetric tensor <code>new_stress</code> into a full tensor:
2935  *
2936  * @code
2937  * const SymmetricTensor<2, dim> rotated_new_stress =
2938  * symmetrize(transpose(rotation) *
2939  * static_cast<Tensor<2, dim>>(new_stress) * rotation);
2940  * @endcode
2941  *
2942  * Note that while the result of the multiplication of these
2943  * three matrices should be symmetric, it is not exactly so due to
2944  * floating point round-off: we get asymmetries on the order of 1e-16
2945  * in the off-diagonal elements of the result. When assigning the
2946  * result to a <code>SymmetricTensor</code>, the constructor of
2947  * that class checks the symmetry and realizes that it isn't
2948  * exactly symmetric; it will then raise an exception. To avoid
2949  * that, we explicitly symmetrize the result to make it exactly
2950  * symmetric.
2951  *
2952 
2953  *
2954  * The result of all these operations is then written back into
2955  * the original place:
2956  *
2957  * @code
2958  * local_quadrature_points_history[q].old_stress =
2959  * rotated_new_stress;
2960  * }
2961  * }
2962  * }
2963  *
2964  * @endcode
2965  *
2966  * This ends the project-specific namespace <code>Step18</code>. The rest is
2967  * as usual and as already shown in @ref step_17 "step-17": A <code>main()</code> function
2968  * that initializes and terminates PETSc, calls the classes that do the
2969  * actual work, and makes sure that we catch all exceptions that propagate
2970  * up to this point:
2971  *
2972  * @code
2973  * } // namespace Step18
2974  *
2975  *
2976  * int main(int argc, char **argv)
2977  * {
2978  * try
2979  * {
2980  * using namespace dealii;
2981  * using namespace Step18;
2982  *
2983  * Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
2984  *
2985  * TopLevel<3> elastic_problem;
2986  * elastic_problem.run();
2987  * }
2988  * catch (std::exception &exc)
2989  * {
2990  * std::cerr << std::endl
2991  * << std::endl
2992  * << "----------------------------------------------------"
2993  * << std::endl;
2994  * std::cerr << "Exception on processing: " << std::endl
2995  * << exc.what() << std::endl
2996  * << "Aborting!" << std::endl
2997  * << "----------------------------------------------------"
2998  * << std::endl;
2999  *
3000  * return 1;
3001  * }
3002  * catch (...)
3003  * {
3004  * std::cerr << std::endl
3005  * << std::endl
3006  * << "----------------------------------------------------"
3007  * << std::endl;
3008  * std::cerr << "Unknown exception!" << std::endl
3009  * << "Aborting!" << std::endl
3010  * << "----------------------------------------------------"
3011  * << std::endl;
3012  * return 1;
3013  * }
3014  *
3015  * return 0;
3016  * }
3017  * @endcode
3018 <a name="Results"></a><h1>Results</h1>
3019 
3020 
3021 
3022 Running the program takes a good while if one uses debug mode; it takes about
3023 eleven minutes on my i7 desktop. Fortunately, the version compiled with
3024 optimizations is much faster; the program only takes about a minute and a half
3025 after recompiling with the command <tt>make release</tt> on the same machine, a
3026 much more reasonable time.
3027 
3028 
3029 If run, the program prints the following output, explaining what it is
3030 doing during all that time:
3031 @verbatim
3032 $ time make run
3033 [ 66%] Built target step-18
3034 [100%] Run step-18 with Release configuration
3035 Timestep 1 at time 1
3036  Cycle 0:
3037  Number of active cells: 3712 (by partition: 3712)
3038  Number of degrees of freedom: 17226 (by partition: 17226)
3039  Assembling system... norm of rhs is 1.88062e+10
3040  Solver converged in 103 iterations.
3041  Updating quadrature point data...
3042  Cycle 1:
3043  Number of active cells: 12812 (by partition: 12812)
3044  Number of degrees of freedom: 51738 (by partition: 51738)
3045  Assembling system... norm of rhs is 1.86145e+10
3046  Solver converged in 121 iterations.
3047  Updating quadrature point data...
3048  Moving mesh...
3049 
3050 Timestep 2 at time 2
3051  Assembling system... norm of rhs is 1.84169e+10
3052  Solver converged in 122 iterations.
3053  Updating quadrature point data...
3054  Moving mesh...
3055 
3056 Timestep 3 at time 3
3057  Assembling system... norm of rhs is 1.82355e+10
3058  Solver converged in 122 iterations.
3059  Updating quadrature point data...
3060  Moving mesh...
3061 
3062 Timestep 4 at time 4
3063  Assembling system... norm of rhs is 1.80728e+10
3064  Solver converged in 117 iterations.
3065  Updating quadrature point data...
3066  Moving mesh...
3067 
3068 Timestep 5 at time 5
3069  Assembling system... norm of rhs is 1.79318e+10
3070  Solver converged in 116 iterations.
3071  Updating quadrature point data...
3072  Moving mesh...
3073 
3074 Timestep 6 at time 6
3075  Assembling system... norm of rhs is 1.78171e+10
3076  Solver converged in 115 iterations.
3077  Updating quadrature point data...
3078  Moving mesh...
3079 
3080 Timestep 7 at time 7
3081  Assembling system... norm of rhs is 1.7737e+10
3082  Solver converged in 112 iterations.
3083  Updating quadrature point data...
3084  Moving mesh...
3085 
3086 Timestep 8 at time 8
3087  Assembling system... norm of rhs is 1.77127e+10
3088  Solver converged in 111 iterations.
3089  Updating quadrature point data...
3090  Moving mesh...
3091 
3092 Timestep 9 at time 9
3093  Assembling system... norm of rhs is 1.78207e+10
3094  Solver converged in 113 iterations.
3095  Updating quadrature point data...
3096  Moving mesh...
3097 
3098 Timestep 10 at time 10
3099  Assembling system... norm of rhs is 1.83544e+10
3100  Solver converged in 115 iterations.
3101  Updating quadrature point data...
3102  Moving mesh...
3103 
3104 [100%] Built target run
3105 make run 176.82s user 0.15s system 198% cpu 1:28.94 total
3106 @endverbatim
3107 In other words, it is computing on 12,000 cells and with some 52,000
3108 unknowns. Not a whole lot, but enough for a coupled three-dimensional
3109 problem to keep a computer busy for a while. At the end of the day,
3110 this is what we have for output:
3111 @verbatim
3112 $ ls -l *vtu *visit
3113 -rw-r--r-- 1 drwells users 1706059 Feb 13 19:36 solution-0010.000.vtu
3114 -rw-r--r-- 1 drwells users 761 Feb 13 19:36 solution-0010.pvtu
3115 -rw-r--r-- 1 drwells users 33 Feb 13 19:36 solution-0010.visit
3116 -rw-r--r-- 1 drwells users 1707907 Feb 13 19:36 solution-0009.000.vtu
3117 -rw-r--r-- 1 drwells users 761 Feb 13 19:36 solution-0009.pvtu
3118 -rw-r--r-- 1 drwells users 33 Feb 13 19:36 solution-0009.visit
3119 -rw-r--r-- 1 drwells users 1703771 Feb 13 19:35 solution-0008.000.vtu
3120 -rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0008.pvtu
3121 -rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0008.visit
3122 -rw-r--r-- 1 drwells users 1693671 Feb 13 19:35 solution-0007.000.vtu
3123 -rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0007.pvtu
3124 -rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0007.visit
3125 -rw-r--r-- 1 drwells users 1681847 Feb 13 19:35 solution-0006.000.vtu
3126 -rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0006.pvtu
3127 -rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0006.visit
3128 -rw-r--r-- 1 drwells users 1670115 Feb 13 19:35 solution-0005.000.vtu
3129 -rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0005.pvtu
3130 -rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0005.visit
3131 -rw-r--r-- 1 drwells users 1658559 Feb 13 19:35 solution-0004.000.vtu
3132 -rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0004.pvtu
3133 -rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0004.visit
3134 -rw-r--r-- 1 drwells users 1639983 Feb 13 19:35 solution-0003.000.vtu
3135 -rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0003.pvtu
3136 -rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0003.visit
3137 -rw-r--r-- 1 drwells users 1625851 Feb 13 19:35 solution-0002.000.vtu
3138 -rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0002.pvtu
3139 -rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0002.visit
3140 -rw-r--r-- 1 drwells users 1616035 Feb 13 19:34 solution-0001.000.vtu
3141 -rw-r--r-- 1 drwells users 761 Feb 13 19:34 solution-0001.pvtu
3142 -rw-r--r-- 1 drwells users 33 Feb 13 19:34 solution-0001.visit
3143 @endverbatim
3144 
3145 
3146 If we visualize these files with VisIt or ParaView, we get to see the full picture
3147 of the disaster our forced compression wreaks on the cylinder (colors in the
3148 images encode the norm of the stress in the material):
3149 
3150 
3151 <div class="threecolumn" style="width: 80%">
3152  <div class="parent">
3153  <div class="img" align="center">
3154  <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0002.0000.png"
3155  alt="Time = 2"
3156  width="400">
3157  </div>
3158  <div class="text" align="center">
3159  Time = 2
3160  </div>
3161  </div>
3162  <div class="parent">
3163  <div class="img" align="center">
3164  <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0005.0000.png"
3165  alt="Time = 5"
3166  width="400">
3167  </div>
3168  <div class="text" align="center">
3169  Time = 5
3170  </div>
3171  </div>
3172  <div class="parent">
3173  <div class="img" align="center">
3174  <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0007.0000.png"
3175  alt="Time = 7"
3176  width="400">
3177  </div>
3178  <div class="text" align="center">
3179  Time = 7
3180  </div>
3181  </div>
3182 </div>
3183 
3184 
3185 <div class="threecolumn" style="width: 80%">
3186  <div class="parent">
3187  <div class="img" align="center">
3188  <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0008.0000.png"
3189  alt="Time = 8"
3190  width="400">
3191  </div>
3192  <div class="text" align="center">
3193  Time = 8
3194  </div>
3195  </div>
3196  <div class="parent">
3197  <div class="img" align="center">
3198  <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0009.0000.png"
3199  alt="Time = 9"
3200  width="400">
3201  </div>
3202  <div class="text" align="center">
3203  Time = 9
3204  </div>
3205  </div>
3206  <div class="parent">
3207  <div class="img" align="center">
3208  <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0010.0000.png"
3209  alt="Time = 10"
3210  width="400">
3211  </div>
3212  <div class="text" align="center">
3213  Time = 10
3214  </div>
3215  </div>
3216 </div>
3217 
3218 
3219 As is clearly visible, as we keep compressing the cylinder, it starts
3220 to bow out near the fully constrained bottom surface and, after about eight
3221 time units, buckle in an azimuthally symmetric manner.
3222 
3223 
3224 Although the result appears plausible for the symmetric geometry and loading,
3225 it is yet to be established whether or not the computation is fully converged.
3226 In order to see whether it is, we ran the program again with one more global
3227 refinement at the beginning and with the time step halved. This would have
3228 taken a very long time on a single machine, so we used a proper workstation and
3229 ran it on 16 processors in parallel. The beginning of the output now looks like
3230 this:
3231 @verbatim
3232 Timestep 1 at time 0.5
3233  Cycle 0:
3234  Number of active cells: 29696 (by partition: 1808+1802+1894+1881+1870+1840+1884+1810+1876+1818+1870+1884+1854+1903+1816+1886)
3235  Number of degrees of freedom: 113100 (by partition: 6936+6930+7305+7116+7326+6869+7331+6786+7193+6829+7093+7162+6920+7280+6843+7181)
3236  Assembling system... norm of rhs is 1.10765e+10
3237  Solver converged in 209 iterations.
3238  Updating quadrature point data...
3239  Cycle 1:
3240  Number of active cells: 102034 (by partition: 6387+6202+6421+6341+6408+6201+6428+6428+6385+6294+6506+6244+6417+6527+6299+6546)
3241  Number of degrees of freedom: 359337 (by partition: 23255+21308+24774+24019+22304+21415+22430+22184+22298+21796+22396+21592+22325+22553+21977+22711)
3242  Assembling system... norm of rhs is 1.35759e+10
3243  Solver converged in 268 iterations.
3244  Updating quadrature point data...
3245  Moving mesh...
3246 
3247 Timestep 2 at time 1
3248  Assembling system... norm of rhs is 1.34674e+10
3249  Solver converged in 267 iterations.
3250  Updating quadrature point data...
3251  Moving mesh...
3252 
3253 Timestep 3 at time 1.5
3254  Assembling system... norm of rhs is 1.33607e+10
3255  Solver converged in 265 iterations.
3256  Updating quadrature point data...
3257  Moving mesh...
3258 
3259 Timestep 4 at time 2
3260  Assembling system... norm of rhs is 1.32558e+10
3261  Solver converged in 263 iterations.
3262  Updating quadrature point data...
3263  Moving mesh...
3264 
3265 [...]
3266 
3267 Timestep 20 at time 10
3268  Assembling system... norm of rhs is 1.47755e+10
3269  Solver converged in 425 iterations.
3270  Updating quadrature point data...
3271  Moving mesh...
3272 @endverbatim
3273 That's quite a good number of unknowns, given that we are in 3d. The output of
3274 this program consists of 16 files for each time step:
3275 @verbatim
3276 $ ls -l solution-0001*
3277 -rw-r--r-- 1 wellsd2 user 761065 Feb 13 21:09 solution-0001.000.vtu
3278 -rw-r--r-- 1 wellsd2 user 759277 Feb 13 21:09 solution-0001.001.vtu
3279 -rw-r--r-- 1 wellsd2 user 761217 Feb 13 21:09 solution-0001.002.vtu
3280 -rw-r--r-- 1 wellsd2 user 761605 Feb 13 21:09 solution-0001.003.vtu
3281 -rw-r--r-- 1 wellsd2 user 756917 Feb 13 21:09 solution-0001.004.vtu
3282 -rw-r--r-- 1 wellsd2 user 752669 Feb 13 21:09 solution-0001.005.vtu
3283 -rw-r--r-- 1 wellsd2 user 735217 Feb 13 21:09 solution-0001.006.vtu
3284 -rw-r--r-- 1 wellsd2 user 750065 Feb 13 21:09 solution-0001.007.vtu
3285 -rw-r--r-- 1 wellsd2 user 760273 Feb 13 21:09 solution-0001.008.vtu
3286 -rw-r--r-- 1 wellsd2 user 777265 Feb 13 21:09 solution-0001.009.vtu
3287 -rw-r--r-- 1 wellsd2 user 772469 Feb 13 21:09 solution-0001.010.vtu
3288 -rw-r--r-- 1 wellsd2 user 760833 Feb 13 21:09 solution-0001.011.vtu
3289 -rw-r--r-- 1 wellsd2 user 782241 Feb 13 21:09 solution-0001.012.vtu
3290 -rw-r--r-- 1 wellsd2 user 748905 Feb 13 21:09 solution-0001.013.vtu
3291 -rw-r--r-- 1 wellsd2 user 738413 Feb 13 21:09 solution-0001.014.vtu
3292 -rw-r--r-- 1 wellsd2 user 762133 Feb 13 21:09 solution-0001.015.vtu
3293 -rw-r--r-- 1 wellsd2 user 1421 Feb 13 21:09 solution-0001.pvtu
3294 -rw-r--r-- 1 wellsd2 user 364 Feb 13 21:09 solution-0001.visit
3295 @endverbatim
3296 
3297 
3298 Here are, first, the mesh on which we compute as well as the partitioning
3299 among the 16 processors:
3300 
3301 
3302 <div class="twocolumn" style="width: 80%">
3303  <div class="parent">
3304  <div class="img" align="center">
3305  <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-000mesh.png"
3306  alt="Discretization"
3307  width="400">
3308  </div>
3309  <div class="text" align="center">
3310  Discretization
3311  </div>
3312  </div>
3313  <div class="parent">
3314  <div class="img" align="center">
3315  <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0002.p.png"
3316  alt="Parallel partitioning"
3317  width="400">
3318  </div>
3319  <div class="text" align="center">
3320  Parallel partitioning
3321  </div>
3322  </div>
3323 </div>
3324 
3325 
3326 Finally, here is the same output as we have shown before for the much smaller
3327 sequential case:
3328 
3329 <div class="threecolumn" style="width: 80%">
3330  <div class="parent">
3331  <div class="img" align="center">
3332  <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0002.s.png"
3333  alt="Time = 2"
3334  width="400">
3335  </div>
3336  <div class="text" align="center">
3337  Time = 2
3338  </div>
3339  </div>
3340  <div class="parent">
3341  <div class="img" align="center">
3342  <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0005.s.png"
3343  alt="Time = 5"
3344  width="400">
3345  </div>
3346  <div class="text" align="center">
3347  Time = 5
3348  </div>
3349  </div>
3350  <div class="parent">
3351  <div class="img" align="center">
3352  <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0007.s.png"
3353  alt="Time = 7"
3354  width="400">
3355  </div>
3356  <div class="text" align="center">
3357  Time = 7
3358  </div>
3359  </div>
3360 </div>
3361 
3362 
3363 <div class="threecolumn" style="width: 80%">
3364  <div class="parent">
3365  <div class="img" align="center">
3366  <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0008.s.png"
3367  alt="Time = 8"
3368  width="400">
3369  </div>
3370  <div class="text" align="center">
3371  Time = 8
3372  </div>
3373  </div>
3374  <div class="parent">
3375  <div class="img" align="center">
3376  <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0009.s.png"
3377  alt="Time = 9"
3378  width="400">
3379  </div>
3380  <div class="text" align="center">
3381  Time = 9
3382  </div>
3383  </div>
3384  <div class="parent">
3385  <div class="img" align="center">
3386  <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0010.s.png"
3387  alt="Time = 10"
3388  width="400">
3389  </div>
3390  <div class="text" align="center">
3391  Time = 10
3392  </div>
3393  </div>
3394 </div>
3395 
3396 
3397 As before, we observe that at high axial compression the cylinder begins
3398 to buckle, but this time ultimately collapses on itself. In contrast to our
3399 first run, towards the end of the simulation the deflection pattern becomes
3400 nonsymmetric (the central bulge deflects laterally). The model clearly does not
3401 provide for this (all our forces and boundary deflections are symmetric) but the
3402 effect is probably physically correct anyway: in reality, small inhomogeneities
3403 in the body's material properties would lead it to buckle to one side
3404 to evade the forcing; in numerical simulations, small perturbations
3405 such as numerical round-off or an inexact solution of a linear system
3406 by an iterative solver could have the same effect. Another typical source for
3407 asymmetries in adaptive computations is that only a certain fraction of cells
3408 is refined in each step, which may lead to asymmetric meshes even if the
3409 original coarse mesh was symmetric.
3410 
3411 
3412 If one compares this with the previous run, the results are both qualitatively
3413 and quantitatively different. The previous computation was
3414 therefore certainly not converged, though we cannot say anything for sure about
3415 the present one. One would need an even finer computation to find out. However,
3416 the point may be moot: looking at the last picture in detail, it is pretty
3417 obvious that not only is the linear small deformation model we chose completely
3418 inadequate, but for a realistic simulation we would also need to make sure that
3419 the body does not intersect itself during deformation (if we continued
3420 compressing the cylinder we would observe some self-intersection).
3421 Without such a formulation we cannot expect anything to make physical sense,
3422 even if it produces nice pictures!
3423 
3424 
3425 <a name="Possibledirectionsforextensions"></a><h3>Possible directions for extensions</h3>
3426 
3427 
3428 The program as is does not really solve an equation that has many applications
3429 in practice: quasi-static material deformation based on a purely elastic law
3430 is almost boring. However, the program may serve as the starting point for
3431 more interesting experiments, and that indeed was the initial motivation for
3432 writing it. Here are some suggestions of what the program is missing and in
3433 what direction it may be extended:
3434 
3435 <a name="Plasticitymodels"></a><h5>Plasticity models</h5>
3436 
3437 
3438  The most obvious extension is to use a more
3439 realistic material model for large-scale quasistatic deformation. The natural
3440 choice for this would be plasticity, in which a nonlinear relationship between
3441 stress and strain replaces equation <a href="#step_18.stress-strain">[stress-strain]</a>. Plasticity
3442 models are usually rather complicated to program since the stress-strain
3443 dependence is generally non-smooth. The material can be thought of as being able
3444 to withstand only a maximal stress (the yield stress) after which it starts to
3445 &ldquo;flow&rdquo;. A mathematical description of this can be given in the form of a
3446 variational inequality, which alternatively can be treated as minimizing the
3447 elastic energy
3448 @f[
3449  E(\mathbf{u}) =
3450  (\varepsilon(\mathbf{u}), C\varepsilon(\mathbf{u}))_{\Omega}
3451  - (\mathbf{f}, \mathbf{u})_{\Omega} - (\mathbf{b}, \mathbf{u})_{\Gamma_N},
3452 @f]
3453 subject to the constraint
3454 @f[
3455  f(\sigma(\mathbf{u})) \le 0
3456 @f]
3457 on the stress. This extension makes the problem to be solved in each time step
3458 nonlinear, so we need another loop within each time step.
3459 
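To make this a little more concrete, here is a minimal sketch of what the
stress-projection (&ldquo;return mapping&rdquo;) step of such an inner loop could look
like, assuming a von Mises yield criterion with yield stress <code>sigma_y</code>
and no hardening. Neither the function nor the criterion is part of step-18;
they are chosen purely for illustration, and a real plasticity model would in
addition have to provide a consistent tangent for the nonlinear iteration:
@code
template <int dim>
SymmetricTensor<2, dim>
radial_return(const SymmetricTensor<2, dim> &trial_stress,
              const double                   sigma_y)
{
  // Split the elastic trial stress into volumetric and deviatoric parts;
  // only the deviator enters the von Mises criterion.
  const SymmetricTensor<2, dim> s      = deviator(trial_stress);
  const double                  s_norm = s.norm();

  // Inside the yield surface the material responds elastically, so the
  // trial stress is admissible as is.
  if (s_norm <= std::sqrt(2. / 3.) * sigma_y)
    return trial_stress;

  // Otherwise scale the deviator back onto the yield surface, keeping the
  // volumetric part of the stress unchanged.
  const SymmetricTensor<2, dim> volumetric_stress =
    (trace(trial_stress) / dim) * unit_symmetric_tensor<dim>();

  return volumetric_stress + (std::sqrt(2. / 3.) * sigma_y / s_norm) * s;
}
@endcode
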
3460 Without going into further details of this model, we refer to the excellent
3461 book by Simo and Hughes on &ldquo;Computational Inelasticity&rdquo; for a
3462 comprehensive overview of computational strategies for solving plastic
3463 models. Alternatively, a brief but concise description of an algorithm for
3464 plasticity is given in an article by S. Commend, A. Truty, and Th. Zimmermann;
3465 @cite CTZ04.
3466 
3467 
3468 <a name="Stabilizationissues"></a><h5>Stabilization issues</h5>
3469 
3470 
3471 The formulation we have chosen, i.e. using
3472 piecewise (bi-, tri-)linear elements for all components of the displacement
3473 vector, and treating the stress as a variable dependent on the displacement, is
3474 appropriate for most materials. However, this so-called displacement-based
3475 formulation becomes unstable and exhibits spurious modes for incompressible or
3476 nearly-incompressible materials. While fluids are usually not elastic (in most
3477 cases, the stress depends on velocity gradients, not displacement gradients,
3478 although there are exceptions such as electro-rheological fluids), there are a
3479 few solids that are nearly incompressible, for example rubber. Another case is
3480 that many plasticity models ultimately let the material become incompressible,
3481 although this is outside the scope of the present program.
3482 
3483 Incompressibility is characterized by Poisson's ratio
3484 @f[
3485  \nu = \frac{\lambda}{2(\lambda+\mu)},
3486 @f]
3487 where @f$\lambda,\mu@f$ are the Lam&eacute; constants of the material.
3488 Physical constraints indicate that @f$-1\le \nu\le \frac 12@f$ (the condition
3489 also follows from mathematical stability considerations). If @f$\nu@f$
3490 approaches @f$\frac 12@f$, then the material becomes incompressible. In that
3491 case, pure displacement-based formulations are no longer appropriate for the
3492 solution of such problems, and stabilization techniques have to be employed
3493 for a stable and accurate solution. The book and paper cited above give
3494 indications as to how to do this, but there is also a large volume of
3495 literature on this subject; a good start to get an overview of the topic can
3496 be found in the references of the paper by H.-Y. Duan and Q. Lin; @cite DL05.
3497 
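As a quick sanity check, for Lam&eacute; constants of a magnitude roughly
appropriate for steel, say @f$\lambda \approx 9.7\times 10^{10}@f$ and
@f$\mu \approx 7.6\times 10^{10}@f$ (in Pascal; these particular numbers are
used here only as an illustration), one obtains
@f[
  \nu = \frac{\lambda}{2(\lambda+\mu)}
      \approx \frac{9.7}{2\,(9.7+7.6)}
      \approx 0.28,
@f]
which is comfortably far from the incompressible limit @f$\nu=\frac 12@f$, so
a displacement-based formulation is adequate for such a material. A nearly
incompressible material such as rubber, on the other hand, has @f$\nu@f$ close
to @f$\frac 12@f$ and would require the stabilization techniques mentioned
above.
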
3498 
3499 <a name="Refinementduringtimesteps"></a><h5>Refinement during timesteps</h5>
3500 
3501 
3502 In the present form, the program
3503 only refines the initial mesh a number of times, but then never again. For any
3504 kind of realistic simulation, one would want to extend this so that the mesh
3505 is refined and coarsened every few time steps instead. This is not hard to do,
3506 in fact, but has been left for future tutorial programs or as an exercise, if
3507 you wish.
3508 
3509 The main complication one has to overcome is that one has to
3510 transfer the data that is stored in the quadrature points of the cells of the
3511 old mesh to the new mesh, preferably by some sort of projection scheme. The
3512 general approach would be as follows:
3513 
3514 - At the beginning, the data is only available in the quadrature points of
3515  individual cells, not as a finite element field that is defined everywhere.
3516 
3517 - So let us find a finite element field that <i>is</i> defined everywhere so
3518  that we can later interpolate it to the quadrature points of the new
3519  mesh. In general, it will be difficult to find a continuous finite element
3520  field that matches the values in the quadrature points exactly because the
3521  number of degrees of freedom of these fields does not match the number of
3522  quadrature points there are, and the nodal values of this global field will
3523  either be over- or underdetermined. But it is usually not very difficult to
3524  find a discontinuous field that matches the values in the quadrature points;
3525  for example, if you have a QGauss(2) quadrature formula (i.e. 4 points per
3526  cell in 2d, 8 points in 3d), then one would use a finite element of kind
3527  FE_DGQ(1), i.e. bi-/tri-linear functions as these have 4 degrees of freedom
3528  per cell in 2d and 8 in 3d.
3529 
3530 - There are functions that can make this conversion from individual points to
3531  a global field simpler. The following piece of pseudo-code should help if
3532  you use a QGauss(2) quadrature formula. Note that the multiplication by the
3533  projection matrix below takes a vector of scalar components, i.e., we can only
3534  convert one set of scalars at a time from the quadrature points to the degrees
3535  of freedom and vice versa. So we need to store each component of stress separately,
3536  which requires <code>dim*dim</code> vectors. We'll store this set of vectors in a 2D array to
3537  make it easier to read off components in the same way you would the stress tensor.
3538  Thus, we'll loop over the components of stress on each cell and store
3539  these values in the global history field. (The prefix <code>history_</code>
3540  indicates that we work with quantities related to the history variables defined
3541  in the quadrature points.)
3542  @code
3543  FE_DGQ<dim> history_fe (1);
3544  DoFHandler<dim> history_dof_handler (triangulation);
3545  history_dof_handler.distribute_dofs (history_fe);
3546 
3547  std::vector< std::vector< Vector<double> > >
3548  history_field (dim, std::vector< Vector<double> >(dim)),
3549  local_history_values_at_qpoints (dim, std::vector< Vector<double> >(dim)),
3550  local_history_fe_values (dim, std::vector< Vector<double> >(dim));
3551 
3552  for (unsigned int i=0; i<dim; i++)
3553  for (unsigned int j=0; j<dim; j++)
3554  {
3555  history_field[i][j].reinit(history_dof_handler.n_dofs());
3556  local_history_values_at_qpoints[i][j].reinit(quadrature.size());
3557  local_history_fe_values[i][j].reinit(history_fe.dofs_per_cell);
3558  }
3559 
3560  FullMatrix<double> qpoint_to_dof_matrix (history_fe.dofs_per_cell,
3561  quadrature.size());
3562  FETools::compute_projection_from_quadrature_points_matrix
3563  (history_fe,
3564  quadrature, quadrature,
3565  qpoint_to_dof_matrix);
3566 
3567  typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active(),
3568  endc = dof_handler.end(),
3569  dg_cell = history_dof_handler.begin_active();
3570 
3571  for (; cell!=endc; ++cell, ++dg_cell)
3572  {
3573 
3574  PointHistory<dim> *local_quadrature_points_history
3575  = reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
3576 
3577  Assert (local_quadrature_points_history >= &quadrature_point_history.front(),
3578  ExcInternalError());
3579  Assert (local_quadrature_points_history < &quadrature_point_history.back(),
3580  ExcInternalError());
3581 
3582  for (unsigned int i=0; i<dim; i++)
3583  for (unsigned int j=0; j<dim; j++)
3584  {
3585  for (unsigned int q=0; q<quadrature.size(); ++q)
3586  local_history_values_at_qpoints[i][j](q)
3587  = local_quadrature_points_history[q].old_stress[i][j];
3588 
3589  qpoint_to_dof_matrix.vmult (local_history_fe_values[i][j],
3590  local_history_values_at_qpoints[i][j]);
3591 
3592  dg_cell->set_dof_values (local_history_fe_values[i][j],
3593  history_field[i][j]);
3594  }
3595  }
3596  @endcode
3597 
3598 - Now that we have a global field, we can refine the mesh and transfer the
3599  history_field vector as usual using the SolutionTransfer class; a sketch of
3600  this step is shown right after this list. It will interpolate everything from the old to the new mesh.
3601 
3602 - In a final step, we have to get the data back from the now interpolated
3603  global field to the quadrature points on the new mesh. The following code
3604  will do that:
3605  @code
3606  FullMatrix<double> dof_to_qpoint_matrix (quadrature.size(),
3607  history_fe.dofs_per_cell);
3608  FETools::compute_interpolation_to_quadrature_points_matrix
3609  (history_fe,
3610  quadrature,
3611  dof_to_qpoint_matrix);
3612 
3613  typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active(),
3614  endc = dof_handler.end(),
3615  dg_cell = history_dof_handler.begin_active();
3616 
3617  for (; cell != endc; ++cell, ++dg_cell)
3618  {
3619  PointHistory<dim> *local_quadrature_points_history
3620  = reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
3621 
3622  Assert (local_quadrature_points_history >= &quadrature_point_history.front(),
3623  ExcInternalError());
3624  Assert (local_quadrature_points_history < &quadrature_point_history.back(),
3625  ExcInternalError());
3626 
3627  for (unsigned int i=0; i<dim; i++)
3628  for (unsigned int j=0; j<dim; j++)
3629  {
3630  dg_cell->get_dof_values (history_field[i][j],
3631  local_history_fe_values[i][j]);
3632 
3633  dof_to_qpoint_matrix.vmult (local_history_values_at_qpoints[i][j],
3634  local_history_fe_values[i][j]);
3635 
3636  for (unsigned int q=0; q<quadrature.size(); ++q)
3637  local_quadrature_points_history[q].old_stress[i][j]
3638  = local_history_values_at_qpoints[i][j](q);
3639  }
 }
3640  @endcode
3641 
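For reference, the refine-and-transfer step referred to in the list above
could look roughly as follows. This is only a sketch: it assumes that the
<code>dim*dim</code> vectors of <code>history_field</code> have been collected
into a single <code>std::vector</code> of those vectors named
<code>history_in</code> (a name invented just for this snippet), and it
glosses over how cells are flagged for refinement:
@code
SolutionTransfer<dim> history_transfer(history_dof_handler);

triangulation.prepare_coarsening_and_refinement();
history_transfer.prepare_for_coarsening_and_refinement(history_in);
triangulation.execute_coarsening_and_refinement();

history_dof_handler.distribute_dofs(history_fe);

std::vector<Vector<double>> history_out(history_in.size(),
                                        Vector<double>(history_dof_handler.n_dofs()));
history_transfer.interpolate(history_in, history_out);
@endcode
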
3642 It becomes a bit more complicated once we run the program in parallel, since
3643 then each process only stores this data for the cells it owned on the old
3644 mesh. That said, using a parallel vector for <code>history_field</code> will
3645 do the trick if you put a call to <code>compress</code> after the transfer
3646 from quadrature points into the global vector.
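
In code, that last remark amounts to something like the following sketch,
assuming <code>history_field[i][j]</code> has been changed to a parallel
vector type such as <code>PETScWrappers::MPI::Vector</code>:
@code
// After all locally owned cells have written their contributions through
// dg_cell->set_dof_values(...), exchange the data between the processes:
for (unsigned int i = 0; i < dim; ++i)
  for (unsigned int j = 0; j < dim; ++j)
    history_field[i][j].compress(VectorOperation::insert);
@endcode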
3647 
3648 
3649 <a name="Ensuringmeshregularity"></a><h5>Ensuring mesh regularity</h5>
3650 
3651 
3652 At present, the program makes no attempt
3653 to make sure that a cell, after moving its vertices at the end of the time
3654 step, still has a valid geometry (i.e. that its Jacobian determinant is
3655 positive and bounded away from zero everywhere). It is, in fact, not very hard
3656 to set boundary values and forcing terms in such a way that one gets distorted
3657 and inverted cells rather quickly. Certainly, in some cases of large
3658 deformation, this is unavoidable with a mesh of finite mesh size, but in some
3659 other cases this should be preventable by appropriate mesh refinement and/or a
3660 reduction of the time step size. The program does not do that, but a more
3661 sophisticated version definitely should employ some sort of heuristic defining
3662 what amount of deformation of cells is acceptable, and what isn't.
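
A minimal sketch of one such heuristic, with both the function name and the
threshold of one tenth invented purely for illustration, might accept a time
step only if the largest nodal displacement increment stays well below the
smallest cell diameter, since larger movements are likely to fold cells over:
@code
template <int dim>
bool TopLevel<dim>::time_step_is_acceptable() const
{
  // Compare the largest entry of the incremental displacement against the
  // smallest cell diameter of the current mesh.
  const double max_increment = incremental_displacement.linfty_norm();
  const double min_cell_size = GridTools::minimal_cell_diameter(triangulation);

  return max_increment < 0.1 * min_cell_size;
}
@endcode
If this check fails, one would reduce the time step size and recompute the
current step before moving the mesh.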
3663  *
3664  *
3665 <a name="PlainProg"></a>
3666 <h1> The plain program</h1>
3667 @include "step-18.cc"
3668 */