step-18.h
1435 *   Vector<double> &values) const
1436 *   {
1437 *   AssertDimension(values.size(), dim);
1438 *  
1439 *   const double g = 9.81;
1440 *   const double rho = 7700;
1441 *  
1442 *   values = 0;
1443 *   values(dim - 1) = -rho * g;
1444 *   }
1445 *  
1446 *  
1447 *  
1448 *   template <int dim>
1449 *   void BodyForce<dim>::vector_value_list(
1450 *   const std::vector<Point<dim>> &points,
1451 *   std::vector<Vector<double>> &value_list) const
1452 *   {
1453 *   const unsigned int n_points = points.size();
1454 *  
1455 *   AssertDimension(value_list.size(), n_points);
1456 *  
1457 *   for (unsigned int p = 0; p < n_points; ++p)
1458 *   BodyForce<dim>::vector_value(points[p], value_list[p]);
1459 *   }
1460 *  
1461 *  
1462 *  
1463 * @endcode
1464 *
1465 *
1466 * <a name="step_18-ThecodeIncrementalBoundaryValuecodeclass"></a>
1467 * <h3>The <code>IncrementalBoundaryValue</code> class</h3>
1468 *
1469
1470 *
1471 * In addition to body forces, movement can be induced by boundary forces
1472 * and forced boundary displacement. The latter case is equivalent to forces
1473 * being chosen in such a way that they induce a certain displacement.
1474 *
1475
1476 *
1477 * For quasistatic displacement, typical boundary forces would be pressure
1478 * on a body, or tangential friction against another body. We chose a
1479 * somewhat simpler case here: we prescribe a certain movement of (parts of)
1480 * the boundary, or at least of certain components of the displacement
1481 * vector. We describe this by another vector-valued function that, for a
1482 * given point on the boundary, returns the prescribed displacement.
1483 *
1484
1485 *
1486 * Since we have a time-dependent problem, the displacement increment of the
1487 * boundary equals the displacement accumulated during the length of the
1488 * timestep. The class therefore has to know both the present time and the
1489 * length of the present time step, and can then approximate the incremental
1490 * displacement as the present velocity times the present timestep.
1491 *
1492
1493 *
1494 * For the purposes of this program, we choose a simple form of boundary
1495 * displacement: we displace the top boundary with constant velocity
1496 * downwards. The rest of the boundary is either going to be fixed (and is
1497 * then described using an object of type
1498 * <code>Functions::ZeroFunction</code>) or free (Neumann-type, in which case
1499 * nothing special has to be done). The implementation of the class
1500 * describing the constant downward motion should then be obvious using the
1501 * knowledge we gained through all the previous example programs:
1502 *
1503 * @code
1504 *   template <int dim>
1505 *   class IncrementalBoundaryValues : public Function<dim>
1506 *   {
1507 *   public:
1508 *   IncrementalBoundaryValues(const double present_time,
1509 *   const double present_timestep);
1510 *  
1511 *   virtual void vector_value(const Point<dim> &p,
1512 *   Vector<double> &values) const override;
1513 *  
1514 *   virtual void
1515 *   vector_value_list(const std::vector<Point<dim>> &points,
1516 *   std::vector<Vector<double>> &value_list) const override;
1517 *  
1518 *   private:
1519 *   const double velocity;
1520 *   const double present_time;
1521 *   const double present_timestep;
1522 *   };
1523 *  
1524 *  
1525 *   template <int dim>
1526 *   IncrementalBoundaryValues<dim>::IncrementalBoundaryValues(
1527 *   const double present_time,
1528 *   const double present_timestep)
1529 *   : Function<dim>(dim)
1530 *   , velocity(.08)
1531 *   , present_time(present_time)
1532 *   , present_timestep(present_timestep)
1533 *   {}
1534 *  
1535 *  
1536 *   template <int dim>
1537 *   void
1538 *   IncrementalBoundaryValues<dim>::vector_value(const Point<dim> & /*p*/,
1539 *   Vector<double> &values) const
1540 *   {
1541 *   AssertDimension(values.size(), dim);
1542 *  
1543 *   values = 0;
1544 *   values(2) = -present_timestep * velocity;
1545 *   }
1546 *  
1547 *  
1548 *  
1549 *   template <int dim>
1550 *   void IncrementalBoundaryValues<dim>::vector_value_list(
1551 *   const std::vector<Point<dim>> &points,
1552 *   std::vector<Vector<double>> &value_list) const
1553 *   {
1554 *   const unsigned int n_points = points.size();
1555 *  
1556 *   AssertDimension(value_list.size(), n_points);
1557 *  
1558 *   for (unsigned int p = 0; p < n_points; ++p)
1559 *   IncrementalBoundaryValues<dim>::vector_value(points[p], value_list[p]);
1560 *   }
1561 *  
1562 *  
1563 *  
1564 * @endcode
1565 *
1566 *
1567 * <a name="step_18-ImplementationofthecodeTopLevelcodeclass"></a>
1568 * <h3>Implementation of the <code>TopLevel</code> class</h3>
1569 *
1570
1571 *
1572 * Now for the implementation of the main class. First, we initialize the
1573 * stress-strain tensor, which we have declared as a static const
1574 * variable. We chose Lam&eacute; constants that are appropriate for steel:
1575 *
1576 * @code
1577 *   template <int dim>
1578 *   const SymmetricTensor<4, dim> TopLevel<dim>::stress_strain_tensor =
1579 *   get_stress_strain_tensor<dim>(/*lambda = */ 9.695e10,
1580 *   /*mu = */ 7.617e10);
1581 *  
1582 *  
1583 *  
1584 * @endcode
1585 *
1586 *
1587 * <a name="step_18-Thepublicinterface"></a>
1588 * <h4>The public interface</h4>
1589 *
1590
1591 *
1592 * The next step is the definition of constructors and destructors. There
1593 * are no surprises here: we choose linear and continuous finite elements
1594 * for each of the <code>dim</code> vector components of the solution, and a
1595 * Gaussian quadrature formula with 2 points in each coordinate
1596 * direction. The destructor should be obvious:
1597 *
1598 * @code
1599 *   template <int dim>
1600 *   TopLevel<dim>::TopLevel()
1601 *   : triangulation(MPI_COMM_WORLD)
1602 *   , fe(FE_Q<dim>(1) ^ dim)
1603 *   , dof_handler(triangulation)
1604 *   , quadrature_formula(fe.degree + 1)
1605 *   , present_time(0.0)
1606 *   , present_timestep(1.0)
1607 *   , end_time(10.0)
1608 *   , timestep_no(0)
1609 *   , mpi_communicator(MPI_COMM_WORLD)
1610 *   , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
1611 *   , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
1612 *   , pcout(std::cout, this_mpi_process == 0)
1613 *   {}
1614 *  
1615 *  
1616 *  
1617 *   template <int dim>
1618 *   TopLevel<dim>::~TopLevel()
1619 *   {
1620 *   dof_handler.clear();
1621 *   }
1622 *  
1623 *  
1624 *  
1625 * @endcode
1626 *
1627 * The last of the public functions is the one that directs all the work,
1628 * <code>run()</code>. It initializes the variables that describe where in
1629 * time we presently are, then runs the first time step, then loops over all
1630 * the other time steps. Note that for simplicity we use a fixed time step,
1631 * whereas a more sophisticated program would of course have to choose it
1632 * adaptively in some more sensible way:
1633 *
1634 * @code
1635 *   template <int dim>
1636 *   void TopLevel<dim>::run()
1637 *   {
1638 *   do_initial_timestep();
1639 *  
1640 *   while (present_time < end_time)
1641 *   do_timestep();
1642 *   }
1643 *  
1644 *  
1645 * @endcode
1646 *
1647 *
1648 * <a name="step_18-TopLevelcreate_coarse_grid"></a>
1649 * <h4>TopLevel::create_coarse_grid</h4>
1650 *
1651
1652 *
1653 * The next function in the order in which they were declared above is the
1654 * one that creates the coarse grid from which we start. For this example
1655 * program, we want to compute the deformation of a cylinder under axial
1656 * compression. The first step therefore is to generate a mesh for a
1657 * cylinder of length 3 and with inner and outer radii of 0.8 and 1,
1658 * respectively. Fortunately, there is a library function for such a mesh.
1659 *
1660
1661 *
1662 * In a second step, we have to associate boundary conditions with the
1663 * upper and lower faces of the cylinder. We choose a boundary indicator of
1664 * 0 for the boundary faces whose midpoints have a z-coordinate of 0 (the
1665 * bottom face), an indicator of 1 for those with z=3 (the top
1666 * face); finally, we use boundary indicator 2 for all faces on the inside
1667 * of the cylinder shell, and 3 for those on the outside.
1668 *
1669 * @code
1670 *   template <int dim>
1671 *   void TopLevel<dim>::create_coarse_grid()
1672 *   {
1673 *   const double inner_radius = 0.8, outer_radius = 1;
1674 *   GridGenerator::cylinder_shell(triangulation, 3, inner_radius, outer_radius);
1675 *   for (const auto &cell : triangulation.active_cell_iterators())
1676 *   for (const auto &face : cell->face_iterators())
1677 *   if (face->at_boundary())
1678 *   {
1679 *   const Point<dim> face_center = face->center();
1680 *  
1681 *   if (face_center[2] == 0)
1682 *   face->set_boundary_id(0);
1683 *   else if (face_center[2] == 3)
1684 *   face->set_boundary_id(1);
1685 *   else if (std::sqrt(face_center[0] * face_center[0] +
1686 *   face_center[1] * face_center[1]) <
1687 *   (inner_radius + outer_radius) / 2)
1688 *   face->set_boundary_id(2);
1689 *   else
1690 *   face->set_boundary_id(3);
1691 *   }
1692 *  
1693 * @endcode
1694 *
1695 * Once all this is done, we can refine the mesh once globally:
1696 *
1697 * @code
1698 *   triangulation.refine_global(1);
1699 *  
1700 * @endcode
1701 *
1702 * As the final step, we need to set up a clean state of the data that we
1703 * store in the quadrature points on all cells that are treated on the
1704 * present processor.
1705 *
1706 * @code
1707 *   setup_quadrature_point_history();
1708 *   }
1709 *  
1710 *  
1711 *  
1712 * @endcode
1713 *
1714 *
1715 * <a name="step_18-TopLevelsetup_system"></a>
1716 * <h4>TopLevel::setup_system</h4>
1717 *
1718
1719 *
1720 * The next function is the one that sets up the data structures for a given
1721 * mesh. This is done in much the same way as in @ref step_17 "step-17": distribute the
1722 * degrees of freedom, then sort these degrees of freedom in such a way that
1723 * each processor gets a contiguous chunk of them. Note that the subdivision
1724 * into chunks for each processor is handled in the functions that create or
1725 * refine grids, unlike in the previous example program (the point where
1726 * this happens is mostly a matter of taste; here, we chose to do it when
1727 * grids are created since in the <code>do_initial_timestep</code> and
1728 * <code>do_timestep</code> functions we want to output the number of cells
1729 * on each processor at a point where we haven't called the present function
1730 * yet).
1731 *
1732 * @code
1733 *   template <int dim>
1734 *   void TopLevel<dim>::setup_system()
1735 *   {
1736 *   dof_handler.distribute_dofs(fe);
1737 *   locally_owned_dofs = dof_handler.locally_owned_dofs();
1738 *   locally_relevant_dofs =
1739 *   DoFTools::extract_locally_relevant_dofs(dof_handler);
1740 *  
1741 * @endcode
1742 *
1743 * The next step is to set up constraints due to hanging nodes. This has
1744 * been handled many times before:
1745 *
1746 * @code
1747 *   hanging_node_constraints.clear();
1748 *   DoFTools::make_hanging_node_constraints(dof_handler,
1749 *   hanging_node_constraints);
1750 *   hanging_node_constraints.close();
1751 *  
1752 * @endcode
1753 *
1754 * And then we have to set up the matrix. Here we deviate from @ref step_17 "step-17", in
1755 * which we simply used PETSc's ability to just know about the size of the
1756 * matrix and later allocate those nonzero elements that are being written
1757 * to. While this works just fine from a correctness viewpoint, it is not
1758 * at all efficient: if we don't give PETSc a clue as to which elements
1759 * are written to, it is (at least at the time of this writing) unbearably
1760 * slow when we set the elements in the matrix for the first time (i.e. in
1761 * the first timestep). Later on, when the elements have been allocated,
1762 * everything is much faster. In experiments we made, the first timestep
1763 * can be accelerated by almost two orders of magnitude if we instruct
1764 * PETSc which elements will be used and which will not.
1765 *
1766
1767 *
1768 * To do so, we first generate the sparsity pattern of the matrix we are
1769 * going to work with, and make sure that the condensation of hanging node
1770 * constraints adds the necessary additional entries in the sparsity
1771 * pattern:
1772 *
1773 * @code
1774 *   DynamicSparsityPattern sparsity_pattern(locally_relevant_dofs);
1775 *   DoFTools::make_sparsity_pattern(dof_handler,
1776 *   sparsity_pattern,
1777 *   hanging_node_constraints,
1778 *   /*keep constrained dofs*/ false);
1779 *   SparsityTools::distribute_sparsity_pattern(sparsity_pattern,
1780 *   locally_owned_dofs,
1781 *   mpi_communicator,
1782 *   locally_relevant_dofs);
1783 * @endcode
1784 *
1785 * Note that we have used the <code>DynamicSparsityPattern</code> class
1786 * here that was already introduced in @ref step_11 "step-11", rather than the
1787 * <code>SparsityPattern</code> class that we have used in all other
1788 * cases. The reason for this is that for the latter class to work we have
1789 * to give an initial upper bound for the number of entries in each row, a
1790 * task that is traditionally done by
1791 * <code>DoFHandler::max_couplings_between_dofs()</code>. However, this
1792 * function suffers from a serious problem: it has to compute an upper
1793 * bound to the number of nonzero entries in each row, and this is a
1794 * rather complicated task, in particular in 3d. In effect, while it is
1795 * quite accurate in 2d, it often comes up with much too large a number in
1796 * 3d, and in that case the <code>SparsityPattern</code> allocates much
1797 * too much memory at first, often several hundred MBs. This is later
1798 * corrected when <code>DoFTools::make_sparsity_pattern</code> is called
1799 * and we realize that we don't need all that much memory, but at that time
1800 * it is already too late: for large problems, the temporary allocation of
1801 * too much memory can lead to out-of-memory situations.
1802 *
1803
1804 *
1805 * In order to avoid this, we resort to the
1806 * <code>DynamicSparsityPattern</code> class that is slower but does
1807 * not require any up-front estimate on the number of nonzero entries per
1808 * row. It therefore only ever allocates as much memory as it needs at any
1809 * given time, and we can build it even for large 3d problems.
1810 *
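* For contrast, here is a hedged sketch (not used in this program) of what
* the traditional approach with a static <code>SparsityPattern</code>,
* sized via <code>DoFHandler::max_couplings_between_dofs()</code>, would
* look like; it is exactly this up-front per-row estimate that can
* over-allocate so badly in 3d:
*
* @code
*   // Hypothetical alternative, not part of this program: allocate a
*   // static pattern with a (possibly much too large) per-row estimate.
*   SparsityPattern static_pattern;
*   static_pattern.reinit(dof_handler.n_dofs(),
*                         dof_handler.n_dofs(),
*                         dof_handler.max_couplings_between_dofs());
*   DoFTools::make_sparsity_pattern(dof_handler, static_pattern);
*   hanging_node_constraints.condense(static_pattern);
*   static_pattern.compress();
* @endcode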
1811
1812 *
1813 * It is also worth noting that due to the specifics of
1814 * parallel::shared::Triangulation, the sparsity pattern we construct is
1815 * global, i.e. comprises all degrees of freedom whether they will be
1816 * owned by the processor we are on or another one (in case this program
1817 * is run in %parallel via MPI). This of course is not optimal -- it
1818 * limits the size of the problems we can solve, since storing the entire
1819 * sparsity pattern (even if only for a short time) on each processor does
1820 * not scale well. However, there are several more places in the program
1821 * in which we do this: for example, we always keep the global
1822 * triangulation and DoF handler objects around, even if we only work on
1823 * part of them. At present, deal.II does not have the necessary
1824 * facilities to completely distribute these objects (a task that, indeed,
1825 * is very hard to achieve with adaptive meshes, since well-balanced
1826 * subdivisions of a domain tend to become unbalanced as the mesh is
1827 * adaptively refined).
1828 *
1829
1830 *
1831 * With this data structure, we can then go to the PETSc sparse matrix and
1832 * tell it to preallocate all the entries we will later want to write to:
1833 *
1834 * @code
1835 *   system_matrix.reinit(locally_owned_dofs,
1836 *   locally_owned_dofs,
1837 *   sparsity_pattern,
1838 *   mpi_communicator);
1839 * @endcode
1840 *
1841 * After this point, no further explicit knowledge of the sparsity pattern
1842 * is required any more and we can let the <code>sparsity_pattern</code>
1843 * variable go out of scope without any problem.
1844 *
1845
1846 *
1847 * The last task in this function is then only to reset the right hand
1848 * side vector as well as the solution vector to their correct sizes;
1849 * remember that the solution vector is a local one, unlike the right hand
1850 * side that is a distributed %parallel one and therefore needs to know
1851 * the MPI communicator over which it is supposed to transmit messages:
1852 *
1853 * @code
1854 *   system_rhs.reinit(locally_owned_dofs, mpi_communicator);
1855 *   incremental_displacement.reinit(dof_handler.n_dofs());
1856 *   }
1857 *  
1858 *  
1859 *  
1860 * @endcode
1861 *
1862 *
1863 * <a name="step_18-TopLevelassemble_system"></a>
1864 * <h4>TopLevel::assemble_system</h4>
1865 *
1866
1867 *
1868 * Again, assembling the system matrix and right hand side follows the same
1869 * structure as in many example programs before. In particular, it is mostly
1870 * equivalent to @ref step_17 "step-17", except for the different right hand side that now
1871 * only has to take into account internal stresses. In addition, assembling
1872 * the matrix is made significantly more transparent by using the
1873 * <code>SymmetricTensor</code> class: note the elegance of forming the
1874 * scalar products of symmetric tensors of rank 2 and 4. The implementation
1875 * is also more general since it is independent of the fact that we may or
1876 * may not be using an isotropic elasticity tensor.
1877 *
1878
1879 *
1880 * The first part of the assembly routine is as always:
1881 *
1882 * @code
1883 *   template <int dim>
1884 *   void TopLevel<dim>::assemble_system()
1885 *   {
1886 *   system_rhs = 0;
1887 *   system_matrix = 0;
1888 *  
1889 *   FEValues<dim> fe_values(fe,
1890 *   quadrature_formula,
1891 *   update_values | update_gradients |
1892 *   update_quadrature_points | update_JxW_values);
1893 *  
1894 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1895 *   const unsigned int n_q_points = quadrature_formula.size();
1896 *  
1897 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1898 *   Vector<double> cell_rhs(dofs_per_cell);
1899 *  
1900 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1901 *  
1902 *   BodyForce<dim> body_force;
1903 *   std::vector<Vector<double>> body_force_values(n_q_points,
1904 *   Vector<double>(dim));
1905 *  
1906 * @endcode
1907 *
1908 * As in @ref step_17 "step-17", we only need to loop over all cells that belong to the
1909 * present processor:
1910 *
1911 * @code
1912 *   for (const auto &cell : dof_handler.active_cell_iterators())
1913 *   if (cell->is_locally_owned())
1914 *   {
1915 *   cell_matrix = 0;
1916 *   cell_rhs = 0;
1917 *  
1918 *   fe_values.reinit(cell);
1919 *  
1920 * @endcode
1921 *
1922 * Then loop over all indices i,j and quadrature points and assemble
1923 * the system matrix contributions from this cell. Note how we
1924 * extract the symmetric gradients (strains) of the shape functions
1925 * at a given quadrature point from the <code>FEValues</code>
1926 * object, and the elegance with which we form the triple
1927 * contraction <code>eps_phi_i : C : eps_phi_j</code>; the latter
1928 * needs to be compared to the clumsy computations needed in
1929 * @ref step_17 "step-17", both in the introduction as well as in the respective
1930 * place in the program:
1931 *
1932 * @code
1933 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1934 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1935 *   for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1936 *   {
1937 *   const SymmetricTensor<2, dim>
1938 *   eps_phi_i = get_strain(fe_values, i, q_point),
1939 *   eps_phi_j = get_strain(fe_values, j, q_point);
1940 *  
1941 *   cell_matrix(i, j) += (eps_phi_i *
1942 *   stress_strain_tensor *
1943 *   eps_phi_j
1944 *   ) *
1945 *   fe_values.JxW(q_point);
1946 *   }
1947 *  
1948 *  
1949 * @endcode
1950 *
1951 * Then also assemble the local right hand side contributions. For
1952 * this, we need to access the prior stress value in this quadrature
1953 * point. To get it, we use the user pointer of this cell that
1954 * points into the global array to the quadrature point data
1955 * corresponding to the first quadrature point of the present cell,
1956 * and then add an offset corresponding to the index of the
1957 * quadrature point we presently consider:
1958 *
1959 * @code
1960 *   const PointHistory<dim> *local_quadrature_points_data =
1961 *   reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
1962 * @endcode
1963 *
1964 * In addition, we need the values of the external body forces at
1965 * the quadrature points on this cell:
1966 *
1967 * @code
1968 *   body_force.vector_value_list(fe_values.get_quadrature_points(),
1969 *   body_force_values);
1970 * @endcode
1971 *
1972 * Then we can loop over all degrees of freedom on this cell and
1973 * compute local contributions to the right hand side:
1974 *
1975 * @code
1976 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1977 *   {
1978 *   const unsigned int component_i =
1979 *   fe.system_to_component_index(i).first;
1980 *  
1981 *   for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1982 *   {
1983 *   const SymmetricTensor<2, dim> &old_stress =
1984 *   local_quadrature_points_data[q_point].old_stress;
1985 *  
1986 *   cell_rhs(i) +=
1987 *   (body_force_values[q_point](component_i) *
1988 *   fe_values.shape_value(i, q_point) -
1989 *   old_stress * get_strain(fe_values, i, q_point)) *
1990 *   fe_values.JxW(q_point);
1991 *   }
1992 *   }
1993 *  
1994 * @endcode
1995 *
1996 * Now that we have the local contributions to the linear system, we
1997 * need to transfer them into the global objects. This is done exactly
1998 * as in @ref step_17 "step-17":
1999 *
2000 * @code
2001 *   cell->get_dof_indices(local_dof_indices);
2002 *  
2003 *   hanging_node_constraints.distribute_local_to_global(cell_matrix,
2004 *   cell_rhs,
2005 *   local_dof_indices,
2006 *   system_matrix,
2007 *   system_rhs);
2008 *   }
2009 *  
2010 * @endcode
2011 *
2012 * Now compress the vector and the system matrix:
2013 *
2014 * @code
2015 *   system_matrix.compress(VectorOperation::add);
2016 *   system_rhs.compress(VectorOperation::add);
2017 *  
2018 *  
2019 * @endcode
2020 *
2021 * The last step is to again fix up boundary values, just as we already
2022 * did in previous programs. A slight complication is that the
2023 * <code>apply_boundary_values</code> function wants to have a solution
2024 * vector compatible with the matrix and right hand side (i.e. here a
2025 * distributed %parallel vector, rather than the sequential vector we use
2026 * in this program) in order to preset the entries of the solution vector
2027 * with the correct boundary values. We provide such a compatible vector
2028 * in the form of a temporary vector which we then copy into the
2029 * sequential one.
2030 *
2031
2032 *
2033 * We make up for this complication by showing how boundary values can be
2034 * used flexibly: following the way we create the triangulation, there are
2035 * three distinct boundary indicators used to describe the domain,
2036 * corresponding to the bottom and top faces, as well as the inner/outer
2037 * surfaces. We would like to impose boundary conditions of the following
2038 * type: The inner and outer cylinder surfaces are free of external
2039 * forces, a fact that corresponds to natural (Neumann-type) boundary
2040 * conditions for which we don't have to do anything. At the bottom, we
2041 * want no movement at all, corresponding to the cylinder being clamped or
2042 * cemented in at this part of the boundary. At the top, however, we want
2043 * a prescribed vertical downward motion compressing the cylinder; in
2044 * addition, we only want to restrict the vertical movement, but not the
2045 * horizontal ones -- one can think of this situation as a well-greased
2046 * plate sitting on top of the cylinder pushing it downwards: the atoms of
2047 * the cylinder are forced to move downward, but they are free to slide
2048 * horizontally along the plate.
2049 *
2050
2051 *
2052 * The way to describe this is as follows: for boundary indicator zero
2053 * (bottom face) we use a dim-dimensional zero function representing no
2054 * motion in any coordinate direction. For the boundary with indicator 1
2055 * (top surface), we use the <code>IncrementalBoundaryValues</code> class,
2056 * but we specify an additional argument to the
2057 * <code>VectorTools::interpolate_boundary_values</code> function denoting
2058 * which vector components it should apply to; this is a vector of bools
2059 * for each vector component and because we only want to restrict vertical
2060 * motion, it has only its last component set:
2061 *
2062 * @code
2063 *   const FEValuesExtractors::Scalar z_component(dim - 1);
2064 *   std::map<types::global_dof_index, double> boundary_values;
2065 *   VectorTools::interpolate_boundary_values(dof_handler,
2066 *   0,
2067 *   Functions::ZeroFunction<dim>(dim),
2068 *   boundary_values);
2069 *   VectorTools::interpolate_boundary_values(
2070 *   dof_handler,
2071 *   1,
2072 *   IncrementalBoundaryValues<dim>(present_time, present_timestep),
2073 *   boundary_values,
2074 *   fe.component_mask(z_component));
2075 *  
2076 *   PETScWrappers::MPI::Vector tmp(locally_owned_dofs, mpi_communicator);
2077 *   MatrixTools::apply_boundary_values(
2078 *   boundary_values, system_matrix, tmp, system_rhs, false);
2079 *   incremental_displacement = tmp;
2080 *   }
2081 *  
2082 *  
2083 *  
2084 * @endcode
2085 *
2086 *
2087 * <a name="step_18-TopLevelsolve_timestep"></a>
2088 * <h4>TopLevel::solve_timestep</h4>
2089 *
2090
2091 *
2092 * The next function is the one that controls everything that has to happen
2093 * within a timestep. The order of things should be relatively self-explanatory
2094 * from the function names:
2095 *
2096 * @code
2097 *   template <int dim>
2098 *   void TopLevel<dim>::solve_timestep()
2099 *   {
2100 *   pcout << " Assembling system..." << std::flush;
2101 *   assemble_system();
2102 *   pcout << " norm of rhs is " << system_rhs.l2_norm() << std::endl;
2103 *  
2104 *   const unsigned int n_iterations = solve_linear_problem();
2105 *  
2106 *   pcout << " Solver converged in " << n_iterations << " iterations."
2107 *   << std::endl;
2108 *  
2109 *   pcout << " Updating quadrature point data..." << std::flush;
2110 *   update_quadrature_point_history();
2111 *   pcout << std::endl;
2112 *   }
2113 *  
2114 *  
2115 *  
2116 * @endcode
2117 *
2118 *
2119 * <a name="step_18-TopLevelsolve_linear_problem"></a>
2120 * <h4>TopLevel::solve_linear_problem</h4>
2121 *
2122
2123 *
2124 * Solving the linear system again works mostly as before. The only
2125 * difference is that we want to only keep a complete local copy of the
2126 * solution vector instead of the distributed one that we get as output from
2127 * PETSc's solver routines. To this end, we declare a local temporary
2128 * variable for the distributed vector and initialize it with the contents
2129 * of the local variable (remember that the
2130 * <code>apply_boundary_values</code> function called in
2131 * <code>assemble_system</code> preset the values of boundary nodes in this
2132 * vector), solve with it, and at the end of the function copy it again into
2133 * the complete local vector that we declared as a member variable. Hanging
2134 * node constraints are then distributed only on the local copy,
2135 * i.e. independently of each other on each of the processors:
2136 *
2137 * @code
2138 *   template <int dim>
2139 *   unsigned int TopLevel<dim>::solve_linear_problem()
2140 *   {
2141 *   PETScWrappers::MPI::Vector distributed_incremental_displacement(
2142 *   locally_owned_dofs, mpi_communicator);
2143 *   distributed_incremental_displacement = incremental_displacement;
2144 *  
2145 *   SolverControl solver_control(dof_handler.n_dofs(),
2146 *   1e-16 * system_rhs.l2_norm());
2147 *  
2148 *   PETScWrappers::SolverCG cg(solver_control);
2149 *  
2150 *   PETScWrappers::PreconditionBlockJacobi preconditioner(system_matrix);
2151 *  
2152 *   cg.solve(system_matrix,
2153 *   distributed_incremental_displacement,
2154 *   system_rhs,
2155 *   preconditioner);
2156 *  
2157 *   incremental_displacement = distributed_incremental_displacement;
2158 *  
2159 *   hanging_node_constraints.distribute(incremental_displacement);
2160 *  
2161 *   return solver_control.last_step();
2162 *   }
2163 *  
2164 *  
2165 *  
2166 * @endcode
2167 *
2168 *
2169 * <a name="step_18-TopLeveloutput_results"></a>
2170 * <h4>TopLevel::output_results</h4>
2171 *
2172
2173 *
2174 * This function generates the graphical output in .vtu format as explained
2175 * in the introduction. Each process will only work on the cells it owns,
2176 * and then write the result into a file of its own. Additionally, processor
2177 * 0 will write the record files that reference all the .vtu files.
2178 *
2179
2180 *
2181 * The crucial part of this function is to give the <code>DataOut</code>
2182 * class a way to only work on the cells that the present process owns.
2183 *
2184
2185 *
2186 *
2187 * @code
2188 *   template <int dim>
2189 *   void TopLevel<dim>::output_results() const
2190 *   {
2191 *   DataOut<dim> data_out;
2192 *   data_out.attach_dof_handler(dof_handler);
2193 *  
2194 * @endcode
2195 *
2196 * Then, just as in @ref step_17 "step-17", define the names of solution variables (which
2197 * here are the displacement increments) and queue the solution vector for
2198 * output. Note in the following switch how we make sure that if the space
2199 * dimension is unhandled, we throw an exception saying that we
2200 * haven't implemented this case yet (another case of defensive
2201 * programming):
2202 *
2203 * @code
2204 *   std::vector<std::string> solution_names;
2205 *   switch (dim)
2206 *   {
2207 *   case 1:
2208 *   solution_names.emplace_back("delta_x");
2209 *   break;
2210 *   case 2:
2211 *   solution_names.emplace_back("delta_x");
2212 *   solution_names.emplace_back("delta_y");
2213 *   break;
2214 *   case 3:
2215 *   solution_names.emplace_back("delta_x");
2216 *   solution_names.emplace_back("delta_y");
2217 *   solution_names.emplace_back("delta_z");
2218 *   break;
2219 *   default:
2220 *   DEAL_II_NOT_IMPLEMENTED();
2221 *   }
2222 *  
2223 *   data_out.add_data_vector(incremental_displacement, solution_names);
2224 *  
2225 *  
2226 * @endcode
2227 *
2228 * The next thing is that we wanted to output something like the average
2229 * norm of the stresses that we have stored in each cell. This may seem
2230 * complicated, since on the present processor we only store the stresses
2231 * in quadrature points on those cells that actually belong to the present
2232 * process. In other words, it seems as if we can't compute the average
2233 * stresses for all cells. However, remember that <code>DataOut</code>
2234 * by default only iterates over those cells that actually do
2235 * belong to the present processor, i.e. we don't have to compute anything
2236 * for all the other cells as this information would not be touched. The
2237 * following little loop does this. We enclose the entire block into a
2238 * pair of braces to make sure that the iterator variables do not remain
2239 * accidentally visible beyond the end of the block in which they are
2240 * used:
2241 *
2242 * @code
2243 *   Vector<double> norm_of_stress(triangulation.n_active_cells());
2244 *   {
2245 * @endcode
2246 *
2247 * Loop over all the cells...
2248 *
2249 * @code
2250 *   for (auto &cell : triangulation.active_cell_iterators())
2251 *   if (cell->is_locally_owned())
2252 *   {
2253 * @endcode
2254 *
2255 * On these cells, add up the stresses over all quadrature
2256 * points...
2257 *
2258 * @code
2259 *   SymmetricTensor<2, dim> accumulated_stress;
2260 *   for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
2261 *   accumulated_stress +=
2262 *   reinterpret_cast<PointHistory<dim> *>(cell->user_pointer())[q]
2263 *   .old_stress;
2264 *  
2265 * @endcode
2266 *
2267 * ...then write the norm of the average to their destination:
2268 *
2269 * @code
2270 *   norm_of_stress(cell->active_cell_index()) =
2271 *   (accumulated_stress / quadrature_formula.size()).norm();
2272 *   }
2273 * @endcode
2274 *
2275 * And on the cells that we are not interested in, set the respective
2276 * value in the vector to a bogus value (norms must be positive, and a
2277 * large negative value should catch your eye) in order to make sure
2278 * that if we were somehow wrong about our assumption that these
2279 * elements would not appear in the output file, we would find out
2280 * by looking at the graphical output:
2281 *
2282 * @code
2283 *   else
2284 *   norm_of_stress(cell->active_cell_index()) = -1e+20;
2285 *   }
2286 * @endcode
2287 *
2288 * Finally attach this vector as well to be treated for output:
2289 *
2290 * @code
2291 *   data_out.add_data_vector(norm_of_stress, "norm_of_stress");
2292 *  
2293 * @endcode
2294 *
2295 * As a last piece of data, let us also add the partitioning of the domain
2296 * into subdomains associated with the processors if this is a parallel
2297 * job. This works in the exact same way as in the @ref step_17 "step-17" program:
2298 *
2299 * @code
2300 *   std::vector<types::subdomain_id> partition_int(
2301 *   triangulation.n_active_cells());
2302 *   GridTools::get_subdomain_association(triangulation, partition_int);
2303 *   const Vector<double> partitioning(partition_int.begin(),
2304 *   partition_int.end());
2305 *   data_out.add_data_vector(partitioning, "partitioning");
2306 *  
2307 * @endcode
2308 *
2309 * Finally, with all this data, we can instruct deal.II to munge the
2310 * information and produce some intermediate data structures that contain
2311 * all these solution and other data vectors:
2312 *
2313 * @code
2314 *   data_out.build_patches();
2315 *  
2316 * @endcode
2317 *
2318 * Let us call a function that opens the necessary output files and writes
2319 * the data we have generated into them. The function automatically
2320 * constructs the file names from the given directory name (the first
2321 * argument) and file name base (second argument). It augments the resulting
2322 * string by pieces that result from the time step number and a "piece
2323 * number" that corresponds to a part of the overall domain that can consist
2324 * of one or more subdomains.
2325 *
2326
2327 *
2328 * The function also writes a record file (with suffix `.pvtu`) for Paraview
2329 * that describes how all of these output files combine into the data for
2330 * this single time step:
2331 *
2332 * @code
2333 *   const std::string pvtu_filename = data_out.write_vtu_with_pvtu_record(
2334 *   "./", "solution", timestep_no, mpi_communicator, 4);
2335 *  
2336 * @endcode
2337 *
2338 * The record files must be written only once and not by each processor,
2339 * so we do this on processor 0:
2340 *
2341 * @code
2342 *   if (this_mpi_process == 0)
2343 *   {
2344 * @endcode
2345 *
2346 * Finally, we write the paraview record that references all .pvtu
2347 * files and their respective times. Note that the variable
2348 * times_and_names is declared static, so it will retain the entries
2349 * from the previous timesteps.
2350 *
2351 * @code
2352 *   static std::vector<std::pair<double, std::string>> times_and_names;
2353 *   times_and_names.emplace_back(present_time, pvtu_filename);
2354 *   std::ofstream pvd_output("solution.pvd");
2355 *   DataOutBase::write_pvd_record(pvd_output, times_and_names);
2356 *   }
2357 *   }
2358 *  
2359 *  
2360 *  
2361 * @endcode
2362 *
2363 *
2364 * <a name="step_18-TopLeveldo_initial_timestep"></a>
2365 * <h4>TopLevel::do_initial_timestep</h4>
2366 *
2367
2368 *
2369 * This and the next function handle the overall structure of the first and
2370 * following timesteps, respectively. The first timestep is slightly more
2371 * involved because we want to compute it multiple times on successively
2372 * refined meshes, each time starting from a clean state. At the end of
2373 * these computations, in which we compute the incremental displacements
2374 * each time, we use the last results obtained for the incremental
2375 * displacements to compute the resulting stress updates and move the mesh
2376 * accordingly. On this new mesh, we then output the solution and any
2377 * additional data we consider important.
2378 *
2379
2380 *
2381 * All this is interspersed by generating output to the console to update
2382 * the person watching the screen on what is going on. As in @ref step_17 "step-17", the
2383 * use of <code>pcout</code> instead of <code>std::cout</code> makes sure
2384 * that only one of the parallel processes is actually writing to the
2385 * console, without having to explicitly code an if-statement in each place
2386 * where we generate output:
2387 *
2388 * @code
2389 *   template <int dim>
2390 *   void TopLevel<dim>::do_initial_timestep()
2391 *   {
2392 *   present_time += present_timestep;
2393 *   ++timestep_no;
2394 *   pcout << "Timestep " << timestep_no << " at time " << present_time
2395 *   << std::endl;
2396 *  
2397 *   for (unsigned int cycle = 0; cycle < 2; ++cycle)
2398 *   {
2399 *   pcout << " Cycle " << cycle << ':' << std::endl;
2400 *  
2401 *   if (cycle == 0)
2402 *   create_coarse_grid();
2403 *   else
2404 *   refine_initial_grid();
2405 *  
2406 *   pcout << " Number of active cells: "
2407 *   << triangulation.n_active_cells() << " (by partition:";
2408 *   for (unsigned int p = 0; p < n_mpi_processes; ++p)
2409 *   pcout << (p == 0 ? ' ' : '+')
2410 *   << (GridTools::count_cells_with_subdomain_association(
2411 *   triangulation, p));
2412 *   pcout << ')' << std::endl;
2413 *  
2414 *   setup_system();
2415 *  
2416 *   pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
2417 *   << " (by partition:";
2418 *   for (unsigned int p = 0; p < n_mpi_processes; ++p)
2419 *   pcout << (p == 0 ? ' ' : '+')
2420 *   << (DoFTools::count_dofs_with_subdomain_association(dof_handler,
2421 *   p));
2422 *   pcout << ')' << std::endl;
2423 *  
2424 *   solve_timestep();
2425 *   }
2426 *  
2427 *   move_mesh();
2428 *   output_results();
2429 *  
2430 *   pcout << std::endl;
2431 *   }
2432 *  
2433 *  
2434 *  
2435 * @endcode
2436 *
2437 *
2438 * <a name="step_18-TopLeveldo_timestep"></a>
2439 * <h4>TopLevel::do_timestep</h4>
2440 *
2441
2442 *
2443 * Subsequent timesteps are simpler, and probably do not require any more
2444 * documentation given the explanations for the previous function above:
2445 *
2446 * @code
2447 *   template <int dim>
2448 *   void TopLevel<dim>::do_timestep()
2449 *   {
2450 *   present_time += present_timestep;
2451 *   ++timestep_no;
2452 *   pcout << "Timestep " << timestep_no << " at time " << present_time
2453 *   << std::endl;
2454 *   if (present_time > end_time)
2455 *   {
2456 *   present_timestep -= (present_time - end_time);
2457 *   present_time = end_time;
2458 *   }
2459 *  
2460 *  
2461 *   solve_timestep();
2462 *  
2463 *   move_mesh();
2464 *   output_results();
2465 *  
2466 *   pcout << std::endl;
2467 *   }
2468 *  
2469 *  
2470 * @endcode
2471 *
2472 *
2473 * <a name="step_18-TopLevelrefine_initial_grid"></a>
2474 * <h4>TopLevel::refine_initial_grid</h4>
2475 *
2476
2477 *
2478 * The following function is called when solving the first time step on
2479 * successively refined meshes. After each iteration, it computes a
2480 * refinement criterion, refines the mesh, and sets up the history variables
2481 * in each quadrature point again to a clean state.
2482 *
2483 * @code
2484 *   template <int dim>
2485 *   void TopLevel<dim>::refine_initial_grid()
2486 *   {
2487 * @endcode
2488 *
2489 * First, let each process compute error indicators for the cells it owns:
2490 *
2491 * @code
2492 *   Vector<float> error_per_cell(triangulation.n_active_cells());
2493 *   KellyErrorEstimator<dim>::estimate(
2494 *   dof_handler,
2495 *   QGauss<dim - 1>(fe.degree + 1),
2496 *   std::map<types::boundary_id, const Function<dim> *>(),
2497 *   incremental_displacement,
2498 *   error_per_cell,
2499 *   ComponentMask(),
2500 *   nullptr,
2501 *   MultithreadInfo::n_threads(),
2502 *   this_mpi_process);
2503 *  
2504 * @endcode
2505 *
2506 * Then set up a global vector into which we merge the local indicators
2507 * from each of the %parallel processes:
2508 *
2509 * @code
2510 *   const unsigned int n_local_cells =
2511 *   triangulation.n_locally_owned_active_cells();
2512 *  
2513 *   PETScWrappers::MPI::Vector distributed_error_per_cell(
2514 *   mpi_communicator, triangulation.n_active_cells(), n_local_cells);
2515 *  
2516 *   for (unsigned int i = 0; i < error_per_cell.size(); ++i)
2517 *   if (error_per_cell(i) != 0)
2518 *   distributed_error_per_cell(i) = error_per_cell(i);
2519 *   distributed_error_per_cell.compress(VectorOperation::insert);
2520 *  
2521 * @endcode
2522 *
2523 * Once we have that, copy it back into local copies on all processors and
2524 * refine the mesh accordingly:
2525 *
2526 * @code
2527 *   error_per_cell = distributed_error_per_cell;
2528 *   GridRefinement::refine_and_coarsen_fixed_number(triangulation,
2529 *   error_per_cell,
2530 *   0.35,
2531 *   0.03);
2532 *   triangulation.execute_coarsening_and_refinement();
2533 *  
2534 * @endcode
2535 *
2536 * Finally, set up quadrature point data again on the new mesh, and only
2537 * on those cells that we have determined to be ours:
2538 *
2539 * @code
2540 *   setup_quadrature_point_history();
2541 *   }
2542 *  
2543 *  
2544 *  
2545 * @endcode
2546 *
2547 *
2548 * <a name="step_18-TopLevelmove_mesh"></a>
2549 * <h4>TopLevel::move_mesh</h4>
2550 *
2551
2552 *
2553 * At the end of each time step, we move the nodes of the mesh according to
2554 * the incremental displacements computed in this time step. To do this, we
2555 * keep a vector of flags that indicate for each vertex whether we have
2556 * already moved it around, and then loop over all cells and move those
2557 * vertices of the cell that have not been moved yet. It is worth noting
2558 * that it does not matter from which of the cells adjacent to a vertex we
2559 * move this vertex: since we compute the displacement using a continuous
2560 * finite element, the displacement field is continuous as well and we can
2561 * compute the displacement of a given vertex from each of the adjacent
2562 * cells. We only have to make sure that we move each node exactly once,
2563 * which is why we keep the vector of flags.
2564 *
2565
2566 *
2567 * There are two noteworthy things in this function. First, how we get the
2568 * displacement field at a given vertex using the
2569 * <code>cell-@>vertex_dof_index(v,d)</code> function that returns the index
2570 * of the <code>d</code>th degree of freedom at vertex <code>v</code> of the
2571 * given cell. In the present case, displacement in the k-th coordinate
2572 * direction corresponds to the k-th component of the finite element. Using a
2573 * function like this bears a certain risk, because it uses knowledge of the
2574 * order of elements that we have taken together for this program in the
2575 * <code>FESystem</code> element. If we decided to add an additional
2576 * variable, for example a pressure variable for stabilization, and happened
2577 * to insert it as the first variable of the element, then the computation
2578 * below will start to produce nonsensical results. In addition, this
2579 * computation rests on other assumptions: first, that the element we use
2580 * has, indeed, degrees of freedom that are associated with vertices. This
2581 * is indeed the case for the present Q1 element, as it would be for all Qp
2582 * elements of polynomial order <code>p</code>. However, it would not hold
2583 * for discontinuous elements, or elements for mixed formulations. Secondly,
2584 * it also rests on the assumption that the displacement at a vertex is
2585 * determined solely by the value of the degree of freedom associated with
2586 * this vertex; in other words, all shape functions corresponding to other
2587 * degrees of freedom are zero at this particular vertex. Again, this is the
2588 * case for the present element, but is not so for all elements that are
2589 * presently available in deal.II. Despite its risks, we choose to use this
2590 * way in order to present a way to query individual degrees of freedom
2591 * associated with vertices.
2592 *
2593
2594 *
2595 * In this context, it is instructive to point out what a more general way
2596 * would be. For general finite elements, the way to go would be to take a
2597 * quadrature formula with the quadrature points in the vertices of a
2598 * cell. The <code>QTrapezoid</code> formula for the trapezoidal rule does
2599 * exactly this. With this quadrature formula, we would then initialize an
2600 * <code>FEValues</code> object in each cell, and use the
2601 * <code>FEValues::get_function_values</code> function to obtain the values
2602 * of the solution function in the quadrature points, i.e. the vertices of
2603 * the cell. These are the only values that we really need, i.e. we are not
2604 * at all interested in the weights (or the <code>JxW</code> values)
2605 * associated with this particular quadrature formula, and this can be
2606 * specified as the last argument in the constructor to
2607 * <code>FEValues</code>. The only point of minor inconvenience in this
2608 * scheme is that we have to figure out which quadrature point corresponds
2609 * to the vertex we consider at present, as they may or may not be ordered
2610 * in the same order.
2611 *
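* A minimal sketch of this approach (not part of this program, and reusing
* the member variables of the present class) might look like the
* following:
*
* @code
*   const QTrapezoid<dim> vertex_quadrature;
*   FEValues<dim>         fe_vertex_values(fe,
*                                          vertex_quadrature,
*                                          update_values);
*  
*   std::vector<Vector<double>> vertex_displacements(
*     vertex_quadrature.size(), Vector<double>(dim));
*  
*   for (const auto &cell : dof_handler.active_cell_iterators())
*     if (cell->is_locally_owned())
*       {
*         fe_vertex_values.reinit(cell);
*         fe_vertex_values.get_function_values(incremental_displacement,
*                                              vertex_displacements);
*         // vertex_displacements[q] now holds the dim displacement
*         // components at the q-th quadrature point, i.e., at one of the
*         // cell's vertices (subject to the ordering caveat above).
*       }
* @endcode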
2612
2613 *
2614 * This inconvenience could be avoided if finite elements have support
2615 * points on vertices (which the one here has; for the concept of support
2616 * points, see @ref GlossSupport "support points"). For such a case, one
2617 * could construct a custom quadrature rule using
2618 * FiniteElement::get_unit_support_points(). The first
2619 * <code>cell-&gt;n_vertices()*fe.dofs_per_vertex</code>
2620 * quadrature points will then correspond to the vertices of the cell and
2621 * are ordered consistent with <code>cell-@>vertex(i)</code>, taking into
2622 * account that support points for vector elements will be duplicated
2623 * <code>fe.dofs_per_vertex</code> times.
2624 *
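* In code, such a quadrature rule could be built as follows (again only a
* hedged sketch, not used in this program):
*
* @code
*   // Assumes the element actually has support points, which holds for
*   // the Q1 element used here:
*   Assert(fe.has_support_points(), ExcNotImplemented());
*   const Quadrature<dim> support_quadrature(fe.get_unit_support_points());
*   FEValues<dim> fe_support_values(fe, support_quadrature, update_values);
* @endcode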
2625
2626 *
2627 * Another point worth explaining about this short function is the way in
2628 * which the triangulation class exports information about its vertices:
2629 * through the <code>Triangulation::n_vertices</code> function, it
2630 * advertises how many vertices there are in the triangulation. Not all of
2631 * them are actually in use all the time -- some are left-overs from cells
2632 * that have been coarsened previously and remain in existence since deal.II
2633 * never changes the number of a vertex once it has come into existence,
2634 * even if vertices with lower numbers go away. Secondly, the location
2635 * returned by <code>cell-@>vertex(v)</code> is not only a read-only object
2636 * of type <code>Point@<dim@></code>, but in fact a reference that can be
2637 * written to. This allows us to move around the nodes of a mesh with relative
2638 * ease, but it is worth pointing out that it is the responsibility of an
2639 * application program using this feature to make sure that the resulting
2640 * cells are still useful, i.e. are not distorted so much that the cell is
2641 * degenerated (indicated, for example, by negative Jacobians). Note that we
2642 * do not have any provisions in this function to actually ensure this:
2643 * we just have faith.
2644 *
2645
2646 *
2647 * After this lengthy introduction, here are the full 20 or so lines of
2648 * code:
2649 *
2650 * @code
2651 *   template <int dim>
2652 *   void TopLevel<dim>::move_mesh()
2653 *   {
2654 *   pcout << " Moving mesh..." << std::endl;
2655 *  
2656 *   std::vector<bool> vertex_touched(triangulation.n_vertices(), false);
2657 *   for (auto &cell : dof_handler.active_cell_iterators())
2658 *   for (const auto v : cell->vertex_indices())
2659 *   if (vertex_touched[cell->vertex_index(v)] == false)
2660 *   {
2661 *   vertex_touched[cell->vertex_index(v)] = true;
2662 *  
2663 *   Point<dim> vertex_displacement;
2664 *   for (unsigned int d = 0; d < dim; ++d)
2665 *   vertex_displacement[d] =
2666 *   incremental_displacement(cell->vertex_dof_index(v, d));
2667 *  
2668 *   cell->vertex(v) += vertex_displacement;
2669 *   }
2670 *   }
2671 *  
2672 *  
2673 * @endcode
2674 *
2675 *
2676 * <a name="step_18-TopLevelsetup_quadrature_point_history"></a>
2677 * <h4>TopLevel::setup_quadrature_point_history</h4>
2678 *
2679
2680 *
2681 * At the beginning of our computations, we needed to set up initial values
2682 * of the history variables, such as the existing stresses in the material,
2683 * that we store in each quadrature point. As mentioned above, we use the
2684 * <code>user_pointer</code> for this that is available in each cell.
2685 *
2686
2687 *
2688 * To put this into larger perspective, we note that if we had previously
2689 * available stresses in our model (which we assume do not exist for the
2690 * purpose of this program), then we would need to interpolate the field of
2691 * preexisting stresses to the quadrature points. Likewise, if we were to
2692 * simulate elasto-plastic materials with hardening/softening, then we would
2693 * have to store additional history variables like the present yield stress
2694 * or the accumulated plastic strains in each quadrature
2695 * point. Pre-existing hardening or weakening would then be implemented by
2696 * interpolating these variables in the present function as well.
2697 *
2698 * @code
2699 *   template <int dim>
2700 *   void TopLevel<dim>::setup_quadrature_point_history()
2701 *   {
2702 * @endcode
2703 *
2704 * For good measure, we set all user pointers of all cells, whether
2705 * ours or not, to the null pointer. This way, if we ever access the user
2706 * pointer of a cell which we should not have accessed, a segmentation
2707 * fault will let us know that this should not have happened:
2708 *
2709
2710 *
2711 *
2712 * @code
2713 *   triangulation.clear_user_data();
2714 *  
2715 * @endcode
2716 *
2717 * Next, allocate the quadrature objects that are within the responsibility
2718 * of this processor. This, of course, equals the number of cells that
2719 * belong to this processor times the number of quadrature points our
2720 * quadrature formula has on each cell. Since the `resize()` function does
2721 * not actually shrink the amount of allocated memory if the requested new
2722 * size is smaller than the old size, we resort to a trick to first free all
2723 * memory, and then reallocate it: we declare an empty vector as a temporary
2724 * variable and then swap the contents of the old vector and this temporary
2725 * variable. This makes sure that the `quadrature_point_history` is now
2726 * really empty, and we can let the temporary variable that now holds the
2727 * previous contents of the vector go out of scope and be destroyed. In the
2728 * next step we can then re-allocate as many elements as we need, with the
2729 * vector default-initializing the `PointHistory` objects, which includes
2730 * setting the stress variables to zero.
2731 *
2732 * @code
2733 *   {
2734 *   std::vector<PointHistory<dim>> tmp;
2735 *   quadrature_point_history.swap(tmp);
2736 *   }
2737 *   quadrature_point_history.resize(
2738 *   triangulation.n_locally_owned_active_cells() * quadrature_formula.size());
2739 *  
2740 * @endcode
2741 *
2742 * Finally loop over all cells again and set the user pointers from the
2743 * cells that belong to the present processor to point to the first
2744 * quadrature point objects corresponding to this cell in the vector of
2745 * such objects:
2746 *
2747 * @code
2748 *   unsigned int history_index = 0;
2749 *   for (auto &cell : triangulation.active_cell_iterators())
2750 *   if (cell->is_locally_owned())
2751 *   {
2752 *   cell->set_user_pointer(&quadrature_point_history[history_index]);
2753 *   history_index += quadrature_formula.size();
2754 *   }
2755 *  
2756 * @endcode
2757 *
2758 * At the end, for good measure make sure that our count of elements was
2759 * correct and that we have both used up all objects we allocated
2760 * previously, and do not point to any objects beyond the end of the
2761 * vector. Such defensive programming strategies are always good checks to
2762 * avoid accidental errors and to guard against future changes to this
2763 * function that forget to update all uses of a variable at the same
2764 * time. Recall that constructs using the <code>Assert</code> macro are
2765 * optimized away in optimized mode, so do not affect the run time of
2766 * optimized runs:
2767 *
2768 * @code
2769 *   Assert(history_index == quadrature_point_history.size(),
2770 *   ExcInternalError());
2771 *   }
2772 *  
2773 *  
2774 *  
2775 * @endcode
2776 *
2777 *
2778 * <a name="step_18-TopLevelupdate_quadrature_point_history"></a>
2779 * <h4>TopLevel::update_quadrature_point_history</h4>
2780 *
2781
2782 *
2783 * At the end of each time step, we should have computed an incremental
2784 * displacement update so that the material in its new configuration
2785 * accommodates for the difference between the external body and boundary
2786 * forces applied during this time step minus the forces exerted through
2787 * preexisting internal stresses. In order to have the preexisting
2788 * stresses available at the next time step, we therefore have to update the
2789 * preexisting stresses with the stresses due to the incremental
2790 * displacement computed during the present time step. Ideally, the
2791 * resulting sum of internal stresses would exactly counter all external
2792 * forces. Indeed, a simple experiment can verify that this is so: if we
2793 * choose boundary conditions and body forces to be time independent, then
2794 * the forcing terms (the sum of external forces and internal stresses)
2795 * should be exactly zero. If you make this experiment, you will realize
2796 * from the output of the norm of the right hand side in each time step that
2797 * this is almost the case: it is not exactly zero, since in the first time
2798 * step the incremental displacement and stress updates were computed
2799 * relative to the undeformed mesh, which was then deformed. In the second
2800 * time step, we again compute displacement and stress updates, but this
2801 * time in the deformed mesh -- there, the resulting updates are very small
2802 * but not quite zero. This can be iterated, and in each such iteration the
2803 * residual, i.e. the norm of the right hand side vector, is reduced; if one
2804 * makes this little experiment, one realizes that the norm of this residual
2805 * decays exponentially with the number of iterations, and after an initial
2806 * very rapid decline is reduced by a factor of about 3.5 in each
2807 * iteration (for one testcase I looked at; other testcases and other
2808 * numbers of unknowns change the factor, but not the exponential decay).
2809 *
2810
2811 *
2812 * In a sense, this can then be considered as a quasi-timestepping scheme to
2813 * resolve the nonlinear problem of solving large-deformation elasticity on
2814 * a mesh that is moved along in a Lagrangian manner.
2815 *
2816
2817 *
2818 * Another complication is that the existing (old) stresses are defined on
2819 * the old mesh, which we will move around after updating the stresses. If
2820 * this mesh update involves rotations of the cell, then we need to also
2821 * rotate the updated stress, since it was computed relative to the
2822 * coordinate system of the old cell.
2823 *
2824
2825 *
2826 * Thus, what we need is the following: on each cell which the present
2827 * processor owns, we need to extract the old stress from the data stored
2828 * with each quadrature point, compute the stress update, add the two
2829 * together, and then rotate the result together with the incremental
2830 * rotation computed from the incremental displacement at the present
2831 * quadrature point. We will detail these steps below:
2832 *
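* In formulas, the update performed at each quadrature point below reads
* @f[
*   \sigma \leftarrow R^T \left( \sigma_{\text{old}}
*     + C : \Delta\varepsilon \right) R,
* @f]
* where @f$R@f$ is the incremental rotation matrix computed from the
* incremental displacement, @f$C@f$ the stress-strain tensor, and
* @f$\Delta\varepsilon@f$ the strain increment.
*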
2833 * @code
2834 *   template <int dim>
2835 *   void TopLevel<dim>::update_quadrature_point_history()
2836 *   {
2837 * @endcode
2838 *
2839 * First, set up an <code>FEValues</code> object by which we will evaluate
2840 * the incremental displacements and the gradients thereof at the
2841 * quadrature points, together with a vector that will hold this
2842 * information:
2843 *
2844 * @code
2845 *   FEValues<dim> fe_values(fe,
2846 *   quadrature_formula,
2847 *   update_values | update_gradients);
2848 *  
2849 *   std::vector<std::vector<Tensor<1, dim>>> displacement_increment_grads(
2850 *   quadrature_formula.size(), std::vector<Tensor<1, dim>>(dim));
2851 *  
2852 * @endcode
2853 *
2854 * Then loop over all cells and do the job in the cells that belong to our
2855 * subdomain:
2856 *
2857 * @code
2858 *   for (auto &cell : dof_handler.active_cell_iterators())
2859 *   if (cell->is_locally_owned())
2860 *   {
2861 * @endcode
2862 *
2863 * Next, get a pointer to the quadrature point history data local to
2864 * the present cell, and, as a defensive measure, make sure that
2865 * this pointer is within the bounds of the global array:
2866 *
2867 * @code
2868 *   PointHistory<dim> *local_quadrature_points_history =
2869 *   reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
2870 *   Assert(local_quadrature_points_history >=
2871 *   &quadrature_point_history.front(),
2872 *   ExcInternalError());
2873 *   Assert(local_quadrature_points_history <=
2874 *   &quadrature_point_history.back(),
2875 *   ExcInternalError());
2876 *  
2877 * @endcode
2878 *
2879 * Then initialize the <code>FEValues</code> object on the present
2880 * cell, and extract the gradients of the displacement at the
2881 * quadrature points for later computation of the strains
2882 *
2883 * @code
2884 *   fe_values.reinit(cell);
2885 *   fe_values.get_function_gradients(incremental_displacement,
2886 *   displacement_increment_grads);
2887 *  
2888 * @endcode
2889 *
2890 * Then loop over the quadrature points of this cell:
2891 *
2892 * @code
2893 *   for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
2894 *   {
2895 * @endcode
2896 *
2897 * On each quadrature point, compute the strain increment from
2898 * the gradients, and multiply it by the stress-strain tensor to
2899 * get the stress update. Then add this update to the already
2900 * existing stress at this point:
2901 *
2902 * @code
2903 *   const SymmetricTensor<2, dim> new_stress =
2904 *   (local_quadrature_points_history[q].old_stress +
2905 *   (stress_strain_tensor *
2906 *   get_strain(displacement_increment_grads[q])));
2907 *  
2908 * @endcode
2909 *
2910 * Finally, we have to rotate the result. For this, we first
2911 * have to compute a rotation matrix at the present quadrature
2912 * point from the incremental displacements. In fact, it can be
2913 * computed from the gradients, and we already have a function
2914 * for that purpose:
2915 *
2916 * @code
2917 *   const Tensor<2, dim> rotation =
2918 *   get_rotation_matrix(displacement_increment_grads[q]);
2919 * @endcode
2920 *
2921 * Note that the result, a rotation matrix, is in general not a
2922 * symmetric tensor of rank 2, so we must store it as a full
2923 * tensor rather than a <code>SymmetricTensor</code>.
2924 *
2925
2926 *
2927 * With this rotation matrix, we can compute the rotated tensor
2928 * by contraction from the left and right, after we expand the
2929 * symmetric tensor <code>new_stress</code> into a full tensor:
2930 *
2931 * @code
2932 *   const SymmetricTensor<2, dim> rotated_new_stress =
2933 *   symmetrize(transpose(rotation) *
2934 *   static_cast<Tensor<2, dim>>(new_stress) * rotation);
2935 * @endcode
2936 *
2937 * Note that while the result of the multiplication of these
2938 * three matrices should be symmetric, it is not, due to
2939 * floating-point round-off: we get an asymmetry on the order of
2940 * 1e-16 in the off-diagonal elements of the result. When assigning the
2941 * result to a <code>SymmetricTensor</code>, the constructor of
2942 * that class checks the symmetry and realizes that it isn't
2943 * exactly symmetric; it will then raise an exception. To avoid
2944 * that, we explicitly symmetrize the result to make it exactly
2945 * symmetric.
2946 *
2947
2948 *
2949 * The result of all these operations is then written back into
2950 * the original place:
2951 *
2952 * @code
2953 *   local_quadrature_points_history[q].old_stress =
2954 *   rotated_new_stress;
2955 *   }
2956 *   }
2957 *   }
2958 *  
2959 * @endcode
2960 *
2961 * This ends the project-specific namespace <code>Step18</code>. The rest is
2962 * as usual and as already shown in @ref step_17 "step-17": A <code>main()</code> function
2963 * that initializes and terminates PETSc, calls the classes that do the
2964 * actual work, and makes sure that we catch all exceptions that propagate
2965 * up to this point:
2966 *
2967 * @code
2968 *   } // namespace Step18
2969 *  
2970 *  
2971 *   int main(int argc, char **argv)
2972 *   {
2973 *   try
2974 *   {
2975 *   using namespace dealii;
2976 *   using namespace Step18;
2977 *  
2978 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
2979 *  
2980 *   TopLevel<3> elastic_problem;
2981 *   elastic_problem.run();
2982 *   }
2983 *   catch (std::exception &exc)
2984 *   {
2985 *   std::cerr << std::endl
2986 *   << std::endl
2987 *   << "----------------------------------------------------"
2988 *   << std::endl;
2989 *   std::cerr << "Exception on processing: " << std::endl
2990 *   << exc.what() << std::endl
2991 *   << "Aborting!" << std::endl
2992 *   << "----------------------------------------------------"
2993 *   << std::endl;
2994 *  
2995 *   return 1;
2996 *   }
2997 *   catch (...)
2998 *   {
2999 *   std::cerr << std::endl
3000 *   << std::endl
3001 *   << "----------------------------------------------------"
3002 *   << std::endl;
3003 *   std::cerr << "Unknown exception!" << std::endl
3004 *   << "Aborting!" << std::endl
3005 *   << "----------------------------------------------------"
3006 *   << std::endl;
3007 *   return 1;
3008 *   }
3009 *  
3010 *   return 0;
3011 *   }
3012 * @endcode
3013<a name="step_18-Results"></a><h1>Results</h1>
3014
3015
3016
3017Running the program takes a good while if one uses debug mode; it takes about
3018eleven minutes on my i7 desktop. Fortunately, the version compiled with
3019optimizations is much faster; the program only takes about a minute and a half
3020after recompiling with the command <tt>make release</tt> on the same machine, a
3021much more reasonable time.
3022
3023
3024If run, the program prints the following output, explaining what it is
3025doing during all that time:
3026@verbatim
3027\$ time make run
3028[ 66%] Built target step-18
3029[100%] Run step-18 with Release configuration
3030Timestep 1 at time 1
3031 Cycle 0:
3032 Number of active cells: 3712 (by partition: 3712)
3033 Number of degrees of freedom: 17226 (by partition: 17226)
3034 Assembling system... norm of rhs is 1.88062e+10
3035 Solver converged in 103 iterations.
3036 Updating quadrature point data...
3037 Cycle 1:
3038 Number of active cells: 12812 (by partition: 12812)
3039 Number of degrees of freedom: 51738 (by partition: 51738)
3040 Assembling system... norm of rhs is 1.86145e+10
3041 Solver converged in 121 iterations.
3042 Updating quadrature point data...
3043 Moving mesh...
3044
3045Timestep 2 at time 2
3046 Assembling system... norm of rhs is 1.84169e+10
3047 Solver converged in 122 iterations.
3048 Updating quadrature point data...
3049 Moving mesh...
3050
3051Timestep 3 at time 3
3052 Assembling system... norm of rhs is 1.82355e+10
3053 Solver converged in 122 iterations.
3054 Updating quadrature point data...
3055 Moving mesh...
3056
3057Timestep 4 at time 4
3058 Assembling system... norm of rhs is 1.80728e+10
3059 Solver converged in 117 iterations.
3060 Updating quadrature point data...
3061 Moving mesh...
3062
3063Timestep 5 at time 5
3064 Assembling system... norm of rhs is 1.79318e+10
3065 Solver converged in 116 iterations.
3066 Updating quadrature point data...
3067 Moving mesh...
3068
3069Timestep 6 at time 6
3070 Assembling system... norm of rhs is 1.78171e+10
3071 Solver converged in 115 iterations.
3072 Updating quadrature point data...
3073 Moving mesh...
3074
3075Timestep 7 at time 7
3076 Assembling system... norm of rhs is 1.7737e+10
3077 Solver converged in 112 iterations.
3078 Updating quadrature point data...
3079 Moving mesh...
3080
3081Timestep 8 at time 8
3082 Assembling system... norm of rhs is 1.77127e+10
3083 Solver converged in 111 iterations.
3084 Updating quadrature point data...
3085 Moving mesh...
3086
3087Timestep 9 at time 9
3088 Assembling system... norm of rhs is 1.78207e+10
3089 Solver converged in 113 iterations.
3090 Updating quadrature point data...
3091 Moving mesh...
3092
3093Timestep 10 at time 10
3094 Assembling system... norm of rhs is 1.83544e+10
3095 Solver converged in 115 iterations.
3096 Updating quadrature point data...
3097 Moving mesh...
3098
3099[100%] Built target run
3100make run 176.82s user 0.15s system 198% cpu 1:28.94 total
3101@endverbatim
3102In other words, it is computing on 12,000 cells with some 52,000
3103unknowns. Not a whole lot, but enough for a coupled three-dimensional
3104problem to keep a computer busy for a while. At the end of the day,
3105this is what we have for output:
3106@verbatim
3107\$ ls -l *vtu *visit
3108-rw-r--r-- 1 drwells users 1706059 Feb 13 19:36 solution-0010.000.vtu
3109-rw-r--r-- 1 drwells users 761 Feb 13 19:36 solution-0010.pvtu
3110-rw-r--r-- 1 drwells users 33 Feb 13 19:36 solution-0010.visit
3111-rw-r--r-- 1 drwells users 1707907 Feb 13 19:36 solution-0009.000.vtu
3112-rw-r--r-- 1 drwells users 761 Feb 13 19:36 solution-0009.pvtu
3113-rw-r--r-- 1 drwells users 33 Feb 13 19:36 solution-0009.visit
3114-rw-r--r-- 1 drwells users 1703771 Feb 13 19:35 solution-0008.000.vtu
3115-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0008.pvtu
3116-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0008.visit
3117-rw-r--r-- 1 drwells users 1693671 Feb 13 19:35 solution-0007.000.vtu
3118-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0007.pvtu
3119-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0007.visit
3120-rw-r--r-- 1 drwells users 1681847 Feb 13 19:35 solution-0006.000.vtu
3121-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0006.pvtu
3122-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0006.visit
3123-rw-r--r-- 1 drwells users 1670115 Feb 13 19:35 solution-0005.000.vtu
3124-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0005.pvtu
3125-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0005.visit
3126-rw-r--r-- 1 drwells users 1658559 Feb 13 19:35 solution-0004.000.vtu
3127-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0004.pvtu
3128-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0004.visit
3129-rw-r--r-- 1 drwells users 1639983 Feb 13 19:35 solution-0003.000.vtu
3130-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0003.pvtu
3131-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0003.visit
3132-rw-r--r-- 1 drwells users 1625851 Feb 13 19:35 solution-0002.000.vtu
3133-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0002.pvtu
3134-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0002.visit
3135-rw-r--r-- 1 drwells users 1616035 Feb 13 19:34 solution-0001.000.vtu
3136-rw-r--r-- 1 drwells users 761 Feb 13 19:34 solution-0001.pvtu
3137-rw-r--r-- 1 drwells users 33 Feb 13 19:34 solution-0001.visit
3138@endverbatim
3139
3140
3141If we visualize these files with VisIt or ParaView, we get to see the full picture
3142of the disaster our forced compression wreaks on the cylinder (colors in the
3143images encode the norm of the stress in the material):
3144
3145
3146<div class="threecolumn" style="width: 80%">
3147 <div class="parent">
3148 <div class="img" align="center">
3149 <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0002.0000.png"
3150 alt="Time = 2"
3151 width="400">
3152 </div>
3153 <div class="text" align="center">
3154 Time = 2
3155 </div>
3156 </div>
3157 <div class="parent">
3158 <div class="img" align="center">
3159 <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0005.0000.png"
3160 alt="Time = 5"
3161 width="400">
3162 </div>
3163 <div class="text" align="center">
3164 Time = 5
3165 </div>
3166 </div>
3167 <div class="parent">
3168 <div class="img" align="center">
3169 <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0007.0000.png"
3170 alt="Time = 7"
3171 width="400">
3172 </div>
3173 <div class="text" align="center">
3174 Time = 7
3175 </div>
3176 </div>
3177</div>
3178
3179
3180<div class="threecolumn" style="width: 80%">
3181 <div class="parent">
3182 <div class="img" align="center">
3183 <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0008.0000.png"
3184 alt="Time = 8"
3185 width="400">
3186 </div>
3187 <div class="text" align="center">
3188 Time = 8
3189 </div>
3190 </div>
3191 <div class="parent">
3192 <div class="img" align="center">
3193 <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0009.0000.png"
3194 alt="Time = 9"
3195 width="400">
3196 </div>
3197 <div class="text" align="center">
3198 Time = 9
3199 </div>
3200 </div>
3201 <div class="parent">
3202 <div class="img" align="center">
3203 <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0010.0000.png"
3204 alt="Time = 10"
3205 width="400">
3206 </div>
3207 <div class="text" align="center">
3208 Time = 10
3209 </div>
3210 </div>
3211</div>
3212
3213
3214As is clearly visible, as we keep compressing the cylinder, it begins
3215to bow out near the fully constrained bottom surface and, after about eight
3216time units, buckles in an azimuthally symmetric manner.
3217
3218
3219Although the result appears plausible for the symmetric geometry and loading,
3220it is yet to be established whether or not the computation is fully converged.
3221In order to see whether it is, we ran the program again with one more global
3222refinement at the beginning and with the time step halved. This would have
3223taken a very long time on a single machine, so we used a proper workstation and
3224ran it on 16 processors in parallel. The beginning of the output now looks like
3225this:
3226@verbatim
3227Timestep 1 at time 0.5
3228 Cycle 0:
3229 Number of active cells: 29696 (by partition: 1808+1802+1894+1881+1870+1840+1884+1810+1876+1818+1870+1884+1854+1903+1816+1886)
3230 Number of degrees of freedom: 113100 (by partition: 6936+6930+7305+7116+7326+6869+7331+6786+7193+6829+7093+7162+6920+7280+6843+7181)
3231 Assembling system... norm of rhs is 1.10765e+10
3232 Solver converged in 209 iterations.
3233 Updating quadrature point data...
3234 Cycle 1:
3235 Number of active cells: 102034 (by partition: 6387+6202+6421+6341+6408+6201+6428+6428+6385+6294+6506+6244+6417+6527+6299+6546)
3236 Number of degrees of freedom: 359337 (by partition: 23255+21308+24774+24019+22304+21415+22430+22184+22298+21796+22396+21592+22325+22553+21977+22711)
3237 Assembling system... norm of rhs is 1.35759e+10
3238 Solver converged in 268 iterations.
3239 Updating quadrature point data...
3240 Moving mesh...
3241
3242Timestep 2 at time 1
3243 Assembling system... norm of rhs is 1.34674e+10
3244 Solver converged in 267 iterations.
3245 Updating quadrature point data...
3246 Moving mesh...
3247
3248Timestep 3 at time 1.5
3249 Assembling system... norm of rhs is 1.33607e+10
3250 Solver converged in 265 iterations.
3251 Updating quadrature point data...
3252 Moving mesh...
3253
3254Timestep 4 at time 2
3255 Assembling system... norm of rhs is 1.32558e+10
3256 Solver converged in 263 iterations.
3257 Updating quadrature point data...
3258 Moving mesh...
3259
3260[...]
3261
3262Timestep 20 at time 10
3263 Assembling system... norm of rhs is 1.47755e+10
3264 Solver converged in 425 iterations.
3265 Updating quadrature point data...
3266 Moving mesh...
3267@endverbatim
3268That's quite a good number of unknowns, given that we are in 3d. The output of
3269this program consists of 16 files for each time step:
3270@verbatim
3271\$ ls -l solution-0001*
3272-rw-r--r-- 1 wellsd2 user 761065 Feb 13 21:09 solution-0001.000.vtu
3273-rw-r--r-- 1 wellsd2 user 759277 Feb 13 21:09 solution-0001.001.vtu
3274-rw-r--r-- 1 wellsd2 user 761217 Feb 13 21:09 solution-0001.002.vtu
3275-rw-r--r-- 1 wellsd2 user 761605 Feb 13 21:09 solution-0001.003.vtu
3276-rw-r--r-- 1 wellsd2 user 756917 Feb 13 21:09 solution-0001.004.vtu
3277-rw-r--r-- 1 wellsd2 user 752669 Feb 13 21:09 solution-0001.005.vtu
3278-rw-r--r-- 1 wellsd2 user 735217 Feb 13 21:09 solution-0001.006.vtu
3279-rw-r--r-- 1 wellsd2 user 750065 Feb 13 21:09 solution-0001.007.vtu
3280-rw-r--r-- 1 wellsd2 user 760273 Feb 13 21:09 solution-0001.008.vtu
3281-rw-r--r-- 1 wellsd2 user 777265 Feb 13 21:09 solution-0001.009.vtu
3282-rw-r--r-- 1 wellsd2 user 772469 Feb 13 21:09 solution-0001.010.vtu
3283-rw-r--r-- 1 wellsd2 user 760833 Feb 13 21:09 solution-0001.011.vtu
3284-rw-r--r-- 1 wellsd2 user 782241 Feb 13 21:09 solution-0001.012.vtu
3285-rw-r--r-- 1 wellsd2 user 748905 Feb 13 21:09 solution-0001.013.vtu
3286-rw-r--r-- 1 wellsd2 user 738413 Feb 13 21:09 solution-0001.014.vtu
3287-rw-r--r-- 1 wellsd2 user 762133 Feb 13 21:09 solution-0001.015.vtu
3288-rw-r--r-- 1 wellsd2 user 1421 Feb 13 21:09 solution-0001.pvtu
3289-rw-r--r-- 1 wellsd2 user 364 Feb 13 21:09 solution-0001.visit
3290@endverbatim
3291
3292
3293First, here are the mesh on which we compute and its partitioning
3294among the 16 processors:
3295
3296
3297<div class="twocolumn" style="width: 80%">
3298 <div class="parent">
3299 <div class="img" align="center">
3300 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-000mesh.png"
3301 alt="Discretization"
3302 width="400">
3303 </div>
3304 <div class="text" align="center">
3305 Discretization
3306 </div>
3307 </div>
3308 <div class="parent">
3309 <div class="img" align="center">
3310 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0002.p.png"
3311 alt="Parallel partitioning"
3312 width="400">
3313 </div>
3314 <div class="text" align="center">
3315 Parallel partitioning
3316 </div>
3317 </div>
3318</div>
3319
3320
3321Finally, here is the same output as we have shown before for the much smaller
3322sequential case:
3323
3324<div class="threecolumn" style="width: 80%">
3325 <div class="parent">
3326 <div class="img" align="center">
3327 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0002.s.png"
3328 alt="Time = 2"
3329 width="400">
3330 </div>
3331 <div class="text" align="center">
3332 Time = 2
3333 </div>
3334 </div>
3335 <div class="parent">
3336 <div class="img" align="center">
3337 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0005.s.png"
3338 alt="Time = 5"
3339 width="400">
3340 </div>
3341 <div class="text" align="center">
3342 Time = 5
3343 </div>
3344 </div>
3345 <div class="parent">
3346 <div class="img" align="center">
3347 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0007.s.png"
3348 alt="Time = 7"
3349 width="400">
3350 </div>
3351 <div class="text" align="center">
3352 Time = 7
3353 </div>
3354 </div>
3355</div>
3356
3357
3358<div class="threecolumn" style="width: 80%">
3359 <div class="parent">
3360 <div class="img" align="center">
3361 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0008.s.png"
3362 alt="Time = 8"
3363 width="400">
3364 </div>
3365 <div class="text" align="center">
3366 Time = 8
3367 </div>
3368 </div>
3369 <div class="parent">
3370 <div class="img" align="center">
3371 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0009.s.png"
3372 alt="Time = 9"
3373 width="400">
3374 </div>
3375 <div class="text" align="center">
3376 Time = 9
3377 </div>
3378 </div>
3379 <div class="parent">
3380 <div class="img" align="center">
3381 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0010.s.png"
3382 alt="Time = 10"
3383 width="400">
3384 </div>
3385 <div class="text" align="center">
3386 Time = 10
3387 </div>
3388 </div>
3389</div>
3390
3391
3392As before, we observe that at high axial compression the cylinder begins
3393to buckle, but this time ultimately collapses on itself. In contrast to our
3394first run, towards the end of the simulation the deflection pattern becomes
3395nonsymmetric (the central bulge deflects laterally). The model clearly does not
3396provide for this (all our forces and boundary deflections are symmetric) but the
3397effect is probably physically correct anyway: in reality, small inhomogeneities
3398in the body's material properties would lead it to buckle to one side
3399to evade the forcing; in numerical simulations, small perturbations
3400such as numerical round-off or an inexact solution of a linear system
3401by an iterative solver could have the same effect. Another typical source for
3402asymmetries in adaptive computations is that only a certain fraction of cells
3403is refined in each step, which may lead to asymmetric meshes even if the
3404original coarse mesh was symmetric.
3405
3406
3407If one compares this with the previous run, the results are both qualitatively
3408and quantitatively different. The previous computation was
3409therefore certainly not converged, though we can't say anything for sure about
3410the present one. One would need an even finer computation to find out. However,
3411the point may be moot: looking at the last picture in detail, it is pretty
3412obvious that not only is the linear small deformation model we chose completely
3413inadequate, but for a realistic simulation we would also need to make sure that
3414the body does not intersect itself during deformation (if we continued
3415compressing the cylinder we would observe some self-intersection).
3416Without such a formulation we cannot expect anything to make physical sense,
3417even if it produces nice pictures!
3418
3419
3420<a name="step_18-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
3421
3422
3423The program as is does not really solve an equation that has many applications
3424in practice: quasi-static material deformation based on a purely elastic law
3425is almost boring. However, the program may serve as the starting point for
3426more interesting experiments, and that indeed was the initial motivation for
3427writing it. Here are some suggestions of what the program is missing and in
3428what direction it may be extended:
3429
3430<a name="step_18-Plasticitymodels"></a><h5>Plasticity models</h5>
3431
3432
3433 The most obvious extension is to use a more
3434realistic material model for large-scale quasistatic deformation. The natural
3435choice for this would be plasticity, in which a nonlinear relationship between
3436stress and strain replaces equation <a href="#step_18.stress-strain">[stress-strain]</a>. Plasticity
3437models are usually rather complicated to program since the stress-strain
3438dependence is generally non-smooth. The material can be thought of as being able
3439to withstand only a maximal stress (the yield stress) after which it starts to
3440&ldquo;flow&rdquo;. A mathematical description of this can be given in the form of a
3441variational inequality, which alternatively can be treated as minimizing the
3442elastic energy
3443@f[
3444 E(\mathbf{u}) =
3445 (\varepsilon(\mathbf{u}), C\varepsilon(\mathbf{u}))_{\Omega}
3446 - (\mathbf{f}, \mathbf{u})_{\Omega} - (\mathbf{b}, \mathbf{u})_{\Gamma_N},
3447@f]
3448subject to the constraint
3449@f[
3450 f(\sigma(\mathbf{u})) \le 0
3451@f]
3452on the stress. This extension makes the problem to be solved in each time step
3453nonlinear, so we need another loop within each time step.
3454
3455Without going into further details of this model, we refer to the excellent
3456book by Simo and Hughes on &ldquo;Computational Inelasticity&rdquo; for a
3457comprehensive overview of computational strategies for solving plastic
3458models. Alternatively, a brief but concise description of an algorithm for
3459plasticity is given in an article by S. Commend, A. Truty, and Th. Zimmermann;
3460@cite CTZ04.
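
To make the yield condition concrete, here is a minimal sketch of the von
Mises (J2) yield function, written with deal.II's tensor facilities; the
yield stress <code>sigma_yield</code> is a hypothetical material parameter,
not something defined anywhere in this program:
@code
template <int dim>
double von_mises_yield_function(const SymmetricTensor<2, dim> &stress,
                                const double sigma_yield)
{
  // Only the deviatoric (volume-preserving) part of the stress enters
  // the yield criterion:
  const SymmetricTensor<2, dim> s = deviator(stress);

  // f <= 0: elastic response; f > 0: the material starts to flow.
  return std::sqrt(3. / 2.) * s.norm() - sigma_yield;
}
@endcode
A plasticity algorithm would evaluate such a function at every quadrature
point and, wherever it is positive, map the trial stress back onto the yield
surface.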
3461
3462
3463<a name="step_18-Stabilizationissues"></a><h5>Stabilization issues</h5>
3464
3465
3466The formulation we have chosen, i.e. using
3467piecewise (bi-, tri-)linear elements for all components of the displacement
3468vector, and treating the stress as a variable dependent on the displacement is
3469appropriate for most materials. However, this so-called displacement-based
3470formulation becomes unstable and exhibits spurious modes for incompressible or
3471nearly-incompressible materials. While fluids are usually not elastic (in most
3472cases, the stress depends on velocity gradients, not displacement gradients,
3473although there are exceptions such as electro-rheological fluids), there are a
3474few solids that are nearly incompressible, for example rubber. Another case is
3475that many plasticity models ultimately let the material become incompressible,
3476although this is outside the scope of the present program.
3477
3478Incompressibility is characterized by Poisson's ratio
3479@f[
3480 \nu = \frac{\lambda}{2(\lambda+\mu)},
3481@f]
3482where @f$\lambda,\mu@f$ are the Lam&eacute; constants of the material.
3483Physical constraints indicate that @f$-1\le \nu\le \frac 12@f$ (the condition
3484also follows from mathematical stability considerations). If @f$\nu@f$
3485approaches @f$\frac 12@f$, then the material becomes incompressible. In that
3486case, pure displacement-based formulations are no longer appropriate for the
3487solution of such problems, and stabilization techniques have to be employed
3488for a stable and accurate solution. The book and paper cited above give
3489indications as to how to do this, but there is also a large volume of
3490literature on this subject; a good start to get an overview of the topic can
3491be found in the references of the paper by H.-Y. Duan and Q. Lin; @cite DL05.
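
As a quick check of where the present program stands, one can plug in Lam&eacute;
constants by hand (a minimal sketch, assuming the steel-like values
@f$\lambda=9.695\cdot 10^{10}@f$, @f$\mu=7.617\cdot 10^{10}@f$ used earlier in
this program):
@code
const double lambda = 9.695e10;
const double mu     = 7.617e10;

// Poisson's ratio nu = lambda / (2 (lambda + mu)), here about 0.28 --
// comfortably below the incompressible limit of one half, so the
// displacement-based formulation is adequate for this material:
const double nu = lambda / (2 * (lambda + mu));
@endcode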
3492
3493
3494<a name="step_18-Refinementduringtimesteps"></a><h5>Refinement during timesteps</h5>
3495
3496
3497In the present form, the program
3498only refines the initial mesh a number of times, but then never again. For any
3499kind of realistic simulation, one would want to extend this so that the mesh
3500is refined and coarsened every few time steps instead. This is not hard to do,
3501in fact, but has been left for future tutorial programs or as an exercise, if
3502you wish.
3503
3504The main complication to overcome is that one has to
3505transfer the data that is stored in the quadrature points of the cells of the
3506old mesh to the new mesh, preferably by some sort of projection scheme. The
3507general approach to this would go like this:
3508
3509- At the beginning, the data is only available in the quadrature points of
3510 individual cells, not as a finite element field that is defined everywhere.
3511
3512- So let us find a finite element field that <i>is</i> defined everywhere so
3513 that we can later interpolate it to the quadrature points of the new
3514 mesh. In general, it will be difficult to find a continuous finite element
3515 field that matches the values in the quadrature points exactly because the
3516 number of degrees of freedom of these fields does not match the number of
3517 quadrature points there are, and the nodal values of this global field will
3518 either be over- or underdetermined. But it is usually not very difficult to
3519 find a discontinuous field that matches the values in the quadrature points;
3520 for example, if you have a QGauss(2) quadrature formula (i.e. 4 points per
3521 cell in 2d, 8 points in 3d), then one would use a finite element of kind
3522 FE_DGQ(1), i.e. bi-/tri-linear functions as these have 4 degrees of freedom
3523 per cell in 2d and 8 in 3d.
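
 As a quick sanity check of this correspondence (a small sketch, not part of
 the program):
 @code
 QGauss<dim> quadrature(2);  // 2^dim points: 4 per cell in 2d, 8 in 3d
 FE_DGQ<dim> history_fe(1);  // 2^dim DoFs:   4 per cell in 2d, 8 in 3d

 Assert(history_fe.n_dofs_per_cell() == quadrature.size(),
        ExcInternalError());
 @endcode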
3524
3525- There are functions that can make this conversion from individual points to
3526 a global field simpler. The following piece of pseudo-code should help if
3527 you use a QGauss(2) quadrature formula. Note that the multiplication by the
3528 projection matrix below takes a vector of scalar components, i.e., we can only
3529 convert one set of scalars at a time from the quadrature points to the degrees
3530 of freedom and vice versa. So we need to store each component of stress separately,
3531 which requires <code>dim*dim</code> vectors. We'll store this set of vectors in a 2D array to
3532 make it easier to read off components in the same way you would the stress tensor.
3533 Thus, we'll loop over the components of stress on each cell and store
3534 these values in the global history field. (The prefix <code>history_</code>
3535 indicates that we work with quantities related to the history variables defined
3536 in the quadrature points.)
3537 @code
3538 FE_DGQ<dim> history_fe (1);
3539 DoFHandler<dim> history_dof_handler (triangulation);
3540 history_dof_handler.distribute_dofs (history_fe);
3541
3542 std::vector< std::vector< Vector<double> > >
3543 history_field (dim, std::vector< Vector<double> >(dim)),
3544 local_history_values_at_qpoints (dim, std::vector< Vector<double> >(dim)),
3545 local_history_fe_values (dim, std::vector< Vector<double> >(dim));
3546
3547 for (unsigned int i=0; i<dim; ++i)
3548 for (unsigned int j=0; j<dim; ++j)
3549 {
3550 history_field[i][j].reinit(history_dof_handler.n_dofs());
3551 local_history_values_at_qpoints[i][j].reinit(quadrature.size());
3552 local_history_fe_values[i][j].reinit(history_fe.n_dofs_per_cell());
3553 }
3554
3555 FullMatrix<double> qpoint_to_dof_matrix (history_fe.dofs_per_cell,
3556                                          quadrature.size());
3557 FETools::compute_projection_from_quadrature_points_matrix
3558   (history_fe,
3559    quadrature, quadrature,
3560    qpoint_to_dof_matrix);
3561
3562 typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active(),
3563 endc = dof_handler.end(),
3564 dg_cell = history_dof_handler.begin_active();
3565
3566 for (; cell!=endc; ++cell, ++dg_cell)
3567 {
3568
3569 PointHistory<dim> *local_quadrature_points_history
3570 = reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
3571
3572 Assert (local_quadrature_points_history >= &quadrature_point_history.front(),
3573 ExcInternalError());
3574 Assert (local_quadrature_points_history < &quadrature_point_history.back(),
3575 ExcInternalError());
3576
3577 for (unsigned int i=0; i<dim; ++i)
3578 for (unsigned int j=0; j<dim; ++j)
3579 {
3580 for (unsigned int q=0; q<quadrature.size(); ++q)
3581 local_history_values_at_qpoints[i][j](q)
3582 = local_quadrature_points_history[q].old_stress[i][j];
3583
3584 qpoint_to_dof_matrix.vmult (local_history_fe_values[i][j],
3585 local_history_values_at_qpoints[i][j]);
3586
3587 dg_cell->set_dof_values (local_history_fe_values[i][j],
3588 history_field[i][j]);
3589 }
3590 }
3591 @endcode
3592
3593- Now that we have a global field, we can refine the mesh and transfer the
3594 history_field vector as usual using the SolutionTransfer class. This will
3595 interpolate everything from the old to the new mesh.
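
 In pseudo-code, and assuming the classic two-argument
 <code>SolutionTransfer::interpolate()</code> interface (the exact signatures
 have changed between deal.II versions, so treat this only as a sketch):
 @code
 SolutionTransfer<dim> solution_transfer(history_dof_handler);

 // Flatten the dim x dim array of history vectors into a single
 // std::vector, since SolutionTransfer transfers a set of vectors at once:
 std::vector<Vector<double>> all_in;
 for (unsigned int i=0; i<dim; ++i)
   for (unsigned int j=0; j<dim; ++j)
     all_in.push_back (history_field[i][j]);

 triangulation.prepare_coarsening_and_refinement ();
 solution_transfer.prepare_for_coarsening_and_refinement (all_in);
 triangulation.execute_coarsening_and_refinement ();

 history_dof_handler.distribute_dofs (history_fe);
 std::vector<Vector<double>> all_out (dim * dim,
                                      Vector<double>(history_dof_handler.n_dofs()));
 solution_transfer.interpolate (all_in, all_out);

 // Copy the interpolated vectors back into the 2d array:
 for (unsigned int i=0; i<dim; ++i)
   for (unsigned int j=0; j<dim; ++j)
     history_field[i][j] = all_out[i*dim + j];
 @endcode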
3596
3597- In a final step, we have to get the data back from the now interpolated
3598 global field to the quadrature points on the new mesh. The following code
3599 will do that:
3600 @code
3601 FullMatrix<double> dof_to_qpoint_matrix (quadrature.size(),
3602                                          history_fe.n_dofs_per_cell());
3603 FETools::compute_interpolation_to_quadrature_points_matrix
3604   (history_fe,
3605    quadrature,
3606    dof_to_qpoint_matrix);
3607
3608 typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active(),
3609 endc = dof_handler.end(),
3610 dg_cell = history_dof_handler.begin_active();
3611
3612 for (; cell != endc; ++cell, ++dg_cell)
3613 {
3614 PointHistory<dim> *local_quadrature_points_history
3615 = reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
3616
3617 Assert (local_quadrature_points_history >= &quadrature_point_history.front(),
3618 ExcInternalError());
3619 Assert (local_quadrature_points_history < &quadrature_point_history.back(),
3620 ExcInternalError());
3621
3622 for (unsigned int i=0; i<dim; ++i)
3623 for (unsigned int j=0; j<dim; ++j)
3624 {
3625 dg_cell->get_dof_values (history_field[i][j],
3626 local_history_fe_values[i][j]);
3627
3628 dof_to_qpoint_matrix.vmult (local_history_values_at_qpoints[i][j],
3629 local_history_fe_values[i][j]);
3630
3631 for (unsigned int q=0; q<quadrature.size(); ++q)
3632 local_quadrature_points_history[q].old_stress[i][j]
3633 = local_history_values_at_qpoints[i][j](q);
3634 }
 }
3635 @endcode
3636
3637It becomes a bit more complicated once we run the program in parallel, since
3638then each process only stores this data for the cells it owned on the old
3639mesh. That said, using a parallel vector for <code>history_field</code> will
3640do the trick if you put a call to <code>compress</code> after the transfer
3641from quadrature points into the global vector.
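
For example (a sketch assuming <code>history_field</code> has been switched to
a parallel vector type such as <code>PETScWrappers::MPI::Vector</code>):
@code
// After the set_dof_values() loop over the locally owned cells, exchange
// the inserted entries between the processes:
for (unsigned int i=0; i<dim; ++i)
  for (unsigned int j=0; j<dim; ++j)
    history_field[i][j].compress (VectorOperation::insert);
@endcode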
3642
3643
3644<a name="step_18-Ensuringmeshregularity"></a><h5>Ensuring mesh regularity</h5>
3645
3646
3647At present, the program makes no attempt
3648to make sure that a cell, after moving its vertices at the end of the time
3649step, still has a valid geometry (i.e. that its Jacobian determinant is
3650positive and bounded away from zero everywhere). It is, in fact, not very hard
3651to set boundary values and forcing terms in such a way that one gets distorted
3652and inverted cells rather quickly. Certainly, in some cases of large
3653deformation, this is unavoidable with a mesh of finite mesh size, but in some
3654other cases this should be preventable by appropriate mesh refinement and/or a
3655reduction of the time step size. The program does not do that, but a more
3656sophisticated version definitely should employ some sort of heuristic defining
3657what amount of deformation of cells is acceptable, and what isn't.
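
A crude version of such a heuristic could compare each cell's current volume
against a value recorded before the mesh was ever moved (a sketch;
<code>reference_measure</code> is a hypothetical array one would have to fill
at startup):
@code
for (const auto &cell : triangulation.active_cell_iterators())
  if (cell->measure() < 0.5 * reference_measure[cell->active_cell_index()])
    {
      // This cell has lost more than half of its original volume; one
      // would reduce the time step or refine here. Note that a positive
      // volume alone does not guarantee a positive Jacobian everywhere.
    }
@endcode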
3658 *
3659 *
3660<a name="step_18-PlainProg"></a>
3661<h1> The plain program</h1>
3662@include "step-18.cc"
3663*/