step-18.h
1434 *   Vector<double> &values) const
1435 *   {
1436 *   AssertDimension(values.size(), dim);
1437 *  
1438 *   const double g = 9.81;
1439 *   const double rho = 7700;
1440 *  
1441 *   values = 0;
1442 *   values(dim - 1) = -rho * g;
1443 *   }
1444 *  
1445 *  
1446 *  
1447 *   template <int dim>
1448 *   void BodyForce<dim>::vector_value_list(
1449 *   const std::vector<Point<dim>> &points,
1450 *   std::vector<Vector<double>> &value_list) const
1451 *   {
1452 *   const unsigned int n_points = points.size();
1453 *  
1454 *   AssertDimension(value_list.size(), n_points);
1455 *  
1456 *   for (unsigned int p = 0; p < n_points; ++p)
1457 *   BodyForce<dim>::vector_value(points[p], value_list[p]);
1458 *   }
1459 *  
1460 *  
1461 *  
1462 * @endcode
1463 *
1464 *
1465 * <a name="step_18-ThecodeIncrementalBoundaryValuecodeclass"></a>
1466 * <h3>The <code>IncrementalBoundaryValue</code> class</h3>
1467 *
1468
1469 *
1470 * In addition to body forces, movement can be induced by boundary forces
1471 * and forced boundary displacement. The latter case is equivalent to forces
1472 * being chosen in such a way that they induce a certain displacement.
1473 *
1474
1475 *
1476 * For quasistatic displacement, typical boundary forces would be pressure
1477 * on a body, or tangential friction against another body. We chose a
1478 * somewhat simpler case here: we prescribe a certain movement of (parts of)
1479 * the boundary, or at least of certain components of the displacement
1480 * vector. We describe this by another vector-valued function that, for a
1481 * given point on the boundary, returns the prescribed displacement.
1482 *
1483
1484 *
1485 * Since we have a time-dependent problem, the displacement increment of the
1486 * boundary equals the displacement accumulated during the length of the
1487 * timestep. The class therefore has to know both the present time and the
1488 * length of the present time step, and can then approximate the incremental
1489 * displacement as the present velocity times the present timestep.
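 * Expressed as a formula, the approximation used here is simply
 * @f[
 *   \Delta \mathbf{u} \approx \mathbf{v}(t_n)\, \Delta t_n,
 * @f]
 * i.e., the prescribed incremental boundary displacement is the current
 * boundary velocity multiplied by the length of the current time step.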
1490 *
1491
1492 *
1493 * For the purposes of this program, we choose a simple form of boundary
1494 * displacement: we displace the top boundary with constant velocity
1495 * downwards. The rest of the boundary is either going to be fixed (and is
1496 * then described using an object of type
1497 * <code>Functions::ZeroFunction</code>) or free (Neumann-type, in which case
1498 * nothing special has to be done). The implementation of the class
1499 * describing the constant downward motion should then be obvious using the
1500 * knowledge we gained through all the previous example programs:
1501 *
1502 * @code
1503 *   template <int dim>
1504 *   class IncrementalBoundaryValues : public Function<dim>
1505 *   {
1506 *   public:
1507 *   IncrementalBoundaryValues(const double present_time,
1508 *   const double present_timestep);
1509 *  
1510 *   virtual void vector_value(const Point<dim> &p,
1511 *   Vector<double> &values) const override;
1512 *  
1513 *   virtual void
1514 *   vector_value_list(const std::vector<Point<dim>> &points,
1515 *   std::vector<Vector<double>> &value_list) const override;
1516 *  
1517 *   private:
1518 *   const double velocity;
1519 *   const double present_time;
1520 *   const double present_timestep;
1521 *   };
1522 *  
1523 *  
1524 *   template <int dim>
1525 *   IncrementalBoundaryValues<dim>::IncrementalBoundaryValues(
1526 *   const double present_time,
1527 *   const double present_timestep)
1528 *   : Function<dim>(dim)
1529 *   , velocity(.08)
1530 *   , present_time(present_time)
1531 *   , present_timestep(present_timestep)
1532 *   {}
1533 *  
1534 *  
1535 *   template <int dim>
1536 *   void
1537 *   IncrementalBoundaryValues<dim>::vector_value(const Point<dim> & /*p*/,
1538 *   Vector<double> &values) const
1539 *   {
1540 *   AssertDimension(values.size(), dim);
1541 *  
1542 *   values = 0;
1543 *   values(2) = -present_timestep * velocity;
1544 *   }
1545 *  
1546 *  
1547 *  
1548 *   template <int dim>
1549 *   void IncrementalBoundaryValues<dim>::vector_value_list(
1550 *   const std::vector<Point<dim>> &points,
1551 *   std::vector<Vector<double>> &value_list) const
1552 *   {
1553 *   const unsigned int n_points = points.size();
1554 *  
1555 *   AssertDimension(value_list.size(), n_points);
1556 *  
1557 *   for (unsigned int p = 0; p < n_points; ++p)
1558 *   IncrementalBoundaryValues<dim>::vector_value(points[p], value_list[p]);
1559 *   }
1560 *  
1561 *  
1562 *  
1563 * @endcode
1564 *
1565 *
1566 * <a name="step_18-ImplementationofthecodeTopLevelcodeclass"></a>
1567 * <h3>Implementation of the <code>TopLevel</code> class</h3>
1568 *
1569
1570 *
1571 * Now for the implementation of the main class. First, we initialize the
1572 * stress-strain tensor, which we have declared as a static const
1573 * variable. We chose Lam&eacute; constants that are appropriate for steel:
1574 *
1575 * @code
1576 *   template <int dim>
1577 *   const SymmetricTensor<4, dim> TopLevel<dim>::stress_strain_tensor =
1578 *   get_stress_strain_tensor<dim>(/*lambda = */ 9.695e10,
1579 *   /*mu = */ 7.617e10);
1580 *  
1581 *  
1582 *  
1583 * @endcode
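 *
 * (As a side remark that the program itself does not need: in terms of
 * Young's modulus @f$E@f$ and Poisson's ratio @f$\nu@f$, the Lam&eacute;
 * constants are
 * @f[
 *   \lambda = \frac{E\nu}{(1+\nu)(1-2\nu)}, \qquad
 *   \mu = \frac{E}{2(1+\nu)},
 * @f]
 * so the values chosen above correspond to roughly @f$E \approx 2\times 10^{11}@f$ Pa
 * and @f$\nu \approx 0.28@f$, which is indeed in the range commonly quoted for
 * steel.)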
1584 *
1585 *
1586 * <a name="step_18-Thepublicinterface"></a>
1587 * <h4>The public interface</h4>
1588 *
1589
1590 *
1591 * The next step is the definition of constructors and destructors. There
1592 * are no surprises here: we choose linear and continuous finite elements
1593 * for each of the <code>dim</code> vector components of the solution, and a
1594 * Gaussian quadrature formula with 2 points in each coordinate
1595 * direction. The destructor should be obvious:
1596 *
1597 * @code
1598 *   template <int dim>
1599 *   TopLevel<dim>::TopLevel()
1600 *   : triangulation(MPI_COMM_WORLD)
1601 *   , fe(FE_Q<dim>(1) ^ dim)
1602 *   , dof_handler(triangulation)
1603 *   , quadrature_formula(fe.degree + 1)
1604 *   , present_time(0.0)
1605 *   , present_timestep(1.0)
1606 *   , end_time(10.0)
1607 *   , timestep_no(0)
1608 *   , mpi_communicator(MPI_COMM_WORLD)
1609 *   , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
1610 *   , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
1611 *   , pcout(std::cout, this_mpi_process == 0)
1612 *   {}
1613 *  
1614 *  
1615 *  
1616 *   template <int dim>
1617 *   TopLevel<dim>::~TopLevel()
1618 *   {
1619 *   dof_handler.clear();
1620 *   }
1621 *  
1622 *  
1623 *  
1624 * @endcode
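 *
 * As a brief aside, the <code>fe(FE_Q<dim>(1) ^ dim)</code> initializer above
 * uses the exponentiation shorthand to build a vector-valued element from
 * <code>dim</code> copies of a scalar one. A sketch of the equivalent, more
 * explicit construction via the FESystem constructor would be:
 *
 * @code
 *   // dim copies of a continuous, piecewise (tri)linear scalar element:
 *   FESystem<dim> fe(FE_Q<dim>(1), dim);
 * @endcode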
1625 *
1626 * The last of the public functions is the one that directs all the work,
1627 * <code>run()</code>. It initializes the variables that describe where in
1628 * time we presently are, then runs the first time step, then loops over all
1629 * the other time steps. Note that for simplicity we use a fixed time step,
1630 * whereas a more sophisticated program would of course have to choose it
1631 * adaptively in some more reasonable way:
1632 *
1633 * @code
1634 *   template <int dim>
1635 *   void TopLevel<dim>::run()
1636 *   {
1637 *   do_initial_timestep();
1638 *  
1639 *   while (present_time < end_time)
1640 *   do_timestep();
1641 *   }
1642 *  
1643 *  
1644 * @endcode
1645 *
1646 *
1647 * <a name="step_18-TopLevelcreate_coarse_grid"></a>
1648 * <h4>TopLevel::create_coarse_grid</h4>
1649 *
1650
1651 *
1652 * The next function in the order in which they were declared above is the
1653 * one that creates the coarse grid from which we start. For this example
1654 * program, we want to compute the deformation of a cylinder under axial
1655 * compression. The first step therefore is to generate a mesh for a
1656 * cylinder of length 3 and with inner and outer radii of 0.8 and 1,
1657 * respectively. Fortunately, there is a library function for such a mesh.
1658 *
1659
1660 *
1661 * In a second step, we have to associate boundary conditions with the
1662 * upper and lower faces of the cylinder. We choose boundary indicator 0
1663 * for the boundary faces whose midpoints have a z-coordinate of 0 (the
1664 * bottom face), indicator 1 for those at z=3 (the top
1665 * face); finally, we use boundary indicator 2 for all faces on the inside
1666 * of the cylinder shell, and 3 for the outside.
1667 *
1668 * @code
1669 *   template <int dim>
1670 *   void TopLevel<dim>::create_coarse_grid()
1671 *   {
1672 *   const double inner_radius = 0.8, outer_radius = 1;
1673 *   GridGenerator::cylinder_shell(triangulation, 3, inner_radius, outer_radius);
1674 *   for (const auto &cell : triangulation.active_cell_iterators())
1675 *   for (const auto &face : cell->face_iterators())
1676 *   if (face->at_boundary())
1677 *   {
1678 *   const Point<dim> face_center = face->center();
1679 *  
1680 *   if (face_center[2] == 0)
1681 *   face->set_boundary_id(0);
1682 *   else if (face_center[2] == 3)
1683 *   face->set_boundary_id(1);
1684 *   else if (std::sqrt(face_center[0] * face_center[0] +
1685 *   face_center[1] * face_center[1]) <
1686 *   (inner_radius + outer_radius) / 2)
1687 *   face->set_boundary_id(2);
1688 *   else
1689 *   face->set_boundary_id(3);
1690 *   }
1691 *  
1692 * @endcode
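 *
 * (If one wanted to convince oneself that the boundary indicators really end
 * up as intended, a quick check that is not part of the program could count
 * the boundary faces per indicator on the coarse mesh; a minimal sketch:
 *
 * @code
 *   std::map<types::boundary_id, unsigned int> faces_per_id;
 *   for (const auto &cell : triangulation.active_cell_iterators())
 *     for (const auto &face : cell->face_iterators())
 *       if (face->at_boundary())
 *         ++faces_per_id[face->boundary_id()];
 *
 *   for (const auto &entry : faces_per_id)
 *     std::cout << "boundary id " << static_cast<unsigned int>(entry.first)
 *               << ": " << entry.second << " faces" << std::endl;
 * @endcode
 *
 * Because we use a parallel::shared::Triangulation, every process stores the
 * entire mesh, so each process would print the same, global counts.)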
1693 *
1694 * Once all this is done, we can refine the mesh once globally:
1695 *
1696 * @code
1697 *   triangulation.refine_global(1);
1698 *  
1699 * @endcode
1700 *
1701 * As the final step, we need to set up a clean state of the data that we
1702 * store in the quadrature points on all cells that are treated on the
1703 * present processor.
1704 *
1705 * @code
1706 *   setup_quadrature_point_history();
1707 *   }
1708 *  
1709 *  
1710 *  
1711 * @endcode
1712 *
1713 *
1714 * <a name="step_18-TopLevelsetup_system"></a>
1715 * <h4>TopLevel::setup_system</h4>
1716 *
1717
1718 *
1719 * The next function is the one that sets up the data structures for a given
1720 * mesh. This is done in much the same way as in @ref step_17 "step-17": distribute the
1721 * degrees of freedom, then sort these degrees of freedom in such a way that
1722 * each processor gets a contiguous chunk of them. Note that the subdivision into
1723 * chunks for each processor is handled in the functions that create or
1724 * refine grids, unlike in the previous example program (the point where
1725 * this happens is mostly a matter of taste; here, we chose to do it when
1726 * grids are created since in the <code>do_initial_timestep</code> and
1727 * <code>do_timestep</code> functions we want to output the number of cells
1728 * on each processor at a point where we haven't called the present function
1729 * yet).
1730 *
1731 * @code
1732 *   template <int dim>
1733 *   void TopLevel<dim>::setup_system()
1734 *   {
1735 *   dof_handler.distribute_dofs(fe);
1736 *   locally_owned_dofs = dof_handler.locally_owned_dofs();
1737 *   locally_relevant_dofs =
1738 *   DoFTools::extract_locally_relevant_dofs(dof_handler);
1739 *  
1740 * @endcode
1741 *
1742 * The next step is to set up constraints due to hanging nodes. This has
1743 * been handled many times before:
1744 *
1745 * @code
1746 *   hanging_node_constraints.clear();
1747 *   DoFTools::make_hanging_node_constraints(dof_handler,
1748 *   hanging_node_constraints);
1749 *   hanging_node_constraints.close();
1750 *  
1751 * @endcode
1752 *
1753 * And then we have to set up the matrix. Here we deviate from @ref step_17 "step-17", in
1754 * which we simply used PETSc's ability to just know about the size of the
1755 * matrix and later allocate those nonzero elements that are being written
1756 * to. While this works just fine from a correctness viewpoint, it is not
1757 * at all efficient: if we don't give PETSc a clue as to which elements
1758 * are written to, it is (at least at the time of this writing) unbearably
1759 * slow when we set the elements in the matrix for the first time (i.e. in
1760 * the first timestep). Later on, when the elements have been allocated,
1761 * everything is much faster. In experiments we made, the first timestep
1762 * can be accelerated by almost two orders of magnitude if we instruct
1763 * PETSc which elements will be used and which are not.
1764 *
1765
1766 *
1767 * To do so, we first generate the sparsity pattern of the matrix we are
1768 * going to work with, and make sure that the condensation of hanging node
1769 * constraints adds the necessary additional entries to the sparsity
1770 * pattern:
1771 *
1772 * @code
1773 *   DynamicSparsityPattern sparsity_pattern(locally_relevant_dofs);
1774 *   DoFTools::make_sparsity_pattern(dof_handler,
1775 *   sparsity_pattern,
1776 *   hanging_node_constraints,
1777 *   /*keep constrained dofs*/ false);
1778 *   SparsityTools::distribute_sparsity_pattern(sparsity_pattern,
1779 *   locally_owned_dofs,
1780 *   mpi_communicator,
1781 *   locally_relevant_dofs);
1782 * @endcode
1783 *
1784 * Note that we have used the <code>DynamicSparsityPattern</code> class
1785 * here that was already introduced in @ref step_11 "step-11", rather than the
1786 * <code>SparsityPattern</code> class that we have used in all other
1787 * cases. The reason for this is that for the latter class to work we have
1788 * to give an initial upper bound for the number of entries in each row, a
1789 * task that is traditionally done by
1790 * <code>DoFHandler::max_couplings_between_dofs()</code>. However, this
1791 * function suffers from a serious problem: it has to compute an upper
1792 * bound to the number of nonzero entries in each row, and this is a
1793 * rather complicated task, in particular in 3d. In effect, while it is
1794 * quite accurate in 2d, it often comes up with much too large a number in
1795 * 3d, and in that case the <code>SparsityPattern</code> allocates much
1796 * too much memory at first, often several hundred MB. This is later
1797 * corrected when <code>DoFTools::make_sparsity_pattern</code> is called
1798 * and we realize that we don't need all that much memory, but by that time
1799 * it is already too late: for large problems, the temporary allocation of
1800 * too much memory can lead to out-of-memory situations.
1801 *
1802
1803 *
1804 * In order to avoid this, we resort to the
1805 * <code>DynamicSparsityPattern</code> class that is slower but does
1806 * not require any up-front estimate on the number of nonzero entries per
1807 * row. It therefore only ever allocates as much memory as it needs at any
1808 * given time, and we can build it even for large 3d problems.
1809 *
1810
1811 *
1812 * It is also worth noting that due to the specifics of
1813 * parallel::shared::Triangulation, the sparsity pattern we construct is
1814 * global, i.e. comprises all degrees of freedom whether they will be
1815 * owned by the processor we are on or another one (in case this program
1816 * is run in %parallel via MPI). This of course is not optimal -- it
1817 * limits the size of the problems we can solve, since storing the entire
1818 * sparsity pattern (even if only for a short time) on each processor does
1819 * not scale well. However, there are several more places in the program
1820 * in which we do this, for example we always keep the global
1821 * triangulation and DoF handler objects around, even if we only work on
1822 * part of them. At present, deal.II does not have the necessary
1823 * facilities to completely distribute these objects (a task that, indeed,
1824 * is very hard to achieve with adaptive meshes, since well-balanced
1825 * subdivisions of a domain tend to become unbalanced as the mesh is
1826 * adaptively refined).
1827 *
1828
1829 *
1830 * With this data structure, we can then go to the PETSc sparse matrix and
1831 * tell it to preallocate all the entries we will later want to write to:
1832 *
1833 * @code
1834 *   system_matrix.reinit(locally_owned_dofs,
1835 *   locally_owned_dofs,
1836 *   sparsity_pattern,
1837 *   mpi_communicator);
1838 * @endcode
1839 *
1840 * After this point, no further explicit knowledge of the sparsity pattern
1841 * is required any more and we can let the <code>sparsity_pattern</code>
1842 * variable go out of scope without any problem.
1843 *
1844
1845 *
1846 * The last task in this function is then only to reset the right hand
1847 * side vector as well as the solution vector to their correct sizes;
1848 * remember that the solution vector is a local one, unlike the right hand
1849 * side that is a distributed %parallel one and therefore needs to know
1850 * the MPI communicator over which it is supposed to transmit messages:
1851 *
1852 * @code
1853 *   system_rhs.reinit(locally_owned_dofs, mpi_communicator);
1854 *   incremental_displacement.reinit(dof_handler.n_dofs());
1855 *   }
1856 *  
1857 *  
1858 *  
1859 * @endcode
1860 *
1861 *
1862 * <a name="step_18-TopLevelassemble_system"></a>
1863 * <h4>TopLevel::assemble_system</h4>
1864 *
1865
1866 *
1867 * Again, assembling the system matrix and right hand side follows the same
1868 * structure as in many example programs before. In particular, it is mostly
1869 * equivalent to @ref step_17 "step-17", except for the different right hand side that now
1870 * only has to take into account internal stresses. In addition, assembling
1871 * the matrix is made significantly more transparent by using the
1872 * <code>SymmetricTensor</code> class: note the elegance of forming the
1873 * scalar products of symmetric tensors of rank 2 and 4. The implementation
1874 * is also more general since it is independent of the fact that we may or
1875 * may not be using an isotropic elasticity tensor.
1876 *
1877
1878 *
1879 * The first part of the assembly routine is as always:
1880 *
1881 * @code
1882 *   template <int dim>
1883 *   void TopLevel<dim>::assemble_system()
1884 *   {
1885 *   system_rhs = 0;
1886 *   system_matrix = 0;
1887 *  
1888 *   FEValues<dim> fe_values(fe,
1889 *   quadrature_formula,
1890 *   update_values | update_gradients |
1891 *   update_quadrature_points | update_JxW_values);
1892 *  
1893 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1894 *   const unsigned int n_q_points = quadrature_formula.size();
1895 *  
1896 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1897 *   Vector<double> cell_rhs(dofs_per_cell);
1898 *  
1899 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1900 *  
1901 *   BodyForce<dim> body_force;
1902 *   std::vector<Vector<double>> body_force_values(n_q_points,
1903 *   Vector<double>(dim));
1904 *  
1905 * @endcode
1906 *
1907 * As in @ref step_17 "step-17", we only need to loop over all cells that belong to the
1908 * present processor:
1909 *
1910 * @code
1911 *   for (const auto &cell : dof_handler.active_cell_iterators())
1912 *   if (cell->is_locally_owned())
1913 *   {
1914 *   cell_matrix = 0;
1915 *   cell_rhs = 0;
1916 *  
1917 *   fe_values.reinit(cell);
1918 *  
1919 * @endcode
1920 *
1921 * Then loop over all indices i,j and quadrature points and assemble
1922 * the system matrix contributions from this cell. Note how we
1923 * extract the symmetric gradients (strains) of the shape functions
1924 * at a given quadrature point from the <code>FEValues</code>
1925 * object, and the elegance with which we form the triple
1926 * contraction <code>eps_phi_i : C : eps_phi_j</code>; the latter
1927 * needs to be compared to the clumsy computations needed in
1928 * @ref step_17 "step-17", both in the introduction as well as in the respective
1929 * place in the program:
1930 *
1931 * @code
1932 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1933 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1934 *   for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1935 *   {
1936 *   const SymmetricTensor<2, dim>
1937 *   eps_phi_i = get_strain(fe_values, i, q_point),
1938 *   eps_phi_j = get_strain(fe_values, j, q_point);
1939 *  
1940 *   cell_matrix(i, j) += (eps_phi_i *
1941 *   stress_strain_tensor *
1942 *   eps_phi_j
1943 *   ) *
1944 *   fe_values.JxW(q_point);
1945 *   }
1946 *  
1947 *  
1948 * @endcode
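 *
 * Written out, the quantity accumulated in the innermost loop above is the
 * usual elasticity bilinear form, evaluated by numerical quadrature:
 * @f[
 *   A^K_{ij}
 *   = \int_K \varepsilon(\varphi_i) : C : \varepsilon(\varphi_j) \, dx
 *   \approx \sum_q \varepsilon(\varphi_i(x_q)) : C : \varepsilon(\varphi_j(x_q))
 *     \, w_q \, |J(x_q)|,
 * @f]
 * where @f$\varepsilon(\varphi)@f$ is the symmetric gradient returned by
 * <code>get_strain</code>, @f$C@f$ the stress-strain tensor, and the product
 * @f$w_q |J(x_q)|@f$ is what <code>fe_values.JxW(q_point)</code> provides.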
1949 *
1950 * Then also assemble the local right hand side contributions. For
1951 * this, we need to access the prior stress value in this quadrature
1952 * point. To get it, we use the user pointer of this cell that
1953 * points into the global array to the quadrature point data
1954 * corresponding to the first quadrature point of the present cell,
1955 * and then add an offset corresponding to the index of the
1956 * quadrature point we presently consider:
1957 *
1958 * @code
1959 *   const PointHistory<dim> *local_quadrature_points_data =
1960 *   reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
1961 * @endcode
1962 *
1963 * In addition, we need the values of the external body forces at
1964 * the quadrature points on this cell:
1965 *
1966 * @code
1967 *   body_force.vector_value_list(fe_values.get_quadrature_points(),
1968 *   body_force_values);
1969 * @endcode
1970 *
1971 * Then we can loop over all degrees of freedom on this cell and
1972 * compute local contributions to the right hand side:
1973 *
1974 * @code
1975 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1976 *   {
1977 *   const unsigned int component_i =
1978 *   fe.system_to_component_index(i).first;
1979 *  
1980 *   for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1981 *   {
1982 *   const SymmetricTensor<2, dim> &old_stress =
1983 *   local_quadrature_points_data[q_point].old_stress;
1984 *  
1985 *   cell_rhs(i) +=
1986 *   (body_force_values[q_point](component_i) *
1987 *   fe_values.shape_value(i, q_point) -
1988 *   old_stress * get_strain(fe_values, i, q_point)) *
1989 *   fe_values.JxW(q_point);
1990 *   }
1991 *   }
1992 *  
1993 * @endcode
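 *
 * In formula form, the local right hand side contribution just computed is
 * @f[
 *   F^K_i = \int_K \left( b_{c(i)} \, \varphi_i
 *           - \sigma^{\text{old}} : \varepsilon(\varphi_i) \right) dx,
 * @f]
 * where @f$c(i)@f$ is the vector component to which shape function @f$i@f$
 * belongs (the <code>component_i</code> variable above), @f$b@f$ the body
 * force, and @f$\sigma^{\text{old}}@f$ the stress stored at the quadrature
 * points from previous time steps.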
1994 *
1995 * Now that we have the local contributions to the linear system, we
1996 * need to transfer them into the global objects. This is done exactly
1997 * as in @ref step_17 "step-17":
1998 *
1999 * @code
2000 *   cell->get_dof_indices(local_dof_indices);
2001 *  
2002 *   hanging_node_constraints.distribute_local_to_global(cell_matrix,
2003 *   cell_rhs,
2004 *   local_dof_indices,
2005 *   system_matrix,
2006 *   system_rhs);
2007 *   }
2008 *  
2009 * @endcode
2010 *
2011 * Now compress the vector and the system matrix:
2012 *
2013 * @code
2014 *   system_matrix.compress(VectorOperation::add);
2015 *   system_rhs.compress(VectorOperation::add);
2016 *  
2017 *  
2018 * @endcode
2019 *
2020 * The last step is to again fix up boundary values, just as we already
2021 * did in previous programs. A slight complication is that the
2022 * <code>apply_boundary_values</code> function wants to have a solution
2023 * vector compatible with the matrix and right hand side (i.e. here a
2024 * distributed %parallel vector, rather than the sequential vector we use
2025 * in this program) in order to preset the entries of the solution vector
2026 * with the correct boundary values. We provide such a compatible vector
2027 * in the form of a temporary vector which we then copy into the
2028 * sequential one.
2029 *
2030
2031 *
2032 * We make up for this complication by showing how boundary values can be
2033 * used flexibly: following the way we create the triangulation, there are
2034 * three distinct boundary indicators used to describe the domain,
2035 * corresponding to the bottom and top faces, as well as the inner/outer
2036 * surfaces. We would like to impose boundary conditions of the following
2037 * type: The inner and outer cylinder surfaces are free of external
2038 * forces, a fact that corresponds to natural (Neumann-type) boundary
2039 * conditions for which we don't have to do anything. At the bottom, we
2040 * want no movement at all, corresponding to the cylinder being clamped or
2041 * cemented in at this part of the boundary. At the top, however, we want
2042 * a prescribed vertical downward motion compressing the cylinder; in
2043 * addition, we only want to restrict the vertical movement, but not the
2044 * horizontal ones -- one can think of this situation as a well-greased
2045 * plate sitting on top of the cylinder pushing it downwards: the atoms of
2046 * the cylinder are forced to move downward, but they are free to slide
2047 * horizontally along the plate.
2048 *
2049
2050 *
2051 * The way to describe this is as follows: for boundary indicator zero
2052 * (bottom face) we use a dim-dimensional zero function representing no
2053 * motion in any coordinate direction. For the boundary with indicator 1
2054 * (top surface), we use the <code>IncrementalBoundaryValues</code> class,
2055 * but we specify an additional argument to the
2056 * <code>VectorTools::interpolate_boundary_values</code> function denoting
2057 * which vector components it should apply to; this is a vector of bools
2058 * for each vector component and because we only want to restrict vertical
2059 * motion, it has only its last component set:
2060 *
2061 * @code
2062 *   const FEValuesExtractors::Scalar z_component(dim - 1);
2063 *   std::map<types::global_dof_index, double> boundary_values;
2064 *   VectorTools::interpolate_boundary_values(dof_handler,
2065 *   0,
2066 *   Functions::ZeroFunction<dim>(dim),
2067 *   boundary_values);
2068 *   VectorTools::interpolate_boundary_values(
2069 *   dof_handler,
2070 *   1,
2071 *   IncrementalBoundaryValues<dim>(present_time, present_timestep),
2072 *   boundary_values,
2073 *   fe.component_mask(z_component));
2074 *  
2075 *   PETScWrappers::MPI::Vector tmp(locally_owned_dofs, mpi_communicator);
2076 *   MatrixTools::apply_boundary_values(
2077 *   boundary_values, system_matrix, tmp, system_rhs, false);
2078 *   incremental_displacement = tmp;
2079 *   }
2080 *  
2081 *  
2082 *  
2083 * @endcode
2084 *
2085 *
2086 * <a name="step_18-TopLevelsolve_timestep"></a>
2087 * <h4>TopLevel::solve_timestep</h4>
2088 *
2089
2090 *
2091 * The next function is the one that controls what all has to happen within
2092 * a timestep. The order of things should be relatively self-explanatory
2093 * from the function names:
2094 *
2095 * @code
2096 *   template <int dim>
2097 *   void TopLevel<dim>::solve_timestep()
2098 *   {
2099 *   pcout << " Assembling system..." << std::flush;
2100 *   assemble_system();
2101 *   pcout << " norm of rhs is " << system_rhs.l2_norm() << std::endl;
2102 *  
2103 *   const unsigned int n_iterations = solve_linear_problem();
2104 *  
2105 *   pcout << " Solver converged in " << n_iterations << " iterations."
2106 *   << std::endl;
2107 *  
2108 *   pcout << " Updating quadrature point data..." << std::flush;
2109 *   update_quadrature_point_history();
2110 *   pcout << std::endl;
2111 *   }
2112 *  
2113 *  
2114 *  
2115 * @endcode
2116 *
2117 *
2118 * <a name="step_18-TopLevelsolve_linear_problem"></a>
2119 * <h4>TopLevel::solve_linear_problem</h4>
2120 *
2121
2122 *
2123 * Solving the linear system again works mostly as before. The only
2124 * difference is that we want to only keep a complete local copy of the
2125 * solution vector instead of the distributed one that we get as output from
2126 * PETSc's solver routines. To this end, we declare a local temporary
2127 * variable for the distributed vector and initialize it with the contents
2128 * of the local variable (remember that the
2129 * <code>apply_boundary_values</code> function called in
2130 * <code>assemble_system</code> preset the values of boundary nodes in this
2131 * vector), solve with it, and at the end of the function copy it again into
2132 * the complete local vector that we declared as a member variable. Hanging
2133 * node constraints are then distributed only on the local copy,
2134 * i.e. independently of each other on each of the processors:
2135 *
2136 * @code
2137 *   template <int dim>
2138 *   unsigned int TopLevel<dim>::solve_linear_problem()
2139 *   {
2140 *   PETScWrappers::MPI::Vector distributed_incremental_displacement(
2141 *   locally_owned_dofs, mpi_communicator);
2142 *   distributed_incremental_displacement = incremental_displacement;
2143 *  
2144 *   SolverControl solver_control(dof_handler.n_dofs(),
2145 *   1e-16 * system_rhs.l2_norm());
2146 *  
2147 *   PETScWrappers::SolverCG cg(solver_control);
2148 *  
2149 *   PETScWrappers::PreconditionBlockJacobi preconditioner(system_matrix);
2150 *  
2151 *   cg.solve(system_matrix,
2152 *   distributed_incremental_displacement,
2153 *   system_rhs,
2154 *   preconditioner);
2155 *  
2156 *   incremental_displacement = distributed_incremental_displacement;
2157 *  
2158 *   hanging_node_constraints.distribute(incremental_displacement);
2159 *  
2160 *   return solver_control.last_step();
2161 *   }
2162 *  
2163 *  
2164 *  
2165 * @endcode
2166 *
2167 *
2168 * <a name="step_18-TopLeveloutput_results"></a>
2169 * <h4>TopLevel::output_results</h4>
2170 *
2171
2172 *
2173 * This function generates the graphical output in .vtu format as explained
2174 * in the introduction. Each process will only work on the cells it owns,
2175 * and then write the result into a file of its own. Additionally, processor
2176 * 0 will write the record files that reference all the .vtu files.
2177 *
2178
2179 *
2180 * The crucial part of this function is to give the <code>DataOut</code>
2181 * class a way to only work on the cells that the present process owns.
2182 *
2183
2184 *
2185 *
2186 * @code
2187 *   template <int dim>
2188 *   void TopLevel<dim>::output_results() const
2189 *   {
2190 *   DataOut<dim> data_out;
2191 *   data_out.attach_dof_handler(dof_handler);
2192 *  
2193 * @endcode
2194 *
2195 * Then, just as in @ref step_17 "step-17", define the names of solution variables (which
2196 * here are the displacement increments) and queue the solution vector for
2197 * output. Note in the following switch how we make sure that if the space
2198 * dimension is unhandled, we throw an exception saying that we
2199 * haven't implemented this case yet (another case of defensive
2200 * programming):
2201 *
2202 * @code
2203 *   std::vector<std::string> solution_names;
2204 *   switch (dim)
2205 *   {
2206 *   case 1:
2207 *   solution_names.emplace_back("delta_x");
2208 *   break;
2209 *   case 2:
2210 *   solution_names.emplace_back("delta_x");
2211 *   solution_names.emplace_back("delta_y");
2212 *   break;
2213 *   case 3:
2214 *   solution_names.emplace_back("delta_x");
2215 *   solution_names.emplace_back("delta_y");
2216 *   solution_names.emplace_back("delta_z");
2217 *   break;
2218 *   default:
2219 *   DEAL_II_NOT_IMPLEMENTED();
2220 *   }
2221 *  
2222 *   data_out.add_data_vector(incremental_displacement, solution_names);
2223 *  
2224 *  
2225 * @endcode
2226 *
2227 * The next thing is that we wanted to output something like the average
2228 * norm of the stresses that we have stored in each cell. This may seem
2229 * complicated, since on the present processor we only store the stresses
2230 * in quadrature points on those cells that actually belong to the present
2231 * process. In other words, it seems as if we can't compute the average
2232 * stresses for all cells. However, remember that our class derived from
2233 * <code>DataOut</code> only iterates over those cells that actually do
2234 * belong to the present processor, i.e. we don't have to compute anything
2235 * for all the other cells as this information would not be touched. The
2236 * following little loop does this. We enclose the entire block into a
2237 * pair of braces to make sure that the iterator variables do not remain
2238 * accidentally visible beyond the end of the block in which they are
2239 * used:
2240 *
2241 * @code
2242 *   Vector<double> norm_of_stress(triangulation.n_active_cells());
2243 *   {
2244 * @endcode
2245 *
2246 * Loop over all the cells...
2247 *
2248 * @code
2249 *   for (auto &cell : triangulation.active_cell_iterators())
2250 *   if (cell->is_locally_owned())
2251 *   {
2252 * @endcode
2253 *
2254 * On these cells, add up the stresses over all quadrature
2255 * points...
2256 *
2257 * @code
2258 *   SymmetricTensor<2, dim> accumulated_stress;
2259 *   for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
2260 *   accumulated_stress +=
2261 *   reinterpret_cast<PointHistory<dim> *>(cell->user_pointer())[q]
2262 *   .old_stress;
2263 *  
2264 * @endcode
2265 *
2266 * ...then write the norm of the average to their destination:
2267 *
2268 * @code
2269 *   norm_of_stress(cell->active_cell_index()) =
2270 *   (accumulated_stress / quadrature_formula.size()).norm();
2271 *   }
2272 * @endcode
2273 *
2274 * And on the cells that we are not interested in, set the respective
2275 * value in the vector to a bogus value (norms must be positive, and a
2276 * large negative value should catch your eye) in order to make sure
2277 * that if we were somehow wrong about our assumption that these
2278 * elements would not appear in the output file, we would find out
2279 * by looking at the graphical output:
2280 *
2281 * @code
2282 *   else
2283 *   norm_of_stress(cell->active_cell_index()) = -1e+20;
2284 *   }
2285 * @endcode
2286 *
2287 * Finally attach this vector as well to be treated for output:
2288 *
2289 * @code
2290 *   data_out.add_data_vector(norm_of_stress, "norm_of_stress");
2291 *  
2292 * @endcode
2293 *
2294 * As a last piece of data, let us also add the partitioning of the domain
2295 * into subdomains associated with the processors if this is a parallel
2296 * job. This works in the exact same way as in the @ref step_17 "step-17" program:
2297 *
2298 * @code
2299 *   std::vector<types::subdomain_id> partition_int(
2300 *   triangulation.n_active_cells());
2301 *   GridTools::get_subdomain_association(triangulation, partition_int);
2302 *   const Vector<double> partitioning(partition_int.begin(),
2303 *   partition_int.end());
2304 *   data_out.add_data_vector(partitioning, "partitioning");
2305 *  
2306 * @endcode
2307 *
2308 * Finally, with all this data, we can instruct deal.II to munge the
2309 * information and produce some intermediate data structures that contain
2310 * all these solution and other data vectors:
2311 *
2312 * @code
2313 *   data_out.build_patches();
2314 *  
2315 * @endcode
2316 *
2317 * Let us call a function that opens the necessary output files and writes
2318 * the data we have generated into them. The function automatically
2319 * constructs the file names from the given directory name (the first
2320 * argument) and file name base (second argument). It augments the resulting
2321 * string by pieces that result from the time step number and a "piece
2322 * number" that corresponds to a part of the overall domain that can consist
2323 * of one or more subdomains.
2324 *
2325
2326 *
2327 * The function also writes a record file (with suffix `.pvtu`) for Paraview
2328 * that describes how all of these output files combine into the data for
2329 * this single time step:
2330 *
2331 * @code
2332 *   const std::string pvtu_filename = data_out.write_vtu_with_pvtu_record(
2333 *   "./", "solution", timestep_no, mpi_communicator, 4);
2334 *  
2335 * @endcode
2336 *
2337 * The record file must be written only once and not by each processor,
2338 * so we do this on processor 0:
2339 *
2340 * @code
2341 *   if (this_mpi_process == 0)
2342 *   {
2343 * @endcode
2344 *
2345 * Finally, we write the ParaView record that references all .pvtu
2346 * files and their respective times. Note that the variable
2347 * times_and_names is declared static, so it will retain the entries
2348 * from the previous timesteps.
2349 *
2350 * @code
2351 *   static std::vector<std::pair<double, std::string>> times_and_names;
2352 *   times_and_names.emplace_back(present_time, pvtu_filename);
2353 *   std::ofstream pvd_output("solution.pvd");
2354 *   DataOutBase::write_pvd_record(pvd_output, times_and_names);
2355 *   }
2356 *   }
2357 *  
2358 *  
2359 *  
2360 * @endcode
2361 *
2362 *
2363 * <a name="step_18-TopLeveldo_initial_timestep"></a>
2364 * <h4>TopLevel::do_initial_timestep</h4>
2365 *
2366
2367 *
2368 * This and the next function handle the overall structure of the first and
2369 * following timesteps, respectively. The first timestep is slightly more
2370 * involved because we want to compute it multiple times on successively
2371 * refined meshes, each time starting from a clean state. At the end of
2372 * these computations, in which we compute the incremental displacements
2373 * each time, we use the last results obtained for the incremental
2374 * displacements to compute the resulting stress updates and move the mesh
2375 * accordingly. On this new mesh, we then output the solution and any
2376 * additional data we consider important.
2377 *
2378
2379 *
2380 * All this is interspersed by generating output to the console to update
2381 * the person watching the screen on what is going on. As in @ref step_17 "step-17", the
2382 * use of <code>pcout</code> instead of <code>std::cout</code> makes sure
2383 * that only one of the parallel processes is actually writing to the
2384 * console, without having to explicitly code an if-statement in each place
2385 * where we generate output:
2386 *
2387 * @code
2388 *   template <int dim>
2389 *   void TopLevel<dim>::do_initial_timestep()
2390 *   {
2391 *   present_time += present_timestep;
2392 *   ++timestep_no;
2393 *   pcout << "Timestep " << timestep_no << " at time " << present_time
2394 *   << std::endl;
2395 *  
2396 *   for (unsigned int cycle = 0; cycle < 2; ++cycle)
2397 *   {
2398 *   pcout << " Cycle " << cycle << ':' << std::endl;
2399 *  
2400 *   if (cycle == 0)
2401 *   create_coarse_grid();
2402 *   else
2403 *   refine_initial_grid();
2404 *  
2405 *   pcout << " Number of active cells: "
2406 *   << triangulation.n_active_cells() << " (by partition:";
2407 *   for (unsigned int p = 0; p < n_mpi_processes; ++p)
2408 *   pcout << (p == 0 ? ' ' : '+')
2409 *   << (GridTools::count_cells_with_subdomain_association(
2410 *   triangulation, p));
2411 *   pcout << ')' << std::endl;
2412 *  
2413 *   setup_system();
2414 *  
2415 *   pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
2416 *   << " (by partition:";
2417 *   for (unsigned int p = 0; p < n_mpi_processes; ++p)
2418 *   pcout << (p == 0 ? ' ' : '+')
2419 *   << (DoFTools::count_dofs_with_subdomain_association(dof_handler,
2420 *   p));
2421 *   pcout << ')' << std::endl;
2422 *  
2423 *   solve_timestep();
2424 *   }
2425 *  
2426 *   move_mesh();
2427 *   output_results();
2428 *  
2429 *   pcout << std::endl;
2430 *   }
2431 *  
2432 *  
2433 *  
2434 * @endcode
2435 *
2436 *
2437 * <a name="step_18-TopLeveldo_timestep"></a>
2438 * <h4>TopLevel::do_timestep</h4>
2439 *
2440
2441 *
2442 * Subsequent timesteps are simpler, and probably do not require any more
2443 * documentation given the explanations for the previous function above:
2444 *
2445 * @code
2446 *   template <int dim>
2447 *   void TopLevel<dim>::do_timestep()
2448 *   {
2449 *   present_time += present_timestep;
2450 *   ++timestep_no;
2451 *   pcout << "Timestep " << timestep_no << " at time " << present_time
2452 *   << std::endl;
2453 *   if (present_time > end_time)
2454 *   {
2455 *   present_timestep -= (present_time - end_time);
2456 *   present_time = end_time;
2457 *   }
2458 *  
2459 *  
2460 *   solve_timestep();
2461 *  
2462 *   move_mesh();
2463 *   output_results();
2464 *  
2465 *   pcout << std::endl;
2466 *   }
2467 *  
2468 *  
2469 * @endcode
2470 *
2471 *
2472 * <a name="step_18-TopLevelrefine_initial_grid"></a>
2473 * <h4>TopLevel::refine_initial_grid</h4>
2474 *
2475
2476 *
2477 * The following function is called when solving the first time step on
2478 * successively refined meshes. After each iteration, it computes a
2479 * refinement criterion, refines the mesh, and sets up the history variables
2480 * in each quadrature point again to a clean state.
2481 *
2482 * @code
2483 *   template <int dim>
2484 *   void TopLevel<dim>::refine_initial_grid()
2485 *   {
2486 * @endcode
2487 *
2488 * First, let each process compute error indicators for the cells it owns:
2489 *
2490 * @code
2491 *   Vector<float> error_per_cell(triangulation.n_active_cells());
2492 *   KellyErrorEstimator<dim>::estimate(
2493 *   dof_handler,
2494 *   QGauss<dim - 1>(fe.degree + 1),
2495 *   std::map<types::boundary_id, const Function<dim> *>(),
2496 *   incremental_displacement,
2497 *   error_per_cell,
2498 *   ComponentMask(),
2499 *   nullptr,
2500 *   MultithreadInfo::n_threads(),
2501 *   this_mpi_process);
2502 *  
2503 * @endcode
2504 *
2505 * Then set up a global vector into which we merge the local indicators
2506 * from each of the %parallel processes:
2507 *
2508 * @code
2509 *   const unsigned int n_local_cells =
2510 *   triangulation.n_locally_owned_active_cells();
2511 *  
2512 *   PETScWrappers::MPI::Vector distributed_error_per_cell(
2513 *   mpi_communicator, triangulation.n_active_cells(), n_local_cells);
2514 *  
2515 *   for (unsigned int i = 0; i < error_per_cell.size(); ++i)
2516 *   if (error_per_cell(i) != 0)
2517 *   distributed_error_per_cell(i) = error_per_cell(i);
2518 *   distributed_error_per_cell.compress(VectorOperation::insert);
2519 *  
2520 * @endcode
2521 *
2522 * Once we have that, copy it back into local copies on all processors and
2523 * refine the mesh accordingly:
2524 *
2525 * @code
2526 *   error_per_cell = distributed_error_per_cell;
2527 *   GridRefinement::refine_and_coarsen_fixed_number(triangulation,
2528 *   error_per_cell,
2529 *   0.35,
2530 *   0.03);
2531 *   triangulation.execute_coarsening_and_refinement();
2532 *  
2533 * @endcode
2534 *
2535 * Finally, set up quadrature point data again on the new mesh, and only
2536 * on those cells that we have determined to be ours:
2537 *
2538 * @code
2539 *   setup_quadrature_point_history();
2540 *   }
2541 *  
2542 *  
2543 *  
2544 * @endcode
2545 *
2546 *
2547 * <a name="step_18-TopLevelmove_mesh"></a>
2548 * <h4>TopLevel::move_mesh</h4>
2549 *
2550
2551 *
2552 * At the end of each time step, we move the nodes of the mesh according to
2553 * the incremental displacements computed in this time step. To do this, we
2554 * keep a vector of flags that indicate for each vertex whether we have
2555 * already moved it around, and then loop over all cells and move those
2556 * vertices of the cell that have not been moved yet. It is worth noting
2557 * that it does not matter from which of the cells adjacent to a vertex we
2558 * move this vertex: since we compute the displacement using a continuous
2559 * finite element, the displacement field is continuous as well and we can
2560 * compute the displacement of a given vertex from each of the adjacent
2561 * cells. We only have to make sure that we move each node exactly once,
2562 * which is why we keep the vector of flags.
2563 *
2564
2565 *
2566 * There are two noteworthy things in this function. First, how we get the
2567 * displacement field at a given vertex using the
2568 * <code>cell-@>vertex_dof_index(v,d)</code> function that returns the index
2569 * of the <code>d</code>th degree of freedom at vertex <code>v</code> of the
2570 * given cell. In the present case, displacement in the k-th coordinate
2571 * direction corresponds to the k-th component of the finite element. Using a
2572 * function like this bears a certain risk, because it uses knowledge of the
2573 * order of elements that we have taken together for this program in the
2574 * <code>FESystem</code> element. If we decided to add an additional
2575 * variable, for example a pressure variable for stabilization, and happened
2576 * to insert it as the first variable of the element, then the computation
2577 * below will start to produce nonsensical results. In addition, this
2578 * computation rests on other assumptions: first, that the element we use
2579 * has, indeed, degrees of freedom that are associated with vertices. This
2580 * is indeed the case for the present Q1 element, as it would be for all Qp
2581 * elements of polynomial order <code>p</code>. However, it would not hold
2582 * for discontinuous elements, or elements for mixed formulations. Secondly,
2583 * it also rests on the assumption that the displacement at a vertex is
2584 * determined solely by the value of the degree of freedom associated with
2585 * this vertex; in other words, all shape functions corresponding to other
2586 * degrees of freedom are zero at this particular vertex. Again, this is the
2587 * case for the present element, but is not so for all elements that are
2588 * presently available in deal.II. Despite its risks, we choose to use this
2589 * way in order to present a way to query individual degrees of freedom
2590 * associated with vertices.
2591 *
2592
2593 *
2594 * In this context, it is instructive to point out what a more general way
2595 * would be. For general finite elements, the way to go would be to take a
2596 * quadrature formula with the quadrature points in the vertices of a
2597 * cell. The <code>QTrapezoid</code> formula for the trapezoidal rule does
2598 * exactly this. With this quadrature formula, we would then initialize an
2599 * <code>FEValues</code> object in each cell, and use the
2600 * <code>FEValues::get_function_values</code> function to obtain the values
2601 * of the solution function in the quadrature points, i.e. the vertices of
2602 * the cell. These are the only values that we really need, i.e. we are not
2603 * at all interested in the weights (or the <code>JxW</code> values)
2604 * associated with this particular quadrature formula, and this can be
2605 * specified as the last argument in the constructor to
2606 * <code>FEValues</code>. The only point of minor inconvenience in this
2607 * scheme is that we have to figure out which quadrature point corresponds
2608 * to the vertex we consider at present, as they may or may not be ordered
2609 * in the same order.
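 *
 * A minimal sketch of this vertex-quadrature approach (not used in this
 * program, and written assuming the interfaces behave as just described)
 * might look as follows; the ordering issue mentioned above shows up in the
 * final comment:
 *
 * @code
 *   const QTrapezoid<dim> vertex_quadrature;
 *   FEValues<dim>         fe_vertex_values(fe, vertex_quadrature, update_values);
 *
 *   std::vector<Vector<double>> displacement_at_q_points(
 *     vertex_quadrature.size(), Vector<double>(dim));
 *
 *   for (const auto &cell : dof_handler.active_cell_iterators())
 *     {
 *       fe_vertex_values.reinit(cell);
 *       fe_vertex_values.get_function_values(incremental_displacement,
 *                                            displacement_at_q_points);
 *       // displacement_at_q_points[q] now holds the dim displacement
 *       // components at the q-th quadrature point; one would still have to
 *       // figure out which q corresponds to which cell->vertex(v).
 *     }
 * @endcode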
2610 *
2611
2612 *
2613 * This inconvenience could be avoided if finite elements have support
2614 * points on vertices (which the one here has; for the concept of support
2615 * points, see @ref GlossSupport "support points"). For such a case, one
2616 * could construct a custom quadrature rule using
2617 * FiniteElement::get_unit_support_points(). The first
2618 * <code>cell-&gt;n_vertices()*fe.dofs_per_vertex</code>
2619 * quadrature points will then correspond to the vertices of the cell and
2620 * are ordered consistent with <code>cell-@>vertex(i)</code>, taking into
2621 * account that support points for vector elements will be duplicated
2622 * <code>fe.dofs_per_vertex</code> times.
2623 *
2624
2625 *
2626 * Another point worth explaining about this short function is the way in
2627 * which the triangulation class exports information about its vertices:
2628 * through the <code>Triangulation::n_vertices</code> function, it
2629 * advertises how many vertices there are in the triangulation. Not all of
2630 * them are actually in use all the time -- some are left-overs from cells
2631 * that have been coarsened previously and remain in existence since deal.II
2632 * never changes the number of a vertex once it has come into existence,
2633 * even if vertices with lower numbers go away. Secondly, the location
2634 * returned by <code>cell-@>vertex(v)</code> is not only a read-only object
2635 * of type <code>Point@<dim@></code>, but in fact a reference that can be
2636 * written to. This allows us to move the nodes of a mesh around with relative
2637 * ease, but it is worth pointing out that it is the responsibility of an
2638 * application program using this feature to make sure that the resulting
2639 * cells are still useful, i.e. are not distorted so much that the cell is
2640 * degenerated (indicated, for example, by negative Jacobians). Note that we
2641 * do not have any provisions in this function to actually ensure this, we
2642 * just have faith.
2643 *
2644
2645 *
2646 * After this lengthy introduction, here are the full 20 or so lines of
2647 * code:
2648 *
2649 * @code
2650 *   template <int dim>
2651 *   void TopLevel<dim>::move_mesh()
2652 *   {
2653 *   pcout << " Moving mesh..." << std::endl;
2654 *  
2655 *   std::vector<bool> vertex_touched(triangulation.n_vertices(), false);
2656 *   for (auto &cell : dof_handler.active_cell_iterators())
2657 *   for (const auto v : cell->vertex_indices())
2658 *   if (vertex_touched[cell->vertex_index(v)] == false)
2659 *   {
2660 *   vertex_touched[cell->vertex_index(v)] = true;
2661 *  
2662 *   Point<dim> vertex_displacement;
2663 *   for (unsigned int d = 0; d < dim; ++d)
2664 *   vertex_displacement[d] =
2665 *   incremental_displacement(cell->vertex_dof_index(v, d));
2666 *  
2667 *   cell->vertex(v) += vertex_displacement;
2668 *   }
2669 *   }
2670 *  
2671 *  
2672 * @endcode
2673 *
2674 *
2675 * <a name="step_18-TopLevelsetup_quadrature_point_history"></a>
2676 * <h4>TopLevel::setup_quadrature_point_history</h4>
2677 *
2678
2679 *
2680 * At the beginning of our computations, we needed to set up initial values
2681 * of the history variables, such as the existing stresses in the material,
2682 * that we store in each quadrature point. As mentioned above, we use the
2683 * <code>user_pointer</code> for this that is available in each cell.
2684 *
2685
2686 *
2687 * To put this into larger perspective, we note that if we had previously
2688 * available stresses in our model (which we assume do not exist for the
2689 * purpose of this program), then we would need to interpolate the field of
2690 * preexisting stresses to the quadrature points. Likewise, if we were to
2691 * simulate elasto-plastic materials with hardening/softening, then we would
2692 * have to store additional history variables like the present yield stress
2693 * or the accumulated plastic strains in each quadrature
2694 * point. Pre-existing hardening or weakening would then be implemented by
2695 * interpolating these variables in the present function as well.
2696 *
2697 * @code
2698 *   template <int dim>
2699 *   void TopLevel<dim>::setup_quadrature_point_history()
2700 *   {
2701 * @endcode
2702 *
2703 * For good measure, we set all user pointers of all cells, whether
2704 * ours or not, to the null pointer. This way, if we ever access the user
2705 * pointer of a cell which we should not have accessed, a segmentation
2706 * fault will let us know that this should not have happened:
2707 *
2708
2709 *
2710 *
2711 * @code
2712 *   triangulation.clear_user_data();
2713 *  
2714 * @endcode
2715 *
2716 * Next, allocate the quadrature objects that are within the responsibility
2717 * of this processor. This, of course, equals the number of cells that
2718 * belong to this processor times the number of quadrature points our
2719 * quadrature formula has on each cell. Since the `resize()` function does
2720 * not actually shrink the amount of allocated memory if the requested new
2721 * size is smaller than the old size, we resort to a trick to first free all
2722 * memory, and then reallocate it: we declare an empty vector as a temporary
2723 * variable and then swap the contents of the old vector and this temporary
2724 * variable. This makes sure that the `quadrature_point_history` is now
2725 * really empty, and we can let the temporary variable that now holds the
2726 * previous contents of the vector go out of scope and be destroyed. In the
2727 * next step we can then re-allocate as many elements as we need, with the
2728 * vector default-initializing the `PointHistory` objects, which includes
2729 * setting the stress variables to zero.
2730 *
2731 * @code
2732 *   {
2733 *   std::vector<PointHistory<dim>> tmp;
2734 *   quadrature_point_history.swap(tmp);
2735 *   }
2736 *   quadrature_point_history.resize(
2737 *   triangulation.n_locally_owned_active_cells() * quadrature_formula.size());
2738 *  
2739 * @endcode
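 *
 * As a purely C++ side note, not specific to deal.II: since C++11 one could
 * also try <code>clear()</code> followed by <code>shrink_to_fit()</code> to
 * the same end, keeping in mind that the latter is only a non-binding request
 * to the implementation, whereas the swap trick above guarantees that the old
 * storage is released. A sketch of that variant:
 *
 * @code
 *   quadrature_point_history.clear();
 *   quadrature_point_history.shrink_to_fit(); // may or may not release memory
 *   quadrature_point_history.resize(
 *     triangulation.n_locally_owned_active_cells() * quadrature_formula.size());
 * @endcode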
2740 *
2741 * Finally loop over all cells again and set the user pointers from the
2742 * cells that belong to the present processor to point to the first
2743 * quadrature point objects corresponding to this cell in the vector of
2744 * such objects:
2745 *
2746 * @code
2747 *   unsigned int history_index = 0;
2748 *   for (auto &cell : triangulation.active_cell_iterators())
2749 *   if (cell->is_locally_owned())
2750 *   {
2751 *   cell->set_user_pointer(&quadrature_point_history[history_index]);
2752 *   history_index += quadrature_formula.size();
2753 *   }
2754 *  
2755 * @endcode
2756 *
2757 * At the end, for good measure make sure that our count of elements was
2758 * correct and that we have both used up all objects we allocated
2759 * previously, and do not point to any objects beyond the end of the
2760 * vector. Such defensive programming strategies are always good checks to
2761 * avoid accidental errors and to guard against future changes to this
2762 * function that forget to update all uses of a variable at the same
2763 * time. Recall that constructs using the <code>Assert</code> macro are
2764 * optimized away in optimized mode, so do not affect the run time of
2765 * optimized runs:
2766 *
2767 * @code
2768 *   Assert(history_index == quadrature_point_history.size(),
2769 *   ExcInternalError());
2770 *   }
2771 *  
2772 *  
2773 *  
2774 * @endcode
2775 *
2776 *
2777 * <a name="step_18-TopLevelupdate_quadrature_point_history"></a>
2778 * <h4>TopLevel::update_quadrature_point_history</h4>
2779 *
2780
2781 *
2782 * At the end of each time step, we should have computed an incremental
2783 * displacement update so that the material in its new configuration
2784 * accommodates for the difference between the external body and boundary
2785 * forces applied during this time step minus the forces exerted through
2786 * preexisting internal stresses. In order to have the preexisting
2787 * stresses available at the next time step, we therefore have to update the
2788 * preexisting stresses with the stresses due to the incremental
2789 * displacement computed during the present time step. Ideally, the
2790 * resulting sum of internal stresses would exactly counter all external
2791 * forces. Indeed, a simple experiment can make sure that this is so: if we
2792 * choose boundary conditions and body forces to be time independent, then
2793 * the forcing terms (the sum of external forces and internal stresses)
2794 * should be exactly zero. If you make this experiment, you will realize
2795 * from the output of the norm of the right hand side in each time step that
2796 * this is almost the case: it is not exactly zero, since in the first time
2797 * step the incremental displacement and stress updates were computed
2798 * relative to the undeformed mesh, which was then deformed. In the second
2799 * time step, we again compute displacement and stress updates, but this
2800 * time in the deformed mesh -- there, the resulting updates are very small
2801 * but not quite zero. This can be iterated, and in each such iteration the
2802 * residual, i.e. the norm of the right hand side vector, is reduced; if one
2803 * makes this little experiment, one realizes that the norm of this residual
2804 * decays exponentially with the number of iterations, and after an initial
2805 * very rapid decline is reduced by roughly a factor of 3.5 in each
2806 * iteration (for one testcase I looked at, other testcases, and other
2807 * numbers of unknowns change the factor, but not the exponential decay).
2808 *
2809
2810 *
2811 * In a sense, this can then be considered as a quasi-timestepping scheme to
2812 * resolve the nonlinear problem of solving large-deformation elasticity on
2813 * a mesh that is moved along in a Lagrangian manner.
2814 *
2815
2816 *
2817 * Another complication is that the existing (old) stresses are defined on
2818 * the old mesh, which we will move around after updating the stresses. If
2819 * this mesh update involves rotations of the cell, then we need to also
2820 * rotate the updated stress, since it was computed relative to the
2821 * coordinate system of the old cell.
2822 *
2823
2824 *
2825 * Thus, what we need is the following: on each cell which the present
2826 * processor owns, we need to extract the old stress from the data stored
2827 * with each quadrature point, compute the stress update, add the two
2828 * together, and then rotate the result together with the incremental
2829 * rotation computed from the incremental displacement at the present
2830 * quadrature point. We will detail these steps below:
2831 *
2832 * @code
2833 *   template <int dim>
2834 *   void TopLevel<dim>::update_quadrature_point_history()
2835 *   {
2836 * @endcode
2837 *
2838 * First, set up an <code>FEValues</code> object by which we will evaluate
2839 * the incremental displacements and the gradients thereof at the
2840 * quadrature points, together with a vector that will hold this
2841 * information:
2842 *
2843 * @code
2844 *   FEValues<dim> fe_values(fe,
2845 *   quadrature_formula,
2846 *   update_values | update_gradients);
2847 *  
2848 *   std::vector<std::vector<Tensor<1, dim>>> displacement_increment_grads(
2849 *   quadrature_formula.size(), std::vector<Tensor<1, dim>>(dim));
2850 *  
2851 * @endcode
2852 *
2853 * Then loop over all cells and do the job in the cells that belong to our
2854 * subdomain:
2855 *
2856 * @code
2857 *   for (auto &cell : dof_handler.active_cell_iterators())
2858 *   if (cell->is_locally_owned())
2859 *   {
2860 * @endcode
2861 *
2862 * Next, get a pointer to the quadrature point history data local to
2863 * the present cell, and, as a defensive measure, make sure that
2864 * this pointer is within the bounds of the global array:
2865 *
2866 * @code
2867 *   PointHistory<dim> *local_quadrature_points_history =
2868 *   reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
2869 *   Assert(local_quadrature_points_history >=
2870 *   &quadrature_point_history.front(),
2871 *   ExcInternalError());
2872 *   Assert(local_quadrature_points_history <=
2873 *   &quadrature_point_history.back(),
2874 *   ExcInternalError());
2875 *  
2876 * @endcode
2877 *
2878 * Then initialize the <code>FEValues</code> object on the present
2879 * cell, and extract the gradients of the displacement at the
2880 * quadrature points for later computation of the strains
2881 *
2882 * @code
2883 *   fe_values.reinit(cell);
2884 *   fe_values.get_function_gradients(incremental_displacement,
2885 *   displacement_increment_grads);
2886 *  
2887 * @endcode
2888 *
2889 * Then loop over the quadrature points of this cell:
2890 *
2891 * @code
2892 *   for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
2893 *   {
2894 * @endcode
2895 *
2896 * On each quadrature point, compute the strain increment from
2897 * the gradients, and multiply it by the stress-strain tensor to
2898 * get the stress update. Then add this update to the already
2899 * existing stress at this point:
2900 *
2901 * @code
2902 *   const SymmetricTensor<2, dim> new_stress =
2903 *   (local_quadrature_points_history[q].old_stress +
2904 *   (stress_strain_tensor *
2905 *   get_strain(displacement_increment_grads[q])));
2906 *  
2907 * @endcode
2908 *
2909 * Finally, we have to rotate the result. For this, we first
2910 * have to compute a rotation matrix at the present quadrature
2911 * point from the incremental displacements. In fact, it can be
2912 * computed from the gradients, and we already have a function
2913 * for that purpose:
2914 *
2915 * @code
2916 *   const Tensor<2, dim> rotation =
2917 *   get_rotation_matrix(displacement_increment_grads[q]);
2918 * @endcode
2919 *
2920 * Note that the result, a rotation matrix, is in general not a
2921 * symmetric tensor of rank 2, so we must store it as a full
2922 * tensor.
2923 *
2924
2925 *
2926 * With this rotation matrix, we can compute the rotated tensor
2927 * by contraction from the left and right, after we expand the
2928 * symmetric tensor <code>new_stress</code> into a full tensor:
2929 *
2930 * @code
2931 *   const SymmetricTensor<2, dim> rotated_new_stress =
2932 *   symmetrize(transpose(rotation) *
2933 *   static_cast<Tensor<2, dim>>(new_stress) * rotation);
2934 * @endcode
2935 *
2936 * Note that while the result of the multiplication of these
2937 * three matrices should be symmetric, it is not, due to floating
2938 * point round-off: we get an asymmetry on the order of 1e-16 in
2939 * the off-diagonal elements of the result. When assigning the
2940 * result to a <code>SymmetricTensor</code>, the constructor of
2941 * that class checks the symmetry and realizes that it isn't
2942 * exactly symmetric; it will then raise an exception. To avoid
2943 * that, we explicitly symmetrize the result to make it exactly
2944 * symmetric.
2945 *
2946
2947 *
2948 * The result of all these operations is then written back into
2949 * the original place:
2950 *
2951 * @code
2952 *   local_quadrature_points_history[q].old_stress =
2953 *   rotated_new_stress;
2954 *   }
2955 *   }
2956 *   }
2957 *  
2958 * @endcode
2959 *
2960 * This ends the project-specific namespace <code>Step18</code>. The rest is
2961 * as usual and as already shown in @ref step_17 "step-17": A <code>main()</code> function
2962 * that initializes and terminates PETSc, calls the classes that do the
2963 * actual work, and makes sure that we catch all exceptions that propagate
2964 * up to this point:
2965 *
2966 * @code
2967 *   } // namespace Step18
2968 *  
2969 *  
2970 *   int main(int argc, char **argv)
2971 *   {
2972 *   try
2973 *   {
2974 *   using namespace dealii;
2975 *   using namespace Step18;
2976 *  
2977 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
2978 *  
2979 *   TopLevel<3> elastic_problem;
2980 *   elastic_problem.run();
2981 *   }
2982 *   catch (std::exception &exc)
2983 *   {
2984 *   std::cerr << std::endl
2985 *   << std::endl
2986 *   << "----------------------------------------------------"
2987 *   << std::endl;
2988 *   std::cerr << "Exception on processing: " << std::endl
2989 *   << exc.what() << std::endl
2990 *   << "Aborting!" << std::endl
2991 *   << "----------------------------------------------------"
2992 *   << std::endl;
2993 *  
2994 *   return 1;
2995 *   }
2996 *   catch (...)
2997 *   {
2998 *   std::cerr << std::endl
2999 *   << std::endl
3000 *   << "----------------------------------------------------"
3001 *   << std::endl;
3002 *   std::cerr << "Unknown exception!" << std::endl
3003 *   << "Aborting!" << std::endl
3004 *   << "----------------------------------------------------"
3005 *   << std::endl;
3006 *   return 1;
3007 *   }
3008 *  
3009 *   return 0;
3010 *   }
3011 * @endcode
3012<a name="step_18-Results"></a><h1>Results</h1>
3013
3014
3015
3016Running the program takes a good while if one uses debug mode; it takes about
3017eleven minutes on my i7 desktop. Fortunately, the version compiled with
3018optimizations is much faster; the program only takes about a minute and a half
3019after recompiling with the command <tt>make release</tt> on the same machine, a
3020much more reasonable time.
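
For reference, the switch to the optimized build and the subsequent run amount
to nothing more than
@verbatim
\$ make release
\$ make run
@endverbatim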
3021
3022
3023If run, the program prints the following output, explaining what it is
3024doing during all that time:
3025@verbatim
3026\$ time make run
3027[ 66%] Built target step-18
3028[100%] Run step-18 with Release configuration
3029Timestep 1 at time 1
3030 Cycle 0:
3031 Number of active cells: 3712 (by partition: 3712)
3032 Number of degrees of freedom: 17226 (by partition: 17226)
3033 Assembling system... norm of rhs is 1.88062e+10
3034 Solver converged in 103 iterations.
3035 Updating quadrature point data...
3036 Cycle 1:
3037 Number of active cells: 12812 (by partition: 12812)
3038 Number of degrees of freedom: 51738 (by partition: 51738)
3039 Assembling system... norm of rhs is 1.86145e+10
3040 Solver converged in 121 iterations.
3041 Updating quadrature point data...
3042 Moving mesh...
3043
3044Timestep 2 at time 2
3045 Assembling system... norm of rhs is 1.84169e+10
3046 Solver converged in 122 iterations.
3047 Updating quadrature point data...
3048 Moving mesh...
3049
3050Timestep 3 at time 3
3051 Assembling system... norm of rhs is 1.82355e+10
3052 Solver converged in 122 iterations.
3053 Updating quadrature point data...
3054 Moving mesh...
3055
3056Timestep 4 at time 4
3057 Assembling system... norm of rhs is 1.80728e+10
3058 Solver converged in 117 iterations.
3059 Updating quadrature point data...
3060 Moving mesh...
3061
3062Timestep 5 at time 5
3063 Assembling system... norm of rhs is 1.79318e+10
3064 Solver converged in 116 iterations.
3065 Updating quadrature point data...
3066 Moving mesh...
3067
3068Timestep 6 at time 6
3069 Assembling system... norm of rhs is 1.78171e+10
3070 Solver converged in 115 iterations.
3071 Updating quadrature point data...
3072 Moving mesh...
3073
3074Timestep 7 at time 7
3075 Assembling system... norm of rhs is 1.7737e+10
3076 Solver converged in 112 iterations.
3077 Updating quadrature point data...
3078 Moving mesh...
3079
3080Timestep 8 at time 8
3081 Assembling system... norm of rhs is 1.77127e+10
3082 Solver converged in 111 iterations.
3083 Updating quadrature point data...
3084 Moving mesh...
3085
3086Timestep 9 at time 9
3087 Assembling system... norm of rhs is 1.78207e+10
3088 Solver converged in 113 iterations.
3089 Updating quadrature point data...
3090 Moving mesh...
3091
3092Timestep 10 at time 10
3093 Assembling system... norm of rhs is 1.83544e+10
3094 Solver converged in 115 iterations.
3095 Updating quadrature point data...
3096 Moving mesh...
3097
3098[100%] Built target run
3099make run 176.82s user 0.15s system 198% cpu 1:28.94 total
3100@endverbatim
3101In other words, the program computes on 12,000 cells with some 52,000
3102unknowns. Not a whole lot, but enough for a coupled three-dimensional
3103problem to keep a computer busy for a while. At the end of the day,
3104this is what we have for output:
3105@verbatim
3106\$ ls -l *vtu *visit
3107-rw-r--r-- 1 drwells users 1706059 Feb 13 19:36 solution-0010.000.vtu
3108-rw-r--r-- 1 drwells users 761 Feb 13 19:36 solution-0010.pvtu
3109-rw-r--r-- 1 drwells users 33 Feb 13 19:36 solution-0010.visit
3110-rw-r--r-- 1 drwells users 1707907 Feb 13 19:36 solution-0009.000.vtu
3111-rw-r--r-- 1 drwells users 761 Feb 13 19:36 solution-0009.pvtu
3112-rw-r--r-- 1 drwells users 33 Feb 13 19:36 solution-0009.visit
3113-rw-r--r-- 1 drwells users 1703771 Feb 13 19:35 solution-0008.000.vtu
3114-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0008.pvtu
3115-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0008.visit
3116-rw-r--r-- 1 drwells users 1693671 Feb 13 19:35 solution-0007.000.vtu
3117-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0007.pvtu
3118-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0007.visit
3119-rw-r--r-- 1 drwells users 1681847 Feb 13 19:35 solution-0006.000.vtu
3120-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0006.pvtu
3121-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0006.visit
3122-rw-r--r-- 1 drwells users 1670115 Feb 13 19:35 solution-0005.000.vtu
3123-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0005.pvtu
3124-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0005.visit
3125-rw-r--r-- 1 drwells users 1658559 Feb 13 19:35 solution-0004.000.vtu
3126-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0004.pvtu
3127-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0004.visit
3128-rw-r--r-- 1 drwells users 1639983 Feb 13 19:35 solution-0003.000.vtu
3129-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0003.pvtu
3130-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0003.visit
3131-rw-r--r-- 1 drwells users 1625851 Feb 13 19:35 solution-0002.000.vtu
3132-rw-r--r-- 1 drwells users 761 Feb 13 19:35 solution-0002.pvtu
3133-rw-r--r-- 1 drwells users 33 Feb 13 19:35 solution-0002.visit
3134-rw-r--r-- 1 drwells users 1616035 Feb 13 19:34 solution-0001.000.vtu
3135-rw-r--r-- 1 drwells users 761 Feb 13 19:34 solution-0001.pvtu
3136-rw-r--r-- 1 drwells users 33 Feb 13 19:34 solution-0001.visit
3137@endverbatim
3138
3139
3140If we visualize these files with VisIt or ParaView, we get to see the full picture
3141of the disaster our forced compression wreaks on the cylinder (colors in the
3142images encode the norm of the stress in the material):
3143
3144
3145<div class="threecolumn" style="width: 80%">
3146 <div class="parent">
3147 <div class="img" align="center">
3148 <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0002.0000.png"
3149 alt="Time = 2"
3150 width="400">
3151 </div>
3152 <div class="text" align="center">
3153 Time = 2
3154 </div>
3155 </div>
3156 <div class="parent">
3157 <div class="img" align="center">
3158 <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0005.0000.png"
3159 alt="Time = 5"
3160 width="400">
3161 </div>
3162 <div class="text" align="center">
3163 Time = 5
3164 </div>
3165 </div>
3166 <div class="parent">
3167 <div class="img" align="center">
3168 <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0007.0000.png"
3169 alt="Time = 7"
3170 width="400">
3171 </div>
3172 <div class="text" align="center">
3173 Time = 7
3174 </div>
3175 </div>
3176</div>
3177
3178
3179<div class="threecolumn" style="width: 80%">
3180 <div class="parent">
3181 <div class="img" align="center">
3182 <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0008.0000.png"
3183 alt="Time = 8"
3184 width="400">
3185 </div>
3186 <div class="text" align="center">
3187 Time = 8
3188 </div>
3189 </div>
3190 <div class="parent">
3191 <div class="img" align="center">
3192 <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0009.0000.png"
3193 alt="Time = 9"
3194 width="400">
3195 </div>
3196 <div class="text" align="center">
3197 Time = 9
3198 </div>
3199 </div>
3200 <div class="parent">
3201 <div class="img" align="center">
3202 <img src="https://www.dealii.org/images/steps/developer/step-18.sequential-0010.0000.png"
3203 alt="Time = 10"
3204 width="400">
3205 </div>
3206 <div class="text" align="center">
3207 Time = 10
3208 </div>
3209 </div>
3210</div>
3211
3212
3213As we keep compressing the cylinder, it visibly starts
3214to bow out near the fully constrained bottom surface and, after about eight
3215time units, to buckle in an azimuthally symmetric manner.
3216
3217
3218Although the result appears plausible for the symmetric geometry and loading,
3219it is yet to be established whether or not the computation is fully converged.
3220In order to see whether it is, we ran the program again with one more global
3221refinement at the beginning and with the time step halved. This would have
3222taken a very long time on a single machine, so we used a proper workstation and
3223ran it on 16 processors in parallel. The beginning of the output now looks like
3224this:
3225@verbatim
3226Timestep 1 at time 0.5
3227 Cycle 0:
3228 Number of active cells: 29696 (by partition: 1808+1802+1894+1881+1870+1840+1884+1810+1876+1818+1870+1884+1854+1903+1816+1886)
3229 Number of degrees of freedom: 113100 (by partition: 6936+6930+7305+7116+7326+6869+7331+6786+7193+6829+7093+7162+6920+7280+6843+7181)
3230 Assembling system... norm of rhs is 1.10765e+10
3231 Solver converged in 209 iterations.
3232 Updating quadrature point data...
3233 Cycle 1:
3234 Number of active cells: 102034 (by partition: 6387+6202+6421+6341+6408+6201+6428+6428+6385+6294+6506+6244+6417+6527+6299+6546)
3235 Number of degrees of freedom: 359337 (by partition: 23255+21308+24774+24019+22304+21415+22430+22184+22298+21796+22396+21592+22325+22553+21977+22711)
3236 Assembling system... norm of rhs is 1.35759e+10
3237 Solver converged in 268 iterations.
3238 Updating quadrature point data...
3239 Moving mesh...
3240
3241Timestep 2 at time 1
3242 Assembling system... norm of rhs is 1.34674e+10
3243 Solver converged in 267 iterations.
3244 Updating quadrature point data...
3245 Moving mesh...
3246
3247Timestep 3 at time 1.5
3248 Assembling system... norm of rhs is 1.33607e+10
3249 Solver converged in 265 iterations.
3250 Updating quadrature point data...
3251 Moving mesh...
3252
3253Timestep 4 at time 2
3254 Assembling system... norm of rhs is 1.32558e+10
3255 Solver converged in 263 iterations.
3256 Updating quadrature point data...
3257 Moving mesh...
3258
3259[...]
3260
3261Timestep 20 at time 10
3262 Assembling system... norm of rhs is 1.47755e+10
3263 Solver converged in 425 iterations.
3264 Updating quadrature point data...
3265 Moving mesh...
3266@endverbatim
3267That's quite a good number of unknowns, given that we are in 3d. The output of
3268this program consists of 16 files for each time step:
3269@verbatim
3270\$ ls -l solution-0001*
3271-rw-r--r-- 1 wellsd2 user 761065 Feb 13 21:09 solution-0001.000.vtu
3272-rw-r--r-- 1 wellsd2 user 759277 Feb 13 21:09 solution-0001.001.vtu
3273-rw-r--r-- 1 wellsd2 user 761217 Feb 13 21:09 solution-0001.002.vtu
3274-rw-r--r-- 1 wellsd2 user 761605 Feb 13 21:09 solution-0001.003.vtu
3275-rw-r--r-- 1 wellsd2 user 756917 Feb 13 21:09 solution-0001.004.vtu
3276-rw-r--r-- 1 wellsd2 user 752669 Feb 13 21:09 solution-0001.005.vtu
3277-rw-r--r-- 1 wellsd2 user 735217 Feb 13 21:09 solution-0001.006.vtu
3278-rw-r--r-- 1 wellsd2 user 750065 Feb 13 21:09 solution-0001.007.vtu
3279-rw-r--r-- 1 wellsd2 user 760273 Feb 13 21:09 solution-0001.008.vtu
3280-rw-r--r-- 1 wellsd2 user 777265 Feb 13 21:09 solution-0001.009.vtu
3281-rw-r--r-- 1 wellsd2 user 772469 Feb 13 21:09 solution-0001.010.vtu
3282-rw-r--r-- 1 wellsd2 user 760833 Feb 13 21:09 solution-0001.011.vtu
3283-rw-r--r-- 1 wellsd2 user 782241 Feb 13 21:09 solution-0001.012.vtu
3284-rw-r--r-- 1 wellsd2 user 748905 Feb 13 21:09 solution-0001.013.vtu
3285-rw-r--r-- 1 wellsd2 user 738413 Feb 13 21:09 solution-0001.014.vtu
3286-rw-r--r-- 1 wellsd2 user 762133 Feb 13 21:09 solution-0001.015.vtu
3287-rw-r--r-- 1 wellsd2 user 1421 Feb 13 21:09 solution-0001.pvtu
3288-rw-r--r-- 1 wellsd2 user 364 Feb 13 21:09 solution-0001.visit
3289@endverbatim
3290
3291
3292Here are, first, the mesh on which we compute and, second, the partitioning
3293among the 16 processors:
3294
3295
3296<div class="twocolumn" style="width: 80%">
3297 <div class="parent">
3298 <div class="img" align="center">
3299 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-000mesh.png"
3300 alt="Discretization"
3301 width="400">
3302 </div>
3303 <div class="text" align="center">
3304 Discretization
3305 </div>
3306 </div>
3307 <div class="parent">
3308 <div class="img" align="center">
3309 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0002.p.png"
3310 alt="Parallel partitioning"
3311 width="400">
3312 </div>
3313 <div class="text" align="center">
3314 Parallel partitioning
3315 </div>
3316 </div>
3317</div>
3318
3319
3320Finally, here is the same output as we have shown before for the much smaller
3321sequential case:
3322
3323<div class="threecolumn" style="width: 80%">
3324 <div class="parent">
3325 <div class="img" align="center">
3326 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0002.s.png"
3327 alt="Time = 2"
3328 width="400">
3329 </div>
3330 <div class="text" align="center">
3331 Time = 2
3332 </div>
3333 </div>
3334 <div class="parent">
3335 <div class="img" align="center">
3336 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0005.s.png"
3337 alt="Time = 5"
3338 width="400">
3339 </div>
3340 <div class="text" align="center">
3341 Time = 5
3342 </div>
3343 </div>
3344 <div class="parent">
3345 <div class="img" align="center">
3346 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0007.s.png"
3347 alt="Time = 7"
3348 width="400">
3349 </div>
3350 <div class="text" align="center">
3351 Time = 7
3352 </div>
3353 </div>
3354</div>
3355
3356
3357<div class="threecolumn" style="width: 80%">
3358 <div class="parent">
3359 <div class="img" align="center">
3360 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0008.s.png"
3361 alt="Time = 8"
3362 width="400">
3363 </div>
3364 <div class="text" align="center">
3365 Time = 8
3366 </div>
3367 </div>
3368 <div class="parent">
3369 <div class="img" align="center">
3370 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0009.s.png"
3371 alt="Time = 9"
3372 width="400">
3373 </div>
3374 <div class="text" align="center">
3375 Time = 9
3376 </div>
3377 </div>
3378 <div class="parent">
3379 <div class="img" align="center">
3380 <img src="https://www.dealii.org/images/steps/developer/step-18.parallel-0010.s.png"
3381 alt="Time = 10"
3382 width="400">
3383 </div>
3384 <div class="text" align="center">
3385 Time = 10
3386 </div>
3387 </div>
3388</div>
3389
3390
3391As before, we observe that at high axial compression the cylinder begins
3392to buckle, but this time ultimately collapses on itself. In contrast to our
3393first run, towards the end of the simulation the deflection pattern becomes
3394nonsymmetric (the central bulge deflects laterally). The model clearly does not
3395provide for this (all our forces and boundary deflections are symmetric) but the
3396effect is probably physically correct anyway: in reality, small inhomogeneities
3397in the body's material properties would lead it to buckle to one side
3398to evade the forcing; in numerical simulations, small perturbations
3399such as numerical round-off or an inexact solution of a linear system
3400by an iterative solver could have the same effect. Another typical source for
3401asymmetries in adaptive computations is that only a certain fraction of cells
3402is refined in each step, which may lead to asymmetric meshes even if the
3403original coarse mesh was symmetric.
3404
3405
3406If one compares this with the previous run, the results are both qualitatively
3407and quantitatively different. The previous computation was
3408therefore certainly not converged, though we can't say anything for sure about
3409the present one. One would need an even finer computation to find out. However,
3410the point may be moot: looking at the last picture in detail, it is pretty
3411obvious that not only is the linear small deformation model we chose completely
3412inadequate, but for a realistic simulation we would also need to make sure that
3413the body does not intersect itself during deformation (if we continued
3414compressing the cylinder we would observe some self-intersection).
3415Without such a formulation we cannot expect anything to make physical sense,
3416even if it produces nice pictures!
3417
3418
3419<a name="step_18-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
3420
3421
3422The program as is does not really solve an equation that has many applications
3423in practice: quasi-static material deformation based on a purely elastic law
3424is almost boring. However, the program may serve as the starting point for
3425more interesting experiments, and that indeed was the initial motivation for
3426writing it. Here are some suggestions of what the program is missing and in
3427what direction it may be extended:
3428
3429<a name="step_18-Plasticitymodels"></a><h5>Plasticity models</h5>
3430
3431
3432 The most obvious extension is to use a more
3433realistic material model for large-scale quasistatic deformation. The natural
3434choice for this would be plasticity, in which a nonlinear relationship between
3435stress and strain replaces equation <a href="#step_18.stress-strain">[stress-strain]</a>. Plasticity
3436models are usually rather complicated to program since the stress-strain
3437dependence is generally non-smooth. The material can be thought of as being able
3438to withstand only a maximal stress (the yield stress) after which it starts to
3439&ldquo;flow&rdquo;. A mathematical description of this can be given in the form of a
3440variational inequality, which alternatively can be treated as minimizing the
3441elastic energy
3442@f[
3443 E(\mathbf{u}) =
3444 (\varepsilon(\mathbf{u}), C\varepsilon(\mathbf{u}))_{\Omega}
3445 - (\mathbf{f}, \mathbf{u})_{\Omega} - (\mathbf{b}, \mathbf{u})_{\Gamma_N},
3446@f]
3447subject to the constraint
3448@f[
3449 f(\sigma(\mathbf{u})) \le 0
3450@f]
3451on the stress. This extension makes the problem that has to be solved in each
3452time step nonlinear, so we need another, inner loop within each time step.
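
To give a flavor of what one step of such an inner loop might look like at a
single quadrature point, here is a small, purely illustrative sketch of a
radial-return update for a von Mises yield criterion (perfect plasticity, no
hardening). Neither the function nor its yield stress parameter is part of
step-18; the sketch is only meant to show where the nonlinearity enters:
@code
#include <deal.II/base/symmetric_tensor.h>

#include <cmath>

using namespace dealii;

// If the trial stress lies outside the von Mises yield surface, scale its
// deviatoric part back onto that surface; otherwise return it unchanged.
template <int dim>
SymmetricTensor<2, dim>
radial_return(const SymmetricTensor<2, dim> &trial_stress,
              const double                   yield_stress)
{
  const SymmetricTensor<2, dim> dev       = deviator(trial_stress);
  const double                  von_mises = std::sqrt(3. / 2.) * dev.norm();

  if (von_mises <= yield_stress)
    return trial_stress; // still elastic: nothing to do

  // Project back: keep the volumetric part, shrink the deviatoric part.
  return (yield_stress / von_mises) * dev +
         (trace(trial_stress) / dim) * unit_symmetric_tensor<dim>();
}
@endcode
In the terms of the program above, one would apply such a projection to the
trial stress <code>old_stress + stress_strain_tensor * get_strain(...)</code>
and then iterate, since the stress now depends nonlinearly on the displacement
increment.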
3453
3454Without going into further details of this model, we refer to the excellent
3455book by Simo and Hughes on &ldquo;Computational Inelasticity&rdquo; for a
3456comprehensive overview of computational strategies for solving plastic
3457models. Alternatively, a brief but concise description of an algorithm for
3458plasticity is given in an article by S. Commend, A. Truty, and Th. Zimmermann;
3459@cite CTZ04.
3460
3461
3462<a name="step_18-Stabilizationissues"></a><h5>Stabilization issues</h5>
3463
3464
3465The formulation we have chosen, i.e. using
3466piecewise (bi-, tri-)linear elements for all components of the displacement
3467vector, and treating the stress as a variable dependent on the displacement is
3468appropriate for most materials. However, this so-called displacement-based
3469formulation becomes unstable and exhibits spurious modes for incompressible or
3470nearly-incompressible materials. While fluids are usually not elastic (in most
3471cases, the stress depends on velocity gradients, not displacement gradients,
3472although there are exceptions such as electro-rheological fluids), there are a
3473few solids that are nearly incompressible, for example rubber. Another case is
3474that many plasticity models ultimately let the material become incompressible,
3475although this is outside the scope of the present program.
3476
3477Incompressibility is characterized by Poisson's ratio
3478@f[
3479 \nu = \frac{\lambda}{2(\lambda+\mu)},
3480@f]
3481where @f$\lambda,\mu@f$ are the Lam&eacute; constants of the material.
3482Physical constraints indicate that @f$-1\le \nu\le \frac 12@f$ (the condition
3483also follows from mathematical stability considerations). If @f$\nu@f$
3484approaches @f$\frac 12@f$, then the material becomes incompressible. In that
3485case, pure displacement-based formulations are no longer appropriate for the
3486solution of such problems, and stabilization techniques have to be employed
3487for a stable and accurate solution. The book and paper cited above give
3488indications as to how to do this, but there is also a large volume of
3489literature on this subject; a good start to get an overview of the topic can
3490be found in the references of the paper by H.-Y. Duan and Q. Lin; @cite DL05.
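
To put a number on the incompressible limit mentioned above: for a rubber-like
material with, say, @f$\lambda = 100\mu@f$, the formula gives
@f[
  \nu = \frac{100\mu}{2(100\mu + \mu)} = \frac{100}{202} \approx 0.495,
@f]
i.e., a value already very close to the critical @f$\frac 12@f$, whereas the
moderate ratios of @f$\lambda@f$ to @f$\mu@f$ typical of metals stay well away
from it.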
3491
3492
3493<a name="step_18-Refinementduringtimesteps"></a><h5>Refinement during timesteps</h5>
3494
3495
3496In the present form, the program
3497only refines the initial mesh a number of times, but then never again. For any
3498kind of realistic simulation, one would want to extend this so that the mesh
3499is refined and coarsened every few time steps instead. This is not hard to do,
3500in fact, but has been left for future tutorial programs or as an exercise, if
3501you wish.
3502
3503The main complication one has to overcome is that one has to
3504transfer the data that is stored in the quadrature points of the cells of the
3505old mesh to the new mesh, preferably by some sort of projection scheme. The
3506general approach to this would go like this:
3507
3508- At the beginning, the data is only available in the quadrature points of
3509 individual cells, not as a finite element field that is defined everywhere.
3510
3511- So let us find a finite element field that <i>is</i> defined everywhere so
3512 that we can later interpolate it to the quadrature points of the new
3513 mesh. In general, it will be difficult to find a continuous finite element
3514 field that matches the values in the quadrature points exactly because the
3515 number of degrees of freedom of these fields does not match the number of
3516 quadrature points there are, and the nodal values of this global field will
3517 either be over- or underdetermined. But it is usually not very difficult to
3518 find a discontinuous field that matches the values in the quadrature points;
3519 for example, if you have a QGauss(2) quadrature formula (i.e. 4 points per
3520 cell in 2d, 8 points in 3d), then one would use a finite element of kind
3521 FE_DGQ(1), i.e. bi-/tri-linear functions as these have 4 degrees of freedom
3522 per cell in 2d and 8 in 3d.
3523
3524- There are functions that can make this conversion from individual points to
3525 a global field simpler. The following piece of pseudo-code should help if
3526 you use a QGauss(2) quadrature formula. Note that the multiplication by the
3527 projection matrix below takes a vector of scalar components, i.e., we can only
3528 convert one set of scalars at a time from the quadrature points to the degrees
3529 of freedom and vice versa. So we need to store each component of stress separately,
3530 which requires <code>dim*dim</code> vectors. We'll store this set of vectors in a 2D array to
3531 make it easier to read off components in the same way you would the stress tensor.
3532 Thus, we'll loop over the components of stress on each cell and store
3533 these values in the global history field. (The prefix <code>history_</code>
3534 indicates that we work with quantities related to the history variables defined
3535 in the quadrature points.)
3536 @code
3537 FE_DGQ<dim> history_fe (1);
3538 DoFHandler<dim> history_dof_handler (triangulation);
3539 history_dof_handler.distribute_dofs (history_fe);
3540
3541 std::vector< std::vector< Vector<double> > >
3542 history_field (dim, std::vector< Vector<double> >(dim)),
3543 local_history_values_at_qpoints (dim, std::vector< Vector<double> >(dim)),
3544 local_history_fe_values (dim, std::vector< Vector<double> >(dim));
3545
3546 for (unsigned int i=0; i<dim; ++i)
3547 for (unsigned int j=0; j<dim; ++j)
3548 {
3549 history_field[i][j].reinit(history_dof_handler.n_dofs());
3550 local_history_values_at_qpoints[i][j].reinit(quadrature.size());
3551 local_history_fe_values[i][j].reinit(history_fe.n_dofs_per_cell());
3552 }
3553
3554 FullMatrix<double> qpoint_to_dof_matrix (history_fe.dofs_per_cell,
3555 quadrature.size());
3556 FETools::compute_projection_from_quadrature_points_matrix
3557 (history_fe,
3558 quadrature, quadrature,
3559 qpoint_to_dof_matrix);
3560
3561 typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active(),
3562 endc = dof_handler.end(),
3563 dg_cell = history_dof_handler.begin_active();
3564
3565 for (; cell!=endc; ++cell, ++dg_cell)
3566 {
3567
3568 PointHistory<dim> *local_quadrature_points_history
3569 = reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
3570
3571 Assert (local_quadrature_points_history >= &quadrature_point_history.front(),
3572 ExcInternalError());
3573 Assert (local_quadrature_points_history < &quadrature_point_history.back(),
3574 ExcInternalError());
3575
3576 for (unsigned int i=0; i<dim; ++i)
3577 for (unsigned int j=0; j<dim; ++j)
3578 {
3579 for (unsigned int q=0; q<quadrature.size(); ++q)
3580 local_history_values_at_qpoints[i][j](q)
3581 = local_quadrature_points_history[q].old_stress[i][j];
3582
3583 qpoint_to_dof_matrix.vmult (local_history_fe_values[i][j],
3584 local_history_values_at_qpoints[i][j]);
3585
3586 dg_cell->set_dof_values (local_history_fe_values[i][j],
3587 history_field[i][j]);
3588 }
3589 }
3590 @endcode
3591
3592- Now that we have a global field, we can refine the mesh and transfer the
3593 history_field vector as usual using the SolutionTransfer class. This will
3594 interpolate everything from the old to the new mesh.
3595
3596- In a final step, we have to get the data back from the now interpolated
3597 global field to the quadrature points on the new mesh. The following code
3598 will do that:
3599 @code
3600 FullMatrix<double> dof_to_qpoint_matrix (quadrature.size(),
3601 history_fe.n_dofs_per_cell());
3602 FETools::compute_interpolation_to_quadrature_points_matrix
3603 (history_fe,
3604 quadrature,
3605 dof_to_qpoint_matrix);
3606
3607 typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active(),
3608 endc = dof_handler.end(),
3609 dg_cell = history_dof_handler.begin_active();
3610
3611 for (; cell != endc; ++cell, ++dg_cell)
3612 {
3613 PointHistory<dim> *local_quadrature_points_history
3614 = reinterpret_cast<PointHistory<dim> *>(cell->user_pointer());
3615
3616 Assert (local_quadrature_points_history >= &quadrature_point_history.front(),
3617 ExcInternalError());
3618 Assert (local_quadrature_points_history < &quadrature_point_history.back(),
3619 ExcInternalError());
3620
3621 for (unsigned int i=0; i<dim; ++i)
3622 for (unsigned int j=0; j<dim; ++j)
3623 {
3624 dg_cell->get_dof_values (history_field[i][j],
3625 local_history_fe_values[i][j]);
3626
3627 dof_to_qpoint_matrix.vmult (local_history_values_at_qpoints[i][j],
3628 local_history_fe_values[i][j]);
3629
3630 for (unsigned int q=0; q<quadrature.size(); ++q)
3631 local_quadrature_points_history[q].old_stress[i][j]
3632 = local_history_values_at_qpoints[i][j](q);
3633 }
 }
3634 @endcode
3635
3636It becomes a bit more complicated once we run the program in parallel, since
3637then each process only stores this data for the cells it owned on the old
3638mesh. That said, using a parallel vector for <code>history_field</code> will
3639do the trick if you put a call to <code>compress</code> after the transfer
3640from quadrature points into the global vector.
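
As a minimal sketch of that last point (assuming, purely for illustration,
that the <code>history_field[i][j]</code> vectors have been changed to
PETScWrappers::MPI::Vector objects), the call would look like this:
@code
for (unsigned int i = 0; i < dim; ++i)
  for (unsigned int j = 0; j < dim; ++j)
    history_field[i][j].compress(VectorOperation::insert);
@endcode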
3641
3642
3643<a name="step_18-Ensuringmeshregularity"></a><h5>Ensuring mesh regularity</h5>
3644
3645
3646At present, the program makes no attempt
3647to make sure that a cell, after moving its vertices at the end of the time
3648step, still has a valid geometry (i.e. that its Jacobian determinant is
3649positive and bounded away from zero everywhere). It is, in fact, not very hard
3650to set boundary values and forcing terms in such a way that one gets distorted
3651and inverted cells rather quickly. Certainly, in some cases of large
3652deformation, this is unavoidable with a mesh of finite mesh size, but in some
3653other cases this should be preventable by appropriate mesh refinement and/or a
3654reduction of the time step size. The program does not do that, but a more
3655sophisticated version definitely should employ some sort of heuristic defining
3656what amount of deformation of cells is acceptable, and what isn't.
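
One possible, purely hypothetical building block for such a heuristic is
sketched below: before moving the vertices one records the measure (volume) of
every locally owned cell, and after moving them one checks that no cell has
shrunk below a given fraction of its former size. The function name, the map
of old measures, and the tolerance are all made up for illustration and are
not part of step-18:
@code
#include <deal.II/dofs/dof_handler.h>

#include <map>

using namespace dealii;

template <int dim>
bool mesh_update_is_acceptable(const DoFHandler<dim>          &dof_handler,
                               const std::map<CellId, double> &old_measures,
                               const double                    shrink_tolerance = 0.1)
{
  for (const auto &cell : dof_handler.active_cell_iterators())
    if (cell->is_locally_owned())
      if (cell->measure() < shrink_tolerance * old_measures.at(cell->id()))
        return false; // this cell has been squashed too much

  return true;
}
@endcode
If this check fails, one could, for example, reduce the time step size and
recompute the last increment, in the spirit of the remarks above.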
3657 *
3658 *
3659<a name="step_18-PlainProg"></a>
3660<h1> The plain program</h1>
3661@include "step-18.cc"
3662*/