step-44.h
Go to the documentation of this file.
1885 *   {}
1886 *  
1887 * @endcode
1888 *
1889 * Solve for the displacement using a Newton-Raphson method. We break this
1890 * function into the nonlinear loop and the function that solves the
1891 * linearized Newton-Raphson step:
1892 *
1893 * @code
1894 *   void solve_nonlinear_timestep(BlockVector<double> &solution_delta);
1895 *  
1896 *   std::pair<unsigned int, double>
1897 *   solve_linear_system(BlockVector<double> &newton_update);
1898 *  
1899 * @endcode
1900 *
1901 * Solution retrieval, as well as post-processing and writing data to file:
1902 *
1903 * @code
1904 *   BlockVector<double>
1905 *   get_total_solution(const BlockVector<double> &solution_delta) const;
1906 *  
1907 *   void output_results() const;
1908 *  
1909 * @endcode
1910 *
1911 * Finally, some member variables that describe the current state: a
1912 * collection of the parameters used to describe the problem setup...
1913 *
1914 * @code
1915 *   Parameters::AllParameters parameters;
1916 *  
1917 * @endcode
1918 *
1919 * ...the volume of the reference configuration...
1920 *
1921 * @code
1922 *   double vol_reference;
1923 *  
1924 * @endcode
1925 *
1926 * ...and description of the geometry on which the problem is solved:
1927 *
1928 * @code
1929 *   Triangulation<dim> triangulation;
1930 *  
1931 * @endcode
1932 *
1933 * Also, keep track of the current time and the time spent evaluating
1934 * certain functions:
1935 *
1936 * @code
1937 *   Time time;
1938 *   mutable TimerOutput timer;
1939 *  
1940 * @endcode
1941 *
1942 * A storage object for quadrature point information. As opposed to
1943 * @ref step_18 "step-18", deal.II's native quadrature point data manager is employed
1944 * here.
1945 *
1946 * @code
1947 *   CellDataStorage<typename Triangulation<dim>::cell_iterator,
1948 *   PointHistory<dim>>
1949 *   quadrature_point_history;
1950 *  
1951 * @endcode
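 *
 * For orientation: the PointHistory class stored here is defined earlier in
 * this tutorial program, together with the material model it wraps. A rough
 * sketch of the interface that the member functions below rely on
 * (declarations only, with the member names taken from the calls that appear
 * further down) would look like this:
 *
 * @code
 *   // Sketch of the per-quadrature-point data class used below; the real
 *   // definition appears earlier in step-44.
 *   template <int dim>
 *   class PointHistory
 *   {
 *   public:
 *     void setup_lqp(const Parameters::AllParameters &parameters);
 *     void update_values(const Tensor<2, dim> &grad_u_n,
 *                        const double          p_tilde,
 *                        const double          J_tilde);
 *
 *     double                  get_det_F() const;
 *     Tensor<2, dim>          get_F_inv() const;
 *     SymmetricTensor<2, dim> get_tau() const;
 *     SymmetricTensor<4, dim> get_Jc() const;
 *     double                  get_J_tilde() const;
 *     double                  get_p_tilde() const;
 *     double                  get_dPsi_vol_dJ() const;
 *     double                  get_d2Psi_vol_dJ2() const;
 *   };
 * @endcode
 *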
1952 *
1953 * A description of the finite-element system including the displacement
1954 * polynomial degree, the degree-of-freedom handler, number of DoFs per
1955 * cell and the extractor objects used to retrieve information from the
1956 * solution vectors:
1957 *
1958 * @code
1959 *   const unsigned int degree;
1960 *   const FESystem<dim> fe;
1961 *   DoFHandler<dim> dof_handler;
1962 *   const unsigned int dofs_per_cell;
1963 *  
1964 * @endcode
1965 *
1966 * Description of how the block-system is arranged. There are 3 blocks,
1967 * the first contains a vector DOF @f$\mathbf{u}@f$ while the other two
1968 * describe scalar DOFs, @f$\widetilde{p}@f$ and @f$\widetilde{J}@f$.
1969 *
1970 * @code
1971 *   static constexpr unsigned int n_blocks = 3;
1972 *   static constexpr unsigned int n_components = dim + 2;
1973 *   static constexpr unsigned int first_u_component = 0;
1974 *   static constexpr unsigned int p_component = dim;
1975 *   static constexpr unsigned int J_component = dim + 1;
1976 *  
1977 *   static constexpr FEValuesExtractors::Vector u_fe =
1978 *   FEValuesExtractors::Vector(first_u_component);
1979 *   static constexpr FEValuesExtractors::Scalar p_fe =
1980 *   FEValuesExtractors::Scalar(p_component);
1981 *   static constexpr FEValuesExtractors::Scalar J_fe =
1982 *   FEValuesExtractors::Scalar(J_component);
1983 *  
1984 *   enum
1985 *   {
1986 *   u_dof = 0,
1987 *   p_dof = 1,
1988 *   J_dof = 2
1989 *   };
1990 *  
1991 *   std::vector<types::global_dof_index> dofs_per_block;
1992 *   std::vector<types::global_dof_index> element_indices_u;
1993 *   std::vector<types::global_dof_index> element_indices_p;
1994 *   std::vector<types::global_dof_index> element_indices_J;
1995 *  
1996 * @endcode
1997 *
1998 * Rules for Gauss-quadrature on both the cell and faces. The number of
1999 * quadrature points on both cells and faces is recorded.
2000 *
2001 * @code
2002 *   const QGauss<dim> qf_cell;
2003 *   const QGauss<dim - 1> qf_face;
2004 *   const unsigned int n_q_points;
2005 *   const unsigned int n_q_points_f;
2006 *  
2007 * @endcode
2008 *
2009 * Objects that store the converged solution and right-hand side vectors,
2010 * as well as the tangent matrix. There is an AffineConstraints object used
2011 * to keep track of constraints. We make use of a sparsity pattern
2012 * designed for a block system.
2013 *
2014 * @code
2015 *   AffineConstraints<double> constraints;
2016 *   BlockSparsityPattern sparsity_pattern;
2017 *   BlockSparseMatrix<double> tangent_matrix;
2018 *   BlockVector<double> system_rhs;
2019 *   BlockVector<double> solution_n;
2020 *  
2021 * @endcode
2022 *
2023 * We then define a number of variables to store norms, update norms, and
2024 * normalization factors.
2025 *
2026 * @code
2027 *   struct Errors
2028 *   {
2029 *   Errors()
2030 *   : norm(1.0)
2031 *   , u(1.0)
2032 *   , p(1.0)
2033 *   , J(1.0)
2034 *   {}
2035 *  
2036 *   void reset()
2037 *   {
2038 *   norm = 1.0;
2039 *   u = 1.0;
2040 *   p = 1.0;
2041 *   J = 1.0;
2042 *   }
2043 *   void normalize(const Errors &rhs)
2044 *   {
2045 *   if (rhs.norm != 0.0)
2046 *   norm /= rhs.norm;
2047 *   if (rhs.u != 0.0)
2048 *   u /= rhs.u;
2049 *   if (rhs.p != 0.0)
2050 *   p /= rhs.p;
2051 *   if (rhs.J != 0.0)
2052 *   J /= rhs.J;
2053 *   }
2054 *  
2055 *   double norm, u, p, J;
2056 *   };
2057 *  
2058 *   Errors error_residual, error_residual_0, error_residual_norm, error_update,
2059 *   error_update_0, error_update_norm;
2060 *  
2061 * @endcode
2062 *
2063 * Methods to calculate error measures
2064 *
2065 * @code
2066 *   void get_error_residual(Errors &error_residual);
2067 *  
2068 *   void get_error_update(const BlockVector<double> &newton_update,
2069 *   Errors &error_update);
2070 *  
2071 *   std::pair<double, double> get_error_dilation() const;
2072 *  
2073 * @endcode
2074 *
2075 * Compute the volume in the spatial configuration
2076 *
2077 * @code
2078 *   double compute_vol_current() const;
2079 *  
2080 * @endcode
2081 *
2082 * Print information to screen in a pleasing way...
2083 *
2084 * @code
2085 *   static void print_conv_header();
2086 *  
2087 *   void print_conv_footer();
2088 *   };
2089 *  
2090 * @endcode
2091 *
2092 *
2093 * <a name="step_44-ImplementationofthecodeSolidcodeclass"></a>
2094 * <h3>Implementation of the <code>Solid</code> class</h3>
2095 *
2096
2097 *
2098 *
2099 * <a name="step_44-Publicinterface"></a>
2100 * <h4>Public interface</h4>
2101 *
2102
2103 *
2104 * We initialize the Solid class using data extracted from the parameter file.
2105 *
2106 * @code
2107 *   template <int dim>
2108 *   Solid<dim>::Solid(const std::string &input_file)
2109 *   : parameters(input_file)
2110 *   , vol_reference(0.)
2111 *   , triangulation(Triangulation<dim>::maximum_smoothing)
2112 *   , time(parameters.end_time, parameters.delta_t)
2113 *   , timer(std::cout, TimerOutput::summary, TimerOutput::wall_times)
2114 *   , degree(parameters.poly_degree)
2115 *   ,
2116 * @endcode
2117 *
2118 * The Finite Element System is composed of dim continuous displacement
2119 * DOFs, and discontinuous pressure and dilatation DOFs. In an attempt to
2120 * satisfy the Babuska-Brezzi or LBB stability conditions (see Hughes
2121 * (2000)), we set up a @f$Q_n \times DGP_{n-1} \times DGP_{n-1}@f$
2122 * system. @f$Q_2 \times DGP_1 \times DGP_1@f$ elements satisfy this
2123 * condition, while @f$Q_1 \times DGP_0 \times DGP_0@f$ elements do
2124 * not. However, it has been shown that the latter demonstrate good
2125 * convergence characteristics nonetheless.
2126 *
2127 * @code
2128 *   fe(FE_Q<dim>(parameters.poly_degree) ^ dim, // displacement
2129 *   FE_DGP<dim>(parameters.poly_degree - 1), // pressure
2130 *   FE_DGP<dim>(parameters.poly_degree - 1)) // dilatation
2131 *   , dof_handler(triangulation)
2132 *   , dofs_per_cell(fe.n_dofs_per_cell())
2133 *   , dofs_per_block(n_blocks)
2134 *   , qf_cell(parameters.quad_order)
2135 *   , qf_face(parameters.quad_order)
2136 *   , n_q_points(qf_cell.size())
2137 *   , n_q_points_f(qf_face.size())
2138 *   {
2139 *   Assert(dim == 2 || dim == 3,
2140 *   ExcMessage("This problem only works in 2 or 3 space dimensions."));
2141 *  
2142 * @endcode
2143 *
2144 * Next we compute some information from the FE system that describes which
2145 * local element DOFs are attached to which block component. This is used
2146 * later to extract sub-blocks from the global matrix.
2147 *
2148
2149 *
2150 * In essence, all we need is for the FESystem object to indicate to which
2151 * block component a DOF is attached. We can do that via the
2152 * FiniteElement::shape_function_belongs_to() function.
2153 *
2154 * @code
2155 *   for (unsigned int k = 0; k < fe.n_dofs_per_cell(); ++k)
2156 *   {
2157 *   if (fe.shape_function_belongs_to(k, u_fe))
2158 *   element_indices_u.push_back(k);
2159 *   else if (fe.shape_function_belongs_to(k, p_fe))
2160 *   element_indices_p.push_back(k);
2161 *   else if (fe.shape_function_belongs_to(k, J_fe))
2162 *   element_indices_J.push_back(k);
2163 *   else
2164 *   DEAL_II_ASSERT_UNREACHABLE();
2165 *   }
2166 *   }
2167 *  
2168 *  
2169 * @endcode
2170 *
2171 * In solving the quasi-static problem, the time becomes a loading parameter,
2172 * i.e., we increase the loading linearly with time, making the two concepts
2173 * interchangeable. We choose to increment time linearly using a constant time
2174 * step size.
2175 *
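 *
 * As an aside, the Time object used in this and the following functions is a
 * small bookkeeping helper defined earlier in the tutorial. A minimal sketch,
 * assuming only the members actually used in this section, could look as
 * follows:
 *
 * @code
 *   // Minimal sketch of a time/loading bookkeeping class: it stores the
 *   // current time, the end time, the constant step size and a step counter.
 *   class Time
 *   {
 *   public:
 *     Time(const double time_end, const double delta_t)
 *       : timestep(0)
 *       , time_current(0.0)
 *       , time_end(time_end)
 *       , delta_t(delta_t)
 *     {}
 *
 *     double       current() const { return time_current; }
 *     double       end() const { return time_end; }
 *     double       get_delta_t() const { return delta_t; }
 *     unsigned int get_timestep() const { return timestep; }
 *
 *     void increment() // advance by one (constant) time step
 *     {
 *       time_current += delta_t;
 *       ++timestep;
 *     }
 *
 *   private:
 *     unsigned int timestep;
 *     double       time_current;
 *     const double time_end;
 *     const double delta_t;
 *   };
 * @endcode
 *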
2176
2177 *
2178 * We start the function with preprocessing, setting the initial dilatation
2179 * values, and then output the initial grid before starting the simulation
2180 * proper with the first time (and loading)
2181 * increment.
2182 *
2183
2184 *
2185 * Care must be taken (or at least some thought given) when imposing the
2186 * constraint @f$\widetilde{J}=1@f$ on the initial solution field. The constraint
2187 * corresponds to the determinant of the deformation gradient in the
2188 * undeformed configuration, which is the identity tensor. We use
2189 * FE_DGP bases to interpolate the dilatation field, thus we can't
2190 * simply set the corresponding dof to unity as they correspond to the
2191 * coefficients of a truncated Legendre polynomial.
2192 * Thus we use the VectorTools::project function to do the work for us.
2193 * The VectorTools::project function requires an argument
2194 * indicating the hanging node constraints. We have none in this program,
2195 * so we have to create a constraint object. In its original state, constraint
2196 * objects are unsorted, and have to be sorted (using the
2197 * AffineConstraints::close function) before they can be used. Have a look at
2198 * @ref step_21 "step-21" for more information. We only need to enforce the initial condition
2199 * on the dilatation. In order to do this, we make use of a
2200 * ComponentSelectFunction which acts as a mask and sets the
2201 * n_components to 1. This is exactly what we want. Have a look at its usage
2202 * in @ref step_20 "step-20" for more information.
2203 *
2204 * @code
2205 *   template <int dim>
2206 *   void Solid<dim>::run()
2207 *   {
2208 *   make_grid();
2209 *   system_setup();
2210 *   {
2211 *   AffineConstraints<double> constraints;
2212 *   constraints.close();
2213 *  
2214 *   const ComponentSelectFunction<dim> J_mask(J_component, n_components);
2215 *  
2216 *   VectorTools::project(
2217 *   dof_handler, constraints, QGauss<dim>(degree + 2), J_mask, solution_n);
2218 *   }
2219 *   output_results();
2220 *   time.increment();
2221 *  
2222 * @endcode
2223 *
2224 * We then declare the incremental solution update @f$\varDelta
2225 * \mathbf{\Xi} \dealcoloneq \{\varDelta \mathbf{u}, \varDelta \widetilde{p},
2226 * \varDelta \widetilde{J} \}@f$ and start the loop over the time domain.
2227 *
2228
2229 *
2230 * At the beginning, we reset the solution update for this time step...
2231 *
2232 * @code
2233 *   BlockVector<double> solution_delta(dofs_per_block);
2234 *   while (time.current() < time.end())
2235 *   {
2236 *   solution_delta = 0.0;
2237 *  
2238 * @endcode
2239 *
2240 * ...solve the current time step and update total solution vector
2241 * @f$\mathbf{\Xi}_{\textrm{n}} = \mathbf{\Xi}_{\textrm{n-1}} +
2242 * \varDelta \mathbf{\Xi}@f$...
2243 *
2244 * @code
2245 *   solve_nonlinear_timestep(solution_delta);
2246 *   solution_n += solution_delta;
2247 *  
2248 * @endcode
2249 *
2250 * ...and plot the results before moving on happily to the next time
2251 * step:
2252 *
2253 * @code
2254 *   output_results();
2255 *   time.increment();
2256 *   }
2257 *   }
2258 *  
2259 *  
2260 * @endcode
2261 *
2262 *
2263 * <a name="step_44-Privateinterface"></a>
2264 * <h3>Private interface</h3>
2265 *
2266
2267 *
2268 *
2269 * <a name="step_44-Threadingbuildingblocksstructures"></a>
2270 * <h4>Threading-building-blocks structures</h4>
2271 *
2272
2273 *
2274 * The first group of private member functions is related to parallelization.
2275 * We use the Threading Building Blocks library (TBB) to perform as many
2276 * computationally intensive distributed tasks as possible. In particular, we
2277 * assemble the tangent matrix and right hand side vector, the static
2278 * condensation contributions, and update data stored at the quadrature points
2279 * using TBB. Our main tool for this is the WorkStream class (see the @ref
2280 * threads topic for more information).
2281 *
2282
2283 *
2284 * Firstly we deal with the tangent matrix and right-hand side assembly
2285 * structures. The PerTaskData object stores local contributions to the global
2286 * system.
2287 *
2288 * @code
2289 *   template <int dim>
2290 *   struct Solid<dim>::PerTaskData_ASM
2291 *   {
2292 *   FullMatrix<double> cell_matrix;
2293 *   Vector<double> cell_rhs;
2294 *   std::vector<types::global_dof_index> local_dof_indices;
2295 *  
2296 *   PerTaskData_ASM(const unsigned int dofs_per_cell)
2297 *   : cell_matrix(dofs_per_cell, dofs_per_cell)
2298 *   , cell_rhs(dofs_per_cell)
2299 *   , local_dof_indices(dofs_per_cell)
2300 *   {}
2301 *  
2302 *   void reset()
2303 *   {
2304 *   cell_matrix = 0.0;
2305 *   cell_rhs = 0.0;
2306 *   }
2307 *   };
2308 *  
2309 *  
2310 * @endcode
2311 *
2312 * On the other hand, the ScratchData object stores the larger objects such as
2313 * the shape-function values array (<code>Nx</code>) and a shape function
2314 * gradient and symmetric gradient vector which we will use during the
2315 * assembly.
2316 *
2317 * @code
2318 *   template <int dim>
2319 *   struct Solid<dim>::ScratchData_ASM
2320 *   {
2321 *   FEValues<dim> fe_values;
2322 *   FEFaceValues<dim> fe_face_values;
2323 *  
2324 *   std::vector<std::vector<double>> Nx;
2325 *   std::vector<std::vector<Tensor<2, dim>>> grad_Nx;
2326 *   std::vector<std::vector<SymmetricTensor<2, dim>>> symm_grad_Nx;
2327 *  
2328 *   ScratchData_ASM(const FiniteElement<dim> &fe_cell,
2329 *   const QGauss<dim> &qf_cell,
2330 *   const UpdateFlags uf_cell,
2331 *   const QGauss<dim - 1> &qf_face,
2332 *   const UpdateFlags uf_face)
2333 *   : fe_values(fe_cell, qf_cell, uf_cell)
2334 *   , fe_face_values(fe_cell, qf_face, uf_face)
2335 *   , Nx(qf_cell.size(), std::vector<double>(fe_cell.n_dofs_per_cell()))
2336 *   , grad_Nx(qf_cell.size(),
2337 *   std::vector<Tensor<2, dim>>(fe_cell.n_dofs_per_cell()))
2338 *   , symm_grad_Nx(qf_cell.size(),
2339 *   std::vector<SymmetricTensor<2, dim>>(
2340 *   fe_cell.n_dofs_per_cell()))
2341 *   {}
2342 *  
2343 *   ScratchData_ASM(const ScratchData_ASM &rhs)
2344 *   : fe_values(rhs.fe_values.get_fe(),
2345 *   rhs.fe_values.get_quadrature(),
2346 *   rhs.fe_values.get_update_flags())
2347 *   , fe_face_values(rhs.fe_face_values.get_fe(),
2348 *   rhs.fe_face_values.get_quadrature(),
2349 *   rhs.fe_face_values.get_update_flags())
2350 *   , Nx(rhs.Nx)
2351 *   , grad_Nx(rhs.grad_Nx)
2352 *   , symm_grad_Nx(rhs.symm_grad_Nx)
2353 *   {}
2354 *  
2355 *   void reset()
2356 *   {
2357 *   const unsigned int n_q_points = Nx.size();
2358 *   const unsigned int n_dofs_per_cell = Nx[0].size();
2359 *   for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
2360 *   {
2361 *   AssertDimension(Nx[q_point].size(), n_dofs_per_cell);
2362 *   AssertDimension(grad_Nx[q_point].size(), n_dofs_per_cell);
2363 *   AssertDimension(symm_grad_Nx[q_point].size(), n_dofs_per_cell);
2364 *  
2365 *   for (unsigned int k = 0; k < n_dofs_per_cell; ++k)
2366 *   {
2367 *   Nx[q_point][k] = 0.0;
2368 *   grad_Nx[q_point][k] = 0.0;
2369 *   symm_grad_Nx[q_point][k] = 0.0;
2370 *   }
2371 *   }
2372 *   }
2373 *   };
2374 *  
2375 *  
2376 * @endcode
2377 *
2378 * Then we define structures to assemble the statically condensed tangent
2379 * matrix. Recall that we wish to solve for a displacement-based formulation.
2380 * We do the condensation at the element level as the @f$\widetilde{p}@f$ and
2381 * @f$\widetilde{J}@f$ fields are element-wise discontinuous. As these operations
2382 * are matrix-based, we need to set up a number of matrices to store the local
2383 * contributions from a number of the tangent matrix sub-blocks. We place
2384 * these in the PerTaskData struct.
2385 *
2386
2387 *
2388 * We choose not to reset any data in the <code>reset()</code> function as the
2389 * matrix extraction and replacement tools will take care of this.
2390 *
2391 * @code
2392 *   template <int dim>
2393 *   struct Solid<dim>::PerTaskData_SC
2394 *   {
2395 *   FullMatrix<double> cell_matrix;
2396 *   std::vector<types::global_dof_index> local_dof_indices;
2397 *  
2398 *   FullMatrix<double> k_orig;
2399 *   FullMatrix<double> k_pu;
2400 *   FullMatrix<double> k_pJ;
2401 *   FullMatrix<double> k_JJ;
2402 *   FullMatrix<double> k_pJ_inv;
2403 *   FullMatrix<double> k_bbar;
2404 *   FullMatrix<double> A;
2405 *   FullMatrix<double> B;
2406 *   FullMatrix<double> C;
2407 *  
2408 *   PerTaskData_SC(const unsigned int dofs_per_cell,
2409 *   const unsigned int n_u,
2410 *   const unsigned int n_p,
2411 *   const unsigned int n_J)
2412 *   : cell_matrix(dofs_per_cell, dofs_per_cell)
2413 *   , local_dof_indices(dofs_per_cell)
2414 *   , k_orig(dofs_per_cell, dofs_per_cell)
2415 *   , k_pu(n_p, n_u)
2416 *   , k_pJ(n_p, n_J)
2417 *   , k_JJ(n_J, n_J)
2418 *   , k_pJ_inv(n_p, n_J)
2419 *   , k_bbar(n_u, n_u)
2420 *   , A(n_J, n_u)
2421 *   , B(n_J, n_u)
2422 *   , C(n_p, n_u)
2423 *   {}
2424 *  
2425 *   void reset()
2426 *   {}
2427 *   };
2428 *  
2429 *  
2430 * @endcode
2431 *
2432 * The ScratchData object for the operations we wish to perform here is empty
2433 * since we need no temporary data, but it still needs to be defined for the
2434 * current implementation of TBB in deal.II. So we create a dummy struct for
2435 * this purpose.
2436 *
2437 * @code
2438 *   template <int dim>
2439 *   struct Solid<dim>::ScratchData_SC
2440 *   {
2441 *   void reset()
2442 *   {}
2443 *   };
2444 *  
2445 *  
2446 * @endcode
2447 *
2448 * And finally we define the structures to assist with updating the quadrature
2449 * point information. Similar to the SC assembly process, we do not need the
2450 * PerTaskData object (since there is nothing to store here) but must define
2451 * one nonetheless. Note that this is because for the operation that we have
2452 * here -- updating the data on quadrature points -- the operation is purely
2453 * local: the things we do on every cell get consumed on every cell, without
2454 * any global aggregation operation as is usually the case when using the
2455 * WorkStream class. The fact that we still have to define a per-task data
2456 * structure points to the fact that the WorkStream class may be ill-suited to
2457 * this operation (we could, in principle simply create a new task using
2458 * Threads::new_task for each cell) but there is no harm done to doing
2459 * it this way anyway.
2460 * Furthermore, should there be different material models associated with a
2461 * quadrature point, requiring varying levels of computational expense, then
2462 * the method used here could be advantageous.
2463 *
2464 * @code
2465 *   template <int dim>
2466 *   struct Solid<dim>::PerTaskData_UQPH
2467 *   {
2468 *   void reset()
2469 *   {}
2470 *   };
2471 *  
2472 *  
2473 * @endcode
2474 *
2475 * The ScratchData object will be used to store an alias for the solution
2476 * vector so that we don't have to copy this large data structure. We then
2477 * define a number of vectors to extract the solution values and gradients at
2478 * the quadrature points.
2479 *
2480 * @code
2481 *   template <int dim>
2482 *   struct Solid<dim>::ScratchData_UQPH
2483 *   {
2484 *   const BlockVector<double> &solution_total;
2485 *  
2486 *   std::vector<Tensor<2, dim>> solution_grads_u_total;
2487 *   std::vector<double> solution_values_p_total;
2488 *   std::vector<double> solution_values_J_total;
2489 *  
2490 *   FEValues<dim> fe_values;
2491 *  
2492 *   ScratchData_UQPH(const FiniteElement<dim> &fe_cell,
2493 *   const QGauss<dim> &qf_cell,
2494 *   const UpdateFlags uf_cell,
2495 *   const BlockVector<double> &solution_total)
2496 *   : solution_total(solution_total)
2497 *   , solution_grads_u_total(qf_cell.size())
2498 *   , solution_values_p_total(qf_cell.size())
2499 *   , solution_values_J_total(qf_cell.size())
2500 *   , fe_values(fe_cell, qf_cell, uf_cell)
2501 *   {}
2502 *  
2503 *   ScratchData_UQPH(const ScratchData_UQPH &rhs)
2504 *   : solution_total(rhs.solution_total)
2505 *   , solution_grads_u_total(rhs.solution_grads_u_total)
2506 *   , solution_values_p_total(rhs.solution_values_p_total)
2507 *   , solution_values_J_total(rhs.solution_values_J_total)
2508 *   , fe_values(rhs.fe_values.get_fe(),
2509 *   rhs.fe_values.get_quadrature(),
2510 *   rhs.fe_values.get_update_flags())
2511 *   {}
2512 *  
2513 *   void reset()
2514 *   {
2515 *   const unsigned int n_q_points = solution_grads_u_total.size();
2516 *   for (unsigned int q = 0; q < n_q_points; ++q)
2517 *   {
2518 *   solution_grads_u_total[q] = 0.0;
2519 *   solution_values_p_total[q] = 0.0;
2520 *   solution_values_J_total[q] = 0.0;
2521 *   }
2522 *   }
2523 *   };
2524 *  
2525 *  
2526 * @endcode
2527 *
2528 *
2529 * <a name="step_44-Solidmake_grid"></a>
2530 * <h4>Solid::make_grid</h4>
2531 *
2532
2533 *
2534 * On to the first of the private member functions. Here we create the
2535 * triangulation of the domain, for which we choose the scaled cube with each
2536 * face given a boundary ID number. The grid must be refined at least once
2537 * for the indentation problem.
2538 *
2539
2540 *
2541 * We then determine the volume of the reference configuration and print it
2542 * for comparison:
2543 *
2544 * @code
2545 *   template <int dim>
2546 *   void Solid<dim>::make_grid()
2547 *   {
2548 *   GridGenerator::hyper_rectangle(
2549 *   triangulation,
2550 *   (dim == 3 ? Point<dim>(0.0, 0.0, 0.0) : Point<dim>(0.0, 0.0)),
2551 *   (dim == 3 ? Point<dim>(1.0, 1.0, 1.0) : Point<dim>(1.0, 1.0)),
2552 *   true);
2553 *   GridTools::scale(parameters.scale, triangulation);
2554 *   triangulation.refine_global(std::max(1U, parameters.global_refinement));
2555 *  
2556 *   vol_reference = GridTools::volume(triangulation);
2557 *   std::cout << "Grid:\n\t Reference volume: " << vol_reference << std::endl;
2558 *  
2559 * @endcode
2560 *
2561 * Since we wish to apply a Neumann BC to a patch on the top surface, we
2562 * must find the cell faces in this part of the domain and mark them with
2563 * a distinct boundary ID number. The faces we are looking for are on the
2564 * +y surface and will get boundary ID 6 (zero through five are already
2565 * used when creating the six faces of the cube domain):
2566 *
2567 * @code
2568 *   for (const auto &cell : triangulation.active_cell_iterators())
2569 *   for (const auto &face : cell->face_iterators())
2570 *   {
2571 *   if (face->at_boundary() == true &&
2572 *   face->center()[1] == 1.0 * parameters.scale)
2573 *   {
2574 *   if (dim == 3)
2575 *   {
2576 *   if (face->center()[0] < 0.5 * parameters.scale &&
2577 *   face->center()[2] < 0.5 * parameters.scale)
2578 *   face->set_boundary_id(6);
2579 *   }
2580 *   else
2581 *   {
2582 *   if (face->center()[0] < 0.5 * parameters.scale)
2583 *   face->set_boundary_id(6);
2584 *   }
2585 *   }
2586 *   }
2587 *   }
2588 *  
2589 *  
2590 * @endcode
2591 *
2592 *
2593 * <a name="step_44-Solidsystem_setup"></a>
2594 * <h4>Solid::system_setup</h4>
2595 *
2596
2597 *
2598 * Next we describe how the FE system is set up. We first determine the number
2599 * of components per block. Since the displacement is a vector component, the
2600 * first dim components belong to it, while the next two describe scalar
2601 * pressure and dilatation DOFs.
2602 *
2603 * @code
2604 *   template <int dim>
2605 *   void Solid<dim>::system_setup()
2606 *   {
2607 *   timer.enter_subsection("Setup system");
2608 *  
2609 *   std::vector<unsigned int> block_component(n_components,
2610 *   u_dof); // Displacement
2611 *   block_component[p_component] = p_dof; // Pressure
2612 *   block_component[J_component] = J_dof; // Dilatation
2613 *  
2614 * @endcode
2615 *
2616 * The DOF handler is then initialized and we renumber the grid in an
2617 * efficient manner. We also record the number of DOFs per block.
2618 *
2619 * @code
2620 *   dof_handler.distribute_dofs(fe);
2621 *   DoFRenumbering::Cuthill_McKee(dof_handler);
2622 *   DoFRenumbering::component_wise(dof_handler, block_component);
2623 *  
2624 *   dofs_per_block =
2625 *   DoFTools::count_dofs_per_fe_block(dof_handler, block_component);
2626 *  
2627 *   std::cout << "Triangulation:"
2628 *   << "\n\t Number of active cells: "
2629 *   << triangulation.n_active_cells()
2630 *   << "\n\t Number of degrees of freedom: " << dof_handler.n_dofs()
2631 *   << std::endl;
2632 *  
2633 * @endcode
2634 *
2635 * Set up the sparsity pattern and tangent matrix
2636 *
2637 * @code
2638 *   tangent_matrix.clear();
2639 *   {
2640 *   BlockDynamicSparsityPattern dsp(dofs_per_block, dofs_per_block);
2641 *  
2642 * @endcode
2643 *
2644 * The global system matrix initially has the following structure
2645 * @f{align*}{
2646 * \underbrace{\begin{bmatrix}
2647 * \mathsf{\mathbf{K}}_{uu} & \mathsf{\mathbf{K}}_{u\widetilde{p}} &
2648 * \mathbf{0}
2649 * \\ \mathsf{\mathbf{K}}_{\widetilde{p}u} & \mathbf{0} &
2650 * \mathsf{\mathbf{K}}_{\widetilde{p}\widetilde{J}}
2651 * \\ \mathbf{0} & \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{p}} &
2652 * \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{J}}
2653 * \end{bmatrix}}_{\mathsf{\mathbf{K}}(\mathbf{\Xi}_{\textrm{i}})}
2654 * \underbrace{\begin{bmatrix}
2655 * d \mathsf{u}
2656 * \\ d \widetilde{\mathsf{\mathbf{p}}}
2657 * \\ d \widetilde{\mathsf{\mathbf{J}}}
2658 * \end{bmatrix}}_{d \mathbf{\Xi}}
2659 * =
2660 * \underbrace{\begin{bmatrix}
2661 * \mathsf{\mathbf{F}}_{u}(\mathbf{u}_{\textrm{i}})
2662 * \\ \mathsf{\mathbf{F}}_{\widetilde{p}}(\widetilde{p}_{\textrm{i}})
2663 * \\ \mathsf{\mathbf{F}}_{\widetilde{J}}(\widetilde{J}_{\textrm{i}})
2664 * \end{bmatrix}}_{ \mathsf{\mathbf{F}}(\mathbf{\Xi}_{\textrm{i}}) } \, .
2665 * @f}
2666 * We optimize the sparsity pattern to reflect this structure
2667 * and prevent unnecessary data creation for the right-diagonal
2668 * block components.
2669 *
2670 * @code
2671 *   Table<2, DoFTools::Coupling> coupling(n_components, n_components);
2672 *   for (unsigned int ii = 0; ii < n_components; ++ii)
2673 *   for (unsigned int jj = 0; jj < n_components; ++jj)
2674 *   if (((ii < p_component) && (jj == J_component)) ||
2675 *   ((ii == J_component) && (jj < p_component)) ||
2676 *   ((ii == p_component) && (jj == p_component)))
2677 *   coupling[ii][jj] = DoFTools::none;
2678 *   else
2679 *   coupling[ii][jj] = DoFTools::always;
2680 *   DoFTools::make_sparsity_pattern(
2681 *   dof_handler, coupling, dsp, constraints, false);
2682 *   sparsity_pattern.copy_from(dsp);
2683 *   }
2684 *  
2685 *   tangent_matrix.reinit(sparsity_pattern);
2686 *  
2687 * @endcode
2688 *
2689 * We then set up storage vectors
2690 *
2691 * @code
2692 *   system_rhs.reinit(dofs_per_block);
2693 *   solution_n.reinit(dofs_per_block);
2694 *  
2695 * @endcode
2696 *
2697 * ...and finally set up the quadrature
2698 * point history:
2699 *
2700 * @code
2701 *   setup_qph();
2702 *  
2703 *   timer.leave_subsection();
2704 *   }
2705 *  
2706 *  
2707 * @endcode
2708 *
2709 *
2710 * <a name="step_44-Solidsetup_qph"></a>
2711 * <h4>Solid::setup_qph</h4>
2712 * The method used to store quadrature information is already described in
2713 * @ref step_18 "step-18". Here we implement a similar setup for a SMP machine.
2714 *
2715
2716 *
2717 * Firstly the actual QPH data objects are created. This must be done only
2718 * once the grid is refined to its finest level.
2719 *
2720 * @code
2721 *   template <int dim>
2722 *   void Solid<dim>::setup_qph()
2723 *   {
2724 *   std::cout << " Setting up quadrature point data..." << std::endl;
2725 *  
2726 *   quadrature_point_history.initialize(triangulation.begin_active(),
2727 *   triangulation.end(),
2728 *   n_q_points);
2729 *  
2730 * @endcode
2731 *
2732 * Next we set up the initial quadrature point data.
2733 * Note that when the quadrature point data is retrieved,
2734 * it is returned as a vector of smart pointers.
2735 *
2736 * @code
2737 *   for (const auto &cell : triangulation.active_cell_iterators())
2738 *   {
2739 *   const std::vector<std::shared_ptr<PointHistory<dim>>> lqph =
2740 *   quadrature_point_history.get_data(cell);
2741 *   AssertDimension(lqph.size(), n_q_points);
2742 *  
2743 *   for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
2744 *   lqph[q_point]->setup_lqp(parameters);
2745 *   }
2746 *   }
2747 *  
2748 * @endcode
2749 *
2750 *
2751 * <a name="step_44-Solidupdate_qph_incremental"></a>
2752 * <h4>Solid::update_qph_incremental</h4>
2753 * As the update of QP information occurs frequently and involves a number of
2754 * expensive operations, we define a multithreaded approach to distributing
2755 * the task across a number of CPU cores.
2756 *
2757
2758 *
2759 * To start this, we first need to obtain the total solution as it stands
2760 * at this Newton increment and then create the initial copy of the scratch
2761 * and copy data objects:
2762 *
2763 * @code
2764 *   template <int dim>
2765 *   void
2766 *   Solid<dim>::update_qph_incremental(const BlockVector<double> &solution_delta)
2767 *   {
2768 *   timer.enter_subsection("Update QPH data");
2769 *   std::cout << " UQPH " << std::flush;
2770 *  
2771 *   const BlockVector<double> solution_total(
2772 *   get_total_solution(solution_delta));
2773 *  
2774 *   const UpdateFlags uf_UQPH(update_values | update_gradients);
2775 *   PerTaskData_UQPH per_task_data_UQPH;
2776 *   ScratchData_UQPH scratch_data_UQPH(fe, qf_cell, uf_UQPH, solution_total);
2777 *  
2778 * @endcode
2779 *
2780 * We then pass them and the one-cell update function to the WorkStream to
2781 * be processed:
2782 *
2783 * @code
2784 *   WorkStream::run(dof_handler.active_cell_iterators(),
2785 *   *this,
2786 *   &Solid::update_qph_incremental_one_cell,
2787 *   &Solid::copy_local_to_global_UQPH,
2788 *   scratch_data_UQPH,
2789 *   per_task_data_UQPH);
2790 *  
2791 *   timer.leave_subsection();
2792 *   }
2793 *  
2794 *  
2795 * @endcode
2796 *
2797 * Now we describe how we extract data from the solution vector and pass it
2798 * along to each QP storage object for processing.
2799 *
2800 * @code
2801 *   template <int dim>
2802 *   void Solid<dim>::update_qph_incremental_one_cell(
2803 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
2804 *   ScratchData_UQPH &scratch,
2805 *   PerTaskData_UQPH & /*data*/)
2806 *   {
2807 *   const std::vector<std::shared_ptr<PointHistory<dim>>> lqph =
2808 *   quadrature_point_history.get_data(cell);
2809 *   AssertDimension(lqph.size(), n_q_points);
2810 *  
2811 *   AssertDimension(scratch.solution_grads_u_total.size(), n_q_points);
2812 *   AssertDimension(scratch.solution_values_p_total.size(), n_q_points);
2813 *   AssertDimension(scratch.solution_values_J_total.size(), n_q_points);
2814 *  
2815 *   scratch.reset();
2816 *  
2817 * @endcode
2818 *
2819 * We first need to find the values and gradients at quadrature points
2820 * inside the current cell and then we update each local QP using the
2821 * displacement gradient and total pressure and dilatation solution
2822 * values:
2823 *
2824 * @code
2825 *   scratch.fe_values.reinit(cell);
2826 *   scratch.fe_values[u_fe].get_function_gradients(
2827 *   scratch.solution_total, scratch.solution_grads_u_total);
2828 *   scratch.fe_values[p_fe].get_function_values(
2829 *   scratch.solution_total, scratch.solution_values_p_total);
2830 *   scratch.fe_values[J_fe].get_function_values(
2831 *   scratch.solution_total, scratch.solution_values_J_total);
2832 *  
2833 *   for (const unsigned int q_point :
2834 *   scratch.fe_values.quadrature_point_indices())
2835 *   lqph[q_point]->update_values(scratch.solution_grads_u_total[q_point],
2836 *   scratch.solution_values_p_total[q_point],
2837 *   scratch.solution_values_J_total[q_point]);
2838 *   }
2839 *  
2840 *  
2841 * @endcode
2842 *
2843 *
2844 * <a name="step_44-Solidsolve_nonlinear_timestep"></a>
2845 * <h4>Solid::solve_nonlinear_timestep</h4>
2846 *
2847
2848 *
2849 * The next function is the driver method for the Newton-Raphson scheme. At
2850 * its top we create a new vector to store the current Newton update step,
2851 * reset the error storage objects, and print the solver header.
2852 *
2853 * @code
2854 *   template <int dim>
2855 *   void Solid<dim>::solve_nonlinear_timestep(BlockVector<double> &solution_delta)
2856 *   {
2857 *   std::cout << std::endl
2858 *   << "Timestep " << time.get_timestep() << " @ " << time.current()
2859 *   << 's' << std::endl;
2860 *  
2861 *   BlockVector<double> newton_update(dofs_per_block);
2862 *  
2863 *   error_residual.reset();
2864 *   error_residual_0.reset();
2865 *   error_residual_norm.reset();
2866 *   error_update.reset();
2867 *   error_update_0.reset();
2868 *   error_update_norm.reset();
2869 *  
2870 *   print_conv_header();
2871 *  
2872 * @endcode
2873 *
2874 * We now perform a number of Newton iterations to iteratively solve the
2875 * nonlinear problem. Since the problem is fully nonlinear and we are
2876 * using a full Newton method, the data stored in the tangent matrix and
2877 * right-hand side vector is not reusable and must be cleared at each
2878 * Newton step. We then initially build the linear system and
2879 * check for convergence (and store this value in the first iteration).
2880 * The unconstrained DOFs of the rhs vector hold the out-of-balance
2881 * forces, and collectively determine whether or not the equilibrium
2882 * solution has been attained.
2883 *
2884
2885 *
2886 * Although for this particular problem we could potentially construct the
2887 * RHS vector before assembling the system matrix, for the sake of
2888 * extensibility we choose not to do so. The benefit to assembling the RHS
2889 * vector and system matrix separately is that the latter is an expensive
2890 * operation and we can potentially avoid an extra assembly process by not
2891 * assembling the tangent matrix when convergence is attained. However, this
2892 * makes parallelizing the code using MPI more difficult. Furthermore, when
2893 * extending the problem to the transient case additional contributions to
2894 * the RHS may result from the time discretization and application of
2895 * constraints for the velocity and acceleration fields.
2896 *
2897 * @code
2898 *   unsigned int newton_iteration = 0;
2899 *   for (; newton_iteration < parameters.max_iterations_NR; ++newton_iteration)
2900 *   {
2901 *   std::cout << ' ' << std::setw(2) << newton_iteration << ' '
2902 *   << std::flush;
2903 *  
2904 * @endcode
2905 *
2906 * We construct the linear system, but hold off on solving it
2907 * (a step that should be significantly more expensive than assembly):
2908 *
2909 * @code
2910 *   make_constraints(newton_iteration);
2911 *   assemble_system();
2912 *  
2913 * @endcode
2914 *
2915 * We can now determine the normalized residual error and check for
2916 * solution convergence:
2917 *
2918 * @code
2919 *   get_error_residual(error_residual);
2920 *   if (newton_iteration == 0)
2921 *   error_residual_0 = error_residual;
2922 *  
2923 *   error_residual_norm = error_residual;
2924 *   error_residual_norm.normalize(error_residual_0);
2925 *  
2926 *   if (newton_iteration > 0 && error_update_norm.u <= parameters.tol_u &&
2927 *   error_residual_norm.u <= parameters.tol_f)
2928 *   {
2929 *   std::cout << " CONVERGED! " << std::endl;
2930 *   print_conv_footer();
2931 *  
2932 *   break;
2933 *   }
2934 *  
2935 * @endcode
2936 *
2937 * If we have decided that we want to continue with the iteration, we
2938 * solve the linearized system:
2939 *
2940 * @code
2941 *   const std::pair<unsigned int, double> lin_solver_output =
2942 *   solve_linear_system(newton_update);
2943 *  
2944 * @endcode
2945 *
2946 * We can now determine the normalized Newton update error:
2947 *
2948 * @code
2949 *   get_error_update(newton_update, error_update);
2950 *   if (newton_iteration == 0)
2951 *   error_update_0 = error_update;
2952 *  
2953 *   error_update_norm = error_update;
2954 *   error_update_norm.normalize(error_update_0);
2955 *  
2956 * @endcode
2957 *
2958 * Lastly, since we implicitly accept the solution step we can perform
2959 * the actual update of the solution increment for the current time
2960 * step, update all quadrature point information pertaining to
2961 * this new displacement and stress state and continue iterating:
2962 *
2963 * @code
2964 *   solution_delta += newton_update;
2965 *   update_qph_incremental(solution_delta);
2966 *  
2967 *   std::cout << " | " << std::fixed << std::setprecision(3) << std::setw(7)
2968 *   << std::scientific << lin_solver_output.first << " "
2969 *   << lin_solver_output.second << " "
2970 *   << error_residual_norm.norm << " " << error_residual_norm.u
2971 *   << " " << error_residual_norm.p << " "
2972 *   << error_residual_norm.J << " " << error_update_norm.norm
2973 *   << " " << error_update_norm.u << " " << error_update_norm.p
2974 *   << " " << error_update_norm.J << " " << std::endl;
2975 *   }
2976 *  
2977 * @endcode
2978 *
2979 * At the end, if it turns out that we have in fact done more iterations
2980 * than the parameter file allowed, we raise an exception that can be
2981 * caught in the main() function. The call <code>AssertThrow(condition,
2982 * exc_object)</code> is in essence equivalent to <code>if (!cond) throw
2983 * exc_object;</code> but the former form fills certain fields in the
2984 * exception object that identify the location (filename and line number)
2985 * where the exception was raised to make it simpler to identify where the
2986 * problem happened.
2987 *
2988 * @code
2989 *   AssertThrow(newton_iteration < parameters.max_iterations_NR,
2990 *   ExcMessage("No convergence in nonlinear solver!"));
2991 *   }
2992 *  
2993 *  
2994 * @endcode
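 *
 * For illustration, the AssertThrow call above is conceptually the same as
 * the hand-written check below (a sketch, not part of the tutorial program;
 * the macro additionally records the file name and line number in the
 * exception object):
 *
 * @code
 *   // Hand-written equivalent of the AssertThrow statement above:
 *   if (!(newton_iteration < parameters.max_iterations_NR))
 *     throw ExcMessage("No convergence in nonlinear solver!");
 * @endcode
 *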
2995 *
2996 *
2997 * <a name="step_44-Solidprint_conv_headerandSolidprint_conv_footer"></a>
2998 * <h4>Solid::print_conv_header and Solid::print_conv_footer</h4>
2999 *
3000
3001 *
3002 * This program prints out data in a nice table that is updated
3003 * on a per-iteration basis. The next two functions set up the table
3004 * header and footer:
3005 *
3006 * @code
3007 *   template <int dim>
3008 *   void Solid<dim>::print_conv_header()
3009 *   {
3010 *   static const unsigned int l_width = 150;
3011 *  
3012 *   for (unsigned int i = 0; i < l_width; ++i)
3013 *   std::cout << '_';
3014 *   std::cout << std::endl;
3015 *  
3016 *   std::cout << " SOLVER STEP "
3017 *   << " | LIN_IT LIN_RES RES_NORM "
3018 *   << " RES_U RES_P RES_J NU_NORM "
3019 *   << " NU_U NU_P NU_J " << std::endl;
3020 *  
3021 *   for (unsigned int i = 0; i < l_width; ++i)
3022 *   std::cout << '_';
3023 *   std::cout << std::endl;
3024 *   }
3025 *  
3026 *  
3027 *  
3028 *   template <int dim>
3029 *   void Solid<dim>::print_conv_footer()
3030 *   {
3031 *   static const unsigned int l_width = 150;
3032 *  
3033 *   for (unsigned int i = 0; i < l_width; ++i)
3034 *   std::cout << '_';
3035 *   std::cout << std::endl;
3036 *  
3037 *   const std::pair<double, double> error_dil = get_error_dilation();
3038 *  
3039 *   std::cout << "Relative errors:" << std::endl
3040 *   << "Displacement:\t" << error_update.u / error_update_0.u
3041 *   << std::endl
3042 *   << "Force: \t\t" << error_residual.u / error_residual_0.u
3043 *   << std::endl
3044 *   << "Dilatation:\t" << error_dil.first << std::endl
3045 *   << "v / V_0:\t" << error_dil.second * vol_reference << " / "
3046 *   << vol_reference << " = " << error_dil.second << std::endl;
3047 *   }
3048 *  
3049 *  
3050 * @endcode
3051 *
3052 *
3053 * <a name="step_44-Solidget_error_dilation"></a>
3054 * <h4>Solid::get_error_dilation</h4>
3055 *
3056
3057 *
3058 * Calculate the volume of the domain in the spatial configuration
3059 *
3060 * @code
3061 *   template <int dim>
3062 *   double Solid<dim>::compute_vol_current() const
3063 *   {
3064 *   double vol_current = 0.0;
3065 *  
3066 *   FEValues<dim> fe_values(fe, qf_cell, update_JxW_values);
3067 *  
3068 *   for (const auto &cell : triangulation.active_cell_iterators())
3069 *   {
3070 *   fe_values.reinit(cell);
3071 *  
3072 * @endcode
3073 *
3074 * In contrast to that which was previously called for,
3075 * in this instance the quadrature point data is specifically
3076 * non-modifiable since we will only be accessing data.
3077 * We ensure that the right get_data function is called by
3078 * marking this function as const.
3079 *
3080 * @code
3081 *   const std::vector<std::shared_ptr<const PointHistory<dim>>> lqph =
3082 *   quadrature_point_history.get_data(cell);
3083 *   AssertDimension(lqph.size(), n_q_points);
3084 *  
3085 *   for (const unsigned int q_point : fe_values.quadrature_point_indices())
3086 *   {
3087 *   const double det_F_qp = lqph[q_point]->get_det_F();
3088 *   const double JxW = fe_values.JxW(q_point);
3089 *  
3090 *   vol_current += det_F_qp * JxW;
3091 *   }
3092 *   }
3093 *   Assert(vol_current > 0.0, ExcInternalError());
3094 *   return vol_current;
3095 *   }
3096 *  
3097 * @endcode
3098 *
3099 * Calculate how well the dilatation @f$\widetilde{J}@f$ agrees with @f$J
3100 * \dealcoloneq \textrm{det}\ \mathbf{F}@f$ from the @f$L^2@f$ error @f$ \bigl[
3101 * \int_{\Omega_0} {[ J - \widetilde{J}]}^{2}\textrm{d}V \bigr]^{1/2}@f$.
3102 * We also return the ratio of the current volume of the
3103 * domain to the reference volume. This is of interest for incompressible
3104 * media where we want to check how well the isochoric constraint has been
3105 * enforced.
3106 *
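 * Written out in the quadrature form that the loop below evaluates, this
 * error is
 * @f{align*}{
 *  e_{\widetilde{J}} \approx \biggl[ \sum_{\textrm{cells}} \sum_{q}
 *  \bigl( J_q - \widetilde{J}_q \bigr)^{2} \, \textrm{JxW}_q \biggr]^{1/2} \, ,
 * @f}
 * where @f$J_q@f$ and @f$\widetilde{J}_q@f$ are the values stored at the
 * quadrature point @f$q@f$ and @f$\textrm{JxW}_q@f$ is the corresponding
 * Jacobian times quadrature weight.
 *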
3107 * @code
3108 *   template <int dim>
3109 *   std::pair<double, double> Solid<dim>::get_error_dilation() const
3110 *   {
3111 *   double dil_L2_error = 0.0;
3112 *  
3113 *   FEValues<dim> fe_values(fe, qf_cell, update_JxW_values);
3114 *  
3115 *   for (const auto &cell : triangulation.active_cell_iterators())
3116 *   {
3117 *   fe_values.reinit(cell);
3118 *  
3119 *   const std::vector<std::shared_ptr<const PointHistory<dim>>> lqph =
3120 *   quadrature_point_history.get_data(cell);
3121 *   AssertDimension(lqph.size(), n_q_points);
3122 *  
3123 *   for (const unsigned int q_point : fe_values.quadrature_point_indices())
3124 *   {
3125 *   const double det_F_qp = lqph[q_point]->get_det_F();
3126 *   const double J_tilde_qp = lqph[q_point]->get_J_tilde();
3127 *   const double the_error_qp_squared =
3128 *   Utilities::fixed_power<2>((det_F_qp - J_tilde_qp));
3129 *   const double JxW = fe_values.JxW(q_point);
3130 *  
3131 *   dil_L2_error += the_error_qp_squared * JxW;
3132 *   }
3133 *   }
3134 *  
3135 *   return std::make_pair(std::sqrt(dil_L2_error),
3136 *   compute_vol_current() / vol_reference);
3137 *   }
3138 *  
3139 *  
3140 * @endcode
3141 *
3142 *
3143 * <a name="step_44-Solidget_error_residual"></a>
3144 * <h4>Solid::get_error_residual</h4>
3145 *
3146
3147 *
3148 * Determine the true residual error for the problem. That is, determine the
3149 * error in the residual for the unconstrained degrees of freedom. Note that
3150 * to do so, we need to ignore constrained DOFs by setting the residual in
3151 * these vector components to zero.
3152 *
3153 * @code
3154 *   template <int dim>
3155 *   void Solid<dim>::get_error_residual(Errors &error_residual)
3156 *   {
3157 *   BlockVector<double> error_res(dofs_per_block);
3158 *  
3159 *   for (unsigned int i = 0; i < dof_handler.n_dofs(); ++i)
3160 *   if (!constraints.is_constrained(i))
3161 *   error_res(i) = system_rhs(i);
3162 *  
3163 *   error_residual.norm = error_res.l2_norm();
3164 *   error_residual.u = error_res.block(u_dof).l2_norm();
3165 *   error_residual.p = error_res.block(p_dof).l2_norm();
3166 *   error_residual.J = error_res.block(J_dof).l2_norm();
3167 *   }
3168 *  
3169 *  
3170 * @endcode
3171 *
3172 *
3173 * <a name="step_44-Solidget_error_update"></a>
3174 * <h4>Solid::get_error_update</h4>
3175 *
3176
3177 *
3178 * Determine the true Newton update error for the problem
3179 *
3180 * @code
3181 *   template <int dim>
3182 *   void Solid<dim>::get_error_update(const BlockVector<double> &newton_update,
3183 *   Errors &error_update)
3184 *   {
3185 *   BlockVector<double> error_ud(dofs_per_block);
3186 *   for (unsigned int i = 0; i < dof_handler.n_dofs(); ++i)
3187 *   if (!constraints.is_constrained(i))
3188 *   error_ud(i) = newton_update(i);
3189 *  
3190 *   error_update.norm = error_ud.l2_norm();
3191 *   error_update.u = error_ud.block(u_dof).l2_norm();
3192 *   error_update.p = error_ud.block(p_dof).l2_norm();
3193 *   error_update.J = error_ud.block(J_dof).l2_norm();
3194 *   }
3195 *  
3196 *  
3197 *  
3198 * @endcode
3199 *
3200 *
3201 * <a name="step_44-Solidget_total_solution"></a>
3202 * <h4>Solid::get_total_solution</h4>
3203 *
3204
3205 *
3206 * This function provides the total solution, which is valid at any Newton
3207 * step. This is required as, to reduce computational error, the total
3208 * solution is only updated at the end of the timestep.
3209 *
3210 * @code
3211 *   template <int dim>
3212 *   BlockVector<double> Solid<dim>::get_total_solution(
3213 *   const BlockVector<double> &solution_delta) const
3214 *   {
3215 *   BlockVector<double> solution_total(solution_n);
3216 *   solution_total += solution_delta;
3217 *   return solution_total;
3218 *   }
3219 *  
3220 *  
3221 * @endcode
3222 *
3223 *
3224 * <a name="step_44-Solidassemble_system"></a>
3225 * <h4>Solid::assemble_system</h4>
3226 *
3227
3228 *
3229 * Since we use TBB for assembly, we simply set up a copy of the
3230 * data structures required for the process and pass them, along
3231 * with the assembly functions, to the WorkStream object for processing. Note
3232 * that we must ensure that the matrix and RHS vector are reset before any
3233 * assembly operations can occur. Furthermore, since we are describing a
3234 * problem with Neumann BCs, we will need the face normals and so must specify
3235 * this in the face update flags.
3236 *
3237 * @code
3238 *   template <int dim>
3239 *   void Solid<dim>::assemble_system()
3240 *   {
3241 *   timer.enter_subsection("Assemble system");
3242 *   std::cout << " ASM_SYS " << std::flush;
3243 *  
3244 *   tangent_matrix = 0.0;
3245 *   system_rhs = 0.0;
3246 *  
3247 *   const UpdateFlags uf_cell(update_values | update_gradients |
3248 *   update_JxW_values);
3249 *   const UpdateFlags uf_face(update_values | update_normal_vectors |
3250 *   update_JxW_values);
3251 *  
3252 *   PerTaskData_ASM per_task_data(dofs_per_cell);
3253 *   ScratchData_ASM scratch_data(fe, qf_cell, uf_cell, qf_face, uf_face);
3254 *  
3255 * @endcode
3256 *
3257 * The syntax used here to pass data to the WorkStream class
3258 * is discussed in @ref step_13 "step-13".
3259 *
3260 * @code
3261 *   WorkStream::run(
3262 *   dof_handler.active_cell_iterators(),
3263 *   [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
3264 *   ScratchData_ASM &scratch,
3265 *   PerTaskData_ASM &data) {
3266 *   this->assemble_system_one_cell(cell, scratch, data);
3267 *   },
3268 *   [this](const PerTaskData_ASM &data) {
3269 *   this->constraints.distribute_local_to_global(data.cell_matrix,
3270 *   data.cell_rhs,
3271 *   data.local_dof_indices,
3272 *   tangent_matrix,
3273 *   system_rhs);
3274 *   },
3275 *   scratch_data,
3276 *   per_task_data);
3277 *  
3278 *   timer.leave_subsection();
3279 *   }
3280 *  
3281 * @endcode
3282 *
3283 * Of course, we still have to define how we assemble the tangent matrix
3284 * contribution for a single cell. We first need to reset and initialize some
3285 * of the scratch data structures and retrieve some basic information
3286 * regarding the DOF numbering on this cell. We can precalculate the cell
3287 * shape function values and gradients. Note that the shape function gradients
3288 * are defined with regard to the current configuration. That is
3289 * @f$\textrm{grad}\ \boldsymbol{\varphi} = \textrm{Grad}\ \boldsymbol{\varphi}
3290 * \ \mathbf{F}^{-1}@f$.
3291 *
3292 * @code
3293 *   template <int dim>
3294 *   void Solid<dim>::assemble_system_one_cell(
3295 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
3296 *   ScratchData_ASM &scratch,
3297 *   PerTaskData_ASM &data) const
3298 *   {
3299 *   data.reset();
3300 *   scratch.reset();
3301 *   scratch.fe_values.reinit(cell);
3302 *   cell->get_dof_indices(data.local_dof_indices);
3303 *  
3304 *   const std::vector<std::shared_ptr<const PointHistory<dim>>> lqph =
3305 *   quadrature_point_history.get_data(cell);
3306 *   AssertDimension(lqph.size(), n_q_points);
3307 *  
3308 *   for (const unsigned int q_point :
3309 *   scratch.fe_values.quadrature_point_indices())
3310 *   {
3311 *   const Tensor<2, dim> F_inv = lqph[q_point]->get_F_inv();
3312 *   for (const unsigned int k : scratch.fe_values.dof_indices())
3313 *   {
3314 *   const unsigned int k_group = fe.system_to_base_index(k).first.first;
3315 *  
3316 *   if (k_group == u_dof)
3317 *   {
3318 *   scratch.grad_Nx[q_point][k] =
3319 *   scratch.fe_values[u_fe].gradient(k, q_point) * F_inv;
3320 *   scratch.symm_grad_Nx[q_point][k] =
3321 *   symmetrize(scratch.grad_Nx[q_point][k]);
3322 *   }
3323 *   else if (k_group == p_dof)
3324 *   scratch.Nx[q_point][k] =
3325 *   scratch.fe_values[p_fe].value(k, q_point);
3326 *   else if (k_group == J_dof)
3327 *   scratch.Nx[q_point][k] =
3328 *   scratch.fe_values[J_fe].value(k, q_point);
3329 *   else
3330 *   DEAL_II_ASSERT_UNREACHABLE();
3331 *   }
3332 *   }
3333 *  
3334 * @endcode
3335 *
3336 * Now we build the local cell @ref GlossStiffnessMatrix "stiffness matrix" and RHS vector. Since the
3337 * global and local system matrices are symmetric, we can exploit this
3338 * property by building only the lower half of the local matrix and copying
3339 * the values to the upper half. So we only assemble half of the
3340 * @f$\mathsf{\mathbf{k}}_{uu}@f$, @f$\mathsf{\mathbf{k}}_{\widetilde{p}
3341 * \widetilde{p}} = \mathbf{0}@f$, @f$\mathsf{\mathbf{k}}_{\widetilde{J}
3342 * \widetilde{J}}@f$ blocks, while the whole
3343 * @f$\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}@f$,
3344 * @f$\mathsf{\mathbf{k}}_{u \widetilde{J}} = \mathbf{0}@f$,
3345 * @f$\mathsf{\mathbf{k}}_{u \widetilde{p}}@f$ blocks are built.
3346 *
3347
3348 *
3349 * In doing so, we first extract some configuration dependent variables
3350 * from our quadrature history objects for the current quadrature point.
3351 *
3352 * @code
3353 *   for (const unsigned int q_point :
3354 *   scratch.fe_values.quadrature_point_indices())
3355 *   {
3356 *   const SymmetricTensor<2, dim> tau = lqph[q_point]->get_tau();
3357 *   const Tensor<2, dim> tau_ns = lqph[q_point]->get_tau();
3358 *   const SymmetricTensor<4, dim> Jc = lqph[q_point]->get_Jc();
3359 *   const double det_F = lqph[q_point]->get_det_F();
3360 *   const double p_tilde = lqph[q_point]->get_p_tilde();
3361 *   const double J_tilde = lqph[q_point]->get_J_tilde();
3362 *   const double dPsi_vol_dJ = lqph[q_point]->get_dPsi_vol_dJ();
3363 *   const double d2Psi_vol_dJ2 = lqph[q_point]->get_d2Psi_vol_dJ2();
3364 *   const SymmetricTensor<2, dim> &I =
3365 *   Physics::Elasticity::StandardTensors<dim>::I;
3366 *  
3367 * @endcode
3368 *
3369 * These two tensors store some precomputed data. Their use will
3370 * be explained shortly.
3371 *
3372 * @code
3373 *   SymmetricTensor<2, dim> symm_grad_Nx_i_x_Jc;
3374 *   Tensor<1, dim> grad_Nx_i_comp_i_x_tau;
3375 *  
3376 * @endcode
3377 *
3378 * Next we define some aliases to make the assembly process easier to
3379 * follow.
3380 *
3381 * @code
3382 *   const std::vector<double> &N = scratch.Nx[q_point];
3383 *   const std::vector<SymmetricTensor<2, dim>> &symm_grad_Nx =
3384 *   scratch.symm_grad_Nx[q_point];
3385 *   const std::vector<Tensor<2, dim>> &grad_Nx = scratch.grad_Nx[q_point];
3386 *   const double JxW = scratch.fe_values.JxW(q_point);
3387 *  
3388 *   for (const unsigned int i : scratch.fe_values.dof_indices())
3389 *   {
3390 *   const unsigned int component_i =
3391 *   fe.system_to_component_index(i).first;
3392 *   const unsigned int i_group = fe.system_to_base_index(i).first.first;
3393 *  
3394 * @endcode
3395 *
3396 * We first compute the contributions
3397 * from the internal forces. Note, by
3398 * definition of the rhs as the negative
3399 * of the residual, these contributions
3400 * are subtracted.
3401 *
3402 * @code
3403 *   if (i_group == u_dof)
3404 *   data.cell_rhs(i) -= (symm_grad_Nx[i] * tau) * JxW;
3405 *   else if (i_group == p_dof)
3406 *   data.cell_rhs(i) -= N[i] * (det_F - J_tilde) * JxW;
3407 *   else if (i_group == J_dof)
3408 *   data.cell_rhs(i) -= N[i] * (dPsi_vol_dJ - p_tilde) * JxW;
3409 *   else
3410 *   DEAL_II_ASSERT_UNREACHABLE();
3411 *  
3412 * @endcode
3413 *
3414 * Before we go into the inner loop, we have one final chance to
3415 * introduce some optimizations. We've already taken into account
3416 * the symmetry of the system, and we can now precompute some common
3417 * terms that are repeatedly applied in the inner loop.
3418 * We won't be excessive here, but will rather focus on expensive
3419 * operations, namely those involving the rank-4 material stiffness
3420 * tensor and the rank-2 stress tensor.
3421 *
3422
3423 *
3424 * What we may observe is that both of these tensors are contracted
3425 * with shape function gradients indexed on the "i" DoF. This
3426 * implies that this particular operation remains constant as we
3427 * loop over the "j" DoF. For that reason, we can extract this from
3428 * the inner loop and save the many operations that, for each
3429 * quadrature point and DoF index "i" and repeated over index "j"
3430 * are required to double contract a rank-2 symmetric tensor with a
3431 * rank-4 symmetric tensor, and a rank-1 tensor with a rank-2
3432 * tensor.
3433 *
3434
3435 *
3436 * At the loss of some readability, this small change will reduce
3437 * the assembly time of the symmetrized system by about half when
3438 * using the simulation default parameters, and becomes more
3439 * significant as the h-refinement level increases.
3440 *
3441 * @code
3442 *   if (i_group == u_dof)
3443 *   {
3444 *   symm_grad_Nx_i_x_Jc = symm_grad_Nx[i] * Jc;
3445 *   grad_Nx_i_comp_i_x_tau = grad_Nx[i][component_i] * tau_ns;
3446 *   }
3447 *  
3448 * @endcode
3449 *
3450 * Now we're prepared to compute the tangent matrix contributions:
3451 *
3452 * @code
3453 *   for (const unsigned int j :
3454 *   scratch.fe_values.dof_indices_ending_at(i))
3455 *   {
3456 *   const unsigned int component_j =
3457 *   fe.system_to_component_index(j).first;
3458 *   const unsigned int j_group =
3459 *   fe.system_to_base_index(j).first.first;
3460 *  
3461 * @endcode
3462 *
3463 * This is the @f$\mathsf{\mathbf{k}}_{uu}@f$
3464 * contribution. It comprises a material contribution, and a
3465 * geometrical stress contribution which is only added along
3466 * the local matrix diagonals:
3467 *
3468 * @code
3469 *   if ((i_group == u_dof) && (j_group == u_dof)) // UU block
3470 *   {
3471 * @endcode
3472 *
3473 * The material contribution:
3474 *
3475 * @code
3476 *   data.cell_matrix(i, j) += symm_grad_Nx_i_x_Jc *
3477 *   symm_grad_Nx[j] * JxW;
3478 *  
3479 * @endcode
3480 *
3481 * The geometrical stress contribution:
3482 *
3483 * @code
3484 *   if (component_i == component_j)
3485 *   data.cell_matrix(i, j) +=
3486 *   grad_Nx_i_comp_i_x_tau * grad_Nx[j][component_j] * JxW;
3487 *   }
3488 * @endcode
3489 *
3490 * Next is the @f$\mathsf{\mathbf{k}}_{ \widetilde{p} u}@f$
3491 * contribution
3492 *
3493 * @code
3494 *   else if ((i_group == p_dof) && (j_group == u_dof)) // PU block
3495 *   {
3496 *   data.cell_matrix(i, j) += N[i] * det_F *
3497 *   (symm_grad_Nx[j] * I) * JxW;
3498 *   }
3499 * @endcode
3500 *
3501 * And lastly the @f$\mathsf{\mathbf{k}}_{ \widetilde{J}
3502 * \widetilde{p}}@f$ and @f$\mathsf{\mathbf{k}}_{ \widetilde{J}
3503 * \widetilde{J}}@f$ contributions:
3504 *
3505 * @code
3506 *   else if ((i_group == J_dof) && (j_group == p_dof)) // JP block
3507 *   data.cell_matrix(i, j) -= N[i] * N[j] * JxW;
3508 *   else if ((i_group == J_dof) && (j_group == J_dof)) // JJ block
3509 *   data.cell_matrix(i, j) += N[i] * d2Psi_vol_dJ2 * N[j] * JxW;
3510 *   else if ((i_group <= J_dof) && (j_group <= J_dof))
3511 *   {
3512 *   /* Nothing to do for the remaining blocks. */
3513 *   }
3514 *   else
3515 *   DEAL_II_ASSERT_UNREACHABLE();
3516 *   }
3517 *   }
3518 *   }
3519 *  
3520 * @endcode
3521 *
3522 * Next we assemble the Neumann contribution. We first check to see if the
3523 * cell face exists on a boundary on which a traction is applied and add
3524 * the contribution if this is the case.
3525 *
3526 * @code
3527 *   for (const auto &face : cell->face_iterators())
3528 *   if (face->at_boundary() && face->boundary_id() == 6)
3529 *   {
3530 *   scratch.fe_face_values.reinit(cell, face);
3531 *  
3532 *   for (const unsigned int f_q_point :
3533 *   scratch.fe_face_values.quadrature_point_indices())
3534 *   {
3535 *   const Tensor<1, dim> &N =
3536 *   scratch.fe_face_values.normal_vector(f_q_point);
3537 *  
3538 * @endcode
3539 *
3540 * Using the face normal at this quadrature point we specify the
3541 * traction in reference configuration. For this problem, a
3542 * defined pressure is applied in the reference configuration.
3543 * The direction of the applied traction is assumed not to
3544 * evolve with the deformation of the domain. The traction
3545 * defined using the first Piola-Kirchhoff stress is simply
3546 * @f$\mathbf{t} = \mathbf{P}\mathbf{N} = [p_0 \mathbf{I}]
3547 * \mathbf{N} = p_0 \mathbf{N}@f$. We use the time variable to
3548 * linearly ramp up the pressure load.
3549 *
3550
3551 *
3552 * Note that the contributions to the right hand side vector we
3553 * compute here only exist in the displacement components of the
3554 * vector.
3555 *
3556 * @code
3557 *   static const double p0 =
3558 *   -4.0 / (parameters.scale * parameters.scale);
3559 *   const double time_ramp = (time.current() / time.end());
3560 *   const double pressure = p0 * parameters.p_p0 * time_ramp;
3561 *   const Tensor<1, dim> traction = pressure * N;
3562 *  
3563 *   for (const unsigned int i : scratch.fe_values.dof_indices())
3564 *   {
3565 *   const unsigned int i_group =
3566 *   fe.system_to_base_index(i).first.first;
3567 *  
3568 *   if (i_group == u_dof)
3569 *   {
3570 *   const unsigned int component_i =
3571 *   fe.system_to_component_index(i).first;
3572 *   const double Ni =
3573 *   scratch.fe_face_values.shape_value(i, f_q_point);
3574 *   const double JxW = scratch.fe_face_values.JxW(f_q_point);
3575 *  
3576 *   data.cell_rhs(i) += (Ni * traction[component_i]) * JxW;
3577 *   }
3578 *   }
3579 *   }
3580 *   }
3581 *  
3582 * @endcode
3583 *
3584 * Finally, we need to copy the lower half of the local matrix into the
3585 * upper half:
3586 *
3587 * @code
3588 *   for (const unsigned int i : scratch.fe_values.dof_indices())
3589 *   for (const unsigned int j :
3590 *   scratch.fe_values.dof_indices_starting_at(i + 1))
3591 *   data.cell_matrix(i, j) = data.cell_matrix(j, i);
3592 *   }
3593 *  
3594 *  
3595 *  
3596 * @endcode
3597 *
3598 *
3599 * <a name="step_44-Solidmake_constraints"></a>
3600 * <h4>Solid::make_constraints</h4>
3601 * The constraints for this problem are simple to describe.
3602 * In this particular example, the boundary values will be calculated for
3603 * the first two iterations of Newton's algorithm. In general, one would
3604 * build non-homogeneous constraints in the zeroth iteration (that is, when
3605 * `apply_dirichlet_bc == true` in the code block that follows) and build
3606 * only the corresponding homogeneous constraints in the following step. While
3607 * the current example has only homogeneous constraints, previous experiences
3608 * have shown that a common error is forgetting to add the extra condition
3609 * when refactoring the code to specific uses. This could lead to errors that
3610 * are hard to debug. In this spirit, we choose to make the code more verbose
3611 * in terms of what operations are performed at each Newton step.
3612 *
3613 * @code
3614 *   template <int dim>
3615 *   void Solid<dim>::make_constraints(const unsigned int it_nr)
3616 *   {
3617 * @endcode
3618 *
3619 * Since we (a) are dealing with an iterative Newton method, (b) are using
3620 * an incremental formulation for the displacement, and (c) apply the
3621 * constraints to the incremental displacement field, any non-homogeneous
3622 * constraints on the displacement update should only be specified at the
3623 * zeroth iteration. No subsequent contributions are to be made since the
3624 * constraints will be exactly satisfied after that iteration.
3625 *
3626 * @code
3627 *   const bool apply_dirichlet_bc = (it_nr == 0);
3628 *  
3629 * @endcode
3630 *
3631 * Furthermore, after the first Newton iteration within a timestep, the
3632 * constraints remain the same and we do not need to modify or rebuild them
3633 * so long as we do not clear the @p constraints object.
3634 *
3635 * @code
3636 *   if (it_nr > 1)
3637 *   {
3638 *   std::cout << " --- " << std::flush;
3639 *   return;
3640 *   }
3641 *  
3642 *   std::cout << " CST " << std::flush;
3643 *  
3644 *   if (apply_dirichlet_bc)
3645 *   {
3646 * @endcode
3647 *
3648 * At the zeroth Newton iteration we wish to apply the full set of
3649 * non-homogeneous and homogeneous constraints that represent the
3650 * boundary conditions on the displacement increment. Since in general
3651 * the constraints may be different at each time step, we need to clear
3652 * the constraints matrix and completely rebuild it. An example case
3653 * would be if a surface is accelerating; in such a scenario the change
3654 * in displacement is non-constant between each time step.
3655 *
3656 * @code
3657 *   constraints.clear();
3658 *  
3659 * @endcode
3660 *
3661 * The boundary conditions for the indentation problem in 3d are as
3662 * follows: On the -x, -y and -z faces (IDs 0,2,4) we set up a symmetry
3663 * condition to allow only planar movement while the +x and +z faces
3664 * (IDs 1,5) are traction free. In this contrived problem, part of the
3665 * +y face (ID 3) is set to have no motion in the x- and z-component.
3666 * Finally, as described earlier, the other part of the +y face has
3667 * the applied pressure but is also constrained in the x- and
3668 * z-directions.
3669 *
3670
3671 *
3672 * In the following, we will have to tell the function interpolation
3673 * boundary values which components of the solution vector should be
3674 * constrained (i.e., whether it's the x-, y-, z-displacements or
3675 * combinations thereof). This is done using ComponentMask objects (see
3676 * @ref GlossComponentMask) which we can get from the finite element if we
3677 * provide it with an extractor object for the component we wish to
3678 * select. To this end we first set up such extractor objects and later
3679 * use it when generating the relevant component masks:
3680 *
3681 * @code
3682 *   const FEValuesExtractors::Scalar x_displacement(0);
3683 *   const FEValuesExtractors::Scalar y_displacement(1);
3684 *  
3685 *   {
3686 *   const int boundary_id = 0;
3687 *  
3688 *   VectorTools::interpolate_boundary_values(
3689 *   dof_handler,
3690 *   boundary_id,
3691 *   Functions::ZeroFunction<dim>(n_components),
3692 *   constraints,
3693 *   fe.component_mask(x_displacement));
3694 *   }
3695 *   {
3696 *   const int boundary_id = 2;
3697 *  
3698 *   VectorTools::interpolate_boundary_values(
3699 *   dof_handler,
3700 *   boundary_id,
3701 *   Functions::ZeroFunction<dim>(n_components),
3702 *   constraints,
3703 *   fe.component_mask(y_displacement));
3704 *   }
3705 *  
3706 *   if (dim == 3)
3707 *   {
3708 *   const FEValuesExtractors::Scalar z_displacement(2);
3709 *  
3710 *   {
3711 *   const int boundary_id = 3;
3712 *  
3713 *   VectorTools::interpolate_boundary_values(
3714 *   dof_handler,
3715 *   boundary_id,
3716 *   Functions::ZeroFunction<dim>(n_components),
3717 *   constraints,
3718 *   (fe.component_mask(x_displacement) |
3719 *   fe.component_mask(z_displacement)));
3720 *   }
3721 *   {
3722 *   const int boundary_id = 4;
3723 *  
3724 *   VectorTools::interpolate_boundary_values(
3725 *   dof_handler,
3726 *   boundary_id,
3727 *   Functions::ZeroFunction<dim>(n_components),
3728 *   constraints,
3729 *   fe.component_mask(z_displacement));
3730 *   }
3731 *  
3732 *   {
3733 *   const int boundary_id = 6;
3734 *  
3735 *   VectorTools::interpolate_boundary_values(
3736 *   dof_handler,
3737 *   boundary_id,
3738 *   Functions::ZeroFunction<dim>(n_components),
3739 *   constraints,
3740 *   (fe.component_mask(x_displacement) |
3741 *   fe.component_mask(z_displacement)));
3742 *   }
3743 *   }
3744 *   else
3745 *   {
3746 *   {
3747 *   const int boundary_id = 3;
3748 *  
3749 *   VectorTools::interpolate_boundary_values(
3750 *   dof_handler,
3751 *   boundary_id,
3752 *   Functions::ZeroFunction<dim>(n_components),
3753 *   constraints,
3754 *   (fe.component_mask(x_displacement)));
3755 *   }
3756 *   {
3757 *   const int boundary_id = 6;
3758 *  
3759 *   VectorTools::interpolate_boundary_values(
3760 *   dof_handler,
3761 *   boundary_id,
3762 *   Functions::ZeroFunction<dim>(n_components),
3763 *   constraints,
3764 *   (fe.component_mask(x_displacement)));
3765 *   }
3766 *   }
3767 *   }
3768 *   else
3769 *   {
3770 * @endcode
3771 *
3772 * As all Dirichlet constraints are fulfilled exactly after the zeroth
3773 * Newton iteration, we want to ensure that no further modification is
3774 * made to those entries. This implies that we want to convert
3775 * all non-homogeneous Dirichlet constraints into homogeneous ones.
3776 *
3777
3778 *
3779 * In this example the procedure to do this is quite straightforward, and
3780 * in fact we can (and will) circumvent any unnecessary operations when
3781 * only homogeneous boundary conditions are applied. In a more general
3782 * problem one should be mindful of hanging node
3783 * and periodic constraints, which may also introduce some
3784 * inhomogeneities. It might then be advantageous to keep disparate
3785 * objects for the different types of constraints, and merge them
3786 * together once the homogeneous Dirichlet constraints have been
3787 * constructed.
3788 *
3789 * @code
3790 *   if (constraints.has_inhomogeneities())
3791 *   {
3792 * @endcode
3793 *
3794 * Since the affine constraints were finalized at the previous
3795 * Newton iteration, they may not be modified directly. So
3796 * we need to copy them to another temporary object and make
3797 * modification there. Once we're done, we'll transfer them
3798 * back to the main @p constraints object.
3799 *
3800 * @code
3801 *   AffineConstraints<double> homogeneous_constraints(constraints);
3802 *   for (unsigned int dof = 0; dof != dof_handler.n_dofs(); ++dof)
3803 *   if (homogeneous_constraints.is_inhomogeneously_constrained(dof))
3804 *   homogeneous_constraints.set_inhomogeneity(dof, 0.0);
3805 *  
3806 *   constraints.clear();
3807 *   constraints.copy_from(homogeneous_constraints);
3808 *   }
3809 *   }
3810 *  
3811 *   constraints.close();
3812 *   }
3813 *  
3814 * @endcode
3815 *
3816 *
3817 * <a name="step_44-Solidassemble_sc"></a>
3818 * <h4>Solid::assemble_sc</h4>
3819 * Solving the entire block system is a bit problematic as there are no
3820 * contributions to the @f$\mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}}@f$
3821 * block, rendering it noninvertible (when using an iterative solver).
3822 * Since the pressure and dilatation DOFs are discontinuous, we can
3823 * condense them out to form a smaller displacement-only system which
3824 * we will then solve and subsequently post-process to retrieve the
3825 * pressure and dilatation solutions.
3826 *
3827
3828 *
3829 * The static condensation process could be performed at a global level but we
3830 * need the inverse of one of the blocks. However, since the pressure and
3831 * dilatation variables are discontinuous, the static condensation (SC)
3832 * operation can also be done on a per-cell basis and we can produce the
3833 * inverse of the block-diagonal
3834 * @f$\mathsf{\mathbf{K}}_{\widetilde{p}\widetilde{J}}@f$ block by inverting the
3835 * local blocks. We can again use TBB to do this since each operation will be
3836 * independent of one another.
3837 *
3838
3839 *
3840 * The strategy implemented below is to build a matrix of the
3841 * form
3842 * @f$
3843 * \mathsf{\mathbf{K}}_{\textrm{con}}
3844 * = \bigl[ \mathsf{\mathbf{K}}_{uu} +
3845 * \overline{\overline{\mathsf{\mathbf{K}}}}~ \bigr]
3846 * @f$
3847 * from each element's contributions. These contributions are then added to
3848 * the global stiffness matrix. Given this description, the following two
3849 * functions should be clear:
3850 *
3851 * @code
3852 *   template <int dim>
3853 *   void Solid<dim>::assemble_sc()
3854 *   {
3855 *   timer.enter_subsection("Perform static condensation");
3856 *   std::cout << " ASM_SC " << std::flush;
3857 *  
3858 *   PerTaskData_SC per_task_data(dofs_per_cell,
3859 *   element_indices_u.size(),
3860 *   element_indices_p.size(),
3861 *   element_indices_J.size());
3862 *   ScratchData_SC scratch_data;
3863 *  
3864 *   WorkStream::run(dof_handler.active_cell_iterators(),
3865 *   *this,
3866 *   &Solid::assemble_sc_one_cell,
3867 *   &Solid::copy_local_to_global_sc,
3868 *   scratch_data,
3869 *   per_task_data);
3870 *  
3871 *   timer.leave_subsection();
3872 *   }
3873 *  
3874 *  
3875 *   template <int dim>
3876 *   void Solid<dim>::copy_local_to_global_sc(const PerTaskData_SC &data)
3877 *   {
3878 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
3879 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
3880 *   tangent_matrix.add(data.local_dof_indices[i],
3881 *   data.local_dof_indices[j],
3882 *   data.cell_matrix(i, j));
3883 *   }
3884 *  
3885 *  
3886 * @endcode
3887 *
3888 * Now we describe the static condensation process. As per usual, we must
3889 * first find out which global numbers the degrees of freedom on this cell
3890 * have and reset some data structures:
3891 *
3892 * @code
3893 *   template <int dim>
3894 *   void Solid<dim>::assemble_sc_one_cell(
3895 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
3896 *   ScratchData_SC &scratch,
3897 *   PerTaskData_SC &data)
3898 *   {
3899 *   data.reset();
3900 *   scratch.reset();
3901 *   cell->get_dof_indices(data.local_dof_indices);
3902 *  
3903 * @endcode
3904 *
3905 * We now extract the contribution of the dofs associated with the current
3906 * cell to the global stiffness matrix. The discontinuous nature of the
3907 * @f$\widetilde{p}@f$ and @f$\widetilde{J}@f$ interpolations means that there is
3908 * no coupling of the local contributions at the global level. This is not
3909 * the case with the @f$\mathbf{u}@f$ dof. In other words,
3910 * @f$\mathsf{\mathbf{k}}_{\widetilde{J} \widetilde{p}}@f$,
3911 * @f$\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}@f$ and
3912 * @f$\mathsf{\mathbf{k}}_{\widetilde{J} \widetilde{J}}@f$, when extracted
3913 * from the global stiffness matrix are the element contributions. This
3914 * is not the case for @f$\mathsf{\mathbf{k}}_{uu}@f$.
3915 *
3916
3917 *
3918 * Note: A lower-case symbol is used to denote element stiffness matrices.
3919 *
3920
3921 *
3922 * Currently the matrix corresponding to
3923 * the dof associated with the current element
3924 * (denoted somewhat loosely as @f$\mathsf{\mathbf{k}}@f$)
3925 * is of the form:
3926 * @f{align*}{
3927 * \begin{bmatrix}
3928 * \mathsf{\mathbf{k}}_{uu} & \mathsf{\mathbf{k}}_{u\widetilde{p}}
3929 * & \mathbf{0}
3930 * \\ \mathsf{\mathbf{k}}_{\widetilde{p}u} & \mathbf{0} &
3931 * \mathsf{\mathbf{k}}_{\widetilde{p}\widetilde{J}}
3932 * \\ \mathbf{0} & \mathsf{\mathbf{k}}_{\widetilde{J}\widetilde{p}} &
3933 * \mathsf{\mathbf{k}}_{\widetilde{J}\widetilde{J}} \end{bmatrix}
3934 * @f}
3935 *
3936
3937 *
3938 * We now need to modify it such that it appears as
3939 * @f{align*}{
3940 * \begin{bmatrix}
3941 * \mathsf{\mathbf{k}}_{\textrm{con}} &
3942 * \mathsf{\mathbf{k}}_{u\widetilde{p}} & \mathbf{0}
3943 * \\ \mathsf{\mathbf{k}}_{\widetilde{p}u} & \mathbf{0} &
3944 * \mathsf{\mathbf{k}}_{\widetilde{p}\widetilde{J}}^{-1}
3945 * \\ \mathbf{0} & \mathsf{\mathbf{k}}_{\widetilde{J}\widetilde{p}} &
3946 * \mathsf{\mathbf{k}}_{\widetilde{J}\widetilde{J}} \end{bmatrix}
3947 * @f}
3948 * with @f$\mathsf{\mathbf{k}}_{\textrm{con}} = \bigl[
3949 * \mathsf{\mathbf{k}}_{uu} +\overline{\overline{\mathsf{\mathbf{k}}}}~
3950 * \bigr]@f$ where @f$ \overline{\overline{\mathsf{\mathbf{k}}}}
3951 * \dealcoloneq \mathsf{\mathbf{k}}_{u\widetilde{p}}
3952 * \overline{\mathsf{\mathbf{k}}} \mathsf{\mathbf{k}}_{\widetilde{p}u}
3953 * @f$
3954 * and
3955 * @f$
3956 * \overline{\mathsf{\mathbf{k}}} =
3957 * \mathsf{\mathbf{k}}_{\widetilde{J}\widetilde{p}}^{-1}
3958 * \mathsf{\mathbf{k}}_{\widetilde{J}\widetilde{J}}
3959 * \mathsf{\mathbf{k}}_{\widetilde{p}\widetilde{J}}^{-1}
3960 * @f$.
3961 *
3962
3963 *
3964 * At this point, we need to take note of
3965 * the fact that global data already exists
3966 * in the @f$\mathsf{\mathbf{K}}_{uu}@f$,
3967 * @f$\mathsf{\mathbf{K}}_{\widetilde{p} \widetilde{J}}@f$
3968 * and
3969 * @f$\mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{p}}@f$
3970 * sub-blocks. So if we are to modify them, we must account for the data
3971 * that is already there (i.e. simply add to it or remove it if
3972 * necessary). Since the copy_local_to_global operation is a "+="
3973 * operation, we need to take this into account.
3974 *
3975
3976 *
3977 * For the @f$\mathsf{\mathbf{K}}_{uu}@f$ block in particular, this means that
3978 * contributions have been added from the surrounding cells, so we need to
3979 * be careful when we manipulate this block. We can't just erase the
3980 * sub-blocks.
3981 *
3982
3983 *
3984 * This is the strategy we will employ to get the sub-blocks we want:
3985 *
3986
3987 *
3988 * - @f$ {\mathsf{\mathbf{k}}}_{\textrm{store}}@f$:
3989 * Since we don't have access to @f$\mathsf{\mathbf{k}}_{uu}@f$,
3990 * but we know its contribution is added to
3991 * the global @f$\mathsf{\mathbf{K}}_{uu}@f$ matrix, we just want
3992 * to add the element wise
3993 * static-condensation @f$\overline{\overline{\mathsf{\mathbf{k}}}}@f$.
3994 *
3995
3996 *
3997 * - @f$\mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}}@f$:
3998 * Similarly, @f$\mathsf{\mathbf{k}}_{\widetilde{p}
3999 * \widetilde{J}}@f$ exists in
4000 * the subblock. Since the copy
4001 * operation is a += operation, we
4002 * need to subtract the existing
4003 * @f$\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}@f$
4004 * submatrix in addition to
4005 * "adding" that which we wish to
4006 * replace it with.
4007 *
4008
4009 *
4010 * - @f$\mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{p}}@f$:
4011 * Since the global matrix
4012 * is symmetric, this block is the
4013 * same as the one above and we
4014 * can simply use
4015 * @f$\mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}}@f$
4016 * as a substitute for this one.
4017 *
4018
4019 *
4020 * We first extract element data from the
4021 * system matrix. So first we get the
4022 * entire subblock for the cell, then
4023 * extract @f$\mathsf{\mathbf{k}}@f$
4024 * for the dofs associated with
4025 * the current element
4026 *
4027 * @code
4028 *   data.k_orig.extract_submatrix_from(tangent_matrix,
4029 *   data.local_dof_indices,
4030 *   data.local_dof_indices);
4031 * @endcode
4032 *
4033 * and next the local matrices for
4034 * @f$\mathsf{\mathbf{k}}_{ \widetilde{p} u}@f$
4035 * @f$\mathsf{\mathbf{k}}_{ \widetilde{p} \widetilde{J}}@f$
4036 * and
4037 * @f$\mathsf{\mathbf{k}}_{ \widetilde{J} \widetilde{J}}@f$:
4038 *
4039 * @code
4040 *   data.k_pu.extract_submatrix_from(data.k_orig,
4041 *   element_indices_p,
4042 *   element_indices_u);
4043 *   data.k_pJ.extract_submatrix_from(data.k_orig,
4044 *   element_indices_p,
4045 *   element_indices_J);
4046 *   data.k_JJ.extract_submatrix_from(data.k_orig,
4047 *   element_indices_J,
4048 *   element_indices_J);
4049 *  
4050 * @endcode
4051 *
4052 * To get the inverse of @f$\mathsf{\mathbf{k}}_{\widetilde{p}
4053 * \widetilde{J}}@f$, we invert it directly. This operation is relatively
4054 * inexpensive since @f$\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}@f$
4055 * is block-diagonal.
4056 *
4057 * @code
4058 *   data.k_pJ_inv.invert(data.k_pJ);
4059 *  
4060 * @endcode
4061 *
4062 * Now we can make condensation terms to
4063 * add to the @f$\mathsf{\mathbf{k}}_{uu}@f$
4064 * block and put them in
4065 * the cell local matrix
4066 * @f$
4067 * \mathsf{\mathbf{A}}
4068 * =
4069 * \mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}}
4070 * \mathsf{\mathbf{k}}_{\widetilde{p} u}
4071 * @f$:
4072 *
4073 * @code
4074 *   data.k_pJ_inv.mmult(data.A, data.k_pu);
4075 * @endcode
4076 *
4077 * @f$
4078 * \mathsf{\mathbf{B}}
4079 * =
4080 * \mathsf{\mathbf{k}}_{\widetilde{J} \widetilde{J}}
4081 * \mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}}
4082 * \mathsf{\mathbf{k}}_{\widetilde{p} u}
4083 * @f$
4084 *
4085 * @code
4086 *   data.k_JJ.mmult(data.B, data.A);
4087 * @endcode
4088 *
4089 * @f$
4090 * \mathsf{\mathbf{C}}
4091 * =
4092 * \mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{p}}
4093 * \mathsf{\mathbf{k}}_{\widetilde{J} \widetilde{J}}
4094 * \mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}}
4095 * \mathsf{\mathbf{k}}_{\widetilde{p} u}
4096 * @f$
4097 *
4098 * @code
4099 *   data.k_pJ_inv.Tmmult(data.C, data.B);
4100 * @endcode
4101 *
4102 * @f$
4103 * \overline{\overline{\mathsf{\mathbf{k}}}}
4104 * =
4105 * \mathsf{\mathbf{k}}_{u \widetilde{p}}
4106 * \mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{p}}
4107 * \mathsf{\mathbf{k}}_{\widetilde{J} \widetilde{J}}
4108 * \mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}}
4109 * \mathsf{\mathbf{k}}_{\widetilde{p} u}
4110 * @f$
4111 *
4112 * @code
4113 *   data.k_pu.Tmmult(data.k_bbar, data.C);
4114 *   data.k_bbar.scatter_matrix_to(element_indices_u,
4115 *   element_indices_u,
4116 *   data.cell_matrix);
4117 *  
4118 * @endcode
4119 *
4120 * Next we place
4121 * @f$\mathsf{\mathbf{k}}^{-1}_{ \widetilde{p} \widetilde{J}}@f$
4122 * in the
4123 * @f$\mathsf{\mathbf{k}}_{ \widetilde{p} \widetilde{J}}@f$
4124 * block for post-processing. Note again
4125 * that we need to remove the
4126 * contribution that already exists there.
4127 *
4128 * @code
4129 *   data.k_pJ_inv.add(-1.0, data.k_pJ);
4130 *   data.k_pJ_inv.scatter_matrix_to(element_indices_p,
4131 *   element_indices_J,
4132 *   data.cell_matrix);
4133 *   }
4134 *  
4135 * @endcode
4136 *
4137 *
4138 * <a name="step_44-Solidsolve_linear_system"></a>
4139 * <h4>Solid::solve_linear_system</h4>
4140 * We now have all of the necessary components to use one of two possible
4141 * methods to solve the linearised system. The first is to perform static
4142 * condensation on an element level, which requires some alterations
4143 * to the tangent matrix and RHS vector. Alternatively, the full block
4144 * system can be solved by performing condensation on a global level.
4145 * Below we implement both approaches.
4146 *
4147 * @code
4148 *   template <int dim>
4149 *   std::pair<unsigned int, double>
4150 *   Solid<dim>::solve_linear_system(BlockVector<double> &newton_update)
4151 *   {
4152 *   unsigned int lin_it = 0;
4153 *   double lin_res = 0.0;
4154 *  
4155 *   if (parameters.use_static_condensation == true)
4156 *   {
4157 * @endcode
4158 *
4159 * Firstly, here is the approach using the (permanent) augmentation of
4160 * the tangent matrix. For the following, recall that
4161 * @f{align*}{
4162 * \mathsf{\mathbf{K}}_{\textrm{store}}
4163 * \dealcoloneq
4164 * \begin{bmatrix}
4165 * \mathsf{\mathbf{K}}_{\textrm{con}} &
4166 * \mathsf{\mathbf{K}}_{u\widetilde{p}} & \mathbf{0}
4167 * \\ \mathsf{\mathbf{K}}_{\widetilde{p}u} & \mathbf{0} &
4168 * \mathsf{\mathbf{K}}_{\widetilde{p}\widetilde{J}}^{-1}
4169 * \\ \mathbf{0} &
4170 * \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{p}} &
4171 * \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{J}} \end{bmatrix} \, .
4172 * @f}
4173 * and
4174 * @f{align*}{
4175 * d \widetilde{\mathsf{\mathbf{p}}}
4176 * & =
4177 * \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{p}}^{-1}
4178 * \bigl[
4179 * \mathsf{\mathbf{F}}_{\widetilde{J}}
4180 * -
4181 * \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{J}}
4182 * d \widetilde{\mathsf{\mathbf{J}}} \bigr]
4183 * \\ d \widetilde{\mathsf{\mathbf{J}}}
4184 * & =
4185 * \mathsf{\mathbf{K}}_{\widetilde{p}\widetilde{J}}^{-1}
4186 * \bigl[
4187 * \mathsf{\mathbf{F}}_{\widetilde{p}}
4188 * - \mathsf{\mathbf{K}}_{\widetilde{p}u} d
4189 * \mathsf{\mathbf{u}} \bigr]
4190 * \\ \Rightarrow d \widetilde{\mathsf{\mathbf{p}}}
4191 * &= \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{p}}^{-1}
4192 * \mathsf{\mathbf{F}}_{\widetilde{J}}
4193 * -
4194 * \underbrace{\bigl[\mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{p}}^{-1}
4195 * \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{J}}
4196 * \mathsf{\mathbf{K}}_{\widetilde{p}\widetilde{J}}^{-1}\bigr]}_{\overline{\mathsf{\mathbf{K}}}}\bigl[
4197 * \mathsf{\mathbf{F}}_{\widetilde{p}}
4198 * - \mathsf{\mathbf{K}}_{\widetilde{p}u} d
4199 * \mathsf{\mathbf{u}} \bigr]
4200 * @f}
4201 * and thus
4202 * @f[
4203 * \underbrace{\bigl[ \mathsf{\mathbf{K}}_{uu} +
4204 * \overline{\overline{\mathsf{\mathbf{K}}}}~ \bigr]
4205 * }_{\mathsf{\mathbf{K}}_{\textrm{con}}} d
4206 * \mathsf{\mathbf{u}}
4207 * =
4208 * \underbrace{
4209 * \Bigl[
4210 * \mathsf{\mathbf{F}}_{u}
4211 * - \mathsf{\mathbf{K}}_{u\widetilde{p}} \bigl[
4212 * \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{p}}^{-1}
4213 * \mathsf{\mathbf{F}}_{\widetilde{J}}
4214 * -
4215 * \overline{\mathsf{\mathbf{K}}}\mathsf{\mathbf{F}}_{\widetilde{p}}
4216 * \bigr]
4217 * \Bigr]}_{\mathsf{\mathbf{F}}_{\textrm{con}}}
4218 * @f]
4219 * where
4220 * @f[
4221 * \overline{\overline{\mathsf{\mathbf{K}}}} \dealcoloneq
4222 * \mathsf{\mathbf{K}}_{u\widetilde{p}}
4223 * \overline{\mathsf{\mathbf{K}}}
4224 * \mathsf{\mathbf{K}}_{\widetilde{p}u} \, .
4225 * @f]
4226 *
4227
4228 *
4229 * At the top, we allocate two temporary vectors to help with the
4230 * static condensation, and variables to store the number of
4231 * linear solver iterations and the (hopefully converged) residual.
4232 *
4233 * @code
4234 *   BlockVector<double> A(dofs_per_block);
4235 *   BlockVector<double> B(dofs_per_block);
4236 *  
4237 *  
4238 * @endcode
4239 *
4240 * In the first step of this function, we solve for the incremental
4241 * displacement @f$d\mathbf{u}@f$. To this end, we perform static
4242 * condensation to make
4243 * @f$\mathsf{\mathbf{K}}_{\textrm{con}}
4244 * = \bigl[ \mathsf{\mathbf{K}}_{uu} +
4245 * \overline{\overline{\mathsf{\mathbf{K}}}}~ \bigr]@f$
4246 * and put
4247 * @f$\mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}}@f$
4248 * in the original @f$\mathsf{\mathbf{K}}_{\widetilde{p} \widetilde{J}}@f$
4249 * block. That is, we make @f$\mathsf{\mathbf{K}}_{\textrm{store}}@f$.
4250 *
4251 * @code
4252 *   {
4253 *   assemble_sc();
4254 *  
4255 * @endcode
4256 *
4257 * @f$
4258 * \mathsf{\mathbf{A}}_{\widetilde{J}}
4259 * =
4260 * \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}}
4261 * \mathsf{\mathbf{F}}_{\widetilde{p}}
4262 * @f$
4263 *
4264 * @code
4265 *   tangent_matrix.block(p_dof, J_dof)
4266 *   .vmult(A.block(J_dof), system_rhs.block(p_dof));
4267 * @endcode
4268 *
4269 * @f$
4270 * \mathsf{\mathbf{B}}_{\widetilde{J}}
4271 * =
4272 * \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}}
4273 * \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}}
4274 * \mathsf{\mathbf{F}}_{\widetilde{p}}
4275 * @f$
4276 *
4277 * @code
4278 *   tangent_matrix.block(J_dof, J_dof)
4279 *   .vmult(B.block(J_dof), A.block(J_dof));
4280 * @endcode
4281 *
4282 * @f$
4283 * \mathsf{\mathbf{A}}_{\widetilde{J}}
4284 * =
4285 * \mathsf{\mathbf{F}}_{\widetilde{J}}
4286 * -
4287 * \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}}
4288 * \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}}
4289 * \mathsf{\mathbf{F}}_{\widetilde{p}}
4290 * @f$
4291 *
4292 * @code
4293 *   A.block(J_dof) = system_rhs.block(J_dof);
4294 *   A.block(J_dof) -= B.block(J_dof);
4295 * @endcode
4296 *
4297 * @f$
4298 * \mathsf{\mathbf{A}}_{\widetilde{J}}
4299 * =
4300 * \mathsf{\mathbf{K}}^{-1}_{\widetilde{J} \widetilde{p}}
4301 * [
4302 * \mathsf{\mathbf{F}}_{\widetilde{J}}
4303 * -
4304 * \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}}
4305 * \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}}
4306 * \mathsf{\mathbf{F}}_{\widetilde{p}}
4307 * ]
4308 * @f$
4309 *
4310 * @code
4311 *   tangent_matrix.block(p_dof, J_dof)
4312 *   .Tvmult(A.block(p_dof), A.block(J_dof));
4313 * @endcode
4314 *
4315 * @f$
4316 * \mathsf{\mathbf{A}}_{u}
4317 * =
4318 * \mathsf{\mathbf{K}}_{u \widetilde{p}}
4319 * \mathsf{\mathbf{K}}^{-1}_{\widetilde{J} \widetilde{p}}
4320 * [
4321 * \mathsf{\mathbf{F}}_{\widetilde{J}}
4322 * -
4323 * \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}}
4324 * \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}}
4325 * \mathsf{\mathbf{F}}_{\widetilde{p}}
4326 * ]
4327 * @f$
4328 *
4329 * @code
4330 *   tangent_matrix.block(u_dof, p_dof)
4331 *   .vmult(A.block(u_dof), A.block(p_dof));
4332 * @endcode
4333 *
4334 * @f$
4335 * \mathsf{\mathbf{F}}_{\text{con}}
4336 * =
4337 * \mathsf{\mathbf{F}}_{u}
4338 * -
4339 * \mathsf{\mathbf{K}}_{u \widetilde{p}}
4340 * \mathsf{\mathbf{K}}^{-1}_{\widetilde{J} \widetilde{p}}
4341 * [
4342 * \mathsf{\mathbf{F}}_{\widetilde{J}}
4343 * -
4344 * \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}}
4345 * \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}}
4346 * \mathsf{\mathbf{F}}_{\widetilde{p}}
4347 * ]
4348 * @f$
4349 *
4350 * @code
4351 *   system_rhs.block(u_dof) -= A.block(u_dof);
4352 *  
4353 *   timer.enter_subsection("Linear solver");
4354 *   std::cout << " SLV " << std::flush;
4355 *   if (parameters.type_lin == "CG")
4356 *   {
4357 *   const auto solver_its = static_cast<unsigned int>(
4358 *   tangent_matrix.block(u_dof, u_dof).m() *
4359 *   parameters.max_iterations_lin);
4360 *   const double tol_sol =
4361 *   parameters.tol_lin * system_rhs.block(u_dof).l2_norm();
4362 *  
4363 *   SolverControl solver_control(solver_its, tol_sol);
4364 *  
4365 *   GrowingVectorMemory<Vector<double>> GVM;
4366 *   SolverCG<Vector<double>> solver_CG(solver_control, GVM);
4367 *  
4368 * @endcode
4369 *
4370 * We've chosen by default an SSOR preconditioner as it appears to
4371 * provide the fastest solver convergence characteristics for this
4372 * problem on a single-thread machine. However, this might not be
4373 * true for different problem sizes.
4374 *
4375 * @code
4376 *   PreconditionSelector<SparseMatrix<double>, Vector<double>>
4377 *   preconditioner(parameters.preconditioner_type,
4378 *   parameters.preconditioner_relaxation);
4379 *   preconditioner.use_matrix(tangent_matrix.block(u_dof, u_dof));
4380 *  
4381 *   solver_CG.solve(tangent_matrix.block(u_dof, u_dof),
4382 *   newton_update.block(u_dof),
4383 *   system_rhs.block(u_dof),
4384 *   preconditioner);
4385 *  
4386 *   lin_it = solver_control.last_step();
4387 *   lin_res = solver_control.last_value();
4388 *   }
4389 *   else if (parameters.type_lin == "Direct")
4390 *   {
4391 * @endcode
4392 *
4393 * Otherwise if the problem is small
4394 * enough, a direct solver can be
4395 * utilised.
4396 *
4397 * @code
4398 *   SparseDirectUMFPACK A_direct;
4399 *   A_direct.initialize(tangent_matrix.block(u_dof, u_dof));
4400 *   A_direct.vmult(newton_update.block(u_dof),
4401 *   system_rhs.block(u_dof));
4402 *  
4403 *   lin_it = 1;
4404 *   lin_res = 0.0;
4405 *   }
4406 *   else
4407 *   Assert(false, ExcMessage("Linear solver type not implemented"));
4408 *  
4409 *   timer.leave_subsection();
4410 *   }
4411 *  
4412 * @endcode
4413 *
4414 * Now that we have the displacement update, distribute the constraints
4415 * back to the Newton update:
4416 *
4417 * @code
4418 *   constraints.distribute(newton_update);
4419 *  
4420 *   timer.enter_subsection("Linear solver postprocessing");
4421 *   std::cout << " PP " << std::flush;
4422 *  
4423 * @endcode
4424 *
4425 * The next step after solving the displacement
4426 * problem is to post-process to get the
4427 * dilatation solution from the
4428 * substitution:
4429 * @f$
4430 * d \widetilde{\mathsf{\mathbf{J}}}
4431 * = \mathsf{\mathbf{K}}_{\widetilde{p}\widetilde{J}}^{-1} \bigl[
4432 * \mathsf{\mathbf{F}}_{\widetilde{p}}
4433 * - \mathsf{\mathbf{K}}_{\widetilde{p}u} d \mathsf{\mathbf{u}}
4434 * \bigr]
4435 * @f$
4436 *
4437 * @code
4438 *   {
4439 * @endcode
4440 *
4441 * @f$
4442 * \mathsf{\mathbf{A}}_{\widetilde{p}}
4443 * =
4444 * \mathsf{\mathbf{K}}_{\widetilde{p}u} d \mathsf{\mathbf{u}}
4445 * @f$
4446 *
4447 * @code
4448 *   tangent_matrix.block(p_dof, u_dof)
4449 *   .vmult(A.block(p_dof), newton_update.block(u_dof));
4450 * @endcode
4451 *
4452 * @f$
4453 * \mathsf{\mathbf{A}}_{\widetilde{p}}
4454 * =
4455 * -\mathsf{\mathbf{K}}_{\widetilde{p}u} d \mathsf{\mathbf{u}}
4456 * @f$
4457 *
4458 * @code
4459 *   A.block(p_dof) *= -1.0;
4460 * @endcode
4461 *
4462 * @f$
4463 * \mathsf{\mathbf{A}}_{\widetilde{p}}
4464 * =
4465 * \mathsf{\mathbf{F}}_{\widetilde{p}}
4466 * -\mathsf{\mathbf{K}}_{\widetilde{p}u} d \mathsf{\mathbf{u}}
4467 * @f$
4468 *
4469 * @code
4470 *   A.block(p_dof) += system_rhs.block(p_dof);
4471 * @endcode
4472 *
4473 * @f$
4474 * d\mathsf{\mathbf{\widetilde{J}}}
4475 * =
4476 * \mathsf{\mathbf{K}}^{-1}_{\widetilde{p}\widetilde{J}}
4477 * [
4478 * \mathsf{\mathbf{F}}_{\widetilde{p}}
4479 * -\mathsf{\mathbf{K}}_{\widetilde{p}u} d \mathsf{\mathbf{u}}
4480 * ]
4481 * @f$
4482 *
4483 * @code
4484 *   tangent_matrix.block(p_dof, J_dof)
4485 *   .vmult(newton_update.block(J_dof), A.block(p_dof));
4486 *   }
4487 *  
4488 * @endcode
4489 *
4490 * we ensure here that any Dirichlet constraints
4491 * are distributed on the updated solution:
4492 *
4493 * @code
4494 *   constraints.distribute(newton_update);
4495 *  
4496 * @endcode
4497 *
4498 * Finally we solve for the pressure
4499 * update with the substitution:
4500 * @f$
4501 * d \widetilde{\mathsf{\mathbf{p}}}
4502 * =
4503 * \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{p}}^{-1}
4504 * \bigl[
4505 * \mathsf{\mathbf{F}}_{\widetilde{J}}
4506 * - \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{J}}
4507 * d \widetilde{\mathsf{\mathbf{J}}}
4508 * \bigr]
4509 * @f$
4510 *
4511 * @code
4512 *   {
4513 * @endcode
4514 *
4515 * @f$
4516 * \mathsf{\mathbf{A}}_{\widetilde{J}}
4517 * =
4518 * \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{J}}
4519 * d \widetilde{\mathsf{\mathbf{J}}}
4520 * @f$
4521 *
4522 * @code
4523 *   tangent_matrix.block(J_dof, J_dof)
4524 *   .vmult(A.block(J_dof), newton_update.block(J_dof));
4525 * @endcode
4526 *
4527 * @f$
4528 * \mathsf{\mathbf{A}}_{\widetilde{J}}
4529 * =
4530 * -\mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{J}}
4531 * d \widetilde{\mathsf{\mathbf{J}}}
4532 * @f$
4533 *
4534 * @code
4535 *   A.block(J_dof) *= -1.0;
4536 * @endcode
4537 *
4538 * @f$
4539 * \mathsf{\mathbf{A}}_{\widetilde{J}}
4540 * =
4541 * \mathsf{\mathbf{F}}_{\widetilde{J}}
4542 * -
4543 * \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{J}}
4544 * d \widetilde{\mathsf{\mathbf{J}}}
4545 * @f$
4546 *
4547 * @code
4548 *   A.block(J_dof) += system_rhs.block(J_dof);
4549 * @endcode
4550 *
4551 * and finally....
4552 * @f$
4553 * d \widetilde{\mathsf{\mathbf{p}}}
4554 * =
4555 * \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{p}}^{-1}
4556 * \bigl[
4557 * \mathsf{\mathbf{F}}_{\widetilde{J}}
4558 * - \mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{J}}
4559 * d \widetilde{\mathsf{\mathbf{J}}}
4560 * \bigr]
4561 * @f$
4562 *
4563 * @code
4564 *   tangent_matrix.block(p_dof, J_dof)
4565 *   .Tvmult(newton_update.block(p_dof), A.block(J_dof));
4566 *   }
4567 *  
4568 * @endcode
4569 *
4570 * We are now at the end, so we distribute all
4571 * constrained dofs back to the Newton
4572 * update:
4573 *
4574 * @code
4575 *   constraints.distribute(newton_update);
4576 *  
4577 *   timer.leave_subsection();
4578 *   }
4579 *   else
4580 *   {
4581 *   std::cout << " ------ " << std::flush;
4582 *  
4583 *   timer.enter_subsection("Linear solver");
4584 *   std::cout << " SLV " << std::flush;
4585 *  
4586 *   if (parameters.type_lin == "CG")
4587 *   {
4588 * @endcode
4589 *
4590 * Manual condensation of the dilatation and pressure fields on
4591 * a local level, and the subsequent post-processing, took quite a
4592 * bit of effort to achieve. To recap, we had to produce the
4593 * inverse matrix
4594 * @f$\mathsf{\mathbf{K}}_{\widetilde{p}\widetilde{J}}^{-1}@f$, which
4595 * was permanently written into the global tangent matrix, and
4596 * permanently modify @f$\mathsf{\mathbf{K}}_{uu}@f$ to produce
4597 * @f$\mathsf{\mathbf{K}}_{\textrm{con}}@f$. This required the
4598 * extraction and manipulation of local sub-blocks of the tangent
4599 * matrix. After solving for the displacement, the individual
4600 * matrix-vector operations required to solve for dilatation and
4601 * pressure had to be implemented by hand. Contrast this sequence
4602 * of steps to the much simpler and transparent implementation using
4603 * functionality provided by the LinearOperator class.
4604 *
4605
4606 *
4607 * For ease of later use, we define some aliases for
4608 * blocks in the RHS vector
4609 *
4610 * @code
4611 *   const Vector<double> &f_u = system_rhs.block(u_dof);
4612 *   const Vector<double> &f_p = system_rhs.block(p_dof);
4613 *   const Vector<double> &f_J = system_rhs.block(J_dof);
4614 *  
4615 * @endcode
4616 *
4617 * ... and for blocks in the Newton update vector.
4618 *
4619 * @code
4620 *   Vector<double> &d_u = newton_update.block(u_dof);
4621 *   Vector<double> &d_p = newton_update.block(p_dof);
4622 *   Vector<double> &d_J = newton_update.block(J_dof);
4623 *  
4624 * @endcode
4625 *
4626 * We next define some linear operators for the tangent matrix
4627 * sub-blocks. We will exploit the symmetry of the system, so not all
4628 * blocks are required.
4629 *
4630 * @code
4631 *   const auto K_uu =
4632 *   linear_operator(tangent_matrix.block(u_dof, u_dof));
4633 *   const auto K_up =
4634 *   linear_operator(tangent_matrix.block(u_dof, p_dof));
4635 *   const auto K_pu =
4636 *   linear_operator(tangent_matrix.block(p_dof, u_dof));
4637 *   const auto K_Jp =
4638 *   linear_operator(tangent_matrix.block(J_dof, p_dof));
4639 *   const auto K_JJ =
4640 *   linear_operator(tangent_matrix.block(J_dof, J_dof));
4641 *  
4642 * @endcode
4643 *
4644 * We then construct a LinearOperator that represents the inverse of the
4645 * (square block)
4646 * @f$\mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{p}}@f$. Since it is
4647 * diagonal (or, when a higher order ansatz is used, nearly
4648 * diagonal), a Jacobi preconditioner is suitable.
4649 *
4650 * @code
4651 *   PreconditionSelector<SparseMatrix<double>, Vector<double>>
4652 *   preconditioner_K_Jp_inv("jacobi");
4653 *   preconditioner_K_Jp_inv.use_matrix(
4654 *   tangent_matrix.block(J_dof, p_dof));
4655 *   ReductionControl solver_control_K_Jp_inv(
4656 *   static_cast<unsigned int>(tangent_matrix.block(J_dof, p_dof).m() *
4657 *   parameters.max_iterations_lin),
4658 *   1.0e-30,
4659 *   parameters.tol_lin);
4660 *   SolverSelector<Vector<double>> solver_K_Jp_inv;
4661 *   solver_K_Jp_inv.select("cg");
4662 *   solver_K_Jp_inv.set_control(solver_control_K_Jp_inv);
4663 *   const auto K_Jp_inv =
4664 *   inverse_operator(K_Jp, solver_K_Jp_inv, preconditioner_K_Jp_inv);
4665 *  
4666 * @endcode
4667 *
4668 * Now we can construct the transpose of
4669 * @f$\mathsf{\mathbf{K}}_{\widetilde{J}\widetilde{p}}^{-1}@f$ and a
4670 * linear operator that represents the condensed operations
4671 * @f$\overline{\mathsf{\mathbf{K}}}@f$ and
4672 * @f$\overline{\overline{\mathsf{\mathbf{K}}}}@f$ and the final
4673 * augmented matrix
4674 * @f$\mathsf{\mathbf{K}}_{\textrm{con}}@f$.
4675 * Note that the schur_complement() operator could also be of use
4676 * here, but for clarity and the purpose of demonstrating the
4677 * similarities between the formulation and implementation of the
4678 * linear solution scheme, we will perform these operations
4679 * manually.
4680 *
4681 * @code
4682 *   const auto K_pJ_inv = transpose_operator(K_Jp_inv);
4683 *   const auto K_pp_bar = K_Jp_inv * K_JJ * K_pJ_inv;
4684 *   const auto K_uu_bar_bar = K_up * K_pp_bar * K_pu;
4685 *   const auto K_uu_con = K_uu + K_uu_bar_bar;
4686 *  
4687 * @endcode
4688 *
4689 * Lastly, we define an operator for the inverse of the augmented stiffness
4690 * matrix, namely @f$\mathsf{\mathbf{K}}_{\textrm{con}}^{-1}@f$. Note
4691 * that the preconditioner for the augmented stiffness matrix is
4692 * different to the case when we use static condensation. In this
4693 * instance, the preconditioner is based on a non-modified
4694 * @f$\mathsf{\mathbf{K}}_{uu}@f$, while with the first approach we
4695 * actually modified the entries of this sub-block. However, since
4696 * @f$\mathsf{\mathbf{K}}_{\textrm{con}}@f$ and
4697 * @f$\mathsf{\mathbf{K}}_{uu}@f$ operate on the same space, it remains
4698 * adequate for this problem.
4699 *
4700 * @code
4701 *   PreconditionSelector<SparseMatrix<double>, Vector<double>>
4702 *   preconditioner_K_con_inv(parameters.preconditioner_type,
4703 *   parameters.preconditioner_relaxation);
4704 *   preconditioner_K_con_inv.use_matrix(
4705 *   tangent_matrix.block(u_dof, u_dof));
4706 *   ReductionControl solver_control_K_con_inv(
4707 *   static_cast<unsigned int>(tangent_matrix.block(u_dof, u_dof).m() *
4708 *   parameters.max_iterations_lin),
4709 *   1.0e-30,
4710 *   parameters.tol_lin);
4711 *   SolverSelector<Vector<double>> solver_K_con_inv;
4712 *   solver_K_con_inv.select("cg");
4713 *   solver_K_con_inv.set_control(solver_control_K_con_inv);
4714 *   const auto K_uu_con_inv =
4715 *   inverse_operator(K_uu_con,
4716 *   solver_K_con_inv,
4717 *   preconditioner_K_con_inv);
4718 *  
4719 * @endcode
4720 *
4721 * Now we are in a position to solve for the displacement field.
4722 * We can nest the linear operations, and the result is immediately
4723 * written to the Newton update vector.
4724 * It is clear that the implementation closely mimics the derivation
4725 * stated in the introduction.
4726 *
4727 * @code
4728 *   d_u =
4729 *   K_uu_con_inv * (f_u - K_up * (K_Jp_inv * f_J - K_pp_bar * f_p));
4730 *  
4731 *   timer.leave_subsection();
4732 *  
4733 * @endcode
4734 *
4735 * The operations needed to post-process for the dilatation and
4736 * pressure fields are just as easy to express.
4737 *
4738 * @code
4739 *   timer.enter_subsection("Linear solver postprocessing");
4740 *   std::cout << " PP " << std::flush;
4741 *  
4742 *   d_J = K_pJ_inv * (f_p - K_pu * d_u);
4743 *   d_p = K_Jp_inv * (f_J - K_JJ * d_J);
4744 *  
4745 *   lin_it = solver_control_K_con_inv.last_step();
4746 *   lin_res = solver_control_K_con_inv.last_value();
4747 *   }
4748 *   else if (parameters.type_lin == "Direct")
4749 *   {
4750 * @endcode
4751 *
4752 * Solve the full block system with
4753 * a direct solver. As it is relatively
4754 * robust, it may be immune to problems
4755 * arising from the zero
4756 * @f$\mathsf{\mathbf{K}}_{ \widetilde{J} \widetilde{J}}@f$
4757 * block.
4758 *
4759 * @code
4760 *   SparseDirectUMFPACK A_direct;
4761 *   A_direct.initialize(tangent_matrix);
4762 *   A_direct.vmult(newton_update, system_rhs);
4763 *  
4764 *   lin_it = 1;
4765 *   lin_res = 0.0;
4766 *  
4767 *   std::cout << " -- " << std::flush;
4768 *   }
4769 *   else
4770 *   Assert(false, ExcMessage("Linear solver type not implemented"));
4771 *  
4772 *   timer.leave_subsection();
4773 *  
4774 * @endcode
4775 *
4776 * Finally, we again ensure here that any Dirichlet
4777 * constraints are distributed on the updated solution:
4778 *
4779 * @code
4780 *   constraints.distribute(newton_update);
4781 *   }
4782 *  
4783 *   return std::make_pair(lin_it, lin_res);
4784 *   }
4785 *  
4786 * @endcode
4787 *
4788 *
4789 * <a name="step_44-Solidoutput_results"></a>
4790 * <h4>Solid::output_results</h4>
4791 * Here we present how the results are written to file to be viewed
4792 * using ParaView or VisIt. The method is similar to that shown in previous
4793 * tutorials so will not be discussed in detail.
4794 *
4795
4796 *
4797 * @note As of 2023, Visit 3.3.3 can still not deal with higher-order cells.
4798 * Rather, it simply reports that there is no data to show. To view the
4799 * results of this program with Visit, you will want to comment out the
4800 * line that sets `output_flags.write_higher_order_cells = true;`. On the
4801 * other hand, ParaView is able to deal with higher-order
4802 * cells just fine.
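 *
 * As a concrete illustration of that note, here is a minimal sketch (not
 * part of the tutorial program) of the flag setup with higher-order output
 * switched off, as one would use for VisIt; `data_out` refers to the
 * DataOut object created in the function shown below.
 *
 * @code
 *   DataOutBase::VtkFlags output_flags;
 *   // Keep higher-order cell output disabled so that VisIt 3.3.3 can read
 *   // the file; set it to true (as is done below) when using ParaView.
 *   output_flags.write_higher_order_cells = false;
 *   data_out.set_flags(output_flags);
 * @endcode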
4803 *
4804 * @code
4805 *   template <int dim>
4806 *   void Solid<dim>::output_results() const
4807 *   {
4808 *   DataOut<dim> data_out;
4809 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
4810 *   data_component_interpretation(
4811 *   dim, DataComponentInterpretation::component_is_part_of_vector);
4812 *   data_component_interpretation.push_back(
4813 *   DataComponentInterpretation::component_is_scalar);
4814 *   data_component_interpretation.push_back(
4815 *   DataComponentInterpretation::component_is_scalar);
4816 *  
4817 *   std::vector<std::string> solution_name(dim, "displacement");
4818 *   solution_name.emplace_back("pressure");
4819 *   solution_name.emplace_back("dilatation");
4820 *  
4821 *   DataOutBase::VtkFlags output_flags;
4822 *   output_flags.write_higher_order_cells = true;
4823 *   output_flags.physical_units["displacement"] = "m";
4824 *   data_out.set_flags(output_flags);
4825 *  
4826 *   data_out.attach_dof_handler(dof_handler);
4827 *   data_out.add_data_vector(solution_n,
4828 *   solution_name,
4829 *   DataOut<dim>::type_dof_data,
4830 *   data_component_interpretation);
4831 *  
4832 * @endcode
4833 *
4834 * Since we are dealing with a large deformation problem, it would be
4835 * nice to display the result on a displaced grid! The MappingQEulerian
4836 * class linked with the DataOut class provides an interface through which this
4837 * can be achieved without physically moving the grid points in the
4838 * Triangulation object ourselves. We first need to copy the solution to
4839 * a temporary vector and then create the Eulerian mapping. We also
4840 * specify the polynomial degree to the DataOut object in order to produce
4841 * a more refined output data set when higher order polynomials are used.
4842 *
4843 * @code
4844 *   Vector<double> soln(solution_n.size());
4845 *   for (unsigned int i = 0; i < soln.size(); ++i)
4846 *   soln(i) = solution_n(i);
4847 *   const MappingQEulerian<dim> q_mapping(degree, dof_handler, soln);
4848 *   data_out.build_patches(q_mapping, degree);
4849 *  
4850 *   std::ofstream output("solution-" + std::to_string(dim) + "d-" +
4851 *   std::to_string(time.get_timestep()) + ".vtu");
4852 *   data_out.write_vtu(output);
4853 *   }
4854 *  
4855 *   } // namespace Step44
4856 *  
4857 *  
4858 * @endcode
4859 *
4860 *
4861 * <a name="step_44-Mainfunction"></a>
4862 * <h3>Main function</h3>
4863 * Lastly we provide the main driver function which appears no different to
4864 * the other tutorials.
4865 *
4866 * @code
4867 *   int main()
4868 *   {
4869 *   using namespace Step44;
4870 *  
4871 *   try
4872 *   {
4873 *   const unsigned int dim = 3;
4874 *   Solid<dim> solid("parameters.prm");
4875 *   solid.run();
4876 *   }
4877 *   catch (std::exception &exc)
4878 *   {
4879 *   std::cerr << std::endl
4880 *   << std::endl
4881 *   << "----------------------------------------------------"
4882 *   << std::endl;
4883 *   std::cerr << "Exception on processing: " << std::endl
4884 *   << exc.what() << std::endl
4885 *   << "Aborting!" << std::endl
4886 *   << "----------------------------------------------------"
4887 *   << std::endl;
4888 *  
4889 *   return 1;
4890 *   }
4891 *   catch (...)
4892 *   {
4893 *   std::cerr << std::endl
4894 *   << std::endl
4895 *   << "----------------------------------------------------"
4896 *   << std::endl;
4897 *   std::cerr << "Unknown exception!" << std::endl
4898 *   << "Aborting!" << std::endl
4899 *   << "----------------------------------------------------"
4900 *   << std::endl;
4901 *   return 1;
4902 *   }
4903 *  
4904 *   return 0;
4905 *   }
4906 * @endcode
4907<a name="step_44-Results"></a><h1>Results</h1>
4908
4909
4912
4916Both schemes demonstrate good convergence properties for varying values of the load parameter @f$p/p_0@f$.
4917The results agree with those in the literature.
4918The lower-order formulation tends to overestimate the displacement for low levels of refinement,
4919while the higher-order interpolation scheme underestimates it, but to a lesser degree.
4920This benchmark, and a series of others not shown here, give us confidence that the code is working
4921as it should.
4922
4923<table align="center" class="tutorial" cellspacing="3" cellpadding="3">
4924 <tr>
4925 <td align="center">
4926 <img src="https://www.dealii.org/images/steps/developer/step-44.Q1-P0_convergence.png" alt="">
4927 <p align="center">
4928 Convergence of the @f$Q_1-DGPM_0-DGPM_0@f$ formulation in 3-d.
4929 </p>
4930 </td>
4931 <td align="center">
4932 <img src="https://www.dealii.org/images/steps/developer/step-44.Q2-P1_convergence.png" alt="">
4933 <p align="center">
4934 Convergence of the @f$Q_2-DGPM_1-DGPM_1@f$ formulation in 3-d.
4935 </p>
4936 </td>
4937 </tr>
4938</table>
4939
4940
4941A typical screen output generated by running the problem is shown below.
4943It is clear that, using the Newton-Raphson method, quadratic convergence of the solution is obtained.
4944Solution convergence is achieved within 5 Newton increments for all time-steps.
4946
4947@code
4948Grid:
4949 Reference volume: 1e-09
4951 Number of active cells: 64
4952 Number of degrees of freedom: 2699
4953 Setting up quadrature point data...
4954
4955Timestep 1 @ 0.1s
4959 0 ASM_R ASM_K CST ASM_SC SLV PP UQPH | 786 2.118e-06 1.000e+00 1.000e+00 0.000e+00 0.000e+00 1.000e+00 1.000e+00 1.000e+00 1.000e+00
4960 1 ASM_R ASM_K CST ASM_SC SLV PP UQPH | 552 1.031e-03 8.563e-02 8.563e-02 9.200e-13 3.929e-08 1.060e-01 3.816e-02 1.060e-01 1.060e-01
4961 2 ASM_R ASM_K CST ASM_SC SLV PP UQPH | 667 5.602e-06 2.482e-03 2.482e-03 3.373e-15 2.982e-10 2.936e-03 2.053e-04 2.936e-03 2.936e-03
4962 3 ASM_R ASM_K CST ASM_SC SLV PP UQPH | 856 6.469e-10 2.129e-06 2.129e-06 2.245e-19 1.244e-13 1.887e-06 7.289e-07 1.887e-06 1.887e-06
4963 4 ASM_R CONVERGED!
4966Displacement: 7.289e-07
4967Force: 2.451e-10
4968Dilatation: 1.353e-07
4969v / V_0: 1.000e-09 / 1.000e-09 = 1.000e+00
4970
4971
4972[...]
4973
4974Timestep 10 @ 1.000e+00s
4978 0 ASM_R ASM_K CST ASM_SC SLV PP UQPH | 874 2.358e-06 1.000e+00 1.000e+00 1.000e+00 1.000e+00 1.000e+00 1.000e+00 1.000e+00 1.000e+00
4979 1 ASM_R ASM_K CST ASM_SC SLV PP UQPH | 658 2.942e-04 1.544e-01 1.544e-01 1.208e+13 1.855e+06 6.014e-02 7.398e-02 6.014e-02 6.014e-02
4980 2 ASM_R ASM_K CST ASM_SC SLV PP UQPH | 790 2.206e-06 2.908e-03 2.908e-03 7.302e+10 2.067e+03 2.716e-03 1.433e-03 2.716e-03 2.717e-03
4981 3 ASM_R ASM_K CST ASM_SC SLV PP UQPH | 893 2.374e-09 1.919e-06 1.919e-06 4.527e+07 4.100e+00 1.672e-06 6.842e-07 1.672e-06 1.672e-06
4982 4 ASM_R CONVERGED!
4985Displacement: 6.842e-07
4986Force: 8.995e-10
4987Dilatation: 1.528e-06
4988v / V_0: 1.000e-09 / 1.000e-09 = 1.000e+00
4989@endcode
4990
4991
4992
4994For a case with a large number of degrees-of-freedom (i.e. a high level of refinement), a typical output of the Timer is given below.
4995Much of the code in the tutorial has been developed based on the optimizations described,
4996discussed and demonstrated in @ref step_18 "step-18" and others.
4997With over 93% of the time being spent in the linear solver, it is obvious that it may be necessary
4998to invest in a better solver for large three-dimensional problems.
5001
5002
5003@code
5004+---------------------------------------------+------------+------------+
5005| Total wallclock time elapsed since start | 9.874e+02s | |
5006| | | |
5007| Section | no. calls | wall time | % of total |
5008+---------------------------------+-----------+------------+------------+
5009| Assemble system right-hand side | 53 | 1.727e+00s | 1.75e-01% |
5010| Assemble tangent matrix | 43 | 2.707e+01s | 2.74e+00% |
5011| Linear solver | 43 | 9.248e+02s | 9.37e+01% |
5012| Linear solver postprocessing | 43 | 2.743e-02s | 2.78e-03% |
5013| Perform static condensation | 43 | 1.437e+01s | 1.46e+00% |
5014| Setup system | 1 | 3.897e-01s | 3.95e-02% |
5015| Update QPH data | 43 | 5.770e-01s | 5.84e-02% |
5016+---------------------------------+-----------+------------+------------+
5017@endcode
5018
5019
5020We then used ParaView to visualize the results for two cases.
5025
5026
5027The first case below shows that the coarse spatial discretization, coupled with large displacements, leads to a low-quality solution
5028(the loading ratio is @f$p/p_0=80@f$).
5029Additionally, the pressure difference between elements is very large.
5031However, the solution remains well-behaved
5032even in this poorly discretised case.
5033The final vertical displacement of the tracked node on the top surface of the block is still within 12.5% of the converged solution.
5034The pressure solution is very coarse and has large jumps between adjacent cells.
5035It can be seen that the material nearest the applied traction undergoes compression, while the outer extents
5036of the domain are in a state of expansion.
5041This stems from the definition of the volumetric strain energy function
5042and its derivative with respect to the dilatation.
5043
5044
5045<table align="center" class="tutorial" cellspacing="3" cellpadding="3">
5046 <tr>
5047 <td align="center">
5048 <img src="https://www.dealii.org/images/steps/developer/step-44.Q1-P0_gr_1_p_ratio_80-displacement.png" alt="">
5049 <p align="center">
5050 Z-displacement solution for the 3-d problem.
5051 </p>
5052 </td>
5053 <td align="center">
5054 <img src="https://www.dealii.org/images/steps/developer/step-44.Q1-P0_gr_1_p_ratio_80-pressure.png" alt="">
5055 <p align="center">
5056 Pressure solution for the 3-d problem.
5057 </p>
5058 </td>
5059 <td align="center">
5060 <img src="https://www.dealii.org/images/steps/developer/step-44.Q1-P0_gr_1_p_ratio_80-dilatation.png" alt="">
5061 <p align="center">
5062 Dilatation solution for the 3-d problem.
5063 </p>
5064 </td>
5065 </tr>
5066</table>
5067
5068Combining spatial refinement and a higher-order interpolation scheme results in a high-quality solution.
5069Three grid refinements coupled with a @f$Q_2-DGPM_1-DGPM_1@f$ formulation produces
5070a result that clearly captures the mechanics of the problem; even under large deformations
5076the solution remains accurate.
5077The pressure field is captured in far greater detail than before,
5079and the element-wise linear pressure approximation allows a visualization
5080of the pressure at the sub-element scale.
5083
5084
5085
5086<table align="center" class="tutorial" cellspacing="3" cellpadding="3">
5087 <tr>
5088 <td align="center">
5089 <img src="https://www.dealii.org/images/steps/developer/step-44.Q2-P1_gr_3_p_ratio_80-displacement.png" alt="">
5090 <p align="center">
5091 Z-displacement solution for the 3-d problem.
5092 </p>
5093 </td>
5094 <td align="center">
5095 <img src="https://www.dealii.org/images/steps/developer/step-44.Q2-P1_gr_3_p_ratio_80-pressure.png" alt="">
5096 <p align="center">
5097 Pressure solution for the 3-d problem.
5098 </p>
5099 </td>
5100 <td align="center">
5101 <img src="https://www.dealii.org/images/steps/developer/step-44.Q2-P1_gr_3_p_ratio_80-dilatation.png" alt="">
5102 <p align="center">
5103 Dilatation solution for the 3-d problem.
5104 </p>
5105 </td>
5106 </tr>
5107</table>
5108
5113The formulation also enforces near-incompressibility well, with
5114less than 0.01% volume change for a Poisson's ratio of 0.4999.
5115
5116In terms of run-time, the @f$Q_2-DGPM_1-DGPM_1@f$ formulation tends to be more computationally expensive
5117than the @f$Q_1-DGPM_0-DGPM_0@f$ for a similar number of degrees-of-freedom
5118(produced by adding an extra grid refinement level for the lower-order interpolation).
5119This is shown in the graph below for a batch of tests run consecutively on a single 4-core (8-thread) machine.
5120The increase in computational time for the higher-order method is likely due to
5121the increased band-width required for the higher-order elements.
5122As previously mentioned, the use of a better solver and preconditioner may mitigate the
5123expense of using a higher-order formulation.
5124It was observed that for the given problem using the multithreaded Jacobi preconditioner can reduce the
5125computational runtime by up to 72% (for the worst case being a higher-order formulation with a large number
5126of degrees-of-freedom) in comparison to the single-thread SSOR preconditioner.
5127However, it is the author's experience that the Jacobi method of preconditioning may not be suitable for
5128some finite-strain problems involving alternative constitutive models.
5129
5130
5131<table align="center" class="tutorial" cellspacing="3" cellpadding="3">
5132 <tr>
5133 <td align="center">
5134 <img src="https://www.dealii.org/images/steps/developer/step-44.Normalised_runtime.png" alt="">
5135 <p align="center">
5136 Runtime on a 4-core machine, normalised against the lowest grid resolution @f$Q_1-DGPM_0-DGPM_0@f$ solution that utilised a SSOR preconditioner.
5137 </p>
5138 </td>
5139 </tr>
5140</table>
5141
5142
5143Lastly, results for the displacement solution for the 2-d problem are showcased below for
5144two different levels of grid refinement.
5147
5148
5149<table align="center" class="tutorial" cellspacing="3" cellpadding="3">
5150 <tr>
5151 <td align="center">
5152 <img src="https://www.dealii.org/images/steps/developer/step-44.2d-gr_2.png" alt="">
5153 <p align="center">
5154 Y-displacement solution in 2-d for 2 global grid refinement levels.
5155 </p>
5156 </td>
5157 <td align="center">
5158 <img src="https://www.dealii.org/images/steps/developer/step-44.2d-gr_5.png" alt="">
5159 <p align="center">
5160 Y-displacement solution in 2-d for 5 global grid refinement levels.
5161 </p>
5162 </td>
5163 </tr>
5164</table>
5165
5166<a name="step-44-extensions"></a>
5167<a name="step_44-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
5168
5169
5170There are a number of obvious extensions for this work:
5171
5172- Firstly, an additional constraint could be added to the free-energy
5173 function in order to enforce a high degree of incompressibility in
5174 materials. An additional Lagrange multiplier would be introduced,
5175 but this could most easily be dealt with using the principle of
5176 augmented Lagrange multipliers. This is demonstrated in <em>Simo and
5177 Taylor (1991) </em>.
5178- The constitutive relationship used in this
5179 model is relatively basic. It may be beneficial to split the material
5180 class into two separate classes, one dealing with the volumetric
5181 response and the other with the isochoric response, in order to
5182 implement other material models more easily.
5183- The program has been developed for solving problems on single-node
5184 multicore machines. With a little effort, the program could be
5185 extended to a large-scale computing environment through the use of
5186 PETSc or Trilinos, using a similar technique to that demonstrated in
5190 @ref step_40 "step-40". This would mostly involve changes to the setup, assembly,
5191 <code>PointHistory</code> and linear solver routines.
5192- The Newton-Raphson scheme implemented here is not unconditionally
5193 stable for highly
5196 nonlinear problems. It is possible to add a linesearch algorithm to
5197 limit the step size within a Newton increment to ensure optimum
5198 convergence. It may also be necessary to use a load limiting method,
5199 such as the Riks method, to solve unstable problems involving
5200 geometric instabilities such as buckling and snap-through.
5201- Many physical problems involve contact. It is possible to include
5202 the effect of frictional or frictionless contact between objects
5203 into this program. This would involve the addition of an extra term
5204 in the free-energy functional and therefore an addition to the
5205 assembly routine. One would also need to manage the contact problem
5206 itself, for example to prevent penetration of the contacting bodies;
5207 for this one could
5208 use active set methods such as the one used in @ref step_41 "step-41".
5209- The complete condensation procedure using LinearOperators has been
5210 coded into the linear solver routine. This could also have been
5211 achieved through the application of the schur_complement()
5212 operator to condense out one or more of the fields in a more
5213 automated manner; see the sketch after this list.
5214- Finally, adaptive mesh refinement, as demonstrated in @ref step_6 "step-6" and
5215 @ref step_18 "step-18", could provide additional solution accuracy.
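
To illustrate the second-to-last point above, here is a minimal sketch, not part of the tutorial program, of how schur_complement() together with linear_operator() and inverse_operator() could be used to condense out one field of a generic two-field block system. The block names `A`, `B`, `C`, `D`, the helper function name and all solver settings are assumptions made purely for this example.

@code
#include <deal.II/lac/linear_operator.h>
#include <deal.II/lac/packaged_operation.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/schur_complement.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// Condense out the first field of a generic block system
//   [ A  B ] [x]   [f]
//   [ C  D ] [y] = [g]
// by forming the Schur complement S = D - C A^{-1} B as a LinearOperator,
// solving S y = g - C A^{-1} f, and recovering x by back-substitution.
void solve_by_schur_complement(const SparseMatrix<double> &A,
                               const SparseMatrix<double> &B,
                               const SparseMatrix<double> &C,
                               const SparseMatrix<double> &D,
                               const Vector<double>       &f,
                               const Vector<double>       &g,
                               Vector<double>             &x,
                               Vector<double>             &y)
{
  // Wrap the matrix blocks so that they can be composed symbolically.
  const auto op_A = linear_operator(A);
  const auto op_B = linear_operator(B);
  const auto op_C = linear_operator(C);
  const auto op_D = linear_operator(D);

  // An inverse of A realized through an iterative solver.
  ReductionControl                         control_A(1000, 1.0e-30, 1.0e-8);
  SolverCG<Vector<double>>                 solver_A(control_A);
  PreconditionJacobi<SparseMatrix<double>> preconditioner_A;
  preconditioner_A.initialize(A);
  const auto op_A_inv = inverse_operator(op_A, solver_A, preconditioner_A);

  // The condensed operator S = D - C A^{-1} B.
  const auto op_S = schur_complement(op_A_inv, op_B, op_C, op_D);

  // Solve the condensed system for y ...
  ReductionControl         control_S(1000, 1.0e-30, 1.0e-8);
  SolverCG<Vector<double>> solver_S(control_S);
  PreconditionIdentity     preconditioner_S;
  const auto op_S_inv = inverse_operator(op_S, solver_S, preconditioner_S);
  y = op_S_inv * (g - op_C * (op_A_inv * f));

  // ... and recover x by back-substitution.
  x = op_A_inv * (f - op_B * y);
}
@endcode

Note that each application of op_S inside the CG iteration triggers an inner solve with op_A_inv, so in practice one would pair this approach with a good preconditioner for the Schur complement.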
5216 *
5217 *
5218<a name="step_44-PlainProg"></a>
5219<h1> The plain program</h1>
5220@include "step-44.cc"
5221*/