step-79.h
1244 *  
1245 *   for (unsigned int i = 0; i < dim; ++i)
1246 *   {
1247 *   for (unsigned int k = 0; k < dim; ++k)
1248 *   {
1253 *   }
1254 *   }
1255 *  
1256 *   /* Coupling for slack variables */
1263 *  
1270 *   }
1271 *  
1272 * @endcode
1273 *
1274 * Before we can create the sparsity pattern, we also have to
1275 * set up constraints. Since this program does not adaptively
* refine the mesh, the only constraint we need is a global one that
* couples all of the density variables, enforcing that the total
* volume of material stays constant from one iterate to the next.
1280 *
1281 * @code
1282 *   const ComponentMask density_mask =
1283 *   fe.component_mask(ValueExtractors::densities<dim>);
1284 *   const IndexSet density_dofs =
1285 *   DoFTools::extract_dofs(dof_handler, density_mask);
1286 *  
*   const types::global_dof_index last_density_dof =
1288 *   density_dofs.nth_index_in_set(density_dofs.n_elements() - 1);
1289 *   constraints.clear();
1290 *   {
1291 *   std::vector<std::pair<types::global_dof_index, double>>
1292 *   constraint_entries;
1293 *   constraint_entries.reserve(density_dofs.n_elements() - 1);
1294 *   for (const types::global_dof_index dof_index : density_dofs)
1295 *   if (dof_index != last_density_dof)
1296 *   constraint_entries.emplace_back(dof_index, -1.);
1297 *  
1298 *   constraints.add_constraint(last_density_dof, constraint_entries, 0.);
1299 *   }
1300 *   constraints.close();
1301 *  
1302 * @endcode
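*
* (An aside on what this constraint means -- this is our reading, not
* something spelled out in the code itself: since the linear system
* assembled below is solved for the Newton *update*, constraining the
* last density degree of freedom to equal the negative sum of all the
* others amounts to requiring
* @f[
*   \sum_i \Delta\rho_i = 0,
* @f]
* and because all cells of the uniformly refined mesh used here have the
* same size, the total volume of material therefore does not change from
* one Newton iterate to the next.)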
1303 *
1304 * We can now finally create the sparsity pattern for the
* matrix, taking into account which variables couple with
1306 * which other variables, and the constraints we have on the
1307 * density.
1308 *
1309 * @code
1310 *   DoFTools::make_sparsity_pattern(dof_handler, coupling, dsp, constraints);
1311 *  
1312 * @endcode
1313 *
* The only part of the matrix we have not yet dealt with is the
1315 * filter matrix and its transpose. These are non-local
* (integral) operators for which we will later need to loop
1318 * over all cells and couple the unfiltered density on this
1319 * cell to all filtered densities of neighboring cells that
1320 * are less than a threshold distance away, and the other way
* around. For the moment, we only need
1322 * the sparsity pattern that would correspond to this kind of
* matrix, so we perform the equivalent loop and where later
1324 * on we would write into an entry of the matrix, we now
1325 * simply add an entry to the sparsity pattern:
1326 *
1327 * @code
1328 *   for (const auto &cell : dof_handler.active_cell_iterators())
1329 *   {
1330 *   const unsigned int i = cell->active_cell_index();
1331 *   for (const auto &check_cell : find_relevant_neighbors(cell))
1332 *   {
1333 *   const double distance =
1334 *   cell->center().distance(check_cell->center());
1335 *   if (distance < filter_r)
1336 *   {
1337 *   dsp
*   .block(SolutionBlocks::unfiltered_density,
*   SolutionBlocks::unfiltered_density_multiplier)
1340 *   .add(i, check_cell->active_cell_index());
1341 *   dsp
*   .block(SolutionBlocks::unfiltered_density_multiplier,
*   SolutionBlocks::unfiltered_density)
1344 *   .add(i, check_cell->active_cell_index());
1345 *   }
1346 *   }
1347 *   }
1348 *  
1349 * @endcode
1350 *
1351 * Having so generated the "dynamic" sparsity pattern, we can
1352 * finally copy it to the structure that is used to associate
1353 * matrices with a sparsity pattern. Because the sparsity
1354 * pattern is large and complex, we also output it into a file
* that can be visualized with gnuplot,
1356 * for "visual debugging".
1357 *
1358 * @code
1359 *   sparsity_pattern.copy_from(dsp);
1360 *  
1361 *   std::ofstream out("sparsity.plt");
1362 *   sparsity_pattern.print_gnuplot(out);
1363 *  
1364 *   system_matrix.reinit(sparsity_pattern);
1365 *  
1366 *  
1367 * @endcode
1368 *
1369 * What is left is to correctly size the various vectors and
1370 * their blocks, as well as setting initial guesses for some
1371 * of the components of the (nonlinear) solution vector. We
1372 * here use the symbolic component names for individual blocks
1373 * of the solution vector and, for brevity, use the same trick
1374 * with `using namespace` as above:
1375 *
1376 * @code
*   nonlinear_solution.reinit(block_sizes);
1378 *   system_rhs.reinit(block_sizes);
1379 *  
1380 *   {
1381 *   using namespace SolutionBlocks;
1385 *   .add(density_ratio);
1390 *   }
1391 *   }
1392 *  
1393 *  
1394 * @endcode
1395 *
1396 *
1397 * <a name="step_79-Creatingthefiltermatrix"></a>
1398 * <h3>Creating the filter matrix</h3>
1399 *
1400
1401 *
1402 * Next up, a function that is used once at the beginning of the
* program: it creates the filter matrix @f$H@f$ so that the filtered
* density equals @f$H@f$ applied to the unfiltered density. The creation
1405 * of this matrix is non-trivial, and it is used in every
* iteration, so rather than rebuilding it each time, we create it
* once and keep it around.
1408 *
1409
1410 *
* The way this matrix is computed follows the outline of the loop used
1412 * already to form its sparsity pattern. We repeat this process here
1413 * for the sparsity pattern of this separately formed matrix, and
* then actually build the matrix itself. You may want to check the
1415 * definition of this matrix in the introduction to this program.
1416 *
1417 * @code
1418 *   template <int dim>
*   void SANDTopOpt<dim>::setup_filter_matrix()
1420 *   {
1421 * @endcode
1422 *
1423 * The sparsity pattern of the filter has already been determined
1424 * and implemented in the setup_system() function. We copy the
* structure from the appropriate block and use it again here.
1426 *
1427
1428 *
1429 *
1430 * @code
1431 *   filter_sparsity_pattern.copy_from(
1432 *   sparsity_pattern.block(SolutionBlocks::unfiltered_density,
*   SolutionBlocks::unfiltered_density_multiplier));
*   filter_matrix.reinit(filter_sparsity_pattern);
1435 *
1436 * @endcode
1437 *
1438 * Having so built the sparsity pattern, we now re-do all of
1439 * these loops to actually compute the necessary values of the
1440 * matrix entries:
1441 *
1442
1443 *
1444 *
1445 * @code
1446 *   for (const auto &cell : dof_handler.active_cell_iterators())
1447 *   {
1448 *   const unsigned int i = cell->active_cell_index();
1449 *   for (const auto &check_cell : find_relevant_neighbors(cell))
1450 *   {
1451 *   const double distance =
1452 *   cell->center().distance(check_cell->center());
1453 *   if (distance < filter_r)
1454 *   {
1455 *   filter_matrix.add(i,
1456 *   check_cell->active_cell_index(),
1457 *   filter_r - distance);
1458 *  
1459 *   }
1460 *   }
1461 *   }
1462 *  
1463 * @endcode
1464 *
1465 * The final step is to normalize the matrix so that for each
1466 * row, the sum of entries equals one.
1467 *
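* Written out (this merely restates the two loops above and below), the
* resulting filter matrix has entries
* @f[
*   H_{ij} = \frac{\max(0, r - d_{ij})}{\sum_k \max(0, r - d_{ik})},
* @f]
* where @f$d_{ij}@f$ is the distance between the centers of cells @f$i@f$
* and @f$j@f$, and @f$r@f$ is the filter radius `filter_r`.
*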
1468 * @code
1469 *   for (unsigned int i = 0; i < filter_matrix.m(); ++i)
1470 *   {
1471 *   double denominator = 0;
1472 *   for (SparseMatrix<double>::iterator iter = filter_matrix.begin(i);
1473 *   iter != filter_matrix.end(i);
1474 *   iter++)
1475 *   denominator = denominator + iter->value();
1476 *   for (SparseMatrix<double>::iterator iter = filter_matrix.begin(i);
1477 *   iter != filter_matrix.end(i);
1478 *   iter++)
1479 *   iter->value() = iter->value() / denominator;
1480 *   }
1481 *   }
1482 *  
1483 * @endcode
1484 *
1485 * This function is used for building the filter matrix. We create a set of
1486 * all the cell iterators within a certain radius of the cell that is input.
1487 * These are the neighboring cells that will be relevant for the filter.
1488 *
1489 * @code
1490 *   template <int dim>
1491 *   std::set<typename Triangulation<dim>::cell_iterator>
*   SANDTopOpt<dim>::find_relevant_neighbors(
1493 *   typename Triangulation<dim>::cell_iterator cell) const
1494 *   {
1495 *   std::set<unsigned int> neighbor_ids;
1496 *   std::set<typename Triangulation<dim>::cell_iterator> cells_to_check;
1497 *  
1498 *   neighbor_ids.insert(cell->active_cell_index());
1499 *   cells_to_check.insert(cell);
1500 *  
1501 *   bool new_neighbors_found;
1502 *   do
1503 *   {
1504 *   new_neighbors_found = false;
1505 *   for (const auto &check_cell :
1506 *   std::vector<typename Triangulation<dim>::cell_iterator>(
*   cells_to_check.begin(), cells_to_check.end()))
1508 *   {
1509 *   for (const auto n : check_cell->face_indices())
1510 *   {
1511 *   if (!(check_cell->face(n)->at_boundary()))
1512 *   {
1513 *   const auto &neighbor = check_cell->neighbor(n);
1514 *   const double distance =
1515 *   cell->center().distance(neighbor->center());
1516 *   if ((distance < filter_r) &&
1517 *   !(neighbor_ids.count(neighbor->active_cell_index())))
1518 *   {
1519 *   cells_to_check.insert(neighbor);
1520 *   neighbor_ids.insert(neighbor->active_cell_index());
1521 *   new_neighbors_found = true;
1522 *   }
1523 *   }
1524 *   }
1525 *   }
1526 *   }
1527 *   while (new_neighbors_found);
1528 *   return cells_to_check;
1529 *   }
1530 *  
1531 * @endcode
1532 *
1533 *
1534 * <a name="step_79-AssemblingtheNewtonmatrix"></a>
1535 * <h3>Assembling the Newton matrix</h3>
1536 *
1537
1538 *
* Whereas the previous function builds a matrix that stays the same as
1540 * long as the mesh does not change (which we don't do anyway in
1541 * this program), the next function builds the matrix to be solved
1542 * in each iteration. This is where the magic happens. The components
1543 * of the system of linear equations describing Newton's method for
* finding the solution of the KKT conditions are implemented here.
1545 *
1546
1547 *
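* Schematically (this is only shorthand for the formulas in the
* introduction, not a new derivation), each call to this function sets up
* the linear system
* @f[
*   \frac{\partial R}{\partial x}(x^k)\, \Delta x^k = -R(x^k),
* @f]
* where @f$R(\cdot)@f$ collects all of the KKT residuals and @f$x^k@f$ is
* the current guess stored in `nonlinear_solution`; the loops below fill
* the matrix on the left, while the right hand side is computed by
* `calculate_test_rhs()`.
*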
1548 * The top of the function is as in most of these functions and just
* sets up the objects and scratch vectors needed for the assembly
* below. All of this should
1551 * look familiar, though somewhat lengthier, if you've previously
1552 * looked at @ref step_22 "step-22".
1553 *
1554 * @code
1555 *   template <int dim>
1556 *   void SANDTopOpt<dim>::assemble_system()
1557 *   {
1558 *   TimerOutput::Scope t(timer, "assembly");
1559 *  
1560 *   system_matrix = 0;
1561 *   system_rhs = 0;
1562 *  
1563 *  
1564 *   const MappingQ<dim> mapping(1);
1565 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1566 *   const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
1567 *   FEValues<dim> fe_values(mapping,
1568 *   fe,
1569 *   quadrature_formula,
1570 *   update_values | update_gradients |
1571 *   update_quadrature_points | update_JxW_values);
1572 *   FEFaceValues<dim> fe_face_values(mapping,
1573 *   fe,
1574 *   face_quadrature_formula,
1575 *   update_values | update_quadrature_points |
1576 *   update_normal_vectors |
1577 *   update_JxW_values);
1578 *  
1579 *   const unsigned int dofs_per_cell = fe.dofs_per_cell;
1580 *   const unsigned int n_q_points = quadrature_formula.size();
1581 *  
1582 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1583 *   Vector<double> dummy_cell_rhs(dofs_per_cell);
1584 *  
1585 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1586 *  
1587 *   std::vector<double> lambda_values(n_q_points);
1588 *   std::vector<double> mu_values(n_q_points);
1589 *   const Functions::ConstantFunction<dim> lambda(1.);
1590 *   const Functions::ConstantFunction<dim> mu(1.);
1591 *   std::vector<Tensor<1, dim>> rhs_values(n_q_points);
1592 *  
1593 * @endcode
1594 *
1595 * At this point, we apply the filter to the unfiltered
1596 * density, and apply the adjoint (transpose) operation to the
1597 * unfiltered density multiplier, both to the current best
1598 * guess for the nonlinear solution. We use this later to tell
1599 * us how far off our filtered density is from the filter
1600 * applied to the unfiltered density. That is because while at
1601 * the solution of the nonlinear problem, we have
1602 * @f$\rho=H\varrho@f$, but at intermediate iterations, we in
1603 * general have @f$\rho^k\neq H\varrho^k@f$ and the "residual"
1604 * @f$\rho^k-H\varrho^k@f$ will then appear as the right hand side
1605 * of one of the Newton update equations that we compute
1606 * below.
1607 *
1608 * @code
1609 *   BlockVector<double> filtered_unfiltered_density_solution =
1610 *   nonlinear_solution;
1611 *   BlockVector<double> filter_adjoint_unfiltered_density_multiplier_solution =
1612 *   nonlinear_solution;
1613 *  
1614 *   filter_matrix.vmult(filtered_unfiltered_density_solution.block(
1615 *   SolutionBlocks::unfiltered_density),
1616 *   nonlinear_solution.block(
1617 *   SolutionBlocks::unfiltered_density));
1618 *   filter_matrix.Tvmult(
1619 *   filter_adjoint_unfiltered_density_multiplier_solution.block(
1620 *   SolutionBlocks::unfiltered_density_multiplier),
1621 *   nonlinear_solution.block(SolutionBlocks::unfiltered_density_multiplier));
1622 *  
1623 *  
1624 *   std::vector<double> old_density_values(n_q_points);
1625 *   std::vector<Tensor<1, dim>> old_displacement_values(n_q_points);
1626 *   std::vector<double> old_displacement_divs(n_q_points);
1627 *   std::vector<SymmetricTensor<2, dim>> old_displacement_symmgrads(n_q_points);
1628 *   std::vector<Tensor<1, dim>> old_displacement_multiplier_values(n_q_points);
1629 *   std::vector<double> old_displacement_multiplier_divs(n_q_points);
1630 *   std::vector<SymmetricTensor<2, dim>> old_displacement_multiplier_symmgrads(
1631 *   n_q_points);
1632 *   std::vector<double> old_lower_slack_multiplier_values(n_q_points);
1633 *   std::vector<double> old_upper_slack_multiplier_values(n_q_points);
1634 *   std::vector<double> old_lower_slack_values(n_q_points);
1635 *   std::vector<double> old_upper_slack_values(n_q_points);
1636 *   std::vector<double> old_unfiltered_density_values(n_q_points);
1637 *   std::vector<double> old_unfiltered_density_multiplier_values(n_q_points);
1638 *   std::vector<double> filtered_unfiltered_density_values(n_q_points);
1639 *   std::vector<double> filter_adjoint_unfiltered_density_multiplier_values(
1640 *   n_q_points);
1641 *  
1642 *   using namespace ValueExtractors;
1643 *   for (const auto &cell : dof_handler.active_cell_iterators())
1644 *   {
1645 *   cell_matrix = 0;
1646 *  
1647 *   cell->get_dof_indices(local_dof_indices);
1648 *  
1649 *   fe_values.reinit(cell);
1650 *  
1651 *   lambda.value_list(fe_values.get_quadrature_points(), lambda_values);
1652 *   mu.value_list(fe_values.get_quadrature_points(), mu_values);
1653 *  
1654 * @endcode
1655 *
1656 * As part of the construction of our system matrix, we need to
1657 * retrieve values from our current guess at the solution.
1658 * The following lines of code retrieve the needed values.
1659 *
1660 * @code
1661 *   fe_values[densities<dim>].get_function_values(nonlinear_solution,
1662 *   old_density_values);
1663 *   fe_values[displacements<dim>].get_function_values(
1664 *   nonlinear_solution, old_displacement_values);
1665 *   fe_values[displacements<dim>].get_function_divergences(
1666 *   nonlinear_solution, old_displacement_divs);
1667 *   fe_values[displacements<dim>].get_function_symmetric_gradients(
1668 *   nonlinear_solution, old_displacement_symmgrads);
1669 *   fe_values[displacement_multipliers<dim>].get_function_values(
1670 *   nonlinear_solution, old_displacement_multiplier_values);
1671 *   fe_values[displacement_multipliers<dim>].get_function_divergences(
1672 *   nonlinear_solution, old_displacement_multiplier_divs);
1673 *   fe_values[displacement_multipliers<dim>]
1674 *   .get_function_symmetric_gradients(
1675 *   nonlinear_solution, old_displacement_multiplier_symmgrads);
1676 *   fe_values[density_lower_slacks<dim>].get_function_values(
1677 *   nonlinear_solution, old_lower_slack_values);
1678 *   fe_values[density_lower_slack_multipliers<dim>].get_function_values(
1679 *   nonlinear_solution, old_lower_slack_multiplier_values);
1680 *   fe_values[density_upper_slacks<dim>].get_function_values(
1681 *   nonlinear_solution, old_upper_slack_values);
1682 *   fe_values[density_upper_slack_multipliers<dim>].get_function_values(
1683 *   nonlinear_solution, old_upper_slack_multiplier_values);
1684 *   fe_values[unfiltered_densities<dim>].get_function_values(
1685 *   nonlinear_solution, old_unfiltered_density_values);
1686 *   fe_values[unfiltered_density_multipliers<dim>].get_function_values(
1687 *   nonlinear_solution, old_unfiltered_density_multiplier_values);
1688 *   fe_values[unfiltered_densities<dim>].get_function_values(
1689 *   filtered_unfiltered_density_solution,
1690 *   filtered_unfiltered_density_values);
1691 *   fe_values[unfiltered_density_multipliers<dim>].get_function_values(
1692 *   filter_adjoint_unfiltered_density_multiplier_solution,
1693 *   filter_adjoint_unfiltered_density_multiplier_values);
1694 *  
1695 *   for (const auto q_point : fe_values.quadrature_point_indices())
1696 *   {
1697 * @endcode
1698 *
1699 * We need several more values corresponding to the test functions
1700 * coming from the first derivatives taken from the Lagrangian,
1701 * that is the @f$d_{\bullet}@f$ functions. These are calculated here:
1702 *
1703 * @code
1704 *   for (const auto i : fe_values.dof_indices())
1705 *   {
1706 *   const SymmetricTensor<2, dim> displacement_phi_i_symmgrad =
1707 *   fe_values[displacements<dim>].symmetric_gradient(i, q_point);
1708 *   const double displacement_phi_i_div =
1709 *   fe_values[displacements<dim>].divergence(i, q_point);
1710 *  
1711 *   const SymmetricTensor<2, dim>
1712 *   displacement_multiplier_phi_i_symmgrad =
1713 *   fe_values[displacement_multipliers<dim>].symmetric_gradient(
1714 *   i, q_point);
1715 *   const double displacement_multiplier_phi_i_div =
1716 *   fe_values[displacement_multipliers<dim>].divergence(i,
1717 *   q_point);
1718 *  
1719 *   const double density_phi_i =
1720 *   fe_values[densities<dim>].value(i, q_point);
1721 *   const double unfiltered_density_phi_i =
1722 *   fe_values[unfiltered_densities<dim>].value(i, q_point);
1723 *   const double unfiltered_density_multiplier_phi_i =
1724 *   fe_values[unfiltered_density_multipliers<dim>].value(i,
1725 *   q_point);
1726 *  
1727 *   const double lower_slack_multiplier_phi_i =
1728 *   fe_values[density_lower_slack_multipliers<dim>].value(
1729 *   i, q_point);
1730 *  
1731 *   const double lower_slack_phi_i =
1732 *   fe_values[density_lower_slacks<dim>].value(i, q_point);
1733 *  
1734 *   const double upper_slack_phi_i =
1735 *   fe_values[density_upper_slacks<dim>].value(i, q_point);
1736 *  
1737 *   const double upper_slack_multiplier_phi_i =
1738 *   fe_values[density_upper_slack_multipliers<dim>].value(
1739 *   i, q_point);
1740 *  
1741 *  
1742 *   for (const auto j : fe_values.dof_indices())
1743 *   {
1744 * @endcode
1745 *
1746 * Finally, we need values that come from the second round
1747 * of derivatives taken from the Lagrangian,
1748 * the @f$c_{\bullet}@f$ functions. These are calculated here:
1749 *
1750 * @code
1751 *   const SymmetricTensor<2, dim> displacement_phi_j_symmgrad =
1752 *   fe_values[displacements<dim>].symmetric_gradient(j,
1753 *   q_point);
1754 *   const double displacement_phi_j_div =
1755 *   fe_values[displacements<dim>].divergence(j, q_point);
1756 *  
1757 *   const SymmetricTensor<2, dim>
1758 *   displacement_multiplier_phi_j_symmgrad =
1759 *   fe_values[displacement_multipliers<dim>]
1760 *   .symmetric_gradient(j, q_point);
1761 *   const double displacement_multiplier_phi_j_div =
1762 *   fe_values[displacement_multipliers<dim>].divergence(
1763 *   j, q_point);
1764 *  
1765 *   const double density_phi_j =
1766 *   fe_values[densities<dim>].value(j, q_point);
1767 *  
1768 *   const double unfiltered_density_phi_j =
1769 *   fe_values[unfiltered_densities<dim>].value(j, q_point);
1770 *   const double unfiltered_density_multiplier_phi_j =
1771 *   fe_values[unfiltered_density_multipliers<dim>].value(
1772 *   j, q_point);
1773 *  
1774 *  
1775 *   const double lower_slack_phi_j =
1776 *   fe_values[density_lower_slacks<dim>].value(j, q_point);
1777 *  
1778 *   const double upper_slack_phi_j =
1779 *   fe_values[density_upper_slacks<dim>].value(j, q_point);
1780 *  
1781 *   const double lower_slack_multiplier_phi_j =
1782 *   fe_values[density_lower_slack_multipliers<dim>].value(
1783 *   j, q_point);
1784 *  
1785 *   const double upper_slack_multiplier_phi_j =
1786 *   fe_values[density_upper_slack_multipliers<dim>].value(
1787 *   j, q_point);
1788 *  
1789 * @endcode
1790 *
1791 * This is where the actual work starts. In
1792 * the following, we will build all of the
1793 * terms of the matrix -- they are numerous
1794 * and not entirely self-explanatory, also
1795 * depending on the previous solution and its
1796 * derivatives (which we have already
1797 * evaluated above and put into the variables
1798 * called `old_*`). To understand what each of
1799 * these terms corresponds to, you will want
1800 * to look at the explicit form of these terms
1801 * in the introduction above.
1802 *
1803
1804 *
1805 * The right hand sides of the equations being
1806 * driven to 0 give all the KKT conditions
1807 * for finding a local minimum -- descriptions of what
1808 * each individual equation represents are given with the computations
1809 * of the right hand side.
1810 *
1811
1812 *
1813 *
1814 * @code
1815 *   /* Equation 1 */
1816 *   cell_matrix(i, j) +=
1817 *   fe_values.JxW(q_point) *
1818 *   (
1819 *  
1820 *   -density_phi_i * unfiltered_density_multiplier_phi_j
1821 *  
1822 *   + density_penalty_exponent *
1823 *   (density_penalty_exponent - 1) *
1824 *   std::pow(old_density_values[q_point],
1825 *   density_penalty_exponent - 2) *
1826 *   density_phi_i * density_phi_j *
1827 *   (old_displacement_multiplier_divs[q_point] *
1828 *   old_displacement_divs[q_point] *
1829 *   lambda_values[q_point] +
1830 *   2 * mu_values[q_point] *
1831 *   (old_displacement_symmgrads[q_point] *
1832 *   old_displacement_multiplier_symmgrads[q_point]))
1833 *  
1834 *   + density_penalty_exponent *
1835 *   std::pow(old_density_values[q_point],
1836 *   density_penalty_exponent - 1) *
1837 *   density_phi_i *
1838 *   (displacement_multiplier_phi_j_div *
1839 *   old_displacement_divs[q_point] *
1840 *   lambda_values[q_point] +
1841 *   2 * mu_values[q_point] *
1842 *   (old_displacement_symmgrads[q_point] *
1843 *   displacement_multiplier_phi_j_symmgrad))
1844 *  
1845 *   + density_penalty_exponent *
1846 *   std::pow(old_density_values[q_point],
1847 *   density_penalty_exponent - 1) *
1848 *   density_phi_i *
1849 *   (displacement_phi_j_div *
1850 *   old_displacement_multiplier_divs[q_point] *
1851 *   lambda_values[q_point] +
1852 *   2 * mu_values[q_point] *
1853 *   (old_displacement_multiplier_symmgrads[q_point] *
1854 *   displacement_phi_j_symmgrad)));
1855 *  
1856 *   /* Equation 2 */
1857 *   cell_matrix(i, j) +=
1858 *   fe_values.JxW(q_point) *
1859 *   (density_penalty_exponent *
1860 *   std::pow(old_density_values[q_point],
1861 *   density_penalty_exponent - 1) *
1862 *   density_phi_j *
1863 *   (old_displacement_multiplier_divs[q_point] *
1864 *   displacement_phi_i_div * lambda_values[q_point] +
1865 *   2 * mu_values[q_point] *
1866 *   (old_displacement_multiplier_symmgrads[q_point] *
1867 *   displacement_phi_i_symmgrad))
1868 *  
1869 *   + std::pow(old_density_values[q_point],
1870 *   density_penalty_exponent) *
1871 *   (displacement_multiplier_phi_j_div *
1872 *   displacement_phi_i_div * lambda_values[q_point] +
1873 *   2 * mu_values[q_point] *
1874 *   (displacement_multiplier_phi_j_symmgrad *
1875 *   displacement_phi_i_symmgrad))
1876 *  
1877 *   );
1878 *  
1879 *   /* Equation 3, which has to do with the filter and which is
1880 *   * calculated elsewhere. */
1881 *   cell_matrix(i, j) +=
1882 *   fe_values.JxW(q_point) *
1883 *   (-1 * unfiltered_density_phi_i *
1884 *   lower_slack_multiplier_phi_j +
1885 *   unfiltered_density_phi_i * upper_slack_multiplier_phi_j);
1886 *  
1887 *  
1888 *   /* Equation 4: Primal feasibility */
1889 *   cell_matrix(i, j) +=
1890 *   fe_values.JxW(q_point) *
1891 *   (
1892 *  
1893 *   density_penalty_exponent *
1894 *   std::pow(old_density_values[q_point],
1895 *   density_penalty_exponent - 1) *
1896 *   density_phi_j *
1897 *   (old_displacement_divs[q_point] *
1898 *   displacement_multiplier_phi_i_div *
1899 *   lambda_values[q_point] +
1900 *   2 * mu_values[q_point] *
1901 *   (old_displacement_symmgrads[q_point] *
1902 *   displacement_multiplier_phi_i_symmgrad))
1903 *  
1904 *   + std::pow(old_density_values[q_point],
1905 *   density_penalty_exponent) *
1906 *   (displacement_phi_j_div *
1907 *   displacement_multiplier_phi_i_div *
1908 *   lambda_values[q_point] +
1909 *   2 * mu_values[q_point] *
1910 *   (displacement_phi_j_symmgrad *
1911 *   displacement_multiplier_phi_i_symmgrad)));
1912 *  
1913 *   /* Equation 5: Primal feasibility */
1914 *   cell_matrix(i, j) +=
1915 *   -1 * fe_values.JxW(q_point) *
1916 *   lower_slack_multiplier_phi_i *
1917 *   (unfiltered_density_phi_j - lower_slack_phi_j);
1918 *  
1919 *   /* Equation 6: Primal feasibility */
1920 *   cell_matrix(i, j) +=
1921 *   -1 * fe_values.JxW(q_point) *
1922 *   upper_slack_multiplier_phi_i *
1923 *   (-1 * unfiltered_density_phi_j - upper_slack_phi_j);
1924 *  
1925 *   /* Equation 7: Primal feasibility - the part with the filter
1926 *   * is added later */
1927 *   cell_matrix(i, j) += -1 * fe_values.JxW(q_point) *
1928 *   unfiltered_density_multiplier_phi_i *
1929 *   (density_phi_j);
1930 *  
1931 *   /* Equation 8: Complementary slackness */
1932 *   cell_matrix(i, j) +=
1933 *   fe_values.JxW(q_point) *
1934 *   (lower_slack_phi_i * lower_slack_multiplier_phi_j
1935 *  
1936 *   + lower_slack_phi_i * lower_slack_phi_j *
1937 *   old_lower_slack_multiplier_values[q_point] /
1938 *   old_lower_slack_values[q_point]);
1939 *  
1940 *   /* Equation 9: Complementary slackness */
1941 *   cell_matrix(i, j) +=
1942 *   fe_values.JxW(q_point) *
1943 *   (upper_slack_phi_i * upper_slack_multiplier_phi_j
1944 *  
1945 *  
1946 *   + upper_slack_phi_i * upper_slack_phi_j *
1947 *   old_upper_slack_multiplier_values[q_point] /
1948 *   old_upper_slack_values[q_point]);
1949 *   }
1950 *   }
1951 *   }
1952 *  
1953 * @endcode
1954 *
1955 * Now that we have everything assembled, all we have to
1956 * do is deal with the effect of (Dirichlet) boundary
1957 * conditions and other constraints. We incorporate the
1958 * former locally with just the contributions from the
1959 * current cell, and then let the AffineConstraints class
1960 * deal with the latter while copying contributions from
1961 * the current cell into the global linear system:
1962 *
1963 * @code
1964 *   MatrixTools::local_apply_boundary_values(boundary_values,
1965 *   local_dof_indices,
1966 *   cell_matrix,
1967 *   dummy_cell_rhs,
1968 *   true);
1969 *  
1970 *   constraints.distribute_local_to_global(cell_matrix,
1971 *   local_dof_indices,
1972 *   system_matrix);
1973 *   }
1974 *  
1975 * @endcode
1976 *
1977 * Having accumulated all of the terms that belong
1978 * in the Newton matrix, we now also have to
1979 * compute the terms for the right hand side
1980 * (i.e., the negative residual). We already do this
1981 * in another function, and so we call that here:
1982 *
1983 * @code
1984 *   system_rhs = calculate_test_rhs(nonlinear_solution);
1985 *  
1986 * @endcode
1987 *
1988 * Here we use the filter matrix we have already
1989 * constructed. We only need to integrate this filter applied
1990 * to test functions, which are piecewise constant, and so the
1991 * integration becomes a simple multiplication by the measure
1992 * of the cell. Iterating over the pre-made filter matrix
1993 * allows us to use the information about which cells are in
1994 * or out of the filter without repeatedly checking neighbor
1995 * cells again.
1996 *
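* In formulas (again just restating the loop that follows): because both
* the unfiltered density and its multiplier are discretized with
* piecewise constant elements, the filter contribution to these matrix
* blocks reduces to
* @f[
*   \int_{K_i} \varphi_i \, (H\varrho) \, dx
*     = |K_i| \sum_j H_{ij}\, \varrho_j,
* @f]
* so the entry added for a pair of cells @f$(i,j)@f$ is simply
* @f$H_{ij}\,|K_i|@f$, i.e., `iter->value() * cell->measure()`.
*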
1997 * @code
1998 *   for (const auto &cell : dof_handler.active_cell_iterators())
1999 *   {
2000 *   const unsigned int i = cell->active_cell_index();
2001 *   for (typename SparseMatrix<double>::iterator iter =
2002 *   filter_matrix.begin(i);
2003 *   iter != filter_matrix.end(i);
2004 *   ++iter)
2005 *   {
2006 *   const unsigned int j = iter->column();
2007 *   const double value = iter->value() * cell->measure();
2008 *  
2009 *   system_matrix
2010 *   .block(SolutionBlocks::unfiltered_density_multiplier,
2011 *   SolutionBlocks::unfiltered_density)
2012 *   .add(i, j, value);
2013 *   system_matrix
2014 *   .block(SolutionBlocks::unfiltered_density,
2015 *   SolutionBlocks::unfiltered_density_multiplier)
2016 *   .add(j, i, value);
2017 *   }
2018 *   }
2019 *   }
2020 *  
2021 *  
2022 * @endcode
2023 *
2024 *
2025 * <a name="step_79-SolvingtheNewtonlinearsystem"></a>
2026 * <h3>Solving the Newton linear system</h3>
2027 *
2028
2029 *
2030 *
2031
2032 *
2033 * We will need to solve a linear system in each iteration. We use
2034 * a direct solver, for now -- this is clearly not an efficient
2035 * choice for a matrix that has so many non-zeroes, and it will
2036 * not scale to anything interesting. For "real" applications, we
2037 * will need an iterative solver but the complexity of the system
2038 * means that an iterative solver algorithm will take a good deal
2039 * of work. Because this is not the focus of the current program,
2040 * we simply stick with the direct solver we have here -- the
2041 * function follows the same structure as used in @ref step_29 "step-29".
2042 *
2043 * @code
2044 *   template <int dim>
2045 *   BlockVector<double> SANDTopOpt<dim>::solve()
2046 *   {
2047 *   TimerOutput::Scope t(timer, "solver");
2048 *  
2049 *   BlockVector<double> linear_solution;
2050 *   linear_solution.reinit(nonlinear_solution);
2051 *  
2052 *   SparseDirectUMFPACK A_direct;
2053 *   A_direct.initialize(system_matrix);
2054 *   A_direct.vmult(linear_solution, system_rhs);
2055 *  
2056 *   constraints.distribute(linear_solution);
2057 *  
2058 *   return linear_solution;
2059 *   }
2060 *  
2061 *  
2062 * @endcode
2063 *
2064 *
2065 * <a name="step_79-Detailsoftheoptimizationalgorithm"></a>
2066 * <h3>Details of the optimization algorithm</h3>
2067 *
2068
2069 *
2070 * The next several functions deal with specific parts of the
2071 * optimization algorithm, most notably with deciding whether the
2072 * direction computed by solving the linearized (Newton) system is
2073 * viable and, if so, how far we want to go in this direction.
2074 *
2075
2076 *
2077 *
2078 * <a name="step_79-Computingsteplengths"></a>
2079 * <h4>Computing step lengths</h4>
2080 *
2081
2082 *
2083 * We start with a function that does a binary search to figure
2084 * out the maximum step that meets the dual feasibility -- that
2085 * is, how far can we go so that @f$s>0@f$ and @f$z>0@f$. The function
2086 * returns a pair of values, one each for the @f$s@f$ and @f$z@f$ slack
2087 * variables.
2088 *
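* Stated as a formula (this is equivalent to the non-negativity test in
* the code below), a step length @f$\alpha_s@f$ for the slack variables
* is considered acceptable if
* @f[
*   s^k + \alpha_s\, \Delta s \;\ge\; (1-\tau)\, s^k,
* @f]
* and analogously @f$z^k + \alpha_z\, \Delta z \ge (1-\tau)\, z^k@f$ for
* the multipliers, where @f$\tau@f$ is the "fraction to boundary"
* parameter computed at the top of the function.
*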
2089 * @code
2090 *   template <int dim>
2091 *   std::pair<double, double> SANDTopOpt<dim>::calculate_max_step_size(
2092 *   const BlockVector<double> &state,
2093 *   const BlockVector<double> &step) const
2094 *   {
2095 *   double fraction_to_boundary;
2096 *   const double min_fraction_to_boundary = .8;
2097 *   const double max_fraction_to_boundary = 1. - 1e-5;
2098 *  
2099 *   if (min_fraction_to_boundary < 1 - barrier_size)
2100 *   {
2101 *   if (1 - barrier_size < max_fraction_to_boundary)
2102 *   fraction_to_boundary = 1 - barrier_size;
2103 *   else
2104 *   fraction_to_boundary = max_fraction_to_boundary;
2105 *   }
2106 *   else
2107 *   fraction_to_boundary = min_fraction_to_boundary;
2108 *  
2109 *   double step_size_s_low = 0;
2110 *   double step_size_z_low = 0;
2111 *   double step_size_s_high = 1;
2112 *   double step_size_z_high = 1;
2113 *   double step_size_s, step_size_z;
2114 *  
2115 *   const int max_bisection_method_steps = 50;
2116 *   for (unsigned int k = 0; k < max_bisection_method_steps; ++k)
2117 *   {
2118 *   step_size_s = (step_size_s_low + step_size_s_high) / 2;
2119 *   step_size_z = (step_size_z_low + step_size_z_high) / 2;
2120 *  
2121 *   const BlockVector<double> state_test_s =
2122 *   (fraction_to_boundary * state) + (step_size_s * step);
2123 *  
2124 *   const BlockVector<double> state_test_z =
2125 *   (fraction_to_boundary * state) + (step_size_z * step);
2126 *  
2127 *   const bool accept_s =
2128 *   (state_test_s.block(SolutionBlocks::density_lower_slack)
2129 *   .is_non_negative()) &&
2130 *   (state_test_s.block(SolutionBlocks::density_upper_slack)
2131 *   .is_non_negative());
2132 *   const bool accept_z =
2133 *   (state_test_z.block(SolutionBlocks::density_lower_slack_multiplier)
2134 *   .is_non_negative()) &&
2135 *   (state_test_z.block(SolutionBlocks::density_upper_slack_multiplier)
2136 *   .is_non_negative());
2137 *  
2138 *   if (accept_s)
2139 *   step_size_s_low = step_size_s;
2140 *   else
2141 *   step_size_s_high = step_size_s;
2142 *  
2143 *   if (accept_z)
2144 *   step_size_z_low = step_size_z;
2145 *   else
2146 *   step_size_z_high = step_size_z;
2147 *   }
2148 *  
2149 *   return {step_size_s_low, step_size_z_low};
2150 *   }
2151 *  
2152 *  
2153 * @endcode
2154 *
2155 *
2156 * <a name="step_79-Computingresiduals"></a>
2157 * <h4>Computing residuals</h4>
2158 *
2159
2160 *
2161 * The next function computes a right hand side vector linearized
2162 * around a "test solution vector" that we can use to look at the
2163 * magnitude of the KKT conditions. This is then used for testing
2164 * the convergence before shrinking the barrier size, as well as in the
2165 * calculation of the @f$l_1@f$ merit.
2166 *
2167
2168 *
2169 * The function is lengthy and complicated, but it is really just a
2170 * copy of the right hand side part of what the `assemble_system()`
2171 * function above did.
2172 *
2173 * @code
2174 *   template <int dim>
2175 *   BlockVector<double> SANDTopOpt<dim>::calculate_test_rhs(
2176 *   const BlockVector<double> &test_solution) const
2177 *   {
2178 * @endcode
2179 *
2180 * We first create a zero vector with the size and block structure of `system_rhs`:
2181 *
2182 * @code
2183 *   BlockVector<double> test_rhs;
2184 *   test_rhs.reinit(system_rhs);
2185 *  
2186 *   const MappingQ<dim> mapping(1);
2187 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
2188 *   const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
2189 *   FEValues<dim> fe_values(mapping,
2190 *   fe,
2191 *   quadrature_formula,
2192 *   update_values | update_gradients |
2193 *   update_quadrature_points | update_JxW_values);
2194 *   FEFaceValues<dim> fe_face_values(mapping,
2195 *   fe,
2196 *   face_quadrature_formula,
2197 *   update_values | update_quadrature_points |
2198 *   update_normal_vectors |
2199 *   update_JxW_values);
2200 *  
2201 *   const unsigned int dofs_per_cell = fe.dofs_per_cell;
2202 *   const unsigned int n_q_points = quadrature_formula.size();
2203 *  
2204 *   Vector<double> cell_rhs(dofs_per_cell);
2205 *   FullMatrix<double> dummy_cell_matrix(dofs_per_cell, dofs_per_cell);
2206 *  
2207 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
2208 *  
2209 *   std::vector<double> lambda_values(n_q_points);
2210 *   std::vector<double> mu_values(n_q_points);
2211 *  
2212 *   const Functions::ConstantFunction<dim> lambda(1.), mu(1.);
2213 *   std::vector<Tensor<1, dim>> rhs_values(n_q_points);
2214 *  
2215 *  
2216 *   BlockVector<double> filtered_unfiltered_density_solution = test_solution;
2217 *   BlockVector<double> filter_adjoint_unfiltered_density_multiplier_solution =
2218 *   test_solution;
2219 *   filtered_unfiltered_density_solution.block(
2220 *   SolutionBlocks::unfiltered_density) = 0;
2221 *   filter_adjoint_unfiltered_density_multiplier_solution.block(
2222 *   SolutionBlocks::unfiltered_density_multiplier) = 0;
2223 *  
2224 *   filter_matrix.vmult(filtered_unfiltered_density_solution.block(
2225 *   SolutionBlocks::unfiltered_density),
2226 *   test_solution.block(
2227 *   SolutionBlocks::unfiltered_density));
2228 *   filter_matrix.Tvmult(
2229 *   filter_adjoint_unfiltered_density_multiplier_solution.block(
2230 *   SolutionBlocks::unfiltered_density_multiplier),
2231 *   test_solution.block(SolutionBlocks::unfiltered_density_multiplier));
2232 *  
2233 *  
2234 *   std::vector<double> old_density_values(n_q_points);
2235 *   std::vector<Tensor<1, dim>> old_displacement_values(n_q_points);
2236 *   std::vector<double> old_displacement_divs(n_q_points);
2237 *   std::vector<SymmetricTensor<2, dim>> old_displacement_symmgrads(n_q_points);
2238 *   std::vector<Tensor<1, dim>> old_displacement_multiplier_values(n_q_points);
2239 *   std::vector<double> old_displacement_multiplier_divs(n_q_points);
2240 *   std::vector<SymmetricTensor<2, dim>> old_displacement_multiplier_symmgrads(
2241 *   n_q_points);
2242 *   std::vector<double> old_lower_slack_multiplier_values(n_q_points);
2243 *   std::vector<double> old_upper_slack_multiplier_values(n_q_points);
2244 *   std::vector<double> old_lower_slack_values(n_q_points);
2245 *   std::vector<double> old_upper_slack_values(n_q_points);
2246 *   std::vector<double> old_unfiltered_density_values(n_q_points);
2247 *   std::vector<double> old_unfiltered_density_multiplier_values(n_q_points);
2248 *   std::vector<double> filtered_unfiltered_density_values(n_q_points);
2249 *   std::vector<double> filter_adjoint_unfiltered_density_multiplier_values(
2250 *   n_q_points);
2251 *  
2252 *   using namespace ValueExtractors;
2253 *   for (const auto &cell : dof_handler.active_cell_iterators())
2254 *   {
2255 *   cell_rhs = 0;
2256 *  
2257 *   cell->get_dof_indices(local_dof_indices);
2258 *  
2259 *   fe_values.reinit(cell);
2260 *  
2261 *   lambda.value_list(fe_values.get_quadrature_points(), lambda_values);
2262 *   mu.value_list(fe_values.get_quadrature_points(), mu_values);
2263 *  
2264 *   fe_values[densities<dim>].get_function_values(test_solution,
2265 *   old_density_values);
2266 *   fe_values[displacements<dim>].get_function_values(
2267 *   test_solution, old_displacement_values);
2268 *   fe_values[displacements<dim>].get_function_divergences(
2269 *   test_solution, old_displacement_divs);
2270 *   fe_values[displacements<dim>].get_function_symmetric_gradients(
2271 *   test_solution, old_displacement_symmgrads);
2272 *   fe_values[displacement_multipliers<dim>].get_function_values(
2273 *   test_solution, old_displacement_multiplier_values);
2274 *   fe_values[displacement_multipliers<dim>].get_function_divergences(
2275 *   test_solution, old_displacement_multiplier_divs);
2276 *   fe_values[displacement_multipliers<dim>]
2277 *   .get_function_symmetric_gradients(
2278 *   test_solution, old_displacement_multiplier_symmgrads);
2279 *   fe_values[density_lower_slacks<dim>].get_function_values(
2280 *   test_solution, old_lower_slack_values);
2281 *   fe_values[density_lower_slack_multipliers<dim>].get_function_values(
2282 *   test_solution, old_lower_slack_multiplier_values);
2283 *   fe_values[density_upper_slacks<dim>].get_function_values(
2284 *   test_solution, old_upper_slack_values);
2285 *   fe_values[density_upper_slack_multipliers<dim>].get_function_values(
2286 *   test_solution, old_upper_slack_multiplier_values);
2287 *   fe_values[unfiltered_densities<dim>].get_function_values(
2288 *   test_solution, old_unfiltered_density_values);
2289 *   fe_values[unfiltered_density_multipliers<dim>].get_function_values(
2290 *   test_solution, old_unfiltered_density_multiplier_values);
2291 *   fe_values[unfiltered_densities<dim>].get_function_values(
2292 *   filtered_unfiltered_density_solution,
2293 *   filtered_unfiltered_density_values);
2294 *   fe_values[unfiltered_density_multipliers<dim>].get_function_values(
2295 *   filter_adjoint_unfiltered_density_multiplier_solution,
2296 *   filter_adjoint_unfiltered_density_multiplier_values);
2297 *  
2298 *   for (const auto q_point : fe_values.quadrature_point_indices())
2299 *   {
2300 *   for (const auto i : fe_values.dof_indices())
2301 *   {
2302 *   const SymmetricTensor<2, dim> displacement_phi_i_symmgrad =
2303 *   fe_values[displacements<dim>].symmetric_gradient(i, q_point);
2304 *   const double displacement_phi_i_div =
2305 *   fe_values[displacements<dim>].divergence(i, q_point);
2306 *  
2307 *   const SymmetricTensor<2, dim>
2308 *   displacement_multiplier_phi_i_symmgrad =
2309 *   fe_values[displacement_multipliers<dim>].symmetric_gradient(
2310 *   i, q_point);
2311 *   const double displacement_multiplier_phi_i_div =
2312 *   fe_values[displacement_multipliers<dim>].divergence(i,
2313 *   q_point);
2314 *  
2315 *  
2316 *   const double density_phi_i =
2317 *   fe_values[densities<dim>].value(i, q_point);
2318 *   const double unfiltered_density_phi_i =
2319 *   fe_values[unfiltered_densities<dim>].value(i, q_point);
2320 *   const double unfiltered_density_multiplier_phi_i =
2321 *   fe_values[unfiltered_density_multipliers<dim>].value(i,
2322 *   q_point);
2323 *  
2324 *   const double lower_slack_multiplier_phi_i =
2325 *   fe_values[density_lower_slack_multipliers<dim>].value(
2326 *   i, q_point);
2327 *  
2328 *   const double lower_slack_phi_i =
2329 *   fe_values[density_lower_slacks<dim>].value(i, q_point);
2330 *  
2331 *   const double upper_slack_phi_i =
2332 *   fe_values[density_upper_slacks<dim>].value(i, q_point);
2333 *  
2334 *   const double upper_slack_multiplier_phi_i =
2335 *   fe_values[density_upper_slack_multipliers<dim>].value(
2336 *   i, q_point);
2337 *  
2338 *   /* Equation 1: This equation, along with equations
2339 *   * 2 and 3, are the variational derivatives of the
2340 *   * Lagrangian with respect to the decision
2341 *   * variables - the density, displacement, and
2342 *   * unfiltered density. */
2343 *   cell_rhs(i) +=
2344 *   -1 * fe_values.JxW(q_point) *
2345 *   (density_penalty_exponent *
2346 *   std::pow(old_density_values[q_point],
2347 *   density_penalty_exponent - 1) *
2348 *   density_phi_i *
2349 *   (old_displacement_multiplier_divs[q_point] *
2350 *   old_displacement_divs[q_point] *
2351 *   lambda_values[q_point] +
2352 *   2 * mu_values[q_point] *
2353 *   (old_displacement_symmgrads[q_point] *
2354 *   old_displacement_multiplier_symmgrads[q_point])) -
2355 *   density_phi_i *
2356 *   old_unfiltered_density_multiplier_values[q_point]);
2357 *  
2358 *   /* Equation 2; the boundary terms will be added further down
2359 *   * below. */
2360 *   cell_rhs(i) +=
2361 *   -1 * fe_values.JxW(q_point) *
2362 *   (std::pow(old_density_values[q_point],
2363 *   density_penalty_exponent) *
2364 *   (old_displacement_multiplier_divs[q_point] *
2365 *   displacement_phi_i_div * lambda_values[q_point] +
2366 *   2 * mu_values[q_point] *
2367 *   (old_displacement_multiplier_symmgrads[q_point] *
2368 *   displacement_phi_i_symmgrad)));
2369 *  
2370 *   /* Equation 3 */
2371 *   cell_rhs(i) +=
2372 *   -1 * fe_values.JxW(q_point) *
2373 *   (unfiltered_density_phi_i *
2374 *   filter_adjoint_unfiltered_density_multiplier_values
2375 *   [q_point] +
2376 *   unfiltered_density_phi_i *
2377 *   old_upper_slack_multiplier_values[q_point] +
2378 *   -1 * unfiltered_density_phi_i *
2379 *   old_lower_slack_multiplier_values[q_point]);
2380 *  
2381 *  
2382 *  
2383 *   /* Equation 4; boundary term will again be dealt
2384 *   * with below. This equation being driven to 0
2385 *   * ensures that the elasticity equation is met as
2386 *   * a constraint. */
2387 *   cell_rhs(i) += -1 * fe_values.JxW(q_point) *
2388 *   (std::pow(old_density_values[q_point],
2389 *   density_penalty_exponent) *
2390 *   (old_displacement_divs[q_point] *
2391 *   displacement_multiplier_phi_i_div *
2392 *   lambda_values[q_point] +
2393 *   2 * mu_values[q_point] *
2394 *   (displacement_multiplier_phi_i_symmgrad *
2395 *   old_displacement_symmgrads[q_point])));
2396 *  
2397 *   /* Equation 5: This equation sets the lower slack
2398 *   * variable equal to the unfiltered density,
2399 *   * giving a minimum density of 0. */
2400 *   cell_rhs(i) += fe_values.JxW(q_point) *
2401 *   (lower_slack_multiplier_phi_i *
2402 *   (old_unfiltered_density_values[q_point] -
2403 *   old_lower_slack_values[q_point]));
2404 *  
2405 *   /* Equation 6: This equation sets the upper slack
2406 *   * variable equal to one minus the unfiltered
2407 *   * density. */
2408 *   cell_rhs(i) += fe_values.JxW(q_point) *
2409 *   (upper_slack_multiplier_phi_i *
2410 *   (1 - old_unfiltered_density_values[q_point] -
2411 *   old_upper_slack_values[q_point]));
2412 *  
2413 *   /* Equation 7: This is the difference between the
2414 *   * density and the filter applied to the
2415 *   * unfiltered density. This being driven to 0 by
2416 *   * the Newton steps ensures that the filter is
2417 *   * applied correctly. */
2418 *   cell_rhs(i) += fe_values.JxW(q_point) *
2419 *   (unfiltered_density_multiplier_phi_i *
2420 *   (old_density_values[q_point] -
2421 *   filtered_unfiltered_density_values[q_point]));
2422 *  
2423 *   /* Equation 8: This along with equation 9 give the
2424 *   * requirement that s*z = \alpha for the barrier
2425 *   * size alpha, and gives complementary slackness
2426 *   * from KKT conditions when \alpha goes to 0. */
2427 *   cell_rhs(i) +=
2428 *   -1 * fe_values.JxW(q_point) *
2429 *   (lower_slack_phi_i *
2430 *   (old_lower_slack_multiplier_values[q_point] -
2431 *   barrier_size / old_lower_slack_values[q_point]));
2432 *  
2433 *   /* Equation 9 */
2434 *   cell_rhs(i) +=
2435 *   -1 * fe_values.JxW(q_point) *
2436 *   (upper_slack_phi_i *
2437 *   (old_upper_slack_multiplier_values[q_point] -
2438 *   barrier_size / old_upper_slack_values[q_point]));
2439 *   }
2440 *   }
2441 *  
2442 *   for (const auto &face : cell->face_iterators())
2443 *   {
2444 *   if (face->at_boundary() &&
2445 *   face->boundary_id() == BoundaryIds::down_force)
2446 *   {
2447 *   fe_face_values.reinit(cell, face);
2448 *  
2449 *   for (const auto face_q_point :
2450 *   fe_face_values.quadrature_point_indices())
2451 *   {
2452 *   for (const auto i : fe_face_values.dof_indices())
2453 *   {
2454 *   Tensor<1, dim> traction;
2455 *   traction[1] = -1.;
2456 *  
2457 *   cell_rhs(i) +=
2458 *   -1 *
2459 *   (traction * fe_face_values[displacements<dim>].value(
2460 *   i, face_q_point)) *
2461 *   fe_face_values.JxW(face_q_point);
2462 *  
2463 *   cell_rhs(i) +=
2464 *   (traction *
2465 *   fe_face_values[displacement_multipliers<dim>].value(
2466 *   i, face_q_point)) *
2467 *   fe_face_values.JxW(face_q_point);
2468 *   }
2469 *   }
2470 *   }
2471 *   }
2472 *  
2473 *   MatrixTools::local_apply_boundary_values(boundary_values,
2474 *   local_dof_indices,
2475 *   dummy_cell_matrix,
2476 *   cell_rhs,
2477 *   true);
2478 *  
2479 *   constraints.distribute_local_to_global(cell_rhs,
2480 *   local_dof_indices,
2481 *   test_rhs);
2482 *   }
2483 *  
2484 *   return test_rhs;
2485 *   }
2486 *  
2487 *  
2488 * @endcode
2489 *
2490 *
2491 * <a name="step_79-Computingthemeritfunction"></a>
2492 * <h4>Computing the merit function</h4>
2493 *
2494
2495 *
2496 * The algorithm we use here follows a "watchdog" strategy to
2497 * determine where and how far to go from the current iterate. We
2498 * base the watchdog strategy on an exact @f$l_1@f$ merit function. This
2499 * function calculates the exact @f$l_1@f$ merit of a given, putative,
2500 * next iterate.
2501 *
2502
2503 *
2504 * The merit function consists of the sum of the objective function
2505 * (which is simply an integral of external forces (on the boundary
2506 * of the domain) times the displacement values of a test solution
2507 * (typically, the current solution plus some multiple of the Newton
2508 * update)), and the @f$l_1@f$ norms of the Lagrange multiplier
2509 * components of residual vectors. The following code computes these
2510 * parts in turn:
2511 *
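* In compact (and slightly simplified) form, the quantity computed below
* is
* @f[
*   \phi(x) = c(x)
*             - \mu \sum_K |K| \left(\log s_K^{\text{low}}
*                                     + \log s_K^{\text{up}}\right)
*             + \nu \sum_m \|r_m(x)\|_{1},
* @f]
* where @f$c(x)@f$ is the compliance (boundary traction times
* displacement), @f$\mu@f$ is the barrier size, @f$\nu@f$ is the penalty
* multiplier, and the @f$r_m@f$ are the residuals of the equality
* constraints (elasticity, filter, and the two slack definitions).
*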
2512 * @code
2513 *   template <int dim>
2514 *   double SANDTopOpt<dim>::calculate_exact_merit(
2515 *   const BlockVector<double> &test_solution)
2516 *   {
2517 *   TimerOutput::Scope t(timer, "merit function");
2518 *  
2519 * @endcode
2520 *
2521 * Start with computing the objective function:
2522 *
2523 * @code
2524 *   double objective_function_merit = 0;
2525 *   {
2526 *   const MappingQ<dim> mapping(1);
2527 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
2528 *   const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
2529 *   FEValues<dim> fe_values(mapping,
2530 *   fe,
2531 *   quadrature_formula,
2532 *   update_values | update_gradients |
2533 *   update_quadrature_points | update_JxW_values);
2534 *   FEFaceValues<dim> fe_face_values(mapping,
2535 *   fe,
2536 *   face_quadrature_formula,
2537 *   update_values |
2538 *   update_quadrature_points |
2539 *   update_normal_vectors |
2540 *   update_JxW_values);
2541 *  
2542 *   const unsigned int n_face_q_points = face_quadrature_formula.size();
2543 *  
2544 *   std::vector<Tensor<1, dim>> displacement_face_values(n_face_q_points);
2545 *  
2546 *   for (const auto &cell : dof_handler.active_cell_iterators())
2547 *   {
2548 *   for (const auto &face : cell->face_iterators())
2549 *   {
2550 *   if (face->at_boundary() &&
2551 *   face->boundary_id() == BoundaryIds::down_force)
2552 *   {
2553 *   fe_face_values.reinit(cell, face);
2554 *   fe_face_values[ValueExtractors::displacements<dim>]
2555 *   .get_function_values(test_solution,
2556 *   displacement_face_values);
2557 *   for (unsigned int face_q_point = 0;
2558 *   face_q_point < n_face_q_points;
2559 *   ++face_q_point)
2560 *   {
2561 *   Tensor<1, dim> traction;
2562 *   traction[1] = -1.;
2563 *  
2564 *   objective_function_merit +=
2565 *   (traction * displacement_face_values[face_q_point]) *
2566 *   fe_face_values.JxW(face_q_point);
2567 *   }
2568 *   }
2569 *   }
2570 *   }
2571 *   }
2572 *  
2573 *   for (const auto &cell : triangulation.active_cell_iterators())
2574 *   {
2575 *   objective_function_merit =
2576 *   objective_function_merit -
2577 *   barrier_size * cell->measure() *
2578 *   std::log(test_solution.block(
2579 *   SolutionBlocks::density_lower_slack)[cell->active_cell_index()]);
2580 *   objective_function_merit =
2581 *   objective_function_merit -
2582 *   barrier_size * cell->measure() *
2583 *   std::log(test_solution.block(
2584 *   SolutionBlocks::density_upper_slack)[cell->active_cell_index()]);
2585 *   }
2586 *  
2587 * @endcode
2588 *
2589 * Then compute the residual and take the @f$l_1@f$ norms of the
2590 * components that correspond to Lagrange multipliers. We add
2591 * those to the objective function computed above, and return
2592 * the sum at the bottom:
2593 *
2594 * @code
2595 *   const BlockVector<double> test_rhs = calculate_test_rhs(test_solution);
2596 *  
2597 *   const double elasticity_constraint_merit =
2598 *   penalty_multiplier *
2599 *   test_rhs.block(SolutionBlocks::displacement_multiplier).l1_norm();
2600 *   const double filter_constraint_merit =
2601 *   penalty_multiplier *
2602 *   test_rhs.block(SolutionBlocks::unfiltered_density_multiplier).l1_norm();
2603 *   const double lower_slack_merit =
2604 *   penalty_multiplier *
2605 *   test_rhs.block(SolutionBlocks::density_lower_slack_multiplier).l1_norm();
2606 *   const double upper_slack_merit =
2607 *   penalty_multiplier *
2608 *   test_rhs.block(SolutionBlocks::density_upper_slack_multiplier).l1_norm();
2609 *  
2610 *   const double total_merit =
2611 *   objective_function_merit + elasticity_constraint_merit +
2612 *   filter_constraint_merit + lower_slack_merit + upper_slack_merit;
2613 *   return total_merit;
2614 *   }
2615 *  
2616 *  
2617 *  
2618 * @endcode
2619 *
2620 *
2621 * <a name="step_79-Findingasearchdirection"></a>
2622 * <h4>Finding a search direction</h4>
2623 *
2624
2625 *
2626 * Next up is the function that actually computes a search direction
2627 * starting at the current state (stored in `nonlinear_solution`) and
2628 * returns the resulting vector. To this end, the function first
2629 * calls the functions that assemble the linear system that
2630 * corresponds to the Newton system, and that solve it.
2631 *
2632
2633 *
2634 * This function also updates the penalty multiplier in the merit
2635 * function, and then returns the largest scaled feasible step.
2636 * It uses the `calculate_max_step_size()` function to find the
2637 * largest feasible step that satisfies @f$s>0@f$ and @f$z>0@f$.
2638 *
2639
2640 *
2641 *
2642 * @code
2643 *   template <int dim>
2644 *   BlockVector<double> SANDTopOpt<dim>::find_max_step()
2645 *   {
2646 *   assemble_system();
2647 *   BlockVector<double> step = solve();
2648 *  
2649 * @endcode
2650 *
2651 * Next we are going to update penalty_multiplier. In
2652 * essence, a larger penalty multiplier makes us consider the
2653 * constraints more. Looking at the Hessian and gradient with
2654 * respect to the step we want to take with our decision
2655 * variables, and comparing that to the norm of our constraint
2656 * error gives us a way to ensure that our merit function is
2657 * "exact" - that is, it has a minimum in the same location
2658 * that the objective function does. As our merit function is
2659 * exact for any penalty multiplier over some minimum value,
2660 * we only keep the computed value if it increases the penalty
2661 * multiplier.
2662 *
2663
2664 *
2665 *
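* Written as a formula (a restatement of the code below), the candidate
* value is
* @f[
*   \nu_{\text{test}}
*     = \frac{g^\top d + \tfrac{1}{2}\, d^\top H d}{0.05\, \|c\|},
* @f]
* where @f$d@f$ is the step restricted to the decision variables,
* @f$g^\top d@f$ and @f$d^\top H d@f$ are the gradient and curvature
* terms accumulated in `grad_part` and `hess_part`, and @f$\|c\|@f$ is
* the accumulated norm of the constraint residual blocks
* (`constraint_norm`); the curvature term is dropped if it is not
* positive.
*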
2666 * @code
2667 *   const std::vector<unsigned int> decision_variables = {
2668 *   SolutionBlocks::density,
2669 *   SolutionBlocks::displacement,
2670 *   SolutionBlocks::unfiltered_density,
2671 *   SolutionBlocks::density_upper_slack,
2672 *   SolutionBlocks::density_lower_slack};
2673 *   double hess_part = 0;
2674 *   double grad_part = 0;
2675 *   for (const unsigned int decision_variable_i : decision_variables)
2676 *   {
2677 *   for (const unsigned int decision_variable_j : decision_variables)
2678 *   {
2679 *   Vector<double> temp_vector(step.block(decision_variable_i).size());
2680 *   system_matrix.block(decision_variable_i, decision_variable_j)
2681 *   .vmult(temp_vector, step.block(decision_variable_j));
2682 *   hess_part += step.block(decision_variable_i) * temp_vector;
2683 *   }
2684 *   grad_part -= system_rhs.block(decision_variable_i) *
2685 *   step.block(decision_variable_i);
2686 *   }
2687 *  
2688 *   const std::vector<unsigned int> equality_constraint_multipliers = {
2689 *   SolutionBlocks::displacement_multiplier,
2690 *   SolutionBlocks::unfiltered_density_multiplier,
2691 *   SolutionBlocks::density_lower_slack_multiplier,
2692 *   SolutionBlocks::density_upper_slack_multiplier};
2693 *   double constraint_norm = 0;
2694 *   for (const unsigned int multiplier_i : equality_constraint_multipliers)
2695 *   constraint_norm += system_rhs.block(multiplier_i).linfty_norm();
2696 *  
2697 *  
2698 *   double test_penalty_multiplier;
2699 *   if (hess_part > 0)
2700 *   test_penalty_multiplier =
2701 *   (grad_part + .5 * hess_part) / (.05 * constraint_norm);
2702 *   else
2703 *   test_penalty_multiplier = (grad_part) / (.05 * constraint_norm);
2704 *  
2705 *   penalty_multiplier = std::max(penalty_multiplier, test_penalty_multiplier);
2706 *  
2707 * @endcode
2708 *
2709 * Based on all of this, we can now compute step sizes for the
2710 * primal and dual (Lagrange multiplier) variables. Once we
2711 * have these, we scale the components of the step vector,
2712 * and that is what this function returns.
2713 *
2714 * @code
2715 *   const std::pair<double, double> max_step_sizes =
2716 *   calculate_max_step_size(nonlinear_solution, step);
2717 *   const double step_size_s = max_step_sizes.first;
2718 *   const double step_size_z = max_step_sizes.second;
2719 *  
2720 *   step.block(SolutionBlocks::density) *= step_size_s;
2721 *   step.block(SolutionBlocks::displacement) *= step_size_s;
2722 *   step.block(SolutionBlocks::unfiltered_density) *= step_size_s;
2723 *   step.block(SolutionBlocks::displacement_multiplier) *= step_size_z;
2724 *   step.block(SolutionBlocks::unfiltered_density_multiplier) *= step_size_z;
2725 *   step.block(SolutionBlocks::density_lower_slack) *= step_size_s;
2726 *   step.block(SolutionBlocks::density_lower_slack_multiplier) *= step_size_z;
2727 *   step.block(SolutionBlocks::density_upper_slack) *= step_size_s;
2728 *   step.block(SolutionBlocks::density_upper_slack_multiplier) *= step_size_z;
2729 *  
2730 *   return step;
2731 *   }
2732 *  
2733 *  
2734 *  
2735 * @endcode
2736 *
2737 *
2738 * <a name="step_79-Computingascaledstep"></a>
2739 * <h4>Computing a scaled step</h4>
2740 *
2741
2742 *
2743 * The next function then implements a back-tracking algorithm for a
2744 * line search. It keeps shrinking the step size until it finds a step
2745 * where the merit is sufficiently decreased, and then returns the new
2746 * location, computed from the current state vector plus the search
2747 * direction times the step length.
2748 *
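* The acceptance test in the loop below is the usual sufficient-decrease
* (Armijo-type) condition: a step length @f$\alpha@f$ is accepted once
* @f[
*   \phi(x + \alpha\, d) \;<\; \phi(x)
*     + c\, \alpha\, \left.\frac{d\phi(x+\alpha d)}{d\alpha}\right|_{\alpha=0},
* @f]
* where @f$\phi@f$ is the merit function, @f$c@f$ is the
* `descent_requirement` parameter, and the derivative on the right is
* approximated by the finite difference computed at the top of the
* function.
*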
2749 * @code
2750 *   template <int dim>
2751 *   BlockVector<double>
2752 *   SANDTopOpt<dim>::compute_scaled_step(const BlockVector<double> &state,
2753 *   const BlockVector<double> &max_step,
2754 *   const double descent_requirement)
2755 *   {
2756 *   const double merit_derivative =
2757 *   (calculate_exact_merit(state + 1e-4 * max_step) -
2758 *   calculate_exact_merit(state)) /
2759 *   1e-4;
2760 *   double step_size = 1;
2761 *   unsigned int max_linesearch_iterations = 10;
2762 *   for (unsigned int k = 0; k < max_linesearch_iterations; ++k)
2763 *   {
2764 *   if (calculate_exact_merit(state + step_size * max_step) <
2765 *   calculate_exact_merit(state) +
2766 *   step_size * descent_requirement * merit_derivative)
2767 *   break;
2768 *   else
2769 *   step_size = step_size / 2;
2770 *   }
2771 *   return state + (step_size * max_step);
2772 *   }
2773 *  
2774 *  
2775 * @endcode
2776 *
2777 *
2778 * <a name="step_79-Checkingforconvergence"></a>
2779 * <h4>Checking for convergence</h4>
2780 *
2781
2782 *
*   The final auxiliary function in this block checks whether the
*   KKT conditions are satisfied well enough that the overall
*   algorithm can lower the barrier size. It does so by computing
*   the @f$l_1@f$ norm of the residual vector returned by
*   `calculate_test_rhs()` and comparing it against a tolerance
*   proportional to the current barrier size.
2788 *
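*   In other words, with @f$r(x)@f$ denoting the residual vector returned by
*   `calculate_test_rhs()` and @f$\mu@f$ the current barrier size, the test
*   below simply checks whether
*   @f[
*     \|r(x)\|_{\ell_1} < 10^{-2}\,\mu .
*   @f]
*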
2789 * @code
2790 *   template <int dim>
2791 *   bool SANDTopOpt<dim>::check_convergence(const BlockVector<double> &state)
2792 *   {
2793 *   const BlockVector<double> test_rhs = calculate_test_rhs(state);
2794 *   const double test_rhs_norm = test_rhs.l1_norm();
2795 *  
2796 *   const double convergence_condition = 1e-2;
2797 *   const double target_norm = convergence_condition * barrier_size;
2798 *  
2799 *   std::cout << " Checking convergence. Current rhs norm is "
2800 *   << test_rhs_norm << ", target is " << target_norm << std::endl;
2801 *  
2802 *   return (test_rhs_norm < target_norm);
2803 *   }
2804 *  
2805 *  
2806 * @endcode
2807 *
2808 *
2809 * <a name="step_79-Postprocessingthesolution"></a>
2810 * <h3>Postprocessing the solution</h3>
2811 *
2812
2813 *
2814 * The first of the postprocessing functions outputs information
2815 * in a VTU file for visualization. It looks long, but it's really
2816 * just the same as what was done in @ref step_22 "step-22", for example, just
2817 * with (a lot) more solution variables:
2818 *
2819 * @code
2820 *   template <int dim>
2822 *   {
2823 *   std::vector<std::string> solution_names(1, "density");
2824 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
2825 *   data_component_interpretation(
*   1, DataComponentInterpretation::component_is_scalar);
2827 *   for (unsigned int i = 0; i < dim; ++i)
2828 *   {
2829 *   solution_names.emplace_back("displacement");
2830 *   data_component_interpretation.push_back(
*   DataComponentInterpretation::component_is_part_of_vector);
2832 *   }
2833 *   solution_names.emplace_back("unfiltered_density");
2834 *   data_component_interpretation.push_back(
*   DataComponentInterpretation::component_is_scalar);
2836 *   for (unsigned int i = 0; i < dim; ++i)
2837 *   {
2838 *   solution_names.emplace_back("displacement_multiplier");
2839 *   data_component_interpretation.push_back(
*   DataComponentInterpretation::component_is_part_of_vector);
2841 *   }
2842 *   solution_names.emplace_back("unfiltered_density_multiplier");
2843 *   data_component_interpretation.push_back(
*   DataComponentInterpretation::component_is_scalar);
2845 *   solution_names.emplace_back("low_slack");
2846 *   data_component_interpretation.push_back(
*   DataComponentInterpretation::component_is_scalar);
2848 *   solution_names.emplace_back("low_slack_multiplier");
2849 *   data_component_interpretation.push_back(
*   DataComponentInterpretation::component_is_scalar);
2851 *   solution_names.emplace_back("high_slack");
2852 *   data_component_interpretation.push_back(
*   DataComponentInterpretation::component_is_scalar);
2854 *   solution_names.emplace_back("high_slack_multiplier");
2855 *   data_component_interpretation.push_back(
*   DataComponentInterpretation::component_is_scalar);
2857 *  
2858 *   DataOut<dim> data_out;
2859 *   data_out.attach_dof_handler(dof_handler);
2860 *   data_out.add_data_vector(nonlinear_solution,
*   solution_names,
*   DataOut<dim>::type_dof_data,
2863 *   data_component_interpretation);
2864 *   data_out.build_patches();
2865 *  
2866 *   std::ofstream output("solution" + std::to_string(iteration) + ".vtu");
2867 *   data_out.write_vtu(output);
2868 *   }
2869 *  
2870 *  
2871 * @endcode
2872 *
2873 * The second of these functions outputs the solution as an `.stl`
2874 * file for 3d
2875 * printing. [STL](https://en.wikipedia.org/wiki/STL_(file_format))
2876 * files are made up of triangles and normal vectors, and we will
*   use this format to show all of those cells with a density value larger
*   than 1/2. To this end we extrude the mesh from @f$z=0@f$
*   to @f$z=0.25@f$, and then generate two triangles for each face of
*   these cells. The vertices of each triangle have to be ordered
*   counter-clockwise when seen from outside the body,
*   and the normal vectors must be unit vectors pointing outwards,
*   which requires a few checks.
2884 *
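*   For reference, every facet we write below has the following shape in the
*   ASCII STL format (the coordinates shown here are just placeholders):
*
* @code
* facet normal nx ny nz
*   outer loop
*     vertex x1 y1 z1
*     vertex x2 y2 z2
*     vertex x3 y3 z3
*   endloop
* endfacet
* @endcode
*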
2885 * @code
2886 *   template <int dim>
*   void SANDTopOpt<dim>::write_as_stl()
2888 *   {
2889 *   static_assert(dim == 2,
2890 *   "This function is not implemented for anything "
2891 *   "other than the 2d case.");
2892 *  
2893 *   std::ofstream stlfile;
2894 *   stlfile.open("bridge.stl");
2895 *  
2896 *   stlfile << "solid bridge\n" << std::scientific;
2897 *   double height = .25;
2898 *  
2899 *   for (const auto &cell : dof_handler.active_cell_iterators())
2900 *   {
2901 *   if (nonlinear_solution.block(
2902 *   SolutionBlocks::density)[cell->active_cell_index()] > 0.5)
2903 *   {
2904 * @endcode
2905 *
*   We have now found a cell with a density value larger
*   than 1/2. Let us start by writing out the bottom
*   and top faces of the extruded cell, each as two triangles.
*   To get the vertex ordering right, we have to know
*   whether a cell has a right- or left-handed
*   coordinate system, which we determine from the
*   directions of the two edges starting at vertex 0 and
*   the sign of the determinant they form.
2914 *
2915 * @code
2916 *   const Tensor<1, dim> edge_directions[2] = {cell->vertex(1) -
2917 *   cell->vertex(0),
2918 *   cell->vertex(2) -
2919 *   cell->vertex(0)};
*   const Tensor<2, dim> edge_tensor(
2921 *   {{edge_directions[0][0], edge_directions[0][1]},
2922 *   {edge_directions[1][0], edge_directions[1][1]}});
2923 *   const bool is_right_handed_cell = (determinant(edge_tensor) > 0);
2924 *  
*   if (is_right_handed_cell)
2926 *   {
2927 *   /* Write one side at z = 0. */
2928 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2929 *   << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
2930 *   stlfile << " outer loop\n";
2931 *   stlfile << " vertex " << cell->vertex(0)[0] << ' '
2932 *   << cell->vertex(0)[1] << ' ' << 0.000000e+00 << '\n';
2933 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2934 *   << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
2935 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2936 *   << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
2937 *   stlfile << " endloop\n";
2938 *   stlfile << " endfacet\n";
2939 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2940 *   << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
2941 *   stlfile << " outer loop\n";
2942 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2943 *   << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
2944 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2945 *   << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
2946 *   stlfile << " vertex " << cell->vertex(3)[0] << ' '
2947 *   << cell->vertex(3)[1] << ' ' << 0.000000e+00 << '\n';
2948 *   stlfile << " endloop\n";
2949 *   stlfile << " endfacet\n";
2950 *  
2951 *   /* Write one side at z = height. */
2952 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2953 *   << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
2954 *   stlfile << " outer loop\n";
2955 *   stlfile << " vertex " << cell->vertex(0)[0] << ' '
2956 *   << cell->vertex(0)[1] << ' ' << height << '\n';
2957 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2958 *   << cell->vertex(1)[1] << ' ' << height << '\n';
2959 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2960 *   << cell->vertex(2)[1] << ' ' << height << '\n';
2961 *   stlfile << " endloop\n";
2962 *   stlfile << " endfacet\n";
2963 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2964 *   << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
2965 *   stlfile << " outer loop\n";
2966 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2967 *   << cell->vertex(1)[1] << ' ' << height << '\n';
2968 *   stlfile << " vertex " << cell->vertex(3)[0] << ' '
2969 *   << cell->vertex(3)[1] << ' ' << height << '\n';
2970 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2971 *   << cell->vertex(2)[1] << ' ' << height << '\n';
2972 *   stlfile << " endloop\n";
2973 *   stlfile << " endfacet\n";
2974 *   }
2975 *   else /* The cell has a left-handed set up */
2976 *   {
2977 *   /* Write one side at z = 0. */
2978 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2979 *   << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
2980 *   stlfile << " outer loop\n";
2981 *   stlfile << " vertex " << cell->vertex(0)[0] << ' '
2982 *   << cell->vertex(0)[1] << ' ' << 0.000000e+00 << '\n';
2983 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2984 *   << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
2985 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2986 *   << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
2987 *   stlfile << " endloop\n";
2988 *   stlfile << " endfacet\n";
2989 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2990 *   << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
2991 *   stlfile << " outer loop\n";
2992 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2993 *   << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
2994 *   stlfile << " vertex " << cell->vertex(3)[0] << ' '
2995 *   << cell->vertex(3)[1] << ' ' << 0.000000e+00 << '\n';
2996 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2997 *   << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
2998 *   stlfile << " endloop\n";
2999 *   stlfile << " endfacet\n";
3000 *  
3001 *   /* Write one side at z = height. */
3002 *   stlfile << " facet normal " << 0.000000e+00 << ' '
3003 *   << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
3004 *   stlfile << " outer loop\n";
3005 *   stlfile << " vertex " << cell->vertex(0)[0] << ' '
3006 *   << cell->vertex(0)[1] << ' ' << height << '\n';
3007 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
3008 *   << cell->vertex(2)[1] << ' ' << height << '\n';
3009 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
3010 *   << cell->vertex(1)[1] << ' ' << height << '\n';
3011 *   stlfile << " endloop\n";
3012 *   stlfile << " endfacet\n";
3013 *   stlfile << " facet normal " << 0.000000e+00 << ' '
3014 *   << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
3015 *   stlfile << " outer loop\n";
3016 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
3017 *   << cell->vertex(1)[1] << ' ' << height << '\n';
3018 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
3019 *   << cell->vertex(2)[1] << ' ' << height << '\n';
3020 *   stlfile << " vertex " << cell->vertex(3)[0] << ' '
3021 *   << cell->vertex(3)[1] << ' ' << height << '\n';
3022 *   stlfile << " endloop\n";
3023 *   stlfile << " endfacet\n";
3024 *   }
3025 *  
3026 * @endcode
3027 *
3028 * Next we need to deal with the four faces of the
3029 * cell, extended into the @f$z@f$ direction. However, we
3030 * only need to write these faces if either the face
3031 * is on the domain boundary, or if it is the
3032 * interface between a cell with density greater than
3033 * 0.5, and a cell with a density less than 0.5.
3034 *
3035 * @code
3036 *   for (unsigned int face_number = 0;
*   face_number < GeometryInfo<dim>::faces_per_cell;
3038 *   ++face_number)
3039 *   {
3040 *   const typename DoFHandler<dim>::face_iterator face =
3041 *   cell->face(face_number);
3042 *  
3043 *   if ((face->at_boundary()) ||
3044 *   (!face->at_boundary() &&
3045 *   (nonlinear_solution.block(
3046 *   0)[cell->neighbor(face_number)->active_cell_index()] <
3047 *   0.5)))
3048 *   {
3049 *   const Tensor<1, dim> normal_vector =
3050 *   (face->center() - cell->center());
3051 *   const double normal_norm = normal_vector.norm();
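*   /* The rather lengthy condition below is intended to check, via the
*    * sign of a triple product of (i) the vertical extrusion direction,
*    * (ii) the in-plane edge of the face, and (iii) the outward-pointing
*    * vector computed above, which of the two possible vertex orderings
*    * makes the normals of the two triangles written for this face point
*    * out of the body rather than into it. */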
3052 *   if ((face->vertex(0)[0] - face->vertex(0)[0]) *
3053 *   (face->vertex(1)[1] - face->vertex(0)[1]) *
3054 *   0.000000e+00 +
3055 *   (face->vertex(0)[1] - face->vertex(0)[1]) * (0 - 0) *
3056 *   normal_vector[0] +
3057 *   (height - 0) *
3058 *   (face->vertex(1)[0] - face->vertex(0)[0]) *
3059 *   normal_vector[1] -
3060 *   (face->vertex(0)[0] - face->vertex(0)[0]) * (0 - 0) *
3061 *   normal_vector[1] -
3062 *   (face->vertex(0)[1] - face->vertex(0)[1]) *
3063 *   (face->vertex(1)[0] - face->vertex(0)[0]) *
3064 *   normal_vector[0] -
3065 *   (height - 0) *
3066 *   (face->vertex(1)[1] - face->vertex(0)[1]) * 0 >
3067 *   0)
3068 *   {
3069 *   stlfile << " facet normal "
3070 *   << normal_vector[0] / normal_norm << ' '
3071 *   << normal_vector[1] / normal_norm << ' '
3072 *   << 0.000000e+00 << '\n';
3073 *   stlfile << " outer loop\n";
3074 *   stlfile << " vertex " << face->vertex(0)[0]
3075 *   << ' ' << face->vertex(0)[1] << ' '
3076 *   << 0.000000e+00 << '\n';
3077 *   stlfile << " vertex " << face->vertex(0)[0]
3078 *   << ' ' << face->vertex(0)[1] << ' ' << height
3079 *   << '\n';
3080 *   stlfile << " vertex " << face->vertex(1)[0]
3081 *   << ' ' << face->vertex(1)[1] << ' '
3082 *   << 0.000000e+00 << '\n';
3083 *   stlfile << " endloop\n";
3084 *   stlfile << " endfacet\n";
3085 *   stlfile << " facet normal "
3086 *   << normal_vector[0] / normal_norm << ' '
3087 *   << normal_vector[1] / normal_norm << ' '
3088 *   << 0.000000e+00 << '\n';
3089 *   stlfile << " outer loop\n";
3090 *   stlfile << " vertex " << face->vertex(0)[0]
3091 *   << ' ' << face->vertex(0)[1] << ' ' << height
3092 *   << '\n';
3093 *   stlfile << " vertex " << face->vertex(1)[0]
3094 *   << ' ' << face->vertex(1)[1] << ' ' << height
3095 *   << '\n';
3096 *   stlfile << " vertex " << face->vertex(1)[0]
3097 *   << ' ' << face->vertex(1)[1] << ' '
3098 *   << 0.000000e+00 << '\n';
3099 *   stlfile << " endloop\n";
3100 *   stlfile << " endfacet\n";
3101 *   }
3102 *   else
3103 *   {
3104 *   stlfile << " facet normal "
3105 *   << normal_vector[0] / normal_norm << ' '
3106 *   << normal_vector[1] / normal_norm << ' '
3107 *   << 0.000000e+00 << '\n';
3108 *   stlfile << " outer loop\n";
3109 *   stlfile << " vertex " << face->vertex(0)[0]
3110 *   << ' ' << face->vertex(0)[1] << ' '
3111 *   << 0.000000e+00 << '\n';
3112 *   stlfile << " vertex " << face->vertex(1)[0]
3113 *   << ' ' << face->vertex(1)[1] << ' '
3114 *   << 0.000000e+00 << '\n';
3115 *   stlfile << " vertex " << face->vertex(0)[0]
3116 *   << ' ' << face->vertex(0)[1] << ' ' << height
3117 *   << '\n';
3118 *   stlfile << " endloop\n";
3119 *   stlfile << " endfacet\n";
3120 *   stlfile << " facet normal "
3121 *   << normal_vector[0] / normal_norm << ' '
3122 *   << normal_vector[1] / normal_norm << ' '
3123 *   << 0.000000e+00 << '\n';
3124 *   stlfile << " outer loop\n";
3125 *   stlfile << " vertex " << face->vertex(0)[0]
3126 *   << ' ' << face->vertex(0)[1] << ' ' << height
3127 *   << '\n';
3128 *   stlfile << " vertex " << face->vertex(1)[0]
3129 *   << ' ' << face->vertex(1)[1] << ' '
3130 *   << 0.000000e+00 << '\n';
3131 *   stlfile << " vertex " << face->vertex(1)[0]
3132 *   << ' ' << face->vertex(1)[1] << ' ' << height
3133 *   << '\n';
3134 *   stlfile << " endloop\n";
3135 *   stlfile << " endfacet\n";
3136 *   }
3137 *   }
3138 *   }
3139 *   }
3140 *   }
3141 *   stlfile << "endsolid bridge";
3142 *   }
3143 *  
3144 *  
3145 *  
3146 * @endcode
3147 *
3148 *
3149 * <a name="step_79-Therunfunctiondrivingtheoverallalgorithm"></a>
3150 * <h3>The run() function driving the overall algorithm</h3>
3151 *
3152
3153 *
*   The `run()` function is,
*   in the grand scheme of things, a rather complicated function:
*   finding the next iterate
*   isn't just about finding a Newton direction like in @ref step_15 "step-15" and
*   then going a fixed distance in this direction any more, but
*   involves (i) determining what the
*   penalty parameter should be in the current step, (ii) a
*   rather elaborate procedure (the watchdog algorithm with its fall-back
*   line searches) for deciding how far to actually move, and (iii) a
*   criterion for when the log-barrier parameter can be reduced and the
*   iteration be terminated.
3164 *
3165
3166 *
3167 * The function starts out simple enough with first setting up the
*   mesh, the DoFHandler, and then the various linear algebra objects
*   necessary for the following steps:
3170 *
3171 * @code
3172 *   template <int dim>
3173 *   void SANDTopOpt<dim>::run()
3174 *   {
3175 *   std::cout << "filter r is: " << filter_r << std::endl;
3176 *  
3177 *   {
3178 *   TimerOutput::Scope t(timer, "setup");
3179 *  
3181 *  
3182 *   dof_handler.distribute_dofs(fe);
3183 *   DoFRenumbering::component_wise(dof_handler);
3184 *  
3188 *   }
3189 *  
3190 * @endcode
3191 *
3192 * We then set a number of parameters that affect the
3193 * log-barrier and line search components of the optimization
3194 * algorithm:
3195 *
3196 * @code
3197 *   barrier_size = 25;
3198 *   const double min_barrier_size = .0005;
3199 *  
3200 *   const unsigned int max_uphill_steps = 8;
3201 *   const double descent_requirement = .0001;
3202 *  
3203 *  
3204 * @endcode
3205 *
*   The outer loop of the algorithm keeps iterating as long as either
*   (i) the log-barrier parameter has not yet become small enough, or (ii)
*   we have not yet reached convergence. In any case, we terminate if
*   we exceed the maximal number of iterations allowed. This overall
*   structure is encoded as a `do { ... } while (...)` loop
*   with the exit test at the bottom.
3213 *
3214 * @code
3215 *   unsigned int iteration_number = 0;
3216 *   const unsigned int max_iterations = 10000;
3217 *  
3218 *   do
3219 *   {
3220 *   std::cout << "Starting outer step in iteration " << iteration_number
3221 *   << " with barrier parameter " << barrier_size << std::endl;
3222 *  
3223 * @endcode
3224 *
3225 * Within this outer loop, we have an inner loop in which we
3226 *   try to find an update direction using the watchdog
*   algorithm described in the introduction.
3228 *
3229
3230 *
*   The general idea of the watchdog algorithm is
*   this: For a maximum of `max_uphill_steps` (i.e., a loop
*   within the "inner loop" mentioned above) attempts, we use
*   `find_max_step()` to compute a Newton update step, and
*   add these up in the `nonlinear_solution` vector. In each of
*   these attempts we then check whether we have
*   reached a target value of the merit function described
*   above; if we have, we accept the state we have arrived at
*   and end the watchdog loop. The target value is computed from
*   the merit function's directional derivative along the
*   first proposed direction provided by `find_max_step()` in
*   the first go-around of this loop (the `k==0` case).
3244 *
3245 * @code
3246 *   do
3247 *   {
3248 *   std::cout << " Starting inner step in iteration "
3249 *   << iteration_number
3250 *   << " with merit function penalty multiplier "
3251 *   << penalty_multiplier << std::endl;
3252 *  
3253 *   bool watchdog_step_found = false;
3254 *  
3257 *   double target_merit = numbers::signaling_nan<double>();
3258 *   double merit_derivative = numbers::signaling_nan<double>();
3259 *  
3260 *   for (unsigned int k = 0; k < max_uphill_steps; ++k)
3261 *   {
3262 *   ++iteration_number;
3264 *  
3265 *   if (k == 0)
3266 *   {
3270 *   .0001 * first_step) -
3272 *   .0001);
3275 *   }
3276 *  
3278 *   const double current_merit =
*   calculate_exact_merit(nonlinear_solution);
3280 *  
3281 *   std::cout << " current watchdog state merit is: "
3282 *   << current_merit << "; target merit is "
3283 *   << target_merit << std::endl;
3284 *  
*   if (current_merit < target_merit)
3286 *   {
3287 *   watchdog_step_found = true;
3288 *   std::cout << " found workable step after " << k + 1
3289 *   << " iterations" << std::endl;
3290 *   break;
3291 *   }
3292 *   }
3293 *  
3294 *  
3295 * @endcode
3296 *
*   What happens next depends on whether the watchdog loop above
*   succeeded. If it did, then we simply accept the state we have
*   arrived at and move on. If, on the other hand,
*   we took the maximal number of unsuccessful steps in
*   the loop above, then we need to do something else,
*   and this is what the following code block does.
3304 *
3305
3306 *
3307 * Specifically, from the final (unsuccessful) state
3308 * of the loop above, we seek one more update
3309 * direction and take what is called a "stretch
3310 * step". If that stretch state satisfies a condition
3311 * involving the merit function, then we go there. On
3312 * the other hand, if the stretch state is also
3313 * unacceptable (as all of the watchdog steps above
*   were), then we discard all of the watchdog steps
*   and return to the state at which we started the watchdog
*   iterations, which we had
*   stored in the `watchdog_state` variable above. More
*   specifically, the code below distinguishes between
*   whether we take a step from `watchdog_state` in
*   direction `first_step`, or whether we can do one
*   more update from the stretch state to find a new
*   direction; which of the two we do depends on whether
*   the stretch state is
*   actually better than the state we started from at
*   the beginning of the watchdog loop. Either way, the goal is to
*   end up in a reasonable
*   place to be in, and getting away to start the next
*   iteration from a not-too-bad state is what allows this
*   strategy to eventually converge.
3329 *
3330
3331 *
*   This whole process is then repeated until we have
*   finally converged (or if we run up against the
*   maximal number of iterations -- where we count the
*   number of linear solves as iterations and increment
*   the counter every time we call `find_max_step()`
*   since that is where the linear solve actually
*   happens). In any case, at the end of each of these
*   inner iterations we also output the solution in a
*   form suitable for visualization.
3342 *
3343
3344 *
3345 *
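*   To summarize the logic described above in one place, the following is a
*   compact, self-contained sketch of the watchdog strategy. It is an
*   illustration under simplifying assumptions, not the tutorial's actual
*   implementation: the state is a plain `std::vector<double>`, the fallback
*   branch is simplified, and the callables `merit`, `newton_direction`, and
*   `scaled_step` are hypothetical stand-ins for `calculate_exact_merit()`,
*   `find_max_step()`, and `compute_scaled_step()`:
*
* @code
* #include <cstddef>
* #include <functional>
* #include <vector>
*
* using State = std::vector<double>;
*
* // Return x + a*d, the only vector operation this sketch needs.
* State axpy(const State &x, const double a, const State &d)
* {
*   State y = x;
*   for (std::size_t i = 0; i < y.size(); ++i)
*     y[i] += a * d[i];
*   return y;
* }
*
* State watchdog_update(
*   State                                                     x,
*   const std::function<double(const State &)>               &merit,
*   const std::function<State(const State &)>                &newton_direction,
*   const std::function<State(const State &, const State &)> &scaled_step,
*   const unsigned int max_uphill_steps    = 8,
*   const double       descent_requirement = 1e-4)
* {
*   const State watchdog_start = x;
*   State       first_direction;
*   double      target_merit = 0.;
*
*   for (unsigned int k = 0; k < max_uphill_steps; ++k)
*     {
*       const State direction = newton_direction(x);
*       if (k == 0)
*         {
*           first_direction = direction;
*           // Finite-difference directional derivative of the merit function:
*           const double d_merit =
*             (merit(axpy(x, 1e-4, direction)) - merit(x)) / 1e-4;
*           target_merit = merit(x) + descent_requirement * d_merit;
*         }
*
*       x = axpy(x, 1., direction); // take the full, possibly "uphill", step
*
*       if (merit(x) < target_merit)
*         return x; // the watchdog loop succeeded
*     }
*
*   // None of the watchdog steps was acceptable: take one line-searched
*   // "stretch" step from the last state and keep it if it beats the state
*   // we started from; otherwise fall back to a line search along the very
*   // first direction from the start of the watchdog loop.
*   const State stretch_state = scaled_step(x, newton_direction(x));
*
*   if (merit(stretch_state) < merit(watchdog_start))
*     return stretch_state;
*   else
*     return scaled_step(watchdog_start, first_direction);
* }
* @endcode
*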
3346 * @code
3347 *   if (watchdog_step_found == false)
3348 *   {
3349 *   ++iteration_number;
3353 *   update_step,
3355 *  
3356 * @endcode
3357 *
3358 * If we did not get a successful watchdog step,
3359 * we now need to decide between going back to
3360 * where we started, or using the final state. We
3361 * compare the merits of both of these locations,
3362 * and then take a scaled step from whichever
3363 * location is better. As the scaled step is
3365 * keeping one of the two.
3366 *
3367 * @code
3371 *   {
3372 *   std::cout << " Taking scaled step from end of watchdog"
3373 *   << std::endl;
3375 *   }
3376 *   else
3377 *   {
3378 *   std::cout
3379 *   << " Taking scaled step from beginning of watchdog"
3380 *   << std::endl;
3383 *   {
3386 *   first_step,
3388 *   }
3389 *   else
3390 *   {
3391 *   ++iteration_number;
3394 *   find_max_step();
3397 *   stretch_step,
3399 *   }
3400 *   }
3401 *   }
3402 *  
3404 *   }
3405 *   while ((iteration_number < max_iterations) &&
*   (check_convergence(nonlinear_solution) == false));
3407 *  
3408 *  
3409 * @endcode
3410 *
*   At this point we have finished the inner iteration and need to update the
*   log-barrier parameter, for which we use the following
*   formula. The rest of the function is then simply about
*   checking whether the outer loop has converged, writing the
*   final "design" as an STL file for use in 3d printing,
*   and outputting some timing information.
3418 *
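*   As an aside, with the multiplier @f$0.8@f$ and exponent @f$1.2@f$ defined
*   below, a typical update of this kind (a sketch of the general form, with
*   @f$\mu@f$ denoting the barrier size) would be
*   @f[
*     \mu \leftarrow \min\left(0.8\,\mu,\; \mu^{1.2}\right),
*   @f]
*   i.e., a geometric decrease while @f$\mu@f$ is still large and a
*   superlinear decrease once it becomes small, possibly clamped from below
*   by `min_barrier_size`.
*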
3419 * @code
3420 *   const double barrier_size_multiplier = .8;
3421 *   const double barrier_size_exponent = 1.2;
3422 *  
3423 *   barrier_size =
3427 *  
3428 *   std::cout << std::endl;
3429 *   }
3430 *   while (((barrier_size > min_barrier_size) ||
3431 *   (check_convergence(nonlinear_solution) == false)) &&
3432 *   (iteration_number < max_iterations));
3433 *  
3434 *   write_as_stl();
3435 *   }
3436 *   } // namespace SAND
3437 *  
3438 * @endcode
3439 *
3440 *
3441 * <a name="step_79-Themainfunction"></a>
3442 * <h3>The main function</h3>
3443 *
3444
3445 *
3446 * The remainder of the code, the `main()` function, is as usual:
3447 *
3448 * @code
3449 *   int main()
3450 *   {
3451 *   try
3452 *   {
*   SAND::SANDTopOpt<2> elastic_problem_2d;
3454 *   elastic_problem_2d.run();
3455 *   }
3456 *   catch (std::exception &exc)
3457 *   {
3458 *   std::cerr << std::endl
3459 *   << std::endl
3460 *   << "----------------------------------------------------"
3461 *   << std::endl;
3462 *   std::cerr << "Exception on processing: " << std::endl
3463 *   << exc.what() << std::endl
3464 *   << "Aborting!" << std::endl
3465 *   << "----------------------------------------------------"
3466 *   << std::endl;
3467 *  
3468 *   return 1;
3469 *   }
3470 *   catch (...)
3471 *   {
3472 *   std::cerr << std::endl
3473 *   << std::endl
3474 *   << "----------------------------------------------------"
3475 *   << std::endl;
3476 *   std::cerr << "Unknown exception!" << std::endl
3477 *   << "Aborting!" << std::endl
3478 *   << "----------------------------------------------------"
3479 *   << std::endl;
3480 *   return 1;
3481 *   }
3482 *   return 0;
3483 *   }
3484 * @endcode
3485<a name="step_79-Results"></a><h1>Results</h1>
3486
3487<a name="step_79-TestProblem"></a><h3>Test Problem</h3>
3488
The algorithm above is tested on a classical topology optimization benchmark,
the Messerschmitt-Bölkow-Blohm (MBB) beam: we seek the optimal 2d structure
that can be built on a rectangle six units wide and one unit tall. The bottom
corners of this rectangle are fixed in place
in the @f$y@f$ direction using a zero Dirichlet boundary condition, and a downward
force is applied at the center of the top edge through a Neumann
boundary condition. The rest of the boundary is allowed to move, and has no
external force applied to it, which takes the form of a zero Neumann boundary
condition. In other words, we would like to find out how we should
design a bridge in a way so that if the bottom left and bottom right point of
the domain are held fixed in the vertical direction, the downward deflection
under the load at the center is minimal.

Because the problem is symmetric about a vertical centerline, it can be modeled on a
rectangle of width 3 and height 1 by cutting the original domain in half, and
using zero Dirichlet boundary conditions in the @f$x@f$ direction along the cut
edge. The geometry and boundary conditions of this reduced problem are shown
below:
3511
3512<div style="text-align:center;">
3513 <img src="https://www.dealii.org/images/steps/developer/step-79.mbbgeometry.png"
3514 alt="The MBB problem domain and boundary conditions">
3515</div>
3516
3517
After running the program on this geometry, the
individual components of the solution look as follows:
3520
3521<div class="onecolumn" style="width: 80%; text-align: center;">
3522 <div>
3523 <img src="https://www.dealii.org/images/steps/developer/step-79.filtereddensity.png"
3524 alt="Filtered Density Solution">
3525 </div>
3526 <div>
3527 <img src="https://www.dealii.org/images/steps/developer/step-79.unfiltereddensity.png"
3528 alt="Unfiltered Density Solution">
3529 </div>
3530</div>
3531
3532
These pictures show that what we find here is in accordance with what one
typically sees in the published literature on this benchmark.
Maybe more interestingly, the
result looks like a truss bridge (except that we apply the load at the top of
the structure rather than along a deck at its bottom), suggesting that designs
of this kind, used by engineers for a long time, are indeed close to optimal
in some sense.
3541<a name="step_79-Possibilitiesforextensions"></a><h4>Possibilities for extensions</h4>
3542
3543
The algorithm as implemented requires a relatively large number of iterations
to converge. This appears to
be due to both a lack of precision on when and how to decrease the barrier
values, as well as our choice of merit function being sub-optimal. In the future,
a more careful schedule for reducing the barrier parameter, or the use of a
filter in place of a merit function, will decrease the number of necessary
iterations.
3553
3557
Secondly, the linear solver used here is just the sparse direct solver based on
UMFPACK (the SparseDirectUMFPACK class). This works, but is not optimal: the
system matrix is large and has a
lot of nonzero entries in many rows, even on meshes that overall are still
relatively coarse. As a consequence, the solver time dominates the
overall run time of the program; using a better, iterative solver together
with a suitable preconditioner would therefore be a worthwhile extension.
3566 *
3567 *
3568<a name="step_79-PlainProg"></a>
3569<h1> The plain program</h1>
3570@include "step-79.cc"
3571*/