deal.II version GIT relicensing-1721-g8100761196 2024-08-31 12:30:00+00:00
step-79.h
1
1243 *  
1244 *   for (unsigned int i = 0; i < dim; ++i)
1245 *   {
1246 *   for (unsigned int k = 0; k < dim; ++k)
1247 *   {
1248 *   coupling[displacement<dim> + i]
1249 *   [displacement_multiplier<dim> + k] = DoFTools::always;
1250 *   coupling[displacement_multiplier<dim> + k]
1251 *   [displacement<dim> + i] = DoFTools::always;
1252 *   }
1253 *   }
1254 *  
1255 *   /* Coupling for slack variables */
1256 *   coupling[density_lower_slack<dim>][density_lower_slack<dim>] =
1257 *   DoFTools::always;
1258 *   coupling[density_lower_slack<dim>][density_upper_slack<dim>] =
1259 *   DoFTools::always;
1260 *   coupling[density_upper_slack<dim>][density_lower_slack<dim>] =
1261 *   DoFTools::always;
1262 *  
1263 *   coupling[density_lower_slack_multiplier<dim>]
1264 *   [density_lower_slack_multiplier<dim>] = DoFTools::always;
1265 *   coupling[density_lower_slack_multiplier<dim>]
1266 *   [density_upper_slack_multiplier<dim>] = DoFTools::always;
1267 *   coupling[density_upper_slack_multiplier<dim>]
1268 *   [density_lower_slack_multiplier<dim>] = DoFTools::always;
1269 *   }
1270 *  
1271 * @endcode
1272 *
1273 * Before we can create the sparsity pattern, we also have to
1274 * set up constraints. Since this program does not adaptively
1275 * refine the mesh, the only constraint we have is one that
1276 * couples all density variables to enforce the volume
1277 * constraint. This will ultimately lead to a dense sub-block
1278 * of the matrix, but there is little we can do about that.
1279 *
1280 * @code
1281 *   const ComponentMask density_mask =
1282 *   fe.component_mask(ValueExtractors::densities<dim>);
1283 *   const IndexSet density_dofs =
1284 *   DoFTools::extract_dofs(dof_handler, density_mask);
1285 *  
1286 *   types::global_dof_index last_density_dof =
1287 *   density_dofs.nth_index_in_set(density_dofs.n_elements() - 1);
1288 *   constraints.clear();
1289 *   {
1290 *   std::vector<std::pair<types::global_dof_index, double>>
1291 *   constraint_entries;
1292 *   constraint_entries.reserve(density_dofs.n_elements() - 1);
1293 *   for (const types::global_dof_index dof_index : density_dofs)
1294 *   if (dof_index != last_density_dof)
1295 *   constraint_entries.emplace_back(dof_index, -1.);
1296 *  
1297 *   constraints.add_constraint(last_density_dof, constraint_entries, 0.);
1298 *   }
1299 *   constraints.close();
1300 *  
1301 * @endcode
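 *
 * As a side remark on what these few lines actually build (using the
 * semantics of AffineConstraints::add_constraint(), which declares the
 * constrained degree of freedom to equal the weighted sum of the given
 * entries plus the inhomogeneity): if there are @f$N@f$ density degrees
 * of freedom, the constraint reads
 * @f[
 *   \rho_{N-1} = -\sum_{i=0}^{N-2} \rho_i,
 *   \qquad \text{i.e.} \qquad
 *   \sum_{i=0}^{N-1} \rho_i = 0.
 * @f]
 * Because this constraint is imposed on the Newton updates, the sum of
 * the density degrees of freedom -- and with it the material volume,
 * given that all cells of the mesh used here have the same size -- does
 * not change from one iteration to the next.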
1302 *
1303 * We can now finally create the sparsity pattern for the
1304 * matrix, taking into account which variables couple with
1305 * which other variables, and the constraints we have on the
1306 * density.
1307 *
1308 * @code
1309 *   DoFTools::make_sparsity_pattern(dof_handler, coupling, dsp, constraints);
1310 *  
1311 * @endcode
1312 *
1313 * The only part of the matrix we have not dealt with is the
1314 * filter matrix and its transpose. These are non-local
1315 * (integral) operators for which deal.II does not currently
1316 * have functions. What we will ultimately need to do is go
1317 * over all cells and couple the unfiltered density on the
1318 * current cell to the filtered densities of all neighboring cells
1319 * that are less than a threshold distance away, and the other way
1320 * around. For the moment, however, we are only concerned with
1321 * building the sparsity pattern that corresponds to this kind of
1322 * matrix, so we perform the equivalent loop, and where we would
1323 * later write into an entry of the matrix, we now
1324 * simply add an entry to the sparsity pattern:
1325 *
1326 * @code
1327 *   for (const auto &cell : dof_handler.active_cell_iterators())
1328 *   {
1329 *   const unsigned int i = cell->active_cell_index();
1330 *   for (const auto &check_cell : find_relevant_neighbors(cell))
1331 *   {
1332 *   const double distance =
1333 *   cell->center().distance(check_cell->center());
1334 *   if (distance < filter_r)
1335 *   {
1336 *   dsp
1337 *   .block(SolutionBlocks::unfiltered_density,
1338 *   SolutionBlocks::unfiltered_density_multiplier)
1339 *   .add(i, check_cell->active_cell_index());
1340 *   dsp
1341 *   .block(SolutionBlocks::unfiltered_density_multiplier,
1342 *   SolutionBlocks::unfiltered_density)
1343 *   .add(i, check_cell->active_cell_index());
1344 *   }
1345 *   }
1346 *   }
1347 *  
1348 * @endcode
1349 *
1350 * Having so generated the "dynamic" sparsity pattern, we can
1351 * finally copy it to the structure that is used to associate
1352 * matrices with a sparsity pattern. Because the sparsity
1353 * pattern is large and complex, we also output it into a file
1354 * of its own for visualization purposes -- in other words,
1355 * for "visual debugging".
1356 *
1357 * @code
1358 *   sparsity_pattern.copy_from(dsp);
1359 *  
1360 *   std::ofstream out("sparsity.plt");
1361 *   sparsity_pattern.print_gnuplot(out);
1362 *  
1363 *   system_matrix.reinit(sparsity_pattern);
1364 *  
1365 *  
1366 * @endcode
1367 *
1368 * What is left is to correctly size the various vectors and
1369 * their blocks, as well as setting initial guesses for some
1370 * of the components of the (nonlinear) solution vector. We
1371 * here use the symbolic component names for individual blocks
1372 * of the solution vector and, for brevity, use the same trick
1373 * with `using namespace` as above:
1374 *
1375 * @code
1376 *   nonlinear_solution.reinit(block_sizes);
1377 *   system_rhs.reinit(block_sizes);
1378 *  
1379 *   {
1380 *   using namespace SolutionBlocks;
1381 *   nonlinear_solution.block(density).add(density_ratio);
1382 *   nonlinear_solution.block(unfiltered_density).add(density_ratio);
1383 *   nonlinear_solution.block(unfiltered_density_multiplier)
1384 *   .add(density_ratio);
1385 *   nonlinear_solution.block(density_lower_slack).add(density_ratio);
1386 *   nonlinear_solution.block(density_lower_slack_multiplier).add(50);
1387 *   nonlinear_solution.block(density_upper_slack).add(1 - density_ratio);
1388 *   nonlinear_solution.block(density_upper_slack_multiplier).add(50);
1389 *   }
1390 *   }
1391 *  
1392 *  
1393 * @endcode
1394 *
1395 *
1396 * <a name="step_79-Creatingthefiltermatrix"></a>
1397 * <h3>Creating the filter matrix</h3>
1398 *
1399
1400 *
1401 * Next up, a function that is used once at the beginning of the
1402 * program: It creates a matrix @f$H@f$ so that the filtered density
1403 * vector equals @f$H@f$ times the unfiltered density. The creation
1404 * of this matrix is non-trivial, and the matrix is used in every
1405 * iteration; rather than re-forming it each time as we do with the
1406 * Newton matrix, we therefore build it only once and store it separately.
1407 *
1408
1409 *
1410 * The way this matrix is computed follows the outline already used
1411 * above to form its sparsity pattern: we first copy that sparsity
1412 * pattern for this separately stored matrix, and then compute the
1413 * matrix entries themselves. You may want to check the
1414 * definition of this matrix in the introduction to this program.
1415 *
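 * For reference, what the two loops below compute can be summarized in a
 * single formula: denoting by @f$d_{ij}@f$ the distance between the
 * centers of cells @f$i@f$ and @f$j@f$, and by @f$r@f$ the filter radius
 * `filter_r`, the entries of the filter matrix are
 * @f[
 *   H_{ij} = \frac{\max(0,\ r-d_{ij})}{\sum_k \max(0,\ r-d_{ik})},
 * @f]
 * so that every row sums to one and cells whose centers are farther
 * than @f$r@f$ apart do not couple.
 *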
1416 * @code
1417 *   template <int dim>
1418 *   void SANDTopOpt<dim>::setup_filter_matrix()
1419 *   {
1420 * @endcode
1421 *
1422 * The sparsity pattern of the filter has already been determined
1423 * and implemented in the setup_system() function. We copy the
1424 * structure from the appropriate block and use it again here.
1425 *
1426
1427 *
1428 *
1429 * @code
1430 *   filter_sparsity_pattern.copy_from(
1431 *   sparsity_pattern.block(SolutionBlocks::unfiltered_density,
1432 *   SolutionBlocks::unfiltered_density_multiplier));
1433 *   filter_matrix.reinit(filter_sparsity_pattern);
1434 *  
1435 * @endcode
1436 *
1437 * Having so built the sparsity pattern, now we re-do all of
1438 * these loops to actually compute the necessary values of the
1439 * matrix entries:
1440 *
1441
1442 *
1443 *
1444 * @code
1445 *   for (const auto &cell : dof_handler.active_cell_iterators())
1446 *   {
1447 *   const unsigned int i = cell->active_cell_index();
1448 *   for (const auto &check_cell : find_relevant_neighbors(cell))
1449 *   {
1450 *   const double distance =
1451 *   cell->center().distance(check_cell->center());
1452 *   if (distance < filter_r)
1453 *   {
1454 *   filter_matrix.add(i,
1455 *   check_cell->active_cell_index(),
1456 *   filter_r - distance);
1457 *  
1458 *   }
1459 *   }
1460 *   }
1461 *  
1462 * @endcode
1463 *
1464 * The final step is to normalize the matrix so that for each
1465 * row, the sum of entries equals one.
1466 *
1467 * @code
1468 *   for (unsigned int i = 0; i < filter_matrix.m(); ++i)
1469 *   {
1470 *   double denominator = 0;
1471 *   for (SparseMatrix<double>::iterator iter = filter_matrix.begin(i);
1472 *   iter != filter_matrix.end(i);
1473 *   ++iter)
1474 *   denominator = denominator + iter->value();
1475 *   for (SparseMatrix<double>::iterator iter = filter_matrix.begin(i);
1476 *   iter != filter_matrix.end(i);
1477 *   ++iter)
1478 *   iter->value() = iter->value() / denominator;
1479 *   }
1480 *   }
1481 *  
1482 * @endcode
1483 *
1484 * This function is used for building the filter matrix. We create a set of
1485 * all the cell iterators within a certain radius of the given cell.
1486 * These are the neighboring cells that will be relevant for the filter.
1487 *
1488 * @code
1489 *   template <int dim>
1490 *   std::set<typename Triangulation<dim>::cell_iterator>
1491 *   SANDTopOpt<dim>::find_relevant_neighbors(
1492 *   typename Triangulation<dim>::cell_iterator cell) const
1493 *   {
1494 *   std::set<unsigned int> neighbor_ids;
1495 *   std::set<typename Triangulation<dim>::cell_iterator> cells_to_check;
1496 *  
1497 *   neighbor_ids.insert(cell->active_cell_index());
1498 *   cells_to_check.insert(cell);
1499 *  
1500 *   bool new_neighbors_found;
1501 *   do
1502 *   {
1503 *   new_neighbors_found = false;
1504 *   for (const auto &check_cell :
1505 *   std::vector<typename Triangulation<dim>::cell_iterator>(
1506 *   cells_to_check.begin(), cells_to_check.end()))
1507 *   {
1508 *   for (const auto n : check_cell->face_indices())
1509 *   {
1510 *   if (!(check_cell->face(n)->at_boundary()))
1511 *   {
1512 *   const auto &neighbor = check_cell->neighbor(n);
1513 *   const double distance =
1514 *   cell->center().distance(neighbor->center());
1515 *   if ((distance < filter_r) &&
1516 *   !(neighbor_ids.count(neighbor->active_cell_index())))
1517 *   {
1518 *   cells_to_check.insert(neighbor);
1519 *   neighbor_ids.insert(neighbor->active_cell_index());
1520 *   new_neighbors_found = true;
1521 *   }
1522 *   }
1523 *   }
1524 *   }
1525 *   }
1526 *   while (new_neighbors_found);
1527 *   return cells_to_check;
1528 *   }
1529 *  
1530 * @endcode
1531 *
1532 *
1533 * <a name="step_79-AssemblingtheNewtonmatrix"></a>
1534 * <h3>Assembling the Newton matrix</h3>
1535 *
1536
1537 *
1538 * Whereas the setup_filter_matrix function built a matrix that stays the same
1539 * as long as the mesh does not change (and the mesh never changes in
1540 * this program), the next function builds the matrix to be solved
1541 * in each iteration. This is where the magic happens: the components
1542 * of the system of linear equations describing Newton's method for
1543 * finding the solution of the KKT conditions are implemented here.
1544 *
1545
1546 *
1547 * The top of the function is as in most of these functions and just
1548 * sets up all sorts of variables necessary for the actual assembly,
1549 * including a whole bunch of extractors. The entire setup should
1550 * look familiar, though somewhat lengthier, if you've previously
1551 * looked at @ref step_22 "step-22".
1552 *
1553 * @code
1554 *   template <int dim>
1555 *   void SANDTopOpt<dim>::assemble_system()
1556 *   {
1557 *   TimerOutput::Scope t(timer, "assembly");
1558 *  
1559 *   system_matrix = 0;
1560 *   system_rhs = 0;
1561 *  
1562 *  
1563 *   const MappingQ<dim> mapping(1);
1564 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1565 *   const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
1566 *   FEValues<dim> fe_values(mapping,
1567 *   fe,
1568 *   quadrature_formula,
1569 *   update_values | update_gradients |
1570 *   update_quadrature_points | update_JxW_values);
1571 *   FEFaceValues<dim> fe_face_values(mapping,
1572 *   fe,
1573 *   face_quadrature_formula,
1574 *   update_values | update_quadrature_points |
1575 *   update_normal_vectors |
1576 *   update_JxW_values);
1577 *  
1578 *   const unsigned int dofs_per_cell = fe.dofs_per_cell;
1579 *   const unsigned int n_q_points = quadrature_formula.size();
1580 *  
1581 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1582 *   Vector<double> dummy_cell_rhs(dofs_per_cell);
1583 *  
1584 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1585 *  
1586 *   std::vector<double> lambda_values(n_q_points);
1587 *   std::vector<double> mu_values(n_q_points);
1588 *   const Functions::ConstantFunction<dim> lambda(1.);
1589 *   const Functions::ConstantFunction<dim> mu(1.);
1590 *   std::vector<Tensor<1, dim>> rhs_values(n_q_points);
1591 *  
1592 * @endcode
1593 *
1594 * At this point, we apply the filter to the unfiltered
1595 * density, and apply the adjoint (transpose) operation to the
1596 * unfiltered density multiplier, both to the current best
1597 * guess for the nonlinear solution. We use this later to tell
1598 * us how far off our filtered density is from the filter
1599 * applied to the unfiltered density. That is because, while at
1600 * the solution of the nonlinear problem we have
1601 * @f$\rho=H\varrho@f$, at intermediate iterations we in
1602 * general have @f$\rho^k\neq H\varrho^k@f$; the "residual"
1603 * @f$\rho^k-H\varrho^k@f$ will then appear as the right hand side
1604 * of one of the Newton update equations that we compute
1605 * below.
1606 *
1607 * @code
1608 *   BlockVector<double> filtered_unfiltered_density_solution =
1609 *   nonlinear_solution;
1610 *   BlockVector<double> filter_adjoint_unfiltered_density_multiplier_solution =
1611 *   nonlinear_solution;
1612 *  
1613 *   filter_matrix.vmult(filtered_unfiltered_density_solution.block(
1614 *   SolutionBlocks::unfiltered_density),
1615 *   nonlinear_solution.block(
1616 *   SolutionBlocks::unfiltered_density));
1617 *   filter_matrix.Tvmult(
1618 *   filter_adjoint_unfiltered_density_multiplier_solution.block(
1619 *   SolutionBlocks::unfiltered_density_multiplier),
1620 *   nonlinear_solution.block(SolutionBlocks::unfiltered_density_multiplier));
1621 *  
1622 *  
1623 *   std::vector<double> old_density_values(n_q_points);
1624 *   std::vector<Tensor<1, dim>> old_displacement_values(n_q_points);
1625 *   std::vector<double> old_displacement_divs(n_q_points);
1626 *   std::vector<SymmetricTensor<2, dim>> old_displacement_symmgrads(n_q_points);
1627 *   std::vector<Tensor<1, dim>> old_displacement_multiplier_values(n_q_points);
1628 *   std::vector<double> old_displacement_multiplier_divs(n_q_points);
1629 *   std::vector<SymmetricTensor<2, dim>> old_displacement_multiplier_symmgrads(
1630 *   n_q_points);
1631 *   std::vector<double> old_lower_slack_multiplier_values(n_q_points);
1632 *   std::vector<double> old_upper_slack_multiplier_values(n_q_points);
1633 *   std::vector<double> old_lower_slack_values(n_q_points);
1634 *   std::vector<double> old_upper_slack_values(n_q_points);
1635 *   std::vector<double> old_unfiltered_density_values(n_q_points);
1636 *   std::vector<double> old_unfiltered_density_multiplier_values(n_q_points);
1637 *   std::vector<double> filtered_unfiltered_density_values(n_q_points);
1638 *   std::vector<double> filter_adjoint_unfiltered_density_multiplier_values(
1639 *   n_q_points);
1640 *  
1641 *   using namespace ValueExtractors;
1642 *   for (const auto &cell : dof_handler.active_cell_iterators())
1643 *   {
1644 *   cell_matrix = 0;
1645 *  
1646 *   cell->get_dof_indices(local_dof_indices);
1647 *  
1648 *   fe_values.reinit(cell);
1649 *  
1650 *   lambda.value_list(fe_values.get_quadrature_points(), lambda_values);
1651 *   mu.value_list(fe_values.get_quadrature_points(), mu_values);
1652 *  
1653 * @endcode
1654 *
1655 * As part of the construction of our system matrix, we need to
1656 * retrieve values from our current guess at the solution.
1657 * The following lines of code retrieve the needed values.
1658 *
1659 * @code
1660 *   fe_values[densities<dim>].get_function_values(nonlinear_solution,
1661 *   old_density_values);
1662 *   fe_values[displacements<dim>].get_function_values(
1663 *   nonlinear_solution, old_displacement_values);
1664 *   fe_values[displacements<dim>].get_function_divergences(
1665 *   nonlinear_solution, old_displacement_divs);
1666 *   fe_values[displacements<dim>].get_function_symmetric_gradients(
1667 *   nonlinear_solution, old_displacement_symmgrads);
1668 *   fe_values[displacement_multipliers<dim>].get_function_values(
1669 *   nonlinear_solution, old_displacement_multiplier_values);
1670 *   fe_values[displacement_multipliers<dim>].get_function_divergences(
1671 *   nonlinear_solution, old_displacement_multiplier_divs);
1672 *   fe_values[displacement_multipliers<dim>]
1673 *   .get_function_symmetric_gradients(
1674 *   nonlinear_solution, old_displacement_multiplier_symmgrads);
1675 *   fe_values[density_lower_slacks<dim>].get_function_values(
1676 *   nonlinear_solution, old_lower_slack_values);
1677 *   fe_values[density_lower_slack_multipliers<dim>].get_function_values(
1678 *   nonlinear_solution, old_lower_slack_multiplier_values);
1679 *   fe_values[density_upper_slacks<dim>].get_function_values(
1680 *   nonlinear_solution, old_upper_slack_values);
1681 *   fe_values[density_upper_slack_multipliers<dim>].get_function_values(
1682 *   nonlinear_solution, old_upper_slack_multiplier_values);
1683 *   fe_values[unfiltered_densities<dim>].get_function_values(
1684 *   nonlinear_solution, old_unfiltered_density_values);
1685 *   fe_values[unfiltered_density_multipliers<dim>].get_function_values(
1686 *   nonlinear_solution, old_unfiltered_density_multiplier_values);
1687 *   fe_values[unfiltered_densities<dim>].get_function_values(
1688 *   filtered_unfiltered_density_solution,
1689 *   filtered_unfiltered_density_values);
1690 *   fe_values[unfiltered_density_multipliers<dim>].get_function_values(
1691 *   filter_adjoint_unfiltered_density_multiplier_solution,
1692 *   filter_adjoint_unfiltered_density_multiplier_values);
1693 *  
1694 *   for (const auto q_point : fe_values.quadrature_point_indices())
1695 *   {
1696 * @endcode
1697 *
1698 * We need several more values corresponding to the test functions
1699 * coming from the first derivatives taken from the Lagrangian,
1700 * that is the @f$d_{\bullet}@f$ functions. These are calculated here:
1701 *
1702 * @code
1703 *   for (const auto i : fe_values.dof_indices())
1704 *   {
1705 *   const SymmetricTensor<2, dim> displacement_phi_i_symmgrad =
1706 *   fe_values[displacements<dim>].symmetric_gradient(i, q_point);
1707 *   const double displacement_phi_i_div =
1708 *   fe_values[displacements<dim>].divergence(i, q_point);
1709 *  
1710 *   const SymmetricTensor<2, dim>
1711 *   displacement_multiplier_phi_i_symmgrad =
1712 *   fe_values[displacement_multipliers<dim>].symmetric_gradient(
1713 *   i, q_point);
1714 *   const double displacement_multiplier_phi_i_div =
1715 *   fe_values[displacement_multipliers<dim>].divergence(i,
1716 *   q_point);
1717 *  
1718 *   const double density_phi_i =
1719 *   fe_values[densities<dim>].value(i, q_point);
1720 *   const double unfiltered_density_phi_i =
1721 *   fe_values[unfiltered_densities<dim>].value(i, q_point);
1722 *   const double unfiltered_density_multiplier_phi_i =
1723 *   fe_values[unfiltered_density_multipliers<dim>].value(i,
1724 *   q_point);
1725 *  
1726 *   const double lower_slack_multiplier_phi_i =
1727 *   fe_values[density_lower_slack_multipliers<dim>].value(
1728 *   i, q_point);
1729 *  
1730 *   const double lower_slack_phi_i =
1731 *   fe_values[density_lower_slacks<dim>].value(i, q_point);
1732 *  
1733 *   const double upper_slack_phi_i =
1734 *   fe_values[density_upper_slacks<dim>].value(i, q_point);
1735 *  
1736 *   const double upper_slack_multiplier_phi_i =
1737 *   fe_values[density_upper_slack_multipliers<dim>].value(
1738 *   i, q_point);
1739 *  
1740 *  
1741 *   for (const auto j : fe_values.dof_indices())
1742 *   {
1743 * @endcode
1744 *
1745 * Finally, we need values that come from the second round
1746 * of derivatives taken from the Lagrangian,
1747 * the @f$c_{\bullet}@f$ functions. These are calculated here:
1748 *
1749 * @code
1750 *   const SymmetricTensor<2, dim> displacement_phi_j_symmgrad =
1751 *   fe_values[displacements<dim>].symmetric_gradient(j,
1752 *   q_point);
1753 *   const double displacement_phi_j_div =
1754 *   fe_values[displacements<dim>].divergence(j, q_point);
1755 *  
1756 *   const SymmetricTensor<2, dim>
1757 *   displacement_multiplier_phi_j_symmgrad =
1758 *   fe_values[displacement_multipliers<dim>]
1759 *   .symmetric_gradient(j, q_point);
1760 *   const double displacement_multiplier_phi_j_div =
1761 *   fe_values[displacement_multipliers<dim>].divergence(
1762 *   j, q_point);
1763 *  
1764 *   const double density_phi_j =
1765 *   fe_values[densities<dim>].value(j, q_point);
1766 *  
1767 *   const double unfiltered_density_phi_j =
1768 *   fe_values[unfiltered_densities<dim>].value(j, q_point);
1769 *   const double unfiltered_density_multiplier_phi_j =
1770 *   fe_values[unfiltered_density_multipliers<dim>].value(
1771 *   j, q_point);
1772 *  
1773 *  
1774 *   const double lower_slack_phi_j =
1775 *   fe_values[density_lower_slacks<dim>].value(j, q_point);
1776 *  
1777 *   const double upper_slack_phi_j =
1778 *   fe_values[density_upper_slacks<dim>].value(j, q_point);
1779 *  
1780 *   const double lower_slack_multiplier_phi_j =
1781 *   fe_values[density_lower_slack_multipliers<dim>].value(
1782 *   j, q_point);
1783 *  
1784 *   const double upper_slack_multiplier_phi_j =
1785 *   fe_values[density_upper_slack_multipliers<dim>].value(
1786 *   j, q_point);
1787 *  
1788 * @endcode
1789 *
1790 * This is where the actual work starts. In
1791 * the following, we will build all of the
1792 * terms of the matrix -- they are numerous
1793 * and not entirely self-explanatory, also
1794 * depending on the previous solution and its
1795 * derivatives (which we have already
1796 * evaluated above and put into the variables
1797 * called `old_*`). To understand what each of
1798 * these terms corresponds to, you will want
1799 * to look at the explicit form of these terms
1800 * in the introduction above.
1801 *
1802
1803 *
1804 * The right hand sides of the equations being
1805 * driven to 0 give all the KKT conditions
1806 * for finding a local minimum -- a description of what
1807 * each individual equation does is given alongside the computations
1808 * of the right hand side.
1809 *
1810
1811 *
1812 *
1813 * @code
1814 *   /* Equation 1 */
1815 *   cell_matrix(i, j) +=
1816 *   fe_values.JxW(q_point) *
1817 *   (
1818 *  
1819 *   -density_phi_i * unfiltered_density_multiplier_phi_j
1820 *  
1821 *   + density_penalty_exponent *
1822 *   (density_penalty_exponent - 1) *
1823 *   std::pow(old_density_values[q_point],
1824 *   density_penalty_exponent - 2) *
1825 *   density_phi_i * density_phi_j *
1826 *   (old_displacement_multiplier_divs[q_point] *
1827 *   old_displacement_divs[q_point] *
1828 *   lambda_values[q_point] +
1829 *   2 * mu_values[q_point] *
1830 *   (old_displacement_symmgrads[q_point] *
1831 *   old_displacement_multiplier_symmgrads[q_point]))
1832 *  
1833 *   + density_penalty_exponent *
1834 *   std::pow(old_density_values[q_point],
1835 *   density_penalty_exponent - 1) *
1836 *   density_phi_i *
1837 *   (displacement_multiplier_phi_j_div *
1838 *   old_displacement_divs[q_point] *
1839 *   lambda_values[q_point] +
1840 *   2 * mu_values[q_point] *
1841 *   (old_displacement_symmgrads[q_point] *
1842 *   displacement_multiplier_phi_j_symmgrad))
1843 *  
1844 *   + density_penalty_exponent *
1845 *   std::pow(old_density_values[q_point],
1846 *   density_penalty_exponent - 1) *
1847 *   density_phi_i *
1848 *   (displacement_phi_j_div *
1849 *   old_displacement_multiplier_divs[q_point] *
1850 *   lambda_values[q_point] +
1851 *   2 * mu_values[q_point] *
1852 *   (old_displacement_multiplier_symmgrads[q_point] *
1853 *   displacement_phi_j_symmgrad)));
1854 *  
1855 *   /* Equation 2 */
1856 *   cell_matrix(i, j) +=
1857 *   fe_values.JxW(q_point) *
1858 *   (density_penalty_exponent *
1859 *   std::pow(old_density_values[q_point],
1860 *   density_penalty_exponent - 1) *
1861 *   density_phi_j *
1862 *   (old_displacement_multiplier_divs[q_point] *
1863 *   displacement_phi_i_div * lambda_values[q_point] +
1864 *   2 * mu_values[q_point] *
1865 *   (old_displacement_multiplier_symmgrads[q_point] *
1866 *   displacement_phi_i_symmgrad))
1867 *  
1868 *   + std::pow(old_density_values[q_point],
1869 *   density_penalty_exponent) *
1870 *   (displacement_multiplier_phi_j_div *
1871 *   displacement_phi_i_div * lambda_values[q_point] +
1872 *   2 * mu_values[q_point] *
1873 *   (displacement_multiplier_phi_j_symmgrad *
1874 *   displacement_phi_i_symmgrad))
1875 *  
1876 *   );
1877 *  
1878 *   /* Equation 3: the part of this equation that involves the
1879 *   * filter matrix is added separately, after the cell loop. */
1880 *   cell_matrix(i, j) +=
1881 *   fe_values.JxW(q_point) *
1882 *   (-1 * unfiltered_density_phi_i *
1883 *   lower_slack_multiplier_phi_j +
1884 *   unfiltered_density_phi_i * upper_slack_multiplier_phi_j);
1885 *  
1886 *  
1887 *   /* Equation 4: Primal feasibility */
1888 *   cell_matrix(i, j) +=
1889 *   fe_values.JxW(q_point) *
1890 *   (
1891 *  
1892 *   density_penalty_exponent *
1893 *   std::pow(old_density_values[q_point],
1894 *   density_penalty_exponent - 1) *
1895 *   density_phi_j *
1896 *   (old_displacement_divs[q_point] *
1897 *   displacement_multiplier_phi_i_div *
1898 *   lambda_values[q_point] +
1899 *   2 * mu_values[q_point] *
1900 *   (old_displacement_symmgrads[q_point] *
1901 *   displacement_multiplier_phi_i_symmgrad))
1902 *  
1903 *   + std::pow(old_density_values[q_point],
1904 *   density_penalty_exponent) *
1905 *   (displacement_phi_j_div *
1906 *   displacement_multiplier_phi_i_div *
1907 *   lambda_values[q_point] +
1908 *   2 * mu_values[q_point] *
1909 *   (displacement_phi_j_symmgrad *
1910 *   displacement_multiplier_phi_i_symmgrad)));
1911 *  
1912 *   /* Equation 5: Primal feasibility */
1913 *   cell_matrix(i, j) +=
1914 *   -1 * fe_values.JxW(q_point) *
1915 *   lower_slack_multiplier_phi_i *
1916 *   (unfiltered_density_phi_j - lower_slack_phi_j);
1917 *  
1918 *   /* Equation 6: Primal feasibility */
1919 *   cell_matrix(i, j) +=
1920 *   -1 * fe_values.JxW(q_point) *
1921 *   upper_slack_multiplier_phi_i *
1922 *   (-1 * unfiltered_density_phi_j - upper_slack_phi_j);
1923 *  
1924 *   /* Equation 7: Primal feasibility - the part with the filter
1925 *   * is added later */
1926 *   cell_matrix(i, j) += -1 * fe_values.JxW(q_point) *
1927 *   unfiltered_density_multiplier_phi_i *
1928 *   (density_phi_j);
1929 *  
1930 *   /* Equation 8: Complementary slackness */
1931 *   cell_matrix(i, j) +=
1932 *   fe_values.JxW(q_point) *
1933 *   (lower_slack_phi_i * lower_slack_multiplier_phi_j
1934 *  
1935 *   + lower_slack_phi_i * lower_slack_phi_j *
1936 *   old_lower_slack_multiplier_values[q_point] /
1937 *   old_lower_slack_values[q_point]);
1938 *  
1939 *   /* Equation 9: Complementary slackness */
1940 *   cell_matrix(i, j) +=
1941 *   fe_values.JxW(q_point) *
1942 *   (upper_slack_phi_i * upper_slack_multiplier_phi_j
1943 *  
1944 *  
1945 *   + upper_slack_phi_i * upper_slack_phi_j *
1946 *   old_upper_slack_multiplier_values[q_point] /
1947 *   old_upper_slack_values[q_point]);
1948 *   }
1949 *   }
1950 *   }
1951 *  
1952 * @endcode
1953 *
1954 * Now that we have everything assembled, all we have to
1955 * do is deal with the effect of (Dirichlet) boundary
1956 * conditions and other constraints. We incorporate the
1957 * former locally with just the contributions from the
1958 * current cell, and then let the AffineConstraints class
1959 * deal with the latter while copying contributions from
1960 * the current cell into the global linear system:
1961 *
1962 * @code
1963 *   MatrixTools::local_apply_boundary_values(boundary_values,
1964 *   local_dof_indices,
1965 *   cell_matrix,
1966 *   dummy_cell_rhs,
1967 *   true);
1968 *  
1969 *   constraints.distribute_local_to_global(cell_matrix,
1970 *   local_dof_indices,
1971 *   system_matrix);
1972 *   }
1973 *  
1974 * @endcode
1975 *
1976 * Having accumulated all of the terms that belong
1977 * in the Newton matrix, we now also have to
1978 * compute the terms for the right hand side
1979 * (i.e., the negative residual). We already do this
1980 * in another function, and so we call that here:
1981 *
1982 * @code
1983 *   system_rhs = calculate_test_rhs(nonlinear_solution);
1984 *  
1985 * @endcode
1986 *
1987 * Here we use the filter matrix we have already
1988 * constructed. We only need to integrate this filter applied
1989 * to test functions, which are piecewise constant, and so the
1990 * integration becomes a simple multiplication by the measure
1991 * of the cell. Iterating over the pre-built filter matrix
1992 * allows us to reuse the information about which cells are in
1993 * or out of the filter without having to check neighboring
1994 * cells again.
1995 *
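 * To spell out the bookkeeping: because the unfiltered density and its
 * multiplier are discretized with piecewise constant elements, applying
 * the filter to the basis function @f$\varphi_j@f$ of cell @f$K_j@f$ and
 * integrating against the (constant) test function on cell @f$K_i@f$
 * gives
 * @f[
 *   \int_{K_i} (H\varphi_j)\,\mathrm{d}x = H_{ij}\,|K_i|,
 * @f]
 * which is exactly the quantity `iter->value() * cell->measure()` added
 * into the two off-diagonal blocks in the loop below.
 *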
1996 * @code
1997 *   for (const auto &cell : dof_handler.active_cell_iterators())
1998 *   {
1999 *   const unsigned int i = cell->active_cell_index();
2000 *   for (typename SparseMatrix<double>::iterator iter =
2001 *   filter_matrix.begin(i);
2002 *   iter != filter_matrix.end(i);
2003 *   ++iter)
2004 *   {
2005 *   const unsigned int j = iter->column();
2006 *   const double value = iter->value() * cell->measure();
2007 *  
2008 *   system_matrix
2009 *   .block(SolutionBlocks::unfiltered_density_multiplier,
2010 *   SolutionBlocks::unfiltered_density)
2011 *   .add(i, j, value);
2012 *   system_matrix
2013 *   .block(SolutionBlocks::unfiltered_density,
2014 *   SolutionBlocks::unfiltered_density_multiplier)
2015 *   .add(j, i, value);
2016 *   }
2017 *   }
2018 *   }
2019 *  
2020 *  
2021 * @endcode
2022 *
2023 *
2024 * <a name="step_79-SolvingtheNewtonlinearsystem"></a>
2025 * <h3>Solving the Newton linear system</h3>
2026 *
2027
2028 *
2029 *
2030
2031 *
2032 * We will need to solve a linear system in each iteration. We use
2033 * a direct solver, for now -- this is clearly not an efficient
2034 * choice for a matrix that has so many non-zeroes, and it will
2035 * not scale to anything interesting. For "real" applications, we
2036 * will need an iterative solver, but the complexity of the system
2037 * means that designing such a solver will take a good deal
2038 * of work. Because this is not the focus of the current program,
2039 * we simply stick with the direct solver we have here -- the
2040 * function follows the same structure as used in @ref step_29 "step-29".
2041 *
2042 * @code
2043 *   template <int dim>
2044 *   BlockVector<double> SANDTopOpt<dim>::solve()
2045 *   {
2046 *   TimerOutput::Scope t(timer, "solver");
2047 *  
2048 *   BlockVector<double> linear_solution;
2049 *   linear_solution.reinit(nonlinear_solution);
2050 *  
2051 *   SparseDirectUMFPACK A_direct;
2052 *   A_direct.initialize(system_matrix);
2053 *   A_direct.vmult(linear_solution, system_rhs);
2054 *  
2055 *   constraints.distribute(linear_solution);
2056 *  
2057 *   return linear_solution;
2058 *   }
2059 *  
2060 *  
2061 * @endcode
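 *
 * Purely to illustrate what the mechanics of such a change would look
 * like -- this sketch is not part of the program, its solver parameters
 * are made up, and without a preconditioner tailored to the saddle-point
 * structure of this matrix it would almost certainly not converge -- the
 * direct solver calls above could be replaced by something along these
 * lines:
 *
 * @code
 *   // Hypothetical sketch only; needs <deal.II/lac/solver_gmres.h> and
 *   // <deal.II/lac/precondition.h>. PreconditionIdentity is merely a
 *   // placeholder for a real preconditioner.
 *   SolverControl                    solver_control(1000,
 *                                                   1e-12 * system_rhs.l2_norm());
 *   SolverGMRES<BlockVector<double>> solver(solver_control);
 *   solver.solve(system_matrix, linear_solution, system_rhs,
 *                PreconditionIdentity());
 *   constraints.distribute(linear_solution);
 * @endcode
 *
 * The real work would then lie in designing a preconditioner adapted to
 * the block structure of this matrix, which is beyond the scope of this
 * program.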
2062 *
2063 *
2064 * <a name="step_79-Detailsoftheoptimizationalgorithm"></a>
2065 * <h3>Details of the optimization algorithm</h3>
2066 *
2067
2068 *
2069 * The next several functions deal with specific parts of the
2070 * optimization algorithm, most notably with deciding whether the
2071 * direction computed by solving the linearized (Newton) system is
2072 * viable and, if so, how far we want to go in this direction.
2073 *
2074
2075 *
2076 *
2077 * <a name="step_79-Computingsteplengths"></a>
2078 * <h4>Computing step lengths</h4>
2079 *
2080
2081 *
2082 * We start with a function that does a binary search to figure
2083 * out the maximum step that keeps the slack and multiplier variables
2084 * positive -- that is, how far we can go so that @f$s>0@f$ and
2085 * @f$z>0@f$. The function returns a pair of values, one each for the
2086 * @f$s@f$ and @f$z@f$ variables.
2087 *
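 * The nested `if` statements at the top of this function simply clamp
 * @f$1-\beta@f$ (where @f$\beta@f$ is the current barrier size) to the
 * admissible interval: the fraction-to-boundary parameter used in the
 * bisection below is
 * @f[
 *   \tau = \max\bigl(0.8,\ \min(1-\beta,\ 1-10^{-5})\bigr).
 * @f]
 *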
2088 * @code
2089 *   template <int dim>
2090 *   std::pair<double, double> SANDTopOpt<dim>::calculate_max_step_size(
2091 *   const BlockVector<double> &state,
2092 *   const BlockVector<double> &step) const
2093 *   {
2094 *   double fraction_to_boundary;
2095 *   const double min_fraction_to_boundary = .8;
2096 *   const double max_fraction_to_boundary = 1. - 1e-5;
2097 *  
2098 *   if (min_fraction_to_boundary < 1 - barrier_size)
2099 *   {
2100 *   if (1 - barrier_size < max_fraction_to_boundary)
2101 *   fraction_to_boundary = 1 - barrier_size;
2102 *   else
2103 *   fraction_to_boundary = max_fraction_to_boundary;
2104 *   }
2105 *   else
2106 *   fraction_to_boundary = min_fraction_to_boundary;
2107 *  
2108 *   double step_size_s_low = 0;
2109 *   double step_size_z_low = 0;
2110 *   double step_size_s_high = 1;
2111 *   double step_size_z_high = 1;
2112 *   double step_size_s, step_size_z;
2113 *  
2114 *   const unsigned int max_bisection_method_steps = 50;
2115 *   for (unsigned int k = 0; k < max_bisection_method_steps; ++k)
2116 *   {
2117 *   step_size_s = (step_size_s_low + step_size_s_high) / 2;
2118 *   step_size_z = (step_size_z_low + step_size_z_high) / 2;
2119 *  
2120 *   const BlockVector<double> state_test_s =
2121 *   (fraction_to_boundary * state) + (step_size_s * step);
2122 *  
2123 *   const BlockVector<double> state_test_z =
2124 *   (fraction_to_boundary * state) + (step_size_z * step);
2125 *  
2126 *   const bool accept_s =
2127 *   (state_test_s.block(SolutionBlocks::density_lower_slack)
2128 *   .is_non_negative()) &&
2129 *   (state_test_s.block(SolutionBlocks::density_upper_slack)
2130 *   .is_non_negative());
2131 *   const bool accept_z =
2132 *   (state_test_z.block(SolutionBlocks::density_lower_slack_multiplier)
2133 *   .is_non_negative()) &&
2134 *   (state_test_z.block(SolutionBlocks::density_upper_slack_multiplier)
2135 *   .is_non_negative());
2136 *  
2137 *   if (accept_s)
2138 *   step_size_s_low = step_size_s;
2139 *   else
2140 *   step_size_s_high = step_size_s;
2141 *  
2142 *   if (accept_z)
2143 *   step_size_z_low = step_size_z;
2144 *   else
2145 *   step_size_z_high = step_size_z;
2146 *   }
2147 *  
2148 *   return {step_size_s_low, step_size_z_low};
2149 *   }
2150 *  
2151 *  
2152 * @endcode
2153 *
2154 *
2155 * <a name="step_79-Computingresiduals"></a>
2156 * <h4>Computing residuals</h4>
2157 *
2158
2159 *
2160 * The next function computes the right hand side vector evaluated
2161 * at a "test solution vector", which we can use to assess the
2162 * magnitude of the KKT conditions. This is then used for testing
2163 * convergence before shrinking the barrier size, as well as in the
2164 * calculation of the @f$l_1@f$ merit function.
2165 *
2166
2167 *
2168 * The function is lengthy and complicated, but it is really just a
2169 * copy of the right hand side part of what the `assemble_system()`
2170 * function above did.
2171 *
2172 * @code
2173 *   template <int dim>
2174 *   BlockVector<double> SANDTopOpt<dim>::calculate_test_rhs(
2175 *   const BlockVector<double> &test_solution) const
2176 *   {
2177 * @endcode
2178 *
2179 * We first create a zero vector with the size and block structure of `system_rhs`:
2180 *
2181 * @code
2182 *   BlockVector<double> test_rhs;
2183 *   test_rhs.reinit(system_rhs);
2184 *  
2185 *   const MappingQ<dim> mapping(1);
2186 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
2187 *   const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
2188 *   FEValues<dim> fe_values(mapping,
2189 *   fe,
2190 *   quadrature_formula,
2191 *   update_values | update_gradients |
2192 *   update_quadrature_points | update_JxW_values);
2193 *   FEFaceValues<dim> fe_face_values(mapping,
2194 *   fe,
2195 *   face_quadrature_formula,
2196 *   update_values | update_quadrature_points |
2197 *   update_normal_vectors |
2198 *   update_JxW_values);
2199 *  
2200 *   const unsigned int dofs_per_cell = fe.dofs_per_cell;
2201 *   const unsigned int n_q_points = quadrature_formula.size();
2202 *  
2203 *   Vector<double> cell_rhs(dofs_per_cell);
2204 *   FullMatrix<double> dummy_cell_matrix(dofs_per_cell, dofs_per_cell);
2205 *  
2206 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
2207 *  
2208 *   std::vector<double> lambda_values(n_q_points);
2209 *   std::vector<double> mu_values(n_q_points);
2210 *  
2211 *   const Functions::ConstantFunction<dim> lambda(1.), mu(1.);
2212 *   std::vector<Tensor<1, dim>> rhs_values(n_q_points);
2213 *  
2214 *  
2215 *   BlockVector<double> filtered_unfiltered_density_solution = test_solution;
2216 *   BlockVector<double> filter_adjoint_unfiltered_density_multiplier_solution =
2217 *   test_solution;
2218 *   filtered_unfiltered_density_solution.block(
2219 *   SolutionBlocks::unfiltered_density) = 0;
2220 *   filter_adjoint_unfiltered_density_multiplier_solution.block(
2221 *   SolutionBlocks::unfiltered_density_multiplier) = 0;
2222 *  
2223 *   filter_matrix.vmult(filtered_unfiltered_density_solution.block(
2224 *   SolutionBlocks::unfiltered_density),
2225 *   test_solution.block(
2226 *   SolutionBlocks::unfiltered_density));
2227 *   filter_matrix.Tvmult(
2228 *   filter_adjoint_unfiltered_density_multiplier_solution.block(
2229 *   SolutionBlocks::unfiltered_density_multiplier),
2230 *   test_solution.block(SolutionBlocks::unfiltered_density_multiplier));
2231 *  
2232 *  
2233 *   std::vector<double> old_density_values(n_q_points);
2234 *   std::vector<Tensor<1, dim>> old_displacement_values(n_q_points);
2235 *   std::vector<double> old_displacement_divs(n_q_points);
2236 *   std::vector<SymmetricTensor<2, dim>> old_displacement_symmgrads(n_q_points);
2237 *   std::vector<Tensor<1, dim>> old_displacement_multiplier_values(n_q_points);
2238 *   std::vector<double> old_displacement_multiplier_divs(n_q_points);
2239 *   std::vector<SymmetricTensor<2, dim>> old_displacement_multiplier_symmgrads(
2240 *   n_q_points);
2241 *   std::vector<double> old_lower_slack_multiplier_values(n_q_points);
2242 *   std::vector<double> old_upper_slack_multiplier_values(n_q_points);
2243 *   std::vector<double> old_lower_slack_values(n_q_points);
2244 *   std::vector<double> old_upper_slack_values(n_q_points);
2245 *   std::vector<double> old_unfiltered_density_values(n_q_points);
2246 *   std::vector<double> old_unfiltered_density_multiplier_values(n_q_points);
2247 *   std::vector<double> filtered_unfiltered_density_values(n_q_points);
2248 *   std::vector<double> filter_adjoint_unfiltered_density_multiplier_values(
2249 *   n_q_points);
2250 *  
2251 *   using namespace ValueExtractors;
2252 *   for (const auto &cell : dof_handler.active_cell_iterators())
2253 *   {
2254 *   cell_rhs = 0;
2255 *  
2256 *   cell->get_dof_indices(local_dof_indices);
2257 *  
2258 *   fe_values.reinit(cell);
2259 *  
2260 *   lambda.value_list(fe_values.get_quadrature_points(), lambda_values);
2261 *   mu.value_list(fe_values.get_quadrature_points(), mu_values);
2262 *  
2263 *   fe_values[densities<dim>].get_function_values(test_solution,
2264 *   old_density_values);
2265 *   fe_values[displacements<dim>].get_function_values(
2266 *   test_solution, old_displacement_values);
2267 *   fe_values[displacements<dim>].get_function_divergences(
2268 *   test_solution, old_displacement_divs);
2269 *   fe_values[displacements<dim>].get_function_symmetric_gradients(
2270 *   test_solution, old_displacement_symmgrads);
2271 *   fe_values[displacement_multipliers<dim>].get_function_values(
2272 *   test_solution, old_displacement_multiplier_values);
2273 *   fe_values[displacement_multipliers<dim>].get_function_divergences(
2274 *   test_solution, old_displacement_multiplier_divs);
2275 *   fe_values[displacement_multipliers<dim>]
2276 *   .get_function_symmetric_gradients(
2277 *   test_solution, old_displacement_multiplier_symmgrads);
2278 *   fe_values[density_lower_slacks<dim>].get_function_values(
2279 *   test_solution, old_lower_slack_values);
2280 *   fe_values[density_lower_slack_multipliers<dim>].get_function_values(
2281 *   test_solution, old_lower_slack_multiplier_values);
2282 *   fe_values[density_upper_slacks<dim>].get_function_values(
2283 *   test_solution, old_upper_slack_values);
2284 *   fe_values[density_upper_slack_multipliers<dim>].get_function_values(
2285 *   test_solution, old_upper_slack_multiplier_values);
2286 *   fe_values[unfiltered_densities<dim>].get_function_values(
2287 *   test_solution, old_unfiltered_density_values);
2288 *   fe_values[unfiltered_density_multipliers<dim>].get_function_values(
2289 *   test_solution, old_unfiltered_density_multiplier_values);
2290 *   fe_values[unfiltered_densities<dim>].get_function_values(
2291 *   filtered_unfiltered_density_solution,
2292 *   filtered_unfiltered_density_values);
2293 *   fe_values[unfiltered_density_multipliers<dim>].get_function_values(
2294 *   filter_adjoint_unfiltered_density_multiplier_solution,
2295 *   filter_adjoint_unfiltered_density_multiplier_values);
2296 *  
2297 *   for (const auto q_point : fe_values.quadrature_point_indices())
2298 *   {
2299 *   for (const auto i : fe_values.dof_indices())
2300 *   {
2301 *   const SymmetricTensor<2, dim> displacement_phi_i_symmgrad =
2302 *   fe_values[displacements<dim>].symmetric_gradient(i, q_point);
2303 *   const double displacement_phi_i_div =
2304 *   fe_values[displacements<dim>].divergence(i, q_point);
2305 *  
2306 *   const SymmetricTensor<2, dim>
2307 *   displacement_multiplier_phi_i_symmgrad =
2308 *   fe_values[displacement_multipliers<dim>].symmetric_gradient(
2309 *   i, q_point);
2310 *   const double displacement_multiplier_phi_i_div =
2311 *   fe_values[displacement_multipliers<dim>].divergence(i,
2312 *   q_point);
2313 *  
2314 *  
2315 *   const double density_phi_i =
2316 *   fe_values[densities<dim>].value(i, q_point);
2317 *   const double unfiltered_density_phi_i =
2318 *   fe_values[unfiltered_densities<dim>].value(i, q_point);
2319 *   const double unfiltered_density_multiplier_phi_i =
2320 *   fe_values[unfiltered_density_multipliers<dim>].value(i,
2321 *   q_point);
2322 *  
2323 *   const double lower_slack_multiplier_phi_i =
2324 *   fe_values[density_lower_slack_multipliers<dim>].value(
2325 *   i, q_point);
2326 *  
2327 *   const double lower_slack_phi_i =
2328 *   fe_values[density_lower_slacks<dim>].value(i, q_point);
2329 *  
2330 *   const double upper_slack_phi_i =
2331 *   fe_values[density_upper_slacks<dim>].value(i, q_point);
2332 *  
2333 *   const double upper_slack_multiplier_phi_i =
2334 *   fe_values[density_upper_slack_multipliers<dim>].value(
2335 *   i, q_point);
2336 *  
2337 *   /* Equation 1: This equation, together with equations
2338 *   * 2 and 3, gives the variational derivatives of the
2339 *   * Lagrangian with respect to the decision
2340 *   * variables - the density, displacement, and
2341 *   * unfiltered density. */
2342 *   cell_rhs(i) +=
2343 *   -1 * fe_values.JxW(q_point) *
2344 *   (density_penalty_exponent *
2345 *   std::pow(old_density_values[q_point],
2346 *   density_penalty_exponent - 1) *
2347 *   density_phi_i *
2348 *   (old_displacement_multiplier_divs[q_point] *
2349 *   old_displacement_divs[q_point] *
2350 *   lambda_values[q_point] +
2351 *   2 * mu_values[q_point] *
2352 *   (old_displacement_symmgrads[q_point] *
2353 *   old_displacement_multiplier_symmgrads[q_point])) -
2354 *   density_phi_i *
2355 *   old_unfiltered_density_multiplier_values[q_point]);
2356 *  
2357 *   /* Equation 2; the boundary terms will be added further down
2358 *   * below. */
2359 *   cell_rhs(i) +=
2360 *   -1 * fe_values.JxW(q_point) *
2361 *   (std::pow(old_density_values[q_point],
2362 *   density_penalty_exponent) *
2363 *   (old_displacement_multiplier_divs[q_point] *
2364 *   displacement_phi_i_div * lambda_values[q_point] +
2365 *   2 * mu_values[q_point] *
2366 *   (old_displacement_multiplier_symmgrads[q_point] *
2367 *   displacement_phi_i_symmgrad)));
2368 *  
2369 *   /* Equation 3 */
2370 *   cell_rhs(i) +=
2371 *   -1 * fe_values.JxW(q_point) *
2372 *   (unfiltered_density_phi_i *
2373 *   filter_adjoint_unfiltered_density_multiplier_values
2374 *   [q_point] +
2375 *   unfiltered_density_phi_i *
2376 *   old_upper_slack_multiplier_values[q_point] +
2377 *   -1 * unfiltered_density_phi_i *
2378 *   old_lower_slack_multiplier_values[q_point]);
2379 *  
2380 *  
2381 *  
2382 *   /* Equation 4; boundary term will again be dealt
2383 *   * with below. This equation being driven to 0
2384 *   * ensures that the elasticity equation is met as
2385 *   * a constraint. */
2386 *   cell_rhs(i) += -1 * fe_values.JxW(q_point) *
2387 *   (std::pow(old_density_values[q_point],
2388 *   density_penalty_exponent) *
2389 *   (old_displacement_divs[q_point] *
2390 *   displacement_multiplier_phi_i_div *
2391 *   lambda_values[q_point] +
2392 *   2 * mu_values[q_point] *
2393 *   (displacement_multiplier_phi_i_symmgrad *
2394 *   old_displacement_symmgrads[q_point])));
2395 *  
2396 *   /* Equation 5: This equation sets the lower slack
2397 *   * variable equal to the unfiltered density,
2398 *   * giving a minimum density of 0. */
2399 *   cell_rhs(i) += fe_values.JxW(q_point) *
2400 *   (lower_slack_multiplier_phi_i *
2401 *   (old_unfiltered_density_values[q_point] -
2402 *   old_lower_slack_values[q_point]));
2403 *  
2404 *   /* Equation 6: This equation sets the upper slack
2405 *   * variable equal to one minus the unfiltered
2406 *   * density. */
2407 *   cell_rhs(i) += fe_values.JxW(q_point) *
2408 *   (upper_slack_multiplier_phi_i *
2409 *   (1 - old_unfiltered_density_values[q_point] -
2410 *   old_upper_slack_values[q_point]));
2411 *  
2412 *   /* Equation 7: This is the difference between the
2413 *   * density and the filter applied to the
2414 *   * unfiltered density. This being driven to 0 by
2415 *   * the Newton steps ensures that the filter is
2416 *   * applied correctly. */
2417 *   cell_rhs(i) += fe_values.JxW(q_point) *
2418 *   (unfiltered_density_multiplier_phi_i *
2419 *   (old_density_values[q_point] -
2420 *   filtered_unfiltered_density_values[q_point]));
2421 *  
2422 *   /* Equation 8: This equation, along with equation 9, gives the
2423 *   * requirement that s*z = \alpha for the barrier
2424 *   * size alpha, and yields the complementary slackness
2425 *   * condition of the KKT system as \alpha goes to 0. */
2426 *   cell_rhs(i) +=
2427 *   -1 * fe_values.JxW(q_point) *
2428 *   (lower_slack_phi_i *
2429 *   (old_lower_slack_multiplier_values[q_point] -
2430 *   barrier_size / old_lower_slack_values[q_point]));
2431 *  
2432 *   /* Equation 9 */
2433 *   cell_rhs(i) +=
2434 *   -1 * fe_values.JxW(q_point) *
2435 *   (upper_slack_phi_i *
2436 *   (old_upper_slack_multiplier_values[q_point] -
2437 *   barrier_size / old_upper_slack_values[q_point]));
2438 *   }
2439 *   }
2440 *  
2441 *   for (const auto &face : cell->face_iterators())
2442 *   {
2443 *   if (face->at_boundary() &&
2444 *   face->boundary_id() == BoundaryIds::down_force)
2445 *   {
2446 *   fe_face_values.reinit(cell, face);
2447 *  
2448 *   for (const auto face_q_point :
2449 *   fe_face_values.quadrature_point_indices())
2450 *   {
2451 *   for (const auto i : fe_face_values.dof_indices())
2452 *   {
2453 *   Tensor<1, dim> traction;
2454 *   traction[1] = -1.;
2455 *  
2456 *   cell_rhs(i) +=
2457 *   -1 *
2458 *   (traction * fe_face_values[displacements<dim>].value(
2459 *   i, face_q_point)) *
2460 *   fe_face_values.JxW(face_q_point);
2461 *  
2462 *   cell_rhs(i) +=
2463 *   (traction *
2464 *   fe_face_values[displacement_multipliers<dim>].value(
2465 *   i, face_q_point)) *
2466 *   fe_face_values.JxW(face_q_point);
2467 *   }
2468 *   }
2469 *   }
2470 *   }
2471 *  
2472 *   MatrixTools::local_apply_boundary_values(boundary_values,
2473 *   local_dof_indices,
2474 *   dummy_cell_matrix,
2475 *   cell_rhs,
2476 *   true);
2477 *  
2478 *   constraints.distribute_local_to_global(cell_rhs,
2479 *   local_dof_indices,
2480 *   test_rhs);
2481 *   }
2482 *  
2483 *   return test_rhs;
2484 *   }
2485 *  
2486 *  
2487 * @endcode
2488 *
2489 *
2490 * <a name="step_79-Computingthemeritfunction"></a>
2491 * <h4>Computing the merit function</h4>
2492 *
2493
2494 *
2495 * The algorithm we use herein uses a "watchdog" strategy to
2496 * determine where and how far to go from the current iterate. We
2497 * base the watchdog strategy on an exact @f$l_1@f$ merit function. This
2498 * function calculates the exact @f$l_1@f$ merit of a given, putative,
2499 * next iterate.
2500 *
2501
2502 *
2503 * The merit function consists of the sum of the objective function
2504 * (which is simply an integral of the external forces on the boundary
2505 * of the domain times the displacement values of a test solution --
2506 * typically, the current solution plus some multiple of the Newton
2507 * update), the log-barrier terms for the slack variables, and the
2508 * @f$l_1@f$ norms of the constraint residuals scaled by the penalty
2509 * multiplier. The following code computes these parts in turn:
2510 *
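 * Schematically, with @f$\beta@f$ the barrier size and @f$p@f$ the
 * penalty multiplier (and glossing over the exact symbols, which the
 * code spells out), the quantity computed here is
 * @f[
 *   \phi(x)
 *   = \int_{\Gamma_{\text{load}}} \mathbf{t}\cdot\mathbf{u}\,\mathrm{d}s
 *   \;-\; \beta \sum_K |K|
 *     \left( \log s^{\text{lower}}_K + \log s^{\text{upper}}_K \right)
 *   \;+\; p \sum_c \left\| R_c(x) \right\|_{1},
 * @f]
 * where the last sum runs over the four equality constraint residuals
 * (elasticity, filter, and the two slack definitions) evaluated by
 * `calculate_test_rhs()`.
 *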
2511 * @code
2512 *   template <int dim>
2513 *   double SANDTopOpt<dim>::calculate_exact_merit(
2514 *   const BlockVector<double> &test_solution)
2515 *   {
2516 *   TimerOutput::Scope t(timer, "merit function");
2517 *  
2518 * @endcode
2519 *
2520 * Start with computing the objective function:
2521 *
2522 * @code
2523 *   double objective_function_merit = 0;
2524 *   {
2525 *   const MappingQ<dim> mapping(1);
2526 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
2527 *   const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
2528 *   FEValues<dim> fe_values(mapping,
2529 *   fe,
2530 *   quadrature_formula,
2531 *   update_values | update_gradients |
2532 *   update_quadrature_points | update_JxW_values);
2533 *   FEFaceValues<dim> fe_face_values(mapping,
2534 *   fe,
2535 *   face_quadrature_formula,
2536 *   update_values |
2537 *   update_quadrature_points |
2538 *   update_normal_vectors |
2539 *   update_JxW_values);
2540 *  
2541 *   const unsigned int n_face_q_points = face_quadrature_formula.size();
2542 *  
2543 *   std::vector<Tensor<1, dim>> displacement_face_values(n_face_q_points);
2544 *  
2545 *   for (const auto &cell : dof_handler.active_cell_iterators())
2546 *   {
2547 *   for (const auto &face : cell->face_iterators())
2548 *   {
2549 *   if (face->at_boundary() &&
2550 *   face->boundary_id() == BoundaryIds::down_force)
2551 *   {
2552 *   fe_face_values.reinit(cell, face);
2553 *   fe_face_values[ValueExtractors::displacements<dim>]
2554 *   .get_function_values(test_solution,
2555 *   displacement_face_values);
2556 *   for (unsigned int face_q_point = 0;
2557 *   face_q_point < n_face_q_points;
2558 *   ++face_q_point)
2559 *   {
2560 *   Tensor<1, dim> traction;
2561 *   traction[1] = -1.;
2562 *  
2563 *   objective_function_merit +=
2564 *   (traction * displacement_face_values[face_q_point]) *
2565 *   fe_face_values.JxW(face_q_point);
2566 *   }
2567 *   }
2568 *   }
2569 *   }
2570 *   }
2571 *  
2572 *   for (const auto &cell : triangulation.active_cell_iterators())
2573 *   {
2574 *   objective_function_merit =
2575 *   objective_function_merit -
2576 *   barrier_size * cell->measure() *
2577 *   std::log(test_solution.block(
2578 *   SolutionBlocks::density_lower_slack)[cell->active_cell_index()]);
2579 *   objective_function_merit =
2580 *   objective_function_merit -
2581 *   barrier_size * cell->measure() *
2582 *   std::log(test_solution.block(
2583 *   SolutionBlocks::density_upper_slack)[cell->active_cell_index()]);
2584 *   }
2585 *  
2586 * @endcode
2587 *
2588 * Then compute the residual and take the @f$l_1@f$ norms of the
2589 * components that correspond to Lagrange multipliers. We add
2590 * those to the objective function computed above, and return
2591 * the sum at the bottom:
2592 *
2593 * @code
2594 *   const BlockVector<double> test_rhs = calculate_test_rhs(test_solution);
2595 *  
2596 *   const double elasticity_constraint_merit =
2597 *   penalty_multiplier *
2598 *   test_rhs.block(SolutionBlocks::displacement_multiplier).l1_norm();
2599 *   const double filter_constraint_merit =
2600 *   penalty_multiplier *
2601 *   test_rhs.block(SolutionBlocks::unfiltered_density_multiplier).l1_norm();
2602 *   const double lower_slack_merit =
2603 *   penalty_multiplier *
2604 *   test_rhs.block(SolutionBlocks::density_lower_slack_multiplier).l1_norm();
2605 *   const double upper_slack_merit =
2606 *   penalty_multiplier *
2607 *   test_rhs.block(SolutionBlocks::density_upper_slack_multiplier).l1_norm();
2608 *  
2609 *   const double total_merit =
2610 *   objective_function_merit + elasticity_constraint_merit +
2611 *   filter_constraint_merit + lower_slack_merit + upper_slack_merit;
2612 *   return total_merit;
2613 *   }
2614 *  
2615 *  
2616 *  
2617 * @endcode
2618 *
2619 *
2620 * <a name="step_79-Findingasearchdirection"></a>
2621 * <h4>Finding a search direction</h4>
2622 *
2623
2624 *
2625 * Next up is the function that actually computes a search direction
2626 * starting at the current state (stored in the `nonlinear_solution`
2627 * member variable) and returns the resulting vector. To this end, the
2628 * function first calls the functions that assemble the linear system
2629 * corresponding to the Newton system, and that solve it.
2630 *
2631
2632 *
2633 * This function also updates the penalty multiplier in the merit
2634 * function, and then returns the largest scaled feasible step.
2635 * It uses the `calculate_max_step_size()` function to find the
2636 * largest feasible step that satisfies @f$s>0@f$ and @f$z>0@f$.
2637 *
2638
2639 *
2640 *
2641 * @code
2642 *   template <int dim>
2643 *   BlockVector<double> SANDTopOpt<dim>::find_max_step()
2644 *   {
2645 *   assemble_system();
2646 *   BlockVector<double> step = solve();
2647 *  
2648 * @endcode
2649 *
2650 * Next we are going to update penalty_multiplier. In
2651 * essence, a larger penalty multiplier makes us consider the
2652 * constraints more. Looking at the Hessian and gradient with
2653 * respect to the step we want to take with our decision
2654 * variables, and comparing that to the norm of our constraint
2655 * error gives us a way to ensure that our merit function is
2656 * "exact" - that is, it has a minimum in the same location
2657 * that the objective function does. As our merit function is
2658 * exact for any penalty multiplier over some minimum value,
2659 * we only keep the computed value if it increases the penalty
2660 * multiplier.
2661 *
2662
2663 *
2664 *
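 * Written out, the candidate value computed in the following lines is
 * @f[
 *   p_{\text{test}} =
 *   \begin{cases}
 *     \dfrac{\text{grad\_part} + \tfrac{1}{2}\,\text{hess\_part}}
 *           {0.05\,\text{constraint\_norm}}
 *       & \text{if } \text{hess\_part} > 0, \\[1ex]
 *     \dfrac{\text{grad\_part}}{0.05\,\text{constraint\_norm}}
 *       & \text{otherwise},
 *   \end{cases}
 * @f]
 * and the penalty multiplier is then only ever increased:
 * @f$p \leftarrow \max(p,\,p_{\text{test}})@f$.
 *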
2665 * @code
2666 *   const std::vector<unsigned int> decision_variables = {
2667 *   SolutionBlocks::density,
2668 *   SolutionBlocks::displacement,
2669 *   SolutionBlocks::unfiltered_density,
2670 *   SolutionBlocks::density_upper_slack,
2671 *   SolutionBlocks::density_lower_slack};
2672 *   double hess_part = 0;
2673 *   double grad_part = 0;
2674 *   for (const unsigned int decision_variable_i : decision_variables)
2675 *   {
2676 *   for (const unsigned int decision_variable_j : decision_variables)
2677 *   {
2678 *   Vector<double> temp_vector(step.block(decision_variable_i).size());
2679 *   system_matrix.block(decision_variable_i, decision_variable_j)
2680 *   .vmult(temp_vector, step.block(decision_variable_j));
2681 *   hess_part += step.block(decision_variable_i) * temp_vector;
2682 *   }
2683 *   grad_part -= system_rhs.block(decision_variable_i) *
2684 *   step.block(decision_variable_i);
2685 *   }
2686 *  
2687 *   const std::vector<unsigned int> equality_constraint_multipliers = {
2688 *   SolutionBlocks::displacement_multiplier,
2689 *   SolutionBlocks::unfiltered_density_multiplier,
2690 *   SolutionBlocks::density_lower_slack_multiplier,
2691 *   SolutionBlocks::density_upper_slack_multiplier};
2692 *   double constraint_norm = 0;
2693 *   for (const unsigned int multiplier_i : equality_constraint_multipliers)
2694 *   constraint_norm += system_rhs.block(multiplier_i).linfty_norm();
2695 *  
2696 *  
2697 *   double test_penalty_multiplier;
2698 *   if (hess_part > 0)
2699 *   test_penalty_multiplier =
2700 *   (grad_part + .5 * hess_part) / (.05 * constraint_norm);
2701 *   else
2702 *   test_penalty_multiplier = (grad_part) / (.05 * constraint_norm);
2703 *  
2704 *   penalty_multiplier = std::max(penalty_multiplier, test_penalty_multiplier);
2705 *  
2706 * @endcode
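 *
 * Written out, and using the shorthand @f$g@f$ for `grad_part`, @f$h@f$
 * for `hess_part`, and @f$c@f$ for `constraint_norm` computed above
 * (these symbols are introduced here only for readability and do not
 * appear in the program), the update just performed amounts to
 * @f[
 *   \nu_{\text{test}} =
 *   \begin{cases}
 *     \dfrac{g + \frac{1}{2} h}{0.05\, c}, & h > 0,\\[1ex]
 *     \dfrac{g}{0.05\, c}, & \text{otherwise},
 *   \end{cases}
 *   \qquad
 *   \nu \leftarrow \max(\nu, \nu_{\text{test}}),
 * @f]
 * where @f$\nu@f$ denotes the `penalty_multiplier` variable.
 *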
2707 *
2708 * Based on all of this, we can now compute step sizes for the
2709 * primal and dual (Lagrange multiplier) variables. Once we
2710 * have these, we scale the corresponding blocks of the step
2711 * vector, and that scaled step is what this function returns.
2712 *
2713 * @code
2714 *   const std::pair<double, double> max_step_sizes =
2715 *   calculate_max_step_size(nonlinear_solution, step);
2716 *   const double step_size_s = max_step_sizes.first;
2717 *   const double step_size_z = max_step_sizes.second;
2718 *  
2719 *   step.block(SolutionBlocks::density) *= step_size_s;
2720 *   step.block(SolutionBlocks::displacement) *= step_size_s;
2721 *   step.block(SolutionBlocks::unfiltered_density) *= step_size_s;
2722 *   step.block(SolutionBlocks::displacement_multiplier) *= step_size_z;
2723 *   step.block(SolutionBlocks::unfiltered_density_multiplier) *= step_size_z;
2724 *   step.block(SolutionBlocks::density_lower_slack) *= step_size_s;
2725 *   step.block(SolutionBlocks::density_lower_slack_multiplier) *= step_size_z;
2726 *   step.block(SolutionBlocks::density_upper_slack) *= step_size_s;
2727 *   step.block(SolutionBlocks::density_upper_slack_multiplier) *= step_size_z;
2728 *  
2729 *   return step;
2730 *   }
2731 *  
2732 *  
2733 *  
2734 * @endcode
2735 *
2736 *
2737 * <a name="step_79-Computingascaledstep"></a>
2738 * <h4>Computing a scaled step</h4>
2739 *
2740
2741 *
2742 * The next function then implements a back-tracking algorithm for a
2743 * line search. It keeps halving the step length until it finds one
2744 * for which the merit function decreases sufficiently, and then
2745 * returns the new location, namely the current state vector plus the
2746 * search direction times the accepted step length.
2747 *
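 *
 * The acceptance criterion used in the loop below is the usual
 * sufficient decrease (Armijo) condition: a step length @f$\alpha@f$ is
 * accepted as soon as
 * @f[
 *   \phi(x + \alpha d) \le \phi(x) + c\,\alpha\,\phi'(x;d),
 * @f]
 * where @f$\phi@f$ is the merit function, @f$x@f$ the current state,
 * @f$d@f$ the search direction, @f$c@f$ the `descent_requirement`
 * parameter, and @f$\phi'(x;d)@f$ the directional derivative, which the
 * code approximates by a one-sided finite difference. If the condition
 * is not satisfied, @f$\alpha@f$ is halved and we try again.
 *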
2748 * @code
2749 *   template <int dim>
2750 *   BlockVector<double>
2751 *   SANDTopOpt<dim>::compute_scaled_step(const BlockVector<double> &state,
2752 *   const BlockVector<double> &max_step,
2753 *   const double descent_requirement)
2754 *   {
2755 *   const double merit_derivative =
2756 *   (calculate_exact_merit(state + 1e-4 * max_step) -
2757 *   calculate_exact_merit(state)) /
2758 *   1e-4;
2759 *   double step_size = 1;
2760 *   unsigned int max_linesearch_iterations = 10;
2761 *   for (unsigned int k = 0; k < max_linesearch_iterations; ++k)
2762 *   {
2763 *   if (calculate_exact_merit(state + step_size * max_step) <
2764 *   calculate_exact_merit(state) +
2765 *   step_size * descent_requirement * merit_derivative)
2766 *   break;
2767 *   else
2768 *   step_size = step_size / 2;
2769 *   }
2770 *   return state + (step_size * max_step);
2771 *   }
2772 *  
2773 *  
2774 * @endcode
2775 *
2776 *
2777 * <a name="step_79-Checkingforconvergence"></a>
2778 * <h4>Checking for convergence</h4>
2779 *
2780
2781 *
2782 * The final auxiliary function in this block is the one that checks
2783 * to see if the KKT conditions are sufficiently met so that the
2784 * overall algorithm can lower the barrier size. It does so by
2785 * computing the @f$l_1@f$ norm of the residual vector returned by
2786 * `calculate_test_rhs()` and comparing it to a small fraction of the barrier size.
2787 *
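 *
 * In formulas, with @f$\mu@f$ the current barrier size and @f$R(x)@f$
 * the residual vector returned by `calculate_test_rhs()`, the test
 * performed below reads
 * @f[
 *   \|R(x)\|_1 < 10^{-2}\,\mu.
 * @f]
 *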
2788 * @code
2789 *   template <int dim>
2790 *   bool SANDTopOpt<dim>::check_convergence(const BlockVector<double> &state)
2791 *   {
2792 *   const BlockVector<double> test_rhs = calculate_test_rhs(state);
2793 *   const double test_rhs_norm = test_rhs.l1_norm();
2794 *  
2795 *   const double convergence_condition = 1e-2;
2796 *   const double target_norm = convergence_condition * barrier_size;
2797 *  
2798 *   std::cout << " Checking convergence. Current rhs norm is "
2799 *   << test_rhs_norm << ", target is " << target_norm << std::endl;
2800 *  
2801 *   return (test_rhs_norm < target_norm);
2802 *   }
2803 *  
2804 *  
2805 * @endcode
2806 *
2807 *
2808 * <a name="step_79-Postprocessingthesolution"></a>
2809 * <h3>Postprocessing the solution</h3>
2810 *
2811
2812 *
2813 * The first of the postprocessing functions outputs information
2814 * in a VTU file for visualization. It looks long, but it's really
2815 * just the same as what was done in @ref step_22 "step-22", for example, just
2816 * with (a lot) more solution variables:
2817 *
2818 * @code
2819 *   template <int dim>
2820 *   void SANDTopOpt<dim>::output_results(const unsigned int iteration) const
2821 *   {
2822 *   std::vector<std::string> solution_names(1, "density");
2823 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
2824 *   data_component_interpretation(
2825 *   1, DataComponentInterpretation::component_is_scalar);
2826 *   for (unsigned int i = 0; i < dim; ++i)
2827 *   {
2828 *   solution_names.emplace_back("displacement");
2829 *   data_component_interpretation.push_back(
2830 *   DataComponentInterpretation::component_is_part_of_vector);
2831 *   }
2832 *   solution_names.emplace_back("unfiltered_density");
2833 *   data_component_interpretation.push_back(
2834 *   DataComponentInterpretation::component_is_scalar);
2835 *   for (unsigned int i = 0; i < dim; ++i)
2836 *   {
2837 *   solution_names.emplace_back("displacement_multiplier");
2838 *   data_component_interpretation.push_back(
2839 *   DataComponentInterpretation::component_is_part_of_vector);
2840 *   }
2841 *   solution_names.emplace_back("unfiltered_density_multiplier");
2842 *   data_component_interpretation.push_back(
2843 *   DataComponentInterpretation::component_is_scalar);
2844 *   solution_names.emplace_back("low_slack");
2845 *   data_component_interpretation.push_back(
2846 *   DataComponentInterpretation::component_is_scalar);
2847 *   solution_names.emplace_back("low_slack_multiplier");
2848 *   data_component_interpretation.push_back(
2849 *   DataComponentInterpretation::component_is_scalar);
2850 *   solution_names.emplace_back("high_slack");
2851 *   data_component_interpretation.push_back(
2852 *   DataComponentInterpretation::component_is_scalar);
2853 *   solution_names.emplace_back("high_slack_multiplier");
2854 *   data_component_interpretation.push_back(
2855 *   DataComponentInterpretation::component_is_scalar);
2856 *  
2857 *   DataOut<dim> data_out;
2858 *   data_out.attach_dof_handler(dof_handler);
2859 *   data_out.add_data_vector(nonlinear_solution,
2860 *   solution_names,
2861 *   DataOut<dim>::type_dof_data,
2862 *   data_component_interpretation);
2863 *   data_out.build_patches();
2864 *  
2865 *   std::ofstream output("solution" + std::to_string(iteration) + ".vtu");
2866 *   data_out.write_vtu(output);
2867 *   }
2868 *  
2869 *  
2870 * @endcode
2871 *
2872 * The second of these functions outputs the solution as an `.stl`
2873 * file for 3d
2874 * printing. [STL](https://en.wikipedia.org/wiki/STL_(file_format))
2875 * files are made up of triangles and normal vectors, and we will
2876 * use this format to show all of those cells with a density value
2877 * larger than one half by first extruding the mesh from a @f$z@f$
2878 * value of zero to @f$z=0.25@f$, and then generating two triangles
2879 * for each face of the cells with a sufficiently large density
2880 * value. The triangle nodes must go counter-clockwise when looking
2881 * from the outside, and the normal vectors must be unit vectors
2882 * pointing outwards, which requires a few checks.
2883 *
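 *
 * For orientation, each facet written below follows the standard ASCII
 * STL layout; schematically (with placeholder coordinates), a single
 * facet looks like this:
 * @code
 *   facet normal 0.0 0.0 -1.0
 *     outer loop
 *       vertex 0.0 0.0 0.0
 *       vertex 0.0 1.0 0.0
 *       vertex 1.0 0.0 0.0
 *     endloop
 *   endfacet
 * @endcode
 *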
2884 * @code
2885 *   template <int dim>
2886 *   void SANDTopOpt<dim>::write_as_stl()
2887 *   {
2888 *   static_assert(dim == 2,
2889 *   "This function is not implemented for anything "
2890 *   "other than the 2d case.");
2891 *  
2892 *   std::ofstream stlfile;
2893 *   stlfile.open("bridge.stl");
2894 *  
2895 *   stlfile << "solid bridge\n" << std::scientific;
2896 *   double height = .25;
2897 *  
2898 *   for (const auto &cell : dof_handler.active_cell_iterators())
2899 *   {
2900 *   if (nonlinear_solution.block(
2901 *   SolutionBlocks::density)[cell->active_cell_index()] > 0.5)
2902 *   {
2903 * @endcode
2904 *
2905 * We have now found a cell with a density value larger
2906 * than one half. Let us start by writing out the bottom
2907 * and top faces. Owing to the ordering issue mentioned
2908 * above, we have to make sure that we understand
2909 * whether a cell has a right- or left-handed
2910 * coordinate system. We do this by checking whether the
2911 * two edges starting at vertex 0 form a right-handed
2912 * coordinate system.
2913 *
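 *
 * Concretely, with the edge vectors @f$e_1 = v_1 - v_0@f$ and
 * @f$e_2 = v_2 - v_0@f$ (where @f$v_i@f$ denotes the @f$i@f$th vertex of
 * the cell), the code below declares the cell right-handed if
 * @f[
 *   \det\begin{pmatrix} e_1^T \\ e_2^T \end{pmatrix} > 0.
 * @f]
 *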
2914 * @code
2915 *   const Tensor<1, dim> edge_directions[2] = {cell->vertex(1) -
2916 *   cell->vertex(0),
2917 *   cell->vertex(2) -
2918 *   cell->vertex(0)};
2919 *   const Tensor<2, dim> edge_tensor(
2920 *   {{edge_directions[0][0], edge_directions[0][1]},
2921 *   {edge_directions[1][0], edge_directions[1][1]}});
2922 *   const bool is_right_handed_cell = (determinant(edge_tensor) > 0);
2923 *  
2924 *   if (is_right_handed_cell)
2925 *   {
2926 *   /* Write one side at z = 0. */
2927 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2928 *   << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
2929 *   stlfile << " outer loop\n";
2930 *   stlfile << " vertex " << cell->vertex(0)[0] << ' '
2931 *   << cell->vertex(0)[1] << ' ' << 0.000000e+00 << '\n';
2932 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2933 *   << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
2934 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2935 *   << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
2936 *   stlfile << " endloop\n";
2937 *   stlfile << " endfacet\n";
2938 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2939 *   << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
2940 *   stlfile << " outer loop\n";
2941 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2942 *   << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
2943 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2944 *   << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
2945 *   stlfile << " vertex " << cell->vertex(3)[0] << ' '
2946 *   << cell->vertex(3)[1] << ' ' << 0.000000e+00 << '\n';
2947 *   stlfile << " endloop\n";
2948 *   stlfile << " endfacet\n";
2949 *  
2950 *   /* Write one side at z = height. */
2951 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2952 *   << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
2953 *   stlfile << " outer loop\n";
2954 *   stlfile << " vertex " << cell->vertex(0)[0] << ' '
2955 *   << cell->vertex(0)[1] << ' ' << height << '\n';
2956 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2957 *   << cell->vertex(1)[1] << ' ' << height << '\n';
2958 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2959 *   << cell->vertex(2)[1] << ' ' << height << '\n';
2960 *   stlfile << " endloop\n";
2961 *   stlfile << " endfacet\n";
2962 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2963 *   << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
2964 *   stlfile << " outer loop\n";
2965 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2966 *   << cell->vertex(1)[1] << ' ' << height << '\n';
2967 *   stlfile << " vertex " << cell->vertex(3)[0] << ' '
2968 *   << cell->vertex(3)[1] << ' ' << height << '\n';
2969 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2970 *   << cell->vertex(2)[1] << ' ' << height << '\n';
2971 *   stlfile << " endloop\n";
2972 *   stlfile << " endfacet\n";
2973 *   }
2974 *   else /* The cell has a left-handed set up */
2975 *   {
2976 *   /* Write one side at z = 0. */
2977 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2978 *   << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
2979 *   stlfile << " outer loop\n";
2980 *   stlfile << " vertex " << cell->vertex(0)[0] << ' '
2981 *   << cell->vertex(0)[1] << ' ' << 0.000000e+00 << '\n';
2982 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2983 *   << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
2984 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2985 *   << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
2986 *   stlfile << " endloop\n";
2987 *   stlfile << " endfacet\n";
2988 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2989 *   << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
2990 *   stlfile << " outer loop\n";
2991 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2992 *   << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
2993 *   stlfile << " vertex " << cell->vertex(3)[0] << ' '
2994 *   << cell->vertex(3)[1] << ' ' << 0.000000e+00 << '\n';
2995 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2996 *   << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
2997 *   stlfile << " endloop\n";
2998 *   stlfile << " endfacet\n";
2999 *  
3000 *   /* Write one side at z = height. */
3001 *   stlfile << " facet normal " << 0.000000e+00 << ' '
3002 *   << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
3003 *   stlfile << " outer loop\n";
3004 *   stlfile << " vertex " << cell->vertex(0)[0] << ' '
3005 *   << cell->vertex(0)[1] << ' ' << height << '\n';
3006 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
3007 *   << cell->vertex(2)[1] << ' ' << height << '\n';
3008 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
3009 *   << cell->vertex(1)[1] << ' ' << height << '\n';
3010 *   stlfile << " endloop\n";
3011 *   stlfile << " endfacet\n";
3012 *   stlfile << " facet normal " << 0.000000e+00 << ' '
3013 *   << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
3014 *   stlfile << " outer loop\n";
3015 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
3016 *   << cell->vertex(1)[1] << ' ' << height << '\n';
3017 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
3018 *   << cell->vertex(2)[1] << ' ' << height << '\n';
3019 *   stlfile << " vertex " << cell->vertex(3)[0] << ' '
3020 *   << cell->vertex(3)[1] << ' ' << height << '\n';
3021 *   stlfile << " endloop\n";
3022 *   stlfile << " endfacet\n";
3023 *   }
3024 *  
3025 * @endcode
3026 *
3027 * Next we need to deal with the four faces of the
3028 * cell, extended into the @f$z@f$ direction. However, we
3029 * only need to write these faces if either the face
3030 * is on the domain boundary, or if it is the
3031 * interface between a cell with density greater than
3032 * 0.5 and a cell with density less than 0.5.
3033 *
3034 * @code
3035 *   for (unsigned int face_number = 0;
3036 *   face_number < GeometryInfo<dim>::faces_per_cell;
3037 *   ++face_number)
3038 *   {
3039 *   const typename DoFHandler<dim>::face_iterator face =
3040 *   cell->face(face_number);
3041 *  
3042 *   if ((face->at_boundary()) ||
3043 *   (!face->at_boundary() &&
3044 *   (nonlinear_solution.block(
3045 *   0)[cell->neighbor(face_number)->active_cell_index()] <
3046 *   0.5)))
3047 *   {
3048 *   const Tensor<1, dim> normal_vector =
3049 *   (face->center() - cell->center());
3050 *   const double normal_norm = normal_vector.norm();
3051 *   if ((face->vertex(0)[0] - face->vertex(0)[0]) *
3052 *   (face->vertex(1)[1] - face->vertex(0)[1]) *
3053 *   0.000000e+00 +
3054 *   (face->vertex(0)[1] - face->vertex(0)[1]) * (0 - 0) *
3055 *   normal_vector[0] +
3056 *   (height - 0) *
3057 *   (face->vertex(1)[0] - face->vertex(0)[0]) *
3058 *   normal_vector[1] -
3059 *   (face->vertex(0)[0] - face->vertex(0)[0]) * (0 - 0) *
3060 *   normal_vector[1] -
3061 *   (face->vertex(0)[1] - face->vertex(0)[1]) *
3062 *   (face->vertex(1)[0] - face->vertex(0)[0]) *
3063 *   normal_vector[0] -
3064 *   (height - 0) *
3065 *   (face->vertex(1)[1] - face->vertex(0)[1]) * 0 >
3066 *   0)
3067 *   {
3068 *   stlfile << " facet normal "
3069 *   << normal_vector[0] / normal_norm << ' '
3070 *   << normal_vector[1] / normal_norm << ' '
3071 *   << 0.000000e+00 << '\n';
3072 *   stlfile << " outer loop\n";
3073 *   stlfile << " vertex " << face->vertex(0)[0]
3074 *   << ' ' << face->vertex(0)[1] << ' '
3075 *   << 0.000000e+00 << '\n';
3076 *   stlfile << " vertex " << face->vertex(0)[0]
3077 *   << ' ' << face->vertex(0)[1] << ' ' << height
3078 *   << '\n';
3079 *   stlfile << " vertex " << face->vertex(1)[0]
3080 *   << ' ' << face->vertex(1)[1] << ' '
3081 *   << 0.000000e+00 << '\n';
3082 *   stlfile << " endloop\n";
3083 *   stlfile << " endfacet\n";
3084 *   stlfile << " facet normal "
3085 *   << normal_vector[0] / normal_norm << ' '
3086 *   << normal_vector[1] / normal_norm << ' '
3087 *   << 0.000000e+00 << '\n';
3088 *   stlfile << " outer loop\n";
3089 *   stlfile << " vertex " << face->vertex(0)[0]
3090 *   << ' ' << face->vertex(0)[1] << ' ' << height
3091 *   << '\n';
3092 *   stlfile << " vertex " << face->vertex(1)[0]
3093 *   << ' ' << face->vertex(1)[1] << ' ' << height
3094 *   << '\n';
3095 *   stlfile << " vertex " << face->vertex(1)[0]
3096 *   << ' ' << face->vertex(1)[1] << ' '
3097 *   << 0.000000e+00 << '\n';
3098 *   stlfile << " endloop\n";
3099 *   stlfile << " endfacet\n";
3100 *   }
3101 *   else
3102 *   {
3103 *   stlfile << " facet normal "
3104 *   << normal_vector[0] / normal_norm << ' '
3105 *   << normal_vector[1] / normal_norm << ' '
3106 *   << 0.000000e+00 << '\n';
3107 *   stlfile << " outer loop\n";
3108 *   stlfile << " vertex " << face->vertex(0)[0]
3109 *   << ' ' << face->vertex(0)[1] << ' '
3110 *   << 0.000000e+00 << '\n';
3111 *   stlfile << " vertex " << face->vertex(1)[0]
3112 *   << ' ' << face->vertex(1)[1] << ' '
3113 *   << 0.000000e+00 << '\n';
3114 *   stlfile << " vertex " << face->vertex(0)[0]
3115 *   << ' ' << face->vertex(0)[1] << ' ' << height
3116 *   << '\n';
3117 *   stlfile << " endloop\n";
3118 *   stlfile << " endfacet\n";
3119 *   stlfile << " facet normal "
3120 *   << normal_vector[0] / normal_norm << ' '
3121 *   << normal_vector[1] / normal_norm << ' '
3122 *   << 0.000000e+00 << '\n';
3123 *   stlfile << " outer loop\n";
3124 *   stlfile << " vertex " << face->vertex(0)[0]
3125 *   << ' ' << face->vertex(0)[1] << ' ' << height
3126 *   << '\n';
3127 *   stlfile << " vertex " << face->vertex(1)[0]
3128 *   << ' ' << face->vertex(1)[1] << ' '
3129 *   << 0.000000e+00 << '\n';
3130 *   stlfile << " vertex " << face->vertex(1)[0]
3131 *   << ' ' << face->vertex(1)[1] << ' ' << height
3132 *   << '\n';
3133 *   stlfile << " endloop\n";
3134 *   stlfile << " endfacet\n";
3135 *   }
3136 *   }
3137 *   }
3138 *   }
3139 *   }
3140 *   stlfile << "endsolid bridge";
3141 *   }
3142 *  
3143 *  
3144 *  
3145 * @endcode
3146 *
3147 *
3148 * <a name="step_79-Therunfunctiondrivingtheoverallalgorithm"></a>
3149 * <h3>The run() function driving the overall algorithm</h3>
3150 *
3151
3152 *
3153 * This function finally provides the overall driver logic. It is,
3154 * in the grand scheme of things, a rather complicated function
3155 * primarily because the optimization algorithm is difficult: It
3156 * isn't just about finding a Newton direction like in @ref step_15 "step-15" and
3157 * then going a fixed distance in this direction any more, but
3158 * instead about (i) determining what the optimal log-barrier
3159 * penalty parameter should be in the current step, (ii) running a
3160 * complicated algorithm to determine how far we want to go, and
3161 * other ingredients. Let us see how we can break this down into
3162 * smaller chunks in the following documentation.
3163 *
3164
3165 *
3166 * The function starts out simple enough with first setting up the
3167 * mesh, the DoFHandler, and then the various linear algebra objects
3168 * necessary for the following:
3169 *
3170 * @code
3171 *   template <int dim>
3172 *   void SANDTopOpt<dim>::run()
3173 *   {
3174 *   std::cout << "filter r is: " << filter_r << std::endl;
3175 *  
3176 *   {
3177 *   TimerOutput::Scope t(timer, "setup");
3178 *  
3179 *   create_triangulation();
3180 *  
3181 *   dof_handler.distribute_dofs(fe);
3182 *   DoFRenumbering::component_wise(dof_handler);
3183 *  
3184 *   setup_boundary_values();
3185 *   setup_block_system();
3186 *   setup_filter_matrix();
3187 *   }
3188 *  
3189 * @endcode
3190 *
3191 * We then set a number of parameters that affect the
3192 * log-barrier and line search components of the optimization
3193 * algorithm:
3194 *
3195 * @code
3196 *   barrier_size = 25;
3197 *   const double min_barrier_size = .0005;
3198 *  
3199 *   const unsigned int max_uphill_steps = 8;
3200 *   const double descent_requirement = .0001;
3201 *  
3202 *  
3203 * @endcode
3204 *
3205 * Now start the principal iteration. The overall algorithm
3206 * works by using an outer loop in which we loop until both
3207 * (i) the log-barrier parameter has become small enough, and (ii)
3208 * we have reached convergence. In any case, we terminate if we
3209 * end up with too large a number of iterations. This overall
3210 * structure is encoded as a `do { ... } while (...)` loop
3211 * where the convergence condition is at the bottom.
3212 *
3213 * @code
3214 *   unsigned int iteration_number = 0;
3215 *   const unsigned int max_iterations = 10000;
3216 *  
3217 *   do
3218 *   {
3219 *   std::cout << "Starting outer step in iteration " << iteration_number
3220 *   << " with barrier parameter " << barrier_size << std::endl;
3221 *  
3222 * @endcode
3223 *
3224 * Within this outer loop, we have an inner loop in which we
3225 * try to find an update direction using the watchdog
3226 * algorithm described in the introduction.
3227 *
3228
3229 *
3230 * The general idea of the watchdog algorithm itself is
3231 * this: For a maximum of `max_uphill_steps` attempts (i.e., a
3232 * loop within the "inner loop" mentioned above), we use
3233 * `find_max_step()` to compute a Newton update step, and
3234 * add these steps up in the `nonlinear_solution` vector. In each
3235 * of these attempts (starting from the place reached at the
3236 * end of the previous attempt), we check whether we have
3237 * reached a target value of the merit function described
3238 * above. The target value is computed based on where this
3239 * algorithm starts (the `nonlinear_solution` at the beginning of
3240 * the watchdog loop, saved as `watchdog_state`) and the
3241 * first proposed direction provided by `find_max_step()` in
3242 * the first go-around of this loop (the `k==0` case).
3243 *
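 *
 * Concretely, if @f$x_w@f$ denotes the `watchdog_state`, @f$d_1@f$ the
 * first proposed direction, and @f$\phi@f$ the merit function, the code
 * below sets the target value to
 * @f[
 *   \phi(x_w) + c\,\phi'(x_w; d_1),
 * @f]
 * where @f$c@f$ is the `descent_requirement` parameter and the
 * directional derivative @f$\phi'(x_w; d_1)@f$ is again approximated by
 * a one-sided finite difference.
 *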
3244 * @code
3245 *   do
3246 *   {
3247 *   std::cout << " Starting inner step in iteration "
3248 *   << iteration_number
3249 *   << " with merit function penalty multiplier "
3250 *   << penalty_multiplier << std::endl;
3251 *  
3252 *   bool watchdog_step_found = false;
3253 *  
3254 *   const BlockVector<double> watchdog_state = nonlinear_solution;
3255 *   BlockVector<double> first_step;
3256 *   double target_merit = numbers::signaling_nan<double>();
3257 *   double merit_derivative = numbers::signaling_nan<double>();
3258 *  
3259 *   for (unsigned int k = 0; k < max_uphill_steps; ++k)
3260 *   {
3261 *   ++iteration_number;
3262 *   const BlockVector<double> update_step = find_max_step();
3263 *  
3264 *   if (k == 0)
3265 *   {
3266 *   first_step = update_step;
3267 *   merit_derivative =
3268 *   ((calculate_exact_merit(watchdog_state +
3269 *   .0001 * first_step) -
3270 *   calculate_exact_merit(watchdog_state)) /
3271 *   .0001);
3272 *   target_merit = calculate_exact_merit(watchdog_state) +
3273 *   descent_requirement * merit_derivative;
3274 *   }
3275 *  
3276 *   nonlinear_solution += update_step;
3277 *   const double current_merit =
3278 *   calculate_exact_merit(nonlinear_solution);
3279 *  
3280 *   std::cout << " current watchdog state merit is: "
3281 *   << current_merit << "; target merit is "
3282 *   << target_merit << std::endl;
3283 *  
3284 *   if (current_merit < target_merit)
3285 *   {
3286 *   watchdog_step_found = true;
3287 *   std::cout << " found workable step after " << k + 1
3288 *   << " iterations" << std::endl;
3289 *   break;
3290 *   }
3291 *   }
3292 *  
3293 *  
3294 * @endcode
3295 *
3296 * The next part of the algorithm then depends on
3297 * whether the watchdog loop above succeeded. If it
3298 * did, then we are satisfied and no further action is
3299 * necessary: We just stay where we are. If, however,
3300 * we took the maximal number of unsuccessful steps in
3301 * the loop above, then we need to do something else,
3302 * and this is what the following code block does.
3303 *
3304
3305 *
3306 * Specifically, from the final (unsuccessful) state
3307 * of the loop above, we seek one more update
3308 * direction and take what is called a "stretch
3309 * step". If that stretch state satisfies a condition
3310 * involving the merit function, then we go there. On
3311 * the other hand, if the stretch state is also
3312 * unacceptable (as all of the watchdog steps above
3313 * were), then we discard all of the watchdog steps
3314 * taken above and start over again where we had
3315 * started the watchdog iterations -- that place was
3316 * stored in the `watchdog_state` variable above. More
3317 * specifically, the conditions below first test
3318 * whether we take a step from `watchdog_state` in
3319 * direction `first_step`, or whether we can do one
3320 * more update from the stretch state to find a new
3321 * place. It is possible that neither of these is
3322 * actually better than the state we started from at
3323 * the beginning of the watchdog algorithm, but even
3324 * if that is so, that place clearly was a difficult
3325 * place to be in, and getting away to start the next
3326 * iteration from another place might be a useful
3327 * strategy to eventually converge.
3328 *
3329
3330 *
3331 * We keep repeating the watchdog steps above along
3332 * with the logic below until this inner iteration is
3333 * finally converged (or until we run up against the
3334 * maximal number of iterations -- where we count the
3335 * number of linear solves as iterations and increment
3336 * the counter every time we call `find_max_step()`
3337 * since that is where the linear solve actually
3338 * happens). In any case, at the end of each of these
3339 * inner iterations we also output the solution in a
3340 * form suitable for visualization.
3341 *
3342
3343 *
3344 *
3345 * @code
3346 *   if (watchdog_step_found == false)
3347 *   {
3348 *   ++iteration_number;
3349 *   const BlockVector<double> update_step = find_max_step();
3350 *   const BlockVector<double> stretch_state =
3351 *   compute_scaled_step(nonlinear_solution,
3352 *   update_step,
3353 *   descent_requirement);
3354 *  
3355 * @endcode
3356 *
3357 * If we did not get a successful watchdog step,
3358 * we now need to decide between going back to
3359 * where we started, or using the final state. We
3360 * compare the merits of both of these locations,
3361 * and then take a scaled step from whichever
3362 * location is better. As the scaled step is
3363 * guaranteed to lower the merit, we will end up
3364 * keeping one of the two.
3365 *
3366 * @code
3367 *   if ((calculate_exact_merit(nonlinear_solution) <
3368 *   calculate_exact_merit(watchdog_state)) ||
3369 *   (calculate_exact_merit(stretch_state) < target_merit))
3370 *   {
3371 *   std::cout << " Taking scaled step from end of watchdog"
3372 *   << std::endl;
3373 *   nonlinear_solution = stretch_state;
3374 *   }
3375 *   else
3376 *   {
3377 *   std::cout
3378 *   << " Taking scaled step from beginning of watchdog"
3379 *   << std::endl;
3380 *   if (calculate_exact_merit(stretch_state) >
3381 *   calculate_exact_merit(watchdog_state))
3382 *   {
3383 *   nonlinear_solution =
3384 *   compute_scaled_step(watchdog_state,
3385 *   first_step,
3386 *   descent_requirement);
3387 *   }
3388 *   else
3389 *   {
3390 *   ++iteration_number;
3391 *   nonlinear_solution = stretch_state;
3392 *   const BlockVector<double> stretch_step =
3393 *   find_max_step();
3394 *   nonlinear_solution =
3395 *   compute_scaled_step(nonlinear_solution,
3396 *   stretch_step,
3397 *   descent_requirement);
3398 *   }
3399 *   }
3400 *   }
3401 *  
3402 *   output_results(iteration_number);
3403 *   }
3404 *   while ((iteration_number < max_iterations) &&
3405 *   (check_convergence(nonlinear_solution) == false));
3406 *  
3407 *  
3408 * @endcode
3409 *
3410 * At the end of the outer loop, we have to update the
3411 * barrier parameter, for which we use the following
3412 * formula. The rest of the function is then simply about
3413 * checking the outer loop convergence condition and, if
3414 * we decide to terminate computations, about writing the
3415 * final "design" as an STL file for use in 3d printing
3416 * and outputting some timing information.
3417 *
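 *
 * In formulas, with @f$\mu@f$ the barrier size and @f$\mu_{\min}@f$ its
 * minimal allowed value, the update below reads
 * @f[
 *   \mu \leftarrow \max\bigl(\min\bigl(0.8\,\mu,\; \mu^{1.2}\bigr),\;
 *   \mu_{\min}\bigr).
 * @f]
 *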
3418 * @code
3419 *   const double barrier_size_multiplier = .8;
3420 *   const double barrier_size_exponent = 1.2;
3421 *  
3422 *   barrier_size =
3423 *   std::max(std::min(barrier_size * barrier_size_multiplier,
3424 *   std::pow(barrier_size, barrier_size_exponent)),
3425 *   min_barrier_size);
3426 *  
3427 *   std::cout << std::endl;
3428 *   }
3429 *   while (((barrier_size > min_barrier_size) ||
3430 *   (check_convergence(nonlinear_solution) == false)) &&
3431 *   (iteration_number < max_iterations));
3432 *  
3433 *   write_as_stl();
3434 *   }
3435 *   } // namespace SAND
3436 *  
3437 * @endcode
3438 *
3439 *
3440 * <a name="step_79-Themainfunction"></a>
3441 * <h3>The main function</h3>
3442 *
3443
3444 *
3445 * The remainder of the code, the `main()` function, is as usual:
3446 *
3447 * @code
3448 *   int main()
3449 *   {
3450 *   try
3451 *   {
3452 *   SAND::SANDTopOpt<2> elastic_problem_2d;
3453 *   elastic_problem_2d.run();
3454 *   }
3455 *   catch (std::exception &exc)
3456 *   {
3457 *   std::cerr << std::endl
3458 *   << std::endl
3459 *   << "----------------------------------------------------"
3460 *   << std::endl;
3461 *   std::cerr << "Exception on processing: " << std::endl
3462 *   << exc.what() << std::endl
3463 *   << "Aborting!" << std::endl
3464 *   << "----------------------------------------------------"
3465 *   << std::endl;
3466 *  
3467 *   return 1;
3468 *   }
3469 *   catch (...)
3470 *   {
3471 *   std::cerr << std::endl
3472 *   << std::endl
3473 *   << "----------------------------------------------------"
3474 *   << std::endl;
3475 *   std::cerr << "Unknown exception!" << std::endl
3476 *   << "Aborting!" << std::endl
3477 *   << "----------------------------------------------------"
3478 *   << std::endl;
3479 *   return 1;
3480 *   }
3481 *   return 0;
3482 *   }
3483 * @endcode
3484<a name="step_79-Results"></a><h1>Results</h1>
3485
3486<a name="step_79-TestProblem"></a><h3>Test Problem</h3>
3487
3488The algorithms used above are tested against a traditional topology optimization
3489 problem called the Messerschmitt-Bolkow-Blohm Beam (MBB Beam).
3490
3491This problem considers the optimal 2-d structure that can be built on a
3492rectangle 6 units wide and 1 unit tall. The bottom corners are fixed in place
3493in the @f$y@f$ direction using a zero Dirichlet boundary condition, and a downward
3494force is applied at the center of the top of the beam by enforcing a Neumann
3495boundary condition. The rest of the boundary is allowed to move and has no
3496external force applied, which takes the form of a zero Neumann boundary
3497condition. In essence, we are asking the following question: How should we
3498design a bridge whose bottom left and bottom right points rest on rollers that
3499allow these points to move horizontally but not vertically, so that the
3500displacement in response to the vertical force at the center is minimal?
3502
3503While the total volume of the domain is 6 units, 3 units of material are allowed for
3504the structure. Because of the symmetry of the problem, it could be posed on a
3505rectangle of width 3 and height 1 by cutting the original domain in half, and
3506using zero Dirichlet boundary conditions in the @f$x@f$ direction along the cut
3507edge. That said, symmetry of the solution is a good indicator that the program
3508is working as expected, so we solved the problem on the whole domain,
3509as shown below @cite Bendse2004.
3510
3511<div style="text-align:center;">
3512 <img src="https://www.dealii.org/images/steps/developer/step-79.mbbgeometry.png"
3513 alt="The MBB problem domain and boundary conditions">
3514</div>
3515
3516
3517Using the program discussed above, we find the optimized MBB Beam design, and the
3518individual components of the solution look as follows:
3519
3520<div class="onecolumn" style="width: 80%; text-align: center;">
3521 <div>
3522 <img src="https://www.dealii.org/images/steps/developer/step-79.filtereddensity.png"
3523 alt="Filtered Density Solution">
3524 </div>
3525 <div>
3526 <img src="https://www.dealii.org/images/steps/developer/step-79.unfiltereddensity.png"
3527 alt="Unfiltered Density Solution">
3528 </div>
3529</div>
3530
3531
3532These pictures show that what we find here is in accordance with what one
3533typically sees in other publications on the topic @cite Bendse2004.
3534Maybe more interestingly, the
3535result looks like a truss bridge (except that we apply the load at the top of
3536the trusses rather than at the bottom as in many real truss bridges, making it
3537akin to a "deck truss" bridge), suggesting that the designs that have been used
3538in bridge-building for centuries are indeed based on ideas we can now show to
3539be optimal in some sense.
3540<a name="step_79-Possibilitiesforextensions"></a><h4>Possibilities for extensions</h4>
3541
3542
3543The results shown above took around 75 iterations to find, which is quite
3544concerning given the expense of solving the large linear systems in each
3545iteration. Looking at the evolution, the convergence alternates between phases
3546of rapid progress and phases of slow progress. We believe this to be due both
3547to a lack of precision on when and how to decrease the barrier parameter, and
3548to our choice of merit function being sub-optimal. In the future, a LOQO
3549barrier update replacing the monotone reduction, as well as a Markov filter in
3550place of a merit function, would likely decrease the number of necessary
3551iterations significantly.
3552
3553The algorithm is most sensitive to the barrier decrease in the middle of the
3554convergence history, which is problematic: it seems we need the barrier to
3555decrease quickly, then slowly, and then quickly again.
3556
3557Secondly, the linear solver used here is just the sparse direct solver based on
3558the SparseDirectUMFPACK class. This works reasonably well on small problems,
3559but the formulation of the optimization problem detailed above has quite a large
3560number of variables and so the linear problem is not only large but also has a
3561lot of nonzero entries in many rows, even on meshes that overall are still
3562relatively coarse. As a consequence, the solver time dominates the
3563computations, and more sophisticated approaches to solving the linear system
3564are necessary.
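
One direction to explore would be deal.II's iterative (Krylov) solvers. The
following fragment is only a sketch of what such an experiment might look like
inside the existing `solve()` function: it assumes the member variables
`system_matrix`, `system_rhs`, and `constraints` introduced earlier, the local
vector `newton_update` is a hypothetical stand-in for whatever vector `solve()`
currently fills, and -- without a block preconditioner tailored to this
indefinite saddle-point system -- plain GMRES with the identity preconditioner
shown here would converge very slowly, if at all:

@code
// These headers would be added at the top of step-79.cc:
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/solver_gmres.h>
#include <deal.II/lac/precondition.h>

// Allow many iterations; stop at a relative residual of 1e-8.
SolverControl                    solver_control(10000,
                                                1e-8 * system_rhs.l2_norm());
SolverGMRES<BlockVector<double>> solver(solver_control);

// Use the right-hand side only to obtain a vector with the same block
// structure, then reset it to zero as the initial guess.
BlockVector<double> newton_update = system_rhs;
newton_update = 0;

// Solve the Newton system; a practical implementation would replace
// PreconditionIdentity() by a preconditioner that exploits the block
// structure of the KKT matrix.
solver.solve(system_matrix, newton_update, system_rhs, PreconditionIdentity());

// Make the result consistent with the volume constraint.
constraints.distribute(newton_update);
@endcode

The real gain would come from the preconditioner itself, the design of which is
beyond the scope of this sketch.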
3565 *
3566 *
3567<a name="step_79-PlainProg"></a>
3568<h1> The plain program</h1>
3569@include "step-79.cc"
3570*/