step-79.h
1245 *  
1246 *   for (unsigned int i = 0; i < dim; ++i)
1247 *   {
1248 *   for (unsigned int k = 0; k < dim; ++k)
1249 *   {
1250 *   coupling[displacement<dim> + i]
1251 *   [displacement_multiplier<dim> + k] = DoFTools::always;
1252 *   coupling[displacement_multiplier<dim> + k]
1253 *   [displacement<dim> + i] = DoFTools::always;
1254 *   }
1255 *   }
1256 *  
1257 *   /* Coupling for slack variables */
1258 *   coupling[density_lower_slack<dim>][density_lower_slack<dim>] =
1259 *   DoFTools::always;
1260 *   coupling[density_lower_slack<dim>][density_upper_slack<dim>] =
1261 *   DoFTools::always;
1262 *   coupling[density_upper_slack<dim>][density_lower_slack<dim>] =
1263 *   DoFTools::always;
1264 *  
1265 *   coupling[density_lower_slack_multiplier<dim>]
1266 *   [density_lower_slack_multiplier<dim>] = DoFTools::always;
1267 *   coupling[density_lower_slack_multiplier<dim>]
1268 *   [density_upper_slack_multiplier<dim>] = DoFTools::always;
1269 *   coupling[density_upper_slack_multiplier<dim>]
1270 *   [density_lower_slack_multiplier<dim>] = DoFTools::always;
1271 *   }
1272 *  
1273 * @endcode
1274 *
1275 * Before we can create the sparsity pattern, we also have to
1276 * set up constraints. Since this program does not adaptively
1277 * refine the mesh, the only constraint we have is one that
1278 * couples all density variables to enforce the volume
1279 * constraint. This will ultimately lead to a dense sub-block
1280 * of the matrix, but there is little we can do about that.
1281 *
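 * Concretely, the single constraint built below -- one entry with
 * value @f$-1@f$ for every density degree of freedom except the last
 * one, together with a zero inhomogeneity -- simply states that
 * @f[
 *   \rho_{\text{last}} = -\sum_{i \neq \text{last}} \rho_i .
 * @f]
 * Since the linear systems we solve are for the Newton updates, the
 * net effect is that the updates of all density unknowns sum to zero,
 * so the total amount of material cannot drift from one iterate to
 * the next.
 *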
1282 * @code
1283 *   const ComponentMask density_mask =
1284 *   fe.component_mask(ValueExtractors::densities<dim>);
1285 *   const IndexSet density_dofs =
1286 *   DoFTools::extract_dofs(dof_handler, density_mask);
1287 *  
1288 *   types::global_dof_index last_density_dof =
1289 *   density_dofs.nth_index_in_set(density_dofs.n_elements() - 1);
1290 *   constraints.clear();
1291 *   {
1292 *   std::vector<std::pair<types::global_dof_index, double>>
1293 *   constraint_entries;
1294 *   constraint_entries.reserve(density_dofs.n_elements() - 1);
1295 *   for (const types::global_dof_index dof_index : density_dofs)
1296 *   if (dof_index != last_density_dof)
1297 *   constraint_entries.emplace_back(dof_index, -1.);
1298 *  
1299 *   constraints.add_constraint(last_density_dof, constraint_entries, 0.);
1300 *   }
1301 *   constraints.close();
1302 *  
1303 * @endcode
1304 *
1305 * We can now finally create the sparsity pattern for the
1306 * matrix, taking into account which variables couple with
1307 * which other variables, and the constraints we have on the
1308 * density.
1309 *
1310 * @code
1311 *   DoFTools::make_sparsity_pattern(dof_handler, coupling, dsp, constraints);
1312 *  
1313 * @endcode
1314 *
1315 * The only part of the matrix we have not dealt with is the
1316 * filter matrix and its transpose. These are non-local
1317 * (integral) operators for which deal.II does not currently
1318 * have functions. What we will ultimately need to do is go
1319 * over all cells and couple the unfiltered density on this
1320 * cell to all filtered densities of neighboring cells that
1321 * are less than a threshold distance away, and the other way
1322 * around; for the moment, we are only concerned with building
1323 * the sparsity pattern that would correspond to this kind of
1324 * matrix, so we perform the equivalent loop and, where later
1325 * on we would write an entry into the matrix, we now
1326 * simply add an entry to the sparsity pattern:
1327 *
1328 * @code
1329 *   for (const auto &cell : dof_handler.active_cell_iterators())
1330 *   {
1331 *   const unsigned int i = cell->active_cell_index();
1332 *   for (const auto &check_cell : find_relevant_neighbors(cell))
1333 *   {
1334 *   const double distance =
1335 *   cell->center().distance(check_cell->center());
1336 *   if (distance < filter_r)
1337 *   {
1338 *   dsp
1339 *   .block(SolutionBlocks::unfiltered_density,
1340 *   SolutionBlocks::unfiltered_density_multiplier)
1341 *   .add(i, check_cell->active_cell_index());
1342 *   dsp
1343 *   .block(SolutionBlocks::unfiltered_density_multiplier,
1344 *   SolutionBlocks::unfiltered_density)
1345 *   .add(i, check_cell->active_cell_index());
1346 *   }
1347 *   }
1348 *   }
1349 *  
1350 * @endcode
1351 *
1352 * Having so generated the "dynamic" sparsity pattern, we can
1353 * finally copy it to the structure that is used to associate
1354 * matrices with a sparsity pattern. Because the sparsity
1355 * pattern is large and complex, we also output it into a file
1356 * of its own for visualization purposes -- in other words,
1357 * for "visual debugging".
1358 *
1359 * @code
1360 *   sparsity_pattern.copy_from(dsp);
1361 *  
1362 *   std::ofstream out("sparsity.plt");
1363 *   sparsity_pattern.print_gnuplot(out);
1364 *  
1365 *   system_matrix.reinit(sparsity_pattern);
1366 *  
1367 *  
1368 * @endcode
1369 *
1370 * What is left is to correctly size the various vectors and
1371 * their blocks, and to set initial guesses for some
1372 * of the components of the (nonlinear) solution vector. We
1373 * here use the symbolic component names for individual blocks
1374 * of the solution vector and, for brevity, use the same trick
1375 * with `using namespace` as above:
1376 *
1377 * @code
1378 *   nonlinear_solution.reinit(block_sizes);
1379 *   system_rhs.reinit(block_sizes);
1380 *  
1381 *   {
1382 *   using namespace SolutionBlocks;
1383 *   nonlinear_solution.block(density).add(density_ratio);
1384 *   nonlinear_solution.block(unfiltered_density).add(density_ratio);
1385 *   nonlinear_solution.block(unfiltered_density_multiplier)
1386 *   .add(density_ratio);
1387 *   nonlinear_solution.block(density_lower_slack).add(density_ratio);
1388 *   nonlinear_solution.block(density_lower_slack_multiplier).add(50);
1389 *   nonlinear_solution.block(density_upper_slack).add(1 - density_ratio);
1390 *   nonlinear_solution.block(density_upper_slack_multiplier).add(50);
1391 *   }
1392 *   }
1393 *  
1394 *  
1395 * @endcode
1396 *
1397 *
1398 * <a name="step_79-Creatingthefiltermatrix"></a>
1399 * <h3>Creating the filter matrix</h3>
1400 *
1401
1402 *
1403 * Next up, a function that is used once at the beginning of the
1404 * program: It creates a matrix @f$H@f$ so that the filtered density
1405 * vector equals @f$H@f$ times the unfiltered density. The creation
1406 * of this matrix is non-trivial; since it is needed in every
1407 * iteration but, unlike the Newton matrix, does not change, it
1408 * is built only once and stored separately.
1409 *
1410
1411 *
1412 * The way this matrix is computed follows the outline used above
1413 * already to form its sparsity pattern. We repeat this process here
1414 * for the sparsity pattern of this separately formed matrix, and
1415 * then actually build the matrix itself. You may want to check the
1416 * definition of this matrix in the introduction to this program.
1417 *
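 * In formulas, the loops below first create the (sparse) entries
 * @f[
 *   \tilde H_{ij} = \max\bigl(0,\, r - \|\mathbf{x}_i - \mathbf{x}_j\|\bigr),
 * @f]
 * where @f$\mathbf{x}_i,\mathbf{x}_j@f$ are the centers of cells
 * @f$i@f$ and @f$j@f$ and @f$r@f$ is the filter radius `filter_r`,
 * and then normalize every row so that
 * @f[
 *   H_{ij} = \frac{\tilde H_{ij}}{\sum_k \tilde H_{ik}} .
 * @f]
 *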
1418 * @code
1419 *   template <int dim>
1420 *   void SANDTopOpt<dim>::setup_filter_matrix()
1421 *   {
1422 * @endcode
1423 *
1424 * The sparsity pattern of the filter has already been determined
1425 * and implemented in the setup_system() function. We copy the
1426 * structure from the appropriate block and use it again here.
1427 *
1428
1429 *
1430 *
1431 * @code
1432 *   filter_sparsity_pattern.copy_from(
1433 *   sparsity_pattern.block(SolutionBlocks::unfiltered_density,
1434 *   SolutionBlocks::unfiltered_density_multiplier));
1435 *   filter_matrix.reinit(filter_sparsity_pattern);
1436 *  
1437 * @endcode
1438 *
1439 * Having so built the sparsity pattern, we now re-do all of
1440 * these loops to actually compute the necessary values of the
1441 * matrix entries:
1442 *
1443
1444 *
1445 *
1446 * @code
1447 *   for (const auto &cell : dof_handler.active_cell_iterators())
1448 *   {
1449 *   const unsigned int i = cell->active_cell_index();
1450 *   for (const auto &check_cell : find_relevant_neighbors(cell))
1451 *   {
1452 *   const double distance =
1453 *   cell->center().distance(check_cell->center());
1454 *   if (distance < filter_r)
1455 *   {
1456 *   filter_matrix.add(i,
1457 *   check_cell->active_cell_index(),
1458 *   filter_r - distance);
1459 *  
1460 *   }
1461 *   }
1462 *   }
1463 *  
1464 * @endcode
1465 *
1466 * The final step is to normalize the matrix so that for each
1467 * row, the sum of entries equals one.
1468 *
1469 * @code
1470 *   for (unsigned int i = 0; i < filter_matrix.m(); ++i)
1471 *   {
1472 *   double denominator = 0;
1473 *   for (SparseMatrix<double>::iterator iter = filter_matrix.begin(i);
1474 *   iter != filter_matrix.end(i);
1475 *   iter++)
1476 *   denominator = denominator + iter->value();
1477 *   for (SparseMatrix<double>::iterator iter = filter_matrix.begin(i);
1478 *   iter != filter_matrix.end(i);
1479 *   iter++)
1480 *   iter->value() = iter->value() / denominator;
1481 *   }
1482 *   }
1483 *  
1484 * @endcode
1485 *
1486 * The following helper function is used when building the filter matrix. It
1487 * creates a set of iterators to all cells within the filter radius of the given cell.
1488 * These are the neighboring cells that are relevant for the filter.
1489 *
1490 * @code
1491 *   template <int dim>
1492 *   std::set<typename Triangulation<dim>::cell_iterator>
1493 *   SANDTopOpt<dim>::find_relevant_neighbors(
1494 *   typename Triangulation<dim>::cell_iterator cell) const
1495 *   {
1496 *   std::set<unsigned int> neighbor_ids;
1497 *   std::set<typename Triangulation<dim>::cell_iterator> cells_to_check;
1498 *  
1499 *   neighbor_ids.insert(cell->active_cell_index());
1500 *   cells_to_check.insert(cell);
1501 *  
1502 *   bool new_neighbors_found;
1503 *   do
1504 *   {
1505 *   new_neighbors_found = false;
1506 *   for (const auto &check_cell :
1507 *   std::vector<typename Triangulation<dim>::cell_iterator>(
1508 *   cells_to_check.begin(), cells_to_check.end()))
1509 *   {
1510 *   for (const auto n : check_cell->face_indices())
1511 *   {
1512 *   if (!(check_cell->face(n)->at_boundary()))
1513 *   {
1514 *   const auto &neighbor = check_cell->neighbor(n);
1515 *   const double distance =
1516 *   cell->center().distance(neighbor->center());
1517 *   if ((distance < filter_r) &&
1518 *   !(neighbor_ids.count(neighbor->active_cell_index())))
1519 *   {
1520 *   cells_to_check.insert(neighbor);
1521 *   neighbor_ids.insert(neighbor->active_cell_index());
1522 *   new_neighbors_found = true;
1523 *   }
1524 *   }
1525 *   }
1526 *   }
1527 *   }
1528 *   while (new_neighbors_found);
1529 *   return cells_to_check;
1530 *   }
1531 *  
1532 * @endcode
1533 *
1534 *
1535 * <a name="step_79-AssemblingtheNewtonmatrix"></a>
1536 * <h3>Assembling the Newton matrix</h3>
1537 *
1538
1539 *
1540 * Whereas the setup_filter_matrix function built a matrix that stays the same as
1541 * long as the mesh does not change (and the mesh never changes in
1542 * this program), the next function builds the matrix of the linear system to be solved
1543 * in each iteration. This is where the magic happens. The components
1544 * of the system of linear equations describing Newton's method for
1545 * finding the solution of the KKT conditions are implemented here.
1546 *
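 * Schematically -- ignoring for a moment the barrier terms and the
 * particular block structure of this program -- each Newton step
 * solves a linearized KKT system of the familiar saddle-point form
 * @f[
 *   \begin{pmatrix} \nabla^2_{xx} L & A^T \\ A & 0 \end{pmatrix}
 *   \begin{pmatrix} \delta x \\ \delta \lambda \end{pmatrix}
 *   = -\begin{pmatrix} \nabla_x L \\ c(x) \end{pmatrix},
 * @f]
 * where @f$L@f$ is the Lagrangian, @f$c(x)=0@f$ collects the equality
 * constraints, and @f$A@f$ is the constraint Jacobian. The many terms
 * assembled below are the blocks of exactly such a matrix for the
 * specific Lagrangian of this problem, spelled out in the
 * introduction.
 *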
1547
1548 *
1549 * The top of the function is as in most of these functions and just
1550 * sets up all sorts of variables necessary for the actual assembly,
1551 * including a whole bunch of extractors. The entire setup should
1552 * look familiar, though somewhat lengthier, if you've previously
1553 * looked at @ref step_22 "step-22".
1554 *
1555 * @code
1556 *   template <int dim>
1557 *   void SANDTopOpt<dim>::assemble_system()
1558 *   {
1559 *   TimerOutput::Scope t(timer, "assembly");
1560 *  
1561 *   system_matrix = 0;
1562 *   system_rhs = 0;
1563 *  
1564 *  
1565 *   MappingQ<dim> mapping(1);
1566 *   QGauss<dim> quadrature_formula(fe.degree + 1);
1567 *   QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
1568 *   FEValues<dim> fe_values(mapping,
1569 *   fe,
1570 *   quadrature_formula,
1571 *   update_values | update_gradients |
1572 *   update_quadrature_points | update_JxW_values);
1573 *   FEFaceValues<dim> fe_face_values(mapping,
1574 *   fe,
1575 *   face_quadrature_formula,
1576 *   update_values | update_quadrature_points |
1577 *   update_normal_vectors |
1578 *   update_JxW_values);
1579 *  
1580 *   const unsigned int dofs_per_cell = fe.dofs_per_cell;
1581 *   const unsigned int n_q_points = quadrature_formula.size();
1582 *  
1583 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1584 *   Vector<double> dummy_cell_rhs(dofs_per_cell);
1585 *  
1586 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1587 *  
1588 *   std::vector<double> lambda_values(n_q_points);
1589 *   std::vector<double> mu_values(n_q_points);
1590 *   const Functions::ConstantFunction<dim> lambda(1.);
1591 *   const Functions::ConstantFunction<dim> mu(1.);
1592 *   std::vector<Tensor<1, dim>> rhs_values(n_q_points);
1593 *  
1594 * @endcode
1595 *
1596 * At this point, we apply the filter to the unfiltered
1597 * density, and apply the adjoint (transpose) operation to the
1598 * unfiltered density multiplier, both to the current best
1599 * guess for the nonlinear solution. We use this later to tell
1600 * us how far off our filtered density is from the filter
1601 * applied to the unfiltered density. That is because while at
1602 * the solution of the nonlinear problem, we have
1603 * @f$\rho=H\varrho@f$, but at intermediate iterations, we in
1604 * general have @f$\rho^k\neq H\varrho^k@f$ and the "residual"
1605 * @f$\rho^k-H\varrho^k@f$ will then appear as the right hand side
1606 * of one of the Newton update equations that we compute
1607 * below.
1608 *
1609 * @code
1610 *   BlockVector<double> filtered_unfiltered_density_solution =
1611 *   nonlinear_solution;
1612 *   BlockVector<double> filter_adjoint_unfiltered_density_multiplier_solution =
1613 *   nonlinear_solution;
1614 *  
1615 *   filter_matrix.vmult(filtered_unfiltered_density_solution.block(
1616 *   SolutionBlocks::unfiltered_density),
1617 *   nonlinear_solution.block(
1618 *   SolutionBlocks::unfiltered_density));
1619 *   filter_matrix.Tvmult(
1620 *   filter_adjoint_unfiltered_density_multiplier_solution.block(
1621 *   SolutionBlocks::unfiltered_density_multiplier),
1622 *   nonlinear_solution.block(SolutionBlocks::unfiltered_density_multiplier));
1623 *  
1624 *  
1625 *   std::vector<double> old_density_values(n_q_points);
1626 *   std::vector<Tensor<1, dim>> old_displacement_values(n_q_points);
1627 *   std::vector<double> old_displacement_divs(n_q_points);
1628 *   std::vector<SymmetricTensor<2, dim>> old_displacement_symmgrads(n_q_points);
1629 *   std::vector<Tensor<1, dim>> old_displacement_multiplier_values(n_q_points);
1630 *   std::vector<double> old_displacement_multiplier_divs(n_q_points);
1631 *   std::vector<SymmetricTensor<2, dim>> old_displacement_multiplier_symmgrads(
1632 *   n_q_points);
1633 *   std::vector<double> old_lower_slack_multiplier_values(n_q_points);
1634 *   std::vector<double> old_upper_slack_multiplier_values(n_q_points);
1635 *   std::vector<double> old_lower_slack_values(n_q_points);
1636 *   std::vector<double> old_upper_slack_values(n_q_points);
1637 *   std::vector<double> old_unfiltered_density_values(n_q_points);
1638 *   std::vector<double> old_unfiltered_density_multiplier_values(n_q_points);
1639 *   std::vector<double> filtered_unfiltered_density_values(n_q_points);
1640 *   std::vector<double> filter_adjoint_unfiltered_density_multiplier_values(
1641 *   n_q_points);
1642 *  
1643 *   using namespace ValueExtractors;
1644 *   for (const auto &cell : dof_handler.active_cell_iterators())
1645 *   {
1646 *   cell_matrix = 0;
1647 *  
1648 *   cell->get_dof_indices(local_dof_indices);
1649 *  
1650 *   fe_values.reinit(cell);
1651 *  
1652 *   lambda.value_list(fe_values.get_quadrature_points(), lambda_values);
1653 *   mu.value_list(fe_values.get_quadrature_points(), mu_values);
1654 *  
1655 * @endcode
1656 *
1657 * As part of the construction of our system matrix, we need to
1658 * retrieve values from our current guess at the solution.
1659 * The following lines of code retrieve the needed values.
1660 *
1661 * @code
1662 *   fe_values[densities<dim>].get_function_values(nonlinear_solution,
1663 *   old_density_values);
1664 *   fe_values[displacements<dim>].get_function_values(
1665 *   nonlinear_solution, old_displacement_values);
1666 *   fe_values[displacements<dim>].get_function_divergences(
1667 *   nonlinear_solution, old_displacement_divs);
1668 *   fe_values[displacements<dim>].get_function_symmetric_gradients(
1669 *   nonlinear_solution, old_displacement_symmgrads);
1670 *   fe_values[displacement_multipliers<dim>].get_function_values(
1671 *   nonlinear_solution, old_displacement_multiplier_values);
1672 *   fe_values[displacement_multipliers<dim>].get_function_divergences(
1673 *   nonlinear_solution, old_displacement_multiplier_divs);
1674 *   fe_values[displacement_multipliers<dim>]
1675 *   .get_function_symmetric_gradients(
1676 *   nonlinear_solution, old_displacement_multiplier_symmgrads);
1677 *   fe_values[density_lower_slacks<dim>].get_function_values(
1678 *   nonlinear_solution, old_lower_slack_values);
1679 *   fe_values[density_lower_slack_multipliers<dim>].get_function_values(
1680 *   nonlinear_solution, old_lower_slack_multiplier_values);
1681 *   fe_values[density_upper_slacks<dim>].get_function_values(
1682 *   nonlinear_solution, old_upper_slack_values);
1683 *   fe_values[density_upper_slack_multipliers<dim>].get_function_values(
1684 *   nonlinear_solution, old_upper_slack_multiplier_values);
1685 *   fe_values[unfiltered_densities<dim>].get_function_values(
1686 *   nonlinear_solution, old_unfiltered_density_values);
1687 *   fe_values[unfiltered_density_multipliers<dim>].get_function_values(
1688 *   nonlinear_solution, old_unfiltered_density_multiplier_values);
1689 *   fe_values[unfiltered_densities<dim>].get_function_values(
1690 *   filtered_unfiltered_density_solution,
1691 *   filtered_unfiltered_density_values);
1692 *   fe_values[unfiltered_density_multipliers<dim>].get_function_values(
1693 *   filter_adjoint_unfiltered_density_multiplier_solution,
1694 *   filter_adjoint_unfiltered_density_multiplier_values);
1695 *  
1696 *   for (const auto q_point : fe_values.quadrature_point_indices())
1697 *   {
1698 * @endcode
1699 *
1700 * We need several more values corresponding to the test functions
1701 * coming from the first derivatives taken from the Lagrangian,
1702 * that is, the @f$d_{\bullet}@f$ functions. These are calculated here:
1703 *
1704 * @code
1705 *   for (const auto i : fe_values.dof_indices())
1706 *   {
1707 *   const SymmetricTensor<2, dim> displacement_phi_i_symmgrad =
1708 *   fe_values[displacements<dim>].symmetric_gradient(i, q_point);
1709 *   const double displacement_phi_i_div =
1710 *   fe_values[displacements<dim>].divergence(i, q_point);
1711 *  
1712 *   const SymmetricTensor<2, dim>
1713 *   displacement_multiplier_phi_i_symmgrad =
1714 *   fe_values[displacement_multipliers<dim>].symmetric_gradient(
1715 *   i, q_point);
1716 *   const double displacement_multiplier_phi_i_div =
1717 *   fe_values[displacement_multipliers<dim>].divergence(i,
1718 *   q_point);
1719 *  
1720 *   const double density_phi_i =
1721 *   fe_values[densities<dim>].value(i, q_point);
1722 *   const double unfiltered_density_phi_i =
1723 *   fe_values[unfiltered_densities<dim>].value(i, q_point);
1724 *   const double unfiltered_density_multiplier_phi_i =
1725 *   fe_values[unfiltered_density_multipliers<dim>].value(i,
1726 *   q_point);
1727 *  
1728 *   const double lower_slack_multiplier_phi_i =
1729 *   fe_values[density_lower_slack_multipliers<dim>].value(
1730 *   i, q_point);
1731 *  
1732 *   const double lower_slack_phi_i =
1733 *   fe_values[density_lower_slacks<dim>].value(i, q_point);
1734 *  
1735 *   const double upper_slack_phi_i =
1736 *   fe_values[density_upper_slacks<dim>].value(i, q_point);
1737 *  
1738 *   const double upper_slack_multiplier_phi_i =
1739 *   fe_values[density_upper_slack_multipliers<dim>].value(
1740 *   i, q_point);
1741 *  
1742 *  
1743 *   for (const auto j : fe_values.dof_indices())
1744 *   {
1745 * @endcode
1746 *
1747 * Finally, we need values that come from the second round
1748 * of derivatives taken from the Lagrangian,
1749 * the @f$c_{\bullet}@f$ functions. These are calculated here:
1750 *
1751 * @code
1752 *   const SymmetricTensor<2, dim> displacement_phi_j_symmgrad =
1753 *   fe_values[displacements<dim>].symmetric_gradient(j,
1754 *   q_point);
1755 *   const double displacement_phi_j_div =
1756 *   fe_values[displacements<dim>].divergence(j, q_point);
1757 *  
1758 *   const SymmetricTensor<2, dim>
1759 *   displacement_multiplier_phi_j_symmgrad =
1760 *   fe_values[displacement_multipliers<dim>]
1761 *   .symmetric_gradient(j, q_point);
1762 *   const double displacement_multiplier_phi_j_div =
1763 *   fe_values[displacement_multipliers<dim>].divergence(
1764 *   j, q_point);
1765 *  
1766 *   const double density_phi_j =
1767 *   fe_values[densities<dim>].value(j, q_point);
1768 *  
1769 *   const double unfiltered_density_phi_j =
1770 *   fe_values[unfiltered_densities<dim>].value(j, q_point);
1771 *   const double unfiltered_density_multiplier_phi_j =
1772 *   fe_values[unfiltered_density_multipliers<dim>].value(
1773 *   j, q_point);
1774 *  
1775 *  
1776 *   const double lower_slack_phi_j =
1777 *   fe_values[density_lower_slacks<dim>].value(j, q_point);
1778 *  
1779 *   const double upper_slack_phi_j =
1780 *   fe_values[density_upper_slacks<dim>].value(j, q_point);
1781 *  
1782 *   const double lower_slack_multiplier_phi_j =
1783 *   fe_values[density_lower_slack_multipliers<dim>].value(
1784 *   j, q_point);
1785 *  
1786 *   const double upper_slack_multiplier_phi_j =
1787 *   fe_values[density_upper_slack_multipliers<dim>].value(
1788 *   j, q_point);
1789 *  
1790 * @endcode
1791 *
1792 * This is where the actual work starts. In
1793 * the following, we will build all of the
1794 * terms of the matrix -- they are numerous
1795 * and not entirely self-explanatory, also
1796 * depending on the previous solution and its
1797 * derivatives (which we have already
1798 * evaluated above and put into the variables
1799 * called `old_*`). To understand what each of
1800 * these terms corresponds to, you will want
1801 * to look at the explicit form of these terms
1802 * in the introduction above.
1803 *
1804
1805 *
1806 * The right hand sides of the equations being
1807 * driven to 0 give all the KKT conditions
1808 * for finding a local minimum -- descriptions of what
1809 * each individual equation means are given with the computation
1810 * of the right hand side.
1811 *
1812
1813 *
1814 *
1815 * @code
1816 *   /* Equation 1 */
1817 *   cell_matrix(i, j) +=
1818 *   fe_values.JxW(q_point) *
1819 *   (
1820 *  
1821 *   -density_phi_i * unfiltered_density_multiplier_phi_j
1822 *  
1823 *   + density_penalty_exponent *
1824 *   (density_penalty_exponent - 1) *
1825 *   std::pow(old_density_values[q_point],
1826 *   density_penalty_exponent - 2) *
1827 *   density_phi_i * density_phi_j *
1828 *   (old_displacement_multiplier_divs[q_point] *
1829 *   old_displacement_divs[q_point] *
1830 *   lambda_values[q_point] +
1831 *   2 * mu_values[q_point] *
1832 *   (old_displacement_symmgrads[q_point] *
1833 *   old_displacement_multiplier_symmgrads[q_point]))
1834 *  
1835 *   + density_penalty_exponent *
1836 *   std::pow(old_density_values[q_point],
1837 *   density_penalty_exponent - 1) *
1838 *   density_phi_i *
1839 *   (displacement_multiplier_phi_j_div *
1840 *   old_displacement_divs[q_point] *
1841 *   lambda_values[q_point] +
1842 *   2 * mu_values[q_point] *
1843 *   (old_displacement_symmgrads[q_point] *
1844 *   displacement_multiplier_phi_j_symmgrad))
1845 *  
1846 *   + density_penalty_exponent *
1847 *   std::pow(old_density_values[q_point],
1848 *   density_penalty_exponent - 1) *
1849 *   density_phi_i *
1850 *   (displacement_phi_j_div *
1851 *   old_displacement_multiplier_divs[q_point] *
1852 *   lambda_values[q_point] +
1853 *   2 * mu_values[q_point] *
1854 *   (old_displacement_multiplier_symmgrads[q_point] *
1855 *   displacement_phi_j_symmgrad)));
1856 *  
1857 *   /* Equation 2 */
1858 *   cell_matrix(i, j) +=
1859 *   fe_values.JxW(q_point) *
1860 *   (density_penalty_exponent *
1861 *   std::pow(old_density_values[q_point],
1862 *   density_penalty_exponent - 1) *
1863 *   density_phi_j *
1864 *   (old_displacement_multiplier_divs[q_point] *
1865 *   displacement_phi_i_div * lambda_values[q_point] +
1866 *   2 * mu_values[q_point] *
1867 *   (old_displacement_multiplier_symmgrads[q_point] *
1868 *   displacement_phi_i_symmgrad))
1869 *  
1870 *   + std::pow(old_density_values[q_point],
1871 *   density_penalty_exponent) *
1872 *   (displacement_multiplier_phi_j_div *
1873 *   displacement_phi_i_div * lambda_values[q_point] +
1874 *   2 * mu_values[q_point] *
1875 *   (displacement_multiplier_phi_j_symmgrad *
1876 *   displacement_phi_i_symmgrad))
1877 *  
1878 *   );
1879 *  
1880 *   /* Equation 3, which has to do with the filter and which is
1881 *   * calculated elsewhere. */
1882 *   cell_matrix(i, j) +=
1883 *   fe_values.JxW(q_point) *
1884 *   (-1 * unfiltered_density_phi_i *
1885 *   lower_slack_multiplier_phi_j +
1886 *   unfiltered_density_phi_i * upper_slack_multiplier_phi_j);
1887 *  
1888 *  
1889 *   /* Equation 4: Primal feasibility */
1890 *   cell_matrix(i, j) +=
1891 *   fe_values.JxW(q_point) *
1892 *   (
1893 *  
1894 *   density_penalty_exponent *
1895 *   std::pow(old_density_values[q_point],
1896 *   density_penalty_exponent - 1) *
1897 *   density_phi_j *
1898 *   (old_displacement_divs[q_point] *
1899 *   displacement_multiplier_phi_i_div *
1900 *   lambda_values[q_point] +
1901 *   2 * mu_values[q_point] *
1902 *   (old_displacement_symmgrads[q_point] *
1903 *   displacement_multiplier_phi_i_symmgrad))
1904 *  
1905 *   + std::pow(old_density_values[q_point],
1906 *   density_penalty_exponent) *
1907 *   (displacement_phi_j_div *
1908 *   displacement_multiplier_phi_i_div *
1909 *   lambda_values[q_point] +
1910 *   2 * mu_values[q_point] *
1911 *   (displacement_phi_j_symmgrad *
1912 *   displacement_multiplier_phi_i_symmgrad)));
1913 *  
1914 *   /* Equation 5: Primal feasibility */
1915 *   cell_matrix(i, j) +=
1916 *   -1 * fe_values.JxW(q_point) *
1917 *   lower_slack_multiplier_phi_i *
1918 *   (unfiltered_density_phi_j - lower_slack_phi_j);
1919 *  
1920 *   /* Equation 6: Primal feasibility */
1921 *   cell_matrix(i, j) +=
1922 *   -1 * fe_values.JxW(q_point) *
1923 *   upper_slack_multiplier_phi_i *
1924 *   (-1 * unfiltered_density_phi_j - upper_slack_phi_j);
1925 *  
1926 *   /* Equation 7: Primal feasibility - the part with the filter
1927 *   * is added later */
1928 *   cell_matrix(i, j) += -1 * fe_values.JxW(q_point) *
1929 *   unfiltered_density_multiplier_phi_i *
1930 *   (density_phi_j);
1931 *  
1932 *   /* Equation 8: Complementary slackness */
1933 *   cell_matrix(i, j) +=
1934 *   fe_values.JxW(q_point) *
1935 *   (lower_slack_phi_i * lower_slack_multiplier_phi_j
1936 *  
1937 *   + lower_slack_phi_i * lower_slack_phi_j *
1938 *   old_lower_slack_multiplier_values[q_point] /
1939 *   old_lower_slack_values[q_point]);
1940 *  
1941 *   /* Equation 9: Complementary slackness */
1942 *   cell_matrix(i, j) +=
1943 *   fe_values.JxW(q_point) *
1944 *   (upper_slack_phi_i * upper_slack_multiplier_phi_j
1945 *  
1946 *  
1947 *   + upper_slack_phi_i * upper_slack_phi_j *
1948 *   old_upper_slack_multiplier_values[q_point] /
1949 *   old_upper_slack_values[q_point]);
1950 *   }
1951 *   }
1952 *   }
1953 *  
1954 * @endcode
1955 *
1956 * Now that we have everything assembled, all we have to
1957 * do is deal with the effect of (Dirichlet) boundary
1958 * conditions and other constraints. We incorporate the
1959 * former locally with just the contributions from the
1960 * current cell, and then let the AffineConstraints class
1961 * deal with the latter while copying contributions from
1962 * the current cell into the global linear system:
1963 *
1964 * @code
1965 *   MatrixTools::local_apply_boundary_values(boundary_values,
1966 *   local_dof_indices,
1967 *   cell_matrix,
1968 *   dummy_cell_rhs,
1969 *   true);
1970 *  
1971 *   constraints.distribute_local_to_global(cell_matrix,
1972 *   local_dof_indices,
1973 *   system_matrix);
1974 *   }
1975 *  
1976 * @endcode
1977 *
1978 * Having accumulated all of the terms that belong
1979 * in the Newton matrix, we now also have to
1980 * compute the terms for the right hand side
1981 * (i.e., the negative residual). We already do this
1982 * in another function, and so we call that here:
1983 *
1984 * @code
1985 *   system_rhs = calculate_test_rhs(nonlinear_solution);
1986 *  
1987 * @endcode
1988 *
1989 * Here we use the filter matrix we have already
1990 * constructed. We only need to integrate this filter applied
1991 * to test functions, which are piecewise constant, and so the
1992 * integration becomes a simple multiplication by the measure
1993 * of the cell. Iterating over the pre-made filter matrix
1994 * allows us to use the information about which cells are in
1995 * or out of the filter without having to search for neighboring
1996 * cells over and over again.
1997 *
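 * In formulas: because the unfiltered density and its multiplier are
 * discretized here with one piecewise constant shape function per
 * cell (each equal to one on exactly one cell), the filter term's
 * contribution to the matrix reduces to
 * @f[
 *   \int_\Omega \varphi^{\text{mult}}_i \,\bigl(H \varphi^{\text{unf}}_j\bigr)\, dx
 *   = H_{ij}\, |K_i| ,
 * @f]
 * where @f$|K_i|@f$ is the measure of cell @f$i@f$; the transposed
 * block receives the same values with the indices swapped. This is
 * exactly what the loop below adds.
 *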
1998 * @code
1999 *   for (const auto &cell : dof_handler.active_cell_iterators())
2000 *   {
2001 *   const unsigned int i = cell->active_cell_index();
2002 *   for (typename SparseMatrix<double>::iterator iter =
2003 *   filter_matrix.begin(i);
2004 *   iter != filter_matrix.end(i);
2005 *   ++iter)
2006 *   {
2007 *   const unsigned int j = iter->column();
2008 *   const double value = iter->value() * cell->measure();
2009 *  
2010 *   system_matrix
2011 *   .block(SolutionBlocks::unfiltered_density_multiplier,
2012 *   SolutionBlocks::unfiltered_density)
2013 *   .add(i, j, value);
2014 *   system_matrix
2015 *   .block(SolutionBlocks::unfiltered_density,
2016 *   SolutionBlocks::unfiltered_density_multiplier)
2017 *   .add(j, i, value);
2018 *   }
2019 *   }
2020 *   }
2021 *  
2022 *  
2023 * @endcode
2024 *
2025 *
2026 * <a name="step_79-SolvingtheNewtonlinearsystem"></a>
2027 * <h3>Solving the Newton linear system</h3>
2028 *
2029
2030 *
2031 *
2032
2033 *
2034 * We will need to solve a linear system in each iteration. We use
2035 * a direct solver, for now -- this is clearly not an efficient
2036 * choice for a matrix that has so many non-zeroes, and it will
2037 * not scale to anything interesting. For "real" applications, we
2038 * will need an iterative solver, but the complexity of the system
2039 * means that designing a suitable one will take a good deal
2040 * of work. Because this is not the focus of the current program,
2041 * we simply stick with the direct solver we have here -- the
2042 * function follows the same structure as used in @ref step_29 "step-29".
2043 *
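 * As an aside, and purely as a sketch (this is not what the program
 * does, and without a good preconditioner such a solver would likely
 * converge very slowly, if at all, on this indefinite saddle-point
 * system), swapping in an iterative solver would look roughly like
 * the following, where PreconditionIdentity is only a placeholder:
 *
 * @code
 *   // Hypothetical replacement for the SparseDirectUMFPACK lines in
 *   // the function below; requires <deal.II/lac/solver_gmres.h> and
 *   // <deal.II/lac/precondition.h>.
 *   SolverControl solver_control(10000, 1e-10 * system_rhs.l2_norm());
 *   SolverGMRES<BlockVector<double>> gmres(solver_control);
 *
 *   // A practical solver would need a block preconditioner tailored
 *   // to this system instead of the identity used here.
 *   gmres.solve(system_matrix, linear_solution, system_rhs,
 *               PreconditionIdentity());
 *   constraints.distribute(linear_solution);
 * @endcode
 *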
2044 * @code
2045 *   template <int dim>
2046 *   BlockVector<double> SANDTopOpt<dim>::solve()
2047 *   {
2048 *   TimerOutput::Scope t(timer, "solver");
2049 *  
2050 *   BlockVector<double> linear_solution;
2051 *   linear_solution.reinit(nonlinear_solution);
2052 *  
2053 *   SparseDirectUMFPACK A_direct;
2054 *   A_direct.initialize(system_matrix);
2055 *   A_direct.vmult(linear_solution, system_rhs);
2056 *  
2057 *   constraints.distribute(linear_solution);
2058 *  
2059 *   return linear_solution;
2060 *   }
2061 *  
2062 *  
2063 * @endcode
2064 *
2065 *
2066 * <a name="step_79-Detailsoftheoptimizationalgorithm"></a>
2067 * <h3>Details of the optimization algorithm</h3>
2068 *
2069
2070 *
2071 * The next several functions deal with specific parts of the
2072 * optimization algorithm, most notably with deciding whether the
2073 * direction computed by solving the linearized (Newton) system is
2074 * viable and, if so, how far we want to go in this direction.
2075 *
2076
2077 *
2078 *
2079 * <a name="step_79-Computingsteplengths"></a>
2080 * <h4>Computing step lengths</h4>
2081 *
2082
2083 *
2084 * We start with a function that does a binary search to figure
2085 * out the maximum step that satisfies dual feasibility -- that
2086 * is, how far we can go so that @f$s>0@f$ and @f$z>0@f$. The function
2087 * returns a pair of values, one each for the @f$s@f$ and @f$z@f$ slack
2088 * variables.
2089 *
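 * Written out, the bisection below looks for the largest step lengths
 * @f$\alpha_s,\alpha_z\in[0,1]@f$ for which
 * @f[
 *   \tau s + \alpha_s\,\Delta s \ge 0, \qquad
 *   \tau z + \alpha_z\,\Delta z \ge 0,
 * @f]
 * where @f$\tau@f$ is the `fraction_to_boundary` factor chosen below
 * based on the barrier size. This is just a rearranged form of the
 * usual interior-point fraction-to-boundary rule
 * @f$s + \alpha\,\Delta s \ge (1-\tau)\,s@f$.
 *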
2090 * @code
2091 *   template <int dim>
2092 *   std::pair<double, double> SANDTopOpt<dim>::calculate_max_step_size(
2093 *   const BlockVector<double> &state,
2094 *   const BlockVector<double> &step) const
2095 *   {
2096 *   double fraction_to_boundary;
2097 *   const double min_fraction_to_boundary = .8;
2098 *   const double max_fraction_to_boundary = 1. - 1e-5;
2099 *  
2100 *   if (min_fraction_to_boundary < 1 - barrier_size)
2101 *   {
2102 *   if (1 - barrier_size < max_fraction_to_boundary)
2103 *   fraction_to_boundary = 1 - barrier_size;
2104 *   else
2105 *   fraction_to_boundary = max_fraction_to_boundary;
2106 *   }
2107 *   else
2108 *   fraction_to_boundary = min_fraction_to_boundary;
2109 *  
2110 *   double step_size_s_low = 0;
2111 *   double step_size_z_low = 0;
2112 *   double step_size_s_high = 1;
2113 *   double step_size_z_high = 1;
2114 *   double step_size_s, step_size_z;
2115 *  
2116 *   const int max_bisection_method_steps = 50;
2117 *   for (unsigned int k = 0; k < max_bisection_method_steps; ++k)
2118 *   {
2119 *   step_size_s = (step_size_s_low + step_size_s_high) / 2;
2120 *   step_size_z = (step_size_z_low + step_size_z_high) / 2;
2121 *  
2122 *   const BlockVector<double> state_test_s =
2123 *   (fraction_to_boundary * state) + (step_size_s * step);
2124 *  
2125 *   const BlockVector<double> state_test_z =
2126 *   (fraction_to_boundary * state) + (step_size_z * step);
2127 *  
2128 *   const bool accept_s =
2129 *   (state_test_s.block(SolutionBlocks::density_lower_slack)
2130 *   .is_non_negative()) &&
2131 *   (state_test_s.block(SolutionBlocks::density_upper_slack)
2132 *   .is_non_negative());
2133 *   const bool accept_z =
2134 *   (state_test_z.block(SolutionBlocks::density_lower_slack_multiplier)
2135 *   .is_non_negative()) &&
2136 *   (state_test_z.block(SolutionBlocks::density_upper_slack_multiplier)
2137 *   .is_non_negative());
2138 *  
2139 *   if (accept_s)
2140 *   step_size_s_low = step_size_s;
2141 *   else
2142 *   step_size_s_high = step_size_s;
2143 *  
2144 *   if (accept_z)
2145 *   step_size_z_low = step_size_z;
2146 *   else
2147 *   step_size_z_high = step_size_z;
2148 *   }
2149 *  
2150 *   return {step_size_s_low, step_size_z_low};
2151 *   }
2152 *  
2153 *  
2154 * @endcode
2155 *
2156 *
2157 * <a name="step_79-Computingresiduals"></a>
2158 * <h4>Computing residuals</h4>
2159 *
2160
2161 *
2162 * The next function computes a right hand side vector linearized
2163 * around a "test solution vector" that we can use to look at the
2164 * magnitude of the KKT conditions. This is then used for testing
2165 * the convergence before shrinking the barrier size, as well as in the
2166 * calculation of the @f$l_1@f$ merit.
2167 *
2168
2169 *
2170 * The function is lengthy and complicated, but it is really just a
2171 * copy of the right hand side part of what the `assemble_system()`
2172 * function above did.
2173 *
2174 * @code
2175 *   template <int dim>
2176 *   BlockVector<double> SANDTopOpt<dim>::calculate_test_rhs(
2177 *   const BlockVector<double> &test_solution) const
2178 *   {
2179 * @endcode
2180 *
2181 * We first create a zero vector with the size and block structure of system_rhs.
2182 *
2183 * @code
2184 *   BlockVector<double> test_rhs;
2185 *   test_rhs.reinit(system_rhs);
2186 *  
2187 *   MappingQ<dim> mapping(1);
2188 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
2189 *   const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
2190 *   FEValues<dim> fe_values(mapping,
2191 *   fe,
2192 *   quadrature_formula,
2193 *   update_values | update_gradients |
2194 *   update_quadrature_points | update_JxW_values);
2195 *   FEFaceValues<dim> fe_face_values(mapping,
2196 *   fe,
2197 *   face_quadrature_formula,
2198 *   update_values | update_quadrature_points |
2199 *   update_normal_vectors |
2200 *   update_JxW_values);
2201 *  
2202 *   const unsigned int dofs_per_cell = fe.dofs_per_cell;
2203 *   const unsigned int n_q_points = quadrature_formula.size();
2204 *  
2205 *   Vector<double> cell_rhs(dofs_per_cell);
2206 *   FullMatrix<double> dummy_cell_matrix(dofs_per_cell, dofs_per_cell);
2207 *  
2208 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
2209 *  
2210 *   std::vector<double> lambda_values(n_q_points);
2211 *   std::vector<double> mu_values(n_q_points);
2212 *  
2213 *   const Functions::ConstantFunction<dim> lambda(1.), mu(1.);
2214 *   std::vector<Tensor<1, dim>> rhs_values(n_q_points);
2215 *  
2216 *  
2217 *   BlockVector<double> filtered_unfiltered_density_solution = test_solution;
2218 *   BlockVector<double> filter_adjoint_unfiltered_density_multiplier_solution =
2219 *   test_solution;
2220 *   filtered_unfiltered_density_solution.block(
2221 *   SolutionBlocks::unfiltered_density) = 0;
2222 *   filter_adjoint_unfiltered_density_multiplier_solution.block(
2223 *   SolutionBlocks::unfiltered_density_multiplier) = 0;
2224 *  
2225 *   filter_matrix.vmult(filtered_unfiltered_density_solution.block(
2226 *   SolutionBlocks::unfiltered_density),
2227 *   test_solution.block(
2228 *   SolutionBlocks::unfiltered_density));
2229 *   filter_matrix.Tvmult(
2230 *   filter_adjoint_unfiltered_density_multiplier_solution.block(
2231 *   SolutionBlocks::unfiltered_density_multiplier),
2232 *   test_solution.block(SolutionBlocks::unfiltered_density_multiplier));
2233 *  
2234 *  
2235 *   std::vector<double> old_density_values(n_q_points);
2236 *   std::vector<Tensor<1, dim>> old_displacement_values(n_q_points);
2237 *   std::vector<double> old_displacement_divs(n_q_points);
2238 *   std::vector<SymmetricTensor<2, dim>> old_displacement_symmgrads(n_q_points);
2239 *   std::vector<Tensor<1, dim>> old_displacement_multiplier_values(n_q_points);
2240 *   std::vector<double> old_displacement_multiplier_divs(n_q_points);
2241 *   std::vector<SymmetricTensor<2, dim>> old_displacement_multiplier_symmgrads(
2242 *   n_q_points);
2243 *   std::vector<double> old_lower_slack_multiplier_values(n_q_points);
2244 *   std::vector<double> old_upper_slack_multiplier_values(n_q_points);
2245 *   std::vector<double> old_lower_slack_values(n_q_points);
2246 *   std::vector<double> old_upper_slack_values(n_q_points);
2247 *   std::vector<double> old_unfiltered_density_values(n_q_points);
2248 *   std::vector<double> old_unfiltered_density_multiplier_values(n_q_points);
2249 *   std::vector<double> filtered_unfiltered_density_values(n_q_points);
2250 *   std::vector<double> filter_adjoint_unfiltered_density_multiplier_values(
2251 *   n_q_points);
2252 *  
2253 *   using namespace ValueExtractors;
2254 *   for (const auto &cell : dof_handler.active_cell_iterators())
2255 *   {
2256 *   cell_rhs = 0;
2257 *  
2258 *   cell->get_dof_indices(local_dof_indices);
2259 *  
2260 *   fe_values.reinit(cell);
2261 *  
2262 *   lambda.value_list(fe_values.get_quadrature_points(), lambda_values);
2263 *   mu.value_list(fe_values.get_quadrature_points(), mu_values);
2264 *  
2265 *   fe_values[densities<dim>].get_function_values(test_solution,
2266 *   old_density_values);
2267 *   fe_values[displacements<dim>].get_function_values(
2268 *   test_solution, old_displacement_values);
2269 *   fe_values[displacements<dim>].get_function_divergences(
2270 *   test_solution, old_displacement_divs);
2271 *   fe_values[displacements<dim>].get_function_symmetric_gradients(
2272 *   test_solution, old_displacement_symmgrads);
2273 *   fe_values[displacement_multipliers<dim>].get_function_values(
2274 *   test_solution, old_displacement_multiplier_values);
2275 *   fe_values[displacement_multipliers<dim>].get_function_divergences(
2276 *   test_solution, old_displacement_multiplier_divs);
2277 *   fe_values[displacement_multipliers<dim>]
2278 *   .get_function_symmetric_gradients(
2279 *   test_solution, old_displacement_multiplier_symmgrads);
2280 *   fe_values[density_lower_slacks<dim>].get_function_values(
2281 *   test_solution, old_lower_slack_values);
2282 *   fe_values[density_lower_slack_multipliers<dim>].get_function_values(
2283 *   test_solution, old_lower_slack_multiplier_values);
2284 *   fe_values[density_upper_slacks<dim>].get_function_values(
2285 *   test_solution, old_upper_slack_values);
2286 *   fe_values[density_upper_slack_multipliers<dim>].get_function_values(
2287 *   test_solution, old_upper_slack_multiplier_values);
2288 *   fe_values[unfiltered_densities<dim>].get_function_values(
2289 *   test_solution, old_unfiltered_density_values);
2290 *   fe_values[unfiltered_density_multipliers<dim>].get_function_values(
2291 *   test_solution, old_unfiltered_density_multiplier_values);
2292 *   fe_values[unfiltered_densities<dim>].get_function_values(
2293 *   filtered_unfiltered_density_solution,
2294 *   filtered_unfiltered_density_values);
2295 *   fe_values[unfiltered_density_multipliers<dim>].get_function_values(
2296 *   filter_adjoint_unfiltered_density_multiplier_solution,
2297 *   filter_adjoint_unfiltered_density_multiplier_values);
2298 *  
2299 *   for (const auto q_point : fe_values.quadrature_point_indices())
2300 *   {
2301 *   for (const auto i : fe_values.dof_indices())
2302 *   {
2303 *   const SymmetricTensor<2, dim> displacement_phi_i_symmgrad =
2304 *   fe_values[displacements<dim>].symmetric_gradient(i, q_point);
2305 *   const double displacement_phi_i_div =
2306 *   fe_values[displacements<dim>].divergence(i, q_point);
2307 *  
2308 *   const SymmetricTensor<2, dim>
2309 *   displacement_multiplier_phi_i_symmgrad =
2310 *   fe_values[displacement_multipliers<dim>].symmetric_gradient(
2311 *   i, q_point);
2312 *   const double displacement_multiplier_phi_i_div =
2313 *   fe_values[displacement_multipliers<dim>].divergence(i,
2314 *   q_point);
2315 *  
2316 *  
2317 *   const double density_phi_i =
2318 *   fe_values[densities<dim>].value(i, q_point);
2319 *   const double unfiltered_density_phi_i =
2320 *   fe_values[unfiltered_densities<dim>].value(i, q_point);
2321 *   const double unfiltered_density_multiplier_phi_i =
2322 *   fe_values[unfiltered_density_multipliers<dim>].value(i,
2323 *   q_point);
2324 *  
2325 *   const double lower_slack_multiplier_phi_i =
2326 *   fe_values[density_lower_slack_multipliers<dim>].value(
2327 *   i, q_point);
2328 *  
2329 *   const double lower_slack_phi_i =
2330 *   fe_values[density_lower_slacks<dim>].value(i, q_point);
2331 *  
2332 *   const double upper_slack_phi_i =
2333 *   fe_values[density_upper_slacks<dim>].value(i, q_point);
2334 *  
2335 *   const double upper_slack_multiplier_phi_i =
2336 *   fe_values[density_upper_slack_multipliers<dim>].value(
2337 *   i, q_point);
2338 *  
2339 *   /* Equation 1: This equation, along with equations
2340 *   * 2 and 3, gives the variational derivatives of the
2341 *   * Lagrangian with respect to the decision
2342 *   * variables - the density, displacement, and
2343 *   * unfiltered density. */
2344 *   cell_rhs(i) +=
2345 *   -1 * fe_values.JxW(q_point) *
2346 *   (density_penalty_exponent *
2347 *   std::pow(old_density_values[q_point],
2348 *   density_penalty_exponent - 1) *
2349 *   density_phi_i *
2350 *   (old_displacement_multiplier_divs[q_point] *
2351 *   old_displacement_divs[q_point] *
2352 *   lambda_values[q_point] +
2353 *   2 * mu_values[q_point] *
2354 *   (old_displacement_symmgrads[q_point] *
2355 *   old_displacement_multiplier_symmgrads[q_point])) -
2356 *   density_phi_i *
2357 *   old_unfiltered_density_multiplier_values[q_point]);
2358 *  
2359 *   /* Equation 2; the boundary terms will be added further down
2360 *   * below. */
2361 *   cell_rhs(i) +=
2362 *   -1 * fe_values.JxW(q_point) *
2363 *   (std::pow(old_density_values[q_point],
2364 *   density_penalty_exponent) *
2365 *   (old_displacement_multiplier_divs[q_point] *
2366 *   displacement_phi_i_div * lambda_values[q_point] +
2367 *   2 * mu_values[q_point] *
2368 *   (old_displacement_multiplier_symmgrads[q_point] *
2369 *   displacement_phi_i_symmgrad)));
2370 *  
2371 *   /* Equation 3 */
2372 *   cell_rhs(i) +=
2373 *   -1 * fe_values.JxW(q_point) *
2374 *   (unfiltered_density_phi_i *
2375 *   filter_adjoint_unfiltered_density_multiplier_values
2376 *   [q_point] +
2377 *   unfiltered_density_phi_i *
2378 *   old_upper_slack_multiplier_values[q_point] +
2379 *   -1 * unfiltered_density_phi_i *
2380 *   old_lower_slack_multiplier_values[q_point]);
2381 *  
2382 *  
2383 *  
2384 *   /* Equation 4; boundary term will again be dealt
2385 *   * with below. This equation being driven to 0
2386 *   * ensures that the elasticity equation is met as
2387 *   * a constraint. */
2388 *   cell_rhs(i) += -1 * fe_values.JxW(q_point) *
2389 *   (std::pow(old_density_values[q_point],
2390 *   density_penalty_exponent) *
2391 *   (old_displacement_divs[q_point] *
2392 *   displacement_multiplier_phi_i_div *
2393 *   lambda_values[q_point] +
2394 *   2 * mu_values[q_point] *
2395 *   (displacement_multiplier_phi_i_symmgrad *
2396 *   old_displacement_symmgrads[q_point])));
2397 *  
2398 *   /* Equation 5: This equation sets the lower slack
2399 *   * variable equal to the unfiltered density,
2400 *   * giving a minimum density of 0. */
2401 *   cell_rhs(i) += fe_values.JxW(q_point) *
2402 *   (lower_slack_multiplier_phi_i *
2403 *   (old_unfiltered_density_values[q_point] -
2404 *   old_lower_slack_values[q_point]));
2405 *  
2406 *   /* Equation 6: This equation sets the upper slack
2407 *   * variable equal to one minus the unfiltered
2408 *   * density. */
2409 *   cell_rhs(i) += fe_values.JxW(q_point) *
2410 *   (upper_slack_multiplier_phi_i *
2411 *   (1 - old_unfiltered_density_values[q_point] -
2412 *   old_upper_slack_values[q_point]));
2413 *  
2414 *   /* Equation 7: This is the difference between the
2415 *   * density and the filter applied to the
2416 *   * unfiltered density. This being driven to 0 by
2417 *   * the Newton steps ensures that the filter is
2418 *   * applied correctly. */
2419 *   cell_rhs(i) += fe_values.JxW(q_point) *
2420 *   (unfiltered_density_multiplier_phi_i *
2421 *   (old_density_values[q_point] -
2422 *   filtered_unfiltered_density_values[q_point]));
2423 *  
2424 *   /* Equation 8: This, along with equation 9, gives the
2425 *   * requirement that s*z = \alpha for the barrier
2426 *   * size alpha, and yields the complementary slackness
2427 *   * condition of the KKT conditions as \alpha goes to 0. */
2428 *   cell_rhs(i) +=
2429 *   -1 * fe_values.JxW(q_point) *
2430 *   (lower_slack_phi_i *
2431 *   (old_lower_slack_multiplier_values[q_point] -
2432 *   barrier_size / old_lower_slack_values[q_point]));
2433 *  
2434 *   /* Equation 9 */
2435 *   cell_rhs(i) +=
2436 *   -1 * fe_values.JxW(q_point) *
2437 *   (upper_slack_phi_i *
2438 *   (old_upper_slack_multiplier_values[q_point] -
2439 *   barrier_size / old_upper_slack_values[q_point]));
2440 *   }
2441 *   }
2442 *  
2443 *   for (const auto &face : cell->face_iterators())
2444 *   {
2445 *   if (face->at_boundary() &&
2446 *   face->boundary_id() == BoundaryIds::down_force)
2447 *   {
2448 *   fe_face_values.reinit(cell, face);
2449 *  
2450 *   for (const auto face_q_point :
2451 *   fe_face_values.quadrature_point_indices())
2452 *   {
2453 *   for (const auto i : fe_face_values.dof_indices())
2454 *   {
2455 *   Tensor<1, dim> traction;
2456 *   traction[1] = -1.;
2457 *  
2458 *   cell_rhs(i) +=
2459 *   -1 *
2460 *   (traction * fe_face_values[displacements<dim>].value(
2461 *   i, face_q_point)) *
2462 *   fe_face_values.JxW(face_q_point);
2463 *  
2464 *   cell_rhs(i) +=
2465 *   (traction *
2466 *   fe_face_values[displacement_multipliers<dim>].value(
2467 *   i, face_q_point)) *
2468 *   fe_face_values.JxW(face_q_point);
2469 *   }
2470 *   }
2471 *   }
2472 *   }
2473 *  
2474 *   MatrixTools::local_apply_boundary_values(boundary_values,
2475 *   local_dof_indices,
2476 *   dummy_cell_matrix,
2477 *   cell_rhs,
2478 *   true);
2479 *  
2480 *   constraints.distribute_local_to_global(cell_rhs,
2481 *   local_dof_indices,
2482 *   test_rhs);
2483 *   }
2484 *  
2485 *   return test_rhs;
2486 *   }
2487 *  
2488 *  
2489 * @endcode
2490 *
2491 *
2492 * <a name="step_79-Computingthemeritfunction"></a>
2493 * <h4>Computing the merit function</h4>
2494 *
2495
2496 *
2497 * The algorithm used here employs a "watchdog" strategy to
2498 * determine where and how far to go from the current iterate. We
2499 * base the watchdog strategy on an exact @f$l_1@f$ merit function. This
2500 * function calculates the exact @f$l_1@f$ merit of a given, putative,
2501 * next iterate.
2502 *
2503
2504 *
2505 * The merit function consists of the sum of the objective function
2506 * (which is simply an integral of the external forces on the boundary
2507 * of the domain times the displacement values of a test solution,
2508 * typically the current solution plus some multiple of the Newton
2509 * update), the logarithmic barrier terms, and the @f$l_1@f$ norms of the
2510 * Lagrange multiplier components of the residual vector.
2512 *
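 * Written as a single formula (with @f$\alpha@f$ the barrier size and
 * @f$p@f$ the penalty multiplier), the quantity computed here is
 * @f[
 *   \phi =
 *   \int_{\Gamma_{\text{force}}} \mathbf{t}\cdot\mathbf{u}\,ds
 *   \;-\; \alpha \sum_K |K| \left(\ln s^{\text{lower}}_K + \ln s^{\text{upper}}_K\right)
 *   \;+\; p \sum_m \bigl\|R_m\bigr\|_{l_1} ,
 * @f]
 * where the last sum runs over the Lagrange multiplier blocks
 * @f$R_m@f$ of the residual vector returned by `calculate_test_rhs()`.
 * The following code computes these parts in turn:
 *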
2513 * @code
2514 *   template <int dim>
2515 *   double SANDTopOpt<dim>::calculate_exact_merit(
2516 *   const BlockVector<double> &test_solution)
2517 *   {
2518 *   TimerOutput::Scope t(timer, "merit function");
2519 *  
2520 * @endcode
2521 *
2522 * Start with computing the objective function:
2523 *
2524 * @code
2525 *   double objective_function_merit = 0;
2526 *   {
2527 *   MappingQ<dim> mapping(1);
2528 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
2529 *   const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
2530 *   FEValues<dim> fe_values(mapping,
2531 *   fe,
2532 *   quadrature_formula,
2533 *   update_values | update_gradients |
2534 *   update_quadrature_points | update_JxW_values);
2535 *   FEFaceValues<dim> fe_face_values(mapping,
2536 *   fe,
2537 *   face_quadrature_formula,
2538 *   update_values |
2539 *   update_quadrature_points |
2540 *   update_normal_vectors |
2541 *   update_JxW_values);
2542 *  
2543 *   const unsigned int n_face_q_points = face_quadrature_formula.size();
2544 *  
2545 *   std::vector<Tensor<1, dim>> displacement_face_values(n_face_q_points);
2546 *  
2547 *   for (const auto &cell : dof_handler.active_cell_iterators())
2548 *   {
2549 *   for (const auto &face : cell->face_iterators())
2550 *   {
2551 *   if (face->at_boundary() &&
2552 *   face->boundary_id() == BoundaryIds::down_force)
2553 *   {
2554 *   fe_face_values.reinit(cell, face);
2555 *   fe_face_values[ValueExtractors::displacements<dim>]
2556 *   .get_function_values(test_solution,
2557 *   displacement_face_values);
2558 *   for (unsigned int face_q_point = 0;
2559 *   face_q_point < n_face_q_points;
2560 *   ++face_q_point)
2561 *   {
2562 *   Tensor<1, dim> traction;
2563 *   traction[1] = -1.;
2564 *  
2565 *   objective_function_merit +=
2566 *   (traction * displacement_face_values[face_q_point]) *
2567 *   fe_face_values.JxW(face_q_point);
2568 *   }
2569 *   }
2570 *   }
2571 *   }
2572 *   }
2573 *  
2574 *   for (const auto &cell : triangulation.active_cell_iterators())
2575 *   {
2576 *   objective_function_merit =
2577 *   objective_function_merit -
2578 *   barrier_size * cell->measure() *
2579 *   std::log(test_solution.block(
2580 *   SolutionBlocks::density_lower_slack)[cell->active_cell_index()]);
2581 *   objective_function_merit =
2582 *   objective_function_merit -
2583 *   barrier_size * cell->measure() *
2584 *   std::log(test_solution.block(
2585 *   SolutionBlocks::density_upper_slack)[cell->active_cell_index()]);
2586 *   }
2587 *  
2588 * @endcode
2589 *
2590 * Then compute the residual and take the @f$l_1@f$ norms of the
2591 * components that correspond to Lagrange multipliers. We add
2592 * those to the objective function computed above, and return
2593 * the sum at the bottom:
2594 *
2595 * @code
2596 *   const BlockVector<double> test_rhs = calculate_test_rhs(test_solution);
2597 *  
2598 *   const double elasticity_constraint_merit =
2599 *   penalty_multiplier *
2600 *   test_rhs.block(SolutionBlocks::displacement_multiplier).l1_norm();
2601 *   const double filter_constraint_merit =
2602 *   penalty_multiplier *
2603 *   test_rhs.block(SolutionBlocks::unfiltered_density_multiplier).l1_norm();
2604 *   const double lower_slack_merit =
2605 *   penalty_multiplier *
2606 *   test_rhs.block(SolutionBlocks::density_lower_slack_multiplier).l1_norm();
2607 *   const double upper_slack_merit =
2608 *   penalty_multiplier *
2609 *   test_rhs.block(SolutionBlocks::density_upper_slack_multiplier).l1_norm();
2610 *  
2611 *   const double total_merit =
2612 *   objective_function_merit + elasticity_constraint_merit +
2613 *   filter_constraint_merit + lower_slack_merit + upper_slack_merit;
2614 *   return total_merit;
2615 *   }
2616 *  
2617 *  
2618 *  
2619 * @endcode
2620 *
2621 *
2622 * <a name="step_79-Findingasearchdirection"></a>
2623 * <h4>Finding a search direction</h4>
2624 *
2625
2626 *
2627 * Next up is the function that actually computes a search direction
2628 * starting at the current state (passed as the first argument) and
2629 * returns the resulting vector. To this end, the function first
2630 * calls the functions that assemble the linear system that
2631 * corresponds to the Newton system, and that solve it.
2632 *
2633
2634 *
2635 * This function also updates the penalty multiplier in the merit
2636 * function, and then returns the largest scaled feasible step.
2637 * It uses the `calculate_max_step_size()` function to find the
2638 * largest feasible step that satisfies @f$s>0@f$ and @f$z>0@f$.
2639 *
2640
2641 *
2642 *
2643 * @code
2644 *   template <int dim>
2645 *   BlockVector<double> SANDTopOpt<dim>::find_max_step()
2646 *   {
2647 *   assemble_system();
2648 *   BlockVector<double> step = solve();
2649 *  
2650 * @endcode
2651 *
2652 * Next we are going to update penalty_multiplier. In
2653 * essence, a larger penalty multiplier makes us consider the
2654 * constraints more. Comparing the Hessian and gradient
2655 * contributions of the step we want to take in the decision
2656 * variables with the norm of our constraint
2657 * error gives us a way to ensure that our merit function is
2658 * "exact" -- that is, that it has a minimum in the same location
2659 * that the objective function does. As our merit function is
2660 * exact for any penalty multiplier over some minimum value,
2661 * we only keep the computed value if it increases the penalty
2662 * multiplier.
2663 *
2664
2665 *
2666 *
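 * In formulas, with @f$a@f$ the curvature term `hess_part`
 * @f$= d^T H d@f$ (restricted to the decision-variable blocks),
 * @f$b@f$ the corresponding gradient term `grad_part`, and
 * @f$\|c\|@f$ the summed @f$l_\infty@f$ norms of the constraint
 * residual blocks (`constraint_norm`), the candidate value computed
 * below is
 * @f[
 *   p_{\text{test}} = \frac{b + \tfrac{1}{2}\max(a,0)}{0.05\,\|c\|},
 *   \qquad
 *   p \leftarrow \max(p,\, p_{\text{test}}).
 * @f]
 *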
2667 * @code
2668 *   const std::vector<unsigned int> decision_variables = {
2669 *   SolutionBlocks::density,
2670 *   SolutionBlocks::displacement,
2671 *   SolutionBlocks::unfiltered_density,
2672 *   SolutionBlocks::density_upper_slack,
2673 *   SolutionBlocks::density_lower_slack};
2674 *   double hess_part = 0;
2675 *   double grad_part = 0;
2676 *   for (const unsigned int decision_variable_i : decision_variables)
2677 *   {
2678 *   for (const unsigned int decision_variable_j : decision_variables)
2679 *   {
2680 *   Vector<double> temp_vector(step.block(decision_variable_i).size());
2681 *   system_matrix.block(decision_variable_i, decision_variable_j)
2682 *   .vmult(temp_vector, step.block(decision_variable_j));
2683 *   hess_part += step.block(decision_variable_i) * temp_vector;
2684 *   }
2685 *   grad_part -= system_rhs.block(decision_variable_i) *
2686 *   step.block(decision_variable_i);
2687 *   }
2688 *  
2689 *   const std::vector<unsigned int> equality_constraint_multipliers = {
2690 *   SolutionBlocks::displacement_multiplier,
2691 *   SolutionBlocks::unfiltered_density_multiplier,
2692 *   SolutionBlocks::density_lower_slack_multiplier,
2693 *   SolutionBlocks::density_upper_slack_multiplier};
2694 *   double constraint_norm = 0;
2695 *   for (const unsigned int multiplier_i : equality_constraint_multipliers)
2696 *   constraint_norm += system_rhs.block(multiplier_i).linfty_norm();
2697 *  
2698 *  
2699 *   double test_penalty_multiplier;
2700 *   if (hess_part > 0)
2701 *   test_penalty_multiplier =
2702 *   (grad_part + .5 * hess_part) / (.05 * constraint_norm);
2703 *   else
2704 *   test_penalty_multiplier = (grad_part) / (.05 * constraint_norm);
2705 *  
2706 *   penalty_multiplier = std::max(penalty_multiplier, test_penalty_multiplier);
2707 *  
2708 * @endcode
2709 *
2710 * Based on all of this, we can now compute step sizes for the
2711 * primal and dual (Lagrange multiplier) variables. Once we
2712 * have these, we scale the components of the solution vector,
2713 * and that is what this function returns.
2714 *
2715 * @code
2716 *   const std::pair<double, double> max_step_sizes =
2717 *   calculate_max_step_size(nonlinear_solution, step);
2718 *   const double step_size_s = max_step_sizes.first;
2719 *   const double step_size_z = max_step_sizes.second;
2720 *  
2721 *   step.block(SolutionBlocks::density) *= step_size_s;
2722 *   step.block(SolutionBlocks::displacement) *= step_size_s;
2723 *   step.block(SolutionBlocks::unfiltered_density) *= step_size_s;
2724 *   step.block(SolutionBlocks::displacement_multiplier) *= step_size_z;
2725 *   step.block(SolutionBlocks::unfiltered_density_multiplier) *= step_size_z;
2726 *   step.block(SolutionBlocks::density_lower_slack) *= step_size_s;
2727 *   step.block(SolutionBlocks::density_lower_slack_multiplier) *= step_size_z;
2728 *   step.block(SolutionBlocks::density_upper_slack) *= step_size_s;
2729 *   step.block(SolutionBlocks::density_upper_slack_multiplier) *= step_size_z;
2730 *  
2731 *   return step;
2732 *   }
2733 *  
2734 *  
2735 *  
2736 * @endcode
2737 *
2738 *
2739 * <a name="step_79-Computingascaledstep"></a>
2740 * <h4>Computing a scaled step</h4>
2741 *
2742
2743 *
2744 * The next function then implements a back-tracking line search. It
2745 * keeps halving the step size until it finds one for which the merit
2746 * function decreases sufficiently, and then returns the new location,
2747 * namely the current state vector plus the search direction scaled by
2748 * the accepted step length.
2749 *
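* The acceptance criterion used here is the usual sufficient decrease
* (Armijo) condition: writing @f$\phi@f$ for the merit function,
* @f$d@f$ for the direction `max_step`, and @f$c@f$ for the
* `descent_requirement` parameter, a step length @f$\alpha@f$ is
* accepted as soon as
* @f[
*   \phi(x + \alpha d) < \phi(x) + c\, \alpha\, D,
* @f]
* where @f$D \approx \phi'(x; d)@f$ is the forward difference quotient
* with increment @f$10^{-4}@f$ computed at the top of the function. The
* step length starts at @f$\alpha = 1@f$ and is halved at most ten times.
*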
2750 * @code
2751 *   template <int dim>
2752 *   BlockVector<double>
2753 *   SANDTopOpt<dim>::compute_scaled_step(const BlockVector<double> &state,
2754 *   const BlockVector<double> &max_step,
2755 *   const double descent_requirement)
2756 *   {
2757 *   const double merit_derivative =
2758 *   (calculate_exact_merit(state + 1e-4 * max_step) -
2759 *   calculate_exact_merit(state)) /
2760 *   1e-4;
2761 *   double step_size = 1;
2762 *   unsigned int max_linesearch_iterations = 10;
2763 *   for (unsigned int k = 0; k < max_linesearch_iterations; ++k)
2764 *   {
2765 *   if (calculate_exact_merit(state + step_size * max_step) <
2766 *   calculate_exact_merit(state) +
2767 *   step_size * descent_requirement * merit_derivative)
2768 *   break;
2769 *   else
2770 *   step_size = step_size / 2;
2771 *   }
2772 *   return state + (step_size * max_step);
2773 *   }
2774 *  
2775 *  
2776 * @endcode
2777 *
2778 *
2779 * <a name="step_79-Checkingforconvergence"></a>
2780 * <h4>Checking for convergence</h4>
2781 *
2782
2783 *
2784 * The final auxiliary function in this block is the one that checks
2785 * to see if the KKT conditions are sufficiently met so that the
2786 * overall algorithm can lower the barrier size. It does so by
2787 * computing the @f$l_1@f$ norm of the residual vector returned by
2788 * `calculate_test_rhs()`.
2789 *
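* In formulas: denoting by @f$r(x)@f$ the residual vector returned by
* `calculate_test_rhs()` and by @f$\mu@f$ the current barrier size, the
* function returns whether
* @f[
*   \|r(x)\|_1 < 10^{-2} \, \mu .
* @f]
*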
2790 * @code
2791 *   template <int dim>
2792 *   bool SANDTopOpt<dim>::check_convergence(const BlockVector<double> &state)
2793 *   {
2794 *   const BlockVector<double> test_rhs = calculate_test_rhs(state);
2795 *   const double test_rhs_norm = test_rhs.l1_norm();
2796 *  
2797 *   const double convergence_condition = 1e-2;
2798 *   const double target_norm = convergence_condition * barrier_size;
2799 *  
2800 *   std::cout << " Checking convergence. Current rhs norm is "
2801 *   << test_rhs_norm << ", target is " << target_norm << std::endl;
2802 *  
2803 *   return (test_rhs_norm < target_norm);
2804 *   }
2805 *  
2806 *  
2807 * @endcode
2808 *
2809 *
2810 * <a name="step_79-Postprocessingthesolution"></a>
2811 * <h3>Postprocessing the solution</h3>
2812 *
2813
2814 *
2815 * The first of the postprocessing functions outputs information
2816 * in a VTU file for visualization. It looks long, but it's really
2817 * just the same as what was done in @ref step_22 "step-22", for example, just
2818 * with (a lot) more solution variables:
2819 *
2820 * @code
2821 *   template <int dim>
2822 *   void SANDTopOpt<dim>::output_results(const unsigned int iteration) const
2823 *   {
2824 *   std::vector<std::string> solution_names(1, "density");
2825 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
2826 *   data_component_interpretation(
2827 *   1, DataComponentInterpretation::component_is_scalar);
2828 *   for (unsigned int i = 0; i < dim; ++i)
2829 *   {
2830 *   solution_names.emplace_back("displacement");
2831 *   data_component_interpretation.push_back(
2832 *   DataComponentInterpretation::component_is_part_of_vector);
2833 *   }
2834 *   solution_names.emplace_back("unfiltered_density");
2835 *   data_component_interpretation.push_back(
2836 *   DataComponentInterpretation::component_is_scalar);
2837 *   for (unsigned int i = 0; i < dim; ++i)
2838 *   {
2839 *   solution_names.emplace_back("displacement_multiplier");
2840 *   data_component_interpretation.push_back(
2841 *   DataComponentInterpretation::component_is_part_of_vector);
2842 *   }
2843 *   solution_names.emplace_back("unfiltered_density_multiplier");
2844 *   data_component_interpretation.push_back(
2845 *   DataComponentInterpretation::component_is_scalar);
2846 *   solution_names.emplace_back("low_slack");
2847 *   data_component_interpretation.push_back(
2848 *   DataComponentInterpretation::component_is_scalar);
2849 *   solution_names.emplace_back("low_slack_multiplier");
2850 *   data_component_interpretation.push_back(
2851 *   DataComponentInterpretation::component_is_scalar);
2852 *   solution_names.emplace_back("high_slack");
2853 *   data_component_interpretation.push_back(
2854 *   DataComponentInterpretation::component_is_scalar);
2855 *   solution_names.emplace_back("high_slack_multiplier");
2856 *   data_component_interpretation.push_back(
2857 *   DataComponentInterpretation::component_is_scalar);
2858 *  
2859 *   DataOut<dim> data_out;
2860 *   data_out.attach_dof_handler(dof_handler);
2861 *   data_out.add_data_vector(nonlinear_solution,
2862 *   solution_names,
2863 *   DataOut<dim>::type_dof_data,
2864 *   data_component_interpretation);
2865 *   data_out.build_patches();
2866 *  
2867 *   std::ofstream output("solution" + std::to_string(iteration) + ".vtu");
2868 *   data_out.write_vtu(output);
2869 *   }
2870 *  
2871 *  
2872 * @endcode
2873 *
2874 * The second of these functions outputs the solution as an `.stl`
2875 * file for 3d
2876 * printing. [STL](https://en.wikipedia.org/wiki/STL_(file_format))
2877 * files are made up of triangles and normal vectors, and we will
2878 * use this format to show all of those cells with a density value
2879 * larger than one half by first extruding the mesh from a @f$z@f$ value
2880 * of zero to @f$z=0.25@f$, and then generating two triangles for each
2881 * face of the cells with a sufficiently large density value. The
2882 * triangle nodes must go counter-clockwise when looking from the
2883 * outside, and the normal vectors must be unit vectors pointing
2884 * outwards, which requires a few checks.
2885 *
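* For reference, each facet that the function below writes is a small
* block of text of the following form (the coordinates shown here are
* purely illustrative):
* @code
* facet normal 0.000000e+00 0.000000e+00 -1.000000e+00
*   outer loop
*     vertex 0.000000e+00 0.000000e+00 0.000000e+00
*     vertex 0.000000e+00 1.000000e+00 0.000000e+00
*     vertex 1.000000e+00 0.000000e+00 0.000000e+00
*   endloop
* endfacet
* @endcode
* All of these facets are bracketed by the `solid bridge` and
* `endsolid bridge` lines written at the very beginning and end of the
* function.
*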
2886 * @code
2887 *   template <int dim>
2888 *   void SANDTopOpt<dim>::write_as_stl()
2889 *   {
2890 *   static_assert(dim == 2,
2891 *   "This function is not implemented for anything "
2892 *   "other than the 2d case.");
2893 *  
2894 *   std::ofstream stlfile;
2895 *   stlfile.open("bridge.stl");
2896 *  
2897 *   stlfile << "solid bridge\n" << std::scientific;
2898 *   double height = .25;
2899 *  
2900 *   for (const auto &cell : dof_handler.active_cell_iterators())
2901 *   {
2902 *   if (nonlinear_solution.block(
2903 *   SolutionBlocks::density)[cell->active_cell_index()] > 0.5)
2904 *   {
2905 * @endcode
2906 *
2907 * We have now found a cell with a density value larger
2908 * than one half. Let us start by writing out the bottom
2909 * and top faces. Owing to the ordering issue mentioned
2910 * above, we have to make sure that we know
2911 * whether a cell has a right- or left-handed
2912 * coordinate system. We do this by looking at the
2913 * directions of the two edges starting at vertex 0 and
2914 * checking whether they form a right-handed coordinate system.
2915 *
2916 * @code
2917 *   const Tensor<1, dim> edge_directions[2] = {cell->vertex(1) -
2918 *   cell->vertex(0),
2919 *   cell->vertex(2) -
2920 *   cell->vertex(0)};
2921 *   const Tensor<2, dim> edge_tensor(
2922 *   {{edge_directions[0][0], edge_directions[0][1]},
2923 *   {edge_directions[1][0], edge_directions[1][1]}});
2924 *   const bool is_right_handed_cell = (determinant(edge_tensor) > 0);
2925 *  
2926 *   if (is_right_handed_cell)
2927 *   {
2928 *   /* Write one side at z = 0. */
2929 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2930 *   << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
2931 *   stlfile << " outer loop\n";
2932 *   stlfile << " vertex " << cell->vertex(0)[0] << ' '
2933 *   << cell->vertex(0)[1] << ' ' << 0.000000e+00 << '\n';
2934 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2935 *   << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
2936 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2937 *   << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
2938 *   stlfile << " endloop\n";
2939 *   stlfile << " endfacet\n";
2940 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2941 *   << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
2942 *   stlfile << " outer loop\n";
2943 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2944 *   << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
2945 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2946 *   << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
2947 *   stlfile << " vertex " << cell->vertex(3)[0] << ' '
2948 *   << cell->vertex(3)[1] << ' ' << 0.000000e+00 << '\n';
2949 *   stlfile << " endloop\n";
2950 *   stlfile << " endfacet\n";
2951 *  
2952 *   /* Write one side at z = height. */
2953 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2954 *   << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
2955 *   stlfile << " outer loop\n";
2956 *   stlfile << " vertex " << cell->vertex(0)[0] << ' '
2957 *   << cell->vertex(0)[1] << ' ' << height << '\n';
2958 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2959 *   << cell->vertex(1)[1] << ' ' << height << '\n';
2960 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2961 *   << cell->vertex(2)[1] << ' ' << height << '\n';
2962 *   stlfile << " endloop\n";
2963 *   stlfile << " endfacet\n";
2964 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2965 *   << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
2966 *   stlfile << " outer loop\n";
2967 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2968 *   << cell->vertex(1)[1] << ' ' << height << '\n';
2969 *   stlfile << " vertex " << cell->vertex(3)[0] << ' '
2970 *   << cell->vertex(3)[1] << ' ' << height << '\n';
2971 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2972 *   << cell->vertex(2)[1] << ' ' << height << '\n';
2973 *   stlfile << " endloop\n";
2974 *   stlfile << " endfacet\n";
2975 *   }
2976 *   else /* The cell has a left-handed set up */
2977 *   {
2978 *   /* Write one side at z = 0. */
2979 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2980 *   << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
2981 *   stlfile << " outer loop\n";
2982 *   stlfile << " vertex " << cell->vertex(0)[0] << ' '
2983 *   << cell->vertex(0)[1] << ' ' << 0.000000e+00 << '\n';
2984 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2985 *   << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
2986 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2987 *   << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
2988 *   stlfile << " endloop\n";
2989 *   stlfile << " endfacet\n";
2990 *   stlfile << " facet normal " << 0.000000e+00 << ' '
2991 *   << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
2992 *   stlfile << " outer loop\n";
2993 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
2994 *   << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
2995 *   stlfile << " vertex " << cell->vertex(3)[0] << ' '
2996 *   << cell->vertex(3)[1] << ' ' << 0.000000e+00 << '\n';
2997 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
2998 *   << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
2999 *   stlfile << " endloop\n";
3000 *   stlfile << " endfacet\n";
3001 *  
3002 *   /* Write one side at z = height. */
3003 *   stlfile << " facet normal " << 0.000000e+00 << ' '
3004 *   << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
3005 *   stlfile << " outer loop\n";
3006 *   stlfile << " vertex " << cell->vertex(0)[0] << ' '
3007 *   << cell->vertex(0)[1] << ' ' << height << '\n';
3008 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
3009 *   << cell->vertex(2)[1] << ' ' << height << '\n';
3010 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
3011 *   << cell->vertex(1)[1] << ' ' << height << '\n';
3012 *   stlfile << " endloop\n";
3013 *   stlfile << " endfacet\n";
3014 *   stlfile << " facet normal " << 0.000000e+00 << ' '
3015 *   << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
3016 *   stlfile << " outer loop\n";
3017 *   stlfile << " vertex " << cell->vertex(1)[0] << ' '
3018 *   << cell->vertex(1)[1] << ' ' << height << '\n';
3019 *   stlfile << " vertex " << cell->vertex(2)[0] << ' '
3020 *   << cell->vertex(2)[1] << ' ' << height << '\n';
3021 *   stlfile << " vertex " << cell->vertex(3)[0] << ' '
3022 *   << cell->vertex(3)[1] << ' ' << height << '\n';
3023 *   stlfile << " endloop\n";
3024 *   stlfile << " endfacet\n";
3025 *   }
3026 *  
3027 * @endcode
3028 *
3029 * Next we need to deal with the four faces of the
3030 * cell, extended into the @f$z@f$ direction. However, we
3031 * only need to write these faces if either the face
3032 * is on the domain boundary, or if it is the
3033 * interface between a cell with density greater than
3034 * 0.5, and a cell with a density less than 0.5.
3035 *
3036 * @code
3037 *   for (unsigned int face_number = 0;
3038 *   face_number < GeometryInfo<dim>::faces_per_cell;
3039 *   ++face_number)
3040 *   {
3041 *   const typename DoFHandler<dim>::face_iterator face =
3042 *   cell->face(face_number);
3043 *  
3044 *   if ((face->at_boundary()) ||
3045 *   (!face->at_boundary() &&
3046 *   (nonlinear_solution.block(
3047 *   0)[cell->neighbor(face_number)->active_cell_index()] <
3048 *   0.5)))
3049 *   {
3050 *   const Tensor<1, dim> normal_vector =
3051 *   (face->center() - cell->center());
3052 *   const double normal_norm = normal_vector.norm();
3053 *   if ((face->vertex(0)[0] - face->vertex(0)[0]) *
3054 *   (face->vertex(1)[1] - face->vertex(0)[1]) *
3055 *   0.000000e+00 +
3056 *   (face->vertex(0)[1] - face->vertex(0)[1]) * (0 - 0) *
3057 *   normal_vector[0] +
3058 *   (height - 0) *
3059 *   (face->vertex(1)[0] - face->vertex(0)[0]) *
3060 *   normal_vector[1] -
3061 *   (face->vertex(0)[0] - face->vertex(0)[0]) * (0 - 0) *
3062 *   normal_vector[1] -
3063 *   (face->vertex(0)[1] - face->vertex(0)[1]) *
3064 *   (face->vertex(1)[0] - face->vertex(0)[0]) *
3065 *   normal_vector[0] -
3066 *   (height - 0) *
3067 *   (face->vertex(1)[1] - face->vertex(0)[1]) * 0 >
3068 *   0)
3069 *   {
3070 *   stlfile << " facet normal "
3071 *   << normal_vector[0] / normal_norm << ' '
3072 *   << normal_vector[1] / normal_norm << ' '
3073 *   << 0.000000e+00 << '\n';
3074 *   stlfile << " outer loop\n";
3075 *   stlfile << " vertex " << face->vertex(0)[0]
3076 *   << ' ' << face->vertex(0)[1] << ' '
3077 *   << 0.000000e+00 << '\n';
3078 *   stlfile << " vertex " << face->vertex(0)[0]
3079 *   << ' ' << face->vertex(0)[1] << ' ' << height
3080 *   << '\n';
3081 *   stlfile << " vertex " << face->vertex(1)[0]
3082 *   << ' ' << face->vertex(1)[1] << ' '
3083 *   << 0.000000e+00 << '\n';
3084 *   stlfile << " endloop\n";
3085 *   stlfile << " endfacet\n";
3086 *   stlfile << " facet normal "
3087 *   << normal_vector[0] / normal_norm << ' '
3088 *   << normal_vector[1] / normal_norm << ' '
3089 *   << 0.000000e+00 << '\n';
3090 *   stlfile << " outer loop\n";
3091 *   stlfile << " vertex " << face->vertex(0)[0]
3092 *   << ' ' << face->vertex(0)[1] << ' ' << height
3093 *   << '\n';
3094 *   stlfile << " vertex " << face->vertex(1)[0]
3095 *   << ' ' << face->vertex(1)[1] << ' ' << height
3096 *   << '\n';
3097 *   stlfile << " vertex " << face->vertex(1)[0]
3098 *   << ' ' << face->vertex(1)[1] << ' '
3099 *   << 0.000000e+00 << '\n';
3100 *   stlfile << " endloop\n";
3101 *   stlfile << " endfacet\n";
3102 *   }
3103 *   else
3104 *   {
3105 *   stlfile << " facet normal "
3106 *   << normal_vector[0] / normal_norm << ' '
3107 *   << normal_vector[1] / normal_norm << ' '
3108 *   << 0.000000e+00 << '\n';
3109 *   stlfile << " outer loop\n";
3110 *   stlfile << " vertex " << face->vertex(0)[0]
3111 *   << ' ' << face->vertex(0)[1] << ' '
3112 *   << 0.000000e+00 << '\n';
3113 *   stlfile << " vertex " << face->vertex(1)[0]
3114 *   << ' ' << face->vertex(1)[1] << ' '
3115 *   << 0.000000e+00 << '\n';
3116 *   stlfile << " vertex " << face->vertex(0)[0]
3117 *   << ' ' << face->vertex(0)[1] << ' ' << height
3118 *   << '\n';
3119 *   stlfile << " endloop\n";
3120 *   stlfile << " endfacet\n";
3121 *   stlfile << " facet normal "
3122 *   << normal_vector[0] / normal_norm << ' '
3123 *   << normal_vector[1] / normal_norm << ' '
3124 *   << 0.000000e+00 << '\n';
3125 *   stlfile << " outer loop\n";
3126 *   stlfile << " vertex " << face->vertex(0)[0]
3127 *   << ' ' << face->vertex(0)[1] << ' ' << height
3128 *   << '\n';
3129 *   stlfile << " vertex " << face->vertex(1)[0]
3130 *   << ' ' << face->vertex(1)[1] << ' '
3131 *   << 0.000000e+00 << '\n';
3132 *   stlfile << " vertex " << face->vertex(1)[0]
3133 *   << ' ' << face->vertex(1)[1] << ' ' << height
3134 *   << '\n';
3135 *   stlfile << " endloop\n";
3136 *   stlfile << " endfacet\n";
3137 *   }
3138 *   }
3139 *   }
3140 *   }
3141 *   }
3142 *   stlfile << "endsolid bridge";
3143 *   }
3144 *  
3145 *  
3146 *  
3147 * @endcode
3148 *
3149 *
3150 * <a name="step_79-Therunfunctiondrivingtheoverallalgorithm"></a>
3151 * <h3>The run() function driving the overall algorithm</h3>
3152 *
3153
3154 *
3155 * This function finally provides the overall driver logic. It is,
3156 * in the grand scheme of things, a rather complicated function,
3157 * primarily because the optimization algorithm is difficult: It
3158 * isn't just about finding a Newton direction like in @ref step_15 "step-15" and
3159 * then going a fixed distance in this direction any more, but
3160 * instead about (i) determining what the optimal log-barrier
3161 * penalty parameter should be in the current step, (ii) running a
3162 * rather involved algorithm to determine how far we want to go, and
3163 * (iii) a number of other ingredients. Let us see how we can break
3164 * this down into smaller chunks in the following documentation.
3165 *
3166
3167 *
3168 * The function starts out simple enough with first setting up the
3169 * mesh, the DoFHandler, and then the various linear algebra objects
3170 * necessary for the following:
3171 *
3172 * @code
3173 *   template <int dim>
3174 *   void SANDTopOpt<dim>::run()
3175 *   {
3176 *   std::cout << "filter r is: " << filter_r << std::endl;
3177 *  
3178 *   {
3179 *   TimerOutput::Scope t(timer, "setup");
3180 *  
3181 *   create_triangulation();
3182 *  
3183 *   dof_handler.distribute_dofs(fe);
3184 *   DoFRenumbering::component_wise(dof_handler);
3185 *  
3186 *   setup_boundary_values();
3187 *   setup_block_system();
3188 *   setup_filter_matrix();
3189 *   }
3190 *  
3191 * @endcode
3192 *
3193 * We then set a number of parameters that affect the
3194 * log-barrier and line search components of the optimization
3195 * algorithm:
3196 *
3197 * @code
3198 *   barrier_size = 25;
3199 *   const double min_barrier_size = .0005;
3200 *  
3201 *   const unsigned int max_uphill_steps = 8;
3202 *   const double descent_requirement = .0001;
3203 *  
3204 *  
3205 * @endcode
3206 *
3207 * Now start the principal iteration. The overall algorithm
3208 * works with an outer loop that runs until either
3209 * (i) the log-barrier parameter has become small enough, or (ii)
3210 * we have reached convergence. In any case, we terminate if we
3211 * end up with too large a number of iterations. This overall
3212 * structure is encoded as a `do { ... } while (...)` loop
3213 * where the convergence condition is checked at the bottom.
3214 *
3215 * @code
3216 *   unsigned int iteration_number = 0;
3217 *   const unsigned int max_iterations = 10000;
3218 *  
3219 *   do
3220 *   {
3221 *   std::cout << "Starting outer step in iteration " << iteration_number
3222 *   << " with barrier parameter " << barrier_size << std::endl;
3223 *  
3224 * @endcode
3225 *
3226 * Within this outer loop, we have an inner loop in which we
3227 * try to find an update direction using the watchdog
3228 * algorithm described in the introduction.
3229 *
3230
3231 *
3232 * The general idea of the watchdog algorithm itself is
3233 * this: For a maximum of `max_uphill_steps` (i.e., a loop
3234 * within the "inner loop" mentioned above) attempts, we use
3235 * `find_max_step()` to compute a Newton update step, and
3236 * add these up in the `nonlinear_solution` vector. In each of
3237 * these attempts (starting from the place reached at the
3238 * end of the previous attempt), we check whether we have
3239 * reached a target value of the merit function described
3240 * above. The target value is computed based on where this
3241 * algorithm starts (the `nonlinear_solution` at the beginning of
3242 * the watchdog loop, saved as `watchdog_state`) and the
3243 * first proposed direction provided by `find_max_step()` in
3244 * the first go-around of this loop (the `k==0` case).
3245 *
3246 * @code
3247 *   do
3248 *   {
3249 *   std::cout << " Starting inner step in iteration "
3250 *   << iteration_number
3251 *   << " with merit function penalty multiplier "
3252 *   << penalty_multiplier << std::endl;
3253 *  
3254 *   bool watchdog_step_found = false;
3255 *  
3256 *   const BlockVector<double> watchdog_state = nonlinear_solution;
3257 *   BlockVector<double> first_step;
3258 *   double target_merit = numbers::signaling_nan<double>();
3259 *   double merit_derivative = numbers::signaling_nan<double>();
3260 *  
3261 *   for (unsigned int k = 0; k < max_uphill_steps; ++k)
3262 *   {
3263 *   ++iteration_number;
3264 *   const BlockVector<double> update_step = find_max_step();
3265 *  
3266 *   if (k == 0)
3267 *   {
3268 *   first_step = update_step;
3269 *   merit_derivative =
3270 *   ((calculate_exact_merit(watchdog_state +
3271 *   .0001 * first_step) -
3272 *   calculate_exact_merit(watchdog_state)) /
3273 *   .0001);
3274 *   target_merit = calculate_exact_merit(watchdog_state) +
3275 *   descent_requirement * merit_derivative;
3276 *   }
3277 *  
3278 *   nonlinear_solution += update_step;
3279 *   const double current_merit =
3280 *   calculate_exact_merit(nonlinear_solution);
3281 *  
3282 *   std::cout << " current watchdog state merit is: "
3283 *   << current_merit << "; target merit is "
3284 *   << target_merit << std::endl;
3285 *  
3286 *   if (current_merit < target_merit)
3287 *   {
3288 *   watchdog_step_found = true;
3289 *   std::cout << " found workable step after " << k + 1
3290 *   << " iterations" << std::endl;
3291 *   break;
3292 *   }
3293 *   }
3294 *  
3295 *  
3296 * @endcode
3297 *
3298 * The next part of the algorithm then depends on
3299 * whether the watchdog loop above succeeded. If it
3300 * did, then we are satisfied and no further action is
3301 * necessary: We just stay where we are. If, however,
3302 * we took the maximal number of unsuccessful steps in
3303 * the loop above, then we need to do something else,
3304 * and this is what the following code block does.
3305 *
3306
3307 *
3308 * Specifically, from the final (unsuccessful) state
3309 * of the loop above, we seek one more update
3310 * direction and take what is called a "stretch
3311 * step". If that stretch state satisfies a condition
3312 * involving the merit function, then we go there. On
3313 * the other hand, if the stretch state is also
3314 * unacceptable (as all of the watchdog steps above
3315 * were), then we discard all of the watchdog steps
3316 * taken above and start over again where we had
3317 * started the watchdog iterations -- that place was
3318 * stored in the `watchdog_state` variable above. More
3319 * specifically, the conditions below first test
3320 * whether we take a step from `watchdog_state` in
3321 * direction `first_step`, or whether we can do one
3322 * more update from the stretch state to find a new
3323 * place. It is possible that neither of these is
3324 * actually better than the state we started from at
3325 * the beginning of the watchdog algorithm, but even
3326 * if that is so, that place clearly was a difficult
3327 * place to be in, and getting away to start the next
3328 * iteration from another place might be a useful
3329 * strategy to eventually converge.
3330 *
3331
3332 *
3333 * We keep repeating the watchdog steps above along
3334 * with the logic below until this inner iteration is
3335 * finally converged (or if we run up against the
3336 * maximal number of iterations -- where we count the
3337 * number of linear solves as iterations and increment
3338 * the counter every time we call `find_max_step()`
3339 * since that is where the linear solve actually
3340 * happens). In any case, at the end of each of these
3341 * inner iterations we also output the solution in a
3342 * form suitable for visualization.
3343 *
3344
3345 *
3346 *
3347 * @code
3348 *   if (watchdog_step_found == false)
3349 *   {
3350 *   ++iteration_number;
3351 *   const BlockVector<double> update_step = find_max_step();
3352 *   const BlockVector<double> stretch_state =
3353 *   compute_scaled_step(nonlinear_solution,
3354 *   update_step,
3355 *   descent_requirement);
3356 *  
3357 * @endcode
3358 *
3359 * If we did not get a successful watchdog step,
3360 * we now need to decide between going back to
3361 * where we started, or using the final state. We
3362 * compare the merits of both of these locations,
3363 * and then take a scaled step from whichever
3364 * location is better. As the scaled step is
3365 * guaranteed to lower the merit, we will end up
3366 * keeping one of the two.
3367 *
3368 * @code
3369 *   if ((calculate_exact_merit(nonlinear_solution) <
3370 *   calculate_exact_merit(watchdog_state)) ||
3371 *   (calculate_exact_merit(stretch_state) < target_merit))
3372 *   {
3373 *   std::cout << " Taking scaled step from end of watchdog"
3374 *   << std::endl;
3375 *   nonlinear_solution = stretch_state;
3376 *   }
3377 *   else
3378 *   {
3379 *   std::cout
3380 *   << " Taking scaled step from beginning of watchdog"
3381 *   << std::endl;
3382 *   if (calculate_exact_merit(stretch_state) >
3383 *   calculate_exact_merit(watchdog_state))
3384 *   {
3385 *   nonlinear_solution =
3386 *   compute_scaled_step(watchdog_state,
3387 *   first_step,
3388 *   descent_requirement);
3389 *   }
3390 *   else
3391 *   {
3392 *   ++iteration_number;
3393 *   nonlinear_solution = stretch_state;
3394 *   const BlockVector<double> stretch_step =
3395 *   find_max_step();
3396 *   nonlinear_solution =
3397 *   compute_scaled_step(nonlinear_solution,
3398 *   stretch_step,
3399 *   descent_requirement);
3400 *   }
3401 *   }
3402 *   }
3403 *  
3404 *   output_results(iteration_number);
3405 *   }
3406 *   while ((iteration_number < max_iterations) &&
3407 *   (check_convergence(nonlinear_solution) == false));
3408 *  
3409 *  
3410 * @endcode
3411 *
3412 * At the end of the outer loop, we have to update the
3413 * barrier parameter, for which we use the following
3414 * formula. The rest of the function is then simply about
3415 * checking the outer loop convergence condition and, if
3416 * we decide to terminate computations, about writing the
3417 * final "design" as an STL file for use in 3d printing
3418 * and outputting some timing information.
3419 *
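* Written out, and denoting the barrier size by @f$\mu@f$ and the lower
* bound `min_barrier_size` by @f$\mu_{\min}@f$, the update below reads
* @f[
*   \mu \leftarrow \max\left(\min\left(0.8\,\mu,\; \mu^{1.2}\right),\; \mu_{\min}\right),
* @f]
* i.e., the barrier parameter is reduced geometrically while it is
* large and faster than geometrically once it has become small, but it
* is never allowed to drop below @f$\mu_{\min}@f$.
*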
3420 * @code
3421 *   const double barrier_size_multiplier = .8;
3422 *   const double barrier_size_exponent = 1.2;
3423 *  
3424 *   barrier_size =
3425 *   std::max(std::min(barrier_size * barrier_size_multiplier,
3426 *   std::pow(barrier_size, barrier_size_exponent)),
3427 *   min_barrier_size);
3428 *  
3429 *   std::cout << std::endl;
3430 *   }
3431 *   while (((barrier_size > min_barrier_size) ||
3432 *   (check_convergence(nonlinear_solution) == false)) &&
3433 *   (iteration_number < max_iterations));
3434 *  
3435 *   write_as_stl();
3436 *   }
3437 *   } // namespace SAND
3438 *  
3439 * @endcode
3440 *
3441 *
3442 * <a name="step_79-Themainfunction"></a>
3443 * <h3>The main function</h3>
3444 *
3445
3446 *
3447 * The remainder of the code, the `main()` function, is as usual:
3448 *
3449 * @code
3450 *   int main()
3451 *   {
3452 *   try
3453 *   {
3454 *   SAND::SANDTopOpt<2> elastic_problem_2d;
3455 *   elastic_problem_2d.run();
3456 *   }
3457 *   catch (std::exception &exc)
3458 *   {
3459 *   std::cerr << std::endl
3460 *   << std::endl
3461 *   << "----------------------------------------------------"
3462 *   << std::endl;
3463 *   std::cerr << "Exception on processing: " << std::endl
3464 *   << exc.what() << std::endl
3465 *   << "Aborting!" << std::endl
3466 *   << "----------------------------------------------------"
3467 *   << std::endl;
3468 *  
3469 *   return 1;
3470 *   }
3471 *   catch (...)
3472 *   {
3473 *   std::cerr << std::endl
3474 *   << std::endl
3475 *   << "----------------------------------------------------"
3476 *   << std::endl;
3477 *   std::cerr << "Unknown exception!" << std::endl
3478 *   << "Aborting!" << std::endl
3479 *   << "----------------------------------------------------"
3480 *   << std::endl;
3481 *   return 1;
3482 *   }
3483 *   return 0;
3484 *   }
3485 * @endcode
3486<a name="step_79-Results"></a><h1>Results</h1>
3487
3488<a name="step_79-TestProblem"></a><h3>Test Problem</h3>
3489
3490The algorithms used above are tested against a traditional topology optimization
3491 problem called the Messerschmitt-Bolkow-Blohm Beam (MBB Beam).
3492
3493This problem considers the optimal 2-d structure that can be built on a
3494rectangle 6 units wide, and 1 unit tall. The bottom corners are fixed in place
3495in the @f$y@f$ direction using a zero Dirichlet boundary condition, and a downward
3496force is applied in the center of the top of the beam by enforcing a Neumann
3497boundary condition. The rest of the boundary is allowed to move, and has no
3498external force applied, which takes the form of a zero Neumann boundary
3499condition. In essence, we are asking the following question: How should we
3500design a bridge so that its bottom left and bottom right points rest on rollers
3501that allow these points to move horizontally but not vertically, and so that
3502the displacement in response to the vertical force at the center of the top is
3503minimal?
3504
3505While the total volume of the domain is 6 units, 3 units of material are allowed for
3506the structure. Because of the symmetry of the problem, it could be posed on a
3507rectangle of width 3 and height 1 by cutting the original domain in half, and
3508using zero Dirichlet boundary conditions in the @f$x@f$ direction along the cut
3509edge. That said, symmetry of the solution is a good indicator that the program
3510is working as expected, so we solved the problem on the whole domain,
3511as shown below. @cite Bendse2004
3512
3513<div style="text-align:center;">
3514 <img src="https://www.dealii.org/images/steps/developer/step-79.mbbgeometry.png"
3515 alt="The MBB problem domain and boundary conditions">
3516</div>
3517
3518
3519Using the program discussed above, we find the minimum volume of the MBB Beam and the
3520individual components of the solution look as follows:
3521
3522<div class="onecolumn" style="width: 80%; text-align: center;">
3523 <div>
3524 <img src="https://www.dealii.org/images/steps/developer/step-79.filtereddensity.png"
3525 alt="Filtered Density Solution">
3526 </div>
3527 <div>
3528 <img src="https://www.dealii.org/images/steps/developer/step-79.unfiltereddensity.png"
3529 alt="Unfiltered Density Solution">
3530 </div>
3531</div>
3532
3533
3534These pictures show that what we find here is in accordance with what one
3535typically sees in other publications on the topic @cite Bendse2004.
3536Maybe more interestingly, the
3537result looks like a truss bridge (except that we apply the load at the top of
3538the trusses, rather than the bottom as in real truss bridges, akin to a "deck
3539truss" bridge), suggesting that the designs that have been used in bridge-building
3540for centuries are indeed based on ideas we can now show to be optimal
3541in some sense.
3542<a name="step_79-Possibilitiesforextensions"></a><h4>Possibilities for extensions</h4>
3543
3544
3545The results shown above took around 75 iterations to find, which is quite
3546concerning given the expense of solving the large linear systems in each
3547iteration. Looking at the evolution, the convergence alternates between phases
3548of rapid and of slow progress. We believe this to be due to both a lack of
3549precision on when and how to decrease the barrier value, as well as our choice
3550of merit function being sub-optimal. In the future, a LOQO barrier update
3551replacing the monotone reduction, as well as a Markov Filter in place of a
3552merit function, would likely decrease the number of necessary iterations
3553significantly.
3554
3555The barrier decrease is most sensitive in the middle of the convergence, which
3556is problematic, as it seems like we need it to decrease quickly, then slowly,
3557then quickly again.
3558
3559Secondly, the linear solver used here is just the sparse direct solver based on
3560the SparseDirectUMFPACK class. This works reasonably well on small problems,
3561but the formulation of the optimization problem detailed above has quite a large
3562number of variables and so the linear problem is not only large but also has a
3563lot of nonzero entries in many rows, even on meshes that overall are still
3564relatively coarse. As a consequence, the solver time dominates the
3565computations, and more sophisticated approaches to solving the linear system
3566are necessary, for example iterative solvers as sketched below.
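
As a purely illustrative sketch of such an extension (and not something the
program above actually does), one could replace the call to
SparseDirectUMFPACK in the `solve()` function by a Krylov method. The snippet
below assumes a `BlockVector<double>` called `newton_update` with the same
block structure as `system_rhs`; the identity preconditioner used here is only
a placeholder, and finding a preconditioner that actually works well for this
indefinite KKT system is precisely where the work of such an extension would
lie:
@code
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/solver_gmres.h>

// Hypothetical replacement for the direct solve: GMRES on the full block
// system, with a (placeholder) identity preconditioner.
SolverControl solver_control(/*max_iterations=*/10000,
                             /*tolerance=*/1e-10 * system_rhs.l2_norm());
SolverGMRES<BlockVector<double>> solver(solver_control);
solver.solve(system_matrix, newton_update, system_rhs, PreconditionIdentity());
@endcode
Whether such an approach can be made competitive depends almost entirely on
the quality of the preconditioner, for example one built around a Schur
complement of the elasticity block.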
3567 *
3568 *
3569<a name="step_79-PlainProg"></a>
3570<h1> The plain program</h1>
3571@include "step-79.cc"
3572*/