1243 * for (unsigned int i = 0; i < dim; ++i)
1245 * for (unsigned int k = 0; k < dim; ++k)
1247 * coupling[displacement<dim> + i]
1249 * coupling[displacement_multiplier<dim> + k]
1255 * coupling[density_lower_slack<dim>][density_lower_slack<dim>] =
1257 * coupling[density_lower_slack<dim>][density_upper_slack<dim>] =
1259 * coupling[density_upper_slack<dim>][density_lower_slack<dim>] =
1262 * coupling[density_lower_slack_multiplier<dim>]
1264 * coupling[density_lower_slack_multiplier<dim>]
1266 * coupling[density_upper_slack_multiplier<dim>]
1272 * Before we can create the sparsity pattern, we also have to
1273 * set up constraints. Since this program does not adaptively
1274 * refine the mesh, the only constraint we have is one that
1275 * couples all density variables to enforce the volume
1276 * constraint. This will ultimately lead to a dense sub-block
1277 * of the matrix, but there is little we can do about that.
1286 * density_dofs.nth_index_in_set(density_dofs.n_elements() - 1);
1287 * constraints.clear();
1289 * std::vector<std::pair<types::global_dof_index, double>>
1290 * constraint_entries;
1291 * constraint_entries.reserve(density_dofs.n_elements() - 1);
1293 * if (dof_index != last_density_dof)
1294 * constraint_entries.emplace_back(dof_index, -1.);
1296 * constraints.add_constraint(last_density_dof, constraint_entries, 0.);
1298 * constraints.close();
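*
* Spelled out, the constraint added here is the single linear relation
* @f$\rho_{\text{last}} = -\sum_{i \neq \text{last}} \rho_i@f$ with zero
* inhomogeneity, i.e., the sum over all density degrees of freedom is
* constrained to be zero. Because this constraint is applied to the Newton
* updates (through distribute_local_to_global() and distribute()), it is --
* on the uniform mesh used here -- what keeps the total material volume
* unchanged from one Newton iteration to the next.
*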
1302 * We can now finally create the sparsity pattern for the
1303 * matrix, taking into account which variables couple with
1304 * which other variables, and the constraints we have on the
1312 * The only part of the matrix we have not dealt with is the
1314 * (integral) operators for which deal.II does not currently
1315 * have functions. What we will ultimately need to do is go
1316 * over all cells and couple the unfiltered density on this
1317 * cell to all filtered densities of neighboring cells that
1318 * are less than a threshold distance away, and the other way
1319 * around; for the moment, we are only concerned with building
1320 * the sparsity pattern that would correspond to this kind of
1321 * matrix, so we perform the equivalent loop and where later
1322 * on we would write into an entry of the matrix, we now
1323 * simply add an entry to the sparsity matrix:
1326 * for (const auto &cell : dof_handler.active_cell_iterators())
1328 * const unsigned int i = cell->active_cell_index();
1329 * for (const auto &check_cell : find_relevant_neighbors(cell))
1331 * const double distance =
1333 * if (distance < filter_r)
1336 * .block(SolutionBlocks::unfiltered_density,
1337 * SolutionBlocks::unfiltered_density_multiplier)
1338 * .add(i, check_cell->active_cell_index());
1340 * .block(SolutionBlocks::unfiltered_density_multiplier,
1341 * SolutionBlocks::unfiltered_density)
1342 * .add(i, check_cell->active_cell_index());
1349 * Having so generated the "dynamic" sparsity pattern, we can
1350 * finally copy it to the structure that is used to associate
1351 * matrices with a sparsity pattern. Because the sparsity
1352 * pattern is large and complex, we also output it into a file
1353 * of its own for visualization purposes -- in other words,
1354 * for "visual debugging".
1357 * sparsity_pattern.copy_from(dsp);
1359 * std::ofstream out("sparsity.plt");
1360 * sparsity_pattern.print_gnuplot(out);
1362 * system_matrix.reinit(sparsity_pattern);
1367 * What is left is to correctly size the various vectors and
1368 * their blocks, as well as setting initial guesses for some
1369 * of the components of the (nonlinear) solution vector. We
1370 * here use the symbolic component names for individual blocks
1371 * of the solution vector and, for brevity, use the same trick
1372 * with `using namespace` as above:
1375 * nonlinear_solution.reinit(block_sizes);
1376 * system_rhs.reinit(block_sizes);
1379 * using namespace SolutionBlocks;
1380 * nonlinear_solution.block(density).add(density_ratio);
1381 * nonlinear_solution.block(unfiltered_density).add(density_ratio);
1382 * nonlinear_solution.block(unfiltered_density_multiplier)
1383 * .add(density_ratio);
1384 * nonlinear_solution.block(density_lower_slack).add(density_ratio);
1385 * nonlinear_solution.block(density_lower_slack_multiplier).add(50);
1386 * nonlinear_solution.block(density_upper_slack).add(1 - density_ratio);
1387 * nonlinear_solution.block(density_upper_slack_multiplier).add(50);
1395 * <a name="step_79-Creatingthefiltermatrix"></a>
1396 * <h3>Creating the filter matrix</h3>
1400 * Next up, a function that is used once at the beginning of the
1401 * program: It creates a matrix @f$H@f$ so that the filtered density
1402 * vector equals @f$H@f$ times the unfiltered density. The creation
1403 * of this matrix is non-trivial, and it is used in every
1404 * iteration, and so rather than reforming it as we do with the
1405 * Newton matrix, it is made only once and stored separately.
1409 * The way this matrix is computed follows the outline used above
1410 * already to form its sparsity pattern. We repeat this process here
1411 * for the sparsity pattern of this separately formed matrix, and
1412 * then actually build the matrix itself. You may want to check the
1413 * definition of this matrix in the introduction to this program.
1416 * template <int dim>
1417 * void SANDTopOpt<dim>::setup_filter_matrix()
1421 * The sparsity pattern of the filter has already been determined
1422 * and implemented in the setup_system() function. We copy the
1423 * structure from the appropriate block and use it again here.
1429 * filter_sparsity_pattern.copy_from(
1430 * sparsity_pattern.block(SolutionBlocks::unfiltered_density,
1431 * SolutionBlocks::unfiltered_density_multiplier));
1432 * filter_matrix.reinit(filter_sparsity_pattern);
1436 * Having so built the sparsity pattern, now we re-do all of
1437 * these loops to actually compute the necessary values of the
1444 * for (const auto &cell : dof_handler.active_cell_iterators())
1446 * const unsigned int i = cell->active_cell_index();
1447 * for (const auto &check_cell : find_relevant_neighbors(cell))
1449 * const double distance =
1451 * if (distance < filter_r)
1453 * filter_matrix.add(i,
1454 * check_cell->active_cell_index(),
1455 * filter_r - distance);
1463 * The final step is to normalize the matrix so that for each
1464 * row, the sum of entries equals one.
1467 * for (unsigned int i = 0; i < filter_matrix.m(); ++i)
1469 * double denominator = 0;
1471 * iter != filter_matrix.end(i);
1473 * denominator = denominator + iter->value();
1475 * iter != filter_matrix.end(i);
1477 * iter->value() = iter->value() / denominator;
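*
* Putting the assembly and normalization loops together, the matrix built in
* this function is (as far as one can read off the code) given entry-wise by
* @f$H_{ij} = \max(0, r - d_{ij}) / \sum_k \max(0, r - d_{ik})@f$, where
* @f$d_{ij}@f$ is the distance between the centers of cells @f$i@f$ and
* @f$j@f$ and @f$r@f$ is the filter radius `filter_r`; every row then sums
* to one by construction.
*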
1483 * This function is used for building the filter matrix. We create a set of
1484 * all the cell iterators within a certain radius of the cell that is input.
1485 * These are the neighboring cells that will be relevant for the filter.
1488 * template <int dim>
1489 * std::set<typename Triangulation<dim>::cell_iterator>
1490 * SANDTopOpt<dim>::find_relevant_neighbors(
1493 * std::set<unsigned int> neighbor_ids;
1494 * std::set<typename Triangulation<dim>::cell_iterator> cells_to_check;
1496 * neighbor_ids.insert(cell->active_cell_index());
1497 * cells_to_check.insert(cell);
1499 * bool new_neighbors_found;
1502 * new_neighbors_found = false;
1503 * for (const auto &check_cell :
1505 *      cells_to_check.begin(), cells_to_check.end()))
1507 * for (const auto n : check_cell->face_indices())
1509 * if (!(check_cell->face(n)->at_boundary()))
1511 * const auto &neighbor = check_cell->neighbor(n);
1512 * const double distance =
1513 * cell->center().distance(neighbor->center());
1514 * if ((distance < filter_r) &&
1515 * !(neighbor_ids.count(neighbor->active_cell_index())))
1517 * cells_to_check.insert(neighbor);
1518 * neighbor_ids.insert(neighbor->active_cell_index());
1519 * new_neighbors_found = true;
1525 * while (new_neighbors_found);
1526 * return cells_to_check;
1532 * <a name="step_79-AssemblingtheNewtonmatrix"></a>
1533 * <h3>Assembling the Newton matrix</h3>
1537 * Whereas the setup_filter_matrix function built a matrix that is the same as
1538 * long as the mesh does not change (which we don't do anyway in
1539 * this program), the next function builds the matrix to be solved
1540 * in each iteration. This is where the magic happens. The components
1541 * of the system of linear equations describing Newton's method for
1542 * finding the solution of the KKT conditions are implemented here.
1546 * The top of the function is as in most of these functions and just
1547 * sets up all sorts of variables necessary for the actual assembly,
1548 * including a whole bunch of extractors. The entire set up should
1549 * look familiar, though somewhat lengthier, if you've previously
1550 * looked at @ref step_22 "step-22".
1553 * template <int dim>
1554 * void SANDTopOpt<dim>::assemble_system()
1556 * TimerOutput::Scope t(timer, "assembly");
1558 * system_matrix = 0;
1562 * MappingQ<dim> mapping(1);
1563 * const QGauss<dim> quadrature_formula(fe.degree + 1);
1564 * const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
1565 * FEValues<dim> fe_values(mapping,
1567 * quadrature_formula,
1568 * update_values | update_gradients |
1569 * update_quadrature_points | update_JxW_values);
1570 * FEFaceValues<dim> fe_face_values(mapping,
1572 * face_quadrature_formula,
1573 * update_values | update_quadrature_points |
1574 * update_normal_vectors |
1575 * update_JxW_values);
1577 * const unsigned int dofs_per_cell = fe.dofs_per_cell;
1578 * const unsigned int n_q_points = quadrature_formula.size();
1580 * FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1581 * Vector<double> dummy_cell_rhs(dofs_per_cell);
1583 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1585 * std::vector<double> lambda_values(n_q_points);
1586 * std::vector<double> mu_values(n_q_points);
1587 * const Functions::ConstantFunction<dim> lambda(1.);
1588 * const Functions::ConstantFunction<dim> mu(1.);
1589 * std::vector<Tensor<1, dim>> rhs_values(n_q_points);
1593 * At this point, we apply the filter to the unfiltered
1594 * density, and apply the adjoint (transpose) operation to the
1595 * unfiltered density multiplier, both to the current best
1596 * guess for the nonlinear solution. We use this later to tell
1597 * us how far off our filtered density is from the filter
1598 * applied to the unfiltered density. That is because while at
1599 * the solution of the nonlinear problem, we have
1600 * @f$\rho=H\varrho@f$, but at intermediate iterations, we in
1601 * general have @f$\rho^k\neq H\varrho^k@f$ and the "residual"
1602 * @f$\rho^k-H\varrho^k@f$ will then appear as the right hand side
1603 * of one of the Newton update equations that we compute
1607 * BlockVector<double> filtered_unfiltered_density_solution =
1608 * nonlinear_solution;
1609 * BlockVector<double> filter_adjoint_unfiltered_density_multiplier_solution =
1610 * nonlinear_solution;
1612 * filter_matrix.vmult(filtered_unfiltered_density_solution.block(
1613 * SolutionBlocks::unfiltered_density),
1614 * nonlinear_solution.block(
1615 * SolutionBlocks::unfiltered_density));
1616 * filter_matrix.Tvmult(
1617 * filter_adjoint_unfiltered_density_multiplier_solution.block(
1618 * SolutionBlocks::unfiltered_density_multiplier),
1619 * nonlinear_solution.block(SolutionBlocks::unfiltered_density_multiplier));
1622 * std::vector<double> old_density_values(n_q_points);
1623 * std::vector<Tensor<1, dim>> old_displacement_values(n_q_points);
1624 * std::vector<double> old_displacement_divs(n_q_points);
1625 * std::vector<SymmetricTensor<2, dim>> old_displacement_symmgrads(n_q_points);
1626 * std::vector<Tensor<1, dim>> old_displacement_multiplier_values(n_q_points);
1627 * std::vector<double> old_displacement_multiplier_divs(n_q_points);
1628 * std::vector<SymmetricTensor<2, dim>> old_displacement_multiplier_symmgrads(
1630 * std::vector<double> old_lower_slack_multiplier_values(n_q_points);
1631 * std::vector<double> old_upper_slack_multiplier_values(n_q_points);
1632 * std::vector<double> old_lower_slack_values(n_q_points);
1633 * std::vector<double> old_upper_slack_values(n_q_points);
1634 * std::vector<double> old_unfiltered_density_values(n_q_points);
1635 * std::vector<double> old_unfiltered_density_multiplier_values(n_q_points);
1636 * std::vector<double> filtered_unfiltered_density_values(n_q_points);
1637 * std::vector<double> filter_adjoint_unfiltered_density_multiplier_values(
1640 * using namespace ValueExtractors;
1641 * for (const auto &cell : dof_handler.active_cell_iterators())
1645 * cell->get_dof_indices(local_dof_indices);
1647 * fe_values.reinit(cell);
1649 * lambda.value_list(fe_values.get_quadrature_points(), lambda_values);
1650 * mu.value_list(fe_values.get_quadrature_points(), mu_values);
1654 * As part of the construction of our system matrix, we need to
1655 * retrieve values from our current guess at the solution.
1656 * The following lines of code retrieve the needed values.
1659 * fe_values[densities<dim>].get_function_values(nonlinear_solution,
1660 * old_density_values);
1661 * fe_values[displacements<dim>].get_function_values(
1662 * nonlinear_solution, old_displacement_values);
1663 * fe_values[displacements<dim>].get_function_divergences(
1664 * nonlinear_solution, old_displacement_divs);
1665 * fe_values[displacements<dim>].get_function_symmetric_gradients(
1666 * nonlinear_solution, old_displacement_symmgrads);
1667 * fe_values[displacement_multipliers<dim>].get_function_values(
1668 * nonlinear_solution, old_displacement_multiplier_values);
1669 * fe_values[displacement_multipliers<dim>].get_function_divergences(
1670 * nonlinear_solution, old_displacement_multiplier_divs);
1671 * fe_values[displacement_multipliers<dim>]
1672 * .get_function_symmetric_gradients(
1673 * nonlinear_solution, old_displacement_multiplier_symmgrads);
1674 * fe_values[density_lower_slacks<dim>].get_function_values(
1675 * nonlinear_solution, old_lower_slack_values);
1676 * fe_values[density_lower_slack_multipliers<dim>].get_function_values(
1677 * nonlinear_solution, old_lower_slack_multiplier_values);
1678 * fe_values[density_upper_slacks<dim>].get_function_values(
1679 * nonlinear_solution, old_upper_slack_values);
1680 * fe_values[density_upper_slack_multipliers<dim>].get_function_values(
1681 * nonlinear_solution, old_upper_slack_multiplier_values);
1682 * fe_values[unfiltered_densities<dim>].get_function_values(
1683 * nonlinear_solution, old_unfiltered_density_values);
1684 * fe_values[unfiltered_density_multipliers<dim>].get_function_values(
1685 * nonlinear_solution, old_unfiltered_density_multiplier_values);
1686 * fe_values[unfiltered_densities<dim>].get_function_values(
1687 * filtered_unfiltered_density_solution,
1688 * filtered_unfiltered_density_values);
1689 * fe_values[unfiltered_density_multipliers<dim>].get_function_values(
1690 * filter_adjoint_unfiltered_density_multiplier_solution,
1691 * filter_adjoint_unfiltered_density_multiplier_values);
1693 * for (const auto q_point : fe_values.quadrature_point_indices())
1697 * We need several more values corresponding to the test functions
1698 * coming from the first derivatives taken from the Lagrangian,
1699 * that is the @f$d_{\bullet}@f$ functions. These are calculated here:
1702 * for (const auto i : fe_values.dof_indices())
1704 * const SymmetricTensor<2, dim> displacement_phi_i_symmgrad =
1705 * fe_values[displacements<dim>].symmetric_gradient(i, q_point);
1706 * const double displacement_phi_i_div =
1707 * fe_values[displacements<dim>].divergence(i, q_point);
1709 * const SymmetricTensor<2, dim>
1710 * displacement_multiplier_phi_i_symmgrad =
1711 * fe_values[displacement_multipliers<dim>].symmetric_gradient(
1713 * const double displacement_multiplier_phi_i_div =
1714 * fe_values[displacement_multipliers<dim>].divergence(i,
1717 * const double density_phi_i =
1718 * fe_values[densities<dim>].value(i, q_point);
1719 * const double unfiltered_density_phi_i =
1720 * fe_values[unfiltered_densities<dim>].value(i, q_point);
1721 * const double unfiltered_density_multiplier_phi_i =
1722 * fe_values[unfiltered_density_multipliers<dim>].value(i,
1725 * const double lower_slack_multiplier_phi_i =
1726 * fe_values[density_lower_slack_multipliers<dim>].value(
1729 * const double lower_slack_phi_i =
1730 * fe_values[density_lower_slacks<dim>].value(i, q_point);
1732 * const double upper_slack_phi_i =
1733 * fe_values[density_upper_slacks<dim>].value(i, q_point);
1735 * const double upper_slack_multiplier_phi_i =
1736 * fe_values[density_upper_slack_multipliers<dim>].value(
1740 * for (const auto j : fe_values.dof_indices())
1744 * Finally, we need values that come from the second round
1745 * of derivatives taken from the Lagrangian,
1746 * the @f$c_{\bullet}@f$ functions. These are calculated here:
1749 * const SymmetricTensor<2, dim> displacement_phi_j_symmgrad =
1750 * fe_values[displacements<dim>].symmetric_gradient(j,
1752 * const double displacement_phi_j_div =
1753 * fe_values[displacements<dim>].divergence(j, q_point);
1755 * const SymmetricTensor<2, dim>
1756 * displacement_multiplier_phi_j_symmgrad =
1757 * fe_values[displacement_multipliers<dim>]
1758 * .symmetric_gradient(j, q_point);
1759 * const double displacement_multiplier_phi_j_div =
1760 * fe_values[displacement_multipliers<dim>].divergence(
1763 * const double density_phi_j =
1764 * fe_values[densities<dim>].value(j, q_point);
1766 * const double unfiltered_density_phi_j =
1767 * fe_values[unfiltered_densities<dim>].value(j, q_point);
1768 * const double unfiltered_density_multiplier_phi_j =
1769 * fe_values[unfiltered_density_multipliers<dim>].value(
1773 * const double lower_slack_phi_j =
1774 * fe_values[density_lower_slacks<dim>].value(j, q_point);
1776 * const double upper_slack_phi_j =
1777 * fe_values[density_upper_slacks<dim>].value(j, q_point);
1779 * const double lower_slack_multiplier_phi_j =
1780 * fe_values[density_lower_slack_multipliers<dim>].value(
1783 * const double upper_slack_multiplier_phi_j =
1784 * fe_values[density_upper_slack_multipliers<dim>].value(
1789 * This is where the actual work starts. In
1790 * the following, we will build all of the
1791 * terms of the matrix -- they are numerous
1792 * and not entirely self-explanatory, also
1793 * depending on the previous solution and its
1794 * derivatives (which we have already
1795 * evaluated above and put into the variables
1796 * called `old_*`). To understand what each of
1797 * these terms corresponds to, you will want
1798 * to look at the explicit form of these terms
1799 * in the introduction above.
1803 * The right hand sides of the equations being
1804 * driven to 0 give all the KKT conditions
1805 * for finding a local minimum -- a description of what
1806 * each individual equation does is given with the computations
1807 * of the right hand side.
1814 * cell_matrix(i, j) +=
1815 * fe_values.JxW(q_point) *
1818 * -density_phi_i * unfiltered_density_multiplier_phi_j
1820 * + density_penalty_exponent *
1821 * (density_penalty_exponent - 1) *
1822 * std::pow(old_density_values[q_point],
1823 * density_penalty_exponent - 2) *
1824 * density_phi_i * density_phi_j *
1825 * (old_displacement_multiplier_divs[q_point] *
1826 * old_displacement_divs[q_point] *
1827 * lambda_values[q_point] +
1828 * 2 * mu_values[q_point] *
1829 * (old_displacement_symmgrads[q_point] *
1830 * old_displacement_multiplier_symmgrads[q_point]))
1832 * + density_penalty_exponent *
1833 * std::pow(old_density_values[q_point],
1834 * density_penalty_exponent - 1) *
1836 * (displacement_multiplier_phi_j_div *
1837 * old_displacement_divs[q_point] *
1838 * lambda_values[q_point] +
1839 * 2 * mu_values[q_point] *
1840 * (old_displacement_symmgrads[q_point] *
1841 * displacement_multiplier_phi_j_symmgrad))
1843 * + density_penalty_exponent *
1844 * std::pow(old_density_values[q_point],
1845 * density_penalty_exponent - 1) *
1847 * (displacement_phi_j_div *
1848 * old_displacement_multiplier_divs[q_point] *
1849 * lambda_values[q_point] +
1850 * 2 * mu_values[q_point] *
1851 * (old_displacement_multiplier_symmgrads[q_point] *
1852 * displacement_phi_j_symmgrad)));
1855 * cell_matrix(i, j) +=
1856 * fe_values.JxW(q_point) *
1857 * (density_penalty_exponent *
1858 * std::pow(old_density_values[q_point],
1859 * density_penalty_exponent - 1) *
1861 * (old_displacement_multiplier_divs[q_point] *
1862 * displacement_phi_i_div * lambda_values[q_point] +
1863 * 2 * mu_values[q_point] *
1864 * (old_displacement_multiplier_symmgrads[q_point] *
1865 * displacement_phi_i_symmgrad))
1867 * + std::pow(old_density_values[q_point],
1868 * density_penalty_exponent) *
1869 * (displacement_multiplier_phi_j_div *
1870 * displacement_phi_i_div * lambda_values[q_point] +
1871 * 2 * mu_values[q_point] *
1872 * (displacement_multiplier_phi_j_symmgrad *
1873 * displacement_phi_i_symmgrad))
1877 * /* Equation 3, which has to do with the filter and which is
1878 * * calculated elsewhere. */
1879 * cell_matrix(i, j) +=
1880 * fe_values.JxW(q_point) *
1881 * (-1 * unfiltered_density_phi_i *
1882 * lower_slack_multiplier_phi_j +
1883 * unfiltered_density_phi_i * upper_slack_multiplier_phi_j);
1886 * /* Equation 4: Primal feasibility */
1887 * cell_matrix(i, j) +=
1888 * fe_values.JxW(q_point) *
1891 * density_penalty_exponent *
1892 * std::pow(old_density_values[q_point],
1893 * density_penalty_exponent - 1) *
1895 * (old_displacement_divs[q_point] *
1896 * displacement_multiplier_phi_i_div *
1897 * lambda_values[q_point] +
1898 * 2 * mu_values[q_point] *
1899 * (old_displacement_symmgrads[q_point] *
1900 * displacement_multiplier_phi_i_symmgrad))
1902 * + std::pow(old_density_values[q_point],
1903 * density_penalty_exponent) *
1904 * (displacement_phi_j_div *
1905 * displacement_multiplier_phi_i_div *
1906 * lambda_values[q_point] +
1907 * 2 * mu_values[q_point] *
1908 * (displacement_phi_j_symmgrad *
1909 * displacement_multiplier_phi_i_symmgrad)));
1911 * /* Equation 5: Primal feasibility */
1912 * cell_matrix(i, j) +=
1913 * -1 * fe_values.JxW(q_point) *
1914 * lower_slack_multiplier_phi_i *
1915 * (unfiltered_density_phi_j - lower_slack_phi_j);
1917 * /* Equation 6: Primal feasibility */
1918 * cell_matrix(i, j) +=
1919 * -1 * fe_values.JxW(q_point) *
1920 * upper_slack_multiplier_phi_i *
1921 * (-1 * unfiltered_density_phi_j - upper_slack_phi_j);
1923 * /* Equation 7: Primal feasibility - the part with the filter
1924 * * is added later */
1925 * cell_matrix(i, j) += -1 * fe_values.JxW(q_point) *
1926 * unfiltered_density_multiplier_phi_i *
1929 * /* Equation 8: Complementary slackness */
1930 * cell_matrix(i, j) +=
1931 * fe_values.JxW(q_point) *
1932 * (lower_slack_phi_i * lower_slack_multiplier_phi_j
1934 * + lower_slack_phi_i * lower_slack_phi_j *
1935 * old_lower_slack_multiplier_values[q_point] /
1936 * old_lower_slack_values[q_point]);
1938 * /* Equation 9: Complementary slackness */
1939 * cell_matrix(i, j) +=
1940 * fe_values.JxW(q_point) *
1941 * (upper_slack_phi_i * upper_slack_multiplier_phi_j
1944 * + upper_slack_phi_i * upper_slack_phi_j *
1945 * old_upper_slack_multiplier_values[q_point] /
1946 * old_upper_slack_values[q_point]);
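*
* If one unravels the algebra, the two terms in each of equations 8 and 9
* above are the weak form of the linearized complementarity condition
* @f$s\,\delta z + z\,\delta s = -(sz - \alpha)@f$, scaled by @f$1/s@f$:
* the matrix rows test @f$\delta z + (z/s)\,\delta s@f$ against the slack
* test functions, and the matching right hand side @f$\alpha/s - z@f$ is
* assembled in `calculate_test_rhs()` below.
*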
1953 * Now that we have everything assembled, all we have to
1954 * do is deal with the effect of (Dirichlet) boundary
1955 * conditions and other constraints. We incorporate the
1956 * former locally with just the contributions from the
1957 * current cell, and then let the AffineConstraint class
1958 * deal with the latter while copying contributions from
1959 * the current cell into the global linear system:
1962 * MatrixTools::local_apply_boundary_values(boundary_values,
1963 * local_dof_indices,
1968 * constraints.distribute_local_to_global(cell_matrix,
1969 * local_dof_indices,
1975 * Having accumulated all of the terms that belong
1976 * into the Newton matrix, we now also have to
1977 * compute the terms for the right hand side
1978 * (i.e., the negative residual). We already do this
1979 * in another function, and so we call that here:
1982 * system_rhs = calculate_test_rhs(nonlinear_solution);
1986 * Here we use the filter matrix we have already
1987 * constructed. We only need to integrate this filter applied
1988 * to test functions, which are piecewise constant, and so the
1989 * integration becomes a simple multiplication by the measure
1990 * of the cell. Iterating over the pre-made filter matrix
1991 * allows us to use the information about which cells are in
1992 * or out of the filter without repeatedly checking neighbor
1996 * for (const auto &cell : dof_handler.active_cell_iterators())
1998 * const unsigned int i = cell->active_cell_index();
1999 * for (typename SparseMatrix<double>::iterator iter =
2000 * filter_matrix.begin(i);
2001 * iter != filter_matrix.end(i);
2004 * const unsigned int j = iter->column();
2005 * const double value = iter->value() * cell->measure();
2008 * .block(SolutionBlocks::unfiltered_density_multiplier,
2009 * SolutionBlocks::unfiltered_density)
2010 * .add(i, j, value);
2012 * .block(SolutionBlocks::unfiltered_density,
2013 * SolutionBlocks::unfiltered_density_multiplier)
2014 * .add(j, i, value);
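*
* In other words, every stored filter entry @f$H_{ij}@f$ contributes the
* value @f$H_{ij}\,|K_i|@f$ (with @f$|K_i|@f$ the measure of cell @f$i@f$)
* at position @f$(i,j)@f$ of the block coupling the unfiltered density
* multiplier to the unfiltered density, and at the transposed position of
* the opposite block -- so that, as far as these terms are concerned, the
* Newton matrix stays symmetric.
*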
2023 * <a name="step_79-SolvingtheNewtonlinearsystem"></a>
2024 * <h3>Solving the Newton linear system</h3>
2031 * We will need to solve a linear system in each iteration. We use
2032 * a direct solver, for now -- this is clearly not an efficient
2033 * choice for a matrix that has so many non-zeroes, and it will
2034 * not scale to anything interesting. For "real" applications, we
2035 * will need an iterative solver but the complexity of the system
2036 * means that an iterative solver algorithm will take a good deal
2037 * of work. Because this is not the focus of the current program,
2038 * we simply stick with the direct solver we have here -- the
2039 * function follows the same structure as used in @ref step_29 "step-29".
2042 * template <int dim>
2043 * BlockVector<double> SANDTopOpt<dim>::solve()
2045 * TimerOutput::Scope t(timer, "solver");
2047 * BlockVector<double> linear_solution;
2048 * linear_solution.reinit(nonlinear_solution);
2050 * SparseDirectUMFPACK A_direct;
2051 * A_direct.initialize(system_matrix);
2052 * A_direct.vmult(linear_solution, system_rhs);
2054 * constraints.distribute(linear_solution);
2056 * return linear_solution;
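*
* For the curious, a minimal sketch of what swapping in an (unpreconditioned)
* Krylov solver could look like is shown below. This is only meant to
* illustrate the interface: without a preconditioner adapted to the
* saddle-point structure of this system, GMRES is unlikely to converge in
* any reasonable number of iterations -- which is exactly the "good deal of
* work" alluded to above.
* @code
*   // Illustrative sketch only: solve the same block system with GMRES and
*   // no (i.e., the identity) preconditioner.
*   SolverControl solver_control(10000, 1e-10 * system_rhs.l2_norm());
*   SolverGMRES<BlockVector<double>> solver(solver_control);
*   solver.solve(system_matrix, linear_solution, system_rhs,
*                PreconditionIdentity());
*   constraints.distribute(linear_solution);
* @endcode
*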
2063 * <a name="step_79-Detailsoftheoptimizationalgorithm"></a>
2064 * <h3>Details of the optimization algorithm</h3>
2068 * The next several functions deal with specific parts of the
2069 * optimization algorithm, most notably with deciding whether the
2070 * direction computed by solving the linearized (Newton) system is
2071 * viable and, if so, how far we want to go in this direction.
2076 * <a name="step_79-Computingsteplengths"></a>
2077 * <h4>Computing step lengths</h4>
2081 * We start with a function that does a binary search to figure
2082 * out the maximum step that meets the dual feasibility -- that
2083 * is, how far can we go so that @f$s>0@f$ and @f$z>0@f$. The function
2084 * returns a pair of values, one each for the @f$s@f$ and @f$z@f$ slack
2088 * template <int dim>
2089 * std::pair<double, double> SANDTopOpt<dim>::calculate_max_step_size(
2090 * const BlockVector<double> &state,
2091 * const BlockVector<double> &step) const
2093 * double fraction_to_boundary;
2094 * const double min_fraction_to_boundary = .8;
2095 * const double max_fraction_to_boundary = 1. - 1e-5;
2097 * if (min_fraction_to_boundary < 1 - barrier_size)
2099 * if (1 - barrier_size < max_fraction_to_boundary)
2100 * fraction_to_boundary = 1 - barrier_size;
2102 * fraction_to_boundary = max_fraction_to_boundary;
2105 * fraction_to_boundary = min_fraction_to_boundary;
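*
* The nested if/else above simply clamps `1 - barrier_size` to the interval
* `[min_fraction_to_boundary, max_fraction_to_boundary]`. A one-line
* equivalent (a sketch, assuming C++17 and `#include <algorithm>`) would be:
* @code
*   // Equivalent to the if/else cascade above:
*   fraction_to_boundary = std::clamp(1 - barrier_size,
*                                     min_fraction_to_boundary,
*                                     max_fraction_to_boundary);
* @endcode
*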
2107 * double step_size_s_low = 0;
2108 * double step_size_z_low = 0;
2109 * double step_size_s_high = 1;
2110 * double step_size_z_high = 1;
2111 * double step_size_s, step_size_z;
2113 * const int max_bisection_method_steps = 50;
2114 * for (unsigned int k = 0; k < max_bisection_method_steps; ++k)
2116 * step_size_s = (step_size_s_low + step_size_s_high) / 2;
2117 * step_size_z = (step_size_z_low + step_size_z_high) / 2;
2119 * const BlockVector<double> state_test_s =
2120 * (fraction_to_boundary * state) + (step_size_s * step);
2122 * const BlockVector<double> state_test_z =
2123 * (fraction_to_boundary * state) + (step_size_z * step);
2125 * const bool accept_s =
2126 * (state_test_s.block(SolutionBlocks::density_lower_slack)
2127 * .is_non_negative()) &&
2128 * (state_test_s.block(SolutionBlocks::density_upper_slack)
2129 * .is_non_negative());
2130 * const bool accept_z =
2131 * (state_test_z.block(SolutionBlocks::density_lower_slack_multiplier)
2132 * .is_non_negative()) &&
2133 * (state_test_z.block(SolutionBlocks::density_upper_slack_multiplier)
2134 * .is_non_negative());
2137 * step_size_s_low = step_size_s;
2139 * step_size_s_high = step_size_s;
2142 * step_size_z_low = step_size_z;
2144 * step_size_z_high = step_size_z;
2147 * return {step_size_s_low, step_size_z_low};
2154 * <a name="step_79-Computingresiduals"></a>
2155 * <h4>Computing residuals</h4>
2159 * The next function computes a right hand side vector linearized
2160 * around a "test solution vector" that we can use to look at the
2161 * magnitude of the KKT conditions. This is then used for testing
2162 * the convergence before shrinking the barrier size, as well as in the
2163 * calculation of the @f$l_1@f$ merit.
2167 * The function is lengthy and complicated, but it is really just a
2168 * copy of the right hand side part of what the `assemble_system()`
2169 * function above did.
2172 * template <int dim>
2173 * BlockVector<double> SANDTopOpt<dim>::calculate_test_rhs(
2174 * const BlockVector<double> &test_solution) const
2178 * We first create a zero vector with size and blocking of system_rhs
2181 * BlockVector<double> test_rhs;
2182 * test_rhs.reinit(system_rhs);
2184 * MappingQ<dim> mapping(1);
2185 * const QGauss<dim> quadrature_formula(fe.degree + 1);
2186 * const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
2187 * FEValues<dim> fe_values(mapping,
2189 * quadrature_formula,
2190 * update_values | update_gradients |
2191 * update_quadrature_points | update_JxW_values);
2192 * FEFaceValues<dim> fe_face_values(mapping,
2194 * face_quadrature_formula,
2195 * update_values | update_quadrature_points |
2196 * update_normal_vectors |
2197 * update_JxW_values);
2199 * const unsigned int dofs_per_cell = fe.dofs_per_cell;
2200 * const unsigned int n_q_points = quadrature_formula.size();
2202 * Vector<double> cell_rhs(dofs_per_cell);
2203 * FullMatrix<double> dummy_cell_matrix(dofs_per_cell, dofs_per_cell);
2205 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
2207 * std::vector<double> lambda_values(n_q_points);
2208 * std::vector<double> mu_values(n_q_points);
2210 * const Functions::ConstantFunction<dim> lambda(1.), mu(1.);
2211 * std::vector<Tensor<1, dim>> rhs_values(n_q_points);
2214 * BlockVector<double> filtered_unfiltered_density_solution = test_solution;
2215 * BlockVector<double> filter_adjoint_unfiltered_density_multiplier_solution =
2217 * filtered_unfiltered_density_solution.block(
2218 * SolutionBlocks::unfiltered_density) = 0;
2219 * filter_adjoint_unfiltered_density_multiplier_solution.block(
2220 * SolutionBlocks::unfiltered_density_multiplier) = 0;
2222 * filter_matrix.vmult(filtered_unfiltered_density_solution.block(
2223 * SolutionBlocks::unfiltered_density),
2224 * test_solution.block(
2225 * SolutionBlocks::unfiltered_density));
2226 * filter_matrix.Tvmult(
2227 * filter_adjoint_unfiltered_density_multiplier_solution.block(
2228 * SolutionBlocks::unfiltered_density_multiplier),
2229 * test_solution.block(SolutionBlocks::unfiltered_density_multiplier));
2232 * std::vector<double> old_density_values(n_q_points);
2233 * std::vector<Tensor<1, dim>> old_displacement_values(n_q_points);
2234 * std::vector<double> old_displacement_divs(n_q_points);
2235 * std::vector<SymmetricTensor<2, dim>> old_displacement_symmgrads(n_q_points);
2236 * std::vector<Tensor<1, dim>> old_displacement_multiplier_values(n_q_points);
2237 * std::vector<double> old_displacement_multiplier_divs(n_q_points);
2238 * std::vector<SymmetricTensor<2, dim>> old_displacement_multiplier_symmgrads(
2240 * std::vector<double> old_lower_slack_multiplier_values(n_q_points);
2241 * std::vector<double> old_upper_slack_multiplier_values(n_q_points);
2242 * std::vector<double> old_lower_slack_values(n_q_points);
2243 * std::vector<double> old_upper_slack_values(n_q_points);
2244 * std::vector<double> old_unfiltered_density_values(n_q_points);
2245 * std::vector<double> old_unfiltered_density_multiplier_values(n_q_points);
2246 * std::vector<double> filtered_unfiltered_density_values(n_q_points);
2247 * std::vector<double> filter_adjoint_unfiltered_density_multiplier_values(
2250 * using namespace ValueExtractors;
2251 * for (const auto &cell : dof_handler.active_cell_iterators())
2255 * cell->get_dof_indices(local_dof_indices);
2257 * fe_values.reinit(cell);
2259 * lambda.value_list(fe_values.get_quadrature_points(), lambda_values);
2260 * mu.value_list(fe_values.get_quadrature_points(), mu_values);
2262 * fe_values[densities<dim>].get_function_values(test_solution,
2263 * old_density_values);
2264 * fe_values[displacements<dim>].get_function_values(
2265 * test_solution, old_displacement_values);
2266 * fe_values[displacements<dim>].get_function_divergences(
2267 * test_solution, old_displacement_divs);
2268 * fe_values[displacements<dim>].get_function_symmetric_gradients(
2269 * test_solution, old_displacement_symmgrads);
2270 * fe_values[displacement_multipliers<dim>].get_function_values(
2271 * test_solution, old_displacement_multiplier_values);
2272 * fe_values[displacement_multipliers<dim>].get_function_divergences(
2273 * test_solution, old_displacement_multiplier_divs);
2274 * fe_values[displacement_multipliers<dim>]
2275 * .get_function_symmetric_gradients(
2276 * test_solution, old_displacement_multiplier_symmgrads);
2277 * fe_values[density_lower_slacks<dim>].get_function_values(
2278 * test_solution, old_lower_slack_values);
2279 * fe_values[density_lower_slack_multipliers<dim>].get_function_values(
2280 * test_solution, old_lower_slack_multiplier_values);
2281 * fe_values[density_upper_slacks<dim>].get_function_values(
2282 * test_solution, old_upper_slack_values);
2283 * fe_values[density_upper_slack_multipliers<dim>].get_function_values(
2284 * test_solution, old_upper_slack_multiplier_values);
2285 * fe_values[unfiltered_densities<dim>].get_function_values(
2286 * test_solution, old_unfiltered_density_values);
2287 * fe_values[unfiltered_density_multipliers<dim>].get_function_values(
2288 * test_solution, old_unfiltered_density_multiplier_values);
2289 * fe_values[unfiltered_densities<dim>].get_function_values(
2290 * filtered_unfiltered_density_solution,
2291 * filtered_unfiltered_density_values);
2292 * fe_values[unfiltered_density_multipliers<dim>].get_function_values(
2293 * filter_adjoint_unfiltered_density_multiplier_solution,
2294 * filter_adjoint_unfiltered_density_multiplier_values);
2296 * for (const auto q_point : fe_values.quadrature_point_indices())
2298 * for (const auto i : fe_values.dof_indices())
2300 * const SymmetricTensor<2, dim> displacement_phi_i_symmgrad =
2301 * fe_values[displacements<dim>].symmetric_gradient(i, q_point);
2302 * const double displacement_phi_i_div =
2303 * fe_values[displacements<dim>].divergence(i, q_point);
2305 * const SymmetricTensor<2, dim>
2306 * displacement_multiplier_phi_i_symmgrad =
2307 * fe_values[displacement_multipliers<dim>].symmetric_gradient(
2309 * const double displacement_multiplier_phi_i_div =
2310 * fe_values[displacement_multipliers<dim>].divergence(i,
2314 * const double density_phi_i =
2315 * fe_values[densities<dim>].value(i, q_point);
2316 * const double unfiltered_density_phi_i =
2317 * fe_values[unfiltered_densities<dim>].value(i, q_point);
2318 * const double unfiltered_density_multiplier_phi_i =
2319 * fe_values[unfiltered_density_multipliers<dim>].value(i,
2322 * const double lower_slack_multiplier_phi_i =
2323 * fe_values[density_lower_slack_multipliers<dim>].value(
2326 * const double lower_slack_phi_i =
2327 * fe_values[density_lower_slacks<dim>].value(i, q_point);
2329 * const double upper_slack_phi_i =
2330 * fe_values[density_upper_slacks<dim>].value(i, q_point);
2332 * const double upper_slack_multiplier_phi_i =
2333 * fe_values[density_upper_slack_multipliers<dim>].value(
2336 * /* Equation 1: This equation, along with equations
2337 * * 2 and 3, are the variational derivatives of the
2338 * * Lagrangian with respect to the decision
2339 * * variables - the density, displacement, and
2340 * * unfiltered density. */
2342 * -1 * fe_values.JxW(q_point) *
2343 * (density_penalty_exponent *
2344 * std::pow(old_density_values[q_point],
2345 * density_penalty_exponent - 1) *
2347 * (old_displacement_multiplier_divs[q_point] *
2348 * old_displacement_divs[q_point] *
2349 * lambda_values[q_point] +
2350 * 2 * mu_values[q_point] *
2351 * (old_displacement_symmgrads[q_point] *
2352 * old_displacement_multiplier_symmgrads[q_point])) -
2354 * old_unfiltered_density_multiplier_values[q_point]);
2356 * /* Equation 2; the boundary terms will be added further down
2359 * -1 * fe_values.JxW(q_point) *
2360 * (std::pow(old_density_values[q_point],
2361 * density_penalty_exponent) *
2362 * (old_displacement_multiplier_divs[q_point] *
2363 * displacement_phi_i_div * lambda_values[q_point] +
2364 * 2 * mu_values[q_point] *
2365 * (old_displacement_multiplier_symmgrads[q_point] *
2366 * displacement_phi_i_symmgrad)));
2370 * -1 * fe_values.JxW(q_point) *
2371 * (unfiltered_density_phi_i *
2372 * filter_adjoint_unfiltered_density_multiplier_values
2374 * unfiltered_density_phi_i *
2375 * old_upper_slack_multiplier_values[q_point] +
2376 * -1 * unfiltered_density_phi_i *
2377 * old_lower_slack_multiplier_values[q_point]);
2381 * /* Equation 4; boundary term will again be dealt
2382 * * with below. This equation being driven to 0
2383 * * ensures that the elasticity equation is met as
2384 * * a constraint. */
2385 * cell_rhs(i) += -1 * fe_values.JxW(q_point) *
2386 * (std::pow(old_density_values[q_point],
2387 * density_penalty_exponent) *
2388 * (old_displacement_divs[q_point] *
2389 * displacement_multiplier_phi_i_div *
2390 * lambda_values[q_point] +
2391 * 2 * mu_values[q_point] *
2392 * (displacement_multiplier_phi_i_symmgrad *
2393 * old_displacement_symmgrads[q_point])));
2395 * /* Equation 5: This equation sets the lower slack
2396 * * variable equal to the unfiltered density,
2397 * * giving a minimum density of 0. */
2398 * cell_rhs(i) += fe_values.JxW(q_point) *
2399 * (lower_slack_multiplier_phi_i *
2400 * (old_unfiltered_density_values[q_point] -
2401 * old_lower_slack_values[q_point]));
2403 * /* Equation 6: This equation sets the upper slack
2404 * * variable equal to one minus the unfiltered
2406 * cell_rhs(i) += fe_values.JxW(q_point) *
2407 * (upper_slack_multiplier_phi_i *
2408 * (1 - old_unfiltered_density_values[q_point] -
2409 * old_upper_slack_values[q_point]));
2411 * /* Equation 7: This is the difference between the
2412 * * density and the filter applied to the
2413 * * unfiltered density. This being driven to 0 by
2414 * * the Newton steps ensures that the filter is
2415 * * applied correctly. */
2416 * cell_rhs(i) += fe_values.JxW(q_point) *
2417 * (unfiltered_density_multiplier_phi_i *
2418 * (old_density_values[q_point] -
2419 * filtered_unfiltered_density_values[q_point]));
2421 * /* Equation 8: This along with equation 9 give the
2422 * * requirement that s*z = \alpha for the barrier
2423 * * size alpha, and gives complementary slackness
2424 * * from KKT conditions when \alpha goes to 0. */
2426 * -1 * fe_values.JxW(q_point) *
2427 * (lower_slack_phi_i *
2428 * (old_lower_slack_multiplier_values[q_point] -
2429 * barrier_size / old_lower_slack_values[q_point]));
2433 * -1 * fe_values.JxW(q_point) *
2434 * (upper_slack_phi_i *
2435 * (old_upper_slack_multiplier_values[q_point] -
2436 * barrier_size / old_upper_slack_values[q_point]));
2440 * for (const auto &face : cell->face_iterators())
2442 * if (face->at_boundary() &&
2443 * face->boundary_id() == BoundaryIds::down_force)
2445 * fe_face_values.reinit(cell, face);
2447 * for (const auto face_q_point :
2448 * fe_face_values.quadrature_point_indices())
2450 * for (const auto i : fe_face_values.dof_indices())
2452 * Tensor<1, dim> traction;
2453 * traction[1] = -1.;
2457 * (traction * fe_face_values[displacements<dim>].value(
2458 * i, face_q_point)) *
2459 * fe_face_values.JxW(face_q_point);
2463 * fe_face_values[displacement_multipliers<dim>].value(
2464 * i, face_q_point)) *
2465 * fe_face_values.JxW(face_q_point);
2471 * MatrixTools::local_apply_boundary_values(boundary_values,
2472 * local_dof_indices,
2473 * dummy_cell_matrix,
2477 * constraints.distribute_local_to_global(cell_rhs,
2478 * local_dof_indices,
2489 * <a name="step_79-Computingthemeritfunction"></a>
2490 * <h4>Computing the merit function</h4>
2494 * The algorithm we use here employs a "watchdog" strategy to
2495 * determine where and how far to go from the current iterate. We
2496 * base the watchdog strategy on an exact @f$l_1@f$ merit function. This
2497 * function calculates the exact @f$l_1@f$ merit of a given, putative,
2502 * The merit function consists of the sum of the objective function
2503 * (which is simply an integral of external forces (on the boundary
2504 * of the domain) times the displacement values of a test solution
2505 * (typically, the current solution plus some multiple of the Newton
2506 * update)), and the @f$l_1@f$ norms of the Lagrange multiplier
2507 * components of residual vectors. The following code computes these
2511 * template <int dim>
2512 * double SANDTopOpt<dim>::calculate_exact_merit(
2513 * const BlockVector<double> &test_solution)
2515 * TimerOutput::Scope t(timer, "merit function");
2519 * Start with computing the objective function:
2522 * double objective_function_merit = 0;
2524 * MappingQ<dim> mapping(1);
2525 * const QGauss<dim> quadrature_formula(fe.degree + 1);
2526 * const QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
2527 * FEValues<dim> fe_values(mapping,
2529 * quadrature_formula,
2530 * update_values | update_gradients |
2531 * update_quadrature_points | update_JxW_values);
2532 * FEFaceValues<dim> fe_face_values(mapping,
2534 * face_quadrature_formula,
2536 * update_quadrature_points |
2537 * update_normal_vectors |
2538 * update_JxW_values);
2540 * const unsigned int n_face_q_points = face_quadrature_formula.size();
2542 * std::vector<Tensor<1, dim>> displacement_face_values(n_face_q_points);
2544 * for (const auto &cell : dof_handler.active_cell_iterators())
2546 * for (const auto &face : cell->face_iterators())
2548 * if (face->at_boundary() &&
2549 * face->boundary_id() == BoundaryIds::down_force)
2551 * fe_face_values.reinit(cell, face);
2552 * fe_face_values[ValueExtractors::displacements<dim>]
2553 * .get_function_values(test_solution,
2554 * displacement_face_values);
2555 * for (unsigned int face_q_point = 0;
2556 * face_q_point < n_face_q_points;
2559 * Tensor<1, dim> traction;
2560 * traction[1] = -1.;
2562 * objective_function_merit +=
2563 * (traction * displacement_face_values[face_q_point]) *
2564 * fe_face_values.JxW(face_q_point);
2571 * for (const auto &cell : triangulation.active_cell_iterators())
2573 * objective_function_merit =
2574 * objective_function_merit -
2575 * barrier_size * cell->measure() *
2576 * std::log(test_solution.block(
2577 * SolutionBlocks::density_lower_slack)[cell->active_cell_index()]);
2578 * objective_function_merit =
2579 * objective_function_merit -
2580 * barrier_size * cell->measure() *
2581 * std::log(test_solution.block(
2582 * SolutionBlocks::density_upper_slack)[cell->active_cell_index()]);
2587 * Then compute the residual and take the @f$l_1@f$ norms of the
2588 * components that correspond to Lagrange multipliers. We add
2589 * those to the objective function computed above, and return
2590 * the sum at the bottom:
2593 * const BlockVector<double> test_rhs = calculate_test_rhs(test_solution);
2595 * const double elasticity_constraint_merit =
2596 * penalty_multiplier *
2597 * test_rhs.block(SolutionBlocks::displacement_multiplier).l1_norm();
2598 * const double filter_constraint_merit =
2599 * penalty_multiplier *
2600 * test_rhs.block(SolutionBlocks::unfiltered_density_multiplier).l1_norm();
2601 * const double lower_slack_merit =
2602 * penalty_multiplier *
2603 * test_rhs.block(SolutionBlocks::density_lower_slack_multiplier).l1_norm();
2604 * const double upper_slack_merit =
2605 * penalty_multiplier *
2606 * test_rhs.block(SolutionBlocks::density_upper_slack_multiplier).l1_norm();
2608 * const double total_merit =
2609 * objective_function_merit + elasticity_constraint_merit +
2610 * filter_constraint_merit + lower_slack_merit + upper_slack_merit;
2611 * return total_merit;
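*
* Summarizing the pieces computed above, the value returned here is --
* roughly speaking -- the barrier-augmented @f$l_1@f$ merit function
* @f[
*   \phi = \int_{\Gamma_{\text{force}}} \mathbf{t}\cdot\mathbf{u}\, ds
*        - \alpha \sum_K |K| \left(\log s^{\text{lower}}_K
*                                 + \log s^{\text{upper}}_K\right)
*        + p \left(\|c_{\text{elasticity}}\|_{l_1} + \|c_{\text{filter}}\|_{l_1}
*                 + \|c_{\text{slack}}^{\text{lower}}\|_{l_1}
*                 + \|c_{\text{slack}}^{\text{upper}}\|_{l_1}\right),
* @f]
* where @f$\alpha@f$ is the barrier size, @f$p@f$ the penalty multiplier,
* and the @f$c_\bullet@f$ are the residuals of the four equality constraints
* as computed by `calculate_test_rhs()`.
*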
2619 * <a name="step_79-Findingasearchdirection"></a>
2620 * <h4>Finding a search direction</h4>
2624 * Next up is the function that actually computes a search direction
2625 * starting at the current state (passed as the first argument) and
2626 * returns the resulting vector. To this end, the function first
2627 * calls the functions that assemble the linear system that
2628 * corresponds to the Newton system, and that solve it.
2632 * This function also updates the penalty multiplier in the merit
2633 * function, and then returns the largest scaled feasible step.
2634 * It uses the `calculate_max_step_size()` function to find the
2635 * largest feasible step that satisfies @f$s>0@f$ and @f$z>0@f$.
2641 * template <int dim>
2642 * BlockVector<double> SANDTopOpt<dim>::find_max_step()
2644 * assemble_system();
2645 * BlockVector<double> step = solve();
2649 * Next we are going to update penalty_multiplier. In
2650 * essence, a larger penalty multiplier makes us consider the
2651 * constraints more. Looking at the Hessian and gradient with
2652 * respect to the step we want to take with our decision
2653 * variables, and comparing that to the norm of our constraint
2654 * error gives us a way to ensure that our merit function is
2655 * "exact" - that is, it has a minimum in the same location
2656 * that the objective function does. As our merit function is
2657 * exact for any penalty multiplier over some minimum value,
2658 * we only keep the computed value if it increases the penalty
2665 * const std::vector<unsigned int> decision_variables = {
2666 * SolutionBlocks::density,
2667 * SolutionBlocks::displacement,
2668 * SolutionBlocks::unfiltered_density,
2669 * SolutionBlocks::density_upper_slack,
2670 * SolutionBlocks::density_lower_slack};
2671 * double hess_part = 0;
2672 * double grad_part = 0;
2673 * for (const unsigned int decision_variable_i : decision_variables)
2675 * for (const unsigned int decision_variable_j : decision_variables)
2677 * Vector<double> temp_vector(step.block(decision_variable_i).size());
2678 * system_matrix.block(decision_variable_i, decision_variable_j)
2679 * .vmult(temp_vector, step.block(decision_variable_j));
2680 * hess_part += step.block(decision_variable_i) * temp_vector;
2682 * grad_part -= system_rhs.block(decision_variable_i) *
2683 * step.block(decision_variable_i);
2686 * const std::vector<unsigned int> equality_constraint_multipliers = {
2687 * SolutionBlocks::displacement_multiplier,
2688 * SolutionBlocks::unfiltered_density_multiplier,
2689 * SolutionBlocks::density_lower_slack_multiplier,
2690 * SolutionBlocks::density_upper_slack_multiplier};
2691 * double constraint_norm = 0;
2692 * for (const unsigned int multiplier_i : equality_constraint_multipliers)
2693 * constraint_norm += system_rhs.block(multiplier_i).linfty_norm();
2696 * double test_penalty_multiplier;
2697 * if (hess_part > 0)
2698 * test_penalty_multiplier =
2699 * (grad_part + .5 * hess_part) / (.05 * constraint_norm);
2701 * test_penalty_multiplier = (grad_part) / (.05 * constraint_norm);
2703 * penalty_multiplier = std::max(penalty_multiplier, test_penalty_multiplier);
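*
* Written out, the trial value computed above is
* @f[
*   \mu_{\text{test}} = \frac{g + \tfrac{1}{2} h}{0.05\,\|c\|},
* @f]
* where @f$g@f$ is `grad_part`, @f$h@f$ is `hess_part` (the curvature term
* @f$\tfrac{1}{2} h@f$ being dropped if @f$h \le 0@f$), and @f$\|c\|@f$ is
* `constraint_norm`. This mirrors the standard sufficient condition under
* which an @f$l_1@f$ penalty function is exact, and taking the maximum with
* the previous value ensures that the penalty multiplier never decreases.
*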
2707 * Based on all of this, we can now compute step sizes for the
2708 * primal and dual (Lagrange multiplier) variables. Once we
2709 * have these, we scale the components of the solution vector,
2710 * and that is what this function returns.
2713 * const std::pair<double, double> max_step_sizes =
2714 * calculate_max_step_size(nonlinear_solution, step);
2715 * const double step_size_s = max_step_sizes.first;
2716 * const double step_size_z = max_step_sizes.second;
2718 * step.block(SolutionBlocks::density) *= step_size_s;
2719 * step.block(SolutionBlocks::displacement) *= step_size_s;
2720 * step.block(SolutionBlocks::unfiltered_density) *= step_size_s;
2721 * step.block(SolutionBlocks::displacement_multiplier) *= step_size_z;
2722 * step.block(SolutionBlocks::unfiltered_density_multiplier) *= step_size_z;
2723 * step.block(SolutionBlocks::density_lower_slack) *= step_size_s;
2724 * step.block(SolutionBlocks::density_lower_slack_multiplier) *= step_size_z;
2725 * step.block(SolutionBlocks::density_upper_slack) *= step_size_s;
2726 * step.block(SolutionBlocks::density_upper_slack_multiplier) *= step_size_z;
2736 * <a name="step_79-Computingascaledstep"></a>
2737 * <h4>Computing a scaled step</h4>
2741 * The next function then implements a back-tracking algorithm for a
2742 * line search. It keeps shrinking step size until it finds a step
2743 * where the merit is decreased, and then returns the new location
2744 * based on the current state vector, and the direction to go into,
2745 * times the step length.
2748 * template <int dim>
2749 * BlockVector<double>
2750 * SANDTopOpt<dim>::compute_scaled_step(const BlockVector<double> &state,
2751 * const BlockVector<double> &max_step,
2752 * const double descent_requirement)
2754 * const double merit_derivative =
2755 * (calculate_exact_merit(state + 1e-4 * max_step) -
2756 * calculate_exact_merit(state)) /
2758 * double step_size = 1;
2759 * unsigned int max_linesearch_iterations = 10;
2760 * for (unsigned int k = 0; k < max_linesearch_iterations; ++k)
2762 * if (calculate_exact_merit(state + step_size * max_step) <
2763 * calculate_exact_merit(state) +
2764 * step_size * descent_requirement * merit_derivative)
2767 * step_size = step_size / 2;
2769 * return state + (step_size * max_step);
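*
* The acceptance test in the loop above is a standard Armijo (sufficient
* decrease) condition: a step length @f$t@f$ is accepted as soon as
* @f$\phi(x + t\,d) < \phi(x) + c\,t\,D_d\phi(x)@f$, where @f$c@f$ is the
* `descent_requirement` and the directional derivative @f$D_d\phi(x)@f$ is
* approximated by the forward difference computed at the top of the
* function; otherwise the step length is halved.
*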
2776 * <a name="step_79-Checkingforconvergence"></a>
2777 * <h4>Checking for convergence</h4>
2781 * The final auxiliary function in this block is the one that checks
2782 * to see if the KKT conditions are sufficiently met so that the
2783 * overall algorithm can lower the barrier size. It does so by
2784 * computing the @f$l_1@f$ norm of the residual, which is what
2785 * `calculate_test_rhs()` computes.
2788 * template <int dim>
2789 * bool SANDTopOpt<dim>::check_convergence(const BlockVector<double> &state)
2791 * const BlockVector<double> test_rhs = calculate_test_rhs(state);
2792 * const double test_rhs_norm = test_rhs.l1_norm();
2794 * const double convergence_condition = 1e-2;
2795 * const double target_norm = convergence_condition * barrier_size;
2797 * std::cout << " Checking convergence. Current rhs norm is "
2798 * << test_rhs_norm << ", target is " << target_norm << std::endl;
2800 * return (test_rhs_norm < target_norm);
2807 * <a name="step_79-Postprocessingthesolution"></a>
2808 * <h3>Postprocessing the solution</h3>
2812 * The first of the postprocessing functions outputs information
2813 * in a VTU file for visualization. It looks long, but it's really
2814 * just the same as what was done in @ref step_22 "step-22", for example, just
2815 * with (a lot) more solution variables:
2818 * template <int dim>
2819 * void SANDTopOpt<dim>::output_results(const unsigned int iteration) const
2821 * std::vector<std::string> solution_names(1, "density");
2822 * std::vector<DataComponentInterpretation::DataComponentInterpretation>
2823 * data_component_interpretation(
2825 * for (unsigned int i = 0; i < dim; ++i)
2827 * solution_names.emplace_back("displacement");
2828 * data_component_interpretation.push_back(
2831 * solution_names.emplace_back("unfiltered_density");
2832 * data_component_interpretation.push_back(
2834 * for (unsigned int i = 0; i < dim; ++i)
2836 * solution_names.emplace_back("displacement_multiplier");
2837 * data_component_interpretation.push_back(
2840 * solution_names.emplace_back("unfiltered_density_multiplier");
2841 * data_component_interpretation.push_back(
2843 * solution_names.emplace_back("low_slack");
2844 * data_component_interpretation.push_back(
2846 * solution_names.emplace_back("low_slack_multiplier");
2847 * data_component_interpretation.push_back(
2849 * solution_names.emplace_back("high_slack");
2850 * data_component_interpretation.push_back(
2852 * solution_names.emplace_back("high_slack_multiplier");
2853 * data_component_interpretation.push_back(
2858 * data_out.add_data_vector(nonlinear_solution,
2861 * data_component_interpretation);
2862 * data_out.build_patches();
2864 * std::ofstream output("solution" + std::to_string(iteration) + ".vtu");
2865 * data_out.write_vtu(output);
2873 * printing. [STL](https://en.wikipedia.org/wiki/STL_(file_format))
2874 * files are made up of triangles and normal vectors, and we will
2875 * use it to show all of those cells with a density value larger
2876 * than zero by first extruding the mesh from a @f$z@f$ value of zero
2877 * to @f$z=0.25@f$, and then generating two triangles for each face of
2878 * the cells with a sufficiently large density value. The triangle
2879 * nodes must go counter-clockwise when looking from the outside,
2880 * and the normal vectors must be unit vectors pointing outwards,
2881 * which requires a few checks.
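*
* For reference, every triangle emitted by the code below follows the usual
* ASCII STL facet layout (sketched here with placeholder coordinates):
* @code
* facet normal nx ny nz
*    outer loop
*       vertex x1 y1 z1
*       vertex x2 y2 z2
*       vertex x3 y3 z3
*    endloop
* endfacet
* @endcode
*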
2884 * template <int dim>
2885 * void SANDTopOpt<dim>::write_as_stl()
2887 * static_assert(dim == 2,
2888 *               "This function is not implemented for anything "
2889 *               "other than the 2d case.");
2891 * std::ofstream stlfile;
2892 * stlfile.open("bridge.stl");
2894 * stlfile << "solid bridge\n" << std::scientific;
2895 * double height = .25;
2897 * for (const auto &cell : dof_handler.active_cell_iterators())
2899 * if (nonlinear_solution.block(
2900 * SolutionBlocks::density)[cell->active_cell_index()] > 0.5)
2904 * We have now found a cell with a density value larger
2905 * than zero. Let us start by writing out the bottom
2906 * and top faces. Owing to the ordering issue mentioned
2907 * above, we have to make sure that we understand
2908 * whether a cell has a right- or left-handed
2909 * coordinate system. We do this by interrogating the
2910 * directions of the two edges starting at vertex 0 and
2911 * whether they form a right-handed coordinate system.
2914 * const Tensor<1, dim> edge_directions[2] = {cell->vertex(1) -
2919 * {{edge_directions[0][0], edge_directions[0][1]},
2920 * {edge_directions[1][0], edge_directions[1][1]}});
2921 * const bool is_right_handed_cell = (determinant(edge_tensor) > 0);
2923 * if (is_right_handed_cell)
2926 * stlfile <<
" facet normal " << 0.000000e+00 <<
' '
2927 * << 0.000000e+00 <<
' ' << -1.000000e+00 <<
'\n';
2928 * stlfile <<
" outer loop\n";
2929 * stlfile <<
" vertex " << cell->vertex(0)[0] <<
' '
2930 * << cell->vertex(0)[1] <<
' ' << 0.000000e+00 <<
'\n';
2931 * stlfile <<
" vertex " << cell->vertex(2)[0] <<
' '
2932 * << cell->vertex(2)[1] <<
' ' << 0.000000e+00 <<
'\n';
2933 * stlfile <<
" vertex " << cell->vertex(1)[0] <<
' '
2934 * << cell->vertex(1)[1] <<
' ' << 0.000000e+00 <<
'\n';
2935 * stlfile <<
" endloop\n";
2936 * stlfile <<
" endfacet\n";
2937 * stlfile <<
" facet normal " << 0.000000e+00 <<
' '
2938 * << 0.000000e+00 <<
' ' << -1.000000e+00 <<
'\n';
2939 * stlfile <<
" outer loop\n";
2940 * stlfile <<
" vertex " << cell->vertex(1)[0] <<
' '
2941 * << cell->vertex(1)[1] <<
' ' << 0.000000e+00 <<
'\n';
2942 * stlfile <<
" vertex " << cell->vertex(2)[0] <<
' '
2943 * << cell->vertex(2)[1] <<
' ' << 0.000000e+00 <<
'\n';
2944 * stlfile <<
" vertex " << cell->vertex(3)[0] <<
' '
2945 * << cell->vertex(3)[1] <<
' ' << 0.000000e+00 <<
'\n';
2946 * stlfile <<
" endloop\n";
2947 * stlfile <<
" endfacet\n";
 *                 stlfile << " facet normal " << 0.000000e+00 << ' '
 *                         << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
 *                 stlfile << " outer loop\n";
 *                 stlfile << " vertex " << cell->vertex(0)[0] << ' '
 *                         << cell->vertex(0)[1] << ' ' << height << '\n';
 *                 stlfile << " vertex " << cell->vertex(1)[0] << ' '
 *                         << cell->vertex(1)[1] << ' ' << height << '\n';
 *                 stlfile << " vertex " << cell->vertex(2)[0] << ' '
 *                         << cell->vertex(2)[1] << ' ' << height << '\n';
 *                 stlfile << " endloop\n";
 *                 stlfile << " endfacet\n";
 *                 stlfile << " facet normal " << 0.000000e+00 << ' '
 *                         << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
 *                 stlfile << " outer loop\n";
 *                 stlfile << " vertex " << cell->vertex(1)[0] << ' '
 *                         << cell->vertex(1)[1] << ' ' << height << '\n';
 *                 stlfile << " vertex " << cell->vertex(3)[0] << ' '
 *                         << cell->vertex(3)[1] << ' ' << height << '\n';
 *                 stlfile << " vertex " << cell->vertex(2)[0] << ' '
 *                         << cell->vertex(2)[1] << ' ' << height << '\n';
 *                 stlfile << " endloop\n";
 *                 stlfile << " endfacet\n";
 *               }
 *             else
 *               {
 *                 stlfile << " facet normal " << 0.000000e+00 << ' '
 *                         << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
 *                 stlfile << " outer loop\n";
 *                 stlfile << " vertex " << cell->vertex(0)[0] << ' '
 *                         << cell->vertex(0)[1] << ' ' << 0.000000e+00 << '\n';
 *                 stlfile << " vertex " << cell->vertex(1)[0] << ' '
 *                         << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
 *                 stlfile << " vertex " << cell->vertex(2)[0] << ' '
 *                         << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
 *                 stlfile << " endloop\n";
 *                 stlfile << " endfacet\n";
 *                 stlfile << " facet normal " << 0.000000e+00 << ' '
 *                         << 0.000000e+00 << ' ' << -1.000000e+00 << '\n';
 *                 stlfile << " outer loop\n";
 *                 stlfile << " vertex " << cell->vertex(1)[0] << ' '
 *                         << cell->vertex(1)[1] << ' ' << 0.000000e+00 << '\n';
 *                 stlfile << " vertex " << cell->vertex(3)[0] << ' '
 *                         << cell->vertex(3)[1] << ' ' << 0.000000e+00 << '\n';
 *                 stlfile << " vertex " << cell->vertex(2)[0] << ' '
 *                         << cell->vertex(2)[1] << ' ' << 0.000000e+00 << '\n';
 *                 stlfile << " endloop\n";
 *                 stlfile << " endfacet\n";
 *                 stlfile << " facet normal " << 0.000000e+00 << ' '
 *                         << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
 *                 stlfile << " outer loop\n";
 *                 stlfile << " vertex " << cell->vertex(0)[0] << ' '
 *                         << cell->vertex(0)[1] << ' ' << height << '\n';
 *                 stlfile << " vertex " << cell->vertex(2)[0] << ' '
 *                         << cell->vertex(2)[1] << ' ' << height << '\n';
 *                 stlfile << " vertex " << cell->vertex(1)[0] << ' '
 *                         << cell->vertex(1)[1] << ' ' << height << '\n';
 *                 stlfile << " endloop\n";
 *                 stlfile << " endfacet\n";
 *                 stlfile << " facet normal " << 0.000000e+00 << ' '
 *                         << 0.000000e+00 << ' ' << 1.000000e+00 << '\n';
 *                 stlfile << " outer loop\n";
 *                 stlfile << " vertex " << cell->vertex(1)[0] << ' '
 *                         << cell->vertex(1)[1] << ' ' << height << '\n';
 *                 stlfile << " vertex " << cell->vertex(2)[0] << ' '
 *                         << cell->vertex(2)[1] << ' ' << height << '\n';
 *                 stlfile << " vertex " << cell->vertex(3)[0] << ' '
 *                         << cell->vertex(3)[1] << ' ' << height << '\n';
 *                 stlfile << " endloop\n";
 *                 stlfile << " endfacet\n";
 *               }
 * Next we need to deal with the four faces of the
 * cell, extended into the @f$z@f$ direction. However, we
 * only need to write these faces if either the face
 * is on the domain boundary, or if it is the
 * interface between a cell with density greater than
 * 0.5, and a cell with a density less than 0.5.
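 *
 * For each such face we again have to get the orientation of the two
 * triangles right. A side face is the segment from @f$v_0@f$ to @f$v_1@f$
 * extruded in the @f$z@f$ direction, so the triangle
 * @f$\{(v_0,0),(v_0,h),(v_1,0)\}@f$ has a normal proportional to
 * @f$(0,0,h)\times(v_1-v_0,0) = \bigl(-h\,(v_1-v_0)_y,\; h\,(v_1-v_0)_x,\; 0\bigr)@f$.
 * Comparing the sign of the dot product of this vector with the outward
 * normal of the face tells us in which order the triangle vertices have
 * to be written, and this is what the condition below checks.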
 *             for (unsigned int face_number = 0;
 *                  face_number < GeometryInfo<dim>::faces_per_cell;
 *                  ++face_number)
 *               {
 *                 const typename DoFHandler<dim>::face_iterator face =
 *                   cell->face(face_number);
 *
 *                 if ((face->at_boundary()) ||
 *                     (!face->at_boundary() &&
 *                      (nonlinear_solution.block(
 *                         0)[cell->neighbor(face_number)->active_cell_index()] <
 *                       0.5)))
 *                   {
 *                     const Tensor<1, dim> normal_vector =
 *                       (face->center() - cell->center());
 *                     const double normal_norm = normal_vector.norm();
 *
 *                     // The sign of the triple product described above
 *                     // decides in which order the triangle vertices of
 *                     // this side face have to be written:
 *                     if (height * (face->vertex(1)[0] - face->vertex(0)[0]) *
 *                             normal_vector[1] -
 *                           height * (face->vertex(1)[1] - face->vertex(0)[1]) *
 *                             normal_vector[0] >
 *                         0)
 *                       {
 *                         stlfile << " facet normal "
 *                                 << normal_vector[0] / normal_norm << ' '
 *                                 << normal_vector[1] / normal_norm << ' '
 *                                 << 0.000000e+00 << '\n';
 *                         stlfile << " outer loop\n";
 *                         stlfile << " vertex " << face->vertex(0)[0]
 *                                 << ' ' << face->vertex(0)[1] << ' '
 *                                 << 0.000000e+00 << '\n';
 *                         stlfile << " vertex " << face->vertex(0)[0]
 *                                 << ' ' << face->vertex(0)[1] << ' ' << height
 *                                 << '\n';
 *                         stlfile << " vertex " << face->vertex(1)[0]
 *                                 << ' ' << face->vertex(1)[1] << ' '
 *                                 << 0.000000e+00 << '\n';
 *                         stlfile << " endloop\n";
 *                         stlfile << " endfacet\n";
 *                         stlfile << " facet normal "
 *                                 << normal_vector[0] / normal_norm << ' '
 *                                 << normal_vector[1] / normal_norm << ' '
 *                                 << 0.000000e+00 << '\n';
 *                         stlfile << " outer loop\n";
 *                         stlfile << " vertex " << face->vertex(0)[0]
 *                                 << ' ' << face->vertex(0)[1] << ' ' << height
 *                                 << '\n';
 *                         stlfile << " vertex " << face->vertex(1)[0]
 *                                 << ' ' << face->vertex(1)[1] << ' ' << height
 *                                 << '\n';
 *                         stlfile << " vertex " << face->vertex(1)[0]
 *                                 << ' ' << face->vertex(1)[1] << ' '
 *                                 << 0.000000e+00 << '\n';
 *                         stlfile << " endloop\n";
 *                         stlfile << " endfacet\n";
 *                       }
 *                     else
 *                       {
 *                         stlfile << " facet normal "
 *                                 << normal_vector[0] / normal_norm << ' '
 *                                 << normal_vector[1] / normal_norm << ' '
 *                                 << 0.000000e+00 << '\n';
 *                         stlfile << " outer loop\n";
 *                         stlfile << " vertex " << face->vertex(0)[0]
 *                                 << ' ' << face->vertex(0)[1] << ' '
 *                                 << 0.000000e+00 << '\n';
 *                         stlfile << " vertex " << face->vertex(1)[0]
 *                                 << ' ' << face->vertex(1)[1] << ' '
 *                                 << 0.000000e+00 << '\n';
 *                         stlfile << " vertex " << face->vertex(0)[0]
 *                                 << ' ' << face->vertex(0)[1] << ' ' << height
 *                                 << '\n';
 *                         stlfile << " endloop\n";
 *                         stlfile << " endfacet\n";
 *                         stlfile << " facet normal "
 *                                 << normal_vector[0] / normal_norm << ' '
 *                                 << normal_vector[1] / normal_norm << ' '
 *                                 << 0.000000e+00 << '\n';
 *                         stlfile << " outer loop\n";
 *                         stlfile << " vertex " << face->vertex(0)[0]
 *                                 << ' ' << face->vertex(0)[1] << ' ' << height
 *                                 << '\n';
 *                         stlfile << " vertex " << face->vertex(1)[0]
 *                                 << ' ' << face->vertex(1)[1] << ' '
 *                                 << 0.000000e+00 << '\n';
 *                         stlfile << " vertex " << face->vertex(1)[0]
 *                                 << ' ' << face->vertex(1)[1] << ' ' << height
 *                                 << '\n';
 *                         stlfile << " endloop\n";
 *                         stlfile << " endfacet\n";
 *                       }
 *                   }
 *               }
 *           }
 *       }
 *
 *     stlfile << "endsolid bridge";
 *   }
 *
 * <a name="step_79-Therunfunctiondrivingtheoverallalgorithm"></a>
 * <h3>The run() function driving the overall algorithm</h3>
 *
 * This function finally provides the overall driver logic. It is,
 * in the grand scheme of things, a rather complicated function
 * primarily because the optimization algorithm is difficult: It
 * isn't just about finding a Newton direction like in @ref step_15 "step-15" and
 * then going a fixed distance in this direction any more, but
 * instead about (i) determining what the optimal log-barrier
 * penalty parameter should be in the current step, (ii) a
 * complicated algorithm to determine how far we want to go, and
 * other ingredients. Let us see how we can break this down into
 * smaller chunks in the following documentation.
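 *
 * In rough pseudocode (a schematic outline only, not the actual
 * implementation), the structure discussed below is this:
 * @code
 *   do  // outer loop over log-barrier sizes
 *     {
 *       do  // inner loop: watchdog phase, then possibly a stretch step
 *         {
 *           // up to `max_uphill_steps` Newton steps, then fall-back logic
 *         }
 *       while (not converged for this barrier size and iterations remain);
 *
 *       // decrease barrier_size
 *     }
 *   while (barrier_size is still too large or not yet converged);
 * @endcode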
 *
 * The function starts out simple enough with first setting up the
 * mesh, the DoFHandler, and then the various linear algebra objects
 * necessary for the following:
 *   template <int dim>
 *   void SANDTopOpt<dim>::run()
 *   {
 *     std::cout << "filter r is: " << filter_r << std::endl;
 *
 *     dof_handler.distribute_dofs(fe);
 *
 *     setup_boundary_values();
 *     setup_block_system();
 *     setup_filter_matrix();
 *
 * We then set a number of parameters that affect the
 * log-barrier and line search components of the optimization
 * algorithm:
 *
 *     barrier_size = 25;
 *     const double min_barrier_size = .0005;
 *
 *     const unsigned int max_uphill_steps    = 8;
 *     const double       descent_requirement = .0001;
 *
 * Now start the principal iteration. The overall algorithm
 * works by using an outer loop in which we loop until either
 * (i) the log-barrier parameter has become small enough, or (ii)
 * we have reached convergence. In any case, we terminate if we
 * end up with too large a number of iterations. This overall
 * structure is encoded as a `do { ... } while (...)` loop
 * where the convergence condition is at the bottom.
 *
 *     unsigned int       iteration_number = 0;
 *     const unsigned int max_iterations   = 10000;
 *
 *     do
 *       {
 *         std::cout << "Starting outer step in iteration " << iteration_number
 *                   << " with barrier parameter " << barrier_size << std::endl;
 *
 * Within this outer loop, we have an inner loop in which we
 * try to find an update direction using the watchdog
 * algorithm described in the introduction.
 *
 * The general idea of the watchdog algorithm itself is
 * this: For a maximum of `max_uphill_steps` (i.e., a loop
 * within the "inner loop" mentioned above) attempts, we use
 * `find_max_step()` to compute a Newton update step, and
 * add these up in the `nonlinear_solution` vector. In each of
 * these attempts (starting from the place reached at the
 * end of the previous attempt), we check whether we have
 * reached a target value of the merit function described
 * above. The target value is computed based on where this
 * algorithm starts (the `nonlinear_solution` at the beginning of
 * the watchdog loop, saved as `watchdog_state`) and the
 * first proposed direction provided by `find_max_step()` in
 * the first go-around of this loop (the `k==0` case).
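 *
 * Concretely, if @f$\phi(\cdot)@f$ denotes the exact merit function and
 * @f$d@f$ the first proposed direction, the code below uses the one-sided
 * finite difference
 * @f[
 *   \phi'(x_w;d) \approx
 *     \frac{\phi(x_w + \epsilon d) - \phi(x_w)}{\epsilon},
 *   \qquad \epsilon = 10^{-4},
 * @f]
 * and sets the target value to
 * @f$\phi(x_w) + \text{descent\_requirement}\cdot \phi'(x_w;d)@f$,
 * where @f$x_w@f$ is the watchdog state.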
 *
 *         do
 *           {
 *             std::cout << " Starting inner step in iteration "
 *                       << iteration_number
 *                       << " with merit function penalty multiplier "
 *                       << penalty_multiplier << std::endl;
 *
 *             bool watchdog_step_found = false;
 *
 *             const BlockVector<double> watchdog_state = nonlinear_solution;
 *             BlockVector<double>       first_step;
 *             double                    target_merit     = 0;
 *             double                    merit_derivative = 0;
 *
 *             for (unsigned int k = 0; k < max_uphill_steps; ++k)
 *               {
 *                 ++iteration_number;
 *                 const BlockVector<double> update_step = find_max_step();
 *
 *                 if (k == 0)
 *                   {
 *                     first_step = update_step;
 *                     merit_derivative =
 *                       ((calculate_exact_merit(watchdog_state +
 *                                               .0001 * first_step) -
 *                         calculate_exact_merit(watchdog_state)) /
 *                        .0001);
 *                     target_merit = calculate_exact_merit(watchdog_state) +
 *                                    descent_requirement * merit_derivative;
 *                   }
 *
 *                 nonlinear_solution += update_step;
 *                 const double current_merit =
 *                   calculate_exact_merit(nonlinear_solution);
 *
 *                 std::cout << " current watchdog state merit is: "
 *                           << current_merit << "; target merit is "
 *                           << target_merit << std::endl;
 *
 *                 if (current_merit < target_merit)
 *                   {
 *                     watchdog_step_found = true;
 *                     std::cout << " found workable step after " << k + 1
 *                               << " iterations" << std::endl;
 *                     break;
 *                   }
 *               }
 *
 * The next part of the algorithm then depends on
 * whether the watchdog loop above succeeded. If it
 * did, then we are satisfied and no further action is
 * necessary: We just stay where we are. If, however,
 * we took the maximal number of unsuccessful steps in
 * the loop above, then we need to do something else,
 * and this is what the following code block does.
 *
 * Specifically, from the final (unsuccessful) state
 * of the loop above, we seek one more update
 * direction and take what is called a "stretch
 * step". If that stretch state satisfies a condition
 * involving the merit function, then we go there. On
 * the other hand, if the stretch state is also
 * unacceptable (as all of the watchdog steps above
 * were), then we discard all of the watchdog steps
 * taken above and start over again where we had
 * started the watchdog iterations -- that place was
 * stored in the `watchdog_state` variable above. More
 * specifically, the conditions below first test
 * whether we take a step from `watchdog_state` in
 * direction `first_step`, or whether we can do one
 * more update from the stretch state to find a new
 * place. It is possible that neither of these is
 * actually better than the state we started from at
 * the beginning of the watchdog algorithm, but even
 * if that is so, that place clearly was a difficult
 * place to be in, and getting away to start the next
 * iteration from another place might be a useful
 * strategy to eventually converge.
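 *
 * In pseudocode (again only a schematic summary of what the code below
 * does, written with hypothetical shorthand names), the decision is
 * roughly:
 * @code
 *   if (merit(current) < merit(watchdog_state) ||
 *       merit(stretch_state) < target_merit)
 *     accept the stretch state;
 *   else if (merit(stretch_state) > merit(watchdog_state))
 *     take a scaled step from watchdog_state along first_step;
 *   else
 *     move to the stretch state and take one more scaled step from there;
 * @endcode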
 *
 * We keep repeating the watchdog steps above along
 * with the logic below until this inner iteration is
 * finally converged (or if we run up against the
 * maximal number of iterations -- where we count the
 * number of linear solves as iterations and increment
 * the counter every time we call `find_max_step()`
 * since that is where the linear solve actually
 * happens). In any case, at the end of each of these
 * inner iterations we also output the solution in a
 * form suitable for visualization.
 *
 *             if (watchdog_step_found == false)
 *               {
 *                 ++iteration_number;
 *
 *                 const BlockVector<double> update_step = find_max_step();
 *                 const BlockVector<double> stretch_state =
 *                   compute_scaled_step(nonlinear_solution,
 *                                       update_step,
 *                                       descent_requirement);
 *
 * If we did not get a successful watchdog step,
 * we now need to decide between going back to
 * where we started, or using the final state. We
 * compare the merits of both of these locations,
 * and then take a scaled step from whichever
 * location is better. As the scaled step is
 * guaranteed to lower the merit, we will end up
 * keeping one of the two.
 *
 *                 if ((calculate_exact_merit(nonlinear_solution) <
 *                      calculate_exact_merit(watchdog_state)) ||
 *                     (calculate_exact_merit(stretch_state) < target_merit))
 *                   {
 *                     std::cout << " Taking scaled step from end of watchdog"
 *                               << std::endl;
 *                     nonlinear_solution = stretch_state;
 *                   }
 *                 else
 *                   {
 *                     std::cout
 *                       << " Taking scaled step from beginning of watchdog"
 *                       << std::endl;
 *                     if (calculate_exact_merit(stretch_state) >
 *                         calculate_exact_merit(watchdog_state))
 *                       {
 *                         nonlinear_solution =
 *                           compute_scaled_step(watchdog_state,
 *                                               first_step,
 *                                               descent_requirement);
 *                       }
 *                     else
 *                       {
 *                         ++iteration_number;
 *                         nonlinear_solution = stretch_state;
 *                         const BlockVector<double> stretch_step =
 *                           find_max_step();
 *                         nonlinear_solution =
 *                           compute_scaled_step(nonlinear_solution,
 *                                               stretch_step,
 *                                               descent_requirement);
 *                       }
 *                   }
 *               }
 *
 *             output_results(iteration_number);
 *           }
 *         while ((iteration_number < max_iterations) &&
 *                (check_convergence(nonlinear_solution) == false));
 *
 * At the end of the outer loop, we have to update the
 * barrier parameter, for which we use the following
 * formula. The rest of the function is then simply about
 * checking the outer loop convergence condition, and if
 * we decide to terminate computations, about writing the
 * final "design" as an STL file for use in 3d printing,
 * and to output some timing information.
 *
 *         const double barrier_size_multiplier = .8;
 *         const double barrier_size_exponent   = 1.2;
 *
 *         barrier_size =
 *           std::max(std::min(barrier_size * barrier_size_multiplier,
 *                             std::pow(barrier_size, barrier_size_exponent)),
 *                    min_barrier_size);
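 *
 * To illustrate what this update does: starting from the initial value of
 * 25, we get
 * @f$\max(\min(25\cdot 0.8,\ 25^{1.2}),\ 0.0005)
 *    = \max(\min(20,\ \approx 47.6),\ 0.0005) = 20@f$,
 * so for large barrier values the geometric reduction by a factor of 0.8
 * is the one that is active. The two terms switch roles at
 * @f$0.8^5 \approx 0.33@f$: below that value the power term is the smaller
 * one (for example, for a barrier size of 0.01 we get
 * @f$0.01^{1.2} \approx 0.0040 < 0.008@f$), so the decrease accelerates
 * towards the end, until the value is finally capped from below at
 * `min_barrier_size`.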
 *
 *         std::cout << std::endl;
 *       }
 *     while (((barrier_size > min_barrier_size) ||
 *             (check_convergence(nonlinear_solution) == false)) &&
 *            (iteration_number < max_iterations));
 *
 *     write_as_stl();
 *   }
 *
 * <a name="step_79-Themainfunction"></a>
 * <h3>The main function</h3>
 *
 * The remainder of the code, the `main()` function, is as usual:
 *
 *   int main()
 *   {
 *     try
 *       {
 *         SAND::SANDTopOpt<2> elastic_problem_2d;
 *         elastic_problem_2d.run();
 *       }
 *     catch (std::exception &exc)
 *       {
 *         std::cerr << std::endl
 *                   << std::endl
 *                   << "----------------------------------------------------"
 *                   << std::endl;
 *         std::cerr << "Exception on processing: " << std::endl
 *                   << exc.what() << std::endl
 *                   << "Aborting!" << std::endl
 *                   << "----------------------------------------------------"
 *                   << std::endl;
 *         return 1;
 *       }
 *     catch (...)
 *       {
 *         std::cerr << std::endl
 *                   << std::endl
 *                   << "----------------------------------------------------"
 *                   << std::endl;
 *         std::cerr << "Unknown exception!" << std::endl
 *                   << "Aborting!" << std::endl
 *                   << "----------------------------------------------------"
 *                   << std::endl;
 *         return 1;
 *       }
 *
 *     return 0;
 *   }
<a name="step_79-Results"></a><h1>Results</h1>

<a name="step_79-TestProblem"></a><h3>Test Problem</h3>
The algorithms used above are tested against a traditional topology optimization
problem called the Messerschmitt-Bolkow-Blohm Beam (MBB Beam).

This problem considers the optimal 2-d structure that can be built on a
rectangle 6 units wide, and 1 unit tall. The bottom corners are fixed in place
in the @f$y@f$ direction using a zero Dirichlet boundary condition, and a downward
force is applied in the center of the top of the beam by enforcing a Neumann
boundary condition. The rest of the boundary is allowed to move, and has no
external force applied, which takes the form of a zero Neumann boundary
condition. In essence, we are asking the following question: How should we
design a bridge in a way so that if the bottom left and bottom right point of
the bridge are on rollers that allow these points to move horizontally but not
vertically, the displacement in response to the vertical force at the center of
the top is minimal?

While the total volume of the domain is 6 units, 3 units of material are allowed for
the structure. Because of the symmetry of the problem, it could be posed on a
rectangle of width 3 and height 1 by cutting the original domain in half, and
using zero Dirichlet boundary conditions in the @f$x@f$ direction along the cut
edge. That said, symmetry of the solution is a good indicator that the program
is working as expected, so we solved the problem on the whole domain,
as shown below. @cite Bendse2004
<div style="text-align:center;">
  <img src="https://www.dealii.org/images/steps/developer/step-79.mbbgeometry.png"
       alt="The MBB problem domain and boundary conditions">
</div>
Using the program discussed above, we find the minimum volume of the MBB Beam and the
individual components of the solution look as follows:

<div class="onecolumn" style="width: 80%; text-align: center;">
  <div>
    <img src="https://www.dealii.org/images/steps/developer/step-79.filtereddensity.png"
         alt="Filtered Density Solution">
  </div>
  <div>
    <img src="https://www.dealii.org/images/steps/developer/step-79.unfiltereddensity.png"
         alt="Unfiltered Density Solution">
  </div>
</div>
These pictures show that what we find here is in accordance with what one
typically sees in other publications on the topic @cite Bendse2004.
Maybe more interestingly, the result looks like a truss bridge (except that we
apply the load at the top of the trusses, rather than the bottom as in real
truss bridges, akin to a "deck truss" bridge), suggesting that the designs that
have been used in bridge-building for centuries are indeed based on ideas we
can now show to be optimal.
<a name="step_79-Possibilitiesforextensions"></a><h4>Possibilities for extensions</h4>
The results shown above took around 75 iterations to find, which is quite
concerning given the expense of solving the large linear systems in each
iteration. Looking at the evolution, it does look as though the convergence has
moments of happening quickly and moments of happening slowly. We believe this to
be due to both a lack of precision on when and how to decrease the barrier
value, as well as our choice of merit function being sub-optimal. In the future,
a LOQO barrier update replacing the monotone reduction, as well as a Markov
filter in place of a merit function, should decrease the number of necessary
iterations significantly.

The barrier decrease is most sensitive in the middle of the convergence, which
is problematic, as it seems like we need it to decrease quickly, then slowly.
Secondly, the linear solver used here is a sparse direct solver,
but the formulation of the optimization problem detailed above has quite a large
number of variables and so the linear problem is not only large but also has a
lot of nonzero entries in many rows, even on meshes that overall are still
relatively coarse. As a consequence, the solver time dominates the
computations, and more sophisticated approaches at solving the linear system
would be worthwhile exploring.
<a name="step_79-PlainProg"></a>
<h1> The plain program</h1>
@include "step-79.cc"