step-72.h
485 *   template <int dim>
486 *   double BoundaryValues<dim>::value(const Point<dim> &p,
487 *   const unsigned int /*component*/) const
488 *   {
489 *   return std::sin(2 * numbers::PI * (p[0] + p[1]));
490 *   }
491 *  
492 *  
493 * @endcode
494 *
495 *
496 * <a name="step_72-ThecodeMinimalSurfaceProblemcodeclassimplementation"></a>
497 * <h3>The <code>MinimalSurfaceProblem</code> class implementation</h3>
498 *
499
500 *
501 *
502 * <a name="step_72-MinimalSurfaceProblemMinimalSurfaceProblem"></a>
503 * <h4>MinimalSurfaceProblem::MinimalSurfaceProblem</h4>
504 *
505
506 *
507 * There have been no changes made to the class constructor.
508 *
509 * @code
510 *   template <int dim>
511 *   MinimalSurfaceProblem<dim>::MinimalSurfaceProblem()
512 *   : dof_handler(triangulation)
513 *   , fe(2)
514 *   , quadrature_formula(fe.degree + 1)
515 *   {}
516 *  
517 *  
518 * @endcode
519 *
520 *
521 * <a name="step_72-MinimalSurfaceProblemsetup_system"></a>
522 * <h4>MinimalSurfaceProblem::setup_system</h4>
523 *
524
525 *
526 * There have been no changes made to the function that sets up the class
527 * data structures, namely the DoFHandler, the hanging node constraints
528 * applied to the problem, and the linear system.
529 *
530 * @code
531 *   template <int dim>
532 *   void MinimalSurfaceProblem<dim>::setup_system()
533 *   {
534 *   dof_handler.distribute_dofs(fe);
535 *   current_solution.reinit(dof_handler.n_dofs());
536 *   newton_update.reinit(dof_handler.n_dofs());
537 *   system_rhs.reinit(dof_handler.n_dofs());
538 *  
539 *   zero_constraints.clear();
540 *   VectorTools::interpolate_boundary_values(dof_handler,
541 *   0,
542 *   Functions::ZeroFunction<dim>(),
543 *   zero_constraints);
544 *   DoFTools::make_hanging_node_constraints(dof_handler, zero_constraints);
545 *   zero_constraints.close();
546 *  
547 *   nonzero_constraints.clear();
548 *   VectorTools::interpolate_boundary_values(dof_handler,
549 *   0,
550 *   BoundaryValues<dim>(),
551 *   nonzero_constraints);
552 *   nonzero_constraints.close();
553 *  
554 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
555 *   DoFTools::make_sparsity_pattern(dof_handler, dsp, zero_constraints);
556 *  
557 *   sparsity_pattern.copy_from(dsp);
558 *   system_matrix.reinit(sparsity_pattern);
559 *   }
560 *  
561 * @endcode
562 *
563 *
564 * <a name="step_72-Assemblingthelinearsystem"></a>
565 * <h4>Assembling the linear system</h4>
566 *
567
568 *
569 *
570 * <a name="step_72-Manualassembly"></a>
571 * <h5>Manual assembly</h5>
572 *
573
574 *
575 * The assembly functions are the interesting contributions to this tutorial.
576 * The assemble_system_unassisted() method implements exactly the same
577 * assembly function as is detailed in @ref step_15 "step-15", but in this instance we
578 * use the MeshWorker::mesh_loop() function to multithread the assembly
579 * process. The reason for doing this is quite simple: When using
580 * automatic differentiation, we know that some additional computational
581 * overhead will be incurred. In order to mitigate this performance
582 * loss, we'd like to take advantage of as many (easily available)
583 * computational resources as possible. The MeshWorker::mesh_loop() concept
584 * makes this a relatively straightforward task. At the same time, for the
585 * purposes of fair comparison, we need to do the same to the implementation
586 * that uses no assistance when computing the residual or its linearization.
587 * (The MeshWorker::mesh_loop() function is first discussed in @ref step_12 "step-12" and
588 * @ref step_16 "step-16", if you'd like to read up on it.)
589 *
590
591 *
592 * The steps required to implement the multithreading are the same between the
593 * three functions, so we'll use the assemble_system_unassisted() function
594 * as an opportunity to focus on the multithreading itself.
595 *
596 * @code
597 *   template <int dim>
598 *   void MinimalSurfaceProblem<dim>::assemble_system_unassisted()
599 *   {
600 *   system_matrix = 0;
601 *   system_rhs = 0;
602 *  
603 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
604 *  
605 * @endcode
606 *
607 * The MeshWorker::mesh_loop() expects that we provide two exemplar data
608 * structures. The first, `ScratchData`, is to store all large data that
609 * is to be reused between threads. The `CopyData` will hold the
610 * contributions to the linear system that come from each cell. These
611 * independent matrix-vector pairs must be accumulated into the
612 * global linear system sequentially. Since we don't need anything
613 * on top of what the MeshWorker::ScratchData and MeshWorker::CopyData
614 * classes already provide, we use these exact class definitions for
615 * our problem. Note that we only require a single instance of a local
616 * matrix, local right-hand side vector, and cell degree of freedom index
617 * vector -- the MeshWorker::CopyData therefore has `1` for all three
618 * of its template arguments.
619 *
620 * @code
621 *   using ScratchData = MeshWorker::ScratchData<dim>;
622 *   using CopyData = MeshWorker::CopyData<1, 1, 1>;
623 *  
624 * @endcode
625 *
626 * We also need to know what type of iterator we'll be working with
627 * during assembly. For simplicity, we just ask the compiler to work
628 * this out for us using the decltype() specifier, knowing that we'll
629 * be iterating over active cells owned by the @p dof_handler.
630 *
631 * @code
632 *   using CellIteratorType = decltype(dof_handler.begin_active());
633 *  
634 * @endcode
635 *
636 * Here we initialize the exemplar data structures. Since we know that
637 * we need to compute the shape function gradients, weighted Jacobian,
638 * and the position of the quadrature points in real space, we pass these
639 * flags into the class constructor.
640 *
641 * @code
642 *   const ScratchData sample_scratch_data(fe,
643 *   quadrature_formula,
644 *   update_gradients |
645 *   update_quadrature_points |
646 *   update_JxW_values);
647 *   const CopyData sample_copy_data(dofs_per_cell);
648 *  
649 * @endcode
650 *
651 * Now we define a lambda function that will perform the assembly on
652 * a single cell. The three arguments are those that will be expected by
653 * MeshWorker::mesh_loop(), due to the arguments that we'll pass to that
654 * final call. We also capture the @p this pointer, which means that we'll
655 * have access to "this" (i.e., the current `MinimalSurfaceProblem<dim>`)
656 * class instance, and its private member data (since the lambda function is
657 * defined within a MinimalSurfaceProblem<dim> method).
658 *
659
660 *
661 * At the top of the function, we initialize the data structures
662 * that are dependent on the cell for which the work is being
663 * performed. Observe that the reinitialization call actually
664 * returns a reference to an FEValues object that is initialized
665 * and stored within (and, therefore, reused by) the
666 * `scratch_data` object.
667 *
668
669 *
670 * Similarly, we get aliases to the local matrix, local RHS
671 * vector, and local cell DoF indices from the `copy_data`
672 * instance that MeshWorker::mesh_loop() provides. We then
673 * initialize the cell DoF indices, knowing that the local matrix
674 * and vector are already correctly sized.
675 *
676 * @code
677 *   const auto cell_worker = [this](const CellIteratorType &cell,
678 *   ScratchData &scratch_data,
679 *   CopyData &copy_data) {
680 *   const auto &fe_values = scratch_data.reinit(cell);
681 *  
682 *   FullMatrix<double> &cell_matrix = copy_data.matrices[0];
683 *   Vector<double> &cell_rhs = copy_data.vectors[0];
684 *   std::vector<types::global_dof_index> &local_dof_indices =
685 *   copy_data.local_dof_indices[0];
686 *   cell->get_dof_indices(local_dof_indices);
687 *  
688 * @endcode
689 *
690 * For Newton's method, we require the gradient of the solution at the
691 * point about which the problem is being linearized.
692 *
693
694 *
695 * Once we have that, we can perform assembly for this cell in
696 * the usual way. One minor difference to @ref step_15 "step-15" is that we've
697 * used the (rather convenient) range-based loops to iterate
698 * over all quadrature points and degrees-of-freedom.
699 *
700 * @code
701 *   std::vector<Tensor<1, dim>> old_solution_gradients(
702 *   fe_values.n_quadrature_points);
703 *   fe_values.get_function_gradients(current_solution,
704 *   old_solution_gradients);
705 *  
706 *   for (const unsigned int q : fe_values.quadrature_point_indices())
707 *   {
708 *   const double coeff =
709 *   1.0 / std::sqrt(1.0 + old_solution_gradients[q] *
710 *   old_solution_gradients[q]);
711 *  
712 *   for (const unsigned int i : fe_values.dof_indices())
713 *   {
714 *   for (const unsigned int j : fe_values.dof_indices())
715 *   cell_matrix(i, j) +=
716 *   (((fe_values.shape_grad(i, q) // ((\nabla \phi_i
717 *   * coeff // * a_n
718 *   * fe_values.shape_grad(j, q)) // * \nabla \phi_j)
719 *   - // -
720 *   (fe_values.shape_grad(i, q) // (\nabla \phi_i
721 *   * coeff * coeff * coeff // * a_n^3
722 *   * (fe_values.shape_grad(j, q) // * (\nabla \phi_j
723 *   * old_solution_gradients[q]) // * \nabla u_n)
724 *   * old_solution_gradients[q])) // * \nabla u_n)))
725 *   * fe_values.JxW(q)); // * dx
726 *  
727 *   cell_rhs(i) -= (fe_values.shape_grad(i, q) // \nabla \phi_i
728 *   * coeff // * a_n
729 *   * old_solution_gradients[q] // * \nabla u_n
730 *   * fe_values.JxW(q)); // * dx
731 *   }
732 *   }
733 *   };
734 *  
735 * @endcode
736 *
737 * The second lambda function that MeshWorker::mesh_loop() requires is
738 * one that performs the task of accumulating the local contributions
739 * in the global linear system. That is precisely what this one does,
740 * and the details of the implementation have been seen before. The
741 * primary point to recognize is that the local contributions are stored
742 * in the `copy_data` instance that is passed into this function. This
743 * `copy_data` has been filled with data during @a some call to the
744 * `cell_worker`.
745 *
746 * @code
747 *   const auto copier = [this](const CopyData &copy_data) {
748 *   const FullMatrix<double> &cell_matrix = copy_data.matrices[0];
749 *   const Vector<double> &cell_rhs = copy_data.vectors[0];
750 *   const std::vector<types::global_dof_index> &local_dof_indices =
751 *   copy_data.local_dof_indices[0];
752 *  
753 *   zero_constraints.distribute_local_to_global(
754 *   cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
755 *   };
756 *  
757 * @endcode
758 *
759 * We have all of the required function definitions in place, so
760 * now we call MeshWorker::mesh_loop() to perform the actual
761 * assembly. We pass a flag as the last parameter which states
762 * that we only want to perform the assembly on the
763 * cells. Internally, MeshWorker::mesh_loop() then distributes the
764 * available work to different threads, making efficient use of
765 * the multiple cores almost all of today's processors have to
766 * offer.
767 *
768 * @code
769 *   MeshWorker::mesh_loop(dof_handler.active_cell_iterators(),
770 *   cell_worker,
771 *   copier,
772 *   sample_scratch_data,
773 *   sample_copy_data,
774 *   MeshWorker::assemble_own_cells);
775 *   }
776 *  
777 * @endcode
778 *
779 *
780 * <a name="step_72-Assemblyviadifferentiationoftheresidualvector"></a>
781 * <h5>Assembly via differentiation of the residual vector</h5>
782 *
783
784 *
785 * As outlined in the introduction, what we need to do for this
786 * second approach is implement the local contributions @f$F(U)^K@f$
787 * from cell @f$K@f$ to the residual vector, and then let the
788 * AD machinery deal with how to compute the
789 * derivatives @f$J(U)_{ij}^K=\frac{\partial F(U)^K_i}{\partial U_j}@f$
790 * from it.
791 *
792
793 *
794 * For the following, recall that
795 * @f[
796 * F(U)_i^K \dealcoloneq
797 * \int\limits_K\nabla \varphi_i \cdot \left[ \frac{1}{\sqrt{1+|\nabla
798 * u|^{2}}} \nabla u \right] \, dV ,
799 * @f]
800 * where @f$u(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)@f$.
801 *
802
803 *
804 * Let us see how this is implemented in practice:
805 *
806 * @code
807 *   template <int dim>
808 *   void MinimalSurfaceProblem<dim>::assemble_system_with_residual_linearization()
809 *   {
810 *   system_matrix = 0;
811 *   system_rhs = 0;
812 *  
813 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
814 *  
815 *   using ScratchData = MeshWorker::ScratchData<dim>;
816 *   using CopyData = MeshWorker::CopyData<1, 1, 1>;
817 *   using CellIteratorType = decltype(dof_handler.begin_active());
818 *  
819 *   const ScratchData sample_scratch_data(fe,
820 *   quadrature_formula,
821 *   update_gradients |
822 *   update_quadrature_points |
823 *   update_JxW_values);
824 *   const CopyData sample_copy_data(dofs_per_cell);
825 *  
826 * @endcode
827 *
828 * We'll define up front the AD data structures that we'll be using,
829 * utilizing the techniques shown in @ref step_71 "step-71".
830 * In this case, we choose the helper class that will automatically compute
831 * the linearization of the finite element residual using Sacado forward
832 * automatic differentiation types. These number types can be used to
833 * compute first derivatives only. This is exactly what we want, because we
834 * know that we'll only be linearizing the residual, which means that we
835 * only need to compute first-order derivatives. The return values from the
836 * calculations are to be of type `double`.
837 *
838
839 *
840 * We also need an extractor to retrieve some data related to the field
841 * solution to the problem.
842 *
843 * @code
844 *   using ADHelper = Differentiation::AD::ResidualLinearization<
845 *   Differentiation::AD::NumberTypes::sacado_dfad,
846 *   double>;
847 *   using ADNumberType = typename ADHelper::ad_type;
848 *  
849 *   const FEValuesExtractors::Scalar u_fe(0);
850 *  
851 * @endcode
852 *
853 * With this, let us define the lambda function that will be used
854 * to compute the cell contributions to the Jacobian matrix and
855 * the right hand side:
856 *
857 * @code
858 *   const auto cell_worker = [&u_fe, this](const CellIteratorType &cell,
859 *   ScratchData &scratch_data,
860 *   CopyData &copy_data) {
861 *   const auto &fe_values = scratch_data.reinit(cell);
862 *   const unsigned int dofs_per_cell = fe_values.get_fe().n_dofs_per_cell();
863 *  
864 *   FullMatrix<double> &cell_matrix = copy_data.matrices[0];
865 *   Vector<double> &cell_rhs = copy_data.vectors[0];
866 *   std::vector<types::global_dof_index> &local_dof_indices =
867 *   copy_data.local_dof_indices[0];
868 *   cell->get_dof_indices(local_dof_indices);
869 *  
870 * @endcode
871 *
872 * We'll now create and initialize an instance of the AD helper class.
873 * To do this, we need to specify how many independent variables and
874 * dependent variables there are. The independent variables will be the
875 * number of local degrees of freedom that our solution vector has,
876 * i.e., the number @f$j@f$ in the per-element representation of the
877 * discretized solution vector
878 * @f$u (\mathbf{x})|_K = \sum\limits_{j} U^K_j \varphi_j(\mathbf{x})@f$
879 * that indicates how many solution coefficients are associated with
880 * each finite element. In deal.II, this equals
881 * FiniteElement::dofs_per_cell. The number of dependent variables will be
882 * the number of entries in the local residual vector that we will be
883 * forming. In this particular problem (like many others that employ the
884 * [standard Galerkin
885 * method](https://en.wikipedia.org/wiki/Galerkin_method)) the number of
886 * local solution coefficients matches the number of local residual
887 * equations.
888 *
889 * @code
890 *   const unsigned int n_independent_variables = local_dof_indices.size();
891 *   const unsigned int n_dependent_variables = dofs_per_cell;
892 *   ADHelper ad_helper(n_independent_variables, n_dependent_variables);
893 *  
894 * @endcode
895 *
896 * Next we inform the helper of the values of the solution, i.e., the
897 * actual values for @f$U_j@f$ about which we
898 * wish to linearize. As this is done on each element individually, we
899 * have to extract the solution coefficients from the global solution
900 * vector. In other words, we define all of those coefficients @f$U_j@f$
901 * where @f$j@f$ is a local degree of freedom as the independent variables
902 * that enter the computation of the vector @f$F(U)^{K}@f$ (the dependent
903 * function).
904 *
905
906 *
907 * Then we get the complete set of degree of freedom values as
908 * represented by auto-differentiable numbers. The operations
909 * performed with these variables are tracked by the AD library
910 * from this point until the object goes out of scope. So it is
911 * <em>precisely these variables</em> with respect to which we will
912 * compute derivatives of the residual entries.
913 *
914 * @code
915 *   ad_helper.register_dof_values(current_solution, local_dof_indices);
916 *  
917 *   const std::vector<ADNumberType> &dof_values_ad =
918 *   ad_helper.get_sensitive_dof_values();
919 *  
920 * @endcode
921 *
922 * Then we do some problem specific tasks, the first being to
923 * compute all values, (spatial) gradients, and the like based on
924 * "sensitive" AD degree of freedom values. In this instance we are
925 * retrieving the solution gradients at each quadrature point. Observe
926 * that the solution gradients are now sensitive
927 * to the values of the degrees of freedom as they use the @p ADNumberType
928 * as the scalar type and the @p dof_values_ad vector provides the local
929 * DoF values.
930 *
931 * @code
932 *   std::vector<Tensor<1, dim, ADNumberType>> old_solution_gradients(
933 *   fe_values.n_quadrature_points);
934 *   fe_values[u_fe].get_function_gradients_from_local_dof_values(
935 *   dof_values_ad, old_solution_gradients);
936 *  
937 * @endcode
938 *
939 * The next variable that we declare will store the cell residual vector
940 * contributions. This is rather self-explanatory, save for one
941 * <b>very important</b> detail:
942 * Note that each entry in the vector is hand-initialized with a value
943 * of zero. This is a <em>highly recommended</em> practice, as some AD
944 * libraries appear not to safely initialize the internal data
945 * structures of these number types. Not doing so could lead to some
946 * very hard to understand or detect bugs (as the author of this
947 * program can attest from generally bad experience). So
948 * out of an abundance of caution it's worthwhile zeroing the initial
949 * value explicitly. After that, apart from a sign change the residual
950 * assembly looks much the same as we saw for the cell RHS vector before:
951 * We loop over all quadrature points, ensure that the coefficient now
952 * encodes its dependence on the (sensitive) finite element DoF values by
953 * using the correct `ADNumberType`, and finally we assemble the
954 * components of the residual vector. For complete clarity, the finite
955 * element shape functions (and their gradients, etc.) as well as the
956 * "JxW" values remain scalar
957 * valued, but the @p coeff and the @p old_solution_gradients at each
958 * quadrature point are computed in terms of the independent
959 * variables.
960 *
961 * @code
962 *   std::vector<ADNumberType> residual_ad(n_dependent_variables,
963 *   ADNumberType(0.0));
964 *   for (const unsigned int q : fe_values.quadrature_point_indices())
965 *   {
966 *   const ADNumberType coeff =
967 *   1.0 / std::sqrt(1.0 + old_solution_gradients[q] *
968 *   old_solution_gradients[q]);
969 *  
970 *   for (const unsigned int i : fe_values.dof_indices())
971 *   {
972 *   residual_ad[i] += (fe_values.shape_grad(i, q) // \nabla \phi_i
973 *   * coeff // * a_n
974 *   * old_solution_gradients[q]) // * \nabla u_n
975 *   * fe_values.JxW(q); // * dx
976 *   }
977 *   }
978 *  
979 * @endcode
980 *
981 * Once we have the full cell residual vector computed, we can register
982 * it with the helper class.
983 *
984
985 *
986 * Thereafter, we compute the residual values (basically,
987 * extracting the real values from what we already computed) and
988 * their Jacobian (the linearization of each residual component
989 * with respect to all cell DoFs) at the evaluation point. For
990 * the purposes of assembly into the global linear system, we
991 * have to respect the sign difference between the residual and
992 * the RHS contribution: For Newton's method, the right hand
993 * side vector needs to be equal to the *negative* residual
994 * vector.
995 *
996 * @code
997 *   ad_helper.register_residual_vector(residual_ad);
998 *  
999 *   ad_helper.compute_residual(cell_rhs);
1000 *   cell_rhs *= -1.0;
1001 *  
1002 *   ad_helper.compute_linearization(cell_matrix);
1003 *   };
1004 *  
1005 * @endcode
1006 *
1007 * The remainder of the function equals what we had previously:
1008 *
1009 * @code
1010 *   const auto copier = [this](const CopyData &copy_data) {
1011 *   const FullMatrix<double> &cell_matrix = copy_data.matrices[0];
1012 *   const Vector<double> &cell_rhs = copy_data.vectors[0];
1013 *   const std::vector<types::global_dof_index> &local_dof_indices =
1014 *   copy_data.local_dof_indices[0];
1015 *  
1016 *   zero_constraints.distribute_local_to_global(
1017 *   cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
1018 *   };
1019 *  
1020 *   MeshWorker::mesh_loop(dof_handler.active_cell_iterators(),
1021 *   cell_worker,
1022 *   copier,
1023 *   sample_scratch_data,
1024 *   sample_copy_data,
1025 *   MeshWorker::assemble_own_cells);
1026 *   }
1027 *  
1028 * @endcode
1029 *
1030 *
1031 * <a name="step_72-Assemblyviadifferentiationoftheenergyfunctional"></a>
1032 * <h5>Assembly via differentiation of the energy functional</h5>
1033 *
1034
1035 *
1036 * In this third approach, we compute residual and Jacobian as first
1037 * and second derivatives of the local energy functional
1038 * @f[
1039 * E\left( U \right)^K
1040 * \dealcoloneq \int\limits_{K} \Psi \left( u \right) \, dV
1041 * \approx \sum\limits_{q}^{n_{\textrm{q-points}}} \Psi \left( u \left(
1042 * \mathbf{X}_{q} \right) \right) \underbrace{\vert J_{q} \vert \times
1043 * W_{q}}_{\text{JxW(q)}}
1044 * @f]
1045 * with the energy density given by
1046 * @f[
1047 * \Psi \left( u \right) = \sqrt{1+|\nabla u|^{2}} .
1048 * @f]
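 *
 * It is worth convincing ourselves that differentiating this energy with
 * respect to a solution coefficient really does return the residual stated
 * earlier: since @f$\partial \nabla u / \partial U_i = \nabla \varphi_i@f$,
 * we have
 * @f[
 * \frac{\partial E(U)^K}{\partial U_i}
 * = \int\limits_K \frac{\nabla u}{\sqrt{1+|\nabla u|^{2}}} \cdot
 * \nabla \varphi_i \, dV = F(U)^K_i .
 * @f]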
1049 *
1050
1051 *
1052 * Let us again see how this is done:
1053 *
1054 * @code
1055 *   template <int dim>
1056 *   void MinimalSurfaceProblem<dim>::assemble_system_using_energy_functional()
1057 *   {
1058 *   system_matrix = 0;
1059 *   system_rhs = 0;
1060 *  
1061 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1062 *  
1063 *   using ScratchData = MeshWorker::ScratchData<dim>;
1064 *   using CopyData = MeshWorker::CopyData<1, 1, 1>;
1065 *   using CellIteratorType = decltype(dof_handler.begin_active());
1066 *  
1067 *   const ScratchData sample_scratch_data(fe,
1068 *   quadrature_formula,
1069 *   update_gradients |
1070 *   update_quadrature_points |
1071 *   update_JxW_values);
1072 *   const CopyData sample_copy_data(dofs_per_cell);
1073 *  
1074 * @endcode
1075 *
1076 * In this implementation of the assembly process, we choose the helper
1077 * class that will automatically compute both the residual and its
1078 * linearization from the cell contribution to an energy functional using
1079 * nested Sacado forward automatic differentiation types.
1080 * The selected number types can be used to compute both first and
1081 * second derivatives. We require this, as the residual is defined as the
1082 * sensitivity of the potential energy with respect to the DoF values (i.e.
1083 * its gradient). We'll then need to linearize the residual, implying that
1084 * second derivatives of the potential energy must be computed. You might
1085 * want to compare this with the definition of `ADHelper` used in the
1086 * previous function, where we used
1087 * `Differentiation::AD::ResidualLinearization<Differentiation::AD::NumberTypes::sacado_dfad,double>`.
1088 *
1089 * @code
1090 *   using ADHelper = Differentiation::AD::EnergyFunctional<
1091 *   Differentiation::AD::NumberTypes::sacado_dfad_dfad,
1092 *   double>;
1093 *   using ADNumberType = typename ADHelper::ad_type;
1094 *  
1095 *   const FEValuesExtractors::Scalar u_fe(0);
1096 *  
1097 * @endcode
1098 *
1099 * Let us then again define the lambda function that does the integration on
1100 * a cell.
1101 *
1102
1103 *
1104 * To initialize an instance of the helper class, we now only require
1105 * that the number of independent variables (that is, the number
1106 * of degrees of freedom associated with the element solution vector)
1107 * is known up front. This is because the second-derivative matrix that
1108 * results from an energy functional is necessarily square (and also,
1109 * incidentally, symmetric).
1110 *
1111 * @code
1112 *   const auto cell_worker = [&u_fe, this](const CellIteratorType &cell,
1113 *   ScratchData &scratch_data,
1114 *   CopyData &copy_data) {
1115 *   const auto &fe_values = scratch_data.reinit(cell);
1116 *  
1117 *   FullMatrix<double> &cell_matrix = copy_data.matrices[0];
1118 *   Vector<double> &cell_rhs = copy_data.vectors[0];
1119 *   std::vector<types::global_dof_index> &local_dof_indices =
1120 *   copy_data.local_dof_indices[0];
1121 *   cell->get_dof_indices(local_dof_indices);
1122 *  
1123 *   const unsigned int n_independent_variables = local_dof_indices.size();
1124 *   ADHelper ad_helper(n_independent_variables);
1125 *  
1126 * @endcode
1127 *
1128 * Once more, we register all cell DoF values with the helper, followed
1129 * by extracting the "sensitive" variant of these values that are to be
1130 * used in subsequent operations that must be differentiated -- one of
1131 * those being the calculation of the solution gradients.
1132 *
1133 * @code
1134 *   ad_helper.register_dof_values(current_solution, local_dof_indices);
1135 *  
1136 *   const std::vector<ADNumberType> &dof_values_ad =
1137 *   ad_helper.get_sensitive_dof_values();
1138 *  
1139 *   std::vector<Tensor<1, dim, ADNumberType>> old_solution_gradients(
1140 *   fe_values.n_quadrature_points);
1141 *   fe_values[u_fe].get_function_gradients_from_local_dof_values(
1142 *   dof_values_ad, old_solution_gradients);
1143 *  
1144 * @endcode
1145 *
1146 * We next create a variable that stores the cell total energy.
1147 * Once more we emphasize that we explicitly zero-initialize this value,
1148 * thereby ensuring the integrity of the data for this starting value.
1149 *
1150
1151 *
1152 * The aim for our approach is then to compute the cell total
1153 * energy, which is the sum of the internal energy (due to the energy
1154 * density defined above) and the external energy (due to right hand
1155 * side functions, typically linear in @f$U@f$). In this particular
1156 * case, we have no external energies (e.g., from source terms or
1157 * Neumann boundary conditions), so we'll focus on the internal energy part.
1158 *
1159
1160 *
1161 * In fact, computing @f$E(U)^K@f$ is almost trivial, requiring only
1162 * the following lines:
1163 *
1164 * @code
1165 *   ADNumberType energy_ad = ADNumberType(0.0);
1166 *   for (const unsigned int q : fe_values.quadrature_point_indices())
1167 *   {
1168 *   const ADNumberType psi = std::sqrt(1.0 + old_solution_gradients[q] *
1169 *   old_solution_gradients[q]);
1170 *  
1171 *   energy_ad += psi * fe_values.JxW(q);
1172 *   }
1173 *  
1174 * @endcode
1175 *
1176 * After we've computed the total energy on this cell, we'll
1177 * register it with the helper. Based on that, we may now
1178 * compute the desired quantities, namely the residual values
1179 * and their Jacobian at the evaluation point. As before, the
1180 * Newton right hand side needs to be the negative of the
1181 * residual:
1182 *
1183 * @code
1184 *   ad_helper.register_energy_functional(energy_ad);
1185 *  
1186 *   ad_helper.compute_residual(cell_rhs);
1187 *   cell_rhs *= -1.0;
1188 *  
1189 *   ad_helper.compute_linearization(cell_matrix);
1190 *   };
1191 *  
1192 * @endcode
1193 *
1194 * As in the previous two functions, the remainder of the function is as
1195 * before:
1196 *
1197 * @code
1198 *   const auto copier = [this](const CopyData &copy_data) {
1199 *   const FullMatrix<double> &cell_matrix = copy_data.matrices[0];
1200 *   const Vector<double> &cell_rhs = copy_data.vectors[0];
1201 *   const std::vector<types::global_dof_index> &local_dof_indices =
1202 *   copy_data.local_dof_indices[0];
1203 *  
1204 *   zero_constraints.distribute_local_to_global(
1205 *   cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
1206 *   };
1207 *  
1208 *   MeshWorker::mesh_loop(dof_handler.active_cell_iterators(),
1209 *   cell_worker,
1210 *   copier,
1211 *   sample_scratch_data,
1212 *   sample_copy_data,
1213 *   MeshWorker::assemble_own_cells);
1214 *   }
1215 *  
1216 *  
1217 * @endcode
1218 *
1219 *
1220 * <a name="step_72-MinimalSurfaceProblemsolve"></a>
1221 * <h4>MinimalSurfaceProblem::solve</h4>
1222 *
1223
1224 *
1225 * The solve function is the same as is used in @ref step_15 "step-15".
1226 *
1227 * @code
1228 *   template <int dim>
1229 *   void MinimalSurfaceProblem<dim>::solve()
1230 *   {
1231 *   SolverControl solver_control(system_rhs.size(),
1232 *   system_rhs.l2_norm() * 1e-6);
1233 *   SolverCG<Vector<double>> solver(solver_control);
1234 *  
1235 *   PreconditionSSOR<SparseMatrix<double>> preconditioner;
1236 *   preconditioner.initialize(system_matrix, 1.2);
1237 *  
1238 *   solver.solve(system_matrix, newton_update, system_rhs, preconditioner);
1239 *  
1240 *   zero_constraints.distribute(newton_update);
1241 *  
1242 *   const double alpha = determine_step_length();
1243 *   current_solution.add(alpha, newton_update);
1244 *   }
1245 *  
1246 *  
1247 * @endcode
1248 *
1249 *
1250 * <a name="step_72-MinimalSurfaceProblemrefine_mesh"></a>
1251 * <h4>MinimalSurfaceProblem::refine_mesh</h4>
1252 *
1253
1254 *
1255 * Nothing has changed since @ref step_15 "step-15" with respect to the mesh refinement
1256 * procedure and transfer of the solution between adapted meshes.
1257 *
1258 * @code
1259 *   template <int dim>
1260 *   void MinimalSurfaceProblem<dim>::refine_mesh()
1261 *   {
1262 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
1263 *  
1264 *   KellyErrorEstimator<dim>::estimate(
1265 *   dof_handler,
1266 *   QGauss<dim - 1>(fe.degree + 1),
1267 *   std::map<types::boundary_id, const Function<dim> *>(),
1268 *   current_solution,
1269 *   estimated_error_per_cell);
1270 *  
1271 *   GridRefinement::refine_and_coarsen_fixed_number(triangulation,
1272 *   estimated_error_per_cell,
1273 *   0.3,
1274 *   0.03);
1275 *  
1276 *   triangulation.prepare_coarsening_and_refinement();
1277 *  
1278 *   SolutionTransfer<dim> solution_transfer(dof_handler);
1279 *   const Vector<double> coarse_solution = current_solution;
1280 *   solution_transfer.prepare_for_coarsening_and_refinement(coarse_solution);
1281 *  
1282 *   triangulation.execute_coarsening_and_refinement();
1283 *  
1284 *   setup_system();
1285 *  
1286 *   solution_transfer.interpolate(current_solution);
1287 *   nonzero_constraints.distribute(current_solution);
1288 *   }
1289 *  
1290 *  
1291 *  
1292 * @endcode
1293 *
1294 *
1295 * <a name="step_72-MinimalSurfaceProblemcompute_residual"></a>
1296 * <h4>MinimalSurfaceProblem::compute_residual</h4>
1297 *
1298
1299 *
1300 * ... as does the function used to compute the residual during the
1301 * solution iteration procedure. One could replace this by
1302 * differentiation of the energy functional if one really wanted,
1303 * but for simplicity we here simply copy what we already had in
1304 * @ref step_15 "step-15".
1305 *
1306 * @code
1307 *   template <int dim>
1308 *   double MinimalSurfaceProblem<dim>::compute_residual(const double alpha) const
1309 *   {
1310 *   Vector<double> residual(dof_handler.n_dofs());
1311 *  
1312 *   Vector<double> evaluation_point(dof_handler.n_dofs());
1313 *   evaluation_point = current_solution;
1314 *   evaluation_point.add(alpha, newton_update);
1315 *  
1316 *   FEValues<dim> fe_values(fe,
1317 *   quadrature_formula,
1318 *   update_gradients |
1319 *   update_JxW_values);
1320 *  
1321 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1322 *   const unsigned int n_q_points = quadrature_formula.size();
1323 *  
1324 *   Vector<double> cell_residual(dofs_per_cell);
1325 *   std::vector<Tensor<1, dim>> gradients(n_q_points);
1326 *  
1327 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1328 *  
1329 *   for (const auto &cell : dof_handler.active_cell_iterators())
1330 *   {
1331 *   cell_residual = 0;
1332 *   fe_values.reinit(cell);
1333 *  
1334 *   fe_values.get_function_gradients(evaluation_point, gradients);
1335 *  
1336 *   for (unsigned int q = 0; q < n_q_points; ++q)
1337 *   {
1338 *   const double coeff =
1339 *   1.0 / std::sqrt(1.0 + gradients[q] * gradients[q]);
1340 *  
1341 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1342 *   cell_residual(i) -= (fe_values.shape_grad(i, q) // \nabla \phi_i
1343 *   * coeff // * a_n
1344 *   * gradients[q] // * \nabla u_n
1345 *   * fe_values.JxW(q)); // * dx
1346 *   }
1347 *  
1348 *   cell->get_dof_indices(local_dof_indices);
1349 *   zero_constraints.distribute_local_to_global(cell_residual,
1350 *   local_dof_indices,
1351 *   residual);
1352 *   }
1353 *  
1354 *   return residual.l2_norm();
1355 *   }
1356 *  
1357 *  
1358 *  
1359 * @endcode
1360 *
1361 *
1362 * <a name="step_72-MinimalSurfaceProblemdetermine_step_length"></a>
1363 * <h4>MinimalSurfaceProblem::determine_step_length</h4>
1364 *
1365
1366 *
1367 * The choice of step length (or, under-relaxation factor) for the nonlinear
1368 * iterations procedure remains fixed at the value chosen and discussed in
1369 * @ref step_15 "step-15".
1370 *
1371 * @code
1372 *   template <int dim>
1373 *   double MinimalSurfaceProblem<dim>::determine_step_length() const
1374 *   {
1375 *   return 0.1;
1376 *   }
1377 *  
1378 *  
1379 *  
1380 * @endcode
1381 *
1382 *
1383 * <a name="step_72-MinimalSurfaceProblemoutput_results"></a>
1384 * <h4>MinimalSurfaceProblem::output_results</h4>
1385 *
1386
1387 *
1388 * This last function to be called from `run()` outputs the current solution
1389 * (and the Newton update) in graphical form as a VTU file. It is entirely the
1390 * same as what has been used in previous tutorials.
1391 *
1392 * @code
1393 *   template <int dim>
1394 *   void MinimalSurfaceProblem<dim>::output_results(
1395 *   const unsigned int refinement_cycle) const
1396 *   {
1397 *   DataOut<dim> data_out;
1398 *  
1399 *   data_out.attach_dof_handler(dof_handler);
1400 *   data_out.add_data_vector(current_solution, "solution");
1401 *   data_out.add_data_vector(newton_update, "update");
1402 *   data_out.build_patches();
1403 *  
1404 *   const std::string filename =
1405 *   "solution-" + Utilities::int_to_string(refinement_cycle, 2) + ".vtu";
1406 *   std::ofstream output(filename);
1407 *   data_out.write_vtu(output);
1408 *   }
1409 *  
1410 *  
1411 * @endcode
1412 *
1413 *
1414 * <a name="step_72-MinimalSurfaceProblemrun"></a>
1415 * <h4>MinimalSurfaceProblem::run</h4>
1416 *
1417
1418 *
1419 * In the run function, most remains the same as was first implemented
1420 * in @ref step_15 "step-15". The only observable changes are that we can now choose (via
1421 * the parameter file) what the final acceptable tolerance for the system
1422 * residual is, and that we can choose which method of assembly we wish to
1423 * utilize. To make the second choice clear, we output to the console some
1424 * message which indicates the selection. Since we're interested in comparing
1425 * the time taken to assemble for each of the three methods, we've also
1426 * added a timer that keeps track of how much time is spent during assembly.
1427 * We also track the time taken to solve the linear system, so that we can
1428 * contrast those numbers to the part of the code which would normally take
1429 * the longest time to execute.
1430 *
1431 * @code
1432 *   template <int dim>
1433 *   void MinimalSurfaceProblem<dim>::run(const int formulation,
1434 *   const double tolerance)
1435 *   {
1436 *   std::cout << "******** Assembly approach ********" << std::endl;
1437 *   const std::array<std::string, 3> method_descriptions = {
1438 *   {"Unassisted implementation (full hand linearization).",
1439 *   "Automated linearization of the finite element residual.",
1440 *   "Automated computation of finite element residual and linearization using a variational formulation."}};
1441 *   AssertIndexRange(formulation, method_descriptions.size());
1442 *   std::cout << method_descriptions[formulation] << std::endl << std::endl;
1443 *  
1444 *  
1445 *   TimerOutput timer(std::cout, TimerOutput::summary, TimerOutput::wall_times);
1446 *  
1447 *   GridGenerator::hyper_ball(triangulation);
1448 *   triangulation.refine_global(2);
1449 *  
1450 *   setup_system();
1451 *   nonzero_constraints.distribute(current_solution);
1452 *  
1453 *   double last_residual_norm = std::numeric_limits<double>::max();
1454 *   unsigned int refinement_cycle = 0;
1455 *   do
1456 *   {
1457 *   std::cout << "Mesh refinement step " << refinement_cycle << std::endl;
1458 *  
1459 *   if (refinement_cycle != 0)
1460 *   refine_mesh();
1461 *  
1462 *   std::cout << " Initial residual: " << compute_residual(0) << std::endl;
1463 *  
1464 *   for (unsigned int inner_iteration = 0; inner_iteration < 5;
1465 *   ++inner_iteration)
1466 *   {
1467 *   {
1468 *   TimerOutput::Scope t(timer, "Assemble");
1469 *  
1470 *   if (formulation == 0)
1471 *   assemble_system_unassisted();
1472 *   else if (formulation == 1)
1473 *   assemble_system_with_residual_linearization();
1474 *   else if (formulation == 2)
1475 *   assemble_system_using_energy_functional();
1476 *   else
1477 *   AssertThrow(false, ExcNotImplemented());
1478 *   }
1479 *  
1480 *   last_residual_norm = system_rhs.l2_norm();
1481 *  
1482 *   {
1483 *   TimerOutput::Scope t(timer, "Solve");
1484 *   solve();
1485 *   }
1486 *  
1487 *  
1488 *   std::cout << " Residual: " << compute_residual(0) << std::endl;
1489 *   }
1490 *  
1491 *   output_results(refinement_cycle);
1492 *  
1493 *   ++refinement_cycle;
1494 *   std::cout << std::endl;
1495 *   }
1496 *   while (last_residual_norm > tolerance);
1497 *   }
1498 *   } // namespace Step72
1499 *  
1500 * @endcode
1501 *
1502 *
1503 * <a name="step_72-Themainfunction"></a>
1504 * <h4>The main function</h4>
1505 *
1506
1507 *
1508 * Finally the main function. This follows the scheme of most other main
1509 * functions, with two obvious exceptions:
1510 * - We call Utilities::MPI::MPI_InitFinalize in order to set up (via a hidden
1511 * default parameter) the number of threads used during the execution of
1512 * multithreaded tasks.
1513 * - We also have a few lines dedicated to reading in or initializing the
1514 * user-defined parameters that will be considered during the execution of the
1515 * program.
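 *
 * On the first point: should one wish to pin the number of threads rather
 * than rely on the hidden default parameter, the third constructor argument
 * of Utilities::MPI::MPI_InitFinalize can be set explicitly. The value `1`
 * below is purely illustrative:
 *
 * @code
 * // Limit the program to a single thread; by default, all available
 * // cores may be used for the multithreaded assembly.
 * Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
 * @endcode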
1516 *
1517 * @code
1518 *   int main(int argc, char *argv[])
1519 *   {
1520 *   try
1521 *   {
1522 *   using namespace Step72;
1523 *  
1524 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
1525 *  
1526 *   std::string prm_file;
1527 *   if (argc > 1)
1528 *   prm_file = argv[1];
1529 *   else
1530 *   prm_file = "parameters.prm";
1531 *  
1532 *   const MinimalSurfaceProblemParameters parameters;
1533 *   ParameterAcceptor::initialize(prm_file);
1534 *  
1535 *   MinimalSurfaceProblem<2> minimal_surface_problem_2d;
1536 *   minimal_surface_problem_2d.run(parameters.formulation,
1537 *   parameters.tolerance);
1538 *   }
1539 *   catch (std::exception &exc)
1540 *   {
1541 *   std::cerr << std::endl
1542 *   << std::endl
1543 *   << "----------------------------------------------------"
1544 *   << std::endl;
1545 *   std::cerr << "Exception on processing: " << std::endl
1546 *   << exc.what() << std::endl
1547 *   << "Aborting!" << std::endl
1548 *   << "----------------------------------------------------"
1549 *   << std::endl;
1550 *  
1551 *   return 1;
1552 *   }
1553 *   catch (...)
1554 *   {
1555 *   std::cerr << std::endl
1556 *   << std::endl
1557 *   << "----------------------------------------------------"
1558 *   << std::endl;
1559 *   std::cerr << "Unknown exception!" << std::endl
1560 *   << "Aborting!" << std::endl
1561 *   << "----------------------------------------------------"
1562 *   << std::endl;
1563 *   return 1;
1564 *   }
1565 *   return 0;
1566 *   }
1567 * @endcode
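 *
 * For reference, a minimal parameter file for this program might look as
 * follows. The entry names are assumptions based on the
 * `MinimalSurfaceProblemParameters` class (declared in a part of the program
 * not shown in this excerpt), so the file shipped with the tutorial may
 * differ in its comments and default values:
 *
 * @code
 * # Entry names are assumptions; see the MinimalSurfaceProblemParameters class.
 * set Formulation = 0
 * set Tolerance   = 1e-3
 * @endcode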
<a name="step_72-Results"></a><h1>Results</h1>


Since there was no change to the physics of the problem that was first analyzed
in @ref step_15 "step-15", there is nothing to report about that. The only outwardly noticeable
difference between them is that, by default, this program will only run 9 mesh
refinement steps (as opposed to @ref step_15 "step-15", which executes 11 refinements).
This will be observable in the simulation status that appears between the
header text that prints which assembly method is being used, and the final
timings. (All timings reported below were obtained in release mode.)

@code
Mesh refinement step 0
  Initial residual: 1.53143
  Residual: 1.08746
  Residual: 0.966748
  Residual: 0.859602
  Residual: 0.766462
  Residual: 0.685475

...

Mesh refinement step 9
  Initial residual: 0.00924594
  Residual: 0.00831928
  Residual: 0.0074859
  Residual: 0.0067363
  Residual: 0.00606197
  Residual: 0.00545529
@endcode

So what is interesting for us to compare is how long the assembly process takes
for the three different implementations, and to put that into some greater context.
Below is the output for the hand linearization (as computed on a circa 2012
four core, eight thread laptop -- but we're only really interested in the
relative time between the different implementations):
@code
******** Assembly approach ********
Unassisted implementation (full hand linearization).

...

+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |      35.1s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble                        |        50 |      1.56s |       4.5% |
| Solve                           |        50 |      30.8s |        88% |
+---------------------------------+-----------+------------+------------+
@endcode
And for the implementation that linearizes the residual in an automated
manner using the Sacado dynamic forward AD number type:
@code
******** Assembly approach ********
Automated linearization of the finite element residual.

...

+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |      40.1s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble                        |        50 |       8.8s |        22% |
| Solve                           |        50 |      28.6s |        71% |
+---------------------------------+-----------+------------+------------+
@endcode
And, lastly, for the implementation that computes both the residual and
its linearization directly from an energy functional (using nested Sacado
dynamic forward AD numbers):
@code
******** Assembly approach ********
Automated computation of finite element residual and linearization using a variational formulation.

...

+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |      48.8s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble                        |        50 |      16.7s |        34% |
| Solve                           |        50 |      29.3s |        60% |
+---------------------------------+-----------+------------+------------+
@endcode

It's evident that the more work that is passed off to the automatic differentiation
framework to perform, the more time is spent during the assembly process. Accumulated
over all refinement steps, using one level of automatic differentiation resulted
in @f$5.65 \times@f$ more computational time being spent in the assembly stage when
compared to unassisted assembly, while assembling the discrete linear system took
@f$10.7 \times@f$ longer when deriving directly from the energy functional.
Unsurprisingly, the overall time spent solving the linear system remained unchanged.
This means that the proportion of time spent in the solve phase relative to the
assembly phase shifted significantly with the number of times automated
differentiation was performed at the finite element level. For many, this might
mean that leveraging higher-order differentiation (at the finite element level)
in production code leads to an unacceptable overhead, but it may still be useful
during the prototyping phase. A good compromise between the two may, therefore,
be the automated linearization of the finite element residual, which offers a
lot of convenience at a measurable, but perhaps not unacceptable, cost.
Alternatively, one could consider not re-building the Newton matrix in every
step -- a topic that is explored in substantial depth in @ref step_77 "step-77".
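
As a quick sanity check, the @f$5.65 \times@f$ and @f$10.7 \times@f$ factors
quoted above can (up to rounding of the table entries) be read off directly
from the "Assemble" rows of the timing summaries:
@f[
\frac{8.8\,\text{s}}{1.56\,\text{s}} \approx 5.6 ,
\qquad
\frac{16.7\,\text{s}}{1.56\,\text{s}} \approx 10.7 .
@f]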

Of course, in practice the actual overhead is very much dependent on the problem being evaluated
(e.g., how many components there are in the solution, what the nature of the function
being differentiated is, etc.). So the exact results presented here should be
interpreted within the context of this scalar problem alone, and when it comes to
other problems, some preliminary investigation by the user is certainly warranted.


<a name="step_72-Possibilitiesforextensions"></a><h3> Possibilities for extensions </h3>


Like @ref step_71 "step-71", there are a few items related to automatic differentiation that could
be evaluated further:
- The use of other AD frameworks should be investigated, with the outlook that
  alternative implementations may provide performance benefits.
- It is also worth evaluating AD number types other than those that have been
  hard-coded into this tutorial. With regard to twice differentiable types
  employed at the finite-element level, mixed differentiation modes ("RAD-FAD")
  should in principle be more computationally efficient than the single
  mode ("FAD-FAD") types employed here. The reason that the RAD-FAD type was not
  selected by default is that, at the time of writing, there remain some
  bugs in its implementation within the Sacado library that lead to memory leaks.
  This is documented in the @ref auto_symb_diff topic.
- It might be the case that using reduced precision types (i.e., `float`) as the
  scalar types for the AD numbers could yield a reduction in computational
  expense during assembly. Using `float` as the data type for the
  matrix and the residual is not unreasonable, given that the Newton
  update is only meant to get us closer to the solution, but not
  actually *to* the solution; as a consequence, it makes sense to
  consider using reduced-precision data types for computing these
  updates, and then accumulating these updates in a solution vector
  that uses the full `double` precision accuracy. (A minimal sketch of
  this idea is shown after this list.)
- One further method of possibly reducing resources during assembly is to frame
  the AD implementations as a constitutive model. This would be similar to the
  approach adopted in @ref step_71 "step-71", and pushes the starting point for the automatic
  differentiation one level higher up the chain of computations. This, in turn,
  means that fewer operations are tracked by the AD library, thereby reducing the
  cost of differentiating (even though one would perform the differentiation at
  each cell quadrature point).
- @ref step_77 "step-77" is yet another variation of @ref step_15 "step-15" that addresses a very
  different part of the problem: Line search and whether it is
  necessary to re-build the Newton matrix in every nonlinear
  iteration. Given that the results above show that using automatic
  differentiation comes at a cost, the techniques in @ref step_77 "step-77" have the
  potential to offset these costs to some degree. It would therefore
  be quite interesting to combine the current program with the
  techniques in @ref step_77 "step-77". For production codes, this would certainly be
  the way to go.
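
As a minimal sketch of the reduced-precision idea promised above: the scalar
type is the second template argument of the AD helper classes, so switching
to `float` amounts to the following (assuming the rest of the assembly loop
is also adjusted to consistently use `float`-valued local matrices and
vectors; this variant has not been timed here):

@code
// Same helper as before, but with float as the underlying scalar type.
using ADHelperFloat     = Differentiation::AD::ResidualLinearization<
  Differentiation::AD::NumberTypes::sacado_dfad,
  float>;
using ADNumberTypeFloat = typename ADHelperFloat::ad_type;
@endcode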
 *
 *
<a name="step_72-PlainProg"></a>
<h1> The plain program</h1>
@include "step-72.cc"
1725*/