Distributed_LDG_Method.h
1
452 *  
453 * @endcode
454 *
455 *
456 * <a name="Functions.cc-Functionscc"></a>
457 * <h3>Functions.cc</h3>
 458 * In this file we keep the right hand side function, the Dirichlet boundary
 459 * conditions, and the exact solution to our Poisson equation problem. Since
 460 * these classes and functions have been discussed extensively in
 461 * the deal.ii tutorials, we won't discuss them any further.
462 *
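 * The functions below implement a manufactured solution: the exact scalar
 * solution is @f$ u = \cos(2\pi y) - \sin(2\pi x) - x @f$, the corresponding
 * vector field is
 * @f$ \textbf{q} = -\nabla u = \left(1 + 2\pi\cos(2\pi x),\; 2\pi\sin(2\pi y)\right) @f$,
 * and the right hand side is therefore
 * @f$ f = \nabla \cdot \textbf{q} = -\Delta u
 * = 4\pi^{2}\left(\cos(2\pi y) - \sin(2\pi x)\right) @f$.
 *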
463 * @code
464 *   #include <deal.II/base/function.h>
465 *   #include <deal.II/base/tensor_function.h>
466 *   #include <deal.II/lac/vector.h>
467 *   #include <cmath>
468 *  
469 *   using namespace dealii;
470 *  
471 *   template <int dim>
472 *   class RightHandSide : public Function<dim>
473 *   {
474 *   public:
475 *   RightHandSide() : Function<dim>(1)
476 *   {}
477 *  
478 *   virtual double value(const Point<dim> &p,
479 *   const unsigned int component = 0 ) const override;
480 *   };
481 *  
482 *   template <int dim>
483 *   class DirichletBoundaryValues : public Function<dim>
484 *   {
485 *   public:
486 *   DirichletBoundaryValues() : Function<dim>(1)
487 *   {}
488 *  
489 *   virtual double value(const Point<dim> &p,
490 *   const unsigned int component = 0 ) const override;
491 *   };
492 *  
493 *   template<int dim>
494 *   class TrueSolution : public Function<dim>
495 *   {
496 *   public:
497 *   TrueSolution() : Function<dim>(dim+1)
498 *   {}
499 *  
500 *   virtual void vector_value(const Point<dim> &p,
 501 *   Vector<double> &values) const override;
502 *   };
503 *  
504 *   template <int dim>
505 *   double
506 *   RightHandSide<dim>::
507 *   value(const Point<dim> &p,
508 *   const unsigned int ) const
509 *   {
510 *   const double x = p[0];
511 *   const double y = p[1];
512 *   return 4*M_PI*M_PI*(cos(2*M_PI*y) - sin(2*M_PI*x));
513 *  
514 *   }
515 *  
516 *   template <int dim>
517 *   double
518 *   DirichletBoundaryValues<dim>::
519 *   value(const Point<dim> &p,
520 *   const unsigned int ) const
521 *   {
522 *   const double x = p[0];
523 *   const double y = p[1];
524 *   return cos(2*M_PI*y) -sin(2*M_PI*x) - x;
525 *   }
526 *  
527 *  
528 *   template <int dim>
529 *   void
530 *   TrueSolution<dim>::
531 *   vector_value(const Point<dim> &p,
532 *   Vector<double> &values) const
533 *   {
534 *   Assert(values.size() == dim+1,
535 *   ExcDimensionMismatch(values.size(), dim+1) );
536 *  
537 *   double x = p[0];
538 *   double y = p[1];
539 *  
540 *   values(0) = 1 + 2*M_PI*cos(2*M_PI*x);
541 *   values(1) = 2*M_PI*sin(2*M_PI*y);
542 *  
543 *   values(2) = cos(2*M_PI*y) - sin(2*M_PI*x) - x;
544 *   }
545 * @endcode
546
547
548<a name="ann-LDGPoisson.cc"></a>
549<h1>Annotated version of LDGPoisson.cc</h1>
550 *
551 *
552 *
553 *
554 * @code
555 *   /* -----------------------------------------------------------------------------
556 *   *
557 *   * SPDX-License-Identifier: LGPL-2.1-or-later
558 *   * Copyright (C) 2017 by Michael Harmon
559 *   *
560 *   * This file is part of the deal.II code gallery.
561 *   *
562 *   * -----------------------------------------------------------------------------
563 *   */
564 *  
565 * @endcode
566 *
567 *
568 * <a name="LDGPoisson.cc-LDGPoissoncc"></a>
569 * <h3>LDGPoisson.cc</h3>
 570 * The code begins as per usual with a long list of the included
571 * files from the deal.ii library.
572 *
573 * @code
574 *   #include <deal.II/base/quadrature_lib.h>
575 *   #include <deal.II/base/logstream.h>
576 *   #include <deal.II/base/function.h>
577 *   #include <deal.II/base/timer.h>
578 *  
579 *   #include <deal.II/lac/full_matrix.h>
580 *   #include <deal.II/lac/sparse_matrix.h>
581 *  
582 *  
583 *   #include <deal.II/grid/tria.h>
584 *   #include <deal.II/grid/grid_generator.h>
585 *   #include <deal.II/grid/grid_tools.h>
586 *   #include <deal.II/grid/tria_accessor.h>
587 *   #include <deal.II/grid/tria_iterator.h>
588 *  
589 *   #include <deal.II/dofs/dof_handler.h>
590 *   #include <deal.II/dofs/dof_renumbering.h>
591 *   #include <deal.II/dofs/dof_accessor.h>
592 *   #include <deal.II/dofs/dof_tools.h>
593 *  
594 *   #include <fstream>
595 *   #include <iostream>
596 *  
597 * @endcode
598 *
599 * Here's where the classes for the DG methods begin.
600 * We can use either the Lagrange polynomials,
601 *
602 * @code
603 *   #include <deal.II/fe/fe_dgq.h>
604 * @endcode
605 *
606 * or the Legendre polynomials
607 *
608 * @code
609 *   #include <deal.II/fe/fe_dgp.h>
610 * @endcode
611 *
612 * as basis functions. I'll be using the Lagrange polynomials.
613 *
614 * @code
615 *   #include <deal.II/fe/fe_system.h>
616 *   #include <deal.II/fe/fe_values.h>
617 *  
618 *   #include <deal.II/numerics/vector_tools.h>
619 *   #include <deal.II/numerics/matrix_tools.h>
620 *   #include <deal.II/numerics/data_out.h>
621 *  
622 *  
623 * @endcode
624 *
625 * Now we have to load in the deal.ii files that will allow us to use
626 * a distributed computing framework.
627 *
628 * @code
629 *   #include <deal.II/base/utilities.h>
630 *   #include <deal.II/base/index_set.h>
631 *   #include <deal.II/base/conditional_ostream.h>
632 *   #include <deal.II/lac/sparsity_tools.h>
633 *   #include <deal.II/distributed/tria.h>
634 *  
635 *  
636 * @endcode
637 *
638 * Additionally we load the files that will allow us to interact with
639 * the Trilinos library.
640 *
641 * @code
642 *   #include <deal.II/lac/trilinos_sparse_matrix.h>
643 *   #include <deal.II/lac/trilinos_vector.h>
644 *   #include <deal.II/lac/trilinos_solver.h>
645 *  
646 *  
647 * @endcode
648 *
 649 * The Functions.cc file contains all the definitions of the functions we
 650 * will use, i.e. the right hand side function, the boundary conditions
 651 * and the exact solution that we use for testing.
652 *
653 * @code
654 *   #include "Functions.cc"
655 *  
656 *   using namespace dealii;
657 *  
658 *  
659 * @endcode
660 *
661 * Here is the main class for the Local Discontinuous Galerkin method
 662 * applied to Poisson's equation. We won't explain much of the
 663 * class and method declarations, but dive deeper into describing the
 664 * functions when they are defined. The only thing I will mention
665 * about the class declaration is that this is where I labeled
666 * the different types of boundaries using enums.
667 *
668 * @code
669 *   template <int dim>
670 *   class LDGPoissonProblem
671 *   {
672 *  
673 *   public:
674 *   LDGPoissonProblem(const unsigned int degree,
675 *   const unsigned int n_refine);
676 *  
677 *   ~LDGPoissonProblem();
678 *  
679 *   void run();
680 *  
681 *  
682 *   private:
683 *   void make_grid();
684 *  
685 *   void make_dofs();
686 *  
687 *   void assemble_system();
688 *  
689 *   void assemble_cell_terms(const FEValues<dim> &cell_fe,
690 *   FullMatrix<double> &cell_matrix,
691 *   Vector<double> &cell_vector);
692 *  
693 *   void assemble_Neumann_boundary_terms(const FEFaceValues<dim> &face_fe,
694 *   FullMatrix<double> &local_matrix,
695 *   Vector<double> &local_vector);
696 *  
697 *   void assemble_Dirichlet_boundary_terms(const FEFaceValues<dim> &face_fe,
698 *   FullMatrix<double> &local_matrix,
699 *   Vector<double> &local_vector,
700 *   const double &h);
701 *  
702 *   void assemble_flux_terms(const FEFaceValuesBase<dim> &fe_face_values,
703 *   const FEFaceValuesBase<dim> &fe_neighbor_face_values,
704 *   FullMatrix<double> &vi_ui_matrix,
705 *   FullMatrix<double> &vi_ue_matrix,
706 *   FullMatrix<double> &ve_ui_matrix,
707 *   FullMatrix<double> &ve_ue_matrix,
708 *   const double &h);
709 *  
710 *   void distribute_local_flux_to_global(
711 *   const FullMatrix<double> &vi_ui_matrix,
712 *   const FullMatrix<double> &vi_ue_matrix,
713 *   const FullMatrix<double> &ve_ui_matrix,
714 *   const FullMatrix<double> &ve_ue_matrix,
715 *   const std::vector<types::global_dof_index> &local_dof_indices,
716 *   const std::vector<types::global_dof_index> &local_neighbor_dof_indices);
717 *  
718 *   void solve();
719 *  
720 *   void output_results() const;
721 *  
722 *   const unsigned int degree;
723 *   const unsigned int n_refine;
724 *   double penalty;
725 *   double h_max;
726 *   double h_min;
727 *  
728 *   enum
729 *   {
730 *   Dirichlet,
731 *   Neumann
732 *   };
733 *  
734 *   parallel::distributed::Triangulation<dim> triangulation;
735 *   FESystem<dim> fe;
736 *   DoFHandler<dim> dof_handler;
737 *  
738 *   AffineConstraints<double> constraints;
739 *  
740 *   SparsityPattern sparsity_pattern;
741 *  
742 *   TrilinosWrappers::SparseMatrix system_matrix;
743 *   TrilinosWrappers::MPI::Vector locally_relevant_solution;
744 *   TrilinosWrappers::MPI::Vector system_rhs;
745 *  
746 *   ConditionalOStream pcout;
747 *   TimerOutput computing_timer;
748 *  
749 *   SolverControl solver_control;
750 *   TrilinosWrappers::SolverDirect solver;
751 *  
752 *   const RightHandSide<dim> rhs_function;
753 *   const DirichletBoundaryValues<dim> Dirichlet_bc_function;
754 *   const TrueSolution<dim> true_solution;
755 *   };
756 *  
757 *  
758 * @endcode
759 *
760 *
761 * <a name="LDGPoisson.cc-Classconstructoranddestructor"></a>
762 * <h4>Class constructor and destructor</h4>
 763 * The constructor and destructor for this class are very much
 764 * like those for @ref step_40 "step-40". The difference is that we'll be passing
 765 * in an integer, <code>degree</code>, which tells us the maximum order
 766 * of the polynomial to use, as well as <code>n_refine</code>, which is the
 767 * global number of times we refine our mesh. The other main differences
768 * are that we use a FESystem object for our choice of basis
769 * functions. This is reminiscent of the mixed finite element method in
770 * @ref step_20 "step-20", however, in our case we use a FESystem
771 * of the form,
772 *
773
774 *
775 * <code>
776 * fe( FESystem<dim>(FE_DGQ<dim>(degree), dim), 1,
777 * FE_DGQ<dim>(degree), 1)
778 * </code>
779 *
780
781 *
 782 * which tells us that the basis functions contain discontinuous polynomials
783 * of order <code>degree</code> in each of the <code>dim</code> dimensions
784 * for the vector field. For the scalar unknown we
785 * use a discontinuous polynomial of the order <code>degree</code>.
786 * The LDG method for Poisson equations solves for both the primary variable
787 * as well as its gradient, just like the mixed finite element method.
788 * However, unlike the mixed method, the LDG method uses discontinuous
789 * polynomials to approximate both variables.
 790 * The other difference between our constructor and that of @ref step_40 "step-40" is that
 791 * we also instantiate our linear solver in the constructor definition.
792 *
793 * @code
794 *   template <int dim>
795 *   LDGPoissonProblem<dim>::
796 *   LDGPoissonProblem(const unsigned int degree,
797 *   const unsigned int n_refine)
798 *   :
799 *   degree(degree),
800 *   n_refine(n_refine),
801 *   triangulation(MPI_COMM_WORLD,
805 *   fe( FESystem<dim>(FE_DGQ<dim>(degree), dim), 1,
806 *   FE_DGQ<dim>(degree), 1),
807 *   dof_handler(triangulation),
808 *   pcout(std::cout,
809 *   Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0),
810 *   computing_timer(MPI_COMM_WORLD,
811 *   pcout,
814 *   solver_control(1),
815 *   solver(solver_control),
816 *   rhs_function(),
817 *   Dirichlet_bc_function()
818 *   {
819 *   }
820 *  
821 *   template <int dim>
822 *   LDGPoissonProblem<dim>::
823 *   ~LDGPoissonProblem()
824 *   {
825 *   dof_handler.clear();
826 *   }
827 *  
828 * @endcode
829 *
830 *
831 * <a name="LDGPoisson.cc-Make_grid"></a>
832 * <h4>Make_grid</h4>
833 * This function shows how to make a grid using local
834 * refinement and also shows how to label the boundaries
835 * using the defined enum.
836 *
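 * Before the refinement below, the coarse mesh itself is created with one of
 * the GridGenerator functions; a minimal sketch (assuming, as an example, the
 * unit hypercube domain) would be:
 *
 * @code
 *   GridGenerator::hyper_cube(triangulation, 0, 1);
 * @endcode
 *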
837 * @code
838 *   template <int dim>
839 *   void
840 *   LDGPoissonProblem<dim>::
841 *   make_grid()
842 *   {
844 *   triangulation.refine_global(n_refine);
845 *  
846 *   unsigned int local_refine = 2;
847 *   for (unsigned int i =0; i <local_refine; ++i)
848 *   {
 849 *   typename parallel::distributed::Triangulation<dim>::active_cell_iterator
 850 *   cell = triangulation.begin_active(),
851 *   endc = triangulation.end();
852 *  
853 * @endcode
854 *
 855 * We loop over all the cells in the mesh
 856 * and mark the appropriate cells for refinement.
 857 * In this example we only choose cells which are
 858 * near @f$x=0@f$ and @f$x=1@f$ and close to the
 859 * top of the domain. This was just to show that
 860 * the LDG method is working with local
 861 * refinement; more realistic refinement
 862 * strategies are
 863 * discussed elsewhere in the deal.ii
 864 * documentation.
865 *
866 * @code
867 *   for (; cell != endc; ++cell)
868 *   {
869 *   if ((cell->center()[1]) > 0.9 )
870 *   {
871 *   if ((cell->center()[0] > 0.9) || (cell->center()[0] < 0.1))
872 *   cell->set_refine_flag();
873 *   }
874 *   }
875 * @endcode
876 *
877 * Now that we have marked all the cells
878 * that we want to refine locally we can go ahead and
879 * refine them.
880 *
881 * @code
882 *   triangulation.execute_coarsening_and_refinement();
883 *   }
884 *  
885 * @endcode
886 *
887 * To label the boundary faces of the mesh with their
888 * type, i.e. Dirichlet or Neumann,
889 * we loop over all the cells in the mesh and then over
890 * all the faces of each cell. We then have to figure out
 891 * which faces are on the boundary and set the
 892 * <code>boundary_id</code> of all faces
 893 * on the boundary to <code>Dirichlet</code>.
 894 * We remark that one could easily set more complicated
 895 * conditions where there are both Dirichlet and
 896 * Neumann boundaries.
897 *
898 * @code
 899 *   typename parallel::distributed::Triangulation<dim>::cell_iterator
 900 *   cell = triangulation.begin(),
901 *   endc = triangulation.end();
902 *   for (; cell != endc; ++cell)
903 *   {
904 *   for (unsigned int face_no=0;
905 *   face_no < GeometryInfo<dim>::faces_per_cell;
906 *   face_no++)
907 *   {
908 *   if (cell->face(face_no)->at_boundary() )
909 *   cell->face(face_no)->set_boundary_id(Dirichlet);
910 *   }
911 *   }
912 *   }
913 *  
914 * @endcode
915 *
916 *
917 * <a name="LDGPoisson.cc-make_dofs"></a>
918 * <h3>make_dofs</h3>
919 * This function is responsible for distributing the degrees of
920 * freedom (dofs) to the processors and allocating memory for
921 * the global system matrix, <code>system_matrix</code>,
922 * and global right hand side vector, <code>system_rhs</code> .
923 * The dofs are the unknown coefficients for the polynomial
924 * approximation of our solution to Poisson's equation in the scalar
925 * variable and its gradient.
926 *
927 * @code
928 *   template <int dim>
929 *   void
930 *   LDGPoissonProblem<dim>::
931 *   make_dofs()
932 *   {
933 *   TimerOutput::Scope t(computing_timer, "setup");
934 *  
935 * @endcode
936 *
937 * The first step to setting up our linear system is to
 938 * distribute the degrees of freedom (dofs) across the processors;
 939 * this is done with the <code>distribute_dofs()</code>
 940 * method of the DoFHandler. We remark that this is
 941 * the exact same function call that occurs when using deal.ii
 942 * on a single machine; the DoFHandler automatically knows
 943 * that we are in a distributed setting because it was instantiated
 944 * with a distributed triangulation!
945 *
946 * @code
947 *   dof_handler.distribute_dofs(fe);
948 *  
949 * @endcode
950 *
 951 * We now renumber the dofs so that the vector of unknown dofs
952 * that we are solving for, <code>locally_relevant_solution</code>,
953 * corresponds to a vector of the form,
954 *
955
956 *
957 * @f$ \left[\begin{matrix} \textbf{Q} \\ \textbf{U} \end{matrix}\right] @f$
958 *
959 * @code
960 *   DoFRenumbering::component_wise(dof_handler);
961 *  
962 * @endcode
963 *
 964 * Now we get the locally owned dofs, that is, the dofs that are local
 965 * to this processor. These dofs correspond to the entries in the
 966 * matrix and vectors that we will write to.
967 *
968 * @code
969 *   const IndexSet &locally_owned_dofs = dof_handler.locally_owned_dofs();
970 *  
971 * @endcode
972 *
 973 * In addition to the locally owned dofs, we also need the locally
 974 * relevant dofs. These are the dofs that we have read access to and
 975 * need in order to do computations on our processor, but that
 976 * we do not have the ability to write to.
977 *
978 * @code
979 *   const IndexSet locally_relevant_dofs =
980 *   DoFTools::extract_locally_relevant_dofs(dof_handler);
981 *  
982 *   const std::vector<types::global_dof_index> dofs_per_component =
983 *   DoFTools::count_dofs_per_fe_component(dof_handler);
984 *  
985 * @endcode
986 *
 987 * Discontinuous Galerkin methods are fantastic methods in part because
 988 * many of the limitations of traditional finite element methods no longer
 989 * exist. Specifically, the need to use constraint matrices
 990 * to handle hanging nodes is no longer necessary. However,
 991 * we will continue to use the constraint matrices in order to efficiently
 992 * distribute local computations to the global system, i.e. to the
 993 * <code>system_matrix</code> and <code>system_rhs</code>. Therefore, we
 994 * just instantiate the constraint matrix object, and clear and close it.
995 *
996 * @code
997 *   constraints.clear();
998 *   constraints.close();
999 *  
1000 *  
1001 * @endcode
1002 *
1003 * Just like @ref step_40 "step-40" we create a dynamic sparsity pattern
1004 * and distribute it to the processors. Notice how we do not have to
 1005 * explicitly mention that we are using a FESystem for a system of
 1006 * variables instead of a FE_DGQ for a scalar variable,
 1007 * or that we are using a distributed DoFHandler. All these specifics
1008 * are taken care of under the hood by the deal.ii library.
1009 * In order to build the sparsity
1010 * pattern we use the DoFTools::make_flux_sparsity_pattern function
 1011 * since we are using a DG method and need to take into account the DG
1012 * fluxes in the sparsity pattern.
1013 *
1014 * @code
1015 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
1016 *   DoFTools::make_flux_sparsity_pattern(dof_handler,
1017 *   dsp);
1018 *  
1019 *   SparsityTools::distribute_sparsity_pattern(dsp,
1020 *   dof_handler.locally_owned_dofs(),
1021 *   MPI_COMM_WORLD,
1022 *   locally_relevant_dofs);
1023 *  
1024 * @endcode
1025 *
 1026 * Here is one area that I had to learn the hard way. The local
 1027 * discontinuous Galerkin method, like the mixed method with the
 1028 * Raviart-Thomas element, is written
 1029 * in mixed form and will lead to a block-structured matrix.
 1030 * In @ref step_20 "step-20" we see that we initialize the
 1031 * <code>system_matrix</code>
 1032 * such that we explicitly declare it to be block-structured.
1033 * It turns out there are reasons to do this when you are going to be
1034 * using a Schur complement method to solve the system of equations.
1035 * While the LDG method will lead to a block-structured matrix,
1036 * we do not have to explicitly declare our matrix to be one.
1037 * I found that most of the distributed linear solvers did not
1038 * accept block structured matrices and since I was using a
1039 * distributed direct solver it was unnecessary to explicitly use a
1040 * block structured matrix.
1041 *
1042 * @code
1043 *   system_matrix.reinit(locally_owned_dofs,
1044 *   locally_owned_dofs,
1045 *   dsp,
1046 *   MPI_COMM_WORLD);
1047 *  
1048 * @endcode
1049 *
 1050 * The final note that I will make about this subroutine is that
 1051 * we initialize this processor's solution and
 1052 * right hand side vector the exact same way as we did in @ref step_40 "step-40".
1053 * We should note that the <code>locally_relevant_solution</code> solution
1054 * vector includes dofs that are locally relevant to our computations
1055 * while the <code>system_rhs</code> right hand side vector will only
1056 * include dofs that are locally owned by this processor.
1057 *
1058 * @code
1059 *   locally_relevant_solution.reinit(locally_relevant_dofs,
1060 *   MPI_COMM_WORLD);
1061 *  
1062 *   system_rhs.reinit(locally_owned_dofs,
1063 *   locally_relevant_dofs,
1064 *   MPI_COMM_WORLD,
1065 *   true);
1066 *  
1067 *   const unsigned int n_vector_field = dim * dofs_per_component[0];
1068 *   const unsigned int n_potential = dofs_per_component[dim];
1069 *  
1070 *   pcout << "Number of active cells : "
1071 *   << triangulation.n_global_active_cells()
1072 *   << std::endl
1073 *   << "Number of degrees of freedom: "
1074 *   << dof_handler.n_dofs()
1075 *   << " (" << n_vector_field << " + " << n_potential << ")"
1076 *   << std::endl;
1077 *   }
1078 *  
1079 * @endcode
1080 *
1081 *
1082 * <a name="LDGPoisson.cc-assemble_system"></a>
1083 * <h4>assemble_system</h4>
1084 * This is the function that will assemble the global system matrix and
1085 * global right hand side vector for the LDG method. It starts out
1086 * like many of the deal.ii tutorial codes: declaring quadrature formulas
1087 * and UpdateFlags objects, as well as vectors that will hold the
1088 * dof indices for the cells we are working on in the global system.
1089 *
1090 * @code
1091 *   template <int dim>
1092 *   void
1093 *   LDGPoissonProblem<dim>::
1094 *   assemble_system()
1095 *   {
1096 *   TimerOutput::Scope t(computing_timer, "assembly");
1097 *  
1098 *   QGauss<dim> quadrature_formula(fe.degree+1);
1099 *   QGauss<dim-1> face_quadrature_formula(fe.degree+1);
1100 *  
1101 *   const UpdateFlags update_flags = update_values
1102 *   | update_gradients
1103 *   | update_quadrature_points
1104 *   | update_JxW_values;
1105 *  
1106 *   const UpdateFlags face_update_flags = update_values
1107 *   | update_normal_vectors
1108 *   | update_quadrature_points
1109 *   | update_JxW_values;
1110 *  
1111 *   const unsigned int dofs_per_cell = fe.dofs_per_cell;
1112 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1113 *   std::vector<types::global_dof_index>
1114 *   local_neighbor_dof_indices(dofs_per_cell);
1115 *  
1116 * @endcode
1117 *
1118 * We first remark that we have the FEValues objects for
1119 * the values of our cell basis functions as was done in most
1120 * other examples. Now because we
1121 * are using discontinuous Galerkin methods we also introduce a
1122 * FEFaceValues object, <code>fe_face_values</code>,
1123 * for evaluating the basis functions
1124 * on one side of an element face as well as another FEFaceValues object,
1125 * <code>fe_neighbor_face_values</code>, for evaluating the basis functions
 1126 * on the opposite side of the face, i.e. on the neighboring element's face.
1127 * In addition, we also introduce a FESubfaceValues object,
1128 * <code>fe_subface_values</code>, that
1129 * will be used for dealing with faces that have multiple refinement
1130 * levels, i.e. hanging nodes. When we have to evaluate the fluxes across
 1131 * a face that has multiple refinement levels, we need to evaluate the
 1132 * fluxes across all its children's faces; we'll explain this more when
1133 * the time comes.
1134 *
1135 * @code
1136 *   FEValues<dim> fe_values(fe, quadrature_formula, update_flags);
1137 *  
1138 *   FEFaceValues<dim> fe_face_values(fe,face_quadrature_formula,
1139 *   face_update_flags);
1140 *  
1141 *   FEFaceValues<dim> fe_neighbor_face_values(fe,
1142 *   face_quadrature_formula,
1143 *   face_update_flags);
1144 *  
1145 *   FESubfaceValues<dim> fe_subface_values(fe, face_quadrature_formula,
1146 *   face_update_flags);
1147 *  
1148 * @endcode
1149 *
1150 * Here are the local (dense) matrix and right hand side vector for
1151 * the solid integrals as well as the integrals on the boundaries in the
1152 * local discontinuous Galerkin method. These terms will be built for
1153 * each local element in the mesh and then distributed to the global
1154 * system matrix and right hand side vector.
1155 *
1156 * @code
1157 *   FullMatrix<double> local_matrix(dofs_per_cell,dofs_per_cell);
1158 *   Vector<double> local_vector(dofs_per_cell);
1159 *  
1160 * @endcode
1161 *
1162 * The next four matrices are used to incorporate the flux integrals across
1163 * interior faces of the mesh:
1164 *
1165 * @code
1166 *   FullMatrix<double> vi_ui_matrix(dofs_per_cell, dofs_per_cell);
1167 *   FullMatrix<double> vi_ue_matrix(dofs_per_cell, dofs_per_cell);
1168 *   FullMatrix<double> ve_ui_matrix(dofs_per_cell, dofs_per_cell);
1169 *   FullMatrix<double> ve_ue_matrix(dofs_per_cell, dofs_per_cell);
1170 * @endcode
1171 *
 1172 * As explained in the section on the LDG method, we take our test
 1173 * function to be v, multiply it against our differential
 1174 * equation for u, and perform integration by parts as explained in the
 1175 * introduction. Using this notation for test and solution function,
1176 * the matrices below will then stand for:
1177 *
1178
1179 *
1180 * <code>vi_ui</code> - Taking the value of the test function from
1181 * interior of this cell's face and the solution function
1182 * from the interior of this cell.
1183 *
1184
1185 *
1186 * <code>vi_ue</code> - Taking the value of the test function from
1187 * interior of this cell's face and the solution function
1188 * from the exterior of this cell.
1189 *
1190
1191 *
1192 * <code>ve_ui</code> - Taking the value of the test function from
1193 * exterior of this cell's face and the solution function
1194 * from the interior of this cell.
1195 *
1196
1197 *
1198 * <code>ve_ue</code> - Taking the value of the test function from
1199 * exterior of this cell's face and the solution function
1200 * from the exterior of this cell.
1201 *
1202
1203 *
 1204 * Now that we have gotten the preliminaries out of the way,
1205 * we loop over all the cells
1206 * and assemble the local system matrix and local right hand side vector
1208 *
1209 * @code
 1210 *   typename DoFHandler<dim>::active_cell_iterator
 1211 *   cell = dof_handler.begin_active(),
1212 *   endc = dof_handler.end();
1213 *  
1214 *   for (; cell!=endc; ++cell)
1215 *   {
1216 * @endcode
1217 *
1218 * Now, since we are working in a distributed setting,
1219 * we can only work on cells and write to dofs in the
1220 * <code>system_matrix</code>
 1221 *   and <code>system_rhs</code>
 1222 *   that correspond to cells that are locally owned
1223 * by this processor. We note that while we can only write to locally
1224 * owned dofs, we will still use information from cells that are
1225 * locally relevant. This is very much the same as in @ref step_40 "step-40".
1226 *
1227 * @code
1228 *   if (cell->is_locally_owned())
1229 *   {
1230 * @endcode
1231 *
 1232 * We now assemble the local contributions to the system matrix,
 1233 * which include the solid integrals in the LDG method, as well as
 1234 * the right hand side vector. This involves resetting the local
 1235 * matrix and vector to contain all zeros, reinitializing the
 1236 * FEValues object for this cell and then building the
 1237 * <code>local_matrix</code> and <code>local_vector</code>.
1238 *
1239 * @code
1240 *   local_matrix = 0;
1241 *   local_vector = 0;
1242 *  
1243 *   fe_values.reinit(cell);
1244 *   assemble_cell_terms(fe_values,
1245 *   local_matrix,
1246 *   local_vector);
1247 *  
1248 * @endcode
1249 *
 1250 * We remark that we need to get the local dof indices of
 1251 * this cell before we begin to compute the contributions
1252 * from the numerical fluxes, i.e. the boundary conditions and
1253 * interior fluxes.
1254 *
1255 * @code
1256 *   cell->get_dof_indices(local_dof_indices);
1257 *  
1258 * @endcode
1259 *
1260 * Now is where we start to loop over all the faces of the cell
 1261 * and construct the local contributions from the numerical fluxes.
1262 * The numerical fluxes will be due to 3 contributions: the
1263 * interior faces, the faces on the Neumann boundary and the faces
1264 * on the Dirichlet boundary. We instantiate a
1265 * <code>face_iterator</code> to loop
1266 * over all the faces of this cell and first see if the face is on
 1267 * the boundary. Notice how we do not reinitialize the
 1268 * <code>fe_face_values</code>
 1269 * object for the face until we know that we are actually on a face
1270 * that lies on the boundary of the domain. The reason for doing this
1271 * is for computational efficiency; reinitializing the FEFaceValues
1272 * for each face is expensive and we do not want to do it unless we
 1273 * are actually going to use it to do computations. After this, we test
 1274 * if the face
 1275 * is on a Dirichlet or a Neumann segment of the boundary and
1276 * call the appropriate subroutine to assemble the contributions for
1277 * that boundary. Note that this assembles the flux contribution
1278 * in the <code>local_matrix</code> as well as the boundary
1279 * condition that ends up
1280 * in the <code>local_vector</code>.
1281 *
1282 * @code
1283 *   for (unsigned int face_no=0;
1284 *   face_no< GeometryInfo<dim>::faces_per_cell;
1285 *   face_no++)
1286 *   {
1287 *   typename DoFHandler<dim>::face_iterator face =
1288 *   cell->face(face_no);
1289 *  
1290 *   if (face->at_boundary() )
1291 *   {
1292 *   fe_face_values.reinit(cell, face_no);
1293 *  
1294 *   if (face->boundary_id() == Dirichlet)
1295 *   {
1296 * @endcode
1297 *
1298 * Here notice that in order to assemble the
 1299 * flux due to the penalty term for the
1300 * Dirichlet boundary condition we need the
1301 * local cell diameter size and we can get
1302 * that value for this specific cell with
1303 * the following,
1304 *
1305 * @code
1306 *   double h = cell->diameter();
1307 *   assemble_Dirichlet_boundary_terms(fe_face_values,
1308 *   local_matrix,
1309 *   local_vector,
1310 *   h);
1311 *   }
1312 *   else if (face->boundary_id() == Neumann)
1313 *   {
1314 *   assemble_Neumann_boundary_terms(fe_face_values,
1315 *   local_matrix,
1316 *   local_vector);
1317 *   }
1318 *   else
1319 *   Assert(false, ExcNotImplemented() );
1320 *   }
1321 *   else
1322 *   {
1323 * @endcode
1324 *
1325 * At this point we know that the face we are on is an
1326 * interior face. We can begin to assemble the interior
1327 * flux matrices, but first we want to make sure that the
1328 * neighbor cell to this face is a valid cell. Once we know
1329 * that the neighbor is a valid cell then we also want to get
 1330 * the neighbor cell that shares this cell's face.
1331 *
1332
1333 *
1334 *
1335 * @code
1336 *   Assert(cell->neighbor(face_no).state() ==
1337 *   IteratorState::valid,
1338 *   ExcInternalError());
1339 *  
1340 *   typename DoFHandler<dim>::cell_iterator neighbor =
1341 *   cell->neighbor(face_no);
1342 *  
1343 * @endcode
1344 *
1345 * Now that we have the two cells whose face we want to
1346 * compute the numerical flux across, we need to know
1347 * if the face has been refined, i.e. if it has children
1348 * faces. This occurs when one of the cells has a
1349 * different level of refinement than
1350 * the other cell. If this is the case, then this face
1351 * has a different level of refinement than the other faces
1352 * of the cell, i.e. on this face there is a hanging node.
 1353 * Hanging nodes are not a problem in DG methods; the only
 1354 * time we have to watch out for them is at this step
 1355 * and, as you will see, the changes we have to make
1356 * are minor.
1357 *
1358 * @code
1359 *   if (face->has_children())
1360 *   {
1361 * @endcode
1362 *
1363 * We now need to find the face of our neighbor cell
1364 * such that neighbor(neigh_face_no) = cell(face_no).
1365 *
1366 * @code
1367 *   const unsigned int neighbor_face_no =
1368 *   cell->neighbor_of_neighbor(face_no);
1369 *  
1370 * @endcode
1371 *
1372 * Once we do this we then have to loop over all the
1373 * subfaces (children faces) of our cell's face and
1374 * compute the interior fluxes across the children faces
1375 * and the neighbor's face.
1376 *
1377 * @code
1378 *   for (unsigned int subface_no=0;
1379 *   subface_no < face->n_children();
1380 *   ++subface_no)
1381 *   {
1382 * @endcode
1383 *
1384 * We then get the neighbor cell's subface that
1385 * matches our cell face's subface and the
1386 * specific subface number. We assert that the parent
1387 * face cannot be more than one level of
1388 * refinement above the child's face. This is
1389 * because the deal.ii library does not allow
1390 * neighboring cells to have refinement levels
1391 * that are more than one level in difference.
1392 *
1393 * @code
1394 *   typename DoFHandler<dim>::cell_iterator neighbor_child =
1395 *   cell->neighbor_child_on_subface(face_no,
1396 *   subface_no);
1397 *  
1398 *   Assert(!neighbor_child->has_children(),
1399 *   ExcInternalError());
1400 *  
1401 * @endcode
1402 *
1403 * Now that we are ready to build the local flux
1404 * matrices for
 1405 * this face, we reset them to zero and
 1406 * reinitialize the <code>fe_subface_values</code>
 1407 * object on this cell's subface and the
 1408 * <code>fe_neighbor_face_values</code> object
 1410 * on <code>neighbor_child</code>'s appropriate face.
1411 *
1412 * @code
1413 *   vi_ui_matrix = 0;
1414 *   vi_ue_matrix = 0;
1415 *   ve_ui_matrix = 0;
1416 *   ve_ue_matrix = 0;
1417 *  
1418 *   fe_subface_values.reinit(cell, face_no, subface_no);
1419 *   fe_neighbor_face_values.reinit(neighbor_child,
1420 *   neighbor_face_no);
1421 *  
1422 * @endcode
1423 *
 1424 * In addition, we get the minimum of the diameters of
1425 * the two cells to include in the penalty term
1426 *
1427 * @code
1428 *   double h = std::min(cell->diameter(),
1429 *   neighbor_child->diameter());
1430 *  
1431 * @endcode
1432 *
1433 * We now finally assemble the interior fluxes for
1434 * the case of a face which has been refined using
1435 * exactly the same subroutine as we do when both
1436 * cells have the same refinement level.
1437 *
1438 * @code
1439 *   assemble_flux_terms(fe_subface_values,
1440 *   fe_neighbor_face_values,
1441 *   vi_ui_matrix,
1442 *   vi_ue_matrix,
1443 *   ve_ui_matrix,
1444 *   ve_ue_matrix,
1445 *   h);
1446 *  
1447 * @endcode
1448 *
 1449 * Now all that is left to be done before distributing
 1450 * the local flux matrices to the global system
 1451 * is to get the neighbor child's dof indices.
1452 *
1453 * @code
1454 *   neighbor_child->get_dof_indices(local_neighbor_dof_indices);
1455 *  
1456 * @endcode
1457 *
 1458 * Once we have this cell's dof indices and the
 1459 * neighboring cell's dof indices we can use the
 1460 * AffineConstraints object to distribute the local flux
1461 * matrices to the global system matrix.
1462 * This is done through the class function
1463 * <code>distribute_local_flux_to_global()</code>.
1464 *
1465 * @code
1466 *   distribute_local_flux_to_global(
1467 *   vi_ui_matrix,
1468 *   vi_ue_matrix,
1469 *   ve_ui_matrix,
1470 *   ve_ue_matrix,
1471 *   local_dof_indices,
1472 *   local_neighbor_dof_indices);
1473 *   }
1474 *   }
1475 *   else
1476 *   {
1477 * @endcode
1478 *
1479 * At this point we know that this cell and the neighbor
1480 * of this cell are on the same refinement level and
1481 * the work to assemble the interior flux matrices
 1482 * is very much the same as before. In fact it is
1483 * much simpler since we do not have to loop through the
1484 * subfaces. However, we have to check that we do
1485 * not compute the same contribution twice. This would
1486 * happen because we are looping over all the faces of
1487 * all the cells in the mesh
1488 * and assembling the interior flux matrices for each face.
 1489 * To avoid assembling the interior flux matrices
 1490 * twice, we only compute the
 1491 * interior fluxes once for each face by requiring that
 1492 * the following computation only occurs on
 1493 * the cell with the lower CellId.
1494 *
1495 * @code
1496 *   if (neighbor->level() == cell->level() &&
1497 *   cell->id() < neighbor->id())
1498 *   {
1499 * @endcode
1500 *
1501 * Here we find the neighbor face such that
1502 * neighbor(neigh_face_no) = cell(face_no).
 1503 * In addition, we reinitialize the FEFaceValues
1504 * and neighbor cell's FEFaceValues on their
1505 * respective cells' faces, as well as get the
1506 * minimum diameter of this cell
1507 * and the neighbor cell and assign
1508 * it to <code>h</code>.
1509 *
1510 * @code
1511 *   const unsigned int neighbor_face_no =
1512 *   cell->neighbor_of_neighbor(face_no);
1513 *  
1514 *   vi_ui_matrix = 0;
1515 *   vi_ue_matrix = 0;
1516 *   ve_ui_matrix = 0;
1517 *   ve_ue_matrix = 0;
1518 *  
1519 *   fe_face_values.reinit(cell, face_no);
1520 *   fe_neighbor_face_values.reinit(neighbor,
1521 *   neighbor_face_no);
1522 *  
1523 *   double h = std::min(cell->diameter(),
1524 *   neighbor->diameter());
1525 *  
1526 * @endcode
1527 *
1528 * Just as before we assemble the interior fluxes
1529 * using the
1530 * <code>assemble_flux_terms</code> subroutine,
1531 * get the neighbor cell's
1532 * face dof indices and use the constraint matrix to
1533 * distribute the local flux matrices to the global
1534 * <code>system_matrix</code> using the class
1535 * function
1536 * <code>distribute_local_flux_to_global()</code>
1537 *
1538 * @code
1539 *   assemble_flux_terms(fe_face_values,
1540 *   fe_neighbor_face_values,
1541 *   vi_ui_matrix,
1542 *   vi_ue_matrix,
1543 *   ve_ui_matrix,
1544 *   ve_ue_matrix,
1545 *   h);
1546 *  
1547 *   neighbor->get_dof_indices(local_neighbor_dof_indices);
1548 *  
1549 *   distribute_local_flux_to_global(
1550 *   vi_ui_matrix,
1551 *   vi_ue_matrix,
1552 *   ve_ui_matrix,
1553 *   ve_ue_matrix,
1554 *   local_dof_indices,
1555 *   local_neighbor_dof_indices);
1556 *  
1557 *  
1558 *   }
1559 *   }
1560 *   }
1561 *   }
1562 *  
1563 *  
1564 * @endcode
1565 *
 1566 * Now that we have looped over all the faces of this
 1567 * cell and computed as well as distributed the local
1568 * flux matrices to the <code>system_matrix</code>, we
1569 * can finally distribute the cell's <code>local_matrix</code>
1570 * and <code>local_vector</code> contribution to the
1571 * global system matrix and global right hand side vector.
 1572 * We remark that we have to wait until this point
 1573 * to distribute the <code>local_matrix</code>
 1574 * and <code>local_vector</code> to the global system.
 1575 * The reason is that, in looping over the faces,
 1576 * the faces on the boundary of the domain contribute
 1577 * to the <code>local_matrix</code>
 1578 * and <code>local_vector</code>. We could distribute
 1579 * the local contributions for each component separately,
 1580 * but writing to the distributed sparse matrix and vector
 1581 * is expensive and we want to minimize the number of times
 1582 * we do so.
1583 *
1584 * @code
1585 *   constraints.distribute_local_to_global(local_matrix,
1586 *   local_dof_indices,
1587 *   system_matrix);
1588 *  
1589 *   constraints.distribute_local_to_global(local_vector,
1590 *   local_dof_indices,
1591 *   system_rhs);
1592 *  
1593 *   }
1594 *   }
1595 *  
1596 * @endcode
1597 *
1598 * We need to synchronize assembly of our global system
1599 * matrix and global right hand side vector with all the other
1600 * processors and
1601 * use the compress() function to do this.
1602 * This was discussed in detail in @ref step_40 "step-40".
1603 *
1604 * @code
1605 *   system_matrix.compress(VectorOperation::add);
1606 *   system_rhs.compress(VectorOperation::add);
1607 *   }
1608 *  
1609 *  
1610 * @endcode
1611 *
1612 *
1613 * <a name="LDGPoisson.cc-assemble_cell_terms"></a>
1614 * <h4>assemble_cell_terms</h4>
1615 * This function deals with constructing the local matrix due to
 1616 * the solid integrals over each element and is very similar to
 1617 * the other examples in the deal.ii tutorials.
1618 *
1619 * @code
1620 *   template<int dim>
1621 *   void
1622 *   LDGPoissonProblem<dim>::
1623 *   assemble_cell_terms(
1624 *   const FEValues<dim> &cell_fe,
1625 *   FullMatrix<double> &cell_matrix,
1626 *   Vector<double> &cell_vector)
1627 *   {
1628 *   const unsigned int dofs_per_cell = cell_fe.dofs_per_cell;
1629 *   const unsigned int n_q_points = cell_fe.n_quadrature_points;
1630 *  
1631 *   const FEValuesExtractors::Vector VectorField(0);
1632 *   const FEValuesExtractors::Scalar Potential(dim);
1633 *  
1634 *   std::vector<double> rhs_values(n_q_points);
1635 *  
1636 * @endcode
1637 *
1638 * We first get the value of the right hand side function
1639 * evaluated at the quadrature points in the cell.
1640 *
1641 * @code
1642 *   rhs_function.value_list(cell_fe.get_quadrature_points(),
1643 *   rhs_values);
1644 *  
1645 * @endcode
1646 *
1647 * Now, we loop over the quadrature points in the
1648 * cell and then loop over the degrees of freedom and perform
1649 * quadrature to approximate the integrals.
1650 *
1651 * @code
1652 *   for (unsigned int q=0; q<n_q_points; ++q)
1653 *   {
1654 *   for (unsigned int i=0; i<dofs_per_cell; ++i)
1655 *   {
1656 *   const Tensor<1, dim> psi_i_field = cell_fe[VectorField].value(i,q);
1657 *   const double div_psi_i_field = cell_fe[VectorField].divergence(i,q);
1658 *   const double psi_i_potential = cell_fe[Potential].value(i,q);
1659 *   const Tensor<1, dim> grad_psi_i_potential = cell_fe[Potential].gradient(i,q);
1660 *  
1661 *   for (unsigned int j=0; j<dofs_per_cell; ++j)
1662 *   {
1663 *   const Tensor<1, dim> psi_j_field = cell_fe[VectorField].value(j,q);
1664 *   const double psi_j_potential = cell_fe[Potential].value(j,q);
1665 *  
1666 * @endcode
1667 *
1668 * This computation corresponds to assembling the local system
1669 * matrix for the integral over an element,
1670 *
1671
1672 *
1673 * @f$\int_{\Omega_{e}} \left(\textbf{w} \cdot \textbf{q}
1674 * - \nabla \cdot \textbf{w} u
1675 * - \nabla w \cdot \textbf{q}
1676 * \right) dx @f$
1677 *
1678 * @code
1679 *   cell_matrix(i,j) += ( (psi_i_field * psi_j_field)
1680 *   -
1681 *   (div_psi_i_field * psi_j_potential)
1682 *   -
1683 *   (grad_psi_i_potential * psi_j_field)
1684 *   ) * cell_fe.JxW(q);
1685 *   }
1686 *  
1687 * @endcode
1688 *
 1689 * And this local right hand side vector corresponds to the integral
1690 * over the element cell,
1691 *
1692
1693 *
1694 * @f$ \int_{\Omega_{e}} w \, f(\textbf{x}) \, dx @f$
1695 *
1696 * @code
1697 *   cell_vector(i) += psi_i_potential *
1698 *   rhs_values[q] *
1699 *   cell_fe.JxW(q);
1700 *   }
1701 *   }
1702 *   }
1703 *  
1704 * @endcode
1705 *
1706 *
1707 * <a name="LDGPoisson.cc-assemble_Dirichlet_boundary_terms"></a>
1708 * <h4>assemble_Dirichlet_boundary_terms</h4>
1709 * Here we have the function that builds the <code>local_matrix</code>
1710 * contribution
 1711 * and local right hand side vector, <code>local_vector</code>,
 1712 * for the Dirichlet boundary conditions.
1713 *
1714 * @code
1715 *   template<int dim>
1716 *   void
1717 *   LDGPoissonProblem<dim>::
1718 *   assemble_Dirichlet_boundary_terms(
1719 *   const FEFaceValues<dim> &face_fe,
1720 *   FullMatrix<double> &local_matrix,
1721 *   Vector<double> &local_vector,
1722 *   const double &h)
1723 *   {
1724 *   const unsigned int dofs_per_cell = face_fe.dofs_per_cell;
1725 *   const unsigned int n_q_points = face_fe.n_quadrature_points;
1726 *  
1727 *   const FEValuesExtractors::Vector VectorField(0);
1728 *   const FEValuesExtractors::Scalar Potential(dim);
1729 *  
1730 *   std::vector<double> Dirichlet_bc_values(n_q_points);
1731 *  
1732 * @endcode
1733 *
1734 * In order to evaluate the flux on the Dirichlet boundary face we
1735 * first get the value of the Dirichlet boundary function on the quadrature
1736 * points of the face. Then we loop over all the quadrature points and
1737 * degrees of freedom and approximate the integrals on the Dirichlet boundary
1738 * element faces.
1739 *
1740 * @code
1741 *   Dirichlet_bc_function.value_list(face_fe.get_quadrature_points(),
1742 *   Dirichlet_bc_values);
1743 *  
1744 *   for (unsigned int q=0; q<n_q_points; ++q)
1745 *   {
1746 *   for (unsigned int i=0; i<dofs_per_cell; ++i)
1747 *   {
1748 *   const Tensor<1, dim> psi_i_field = face_fe[VectorField].value(i,q);
1749 *   const double psi_i_potential = face_fe[Potential].value(i,q);
1750 *  
1751 *   for (unsigned int j=0; j<dofs_per_cell; ++j)
1752 *   {
1753 *   const Tensor<1, dim> psi_j_field = face_fe[VectorField].value(j,q);
1754 *   const double psi_j_potential = face_fe[Potential].value(j,q);
1755 *  
1756 * @endcode
1757 *
 1758 * We compute the contribution of the flux @f$\widehat{q}@f$ on
1759 * the Dirichlet boundary which enters our system matrix as,
1760 *
1761
1762 *
1763 * @f$ \int_{\text{face}} w \, ( \textbf{n} \cdot \textbf{q}
1764 * + \sigma u) ds @f$
1765 *
1766 * @code
1767 *   local_matrix(i,j) += psi_i_potential * (
1768 *   face_fe.normal_vector(q) *
1769 *   psi_j_field
1770 *   +
1771 *   (penalty/h) *
1772 *   psi_j_potential) *
1773 *   face_fe.JxW(q);
1774 *  
1775 *   }
1776 *  
1777 * @endcode
1778 *
1779 * We also compute the contribution for the flux for @f$\widehat{u}@f$
1780 * on the Dirichlet boundary which is the Dirichlet boundary
1781 * condition function and enters the right hand side vector as
1782 *
1783
1784 *
1785 * @f$\int_{\text{face}} (-\textbf{w} \cdot \textbf{n}
1786 * + \sigma w) \, u_{D} ds @f$
1787 *
1788 * @code
1789 *   local_vector(i) += (-1.0 * psi_i_field *
1790 *   face_fe.normal_vector(q)
1791 *   +
1792 *   (penalty/h) *
1793 *   psi_i_potential) *
1794 *   Dirichlet_bc_values[q] *
1795 *   face_fe.JxW(q);
1796 *   }
1797 *   }
1798 *   }
1799 *  
1800 * @endcode
1801 *
1802 *
1803 * <a name="LDGPoisson.cc-assemble_Neumann_boundary_terms"></a>
1804 * <h4>assemble_Neumann_boundary_terms</h4>
1805 * Here we have the function that builds the <code>local_matrix</code>
 1806 * and <code>local_vector</code> for the Neumann boundary conditions.
1807 *
1808 * @code
1809 *   template<int dim>
1810 *   void
1811 *   LDGPoissonProblem<dim>::
1812 *   assemble_Neumann_boundary_terms(
1813 *   const FEFaceValues<dim> &face_fe,
1814 *   FullMatrix<double> &local_matrix,
1815 *   Vector<double> &local_vector)
1816 *   {
1817 *   const unsigned int dofs_per_cell = face_fe.dofs_per_cell;
1818 *   const unsigned int n_q_points = face_fe.n_quadrature_points;
1819 *  
1820 *   const FEValuesExtractors::Vector VectorField(0);
1821 *   const FEValuesExtractors::Scalar Potential(dim);
1822 *  
1823 * @endcode
1824 *
 1825 * In order to evaluate the flux on the Neumann boundary face we
1826 * first get the value of the Neumann boundary function on the quadrature
1827 * points of the face. Then we loop over all the quadrature points and
1828 * degrees of freedom and approximate the integrals on the Neumann boundary
1829 * element faces.
1830 *
1831 * @code
1832 *   std::vector<double > Neumann_bc_values(n_q_points);
1833 *  
1834 *   for (unsigned int q=0; q<n_q_points; ++q)
1835 *   {
1836 *   for (unsigned int i=0; i<dofs_per_cell; ++i)
1837 *   {
1838 *   const Tensor<1, dim> psi_i_field = face_fe[VectorField].value(i,q);
1839 *   const double psi_i_potential = face_fe[Potential].value(i,q);
1840 *  
1841 *   for (unsigned int j=0; j<dofs_per_cell; ++j)
1842 *   {
1843 *  
1844 *   const double psi_j_potential = face_fe[Potential].value(j,q);
1845 *  
1846 * @endcode
1847 *
 1848 * We compute the contribution of the flux @f$\widehat{u}@f$ on the
1849 * Neumann boundary which enters our system matrix as,
1850 *
1851
1852 *
1853 * @f$\int_{\text{face}} \textbf{w} \cdot \textbf{n} \, u \, ds @f$
1854 *
1855 * @code
1856 *   local_matrix(i,j) += psi_i_field *
1857 *   face_fe.normal_vector(q) *
1858 *   psi_j_potential *
1859 *   face_fe.JxW(q);
1860 *  
1861 *   }
1862 *  
1863 * @endcode
1864 *
1865 * We also compute the contribution for the flux for
 1866 * @f$\widehat{q}@f$ on the Neumann boundary which is the
1867 * Neumann boundary condition and enters the right
1868 * hand side vector as
1869 *
1870
1871 *
1872 * @f$\int_{\text{face}} -w \, g_{N} \, ds@f$
1873 *
1874 * @code
1875 *   local_vector(i) += -psi_i_potential *
1876 *   Neumann_bc_values[q] *
1877 *   face_fe.JxW(q);
1878 *   }
1879 *   }
1880 *   }
1881 *  
1882 * @endcode
1883 *
1884 *
1885 * <a name="LDGPoisson.cc-assemble_flux_terms"></a>
1886 * <h4>assemble_flux_terms</h4>
1887 * Now we finally get to the function which builds the interior fluxes.
1888 * This is a rather long function
1889 * and we will describe what is going on in detail.
1890 *
1891 * @code
1892 *   template<int dim>
1893 *   void
1894 *   LDGPoissonProblem<dim>::
1895 *   assemble_flux_terms(
1896 *   const FEFaceValuesBase<dim> &fe_face_values,
1897 *   const FEFaceValuesBase<dim> &fe_neighbor_face_values,
1898 *   FullMatrix<double> &vi_ui_matrix,
1899 *   FullMatrix<double> &vi_ue_matrix,
1900 *   FullMatrix<double> &ve_ui_matrix,
1901 *   FullMatrix<double> &ve_ue_matrix,
1902 *   const double &h)
1903 *   {
1904 *   const unsigned int n_face_points = fe_face_values.n_quadrature_points;
1905 *   const unsigned int dofs_this_cell = fe_face_values.dofs_per_cell;
1906 *   const unsigned int dofs_neighbor_cell = fe_neighbor_face_values.dofs_per_cell;
1907 *  
1908 *   const FEValuesExtractors::Vector VectorField(0);
1909 *   const FEValuesExtractors::Scalar Potential(dim);
1910 *  
1911 * @endcode
1912 *
 1913 * The first thing we do after the boilerplate is define
 1914 * the unit vector @f$\boldsymbol \beta@f$ that is used in defining
 1915 * the LDG/alternating fluxes.
1916 *
1917 * @code
1918 *   Point<dim> beta;
1919 *   for (int i=0; i<dim; ++i)
1920 *   beta(i) = 1.0;
1921 *   beta /= sqrt(beta.square() );
1922 *  
1923 * @endcode
1924 *
1925 * Now we loop over all the quadrature points on the element face
1926 * and loop over all the degrees of freedom and approximate
1927 * the following flux integrals.
1928 *
1929 * @code
1930 *   for (unsigned int q=0; q<n_face_points; ++q)
1931 *   {
1932 *   for (unsigned int i=0; i<dofs_this_cell; ++i)
1933 *   {
1934 *   const Tensor<1,dim> psi_i_field_minus =
1935 *   fe_face_values[VectorField].value(i,q);
1936 *   const double psi_i_potential_minus =
1937 *   fe_face_values[Potential].value(i,q);
1938 *  
1939 *   for (unsigned int j=0; j<dofs_this_cell; ++j)
1940 *   {
1941 *   const Tensor<1,dim> psi_j_field_minus =
1942 *   fe_face_values[VectorField].value(j,q);
1943 *   const double psi_j_potential_minus =
1944 *   fe_face_values[Potential].value(j,q);
1945 *  
1946 * @endcode
1947 *
1948 * We compute the flux matrix where the test function's
1949 * as well as the solution function's values are taken from
1950 * the interior as,
1951 *
1952
1953 *
1954 * @f$\int_{\text{face}}
1955 * \left( \frac{1}{2} \, \textbf{n}^{-}
1956 * \cdot ( \textbf{w}^{-} u^{-}
1957 * + w^{-} \textbf{q}^{-})
1958 * + \boldsymbol \beta \cdot \textbf{w}^{-} u^{-}
1959 * - w^{-} \boldsymbol \beta \cdot \textbf{q}^{-}
1960 * + \sigma w^{-} \, u^{-} \right) ds@f$
1961 *
1962 * @code
1963 *   vi_ui_matrix(i,j) += (0.5 * (
1964 *   psi_i_field_minus *
1965 *   fe_face_values.normal_vector(q) *
1966 *   psi_j_potential_minus
1967 *   +
1968 *   psi_i_potential_minus *
1969 *   fe_face_values.normal_vector(q) *
1970 *   psi_j_field_minus )
1971 *   +
1972 *   beta *
1973 *   psi_i_field_minus *
1974 *   psi_j_potential_minus
1975 *   -
1976 *   beta *
1977 *   psi_i_potential_minus *
1978 *   psi_j_field_minus
1979 *   +
1980 *   (penalty/h) *
1981 *   psi_i_potential_minus *
1982 *   psi_j_potential_minus
1983 *   ) *
1984 *   fe_face_values.JxW(q);
1985 *   }
1986 *  
1987 *   for (unsigned int j=0; j<dofs_neighbor_cell; ++j)
1988 *   {
1989 *   const Tensor<1,dim> psi_j_field_plus =
1990 *   fe_neighbor_face_values[VectorField].value(j,q);
1991 *   const double psi_j_potential_plus =
1992 *   fe_neighbor_face_values[Potential].value(j,q);
1993 *  
1994 * @endcode
1995 *
1996 * We compute the flux matrix where the test function is
 1997 * from the interior of this element's face and the solution
1998 * function is taken from the exterior. This corresponds
1999 * to the computation,
2000 *
2001
2002 *
2003 * @f$\int_{\text{face}}
2004 * \left( \frac{1}{2} \, \textbf{n}^{-} \cdot
2005 * ( \textbf{w}^{-} u^{+}
2006 * + w^{-} \textbf{q}^{+})
2007 * - \boldsymbol \beta \cdot \textbf{w}^{-} u^{+}
2008 * + w^{-} \boldsymbol \beta \cdot \textbf{q}^{+}
2009 * - \sigma w^{-} \, u^{+} \right) ds @f$
2010 *
2011 * @code
2012 *   vi_ue_matrix(i,j) += ( 0.5 * (
2013 *   psi_i_field_minus *
2014 *   fe_face_values.normal_vector(q) *
2015 *   psi_j_potential_plus
2016 *   +
2017 *   psi_i_potential_minus *
2018 *   fe_face_values.normal_vector(q) *
2019 *   psi_j_field_plus )
2020 *   -
2021 *   beta *
2022 *   psi_i_field_minus *
2023 *   psi_j_potential_plus
2024 *   +
2025 *   beta *
2026 *   psi_i_potential_minus *
2027 *   psi_j_field_plus
2028 *   -
2029 *   (penalty/h) *
2030 *   psi_i_potential_minus *
2031 *   psi_j_potential_plus
2032 *   ) *
2033 *   fe_face_values.JxW(q);
2034 *   }
2035 *   }
2036 *  
2037 *   for (unsigned int i=0; i<dofs_neighbor_cell; ++i)
2038 *   {
2039 *   const Tensor<1,dim> psi_i_field_plus =
2040 *   fe_neighbor_face_values[VectorField].value(i,q);
2041 *   const double psi_i_potential_plus =
2042 *   fe_neighbor_face_values[Potential].value(i,q);
2043 *  
2044 *   for (unsigned int j=0; j<dofs_this_cell; ++j)
2045 *   {
2046 *   const Tensor<1,dim> psi_j_field_minus =
2047 *   fe_face_values[VectorField].value(j,q);
2048 *   const double psi_j_potential_minus =
2049 *   fe_face_values[Potential].value(j,q);
2050 *  
2051 *  
2052 * @endcode
2053 *
2054 * We compute the flux matrix where the test function is
 2055 * from the exterior of this element's face and the solution
2056 * function is taken from the interior. This corresponds
2057 * to the computation,
2058 *
2059
2060 *
2061 * @f$ \int_{\text{face}}
2062 * \left( -\frac{1}{2}\, \textbf{n}^{-} \cdot
2063 * (\textbf{w}^{+} u^{-}
2064 * + w^{+} \textbf{q}^{-} )
2065 * - \boldsymbol \beta \cdot \textbf{w}^{+} u^{-}
2066 * + w^{+} \boldsymbol \beta \cdot \textbf{q}^{-}
2067 * - \sigma w^{+} u^{-} \right) ds @f$
2068 *
2069 * @code
2070 *   ve_ui_matrix(i,j) += (-0.5 * (
2071 *   psi_i_field_plus *
2072 *   fe_face_values.normal_vector(q) *
2073 *   psi_j_potential_minus
2074 *   +
2075 *   psi_i_potential_plus *
2076 *   fe_face_values.normal_vector(q) *
2077 *   psi_j_field_minus)
2078 *   -
2079 *   beta *
2080 *   psi_i_field_plus *
2081 *   psi_j_potential_minus
2082 *   +
2083 *   beta *
2084 *   psi_i_potential_plus *
2085 *   psi_j_field_minus
2086 *   -
2087 *   (penalty/h) *
2088 *   psi_i_potential_plus *
2089 *   psi_j_potential_minus
2090 *   ) *
2091 *   fe_face_values.JxW(q);
2092 *   }
2093 *  
2094 *   for (unsigned int j=0; j<dofs_neighbor_cell; ++j)
2095 *   {
2096 *   const Tensor<1,dim> psi_j_field_plus =
2097 *   fe_neighbor_face_values[VectorField].value(j,q);
2098 *   const double psi_j_potential_plus =
2099 *   fe_neighbor_face_values[Potential].value(j,q);
2100 *  
2101 * @endcode
2102 *
2103 * And lastly we compute the flux matrix where the test
2104 * function and solution function are taken from the exterior
2105 * cell to this face. This corresponds to the computation,
2106 *
2107
2108 *
2109 * @f$\int_{\text{face}}
2110 * \left( -\frac{1}{2}\, \textbf{n}^{-} \cdot
2111 * ( \textbf{w}^{+} u^{+}
2112 * + w^{+} \textbf{q}^{+} )
2113 * + \boldsymbol \beta \cdot \textbf{w}^{+} u^{+}
2114 * - w^{+} \boldsymbol \beta \cdot \textbf{q}^{+}
2115 * + \sigma w^{+} u^{+} \right) ds @f$
2116 *
2117 * @code
2118 *   ve_ue_matrix(i,j) += (-0.5 * (
2119 *   psi_i_field_plus *
2120 *   fe_face_values.normal_vector(q) *
2121 *   psi_j_potential_plus
2122 *   +
2123 *   psi_i_potential_plus *
2124 *   fe_face_values.normal_vector(q) *
2125 *   psi_j_field_plus )
2126 *   +
2127 *   beta *
2128 *   psi_i_field_plus *
2129 *   psi_j_potential_plus
2130 *   -
2131 *   beta *
2132 *   psi_i_potential_plus *
2133 *   psi_j_field_plus
2134 *   +
2135 *   (penalty/h) *
2136 *   psi_i_potential_plus *
2137 *   psi_j_potential_plus
2138 *   ) *
2139 *   fe_face_values.JxW(q);
2140 *   }
2141 *  
2142 *   }
2143 *   }
2144 *   }
2145 *  
2146 * @endcode
2147 *
2148 *
2149 * <a name="LDGPoisson.cc-distribute_local_flux_to_global"></a>
2150 * <h4>distribute_local_flux_to_global</h4>
 2151 * In this function we use the AffineConstraints object to distribute
 2152 * the local flux matrices to the global system matrix.
 2153 * Since I have to do this twice in assembling the
 2154 * system matrix, I made a function to do it rather than have
 2155 * repeated code.
 2156 * We ask that the reader take special note of
 2157 * which matrices we are distributing and the order
 2158 * in which we pass the dof index vectors. In distributing
 2159 * the first matrix, i.e. <code>vi_ui_matrix</code>, we are
 2160 * taking the test function and solution function values from
 2161 * the interior of this cell and therefore only need the
 2162 * <code>local_dof_indices</code> since it contains the dof
 2163 * indices of this cell. When we distribute the second matrix,
 2164 * <code>vi_ue_matrix</code>, the test function is taken
 2165 * from the interior of
 2166 * this cell while the solution function is taken from the
 2167 * exterior, i.e. the neighbor cell. Notice that the order of the
 2168 * degrees of freedom index vectors matches this pattern: first
 2169 * the <code>local_dof_indices</code>, which are local to
 2170 * this cell, and then
 2171 * the <code>local_neighbor_dof_indices</code>, which are
 2172 * local to the neighbor
 2173 * cell. The order in which we pass the dof indices for the
 2174 * matrices is paramount to constructing our global system
 2175 * matrix properly. The ordering of the last two matrices
 2176 * follows the same logic as the first two we discussed.
2177 *
2178 * @code
2179 *   template<int dim>
2180 *   void
2181 *   LDGPoissonProblem<dim>::
2182 *   distribute_local_flux_to_global(
2183 *   const FullMatrix<double> &vi_ui_matrix,
2184 *   const FullMatrix<double> &vi_ue_matrix,
2185 *   const FullMatrix<double> &ve_ui_matrix,
2186 *   const FullMatrix<double> &ve_ue_matrix,
2187 *   const std::vector<types::global_dof_index> &local_dof_indices,
2188 *   const std::vector<types::global_dof_index> &local_neighbor_dof_indices)
2189 *   {
2190 *   constraints.distribute_local_to_global(vi_ui_matrix,
2191 *   local_dof_indices,
2192 *   system_matrix);
2193 *  
2194 *   constraints.distribute_local_to_global(vi_ue_matrix,
2195 *   local_dof_indices,
2196 *   local_neighbor_dof_indices,
2197 *   system_matrix);
2198 *  
2199 *   constraints.distribute_local_to_global(ve_ui_matrix,
2200 *   local_neighbor_dof_indices,
2201 *   local_dof_indices,
2202 *   system_matrix);
2203 *  
2204 *   constraints.distribute_local_to_global(ve_ue_matrix,
2205 *   local_neighbor_dof_indices,
2206 *   system_matrix);
2207 *   }
2208 *  
2209 *  
2210 *  
2211 * @endcode
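 *
 * For reference, the call site of this helper inside the face loops of
 * <code>assemble_system()</code> passes the four local flux matrices and
 * the two dof index vectors in exactly the order discussed above. A minimal
 * reminder sketch, using the variable names that appeared earlier in this
 * program:
 *
 * @code
 *   distribute_local_flux_to_global(vi_ui_matrix,
 *                                   vi_ue_matrix,
 *                                   ve_ui_matrix,
 *                                   ve_ue_matrix,
 *                                   local_dof_indices,
 *                                   local_neighbor_dof_indices);
 * @endcode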
2212 *
2213 *
2214 * <a name="LDGPoisson.cc-solve"></a>
2215 * <h4>solve</h4>
2216 * As mentioned earlier I used a direct solver to solve
2217 * the linear system of equations resulting from the LDG
2218 * method applied to the Poisson equation. One could also
2219 * use an iterative solver, however, we would then need to use
2220 * a preconditioner, and that is something I did not want
2221 * to get into. For information on preconditioners
2222 * for the LDG Method see this
2223 * <a href="http://epubs.siam.org/doi/abs/10.1137/S1064827502410657">
2224 * paper</a>. The use of a direct solver here is
2225 * somewhat of a limitation. The built-in distributed
2226 * direct solver in Trilinos reduces everything to one
2227 * processor, solves the system and then distributes
2228 * everything back out to the other processors. However,
2229 * by linking to more advanced direct solvers through
2230 * Trilinos one can accomplish fully distributed computations
2231 * and not much about the following function calls will
2232 * change.
2233 *
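 *
 * For example, a minimal, hedged sketch of selecting a different Amesos
 * backend through <code>TrilinosWrappers::SolverDirect::AdditionalData</code>
 * might look as follows; the "Amesos_Superludist" solver type is an
 * assumption that only works if Trilinos was configured with SuperLU_DIST
 * support:
 *
 * @code
 *   SolverControl solver_control(1, 0);
 *
 *   // Select a fully distributed direct solver instead of the default
 *   // "Amesos_Klu" (assumes the corresponding Trilinos package is enabled).
 *   TrilinosWrappers::SolverDirect::AdditionalData additional_data(
 *     false,                   // output_solver_details
 *     "Amesos_Superludist");   // solver_type (assumed SuperLU_DIST build)
 *
 *   TrilinosWrappers::SolverDirect solver(solver_control, additional_data);
 * @endcode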
2234 * @code
2235 *   template<int dim>
2236 *   void
2237 *   LDGPoissonProblem<dim>::
2238 *   solve()
2239 *   {
2240 *   TimerOutput::Scope t(computing_timer, "solve");
2241 *  
2242 * @endcode
2243 *
2244 * As in @ref step_40 "step-40", in order to perform a linear solve
2245 * we need a solution vector where there is no overlap across
2246 * the processors, and we create this by instantiating the
2247 * <code>completely_distributed_solution</code> solution
2248 * vector using
2249 * the copy constructor on the global system right hand
2250 * side vector, which itself is a completely distributed vector.
2251 *
2252 * @code
2253 *   TrilinosWrappers::MPI::Vector
2254 *   completely_distributed_solution(system_rhs);
2255 *  
2256 * @endcode
2257 *
2258 * Now we can perform the solve using the completely distributed
2259 * right hand side vector, the system matrix and the completely
2260 * distributed solution vector.
2261 *
2262 * @code
2263 *   solver.solve(system_matrix,
2264 *   completely_distributed_solution,
2265 *   system_rhs);
2266 *  
2267 * @endcode
2268 *
2269 * We now distribute the constraints of our system onto the
2270 * completely distributed solution vector, although in our case
2271 * with the LDG method there are none.
2272 *
2273 * @code
2274 *   constraints.distribute(completely_distributed_solution);
2275 *  
2276 * @endcode
2277 *
2278 * Lastly we copy the completely distributed solution vector,
2279 * <code>completely_distributed_solution</code>,
2280 * to the solution vector which has some overlap between
2281 * processors, <code>locally_relevant_solution</code>.
2282 * We need the overlapped portions of our solution
2283 * in order to be able to do computations using the solution
2284 * later in the code or in post processing.
2285 *
2286 * @code
2287 *   locally_relevant_solution = completely_distributed_solution;
2288 *   }
2289 *  
2290 * @endcode
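 *
 * The ghosted vector on the left-hand side of this assignment must have
 * been initialized with both the locally owned and the locally relevant
 * index sets. As a reminder, a minimal sketch of that setup (assumed to
 * live in <code>make_dofs()</code>, following the step-40 conventions) is:
 *
 * @code
 *   const IndexSet &locally_owned_dofs = dof_handler.locally_owned_dofs();
 *   const IndexSet locally_relevant_dofs =
 *     DoFTools::extract_locally_relevant_dofs(dof_handler);
 *
 *   // Ghosted vector: owns its local range but also stores the ghost
 *   // (locally relevant, not locally owned) entries needed later for
 *   // evaluation and output.
 *   locally_relevant_solution.reinit(locally_owned_dofs,
 *                                    locally_relevant_dofs,
 *                                    MPI_COMM_WORLD);
 * @endcode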
2291 *
2292 *
2293 * <a name="LDGPoisson.cc-output_results"></a>
2294 * <h4>output_results</h4>
2295 * This function deals with the writing of the results in parallel
2296 * to disk. It is almost exactly the same as
2297 * in @ref step_40 "step-40" and we won't go into detail on it. It is noteworthy
2298 * that in @ref step_40 "step-40" the output is only the scalar solution,
2299 * while in our situation, we are outputting both the scalar
2300 * solution as well as the vector field solution. The only
2301 * difference between this function and the one in @ref step_40 "step-40"
2302 * is in the <code>solution_names</code> vector where we have to add
2303 * the gradient dimensions. Everything else is taken care
2304 * of by the deal.ii library!
2305 *
2306 * @code
2307 *   template<int dim>
2308 *   void
2309 *   LDGPoissonProblem<dim>::
2310 *   output_results() const
2311 *   {
2312 *   std::vector<std::string> solution_names;
2313 *   switch (dim)
2314 *   {
2315 *   case 1:
2316 *   solution_names.push_back("u");
2317 *   solution_names.push_back("du/dx");
2318 *   break;
2319 *  
2320 *   case 2:
2321 *   solution_names.push_back("grad(u)_x");
2322 *   solution_names.push_back("grad(u)_y");
2323 *   solution_names.push_back("u");
2324 *   break;
2325 *  
2326 *   case 3:
2327 *   solution_names.push_back("grad(u)_x");
2328 *   solution_names.push_back("grad(u)_y");
2329 *   solution_names.push_back("grad(u)_z");
2330 *   solution_names.push_back("u");
2331 *   break;
2332 *  
2333 *   default:
2334 *   Assert(false, ExcNotImplemented() );
2335 *   }
2336 *  
2337 *   DataOut<dim> data_out;
2338 *   data_out.attach_dof_handler(dof_handler);
2339 *   data_out.add_data_vector(locally_relevant_solution,
2340 *   solution_names);
2341 *  
2342 *   Vector<float> subdomain(triangulation.n_active_cells());
2343 *  
2344 *   for (unsigned int i=0; i<subdomain.size(); ++i)
2345 *   subdomain(i) = triangulation.locally_owned_subdomain();
2346 *  
2347 *   data_out.add_data_vector(subdomain,"subdomain");
2348 *  
2349 *   data_out.build_patches();
2350 *  
2351 *   const std::string filename = ("solution." +
2352 *   Utilities::int_to_string(
2353 *   triangulation.locally_owned_subdomain(),4));
2354 *  
2355 *   std::ofstream output((filename + ".vtu").c_str());
2356 *   data_out.write_vtu(output);
2357 *  
2358 *   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0 )
2359 *   {
2360 *   std::vector<std::string> filenames;
2361 *   for (unsigned int i=0;
2362 *   i < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
2363 *   i++)
2364 *   {
2365 *   filenames.push_back("solution." +
2366 *   Utilities::int_to_string(i,4) +
2367 *   ".vtu");
2368 *   }
2369 *   std::ofstream master_output("solution.pvtu");
2370 *   data_out.write_pvtu_record(master_output, filenames);
2371 *   }
2372 *   }
2373 *  
2374 *  
2375 * @endcode
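 *
 * As a side note, the components written above are all treated as separate
 * scalar fields. If one wanted visualization programs to recognize the
 * gradient components as a single vector field (in 2d and 3d, where they
 * come first in <code>solution_names</code>), a hedged variation that is not
 * part of this program would pass a DataComponentInterpretation vector
 * along with the names:
 *
 * @code
 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
 *     interpretation(dim,
 *                    DataComponentInterpretation::component_is_part_of_vector);
 *   interpretation.push_back(DataComponentInterpretation::component_is_scalar);
 *
 *   data_out.add_data_vector(locally_relevant_solution,
 *                            solution_names,
 *                            DataOut<dim>::type_dof_data,
 *                            interpretation);
 * @endcode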
2376 *
2377 *
2378 * <a name="LDGPoisson.cc-run"></a>
2379 * <h4>run</h4>
2380 * The only public function of this class is pretty much exactly
2381 * the same as in all the other deal.ii examples, except that I set
2382 * the constant in the DG penalty (@f$\tilde{\sigma}@f$) to 1; it enters
2383 * the flux terms above through the factor <code>penalty/h</code>.
2383 *
2384 * @code
2385 *   template<int dim>
2386 *   void
2387 *   LDGPoissonProblem<dim>::
2388 *   run()
2389 *   {
2390 *   penalty = 1.0;
2391 *   make_grid();
2392 *   make_dofs();
2393 *   assemble_system();
2394 *   solve();
2395 *   output_results();
2396 *   }
2397 *  
2398 *  
2399 * @endcode
2400 *
2401 *
2402 * <a name="LDGPoisson.cc-main"></a>
2403 * <h3>main</h3>
2404 * Here is the main function of our program. Since it is nearly exactly
2405 * the same as @ref step_40 "step-40" and many of the other examples, I won't
2406 * elaborate on it.
2407 *
2408 * @code
2409 *   int main(int argc, char *argv[])
2410 *   {
2411 *  
2412 *   try
2413 *   {
2414 *   using namespace dealii;
2415 *  
2416 *   deallog.depth_console(0);
2417 *  
2418 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv,
2419 *   numbers::invalid_unsigned_int);
2420 *  
2421 *   unsigned int degree = 1;
2422 *   unsigned int n_refine = 6;
2423 *   LDGPoissonProblem<2> Poisson(degree, n_refine);
2424 *   Poisson.run();
2425 *  
2426 *   }
2427 *   catch (std::exception &exc)
2428 *   {
2429 *   std::cerr << std::endl << std::endl
2430 *   << "----------------------------------------------------"
2431 *   << std::endl;
2432 *   std::cerr << "Exception on processing: " << std::endl
2433 *   << exc.what() << std::endl
2434 *   << "Aborting!" << std::endl
2435 *   << "----------------------------------------------------"
2436 *   << std::endl;
2437 *  
2438 *   return 1;
2439 *   }
2440 *   catch (...)
2441 *   {
2442 *   std::cerr << std::endl << std::endl
2443 *   << "----------------------------------------------------"
2444 *   << std::endl;
2445 *   std::cerr << "Unknown exception!" << std::endl
2446 *   << "Aborting!" << std::endl
2447 *   << "----------------------------------------------------"
2448 *   << std::endl;
2449 *   return 1;
2450 *   }
2451 *   return 0;
2452 *   }
2453 * @endcode
2454
2455
2456*/