Distributed_LDG_Method.h
451 *  
452 * @endcode
453 *
454 *
455 * <a name="Functions.cc-Functionscc"></a>
456 * <h3>Functions.cc</h3>
457 * In this file we keep the right hand side function, the Dirichlet boundary
458 * conditions and the solution to our Poisson equation problem. Since
459 * these classes and functions have been discussed extensively in
460 * the deal.ii tutorials we won't discuss them any further.
461 *
462 * @code
463 *   #include <deal.II/base/function.h>
464 *   #include <deal.II/base/tensor_function.h>
465 *   #include <deal.II/lac/vector.h>
466 *   #include <cmath>
467 *  
468 *   using namespace dealii;
469 *  
470 *   template <int dim>
471 *   class RightHandSide : public Function<dim>
472 *   {
473 *   public:
474 *   RightHandSide() : Function<dim>(1)
475 *   {}
476 *  
477 *   virtual double value(const Point<dim> &p,
478 *   const unsigned int component = 0 ) const override;
479 *   };
480 *  
481 *   template <int dim>
482 *   class DirichletBoundaryValues : public Function<dim>
483 *   {
484 *   public:
485 *   DirichletBoundaryValues() : Function<dim>(1)
486 *   {}
487 *  
488 *   virtual double value(const Point<dim> &p,
489 *   const unsigned int component = 0 ) const override;
490 *   };
491 *  
492 *   template<int dim>
493 *   class TrueSolution : public Function<dim>
494 *   {
495 *   public:
496 *   TrueSolution() : Function<dim>(dim+1)
497 *   {}
498 *  
499 *   virtual void vector_value(const Point<dim> &p,
500 *   Vector<double> &values) const override;
501 *   };
502 *  
503 *   template <int dim>
504 *   double
505 *   RightHandSide<dim>::
506 *   value(const Point<dim> &p,
507 *   const unsigned int ) const
508 *   {
509 *   const double x = p[0];
510 *   const double y = p[1];
511 *   return 4*M_PI*M_PI*(cos(2*M_PI*y) - sin(2*M_PI*x));
512 *  
513 *   }
514 *  
515 *   template <int dim>
516 *   double
517 *   DirichletBoundaryValues<dim>::
518 *   value(const Point<dim> &p,
519 *   const unsigned int ) const
520 *   {
521 *   const double x = p[0];
522 *   const double y = p[1];
523 *   return cos(2*M_PI*y) -sin(2*M_PI*x) - x;
524 *   }
525 *  
526 *  
527 *   template <int dim>
528 *   void
529 *   TrueSolution<dim>::
530 *   vector_value(const Point<dim> &p,
531 *   Vector<double> &values) const
532 *   {
533 *   Assert(values.size() == dim+1,
534 *   ExcDimensionMismatch(values.size(), dim+1) );
535 *  
536 *   double x = p[0];
537 *   double y = p[1];
538 *  
539 *   values(0) = 1 + 2*M_PI*cos(2*M_PI*x);
540 *   values(1) = 2*M_PI*sin(2*M_PI*y);
541 *  
542 *   values(2) = cos(2*M_PI*y) - sin(2*M_PI*x) - x;
543 *   }
544 * @endcode
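 *
 * Note that these three classes form a consistent manufactured solution for
 * the Poisson problem @f$-\Delta u = f@f$ written in mixed form: with
 * @f$u = \cos(2\pi y) - \sin(2\pi x) - x@f$ we have
 * @f$\textbf{q} = -\nabla u = \left(1 + 2\pi\cos(2\pi x), \; 2\pi\sin(2\pi y)\right)@f$
 * and
 * @f$f = -\Delta u = 4\pi^{2}\left(\cos(2\pi y) - \sin(2\pi x)\right)@f$,
 * which are exactly the values returned by <code>TrueSolution</code> and
 * <code>RightHandSide</code> above.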
545
546
547<a name="ann-LDGPoisson.cc"></a>
548<h1>Annotated version of LDGPoisson.cc</h1>
549 *
550 *
551 *
552 *
553 * @code
554 *   /* -----------------------------------------------------------------------------
555 *   *
556 *   * SPDX-License-Identifier: LGPL-2.1-or-later
557 *   * Copyright (C) 2017 by Michael Harmon
558 *   *
559 *   * This file is part of the deal.II code gallery.
560 *   *
561 *   * -----------------------------------------------------------------------------
562 *   */
563 *  
564 * @endcode
565 *
566 *
567 * <a name="LDGPoisson.cc-LDGPoissoncc"></a>
568 * <h3>LDGPoisson.cc</h3>
 569 * The code begins as per usual with a long list of the included
 570 * files from the deal.ii library.
571 *
572 * @code
573 *   #include <deal.II/base/quadrature_lib.h>
574 *   #include <deal.II/base/logstream.h>
575 *   #include <deal.II/base/function.h>
576 *   #include <deal.II/base/timer.h>
577 *  
578 *   #include <deal.II/lac/full_matrix.h>
579 *   #include <deal.II/lac/sparse_matrix.h>
580 *  
581 *  
582 *   #include <deal.II/grid/tria.h>
583 *   #include <deal.II/grid/grid_generator.h>
584 *   #include <deal.II/grid/grid_tools.h>
585 *   #include <deal.II/grid/tria_accessor.h>
586 *   #include <deal.II/grid/tria_iterator.h>
587 *  
588 *   #include <deal.II/dofs/dof_handler.h>
589 *   #include <deal.II/dofs/dof_renumbering.h>
590 *   #include <deal.II/dofs/dof_accessor.h>
591 *   #include <deal.II/dofs/dof_tools.h>
592 *  
593 *   #include <fstream>
594 *   #include <iostream>
595 *  
596 * @endcode
597 *
598 * Here's where the classes for the DG methods begin.
599 * We can use either the Lagrange polynomials,
600 *
601 * @code
602 *   #include <deal.II/fe/fe_dgq.h>
603 * @endcode
604 *
605 * or the Legendre polynomials
606 *
607 * @code
608 *   #include <deal.II/fe/fe_dgp.h>
609 * @endcode
610 *
611 * as basis functions. I'll be using the Lagrange polynomials.
612 *
613 * @code
614 *   #include <deal.II/fe/fe_system.h>
615 *   #include <deal.II/fe/fe_values.h>
616 *  
617 *   #include <deal.II/numerics/vector_tools.h>
618 *   #include <deal.II/numerics/matrix_tools.h>
619 *   #include <deal.II/numerics/data_out.h>
620 *  
621 *  
622 * @endcode
623 *
624 * Now we have to load in the deal.ii files that will allow us to use
625 * a distributed computing framework.
626 *
627 * @code
628 *   #include <deal.II/base/utilities.h>
629 *   #include <deal.II/base/index_set.h>
630 *   #include <deal.II/base/conditional_ostream.h>
631 *   #include <deal.II/lac/sparsity_tools.h>
632 *   #include <deal.II/distributed/tria.h>
633 *  
634 *  
635 * @endcode
636 *
637 * Additionally we load the files that will allow us to interact with
638 * the Trilinos library.
639 *
640 * @code
641 *   #include <deal.II/lac/trilinos_sparse_matrix.h>
642 *   #include <deal.II/lac/trilinos_vector.h>
643 *   #include <deal.II/lac/trilinos_solver.h>
644 *  
645 *  
646 * @endcode
647 *
 648 * The Functions.cc file contains all the definitions of the functions we
 649 * will use, i.e. the right hand side function, the boundary conditions
 650 * and the true solution.
651 *
652 * @code
653 *   #include "Functions.cc"
654 *  
655 *   using namespace dealii;
656 *  
657 *  
658 * @endcode
659 *
 660 * Here is the main class for the Local Discontinuous Galerkin method
 661 * applied to Poisson's equation. We won't explain much of
 662 * the class and method declarations, but will dive deeper into describing the
 663 * functions when they are defined. The only thing I will mention
 664 * about the class declaration is that this is where I labeled
 665 * the different types of boundaries using enums.
666 *
667 * @code
668 *   template <int dim>
669 *   class LDGPoissonProblem
670 *   {
671 *  
672 *   public:
673 *   LDGPoissonProblem(const unsigned int degree,
674 *   const unsigned int n_refine);
675 *  
676 *   ~LDGPoissonProblem();
677 *  
678 *   void run();
679 *  
680 *  
681 *   private:
682 *   void make_grid();
683 *  
684 *   void make_dofs();
685 *  
686 *   void assemble_system();
687 *  
688 *   void assemble_cell_terms(const FEValues<dim> &cell_fe,
689 *   FullMatrix<double> &cell_matrix,
690 *   Vector<double> &cell_vector);
691 *  
692 *   void assemble_Neumann_boundary_terms(const FEFaceValues<dim> &face_fe,
693 *   FullMatrix<double> &local_matrix,
694 *   Vector<double> &local_vector);
695 *  
696 *   void assemble_Dirichlet_boundary_terms(const FEFaceValues<dim> &face_fe,
697 *   FullMatrix<double> &local_matrix,
698 *   Vector<double> &local_vector,
699 *   const double &h);
700 *  
701 *   void assemble_flux_terms(const FEFaceValuesBase<dim> &fe_face_values,
702 *   const FEFaceValuesBase<dim> &fe_neighbor_face_values,
703 *   FullMatrix<double> &vi_ui_matrix,
704 *   FullMatrix<double> &vi_ue_matrix,
705 *   FullMatrix<double> &ve_ui_matrix,
706 *   FullMatrix<double> &ve_ue_matrix,
707 *   const double &h);
708 *  
709 *   void distribute_local_flux_to_global(
710 *   const FullMatrix<double> &vi_ui_matrix,
711 *   const FullMatrix<double> &vi_ue_matrix,
712 *   const FullMatrix<double> &ve_ui_matrix,
713 *   const FullMatrix<double> &ve_ue_matrix,
714 *   const std::vector<types::global_dof_index> &local_dof_indices,
715 *   const std::vector<types::global_dof_index> &local_neighbor_dof_indices);
716 *  
717 *   void solve();
718 *  
719 *   void output_results() const;
720 *  
721 *   const unsigned int degree;
722 *   const unsigned int n_refine;
723 *   double penalty;
724 *   double h_max;
725 *   double h_min;
726 *  
727 *   enum
728 *   {
729 *   Dirichlet,
730 *   Neumann
731 *   };
732 *  
733 *   parallel::distributed::Triangulation<dim> triangulation;
734 *   FESystem<dim> fe;
735 *   DoFHandler<dim> dof_handler;
736 *  
737 *   AffineConstraints<double> constraints;
738 *  
739 *   SparsityPattern sparsity_pattern;
740 *  
741 *   TrilinosWrappers::SparseMatrix system_matrix;
742 *   TrilinosWrappers::MPI::Vector locally_relevant_solution;
743 *   TrilinosWrappers::MPI::Vector system_rhs;
744 *  
745 *   ConditionalOStream pcout;
746 *   TimerOutput computing_timer;
747 *  
748 *   SolverControl solver_control;
749 *   TrilinosWrappers::SolverDirect solver;
750 *  
751 *   const RightHandSide<dim> rhs_function;
752 *   const DirichletBoundaryValues<dim> Dirichlet_bc_function;
753 *   const TrueSolution<dim> true_solution;
754 *   };
755 *  
756 *  
757 * @endcode
758 *
759 *
760 * <a name="LDGPoisson.cc-Classconstructoranddestructor"></a>
761 * <h4>Class constructor and destructor</h4>
 762 * The constructor and destructor for this class are very much
 763 * like those for @ref step_40 "step-40". The difference is that we'll be passing
764 * in an integer, <code>degree</code>, which tells us the maximum order
765 * of the polynomial to use as well as <code>n_refine</code> which is the
766 * global number of times we refine our mesh. The other main differences
767 * are that we use a FESystem object for our choice of basis
768 * functions. This is reminiscent of the mixed finite element method in
769 * @ref step_20 "step-20", however, in our case we use a FESystem
770 * of the form,
771 *
772
773 *
774 * <code>
775 * fe( FESystem<dim>(FE_DGQ<dim>(degree), dim), 1,
776 * FE_DGQ<dim>(degree), 1)
777 * </code>
778 *
779
780 *
781 * which tells us that the basis functions contain discontinuous polynomials
782 * of order <code>degree</code> in each of the <code>dim</code> dimensions
783 * for the vector field. For the scalar unknown we
784 * use a discontinuous polynomial of the order <code>degree</code>.
785 * The LDG method for Poisson equations solves for both the primary variable
786 * as well as its gradient, just like the mixed finite element method.
787 * However, unlike the mixed method, the LDG method uses discontinuous
788 * polynomials to approximate both variables.
789 * The other difference between our constructor and that of @ref step_40 "step-40" is that
 790 * we also instantiate our linear solver in the constructor definition.
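 *
 * In other words, the element has @f$dim+1@f$ components: components
 * @f$0,\ldots,dim-1@f$ hold the vector field @f$\textbf{q}@f$ and component
 * @f$dim@f$ holds the scalar @f$u@f$. As a small sketch of how this layout is
 * used, the assembly routines below access the two fields through extractors
 * anchored at these component indices:
 *
 * @code
 *   // Components 0..dim-1 of the FESystem: the vector field q.
 *   const FEValuesExtractors::Vector VectorField(0);
 *   // Component dim of the FESystem: the scalar potential u.
 *   const FEValuesExtractors::Scalar Potential(dim);
 * @endcode
 *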
791 *
792 * @code
793 *   template <int dim>
794 *   LDGPoissonProblem<dim>::
795 *   LDGPoissonProblem(const unsigned int degree,
796 *   const unsigned int n_refine)
797 *   :
798 *   degree(degree),
799 *   n_refine(n_refine),
 800 *   triangulation(MPI_COMM_WORLD,
 801 *   typename Triangulation<dim>::MeshSmoothing
 802 *   (Triangulation<dim>::smoothing_on_refinement |
 803 *   Triangulation<dim>::smoothing_on_coarsening)),
804 *   fe( FESystem<dim>(FE_DGQ<dim>(degree), dim), 1,
805 *   FE_DGQ<dim>(degree), 1),
806 *   dof_handler(triangulation),
807 *   pcout(std::cout,
808 *   Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0),
809 *   computing_timer(MPI_COMM_WORLD,
 810 *   pcout,
 811 *   TimerOutput::summary,
 812 *   TimerOutput::wall_times),
813 *   solver_control(1),
814 *   solver(solver_control),
815 *   rhs_function(),
816 *   Dirichlet_bc_function()
817 *   {
818 *   }
819 *  
820 *   template <int dim>
821 *   LDGPoissonProblem<dim>::
822 *   ~LDGPoissonProblem()
823 *   {
824 *   dof_handler.clear();
825 *   }
826 *  
827 * @endcode
828 *
829 *
830 * <a name="LDGPoisson.cc-Make_grid"></a>
831 * <h4>Make_grid</h4>
832 * This function shows how to make a grid using local
833 * refinement and also shows how to label the boundaries
834 * using the defined enum.
835 *
836 * @code
837 *   template <int dim>
838 *   void
839 *   LDGPoissonProblem<dim>::
840 *   make_grid()
 841 *   {
 842 *   GridGenerator::hyper_cube(triangulation, 0, 1);
843 *   triangulation.refine_global(n_refine);
844 *  
845 *   unsigned int local_refine = 2;
846 *   for (unsigned int i =0; i <local_refine; ++i)
 847 *   {
 848 *   typename parallel::distributed::Triangulation<dim>::active_cell_iterator
849 *   cell = triangulation.begin_active(),
850 *   endc = triangulation.end();
851 *  
852 * @endcode
853 *
 854 * We loop over all the cells in the mesh
 855 * and mark the appropriate cells for refinement.
 856 * In this example we only choose cells which are
 857 * near @f$x=0@f$ and @f$x=1@f$ in the
 858 * domain. This was just to show that
 859 * the LDG method works with local
 860 * refinement; discussions on building
 861 * more realistic refinement strategies can be
 862 * found elsewhere in the deal.ii
 863 * documentation.
864 *
865 * @code
866 *   for (; cell != endc; ++cell)
867 *   {
868 *   if ((cell->center()[1]) > 0.9 )
869 *   {
870 *   if ((cell->center()[0] > 0.9) || (cell->center()[0] < 0.1))
871 *   cell->set_refine_flag();
872 *   }
873 *   }
874 * @endcode
875 *
876 * Now that we have marked all the cells
877 * that we want to refine locally we can go ahead and
878 * refine them.
879 *
880 * @code
881 *   triangulation.execute_coarsening_and_refinement();
882 *   }
883 *  
884 * @endcode
885 *
 886 * To label the boundary faces of the mesh with their
 887 * type, i.e. Dirichlet or Neumann,
 888 * we loop over all the cells in the mesh and then over
 889 * all the faces of each cell. We then have to figure out
 890 * which faces are on the boundary and set the
 891 * <code>boundary_id</code> of all
 892 * boundary faces to <code>Dirichlet</code>.
 893 * We remark that one could easily set more complicated
 894 * conditions where there are both Dirichlet and
 895 * Neumann boundaries (a sketch of one such variant follows this function's code).
896 *
 897 * @code
 898 *   typename parallel::distributed::Triangulation<dim>::cell_iterator
899 *   cell = triangulation.begin(),
900 *   endc = triangulation.end();
901 *   for (; cell != endc; ++cell)
902 *   {
903 *   for (unsigned int face_no=0;
904 *   face_no < GeometryInfo<dim>::faces_per_cell;
905 *   face_no++)
906 *   {
907 *   if (cell->face(face_no)->at_boundary() )
908 *   cell->face(face_no)->set_boundary_id(Dirichlet);
909 *   }
910 *   }
911 *   }
912 *  
913 * @endcode
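 *
 * As remarked above, one could mark different parts of the boundary with
 * different types. A hypothetical sketch (not taken from the original program,
 * and assuming the unit-square domain used here) that would mark faces on the
 * @f$x=0@f$ plane as Neumann and everything else as Dirichlet could look like:
 *
 * @code
 *   if (cell->face(face_no)->at_boundary())
 *     {
 *       // Faces whose center lies on the x=0 plane get the Neumann id,
 *       // all other boundary faces get the Dirichlet id.
 *       if (std::fabs(cell->face(face_no)->center()[0]) < 1e-12)
 *         cell->face(face_no)->set_boundary_id(Neumann);
 *       else
 *         cell->face(face_no)->set_boundary_id(Dirichlet);
 *     }
 * @endcode
 *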
914 *
915 *
916 * <a name="LDGPoisson.cc-make_dofs"></a>
 917 * <h4>make_dofs</h4>
918 * This function is responsible for distributing the degrees of
919 * freedom (dofs) to the processors and allocating memory for
920 * the global system matrix, <code>system_matrix</code>,
921 * and global right hand side vector, <code>system_rhs</code> .
922 * The dofs are the unknown coefficients for the polynomial
923 * approximation of our solution to Poisson's equation in the scalar
924 * variable and its gradient.
925 *
926 * @code
927 *   template <int dim>
928 *   void
929 *   LDGPoissonProblem<dim>::
930 *   make_dofs()
931 *   {
932 *   TimerOutput::Scope t(computing_timer, "setup");
933 *  
934 * @endcode
935 *
 936 * The first step to setting up our linear system is to
 937 * distribute the degrees of freedom (dofs) across the processors;
 938 * this is done with the <code>distribute_dofs()</code>
 939 * method of the DoFHandler. We remark that this is
 940 * the exact same function call that occurs when using deal.ii
 941 * on a single machine; the DoFHandler automatically knows
 942 * that we are in a distributed setting because it was instantiated
 943 * with a distributed triangulation!
944 *
945 * @code
946 *   dof_handler.distribute_dofs(fe);
947 *  
948 * @endcode
949 *
950 * We now renumber the dofs so that the vector of unknown dofs
951 * that we are solving for, <code>locally_relevant_solution</code>,
952 * corresponds to a vector of the form,
953 *
954
955 *
956 * @f$ \left[\begin{matrix} \textbf{Q} \\ \textbf{U} \end{matrix}\right] @f$
957 *
958 * @code
959 *   DoFRenumbering::component_wise(dof_handler);
960 *  
961 * @endcode
962 *
 963 * Now we get the locally owned dofs, that is, the dofs that are local
 964 * to this processor. These dofs correspond to the entries in the
 965 * matrix and vectors that we will write to.
966 *
967 * @code
968 *   const IndexSet &locally_owned_dofs = dof_handler.locally_owned_dofs();
969 *  
970 * @endcode
971 *
 972 * In addition to the locally owned dofs, we also need the locally
 973 * relevant dofs. These are the dofs that we have read access to and
 974 * need in order to do computations on our processor, but that
 975 * we do not have the ability to write to.
976 *
977 * @code
978 *   const IndexSet locally_relevant_dofs =
979 *   DoFTools::extract_locally_relevant_dofs(dof_handler);
980 *  
981 *   const std::vector<types::global_dof_index> dofs_per_component =
982 *   DoFTools::count_dofs_per_fe_component(dof_handler);
983 *  
984 * @endcode
985 *
 986 * Discontinuous Galerkin methods are fantastic methods in part because
 987 * many of the limitations of traditional finite element methods no longer
 988 * exist. Specifically, the need to use constraint matrices
 989 * in order to handle hanging nodes is no longer necessary. However,
 990 * we will continue to use the constraint matrices in order to efficiently
 991 * distribute local computations to the global system, i.e. to the
 992 * <code>system_matrix</code> and <code>system_rhs</code>. Therefore, we
 993 * just instantiate the constraints object, then clear and close it.
994 *
995 * @code
996 *   constraints.clear();
997 *   constraints.close();
998 *  
999 *  
1000 * @endcode
1001 *
 1002 * Just like @ref step_40 "step-40" we create a dynamic sparsity pattern
 1003 * and distribute it to the processors. Notice how we do not have to
 1004 * explicitly mention that we are using a FESystem for a system of
 1005 * variables instead of a FE_DGQ for a scalar variable
 1006 * or that we are using a distributed DoFHandler. All these specifics
 1007 * are taken care of under the hood by the deal.ii library.
 1008 * In order to build the sparsity
 1009 * pattern we use the DoFTools::make_flux_sparsity_pattern function
 1010 * since we are using a DG method and need to take into account the DG
 1011 * fluxes in the sparsity pattern.
1012 *
1013 * @code
1014 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
1015 *   DoFTools::make_flux_sparsity_pattern(dof_handler,
1016 *   dsp);
1017 *  
1018 *   SparsityTools::distribute_sparsity_pattern(dsp,
1019 *   dof_handler.locally_owned_dofs(),
1020 *   MPI_COMM_WORLD,
1021 *   locally_relevant_dofs);
1022 *  
1023 * @endcode
1024 *
1025 * Here is one area that I had to learn the hard way. The local
 1026 * discontinuous Galerkin method, like the mixed method with the
 1027 * Raviart-Thomas element, is written
 1028 * in mixed form and will lead to a block-structured matrix.
 1029 * In @ref step_20 "step-20" we see that we initialize the
 1030 * <code>system_matrix</code>
1031 * such that we explicitly declare it to be block-structured.
1032 * It turns out there are reasons to do this when you are going to be
1033 * using a Schur complement method to solve the system of equations.
1034 * While the LDG method will lead to a block-structured matrix,
1035 * we do not have to explicitly declare our matrix to be one.
1036 * I found that most of the distributed linear solvers did not
1037 * accept block structured matrices and since I was using a
1038 * distributed direct solver it was unnecessary to explicitly use a
1039 * block structured matrix.
1040 *
1041 * @code
1042 *   system_matrix.reinit(locally_owned_dofs,
1043 *   locally_owned_dofs,
1044 *   dsp,
1045 *   MPI_COMM_WORLD);
1046 *  
1047 * @endcode
1048 *
 1049 * The final note that I will make about this subroutine is that
 1050 * we initialize this processor's solution and the
 1051 * right hand side vector the exact same way as we did in @ref step_40 "step-40".
1052 * We should note that the <code>locally_relevant_solution</code> solution
1053 * vector includes dofs that are locally relevant to our computations
1054 * while the <code>system_rhs</code> right hand side vector will only
1055 * include dofs that are locally owned by this processor.
1056 *
1057 * @code
1058 *   locally_relevant_solution.reinit(locally_relevant_dofs,
1059 *   MPI_COMM_WORLD);
1060 *  
1061 *   system_rhs.reinit(locally_owned_dofs,
1062 *   locally_relevant_dofs,
1063 *   MPI_COMM_WORLD,
1064 *   true);
1065 *  
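 *   // Note: every one of the dim vector-field components has the same
 *   // number of dofs, so dim * dofs_per_component[0] is the total number
 *   // of vector-field dofs, while component dim holds the potential dofs.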
1066 *   const unsigned int n_vector_field = dim * dofs_per_component[0];
1067 *   const unsigned int n_potential = dofs_per_component[dim];
1068 *  
1069 *   pcout << "Number of active cells : "
1070 *   << triangulation.n_global_active_cells()
1071 *   << std::endl
1072 *   << "Number of degrees of freedom: "
1073 *   << dof_handler.n_dofs()
1074 *   << " (" << n_vector_field << " + " << n_potential << ")"
1075 *   << std::endl;
1076 *   }
1077 *  
1078 * @endcode
1079 *
1080 *
1081 * <a name="LDGPoisson.cc-assemble_system"></a>
1082 * <h4>assemble_system</h4>
1083 * This is the function that will assemble the global system matrix and
1084 * global right hand side vector for the LDG method. It starts out
1085 * like many of the deal.ii tutorial codes: declaring quadrature formulas
1086 * and UpdateFlags objects, as well as vectors that will hold the
1087 * dof indices for the cells we are working on in the global system.
1088 *
1089 * @code
1090 *   template <int dim>
1091 *   void
1092 *   LDGPoissonProblem<dim>::
1093 *   assemble_system()
1094 *   {
1095 *   TimerOutput::Scope t(computing_timer, "assembly");
1096 *  
1097 *   QGauss<dim> quadrature_formula(fe.degree+1);
1098 *   QGauss<dim-1> face_quadrature_formula(fe.degree+1);
1099 *  
1100 *   const UpdateFlags update_flags = update_values
1101 *   | update_gradients
1102 *   | update_quadrature_points
1103 *   | update_JxW_values;
1104 *  
1105 *   const UpdateFlags face_update_flags = update_values
1106 *   | update_normal_vectors
1107 *   | update_quadrature_points
1108 *   | update_JxW_values;
1109 *  
1110 *   const unsigned int dofs_per_cell = fe.dofs_per_cell;
1111 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1112 *   std::vector<types::global_dof_index>
1113 *   local_neighbor_dof_indices(dofs_per_cell);
1114 *  
1115 * @endcode
1116 *
1117 * We first remark that we have the FEValues objects for
1118 * the values of our cell basis functions as was done in most
1119 * other examples. Now because we
1120 * are using discontinuous Galerkin methods we also introduce a
1121 * FEFaceValues object, <code>fe_face_values</code>,
1122 * for evaluating the basis functions
1123 * on one side of an element face as well as another FEFaceValues object,
1124 * <code>fe_neighbor_face_values</code>, for evaluating the basis functions
1125 * on the opposite side of the face, i.e. on the neighboring element's face.
1126 * In addition, we also introduce a FESubfaceValues object,
1127 * <code>fe_subface_values</code>, that
1128 * will be used for dealing with faces that have multiple refinement
 1129 * levels, i.e. hanging nodes. When we have to evaluate the fluxes across
 1130 * a face that has multiple refinement levels, we need to evaluate the
 1131 * fluxes across all its children's faces; we'll explain this more when
1132 * the time comes.
1133 *
1134 * @code
1135 *   FEValues<dim> fe_values(fe, quadrature_formula, update_flags);
1136 *  
1137 *   FEFaceValues<dim> fe_face_values(fe,face_quadrature_formula,
1138 *   face_update_flags);
1139 *  
1140 *   FEFaceValues<dim> fe_neighbor_face_values(fe,
1141 *   face_quadrature_formula,
1142 *   face_update_flags);
1143 *  
1144 *   FESubfaceValues<dim> fe_subface_values(fe, face_quadrature_formula,
1145 *   face_update_flags);
1146 *  
1147 * @endcode
1148 *
1149 * Here are the local (dense) matrix and right hand side vector for
1150 * the solid integrals as well as the integrals on the boundaries in the
1151 * local discontinuous Galerkin method. These terms will be built for
1152 * each local element in the mesh and then distributed to the global
1153 * system matrix and right hand side vector.
1154 *
1155 * @code
1156 *   FullMatrix<double> local_matrix(dofs_per_cell,dofs_per_cell);
1157 *   Vector<double> local_vector(dofs_per_cell);
1158 *  
1159 * @endcode
1160 *
1161 * The next four matrices are used to incorporate the flux integrals across
1162 * interior faces of the mesh:
1163 *
1164 * @code
1165 *   FullMatrix<double> vi_ui_matrix(dofs_per_cell, dofs_per_cell);
1166 *   FullMatrix<double> vi_ue_matrix(dofs_per_cell, dofs_per_cell);
1167 *   FullMatrix<double> ve_ui_matrix(dofs_per_cell, dofs_per_cell);
1168 *   FullMatrix<double> ve_ue_matrix(dofs_per_cell, dofs_per_cell);
1169 * @endcode
1170 *
 1171 * As explained in the section on the LDG method, we take our test
 1172 * function to be v, multiply it against our differential
 1173 * equation in u, and perform integration by parts as explained in the
 1174 * introduction. Using this notation for the test and solution functions,
 1175 * the matrices below will then stand for:
1176 *
1177
1178 *
1179 * <code>vi_ui</code> - Taking the value of the test function from
1180 * interior of this cell's face and the solution function
1181 * from the interior of this cell.
1182 *
1183
1184 *
1185 * <code>vi_ue</code> - Taking the value of the test function from
1186 * interior of this cell's face and the solution function
1187 * from the exterior of this cell.
1188 *
1189
1190 *
1191 * <code>ve_ui</code> - Taking the value of the test function from
1192 * exterior of this cell's face and the solution function
1193 * from the interior of this cell.
1194 *
1195
1196 *
1197 * <code>ve_ue</code> - Taking the value of the test function from
1198 * exterior of this cell's face and the solution function
1199 * from the exterior of this cell.
1200 *
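 *
 * In terms of where these four matrices end up in the global system, the row
 * indices always come from the cell that supplies the test function and the
 * column indices from the cell that supplies the solution function, i.e.
 *
 * @code
 *   //          (row indices,                 column indices)
 *   // vi_ui -> (local_dof_indices,           local_dof_indices)
 *   // vi_ue -> (local_dof_indices,           local_neighbor_dof_indices)
 *   // ve_ui -> (local_neighbor_dof_indices,  local_dof_indices)
 *   // ve_ue -> (local_neighbor_dof_indices,  local_neighbor_dof_indices)
 * @endcode
 *
 * which is exactly the ordering used later in
 * <code>distribute_local_flux_to_global()</code>.
 *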
1201
1202 *
 1203 * Now that we have gotten the preliminaries out of the way,
 1204 * we loop over all the cells
 1205 * and assemble the local system matrix and local right hand side vector.
 1207 *
 1208 * @code
 1209 *   typename DoFHandler<dim>::active_cell_iterator
1210 *   cell = dof_handler.begin_active(),
1211 *   endc = dof_handler.end();
1212 *  
1213 *   for (; cell!=endc; ++cell)
1214 *   {
1215 * @endcode
1216 *
1217 * Now, since we are working in a distributed setting,
1218 * we can only work on cells and write to dofs in the
1219 * <code>system_matrix</code>
 1220 * and <code>system_rhs</code>
 1221 * that correspond to cells that are locally owned
1222 * by this processor. We note that while we can only write to locally
1223 * owned dofs, we will still use information from cells that are
1224 * locally relevant. This is very much the same as in @ref step_40 "step-40".
1225 *
1226 * @code
1227 *   if (cell->is_locally_owned())
1228 *   {
1229 * @endcode
1230 *
1231 * We now assemble the local contributions to the system matrix
1232 * that includes the solid integrals in the LDG method as well as
1233 * the right hand side vector. This involves resetting the local
1234 * matrix and vector to contain all zeros, reinitializing the
1235 * FEValues object for this cell and then building the
1236 * <code>local_matrix</code> and <code>local_rhs</code> vector.
1237 *
1238 * @code
1239 *   local_matrix = 0;
1240 *   local_vector = 0;
1241 *  
1242 *   fe_values.reinit(cell);
1243 *   assemble_cell_terms(fe_values,
1244 *   local_matrix,
1245 *   local_vector);
1246 *  
1247 * @endcode
1248 *
 1249 * We remark that we need to get the local indices for the dofs
 1250 * of this cell before we begin to compute the contributions
1251 * from the numerical fluxes, i.e. the boundary conditions and
1252 * interior fluxes.
1253 *
1254 * @code
1255 *   cell->get_dof_indices(local_dof_indices);
1256 *  
1257 * @endcode
1258 *
 1259 * Now is where we start to loop over all the faces of the cell
 1260 * and construct the local contributions from the numerical fluxes.
 1261 * The numerical fluxes will be due to 3 contributions: the
 1262 * interior faces, the faces on the Neumann boundary and the faces
 1263 * on the Dirichlet boundary. We instantiate a
 1264 * <code>face_iterator</code> to loop
 1265 * over all the faces of this cell and first see if the face is on
 1266 * the boundary. Notice how we do not reinitialize the
 1267 * <code>fe_face_values</code>
 1268 * object for the face until we know that we are actually on a face
 1269 * that lies on the boundary of the domain. The reason for doing this
 1270 * is for computational efficiency; reinitializing the FEFaceValues
 1271 * for each face is expensive and we do not want to do it unless we
 1272 * are actually going to use it to do computations. After this, we test
 1273 * if the face
 1274 * is on a Dirichlet or a Neumann segment of the boundary and
1275 * call the appropriate subroutine to assemble the contributions for
1276 * that boundary. Note that this assembles the flux contribution
1277 * in the <code>local_matrix</code> as well as the boundary
1278 * condition that ends up
1279 * in the <code>local_vector</code>.
1280 *
1281 * @code
1282 *   for (unsigned int face_no=0;
1283 *   face_no< GeometryInfo<dim>::faces_per_cell;
1284 *   face_no++)
1285 *   {
1286 *   typename DoFHandler<dim>::face_iterator face =
1287 *   cell->face(face_no);
1288 *  
1289 *   if (face->at_boundary() )
1290 *   {
1291 *   fe_face_values.reinit(cell, face_no);
1292 *  
1293 *   if (face->boundary_id() == Dirichlet)
1294 *   {
1295 * @endcode
1296 *
 1297 * Here notice that in order to assemble the
 1298 * flux due to the penalty term for the
 1299 * Dirichlet boundary condition (the @f$\sigma@f$ in the formulas is
 1300 * implemented as <code>penalty/h</code>) we need the
 1301 * local cell diameter size and we can get
 1302 * that value for this specific cell with the following,
1303 *
1304 * @code
1305 *   double h = cell->diameter();
1306 *   assemble_Dirichlet_boundary_terms(fe_face_values,
1307 *   local_matrix,
1308 *   local_vector,
1309 *   h);
1310 *   }
1311 *   else if (face->boundary_id() == Neumann)
1312 *   {
1313 *   assemble_Neumann_boundary_terms(fe_face_values,
1314 *   local_matrix,
1315 *   local_vector);
1316 *   }
1317 *   else
1318 *   Assert(false, ExcNotImplemented() );
1319 *   }
1320 *   else
1321 *   {
1322 * @endcode
1323 *
1324 * At this point we know that the face we are on is an
1325 * interior face. We can begin to assemble the interior
1326 * flux matrices, but first we want to make sure that the
1327 * neighbor cell to this face is a valid cell. Once we know
1328 * that the neighbor is a valid cell then we also want to get
 1329 * the neighbor cell that shares this cell's face.
1330 *
1331
1332 *
1333 *
1334 * @code
1335 *   Assert(cell->neighbor(face_no).state() ==
1336 *   IteratorState::valid,
1337 *   ExcInternalError());
1338 *  
1339 *   typename DoFHandler<dim>::cell_iterator neighbor =
1340 *   cell->neighbor(face_no);
1341 *  
1342 * @endcode
1343 *
1344 * Now that we have the two cells whose face we want to
1345 * compute the numerical flux across, we need to know
1346 * if the face has been refined, i.e. if it has children
1347 * faces. This occurs when one of the cells has a
1348 * different level of refinement than
1349 * the other cell. If this is the case, then this face
1350 * has a different level of refinement than the other faces
1351 * of the cell, i.e. on this face there is a hanging node.
 1352 * Hanging nodes are not a problem in DG methods; the only
 1353 * time we have to watch out for them is at this step,
 1354 * and as you will see the changes we have to make
 1355 * are minor.
1356 *
1357 * @code
1358 *   if (face->has_children())
1359 *   {
1360 * @endcode
1361 *
1362 * We now need to find the face of our neighbor cell
1363 * such that neighbor(neigh_face_no) = cell(face_no).
1364 *
1365 * @code
1366 *   const unsigned int neighbor_face_no =
1367 *   cell->neighbor_of_neighbor(face_no);
1368 *  
1369 * @endcode
1370 *
1371 * Once we do this we then have to loop over all the
1372 * subfaces (children faces) of our cell's face and
1373 * compute the interior fluxes across the children faces
1374 * and the neighbor's face.
1375 *
1376 * @code
1377 *   for (unsigned int subface_no=0;
1378 *   subface_no < face->n_children();
1379 *   ++subface_no)
1380 *   {
1381 * @endcode
1382 *
1383 * We then get the neighbor cell's subface that
1384 * matches our cell face's subface and the
1385 * specific subface number. We assert that the parent
1386 * face cannot be more than one level of
1387 * refinement above the child's face. This is
1388 * because the deal.ii library does not allow
1389 * neighboring cells to have refinement levels
1390 * that are more than one level in difference.
1391 *
1392 * @code
1393 *   typename DoFHandler<dim>::cell_iterator neighbor_child =
1394 *   cell->neighbor_child_on_subface(face_no,
1395 *   subface_no);
1396 *  
1397 *   Assert(!neighbor_child->has_children(),
1398 *   ExcInternalError());
1399 *  
1400 * @endcode
1401 *
 1402 * Now that we are ready to build the local flux
 1403 * matrices for
 1404 * this face we reset them to zero and
 1405 * reinitialize the <code>fe_subface_values</code>
 1406 * object on this cell's subface and
 1407 * <code>neighbor_child</code>'s
 1408 * <code>fe_neighbor_face_values</code>
 1409 * objects on the appropriate faces.
1410 *
1411 * @code
1412 *   vi_ui_matrix = 0;
1413 *   vi_ue_matrix = 0;
1414 *   ve_ui_matrix = 0;
1415 *   ve_ue_matrix = 0;
1416 *  
1417 *   fe_subface_values.reinit(cell, face_no, subface_no);
1418 *   fe_neighbor_face_values.reinit(neighbor_child,
1419 *   neighbor_face_no);
1420 *  
1421 * @endcode
1422 *
 1423 * In addition, we get the minimum of the diameters of
1424 * the two cells to include in the penalty term
1425 *
1426 * @code
1427 *   double h = std::min(cell->diameter(),
1428 *   neighbor_child->diameter());
1429 *  
1430 * @endcode
1431 *
1432 * We now finally assemble the interior fluxes for
1433 * the case of a face which has been refined using
1434 * exactly the same subroutine as we do when both
1435 * cells have the same refinement level.
1436 *
1437 * @code
1438 *   assemble_flux_terms(fe_subface_values,
1439 *   fe_neighbor_face_values,
1440 *   vi_ui_matrix,
1441 *   vi_ue_matrix,
1442 *   ve_ui_matrix,
1443 *   ve_ue_matrix,
1444 *   h);
1445 *  
1446 * @endcode
1447 *
 1448 * Now all that is left to be done before distributing
 1449 * the local flux matrices to the global system
 1450 * is to get the neighbor child's dof indices.
1451 *
1452 * @code
1453 *   neighbor_child->get_dof_indices(local_neighbor_dof_indices);
1454 *  
1455 * @endcode
1456 *
 1457 * Once we have this cell's dof indices and the
1458 * neighboring cell's dof indices we can use the
1459 * ConstraintMatrix to distribute the local flux
1460 * matrices to the global system matrix.
1461 * This is done through the class function
1462 * <code>distribute_local_flux_to_global()</code>.
1463 *
1464 * @code
1465 *   distribute_local_flux_to_global(
1466 *   vi_ui_matrix,
1467 *   vi_ue_matrix,
1468 *   ve_ui_matrix,
1469 *   ve_ue_matrix,
1470 *   local_dof_indices,
1471 *   local_neighbor_dof_indices);
1472 *   }
1473 *   }
1474 *   else
1475 *   {
1476 * @endcode
1477 *
1478 * At this point we know that this cell and the neighbor
1479 * of this cell are on the same refinement level and
1480 * the work to assemble the interior flux matrices
1481 * is very much the same as before. In fact it is
1482 * much simpler since we do not have to loop through the
1483 * subfaces. However, we have to check that we do
1484 * not compute the same contribution twice. This would
1485 * happen because we are looping over all the faces of
1486 * all the cells in the mesh
1487 * and assembling the interior flux matrices for each face.
 1488 * To avoid assembling the interior flux matrices
 1489 * twice we only compute the
 1490 * interior fluxes once for each face by restricting that
 1491 * the following computation only occurs on
 1492 * the cell with the lower CellId.
1493 *
1494 * @code
1495 *   if (neighbor->level() == cell->level() &&
1496 *   cell->id() < neighbor->id())
1497 *   {
1498 * @endcode
1499 *
1500 * Here we find the neighbor face such that
1501 * neighbor(neigh_face_no) = cell(face_no).
 1502 * In addition, we reinitialize the FEFaceValues
1503 * and neighbor cell's FEFaceValues on their
1504 * respective cells' faces, as well as get the
1505 * minimum diameter of this cell
1506 * and the neighbor cell and assign
1507 * it to <code>h</code>.
1508 *
1509 * @code
1510 *   const unsigned int neighbor_face_no =
1511 *   cell->neighbor_of_neighbor(face_no);
1512 *  
1513 *   vi_ui_matrix = 0;
1514 *   vi_ue_matrix = 0;
1515 *   ve_ui_matrix = 0;
1516 *   ve_ue_matrix = 0;
1517 *  
1518 *   fe_face_values.reinit(cell, face_no);
1519 *   fe_neighbor_face_values.reinit(neighbor,
1520 *   neighbor_face_no);
1521 *  
1522 *   double h = std::min(cell->diameter(),
1523 *   neighbor->diameter());
1524 *  
1525 * @endcode
1526 *
1527 * Just as before we assemble the interior fluxes
1528 * using the
1529 * <code>assemble_flux_terms</code> subroutine,
1530 * get the neighbor cell's
1531 * face dof indices and use the constraint matrix to
1532 * distribute the local flux matrices to the global
1533 * <code>system_matrix</code> using the class
1534 * function
1535 * <code>distribute_local_flux_to_global()</code>
1536 *
1537 * @code
1538 *   assemble_flux_terms(fe_face_values,
1539 *   fe_neighbor_face_values,
1540 *   vi_ui_matrix,
1541 *   vi_ue_matrix,
1542 *   ve_ui_matrix,
1543 *   ve_ue_matrix,
1544 *   h);
1545 *  
1546 *   neighbor->get_dof_indices(local_neighbor_dof_indices);
1547 *  
1548 *   distribute_local_flux_to_global(
1549 *   vi_ui_matrix,
1550 *   vi_ue_matrix,
1551 *   ve_ui_matrix,
1552 *   ve_ue_matrix,
1553 *   local_dof_indices,
1554 *   local_neighbor_dof_indices);
1555 *  
1556 *  
1557 *   }
1558 *   }
1559 *   }
1560 *   }
1561 *  
1562 *  
1563 * @endcode
1564 *
 1565 * Now that we have looped over all the faces for this
 1566 * cell and computed as well as distributed the local
 1567 * flux matrices to the <code>system_matrix</code>, we
 1568 * can finally distribute the cell's <code>local_matrix</code>
 1569 * and <code>local_vector</code> contribution to the
 1570 * global system matrix and global right hand side vector.
 1571 * We remark that we have to wait until this point
 1572 * to distribute the <code>local_matrix</code>
 1573 * and <code>local_vector</code> to the global system.
 1574 * The reason is that in looping over the faces,
 1575 * the faces on the boundary of the domain contribute
 1576 * to the <code>local_matrix</code>
 1577 * and <code>local_vector</code>. We could distribute
 1578 * the local contributions for each component separately,
 1579 * but writing to the distributed sparse matrix and vector
 1580 * is expensive and we want to minimize the number of times
 1581 * we do so.
1582 *
1583 * @code
1584 *   constraints.distribute_local_to_global(local_matrix,
1585 *   local_dof_indices,
1586 *   system_matrix);
1587 *  
1588 *   constraints.distribute_local_to_global(local_vector,
1589 *   local_dof_indices,
1590 *   system_rhs);
1591 *  
1592 *   }
1593 *   }
1594 *  
1595 * @endcode
1596 *
1597 * We need to synchronize assembly of our global system
1598 * matrix and global right hand side vector with all the other
1599 * processors and
1600 * use the compress() function to do this.
1601 * This was discussed in detail in @ref step_40 "step-40".
1602 *
1603 * @code
1604 *   system_matrix.compress(VectorOperation::add);
1605 *   system_rhs.compress(VectorOperation::add);
1606 *   }
1607 *  
1608 *  
1609 * @endcode
1610 *
1611 *
1612 * <a name="LDGPoisson.cc-assemble_cell_terms"></a>
1613 * <h4>assemble_cell_terms</h4>
1614 * This function deals with constructing the local matrix due to
 1615 * the solid integrals over each element and is very similar to
 1616 * the other examples in the deal.ii tutorials.
1617 *
1618 * @code
1619 *   template<int dim>
1620 *   void
1621 *   LDGPoissonProblem<dim>::
1622 *   assemble_cell_terms(
1623 *   const FEValues<dim> &cell_fe,
1624 *   FullMatrix<double> &cell_matrix,
1625 *   Vector<double> &cell_vector)
1626 *   {
1627 *   const unsigned int dofs_per_cell = cell_fe.dofs_per_cell;
1628 *   const unsigned int n_q_points = cell_fe.n_quadrature_points;
1629 *  
1630 *   const FEValuesExtractors::Vector VectorField(0);
1631 *   const FEValuesExtractors::Scalar Potential(dim);
1632 *  
1633 *   std::vector<double> rhs_values(n_q_points);
1634 *  
1635 * @endcode
1636 *
1637 * We first get the value of the right hand side function
1638 * evaluated at the quadrature points in the cell.
1639 *
1640 * @code
1641 *   rhs_function.value_list(cell_fe.get_quadrature_points(),
1642 *   rhs_values);
1643 *  
1644 * @endcode
1645 *
1646 * Now, we loop over the quadrature points in the
1647 * cell and then loop over the degrees of freedom and perform
1648 * quadrature to approximate the integrals.
1649 *
1650 * @code
1651 *   for (unsigned int q=0; q<n_q_points; ++q)
1652 *   {
1653 *   for (unsigned int i=0; i<dofs_per_cell; ++i)
1654 *   {
1655 *   const Tensor<1, dim> psi_i_field = cell_fe[VectorField].value(i,q);
1656 *   const double div_psi_i_field = cell_fe[VectorField].divergence(i,q);
1657 *   const double psi_i_potential = cell_fe[Potential].value(i,q);
1658 *   const Tensor<1, dim> grad_psi_i_potential = cell_fe[Potential].gradient(i,q);
1659 *  
1660 *   for (unsigned int j=0; j<dofs_per_cell; ++j)
1661 *   {
1662 *   const Tensor<1, dim> psi_j_field = cell_fe[VectorField].value(j,q);
1663 *   const double psi_j_potential = cell_fe[Potential].value(j,q);
1664 *  
1665 * @endcode
1666 *
1667 * This computation corresponds to assembling the local system
1668 * matrix for the integral over an element,
1669 *
1670
1671 *
1672 * @f$\int_{\Omega_{e}} \left(\textbf{w} \cdot \textbf{q}
1673 * - \nabla \cdot \textbf{w} u
1674 * - \nabla w \cdot \textbf{q}
1675 * \right) dx @f$
1676 *
1677 * @code
1678 *   cell_matrix(i,j) += ( (psi_i_field * psi_j_field)
1679 *   -
1680 *   (div_psi_i_field * psi_j_potential)
1681 *   -
1682 *   (grad_psi_i_potential * psi_j_field)
1683 *   ) * cell_fe.JxW(q);
1684 *   }
1685 *  
1686 * @endcode
1687 *
1688 * And this local right hand vector corresponds to the integral
1689 * over the element cell,
1690 *
1691
1692 *
1693 * @f$ \int_{\Omega_{e}} w \, f(\textbf{x}) \, dx @f$
1694 *
1695 * @code
1696 *   cell_vector(i) += psi_i_potential *
1697 *   rhs_values[q] *
1698 *   cell_fe.JxW(q);
1699 *   }
1700 *   }
1701 *   }
1702 *  
1703 * @endcode
1704 *
1705 *
1706 * <a name="LDGPoisson.cc-assemble_Dirichlet_boundary_terms"></a>
1707 * <h4>assemble_Dirichlet_boundary_terms</h4>
1708 * Here we have the function that builds the <code>local_matrix</code>
1709 * contribution
1710 * and local right hand side vector, <code>local_vector</code>
1711 * for the Dirichlet boundary conditions.
1712 *
1713 * @code
1714 *   template<int dim>
1715 *   void
1716 *   LDGPoissonProblem<dim>::
1717 *   assemble_Dirichlet_boundary_terms(
1718 *   const FEFaceValues<dim> &face_fe,
1719 *   FullMatrix<double> &local_matrix,
1720 *   Vector<double> &local_vector,
1721 *   const double &h)
1722 *   {
1723 *   const unsigned int dofs_per_cell = face_fe.dofs_per_cell;
1724 *   const unsigned int n_q_points = face_fe.n_quadrature_points;
1725 *  
1726 *   const FEValuesExtractors::Vector VectorField(0);
1727 *   const FEValuesExtractors::Scalar Potential(dim);
1728 *  
1729 *   std::vector<double> Dirichlet_bc_values(n_q_points);
1730 *  
1731 * @endcode
1732 *
1733 * In order to evaluate the flux on the Dirichlet boundary face we
1734 * first get the value of the Dirichlet boundary function on the quadrature
1735 * points of the face. Then we loop over all the quadrature points and
1736 * degrees of freedom and approximate the integrals on the Dirichlet boundary
1737 * element faces.
1738 *
1739 * @code
1740 *   Dirichlet_bc_function.value_list(face_fe.get_quadrature_points(),
1741 *   Dirichlet_bc_values);
1742 *  
1743 *   for (unsigned int q=0; q<n_q_points; ++q)
1744 *   {
1745 *   for (unsigned int i=0; i<dofs_per_cell; ++i)
1746 *   {
1747 *   const Tensor<1, dim> psi_i_field = face_fe[VectorField].value(i,q);
1748 *   const double psi_i_potential = face_fe[Potential].value(i,q);
1749 *  
1750 *   for (unsigned int j=0; j<dofs_per_cell; ++j)
1751 *   {
1752 *   const Tensor<1, dim> psi_j_field = face_fe[VectorField].value(j,q);
1753 *   const double psi_j_potential = face_fe[Potential].value(j,q);
1754 *  
1755 * @endcode
1756 *
 1757 * We compute the contribution for the flux @f$\widehat{q}@f$ on
1758 * the Dirichlet boundary which enters our system matrix as,
1759 *
1760
1761 *
1762 * @f$ \int_{\text{face}} w \, ( \textbf{n} \cdot \textbf{q}
1763 * + \sigma u) ds @f$
1764 *
1765 * @code
1766 *   local_matrix(i,j) += psi_i_potential * (
1767 *   face_fe.normal_vector(q) *
1768 *   psi_j_field
1769 *   +
1770 *   (penalty/h) *
1771 *   psi_j_potential) *
1772 *   face_fe.JxW(q);
1773 *  
1774 *   }
1775 *  
1776 * @endcode
1777 *
1778 * We also compute the contribution for the flux for @f$\widehat{u}@f$
1779 * on the Dirichlet boundary which is the Dirichlet boundary
1780 * condition function and enters the right hand side vector as
1781 *
1782
1783 *
1784 * @f$\int_{\text{face}} (-\textbf{w} \cdot \textbf{n}
1785 * + \sigma w) \, u_{D} ds @f$
1786 *
1787 * @code
1788 *   local_vector(i) += (-1.0 * psi_i_field *
1789 *   face_fe.normal_vector(q)
1790 *   +
1791 *   (penalty/h) *
1792 *   psi_i_potential) *
1793 *   Dirichlet_bc_values[q] *
1794 *   face_fe.JxW(q);
1795 *   }
1796 *   }
1797 *   }
1798 *  
1799 * @endcode
1800 *
1801 *
1802 * <a name="LDGPoisson.cc-assemble_Neumann_boundary_terms"></a>
1803 * <h4>assemble_Neumann_boundary_terms</h4>
1804 * Here we have the function that builds the <code>local_matrix</code>
1805 * and <code>local_vector</code> for the Neumann boundary conditions.
1806 *
1807 * @code
1808 *   template<int dim>
1809 *   void
1810 *   LDGPoissonProblem<dim>::
1811 *   assemble_Neumann_boundary_terms(
1812 *   const FEFaceValues<dim> &face_fe,
1813 *   FullMatrix<double> &local_matrix,
1814 *   Vector<double> &local_vector)
1815 *   {
1816 *   const unsigned int dofs_per_cell = face_fe.dofs_per_cell;
1817 *   const unsigned int n_q_points = face_fe.n_quadrature_points;
1818 *  
1819 *   const FEValuesExtractors::Vector VectorField(0);
1820 *   const FEValuesExtractors::Scalar Potential(dim);
1821 *  
1822 * @endcode
1823 *
 1824 * In order to evaluate the flux on the Neumann boundary face we
1825 * first get the value of the Neumann boundary function on the quadrature
1826 * points of the face. Then we loop over all the quadrature points and
1827 * degrees of freedom and approximate the integrals on the Neumann boundary
1828 * element faces.
1829 *
1830 * @code
1831 *   std::vector<double > Neumann_bc_values(n_q_points);
1832 *  
1833 *   for (unsigned int q=0; q<n_q_points; ++q)
1834 *   {
1835 *   for (unsigned int i=0; i<dofs_per_cell; ++i)
1836 *   {
1837 *   const Tensor<1, dim> psi_i_field = face_fe[VectorField].value(i,q);
1838 *   const double psi_i_potential = face_fe[Potential].value(i,q);
1839 *  
1840 *   for (unsigned int j=0; j<dofs_per_cell; ++j)
1841 *   {
1842 *  
1843 *   const double psi_j_potential = face_fe[Potential].value(j,q);
1844 *  
1845 * @endcode
1846 *
 1847 * We compute the contribution for the flux @f$\widehat{u}@f$ on the
1848 * Neumann boundary which enters our system matrix as,
1849 *
1850
1851 *
1852 * @f$\int_{\text{face}} \textbf{w} \cdot \textbf{n} \, u \, ds @f$
1853 *
1854 * @code
1855 *   local_matrix(i,j) += psi_i_field *
1856 *   face_fe.normal_vector(q) *
1857 *   psi_j_potential *
1858 *   face_fe.JxW(q);
1859 *  
1860 *   }
1861 *  
1862 * @endcode
1863 *
1864 * We also compute the contribution for the flux for
1865 * @f$\widehat{q}@f$ on the Neumann boundary which is the
1866 * Neumann boundary condition and enters the right
1867 * hand side vector as
1868 *
1869
1870 *
1871 * @f$\int_{\text{face}} -w \, g_{N} \, ds@f$
1872 *
1873 * @code
1874 *   local_vector(i) += -psi_i_potential *
1875 *   Neumann_bc_values[q] *
1876 *   face_fe.JxW(q);
1877 *   }
1878 *   }
1879 *   }
1880 *  
1881 * @endcode
1882 *
1883 *
1884 * <a name="LDGPoisson.cc-assemble_flux_terms"></a>
1885 * <h4>assemble_flux_terms</h4>
1886 * Now we finally get to the function which builds the interior fluxes.
1887 * This is a rather long function
1888 * and we will describe what is going on in detail.
1889 *
1890 * @code
1891 *   template<int dim>
1892 *   void
1893 *   LDGPoissonProblem<dim>::
1894 *   assemble_flux_terms(
1895 *   const FEFaceValuesBase<dim> &fe_face_values,
1896 *   const FEFaceValuesBase<dim> &fe_neighbor_face_values,
1897 *   FullMatrix<double> &vi_ui_matrix,
1898 *   FullMatrix<double> &vi_ue_matrix,
1899 *   FullMatrix<double> &ve_ui_matrix,
1900 *   FullMatrix<double> &ve_ue_matrix,
1901 *   const double &h)
1902 *   {
1903 *   const unsigned int n_face_points = fe_face_values.n_quadrature_points;
1904 *   const unsigned int dofs_this_cell = fe_face_values.dofs_per_cell;
1905 *   const unsigned int dofs_neighbor_cell = fe_neighbor_face_values.dofs_per_cell;
1906 *  
1907 *   const FEValuesExtractors::Vector VectorField(0);
1908 *   const FEValuesExtractors::Scalar Potential(dim);
1909 *  
1910 * @endcode
1911 *
 1912 * The first thing we do after the boilerplate is define
 1913 * the unit vector @f$\boldsymbol \beta@f$ that is used in defining
 1914 * the LDG/alternating fluxes.
1915 *
1916 * @code
1917 *   Point<dim> beta;
1918 *   for (int i=0; i<dim; ++i)
1919 *   beta(i) = 1.0;
1920 *   beta /= sqrt(beta.square() );
1921 *  
1922 * @endcode
1923 *
1924 * Now we loop over all the quadrature points on the element face
1925 * and loop over all the degrees of freedom and approximate
1926 * the following flux integrals.
1927 *
1928 * @code
1929 *   for (unsigned int q=0; q<n_face_points; ++q)
1930 *   {
1931 *   for (unsigned int i=0; i<dofs_this_cell; ++i)
1932 *   {
1933 *   const Tensor<1,dim> psi_i_field_minus =
1934 *   fe_face_values[VectorField].value(i,q);
1935 *   const double psi_i_potential_minus =
1936 *   fe_face_values[Potential].value(i,q);
1937 *  
1938 *   for (unsigned int j=0; j<dofs_this_cell; ++j)
1939 *   {
1940 *   const Tensor<1,dim> psi_j_field_minus =
1941 *   fe_face_values[VectorField].value(j,q);
1942 *   const double psi_j_potential_minus =
1943 *   fe_face_values[Potential].value(j,q);
1944 *  
1945 * @endcode
1946 *
1947 * We compute the flux matrix where the test function's
1948 * as well as the solution function's values are taken from
1949 * the interior as,
1950 *
1951
1952 *
1953 * @f$\int_{\text{face}}
1954 * \left( \frac{1}{2} \, \textbf{n}^{-}
1955 * \cdot ( \textbf{w}^{-} u^{-}
1956 * + w^{-} \textbf{q}^{-})
1957 * + \boldsymbol \beta \cdot \textbf{w}^{-} u^{-}
1958 * - w^{-} \boldsymbol \beta \cdot \textbf{q}^{-}
1959 * + \sigma w^{-} \, u^{-} \right) ds@f$
1960 *
1961 * @code
1962 *   vi_ui_matrix(i,j) += (0.5 * (
1963 *   psi_i_field_minus *
1964 *   fe_face_values.normal_vector(q) *
1965 *   psi_j_potential_minus
1966 *   +
1967 *   psi_i_potential_minus *
1968 *   fe_face_values.normal_vector(q) *
1969 *   psi_j_field_minus )
1970 *   +
1971 *   beta *
1972 *   psi_i_field_minus *
1973 *   psi_j_potential_minus
1974 *   -
1975 *   beta *
1976 *   psi_i_potential_minus *
1977 *   psi_j_field_minus
1978 *   +
1979 *   (penalty/h) *
1980 *   psi_i_potential_minus *
1981 *   psi_j_potential_minus
1982 *   ) *
1983 *   fe_face_values.JxW(q);
1984 *   }
1985 *  
1986 *   for (unsigned int j=0; j<dofs_neighbor_cell; ++j)
1987 *   {
1988 *   const Tensor<1,dim> psi_j_field_plus =
1989 *   fe_neighbor_face_values[VectorField].value(j,q);
1990 *   const double psi_j_potential_plus =
1991 *   fe_neighbor_face_values[Potential].value(j,q);
1992 *  
1993 * @endcode
1994 *
1995 * We compute the flux matrix where the test function is
 1996 * from the interior of this element's face and the solution
1997 * function is taken from the exterior. This corresponds
1998 * to the computation,
1999 *
2000
2001 *
2002 * @f$\int_{\text{face}}
2003 * \left( \frac{1}{2} \, \textbf{n}^{-} \cdot
2004 * ( \textbf{w}^{-} u^{+}
2005 * + w^{-} \textbf{q}^{+})
2006 * - \boldsymbol \beta \cdot \textbf{w}^{-} u^{+}
2007 * + w^{-} \boldsymbol \beta \cdot \textbf{q}^{+}
2008 * - \sigma w^{-} \, u^{+} \right) ds @f$
2009 *
2010 * @code
2011 *   vi_ue_matrix(i,j) += ( 0.5 * (
2012 *   psi_i_field_minus *
2013 *   fe_face_values.normal_vector(q) *
2014 *   psi_j_potential_plus
2015 *   +
2016 *   psi_i_potential_minus *
2017 *   fe_face_values.normal_vector(q) *
2018 *   psi_j_field_plus )
2019 *   -
2020 *   beta *
2021 *   psi_i_field_minus *
2022 *   psi_j_potential_plus
2023 *   +
2024 *   beta *
2025 *   psi_i_potential_minus *
2026 *   psi_j_field_plus
2027 *   -
2028 *   (penalty/h) *
2029 *   psi_i_potential_minus *
2030 *   psi_j_potential_plus
2031 *   ) *
2032 *   fe_face_values.JxW(q);
2033 *   }
2034 *   }
2035 *  
2036 *   for (unsigned int i=0; i<dofs_neighbor_cell; ++i)
2037 *   {
2038 *   const Tensor<1,dim> psi_i_field_plus =
2039 *   fe_neighbor_face_values[VectorField].value(i,q);
2040 *   const double psi_i_potential_plus =
2041 *   fe_neighbor_face_values[Potential].value(i,q);
2042 *  
2043 *   for (unsigned int j=0; j<dofs_this_cell; ++j)
2044 *   {
2045 *   const Tensor<1,dim> psi_j_field_minus =
2046 *   fe_face_values[VectorField].value(j,q);
2047 *   const double psi_j_potential_minus =
2048 *   fe_face_values[Potential].value(j,q);
2049 *  
2050 *  
2051 * @endcode
2052 *
2053 * We compute the flux matrix where the test function is
 2054 * from the exterior of this element's face and the solution
2055 * function is taken from the interior. This corresponds
2056 * to the computation,
2057 *
2058
2059 *
2060 * @f$ \int_{\text{face}}
2061 * \left( -\frac{1}{2}\, \textbf{n}^{-} \cdot
2062 * (\textbf{w}^{+} u^{-}
2063 * + w^{+} \textbf{q}^{-} )
2064 * - \boldsymbol \beta \cdot \textbf{w}^{+} u^{-}
2065 * + w^{+} \boldsymbol \beta \cdot \textbf{q}^{-}
2066 * - \sigma w^{+} u^{-} \right) ds @f$
2067 *
2068 * @code
2069 *   ve_ui_matrix(i,j) += (-0.5 * (
2070 *   psi_i_field_plus *
2071 *   fe_face_values.normal_vector(q) *
2072 *   psi_j_potential_minus
2073 *   +
2074 *   psi_i_potential_plus *
2075 *   fe_face_values.normal_vector(q) *
2076 *   psi_j_field_minus)
2077 *   -
2078 *   beta *
2079 *   psi_i_field_plus *
2080 *   psi_j_potential_minus
2081 *   +
2082 *   beta *
2083 *   psi_i_potential_plus *
2084 *   psi_j_field_minus
2085 *   -
2086 *   (penalty/h) *
2087 *   psi_i_potential_plus *
2088 *   psi_j_potential_minus
2089 *   ) *
2090 *   fe_face_values.JxW(q);
2091 *   }
2092 *  
2093 *   for (unsigned int j=0; j<dofs_neighbor_cell; ++j)
2094 *   {
2095 *   const Tensor<1,dim> psi_j_field_plus =
2096 *   fe_neighbor_face_values[VectorField].value(j,q);
2097 *   const double psi_j_potential_plus =
2098 *   fe_neighbor_face_values[Potential].value(j,q);
2099 *  
2100 * @endcode
2101 *
2102 * And lastly we compute the flux matrix where the test
2103 * function and solution function are taken from the exterior
2104 * cell to this face. This corresponds to the computation,
2105 *
2106
2107 *
2108 * @f$\int_{\text{face}}
2109 * \left( -\frac{1}{2}\, \textbf{n}^{-} \cdot
2110 * ( \textbf{w}^{+} u^{+}
2111 * + w^{+} \textbf{q}^{+} )
2112 * + \boldsymbol \beta \cdot \textbf{w}^{+} u^{+}
2113 * - w^{+} \boldsymbol \beta \cdot \textbf{q}^{+}
2114 * + \sigma w^{+} u^{+} \right) ds @f$
2115 *
2116 * @code
2117 *   ve_ue_matrix(i,j) += (-0.5 * (
2118 *   psi_i_field_plus *
2119 *   fe_face_values.normal_vector(q) *
2120 *   psi_j_potential_plus
2121 *   +
2122 *   psi_i_potential_plus *
2123 *   fe_face_values.normal_vector(q) *
2124 *   psi_j_field_plus )
2125 *   +
2126 *   beta *
2127 *   psi_i_field_plus *
2128 *   psi_j_potential_plus
2129 *   -
2130 *   beta *
2131 *   psi_i_potential_plus *
2132 *   psi_j_field_plus
2133 *   +
2134 *   (penalty/h) *
2135 *   psi_i_potential_plus *
2136 *   psi_j_potential_plus
2137 *   ) *
2138 *   fe_face_values.JxW(q);
2139 *   }
2140 *  
2141 *   }
2142 *   }
2143 *   }
2144 *  
2145 * @endcode
2146 *
2147 *
2148 * <a name="LDGPoisson.cc-distribute_local_flux_to_global"></a>
2149 * <h4>distribute_local_flux_to_global</h4>
2150 * In this function we use the ConstraintMatrix to distribute
2151 * the local flux matrices to the global system matrix.
2152 * Since I have to do this twice in assembling the
 2153 * system matrix, I made a function to do it rather than have
 2154 * repeated code.
 2155 * We remark that the reader should take special note of
 2156 * which matrices we are distributing and the order
 2157 * in which we pass the dof indices vectors. In distributing
 2158 * the first matrix, i.e. <code>vi_ui_matrix</code>, we are
 2159 * taking the test function and solution function values from
 2160 * the interior of this cell and therefore only need the
 2161 * <code>local_dof_indices</code> since it contains the dof
 2162 * indices to this cell. When we distribute the second matrix,
 2163 * <code>vi_ue_matrix</code>, the test function is taken
 2164 * from the interior of
 2165 * this cell while the solution function is taken from the
 2166 * exterior, i.e. the neighbor cell. Notice that the order of the
 2167 * degrees of freedom index vectors matches this pattern: first
 2168 * the <code>local_dof_indices</code> which is local to
 2169 * this cell and then
 2170 * the <code>local_neighbor_dof_indices</code> which is
 2171 * local to the neighbor's
 2172 * cell. The order in which we pass the dof indices for the
 2173 * matrices is paramount to constructing our global system
 2174 * matrix properly. The ordering of the last two matrices
 2175 * follows the same logic as the first two we discussed.
2176 *
2177 * @code
2178 *   template<int dim>
2179 *   void
2180 *   LDGPoissonProblem<dim>::
2181 *   distribute_local_flux_to_global(
2182 *   const FullMatrix<double> &vi_ui_matrix,
2183 *   const FullMatrix<double> &vi_ue_matrix,
2184 *   const FullMatrix<double> &ve_ui_matrix,
2185 *   const FullMatrix<double> &ve_ue_matrix,
2186 *   const std::vector<types::global_dof_index> &local_dof_indices,
2187 *   const std::vector<types::global_dof_index> &local_neighbor_dof_indices)
2188 *   {
2189 *   constraints.distribute_local_to_global(vi_ui_matrix,
2190 *   local_dof_indices,
2191 *   system_matrix);
2192 *  
2193 *   constraints.distribute_local_to_global(vi_ue_matrix,
2194 *   local_dof_indices,
2195 *   local_neighbor_dof_indices,
2196 *   system_matrix);
2197 *  
2198 *   constraints.distribute_local_to_global(ve_ui_matrix,
2199 *   local_neighbor_dof_indices,
2200 *   local_dof_indices,
2201 *   system_matrix);
2202 *  
2203 *   constraints.distribute_local_to_global(ve_ue_matrix,
2204 *   local_neighbor_dof_indices,
2205 *   system_matrix);
2206 *   }
2207 *  
2208 *  
2209 *  
2210 * @endcode
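 *
 * To make the ordering discussed above concrete, here is a sketch of
 * how this helper is called from the face assembly; it is not a verbatim
 * excerpt of the assembly routine, and the variable names are assumed
 * to match the local matrices and dof index vectors used above:
 *
 * @code
 *   // Sketch: once the four local flux matrices of an interior face
 *   // have been filled, hand them off together with the two dof index
 *   // vectors.  Matrices whose test function lives on this cell (vi_*)
 *   // get local_dof_indices as their first (row) index vector, while
 *   // matrices whose test function lives on the neighbor (ve_*) get
 *   // local_neighbor_dof_indices first.
 *   distribute_local_flux_to_global(vi_ui_matrix,
 *                                   vi_ue_matrix,
 *                                   ve_ui_matrix,
 *                                   ve_ue_matrix,
 *                                   local_dof_indices,
 *                                   local_neighbor_dof_indices);
 * @endcode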
2211 *
2212 *
2213 * <a name="LDGPoisson.cc-solve"></a>
2214 * <h4>solve</h4>
2215 * As mentioned earlier I used a direct solver to solve
2216 * the linear system of equations resulting from the LDG
2217 * method applied to the Poisson equation. One could also
2218 * use an iterative solver; however, we would then need to use
2219 * a preconditioner, and that was something I did not want
2220 * to get into. For information on preconditioners
2221 * for the LDG method see this
2222 * <a href="http://epubs.siam.org/doi/abs/10.1137/S1064827502410657">
2223 * paper</a>. The use of a direct solver here is
2224 * somewhat of a limitation. The built-in distributed
2225 * direct solver in Trilinos reduces everything to one
2226 * processor, solves the system and then distributes
2227 * everything back out to the other processors. However,
2228 * by linking to more advanced direct solvers through
2229 * Trilinos one can accomplish fully distributed computations
2230 * and not much about the following function calls will
2231 * change.
2232 *
2233 * @code
2234 *   template<int dim>
2235 *   void
2236 *   LDGPoissonProblem<dim>::
2237 *   solve()
2238 *   {
2239 *   TimerOutput::Scope t(computing_timer, "solve");
2240 *  
2241 * @endcode
2242 *
2243 * As in @ref step_40 "step-40", in order to perform a linear solve
2244 * we need a solution vector with no overlap across
2245 * the processors. We create this by instantiating the
2246 * <code>completely_distributed_solution</code> solution
2247 * vector using
2248 * the copy constructor on the global system right hand
2249 * side vector, which is itself a completely distributed vector.
2250 *
2251 * @code
2252 *   TrilinosWrappers::MPI::Vector
2253 *   completely_distributed_solution(system_rhs);
2254 *  
2255 * @endcode
2256 *
2257 * Now we can perform the solve on the completely distributed
2258 * right hand side vector, the system matrix and the completely
2259 * distributed solution vector.
2260 *
2261 * @code
2262 *   solver.solve(system_matrix,
2263 *   completely_distributed_solution,
2264 *   system_rhs);
2265 *  
2266 * @endcode
2267 *
2268 * We now distribute the constraints of our system onto the
2269 * completely distributed solution vector, although in our case
2270 * with the LDG method there are none.
2271 *
2272 * @code
2273 *   constraints.distribute(completely_distributed_solution);
2274 *  
2275 * @endcode
2276 *
2277 * Lastly we copy the completely distributed solution vector,
2278 * <code>completely_distributed_solution</code>,
2279 * to the solution vector that has some overlap between
2280 * processors, <code>locally_relevant_solution</code>.
2281 * We need the overlapping portions of our solution
2282 * in order to be able to do computations using the solution
2283 * later in the code or in post processing.
2284 *
2285 * @code
2286 *   locally_relevant_solution = completely_distributed_solution;
2287 *   }
2288 *  
2289 * @endcode
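 *
 * To make the earlier remark about more advanced direct solvers a bit
 * more concrete, here is a minimal sketch of what selecting a different
 * Trilinos backend could look like.  This is only an illustration: it
 * assumes deal.II was configured with a Trilinos installation that
 * provides the chosen Amesos solver (here SuperLU_DIST), and it reuses
 * the <code>system_matrix</code>, <code>system_rhs</code> and
 * <code>completely_distributed_solution</code> objects from above.
 *
 * @code
 *   // Sketch only; TrilinosWrappers::SolverDirect is declared in
 *   // <deal.II/lac/trilinos_solver.h>.  Whether "Amesos_Superludist"
 *   // is available depends on how Trilinos was built.
 *   SolverControl solver_control(1, 0);
 *
 *   // The first argument toggles verbose solver output, the second
 *   // selects the Amesos backend.
 *   TrilinosWrappers::SolverDirect::AdditionalData data(false,
 *                                                       "Amesos_Superludist");
 *   TrilinosWrappers::SolverDirect direct_solver(solver_control, data);
 *
 *   direct_solver.solve(system_matrix,
 *                       completely_distributed_solution,
 *                       system_rhs);
 * @endcode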
2290 *
2291 *
2292 * <a name="LDGPoisson.cc-output_results"></a>
2293 * <h4>output_results</h4>
2294 * This function deals with writing the results to disk
2295 * in parallel. It is almost exactly the same as
2296 * in @ref step_40 "step-40" and we won't go into it. It is noteworthy
2297 * that in @ref step_40 "step-40" the output is only the scalar solution,
2298 * while in our situation we output both the scalar
2299 * solution and the vector field solution. The only
2300 * difference between this function and the one in @ref step_40 "step-40"
2301 * is in the <code>solution_names</code> vector where we have to add
2302 * the gradient dimensions. Everything else is taken care
2303 * of by the deal.ii library!
2304 *
2305 * @code
2306 *   template<int dim>
2307 *   void
2308 *   LDGPoissonProblem<dim>::
2309 *   output_results() const
2310 *   {
2311 *   std::vector<std::string> solution_names;
2312 *   switch (dim)
2313 *   {
2314 *   case 1:
2315 *   solution_names.push_back("du/dx");
2316 *   solution_names.push_back("u");
2317 *   break;
2318 *  
2319 *   case 2:
2320 *   solution_names.push_back("grad(u)_x");
2321 *   solution_names.push_back("grad(u)_y");
2322 *   solution_names.push_back("u");
2323 *   break;
2324 *  
2325 *   case 3:
2326 *   solution_names.push_back("grad(u)_x");
2327 *   solution_names.push_back("grad(u)_y");
2328 *   solution_names.push_back("grad(u)_z");
2329 *   solution_names.push_back("u");
2330 *   break;
2331 *  
2332 *   default:
2333 *   Assert(false, ExcNotImplemented() );
2334 *   }
2335 *  
2336 *   DataOut<dim> data_out;
2337 *   data_out.attach_dof_handler(dof_handler);
2338 *   data_out.add_data_vector(locally_relevant_solution,
2339 *   solution_names);
2340 *  
2341 *   Vector<float> subdomain(triangulation.n_active_cells());
2342 *  
2343 *   for (unsigned int i=0; i<subdomain.size(); ++i)
2344 *   subdomain(i) = triangulation.locally_owned_subdomain();
2345 *  
2346 *   data_out.add_data_vector(subdomain,"subdomain");
2347 *  
2348 *   data_out.build_patches();
2349 *  
2350 *   const std::string filename = ("solution." +
2351 *   Utilities::int_to_string(
2352 *   triangulation.locally_owned_subdomain(),4));
2353 *  
2354 *   std::ofstream output((filename + ".vtu").c_str());
2355 *   data_out.write_vtu(output);
2356 *  
2357 *   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0 )
2358 *   {
2359 *   std::vector<std::string> filenames;
2360 *   for (unsigned int i=0;
2361 *   i < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
2362 *   i++)
2363 *   {
2364 *   filenames.push_back("solution." +
2365 *   Utilities::int_to_string(i,4) +
2366 *   ".vtu");
2367 *   }
2368 *   std::ofstream master_output("solution.pvtu");
2369 *   data_out.write_pvtu_record(master_output, filenames);
2370 *   }
2371 *   }
2372 *  
2373 *  
2374 * @endcode
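 *
 * As a side remark, newer deal.II releases also provide
 * DataOut::write_vtu_with_pvtu_record(), which bundles the per-process
 * <code>.vtu</code> files and the <code>.pvtu</code> record into a
 * single call.  A minimal sketch of how the manual filename bookkeeping
 * above could be replaced, assuming such a release:
 *
 * @code
 *   // Sketch: every process writes its own .vtu piece and process 0
 *   // additionally writes the .pvtu record referencing all pieces; the
 *   // file names are generated from the base name and the counter.
 *   data_out.write_vtu_with_pvtu_record("./",        // output directory
 *                                       "solution",  // base file name
 *                                       0,           // counter, e.g. a time step
 *                                       MPI_COMM_WORLD,
 *                                       4);          // digits used for the counter
 * @endcode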
2375 *
2376 *
2377 * <a name="LDGPoisson.cc-run"></a>
2378 * <h4>run</h4>
2379 * The only public function of this class is pretty much exactly
2380 * the same as in all the other deal.ii examples, except that here
2381 * I set the constant in the DG penalty (@f$\tilde{\sigma}@f$) to 1.
2382 *
2383 * @code
2384 *   template<int dim>
2385 *   void
2386 *   LDGPoissonProblem<dim>::
2387 *   run()
2388 *   {
2389 *   penalty = 1.0;
2390 *   make_grid();
2391 *   make_dofs();
2392 *   assemble_system();
2393 *   solve();
2394 *   output_results();
2395 *   }
2396 *  
2397 *  
2398 * @endcode
2399 *
2400 *
2401 * <a name="LDGPoisson.cc-main"></a>
2402 * <h3>main</h3>
2403 * Here is the main function of our program. Since it is nearly exactly
2404 * the same as @ref step_40 "step-40" and many of the other examples, I won't
2405 * elaborate on it.
2406 *
2407 * @code
2408 *   int main(int argc, char *argv[])
2409 *   {
2410 *  
2411 *   try
2412 *   {
2413 *   using namespace dealii;
2414 *  
2415 *   deallog.depth_console(0);
2416 *  
2417 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv,
2418 *   numbers::invalid_unsigned_int);
2419 *  
2420 *   unsigned int degree = 1;
2421 *   unsigned int n_refine = 6;
2422 *   LDGPoissonProblem<2> Poisson(degree, n_refine);
2423 *   Poisson.run();
2424 *  
2425 *   }
2426 *   catch (std::exception &exc)
2427 *   {
2428 *   std::cerr << std::endl << std::endl
2429 *   << "----------------------------------------------------"
2430 *   << std::endl;
2431 *   std::cerr << "Exception on processing: " << std::endl
2432 *   << exc.what() << std::endl
2433 *   << "Aborting!" << std::endl
2434 *   << "----------------------------------------------------"
2435 *   << std::endl;
2436 *  
2437 *   return 1;
2438 *   }
2439 *   catch (...)
2440 *   {
2441 *   std::cerr << std::endl << std::endl
2442 *   << "----------------------------------------------------"
2443 *   << std::endl;
2444 *   std::cerr << "Unknown exception!" << std::endl
2445 *   << "Aborting!" << std::endl
2446 *   << "----------------------------------------------------"
2447 *   << std::endl;
2448 *   return 1;
2449 *   }
2450 *   return 0;
2451 *   }
2452 * @endcode
2453
2454
2455*/