456 * <a name="Functions.cc-Functionscc"></a>
457 * <h3>Functions.cc</h3>
458 * In this file we keep the right hand side function, the Dirichlet boundary
459 * conditions and the exact solution to our Poisson equation problem. Since
460 * these classes and functions have been discussed extensively in
461 * the deal.ii tutorials we won't discuss them any further.
464 * #include <deal.II/base/function.h>
465 * #include <deal.II/base/tensor_function.h>
466 * #include <deal.II/lac/vector.h>
469 * using namespace dealii;
472 * class RightHandSide : public Function<dim>
475 * RightHandSide() : Function<dim>(1)
478 * virtual double value(const Point<dim> &p,
479 * const unsigned int component = 0 ) const override;
483 * class DirichletBoundaryValues : public Function<dim>
486 * DirichletBoundaryValues() : Function<dim>(1)
489 * virtual double value(const Point<dim> &p,
490 * const unsigned int component = 0 ) const override;
494 * class TrueSolution : public Function<dim>
497 * TrueSolution() : Function<dim>(dim+1)
500 * virtual void vector_value(const Point<dim> &p,
501 * Vector<double> &values) const override;
506 * RightHandSide<dim>::
507 * value(const Point<dim> &p,
508 * const unsigned int ) const
510 * const double x = p[0];
511 * const double y = p[1];
512 * return 4*M_PI*M_PI*(cos(2*M_PI*y) - sin(2*M_PI*x));
518 * DirichletBoundaryValues<dim>::
519 * value(const Point<dim> &p,
520 * const unsigned int ) const
522 * const double x = p[0];
523 * const double y = p[1];
524 * return cos(2*M_PI*y) -sin(2*M_PI*x) - x;
530 * TrueSolution<dim>::
531 * vector_value(const Point<dim> &p,
532 * Vector<double> &values) const
534 * Assert(values.size() == dim+1,
535 * ExcDimensionMismatch(values.size(), dim+1) );
540 * values(0) = 1 + 2*M_PI*cos(2*M_PI*x);
541 * values(1) = 2*M_PI*sin(2*M_PI*y);
543 * values(2) = cos(2*M_PI*y) - sin(2*M_PI*x) - x;
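As a quick sanity check on these definitions (an editorial addition, not part of the original file): the three classes encode the manufactured solution @f$u(x,y) = \cos(2\pi y) - \sin(2\pi x) - x@f$, so that the vector field is @f$\textbf{q} = -\nabla u = \left[\,1 + 2\pi\cos(2\pi x),\; 2\pi\sin(2\pi y)\,\right]^{T}@f$ and the source term is @f$f = -\Delta u = 4\pi^{2}\left(\cos(2\pi y) - \sin(2\pi x)\right)@f$, which is exactly what <code>TrueSolution</code> and <code>RightHandSide</code> return, while <code>DirichletBoundaryValues</code> returns the trace of @f$u@f$ on the boundary.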
548<a name="ann-LDGPoisson.cc"></a>
549<h1>Annotated version of LDGPoisson.cc</h1>
555 * /* -----------------------------------------------------------------------------
557 * * SPDX-License-Identifier: LGPL-2.1-or-later
558 * * Copyright (C) 2017 by Michael Harmon
560 * * This file is part of the deal.II code gallery.
562 * * -----------------------------------------------------------------------------
568 * <a name="LDGPoisson.cc-LDGPoissoncc"></a>
569 * <h3>LDGPoisson.cc</h3>
570 * The code begins as per usual with a long list of the included
571 * files from the deal.ii library.
574 * #include <deal.II/base/quadrature_lib.h>
575 * #include <deal.II/base/logstream.h>
576 * #include <deal.II/base/function.h>
577 * #include <deal.II/base/timer.h>
579 * #include <deal.II/lac/full_matrix.h>
580 * #include <deal.II/lac/sparse_matrix.h>
583 * #include <deal.II/grid/tria.h>
584 * #include <deal.II/grid/grid_generator.h>
585 * #include <deal.II/grid/grid_tools.h>
586 * #include <deal.II/grid/tria_accessor.h>
587 * #include <deal.II/grid/tria_iterator.h>
589 * #include <deal.II/dofs/dof_handler.h>
590 * #include <deal.II/dofs/dof_renumbering.h>
591 * #include <deal.II/dofs/dof_accessor.h>
592 * #include <deal.II/dofs/dof_tools.h>
595 * #include <iostream>
599 * Here's where the classes for the DG methods begin.
600 * We can use either the Lagrange polynomials,
603 * #include <deal.II/fe/fe_dgq.h>
606 * or the Legendre polynomials
609 * #include <deal.II/fe/fe_dgp.h>
612 * as basis functions. I'll be using the Lagrange polynomials.
615 * #include <deal.II/fe/fe_system.h>
616 * #include <deal.II/fe/fe_values.h>
618 * #include <deal.II/numerics/vector_tools.h>
619 * #include <deal.II/numerics/matrix_tools.h>
620 * #include <deal.II/numerics/data_out.h>
625 * Now we have to load in the deal.ii files that will allow us to use
626 * a distributed computing framework.
629 * #include <deal.II/base/utilities.h>
630 * #include <deal.II/base/index_set.h>
631 * #include <deal.II/base/conditional_ostream.h>
632 * #include <deal.II/lac/sparsity_tools.h>
633 * #include <deal.II/distributed/tria.h>
638 * Additionally we load the files that will allow us to interact with
639 * the Trilinos library.
642 * #include <deal.II/lac/trilinos_sparse_matrix.h>
643 * #include <deal.II/lac/trilinos_vector.h>
644 * #include <deal.II/lac/trilinos_solver.h>
649 * The functions class contains all the definitions of the functions we
650 * will use, i.e. the right hand side function, the boundary conditions
651 * and the test functions.
654 * #include "Functions.cc"
656 * using namespace dealii;
661 * Here is the main class for the Local Discontinuous Galerkin method
662 * applied to Poisson's equation. We won't explain much of the
663 * class and method declarations, but dive deeper into describing the
664 * functions when they are defined. The only thing I will mention
665 * about the class declaration is that this is where I labeled
666 * the different types of boundaries using enums.
670 * class LDGPoissonProblem
674 * LDGPoissonProblem(const unsigned int degree,
675 * const unsigned int n_refine);
677 * ~LDGPoissonProblem();
687 * void assemble_system();
689 * void assemble_cell_terms(const FEValues<dim> &cell_fe,
690 * FullMatrix<double> &cell_matrix,
691 * Vector<double> &cell_vector);
693 * void assemble_Neumann_boundary_terms(const FEFaceValues<dim> &face_fe,
694 * FullMatrix<double> &local_matrix,
695 * Vector<double> &local_vector);
697 * void assemble_Dirichlet_boundary_terms(const FEFaceValues<dim> &face_fe,
698 * FullMatrix<double> &local_matrix,
699 * Vector<double> &local_vector,
702 * void assemble_flux_terms(const FEFaceValuesBase<dim> &fe_face_values,
703 * const FEFaceValuesBase<dim> &fe_neighbor_face_values,
704 * FullMatrix<double> &vi_ui_matrix,
705 * FullMatrix<double> &vi_ue_matrix,
706 * FullMatrix<double> &ve_ui_matrix,
707 * FullMatrix<double> &ve_ue_matrix,
710 * void distribute_local_flux_to_global(
711 * const FullMatrix<double> &vi_ui_matrix,
712 * const FullMatrix<double> &vi_ue_matrix,
713 * const FullMatrix<double> &ve_ui_matrix,
714 * const FullMatrix<double> &ve_ue_matrix,
715 * const std::vector<types::global_dof_index> &local_dof_indices,
716 * const std::vector<types::global_dof_index> &local_neighbor_dof_indices);
720 * void output_results() const;
722 * const unsigned int degree;
723 * const unsigned int n_refine;
734 * parallel::distributed::Triangulation<dim> triangulation;
736 * DoFHandler<dim> dof_handler;
738 * AffineConstraints<double> constraints;
740 * SparsityPattern sparsity_pattern;
742 * TrilinosWrappers::SparseMatrix system_matrix;
743 * TrilinosWrappers::MPI::Vector locally_relevant_solution;
744 * TrilinosWrappers::MPI::Vector system_rhs;
746 * ConditionalOStream pcout;
747 * TimerOutput computing_timer;
749 * SolverControl solver_control;
750 * TrilinosWrappers::SolverDirect solver;
752 * const RightHandSide<dim> rhs_function;
753 * const DirichletBoundaryValues<dim> Dirichlet_bc_function;
754 * const TrueSolution<dim> true_solution;
761 * <a name="LDGPoisson.cc-Classconstructoranddestructor"></a>
762 * <h4>Class constructor and destructor</h4>
763 * The constructor and destructor for this class are very much like
764 * those for @ref step_40 "step-40". The difference is that we'll be passing
765 * in an integer, <code>degree</code>, which tells us the maximum order
766 * of the polynomial to use as well as <code>n_refine</code> which is the
767 * global number of times we refine our mesh. The other main difference
768 * is that we use a FESystem object for our choice of basis
769 * functions. This is reminiscent of the mixed finite element method in
770 * @ref step_20 "step-20"; however, in our case we use a FESystem
782 * which tells us that the basis functions contain discontinuous polynomials
783 * of order <code>degree</code> in each of the <code>dim</code> dimensions
784 * for the vector field. For the scalar unknown we
785 * use a discontinuous polynomial of order <code>degree</code>.
786 * The LDG method for Poisson equations solves for both the primary variable
787 * as well as its gradient, just like the mixed finite element method.
788 * However, unlike the mixed method, the LDG method uses discontinuous
789 * polynomials to approximate both variables.
790 * The other difference between our constructor and that of @ref step_40 "step-40" is that
791 * we also instantiate our linear solver in the constructor definition.
795 * LDGPoissonProblem<dim>::
796 * LDGPoissonProblem(const unsigned int degree,
797 *                   const unsigned int n_refine)
800 * n_refine(n_refine),
810 * computing_timer(MPI_COMM_WORLD,
815 * solver(solver_control),
817 * Dirichlet_bc_function()
822 * LDGPoissonProblem<dim>::
823 * ~LDGPoissonProblem()
825 * dof_handler.clear();
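Because the initializer list shown above is heavily abridged, here is a minimal sketch of what a complete constructor (and the trivial destructor) for this kind of class could look like. The member names come from the class declaration; the <code>fe</code> member, the TimerOutput options and the <code>solver_control(1)</code> argument are not visible in the listing and are therefore assumptions.

@code
template <int dim>
LDGPoissonProblem<dim>::LDGPoissonProblem(const unsigned int degree,
                                          const unsigned int n_refine)
  : degree(degree)
  , n_refine(n_refine)
  , triangulation(MPI_COMM_WORLD)                 // distributed mesh
  , fe(FE_DGQ<dim>(degree), dim,                  // vector field Q (assumed member)
       FE_DGQ<dim>(degree), 1)                    // scalar potential u
  , dof_handler(triangulation)
  , pcout(std::cout,
          Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
  , computing_timer(MPI_COMM_WORLD, pcout,
                    TimerOutput::summary, TimerOutput::wall_times)
  , solver_control(1)                             // assumption: direct solver, no iteration control
  , solver(solver_control)
  , rhs_function()
  , Dirichlet_bc_function()
{}

template <int dim>
LDGPoissonProblem<dim>::~LDGPoissonProblem()
{
  dof_handler.clear();
}
@endcode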
831 * <a name="LDGPoisson.cc-Make_grid"></a>
832 * <h4>Make_grid</h4>
833 * This function shows how to make a grid using local
834 * refinement and also shows how to label the boundaries
835 * using the defined enum.
840 * LDGPoissonProblem<dim>::
846 * unsigned int local_refine = 2;
847 * for (unsigned int i = 0; i < local_refine; ++i)
855 * We loop over all the cells in the mesh
856 * and mark the appropriate cells for refinement.
857 * In this example we only choose cells which are
858 * near @f$x=0@f$ and @f$x=1@f$ in the
859 * domain. This was just to show that
860 * the LDG method works with local
861 * refinement; more realistic refinement
862 * strategies are discussed elsewhere in the
863 * deal.ii tutorials.
867 * for (; cell != endc; ++cell)
869 *   if ((cell->center()[1]) > 0.9)
871 *     if ((cell->center()[0] > 0.9) || (cell->center()[0] < 0.1))
872 *       cell->set_refine_flag();
877 * Now that we have marked all the cells
878 * that we want to refine locally we can go ahead and
879 * refine the mesh.
887 * To label the boundary faces of the mesh with their
888 * type, i.e. Dirichlet or Neumann,
889 * we loop over all the cells in the mesh and then over
890 * all the faces of each cell. We then have to figure out
891 * which faces are on the boundary and set the
892 * <code>boundary_id</code> of all boundary faces
893 * to <code>Dirichlet</code>.
894 * We remark that one could easily set more complicated
895 * conditions where there are both Dirichlet and
896 * Neumann boundaries.
902 * for (; cell != endc; ++cell)
904 *   for (unsigned int face_no = 0;
905 *        face_no < GeometryInfo<dim>::faces_per_cell;
908 *     if (cell->face(face_no)->at_boundary())
909 *       cell->face(face_no)->set_boundary_id(Dirichlet);
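Pulling the fragments above together, a compact sketch of the whole <code>make_grid()</code> routine might look as follows. The <code>Dirichlet</code> enum value and the refinement criterion are taken from the listing; the unit-square domain, the loop boilerplate and the use of <code>execute_coarsening_and_refinement()</code> are assumptions consistent with the surrounding text.

@code
template <int dim>
void LDGPoissonProblem<dim>::make_grid()
{
  GridGenerator::hyper_cube(triangulation, 0, 1);   // assumed domain
  triangulation.refine_global(n_refine);

  const unsigned int local_refine = 2;
  for (unsigned int i = 0; i < local_refine; ++i)
    {
      // Flag cells near x=0 and x=1 (with y near 1) for refinement.
      for (const auto &cell : triangulation.active_cell_iterators())
        if (cell->center()[1] > 0.9)
          if ((cell->center()[0] > 0.9) || (cell->center()[0] < 0.1))
            cell->set_refine_flag();
      triangulation.execute_coarsening_and_refinement();
    }

  // Label every boundary face as a Dirichlet boundary.
  for (const auto &cell : triangulation.active_cell_iterators())
    for (const auto &face : cell->face_iterators())
      if (face->at_boundary())
        face->set_boundary_id(Dirichlet);
}
@endcode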
917 * <a name="LDGPoisson.cc-make_dofs"></a>
918 * <h4>make_dofs</h4>
919 * This function is responsible for distributing the degrees of
920 * freedom (dofs) to the processors and allocating memory for
921 * the global system matrix, <code>system_matrix</code>,
922 * and the global right hand side vector, <code>system_rhs</code>.
923 * The dofs are the unknown coefficients for the polynomial
924 * approximation of our solution to Poisson's equation in the scalar
925 * variable and its gradient.
930 * LDGPoissonProblem<dim>::
933 * TimerOutput::Scope t(computing_timer, "setup");
937 * The first step to setting up our linear system is to
938 * distribute the degrees of freedom (dofs) across the processors;
939 * this is done with the <code>distribute_dofs()</code>
940 * method of the DoFHandler. We remark that this is
941 * the same exact function call that occurs when using deal.ii
942 * on a single machine; the DoFHandler automatically knows
943 * that we are in a distributed setting because it was instantiated
944 * with a distributed triangulation!
947 * dof_handler.distribute_dofs(fe);
951 * We now renumber the dofs so that the vector of unknown dofs
952 * that we are solving for, <code>locally_relevant_solution</code>,
953 * corresponds to a vector of the form,
957 * @f$ \left[\begin{matrix} \textbf{Q} \\ \textbf{U} \end{matrix}\right] @f$
960 * DoFRenumbering::component_wise(dof_handler);
964 * Now we get the locally owned dofs, that is the dofs that are local
965 * to this processor. These dofs correspond to the entries in the
966 * matrix and vectors that we will write to.
969 * const IndexSet &locally_owned_dofs = dof_handler.locally_owned_dofs();
973 * In addition to the locally owned dofs, we also need the locally
974 * relevant dofs. These are the dofs that we have read access to and
975 * need in order to do computations on our processor, but that
976 * we do not have the ability to write to.
979 * const IndexSet locally_relevant_dofs =
980 * DoFTools::extract_locally_relevant_dofs(dof_handler);
982 * const std::vector<types::global_dof_index> dofs_per_component =
983 * DoFTools::count_dofs_per_fe_component(dof_handler);
987 * Discontinuous Galerkin methods are fantastic methods in part because
988 * many of the limitations of traditional finite element methods no longer
989 * exist. Specifically, the need to use constraint matrices
990 * in order to handle hanging nodes is no longer necessary. However,
991 * we will continue to use the constraint matrices in order to efficiently
992 * distribute local computations to the global system, i.e. to the
993 * <code>system_matrix</code> and <code>system_rhs</code>. Therefore, we
994 * just instantiate the constraints matrix object, and clear and close it.
997 * constraints.clear();
998 * constraints.close();
1003 * Just like @ref step_40 "step-40" we create a dynamic sparsity pattern
1004 * and distribute it to the processors. Notice how we do not have to
1005 * explicitly mention that we are using a FESystem for a system of
1006 * variables instead of a FE_DGQ for a scalar variable,
1007 * or that we are using a distributed DoFHandler. All these specifics
1008 * are taken care of under the hood by the deal.ii library.
1009 * In order to build the sparsity
1010 * pattern we use the DoFTools::make_flux_sparsity_pattern function
1011 * since we are using a DG method and need to take into account the DG
1012 * fluxes in the sparsity pattern.
1015 * DynamicSparsityPattern dsp(dof_handler.n_dofs());
1016 * DoFTools::make_flux_sparsity_pattern(dof_handler,
1019 * SparsityTools::distribute_sparsity_pattern(dsp,
1020 * dof_handler.locally_owned_dofs(),
1022 * locally_relevant_dofs);
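For orientation, here is a condensed sketch of the setup steps discussed above, using the member names from the class declaration. The function calls are the standard deal.ii ones, but details omitted from the listing (the MPI communicator arguments, the writable-ghost flag on <code>system_rhs</code>) are my assumptions.

@code
template <int dim>
void LDGPoissonProblem<dim>::make_dofs()
{
  dof_handler.distribute_dofs(fe);
  DoFRenumbering::component_wise(dof_handler);   // order dofs as [Q; U]

  const IndexSet &locally_owned_dofs = dof_handler.locally_owned_dofs();
  const IndexSet locally_relevant_dofs =
    DoFTools::extract_locally_relevant_dofs(dof_handler);

  constraints.clear();
  constraints.close();                           // no constraints for DG

  // Flux sparsity pattern: also couples dofs of face-neighboring cells.
  DynamicSparsityPattern dsp(dof_handler.n_dofs());
  DoFTools::make_flux_sparsity_pattern(dof_handler, dsp);
  SparsityTools::distribute_sparsity_pattern(dsp,
                                             locally_owned_dofs,
                                             MPI_COMM_WORLD,
                                             locally_relevant_dofs);

  system_matrix.reinit(locally_owned_dofs, locally_owned_dofs,
                       dsp, MPI_COMM_WORLD);
  locally_relevant_solution.reinit(locally_relevant_dofs, MPI_COMM_WORLD);
  system_rhs.reinit(locally_owned_dofs, locally_relevant_dofs,
                    MPI_COMM_WORLD, /*vector_writable=*/true);
}
@endcode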
1026 * Here is one area that I had to learn the hard way. The local
1027 * discontinuous Galerkin method, like the mixed method with the
1028 * Raviart-Thomas element, is written
1029 * in mixed form and will lead to a block-structured matrix.
1030 * In @ref step_20 "step-20" we see that we initialize the
1031 * <code>system_matrix</code>
1032 * such that we explicitly declare it to be block-structured.
1033 * It turns out there are reasons to do this when you are going to be
1034 * using a Schur complement method to solve the system of equations.
1035 * While the LDG method will lead to a block-structured matrix,
1036 * we do not have to explicitly declare our matrix to be one.
1037 * I found that most of the distributed linear solvers did not
1038 * accept block-structured matrices and since I was using a
1039 * distributed direct solver it was unnecessary to explicitly use a
1040 * block-structured matrix.
1043 * system_matrix.reinit(locally_owned_dofs,
1044 * locally_owned_dofs,
1050 * The final note that I will make about this subroutine is that
1051 * we initialize this processor's solution and
1052 * right hand side vector the exact same way as we did in @ref step_40 "step-40".
1053 * We should note that the <code>locally_relevant_solution</code> solution
1054 * vector includes dofs that are locally relevant to our computations
1055 * while the <code>system_rhs</code> right hand side vector will only
1056 * include dofs that are locally owned by this processor.
1059 * locally_relevant_solution.reinit(locally_relevant_dofs,
1062 * system_rhs.reinit(locally_owned_dofs,
1063 * locally_relevant_dofs,
1067 * const unsigned int n_vector_field = dim * dofs_per_component[0];
1068 * const unsigned int n_potential = dofs_per_component[dim];
1070 * pcout << "Number of active cells : "
1071 * << triangulation.n_global_active_cells()
1073 * << "Number of degrees of freedom: "
1074 * << dof_handler.n_dofs()
1075 * << " (" << n_vector_field << " + " << n_potential << ")"
1082 * <a name="LDGPoisson.cc-assemble_system"></a>
1083 * <h4>assemble_system</h4>
1084 * This is the function that will assemble the global system matrix and
1085 * global right hand side vector for the LDG method. It starts out
1086 * like many of the deal.ii tutorial codes: declaring quadrature formulas
1087 * and UpdateFlags objects, as well as vectors that will hold the
1088 * dof indices for the cells we are working on in the global system.
1091 * template <int dim>
1093 * LDGPoissonProblem<dim>::
1096 * TimerOutput::Scope t(computing_timer, "assembly");
1098 * QGauss<dim> quadrature_formula(fe.degree+1);
1099 * QGauss<dim-1> face_quadrature_formula(fe.degree+1);
1101 * const UpdateFlags update_flags = update_values
1102 * | update_gradients
1103 * | update_quadrature_points
1104 * | update_JxW_values;
1106 * const UpdateFlags face_update_flags = update_values
1107 * | update_normal_vectors
1108 * | update_quadrature_points
1109 * | update_JxW_values;
1111 * const unsigned int dofs_per_cell = fe.dofs_per_cell;
1112 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1113 * std::vector<types::global_dof_index>
1114 * local_neighbor_dof_indices(dofs_per_cell);
1118 * We first remark that we have the FEValues objects for
1119 * the values of our cell basis functions as was done in most
1120 * other examples. Now because we
1121 * are using discontinuous Galerkin methods we also introduce a
1122 * FEFaceValues object, <code>fe_face_values</code>,
1123 * for evaluating the basis functions
1124 * on one side of an element face as well as another FEFaceValues object,
1125 * <code>fe_neighbor_face_values</code>, for evaluating the basis functions
1126 * on the opposite side of the face, i.e. on the neighboring element's face.
1127 * In addition, we introduce a FESubfaceValues object,
1128 * <code>fe_subface_values</code>, that
1129 * will be used for dealing with faces that have multiple refinement
1130 * levels, i.e. hanging nodes. When we have to evaluate the fluxes across
1131 * a face that has multiple refinement levels, we need to evaluate the
1132 * fluxes across all its children's faces; we'll explain this more when
1133 * we get to that point in the assembly loop.
1136 * FEValues<dim> fe_values(fe, quadrature_formula, update_flags);
1139 * face_update_flags);
1142 * face_quadrature_formula,
1143 * face_update_flags);
1146 * face_update_flags);
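Since the constructor calls above are only partially visible, here is a sketch of what the four evaluation objects plausibly look like when written out in full. The variable names follow the listing; everything else is the standard FEValues/FEFaceValues/FESubfaceValues constructor pattern and should be read as an assumption rather than the original code.

@code
FEValues<dim>        fe_values(fe, quadrature_formula, update_flags);

FEFaceValues<dim>    fe_face_values(fe, face_quadrature_formula,
                                    face_update_flags);

FEFaceValues<dim>    fe_neighbor_face_values(fe,
                                             face_quadrature_formula,
                                             face_update_flags);

FESubfaceValues<dim> fe_subface_values(fe, face_quadrature_formula,
                                       face_update_flags);
@endcode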
1150 * Here are the local (dense) matrix and right hand side vector for
1151 * the solid integrals as well as the integrals on the boundaries in the
1152 * local discontinuous Galerkin method. These terms will be built for
1153 * each local element in the mesh and then distributed to the global
1154 * system matrix and right hand side vector.
1162 * The next four matrices are used to incorporate the flux integrals across
1163 * the interior faces of the mesh:
1172 * As explained in the section on the LDG method we take our test
1173 * function to be v and multiply it on the left side of our differential
1174 * equation that is on u and perform integration by parts as explained in the
1175 * introduction. Using this notation for test and solution function,
1176 * the matrices below will then stand for:
1180 * <code>vi_ui</code> - Taking the value of the test function from the
1181 * interior of this cell's face and the solution function
1182 * from the interior of this cell.
1186 * <code>vi_ue</code> - Taking the value of the test function from the
1187 * interior of this cell's face and the solution function
1188 * from the exterior of this cell.
1192 * <code>ve_ui</code> - Taking the value of the test function from the
1193 * exterior of this cell's face and the solution function
1194 * from the interior of this cell.
1198 * <code>ve_ue</code> - Taking the value of the test function from the
1199 * exterior of this cell's face and the solution function
1200 * from the exterior of this cell.
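As a concrete illustration of the list above, the four local flux matrices are plain FullMatrix objects; a sketch of their declaration (the equal sizes are an assumption that follows from both cells using the same finite element):

@code
FullMatrix<double> vi_ui_matrix(dofs_per_cell, dofs_per_cell);
FullMatrix<double> vi_ue_matrix(dofs_per_cell, dofs_per_cell);
FullMatrix<double> ve_ui_matrix(dofs_per_cell, dofs_per_cell);
FullMatrix<double> ve_ue_matrix(dofs_per_cell, dofs_per_cell);
@endcode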
1204 * Now that we have gotten the preliminaries out of the way,
1205 * we loop over all the cells
1206 * and assemble the local system matrix and local right hand side vector.
1211 * cell = dof_handler.begin_active(),
1212 * endc = dof_handler.end();
1214 * for (; cell != endc; ++cell)
1218 * Now, since we are working in a distributed setting,
1219 * we can only work on cells and write to dofs in the
1220 * <code>system_matrix</code>
1221 * and <code>rhs_vector</code>
1222 * that correspond to cells that are locally owned
1223 * by this processor. We note that while we can only write to locally
1224 * owned dofs, we will still use information from cells that are
1225 * locally relevant. This is very much the same as in @ref step_40 "step-40".
1228 * if (cell->is_locally_owned())
1232 * We first assemble the portion of the local matrix
1233 * that includes the solid integrals in the LDG method as well as
1234 * the right hand side vector. This involves resetting the local
1235 * matrix and vector to contain all zeros, reinitializing the
1236 * FEValues object for this cell and then building the
1237 * <code>local_matrix</code> and <code>local_rhs</code> vector.
1243 * fe_values.reinit(cell);
1244 * assemble_cell_terms(fe_values,
1250 * We remark that we need to get the local dof indices for
1251 * this cell before we begin to compute the contributions
1252 * from the numerical fluxes, i.e. the boundary conditions and
1253 * the interior face fluxes.
1256 * cell->get_dof_indices(local_dof_indices);
1260 * Now is where we start to loop over all the faces of the cell
1261 * and construct the local contributions from the numerical fluxes.
1262 * The numerical fluxes will be due to 3 contributions: the
1263 * interior faces, the faces on the Neumann boundary and the faces
1264 * on the Dirichlet boundary. We instantiate a
1265 * <code>face_iterator</code> to loop
1266 * over all the faces of this cell and first see if the face is on
1267 * the boundary. Notice how we do not reinitialize the
1268 * <code>fe_face_values</code>
1269 * object for the face until we know that we are actually on a face
1270 * that lies on the boundary of the domain. The reason for doing this
1271 * is for computational efficiency; reinitializing the FEFaceValues
1272 * for each face is expensive and we do not want to do it unless we
1273 * are actually going to use it to do computations. After this, we test
1274 * whether the face
1275 * is on a Dirichlet or a Neumann segment of the boundary and
1276 * call the appropriate subroutine to assemble the contributions for
1277 * that boundary. Note that this assembles the flux contribution
1278 * in the <code>local_matrix</code> as well as the boundary
1279 * condition that ends up
1280 * in the <code>local_vector</code>.
1283 * for (unsigned int face_no = 0;
1284 *      face_no < GeometryInfo<dim>::faces_per_cell;
1288 *   cell->face(face_no);
1290 * if (face->at_boundary())
1292 *   fe_face_values.reinit(cell, face_no);
1294 * if (face->boundary_id() == Dirichlet)
1298 * Here notice that in order to assemble the
1299 * flux due to the penalty term for the
1300 * Dirichlet boundary condition we need the
1301 * local cell diameter size, and we can get
1302 * that value for this specific cell with
1303 * the following:
1306 * double h = cell->diameter();
1307 * assemble_Dirichlet_boundary_terms(fe_face_values,
1312 * else if (face->boundary_id() == Neumann)
1314 *   assemble_Neumann_boundary_terms(fe_face_values,
1319 * Assert(false, ExcNotImplemented());
1325 * At this point we know that the face we are on is an
1326 * interior face. We can therefore begin to assemble the interior
1327 * flux matrices, but first we want to make sure that the
1328 * neighbor cell to this face is a valid cell. Once we know
1329 * that the neighbor is a valid cell then we also want to get
1330 * the neighbor cell that shares this cell's face.
1336 * Assert(cell->neighbor(face_no).state() ==
1337 * IteratorState::valid,
1338 * ExcInternalError());
1340 * typename DoFHandler<dim>::cell_iterator neighbor =
1341 * cell->neighbor(face_no);
1345 * Now that we have the two cells whose face we want to
1346 * compute the numerical flux across, we need to know
1347 * if the face has been refined, i.e. if it has children
1348 * faces. This occurs when one of the cells has a
1349 * different level of refinement than
1350 * the other cell. If this is the case, then this face
1351 * has a different level of refinement than the other faces
1352 * of the cell, i.e. on this face there is a hanging node.
1353 * Hanging nodes are not a problem in DG methods; the only
1354 * time we have to watch out for them is at this step,
1355 * and as you will see the changes we have to make
1356 * here are minor.
1359 * if (face->has_children())
1363 * We now need to find the face of our neighbor cell
1364 * such that neighbor(neigh_face_no) = cell(face_no).
1367 * const unsigned int neighbor_face_no =
1368 * cell->neighbor_of_neighbor(face_no);
1372 * Once we do this we then have to loop over all the
1373 * subfaces (children faces) of our cell's face and
1374 * compute the interior fluxes across the children faces
1375 * and the neighbor's face.
1378 * for (unsigned int subface_no = 0;
1379 *      subface_no < face->n_children();
1384 * We then get the neighbor cell's subface that
1385 * matches our cell face's subface and the
1386 * specific subface number. We assert that the parent
1387 * face cannot be more than one level of
1388 * refinement above the child's face. This is
1389 * because the deal.ii library does not allow
1390 * neighboring cells to have refinement levels
1391 * that differ by more than one level.
1395 * cell->neighbor_child_on_subface(face_no,
1398 * Assert(!neighbor_child->has_children(),
1399 *        ExcInternalError());
1403 * Now that we are ready to build the local flux
1404 * matrices across
1405 * this face we reset them to zero and
1406 * reinitialize the <code>fe_subface_values</code> object
1407 * on this cell's subface and the
1408 * <code>fe_neighbor_face_values</code> object on
1409 * <code>neighbor_child</code>'s face, i.e. on the
1410 * appropriate faces.
1418 * fe_subface_values.reinit(cell, face_no, subface_no);
1419 * fe_neighbor_face_values.reinit(neighbor_child,
1420 *                                neighbor_face_no);
1424 * In addition, we get the minimum of the diameters of
1425 * the two cells to include in the penalty term.
1428 * double h = std::min(cell->diameter(),
1429 *                     neighbor_child->diameter());
1433 * We now finally assemble the interior fluxes for
1434 * the case of a face which has been refined, using
1435 * exactly the same subroutine as we do when both
1436 * cells have the same refinement level.
1439 * assemble_flux_terms(fe_subface_values,
1440 *                     fe_neighbor_face_values,
1449 * Now all that is left to be done before distributing
1450 * the local flux matrices to the global system
1451 * is to get the neighbor child's dof indices.
1454 * neighbor_child->get_dof_indices(local_neighbor_dof_indices);
1458 * Once we have this cell's dof indices and the
1459 * neighboring cell's dof indices we can use the
1460 * ConstraintMatrix to distribute the local flux
1461 * matrices to the global system matrix.
1462 * This is done through the class function
1463 * <code>distribute_local_flux_to_global()</code>.
1466 * distribute_local_flux_to_global(
1471 *     local_dof_indices,
1472 *     local_neighbor_dof_indices);
1479 * At this point we know that this cell and the neighbor
1480 * of this cell are on the same refinement level and
1481 * the work to assemble the interior flux matrices
1482 * is very much the same as before. In fact it is
1483 * much simpler since we do not have to loop through the
1484 * subfaces. However, we have to check that we do
1485 * not compute the same contribution twice. This would
1486 * happen because we are looping over all the faces of
1487 * all the cells in the mesh
1488 * and assembling the interior flux matrices for each face.
1489 * To avoid assembling the interior flux matrices
1490 * twice we only compute the
1491 * interior fluxes once for each face by restricting
1492 * the following computation to occur only on
1493 * the cell with the lower CellId.
1496 * if (neighbor->level() == cell->level() &&
1497 * cell->id() < neighbor->id())
1501 * Here we find the neighbor face such that
1502 * neighbor(neigh_face_no) = cell(face_no).
1503 * In addition, we reinitialize the FEFaceValues
1504 * objects on the
1505 * respective cells' faces, as well as get the
1506 * minimum diameter of this cell
1507 * and the neighbor cell and assign
1508 * it to <code>h</code>.
1511 * const unsigned int neighbor_face_no =
1512 *   cell->neighbor_of_neighbor(face_no);
1519 * fe_face_values.reinit(cell, face_no);
1520 * fe_neighbor_face_values.reinit(neighbor,
1521 *                                neighbor_face_no);
1523 * double h = std::min(cell->diameter(),
1524 *                     neighbor->diameter());
1528 * Just as before we assemble the interior fluxes
1529 * using the
1530 * <code>assemble_flux_terms</code> subroutine,
1531 * get the neighbor cell's
1532 * face dof indices and use the constraint matrix to
1533 * distribute the local flux matrices to the global
1534 * <code>system_matrix</code> using the class
1535 * function
1536 * <code>distribute_local_flux_to_global()</code>.
1539 * assemble_flux_terms(fe_face_values,
1540 *                     fe_neighbor_face_values,
1547 * neighbor->get_dof_indices(local_neighbor_dof_indices);
1549 * distribute_local_flux_to_global(
1554 *     local_dof_indices,
1555 *     local_neighbor_dof_indices);
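Putting the pieces of this branch together, a sketch of the same-refinement-level case reads roughly as follows. The zeroing of the four local matrices and the final <code>h</code> argument of <code>assemble_flux_terms</code> are implied by the surrounding text but not visible in the listing, so they are assumptions here.

@code
if (neighbor->level() == cell->level() &&
    cell->id() < neighbor->id())
  {
    const unsigned int neighbor_face_no =
      cell->neighbor_of_neighbor(face_no);

    // Reset the four local flux matrices for this face.
    vi_ui_matrix = 0;
    vi_ue_matrix = 0;
    ve_ui_matrix = 0;
    ve_ue_matrix = 0;

    fe_face_values.reinit(cell, face_no);
    fe_neighbor_face_values.reinit(neighbor, neighbor_face_no);

    const double h = std::min(cell->diameter(), neighbor->diameter());

    assemble_flux_terms(fe_face_values,
                        fe_neighbor_face_values,
                        vi_ui_matrix,
                        vi_ue_matrix,
                        ve_ui_matrix,
                        ve_ue_matrix,
                        h);                       // assumed penalty length scale

    neighbor->get_dof_indices(local_neighbor_dof_indices);

    distribute_local_flux_to_global(vi_ui_matrix,
                                    vi_ue_matrix,
                                    ve_ui_matrix,
                                    ve_ue_matrix,
                                    local_dof_indices,
                                    local_neighbor_dof_indices);
  }
@endcode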
1566 * Now that we have looped over all the faces for this
1567 * cell and computed as well as distributed the local
1568 * flux matrices to the <code>system_matrix</code>, we
1569 * can finally distribute the cell's <code>local_matrix</code>
1570 * and <code>local_vector</code> contribution to the
1571 * global system matrix and global right hand side vector.
1572 * We remark that we have to wait until this point
1573 * to distribute the <code>local_matrix</code>
1574 * and <code>system_rhs</code> to the global system.
1575 * The reason is that in looping over the faces,
1576 * the faces on the boundary of the domain contribute
1577 * to the <code>local_matrix</code>
1578 * and <code>system_rhs</code>. We could distribute
1579 * the local contributions for each component separately,
1580 * but writing to the distributed sparse matrix and vector
1581 * is expensive and we want to minimize the number of times
1582 * we do so.
1585 * constraints.distribute_local_to_global(local_matrix,
1586 * local_dof_indices,
1589 * constraints.distribute_local_to_global(local_vector,
1590 * local_dof_indices,
1598 * We need to synchronize assembly of our global system
1599 * matrix and global right hand side vector with all the other
1600 * processors, and we
1601 * use the compress() function to do this.
1602 * This was discussed in detail in @ref step_40 "step-40".
1605 * system_matrix.compress(VectorOperation::add);
1606 * system_rhs.compress(VectorOperation::add);
1613 * <a name="LDGPoisson.cc-assemble_cell_terms"></a>
1614 * <h4>assemble_cell_terms</h4>
1615 * This function deals with constructing the local matrix due to
1616 * the solid integrals over each element and is very similar to
1617 * the other examples in the deal.ii tutorials.
1622 * LDGPoissonProblem<dim>::
1623 * assemble_cell_terms(
1624 * const FEValues<dim> &cell_fe,
1625 * FullMatrix<double> &cell_matrix,
1626 * Vector<double> &cell_vector)
1628 * const unsigned int dofs_per_cell = cell_fe.dofs_per_cell;
1629 * const unsigned int n_q_points = cell_fe.n_quadrature_points;
1631 * const FEValuesExtractors::Vector VectorField(0);
1632 * const FEValuesExtractors::Scalar Potential(dim);
1634 * std::vector<double> rhs_values(n_q_points);
1638 * We first get the value of the right hand side function
1639 * evaluated at the quadrature points in the cell.
1642 * rhs_function.value_list(cell_fe.get_quadrature_points(),
1647 * Now, we loop over the quadrature points in the
1648 * cell and then loop over the degrees of freedom and perform
1649 * quadrature to approximate the integrals.
1652 * for (unsigned int q=0; q<n_q_points; ++q)
1654 * for (unsigned int i=0; i<dofs_per_cell; ++i)
1656 * const Tensor<1, dim> psi_i_field = cell_fe[VectorField].value(i,q);
1657 * const double div_psi_i_field = cell_fe[VectorField].divergence(i,q);
1658 * const double psi_i_potential = cell_fe[Potential].value(i,q);
1659 * const Tensor<1, dim> grad_psi_i_potential = cell_fe[Potential].gradient(i,q);
1661 * for (unsigned int j=0; j<dofs_per_cell; ++j)
1663 * const Tensor<1, dim> psi_j_field = cell_fe[VectorField].value(j,q);
1664 * const double psi_j_potential = cell_fe[Potential].value(j,q);
1668 * This computation corresponds to assembling the local system
1669 * matrix for the integral over an element,
1673 * @f$\int_{\Omega_{e}} \left(\textbf{w} \cdot \textbf{q}
1674 *   - \nabla \cdot \textbf{w} \, u
1675 *   - \nabla w \cdot \textbf{q} \right) dx @f$
1679 * cell_matrix(i,j) += ( (psi_i_field * psi_j_field)
1681 * (div_psi_i_field * psi_j_potential)
1683 * (grad_psi_i_potential * psi_j_field)
1684 * ) * cell_fe.JxW(q);
1689 * And this local right hand vector corresponds to the integral
1690 * over the element cell,
1694 * @f$ \int_{\Omega_{e}} w \, f(\textbf{x}) \, dx @f$
1697 * cell_vector(i) += psi_i_potential *
1698 *                   rhs_values[q] *
1699 *                   cell_fe.JxW(q);
1707 * <a name="LDGPoisson.cc-assemble_Dirichlet_boundary_terms"></a>
1708 * <h4>assemble_Dirichlet_boundary_terms</h4>
1709 * Here we have the function that builds the <code>local_matrix</code>
1711 * and local right hand side vector, <code>local_vector</code>
1712 * for the Dirichlet boundary conditions.
1717 * LDGPoissonProblem<dim>::
1718 * assemble_Dirichlet_boundary_terms(
1719 * const FEFaceValues<dim> &face_fe,
1720 * FullMatrix<double> &local_matrix,
1721 * Vector<double> &local_vector,
1724 * const unsigned int dofs_per_cell = face_fe.dofs_per_cell;
1725 * const unsigned int n_q_points = face_fe.n_quadrature_points;
1727 * const FEValuesExtractors::Vector VectorField(0);
1728 * const FEValuesExtractors::Scalar Potential(dim);
1730 * std::vector<double> Dirichlet_bc_values(n_q_points);
1734 * In order to evaluate the flux on the Dirichlet boundary face we
1735 * first get the value of the Dirichlet boundary function on the quadrature
1736 * points of the face. Then we loop over all the quadrature points and
1737 * degrees of freedom and approximate the integrals on the Dirichlet boundary
1738 * faces.
1741 * Dirichlet_bc_function.value_list(face_fe.get_quadrature_points(),
1742 * Dirichlet_bc_values);
1744 * for (unsigned int q=0; q<n_q_points; ++q)
1746 * for (unsigned int i=0; i<dofs_per_cell; ++i)
1748 * const Tensor<1, dim> psi_i_field = face_fe[VectorField].value(i,q);
1749 * const double psi_i_potential = face_fe[Potential].value(i,q);
1751 * for (unsigned int j=0; j<dofs_per_cell; ++j)
1753 * const Tensor<1, dim> psi_j_field = face_fe[VectorField].value(j,q);
1754 * const double psi_j_potential = face_fe[Potential].value(j,q);
1758 * We compute the contribution for the flux @f$\widehat{q}@f$ on
1759 * the Dirichlet boundary, which enters our system matrix as
1763 * @f$ \int_{\text{face}} w \, ( \textbf{n} \cdot \textbf{q}
1764 * + \sigma u) ds @f$
1767 * local_matrix(i,j) += psi_i_potential * (
1768 * face_fe.normal_vector(q) *
1772 * psi_j_potential) *
1779 * We also compute the contribution for the flux for @f$\widehat{u}@f$
1780 * on the Dirichlet boundary which is the Dirichlet boundary
1781 * condition function and enters the right hand side vector as
1785 * @f$\int_{\text{face}} (-\textbf{w} \cdot \textbf{n}
1786 * + \sigma w) \, u_{D} ds @f$
1789 * local_vector(i) += (-1.0 * psi_i_field *
1790 * face_fe.normal_vector(q)
1793 * psi_i_potential) *
1794 * Dirichlet_bc_values[q] *
1803 * <a name="LDGPoisson.cc-assemble_Neumann_boundary_terms"></a>
1804 * <h4>assemble_Neumann_boundary_terms</h4>
1805 * Here we have the function that builds the <code>local_matrix</code>
1806 * and <code>local_vector</code> for the Neumann boundary conditions.
1811 * LDGPoissonProblem<dim>::
1812 * assemble_Neumann_boundary_terms(
1813 * const FEFaceValues<dim> &face_fe,
1814 * FullMatrix<double> &local_matrix,
1815 * Vector<double> &local_vector)
1817 * const unsigned int dofs_per_cell = face_fe.dofs_per_cell;
1818 * const unsigned int n_q_points = face_fe.n_quadrature_points;
1820 * const FEValuesExtractors::Vector VectorField(0);
1821 * const FEValuesExtractors::Scalar Potential(dim);
1825 * In order to evaluate the flux on the Neumann boundary face we
1826 * first get the value of the Neumann boundary function on the quadrature
1827 * points of the face. Then we loop over all the quadrature points and
1828 * degrees of freedom and approximate the integrals on the Neumann boundary
1829 * faces.
1832 * std::vector<double > Neumann_bc_values(n_q_points);
1834 * for (unsigned int q=0; q<n_q_points; ++q)
1836 * for (unsigned int i=0; i<dofs_per_cell; ++i)
1838 * const Tensor<1, dim> psi_i_field = face_fe[VectorField].value(i,q);
1839 * const double psi_i_potential = face_fe[Potential].value(i,q);
1841 * for (unsigned int j=0; j<dofs_per_cell; ++j)
1844 * const double psi_j_potential = face_fe[Potential].value(j,q);
1848 * We compute the contribution for the flux @f$\widehat{u}@f$ on the
1849 * Neumann boundary, which enters our system matrix as
1853 * @f$\int_{\text{face}} \textbf{w} \cdot \textbf{n} \, u \, ds @f$
1856 * local_matrix(i,j) += psi_i_field *
1857 * face_fe.normal_vector(q) *
1865 * We also compute the contribution for the flux for
1866 * @f$\widehat{q}@f$ on the Neumann boundary which is the
1867 * Neumann boundary condition and enters the right
1868 * hand side vector as
1872 * @f$\int_{\text{face}} -w \, g_{N} \, ds@f$
1875 * local_vector(i) += -psi_i_potential *
1876 * Neumann_bc_values[q] *
1885 * <a name="LDGPoisson.cc-assemble_flux_terms"></a>
1886 * <h4>assemble_flux_terms</h4>
1887 * Now we finally get to the function which builds the interior fluxes.
1888 * This is a rather long function
1889 * and we will describe what is going on in detail.
1894 * LDGPoissonProblem<dim>::
1895 * assemble_flux_terms(
1896 * const FEFaceValuesBase<dim> &fe_face_values,
1897 * const FEFaceValuesBase<dim> &fe_neighbor_face_values,
1898 * FullMatrix<double> &vi_ui_matrix,
1899 * FullMatrix<double> &vi_ue_matrix,
1900 * FullMatrix<double> &ve_ui_matrix,
1901 * FullMatrix<double> &ve_ue_matrix,
1904 * const unsigned int n_face_points = fe_face_values.n_quadrature_points;
1905 * const unsigned int dofs_this_cell = fe_face_values.dofs_per_cell;
1906 * const unsigned int dofs_neighbor_cell = fe_neighbor_face_values.dofs_per_cell;
1908 * const FEValuesExtractors::Vector VectorField(0);
1909 * const FEValuesExtractors::Scalar Potential(dim);
1913 * The first thing we do after the boilerplate is to define
1914 * the unit vector @f$\boldsymbol \beta@f$ that is used in defining
1915 * the LDG/alternating fluxes.
1919 * for (int i=0; i<dim; ++i)
1921 * beta /= sqrt(beta.square());
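For completeness, here is a sketch of the full definition of @f$\boldsymbol \beta@f$. The component assignment inside the loop is elided from the listing, so setting every component to one is my assumption; any fixed nonzero vector normalized to unit length serves the purpose of the alternating fluxes.

@code
Point<dim> beta;
for (int i = 0; i < dim; ++i)
  beta(i) = 1.0;                     // assumed: equal components before normalization
beta /= std::sqrt(beta.square());    // normalize to a unit vector
@endcode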
1925 * Now we loop over all the quadrature points on the element face
1926 * and loop over all the degrees of freedom and approximate
1927 * the following flux integrals.
1930 * for (unsigned int q=0; q<n_face_points; ++q)
1932 * for (unsigned int i=0; i<dofs_this_cell; ++i)
1934 * const Tensor<1,dim> psi_i_field_minus =
1935 * fe_face_values[VectorField].value(i,q);
1936 * const double psi_i_potential_minus =
1937 * fe_face_values[Potential].value(i,q);
1939 * for (unsigned int j=0; j<dofs_this_cell; ++j)
1941 * const Tensor<1,dim> psi_j_field_minus =
1942 * fe_face_values[VectorField].value(j,q);
1943 * const double psi_j_potential_minus =
1944 * fe_face_values[Potential].value(j,q);
1948 * We compute the flux matrix where the test function's
1949 * as well as the solution function's values are taken from
1950 * the interior of this cell's face. This corresponds to the computation,
1954 * @f$\int_{\text{face}}
1955 * \left( \frac{1}{2} \, \textbf{n}^{-}
1956 * \cdot ( \textbf{w}^{-} u^{-}
1957 * + w^{-} \textbf{q}^{-})
1958 * + \boldsymbol \beta \cdot \textbf{w}^{-} u^{-}
1959 * - w^{-} \boldsymbol \beta \cdot \textbf{q}^{-}
1960 * + \sigma w^{-} \, u^{-} \right) ds@f$
1963 * vi_ui_matrix(i,j) += (0.5 * (
1964 * psi_i_field_minus *
1965 * fe_face_values.normal_vector(q) *
1966 * psi_j_potential_minus
1968 * psi_i_potential_minus *
1969 * fe_face_values.normal_vector(q) *
1970 * psi_j_field_minus )
1973 * psi_i_field_minus *
1974 * psi_j_potential_minus
1977 * psi_i_potential_minus *
1981 * psi_i_potential_minus *
1982 * psi_j_potential_minus
1984 * fe_face_values.JxW(q);
1987 * for (unsigned int j=0; j<dofs_neighbor_cell; ++j)
1989 * const Tensor<1,dim> psi_j_field_plus =
1990 * fe_neighbor_face_values[VectorField].value(j,q);
1991 * const double psi_j_potential_plus =
1992 * fe_neighbor_face_values[Potential].value(j,q);
1996 * We compute the flux matrix where the test function is
1997 * from the interior of this element's face and the solution
1998 * function is taken from the exterior. This corresponds
1999 * to the computation,
2003 * @f$\int_{\text{face}}
2004 * \left( \frac{1}{2} \, \textbf{n}^{-} \cdot
2005 * ( \textbf{w}^{-} u^{+}
2006 * + w^{-} \textbf{q}^{+})
2007 * - \boldsymbol \beta \cdot \textbf{w}^{-} u^{+}
2008 * + w^{-} \boldsymbol \beta \cdot \textbf{q}^{+}
2009 * - \sigma w^{-} \, u^{+} \right) ds @f$
2012 * vi_ue_matrix(i,j) += ( 0.5 * (
2013 * psi_i_field_minus *
2014 * fe_face_values.normal_vector(q) *
2015 * psi_j_potential_plus
2017 * psi_i_potential_minus *
2018 * fe_face_values.normal_vector(q) *
2019 * psi_j_field_plus )
2022 * psi_i_field_minus *
2023 * psi_j_potential_plus
2026 * psi_i_potential_minus *
2030 * psi_i_potential_minus *
2031 * psi_j_potential_plus
2033 * fe_face_values.JxW(q);
2037 * for (unsigned int i=0; i<dofs_neighbor_cell; ++i)
2039 * const Tensor<1,dim> psi_i_field_plus =
2040 * fe_neighbor_face_values[VectorField].value(i,q);
2041 * const double psi_i_potential_plus =
2042 * fe_neighbor_face_values[Potential].value(i,q);
2044 * for (unsigned int j=0; j<dofs_this_cell; ++j)
2046 * const Tensor<1,dim> psi_j_field_minus =
2047 * fe_face_values[VectorField].value(j,q);
2048 * const double psi_j_potential_minus =
2049 * fe_face_values[Potential].value(j,q);
2054 * We compute the flux matrix where the test function is
2055 * from the exterior of this element's face and the solution
2056 * function is taken from the interior. This corresponds
2057 * to the computation,
2061 * @f$ \int_{\text{face}}
2062 * \left( -\frac{1}{2}\, \textbf{n}^{-} \cdot
2063 * (\textbf{w}^{+} u^{-}
2064 * + w^{+} \textbf{q}^{-} )
2065 * - \boldsymbol \beta \cdot \textbf{w}^{+} u^{-}
2066 * + w^{+} \boldsymbol \beta \cdot \textbf{q}^{-}
2067 * - \sigma w^{+} u^{-} \right) ds @f$
2070 * ve_ui_matrix(i,j) += (-0.5 * (
2071 * psi_i_field_plus *
2072 * fe_face_values.normal_vector(q) *
2073 * psi_j_potential_minus
2075 * psi_i_potential_plus *
2076 * fe_face_values.normal_vector(q) *
2077 * psi_j_field_minus)
2080 * psi_i_field_plus *
2081 * psi_j_potential_minus
2084 * psi_i_potential_plus *
2088 * psi_i_potential_plus *
2089 * psi_j_potential_minus
2091 * fe_face_values.JxW(q);
2094 * for (unsigned int j=0; j<dofs_neighbor_cell; ++j)
2096 * const Tensor<1,dim> psi_j_field_plus =
2097 * fe_neighbor_face_values[VectorField].value(j,q);
2098 * const double psi_j_potential_plus =
2099 * fe_neighbor_face_values[Potential].value(j,q);
2103 * And lastly we compute the flux matrix where the test
2104 * function and solution function are taken from the exterior
2105 * cell to this face. This corresponds to the computation,
2109 * @f$\int_{\text{face}}
2110 * \left( -\frac{1}{2}\, \textbf{n}^{-} \cdot
2111 * ( \textbf{w}^{+} u^{+}
2112 * + w^{+} \textbf{q}^{+} )
2113 * + \boldsymbol \beta \cdot \textbf{w}^{+} u^{+}
2114 * - w^{+} \boldsymbol \beta \cdot \textbf{q}^{+}
2115 * + \sigma w^{+} u^{+} \right) ds @f$
2118 * ve_ue_matrix(i,j) += (-0.5 * (
2119 * psi_i_field_plus *
2120 * fe_face_values.normal_vector(q) *
2121 * psi_j_potential_plus
2123 * psi_i_potential_plus *
2124 * fe_face_values.normal_vector(q) *
2125 * psi_j_field_plus )
2128 * psi_i_field_plus *
2129 * psi_j_potential_plus
2132 * psi_i_potential_plus *
2136 * psi_i_potential_plus *
2137 * psi_j_potential_plus
2139 * fe_face_values.JxW(q);
2149 * <a name="LDGPoisson.cc-distribute_local_flux_to_global"></a>
2150 * <h4>distribute_local_flux_to_global</h4>
2151 * In this function we use the ConstraintMatrix to distribute
2152 * the local flux matrices to the global system matrix.
2153 * Since I have to do this twice in assembling the
2154 * system matrix, I made a function to do it rather than have
2155 * the same code in two places.
2156 * We remark that the reader should take special note of
2157 * which matrices we are distributing and the order
2158 * in which we pass the dof indices vectors. In distributing
2159 * the first matrix, i.e. <code>vi_ui_matrix</code>, we are
2160 * taking the test function and solution function values from
2161 * the interior of this cell and therefore only need the
2162 * <code>local_dof_indices</code> since it contains the dof
2163 * indices to this cell. When we distribute the second matrix,
2164 * <code>vi_ue_matrix</code>, the test function is taken
2165 * from the interior of
2166 * this cell while the solution function is taken from the
2167 * exterior, i.e. the neighbor cell. Notice that the order of the
2168 * degrees of freedom index vectors matches this pattern: first
2169 * the <code>local_dof_indices</code> which is local to
2170 * this cell and then
2171 * the <code>local_neighbor_dof_indices</code> which is
2172 * local to the neighbor's
2173 * cell. The order in which we pass the dof indices for the
2174 * matrices is paramount to constructing our global system
2175 * matrix properly. The ordering of the last two matrices
2176 * follows the same logic as the first two we discussed.
2181 * LDGPoissonProblem<dim>::
2182 * distribute_local_flux_to_global(
2187 *     const std::vector<types::global_dof_index> &local_dof_indices,
2188 *     const std::vector<types::global_dof_index> &local_neighbor_dof_indices)
2190 * constraints.distribute_local_to_global(vi_ui_matrix,
2191 * local_dof_indices,
2194 * constraints.distribute_local_to_global(vi_ue_matrix,
2195 * local_dof_indices,
2196 * local_neighbor_dof_indices,
2199 * constraints.distribute_local_to_global(ve_ui_matrix,
2200 * local_neighbor_dof_indices,
2201 * local_dof_indices,
2204 * constraints.distribute_local_to_global(ve_ue_matrix,
2205 * local_neighbor_dof_indices,
2214 * <a name="LDGPoisson.cc-solve"></a>
2215 * <h4>solve</h4>
2216 * As mentioned earlier I used a direct solver to solve
2217 * the linear system of equations resulting from the LDG
2218 * method applied to the Poisson equation. One could also
2219 * use an iterative solver; however, we then need to use
2220 * a preconditioner and that was something I did not want
2221 * to get into. For information on preconditioners
2222 * for the LDG Method see this
2223 * <a href="http://epubs.siam.org/doi/abs/10.1137/S1064827502410657">
2224 * paper</a>. The use of a direct solver here is
2225 * somewhat of a limitation. The built-in distributed
2226 * direct solver in Trilinos reduces everything to one
2227 * processor, solves the system and then distributes
2228 * everything back out to the other processors. However,
2229 * by linking to more advanced direct solvers through
2230 * Trilinos one can accomplish fully distributed computations
2231 * and not much about the following function calls would
2232 * have to change.
2237 * LDGPoissonProblem<dim>::
2244 * As in @ref step_40 "step-40", in order to perform a linear solve
2245 * we need a solution vector where there is no overlap across
2246 * the processors, and we create this by instantiating the
2247 * <code>completely_distributed_solution</code> solution
2248 * vector using
2249 * the copy constructor on the global system right hand
2250 * side vector, which is itself a completely distributed vector.
2254 * completely_distributed_solution(system_rhs);
2258 * Now we can perform the solve on the completely distributed
2259 * right hand side vector, system matrix and the completely
2260 * distributed solution.
2263 * solver.solve(system_matrix,
2264 * completely_distributed_solution,
2269 * We now distribute the constraints of our system onto the
2270 * completely distributed solution vector, but in our case with the LDG
2271 * method there are none.
2274 * constraints.distribute(completely_distributed_solution);
2278 * Lastly we copy the completely distributed solution vector,
2279 * <code>completely_distributed_solution</code>,
2280 * to the solution vector which has some overlap between
2281 * processors, <code>locally_relevant_solution</code>.
2282 * We need the overlapped portions of our solution
2283 * in order to be able to do computations using the solution
2284 * later in the code or in post processing.
2287 * locally_relevant_solution = completely_distributed_solution;
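Collecting the fragments above, the whole <code>solve()</code> routine is roughly the following sketch; the TimerOutput scope is an assumption carried over from the other member functions, and everything else mirrors the calls shown in the listing.

@code
template <int dim>
void LDGPoissonProblem<dim>::solve()
{
  TimerOutput::Scope t(computing_timer, "solve");

  // A non-ghosted vector, copy-constructed from the distributed rhs.
  TrilinosWrappers::MPI::Vector completely_distributed_solution(system_rhs);

  // Direct solve with the Trilinos solver set up in the constructor.
  solver.solve(system_matrix,
               completely_distributed_solution,
               system_rhs);

  // No constraints for the LDG method, but we keep the usual call.
  constraints.distribute(completely_distributed_solution);

  // Copy into the ghosted vector used for output and post-processing.
  locally_relevant_solution = completely_distributed_solution;
}
@endcode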
2293 * <a name="LDGPoisson.cc-output_results"></a>
2294 * <h4>output_results</h4>
2295 * This function deals with the writing of the results in parallel
2296 * to disk. It is almost exactly the same as
2297 * in @ref step_40 "step-40" and we won't go into it. It is noteworthy
2298 * that in @ref step_40 "step-40" the output is only the scalar solution,
2299 * while in our situation, we are outputting both the scalar
2300 * solution as well as the vector field solution. The only
2301 * difference between this function and the one in @ref step_40 "step-40"
2302 * is in the <code>solution_names</code> vector where we have to add
2303 * the gradient dimensions. Everything else is taken care
2304 * of by the deal.ii library!
2309 * LDGPoissonProblem<dim>::
2310 * output_results() const
2312 * std::vector<std::string> solution_names;
2316 * solution_names.push_back("u");
2317 * solution_names.push_back("du/dx");
2321 * solution_names.push_back("grad(u)_x");
2322 * solution_names.push_back("grad(u)_y");
2323 * solution_names.push_back("u");
2327 * solution_names.push_back("grad(u)_x");
2328 * solution_names.push_back("grad(u)_y");
2329 * solution_names.push_back("grad(u)_z");
2330 * solution_names.push_back("u");
2334 * Assert(false, ExcNotImplemented() );
2337 * DataOut<dim> data_out;
2338 * data_out.attach_dof_handler(dof_handler);
2339 * data_out.add_data_vector(locally_relevant_solution,
2342 * Vector<float> subdomain(triangulation.n_active_cells());
2344 * for (unsigned int i=0; i<subdomain.size(); ++i)
2345 * subdomain(i) = triangulation.locally_owned_subdomain();
2347 * data_out.add_data_vector(subdomain,"subdomain");
2349 * data_out.build_patches();
2351 * const std::string filename = ("solution." +
2352 * Utilities::int_to_string(
2353 * triangulation.locally_owned_subdomain(),4));
2355 * std::ofstream output((filename + ".vtu").c_str());
2356 * data_out.write_vtu(output);
2358 * if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0 )
2360 * std::vector<std::string> filenames;
2361 * for (unsigned int i=0;
2362 * i < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
2365 * filenames.push_back("solution." +
2366 * Utilities::int_to_string(i,4) +
2369 * std::ofstream master_output("solution.pvtu");
2370 * data_out.write_pvtu_record(master_output, filenames);
2378 * <a name="LDGPoisson.cc-run"></a>
2379 * <h4>run</h4>
2380 * The only public function of this class is pretty much exactly
2381 * the same as all the other deal.ii examples except that I set
2382 * the constant in the DG penalty (@f$\tilde{\sigma}@f$) to be 1.
2387 * LDGPoissonProblem<dim>::
2393 * assemble_system();
2402 * <a name="LDGPoisson.cc-main"></a>
2403 * <h4>main</h4>
2404 * Here is the main function of our program. Since it is nearly exactly
2405 * the same as @ref step_40 "step-40" and many of the other examples, I won't
2406 * go over it in detail.
2409 * int main(int argc, char *argv[])
2414 * using namespace dealii;
2421 * unsigned int degree = 1;
2422 * unsigned int n_refine = 6;
2423 * LDGPoissonProblem<2> Poisson(degree, n_refine);
2427 * catch (std::exception &exc)
2429 *   std::cerr << std::endl << std::endl
2430 *             << "----------------------------------------------------"
2432 *   std::cerr << "Exception on processing: " << std::endl
2433 *             << exc.what() << std::endl
2434 *             << "Aborting!" << std::endl
2435 *             << "----------------------------------------------------"
2442 *   std::cerr << std::endl << std::endl
2443 *             << "----------------------------------------------------"
2445 *   std::cerr << "Unknown exception!" << std::endl
2446 *             << "Aborting!" << std::endl
2447 *             << "----------------------------------------------------"