455 * <a name="Functions.cc-Functionscc"></a>
456 * <h3>Functions.cc</h3>
457 * In this file we keep the right hand side function, the Dirichlet boundary
458 * conditions, and the solution to our Poisson equation problem. Since
459 * these classes and functions have been discussed extensively in
460 * the deal.ii tutorials we won't discuss them any further.
463 * #include <deal.II/base/function.h>
464 * #include <deal.II/base/tensor_function.h>
465 * #include <deal.II/lac/vector.h>
468 * using namespace dealii;
471 * class RightHandSide : public Function<dim>
474 * RightHandSide() : Function<dim>(1)
477 * virtual double value(const Point<dim> &p,
478 * const unsigned int component = 0 ) const override;
482 * class DirichletBoundaryValues : public Function<dim>
485 * DirichletBoundaryValues() : Function<dim>(1)
488 * virtual double value(const Point<dim> &p,
489 * const unsigned int component = 0 ) const override;
493 * class TrueSolution : public Function<dim>
496 * TrueSolution() : Function<dim>(dim+1)
499 * virtual void vector_value(const Point<dim> &p,
500 * Vector<double> &values) const override;
505 * RightHandSide<dim>::
506 * value(const Point<dim> &p,
507 * const unsigned int ) const
509 * const double x = p[0];
510 * const double y = p[1];
511 * return 4*M_PI*M_PI*(cos(2*M_PI*y) - sin(2*M_PI*x));
517 * DirichletBoundaryValues<dim>::
518 * value(const Point<dim> &p,
519 * const unsigned int ) const
521 * const double x = p[0];
522 * const double y = p[1];
523 * return cos(2*M_PI*y) -sin(2*M_PI*x) - x;
529 * TrueSolution<dim>::
530 * vector_value(const Point<dim> &p,
531 * Vector<double> &values) const
533 * Assert(values.size() == dim+1,
534 * ExcDimensionMismatch(values.size(), dim+1) );
536 * const double x = p[0];
537 * const double y = p[1];
539 * values(0) = 1 + 2*M_PI*cos(2*M_PI*x);
540 * values(1) = 2*M_PI*sin(2*M_PI*y);
542 * values(2) = cos(2*M_PI*y) - sin(2*M_PI*x) - x;
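As a quick consistency check (not part of the original file), these three definitions fit together through the mixed form of the Poisson problem used in this program, in which the vector field is @f$\textbf{q} = -\nabla u@f$ and the right hand side is @f$f = -\Delta u@f$:

@f{align*}{
u(x,y) &= \cos(2\pi y) - \sin(2\pi x) - x, \\
-\nabla u &= \left(\begin{array}{c} 2\pi\cos(2\pi x) + 1 \\ 2\pi\sin(2\pi y) \end{array}\right), \\
-\Delta u &= 4\pi^{2}\bigl(\cos(2\pi y) - \sin(2\pi x)\bigr),
@f}

which matches the components returned by <code>TrueSolution::vector_value</code> and the value returned by <code>RightHandSide::value</code> above.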
547<a name="ann-LDGPoisson.cc"></a>
548<h1>Annotated version of LDGPoisson.cc</h1>
554 * /* -----------------------------------------------------------------------------
556 * * SPDX-License-Identifier: LGPL-2.1-or-later
557 * * Copyright (C) 2017 by Michael Harmon
559 * * This file is part of the deal.II code gallery.
561 * * -----------------------------------------------------------------------------
567 * <a name="LDGPoisson.cc-LDGPoissoncc"></a>
568 * <h3>LDGPoisson.cc</h3>
569 * The code begins as per usual with a long list of the included
570 * files from the deal.ii library.
573 * #include <deal.II/base/quadrature_lib.h>
574 * #include <deal.II/base/logstream.h>
575 * #include <deal.II/base/function.h>
576 * #include <deal.II/base/timer.h>
578 * #include <deal.II/lac/full_matrix.h>
579 * #include <deal.II/lac/sparse_matrix.h>
582 * #include <deal.II/grid/tria.h>
583 * #include <deal.II/grid/grid_generator.h>
584 * #include <deal.II/grid/grid_tools.h>
585 * #include <deal.II/grid/tria_accessor.h>
586 * #include <deal.II/grid/tria_iterator.h>
588 * #include <deal.II/dofs/dof_handler.h>
589 * #include <deal.II/dofs/dof_renumbering.h>
590 * #include <deal.II/dofs/dof_accessor.h>
591 * #include <deal.II/dofs/dof_tools.h>
594 * #include <iostream>
598 * Here's where the classes for the DG methods begin.
599 * We can use either the Lagrange polynomials,
602 * #include <deal.II/fe/fe_dgq.h>
605 * or the Legendre polynomials
608 * #include <deal.II/fe/fe_dgp.h>
611 * as basis functions. I'll be using the Lagrange polynomials.
614 * #include <deal.II/fe/fe_system.h>
615 * #include <deal.II/fe/fe_values.h>
617 * #include <deal.II/numerics/vector_tools.h>
618 * #include <deal.II/numerics/matrix_tools.h>
619 * #include <deal.II/numerics/data_out.h>
624 * Now we have to load in the deal.ii files that will allow us to use
625 * a distributed computing framework.
628 * #include <deal.II/base/utilities.h>
629 * #include <deal.II/base/index_set.h>
630 * #include <deal.II/base/conditional_ostream.h>
631 * #include <deal.II/lac/sparsity_tools.h>
632 * #include <deal.II/distributed/tria.h>
637 * Additionally we load the files that will allow us to interact with
638 * the Trilinos library.
641 * #include <deal.II/lac/trilinos_sparse_matrix.h>
642 * #include <deal.II/lac/trilinos_vector.h>
643 * #include <deal.II/lac/trilinos_solver.h>
648 * The functions class contains all the definitions of the functions we
649 * will use, i.e. the right hand side function, the boundary conditions
650 * and the test functions.
653 * #include "Functions.cc"
655 * using namespace dealii;
660 * Here is the main class for the Local Discontinuous Galerkin method
661 * applied to Poisson's equation. We won't explain much of
662 * the class and method declarations, but dive deeper into describing the
663 * functions when they are defined. The only thing I will mention
664 * about the class declaration is that this is where I labeled
665 * the different types of boundaries using enums.
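The enum itself is not shown in the excerpt above, so here is a minimal sketch of what such labels look like (the names <code>Dirichlet</code> and <code>Neumann</code> are the ones used throughout the rest of the code; the exact form of the declaration is illustrative):

@code
// Hypothetical sketch of the boundary labels used in this program.
enum
{
  Dirichlet,
  Neumann
};

// A boundary face can then be tagged with one of these labels, e.g.
//   cell->face(face_no)->set_boundary_id(Dirichlet);
@endcode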
669 * class LDGPoissonProblem
673 * LDGPoissonProblem(const unsigned int degree,
674 * const unsigned int n_refine);
676 * ~LDGPoissonProblem();
686 * void assemble_system();
688 * void assemble_cell_terms(const FEValues<dim> &cell_fe,
689 * FullMatrix<double> &cell_matrix,
690 * Vector<double> &cell_vector);
692 * void assemble_Neumann_boundary_terms(const FEFaceValues<dim> &face_fe,
693 * FullMatrix<double> &local_matrix,
694 * Vector<double> &local_vector);
696 * void assemble_Dirichlet_boundary_terms(const FEFaceValues<dim> &face_fe,
697 * FullMatrix<double> &local_matrix,
698 * Vector<double> &local_vector,
701 * void assemble_flux_terms(const FEFaceValuesBase<dim> &fe_face_values,
702 * const FEFaceValuesBase<dim> &fe_neighbor_face_values,
703 * FullMatrix<double> &vi_ui_matrix,
704 * FullMatrix<double> &vi_ue_matrix,
705 * FullMatrix<double> &ve_ui_matrix,
706 * FullMatrix<double> &ve_ue_matrix,
709 * void distribute_local_flux_to_global(
710 * const FullMatrix<double> &vi_ui_matrix,
711 * const FullMatrix<double> &vi_ue_matrix,
712 * const FullMatrix<double> &ve_ui_matrix,
713 * const FullMatrix<double> &ve_ue_matrix,
714 * const std::vector<types::global_dof_index> &local_dof_indices,
715 * const std::vector<types::global_dof_index> &local_neighbor_dof_indices);
719 * void output_results() const;
721 * const unsigned int degree;
722 * const unsigned int n_refine;
733 * parallel::distributed::Triangulation<dim> triangulation;
735 * DoFHandler<dim> dof_handler;
737 * AffineConstraints<double> constraints;
739 * SparsityPattern sparsity_pattern;
741 * TrilinosWrappers::SparseMatrix system_matrix;
742 * TrilinosWrappers::MPI::Vector locally_relevant_solution;
743 * TrilinosWrappers::MPI::Vector system_rhs;
745 * ConditionalOStream pcout;
746 * TimerOutput computing_timer;
748 * SolverControl solver_control;
749 * TrilinosWrappers::SolverDirect solver;
751 * const RightHandSide<dim> rhs_function;
752 * const DirichletBoundaryValues<dim> Dirichlet_bc_function;
753 * const TrueSolution<dim> true_solution;
760 * <a name="LDGPoisson.cc-Classconstructoranddestructor"></a>
761 * <h4>Class constructor and destructor</h4>
762 * The constructor and destructor for this class are very much
763 * like those for @ref step_40 "step-40". The difference is that we'll be passing
764 * in an integer, <code>degree</code>, which tells us the maximum order
765 * of the polynomial to use as well as <code>n_refine</code> which is the
766 * global number of times we refine our mesh. The other main differences
767 * are that we use a FESystem object for our choice of basis
768 * functions. This is reminiscent of the mixed finite element method in
769 * @ref step_20 "step-20", however, in our case we use a FESystem
781 * which tells us that the basis functions contain discontinuous polynomials
782 * of order <code>degree</code> in each of the <code>dim</code> dimensions
783 * for the vector field. For the scalar unknown we
784 * use a discontinuous polynomial of order <code>degree</code>.
785 * The LDG method for Poisson equations solves for both the primary variable
786 * as well as its gradient, just like the mixed finite element method.
787 * However, unlike the mixed method, the LDG method uses discontinuous
788 * polynomials to approximate both variables.
789 * The other difference between our constructor and that of @ref step_40 "step-40" is that
790 * we also instantiate our linear solver in the constructor definition.
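To make the choice of basis functions concrete, here is a minimal sketch (with an assumed polynomial degree of 1 and dimension 2) of the kind of FESystem the constructor builds: <code>dim</code> copies of a discontinuous FE_DGQ element for the vector field and one further copy for the scalar potential.

@code
#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_system.h>

void sketch_of_ldg_element()
{
  using namespace dealii;

  const unsigned int degree = 1;

  // Two copies of FE_DGQ for the vector field Q, one copy for the scalar u.
  FESystem<2> fe(FE_DGQ<2>(degree), 2,
                 FE_DGQ<2>(degree), 1);
}
@endcode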
794 * LDGPoissonProblem<dim>::
795 * LDGPoissonProblem(const unsigned int degree,
796 * const unsigned int n_refine)
799 * n_refine(n_refine),
809 * computing_timer(MPI_COMM_WORLD,
814 * solver(solver_control),
816 * Dirichlet_bc_function()
821 * LDGPoissonProblem<dim>::
822 * ~LDGPoissonProblem()
824 * dof_handler.clear();
830 * <a name="LDGPoisson.cc-Make_grid"></a>
831 * <h4>Make_grid</h4>
832 * This function shows how to make a grid using local
833 * refinement and also shows how to label the boundaries
834 * using the defined enum.
839 * LDGPoissonProblem<dim>::
845 * unsigned int local_refine = 2;
846 * for (unsigned int i = 0; i < local_refine; ++i)
854 * We loop over all the cells in the mesh
855 * and mark the appropriate cells for refinement.
856 * In this example we only choose cells which are
857 * near @f$x=0@f$ and @f$x=1@f$ in
858 * the domain. This was just to show that
859 * the LDG method is working with local
860 * refinement and discussions on building
861 * more realistic refinement strategies are
862 * discussed elsewhere in the deal.ii documentation.
866 * for (; cell != endc; ++cell)
868 * if ((cell->center()[1]) > 0.9)
870 * if ((cell->center()[0] > 0.9) || (cell->center()[0] < 0.1))
871 * cell->set_refine_flag();
876 * Now that we have marked all the cells
877 * that we want to refine locally, we can go ahead and
878 * refine them.
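For orientation, here is a condensed sketch of the grid workflow this function performs, under the assumption that the domain is the unit square (the member names <code>triangulation</code> and <code>n_refine</code> are the ones declared in the class above):

@code
#include <deal.II/grid/grid_generator.h>

// Build the coarse mesh, refine it globally n_refine times, and then
// execute whatever local refinement flags were set in the loop above.
GridGenerator::hyper_cube(triangulation, 0.0, 1.0);
triangulation.refine_global(n_refine);

// ... set refine flags on selected cells as shown above ...
triangulation.execute_coarsening_and_refinement();
@endcode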
886 * To label the boundary faces of the mesh with their
887 * type, i.e. Dirichlet or Neumann,
888 * we loop over all the cells in the mesh and then over
889 * all the faces of each cell. We then have to figure out
890 * which faces are on the boundary and set the
891 * <code>boundary_id</code> of all boundary faces
892 * to <code>Dirichlet</code>.
893 * We remark that one could easily set more complicated
894 * conditions where there are both Dirichlet and
895 * Neumann boundaries.
901 * for (; cell != endc; ++cell)
903 * for (unsigned int face_no=0;
904 * face_no < GeometryInfo<dim>::faces_per_cell;
905 * ++face_no)
907 * if (cell->face(face_no)->at_boundary())
908 * cell->face(face_no)->set_boundary_id(Dirichlet);
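If one instead wanted a mix of boundary conditions, a hypothetical variation of the loop above could, for example, mark faces near @f$x=1@f$ as Neumann and every other boundary face as Dirichlet:

@code
// Illustrative only: split the boundary into a Neumann part (x close to 1)
// and a Dirichlet part (everything else).
typename parallel::distributed::Triangulation<dim>::active_cell_iterator
  cell = triangulation.begin_active(),
  endc = triangulation.end();

for (; cell != endc; ++cell)
  for (unsigned int face_no = 0;
       face_no < GeometryInfo<dim>::faces_per_cell;
       ++face_no)
    if (cell->face(face_no)->at_boundary())
      {
        if (cell->face(face_no)->center()[0] > 1.0 - 1e-12)
          cell->face(face_no)->set_boundary_id(Neumann);
        else
          cell->face(face_no)->set_boundary_id(Dirichlet);
      }
@endcode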
916 * <a name="LDGPoisson.cc-make_dofs"></a>
917 * <h4>make_dofs</h4>
918 * This function is responsible for distributing the degrees of
919 * freedom (dofs) to the processors and allocating memory for
920 * the global system matrix, <code>system_matrix</code>,
921 * and global right hand side vector, <code>system_rhs</code>.
922 * The dofs are the unknown coefficients for the polynomial
923 * approximation of our solution to Poisson's equation in the scalar
924 * variable and its gradient.
929 * LDGPoissonProblem<dim>::
932 * TimerOutput::Scope t(computing_timer, "setup");
936 * The first step to setting up our linear system is to
937 * distribute the degrees of freedom (dofs) across the processors,
938 * this is done with the <code>distribute_dofs()</code>
939 * method of the DoFHandler. We remark that while this is
940 * the same exact function call that occurs when using deal.ii
941 * on a single machine, the DoFHandler automatically knows
942 * that we are in a distributed setting because it was instantiated
943 * with a distributed triangulation!
946 * dof_handler.distribute_dofs(fe);
950 * We now renumber the dofs so that the vector of unknown dofs
951 * that we are solving for, <code>locally_relevant_solution</code>,
952 * corresponds to a vector of the form,
956 * @f$ \left[\begin{matrix} \textbf{Q} \\ \textbf{U} \end{matrix}\right] @f$
959 * DoFRenumbering::component_wise(dof_handler);
963 * Now we get the locally owned dofs, that is the dofs that are local
964 * to this processor. These dofs have corresponding entries in the
965 * matrix and vectors that we will write to.
968 * const IndexSet &locally_owned_dofs = dof_handler.locally_owned_dofs();
972 * In addition to the locally owned dofs, we also need the locally
973 * relevant dofs. These are the dofs that we have read access to and
974 * need in order to do computations on our processor, but that
975 * we do not have the ability to write to.
978 * const IndexSet locally_relevant_dofs =
979 * DoFTools::extract_locally_relevant_dofs(dof_handler);
981 * const std::vector<types::global_dof_index> dofs_per_component =
982 * DoFTools::count_dofs_per_fe_component(dof_handler);
986 * Discontinuous Galerkin methods are fantastic methods in part because
987 * many of the limitations of traditional finite element methods no longer
988 * exist. Specifically, the need to use constraint matrices
989 * in order to handle hanging nodes is no longer necessary. However,
990 * we will continue to use the constraint matrices in order to efficiently
991 * distribute local computations to the global system, i.e. to the
992 * <code>system_matrix</code> and <code>system_rhs</code>. Therefore, we
993 * just instantiate the constraints matrix object, clear and close it.
996 * constraints.clear();
997 * constraints.close();
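Because the container is empty, the closed AffineConstraints object acts as a plain scatter operation during assembly; a minimal sketch of the idea, using the local objects <code>local_matrix</code> and <code>local_dof_indices</code> that appear later in this program:

@code
// With no constraints stored, this call simply adds the entries of
// local_matrix into system_matrix at the rows/columns given by
// local_dof_indices, exactly like an element-by-element add.
constraints.distribute_local_to_global(local_matrix,
                                       local_dof_indices,
                                       system_matrix);
@endcode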
1002 * Just like @ref step_40 "step-40" we create a dynamic sparsity pattern
1003 * and distribute it to the processors. Notice how we do not have to
1004 * explicitly mention that we are using a FESystem for a system of
1005 * variables instead of a FE_DGQ for a scalar variable
1006 * or that we are using a distributed DoFHandler. All these specifics
1007 * are taken care of under the hood by the deal.ii library.
1008 * In order to build the sparsity
1009 * pattern we use the DoFTools::make_flux_sparsity_pattern function
1010 * since we are using a DG method and need to take into account the DG
1011 * fluxes in the sparsity pattern.
1014 * DynamicSparsityPattern dsp(dof_handler.n_dofs());
1015 * DoFTools::make_flux_sparsity_pattern(dof_handler,
1018 * SparsityTools::distribute_sparsity_pattern(dsp,
1019 * dof_handler.locally_owned_dofs(),
1021 * locally_relevant_dofs);
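Some arguments of the two calls above are elided in this excerpt; a minimal sketch of the complete sequence, assuming <code>MPI_COMM_WORLD</code> is the communicator used throughout, might look like this:

@code
DynamicSparsityPattern dsp(dof_handler.n_dofs());
DoFTools::make_flux_sparsity_pattern(dof_handler, dsp);

SparsityTools::distribute_sparsity_pattern(dsp,
                                           dof_handler.locally_owned_dofs(),
                                           MPI_COMM_WORLD,
                                           locally_relevant_dofs);
@endcode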
1025 * Here is one area that I had to learn the hard way. The local
1026 * discontinuous Galerkin method like the mixed method with the
1027 * Raviart-Thomas element is written
1028 * in mixed form and will lead to a block-structured matrix.
1029 * In @ref step_20 "step-20" we see that we initialize the
1030 * <code>system_matrix</code>
1031 * such that we explicitly declare it to be block-structured.
1032 * It turns out there are reasons to do this when you are going to be
1033 * using a Schur complement method to solve the system of equations.
1034 * While the LDG method will lead to a block-structured matrix,
1035 * we do not have to explicitly declare our matrix to be one.
1036 * I found that most of the distributed linear solvers did not
1037 * accept block structured matrices and since I was using a
1038 * distributed direct solver it was unnecessary to explicitly use a
1039 * block structured matrix.
1042 * system_matrix.reinit(locally_owned_dofs,
1043 * locally_owned_dofs,
1049 * The final note that I will make about this subroutine is that
1050 * we initialize this processor's solution and the
1051 * right hand side vector the exact same way as we did in @ref step_40 "step-40".
1052 * We should note that the <code>locally_relevant_solution</code> solution
1053 * vector includes dofs that are locally relevant to our computations
1054 * while the <code>system_rhs</code> right hand side vector will only
1055 * include dofs that are locally owned by this processor.
1058 * locally_relevant_solution.reinit(locally_relevant_dofs,
1061 * system_rhs.reinit(locally_owned_dofs,
1062 * locally_relevant_dofs,
1066 * const unsigned int n_vector_field = dim * dofs_per_component[0];
1067 * const unsigned int n_potential = dofs_per_component[dim];
1069 * pcout << "Number of active cells : "
1070 * << triangulation.n_global_active_cells()
1072 * << "Number of degrees of freedom: "
1073 * << dof_handler.n_dofs()
1074 * << " (" << n_vector_field << " + " << n_potential << ")"
1081 * <a name="LDGPoisson.cc-assemble_system"></a>
1082 * <h4>assemble_system</h4>
1083 * This is the function that will assemble the global system matrix and
1084 * global right hand side vector for the LDG method. It starts out
1085 * like many of the deal.ii tutorial codes: declaring quadrature formulas
1086 * and UpdateFlags objects, as well as vectors that will hold the
1087 * dof indices for the cells we are working on in the global system.
1090 * template <int dim>
1092 * LDGPoissonProblem<dim>::
1095 * TimerOutput::Scope t(computing_timer, "assembly");
1097 * QGauss<dim> quadrature_formula(fe.degree+1);
1098 * QGauss<dim-1> face_quadrature_formula(fe.degree+1);
1100 * const UpdateFlags update_flags = update_values
1101 * | update_gradients
1102 * | update_quadrature_points
1103 * | update_JxW_values;
1105 * const UpdateFlags face_update_flags = update_values
1106 * | update_normal_vectors
1107 * | update_quadrature_points
1108 * | update_JxW_values;
1110 * const unsigned int dofs_per_cell = fe.dofs_per_cell;
1111 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1112 * std::vector<types::global_dof_index>
1113 * local_neighbor_dof_indices(dofs_per_cell);
1117 * We first remark that we have the FEValues objects for
1118 * the values of our cell basis functions as was done in most
1119 * other examples. Now because we
1120 * are using discontinuous Galerkin methods we also introduce a
1121 * FEFaceValues object, <code>fe_face_values</code>,
1122 * for evaluating the basis functions
1123 * on one side of an element face as well as another FEFaceValues object,
1124 * <code>fe_neighbor_face_values</code>, for evaluating the basis functions
1125 * on the opposite side of the face, i.e. on the neighboring element's face.
1126 * In addition, we introduce a FESubfaceValues object,
1127 * <code>fe_subface_values</code>, that
1128 * will be used for dealing with faces that have multiple refinement
1129 * levels, i.e. hanging nodes. When we have to evaluate the fluxes across
1130 * a face that has multiple refinement levels, we need to evaluate the
1131 * fluxes across all its children's faces; we'll explain
1132 * this more when we get there.
1135 * FEValues<dim> fe_values(fe, quadrature_formula, update_flags);
1137 * FEFaceValues<dim> fe_face_values(fe, face_quadrature_formula,
1138 * face_update_flags);
1140 * FEFaceValues<dim> fe_neighbor_face_values(fe,
1141 * face_quadrature_formula,
1142 * face_update_flags);
1144 * FESubfaceValues<dim> fe_subface_values(fe, face_quadrature_formula,
1145 * face_update_flags);
1149 * Here are the local (dense) matrix and right hand side vector for
1150 * the solid integrals as well as the integrals on the boundaries in the
1151 * local discontinuous Galerkin method. These terms will be built for
1152 * each local element in the mesh and then distributed to the global
1153 * system matrix and right hand side vector.
1161 * The next four matrices are used to incorporate the flux integrals across
1162 * interior faces of the mesh:
1171 * As explained in the section on the LDG method we take our test
1172 * function to be v and multiply it on the left side of our differential
1173 * equation that is on u and perform integration by parts as explained in the
1174 * introduction. Using this notation for test and solution function,
1175 * the matrices below will then stand for:
1179 * <code>vi_ui</code> - Taking the value of the test function from
1180 * interior of this cell's face and the solution function
1181 * from the interior of this cell.
1185 * <code>vi_ue</code> - Taking the value of the test function from
1186 * interior of this cell's face and the solution function
1187 * from the exterior of this cell.
1191 * <code>ve_ui</code> - Taking the value of the test function from
1192 * exterior of this cell's face and the solution function
1193 * from the interior of this cell.
1197 * <code>ve_ue</code> - Taking the value of the test function from
1198 * exterior of this cell's face and the solution function
1199 * from the exterior of this cell.
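For reference, the local objects described above are plain dense matrices and a vector sized by the number of dofs per cell; a sketch of their declarations (the names are the ones used throughout this function, the sizes follow from <code>dofs_per_cell</code> defined above):

@code
FullMatrix<double> local_matrix(dofs_per_cell, dofs_per_cell);
Vector<double>     local_vector(dofs_per_cell);

// The four flux matrices couple dofs of the current cell ("i"/interior)
// and of the neighboring cell ("e"/exterior) across a shared face.
FullMatrix<double> vi_ui_matrix(dofs_per_cell, dofs_per_cell);
FullMatrix<double> vi_ue_matrix(dofs_per_cell, dofs_per_cell);
FullMatrix<double> ve_ui_matrix(dofs_per_cell, dofs_per_cell);
FullMatrix<double> ve_ue_matrix(dofs_per_cell, dofs_per_cell);
@endcode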
1203 * Now that we have gotten the preliminaries out of the way,
1204 * we loop over all the cells
1205 * and assemble the local system matrix and local right hand side vector.
1209 * typename DoFHandler<dim>::active_cell_iterator
1210 * cell = dof_handler.begin_active(),
1211 * endc = dof_handler.end();
1213 * for (; cell!=endc; ++cell)
1217 * Now, since we are working in a distributed setting,
1218 * we can only work on cells and write to dofs in the
1219 * <code>system_matrix</code>
1220 * and <code>rhs_vector</code>
1221 * that correspond to cells that are locally owned
1222 * by this processor. We note that while we can only write to locally
1223 * owned dofs, we will still use information from cells that are
1224 * locally relevant. This is very much the same as in @ref step_40 "step-40".
1227 * if (cell->is_locally_owned())
1231 * We first assemble the portion of the local system
1232 * that includes the solid integrals in the LDG method as well as
1233 * the right hand side vector. This involves resetting the local
1234 * matrix and vector to contain all zeros, reinitializing the
1235 * FEValues object for this cell and then building the
1236 * <code>local_matrix</code> and <code>local_rhs</code> vector.
1242 * fe_values.reinit(cell);
1243 * assemble_cell_terms(fe_values,
1249 * We remark that we need to get the local indices for the dofs
1250 * to this cell before we begin to compute the contributions
1251 * from the numerical fluxes, i.e. the boundary conditions and
1252 * the interior fluxes.
1255 * cell->get_dof_indices(local_dof_indices);
1259 * Now is where we start to loop over all the faces of the cell
1260 * and construct the local contributions from the numerical fluxes.
1261 * The numerical fluxes will be due to 3 contributions: the
1262 * interior faces, the faces on the Neumann boundary and the faces
1263 * on the Dirichlet boundary. We instantiate a
1264 * <code>face_iterator</code> to loop
1265 * over all the faces of this cell and first see if the face is on
1266 * the boundary. Notice how we do not reinitialize the
1267 * <code>fe_face_values</code>
1268 * object for the face until we know that we are actually on a face
1269 * that lies on the boundary of the domain. The reason for doing this
1270 * is for computational efficiency; reinitializing the FEFaceValues
1271 * for each face is expensive and we do not want to do it unless we
1272 * are actually going to use it to do computations. After this, we test
1273 * whether the face
1274 * is on a Dirichlet or a Neumann segment of the boundary and
1275 * call the appropriate subroutine to assemble the contributions for
1276 * that boundary. Note that this assembles the flux contribution
1277 * in the <code>local_matrix</code> as well as the boundary
1278 * condition that ends up
1279 * in the <code>local_vector</code>.
1282 * for (unsigned int face_no=0;
1283 * face_no < GeometryInfo<dim>::faces_per_cell;
1284 * ++face_no)
1286 * typename DoFHandler<dim>::face_iterator face =
1287 * cell->face(face_no);
1289 * if (face->at_boundary())
1291 * fe_face_values.reinit(cell, face_no);
1293 * if (face->boundary_id() == Dirichlet)
1297 * Here notice that in order to assemble the
1298 * flux due to the penalty term for the
1299 * Dirichlet boundary condition we need the cell diameter, and we compute
1301 * that value for this specific cell with <code>cell->diameter()</code>.
1305 * double h = cell->diameter();
1306 * assemble_Dirichlet_boundary_terms(fe_face_values,
1311 * else if (face->boundary_id() == Neumann)
1313 * assemble_Neumann_boundary_terms(fe_face_values,
1318 * Assert(false, ExcNotImplemented());
1324 * At this point we know that the face we are on is an
1325 * interior face and we need to build the interior
1326 * flux matrices, but first we want to make sure that the
1327 * neighbor cell to this face is a valid cell. Once we know
1328 * that the neighbor is a valid cell then we also want to get
1329 * the neighbor cell that shares this cell's face.
1335 * Assert(cell->neighbor(face_no).state() ==
1336 * IteratorState::valid,
1337 * ExcInternalError());
1339 * typename DoFHandler<dim>::cell_iterator neighbor =
1340 * cell->neighbor(face_no);
1344 * Now that we have the two cells whose face we want to
1345 * compute the numerical flux across, we need to know
1346 * if the face has been refined, i.e. if it has children
1347 * faces. This occurs when one of the cells has a
1348 * different level of refinement than
1349 * the other cell. If this is the case, then this face
1350 * has a different level of refinement than the other faces
1351 * of the cell, i.e. on this face there is a hanging node.
1352 * Hanging nodes are not a problem in DG methods, the only
1353 * time we have to watch out for them is at this step
1354 * and as you will see the changes we have to make
1355 * to our code are minimal.
1358 * if (face->has_children())
1362 * We now need to find the face of our neighbor cell
1363 * such that neighbor(neigh_face_no) = cell(face_no).
1366 * const unsigned int neighbor_face_no =
1367 * cell->neighbor_of_neighbor(face_no);
1371 * Once we do this we then have to loop over all the
1372 * subfaces (children faces) of our cell's face and
1373 * compute the interior fluxes across the children faces
1374 * and the neighbor's face.
1377 * for (unsigned int subface_no=0;
1378 * subface_no < face->n_children();
1379 * ++subface_no)
1383 * We then get the neighbor cell's subface that
1384 * matches our cell face's subface and the
1385 * specific subface number. We assert that the parent
1386 * face cannot be more than one level of
1387 * refinement above the child's face. This is
1388 * because the deal.ii library does not allow
1389 * neighboring cells to have refinement levels
1390 * that are more than one level in difference.
1394 * cell->neighbor_child_on_subface(face_no,
1397 * Assert(!neighbor_child->has_children(),
1398 * ExcInternalError());
1402 * Now that we are ready to build the local flux
1403 * matrices for this face we reset them to zero and
1404 * reinitialize the <code>fe_subface_values</code> and
1405 * <code>fe_neighbor_face_values</code>
1406 * objects on this cell's subface and on
1407 * <code>neighbor_child</code>'s face,
1409 * i.e. on the appropriate faces.
1417 * fe_subface_values.reinit(cell, face_no, subface_no);
1418 * fe_neighbor_face_values.reinit(neighbor_child,
1419 * neighbor_face_no);
1423 * In addition, we get the minimum of diameters of
1424 * the two cells to include in the penalty term
1427 * double h = std::min(cell->diameter(),
1428 * neighbor_child->diameter());
1432 * We now finally assemble the interior fluxes for
1433 * the case of a face which has been refined using
1434 * exactly the same subroutine as we do when both
1435 * cells have the same refinement level.
1438 * assemble_flux_terms(fe_subface_values,
1439 * fe_neighbor_face_values,
1448 * Now all that is left to be done before distributing
1449 * the local flux matrices to the global system
1450 * is to get the neighbor child's dof indices.
1453 * neighbor_child->get_dof_indices(local_neighbor_dof_indices);
1457 * Once we have this cell's dof indices and the
1458 * neighboring cell's dof indices we can use the
1459 * ConstraintMatrix to distribute the local flux
1460 * matrices to the global system matrix.
1461 * This is done through the class function
1462 * <code>distribute_local_flux_to_global()</code>.
1465 * distribute_local_flux_to_global(
1470 * local_dof_indices,
1471 * local_neighbor_dof_indices);
1478 * At this point we know that this cell and the neighbor
1479 * of this cell are on the same refinement level and
1480 * the work to assemble the interior flux matrices
1481 * is very much the same as before. In fact it is
1482 * much simpler since we do not have to loop through the
1483 * subfaces. However, we have to check that we do
1484 * not compute the same contribution twice. This would
1485 * happen because we are looping over all the faces of
1486 * all the cells in the mesh
1487 * and assembling the interior flux matrices for each face.
1488 * To avoid assembling the interior flux matrices
1489 * twice we only compute the
1490 * interior fluxes once for each face by restricting that
1491 * the following computation only occurs on
1492 * the cell face with the lower CellId.
1495 * if (neighbor->level() == cell->level() &&
1496 * cell->id() < neighbor->id())
1500 * Here we find the neighbor face such that
1501 * neighbor(neigh_face_no) = cell(face_no).
1502 * In addition, we reinitialize the FEFaceValues
1503 * objects on the
1504 * respective cells' faces, as well as get the
1505 * minimum diameter of this cell
1506 * and the neighbor cell and assign
1507 * it to <code>h</code>.
1510 * const unsigned int neighbor_face_no =
1511 * cell->neighbor_of_neighbor(face_no);
1518 * fe_face_values.reinit(cell, face_no);
1519 * fe_neighbor_face_values.reinit(neighbor,
1520 * neighbor_face_no);
1522 * double h = std::min(cell->diameter(),
1523 * neighbor->diameter());
1527 * Just as before we assemble the interior fluxes
1528 * using the
1529 * <code>assemble_flux_terms</code> subroutine,
1530 * get the neighbor cell's
1531 * face dof indices and use the constraint matrix to
1532 * distribute the local flux matrices to the global
1533 * <code>system_matrix</code> using the class
1534 * function
1535 * <code>distribute_local_flux_to_global()</code>.
1538 * assemble_flux_terms(fe_face_values,
1539 * fe_neighbor_face_values,
1546 * neighbor->get_dof_indices(local_neighbor_dof_indices);
1548 * distribute_local_flux_to_global(
1553 * local_dof_indices,
1554 * local_neighbor_dof_indices);
1565 * Now that we have looped over all the faces for this
1566 * cell and computed as well as distributed the local
1567 * flux matrices to the <code>system_matrix</code>, we
1568 * can finally distribute the cell's <code>local_matrix</code>
1569 * and <code>local_vector</code> contribution to the
1570 * global system matrix and global right hand side vector.
1571 * We remark that we have to wait until this point
1572 * to distribute the <code>local_matrix</code>
1573 * and <code>system_rhs</code> to the global system.
1574 * The reason being that in looping over the faces
1575 * the faces on the boundary of the domain contribute
1576 * to the <code>local_matrix</code>
1577 * and <code>system_rhs</code>. We could distribute
1578 * the local contributions for each component separately,
1579 * but writing to the distributed sparse matrix and vector
1580 * is expensive and we want to minimize the number of times
1581 * we do so.
1584 * constraints.distribute_local_to_global(local_matrix,
1585 * local_dof_indices,
1588 * constraints.distribute_local_to_global(local_vector,
1589 * local_dof_indices,
1597 * We need to synchronize assembly of our global system
1598 * matrix and global right hand side vector with all the other
1599 * processors and
1600 * use the compress() function to do this.
1601 * This was discussed in detail in @ref step_40 "step-40".
1604 * system_matrix.compress(VectorOperation::add);
1605 * system_rhs.compress(VectorOperation::add);
1612 * <a name="LDGPoisson.cc-assemble_cell_terms"></a>
1613 * <h4>assemble_cell_terms</h4>
1614 * This function deals with constructing the local matrix due to
1615 * the solid integrals over each element and is very similar to
1616 * the other examples in the deal.ii tutorials.
1621 * LDGPoissonProblem<dim>::
1622 * assemble_cell_terms(
1623 * const FEValues<dim> &cell_fe,
1624 * FullMatrix<double> &cell_matrix,
1625 * Vector<double> &cell_vector)
1627 * const unsigned int dofs_per_cell = cell_fe.dofs_per_cell;
1628 * const unsigned int n_q_points = cell_fe.n_quadrature_points;
1630 * const FEValuesExtractors::Vector VectorField(0);
1631 * const FEValuesExtractors::Scalar Potential(dim);
1633 * std::vector<double> rhs_values(n_q_points);
1637 * We first get the value of the right hand side function
1638 * evaluated at the quadrature points in the cell.
1641 * rhs_function.value_list(cell_fe.get_quadrature_points(),
1646 * Now, we loop over the quadrature points in the
1647 * cell and then loop over the degrees of freedom and perform
1648 * quadrature to approximate the integrals.
1651 * for (unsigned int q=0; q<n_q_points; ++q)
1653 * for (unsigned int i=0; i<dofs_per_cell; ++i)
1655 * const Tensor<1, dim> psi_i_field = cell_fe[VectorField].value(i,q);
1656 * const double div_psi_i_field = cell_fe[VectorField].divergence(i,q);
1657 * const double psi_i_potential = cell_fe[Potential].value(i,q);
1658 * const Tensor<1, dim> grad_psi_i_potential = cell_fe[Potential].gradient(i,q);
1660 * for (unsigned int j=0; j<dofs_per_cell; ++j)
1662 * const Tensor<1, dim> psi_j_field = cell_fe[VectorField].value(j,q);
1663 * const double psi_j_potential = cell_fe[Potential].value(j,q);
1667 * This computation corresponds to assembling the local system
1668 * matrix for the integral over an element,
1672 * @f$\int_{\Omega_{e}} \left(\textbf{w} \cdot \textbf{q}
1673 * - \nabla \cdot \textbf{w} u
1674 * - \nabla w \cdot \textbf{q}
1675 * \right) dx @f$
1678 * cell_matrix(i,j) += ( (psi_i_field * psi_j_field)
1679 * -
1680 * (div_psi_i_field * psi_j_potential)
1681 * -
1682 * (grad_psi_i_potential * psi_j_field)
1683 * ) * cell_fe.JxW(q);
1688 * And this local right hand side vector corresponds to the integral
1689 * over the element cell,
1693 * @f$ \int_{\Omega_{e}} w \, f(\textbf{x}) \, dx @f$
1696 * cell_vector(i) += psi_i_potential *
1697 * rhs_values[q] *
1698 * cell_fe.JxW(q);
1706 * <a name="LDGPoisson.cc-assemble_Dirichlet_boundary_terms"></a>
1707 * <h4>assemble_Dirichlet_boundary_terms</h4>
1708 * Here we have the function that builds the <code>local_matrix</code>
1710 * and local right hand side vector, <code>local_vector</code>
1711 * for the Dirichlet boundary conditions.
1716 * LDGPoissonProblem<dim>::
1717 * assemble_Dirichlet_boundary_terms(
1718 * const FEFaceValues<dim> &face_fe,
1719 * FullMatrix<double> &local_matrix,
1720 * Vector<double> &local_vector,
1723 * const unsigned int dofs_per_cell = face_fe.dofs_per_cell;
1724 * const unsigned int n_q_points = face_fe.n_quadrature_points;
1726 * const FEValuesExtractors::Vector VectorField(0);
1727 * const FEValuesExtractors::Scalar Potential(dim);
1729 * std::vector<double> Dirichlet_bc_values(n_q_points);
1733 * In order to evaluate the flux on the Dirichlet boundary face we
1734 * first get the value of the Dirichlet boundary function on the quadrature
1735 * points of the face. Then we loop over all the quadrature points and
1736 * degrees of freedom and approximate the integrals on the Dirichlet boundary.
1740 * Dirichlet_bc_function.value_list(face_fe.get_quadrature_points(),
1741 * Dirichlet_bc_values);
1743 * for (unsigned int q=0; q<n_q_points; ++q)
1745 * for (unsigned int i=0; i<dofs_per_cell; ++i)
1747 * const Tensor<1, dim> psi_i_field = face_fe[VectorField].value(i,q);
1748 * const double psi_i_potential = face_fe[Potential].value(i,q);
1750 * for (unsigned int j=0; j<dofs_per_cell; ++j)
1752 * const Tensor<1, dim> psi_j_field = face_fe[VectorField].value(j,q);
1753 * const double psi_j_potential = face_fe[Potential].value(j,q);
1757 * We compute the contribution for the flux @f$\widehat{q}@f$ on
1758 * the Dirichlet boundary which enters our system matrix as,
1762 * @f$ \int_{\text{face}} w \, ( \textbf{n} \cdot \textbf{q}
1763 * + \sigma u) ds @f$
1766 * local_matrix(i,j) += psi_i_potential * (
1767 * face_fe.normal_vector(q) *
1771 * psi_j_potential) *
1778 * We also compute the contribution for the flux for @f$\widehat{u}@f$
1779 * on the Dirichlet boundary which is the Dirichlet boundary
1780 * condition function and enters the right hand side vector as
1784 * @f$\int_{\text{face}} (-\textbf{w} \cdot \textbf{n}
1785 * + \sigma w) \, u_{D} ds @f$
1788 * local_vector(i) += (-1.0 * psi_i_field *
1789 * face_fe.normal_vector(q)
1792 * psi_i_potential) *
1793 * Dirichlet_bc_values[q] *
1802 * <a name="LDGPoisson.cc-assemble_Neumann_boundary_terms"></a>
1803 * <h4>assemble_Neumann_boundary_terms</h4>
1804 * Here we have the function that builds the <code>local_matrix</code>
1805 * and <code>local_vector</code> for the Neumann boundary conditions.
1810 * LDGPoissonProblem<dim>::
1811 * assemble_Neumann_boundary_terms(
1812 * const FEFaceValues<dim> &face_fe,
1813 * FullMatrix<double> &local_matrix,
1814 * Vector<double> &local_vector)
1816 * const unsigned int dofs_per_cell = face_fe.dofs_per_cell;
1817 * const unsigned int n_q_points = face_fe.n_quadrature_points;
1819 * const FEValuesExtractors::Vector VectorField(0);
1820 * const FEValuesExtractors::Scalar Potential(dim);
1824 * In order to evaluate the flux on the Neumann boundary face we
1825 * first get the value of the Neumann boundary function on the quadrature
1826 * points of the face. Then we loop over all the quadrature points and
1827 * degrees of freedom and approximate the integrals on the Neumann boundary.
1831 * std::vector<double > Neumann_bc_values(n_q_points);
1833 * for (unsigned int q=0; q<n_q_points; ++q)
1835 * for (unsigned int i=0; i<dofs_per_cell; ++i)
1837 * const Tensor<1, dim> psi_i_field = face_fe[VectorField].value(i,q);
1838 * const double psi_i_potential = face_fe[Potential].value(i,q);
1840 * for (unsigned int j=0; j<dofs_per_cell; ++j)
1843 * const double psi_j_potential = face_fe[Potential].value(j,q);
1847 * We compute the contribution for the flux @f$\widehat{u}@f$ on the
1848 * Neumann boundary which enters our system matrix as,
1852 * @f$\int_{\text{face}} \textbf{w} \cdot \textbf{n} \, u \, ds @f$
1855 * local_matrix(i,j) += psi_i_field *
1856 * face_fe.normal_vector(q) *
1864 * We also compute the contribution for the flux for
1865 * @f$\widehat{q}@f$ on the Neumann boundary which is the
1866 * Neumann boundary condition and enters the right
1867 * hand side vector as
1871 * @f$\int_{\text{face}} -w \, g_{N} \, ds@f$
1874 * local_vector(i) += -psi_i_potential *
1875 * Neumann_bc_values[q] *
1884 * <a name="LDGPoisson.cc-assemble_flux_terms"></a>
1885 * <h4>assemble_flux_terms</h4>
1886 * Now we finally get to the function which builds the interior fluxes.
1887 * This is a rather long function
1888 * and we will describe what is going on in detail.
1893 * LDGPoissonProblem<dim>::
1894 * assemble_flux_terms(
1895 * const FEFaceValuesBase<dim> &fe_face_values,
1896 * const FEFaceValuesBase<dim> &fe_neighbor_face_values,
1897 * FullMatrix<double> &vi_ui_matrix,
1898 * FullMatrix<double> &vi_ue_matrix,
1899 * FullMatrix<double> &ve_ui_matrix,
1900 * FullMatrix<double> &ve_ue_matrix,
1903 * const unsigned int n_face_points = fe_face_values.n_quadrature_points;
1904 * const unsigned int dofs_this_cell = fe_face_values.dofs_per_cell;
1905 * const unsigned int dofs_neighbor_cell = fe_neighbor_face_values.dofs_per_cell;
1907 * const FEValuesExtractors::Vector VectorField(0);
1908 * const FEValuesExtractors::Scalar Potential(dim);
1912 * The first thing we do after the boilerplate is to define
1913 * the unit vector @f$\boldsymbol \beta@f$ that is used in defining
1914 * the LDG/alternating fluxes.
1917 * Point<dim> beta;
1918 * for (int i=0; i<dim; ++i)
1919 * beta(i) = 1.0;
1920 * beta /= sqrt(beta.square() );
1924 * Now we loop over all the quadrature points on the element face
1925 * and loop over all the degrees of freedom and approximate
1926 * the following flux integrals.
1929 * for (unsigned int q=0; q<n_face_points; ++q)
1931 * for (unsigned int i=0; i<dofs_this_cell; ++i)
1933 * const Tensor<1,dim> psi_i_field_minus =
1934 * fe_face_values[VectorField].value(i,q);
1935 * const double psi_i_potential_minus =
1936 * fe_face_values[Potential].value(i,q);
1938 * for (unsigned int j=0; j<dofs_this_cell; ++j)
1940 * const Tensor<1,dim> psi_j_field_minus =
1941 * fe_face_values[VectorField].value(j,q);
1942 * const double psi_j_potential_minus =
1943 * fe_face_values[Potential].value(j,q);
1947 * We compute the flux matrix where the test function's
1948 * as well as the solution function's values are taken from
1949 * the interior of this cell's face. This corresponds to
1950 * the computation,
1953 * @f$\int_{\text{face}}
1954 * \left( \frac{1}{2} \, \textbf{n}^{-}
1955 * \cdot ( \textbf{w}^{-} u^{-}
1956 * + w^{-} \textbf{q}^{-})
1957 * + \boldsymbol \beta \cdot \textbf{w}^{-} u^{-}
1958 * - w^{-} \boldsymbol \beta \cdot \textbf{q}^{-}
1959 * + \sigma w^{-} \, u^{-} \right) ds@f$
1962 * vi_ui_matrix(i,j) += (0.5 * (
1963 * psi_i_field_minus *
1964 * fe_face_values.normal_vector(q) *
1965 * psi_j_potential_minus
1967 * psi_i_potential_minus *
1968 * fe_face_values.normal_vector(q) *
1969 * psi_j_field_minus )
1972 * psi_i_field_minus *
1973 * psi_j_potential_minus
1976 * psi_i_potential_minus *
1980 * psi_i_potential_minus *
1981 * psi_j_potential_minus
1983 * fe_face_values.JxW(q);
1986 * for (unsigned int j=0; j<dofs_neighbor_cell; ++j)
1988 * const Tensor<1,dim> psi_j_field_plus =
1989 * fe_neighbor_face_values[VectorField].value(j,q);
1990 * const double psi_j_potential_plus =
1991 * fe_neighbor_face_values[Potential].value(j,q);
1995 * We compute the flux matrix where the test function is
1996 * from the interior of this element's face and solution
1997 * function is taken from the exterior. This corresponds
1998 * to the computation,
2002 * @f$\int_{\text{face}}
2003 * \left( \frac{1}{2} \, \textbf{n}^{-} \cdot
2004 * ( \textbf{w}^{-} u^{+}
2005 * + w^{-} \textbf{q}^{+})
2006 * - \boldsymbol \beta \cdot \textbf{w}^{-} u^{+}
2007 * + w^{-} \boldsymbol \beta \cdot \textbf{q}^{+}
2008 * - \sigma w^{-} \, u^{+} \right) ds @f$
2011 * vi_ue_matrix(i,j) += ( 0.5 * (
2012 * psi_i_field_minus *
2013 * fe_face_values.normal_vector(q) *
2014 * psi_j_potential_plus
2016 * psi_i_potential_minus *
2017 * fe_face_values.normal_vector(q) *
2018 * psi_j_field_plus )
2021 * psi_i_field_minus *
2022 * psi_j_potential_plus
2025 * psi_i_potential_minus *
2029 * psi_i_potential_minus *
2030 * psi_j_potential_plus
2032 * fe_face_values.JxW(q);
2036 * for (unsigned int i=0; i<dofs_neighbor_cell; ++i)
2038 * const Tensor<1,dim> psi_i_field_plus =
2039 * fe_neighbor_face_values[VectorField].value(i,q);
2040 * const double psi_i_potential_plus =
2041 * fe_neighbor_face_values[Potential].value(i,q);
2043 * for (unsigned int j=0; j<dofs_this_cell; ++j)
2045 * const Tensor<1,dim> psi_j_field_minus =
2046 * fe_face_values[VectorField].value(j,q);
2047 * const double psi_j_potential_minus =
2048 * fe_face_values[Potential].value(j,q);
2053 * We compute the flux matrix where the test function is
2054 * from the exterior of this element's face and solution
2055 * function is taken from the interior. This corresponds
2056 * to the computation,
2060 * @f$ \int_{\text{face}}
2061 * \left( -\frac{1}{2}\, \textbf{n}^{-} \cdot
2062 * (\textbf{w}^{+} u^{-}
2063 * + w^{+} \textbf{q}^{-} )
2064 * - \boldsymbol \beta \cdot \textbf{w}^{+} u^{-}
2065 * + w^{+} \boldsymbol \beta \cdot \textbf{q}^{-}
2066 * - \sigma w^{+} u^{-} \right) ds @f$
2069 * ve_ui_matrix(i,j) += (-0.5 * (
2070 * psi_i_field_plus *
2071 * fe_face_values.normal_vector(q) *
2072 * psi_j_potential_minus
2074 * psi_i_potential_plus *
2075 * fe_face_values.normal_vector(q) *
2076 * psi_j_field_minus)
2079 * psi_i_field_plus *
2080 * psi_j_potential_minus
2083 * psi_i_potential_plus *
2087 * psi_i_potential_plus *
2088 * psi_j_potential_minus
2090 * fe_face_values.JxW(q);
2093 * for (unsigned int j=0; j<dofs_neighbor_cell; ++j)
2095 * const Tensor<1,dim> psi_j_field_plus =
2096 * fe_neighbor_face_values[VectorField].value(j,q);
2097 * const double psi_j_potential_plus =
2098 * fe_neighbor_face_values[Potential].value(j,q);
2102 * And lastly we compute the flux matrix where the test
2103 * function and solution function are taken from the exterior
2104 * cell to this face. This corresponds to the computation,
2108 * @f$\int_{\text{face}}
2109 * \left( -\frac{1}{2}\, \textbf{n}^{-} \cdot
2110 * ( \textbf{w}^{+} u^{+}
2111 * + w^{+} \textbf{q}^{+} )
2112 * + \boldsymbol \beta \cdot \textbf{w}^{+} u^{+}
2113 * - w^{+} \boldsymbol \beta \cdot \textbf{q}^{+}
2114 * + \sigma w^{+} u^{+} \right) ds @f$
2117 * ve_ue_matrix(i,j) += (-0.5 * (
2118 * psi_i_field_plus *
2119 * fe_face_values.normal_vector(q) *
2120 * psi_j_potential_plus
2122 * psi_i_potential_plus *
2123 * fe_face_values.normal_vector(q) *
2124 * psi_j_field_plus )
2127 * psi_i_field_plus *
2128 * psi_j_potential_plus
2131 * psi_i_potential_plus *
2135 * psi_i_potential_plus *
2136 * psi_j_potential_plus
2138 * fe_face_values.JxW(q);
2148 * <a name="LDGPoisson.cc-distribute_local_flux_to_global"></a>
2149 * <h4>distribute_local_flux_to_global</h4>
2150 * In this function we use the ConstraintMatrix to distribute
2151 * the local flux matrices to the global system matrix.
2152 * Since I have to do this twice in assembling the
2153 * system matrix, I made a function to do it rather than have
2154 * to repeat the same code.
2155 * We remark that the reader should take special note of
2156 * which matrices we are distributing and the order
2157 * in which we pass the dof indices vectors. In distributing
2158 * the first matrix, i.e. <code>vi_ui_matrix</code>, we are
2159 * taking the test function and solution function values from
2160 * the interior of this cell and therefore only need the
2161 * <code>local_dof_indices</code> since it contains the dof
2162 * indices to this cell. When we distribute the second matrix,
2163 * <code>vi_ue_matrix</code>, the test function is taken
2164 * from the interior of
2165 * this cell while the solution function is taken from the
2166 * exterior, i.e. the neighbor cell. Notice that the order of the
2167 * degrees of freedom index vectors match this pattern: first
2168 * the <code>local_dof_indices</code> which is local to
2169 * this cell and then
2170 * the <code>local_neighbor_dof_indices</code> which is
2171 * local to the neighbor's
2172 * cell. The order in which we pass the dof indices for the
2173 * matrices is paramount to constructing our global system
2174 * matrix properly. The ordering of the last two matrices
2175 * follows the same logic as the first two we discussed.
2180 * LDGPoissonProblem<dim>::
2181 * distribute_local_flux_to_global(
2186 * const std::vector<types::global_dof_index> &local_dof_indices,
2187 * const std::vector<types::global_dof_index> &local_neighbor_dof_indices)
2189 * constraints.distribute_local_to_global(vi_ui_matrix,
2190 * local_dof_indices,
2191 * system_matrix);
2193 * constraints.distribute_local_to_global(vi_ue_matrix,
2194 * local_dof_indices,
2195 * local_neighbor_dof_indices,
2196 * system_matrix);
2198 * constraints.distribute_local_to_global(ve_ui_matrix,
2199 * local_neighbor_dof_indices,
2200 * local_dof_indices,
2201 * system_matrix);
2203 * constraints.distribute_local_to_global(ve_ue_matrix,
2204 * local_neighbor_dof_indices,
2205 * system_matrix);
2213 * <a name="LDGPoisson.cc-solve"></a>
2214 * <h4>solve</h4>
2215 * As mentioned earlier I used a direct solver to solve
2216 * the linear system of equations resulting from the LDG
2217 * method applied to the Poisson equation. One could also
2218 * use an iterative solver; however, we then need to use
2219 * a preconditioner and that was something I did not want
2220 * to get into. For information on preconditioners
2221 * for the LDG Method see this
2222 * <a href="http://epubs.siam.org/doi/abs/10.1137/S1064827502410657">
2223 * paper</a>. The use of a direct solver here is
2224 * somewhat of a limitation. The built-in distributed
2225 * direct solver in Trilinos reduces everything to one
2226 * processor, solves the system and then distributes
2227 * everything back out to the other processors. However,
2228 * by linking to more advanced direct solvers through
2229 * Trilinos one can accomplish fully distributed computations
2230 * and not much about the following function calls will
2231 * change.
2236 * LDGPoissonProblem<dim>::
2243 * As in @ref step_40 "step-40", in order to perform a linear solve
2244 * we need a solution vector where there is no overlap across
2245 * the processors and we create this by instantiating the
2246 * <code>completely_distributed_solution</code> solution
2247 * vector using
2248 * the copy constructor on the global system right hand
2249 * side vector which itself is a completely distributed vector.
2252 * TrilinosWrappers::MPI::Vector
2253 * completely_distributed_solution(system_rhs);
2257 * Now we can perform the solve on the completely distributed
2258 * right hand side vector, system matrix and the completely
2259 * distributed solution.
2262 * solver.solve(system_matrix,
2263 * completely_distributed_solution,
2264 * system_rhs);
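For completeness, a sketch of how the direct solver pieces fit together, assuming the members <code>solver_control</code> and <code>solver</code> were set up in the constructor as suggested by the class declaration above:

@code
// A direct solver does not iterate, so the tolerances in SolverControl are
// essentially unused; the values below are placeholders.
SolverControl                  solver_control(1, 0);
TrilinosWrappers::SolverDirect solver(solver_control);

// Solve A x = b with a fully distributed (non-ghosted) solution vector.
TrilinosWrappers::MPI::Vector completely_distributed_solution(system_rhs);
solver.solve(system_matrix,
             completely_distributed_solution,
             system_rhs);
@endcode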
2268 * We now distribute the constraints of our system onto the
2269 * completely distributed solution vector, but in our case with the LDG
2270 * method there are none.
2273 * constraints.distribute(completely_distributed_solution);
2277 * Lastly we copy the completely distributed solution vector,
2278 * <code>completely_distributed_solution</code>,
2279 * to the solution vector which has some overlap between
2280 * processors, <code>locally_relevant_solution</code>.
2281 * We need the overlapped portions of our solution
2282 * in order to be able to do computations using the solution
2286 * locally_relevant_solution = completely_distributed_solution;
2292 * <a name="LDGPoisson.cc-output_results"></a>
2293 * <h4>output_results</h4>
2294 * This function deals with the writing of the results in parallel
2295 * to disk. It is almost exactly the same as
2296 * in @ref step_40 "step-40" and we won't go into it. It is noteworthy
2297 * that in @ref step_40 "step-40" the output is only the scalar solution,
2298 * while in our situation, we are outputting both the scalar
2299 * solution as well as the vector field solution. The only
2300 * difference between this function and the one in @ref step_40 "step-40"
2301 * is in the <code>solution_names</code> vector where we have to add
2302 * the gradient dimensions. Everything else is taken care
2303 * of by the deal.ii library!
2308 * LDGPoissonProblem<dim>::
2309 * output_results() const
2311 * std::vector<std::string> solution_names;
2315 * solution_names.push_back("u");
2316 * solution_names.push_back("du/dx");
2320 * solution_names.push_back("grad(u)_x");
2321 * solution_names.push_back("grad(u)_y");
2322 * solution_names.push_back("u");
2326 * solution_names.push_back("grad(u)_x");
2327 * solution_names.push_back("grad(u)_y");
2328 * solution_names.push_back("grad(u)_z");
2329 * solution_names.push_back("u");
2333 * Assert(false, ExcNotImplemented() );
2336 * DataOut<dim> data_out;
2337 * data_out.attach_dof_handler(dof_handler);
2338 * data_out.add_data_vector(locally_relevant_solution,
2341 * Vector<float> subdomain(triangulation.n_active_cells());
2343 * for (unsigned int i=0; i<subdomain.size(); ++i)
2344 * subdomain(i) = triangulation.locally_owned_subdomain();
2346 * data_out.add_data_vector(subdomain,"subdomain");
2348 * data_out.build_patches();
2350 * const std::string filename = ("solution." +
2351 * Utilities::int_to_string(
2352 * triangulation.locally_owned_subdomain(),4));
2354 * std::ofstream output((filename + ".vtu").c_str());
2355 * data_out.write_vtu(output);
2357 * if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0 )
2359 * std::vector<std::string> filenames;
2360 * for (unsigned int i=0;
2361 * i < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
2364 * filenames.push_back("solution." +
2365 * Utilities::int_to_string(i,4) +
2366 * ".vtu");
2368 * std::ofstream master_output("solution.pvtu");
2369 * data_out.write_pvtu_record(master_output, filenames);
2377 * <a name="LDGPoisson.cc-run"></a>
2378 * <h4>run</h4>
2379 * The only public function of this class is pretty much exactly
2380 * the same as all the other deal.ii examples except that I set
2381 * the constant in the DG penalty (@f$\tilde{\sigma}@f$) to be 1.
2386 * LDGPoissonProblem<dim>::
2392 * assemble_system();
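The body of <code>run()</code> is mostly elided in this excerpt; based on the member functions documented above, a plausible sketch of the call sequence is the following (the name of the member holding the penalty constant is an assumption here):

@code
template <int dim>
void LDGPoissonProblem<dim>::run()
{
  penalty = 1.0;        // assumed member storing the DG penalty constant
  make_grid();
  make_dofs();
  assemble_system();
  solve();
  output_results();
}
@endcode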
2401 * <a name="LDGPoisson.cc-main"></a>
2402 * <h3>main</h3>
2403 * Here is the main function of our program. Since it is nearly exactly
2404 * the same as @ref step_40 "step-40" and many of the other examples, I won't
2405 * elaborate on it.
2408 * int main(int argc, char *argv[])
2413 * using namespace dealii;
2420 * unsigned int degree = 1;
2421 * unsigned int n_refine = 6;
2422 * LDGPoissonProblem<2> Poisson(degree, n_refine);
2426 * catch (std::exception &exc)
2428 * std::cerr << std::endl << std::endl
2429 * << "----------------------------------------------------"
2431 * std::cerr << "Exception on processing: " << std::endl
2432 * << exc.what() << std::endl
2433 * << "Aborting!" << std::endl
2434 * << "----------------------------------------------------"
2441 * std::cerr << std::endl << std::endl
2442 * << "----------------------------------------------------"
2444 * std::cerr << "Unknown exception!" << std::endl
2445 * << "Aborting!" << std::endl
2446 * << "----------------------------------------------------"