step-3.h
766 *   , dof_handler(triangulation)
767 *   {}
768 *  
769 *  
770 * @endcode
771 *
772 *
773 * <a name="step_3-Step3make_grid"></a>
774 * <h4>Step3::make_grid</h4>
775 *
776
777 *
778 * Now, the first thing we've got to do is to generate the triangulation on
779 * which we would like to do our computation and number each vertex with a
780 * degree of freedom. We have seen these two steps in @ref step_1 "step-1" and @ref step_2 "step-2"
781 * before, respectively.
782 *
783
784 *
785 * This function does the first part, creating the mesh. We create the grid
786 * and refine all cells five times. Since the initial grid (which is the
787 * square @f$[-1,1] \times [-1,1]@f$) consists of only one cell, the final grid
788 * has 32 times 32 cells, for a total of 1024.
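 *
 * Written out: each global refinement step subdivides every cell into four
 * children, i.e., it doubles the number of cells in each coordinate
 * direction, so five steps turn the single initial cell into
 * @f[
 *   \left(2^5\right)^2 = 32 \times 32 = 1024
 * @f]
 * cells.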
789 *
790
791 *
792 * Unsure that 1024 is the correct number? We can check that by outputting the
793 * number of cells using the <code>n_active_cells()</code> function on the
794 * triangulation.
795 *
796 * @code
797 *   void Step3::make_grid()
798 *   {
799 *   GridGenerator::hyper_cube(triangulation, -1, 1);
800 *   triangulation.refine_global(5);
801 *  
802 *   std::cout << "Number of active cells: " << triangulation.n_active_cells()
803 *   << std::endl;
804 *   }
805 *  
806 * @endcode
807 *
808 * @note We call the Triangulation::n_active_cells() function, rather than
809 * Triangulation::n_cells(). Here, <i>active</i> means the cells that aren't
810 * refined any further. We stress the adjective "active" since there are more
811 * cells, namely the parent cells of the finest cells, their parents, etc, up
812 * to the one cell which made up the initial grid. Of course, on the next
813 * coarser level, the number of cells is one quarter that of the cells on the
814 * finest level, i.e. 256, then 64, 16, 4, and 1. If you called
815 * <code>triangulation.n_cells()</code> instead in the code above, you would
816 * consequently get a value of 1365 instead. On the other hand, the number of
817 * cells (as opposed to the number of active cells) is not typically of much
818 * interest, so there is no good reason to print it.
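 *
 * Put differently, the total number of cells is the geometric series summed
 * over all six levels of the mesh hierarchy,
 * @f[
 *   1 + 4 + 16 + 64 + 256 + 1024 = \sum_{\ell=0}^{5} 4^\ell = \frac{4^6-1}{3} = 1365.
 * @f]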
819 *
820
821 *
822 *
823
824 *
825 *
826 * <a name="step_3-Step3setup_system"></a>
827 * <h4>Step3::setup_system</h4>
828 *
829
830 *
831 * Next we enumerate all the degrees of freedom and set up matrix and vector
832 * objects to hold the system data. Enumerating is done by using
833 * DoFHandler::distribute_dofs(), as we have seen in the @ref step_2 "step-2" example. Since
834 * we use the FE_Q class and have set the polynomial degree to 1 in the
835 * constructor, i.e. bilinear elements, this associates one degree of freedom
836 * with each vertex. While we are generating output anyway, let us also take a look
837 * at how many degrees of freedom are generated:
838 *
839 * @code
840 *   void Step3::setup_system()
841 *   {
842 *   dof_handler.distribute_dofs(fe);
843 *   std::cout << "Number of degrees of freedom: " << dof_handler.n_dofs()
844 *   << std::endl;
845 * @endcode
846 *
847 * There should be one DoF for each vertex. Since we have a 32 times 32
848 * grid, the number of DoFs should be 33 times 33, or 1089.
849 *
850
851 *
852 * As we have seen in the previous example, we set up a sparsity pattern by
853 * first creating a temporary structure, tagging those entries that might be
854 * nonzero, and then copying the data over to the SparsityPattern object
855 * that can then be used by the system matrix.
856 *
857 * @code
858 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
859 *   DoFTools::make_sparsity_pattern(dof_handler, dsp);
860 *   sparsity_pattern.copy_from(dsp);
861 *  
862 * @endcode
863 *
864 * Note that the SparsityPattern object does not hold the values of the
865 * matrix, it only stores the places where entries are. The entries
866 * themselves are stored in objects of type SparseMatrix, of which our
867 * variable system_matrix is one.
868 *
869
870 *
871 * The distinction between sparsity pattern and matrix was made to allow
872 * several matrices to use the same sparsity pattern. This may not seem
873 * relevant here, but when you consider the size which matrices can have,
874 * and that it may take some time to build the sparsity pattern, this
875 * becomes important in large-scale problems if you have to store several
876 * matrices in your program.
877 *
878 * @code
879 *   system_matrix.reinit(sparsity_pattern);
880 *  
881 * @endcode
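 *
 * To illustrate the point about sharing (a hypothetical sketch, not part of
 * the actual program), one could initialize several matrices from the very
 * same sparsity pattern built above:
 *
 * @code
 *   // Hypothetical example: two additional matrices sharing one pattern.
 *   SparseMatrix<double> mass_matrix;
 *   SparseMatrix<double> stiffness_matrix;
 *   mass_matrix.reinit(sparsity_pattern);
 *   stiffness_matrix.reinit(sparsity_pattern);
 *   // Both matrices now allocate entries at exactly the same (row, column)
 *   // positions, but store their values independently. The pattern object
 *   // must of course stay alive as long as any matrix refers to it.
 * @endcode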
882 *
883 * The last thing to do in this function is to set the sizes of the right
884 * hand side vector and the solution vector to the right values:
885 *
886 * @code
887 *   solution.reinit(dof_handler.n_dofs());
888 *   system_rhs.reinit(dof_handler.n_dofs());
889 *   }
890 *  
891 * @endcode
892 *
893 *
894 * <a name="step_3-Step3assemble_system"></a>
895 * <h4>Step3::assemble_system</h4>
896 *
897
898 *
899 *
900
901 *
902 * The next step is to compute the entries of the matrix and right hand side
903 * that form the linear system from which we compute the solution. This is the
904 * central function of each finite element program and we have discussed the
905 * primary steps in the introduction already.
906 *
907
908 *
909 * The general approach to assemble matrices and vectors is to loop over all
910 * cells, and on each cell compute the contribution of that cell to the global
911 * matrix and right hand side by quadrature. The point to realize now is that
912 * we need the values of the shape functions at the locations of quadrature
913 * points on the real cell. However, both the finite element shape functions
914 * as well as the quadrature points are only defined on the reference
915 * cell. They are therefore of little help to us, and we will in fact hardly
916 * ever query information about finite element shape functions or quadrature
917 * points from these objects directly.
918 *
919
920 *
921 * Rather, what is required is a way to map this data from the reference cell
922 * to the real cell. Classes that can do that are derived from the Mapping
923 * class, though one again often does not have to deal with them directly:
924 * many functions in the library can take a mapping object as argument, but
925 * when it is omitted they simply resort to the standard bilinear Q1
926 * mapping. We will go this route, and not bother with it for the moment (we
927 * come back to this in @ref step_10 "step-10", @ref step_11 "step-11", and @ref step_12 "step-12").
928 *
929
930 *
931 * So what we now have is a collection of three classes to deal with: finite
932 * element, quadrature, and mapping objects. That's too much, so there is one
933 * type of class that orchestrates information exchange between these three:
934 * the FEValues class. If given one instance of each of these three objects
935 * (or two, and an implicit linear mapping), it will be able to provide you
936 * with information about values and gradients of shape functions at
937 * quadrature points on a real cell.
938 *
939
940 *
941 * Using all this, we will assemble the linear system for this problem in the
942 * following function:
943 *
944 * @code
945 *   void Step3::assemble_system()
946 *   {
947 * @endcode
948 *
949 * Ok, let's start: we need a quadrature formula for the evaluation of the
950 * integrals on each cell. Let's take a Gauss formula with two quadrature
951 * points in each direction, i.e. a total of four points since we are in
952 * 2d. This quadrature formula integrates polynomials of degrees up to three
953 * exactly (in 1d). It is easy to check that this is sufficient for the
954 * present problem:
955 *
956 * @code
957 *   const QGauss<2> quadrature_formula(fe.degree + 1);
958 * @endcode
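 *
 * To see why the rule of thumb `fe.degree + 1` gives enough quadrature points
 * per direction here, recall that an @f$n@f$-point Gauss formula integrates
 * one-dimensional polynomials of degree @f$2n-1@f$ exactly; for the bilinear
 * elements of this program,
 * @f[
 *   n = \texttt{fe.degree} + 1 = 2
 *   \qquad\Longrightarrow\qquad
 *   2n - 1 = 3,
 * @f]
 * while the integrands built from the shape functions and their gradients are
 * polynomials of degree at most two in each variable (and the Jacobian is
 * constant on the square cells used here).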
959 *
960 * And we initialize the object which we have briefly talked about above. It
961 * needs to be told which finite element we want to use, and the quadrature
962 * points and their weights (jointly described by a Quadrature object). As
963 * mentioned, we use the implied Q1 mapping, rather than specifying one
964 * ourselves explicitly. Finally, we have to tell it what we want it to
965 * compute on each cell: we need the values of the shape functions at the
966 * quadrature points (for the right hand side @f$(\varphi_i,f)@f$), their
967 * gradients (for the matrix entries @f$(\nabla \varphi_i, \nabla
968 * \varphi_j)@f$), and also the weights of the quadrature points and the
969 * determinants of the Jacobian transformations from the reference cell to
970 * the real cells.
971 *
972
973 *
974 * This list of what kind of information we actually need is given as a
975 * collection of flags as the third argument to the constructor of
976 * FEValues. Since these values have to be recomputed, or updated, every
977 * time we go to a new cell, all of these flags start with the prefix
978 * <code>update_</code> and then indicate what it actually is that we want
979 * updated. The flag to give if we want the values of the shape functions
980 * computed is #update_values; for the gradients it is
981 * #update_gradients. The determinants of the Jacobians and the quadrature
982 * weights are always used together, so only the products (Jacobians times
983 * weights, or <code>JxW</code> for short) are computed; since we need them, we
984 * have to list #update_JxW_values as well:
985 *
986 * @code
987 *   FEValues<2> fe_values(fe,
988 *   quadrature_formula,
989 *   update_values | update_gradients | update_JxW_values);
990 * @endcode
991 *
992 * The advantage of this approach is that we can specify what kind of
993 * information we actually need on each cell. It is easily understandable
994 * that this approach can significantly speed up finite element computations,
995 * compared to approaches where everything, including second derivatives,
996 * normal vectors to cells, etc are computed on each cell, regardless of
997 * whether they are needed or not.
998 *
999
1000 *
1001 * @note The syntax <code>update_values | update_gradients |
1002 * update_JxW_values</code> is not immediately obvious to anyone not
1003 * used to programming bit operations in C for years already. First,
1004 * <code>operator|</code> is the <i>bitwise or operator</i>, i.e.,
1005 * it takes two integer arguments that are interpreted as bit
1006 * patterns and returns an integer in which every bit is set for
1007 * which the corresponding bit is set in at least one of the two
1008 * arguments. For example, consider the operation
1009 * <code>9|10</code>. In binary, <code>9=0b1001</code> (where the
1010 * prefix <code>0b</code> indicates that the number is to be
1011 * interpreted as a binary number) and <code>10=0b1010</code>. Going
1012 * through each bit and seeing whether it is set in one of the
1013 * argument, we arrive at <code>0b1001|0b1010=0b1011</code> or, in
1014 * decimal notation, <code>9|10=11</code>. The second piece of
1015 * information you need to know is that the various
1016 * <code>update_*</code> flags are all integers that have <i>exactly
1017 * one bit set</i>. For example, assume that
1018 * <code>update_values=0b00001=1</code>,
1019 * <code>update_gradients=0b00010=2</code>,
1020 * <code>update_JxW_values=0b10000=16</code>. Then
1021 * <code>update_values | update_gradients | update_JxW_values = 0b00001|0b00010|0b10000 =
1022 * 0b10011 = 19</code>. In other words, we obtain a number that
1023 * <i>encodes a binary mask representing all of the operations you
1024 * want to happen</i>, where each operation corresponds to exactly
1025 * one bit in the integer that, if equal to one, means that a
1026 * particular piece should be updated on each cell and, if it is
1027 * zero, means that we need not compute it. In other words, even
1028 * though <code>operator|</code> is the <i>bitwise OR operation</i>,
1029 * what it really represents is <i>I want this AND that AND the
1030 * other</i>. Such binary masks are quite common in C programming,
1031 * though less so in higher level languages like C++; they serve
1032 * the current purpose here quite well.
1033 *
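 * To make the arithmetic concrete, here is a small stand-alone sketch (not
 * part of the tutorial program; the flag values are exactly the ones assumed
 * in the note above, not the actual library values):
 *
 * @code
 *   // One-bit flag values as assumed in the note above, for illustration only:
 *   constexpr unsigned int flag_values    = 0b00001; //  1
 *   constexpr unsigned int flag_gradients = 0b00010; //  2
 *   constexpr unsigned int flag_JxW       = 0b10000; // 16
 *
 *   // The bitwise OR combines them into one mask encoding all three requests:
 *   constexpr unsigned int mask = flag_values | flag_gradients | flag_JxW;
 *   static_assert(mask == 0b10011); // = 19 in decimal
 *   static_assert((9 | 10) == 11);  // the 9|10 example from the note
 * @endcode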
1034
1035 *
1036 * For use further down below, we define a shortcut for a value that will
1037 * be used very frequently. Namely, an abbreviation for the number of degrees
1038 * of freedom on each cell (since we are in 2d and degrees of freedom are
1039 * associated with vertices only, this number is four, but we rather want to
1040 * write the definition of this variable in a way that does not preclude us
1041 * from later choosing a different finite element that has a different
1042 * number of degrees of freedom per cell, or work in a different space
1043 * dimension).
1044 *
1045
1046 *
1047 * In general, it is a good idea to use a symbolic name instead of
1048 * hard-coding these numbers even if you know them, since for example,
1049 * you may want to change the finite element at some time. Changing the
1050 * element would have to be done in a different function and it is easy
1051 * to forget to make a corresponding change in another part of the program.
1052 * It is better to not rely on your own calculations, but instead ask
1053 * the right object for the information: Here, we ask the finite element
1054 * to tell us about the number of degrees of freedom per cell and we
1055 * will get the correct number regardless of the space dimension or
1056 * polynomial degree we may have chosen elsewhere in the program.
1057 *
1058
1059 *
1060 * The shortcut here, defined primarily to discuss the basic concept
1061 * and not because it saves a lot of typing, will then make the following
1062 * loops a bit more readable. You will see such shortcuts in many places in
1063 * larger programs, and `dofs_per_cell` is one that is more or less the
1064 * conventional name for this kind of object.
1065 *
1066 * @code
1067 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1068 *  
1069 * @endcode
1070 *
1071 * Now, we said that we wanted to assemble the global matrix and vector
1072 * cell-by-cell. We could write the results directly into the global matrix,
1073 * but this is not very efficient since access to the elements of a sparse
1074 * matrix is slow. Rather, we first compute the contribution of each cell in
1075 * a small matrix with the degrees of freedom on the present cell, and only
1076 * transfer them to the global matrix when the computations are finished for
1077 * this cell. We do the same for the right hand side vector. So let's first
1078 * allocate these objects (these being local objects, all degrees of freedom
1079 * couple with all others, and we should use a full matrix object
1080 * rather than a sparse one for the local operations; everything will be
1081 * transferred to a global sparse matrix later on):
1082 *
1083 * @code
1084 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1085 *   Vector<double> cell_rhs(dofs_per_cell);
1086 *  
1087 * @endcode
1088 *
1089 * When assembling the contributions of each cell, we do this with the local
1090 * numbering of the degrees of freedom (i.e. the number running from zero
1091 * through dofs_per_cell-1). However, when we transfer the result into the
1092 * global matrix, we have to know the global numbers of the degrees of
1093 * freedom. When we query them, we need a scratch (temporary) array for
1094 * these numbers (see the discussion at the end of the introduction for
1095 * the type, types::global_dof_index, used here):
1096 *
1097 * @code
1098 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1099 *  
1100 * @endcode
1101 *
1102 * Now for the loop over all cells. We have seen before how this works for a
1103 * triangulation. A DoFHandler has cell iterators that are exactly analogous
1104 * to those of a Triangulation, but with extra information about the degrees
1105 * of freedom for the finite element you're using. Looping over the active
1106 * cells of a degree-of-freedom handler works the same as for a triangulation.
1107 *
1108
1109 *
1110 * Note that we declare the type of the cell as `const auto &` instead of
1111 * `auto` this time around. In step 1, we were modifying the cells of the
1112 * triangulation by flagging them with refinement indicators. Here we're only
1113 * examining the cells without modifying them, so it's good practice to
1114 * declare `cell` as `const` in order to enforce this invariant.
1115 *
1116 * @code
1117 *   for (const auto &cell : dof_handler.active_cell_iterators())
1118 *   {
1119 * @endcode
1120 *
1121 * We are now sitting on one cell, and we would like the values and
1122 * gradients of the shape functions to be computed, as well as the
1123 * determinants of the Jacobian matrices of the mapping between
1124 * reference cell and true cell, at the quadrature points. Since all
1125 * these values depend on the geometry of the cell, we have to have the
1126 * FEValues object re-compute them on each cell:
1127 *
1128 * @code
1129 *   fe_values.reinit(cell);
1130 *  
1131 * @endcode
1132 *
1133 * Next, reset the local cell's contributions to global matrix and
1134 * global right hand side to zero, before we fill them:
1135 *
1136 * @code
1137 *   cell_matrix = 0;
1138 *   cell_rhs = 0;
1139 *  
1140 * @endcode
1141 *
1142 * Now it is time to start integration over the cell, which we
1143 * do by looping over all quadrature points, which we will
1144 * number by q_index.
1145 *
1146 * @code
1147 *   for (const unsigned int q_index : fe_values.quadrature_point_indices())
1148 *   {
1149 * @endcode
1150 *
1151 * First assemble the matrix: For the Laplace problem, the
1152 * matrix on each cell is the integral over the gradients of
1153 * shape function i and j. Since we do not integrate, but
1154 * rather use quadrature, this is the sum over all
1155 * quadrature points of the integrands times the determinant
1156 * of the Jacobian matrix at the quadrature point times the
1157 * weight of this quadrature point. You can get the gradient
1158 * of shape function @f$i@f$ at quadrature point with number q_index by
1159 * using <code>fe_values.shape_grad(i,q_index)</code>; this
1160 * gradient is a 2-dimensional vector (in fact it is of type
1161 * Tensor@<1,dim@>, with here dim=2) and the product of two
1162 * such vectors is the scalar product, i.e. the product of
1163 * the two shape_grad function calls is the dot
1164 * product. This is in turn multiplied by the Jacobian
1165 * determinant and the quadrature point weight (that one
1166 * gets together by the call to FEValues::JxW() ). Finally,
1167 * this is repeated for all shape functions @f$i@f$ and @f$j@f$:
1168 *
1169 * @code
1170 *   for (const unsigned int i : fe_values.dof_indices())
1171 *   for (const unsigned int j : fe_values.dof_indices())
1172 *   cell_matrix(i, j) +=
1173 *   (fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
1174 *   fe_values.shape_grad(j, q_index) * // grad phi_j(x_q)
1175 *   fe_values.JxW(q_index)); // dx
1176 *  
1177 * @endcode
1178 *
1179 * We then do the same thing for the right hand side. Here,
1180 * the integral is over the shape function i times the right
1181 * hand side function, which we choose to be the function
1182 * with constant value one (more interesting examples will
1183 * be considered in the following programs).
1184 *
1185 * @code
1186 *   for (const unsigned int i : fe_values.dof_indices())
1187 *   cell_rhs(i) += (fe_values.shape_value(i, q_index) * // phi_i(x_q)
1188 *   1. * // f(x_q)
1189 *   fe_values.JxW(q_index)); // dx
1190 *   }
1191 * @endcode
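 *
 * In formula form, the two loops above compute, for the current cell @f$K@f$,
 * the local contributions
 * @f[
 *   A^K_{ij} = \sum_q \nabla\varphi_i(\mathbf x_q)\cdot\nabla\varphi_j(\mathbf x_q)
 *              \,\mathrm{JxW}(q),
 *   \qquad
 *   F^K_i = \sum_q \varphi_i(\mathbf x_q)\, 1 \,\mathrm{JxW}(q),
 * @f]
 * where @f$\mathrm{JxW}(q)@f$ is the product of the Jacobian determinant and
 * the quadrature weight returned by FEValues::JxW().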
1192 *
1193 * Now that we have the contribution of this cell, we have to transfer
1194 * it to the global matrix and right hand side. To this end, we first
1195 * have to find out which global numbers the degrees of freedom on this
1196 * cell have. Let's simply ask the cell for that information:
1197 *
1198 * @code
1199 *   cell->get_dof_indices(local_dof_indices);
1200 *  
1201 * @endcode
1202 *
1203 * Then again loop over all shape functions i and j and transfer the
1204 * local elements to the global matrix. The global numbers can be
1205 * obtained using local_dof_indices[i]:
1206 *
1207 * @code
1208 *   for (const unsigned int i : fe_values.dof_indices())
1209 *   for (const unsigned int j : fe_values.dof_indices())
1210 *   system_matrix.add(local_dof_indices[i],
1211 *   local_dof_indices[j],
1212 *   cell_matrix(i, j));
1213 *  
1214 * @endcode
1215 *
1216 * And again, we do the same thing for the right hand side vector.
1217 *
1218 * @code
1219 *   for (const unsigned int i : fe_values.dof_indices())
1220 *   system_rhs(local_dof_indices[i]) += cell_rhs(i);
1221 *   }
1222 *  
1223 *  
1224 * @endcode
1225 *
1226 * Now almost everything is set up for the solution of the discrete
1227 * system. However, we have not yet taken care of boundary values (in fact,
1228 * Laplace's equation without Dirichlet boundary values is not even uniquely
1229 * solvable, since you can add an arbitrary constant to the discrete
1230 * solution). We therefore have to do something about the situation.
1231 *
1232
1233 *
1234 * For this, we first obtain a list of the degrees of freedom on the
1235 * boundary and the value the shape function shall have there. For
1236 * simplicity, we only interpolate the boundary value function, rather than
1237 * projecting it onto the boundary. There is a function in the library which
1238 * does exactly this: VectorTools::interpolate_boundary_values(). Its
1239 * parameters are (omitting parameters for which default values exist and
1240 * that we don't care about): the DoFHandler object to get the global
1241 * numbers of the degrees of freedom on the boundary; the component of the
1242 * boundary where the boundary values shall be interpolated; the boundary
1243 * value function itself; and the output object.
1244 *
1245
1246 *
1247 * The component of the boundary is meant as follows: in many cases, you may
1248 * want to impose certain boundary values only on parts of the boundary. For
1249 * example, you may have inflow and outflow boundaries in fluid dynamics, or
1250 * clamped and free parts of bodies in deformation computations of
1251 * bodies. Then you will want to denote these different parts of the
1252 * boundary by indicators, and tell the interpolate_boundary_values
1253 * function to only compute the boundary values on a certain part of the
1254 * boundary (e.g. the clamped part, or the inflow boundary). By default,
1255 * all boundaries have a 0 boundary indicator, unless otherwise specified.
1256 * (For example, many functions in namespace GridGenerator specify otherwise.)
1257 * If sections of the boundary have different boundary conditions, you have to
1258 * number those parts with different boundary indicators. The function call
1259 * below will then only determine boundary values for those parts of the
1260 * boundary for which the boundary indicator is in fact the zero specified as
1261 * the second argument.
1262 *
1263
1264 *
1265 * The function describing the boundary values is an object of type Function
1266 * or of a derived class. One of the derived classes is
1267 * Functions::ZeroFunction, which describes (not unexpectedly) a function
1268 * which is zero everywhere. We create such an object in-place and pass it to
1269 * the VectorTools::interpolate_boundary_values() function.
1270 *
1271
1272 *
1273 * Finally, the output object is a list of pairs of global degree of freedom
1274 * numbers (i.e. the number of the degrees of freedom on the boundary) and
1275 * their boundary values (which are zero here for all entries). This mapping
1276 * of DoF numbers to boundary values is done by the <code>std::map</code>
1277 * class.
1278 *
1279 * @code
1280 *   std::map<types::global_dof_index, double> boundary_values;
1281 *   VectorTools::interpolate_boundary_values(dof_handler,
1282 *   types::boundary_id(0),
1283 *   Functions::ZeroFunction<2>(),
1284 *   boundary_values);
1285 * @endcode
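 *
 * As a side remark (a hypothetical snippet, not part of the program), the map
 * filled by this call simply associates the global index of each boundary
 * degree of freedom with its prescribed value, and could be inspected like
 * this:
 *
 * @code
 *   // Print every boundary DoF index and its prescribed value (all zero here):
 *   for (const auto &[dof_index, value] : boundary_values)
 *     std::cout << "boundary DoF " << dof_index << " -> " << value << std::endl;
 * @endcode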
1286 *
1287 * Now that we have the list of boundary DoFs and their respective boundary
1288 * values, let's use them to modify the system of equations
1289 * accordingly. This is done by the following function call:
1290 *
1291 * @code
1292 *   MatrixTools::apply_boundary_values(boundary_values,
1293 *   system_matrix,
1294 *   solution,
1295 *   system_rhs);
1296 *   }
1297 *  
1298 *  
1299 * @endcode
1300 *
1301 *
1302 * <a name="step_3-Step3solve"></a>
1303 * <h4>Step3::solve</h4>
1304 *
1305
1306 *
1307 * The following function solves the discretized equation. As discussed in
1308 * the introduction, we want to use an iterative solver to do this,
1309 * specifically the Conjugate Gradient (CG) method.
1310 *
1311
1312 *
1313 * The way to do this in deal.II is a three-step process:
1314 * - First, we need to have an object that knows how to tell the CG algorithm
1315 * when to stop. This is done by using a SolverControl object, and as
1316 * stopping criterion we say: stop after a maximum of 1000 iterations (which
1317 * is far more than is needed for 1089 variables; see the results section to
1318 * find out how many were really used), and stop if the norm of the residual
1319 * is below @f$\tau=10^{-6}\|\mathbf b\|@f$ where @f$\mathbf b@f$ is the right hand
1320 * side vector. In practice, this latter criterion will be the one
1321 * which stops the iteration.
1322 * - Then we need the solver itself. The template parameter to the SolverCG
1323 * class is the type of the vectors we are using.
1324 * - The last step is to actually solve the system of equations. The CG solver
1325 * takes as arguments the components of the linear system @f$Ax=b@f$ (in the
1326 * order in which they appear in this equation), and a preconditioner
1327 * as the fourth argument. We don't feel ready to delve into preconditioners
1328 * yet, so we tell it to use the identity operation as preconditioner. Later
1329 * tutorial programs will spend a significant amount of time and space on
1330 * constructing better preconditioners.
1331 *
1332
1333 *
1334 * At the end of this process, the `solution` variable contains the
1335 * nodal values of the solution function. At the end of the function, we
1336 * output how many Conjugate Gradient iterations it took to solve the
1337 * linear system.
1338 *
1339 * @code
1340 *   void Step3::solve()
1341 *   {
1342 *   SolverControl solver_control(1000, 1e-6 * system_rhs.l2_norm());
1343 *   SolverCG<Vector<double>> solver(solver_control);
1344 *   solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());
1345 *  
1346 *   std::cout << solver_control.last_step()
1347 *   << " CG iterations needed to obtain convergence." << std::endl;
1348 *   }
1349 *  
1350 *  
1351 * @endcode
1352 *
1353 *
1354 * <a name="step_3-Step3output_results"></a>
1355 * <h4>Step3::output_results</h4>
1356 *
1357
1358 *
1359 * The last part of a typical finite element program is to output the results
1360 * and maybe do some postprocessing (for example compute the maximal stress
1361 * values at the boundary, or the average flux across the outflow, etc). We
1362 * have no such postprocessing here, but we would like to write the solution
1363 * to a file.
1364 *
1365 * @code
1366 *   void Step3::output_results() const
1367 *   {
1368 * @endcode
1369 *
1370 * To write the output to a file, we need an object which knows about output
1371 * formats and the like. This is the DataOut class, and we need an object of
1372 * that type:
1373 *
1374 * @code
1375 *   DataOut<2> data_out;
1376 * @endcode
1377 *
1378 * Now we have to tell it where to take the values from which it shall
1379 * write. We tell it which DoFHandler object to use, and the solution vector
1380 * (and the name by which the solution variable shall appear in the output
1381 * file). If we had more than one vector which we would like to look at in
1382 * the output (for example right hand sides, errors per cell, etc) we would
1383 * add them as well:
1384 *
1385 * @code
1386 *   data_out.attach_dof_handler(dof_handler);
1387 *   data_out.add_data_vector(solution, "solution");
1388 * @endcode
1389 *
1390 * After the DataOut object knows which data it is to work on, we have to
1391 * tell it to process them into something the back ends can handle. The
1392 * reason is that we have separated the frontend (which knows about how to
1393 * treat DoFHandler objects and data vectors) from the backend (which knows
1394 * many different output formats) and use an intermediate data format to
1395 * transfer data from the front- to the backend. The data is transformed
1396 * into this intermediate format by the following function:
1397 *
1398 * @code
1399 *   data_out.build_patches();
1400 *  
1401 * @endcode
1402 *
1403 * Now we have everything in place for the actual output. Just open a file
1404 * and write the data into it, using VTK format (there are many other
1405 * functions in the DataOut class we are using here that can write the
1406 * data in postscript, AVS, GMV, Gnuplot, or some other file
1407 * formats):
1408 *
1409 * @code
1410 *   const std::string filename = "solution.vtk";
1411 *   std::ofstream output(filename);
1412 *   data_out.write_vtk(output);
1413 *   std::cout << "Output written to " << filename << std::endl;
1414 *   }
1415 *  
1416 *  
1417 * @endcode
1418 *
1419 *
1420 * <a name="step_3-Step3run"></a>
1421 * <h4>Step3::run</h4>
1422 *
1423
1424 *
1425 * Finally, the last function of this class is the main function which calls
1426 * all the other functions of the <code>Step3</code> class. The order in which
1427 * this is done resembles the order in which most finite element programs
1428 * work. Since the names are mostly self-explanatory, there is not much to
1429 * comment about:
1430 *
1431 * @code
1432 *   void Step3::run()
1433 *   {
1434 *   make_grid();
1435 *   setup_system();
1436 *   assemble_system();
1437 *   solve();
1438 *   output_results();
1439 *   }
1440 *  
1441 *  
1442 * @endcode
1443 *
1444 *
1445 * <a name="step_3-Thecodemaincodefunction"></a>
1446 * <h3>The <code>main</code> function</h3>
1447 *
1448
1449 *
1450 * This is the main function of the program. Since the concept of a
1451 * main function is mostly a remnant from the pre-object oriented era
1452 * before C++ programming, it often does not do much more than
1453 * creating an object of the top-level class and calling its principal
1454 * function.
1455 *
1456 * @code
1457 *   int main()
1458 *   {
1459 *   Step3 laplace_problem;
1460 *   laplace_problem.run();
1461 *  
1462 *   return 0;
1463 *   }
1464 * @endcode
1465<a name="step_3-Results"></a><h1>Results</h1>
1466
1467
1468The output of the program looks as follows:
1469@code
1470Number of active cells: 1024
1471Number of degrees of freedom: 1089
147236 CG iterations needed to obtain convergence.
1473Output written to solution.vtk
1474@endcode
1475
1476The last line is the output we generated at the bottom of the
1477`output_results()` function: The program generated the file
1478<code>solution.vtk</code>, which is in the VTK format that is widely
1479used by many visualization programs today -- including the two
1480heavy-weights <a href="https://www.llnl.gov/visit">VisIt</a> and
1481<a href="https://www.paraview.org">Paraview</a> that are the most
1482commonly used programs for this purpose.
1483
1484Using VisIt, it is not very difficult to generate a picture of the
1485solution like this:
1486<table width="60%" align="center">
1487 <tr>
1488 <td align="center">
1489 <img src="https://www.dealii.org/images/steps/developer/step-3.solution-3.png" alt="Visualization of the solution of step-3">
1490 </td>
1491 </tr>
1492</table>
1493It shows both the solution and the mesh, elevated above the @f$x@f$-@f$y@f$ plane
1494based on the value of the solution at each point. Of course the solution
1495here is not particularly exciting, but that is a result of both what the
1496Laplace equation represents and the right hand side @f$f(\mathbf x)=1@f$ we
1497have chosen for this program: The Laplace equation describes (among many
1498other uses) the vertical deformation of a membrane subject to an external
1499(also vertical) force. In the current example, the membrane's borders
1500are clamped to a square frame with no vertical variation; a constant
1501force density will therefore intuitively lead to a membrane that
1502simply bulges upward -- like the one shown above.
1503
1504VisIt and Paraview both allow playing with various kinds of visualizations
1505of the solution. Several video lectures show how to use these programs.
1506See also <a href="https://www.math.colostate.edu/~bangerth/videos.676.11.html">video lecture 11</a>, <a href="https://www.math.colostate.edu/~bangerth/videos.676.32.html">video lecture 32</a>.
1507
1508
1509
1510<a name="step-3-extensions"></a>
1511<a name="step_3-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
1512
1513
1514If you want to play around a little bit with this program, here are a few
1515suggestions:
1516
1517
1518<ul>
1519 <li>
1520 Change the geometry and mesh: In the program, we have generated a square
1521 domain and mesh by using the GridGenerator::hyper_cube()
1522 function. However, the <code>GridGenerator</code> has a good number of other
1523 functions as well. Try an L-shaped domain, a ring, or other domains you find
1524 there.
1525 </li>
1526
1527 <li>
1528 Change the boundary condition: The code uses the Functions::ZeroFunction
1529 function to generate zero boundary conditions. However, you may want to try
1530 non-zero constant boundary values using
1531 <code>Functions::ConstantFunction&lt;2&gt;(1)</code> instead of
1532 <code>Functions::ZeroFunction&lt;2&gt;()</code> to have unit Dirichlet boundary
1533 values. More exotic functions are described in the documentation of the
1534 Functions namespace, and you may pick one to describe your particular boundary
1535 values.
1536 </li>
1537
1538 <li> Modify the type of boundary condition: Presently, what happens
1539 is that we use Dirichlet boundary values all around, since the
1540 default is that all boundary parts have boundary indicator zero, and
1541 then we tell the
1542 VectorTools::interpolate_boundary_values() function to
1543 interpolate boundary values to zero on all boundary components with
1544 indicator zero. <p> We can change this behavior if we assign parts
1545 of the boundary different indicators. For example, try this
1546 immediately after calling GridGenerator::hyper_cube():
1547 @code
1548 triangulation.begin_active()->face(0)->set_boundary_id(1);
1549 @endcode
1550
1551 What this does is it first asks the triangulation to
1552 return an iterator that points to the first active cell. Of course,
1553 this being the coarse mesh for the triangulation of a square, the
1554 triangulation has only a single cell at this moment, and it is
1555 active. Next, we ask the cell to return an iterator to its first
1556 face, and then we ask the face to reset the boundary indicator of
1557 that face to 1. What then follows is this: When the mesh is refined,
1558 faces of child cells inherit the boundary indicator of their
1559 parents, i.e. even on the finest mesh, the faces on one side of the
1560 square have boundary indicator 1. Later, when we get to
1561 interpolating boundary conditions, the
1562 VectorTools::interpolate_boundary_values() call will only produce boundary
1563 values for those faces that have zero boundary indicator, and leave
1564 those faces alone that have a different boundary indicator. What
1565 this then does is to impose Dirichlet boundary conditions on the
1566 former, and homogeneous Neumann conditions on the latter (i.e. zero
1567 normal derivative of the solution, unless one adds additional terms
1568 to the right hand side of the variational equality that deal with
1569 potentially non-zero Neumann conditions). You will see this if you
1570 run the program.
1571
1572 An alternative way to change the boundary indicator is to label
1573 the boundaries based on the Cartesian coordinates of the face centers.
1574 For example, we can label all of the faces along the top and
1575 bottom boundaries with boundary indicator 1 by checking to
1576 see if the face centers' y-coordinates are within a tolerance
1577 (here `1e-12`) of -1 and 1. Try this immediately after calling
1578 GridGenerator::hyper_cube(), as before:
1579 @code
1580 for (auto &face : triangulation.active_face_iterators())
1581 if (face->at_boundary())
1582 if (std::fabs(face->center()(1) - (-1.0)) < 1e-12 ||
1583 std::fabs(face->center()(1) - (1.0)) < 1e-12)
1584 face->set_boundary_id(1);
1585 @endcode
1586 Although this code is a bit longer than before, it is useful for
1587 complex geometries, as it does not require knowledge of face labels.
1588
1589 <li>
1590 A slight variation of the last point would be to set different boundary
1591 values as above, but then use a different boundary value function for
1592 boundary indicator one. In practice, what you have to do is to add a second
1593 call to <code>interpolate_boundary_values</code> for boundary indicator one:
1594 @code
1595 VectorTools::interpolate_boundary_values(dof_handler,
1596 types::boundary_id(1),
1597 Functions::ConstantFunction<2>(1.),
1598 boundary_values);
1599 @endcode
1600 If you have this call immediately after the first one to this function, then
1601 it will interpolate boundary values on faces with boundary indicator 1 to the
1602 unit value, and merge these interpolated values with those previously
1603 computed for boundary indicator 0. The result will be that we will get
1604 discontinuous boundary values, zero on three sides of the square, and one on
1605 the fourth.
1606
1607 <li>
1608 Use triangles: As mentioned in the results section of @ref step_1 "step-1", for
1609 historical reasons, almost all tutorial programs for deal.II are
1610 written using quadrilateral or hexahedral meshes. But deal.II also
1611 supports triangular and tetrahedral meshes. So a good experiment would
1612 be to replace the mesh used here by a triangular mesh.
1613
1614 This is *almost* trivial. First, as discussed in @ref step_1 "step-1", we may want
1615 to start with the quadrilateral mesh we are already creating, and
1616 then convert it into a triangular one. You can do that by replacing
1617 the first line of `Step3::make_grid()` by the following code:
1618 @code
1619 Triangulation<2> triangulation_quad;
1620 GridGenerator::hyper_cube(triangulation_quad, -1, 1);
1621 GridGenerator::convert_hypercube_to_simplex_mesh(triangulation_quad,
1622                                                  triangulation);
1623 @endcode
1624 The call to GridGenerator::convert_hypercube_to_simplex_mesh() replaces each
1625 quadrilateral by eight triangles with half the diameter of the original
1626 quadrilateral; as a consequence, the resulting mesh is substantially
1627 finer and one might expect that the solution is consequently more
1628 accurate (but also has many more degrees of freedom). That is a question
1629 you can explore with the techniques discussed in the "Results" section
1630 of @ref step_4 "step-4", but that goes beyond what we want to demonstrate here.
1631
1632 If you run this program, you will run into an error message that
1633 will look something like this:
1634 @code
1635--------------------------------------------------------
1636An error occurred in line <2633> of file </home/bangerth/p/deal.II/1/dealii/include/deal.II/dofs/dof_accessor.templates.h> in function
1637 const ::FiniteElement<dimension_, space_dimension_>& ::DoFCellAccessor<dim, spacedim, lda>::get_fe() const [with int dimension_ = 2; int space_dimension_ = 2; bool level_dof_access = false]
1638The violated condition was:
1639 this->reference_cell() == fe.reference_cell()
1640Additional information:
1641 The reference-cell type used on this cell (Tri) does not match the
1642 reference-cell type of the finite element associated with this cell
1643 (Quad). Did you accidentally use simplex elements on hypercube meshes
1644 (or the other way around), or are you using a mixed mesh and assigned
1645 a simplex element to a hypercube cell (or the other way around) via
1646 the active_fe_index?
1647 @endcode
1648 It is worth carefully reading the error message. It doesn't just
1649 state that there is an error, but also how it may have
1650 arisen. Specifically, it asks whether we are using a finite element
1651 for simplex meshes (in 2d simplices are triangles) with a hypercube
1652 mesh (in 2d hypercubes are quadrilaterals) or the other way around?
1653
1654 Of course, this is exactly what we are doing, though this may
1655 perhaps not be clear to you. But if you look up the documentation,
1656 you will find that the FE_Q element we use in the main class can
1657 only be used on hypercube meshes; what we *want* to use instead now
1658 that we are using a simplex mesh is the FE_SimplexP class that is the
1659 equivalent to FE_Q for simplex cells. (To do this, you will also
1660 have to add `#include <deal.II/fe/fe_simplex_p.h>` at the top of the file.)
1661
1662 The last thing you need to change (which at the time of writing is
1663 unfortunately not prompted by getting an error message) is that when
1664 we integrate, we need to use a quadrature formula that is
1665 appropriate for triangles. This is done by replacing QGauss by
1666 QGaussSimplex in the code.
1667
1668 With all of these steps, you then get the following solution:
1669 <img src="https://www.dealii.org/images/steps/developer/step-3.solution-triangles.png" alt="Visualization of the solution of step-3 using triangles">
1670
1671 <li>
1672 Observe convergence: We will only discuss computing errors in norms in
1673 @ref step_7 "step-7", but it is easy to check that computations converge
1674 already here. For example, we could evaluate the value of the solution in a
1675 single point and compare the value for different %numbers of global
1676 refinement (the number of global refinement steps is set in
1677 <code>Step3::make_grid</code> above). To evaluate the
1678 solution at a point, say at @f$(\frac 13, \frac 13)@f$, we could add the
1679 following code to the <code>Step3::output_results</code> function:
1680 @code
1681 std::cout << "Solution at (1/3,1/3): "
1682 << VectorTools::point_value(dof_handler, solution,
1683 Point<2>(1./3, 1./3))
1684 << std::endl;
1685 @endcode
1686 For 1 through 9 global refinement steps, we then get the following sequence
1687 of point values:
1688 <table align="center" class="doxtable">
1689 <tr> <th># of refinements</th> <th>@f$u_h(\frac 13,\frac13)@f$</th> </tr>
1690 <tr> <td>1</td> <td>0.166667</td> </tr>
1691 <tr> <td>2</td> <td>0.227381</td> </tr>
1692 <tr> <td>3</td> <td>0.237375</td> </tr>
1693 <tr> <td>4</td> <td>0.240435</td> </tr>
1694 <tr> <td>5</td> <td>0.241140</td> </tr>
1695 <tr> <td>6</td> <td>0.241324</td> </tr>
1696 <tr> <td>7</td> <td>0.241369</td> </tr>
1697 <tr> <td>8</td> <td>0.241380</td> </tr>
1698 <tr> <td>9</td> <td>0.241383</td> </tr>
1699 </table>
1700 By noticing that the difference between each two consecutive values reduces
1701 by about a factor of 4, we can conjecture that the "correct" value may be
1702 @f$u(\frac 13, \frac 13)\approx 0.241384@f$. In fact, if we assumed this to be
1703 the correct value, we could show that the sequence above indeed shows @f${\cal
1704 O}(h^2)@f$ convergence &mdash; theoretically, the convergence order should be
1705 @f${\cal O}(h^2 |\log h|)@f$ but the symmetry of the domain and the mesh may lead
1706 to the better convergence order observed.
1707
1708 A slight variant of this would be to repeat the test with quadratic
1709 elements. All you need to do is to set the polynomial degree of the finite
1710 element to two in the constructor
1711 <code>Step3::Step3</code>.
1712
1713 <li>Convergence of the mean: A different way to see that the solution
1714 actually converges (to something &mdash; we can't tell whether it's really
1715 the correct value!) is to compute the mean of the solution. To this end, add
1716 the following code to <code>Step3::output_results</code>:
1717 @code
1718 std::cout << "Mean value: "
1719 << VectorTools::compute_mean_value (dof_handler,
1720 QGauss<2>(fe.degree + 1),
1721 solution,
1722 0)
1723 << std::endl;
1724 @endcode
1725 The documentation of the function explains what the second and fourth
1726 parameters mean, while the first and third should be obvious. Doing the same
1727 study again where we change the number of global refinement steps, we get
1728 the following result:
1729 <table align="center" class="doxtable">
1730 <tr> <th># of refinements</th> <th>@f$\int_\Omega u_h(x)\; dx@f$</th> </tr>
1731 <tr> <td>0</td> <td>0.09375000</td> </tr>
1732 <tr> <td>1</td> <td>0.12790179</td> </tr>
1733 <tr> <td>2</td> <td>0.13733440</td> </tr>
1734 <tr> <td>3</td> <td>0.13976069</td> </tr>
1735 <tr> <td>4</td> <td>0.14037251</td> </tr>
1736 <tr> <td>5</td> <td>0.14052586</td> </tr>
1737 <tr> <td>6</td> <td>0.14056422</td> </tr>
1738 <tr> <td>7</td> <td>0.14057382</td> </tr>
1739 <tr> <td>8</td> <td>0.14057622</td> </tr>
1740 </table>
1741 Again, the difference between two adjacent values goes down by about a
1742 factor of four, indicating convergence as @f${\cal O}(h^2)@f$.
1743</ul>
1744
1745
1746
1747<a name="step_3-UsingHDF5tooutputthesolutionandadditionaldata"></a><h3>Using %HDF5 to output the solution and additional data</h3>
1748
1749
1750%HDF5 is a commonly used format that can be read by many scripting
1751languages (e.g. R or Python). It is not difficult to get deal.II to
1752produce some %HDF5 files that can then be used in external scripts to
1753postprocess some of the data generated by this program. Here are some
1754ideas on what is possible.
1755
1756
1757<a name="step_3-Changingtheoutputtoh5"></a><h4> Changing the output to .h5</h4>
1758
1759
1760To fully make use of the automation we first need to introduce a private variable for the number of
1761global refinement steps <code>unsigned int n_refinement_steps </code>, which will be used for the output filename.
1762In <code>make_grid()</code> we then replace <code>triangulation.refine_global(5);</code> with
1763@code
1764n_refinement_steps = 5;
1765triangulation.refine_global(n_refinement_steps);
1766@endcode
1767The deal.II library has two different %HDF5 bindings, one in the HDF5
1768namespace (for interfacing to general-purpose data files)
1769and another one in DataOut (specifically for writing files for the
1770visualization of solutions).
1771Although the HDF5 deal.II binding supports both serial and MPI, the %HDF5 DataOut binding
1772only supports parallel output.
1773For this reason we need to initialize an MPI
1774communicator with only one processor. This is done by adding the following code.
1775@code
1776int main(int argc, char* argv[])
1777{
1778 Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
1779 ...
1780}
1781@endcode
1782Next we change the `Step3::output_results()` output routine as
1783described in the DataOutBase namespace documentation:
1784@code
1785const std::string filename_h5 = "solution_" + std::to_string(n_refinement_steps) + ".h5";
1786DataOutBase::DataOutFilterFlags flags(true, true);
1787DataOutBase::DataOutFilter data_filter(flags);
1788data_out.write_filtered_data(data_filter);
1789data_out.write_hdf5_parallel(data_filter, filename_h5, MPI_COMM_WORLD);
1790@endcode
1791The resulting file can then be visualized just like the VTK file that
1792the original version of the tutorial produces; but, since %HDF5 is a
1793more general file format, it can also easily be processed in scripting
1794languages for other purposes.
1795
1796
1797<a name="step_3-Addingthepointvalueandthemeanseeextensionaboveintotheh5file"></a><h4> Adding the point value and the mean (see extension above) into the .h5 file</h4>
1798
1799
1800After outputting the solution, the file can be opened again to include
1801more datasets. This allows us to keep all the necessary information
1802of our experiment in a single result file, which can then be read and
1803processed by some postprocessing script.
1804(Have a look at HDF5::Group::write_dataset() for further
1805information on the possible output options.)
1806
1807To make this happen, we first include the necessary header into our file:
1808@code
1809#include <deal.II/base/hdf5.h>
1810@endcode
1811Adding the following lines to the end
1812of our output routine adds the information about the value of the
1813solution at a particular point, as well as the mean value of the
1814solution, to our %HDF5 file:
1815@code
1816HDF5::File data_file(filename_h5, HDF5::File::FileAccessMode::open, MPI_COMM_WORLD);
1817Vector<double> point_value(1);
1818point_value[0] = VectorTools::point_value(dof_handler, solution,
1819 Point<2>(1./3, 1./3));
1820data_file.write_dataset("point_value", point_value);
1821Vector<double> mean_value(1);
1822mean_value[0] = VectorTools::compute_mean_value(dof_handler,
1823 QGauss<2>(fe.degree + 1),
1824 solution, 0);
1825data_file.write_dataset("mean_value",mean_value);
1826@endcode
1827
1828
1829
1830<a name="step_3-UsingRandggplot2togenerateplots"></a><h3> Using R and ggplot2 to generate plots</h3>
1831
1832@note Alternatively, one could use the python code in the next subsection.
1833
1834The data put into %HDF5 files above can then be used from scripting
1835languages for further postprocessing. In the following, let us show
1836how this can, in particular, be done with the
1837<a href="https://en.wikipedia.org/wiki/R_(programming_language)">R
1838programming language</a>, a widely used language in statistical data
1839analysis. (Similar things can also be done in Python, for example.)
1840If you are unfamiliar with R and ggplot2 you could check out the data carpentry course on R
1841<a href="https://datacarpentry.org/R-ecology-lesson/index.html">here</a>.
1842Furthermore, since most search engines struggle with searches of the form "R + topic",
1843we recommend using the specialized service <a
1844href="http://rseek.org">RSeek</a> instead.
1845
1846The most prominent difference between R and other languages is that
1847the assignment operator (`a = 5`) is typically written as
1848`a <- 5`. As the latter is considered standard we will use it in our examples as well.
1849To open the `.h5` file in R you have to install the <a href="https://bioconductor.org/packages/release/bioc/html/rhdf5.html">rhdf5</a> package, which is a part of the Bioconductor package.
1850
1851First we will include all necessary packages and have a look at how the data is structured in our file.
1852@code{.r}
1853library(rhdf5) # library for handling HDF5 files
1854library(ggplot2) # main plotting library
1855library(grDevices) # needed for output to PDF
1856library(viridis) # contains good colormaps for sequential data
1857
1858refinement <- 5
1859h5f <- H5Fopen(paste("solution_",refinement,".h5",sep=""))
1860print(h5f)
1861@endcode
1862This gives the following output
1863@code{.unparsed}
1864HDF5 FILE
1865 name /
1866filename
1867
1868 name otype dclass dim
18690 cells H5I_DATASET INTEGER 4 x 1024
18701 mean_value H5I_DATASET FLOAT 1
18712 nodes H5I_DATASET FLOAT 2 x 1089
18723 point_value H5I_DATASET FLOAT 1
18734 solution H5I_DATASET FLOAT 1 x 1089
1874@endcode
1875The datasets can be accessed by <code>h5f\$name</code>. The function
1876<code>dim(h5f\$cells)</code> gives us the dimensions of the matrix
1877that is used to store our cells.
1878We can see the following three matrices, as well as the two
1879additional data points we added.
1880<ul>
1881<li> <code>cells</code>: a 4x1024 matrix that stores the (C++) vertex indices for each cell
1882<li> <code>nodes</code>: a 2x1089 matrix storing the position values (x,y) for our cell vertices
1883<li> <code>solution</code>: a 1x1089 matrix storing the values of our solution at each vertex
1884</ul>
1885Now we can use this data to generate various plots. Plotting with ggplot2 usually splits into two steps.
1886At first the data needs to be manipulated and added to a <code>data.frame</code>.
1887After that, a <code>ggplot</code> object is constructed and manipulated by adding plot elements to it.
1888
1889<code>nodes</code> and <code>cells</code> contain all the information we need to plot our grid.
1890The following code wraps all the data into one dataframe for plotting our grid:
1891@code{.r}
1892# Counting in R starts at 1 instead of 0, so we need to increment all
1893# vertex indices by one:
1894cell_ids <- h5f$cells + 1
1895
1896# Store the x and y positions of each vertex in one big vector in a
1897# cell by cell fashion (every 4 entries belong to one cell):
1898cells_x <- h5f$nodes[1,][cell_ids]
1899cells_y <- h5f$nodes[2,][cell_ids]
1900
1901# Construct a vector that stores the matching cell by cell grouping
1902# (1,1,1,1,2,2,2,2,...):
1903groups <- rep(1:ncol(cell_ids),each=4)
1904
1905# Finally put everything into one dataframe:
1906meshdata <- data.frame(x = cells_x, y = cells_y, id = groups)
1907@endcode
1908
1909With the finished dataframe we have everything we need to plot our grid:
1910@code{.r}
1911pdf (paste("grid_",refinement,".pdf",sep=""),width = 5,height = 5) # Open new PDF file
1912plt <- ggplot(meshdata,aes(x=x,y=y,group=id)) # Construction of our plot
1913 # object, at first only data
1914
1915plt <- plt + geom_polygon(fill="white",colour="black") # Actual plotting of the grid as polygons
1916plt <- plt + ggtitle(paste("grid at refinement level #",refinement))
1917
1918print(plt) # Show the current state of the plot/add it to the pdf
1919dev.off() # Close PDF file
1920@endcode
1921
1922The contents of this file then look as follows (not very exciting, but
1923you get the idea):
1924<table width="60%" align="center">
1925 <tr>
1926 <td align="center">
1927 <img src="https://www.dealii.org/images/steps/developer/step-3.extensions.grid_5.png" alt="Grid after 5 refinement steps of step-3">
1928 </td>
1929 </tr>
1930</table>
1931
1932We can also visualize the solution itself, and this is going to look
1933more interesting.
1934To make a 2D pseudocolor plot of our solution we will use <code>geom_raster</code>.
1935This function needs a structured grid, i.e. uniform in x and y directions.
1936Luckily our data at this point is structured in the right way.
1937The following code plots a pseudocolor representation of our surface into a new PDF:
1938@code{.r}
1939pdf (paste("pseudocolor_",refinement,".pdf",sep=""),width = 5,height = 4.2) # Open new PDF file
1940colordata <- data.frame(x = h5f$nodes[1,], y = h5f$nodes[2,], solution = h5f$solution[1,])
1941plt <- ggplot(colordata,aes(x=x,y=y,fill=solution))
1942plt <- plt + geom_raster(interpolate=TRUE)
1943plt <- plt + scale_fill_viridis()
1944plt <- plt + ggtitle(paste("solution at refinement level #",refinement))
1945
1946print(plt)
1947dev.off()
1948H5Fclose(h5f) # Close the HDF5 file
1949@endcode
1950This is now going to look as follows:
1951<table width="60%" align="center">
1952 <tr>
1953 <td align="center">
1954 <img src="https://www.dealii.org/images/steps/developer/step-3.extensions.pseudocolor_5.png" alt="Solution after 5 refinement steps of step-3">
1955 </td>
1956 </tr>
1957</table>
1958
1959For plotting the convergence curves we need to re-run the C++ code multiple times with different values for <code>n_refinement_steps</code>
1960starting from 1.
1961Since every file only contains a single data point we need to loop over them and concatenate the results into a single vector.
1962@code{.r}
1963n_ref <- 8 # Maximum refinement level for which results are existing
1964
1965# First we initiate all vectors with the results of the first level
1966h5f <- H5Fopen("solution_1.h5")
1967dofs <- dim(h5f$solution)[2]
1968mean <- h5f$mean_value
1969point <- h5f$point_value
1970H5Fclose(h5f)
1971
1972for (reflevel in 2:n_ref)
1973{
1974 h5f <- H5Fopen(paste("solution_",reflevel,".h5",sep=""))
1975 dofs <- c(dofs, dim(h5f$solution)[2])
1976 mean <- c(mean, h5f$mean_value)
1977 point <- c(point, h5f$point_value)
1978 H5Fclose(h5f)
1979}
1980@endcode
1981As we are not interested in the values themselves but rather in the error compared to an "exact" solution, we will
1982assume our highest refinement level to be that solution and omit it from the data.
1983@code{.r}
1984# Calculate the error w.r.t. our maximum refinement step
1985mean_error <- abs(mean[1:n_ref-1]-mean[n_ref])
1986point_error <- abs(point[1:n_ref-1]-point[n_ref])
1987
1988# Remove the highest value from our DoF data
1989dofs <- dofs[1:n_ref-1]
1990convdata <- data.frame(dofs = dofs, mean_value= mean_error, point_value = point_error)
1991@endcode
1992Now we have all the data available to generate our plots.
1993It is often useful to plot errors on a log-log scale, which is
1994accomplished in the following code:
1995@code{.r}
1996pdf (paste("convergence.pdf",sep=""),width = 5,height = 4.2)
1997plt <- ggplot(convdata,mapping=aes(x = dofs, y = mean_value))
1998plt <- plt+geom_line()
1999plt <- plt+labs(x="#DoFs",y = "mean value error")
2000plt <- plt+scale_x_log10()+scale_y_log10()
2001print(plt)
2002
2003plt <- ggplot(convdata,mapping=aes(x = dofs, y = point_value))
2004plt <- plt+geom_line()
2005plt <- plt+labs(x="#DoFs",y = "point value error")
2006plt <- plt+scale_x_log10()+scale_y_log10()
2007print(plt)
2008
2009dev.off()
2010@endcode
2011This results in the following plots, which show how the errors in the
2012mean value and in the solution value at the chosen point nicely converge
2013to zero:
2014<table style="width:50%" align="center">
2015 <tr>
2016 <td><img src="https://www.dealii.org/images/steps/developer/step-3.extensions.convergence_mean.png" alt=""></td>
2017 <td><img src="https://www.dealii.org/images/steps/developer/step-3.extensions.convergence_point.png" alt=""></td>
2018 </tr>
2019</table>
2020
2021<a name="step_3-Usingpythontogenerateplots"></a><h3> Using python to generate plots</h3>
2022
2023
2024In this section we discuss the postprocessing of the data stored in %HDF5 files using the Python programming language.
2025The necessary packages to import are
2026@code{.py}
2027import numpy as np # to work with multidimensional arrays
2028import h5py # to work with %HDF5 files
2029
2030import pandas as pd # for data frames
2031import matplotlib.pyplot as plt # plotting
2032from matplotlib.patches import Polygon
2033
2034from scipy.interpolate import griddata # interpolation function
2035from matplotlib import cm # for colormaps
2036
2037@endcode
2038The %HDF5 solution file corresponding to `refinement = 5` can be opened as
2039@code{.py}
2040refinement = 5
2041filename = "solution_%d.h5" % refinement
2042file = h5py.File(filename, "r")
2043@endcode
2044The following prints out the tables in the %HDF5 file
2045@code{.py}
2046for key, value in file.items():
2047 print(key, " : ", value)
2048@endcode
2049which prints out
2050@code{.unparsed}
2051cells : <HDF5 dataset "cells": shape (1024, 4), type "<u4">
2052mean_value : <HDF5 dataset "mean_value": shape (1,), type "<f8">
2053nodes : <HDF5 dataset "nodes": shape (1089, 2), type "<f8">
2054point_value : <HDF5 dataset "point_value": shape (1,), type "<f8">
2055solution : <HDF5 dataset "solution": shape (1089, 1), type "<f8">
2056@endcode
2057There are @f$(32+1)\times(32+1) = 1089@f$ nodes.
2058The coordinates of these nodes @f$(x,y)@f$ are stored in the table `nodes` in the %HDF5 file.
2059There are a total of @f$32\times 32 = 1024@f$ cells. The nodes which make up each cell are
2060marked in the table `cells` in the %HDF5 file.
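If you want to convince yourself of these sizes, a minimal check on the still open h5py file object could look like the following sketch, which just reads the dataset shapes:
@code{.py}
# The dataset shapes match the numbers quoted above:
print(file["/nodes"].shape)     # (1089, 2)
print(file["/cells"].shape)     # (1024, 4)
print(file["/solution"].shape)  # (1089, 1)
@endcode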
2061
2062Next, we extract the data into multidimensional arrays
2063@code{.py}
2064nodes = np.array(file["/nodes"])
2065cells = np.array(file["/cells"])
2066solution = np.array(file["/solution"])
2067
2068x, y = nodes.T
2069@endcode
2070
2071The following stores the @f$x@f$ and @f$y@f$ coordinates of each node of each cell in two flat arrays:
2072@code{.py}
2073cell_x = x[cells.flatten()]
2074cell_y = y[cells.flatten()]
2075@endcode
2076The following assigns a cell id to every entry; each group of four consecutive entries corresponds to one cell.
2077We then collect the coordinates and ids into a data frame:
2078@code{.py}
2079n_cells = cells.shape[0]
2080cell_ids = np.repeat(np.arange(n_cells), 4)
2081meshdata = pd.DataFrame({"x": cell_x, "y": cell_y, "ids": cell_ids})
2082@endcode
2083The data frame looks as follows:
2084@code{.unparsed}
2085print(meshdata)
2086
2087 x y ids
20880 0.00000 0.00000 0
20891 0.03125 0.00000 0
20902 0.03125 0.03125 0
20913 0.00000 0.03125 0
20924 0.03125 0.00000 1
2093... ... ... ...
20944091 0.93750 1.00000 1022
20954092 0.96875 0.96875 1023
20964093 1.00000 0.96875 1023
20974094 1.00000 1.00000 1023
20984095 0.96875 1.00000 1023
2099
21004096 rows × 3 columns
2101@endcode
2102
2103To plot the mesh, we loop over all cells and connect the first and last node to close each polygon:
2104@code{.py}
2105fig, ax = plt.subplots()
2106ax.set_aspect("equal", "box")
2107ax.set_title("grid at refinement level #%d" % refinement)
2108
2109for cell_id, cell in meshdata.groupby(["ids"]):
2110 cell = pd.concat([cell, cell.head(1)])
2111 plt.plot(cell["x"], cell["y"], c="k")
2112@endcode
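If, like the R code above, you would rather write the figure to a file than only show it on screen, matplotlib's <code>savefig</code> can do that as well; a small sketch (the file name is of course arbitrary, and the same call works for all figures created below):
@code{.py}
# Write the current figure to a file before (or instead of) showing it:
fig.savefig("grid_%d.png" % refinement, dpi=200, bbox_inches="tight")
plt.show()
@endcode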
2113Alternatively, one could use matplotlib's built-in Polygon class:
2114@code{.py}
2115fig, ax = plt.subplots()
2116ax.set_aspect("equal", "box")
2117ax.set_title("grid at refinement level #%d" % refinement)
2118for cell_id, cell in meshdata.groupby(["ids"]):
2119 patch = Polygon(cell[["x", "y"]], facecolor="w", edgecolor="k")
2120 ax.add_patch(patch)
2121@endcode
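Drawing a thousand individual Polygon patches can be noticeably slower than using a single collection; if that becomes an issue, a <code>PolyCollection</code> built directly from the <code>nodes</code> and <code>cells</code> arrays defined above is a possible alternative. A minimal sketch:
@code{.py}
from matplotlib.collections import PolyCollection

fig, ax = plt.subplots()
ax.set_aspect("equal", "box")
ax.set_title("grid at refinement level #%d" % refinement)

# nodes[cells] has shape (n_cells, 4, 2): the four corner coordinates of
# every cell, in the order in which they are stored in the file.
ax.add_collection(PolyCollection(nodes[cells], facecolors="w", edgecolors="k"))
ax.autoscale_view()
plt.show()
@endcode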
2122
2123To plot the solution, we first create a finer structured grid, interpolate the solution values
2124onto it, and then plot the result:
2125@code{.py}
2126nx = int(np.sqrt(n_cells)) + 1
2127nx *= 10
2128xg = np.linspace(x.min(), x.max(), nx)
2129yg = np.linspace(y.min(), y.max(), nx)
2130
2131xgrid, ygrid = np.meshgrid(xg, yg)
2132solution_grid = griddata((x, y), solution.flatten(), (xgrid, ygrid), method="linear")
2133
2134fig = plt.figure()
2135ax = fig.add_subplot(1, 1, 1)
2136ax.set_title("solution at refinement level #%d" % refinement)
2137c = ax.pcolor(xgrid, ygrid, solution_grid, cmap=cm.viridis)
2138fig.colorbar(c, ax=ax)
2139
2140plt.show()
2141@endcode
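The interpolation onto an auxiliary structured grid is only needed because <code>pcolor</code> expects one; alternatively, matplotlib can triangulate the scattered node data and color it directly, for example with <code>tripcolor</code>. A sketch using the arrays already defined above:
@code{.py}
fig, ax = plt.subplots()
ax.set_aspect("equal", "box")
ax.set_title("solution at refinement level #%d" % refinement)

# Delaunay-triangulate the nodes and color by the nodal solution values:
c = ax.tripcolor(x, y, solution.flatten(), cmap=cm.viridis, shading="gouraud")
fig.colorbar(c, ax=ax)
plt.show()
@endcode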
2142
2143To check the convergence of `mean_value` and `point_value`,
2144we loop over the data of all refinement levels and store the results in the arrays <code>mean_values</code>, <code>point_values</code>, and <code>dofs</code>:
2145@code{.py}
2146mean_values = np.zeros((8,))
2147point_values = np.zeros((8,))
2148dofs = np.zeros((8,))
2149
2150for refinement in range(1, 9):
2151 filename = "solution_%d.h5" % refinement
2152 file = h5py.File(filename, "r")
2153 mean_values[refinement - 1] = np.array(file["/mean_value"])[0]
2154 point_values[refinement - 1] = np.array(file["/point_value"])[0]
2155 dofs[refinement - 1] = np.array(file["/solution"]).shape[0]
2156@endcode
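For bilinear elements on a @f$k@f$ times refined grid there are @f$(2^k+1)^2@f$ degrees of freedom, so a quick check of the data we just read could look like this (a sketch using the <code>dofs</code> array from above):
@code{.py}
# dofs should contain (2^k+1)^2 for k = 1, ..., 8, i.e. 9, 25, ..., 66049:
expected_dofs = (2 ** np.arange(1, 9) + 1) ** 2
print(np.all(dofs == expected_dofs))  # should print True
@endcode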
2157
2158Following is the plot of the mean value error on a `log-log` scale:
2159@code{.py}
2160mean_error = np.abs(mean_values[1:] - mean_values[:-1])
2161plt.loglog(dofs[:-1], mean_error)
2162plt.grid()
2163plt.xlabel("#DoFs")
2164plt.ylabel("mean value error")
2165plt.show()
2166@endcode
2167
2168Following is the plot of the point value error on a `log-log` scale:
2169@code{.py}
2170point_error = np.abs(point_values[1:] - point_values[:-1])
2171plt.loglog(dofs[:-1], point_error)
2172plt.grid()
2173plt.xlabel("#DoFs")
2174plt.ylabel("point value error")
2175plt.show()
2176@endcode
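If one prefers a number over a picture, one can also estimate the observed convergence order from these data: every refinement step halves the mesh size @f$h@f$, so if the error behaves like @f$O(h^p)@f$, the ratio of two consecutive errors should be roughly @f$2^p@f$. A small sketch using the arrays computed above:
@code{.py}
# log2 of the ratio of consecutive errors gives a rough estimate of the
# convergence order p with respect to the mesh size h:
print(np.log2(mean_error[:-1] / mean_error[1:]))
print(np.log2(point_error[:-1] / point_error[1:]))
@endcode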
2177
2178A Python package that mimics R's ggplot2 (which is based on specifying a grammar of graphics) is
2179<a href="https://plotnine.org/">plotnine</a>.
2180We need to import the following from the <code>plotnine</code> package:
2181@code{.py}
2182from plotnine import (
2183 ggplot,
2184 aes,
2185 geom_raster,
2186 geom_polygon,
2187 geom_line,
2188 labs,
2189 scale_x_log10,
2190 scale_y_log10,
2191 ggtitle,
2192)
2193@endcode
2194Then we plot the mesh using the <code>meshdata</code> data frame:
2195@code{.py}
2196plot = (
2197 ggplot(meshdata, aes(x="x", y="y", group="ids"))
2198 + geom_polygon(fill="white", colour="black")
2199 + ggtitle("grid at refinement level #%d" % refinement)
2200)
2201
2202print(plot)
2203@endcode
2204We collect the solution into a data frame:
2205@code{.py}
2206colordata = pd.DataFrame({"x": x, "y": y, "solution": solution.flatten()})
2207@endcode
2208Plot of the solution
2209@code{.py}
2210plot = (
2211 ggplot(colordata, aes(x="x", y="y", fill="solution"))
2212 + geom_raster(interpolate=True)
2213 + ggtitle("solution at refinement level #%d" % refinement)
2214)
2215
2216print(plot)
2217@endcode
2218
2219We collect the convergence data into a data frame:
2220@code{.py}
2221convdata = pd.DataFrame(
2222 {"dofs": dofs[:-1], "mean_value": mean_error, "point_value": point_error}
2223)
2224
2225@endcode
2226Following is the plot of the mean value error on a `log-log` scale:
2227@code{.py}
2228plot = (
2229 ggplot(convdata, mapping=aes(x="dofs", y="mean_value"))
2230 + geom_line()
2231 + labs(x="#DoFs", y="mean value error")
2232 + scale_x_log10()
2233 + scale_y_log10()
2234)
2235
2236plot.save("mean_error.pdf", dpi=60)
2237print(plot)
2238@endcode
2239
2240Following is the plot of the point value error on a `log-log` scale:
2241@code{.py}
2242plot = (
2243 ggplot(convdata, mapping=aes(x="dofs", y="point_value"))
2244 + geom_line()
2245 + labs(x="#DoFs", y="point value error")
2246 + scale_x_log10()
2247 + scale_y_log10()
2248)
2249
2250plot.save("point_error.pdf", dpi=60)
2251print(plot)
2252@endcode
2253 *
2254 *
2255<a name="step_3-PlainProg"></a>
2256<h1> The plain program</h1>
2257@include "step-3.cc"
2258*/