Reference documentation for deal.II version 9.6.0
step-3.h
1
764 1)
765 *   , dof_handler(triangulation)
766 *   {}
767 *  
768 *  
769 * @endcode
770 *
771 *
772 * <a name="step_3-Step3make_grid"></a>
773 * <h4>Step3::make_grid</h4>
774 *
775
776 *
777 * Now, the first thing we've got to do is to generate the triangulation on
778 * which we would like to do our computation and number each vertex with a
779 * degree of freedom. We have seen these two steps in @ref step_1 "step-1" and @ref step_2 "step-2"
780 * before, respectively.
781 *
782
783 *
784 * This function does the first part, creating the mesh. We create the grid
785 * and refine all cells five times. Since the initial grid (which is the
786 * square @f$[-1,1] \times [-1,1]@f$) consists of only one cell, the final grid
787 * has 32 times 32 cells, for a total of 1024.
788 *
789
790 *
791 * Unsure that 1024 is the correct number? We can check that by outputting the
792 * number of cells using the <code>n_active_cells()</code> function on the
793 * triangulation.
794 *
795 * @code
796 *   void Step3::make_grid()
797 *   {
798 *   GridGenerator::hyper_cube(triangulation, -1, 1);
799 *   triangulation.refine_global(5);
800 *  
801 *   std::cout << "Number of active cells: " << triangulation.n_active_cells()
802 *   << std::endl;
803 *   }
804 *  
805 * @endcode
806 *
807 * @note We call the Triangulation::n_active_cells() function, rather than
808 * Triangulation::n_cells(). Here, <i>active</i> means the cells that aren't
809 * refined any further. We stress the adjective "active" since there are more
810 * cells, namely the parent cells of the finest cells, their parents, etc, up
811 * to the one cell which made up the initial grid. Of course, on the next
812 * coarser level, the number of cells is one quarter that of the cells on the
813 * finest level, i.e. 256, then 64, 16, 4, and 1. If you called
814 * <code>triangulation.n_cells()</code> in the code above, you would
815 * consequently get a value of 1365 instead. On the other hand, the number of
816 * cells (as opposed to the number of active cells) is not typically of much
817 * interest, so there is no good reason to print it.
818 *
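 *
 * As a quick check (a minimal sketch, not part of the tutorial program),
 * one can also print the per-level cell counts and verify that they add up
 * to 1365:
 *
 * @code
 *   for (unsigned int level = 0; level < triangulation.n_levels(); ++level)
 *     std::cout << "Cells on level " << level << ": "
 *               << triangulation.n_cells(level) << std::endl;
 *   std::cout << "Total number of cells: " << triangulation.n_cells()
 *             << std::endl;
 * @endcode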
819
820 *
821 *
822
823 *
824 *
825 * <a name="step_3-Step3setup_system"></a>
826 * <h4>Step3::setup_system</h4>
827 *
828
829 *
830 * Next we enumerate all the degrees of freedom and set up matrix and vector
831 * objects to hold the system data. Enumerating is done by using
832 * DoFHandler::distribute_dofs(), as we have seen in the @ref step_2 "step-2" example. Since
833 * we use the FE_Q class and have set the polynomial degree to 1 in the
834 * constructor, i.e. bilinear elements, this associates one degree of freedom
835 * with each vertex. While we are generating output anyway, let us also take a look
836 * at how many degrees of freedom are generated:
837 *
838 * @code
839 *   void Step3::setup_system()
840 *   {
841 *   dof_handler.distribute_dofs(fe);
842 *   std::cout << "Number of degrees of freedom: " << dof_handler.n_dofs()
843 *   << std::endl;
844 * @endcode
845 *
846 * There should be one DoF for each vertex. Since we have a 32 times 32
847 * grid, the number of DoFs should be 33 times 33, or 1089.
848 *
849
850 *
851 * As we have seen in the previous example, we set up a sparsity pattern by
852 * first creating a temporary structure, tagging those entries that might be
853 * nonzero, and then copying the data over to the SparsityPattern object
854 * that can then be used by the system matrix.
855 *
856 * @code
857 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
858 *   DoFTools::make_sparsity_pattern(dof_handler, dsp);
859 *   sparsity_pattern.copy_from(dsp);
860 *  
861 * @endcode
862 *
863 * Note that the SparsityPattern object does not hold the values of the
864 * matrix, it only stores the places where entries are. The entries
865 * themselves are stored in objects of type SparseMatrix, of which our
866 * variable system_matrix is one.
867 *
868
869 *
870 * The distinction between sparsity pattern and matrix was made to allow
871 * several matrices to use the same sparsity pattern. This may not seem
872 * relevant here, but when you consider the size which matrices can have,
873 * and that it may take some time to build the sparsity pattern, this
874 * becomes important in large-scale problems if you have to store several
875 * matrices in your program.
876 *
877 * @code
878 *   system_matrix.reinit(sparsity_pattern);
879 *  
880 * @endcode
881 *
882 * The last thing to do in this function is to set the sizes of the right
883 * hand side vector and the solution vector to the right values:
884 *
885 * @code
886 *   solution.reinit(dof_handler.n_dofs());
887 *   system_rhs.reinit(dof_handler.n_dofs());
888 *   }
889 *  
890 * @endcode
891 *
892 *
893 * <a name="step_3-Step3assemble_system"></a>
894 * <h4>Step3::assemble_system</h4>
895 *
896
897 *
898 *
899
900 *
901 * The next step is to compute the entries of the matrix and right hand side
902 * that form the linear system from which we compute the solution. This is the
903 * central function of each finite element program and we have discussed the
904 * primary steps in the introduction already.
905 *
906
907 *
908 * The general approach to assemble matrices and vectors is to loop over all
909 * cells, and on each cell compute the contribution of that cell to the global
910 * matrix and right hand side by quadrature. The point to realize now is that
911 * we need the values of the shape functions at the locations of quadrature
912 * points on the real cell. However, both the finite element shape functions
913 * as well as the quadrature points are only defined on the reference
914 * cell. They are therefore of little help to us, and we will in fact hardly
915 * ever query information about finite element shape functions or quadrature
916 * points from these objects directly.
917 *
918
919 *
920 * Rather, what is required is a way to map this data from the reference cell
921 * to the real cell. Classes that can do that are derived from the Mapping
922 * class, though one again often does not have to deal with them directly:
923 * many functions in the library can take a mapping object as argument, but
924 * when it is omitted they simply resort to the standard bilinear Q1
925 * mapping. We will go this route, and not bother with it for the moment (we
926 * come back to this in @ref step_10 "step-10", @ref step_11 "step-11", and @ref step_12 "step-12").
927 *
928
929 *
930 * So what we now have is a collection of three classes to deal with: finite
931 * element, quadrature, and mapping objects. That's too much, so there is one
932 * type of class that orchestrates information exchange between these three:
933 * the FEValues class. If given one instance of each of these three objects
934 * (or only two, with the mapping implicitly taken to be the linear one), it will be able to provide you
935 * with information about values and gradients of shape functions at
936 * quadrature points on a real cell.
937 *
938
939 *
940 * Using all this, we will assemble the linear system for this problem in the
941 * following function:
942 *
943 * @code
944 *   void Step3::assemble_system()
945 *   {
946 * @endcode
947 *
948 * Ok, let's start: we need a quadrature formula for the evaluation of the
949 * integrals on each cell. Let's take a Gauss formula with two quadrature
950 * points in each direction, i.e. a total of four points since we are in
951 * 2d. This quadrature formula integrates polynomials of degrees up to three
952 * exactly (in 1d). It is easy to check that this is sufficient for the
953 * present problem:
954 *
955 * @code
956 *   const QGauss<2> quadrature_formula(fe.degree + 1);
957 * @endcode
958 *
959 * And we initialize the object which we have briefly talked about above. It
960 * needs to be told which finite element we want to use, and the quadrature
961 * points and their weights (jointly described by a Quadrature object). As
962 * mentioned, we use the implied Q1 mapping, rather than specifying one
963 * ourselves explicitly. Finally, we have to tell it what we want it to
964 * compute on each cell: we need the values of the shape functions at the
965 * quadrature points (for the right hand side @f$(\varphi_i,f)@f$), their
966 * gradients (for the matrix entries @f$(\nabla \varphi_i, \nabla
967 * \varphi_j)@f$), and also the weights of the quadrature points and the
968 * determinants of the Jacobian transformations from the reference cell to
969 * the real cells.
970 *
971
972 *
973 * This list of what kind of information we actually need is given as a
974 * collection of flags as the third argument to the constructor of
975 * FEValues. Since these values have to be recomputed, or updated, every
976 * time we go to a new cell, all of these flags start with the prefix
977 * <code>update_</code> and then indicate what it actually is that we want
978 * updated. The flag to give if we want the values of the shape functions
979 * computed is #update_values; for the gradients it is
980 * #update_gradients. The determinants of the Jacobians and the quadrature
981 * weights are always used together, so only the products (Jacobians times
982 * weights, or short <code>JxW</code>) are computed; since we need them, we
983 * have to list #update_JxW_values as well:
984 *
985 * @code
986 *   FEValues<2> fe_values(fe,
987 *   quadrature_formula,
988 *   update_values | update_gradients | update_JxW_values);
989 * @endcode
990 *
991 * The advantage of this approach is that we can specify what kind of
992 * information we actually need on each cell. It is easily understandable
993 * that this approach can significantly speed up finite element computations,
994 * compared to approaches where everything, including second derivatives,
995 * normal vectors to cells, etc are computed on each cell, regardless of
996 * whether they are needed or not.
997 *
998
999 *
1000 * @note The syntax <code>update_values | update_gradients |
1001 * update_JxW_values</code> is not immediately obvious to anyone not
1002 * used to programming bit operations in C for years already. First,
1003 * <code>operator|</code> is the <i>bitwise or operator</i>, i.e.,
1004 * it takes two integer arguments that are interpreted as bit
1005 * patterns and returns an integer in which every bit is set for
1006 * which the corresponding bit is set in at least one of the two
1007 * arguments. For example, consider the operation
1008 * <code>9|10</code>. In binary, <code>9=0b1001</code> (where the
1009 * prefix <code>0b</code> indicates that the number is to be
1010 * interpreted as a binary number) and <code>10=0b1010</code>. Going
1011 * through each bit and seeing whether it is set in one of the
1012 * argument, we arrive at <code>0b1001|0b1010=0b1011</code> or, in
1013 * decimal notation, <code>9|10=11</code>. The second piece of
1014 * information you need to know is that the various
1015 * <code>update_*</code> flags are all integers that have <i>exactly
1016 * one bit set</i>. For example, assume that
1017 * <code>update_values=0b00001=1</code>,
1018 * <code>update_gradients=0b00010=2</code>,
1019 * <code>update_JxW_values=0b10000=16</code>. Then
1020 * <code>update_values|update_gradients|update_JxW_values = 0b00001|0b00010|0b10000 =
1021 * 0b10011 = 19</code>. In other words, we obtain a number that
1022 * <i>encodes a binary mask representing all of the operations you
1023 * want to happen</i>, where each operation corresponds to exactly
1024 * one bit in the integer that, if equal to one, means that a
1025 * particular piece should be updated on each cell and, if it is
1026 * zero, means that we need not compute it. In other words, even
1027 * though <code>operator|</code> is the <i>bitwise OR operation</i>,
1028 * what it really represents is <i>I want this AND that AND the
1029 * other</i>. Such binary masks are quite common in C programming,
1030 * and perhaps less so in higher level languages like C++, but they serve
1031 * the current purpose quite well.
1032 *
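 *
 * To make the arithmetic concrete, here is a small stand-alone sketch; the
 * numeric values are the hypothetical ones used in the example above, not
 * the actual values of the deal.II flags:
 *
 * @code
 *   const unsigned int values_bit    = 0b00001; //  1 (hypothetical value)
 *   const unsigned int gradients_bit = 0b00010; //  2 (hypothetical value)
 *   const unsigned int JxW_bit       = 0b10000; // 16 (hypothetical value)
 *
 *   // operator| combines the three single-bit flags into one mask:
 *   const unsigned int mask = values_bit | gradients_bit | JxW_bit;
 *   // mask == 0b10011 == 19
 * @endcode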
1033
1034 *
1035 * For use further down below, we define a shortcut for a value that will
1036 * be used very frequently. Namely, an abbreviation for the number of degrees
1037 * of freedom on each cell (since we are in 2d and degrees of freedom are
1038 * associated with vertices only, this number is four, but we rather want to
1039 * write the definition of this variable in a way that does not preclude us
1040 * from later choosing a different finite element that has a different
1041 * number of degrees of freedom per cell, or work in a different space
1042 * dimension).
1043 *
1044
1045 *
1046 * In general, it is a good idea to use a symbolic name instead of
1047 * hard-coding these numbers even if you know them, since for example,
1048 * you may want to change the finite element at some time. Changing the
1049 * element would have to be done in a different function and it is easy
1050 * to forget to make a corresponding change in another part of the program.
1051 * It is better to not rely on your own calculations, but instead ask
1052 * the right object for the information: Here, we ask the finite element
1053 * to tell us about the number of degrees of freedom per cell and we
1054 * will get the correct number regardless of the space dimension or
1055 * polynomial degree we may have chosen elsewhere in the program.
1056 *
1057
1058 *
1059 * The shortcut here, defined primarily to discuss the basic concept
1060 * and not because it saves a lot of typing, will then make the following
1061 * loops a bit more readable. You will see such shortcuts in many places in
1062 * larger programs, and `dofs_per_cell` is one that is more or less the
1063 * conventional name for this kind of object.
1064 *
1065 * @code
1066 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1067 *  
1068 * @endcode
1069 *
1070 * Now, we said that we wanted to assemble the global matrix and vector
1071 * cell-by-cell. We could write the results directly into the global matrix,
1072 * but this is not very efficient since access to the elements of a sparse
1073 * matrix is slow. Rather, we first compute the contribution of each cell in
1074 * a small matrix with the degrees of freedom on the present cell, and only
1075 * transfer them to the global matrix when the computations are finished for
1076 * this cell. We do the same for the right hand side vector. So let's first
1077 * allocate these objects (these being local objects, all degrees of freedom
1078 * are coupling with all others, and we should use a full matrix object
1079 * rather than a sparse one for the local operations; everything will be
1080 * transferred to a global sparse matrix later on):
1081 *
1082 * @code
1083 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1084 *   Vector<double> cell_rhs(dofs_per_cell);
1085 *  
1086 * @endcode
1087 *
1088 * When assembling the contributions of each cell, we do this with the local
1089 * numbering of the degrees of freedom (i.e. the number running from zero
1090 * through dofs_per_cell-1). However, when we transfer the result into the
1091 * global matrix, we have to know the global numbers of the degrees of
1092 * freedom. When we query them, we need a scratch (temporary) array for
1093 * these numbers (see the discussion at the end of the introduction for
1094 * the type, types::global_dof_index, used here):
1095 *
1096 * @code
1097 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1098 *  
1099 * @endcode
1100 *
1101 * Now for the loop over all cells. We have seen before how this works for a
1102 * triangulation. A DoFHandler has cell iterators that are exactly analogous
1103 * to those of a Triangulation, but with extra information about the degrees
1104 * of freedom for the finite element you're using. Looping over the active
1105 * cells of a degree-of-freedom handler works the same as for a triangulation.
1106 *
1107
1108 *
1109 * Note that we declare the type of the cell as `const auto &` instead of
1110 * `auto` this time around. In step 1, we were modifying the cells of the
1111 * triangulation by flagging them with refinement indicators. Here we're only
1112 * examining the cells without modifying them, so it's good practice to
1113 * declare `cell` as `const` in order to enforce this invariant.
1114 *
1115 * @code
1116 *   for (const auto &cell : dof_handler.active_cell_iterators())
1117 *   {
1118 * @endcode
1119 *
1120 * We are now sitting on one cell, and we would like the values and
1121 * gradients of the shape functions to be computed, as well as the
1122 * determinants of the Jacobian matrices of the mapping between
1123 * reference cell and true cell, at the quadrature points. Since all
1124 * these values depend on the geometry of the cell, we have to have the
1125 * FEValues object re-compute them on each cell:
1126 *
1127 * @code
1128 *   fe_values.reinit(cell);
1129 *  
1130 * @endcode
1131 *
1132 * Next, reset the local cell's contributions to global matrix and
1133 * global right hand side to zero, before we fill them:
1134 *
1135 * @code
1136 *   cell_matrix = 0;
1137 *   cell_rhs = 0;
1138 *  
1139 * @endcode
1140 *
1141 * Now it is time to start integration over the cell, which we
1142 * do by looping over all quadrature points, which we will
1143 * number by q_index.
1144 *
1145 * @code
1146 *   for (const unsigned int q_index : fe_values.quadrature_point_indices())
1147 *   {
1148 * @endcode
1149 *
1150 * First assemble the matrix: For the Laplace problem, the
1151 * matrix on each cell is the integral over the gradients of
1152 * shape function i and j. Since we do not integrate, but
1153 * rather use quadrature, this is the sum over all
1154 * quadrature points of the integrands times the determinant
1155 * of the Jacobian matrix at the quadrature point times the
1156 * weight of this quadrature point. You can get the gradient
1157 * of shape function @f$i@f$ at quadrature point with number q_index by
1158 * using <code>fe_values.shape_grad(i,q_index)</code>; this
1159 * gradient is a 2-dimensional vector (in fact it is of type
1160 * Tensor@<1,dim@>, with here dim=2) and the product of two
1161 * such vectors is the scalar product, i.e. the product of
1162 * the two shape_grad function calls is the dot
1163 * product. This is in turn multiplied by the Jacobian
1164 * determinant and the quadrature point weight (that one
1165 * gets together by the call to FEValues::JxW() ). Finally,
1166 * this is repeated for all shape functions @f$i@f$ and @f$j@f$:
1167 *
1168 * @code
1169 *   for (const unsigned int i : fe_values.dof_indices())
1170 *   for (const unsigned int j : fe_values.dof_indices())
1171 *   cell_matrix(i, j) +=
1172 *   (fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
1173 *   fe_values.shape_grad(j, q_index) * // grad phi_j(x_q)
1174 *   fe_values.JxW(q_index)); // dx
1175 *  
1176 * @endcode
1177 *
1178 * We then do the same thing for the right hand side. Here,
1179 * the integral is over the shape function i times the right
1180 * hand side function, which we choose to be the function
1181 * with constant value one (more interesting examples will
1182 * be considered in the following programs).
1183 *
1184 * @code
1185 *   for (const unsigned int i : fe_values.dof_indices())
1186 *   cell_rhs(i) += (fe_values.shape_value(i, q_index) * // phi_i(x_q)
1187 *   1. * // f(x_q)
1188 *   fe_values.JxW(q_index)); // dx
1189 *   }
1190 * @endcode
1191 *
1192 * Now that we have the contribution of this cell, we have to transfer
1193 * it to the global matrix and right hand side. To this end, we first
1194 * have to find out which global numbers the degrees of freedom on this
1195 * cell have. Let's simply ask the cell for that information:
1196 *
1197 * @code
1198 *   cell->get_dof_indices(local_dof_indices);
1199 *  
1200 * @endcode
1201 *
1202 * Then again loop over all shape functions i and j and transfer the
1203 * local elements to the global matrix. The global numbers can be
1204 * obtained using local_dof_indices[i]:
1205 *
1206 * @code
1207 *   for (const unsigned int i : fe_values.dof_indices())
1208 *   for (const unsigned int j : fe_values.dof_indices())
1209 *   system_matrix.add(local_dof_indices[i],
1210 *   local_dof_indices[j],
1211 *   cell_matrix(i, j));
1212 *  
1213 * @endcode
1214 *
1215 * And again, we do the same thing for the right hand side vector.
1216 *
1217 * @code
1218 *   for (const unsigned int i : fe_values.dof_indices())
1219 *   system_rhs(local_dof_indices[i]) += cell_rhs(i);
1220 *   }
1221 *  
1222 *  
1223 * @endcode
1224 *
1225 * Now almost everything is set up for the solution of the discrete
1226 * system. However, we have not yet taken care of boundary values (in fact,
1227 * Laplace's equation without Dirichlet boundary values is not even uniquely
1228 * solvable, since you can add an arbitrary constant to the discrete
1229 * solution). We therefore have to do something about the situation.
1230 *
1231
1232 *
1233 * For this, we first obtain a list of the degrees of freedom on the
1234 * boundary and the value the shape function shall have there. For
1235 * simplicity, we only interpolate the boundary value function, rather than
1236 * projecting it onto the boundary. There is a function in the library which
1237 * does exactly this: VectorTools::interpolate_boundary_values(). Its
1238 * parameters are (omitting parameters for which default values exist and
1239 * that we don't care about): the DoFHandler object to get the global
1240 * numbers of the degrees of freedom on the boundary; the component of the
1241 * boundary where the boundary values shall be interpolated; the boundary
1242 * value function itself; and the output object.
1243 *
1244
1245 *
1246 * The component of the boundary is meant as follows: in many cases, you may
1247 * want to impose certain boundary values only on parts of the boundary. For
1248 * example, you may have inflow and outflow boundaries in fluid dynamics, or
1249 * clamped and free parts of bodies in deformation computations of
1250 * bodies. Then you will want to denote these different parts of the
1251 * boundary by indicators, and tell the interpolate_boundary_values
1252 * function to only compute the boundary values on a certain part of the
1253 * boundary (e.g. the clamped part, or the inflow boundary). By default,
1254 * all boundaries have a 0 boundary indicator, unless otherwise specified.
1255 * (For example, many functions in namespace GridGenerator specify otherwise.)
1256 * If sections of the boundary have different boundary conditions, you have to
1257 * number those parts with different boundary indicators. The function call
1258 * below will then only determine boundary values for those parts of the
1259 * boundary for which the boundary indicator is in fact the zero specified as
1260 * the second argument.
1261 *
1262
1263 *
1264 * The function describing the boundary values is an object of type Function
1265 * or of a derived class. One of the derived classes is
1266 * Functions::ZeroFunction, which describes (not unexpectedly) a function
1267 * which is zero everywhere. We create such an object in-place and pass it to
1268 * the VectorTools::interpolate_boundary_values() function.
1269 *
1270
1271 *
1272 * Finally, the output object is a list of pairs of global degree of freedom
1273 * numbers (i.e. the number of the degrees of freedom on the boundary) and
1274 * their boundary values (which are zero here for all entries). This mapping
1275 * of DoF numbers to boundary values is done by the <code>std::map</code>
1276 * class.
1277 *
1278 * @code
1279 *   std::map<types::global_dof_index, double> boundary_values;
1280 *   VectorTools::interpolate_boundary_values(dof_handler,
1281 *   types::boundary_id(0),
1282 *   Functions::ZeroFunction<2>(),
1283 *   boundary_values);
1284 * @endcode
1285 *
1286 * Now that we got the list of boundary DoFs and their respective boundary
1287 * values, let's use them to modify the system of equations
1288 * accordingly. This is done by the following function call:
1289 *
1290 * @code
1291 *   MatrixTools::apply_boundary_values(boundary_values,
1292 *   system_matrix,
1293 *   solution,
1294 *   system_rhs);
1295 *   }
1296 *  
1297 *  
1298 * @endcode
1299 *
1300 *
1301 * <a name="step_3-Step3solve"></a>
1302 * <h4>Step3::solve</h4>
1303 *
1304
1305 *
1306 * The following function solves the discretized equation. As discussed in
1307 * the introduction, we want to use an iterative solver to do this,
1308 * specifically the Conjugate Gradient (CG) method.
1309 *
1310
1311 *
1312 * The way to do this in deal.II is a three-step process:
1313 * - First, we need to have an object that knows how to tell the CG algorithm
1314 * when to stop. This is done by using a SolverControl object, and as
1315 * stopping criterion we say: stop after a maximum of 1000 iterations (which
1316 * is far more than is needed for 1089 variables; see the results section to
1317 * find out how many were really used), and stop if the norm of the residual
1318 * is below @f$\tau=10^{-6}\|\mathbf b\|@f$ where @f$\mathbf b@f$ is the right hand
1319 * side vector. In practice, this latter criterion will be the one
1320 * which stops the iteration.
1321 * - Then we need the solver itself. The template parameter to the SolverCG
1322 * class is the type of the vectors we are using.
1323 * - The last step is to actually solve the system of equations. The CG solver
1324 * takes as arguments the components of the linear system @f$Ax=b@f$ (in the
1325 * order in which they appear in this equation), and a preconditioner
1326 * as the fourth argument. We don't feel ready to delve into preconditioners
1327 * yet, so we tell it to use the identity operation as preconditioner. Later
1328 * tutorial programs will spend a significant amount of time and space on
1329 * constructing better preconditioners.
1330 *
1331
1332 *
1333 * At the end of this process, the `solution` variable contains the
1334 * nodal values of the solution function. At the end of the function, we
1335 * output how many Conjugate Gradients iterations it took to solve the
1336 * linear system.
1337 *
1338 * @code
1339 *   void Step3::solve()
1340 *   {
1341 *   SolverControl solver_control(1000, 1e-6 * system_rhs.l2_norm());
1342 *   SolverCG<Vector<double>> solver(solver_control);
1343 *   solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());
1344 *  
1345 *   std::cout << solver_control.last_step()
1346 *   << " CG iterations needed to obtain convergence." << std::endl;
1347 *   }
1348 *  
1349 *  
1350 * @endcode
1351 *
1352 *
1353 * <a name="step_3-Step3output_results"></a>
1354 * <h4>Step3::output_results</h4>
1355 *
1356
1357 *
1358 * The last part of a typical finite element program is to output the results
1359 * and maybe do some postprocessing (for example compute the maximal stress
1360 * values at the boundary, or the average flux across the outflow, etc). We
1361 * have no such postprocessing here, but we would like to write the solution
1362 * to a file.
1363 *
1364 * @code
1365 *   void Step3::output_results() const
1366 *   {
1367 * @endcode
1368 *
1369 * To write the output to a file, we need an object which knows about output
1370 * formats and the like. This is the DataOut class, and we need an object of
1371 * that type:
1372 *
1373 * @code
1374 *   DataOut<2> data_out;
1375 * @endcode
1376 *
1377 * Now we have to tell it where to take the values from which it shall
1378 * write. We tell it which DoFHandler object to use, and the solution vector
1379 * (and the name by which the solution variable shall appear in the output
1380 * file). If we had more than one vector which we would like to look at in
1381 * the output (for example right hand sides, errors per cell, etc) we would
1382 * add them as well:
1383 *
1384 * @code
1385 *   data_out.attach_dof_handler(dof_handler);
1386 *   data_out.add_data_vector(solution, "solution");
1387 * @endcode
1388 *
1389 * After the DataOut object knows which data it is to work on, we have to
1390 * tell it to process them into something the back ends can handle. The
1391 * reason is that we have separated the frontend (which knows about how to
1392 * treat DoFHandler objects and data vectors) from the back end (which knows
1393 * many different output formats) and use an intermediate data format to
1394 * transfer data from the front- to the backend. The data is transformed
1395 * into this intermediate format by the following function:
1396 *
1397 * @code
1398 *   data_out.build_patches();
1399 *  
1400 * @endcode
1401 *
1402 * Now we have everything in place for the actual output. Just open a file
1403 * and write the data into it, using VTK format (there are many other
1404 * functions in the DataOut class we are using here that can write the
1405 * data in postscript, AVS, GMV, Gnuplot, or some other file
1406 * formats):
1407 *
1408 * @code
1409 *   const std::string filename = "solution.vtk";
1410 *   std::ofstream output(filename);
1411 *   data_out.write_vtk(output);
1412 *   std::cout << "Output written to " << filename << std::endl;
1413 *   }
1414 *  
1415 *  
1416 * @endcode
1417 *
1418 *
1419 * <a name="step_3-Step3run"></a>
1420 * <h4>Step3::run</h4>
1421 *
1422
1423 *
1424 * Finally, the last function of this class is the main function which calls
1425 * all the other functions of the <code>Step3</code> class. The order in which
1426 * this is done resembles the order in which most finite element programs
1427 * work. Since the names are mostly self-explanatory, there is not much to
1428 * comment about:
1429 *
1430 * @code
1431 *   void Step3::run()
1432 *   {
1433 *   make_grid();
1434 *   setup_system();
1435 *   assemble_system();
1436 *   solve();
1437 *   output_results();
1438 *   }
1439 *  
1440 *  
1441 * @endcode
1442 *
1443 *
1444 * <a name="step_3-Thecodemaincodefunction"></a>
1445 * <h3>The <code>main</code> function</h3>
1446 *
1447
1448 *
1449 * This is the main function of the program. Since the concept of a
1450 * main function is mostly a remnant from the pre-object oriented era
1451 * before C++ programming, it often does not do much more than
1452 * creating an object of the top-level class and calling its principal
1453 * function.
1454 *
1455 * @code
1456 *   int main()
1457 *   {
1458 *   Step3 laplace_problem;
1459 *   laplace_problem.run();
1460 *  
1461 *   return 0;
1462 *   }
1463 * @endcode
1464<a name="step_3-Results"></a><h1>Results</h1>
1465
1466
1467The output of the program looks as follows:
1468@code
1469Number of active cells: 1024
1470Number of degrees of freedom: 1089
147136 CG iterations needed to obtain convergence.
1472Output written to solution.vtk
1473@endcode
1474
1475The last line is the output we generated at the bottom of the
1476`output_results()` function: The program generated the file
1477<code>solution.vtk</code>, which is in the VTK format that is widely
1478used by many visualization programs today -- including the two
1479heavy-weights <a href="https://www.llnl.gov/visit">VisIt</a> and
1480<a href="https://www.paraview.org">Paraview</a>, which are the most
1481commonly used programs for this purpose.
1482
1483Using VisIt, it is not very difficult to generate a picture of the
1484solution like this:
1485<table width="60%" align="center">
1486 <tr>
1487 <td align="center">
1488 <img src="https://www.dealii.org/images/steps/developer/step-3.solution-3.png" alt="Visualization of the solution of step-3">
1489 </td>
1490 </tr>
1491</table>
1492It shows both the solution and the mesh, elevated above the @f$x@f$-@f$y@f$ plane
1493based on the value of the solution at each point. Of course the solution
1494here is not particularly exciting, but that is a result of both what the
1495Laplace equation represents and the right hand side @f$f(\mathbf x)=1@f$ we
1496have chosen for this program: The Laplace equation describes (among many
1497other uses) the vertical deformation of a membrane subject to an external
1498(also vertical) force. In the current example, the membrane's borders
1499are clamped to a square frame with no vertical variation; a constant
1500force density will therefore intuitively lead to a membrane that
1501simply bulges upward -- like the one shown above.
1502
1503VisIt and Paraview both allow playing with various kinds of visualizations
1504of the solution. Several video lectures show how to use these programs.
1505See also <a href="https://www.math.colostate.edu/~bangerth/videos.676.11.html">video lecture 11</a>, <a href="https://www.math.colostate.edu/~bangerth/videos.676.32.html">video lecture 32</a>.
1506
1507
1508
1509<a name="step-3-extensions"></a>
1510<a name="step_3-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
1511
1512
1513If you want to play around a little bit with this program, here are a few
1514suggestions:
1515
1516
1517<ul>
1518 <li>
1519 Change the geometry and mesh: In the program, we have generated a square
1520 domain and mesh by using the GridGenerator::hyper_cube()
1521 function. However, the <code>GridGenerator</code> has a good number of other
1522 functions as well. Try an L-shaped domain, a ring, or other domains you find
1523 there.
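 For example, a minimal sketch of such a change (keeping the rest of
 <code>make_grid()</code> unchanged) would replace the
 GridGenerator::hyper_cube() call by
 @code
 GridGenerator::hyper_L(triangulation, -1., 1.);
 @endcode
 which creates an L-shaped domain, i.e., the square with one quadrant
 removed.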
1524 </li>
1525
1526 <li>
1527 Change the boundary condition: The code uses the Functions::ZeroFunction
1528 function to generate zero boundary conditions. However, you may want to try
1529 non-zero constant boundary values using
1530 <code>Functions::ConstantFunction&lt;2&gt;(1)</code> instead of
1531 <code>Functions::ZeroFunction&lt;2&gt;()</code> to have unit Dirichlet boundary
1532 values. More exotic functions are described in the documentation of the
1533 Functions namespace, and you may pick one to describe your particular boundary
1534 values.
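 As a minimal sketch, the call in <code>assemble_system()</code> would then
 read
 @code
 VectorTools::interpolate_boundary_values(dof_handler,
                                          types::boundary_id(0),
                                          Functions::ConstantFunction<2>(1.),
                                          boundary_values);
 @endcode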
1535 </li>
1536
1537 <li> Modify the type of boundary condition: Presently, what happens
1538 is that we use Dirichlet boundary values all around, since the
1539 default is that all boundary parts have boundary indicator zero, and
1540 then we tell the
1541 VectorTools::interpolate_boundary_values() function to
1542 interpolate boundary values to zero on all boundary components with
1543 indicator zero. <p> We can change this behavior if we assign parts
1544 of the boundary different indicators. For example, try this
1545 immediately after calling GridGenerator::hyper_cube():
1546 @code
1547 triangulation.begin_active()->face(0)->set_boundary_id(1);
1548 @endcode
1549
 1550 What this does is the following: it first asks the triangulation to
 1551 return an iterator that points to the first active cell. Of course,
1552 this being the coarse mesh for the triangulation of a square, the
1553 triangulation has only a single cell at this moment, and it is
1554 active. Next, we ask the cell to return an iterator to its first
1555 face, and then we ask the face to reset the boundary indicator of
1556 that face to 1. What then follows is this: When the mesh is refined,
1557 faces of child cells inherit the boundary indicator of their
1558 parents, i.e. even on the finest mesh, the faces on one side of the
1559 square have boundary indicator 1. Later, when we get to
1560 interpolating boundary conditions, the
1561 VectorTools::interpolate_boundary_values() call will only produce boundary
1562 values for those faces that have zero boundary indicator, and leave
1563 those faces alone that have a different boundary indicator. What
1564 this then does is to impose Dirichlet boundary conditions on the
1565 former, and homogeneous Neumann conditions on the latter (i.e. zero
1566 normal derivative of the solution, unless one adds additional terms
1567 to the right hand side of the variational equality that deal with
1568 potentially non-zero Neumann conditions). You will see this if you
1569 run the program.
1570
1571 An alternative way to change the boundary indicator is to label
1572 the boundaries based on the Cartesian coordinates of the face centers.
 1573 For example, we can label all of the faces along the top and
 1574 bottom boundaries with a boundary indicator 1 by checking to
 1575 see if the face centers' y-coordinates are within a tolerance
1576 (here `1e-12`) of -1 and 1. Try this immediately after calling
1577 GridGenerator::hyper_cube(), as before:
1578 @code
1579 for (auto &face : triangulation.active_face_iterators())
1580 if (face->at_boundary())
1581 if (std::fabs(face->center()(1) - (-1.0)) < 1e-12 ||
1582 std::fabs(face->center()(1) - (1.0)) < 1e-12)
1583 face->set_boundary_id(1);
1584 @endcode
1585 Although this code is a bit longer than before, it is useful for
1586 complex geometries, as it does not require knowledge of face labels.
1587
1588 <li>
1589 A slight variation of the last point would be to set different boundary
1590 values as above, but then use a different boundary value function for
1591 boundary indicator one. In practice, what you have to do is to add a second
1592 call to <code>interpolate_boundary_values</code> for boundary indicator one:
1593 @code
1594 VectorTools::interpolate_boundary_values(dof_handler,
1595 types::boundary_id(1),
1596 Functions::ConstantFunction<2>(1.),
1597 boundary_values);
1598 @endcode
1599 If you have this call immediately after the first one to this function, then
1600 it will interpolate boundary values on faces with boundary indicator 1 to the
1601 unit value, and merge these interpolated values with those previously
1602 computed for boundary indicator 0. The result will be that we will get
1603 discontinuous boundary values, zero on three sides of the square, and one on
1604 the fourth.
1605
1606 <li>
1607 Use triangles: As mentioned in the results section of @ref step_1 "step-1", for
1608 historical reasons, almost all tutorial programs for deal.II are
1609 written using quadrilateral or hexahedral meshes. But deal.II also
1610 supports triangular and tetrahedral meshes. So a good experiment would
1611 be to replace the mesh used here by a triangular mesh.
1612
1613 This is *almost* trivial. First, as discussed in @ref step_1 "step-1", we may want
1614 to start with the quadrilateral mesh we are already creating, and
1615 then convert it into a triangular one. You can do that by replacing
1616 the first line of `Step3::make_grid()` by the following code:
1617 @code
1618 Triangulation<2> triangulation_quad;
1619 GridGenerator::hyper_cube(triangulation_quad, -1, 1);
 1620 GridGenerator::convert_hypercube_to_simplex_mesh(triangulation_quad,
 1621                                                  triangulation);
 1622 @endcode
 1623 The GridGenerator::convert_hypercube_to_simplex_mesh() call replaces each
 1624 quadrilateral by eight triangles with half the diameter of the original
1625 quadrilateral; as a consequence, the resulting mesh is substantially
1626 finer and one might expect that the solution is consequently more
1627 accurate (but also has many more degrees of freedom). That is a question
1628 you can explore with the techniques discussed in the "Results" section
1629 of @ref step_4 "step-4", but that goes beyond what we want to demonstrate here.
1630
1631 If you run this program, you will run into an error message that
1632 will look something like this:
1633 @code
1634--------------------------------------------------------
1635An error occurred in line <2633> of file </home/bangerth/p/deal.II/1/dealii/include/deal.II/dofs/dof_accessor.templates.h> in function
1636 const ::FiniteElement<dimension_, space_dimension_>& ::DoFCellAccessor<dim, spacedim, lda>::get_fe() const [with int dimension_ = 2; int space_dimension_ = 2; bool level_dof_access = false]
1637The violated condition was:
1638 this->reference_cell() == fe.reference_cell()
1639Additional information:
1640 The reference-cell type used on this cell (Tri) does not match the
1641 reference-cell type of the finite element associated with this cell
1642 (Quad). Did you accidentally use simplex elements on hypercube meshes
1643 (or the other way around), or are you using a mixed mesh and assigned
1644 a simplex element to a hypercube cell (or the other way around) via
1645 the active_fe_index?
1646 @endcode
1647 It is worth carefully reading the error message. It doesn't just
1648 state that there is an error, but also how it may have
1649 arisen. Specifically, it asks whether we are using a finite element
1650 for simplex meshes (in 2d simplices are triangles) with a hypercube
1651 mesh (in 2d hypercubes are quadrilaterals) or the other way around?
1652
1653 Of course, this is exactly what we are doing, though this may
1654 perhaps not be clear to you. But if you look up the documentation,
1655 you will find that the FE_Q element we use in the main class can
1656 only be used on hypercube meshes; what we *want* to use instead now
1657 that we are using a simplex mesh is the FE_SimplexP class that is the
1658 equivalent to FE_Q for simplex cells. (To do this, you will also
1659 have to add `#include <deal.II/fe/fe_simplex_p.h>` at the top of the file.)
1660
1661 The last thing you need to change (which at the time of writing is
1662 unfortunately not prompted by getting an error message) is that when
1663 we integrate, we need to use a quadrature formula that is
 1664 appropriate for triangles. This is done by changing QGauss to
 1665 QGaussSimplex in the code.
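 A minimal sketch of these two changes (using the class and function names
 of this program) could look as follows:
 @code
 #include <deal.II/fe/fe_simplex_p.h>

 // Member variable in the Step3 class, replacing `FE_Q<2> fe;`
 // (it is still initialized with fe(/* polynomial degree = */ 1)
 // in the constructor):
 FE_SimplexP<2> fe;

 // In assemble_system(), a quadrature rule defined on triangles:
 const QGaussSimplex<2> quadrature_formula(fe.degree + 1);
 @endcode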
1666
1667 With all of these steps, you then get the following solution:
1668 <img src="https://www.dealii.org/images/steps/developer/step-3.solution-triangles.png" alt="Visualization of the solution of step-3 using triangles">
1669
1670 <li>
1671 Observe convergence: We will only discuss computing errors in norms in
1672 @ref step_7 "step-7", but it is easy to check that computations converge
1673 already here. For example, we could evaluate the value of the solution in a
1674 single point and compare the value for different %numbers of global
1675 refinement (the number of global refinement steps is set in
 1676 <code>Step3::make_grid</code> above). To evaluate the
1677 solution at a point, say at @f$(\frac 13, \frac 13)@f$, we could add the
 1678 following code to the <code>Step3::output_results</code> function:
1679 @code
1680 std::cout << "Solution at (1/3,1/3): "
1681 << VectorTools::point_value(dof_handler, solution,
1682 Point<2>(1./3, 1./3))
1683 << std::endl;
1684 @endcode
1685 For 1 through 9 global refinement steps, we then get the following sequence
1686 of point values:
1687 <table align="center" class="doxtable">
1688 <tr> <th># of refinements</th> <th>@f$u_h(\frac 13,\frac13)@f$</th> </tr>
1689 <tr> <td>1</td> <td>0.166667</td> </tr>
1690 <tr> <td>2</td> <td>0.227381</td> </tr>
1691 <tr> <td>3</td> <td>0.237375</td> </tr>
1692 <tr> <td>4</td> <td>0.240435</td> </tr>
1693 <tr> <td>5</td> <td>0.241140</td> </tr>
1694 <tr> <td>6</td> <td>0.241324</td> </tr>
1695 <tr> <td>7</td> <td>0.241369</td> </tr>
1696 <tr> <td>8</td> <td>0.241380</td> </tr>
1697 <tr> <td>9</td> <td>0.241383</td> </tr>
1698 </table>
1699 By noticing that the difference between each two consecutive values reduces
1700 by about a factor of 4, we can conjecture that the "correct" value may be
1701 @f$u(\frac 13, \frac 13)\approx 0.241384@f$. In fact, if we assumed this to be
1702 the correct value, we could show that the sequence above indeed shows @f${\cal
1703 O}(h^2)@f$ convergence &mdash; theoretically, the convergence order should be
1704 @f${\cal O}(h^2 |\log h|)@f$ but the symmetry of the domain and the mesh may lead
1705 to the better convergence order observed.
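 For example, writing @f$u_h^{(k)}@f$ for the solution after @f$k@f$ global
 refinement steps, the last few differences in the table above are
 @f{align*}{
   u_h^{(7)}(\tfrac 13,\tfrac 13)-u_h^{(6)}(\tfrac 13,\tfrac 13) &= 0.241369-0.241324 = 4.5\times 10^{-5},\\
   u_h^{(8)}(\tfrac 13,\tfrac 13)-u_h^{(7)}(\tfrac 13,\tfrac 13) &= 0.241380-0.241369 = 1.1\times 10^{-5},\\
   u_h^{(9)}(\tfrac 13,\tfrac 13)-u_h^{(8)}(\tfrac 13,\tfrac 13) &= 0.241383-0.241380 = 3.0\times 10^{-6},
 @f}
 i.e., each difference is roughly a quarter of the previous one, consistent
 with an error that decreases like @f$h^2@f$ when @f$h@f$ is halved.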
1706
1707 A slight variant of this would be to repeat the test with quadratic
1708 elements. All you need to do is to set the polynomial degree of the finite
1709 element to two in the constructor
 1710 <code>Step3::Step3</code>.
1711
1712 <li>Convergence of the mean: A different way to see that the solution
1713 actually converges (to something &mdash; we can't tell whether it's really
1714 the correct value!) is to compute the mean of the solution. To this end, add
 1715 the following code to <code>Step3::output_results</code>:
1716 @code
1717 std::cout << "Mean value: "
1718 << VectorTools::compute_mean_value (dof_handler,
1719 QGauss<2>(fe.degree + 1),
1720 solution,
1721 0)
1722 << std::endl;
1723 @endcode
1724 The documentation of the function explains what the second and fourth
1725 parameters mean, while the first and third should be obvious. Doing the same
1726 study again where we change the number of global refinement steps, we get
1727 the following result:
1728 <table align="center" class="doxtable">
1729 <tr> <th># of refinements</th> <th>@f$\int_\Omega u_h(x)\; dx@f$</th> </tr>
1730 <tr> <td>0</td> <td>0.09375000</td> </tr>
1731 <tr> <td>1</td> <td>0.12790179</td> </tr>
1732 <tr> <td>2</td> <td>0.13733440</td> </tr>
1733 <tr> <td>3</td> <td>0.13976069</td> </tr>
1734 <tr> <td>4</td> <td>0.14037251</td> </tr>
1735 <tr> <td>5</td> <td>0.14052586</td> </tr>
1736 <tr> <td>6</td> <td>0.14056422</td> </tr>
1737 <tr> <td>7</td> <td>0.14057382</td> </tr>
1738 <tr> <td>8</td> <td>0.14057622</td> </tr>
1739 </table>
1740 Again, the difference between two adjacent values goes down by about a
1741 factor of four, indicating convergence as @f${\cal O}(h^2)@f$.
1742</ul>
1743
1744
1745
1746<a name="step_3-UsingHDF5tooutputthesolutionandadditionaldata"></a><h3>Using %HDF5 to output the solution and additional data</h3>
1747
1748
1749%HDF5 is a commonly used format that can be read by many scripting
1750languages (e.g. R or Python). It is not difficult to get deal.II to
1751produce some %HDF5 files that can then be used in external scripts to
1752postprocess some of the data generated by this program. Here are some
1753ideas on what is possible.
1754
1755
1756<a name="step_3-Changingtheoutputtoh5"></a><h4> Changing the output to .h5</h4>
1757
1758
1759To fully make use of the automation we first need to introduce a private variable for the number of
1760global refinement steps <code>unsigned int n_refinement_steps </code>, which will be used for the output filename.
1761In <code>make_grid()</code> we then replace <code>triangulation.refine_global(5);</code> with
1762@code
1763n_refinement_steps = 5;
1764triangulation.refine_global(n_refinement_steps);
1765@endcode
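As a hedged sketch, the corresponding addition to the class declaration
(its exact placement among the other private members is up to you) is simply:
@code
class Step3
{
  // ... public interface and other member variables as before ...

private:
  unsigned int n_refinement_steps;
};
@endcode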
1766The deal.II library has two different %HDF5 bindings, one in the HDF5
1767namespace (for interfacing to general-purpose data files)
1768and another one in DataOut (specifically for writing files for the
1769visualization of solutions).
1770Although the HDF5 deal.II binding supports both serial and MPI, the %HDF5 DataOut binding
1771only supports parallel output.
1772For this reason we need to initialize an MPI
1773communicator with only one processor. This is done by adding the following code.
1774@code
1775int main(int argc, char* argv[])
1776{
1777 Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
1778 ...
1779}
1780@endcode
1781Next we change the `Step3::output_results()` output routine as
1782described in the DataOutBase namespace documentation:
1783@code
1784const std::string filename_h5 = "solution_" + std::to_string(n_refinement_steps) + ".h5";
1785DataOutBase::DataOutFilterFlags flags(true, true);
1786DataOutBase::DataOutFilter data_filter(flags);
1787data_out.write_filtered_data(data_filter);
1788data_out.write_hdf5_parallel(data_filter, filename_h5, MPI_COMM_WORLD);
1789@endcode
1790The resulting file can then be visualized just like the VTK file that
1791the original version of the tutorial produces; but, since %HDF5 is a
1792more general file format, it can also easily be processed in scripting
1793languages for other purposes.
1794
1795
1796<a name="step_3-Addingthepointvalueandthemeanseeextensionaboveintotheh5file"></a><h4> Adding the point value and the mean (see extension above) into the .h5 file</h4>
1797
1798
1799After outputting the solution, the file can be opened again to include
1800more datasets. This allows us to keep all the necessary information
1801of our experiment in a single result file, which can then be read and
1802processed by some postprocessing script.
1803(Have a look at HDF5::Group::write_dataset() for further
1804information on the possible output options.)
1805
1806To make this happen, we first include the necessary header into our file:
1807@code
1808#include <deal.II/base/hdf5.h>
1809@endcode
1810Adding the following lines to the end
1811of our output routine adds the information about the value of the
1812solution at a particular point, as well as the mean value of the
1813solution, to our %HDF5 file:
1814@code
1815HDF5::File data_file(filename_h5, HDF5::File::FileAccessMode::open, MPI_COMM_WORLD);
1816Vector<double> point_value(1);
1817point_value[0] = VectorTools::point_value(dof_handler, solution,
1818 Point<2>(1./3, 1./3));
1819data_file.write_dataset("point_value", point_value);
1820Vector<double> mean_value(1);
1821mean_value[0] = VectorTools::compute_mean_value(dof_handler,
1822 QGauss<2>(fe.degree + 1),
1823 solution, 0);
1824data_file.write_dataset("mean_value",mean_value);
1825@endcode
1826
1827
1828
1829<a name="step_3-UsingRandggplot2togenerateplots"></a><h3> Using R and ggplot2 to generate plots</h3>
1830
1831@note Alternatively, one could use the python code in the next subsection.
1832
1833The data put into %HDF5 files above can then be used from scripting
1834languages for further postprocessing. In the following, let us show
1835how this can, in particular, be done with the
1836<a href="https://en.wikipedia.org/wiki/R_(programming_language)">R
1837programming language</a>, a widely used language in statistical data
1838analysis. (Similar things can also be done in Python, for example.)
1839If you are unfamiliar with R and ggplot2 you could check out the data carpentry course on R
1840<a href="https://datacarpentry.org/R-ecology-lesson/index.html">here</a>.
1841Furthermore, since most search engines struggle with searches of the form "R + topic",
1842we recommend using the specialized service <a
1843href="http://rseek.org">RSeek</a> instead.
1844
1845The most prominent difference between R and other languages is that
1846the assignment operator (`a = 5`) is typically written as
1847`a <- 5`. As the latter is considered standard we will use it in our examples as well.
1848To open the `.h5` file in R you have to install the <a href="https://bioconductor.org/packages/release/bioc/html/rhdf5.html">rhdf5</a> package, which is a part of the Bioconductor package.
1849
1850First we will include all necessary packages and have a look at how the data is structured in our file.
1851@code{.r}
1852library(rhdf5) # library for handling HDF5 files
1853library(ggplot2) # main plotting library
1854library(grDevices) # needed for output to PDF
1855library(viridis) # contains good colormaps for sequential data
1856
1857refinement <- 5
1858h5f <- H5Fopen(paste("solution_",refinement,".h5",sep=""))
1859print(h5f)
1860@endcode
1861This gives the following output
1862@code{.unparsed}
1863HDF5 FILE
1864 name /
1865filename
1866
 1867      name        otype      dclass  dim
 18680 cells       H5I_DATASET INTEGER 4 x 1024
 18691 mean_value  H5I_DATASET FLOAT   1
 18702 nodes       H5I_DATASET FLOAT   2 x 1089
 18713 point_value H5I_DATASET FLOAT   1
 18724 solution    H5I_DATASET FLOAT   1 x 1089
1873@endcode
1874The datasets can be accessed by <code>h5f$name</code>. The function
1875<code>dim(h5f$cells)</code> gives us the dimensions of the matrix
1876that is used to store our cells.
1877We can see the following three matrices, as well as the two
1878additional data points we added.
1879<ul>
1880<li> <code>cells</code>: a 4x1024 matrix that stores the (C++) vertex indices for each cell
1881<li> <code>nodes</code>: a 2x1089 matrix storing the position values (x,y) for our cell vertices
1882<li> <code>solution</code>: a 1x1089 matrix storing the values of our solution at each vertex
1883</ul>
1884Now we can use this data to generate various plots. Plotting with ggplot2 usually splits into two steps.
1885At first the data needs to be manipulated and added to a <code>data.frame</code>.
1886After that, a <code>ggplot</code> object is constructed and manipulated by adding plot elements to it.
1887
1888<code>nodes</code> and <code>cells</code> contain all the information we need to plot our grid.
1889The following code wraps all the data into one dataframe for plotting our grid:
1890@code{.r}
1891# Counting in R starts at 1 instead of 0, so we need to increment all
1892# vertex indices by one:
1893cell_ids <- h5f$cells+1
1894
1895# Store the x and y positions of each vertex in one big vector in a
1896# cell by cell fashion (every 4 entries belong to one cell):
1897cells_x <- h5f$nodes[1,][cell_ids]
1898cells_y <- h5f$nodes[2,][cell_ids]
1899
1900# Construct a vector that stores the matching cell by cell grouping
1901# (1,1,1,1,2,2,2,2,...):
1902groups <- rep(1:ncol(cell_ids),each=4)
1903
1904# Finally put everything into one dataframe:
1905meshdata <- data.frame(x = cells_x, y = cells_y, id = groups)
1906@endcode
1907
1908With the finished dataframe we have everything we need to plot our grid:
1909@code{.r}
1910pdf (paste("grid_",refinement,".pdf",sep=""),width = 5,height = 5) # Open new PDF file
1911plt <- ggplot(meshdata,aes(x=x,y=y,group=id)) # Construction of our plot
1912 # object, at first only data
1913
1914plt <- plt + geom_polygon(fill="white",colour="black") # Actual plotting of the grid as polygons
1915plt <- plt + ggtitle(paste("grid at refinement level #",refinement))
1916
1917print(plt) # Show the current state of the plot/add it to the pdf
1918dev.off() # Close PDF file
1919@endcode
1920
1921The contents of this file then look as follows (not very exciting, but
1922you get the idea):
1923<table width="60%" align="center">
1924 <tr>
1925 <td align="center">
1926 <img src="https://www.dealii.org/images/steps/developer/step-3.extensions.grid_5.png" alt="Grid after 5 refinement steps of step-3">
1927 </td>
1928 </tr>
1929</table>
1930
1931We can also visualize the solution itself, and this is going to look
1932more interesting.
1933To make a 2D pseudocolor plot of our solution we will use <code>geom_raster</code>.
1934This function needs a structured grid, i.e. uniform in x and y directions.
1935Luckily our data at this point is structured in the right way.
1936The following code plots a pseudocolor representation of our surface into a new PDF:
1937@code{.r}
1938pdf (paste("pseudocolor_",refinement,".pdf",sep=""),width = 5,height = 4.2) # Open new PDF file
1939colordata <- data.frame(x = h5f$nodes[1,], y = h5f$nodes[2,], solution = h5f$solution[1,])
1940plt <- ggplot(colordata,aes(x=x,y=y,fill=solution))
1941plt <- plt + geom_raster(interpolate=TRUE)
1942plt <- plt + scale_fill_viridis()
1943plt <- plt + ggtitle(paste("solution at refinement level #",refinement))
1944
1945print(plt)
1946dev.off()
1947H5Fclose(h5f) # Close the HDF5 file
1948@endcode
1949This is now going to look as follows:
1950<table width="60%" align="center">
1951 <tr>
1952 <td align="center">
1953 <img src="https://www.dealii.org/images/steps/developer/step-3.extensions.pseudocolor_5.png" alt="Solution after 5 refinement steps of step-3">
1954 </td>
1955 </tr>
1956</table>
1957
1958For plotting the convergence curves we need to re-run the C++ code multiple times with different values for <code>n_refinement_steps</code>
1959starting from 1.
1960Since every file only contains a single data point we need to loop over them and concatenate the results into a single vector.
1961@code{.r}
1962n_ref <- 8 # Maximum refinement level for which results are existing
1963
1964# First we initiate all vectors with the results of the first level
1965h5f <- H5Fopen("solution_1.h5")
1966dofs <- dim(h5f$solution)[2]
1967mean <- h5f$mean_value
1968point <- h5f$point_value
1969H5Fclose(h5f)
1970
1971for (reflevel in 2:n_ref)
1972{
1973 h5f <- H5Fopen(paste("solution_",reflevel,".h5",sep=""))
1974 dofs <- c(dofs,dim(h5f$solution)[2])
1975 mean <- c(mean,h5f$mean_value)
1976 point <- c(point,h5f$point_value)
1977 H5Fclose(h5f)
1978}
1979@endcode
1980As we are not interested in the values themselves but rather in the error compared to an "exact" solution, we will
1981assume our highest refinement level to be that solution and omit it from the data.
1982@code{.r}
1983# Calculate the error w.r.t. our maximum refinement step
1984mean_error <- abs(mean[1:n_ref-1]-mean[n_ref])
1985point_error <- abs(point[1:n_ref-1]-point[n_ref])
1986
1987# Remove the highest value from our DoF data
1988dofs <- dofs[1:n_ref-1]
1989convdata <- data.frame(dofs = dofs, mean_value= mean_error, point_value = point_error)
1990@endcode
1991Now we have all the data available to generate our plots.
1992It is often useful to plot errors on a log-log scale, which is
1993accomplished in the following code:
1994@code
1995pdf (paste("convergence.pdf",sep=""),width = 5,height = 4.2)
1996plt <- ggplot(convdata,mapping=aes(x = dofs, y = mean_value))
1997plt <- plt+geom_line()
1998plt <- plt+labs(x="#DoFs",y = "mean value error")
1999plt <- plt+scale_x_log10()+scale_y_log10()
2000print(plt)
2001
2002plt <- ggplot(convdata,mapping=aes(x = dofs, y = point_value))
2003plt <- plt+geom_line()
2004plt <- plt+labs(x="#DoFs",y = "point value error")
2005plt <- plt+scale_x_log10()+scale_y_log10()
2006print(plt)
2007
2008dev.off()
2009@endcode
2010This results in the following plots that show how the errors in the
2011mean value and the solution value at the chosen point nicely converge
2012to zero:
2013<table style="width:50%" align="center">
2014 <tr>
2015 <td><img src="https://www.dealii.org/images/steps/developer/step-3.extensions.convergence_mean.png" alt=""></td>
2016 <td><img src="https://www.dealii.org/images/steps/developer/step-3.extensions.convergence_point.png" alt=""></td>
2017 </tr>
2018</table>
2019
2020<a name="step_3-Usingpythontogenerateplots"></a><h3> Using python to generate plots</h3>
2021
2022
2023In this section we discuss the postprocessing of the data stored in %HDF5 files using the Python programming language.
2024The necessary packages to import are
2025@code{.py}
2026import numpy as np # to work with multidimensional arrays
2027import h5py # to work with %HDF5 files
2028
2029import pandas as pd # for data frames
2030import matplotlib.pyplot as plt # plotting
2031from matplotlib.patches import Polygon
2032
2033from scipy.interpolate import griddata # interpolation function
2034from matplotlib import cm # for colormaps
2035
2036@endcode
2037The %HDF5 solution file corresponding to `refinement = 5` can be opened as
2038@code{.py}
2039refinement = 5
2040filename = "solution_%d.h5" % refinement
2041file = h5py.File(filename, "r")
2042@endcode
2043The following prints out the tables in the %HDF5 file
2044@code{.py}
2045for key, value in file.items():
2046 print(key, " : ", value)
2047@endcode
2048which prints out
2049@code{.unparsed}
2050cells : <HDF5 dataset "cells": shape (1024, 4), type "<u4">
2051mean_value : <HDF5 dataset "mean_value": shape (1,), type "<f8">
2052nodes : <HDF5 dataset "nodes": shape (1089, 2), type "<f8">
2053point_value : <HDF5 dataset "point_value": shape (1,), type "<f8">
2054solution : <HDF5 dataset "solution": shape (1089, 1), type "<f8">
2055@endcode
2056There are @f$(32+1)\times(32+1) = 1089@f$ nodes.
2057The coordinates of these nodes @f$(x,y)@f$ are stored in the table `nodes` in the %HDF5 file.
2058There are a total of @f$32\times 32 = 1024@f$ cells. The nodes that make up each cell are
2059listed in the table `cells` in the %HDF5 file.
2060
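As a quick sanity check (a minimal sketch, not part of the tutorial program), one can print the node indices stored for the first cell and look up the corresponding coordinates:
@code{.py}
# The first row of "cells" holds the four node indices of cell 0;
# indexing "nodes" with these returns the (x, y) coordinates of those vertices.
first_cell = np.array(file["/cells"])[0]
print(first_cell)
print(np.array(file["/nodes"])[first_cell])
@endcode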
2061Next, we extract the data into multidimensional arrays
2062@code{.py}
2063nodes = np.array(file["/nodes"])
2064cells = np.array(file["/cells"])
2065solution = np.array(file["/solution"])
2066
2067x, y = nodes.T
2068@endcode
2069
2070The following stores the @f$x@f$ and @f$y@f$ coordinates of each node of each cell in flat arrays.
2071@code{.py}
2072cell_x = x[cells.flatten()]
2073cell_y = y[cells.flatten()]
2074@endcode
2075The following assigns a cell id to each vertex; every four consecutive entries correspond to one cell.
2076Then we collect the coordinates and ids into a data frame
2077@code{.py}
2078n_cells = cells.shape[0]
2079cell_ids = np.repeat(np.arange(n_cells), 4)
2080meshdata = pd.DataFrame({"x": cell_x, "y": cell_y, "ids": cell_ids})
2081@endcode
2082The data frame then looks like this:
2083@code{.unparsed}
2084print(meshdata)
2085
2086 x y ids
20870 0.00000 0.00000 0
20881 0.03125 0.00000 0
20892 0.03125 0.03125 0
20903 0.00000 0.03125 0
20914 0.03125 0.00000 1
2092... ... ... ...
20934091 0.93750 1.00000 1022
20944092 0.96875 0.96875 1023
20954093 1.00000 0.96875 1023
20964094 1.00000 1.00000 1023
20974095 0.96875 1.00000 1023
2098
20994096 rows × 3 columns
2100@endcode
2101
2102To plot the mesh, we loop over all cells and connect the first and last node to complete the polygon
2103@code{.py}
2104fig, ax = plt.subplots()
2105ax.set_aspect("equal", "box")
2106ax.set_title("grid at refinement level #%d" % refinement)
2107
2108for cell_id, cell in meshdata.groupby("ids"):
2109 cell = pd.concat([cell, cell.head(1)])
2110 plt.plot(cell["x"], cell["y"], c="k")
2111@endcode
2112Alternatively, one could use matplotlib's built-in Polygon class:
2113@code{.py}
2114fig, ax = plt.subplots()
2115ax.set_aspect("equal", "box")
2116ax.set_title("grid at refinement level #%d" % refinement)
2117for cell_id, cell in meshdata.groupby("ids"):
2118 patch = Polygon(cell[["x", "y"]], facecolor="w", edgecolor="k")
2119 ax.add_patch(patch)
2120@endcode
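In a non-interactive session the figure produced by either variant still needs to be shown or written to a file; a minimal sketch (the file name is only an example):
@code{.py}
fig.savefig("grid_%d.png" % refinement, dpi=200)  # or simply plt.show()
@endcode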
2121
2122To plot the solution, we first create a finer structured grid, then interpolate the solution values
2123onto it, and finally plot the result.
2124@code{.py}
2125nx = int(np.sqrt(n_cells)) + 1
2126nx *= 10
2127xg = np.linspace(x.min(), x.max(), nx)
2128yg = np.linspace(y.min(), y.max(), nx)
2129
2130xgrid, ygrid = np.meshgrid(xg, yg)
2131solution_grid = griddata((x, y), solution.flatten(), (xgrid, ygrid), method="linear")
2132
2133fig = plt.figure()
2134ax = fig.add_subplot(1, 1, 1)
2135ax.set_title("solution at refinement level #%d" % refinement)
2136c = ax.pcolor(xgrid, ygrid, solution_grid, cmap=cm.viridis)
2137fig.colorbar(c, ax=ax)
2138
2139plt.show()
2140@endcode
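If one wants to avoid the interpolation step, a possible alternative (not used in this tutorial) is matplotlib's triangulation-based plotting, which works directly on the scattered nodal values:
@code{.py}
# Sketch: matplotlib triangulates the nodes internally and colors by the nodal values.
fig, ax = plt.subplots()
ax.set_aspect("equal", "box")
c = ax.tripcolor(x, y, solution.flatten(), cmap=cm.viridis)
fig.colorbar(c, ax=ax)
plt.show()
@endcode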
2141
2142To check the convergence of `mean_value` and `point_value`,
2143we loop over the data of all refinements and store the results in the vectors <code>mean_values</code> and <code>point_values</code>
2144@code{.py}
2145mean_values = np.zeros((8,))
2146point_values = np.zeros((8,))
2147dofs = np.zeros((8,))
2148
2149for refinement in range(1, 9):
2150 filename = "solution_%d.h5" % refinement
2151 file = h5py.File(filename, "r")
2152 mean_values[refinement - 1] = np.array(file["/mean_value"])[0]
2153 point_values[refinement - 1] = np.array(file["/point_value"])[0]
2154 dofs[refinement - 1] = np.array(file["/solution"]).shape[0]
2155@endcode
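Since each global refinement step subdivides the single initial cell into a @f$2^r \times 2^r@f$ grid and bilinear elements carry one degree of freedom per vertex, the collected DoF counts can be checked against @f$(2^r+1)^2@f$. This is only a small sketch, assuming the program was run for exactly the refinement levels 1 through 8:
@code{.py}
# Expected DoF counts for Q1 elements on a globally refined square: (2^r + 1)^2.
expected_dofs = [(2**r + 1) ** 2 for r in range(1, 9)]
print(np.array_equal(dofs, expected_dofs))  # should print True
@endcode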
2156
2157The following plots the error in the mean value on a `log-log` scale; here the error at a given refinement level is measured as the difference to the value at the next finer level:
2158@code{.py}
2159mean_error = np.abs(mean_values[1:] - mean_values[:-1])
2160plt.loglog(dofs[:-1], mean_error)
2161plt.grid()
2162plt.xlabel("#DoFs")
2163plt.ylabel("mean value error")
2164plt.show()
2165@endcode
2166
2167The following plots the error in the point value on a `log-log` scale, computed in the same way:
2168@code{.py}
2169point_error = np.abs(point_values[1:] - point_values[:-1])
2170plt.loglog(dofs[:-1], point_error)
2171plt.grid()
2172plt.xlabel("#DoFs")
2173plt.ylabel("point value error")
2174plt.show()
2175@endcode
2176
2177A Python package that mimics R's ggplot2 (both are based on specifying a grammar of graphics) is
2178<a href="https://plotnine.org/">plotnine</a>.
2179We need to import the following from the <code>plotnine</code> package:
2180@code{.py}
2181from plotnine import (
2182 ggplot,
2183 aes,
2184 geom_raster,
2185 geom_polygon,
2186 geom_line,
2187 labs,
2188 scale_x_log10,
2189 scale_y_log10,
2190 ggtitle,
2191)
2192@endcode
2193Then we plot the mesh from the <code>meshdata</code> data frame:
2194@code{.py}
2195plot = (
2196 ggplot(meshdata, aes(x="x", y="y", group="ids"))
2197 + geom_polygon(fill="white", colour="black")
2198 + ggtitle("grid at refinement level #%d" % refinement)
2199)
2200
2201print(plot)
2202@endcode
2203Collect the solution into a data frame:
2204@code{.py}
2205colordata = pd.DataFrame({"x": x, "y": y, "solution": solution.flatten()})
2206@endcode
2207Plot the solution:
2208@code{.py}
2209plot = (
2210 ggplot(colordata, aes(x="x", y="y", fill="solution"))
2211 + geom_raster(interpolate=True)
2212 + ggtitle("solution at refinement level #%d" % refinement)
2213)
2214
2215print(plot)
2216@endcode
2217
2218Collect the convergence data into a data frame:
2219@code{.py}
2220convdata = pd.DataFrame(
2221 {"dofs": dofs[:-1], "mean_value": mean_error, "point_value": point_error}
2222)
2223
2224@endcode
2225The following is the corresponding plot of the mean value error on a `log-log` scale:
2226@code{.py}
2227plot = (
2228 ggplot(convdata, mapping=aes(x="dofs", y="mean_value"))
2229 + geom_line()
2230 + labs(x="#DoFs", y="mean value error")
2231 + scale_x_log10()
2232 + scale_y_log10()
2233)
2234
2235plot.save("mean_error.pdf", dpi=60)
2236print(plot)
2237@endcode
2238
2239The following is the corresponding plot of the point value error on a `log-log` scale:
2240@code{.py}
2241plot = (
2242 ggplot(convdata, mapping=aes(x="dofs", y="point_value"))
2243 + geom_line()
2244 + labs(x="#DoFs", y="point value error")
2245 + scale_x_log10()
2246 + scale_y_log10()
2247)
2248
2249plot.save("point_error.pdf", dpi=60)
2250print(plot)
2251@endcode
2252 *
2253 *
2254<a name="step_3-PlainProg"></a>
2255<h1> The plain program</h1>
2256@include "step-3.cc"
2257*/