Reference documentation for deal.II version 9.2.0
step-23.h
659  * const unsigned int component = 0) const override
660  * {
661  * (void)component;
662  * Assert(component == 0, ExcIndexRange(component, 0, 1));
663  * return 0;
664  * }
665  * };
666  *
667  *
668  *
669  * template <int dim>
670  * class InitialValuesV : public Function<dim>
671  * {
672  * public:
673  * virtual double value(const Point<dim> & /*p*/,
674  * const unsigned int component = 0) const override
675  * {
676  * (void)component;
677  * Assert(component == 0, ExcIndexRange(component, 0, 1));
678  * return 0;
679  * }
680  * };
681  *
682  *
683  *
684  * @endcode
685  *
686  * Secondly, we have the right hand side forcing term. Boring as we are, we
687  * choose zero here as well:
688  *
689  * @code
690  * template <int dim>
691  * class RightHandSide : public Function<dim>
692  * {
693  * public:
694  * virtual double value(const Point<dim> & /*p*/,
695  * const unsigned int component = 0) const override
696  * {
697  * (void)component;
698  * Assert(component == 0, ExcIndexRange(component, 0, 1));
699  * return 0;
700  * }
701  * };
702  *
703  *
704  *
705  * @endcode
706  *
707  * Finally, we have boundary values for @f$u@f$ and @f$v@f$. They are as described
708  * in the introduction, one being the time derivative of the other:
709  *
710  * @code
711  * template <int dim>
712  * class BoundaryValuesU : public Function<dim>
713  * {
714  * public:
715  * virtual double value(const Point<dim> & p,
716  * const unsigned int component = 0) const override
717  * {
718  * (void)component;
719  * Assert(component == 0, ExcIndexRange(component, 0, 1));
720  *
721  * if ((this->get_time() <= 0.5) && (p[0] < 0) && (p[1] < 1. / 3) &&
722  * (p[1] > -1. / 3))
723  * return std::sin(this->get_time() * 4 * numbers::PI);
724  * else
725  * return 0;
726  * }
727  * };
728  *
729  *
730  *
731  * template <int dim>
732  * class BoundaryValuesV : public Function<dim>
733  * {
734  * public:
735  * virtual double value(const Point<dim> & p,
736  * const unsigned int component = 0) const override
737  * {
738  * (void)component;
739  * Assert(component == 0, ExcIndexRange(component, 0, 1));
740  *
741  * if ((this->get_time() <= 0.5) && (p[0] < 0) && (p[1] < 1. / 3) &&
742  * (p[1] > -1. / 3))
743  * return (std::cos(this->get_time() * 4 * numbers::PI) * 4 * numbers::PI);
744  * else
745  * return 0;
746  * }
747  * };
748  *
749  *
750  *
751  * @endcode
752  *
753  *
754  * <a name="ImplementationofthecodeWaveEquationcodeclass"></a>
755  * <h3>Implementation of the <code>WaveEquation</code> class</h3>
756  *
757 
758  *
759  * The implementation of the actual logic is fairly short, since we
760  * relegate things like assembling the matrices and right hand side vectors
761  * to the library. The rest boils down to not much more than 130 lines of
762  * actual code, a significant fraction of which is boilerplate that can
763  * be taken from previous example programs (e.g. the functions that solve
764  * linear systems, or that generate output).
765  *
766 
767  *
768  * Let's start with the constructor (for an explanation of the choice of
769  * time step, see the section on Courant, Friedrichs, and Lewy in the
770  * introduction):
771  *
772  * @code
773  * template <int dim>
774  * WaveEquation<dim>::WaveEquation()
775  * : fe(1)
776  * , dof_handler(triangulation)
777  * , time_step(1. / 64)
778  * , time(time_step)
779  * , timestep_number(1)
780  * , theta(0.5)
781  * {}
782  *
783  *
784  * @endcode
785  *
786  *
787  * <a name="WaveEquationsetup_system"></a>
788  * <h4>WaveEquation::setup_system</h4>
789  *
790 
791  *
792  * The next function is the one that sets up the mesh, DoFHandler, and
793  * matrices and vectors at the beginning of the program, i.e. before the
794  * first time step. The first few lines are pretty much standard if you've
795  * read through the tutorial programs at least up to @ref step_6 "step-6":
796  *
797  * @code
798  * template <int dim>
799  * void WaveEquation<dim>::setup_system()
800  * {
801  * GridGenerator::hyper_cube(triangulation, -1, 1);
802  * triangulation.refine_global(7);
803  *
804  * std::cout << "Number of active cells: " << triangulation.n_active_cells()
805  * << std::endl;
806  *
807  * dof_handler.distribute_dofs(fe);
808  *
809  * std::cout << "Number of degrees of freedom: " << dof_handler.n_dofs()
810  * << std::endl
811  * << std::endl;
812  *
813  * DynamicSparsityPattern dsp(dof_handler.n_dofs(), dof_handler.n_dofs());
814  * DoFTools::make_sparsity_pattern(dof_handler, dsp);
815  * sparsity_pattern.copy_from(dsp);
816  *
817  * @endcode
818  *
819  * Then comes a block where we have to initialize the 3 matrices we need
820  * in the course of the program: the mass matrix, the Laplace matrix, and
821  * the matrix @f$M+k^2\theta^2A@f$ used when solving for @f$U^n@f$ in each time
822  * step.
823  *
824 
825  *
826  * When setting up these matrices, note that they all make use of the same
827  * sparsity pattern object. Here, the reason why matrices and sparsity
828  * patterns are separate objects in deal.II (unlike in many other finite
829  * element or linear algebra classes) becomes clear: in a significant
830  * fraction of applications, one has to hold several matrices that happen
831  * to have the same sparsity pattern, and there is no reason for them not
832  * to share this information, rather than re-building and wasting memory
833  * on it several times.
834  *
835 
836  *
837  * After initializing all of these matrices, we call library functions
838  * that build the Laplace and mass matrices. All they need is a DoFHandler
839  * object and a quadrature formula object that is to be used for numerical
840  * integration. Note that in many respects these functions are better than
841  * what we would usually do in application programs, for example because
842  * they automatically parallelize building the matrices if multiple
843  * processors are available in a machine: for more information see the
844  * documentation of WorkStream or the
845  * @ref threads "Parallel computing with multiple processors"
846  * module. The matrices for solving linear systems will be filled in the
847  * run() method because we need to re-apply boundary conditions every time
848  * step.
849  *
850  * @code
851  * mass_matrix.reinit(sparsity_pattern);
852  * laplace_matrix.reinit(sparsity_pattern);
853  * matrix_u.reinit(sparsity_pattern);
854  * matrix_v.reinit(sparsity_pattern);
855  *
856  * MatrixCreator::create_mass_matrix(dof_handler,
857  * QGauss<dim>(fe.degree + 1),
858  * mass_matrix);
859  * MatrixCreator::create_laplace_matrix(dof_handler,
860  * QGauss<dim>(fe.degree + 1),
861  * laplace_matrix);
862  *
863  * @endcode
864  *
865  * The rest of the function is spent on setting vector sizes to the
866  * correct value. The final line closes the hanging node constraints
867  * object. Since we work on a uniformly refined mesh, no constraints exist
868  * or have been computed (i.e. there was no need to call
869  * DoFTools::make_hanging_node_constraints as in other programs), but we
870  * need a constraints object in one place further down below anyway.
871  *
872  * @code
873  * solution_u.reinit(dof_handler.n_dofs());
874  * solution_v.reinit(dof_handler.n_dofs());
875  * old_solution_u.reinit(dof_handler.n_dofs());
876  * old_solution_v.reinit(dof_handler.n_dofs());
877  * system_rhs.reinit(dof_handler.n_dofs());
878  *
879  * constraints.close();
880  * }
881  *
882  *
883  *
884  * @endcode
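 *
 * As an aside: on an adaptively refined mesh one would instead fill the
 * constraints object before closing it. The following is a minimal sketch of
 * what the end of setup_system() could then look like (assuming the same
 * member variables as above); it is not needed in this program, which works
 * on a uniformly refined mesh:
 *
 * @code
 * // Collect hanging-node constraints from the locally refined mesh, then
 * // close the object so it can be used in assembly and projection.
 * constraints.clear();
 * DoFTools::make_hanging_node_constraints(dof_handler, constraints);
 * constraints.close();
 * @endcode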
885  *
886  *
887  * <a name="WaveEquationsolve_uandWaveEquationsolve_v"></a>
888  * <h4>WaveEquation::solve_u and WaveEquation::solve_v</h4>
889  *
890 
891  *
892  * The next two functions deal with solving the linear systems associated
893  * with the equations for @f$U^n@f$ and @f$V^n@f$. Neither is particularly
894  * interesting, as they pretty much follow the scheme used in all the
895  * previous tutorial programs.
896  *
897 
898  *
899  * One could experiment a little with preconditioners for the two matrices we
900  * have to invert; a sketch of such an experiment follows the code below. As
901  * it turns out, for the matrices at hand, Jacobi or SSOR preconditioners
902  * reduce the number of iterations needed to solve the linear systems
903  * slightly, but due to the cost of applying the preconditioner this is not
904  * a win in terms of run-time. It is not much of a loss either, so let's
905  * keep it simple and just do without:
906  *
907  * @code
908  * template <int dim>
909  * void WaveEquation<dim>::solve_u()
910  * {
911  * SolverControl solver_control(1000, 1e-8 * system_rhs.l2_norm());
912  * SolverCG<Vector<double>> cg(solver_control);
913  *
914  * cg.solve(matrix_u, solution_u, system_rhs, PreconditionIdentity());
915  *
916  * std::cout << " u-equation: " << solver_control.last_step()
917  * << " CG iterations." << std::endl;
918  * }
919  *
920  *
921  *
922  * template <int dim>
923  * void WaveEquation<dim>::solve_v()
924  * {
925  * SolverControl solver_control(1000, 1e-8 * system_rhs.l2_norm());
926  * SolverCG<Vector<double>> cg(solver_control);
927  *
928  * cg.solve(matrix_v, solution_v, system_rhs, PreconditionIdentity());
929  *
930  * std::cout << " v-equation: " << solver_control.last_step()
931  * << " CG iterations." << std::endl;
932  * }
933  *
934  *
935  *
936  * @endcode
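 *
 * As mentioned above, one could experiment with preconditioners here. A
 * minimal sketch of what swapping in an SSOR preconditioner in solve_u()
 * might look like is shown below; this is only an illustration, not part of
 * the tutorial program, and the relaxation parameter 1.2 is an arbitrary
 * choice:
 *
 * @code
 * // Set up an SSOR preconditioner for the matrix M + k^2 theta^2 A ...
 * PreconditionSSOR<SparseMatrix<double>> preconditioner;
 * preconditioner.initialize(
 *   matrix_u, PreconditionSSOR<SparseMatrix<double>>::AdditionalData(1.2));
 *
 * // ... and hand it to the CG solver instead of PreconditionIdentity().
 * cg.solve(matrix_u, solution_u, system_rhs, preconditioner);
 * @endcode
 *
 * For the matrices of this program, the slight reduction in CG iterations
 * does not pay for the cost of applying the preconditioner, which is why the
 * code above simply uses PreconditionIdentity().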
937  *
938  *
939  * <a name="WaveEquationoutput_results"></a>
940  * <h4>WaveEquation::output_results</h4>
941  *
942 
943  *
944  * Likewise, the following function is pretty much what we've done
945  * before. The only thing worth mentioning is how we generate a string
946  * representation of the time step number, padded with leading zeros to a
947  * length of 3 characters, using the second argument of the
948  * Utilities::int_to_string function.
949  *
950  * @code
951  * template <int dim>
952  * void WaveEquation<dim>::output_results() const
953  * {
954  * DataOut<dim> data_out;
955  *
956  * data_out.attach_dof_handler(dof_handler);
957  * data_out.add_data_vector(solution_u, "U");
958  * data_out.add_data_vector(solution_v, "V");
959  *
960  * data_out.build_patches();
961  *
962  * const std::string filename =
963  * "solution-" + Utilities::int_to_string(timestep_number, 3) + ".vtu";
964  * @endcode
965  *
966  * Like @ref step_15 "step-15", since we write output at every time step (and the system
967  * we have to solve is relatively easy), we instruct DataOut to use the
968  * zlib compression algorithm that is optimized for speed instead of disk
969  * usage since otherwise plotting the output becomes a bottleneck:
970  *
971  * @code
972  * DataOutBase::VtkFlags vtk_flags;
973  * vtk_flags.compression_level =
974  * DataOutBase::VtkFlags::ZlibCompressionLevel::best_speed;
975  * data_out.set_flags(vtk_flags);
976  * std::ofstream output(filename);
977  * data_out.write_vtu(output);
978  * }
979  *
980  *
981  *
982  * @endcode
983  *
984  *
985  * <a name="WaveEquationrun"></a>
986  * <h4>WaveEquation::run</h4>
987  *
988 
989  *
990  * The following is really the only interesting function of the program. It
991  * contains the loop over all time steps, but before we get to that we have
992  * to set up the grid, DoFHandler, and matrices. In addition, we have to
993  * somehow get started with initial values. To this end, we use the
994  * VectorTools::project function that takes an object that describes a
995  * continuous function and computes the @f$L^2@f$ projection of this function
996  * onto the finite element space described by the DoFHandler object. Can't
997  * be any simpler than that:
998  *
999  * @code
1000  * template <int dim>
1001  * void WaveEquation<dim>::run()
1002  * {
1003  * setup_system();
1004  *
1005  * VectorTools::project(dof_handler,
1006  * constraints,
1007  * QGauss<dim>(fe.degree + 1),
1008  * InitialValuesU<dim>(),
1009  * old_solution_u);
1010  * VectorTools::project(dof_handler,
1011  * constraints,
1012  * QGauss<dim>(fe.degree + 1),
1013  * InitialValuesV<dim>(),
1014  * old_solution_v);
1015  *
1016  * @endcode
1017  *
1018  * The next thing is to loop over all the time steps until we reach the
1019  * end time (@f$T=5@f$ in this case). In each time step, we first have to
1020  * solve for @f$U^n@f$, using the equation @f$(M^n + k^2\theta^2 A^n)U^n =@f$
1021  * @f$(M^{n,n-1} - k^2\theta(1-\theta) A^{n,n-1})U^{n-1} + kM^{n,n-1}V^{n-1}
1022  * +@f$ @f$k\theta \left[k \theta F^n + k(1-\theta) F^{n-1} \right]@f$. Note
1023  * that we use the same mesh for all time steps, so that @f$M^n=M^{n,n-1}=M@f$
1024  * and @f$A^n=A^{n,n-1}=A@f$. What we therefore have to do first is to add up
1025  * @f$MU^{n-1} - k^2\theta(1-\theta) AU^{n-1} + kMV^{n-1}@f$ and the forcing
1026  * terms, and put the result into the <code>system_rhs</code> vector. (For
1027  * these additions, we need a temporary vector that we declare before the
1028  * loop to avoid repeated memory allocations in each time step.)
1029  *
1030 
1031  *
1032  * The one thing to realize here is how we communicate the time variable
1033  * to the object describing the right hand side: each object derived from
1034  * the Function class has a time field that can be set using
1035  * Function::set_time and read using Function::get_time. In essence, using
1036  * this mechanism, all functions of space and time are therefore
1037  * considered functions of space evaluated at a particular time. This
1038  * matches well what we typically need in finite element programs, where
1039  * we almost always work on a single time step at a time, and where it
1040  * never happens that, for example, one would like to evaluate a
1041  * space-time function for all times at any given spatial location.
1042  *
1043  * @code
1044  * Vector<double> tmp(solution_u.size());
1045  * Vector<double> forcing_terms(solution_u.size());
1046  *
1047  * for (; time <= 5; time += time_step, ++timestep_number)
1048  * {
1049  * std::cout << "Time step " << timestep_number << " at t=" << time
1050  * << std::endl;
1051  *
1052  * mass_matrix.vmult(system_rhs, old_solution_u);
1053  *
1054  * mass_matrix.vmult(tmp, old_solution_v);
1055  * system_rhs.add(time_step, tmp);
1056  *
1057  * laplace_matrix.vmult(tmp, old_solution_u);
1058  * system_rhs.add(-theta * (1 - theta) * time_step * time_step, tmp);
1059  *
1060  * RightHandSide<dim> rhs_function;
1061  * rhs_function.set_time(time);
1062  * VectorTools::create_right_hand_side(dof_handler,
1063  * QGauss<dim>(fe.degree + 1),
1064  * rhs_function,
1065  * tmp);
1066  * forcing_terms = tmp;
1067  * forcing_terms *= theta * time_step;
1068  *
1069  * rhs_function.set_time(time - time_step);
1070  * VectorTools::create_right_hand_side(dof_handler,
1071  * QGauss<dim>(fe.degree + 1),
1072  * rhs_function,
1073  * tmp);
1074  *
1075  * forcing_terms.add((1 - theta) * time_step, tmp);
1076  *
1077  * system_rhs.add(theta * time_step, forcing_terms);
1078  *
1079  * @endcode
1080  *
1081  * After so constructing the right hand side vector of the first
1082  * equation, all we have to do is apply the correct boundary
1083  * values. As for the right hand side, this is a space-time function
1084  * evaluated at a particular time, which we interpolate at boundary
1085  * nodes and then use the result to apply boundary values as we
1086  * usually do. The result is then handed off to the solve_u()
1087  * function:
1088  *
1089  * @code
1090  * {
1091  * BoundaryValuesU<dim> boundary_values_u_function;
1092  * boundary_values_u_function.set_time(time);
1093  *
1094  * std::map<types::global_dof_index, double> boundary_values;
1095  * VectorTools::interpolate_boundary_values(dof_handler,
1096  * 0,
1097  * boundary_values_u_function,
1098  * boundary_values);
1099  *
1100  * @endcode
1101  *
1102  * The matrix for solve_u() is the same in every time step, so one
1103  * could think that it is enough to do this only once at the
1104  * beginning of the simulation. However, since we need to apply
1105  * boundary values to the linear system (which eliminates some matrix
1106  * rows and columns and gives contributions to the right hand side),
1107  * we have to refill the matrix in every time step before we
1108  * actually apply boundary data. The actual content is very simple:
1109  * it is the sum of the mass matrix and a weighted Laplace matrix:
1110  *
1111  * @code
1112  * matrix_u.copy_from(mass_matrix);
1113  * matrix_u.add(theta * theta * time_step * time_step, laplace_matrix);
1114  * MatrixTools::apply_boundary_values(boundary_values,
1115  * matrix_u,
1116  * solution_u,
1117  * system_rhs);
1118  * }
1119  * solve_u();
1120  *
1121  *
1122  * @endcode
1123  *
1124  * The second step, i.e. solving for @f$V^n@f$, works similarly, except
1125  * that this time the matrix on the left is the mass matrix (which we
1126  * copy again in order to be able to apply boundary conditions), and
1127  * the right hand side is @f$MV^{n-1} - k\left[ \theta A U^n +
1128  * (1-\theta) AU^{n-1}\right]@f$ plus forcing terms. Boundary values
1129  * are applied in the same way as before, except that now we have to
1130  * use the BoundaryValuesV class:
1131  *
1132  * @code
1133  * laplace_matrix.vmult(system_rhs, solution_u);
1134  * system_rhs *= -theta * time_step;
1135  *
1136  * mass_matrix.vmult(tmp, old_solution_v);
1137  * system_rhs += tmp;
1138  *
1139  * laplace_matrix.vmult(tmp, old_solution_u);
1140  * system_rhs.add(-time_step * (1 - theta), tmp);
1141  *
1142  * system_rhs += forcing_terms;
1143  *
1144  * {
1145  * BoundaryValuesV<dim> boundary_values_v_function;
1146  * boundary_values_v_function.set_time(time);
1147  *
1148  * std::map<types::global_dof_index, double> boundary_values;
1149  * VectorTools::interpolate_boundary_values(dof_handler,
1150  * 0,
1151  * boundary_values_v_function,
1152  * boundary_values);
1153  * matrix_v.copy_from(mass_matrix);
1154  * MatrixTools::apply_boundary_values(boundary_values,
1155  * matrix_v,
1156  * solution_v,
1157  * system_rhs);
1158  * }
1159  * solve_v();
1160  *
1161  * @endcode
1162  *
1163  * Finally, after both solution components have been computed, we
1164  * output the result, compute the energy in the solution, and go on to
1165  * the next time step after shifting the present solution into the
1166  * vectors that hold the solution at the previous time step. Note the
1167  * function SparseMatrix::matrix_norm_square that can compute
1168  * @f$\left<V^n,MV^n\right>@f$ and @f$\left<U^n,AU^n\right>@f$ in one step,
1169  * saving us the expense of a temporary vector and several lines of
1170  * code:
1171  *
1172  * @code
1173  * output_results();
1174  *
1175  * std::cout << " Total energy: "
1176  * << (mass_matrix.matrix_norm_square(solution_v) +
1177  * laplace_matrix.matrix_norm_square(solution_u)) /
1178  * 2
1179  * << std::endl;
1180  *
1181  * old_solution_u = solution_u;
1182  * old_solution_v = solution_v;
1183  * }
1184  * }
1185  * } // namespace Step23
1186  *
1187  *
1188  * @endcode
1189  *
1190  *
1191  * <a name="Thecodemaincodefunction"></a>
1192  * <h3>The <code>main</code> function</h3>
1193  *
1194 
1195  *
1196  * What remains is the main function of the program. There is nothing here
1197  * that hasn't been shown in several of the previous programs:
1198  *
1199  * @code
1200  * int main()
1201  * {
1202  * try
1203  * {
1204  * using namespace Step23;
1205  *
1206  * WaveEquation<2> wave_equation_solver;
1207  * wave_equation_solver.run();
1208  * }
1209  * catch (std::exception &exc)
1210  * {
1211  * std::cerr << std::endl
1212  * << std::endl
1213  * << "----------------------------------------------------"
1214  * << std::endl;
1215  * std::cerr << "Exception on processing: " << std::endl
1216  * << exc.what() << std::endl
1217  * << "Aborting!" << std::endl
1218  * << "----------------------------------------------------"
1219  * << std::endl;
1220  *
1221  * return 1;
1222  * }
1223  * catch (...)
1224  * {
1225  * std::cerr << std::endl
1226  * << std::endl
1227  * << "----------------------------------------------------"
1228  * << std::endl;
1229  * std::cerr << "Unknown exception!" << std::endl
1230  * << "Aborting!" << std::endl
1231  * << "----------------------------------------------------"
1232  * << std::endl;
1233  * return 1;
1234  * }
1235  *
1236  * return 0;
1237  * }
1238  * @endcode
1239 <a name="Results"></a><h1>Results</h1>
1240 
1241 
1242 When the program is run, it produces the following output:
1243 @code
1244 Number of active cells: 16384
1245 Number of degrees of freedom: 16641
1246 
1247 Time step 1 at t=0.015625
1248  u-equation: 8 CG iterations.
1249  v-equation: 22 CG iterations.
1250  Total energy: 1.17887
1251 Time step 2 at t=0.03125
1252  u-equation: 8 CG iterations.
1253  v-equation: 20 CG iterations.
1254  Total energy: 2.9655
1255 Time step 3 at t=0.046875
1256  u-equation: 8 CG iterations.
1257  v-equation: 21 CG iterations.
1258  Total energy: 4.33761
1259 Time step 4 at t=0.0625
1260  u-equation: 7 CG iterations.
1261  v-equation: 21 CG iterations.
1262  Total energy: 5.35499
1263 Time step 5 at t=0.078125
1264  u-equation: 7 CG iterations.
1265  v-equation: 21 CG iterations.
1266  Total energy: 6.18652
1267 Time step 6 at t=0.09375
1268  u-equation: 7 CG iterations.
1269  v-equation: 20 CG iterations.
1270  Total energy: 6.6799
1271 
1272 ...
1273 
1274 Time step 31 at t=0.484375
1275  u-equation: 7 CG iterations.
1276  v-equation: 20 CG iterations.
1277  Total energy: 21.9068
1278 Time step 32 at t=0.5
1279  u-equation: 7 CG iterations.
1280  v-equation: 20 CG iterations.
1281  Total energy: 23.3394
1282 Time step 33 at t=0.515625
1283  u-equation: 7 CG iterations.
1284  v-equation: 20 CG iterations.
1285  Total energy: 23.1019
1286 
1287 ...
1288 
1289 Time step 319 at t=4.98438
1290  u-equation: 7 CG iterations.
1291  v-equation: 20 CG iterations.
1292  Total energy: 23.1019
1293 Time step 320 at t=5
1294  u-equation: 7 CG iterations.
1295  v-equation: 20 CG iterations.
1296  Total energy: 23.1019
1297 @endcode
1298 
1299 What we see immediately is that the energy is constant at least after
1300 @f$t=\frac 12@f$ (up to which time the boundary source term @f$g@f$ is nonzero,
1301 injecting energy into the system).
1302 
1303 In addition to the screen output, the program writes the solution of each time
1304 step to an output file. If we process them adequately and paste them into a
1305 movie, we get the following:
1306 
1307 <img src="https://www.dealii.org/images/steps/developer/step-23.movie.gif" alt="Animation of the solution of step 23.">
1308 
1309 The movie shows the generated wave nicely traveling through the domain and back,
1310 being reflected at the clamped boundary. Some numerical noise trails the
1311 wave, an artifact of the too-large mesh size; it can be reduced by reducing the
1312 mesh width and the time step.
1313 
1314 
1315 <a name="extensions"></a>
1316 <a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
1317 
1318 
1319 If you want to explore a bit, try out some of the following things:
1320 <ul>
1321  <li>Varying @f$\theta@f$. This gives different time stepping schemes, some of
1322  which are stable while others are not. Take a look at how the energy
1323  evolves.
1324 
1325  <li>Different initial and boundary conditions, right hand sides.
1326 
1327  <li>More complicated domains or more refined meshes. Remember that the time
1328  step needs to be bounded by the mesh width, so changing the mesh should
1329  always involve also changing the time step. We will come back to this issue
1330  in @ref step_24 "step-24".
1331 
1332  <li>Variable coefficients: In real media, the wave speed is often
1333  variable. In particular, the "real" wave equation in realistic media would
1334  read
1335  @f[
1336  \rho(x) \frac{\partial^2 u}{\partial t^2}
1337  -
1338  \nabla \cdot
1339  a(x) \nabla u = f,
1340  @f]
1341  where @f$\rho(x)@f$ is the density of the material, and @f$a(x)@f$ is related to the
1342  stiffness coefficient. The wave speed is then @f$c=\sqrt{a/\rho}@f$.
1343 
1344  To make such a change, we would have to compute the mass and Laplace
1345  matrices with a variable coefficient. Fortunately, this isn't too hard: the
1346  functions MatrixCreator::create_laplace_matrix and
1347  MatrixCreator::create_mass_matrix have additional default parameters that can
1348  be used to pass non-constant coefficient functions to them (a sketch of this
1349  appears after this list), so the required changes are relatively small. Care
1350  must again be taken, though, to make sure the time step is within the allowed range.
1351 
1352  <li>In the in-code comments, we discussed the fact that the matrices for
1353  solving for @f$U^n@f$ and @f$V^n@f$ need to be reset in every time step because
1354  of boundary conditions, even though the actual content does not change. It is
1355  possible to avoid copying by not eliminating columns in the linear systems,
1356  which is implemented by appending a @p false argument to the call:
1357  @code
1358  MatrixTools::apply_boundary_values(boundary_values,
1359  matrix_u,
1360  solution_u,
1361  system_rhs,
1362  false);
1363  @endcode
1364 
1365  <li>Since deal.II is a library that supports adaptive meshes, it would of course
1366  be nice if this program supported changing the mesh every few time steps. Given the
1367  structure of the solution &mdash; a wave that travels through the domain &mdash;
1368  it would seem appropriate if we only refined the mesh where the wave currently is,
1369  and not simply everywhere. It is intuitively clear that we should be able to
1370  save a significant number of cells this way. (Though upon further thought one
1371  realizes that this is really only the case in the initial stages of the simulation.
1372  After some time, for wave phenomena, the domain is filled with reflections of
1373  the initial wave going in every direction and filling every corner of the domain.
1374  At this point, there is in general little one can gain using local mesh
1375  refinement.)
1376 
1377  To make adaptively changing meshes possible, there are basically two routes.
1378  The "correct" way would be to go back to the weak form we get using Rothe's
1379  method. For example, the first of the two equations to be solved in each time
1380  step looked like this:
1381  \f{eqnarray*}
1382  (u^n,\varphi) + k^2\theta^2(\nabla u^n,\nabla \varphi) &=&
1383  (u^{n-1},\varphi) - k^2\theta(1-\theta)(\nabla u^{n-1},\nabla \varphi)
1384  +
1385  k(v^{n-1},\varphi)
1386  + k^2\theta
1387  \left[
1388  \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
1389  \right].
1390  \f}
1391  Now, note that we solve for @f$u^n@f$ on mesh @f${\mathbb T}^n@f$, and
1392  consequently the test functions @f$\varphi@f$ have to be from the space
1393  @f$V_h^n@f$ as well. As discussed in the introduction, terms like
1394  @f$(u^{n-1},\varphi)@f$ then require us to integrate the solution of the
1395  previous step (which may have been computed on a different mesh
1396  @f${\mathbb T}^{n-1}@f$) against the test functions of the current mesh,
1397  leading to a matrix @f$M^{n,n-1}@f$. This process of integrating shape
1398  functions from different meshes is, at best, awkward. It can be done
1399  but because it is difficult to ensure that @f${\mathbb T}^{n-1}@f$ and
1400  @f${\mathbb T}^{n}@f$ differ by at most one level of refinement, one
1401  has to recursively match cells from both meshes. It is feasible to
1402  do this, but it leads to lengthy and not entirely obvious code.
1403 
1404  The second approach is the following: whenever we change the mesh, we simply
1405  interpolate the solution from the last time step on the old mesh to the new
1406  mesh, using the SolutionTransfer class (a sketch appears after this list). In
1407  other words, instead of the equation above, we would solve
1408  \f{eqnarray*}
1409  (u^n,\varphi) + k^2\theta^2(\nabla u^n,\nabla \varphi) &=&
1410  (I^n u^{n-1},\varphi) - k^2\theta(1-\theta)(\nabla I^n u^{n-1},\nabla \varphi)
1411  +
1412  k(I^n v^{n-1},\varphi)
1413  + k^2\theta
1414  \left[
1415  \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
1416  \right],
1417  \f}
1418  where @f$I^n@f$ interpolates a given function onto mesh @f${\mathbb T}^n@f$.
1419  This is a much simpler approach because, in each time step, we no
1420  longer have to worry whether @f$u^{n-1},v^{n-1}@f$ were computed on the
1421  same mesh as we are using now or on a different mesh. Consequently,
1422  the only changes to the code necessary are the addition of a function
1423  that computes the error, marks cells for refinement, sets up a
1424  SolutionTransfer object, transfers the solution to the new mesh, and
1425  rebuilds matrices and right hand side vectors on the new mesh. Neither
1426  the functions building the matrices and right hand sides, nor the
1427  solvers need to be changed.
1428 
1429  While this second approach is, strictly speaking,
1430  not quite correct in the Rothe framework (it introduces an additional source
1431  of error, namely the interpolation), it is nevertheless what
1432  almost everyone solving time-dependent equations does. We will use this
1433  method in @ref step_31 "step-31", for example.
1434 </ul>
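
For the variable-coefficient extension mentioned above, a minimal sketch of
what passing a coefficient to the matrix assembly could look like is given
here. The class <code>WaveSpeedCoefficient</code> and its concrete values are
made up for illustration and are not part of the tutorial program:
@code
template <int dim>
class WaveSpeedCoefficient : public Function<dim>
{
public:
  virtual double value(const Point<dim> &p,
                       const unsigned int /*component*/ = 0) const override
  {
    // A made-up coefficient: a stiffer material in the right half of the
    // domain than in the left half.
    return (p[0] > 0 ? 4. : 1.);
  }
};

// ...inside WaveEquation<dim>::setup_system(), replace the Laplace matrix
// assembly by a call that also passes a pointer to the coefficient:
WaveSpeedCoefficient<dim> coefficient;
MatrixCreator::create_laplace_matrix(dof_handler,
                                     QGauss<dim>(fe.degree + 1),
                                     laplace_matrix,
                                     &coefficient);
@endcode
A non-constant density @f$\rho(x)@f$ would be passed to
MatrixCreator::create_mass_matrix in the same way. As noted above, the time
step restriction then has to be checked against the largest wave speed
occurring in the domain.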
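
Likewise, for the interpolation-based way of changing the mesh between time
steps discussed above, a minimal sketch of the mechanics using the
SolutionTransfer class is shown below. It only transfers one of the solution
vectors and leaves out the refinement criterion and the rebuilding of
matrices; these simplifications are for illustration only:
@code
SolutionTransfer<dim> solution_transfer(dof_handler);

// ...flag cells for refinement and coarsening, e.g. based on an error
// estimate, before this point...

// Prepare the triangulation and the transfer object, then actually change
// the mesh and redistribute degrees of freedom on it.
triangulation.prepare_coarsening_and_refinement();
solution_transfer.prepare_for_coarsening_and_refinement(old_solution_u);
triangulation.execute_coarsening_and_refinement();
dof_handler.distribute_dofs(fe);

// Interpolate the old solution onto the new mesh.
Vector<double> interpolated_old_solution_u(dof_handler.n_dofs());
solution_transfer.interpolate(old_solution_u, interpolated_old_solution_u);
old_solution_u = interpolated_old_solution_u;
@endcode
In the actual program, old_solution_v would be transferred in the same way
(SolutionTransfer also has overloads taking a std::vector of vectors), and the
sparsity pattern, matrices, and right hand side vectors would afterwards have
to be rebuilt on the new mesh.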
1435  *
1436  *
1437 <a name="PlainProg"></a>
1438 <h1> The plain program</h1>
1439 @include "step-23.cc"
1440 */