  #ifndef dealii__cdr_parameters_h
  #define dealii__cdr_parameters_h

  #include <deal.II/base/parameter_handler.h>
I prefer to use the ParameterHandler class in a slightly different way than
usual: the class Parameters creates, uses, and then destroys a
ParameterHandler inside its read_parameter_file member function instead of
keeping one around. This is nice because now all of the run time parameters
are contained in a simple class and it can be copied or passed around very
easily.
    double inner_radius;
    double outer_radius;

    double diffusion_coefficient;
    double reaction_coefficient;
    bool   time_dependent_forcing;

    unsigned int initial_refinement_level;
    unsigned int max_refinement_level;
    unsigned int fe_order;

    double       start_time;
    double       stop_time;
    unsigned int n_time_steps;

    unsigned int save_interval;
    unsigned int patch_level;

    void read_parameter_file(const std::string &file_name);
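Typical usage mirrors what <code>main()</code> does at the end of this program: construct the object, read a file, and then use the plain members directly. A minimal sketch:

@code
  CDR::Parameters parameters;
  parameters.read_parameter_file("parameters.prm");
  // All run time parameters are now ordinary members, e.g. the time step:
  const double time_step =
    (parameters.stop_time - parameters.start_time) / parameters.n_time_steps;
@endcode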
<a name="ann-common/include/deal.II-cdr/system_matrix.h"></a>
<h1>Annotated version of common/include/deal.II-cdr/system_matrix.h</h1>
  #ifndef dealii__cdr_system_matrix_h
  #define dealii__cdr_system_matrix_h
  #include <deal.II/base/quadrature_lib.h>

  #include <deal.II/dofs/dof_handler.h>

  #include <deal.II/lac/affine_constraints.h>

  #include <deal.II-cdr/parameters.h>

  #include <functional>
One of the goals I had in writing this entry was to split up functions into
different compilation units instead of using one large file. This is the
header file for a pair of functions (only one of which I ultimately use)
which build the system matrix.
  template <int dim, typename MatrixType>
  void create_system_matrix(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    MatrixType &system_matrix);

  template <int dim, typename MatrixType>
  void create_system_matrix(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    const AffineConstraints<double> &constraints,
    MatrixType &system_matrix);
<a name="ann-common/include/deal.II-cdr/system_matrix.templates.h"></a>
<h1>Annotated version of common/include/deal.II-cdr/system_matrix.templates.h</h1>
  #ifndef dealii__cdr_system_matrix_templates_h
  #define dealii__cdr_system_matrix_templates_h
  #include <deal.II/base/quadrature_lib.h>

  #include <deal.II/dofs/dof_handler.h>

  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_values.h>

  #include <deal.II/lac/affine_constraints.h>

  #include <deal.II-cdr/parameters.h>
  #include <deal.II-cdr/system_matrix.h>

  #include <functional>
This is the actual implementation of the <code>create_system_matrix</code>
function described in the header file. It is similar to the system matrix
assembly routine in @ref step_40 "step-40".
  template <int dim, typename UpdateFunction>
  void internal_create_system_matrix(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    UpdateFunction update_system_matrix)
  {
    auto &fe                  = dof_handler.get_fe();
    const auto dofs_per_cell  = fe.dofs_per_cell;
    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
    std::vector<types::global_dof_index> local_indices(dofs_per_cell);

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        if (cell->is_locally_owned())
          {
            fe_values.reinit(cell);
            cell_matrix = 0.0;
            cell->get_dof_indices(local_indices);
            for (unsigned int q = 0; q < quad.size(); ++q)
              {
                const auto current_convection =
                  convection_function(fe_values.quadrature_point(q));

                for (unsigned int i = 0; i < dofs_per_cell; ++i)
                  for (unsigned int j = 0; j < dofs_per_cell; ++j)
                    {
                      const auto convection_contribution =
                        current_convection * fe_values.shape_grad(j, q);

Here are the time step, mass, and reaction parts:

                      cell_matrix(i, j) +=
                        fe_values.JxW(q) *
                        ((1.0 +
                          time_step / 2.0 * parameters.reaction_coefficient) *
                           fe_values.shape_value(i, q) *
                           fe_values.shape_value(j, q) +

and the convection part:

                         time_step / 2.0 *
                           (fe_values.shape_value(i, q) *
                              convection_contribution

and, finally, the diffusion part:

                            + parameters.diffusion_coefficient *
                                (fe_values.shape_grad(i, q) *
                                 fe_values.shape_grad(j, q))));
                    }
              }
            update_system_matrix(local_indices, cell_matrix);
          }
      }
  }
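Summed over cells and quadrature points, the terms above form the left hand side of a Crank-Nicolson step. Written out as a sketch (the grouping is mine, reconstructed from the code above, not quoted from the original text):

@f[
  A_{ij} = \int_\Omega \left[\left(1 + \frac{\Delta t}{2}\, r\right)\varphi_i\,\varphi_j
         + \frac{\Delta t}{2}\,\varphi_i\,(\mathbf{w}\cdot\nabla\varphi_j)
         + \frac{\Delta t}{2}\,d\,\nabla\varphi_i\cdot\nabla\varphi_j\right]\mathrm{d}x,
@f]

where @f$r@f$ is <code>reaction_coefficient</code>, @f$d@f$ is <code>diffusion_coefficient</code>, @f$\mathbf{w}@f$ is the convection field, and @f$\Delta t@f$ is the time step.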
  template <int dim, typename MatrixType>
  void create_system_matrix(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    const AffineConstraints<double> &constraints,
    MatrixType &system_matrix)
  {
    internal_create_system_matrix<dim>(
      dof_handler,
      quad,
      convection_function,
      parameters,
      time_step,
      [&constraints, &system_matrix](
        const std::vector<types::global_dof_index> &local_indices,
        const FullMatrix<double> &cell_matrix) {
        constraints.distribute_local_to_global(cell_matrix,
                                               local_indices,
                                               system_matrix);
      });
  }

  template <int dim, typename MatrixType>
  void create_system_matrix(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    MatrixType &system_matrix)
  {
    internal_create_system_matrix<dim>(
      dof_handler,
      quad,
      convection_function,
      parameters,
      time_step,
      [&system_matrix](
        const std::vector<types::global_dof_index> &local_indices,
        const FullMatrix<double> &cell_matrix) {
        system_matrix.add(local_indices, cell_matrix);
      });
  }
<a name="ann-common/include/deal.II-cdr/system_rhs.h"></a>
<h1>Annotated version of common/include/deal.II-cdr/system_rhs.h</h1>
  #ifndef dealii__cdr_system_rhs_h
  #define dealii__cdr_system_rhs_h
  #include <deal.II/base/point.h>
  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/base/tensor.h>

  #include <deal.II/dofs/dof_handler.h>

  #include <deal.II/lac/affine_constraints.h>

  #include <deal.II-cdr/parameters.h>

  #include <functional>
Similarly to <code>create_system_matrix</code>, I wrote a separate function
to compute the right hand side.

  template <int dim, typename VectorType>
  void create_system_rhs(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const std::function<double(double, const Point<dim>)> &forcing_function,
    const CDR::Parameters &parameters,
    const VectorType &previous_solution,
    const AffineConstraints<double> &constraints,
    const double current_time,
    VectorType &system_rhs);
<a name="ann-common/include/deal.II-cdr/system_rhs.templates.h"></a>
<h1>Annotated version of common/include/deal.II-cdr/system_rhs.templates.h</h1>
  #ifndef dealii__cdr_system_rhs_templates_h
  #define dealii__cdr_system_rhs_templates_h
  #include <deal.II/base/point.h>
  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/base/tensor.h>

  #include <deal.II/dofs/dof_handler.h>

  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_values.h>

  #include <deal.II/lac/affine_constraints.h>
  #include <deal.II/lac/vector.h>

  #include <deal.II-cdr/parameters.h>
  #include <deal.II-cdr/system_rhs.h>

  #include <functional>
  template <int dim, typename VectorType>
  void create_system_rhs(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const std::function<double(double, const Point<dim>)> &forcing_function,
    const CDR::Parameters &parameters,
    const VectorType &previous_solution,
    const AffineConstraints<double> &constraints,
    const double current_time,
    VectorType &system_rhs)
  {
    auto &fe                 = dof_handler.get_fe();
    const auto dofs_per_cell = fe.dofs_per_cell;
    const double time_step =
      (parameters.stop_time - parameters.start_time) / parameters.n_time_steps;

    Vector<double> cell_rhs(dofs_per_cell);
    Vector<double> current_fe_coefficients(dofs_per_cell);
    std::vector<types::global_dof_index> local_indices(dofs_per_cell);

    const double previous_time{current_time - time_step};

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        if (cell->is_locally_owned())
          {
            fe_values.reinit(cell);
            cell_rhs = 0.0;
            cell->get_dof_indices(local_indices);
            for (unsigned int i = 0; i < dofs_per_cell; ++i)
              current_fe_coefficients[i] =
                previous_solution[local_indices[i]];

            for (unsigned int q = 0; q < quad.size(); ++q)
              {
                const auto current_convection =
                  convection_function(fe_values.quadrature_point(q));

                const double current_forcing =
                  forcing_function(current_time, fe_values.quadrature_point(q));
                const double previous_forcing =
                  forcing_function(previous_time,
                                   fe_values.quadrature_point(q));

                for (unsigned int i = 0; i < dofs_per_cell; ++i)
                  for (unsigned int j = 0; j < dofs_per_cell; ++j)
                    {
                      const auto convection_contribution =
                        current_convection * fe_values.shape_grad(j, q);

Here are the mass and reaction parts:

                      cell_rhs(i) +=
                        fe_values.JxW(q) *
                        (((1.0 - time_step / 2.0 *
                                   parameters.reaction_coefficient) *
                            fe_values.shape_value(i, q) *
                            fe_values.shape_value(j, q) -

the convection part:

                          time_step / 2.0 *
                            (fe_values.shape_value(i, q) *
                               convection_contribution

the diffusion part:

                             + parameters.diffusion_coefficient *
                                 (fe_values.shape_grad(i, q) *
                                  fe_values.shape_grad(j, q)))) *
                           current_fe_coefficients[j]

and, finally, the forcing function part:

                         + time_step / 2.0 *
                             (current_forcing + previous_forcing) *
                             fe_values.shape_value(i, q));
                    }
              }
            constraints.distribute_local_to_global(cell_rhs,
                                                   local_indices,
                                                   system_rhs);
          }
      }
  }
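Schematically, and glossing over the exact loop grouping (which this extract does not fully show), the assembled vector represents the Crank-Nicolson right hand side

@f[
  b_i = \int_\Omega \left[\left(1 - \frac{\Delta t}{2}\, r\right)\varphi_i\,u^{n-1}
      - \frac{\Delta t}{2}\left(\varphi_i\,\mathbf{w}\cdot\nabla u^{n-1}
      + d\,\nabla\varphi_i\cdot\nabla u^{n-1}\right)
      + \frac{\Delta t}{2}\left(f^{n} + f^{n-1}\right)\varphi_i\right]\mathrm{d}x,
@f]

where @f$u^{n-1}@f$ is the previous solution (expanded in <code>current_fe_coefficients</code> above) and @f$f^{n}@f$, @f$f^{n-1}@f$ are the forcing function evaluated at the current and previous times. Together with the matrix assembled in <code>system_matrix.templates.h</code> this is one Crank-Nicolson step.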
<a name="ann-common/include/deal.II-cdr/write_pvtu_output.h"></a>
<h1>Annotated version of common/include/deal.II-cdr/write_pvtu_output.h</h1>
  #ifndef dealii__cdr_write_pvtu_output_h
  #define dealii__cdr_write_pvtu_output_h
  #include <deal.II/dofs/dof_handler.h>

This is a small class which handles PVTU output.

  class WritePVTUOutput
  {
  public:
    WritePVTUOutput(const unsigned int patch_level);

    template <int dim, typename VectorType>
    void write_output(const DoFHandler<dim> &dof_handler,
                      const VectorType &solution,
                      const unsigned int time_step_n,
                      const double current_time);

  private:
    const unsigned int patch_level;
    const unsigned int this_mpi_process;
  };
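Its use is a single call per output step; a minimal sketch, mirroring how the solver below uses it:

@code
  CDR::WritePVTUOutput pvtu_output(parameters.patch_level);
  pvtu_output.write_output(dof_handler,
                           locally_relevant_solution,
                           time_step_n,
                           current_time);
@endcode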
<a name="ann-common/include/deal.II-cdr/write_pvtu_output.templates.h"></a>
<h1>Annotated version of common/include/deal.II-cdr/write_pvtu_output.templates.h</h1>
  #ifndef dealii__cdr_write_pvtu_output_templates_h
  #define dealii__cdr_write_pvtu_output_templates_h
  #include <deal.II/base/data_out_base.h>
  #include <deal.II/base/utilities.h>

  #include <deal.II/dofs/dof_handler.h>

  #include <deal.II/lac/vector.h>

  #include <deal.II/numerics/data_out.h>

  #include <deal.II-cdr/write_pvtu_output.h>

Here is the implementation of the important function. This is similar to
what is presented in @ref step_40 "step-40".
  template <int dim, typename VectorType>
  void WritePVTUOutput::write_output(const DoFHandler<dim> &dof_handler,
                                     const VectorType &solution,
                                     const unsigned int time_step_n,
                                     const double current_time)
  {
    DataOut<dim> data_out;
    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(solution, "u");

    const auto &triangulation = dof_handler.get_triangulation();
    Vector<float> subdomain(triangulation.n_active_cells());
    for (auto &domain : subdomain)
      domain = triangulation.locally_owned_subdomain();
    data_out.add_data_vector(subdomain, "subdomain");
    data_out.build_patches(patch_level);

    DataOutBase::VtkFlags flags;
    flags.time = current_time;

While the default flag is for the best compression level, using
<code>best_speed</code> makes this function much faster.

    data_out.set_flags(flags);

    unsigned int subdomain_n;
    /* ... */
    data_out.write_vtu(output);

    if (this_mpi_process == 0)
      {
        std::vector<std::string> filenames;
        for (unsigned int i = 0; /* ... */)
          filenames.push_back("solution-" + /* ... */);
        std::ofstream master_output(/* ... */);
        data_out.write_pvtu_record(master_output, filenames);
      }
  }
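On the compression remark above: the flag being configured is the VTK compression level on <code>DataOutBase::VtkFlags</code>. A minimal sketch of that setup, with the caveat that the exact enum name has changed between deal.II versions, is:

@code
  DataOutBase::VtkFlags flags;
  flags.time = current_time;
  // trade slightly larger files for much faster output
  flags.compression_level = DataOutBase::CompressionLevel::best_speed;
  data_out.set_flags(flags);
@endcode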
<a name="ann-common/source/parameters.cc"></a>
<h1>Annotated version of common/source/parameters.cc</h1>
-----------------------------------------------------------------------------

SPDX-License-Identifier: LGPL-2.1-or-later
Copyright (C) 2015 by David Wells

This file is part of the deal.II code gallery.

-----------------------------------------------------------------------------

  #include <deal.II-cdr/parameters.h>
  void Parameters::configure_parameter_handler(
    ParameterHandler &parameter_handler)
  {
    parameter_handler.enter_subsection("Geometry");
    parameter_handler.declare_entry("inner_radius", /* ... */);
    parameter_handler.declare_entry("outer_radius", /* ... */);
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Physical Parameters");
    parameter_handler.declare_entry("diffusion_coefficient",
                                    /* ... */
                                    "Diffusion coefficient.");
    parameter_handler.declare_entry("reaction_coefficient",
                                    /* ... */
                                    "Reaction coefficient.");
    parameter_handler.declare_entry("time_dependent_forcing",
                                    /* ... */
                                    "the forcing function depends on time.");
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Finite Element");
    parameter_handler.declare_entry("initial_refinement_level",
                                    /* ... */
                                    "Initial number of levels in the mesh.");
    parameter_handler.declare_entry("max_refinement_level",
                                    /* ... */
                                    "Maximum number of levels in the mesh.");
    parameter_handler.declare_entry("fe_order",
                                    /* ... */
                                    "Finite element order.");
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Time Step");
    parameter_handler.declare_entry("start_time", /* ... */);
    parameter_handler.declare_entry("stop_time", /* ... */);
    parameter_handler.declare_entry("n_time_steps",
                                    /* ... */
                                    "Number of time steps.");
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Output");
    parameter_handler.declare_entry("save_interval", /* ... */);
    parameter_handler.declare_entry("patch_level", /* ... */);
    parameter_handler.leave_subsection();
  }
  void Parameters::read_parameter_file(const std::string &file_name)
  {
    ParameterHandler parameter_handler;
    std::ifstream file(file_name);
    configure_parameter_handler(parameter_handler);
    parameter_handler.parse_input(file);

    parameter_handler.enter_subsection("Geometry");
    inner_radius = parameter_handler.get_double("inner_radius");
    outer_radius = parameter_handler.get_double("outer_radius");
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Physical Parameters");
    diffusion_coefficient =
      parameter_handler.get_double("diffusion_coefficient");
    reaction_coefficient =
      parameter_handler.get_double("reaction_coefficient");
    time_dependent_forcing =
      parameter_handler.get_bool("time_dependent_forcing");
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Finite Element");
    initial_refinement_level =
      parameter_handler.get_integer("initial_refinement_level");
    max_refinement_level =
      parameter_handler.get_integer("max_refinement_level");
    fe_order = parameter_handler.get_integer("fe_order");
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Time Step");
    start_time   = parameter_handler.get_double("start_time");
    stop_time    = parameter_handler.get_double("stop_time");
    n_time_steps = parameter_handler.get_integer("n_time_steps");
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Output");
    save_interval = parameter_handler.get_integer("save_interval");
    patch_level   = parameter_handler.get_integer("patch_level");
    parameter_handler.leave_subsection();
  }
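For reference, a parameter file matching these subsections and keys might look like the following sketch. The values shown here are purely illustrative; the actual defaults are declared in <code>configure_parameter_handler</code> above and are not visible in this extract.

@code
subsection Geometry
  set inner_radius = 1.0
  set outer_radius = 2.0
end

subsection Physical Parameters
  set diffusion_coefficient  = 1e-3
  set reaction_coefficient   = 1e-4
  set time_dependent_forcing = true
end

subsection Finite Element
  set initial_refinement_level = 4
  set max_refinement_level     = 8
  set fe_order                 = 2
end

subsection Time Step
  set start_time   = 0.0
  set stop_time    = 2.0
  set n_time_steps = 2000
end

subsection Output
  set save_interval = 10
  set patch_level   = 2
end
@endcode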
<a name="ann-common/source/system_matrix.cc"></a>
<h1>Annotated version of common/source/system_matrix.cc</h1>
  #include <deal.II/lac/sparse_matrix.h>
  #include <deal.II/lac/trilinos_sparse_matrix.h>

  #include <deal.II-cdr/parameters.h>
  #include <deal.II-cdr/system_matrix.h>
  #include <deal.II-cdr/system_matrix.templates.h>

This file exists just to build template specializations of
<code>create_system_matrix</code>. Even though the solver is run in parallel
with Trilinos objects, other serial solvers can use the same function without
recompilation by compiling everything here just one time.
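The point of this arrangement is that other executables only include the declarations in <code>system_matrix.h</code> and link against the instantiations compiled here; they never see the .templates.h file. A sketch of a hypothetical serial caller (the setup names are only illustrative):

@code
  #include <deal.II-cdr/system_matrix.h> // declarations only

  // ... set up dof_handler, quad, convection_function, parameters ...
  SparseMatrix<double> system_matrix;
  CDR::create_system_matrix<2, SparseMatrix<double>>(
    dof_handler, quad, convection_function, parameters, time_step,
    system_matrix); // uses the instantiation compiled in system_matrix.cc
@endcode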
  template void
  create_system_matrix<2, SparseMatrix<double>>(
    const DoFHandler<2> &dof_handler,
    const QGauss<2> &quad,
    const std::function<Tensor<1, 2>(const Point<2>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    SparseMatrix<double> &system_matrix);

  template void
  create_system_matrix<3, SparseMatrix<double>>(
    const DoFHandler<3> &dof_handler,
    const QGauss<3> &quad,
    const std::function<Tensor<1, 3>(const Point<3>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    SparseMatrix<double> &system_matrix);

  template void
  create_system_matrix<2, SparseMatrix<double>>(
    const DoFHandler<2> &dof_handler,
    const QGauss<2> &quad,
    const std::function<Tensor<1, 2>(const Point<2>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    const AffineConstraints<double> &constraints,
    SparseMatrix<double> &system_matrix);

  template void
  create_system_matrix<3, SparseMatrix<double>>(
    const DoFHandler<3> &dof_handler,
    const QGauss<3> &quad,
    const std::function<Tensor<1, 3>(const Point<3>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    const AffineConstraints<double> &constraints,
    SparseMatrix<double> &system_matrix);

  template void
  create_system_matrix<2, TrilinosWrappers::SparseMatrix>(
    const DoFHandler<2> &dof_handler,
    const QGauss<2> &quad,
    const std::function<Tensor<1, 2>(const Point<2>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    TrilinosWrappers::SparseMatrix &system_matrix);

  template void
  create_system_matrix<3, TrilinosWrappers::SparseMatrix>(
    const DoFHandler<3> &dof_handler,
    const QGauss<3> &quad,
    const std::function<Tensor<1, 3>(const Point<3>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    TrilinosWrappers::SparseMatrix &system_matrix);

  template void
  create_system_matrix<2, TrilinosWrappers::SparseMatrix>(
    const DoFHandler<2> &dof_handler,
    const QGauss<2> &quad,
    const std::function<Tensor<1, 2>(const Point<2>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    const AffineConstraints<double> &constraints,
    TrilinosWrappers::SparseMatrix &system_matrix);

  template void
  create_system_matrix<3, TrilinosWrappers::SparseMatrix>(
    const DoFHandler<3> &dof_handler,
    const QGauss<3> &quad,
    const std::function<Tensor<1, 3>(const Point<3>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    const AffineConstraints<double> &constraints,
    TrilinosWrappers::SparseMatrix &system_matrix);
<a name="ann-common/source/system_rhs.cc"></a>
<h1>Annotated version of common/source/system_rhs.cc</h1>
  #include <deal.II/lac/sparse_matrix.h>
  #include <deal.II/lac/trilinos_sparse_matrix.h>
  #include <deal.II/lac/trilinos_vector.h>
  #include <deal.II/lac/vector.h>

  #include <deal.II-cdr/system_rhs.templates.h>

Like <code>system_matrix.cc</code>, this file just compiles template
specializations of <code>create_system_rhs</code>.

  using namespace dealii;

  template void
  create_system_rhs<2, Vector<double>>(
    const DoFHandler<2> &dof_handler,
    const QGauss<2> &quad,
    const std::function<Tensor<1, 2>(const Point<2>)> &convection_function,
    const std::function<double(double, const Point<2>)> &forcing_function,
    const CDR::Parameters &parameters,
    const Vector<double> &previous_solution,
    const AffineConstraints<double> &constraints,
    const double current_time,
    Vector<double> &system_rhs);

  template void
  create_system_rhs<3, Vector<double>>(
    const DoFHandler<3> &dof_handler,
    const QGauss<3> &quad,
    const std::function<Tensor<1, 3>(const Point<3>)> &convection_function,
    const std::function<double(double, const Point<3>)> &forcing_function,
    const CDR::Parameters &parameters,
    const Vector<double> &previous_solution,
    const AffineConstraints<double> &constraints,
    const double current_time,
    Vector<double> &system_rhs);

  template void
  create_system_rhs<2, TrilinosWrappers::MPI::Vector>(
    const DoFHandler<2> &dof_handler,
    const QGauss<2> &quad,
    const std::function<Tensor<1, 2>(const Point<2>)> &convection_function,
    const std::function<double(double, const Point<2>)> &forcing_function,
    const CDR::Parameters &parameters,
    const TrilinosWrappers::MPI::Vector &previous_solution,
    const AffineConstraints<double> &constraints,
    const double current_time,
    TrilinosWrappers::MPI::Vector &system_rhs);

  template void
  create_system_rhs<3, TrilinosWrappers::MPI::Vector>(
    const DoFHandler<3> &dof_handler,
    const QGauss<3> &quad,
    const std::function<Tensor<1, 3>(const Point<3>)> &convection_function,
    const std::function<double(double, const Point<3>)> &forcing_function,
    const CDR::Parameters &parameters,
    const TrilinosWrappers::MPI::Vector &previous_solution,
    const AffineConstraints<double> &constraints,
    const double current_time,
    TrilinosWrappers::MPI::Vector &system_rhs);
<a name="ann-common/source/write_pvtu_output.cc"></a>
<h1>Annotated version of common/source/write_pvtu_output.cc</h1>
  #include <deal.II/lac/trilinos_vector.h>
  #include <deal.II/lac/vector.h>

  #include <deal.II-cdr/write_pvtu_output.templates.h>

Again, this file just compiles the constructor and also the templated
<code>write_output</code> functions.

  using namespace dealii;

  WritePVTUOutput::WritePVTUOutput(const unsigned int patch_level)
    : patch_level{patch_level}
    , this_mpi_process{Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)}
  {}
  template void
  WritePVTUOutput::write_output(const DoFHandler<2> &dof_handler,
                                const TrilinosWrappers::MPI::Vector &solution,
                                const unsigned int time_step_n,
                                const double current_time);

  template void
  WritePVTUOutput::write_output(const DoFHandler<3> &dof_handler,
                                const TrilinosWrappers::MPI::Vector &solution,
                                const unsigned int time_step_n,
                                const double current_time);

  template void
  WritePVTUOutput::write_output(const DoFHandler<2> &dof_handler,
                                const Vector<double> &solution,
                                const unsigned int time_step_n,
                                const double current_time);

  template void
  WritePVTUOutput::write_output(const DoFHandler<3> &dof_handler,
                                const Vector<double> &solution,
                                const unsigned int time_step_n,
                                const double current_time);
<a name="ann-solver/cdr.cc"></a>
<h1>Annotated version of solver/cdr.cc</h1>
  #include <deal.II/base/conditional_ostream.h>
  #include <deal.II/base/quadrature_lib.h>

  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/dofs/dof_tools.h>

  #include <deal.II/fe/fe_q.h>

  #include <deal.II/grid/grid_generator.h>
  #include <deal.II/grid/manifold_lib.h>

  #include <deal.II/lac/affine_constraints.h>
  #include <deal.II/lac/dynamic_sparsity_pattern.h>

  #include <deal.II/numerics/error_estimator.h>

These headers are for distributed computations:

  #include <deal.II/base/index_set.h>
  #include <deal.II/base/utilities.h>

  #include <deal.II/distributed/grid_refinement.h>
  #include <deal.II/distributed/solution_transfer.h>
  #include <deal.II/distributed/tria.h>

  #include <deal.II/lac/sparsity_tools.h>
  #include <deal.II/lac/trilinos_precondition.h>
  #include <deal.II/lac/trilinos_solver.h>
  #include <deal.II/lac/trilinos_sparse_matrix.h>
  #include <deal.II/lac/trilinos_vector.h>

  #include <deal.II-cdr/parameters.h>
  #include <deal.II-cdr/system_matrix.h>
  #include <deal.II-cdr/system_rhs.h>
  #include <deal.II-cdr/write_pvtu_output.h>

  #include <chrono>
  #include <functional>
  #include <iostream>

  using namespace dealii;
This is the actual solver class which performs time iteration and calls the
appropriate library functions to do it.

  template <int dim>
  class CDRProblem
  {
  public:
    CDRProblem(const CDR::Parameters &parameters);
    void run();

  private:
    const CDR::Parameters parameters;
    const double          time_step;
    double                current_time;

    const std::function<Tensor<1, dim>(const Point<dim>)> convection_function;
    const std::function<double(double, const Point<dim>)> forcing_function;
As is usual in parallel programs, I keep two copies of parts of the
complete solution: <code>locally_relevant_solution</code> contains both
the locally calculated solution as well as the layer of cells at its
boundary (the @ref GlossGhostCell "ghost cells") while
<code>completely_distributed_solution</code> only contains the parts of
the solution computed on the current @ref GlossMPIProcess "MPI process".
  template <int dim>
  CDRProblem<dim>::CDRProblem(const CDR::Parameters &parameters)
    : parameters(parameters)
    , time_step{(parameters.stop_time - parameters.start_time) /
                parameters.n_time_steps}
    , current_time{parameters.start_time}
    , mpi_communicator(MPI_COMM_WORLD)
    , n_mpi_processes{Utilities::MPI::n_mpi_processes(mpi_communicator)}
    , this_mpi_process{Utilities::MPI::this_mpi_process(mpi_communicator)}
    , fe(parameters.fe_order)
    , quad(parameters.fe_order + 2)
    , forcing_function{[](double t, const Point<dim> p) -> double {
        return
          std::exp(-40 * Utilities::fixed_power<6>(p[0] - 1.5)) *
          std::exp(-40 * Utilities::fixed_power<6>(p[1]));
      }}
    , pcout(std::cout, this_mpi_process == 0)
  {
    Assert(dim == 2, ExcNotImplemented());
  }
  template <int dim>
  void CDRProblem<dim>::setup_geometry()
  {
    GridGenerator::hyper_shell(triangulation,
                               center,
                               parameters.inner_radius,
                               parameters.outer_radius);
    triangulation.set_manifold(manifold_id, boundary_description);
    for (const auto &cell : triangulation.active_cell_iterators())
      cell->set_all_manifold_ids(manifold_id);
    triangulation.refine_global(parameters.initial_refinement_level);
  }
  template <int dim>
  void CDRProblem<dim>::setup_dofs()
  {
    dof_handler.distribute_dofs(fe);
    pcout << "Number of degrees of freedom: " << dof_handler.n_dofs()
          << std::endl;
    locally_owned_dofs = dof_handler.locally_owned_dofs();

    constraints.clear();
    constraints.reinit(locally_relevant_dofs);
    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
    DoFTools::make_zero_boundary_constraints(dof_handler, constraints);
    constraints.close();

    completely_distributed_solution.reinit(locally_owned_dofs, mpi_communicator);

    locally_relevant_solution.reinit(locally_owned_dofs,
                                     locally_relevant_dofs,
                                     mpi_communicator);
  }
  template <int dim>
  void CDRProblem<dim>::setup_system()
  {
    DynamicSparsityPattern dynamic_sparsity_pattern(locally_relevant_dofs);
    DoFTools::make_sparsity_pattern(dof_handler,
                                    dynamic_sparsity_pattern,
                                    constraints);
    SparsityTools::distribute_sparsity_pattern(dynamic_sparsity_pattern,
                                               dof_handler.locally_owned_dofs(),
                                               mpi_communicator,
                                               locally_relevant_dofs);

    system_rhs.reinit(locally_owned_dofs, mpi_communicator);
    system_matrix.reinit(locally_owned_dofs,
                         dynamic_sparsity_pattern,
                         mpi_communicator);

    CDR::create_system_matrix<dim>(dof_handler,
                                   quad,
                                   convection_function,
                                   parameters,
                                   time_step,
                                   constraints,
                                   system_matrix);
    preconditioner.initialize(system_matrix);
  }
  template <int dim>
  void CDRProblem<dim>::time_iterate()
  {
    double current_time = parameters.start_time;
    CDR::WritePVTUOutput pvtu_output(parameters.patch_level);
    for (unsigned int time_step_n = 0; time_step_n < parameters.n_time_steps;
         ++time_step_n)
      {
        current_time += time_step;

        CDR::create_system_rhs<dim>(dof_handler,
                                    quad,
                                    convection_function,
                                    forcing_function,
                                    parameters,
                                    locally_relevant_solution,
                                    constraints,
                                    current_time,
                                    system_rhs);

        SolverControl solver_control(dof_handler.n_dofs(),
                                     1e-6 * system_rhs.l2_norm(),
                                     /*log_history = */ false,
                                     /*log_result = */ false);
        TrilinosWrappers::SolverGMRES solver(solver_control);
        solver.solve(system_matrix,
                     completely_distributed_solution,
                     system_rhs,
                     preconditioner);
        constraints.distribute(completely_distributed_solution);
        locally_relevant_solution = completely_distributed_solution;

        if (time_step_n % parameters.save_interval == 0)
          {
            pvtu_output.write_output(dof_handler,
                                     locally_relevant_solution,
                                     time_step_n,
                                     current_time);
          }
      }
  }
  template <int dim>
  void CDRProblem<dim>::refine_mesh()
  {
    using FunctionMap = std::map<types::boundary_id, const Function<dim> *>;

    Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
    KellyErrorEstimator<dim>::estimate(dof_handler,
                                       QGauss<dim - 1>(fe.degree + 1),
                                       FunctionMap(),
                                       locally_relevant_solution,
                                       estimated_error_per_cell);

This solver uses a crude refinement strategy where cells with relatively
high errors are refined and cells with relatively low errors are
coarsened. The maximum refinement level is capped to prevent run-away
refinement.

    for (const auto &cell : triangulation.active_cell_iterators())
      {
        if (std::abs(estimated_error_per_cell[cell->active_cell_index()]) >=
            1e-3)
          cell->set_refine_flag();
        else if (std::abs(estimated_error_per_cell[cell->active_cell_index()]) <=
                 /* lower threshold */)
          cell->set_coarsen_flag();
      }

    if (triangulation.n_levels() > parameters.max_refinement_level)
      for (const auto &cell : triangulation.cell_iterators_on_level(
             parameters.max_refinement_level))
        cell->clear_refine_flag();
Transferring the solution between different grids is ultimately just a
few function calls but they must be made in exactly the right order.

    parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
      solution_transfer(dof_handler);

    solution_transfer.prepare_for_coarsening_and_refinement(
      locally_relevant_solution);

The <code>solution_transfer</code> object stores a pointer to
<code>locally_relevant_solution</code>, so when
parallel::distributed::SolutionTransfer::interpolate is called it uses
those values to populate <code>temporary</code>.

    solution_transfer.interpolate(temporary);

After <code>temporary</code> has the correct value, this call correctly
populates <code>completely_distributed_solution</code>, which had its
index set updated above with the call to <code>setup_dofs</code>.

    completely_distributed_solution = temporary;

Constraints cannot be applied to
@ref GlossGhostedVector "vectors with ghost entries" since the ghost
entries are write only, so this first goes through the completely
distributed vector.

    constraints.distribute(completely_distributed_solution);
    locally_relevant_solution = completely_distributed_solution;
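Putting the pieces together, the required ordering is roughly the following sketch. The <code>execute_coarsening_and_refinement()</code> and <code>setup_dofs()</code> steps and the declaration of <code>temporary</code> are not visible in this extract; they are assumptions based on the usual parallel::distributed::SolutionTransfer pattern.

@code
  solution_transfer.prepare_for_coarsening_and_refinement(
    locally_relevant_solution);
  triangulation.execute_coarsening_and_refinement(); // assumed, not in extract
  setup_dofs();                                      // rebuild index sets on the new mesh
  TrilinosWrappers::MPI::Vector temporary(locally_owned_dofs, mpi_communicator);
  solution_transfer.interpolate(temporary);
  completely_distributed_solution = temporary;
  constraints.distribute(completely_distributed_solution);
  locally_relevant_solution = completely_distributed_solution;
@endcode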
  template <int dim>
  void CDRProblem<dim>::run()
  constexpr int dim{2};


  int main(int argc, char *argv[])
  {

One of the new features in C++11 is the <code>chrono</code> component of
the standard library. This gives us an easy way to time the solver.

    auto t0 = std::chrono::high_resolution_clock::now();

    Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
    CDR::Parameters parameters;
    parameters.read_parameter_file("parameters.prm");
    CDRProblem<dim> cdr_problem(parameters);
    cdr_problem.run();

    auto t1 = std::chrono::high_resolution_clock::now();
    std::cout << "time elapsed: "
              << std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0)
                   .count()
              << " milliseconds." << std::endl;
  }