<pre>
#ifndef dealii__cdr_parameters_h
#define dealii__cdr_parameters_h

#include <deal.II/base/parameter_handler.h>
</pre>
I prefer to use the ParameterHandler class in a slightly different way than usual: the class Parameters creates, uses, and then destroys a ParameterHandler inside its <code>read_parameter_file</code> member function instead of keeping one around as a member. This is nice because now all of the run time parameters are contained in a simple class and it can be copied or passed around very easily.
<pre>
namespace CDR
{
  using namespace dealii;

  class Parameters
  {
  public:
    double inner_radius;
    double outer_radius;

    double diffusion_coefficient;
    double reaction_coefficient;
    bool   time_dependent_forcing;

    unsigned int initial_refinement_level;
    unsigned int max_refinement_level;
    unsigned int fe_order;

    double       start_time;
    double       stop_time;
    unsigned int n_time_steps;

    unsigned int save_interval;
    unsigned int patch_level;

    void read_parameter_file(const std::string &file_name);

  private:
    void configure_parameter_handler(ParameterHandler &parameter_handler);
  };
} // namespace CDR
#endif
</pre>
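As a quick illustration of that last point, here is roughly how the driver uses the class (a sketch assembled from the calls in <code>solver/cdr.cc</code> further down; the file name is whatever the run directory provides):

<pre>
// Sketch of typical use; mirrors main() in solver/cdr.cc below.
CDR::Parameters parameters;
parameters.read_parameter_file("parameters.prm");

// Afterwards the object is a plain bundle of values that can be copied or
// passed around, e.g. to compute the time step size:
const double time_step =
  (parameters.stop_time - parameters.start_time) / parameters.n_time_steps;
</pre>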
<a name="ann-common/include/deal.II-cdr/system_matrix.h"></a>
<h1>Annotated version of common/include/deal.II-cdr/system_matrix.h</h1>

<pre>
#ifndef dealii__cdr_system_matrix_h
#define dealii__cdr_system_matrix_h

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>

#include <deal.II-cdr/parameters.h>

#include <functional>
</pre>
One of the goals I had in writing this entry was to split up functions into different compilation units instead of using one large file. This is the header file for a pair of functions (only one of which I ultimately use) which build the system matrix.
<pre>
namespace CDR
{
  using namespace dealii;

  template <int dim, typename MatrixType>
  void create_system_matrix(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    MatrixType &system_matrix);

  template <int dim, typename MatrixType>
  void create_system_matrix(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    const AffineConstraints<double> &constraints,
    MatrixType &system_matrix);
} // namespace CDR
#endif
</pre>
<a name="ann-common/include/deal.II-cdr/system_matrix.templates.h"></a>
<h1>Annotated version of common/include/deal.II-cdr/system_matrix.templates.h</h1>

<pre>
#ifndef dealii__cdr_system_matrix_templates_h
#define dealii__cdr_system_matrix_templates_h

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/affine_constraints.h>

#include <deal.II-cdr/parameters.h>
#include <deal.II-cdr/system_matrix.h>

#include <functional>
</pre>
This is the actual implementation of the <code>create_system_matrix</code> function described in the header file. It is similar to the system matrix assembly routine in step-40.
<pre>
namespace CDR
{
  using namespace dealii;

  template <int dim, typename UpdateFunction>
  void internal_create_system_matrix(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    UpdateFunction update_system_matrix)
  {
    auto &     fe            = dof_handler.get_fe();
    const auto dofs_per_cell = fe.dofs_per_cell;
    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
    FEValues<dim>      fe_values(fe,
                                 quad,
                                 update_values | update_gradients |
                                   update_quadrature_points | update_JxW_values);

    std::vector<types::global_dof_index> local_indices(dofs_per_cell);

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        if (cell->is_locally_owned())
          {
            fe_values.reinit(cell);
            cell_matrix = 0.0;
            cell->get_dof_indices(local_indices);
            for (unsigned int q = 0; q < quad.size(); ++q)
              {
                const auto current_convection =
                  convection_function(fe_values.quadrature_point(q));

                for (unsigned int i = 0; i < dofs_per_cell; ++i)
                  for (unsigned int j = 0; j < dofs_per_cell; ++j)
                    {
                      const auto convection_contribution =
                        current_convection * fe_values.shape_grad(j, q);
                      cell_matrix(i, j) +=
                        fe_values.JxW(q) *
                        // Here are the time step, mass, and reaction parts:
                        ((1.0 +
                          time_step / 2.0 * parameters.reaction_coefficient) *
                           fe_values.shape_value(i, q) *
                           fe_values.shape_value(j, q) +
                         time_step / 2.0 *
                           // and the convection part:
                           (fe_values.shape_value(i, q) * convection_contribution
                            // and, finally, the diffusion part:
                            + parameters.diffusion_coefficient *
                                (fe_values.shape_grad(i, q) *
                                 fe_values.shape_grad(j, q))));
                    }
              }
            update_system_matrix(local_indices, cell_matrix);
          }
      }
  }

  template <int dim, typename MatrixType>
  void create_system_matrix(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    const AffineConstraints<double> &constraints,
    MatrixType &system_matrix)
  {
    internal_create_system_matrix<dim>(
      dof_handler,
      quad,
      convection_function,
      parameters,
      time_step,
      [&constraints, &system_matrix](
        const std::vector<types::global_dof_index> &local_indices,
        const FullMatrix<double> &cell_matrix) {
        constraints.distribute_local_to_global(cell_matrix,
                                               local_indices,
                                               system_matrix);
      });
  }

  template <int dim, typename MatrixType>
  void create_system_matrix(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    MatrixType &system_matrix)
  {
    internal_create_system_matrix<dim>(
      dof_handler,
      quad,
      convection_function,
      parameters,
      time_step,
      [&system_matrix](
        const std::vector<types::global_dof_index> &local_indices,
        const FullMatrix<double> &cell_matrix) {
        system_matrix.add(local_indices, cell_matrix);
      });
  }
} // namespace CDR
#endif
</pre>
<a name="ann-common/include/deal.II-cdr/system_rhs.h"></a>
<h1>Annotated version of common/include/deal.II-cdr/system_rhs.h</h1>

<pre>
#ifndef dealii__cdr_system_rhs_h
#define dealii__cdr_system_rhs_h

#include <deal.II/base/point.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/tensor.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>

#include <deal.II-cdr/parameters.h>

#include <functional>
</pre>
Similarly to <code>create_system_matrix</code>, I wrote a separate function to compute the right hand side.
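In the same (my own) notation as the system matrix sketch above, with $U^n_j$ the coefficients of the previous solution and $f$ the forcing function, the function assembles

\[
b_i = \int_\Omega \Big[\Big(\big(1 - \tfrac{k}{2} r\big)\varphi_i\,\varphi_j
    - \tfrac{k}{2}\big(\varphi_i\,(\mathbf{w}\cdot\nabla\varphi_j)
    + D\,\nabla\varphi_i\cdot\nabla\varphi_j\big)\Big)U^n_j
    + \tfrac{k}{2}\big(f(t^{n+1}) + f(t^n)\big)\varphi_i\Big]\,\mathrm{d}x
\]

(summation over $j$ implied): the explicit half of the same Crank-Nicolson step applied to the previous solution, plus the time-averaged forcing term.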
<pre>
namespace CDR
{
  using namespace dealii;

  template <int dim, typename VectorType>
  void create_system_rhs(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const std::function<double(double, const Point<dim>)> &forcing_function,
    const CDR::Parameters &parameters,
    const VectorType &previous_solution,
    const AffineConstraints<double> &constraints,
    const double current_time,
    VectorType &system_rhs);
} // namespace CDR
#endif
</pre>
<a name="ann-common/include/deal.II-cdr/system_rhs.templates.h"></a>
<h1>Annotated version of common/include/deal.II-cdr/system_rhs.templates.h</h1>

<pre>
#ifndef dealii__cdr_system_rhs_templates_h
#define dealii__cdr_system_rhs_templates_h

#include <deal.II/base/point.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/tensor.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/vector.h>

#include <deal.II-cdr/parameters.h>
#include <deal.II-cdr/system_rhs.h>

#include <functional>
</pre>
<pre>
namespace CDR
{
  using namespace dealii;

  template <int dim, typename VectorType>
  void create_system_rhs(
    const DoFHandler<dim> &dof_handler,
    const QGauss<dim> &quad,
    const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
    const std::function<double(double, const Point<dim>)> &forcing_function,
    const CDR::Parameters &parameters,
    const VectorType &previous_solution,
    const AffineConstraints<double> &constraints,
    const double current_time,
    VectorType &system_rhs)
  {
    auto &       fe            = dof_handler.get_fe();
    const auto   dofs_per_cell = fe.dofs_per_cell;
    const double time_step =
      (parameters.stop_time - parameters.start_time) / parameters.n_time_steps;
    FEValues<dim> fe_values(fe,
                            quad,
                            update_values | update_gradients |
                              update_quadrature_points | update_JxW_values);

    Vector<double> cell_rhs(dofs_per_cell);
    Vector<double> current_fe_coefficients(dofs_per_cell);
    std::vector<types::global_dof_index> local_indices(dofs_per_cell);

    const double previous_time{current_time - time_step};

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        if (cell->is_locally_owned())
          {
            fe_values.reinit(cell);
            cell_rhs = 0.0;
            cell->get_dof_indices(local_indices);
            for (unsigned int i = 0; i < dofs_per_cell; ++i)
              {
                current_fe_coefficients[i] =
                  previous_solution[local_indices[i]];
              }

            for (unsigned int q = 0; q < quad.size(); ++q)
              {
                const auto current_convection =
                  convection_function(fe_values.quadrature_point(q));

                const double current_forcing =
                  forcing_function(current_time, fe_values.quadrature_point(q));
                const double previous_forcing =
                  forcing_function(previous_time,
                                   fe_values.quadrature_point(q));
                for (unsigned int i = 0; i < dofs_per_cell; ++i)
                  for (unsigned int j = 0; j < dofs_per_cell; ++j)
                    {
                      const auto convection_contribution =
                        current_convection * fe_values.shape_grad(j, q);

                      cell_rhs(i) +=
                        fe_values.JxW(q) *
                        // Here are the mass and reaction part:
                        (((1.0 - time_step / 2.0 *
                                   parameters.reaction_coefficient) *
                            fe_values.shape_value(i, q) *
                            fe_values.shape_value(j, q) -
                          time_step / 2.0 *
                            // the convection part:
                            (fe_values.shape_value(i, q) *
                               convection_contribution
                             // the diffusion part:
                             + parameters.diffusion_coefficient *
                                 (fe_values.shape_grad(i, q) *
                                  fe_values.shape_grad(j, q)))) *
                           current_fe_coefficients[j]
                         // and, finally, the forcing function part:
                         + time_step / 2.0 *
                             (current_forcing + previous_forcing) *
                             fe_values.shape_value(i, q));
                    }
              }
            constraints.distribute_local_to_global(cell_rhs,
                                                   local_indices,
                                                   system_rhs);
          }
      }
  }
} // namespace CDR
#endif
</pre>
<a name="ann-common/include/deal.II-cdr/write_pvtu_output.h"></a>
<h1>Annotated version of common/include/deal.II-cdr/write_pvtu_output.h</h1>

<pre>
#ifndef dealii__cdr_write_pvtu_output_h
#define dealii__cdr_write_pvtu_output_h

#include <deal.II/dofs/dof_handler.h>
</pre>
This is a small class which handles PVTU output; a short usage sketch follows the class declaration below.
<pre>
namespace CDR
{
  using namespace dealii;

  class WritePVTUOutput
  {
  public:
    WritePVTUOutput(const unsigned int patch_level);

    template <int dim, typename VectorType>
    void write_output(const DoFHandler<dim> &dof_handler,
                      const VectorType &     solution,
                      const unsigned int     time_step_n,
                      const double           current_time);

  private:
    const unsigned int patch_level;
    const unsigned int this_mpi_process;
  };
} // namespace CDR
#endif
</pre>
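For context, this is roughly how the solver uses the class from its time loop (a sketch assembled from the calls in <code>solver/cdr.cc</code> below; the variable names come from the CDRProblem class there):

<pre>
// Sketch; mirrors the output step inside CDRProblem::time_iterate() below.
CDR::WritePVTUOutput pvtu_output(parameters.patch_level);

if (time_step_n % parameters.save_interval == 0)
  pvtu_output.write_output(dof_handler,
                           locally_relevant_solution,
                           time_step_n,
                           current_time);
</pre>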
<a name="ann-common/include/deal.II-cdr/write_pvtu_output.templates.h"></a>
<h1>Annotated version of common/include/deal.II-cdr/write_pvtu_output.templates.h</h1>

<pre>
#ifndef dealii__cdr_write_pvtu_output_templates_h
#define dealii__cdr_write_pvtu_output_templates_h

#include <deal.II/base/data_out_base.h>
#include <deal.II/base/utilities.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/data_out.h>

#include <deal.II-cdr/write_pvtu_output.h>

#include <fstream>
#include <string>
#include <vector>
</pre>
Here is the implementation of the important function. This is similar to what is presented in step-40.
<pre>
namespace CDR
{
  using namespace dealii;

  template <int dim, typename VectorType>
  void WritePVTUOutput::write_output(const DoFHandler<dim> &dof_handler,
                                     const VectorType &     solution,
                                     const unsigned int     time_step_n,
                                     const double           current_time)
  {
    DataOut<dim> data_out;
    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(solution, "u");

    Vector<float> subdomain(dof_handler.get_triangulation().n_active_cells());
    for (auto &domain : subdomain)
      domain = dof_handler.get_triangulation().locally_owned_subdomain();
    data_out.add_data_vector(subdomain, "subdomain");
    data_out.build_patches(patch_level);

    DataOutBase::VtkFlags flags;
    flags.time = current_time;
</pre>

While the default flag is for the best compression level, using <code>best_speed</code> makes this function much faster.

<pre>
    flags.compression_level =
      DataOutBase::VtkFlags::ZlibCompressionLevel::best_speed;
    data_out.set_flags(flags);

    // On a distributed Triangulation each process writes its own piece; when
    // run serially there is only one piece (subdomain 0).
    unsigned int subdomain_n;
    if (const auto *parallel_triangulation =
          dynamic_cast<const parallel::distributed::Triangulation<dim> *>(
            &dof_handler.get_triangulation()))
      {
        subdomain_n = parallel_triangulation->locally_owned_subdomain();
      }
    else
      {
        subdomain_n = 0;
      }

    std::ofstream output("solution-" + Utilities::int_to_string(time_step_n) +
                         "." + Utilities::int_to_string(subdomain_n, 4) +
                         ".vtu");
    data_out.write_vtu(output);

    if (this_mpi_process == 0)
      {
        std::vector<std::string> filenames;
        for (unsigned int i = 0;
             i < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
             ++i)
          filenames.push_back("solution-" +
                              Utilities::int_to_string(time_step_n) + "." +
                              Utilities::int_to_string(i, 4) + ".vtu");
        std::ofstream master_output(
          "solution-" + Utilities::int_to_string(time_step_n) + ".pvtu");
        data_out.write_pvtu_record(master_output, filenames);
      }
  }
} // namespace CDR
#endif
</pre>
<a name="ann-common/source/parameters.cc"></a>
<h1>Annotated version of common/source/parameters.cc</h1>
-----------------------------------------------------------------------------
SPDX-License-Identifier: LGPL-2.1-or-later
Copyright (C) 2015 by David Wells

This file is part of the deal.II code gallery.
-----------------------------------------------------------------------------

<pre>
#include <deal.II-cdr/parameters.h>

#include <fstream>
#include <string>

namespace CDR
{
  void Parameters::configure_parameter_handler(
    ParameterHandler &parameter_handler)
  {
    // (Default values, patterns, and some documentation strings are omitted
    //  from this excerpt.)
    parameter_handler.enter_subsection("Geometry");
    {
      parameter_handler.declare_entry("inner_radius", /* ... */);
      parameter_handler.declare_entry("outer_radius", /* ... */);
    }
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Physical Parameters");
    {
      parameter_handler.declare_entry("diffusion_coefficient", /* ... */
                                      "Diffusion coefficient.");
      parameter_handler.declare_entry("reaction_coefficient", /* ... */
                                      "Reaction coefficient.");
      parameter_handler.declare_entry("time_dependent_forcing", /* ... */
                                      "Whether or not "
                                      "the forcing function depends on time.");
    }
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Finite Element");
    {
      parameter_handler.declare_entry("initial_refinement_level", /* ... */
                                      "Initial number of levels in the mesh.");
      parameter_handler.declare_entry("max_refinement_level", /* ... */
                                      "Maximum number of levels in the mesh.");
      parameter_handler.declare_entry("fe_order", /* ... */
                                      "Finite element order.");
    }
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Time Step");
    {
      parameter_handler.declare_entry("start_time", /* ... */);
      parameter_handler.declare_entry("stop_time", /* ... */);
      parameter_handler.declare_entry("n_time_steps", /* ... */
                                      "Number of time steps.");
    }
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Output");
    {
      parameter_handler.declare_entry("save_interval", /* ... */);
      parameter_handler.declare_entry("patch_level", /* ... */);
    }
    parameter_handler.leave_subsection();
  }

  void Parameters::read_parameter_file(const std::string &file_name)
  {
    ParameterHandler parameter_handler;
    {
      std::ifstream file(file_name);
      configure_parameter_handler(parameter_handler);
      parameter_handler.parse_input(file);
    }

    parameter_handler.enter_subsection("Geometry");
    {
      inner_radius = parameter_handler.get_double("inner_radius");
      outer_radius = parameter_handler.get_double("outer_radius");
    }
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Physical Parameters");
    {
      diffusion_coefficient =
        parameter_handler.get_double("diffusion_coefficient");
      reaction_coefficient =
        parameter_handler.get_double("reaction_coefficient");
      time_dependent_forcing =
        parameter_handler.get_bool("time_dependent_forcing");
    }
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Finite Element");
    {
      initial_refinement_level =
        parameter_handler.get_integer("initial_refinement_level");
      max_refinement_level =
        parameter_handler.get_integer("max_refinement_level");
      fe_order = parameter_handler.get_integer("fe_order");
    }
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Time Step");
    {
      start_time   = parameter_handler.get_double("start_time");
      stop_time    = parameter_handler.get_double("stop_time");
      n_time_steps = parameter_handler.get_integer("n_time_steps");
    }
    parameter_handler.leave_subsection();

    parameter_handler.enter_subsection("Output");
    {
      save_interval = parameter_handler.get_integer("save_interval");
      patch_level   = parameter_handler.get_integer("patch_level");
    }
    parameter_handler.leave_subsection();
  }
} // namespace CDR
</pre>
<a name="ann-common/source/system_matrix.cc"></a>
<h1>Annotated version of common/source/system_matrix.cc</h1>

<pre>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>

#include <deal.II-cdr/parameters.h>
#include <deal.II-cdr/system_matrix.h>
#include <deal.II-cdr/system_matrix.templates.h>
</pre>
This file exists just to build template specializations of <code>create_system_matrix</code>. Even though the solver is run in parallel with Trilinos objects, other serial solvers can use the same function without recompilation by compiling everything here just one time.
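As a hedged illustration of that point (this caller is hypothetical and not part of the gallery code): a serial driver only needs the declaration from <code>system_matrix.h</code> and can link against the instantiations compiled here, so <code>system_matrix.templates.h</code> is never reparsed outside this file.

<pre>
// Hypothetical serial caller, for illustration only.
#include <deal.II/lac/sparse_matrix.h>

#include <deal.II-cdr/parameters.h>
#include <deal.II-cdr/system_matrix.h>

void assemble_cdr_matrix(const dealii::DoFHandler<2> &  dof_handler,
                         const dealii::QGauss<2> &      quad,
                         const CDR::Parameters &        parameters,
                         const double                   time_step,
                         dealii::SparseMatrix<double> & system_matrix)
{
  // A simple rotational convection field, purely as an example.
  const auto convection = [](const dealii::Point<2> p) {
    dealii::Tensor<1, 2> v;
    v[0] = -p[1];
    v[1] = p[0];
    return v;
  };
  // Resolves to the <2, SparseMatrix<double>> instantiation built below.
  CDR::create_system_matrix<2>(
    dof_handler, quad, convection, parameters, time_step, system_matrix);
}
</pre>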
<pre>
namespace CDR
{
  using namespace dealii;

  template void create_system_matrix<2, SparseMatrix<double>>(
    const DoFHandler<2> &dof_handler,
    const QGauss<2> &quad,
    const std::function<Tensor<1, 2>(const Point<2>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    SparseMatrix<double> &system_matrix);

  template void create_system_matrix<3, SparseMatrix<double>>(
    const DoFHandler<3> &dof_handler,
    const QGauss<3> &quad,
    const std::function<Tensor<1, 3>(const Point<3>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    SparseMatrix<double> &system_matrix);

  template void create_system_matrix<2, SparseMatrix<double>>(
    const DoFHandler<2> &dof_handler,
    const QGauss<2> &quad,
    const std::function<Tensor<1, 2>(const Point<2>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    const AffineConstraints<double> &constraints,
    SparseMatrix<double> &system_matrix);

  template void create_system_matrix<3, SparseMatrix<double>>(
    const DoFHandler<3> &dof_handler,
    const QGauss<3> &quad,
    const std::function<Tensor<1, 3>(const Point<3>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    const AffineConstraints<double> &constraints,
    SparseMatrix<double> &system_matrix);

  template void create_system_matrix<2, TrilinosWrappers::SparseMatrix>(
    const DoFHandler<2> &dof_handler,
    const QGauss<2> &quad,
    const std::function<Tensor<1, 2>(const Point<2>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    TrilinosWrappers::SparseMatrix &system_matrix);

  template void create_system_matrix<3, TrilinosWrappers::SparseMatrix>(
    const DoFHandler<3> &dof_handler,
    const QGauss<3> &quad,
    const std::function<Tensor<1, 3>(const Point<3>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    TrilinosWrappers::SparseMatrix &system_matrix);

  template void create_system_matrix<2, TrilinosWrappers::SparseMatrix>(
    const DoFHandler<2> &dof_handler,
    const QGauss<2> &quad,
    const std::function<Tensor<1, 2>(const Point<2>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    const AffineConstraints<double> &constraints,
    TrilinosWrappers::SparseMatrix &system_matrix);

  template void create_system_matrix<3, TrilinosWrappers::SparseMatrix>(
    const DoFHandler<3> &dof_handler,
    const QGauss<3> &quad,
    const std::function<Tensor<1, 3>(const Point<3>)> &convection_function,
    const CDR::Parameters &parameters,
    const double time_step,
    const AffineConstraints<double> &constraints,
    TrilinosWrappers::SparseMatrix &system_matrix);
} // namespace CDR
</pre>
<a name="ann-common/source/system_rhs.cc"></a>
<h1>Annotated version of common/source/system_rhs.cc</h1>

<pre>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>

#include <deal.II-cdr/system_rhs.templates.h>
</pre>
Like <code>system_matrix.cc</code>, this file just compiles template specializations of <code>create_system_rhs</code>.

<pre>
namespace CDR
{
  using namespace dealii;

  template void create_system_rhs<2, Vector<double>>(
    const DoFHandler<2> &dof_handler,
    const QGauss<2> &quad,
    const std::function<Tensor<1, 2>(const Point<2>)> &convection_function,
    const std::function<double(double, const Point<2>)> &forcing_function,
    const CDR::Parameters &parameters,
    const Vector<double> &previous_solution,
    const AffineConstraints<double> &constraints,
    const double current_time,
    Vector<double> &system_rhs);

  template void create_system_rhs<3, Vector<double>>(
    const DoFHandler<3> &dof_handler,
    const QGauss<3> &quad,
    const std::function<Tensor<1, 3>(const Point<3>)> &convection_function,
    const std::function<double(double, const Point<3>)> &forcing_function,
    const CDR::Parameters &parameters,
    const Vector<double> &previous_solution,
    const AffineConstraints<double> &constraints,
    const double current_time,
    Vector<double> &system_rhs);

  template void create_system_rhs<2, TrilinosWrappers::MPI::Vector>(
    const DoFHandler<2> &dof_handler,
    const QGauss<2> &quad,
    const std::function<Tensor<1, 2>(const Point<2>)> &convection_function,
    const std::function<double(double, const Point<2>)> &forcing_function,
    const CDR::Parameters &parameters,
    const TrilinosWrappers::MPI::Vector &previous_solution,
    const AffineConstraints<double> &constraints,
    const double current_time,
    TrilinosWrappers::MPI::Vector &system_rhs);

  template void create_system_rhs<3, TrilinosWrappers::MPI::Vector>(
    const DoFHandler<3> &dof_handler,
    const QGauss<3> &quad,
    const std::function<Tensor<1, 3>(const Point<3>)> &convection_function,
    const std::function<double(double, const Point<3>)> &forcing_function,
    const CDR::Parameters &parameters,
    const TrilinosWrappers::MPI::Vector &previous_solution,
    const AffineConstraints<double> &constraints,
    const double current_time,
    TrilinosWrappers::MPI::Vector &system_rhs);
} // namespace CDR
</pre>
<a name="ann-common/source/write_pvtu_output.cc"></a>
<h1>Annotated version of common/source/write_pvtu_output.cc</h1>
<pre>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>

#include <deal.II-cdr/write_pvtu_output.templates.h>
</pre>

Again, this file just compiles the constructor and also the templated <code>write_output</code> function.

<pre>
namespace CDR
{
  using namespace dealii;

  WritePVTUOutput::WritePVTUOutput(const unsigned int patch_level)
    : patch_level{patch_level}
    , this_mpi_process{Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)}
  {}

  template void WritePVTUOutput::write_output(const DoFHandler<2> &dof_handler,
                                              const Vector<double> &solution,
                                              const unsigned int time_step_n,
                                              const double current_time);

  template void WritePVTUOutput::write_output(const DoFHandler<3> &dof_handler,
                                              const Vector<double> &solution,
                                              const unsigned int time_step_n,
                                              const double current_time);

  template void WritePVTUOutput::write_output(
    const DoFHandler<2> &dof_handler,
    const TrilinosWrappers::MPI::Vector &solution,
    const unsigned int time_step_n,
    const double current_time);

  template void WritePVTUOutput::write_output(
    const DoFHandler<3> &dof_handler,
    const TrilinosWrappers::MPI::Vector &solution,
    const unsigned int time_step_n,
    const double current_time);
} // namespace CDR
</pre>
<a name="ann-solver/cdr.cc"></a>
<h1>Annotated version of solver/cdr.cc</h1>
<pre>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/quadrature_lib.h>

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>

#include <deal.II/fe/fe_q.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/manifold_lib.h>

#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>

#include <deal.II/numerics/error_estimator.h>
</pre>

These headers are for distributed computations:

<pre>
#include <deal.II/base/index_set.h>
#include <deal.II/base/utilities.h>

#include <deal.II/distributed/grid_refinement.h>
#include <deal.II/distributed/solution_transfer.h>
#include <deal.II/distributed/tria.h>

#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/trilinos_solver.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_vector.h>

#include <deal.II-cdr/parameters.h>
#include <deal.II-cdr/system_matrix.h>
#include <deal.II-cdr/system_rhs.h>
#include <deal.II-cdr/write_pvtu_output.h>

#include <chrono>
#include <functional>
#include <iostream>

using namespace dealii;
</pre>
This is the actual solver class which performs time iteration and calls the appropriate library functions to do it.

<pre>
template <int dim>
class CDRProblem
{
public:
  CDRProblem(const CDR::Parameters &parameters);
  void run();

private:
  const CDR::Parameters parameters;
  const double          time_step;
  double                current_time;

  const MPI_Comm     mpi_communicator;
  const unsigned int this_mpi_process;

  FE_Q<dim>   fe;
  QGauss<dim> quad;

  parallel::distributed::Triangulation<dim> triangulation;
  DoFHandler<dim>                           dof_handler;

  const std::function<Tensor<1, dim>(const Point<dim>)> convection_function;
  const std::function<double(double, const Point<dim>)> forcing_function;

  IndexSet locally_owned_dofs;
  IndexSet locally_relevant_dofs;

  AffineConstraints<double> constraints;
</pre>

As is usual in parallel programs, I keep two copies of parts of the complete solution: <code>locally_relevant_solution</code> contains both the locally calculated solution as well as the layer of cells at its boundary (the ghost cells), while <code>completely_distributed_solution</code> only contains the parts of the solution computed on the current MPI process.

<pre>
  TrilinosWrappers::MPI::Vector locally_relevant_solution;
  TrilinosWrappers::MPI::Vector completely_distributed_solution;
  TrilinosWrappers::MPI::Vector system_rhs;

  TrilinosWrappers::SparseMatrix    system_matrix;
  TrilinosWrappers::PreconditionAMG preconditioner;

  ConditionalOStream pcout;

  void setup_geometry();
  void setup_dofs();
  void setup_system();
  void time_iterate();
  void refine_mesh();
};
</pre>
<pre>
template <int dim>
CDRProblem<dim>::CDRProblem(const CDR::Parameters &parameters)
  : parameters(parameters)
  , time_step{(parameters.stop_time - parameters.start_time) /
              parameters.n_time_steps}
  , current_time{parameters.start_time}
  , mpi_communicator(MPI_COMM_WORLD)
  , this_mpi_process{Utilities::MPI::this_mpi_process(mpi_communicator)}
  , fe(parameters.fe_order)
  , quad(parameters.fe_order + 2)
  , triangulation(mpi_communicator)
  , dof_handler(triangulation)
  , convection_function{/* ... (omitted in this excerpt) ... */}
  , forcing_function{[](double t, const Point<dim> p) -> double {
      /* ... (omitted in this excerpt) ... */
    }}
  , pcout(std::cout, this_mpi_process == 0)
{
  Assert(dim == 2, ExcNotImplemented());
}
</pre>
<pre>
template <int dim>
void CDRProblem<dim>::setup_geometry()
{
  const Point<dim> center;
  GridGenerator::hyper_shell(triangulation,
                             center,
                             parameters.inner_radius,
                             parameters.outer_radius);
  triangulation.set_manifold(0, SphericalManifold<dim>(center));
  for (const auto &cell : triangulation.active_cell_iterators())
    cell->set_all_manifold_ids(0);
  triangulation.refine_global(parameters.initial_refinement_level);
}
</pre>
<pre>
template <int dim>
void CDRProblem<dim>::setup_dofs()
{
  dof_handler.distribute_dofs(fe);
  pcout << "Number of degrees of freedom: " << dof_handler.n_dofs()
        << std::endl;
  locally_owned_dofs = dof_handler.locally_owned_dofs();
  locally_relevant_dofs =
    DoFTools::extract_locally_relevant_dofs(dof_handler);

  constraints.clear();
  constraints.reinit(locally_relevant_dofs);
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
  DoFTools::make_zero_boundary_constraints(dof_handler, constraints);
  constraints.close();

  completely_distributed_solution.reinit(locally_owned_dofs,
                                         mpi_communicator);
  locally_relevant_solution.reinit(locally_owned_dofs,
                                   locally_relevant_dofs,
                                   mpi_communicator);
}
</pre>
<pre>
template <int dim>
void CDRProblem<dim>::setup_system()
{
  DynamicSparsityPattern dynamic_sparsity_pattern(locally_relevant_dofs);
  DoFTools::make_sparsity_pattern(dof_handler,
                                  dynamic_sparsity_pattern,
                                  constraints);
  SparsityTools::distribute_sparsity_pattern(dynamic_sparsity_pattern,
                                             dof_handler.locally_owned_dofs(),
                                             mpi_communicator,
                                             locally_relevant_dofs);

  system_rhs.reinit(locally_owned_dofs, mpi_communicator);
  system_matrix.reinit(locally_owned_dofs,
                       dynamic_sparsity_pattern,
                       mpi_communicator);

  CDR::create_system_matrix<dim>(dof_handler,
                                 quad,
                                 convection_function,
                                 parameters,
                                 time_step,
                                 constraints,
                                 system_matrix);
  system_matrix.compress(VectorOperation::add);
  preconditioner.initialize(system_matrix);
}
</pre>
<pre>
template <int dim>
void CDRProblem<dim>::time_iterate()
{
  double               current_time = parameters.start_time;
  CDR::WritePVTUOutput pvtu_output(parameters.patch_level);
  for (unsigned int time_step_n = 0; time_step_n < parameters.n_time_steps;
       ++time_step_n)
    {
      current_time += time_step;

      system_rhs = 0.0;
      CDR::create_system_rhs<dim>(dof_handler,
                                  quad,
                                  convection_function,
                                  forcing_function,
                                  parameters,
                                  locally_relevant_solution,
                                  constraints,
                                  current_time,
                                  system_rhs);
      system_rhs.compress(VectorOperation::add);

      SolverControl solver_control(dof_handler.n_dofs(),
                                   1e-6 * system_rhs.l2_norm(),
                                   /*log_history = */ false,
                                   /*log_result = */ false);
      TrilinosWrappers::SolverGMRES solver(solver_control);
      solver.solve(system_matrix,
                   completely_distributed_solution,
                   system_rhs,
                   preconditioner);
      constraints.distribute(completely_distributed_solution);
      locally_relevant_solution = completely_distributed_solution;

      if (time_step_n % parameters.save_interval == 0)
        {
          pvtu_output.write_output(dof_handler,
                                   locally_relevant_solution,
                                   time_step_n,
                                   current_time);
        }

      refine_mesh();
    }
}
</pre>
<pre>
template <int dim>
void CDRProblem<dim>::refine_mesh()
{
  using FunctionMap = std::map<types::boundary_id, const Function<dim> *>;

  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
  KellyErrorEstimator<dim>::estimate(dof_handler,
                                     QGauss<dim - 1>(fe.degree + 1),
                                     FunctionMap(),
                                     locally_relevant_solution,
                                     estimated_error_per_cell);
</pre>

This solver uses a crude refinement strategy where cells with relatively high errors are refined and cells with relatively low errors are coarsened. The maximum refinement level is capped to prevent run-away refinement.

<pre>
  for (const auto &cell : triangulation.active_cell_iterators())
    {
      if (std::abs(estimated_error_per_cell[cell->active_cell_index()]) >=
          1e-3)
        {
          cell->set_refine_flag();
        }
      else if (std::abs(estimated_error_per_cell[cell->active_cell_index()]) <=
               /* a much smaller threshold; value omitted in this excerpt */)
        {
          cell->set_coarsen_flag();
        }
    }

  if (triangulation.n_levels() > parameters.max_refinement_level)
    {
      for (const auto &cell : triangulation.cell_iterators_on_level(
             parameters.max_refinement_level))
        {
          cell->clear_refine_flag();
        }
    }
</pre>
Transferring the solution between different grids is ultimately just a few function calls, but they must be made in exactly the right order; the sequence is summarized once more in the sketch after this function.

<pre>
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    solution_transfer(dof_handler);
  solution_transfer.prepare_for_coarsening_and_refinement(
    locally_relevant_solution);
  triangulation.execute_coarsening_and_refinement();
  setup_dofs();
</pre>

The <code>solution_transfer</code> object stores a pointer to <code>locally_relevant_solution</code>, so when <code>interpolate</code> is called below it uses those values to populate <code>temporary</code>.

<pre>
  TrilinosWrappers::MPI::Vector temporary(locally_owned_dofs,
                                          mpi_communicator);
  solution_transfer.interpolate(temporary);
</pre>

After <code>temporary</code> has the correct value, this call correctly populates <code>completely_distributed_solution</code>, which had its index set updated above with the call to <code>setup_dofs</code>.

<pre>
  completely_distributed_solution = temporary;
</pre>

Constraints cannot be applied to vectors with ghost entries since the ghost entries are write only, so this first goes through the completely distributed vector.

<pre>
  constraints.distribute(completely_distributed_solution);
  locally_relevant_solution = completely_distributed_solution;
  setup_system();
}
</pre>
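Since the ordering is the whole point, here is the same sequence once more in outline; this is nothing new, just the calls above in order:

<pre>
// 1. prepare the transfer while the old mesh and old vector still exist
solution_transfer.prepare_for_coarsening_and_refinement(locally_relevant_solution);
// 2. actually change the mesh
triangulation.execute_coarsening_and_refinement();
// 3. rebuild DoFs, index sets, constraints, and vectors for the new mesh
setup_dofs();
// 4. interpolate into a writable (non-ghosted) vector on the new mesh
TrilinosWrappers::MPI::Vector temporary(locally_owned_dofs, mpi_communicator);
solution_transfer.interpolate(temporary);
// 5. copy over, apply constraints, and refresh the ghosted vector
completely_distributed_solution = temporary;
constraints.distribute(completely_distributed_solution);
locally_relevant_solution = completely_distributed_solution;
</pre>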
<pre>
template <int dim>
void CDRProblem<dim>::run()
{
  setup_geometry();
  setup_dofs();
  setup_system();
  time_iterate();
}

constexpr int dim{2};
</pre>
One of the new features in C++11 is the <code>chrono</code> component of the standard library. This gives us an easy way to time the output.

<pre>
int main(int argc, char *argv[])
{
  auto t0 = std::chrono::high_resolution_clock::now();

  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
  CDR::Parameters parameters;
  parameters.read_parameter_file("parameters.prm");
  CDRProblem<dim> cdr_problem(parameters);
  cdr_problem.run();

  auto t1 = std::chrono::high_resolution_clock::now();
  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
    {
      std::cout << "time elapsed: "
                << std::chrono::duration_cast<std::chrono::milliseconds>(t1 -
                                                                         t0)
                     .count()
                << " milliseconds." << std::endl;
    }

  return 0;
}
</pre>