coupled_laplace_problem.h
127 *  
128 * @endcode
129 *
130 * The included deal.II header files are the same as in the other example
131 * programs:
132 *
133 * @code
134 *   #include <deal.II/base/function.h>
135 *   #include <deal.II/base/logstream.h>
136 *   #include <deal.II/base/quadrature_lib.h>
137 *  
138 *   #include <deal.II/dofs/dof_accessor.h>
139 *   #include <deal.II/dofs/dof_handler.h>
140 *   #include <deal.II/dofs/dof_tools.h>
141 *  
142 *   #include <deal.II/fe/fe_q.h>
143 *   #include <deal.II/fe/fe_values.h>
144 *  
145 *   #include <deal.II/grid/grid_generator.h>
146 *   #include <deal.II/grid/tria.h>
147 *   #include <deal.II/grid/tria_accessor.h>
148 *   #include <deal.II/grid/tria_iterator.h>
149 *  
150 *   #include <deal.II/lac/dynamic_sparsity_pattern.h>
151 *   #include <deal.II/lac/full_matrix.h>
152 *   #include <deal.II/lac/precondition.h>
153 *   #include <deal.II/lac/solver_cg.h>
154 *   #include <deal.II/lac/sparse_matrix.h>
155 *   #include <deal.II/lac/vector.h>
156 *  
157 *   #include <deal.II/numerics/data_out.h>
158 *   #include <deal.II/numerics/matrix_tools.h>
159 *   #include <deal.II/numerics/vector_tools.h>
160 * @endcode
161 *
162 * In addition to the deal.II header files, we include the preCICE API in order
163 * to obtain access to preCICE specific functionality
164 *
165 * @code
166 *   #include <precice/precice.hpp>
167 *  
168 *   #include <fstream>
169 *   #include <iostream>
170 *  
171 *   using namespace dealii;
172 *  
173 * @endcode
174 *
175 * Configuration parameters
176 *
177
178 *
179 * We set up a simple hard-coded struct containing all names we need for
180 * external coupling. The struct includes the name of the preCICE
181 * configuration file as well as the name of the simulation participant, the
 182 * name of the coupling mesh, and the name of the exchanged data. The last three
 183 * names can also be found in the preCICE configuration file. For real application
 184 * cases, these names are better handled by a parameter file.
185 *
186 * @code
 187 *   struct CouplingParameters
188 *   {
189 *   const std::string config_file = "precice-config.xml";
190 *   const std::string participant_name = "laplace-solver";
191 *   const std::string mesh_name = "dealii-mesh";
192 *   const std::string read_data_name = "boundary-data";
193 *   };
194 *  
195 *  
196 * @endcode
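 *
 * As a side note: below is a minimal sketch of how a variant of this struct
 * could read the names from a parameter file using deal.II's ParameterHandler
 * class. It is not part of the tutorial program; the file name "coupling.prm"
 * and the member function `parse()` are made up for illustration.
 *
 * @code
 *   // Sketch only; requires #include <deal.II/base/parameter_handler.h>.
 *   struct CouplingParameters
 *   {
 *     std::string config_file      = "precice-config.xml";
 *     std::string participant_name = "laplace-solver";
 *     std::string mesh_name        = "dealii-mesh";
 *     std::string read_data_name   = "boundary-data";
 *
 *     // Declare each member as a run-time parameter and read "coupling.prm".
 *     void parse()
 *     {
 *       ParameterHandler prm;
 *       prm.add_parameter("Configuration file", config_file);
 *       prm.add_parameter("Participant name", participant_name);
 *       prm.add_parameter("Mesh name", mesh_name);
 *       prm.add_parameter("Read data name", read_data_name);
 *       prm.parse_input("coupling.prm");
 *     }
 *   };
 * @endcode
 *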
197 *
198 * The Adapter class
199 *
200
201 *
202 * The Adapter class handles all functionalities to couple the deal.II solver
203 * code to other solvers with preCICE, i.e., data structures are set up and all
204 * relevant information is passed to preCICE.
205 *
206
207 *
208 *
209 * @code
210 *   template <int dim, typename ParameterClass>
211 *   class Adapter
212 *   {
213 *   public:
214 *   Adapter(const ParameterClass & parameters,
215 *   const types::boundary_id dealii_boundary_interface_id);
216 *  
217 *   void
218 *   initialize(const DoFHandler<dim> & dof_handler,
219 *   std::map<types::global_dof_index, double> &boundary_data,
220 *   const MappingQGeneric<dim> & mapping);
221 *  
222 *   void
223 *   read_data(double relative_read_time,
224 *   std::map<types::global_dof_index, double> &boundary_data);
225 *  
226 *   void
227 *   advance(const double computed_timestep_length);
228 *  
229 * @endcode
230 *
 231 * public preCICE solver interface
232 *
233 * @code
234 *   precice::Participant precice;
235 *  
236 * @endcode
237 *
238 * Boundary ID of the deal.II triangulation, associated with the coupling
 239 * interface. The variable is set in the constructor of this class and is
 240 * intentionally public so that it can be used during grid generation and
 241 * system assembly. The only thing one needs to make sure of is that this ID
 242 * is unique for a particular triangulation.
243 *
244 * @code
245 *   const unsigned int dealii_boundary_interface_id;
246 *  
247 *   private:
248 * @endcode
249 *
 250 * preCICE-related initializations:
 251 * These variables are specified in and read from a parameter file, which in
 252 * this simple tutorial program is the CouplingParameters struct introduced
 253 * at the beginning.
254 *
255 * @code
256 *   const std::string mesh_name;
257 *   const std::string read_data_name;
258 *  
259 * @endcode
260 *
 261 * The node IDs are filled by preCICE during the initialization and are
 262 * associated with the interface vertices we pass to preCICE
263 *
264 * @code
265 *   int n_interface_nodes;
266 *  
267 * @endcode
268 *
269 * DoF IndexSet, containing relevant coupling DoF indices at the coupling
270 * boundary
271 *
272 * @code
273 *   IndexSet coupling_dofs;
274 *  
275 * @endcode
276 *
 277 * Data containers which are passed to preCICE in an appropriate
 278 * preCICE-specific format
279 *
280 * @code
281 *   std::vector<int> interface_nodes_ids;
282 *   std::vector<double> read_data_buffer;
283 *  
284 * @endcode
285 *
 286 * The MPI rank and total number of MPI ranks are required by preCICE when the
 287 * Participant is created. Since this tutorial runs only in serial mode, we
288 * define the variables manually in this class instead of using the regular
289 * MPI interface.
290 *
291 * @code
292 *   static constexpr int this_mpi_process = 0;
293 *   static constexpr int n_mpi_processes = 1;
294 *  
295 * @endcode
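 *
 * For a parallel run one would instead query these values from MPI, for
 * example via deal.II's MPI utilities. A minimal sketch, assuming an
 * initialized MPI environment and #include <deal.II/base/mpi.h> (not part of
 * this serial program):
 *
 * @code
 *   // Rank of this process and total number of ranks in MPI_COMM_WORLD.
 *   const int this_mpi_process = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
 *   const int n_mpi_processes  = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
 * @endcode
 *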
296 *
297 * Function to transform the obtained data from preCICE into an appropriate
298 * map for Dirichlet boundary conditions
299 *
300 * @code
301 *   void
302 *   format_precice_to_dealii(
303 *   std::map<types::global_dof_index, double> &boundary_data) const;
304 *   };
305 *  
306 *  
307 *  
308 * @endcode
309 *
310 * In the constructor of the Adapter class, we set up the preCICE
 311 * Participant. We need to tell preCICE our name as a participant of the
 312 * simulation and the name of the preCICE configuration file. Both have already
 313 * been specified in the CouplingParameters struct above. Thus, we pass the struct
314 * directly to the constructor and read out all relevant information. As a
315 * second parameter, we need to specify the boundary ID of our triangulation,
316 * which is associated with the coupling interface.
317 *
318 * @code
319 *   template <int dim, typename ParameterClass>
320 *   Adapter<dim, ParameterClass>::Adapter(
321 *   const ParameterClass & parameters,
322 *   const types::boundary_id deal_boundary_interface_id)
323 *   : precice(parameters.participant_name,
324 *   parameters.config_file,
325 *   this_mpi_process,
326 *   n_mpi_processes)
327 *   , dealii_boundary_interface_id(deal_boundary_interface_id)
328 *   , mesh_name(parameters.mesh_name)
329 *   , read_data_name(parameters.read_data_name)
330 *   {}
331 *  
332 *  
333 *  
334 * @endcode
335 *
336 * This function initializes preCICE (e.g. establishes communication channels
337 * and allocates memory) and passes all relevant data to preCICE. For surface
338 * coupling, relevant data is in particular the location of the data points at
 339 * the associated interface(s). The `boundary_data` is an empty map that is
 340 * later filled by preCICE with information from the other participant. During
 341 * system assembly, the map can be used directly to apply the Dirichlet
 342 * boundary conditions in the linear system.
343 *
344 * @code
345 *   template <int dim, typename ParameterClass>
346 *   void
347 *   Adapter<dim, ParameterClass>::initialize(
348 *   const DoFHandler<dim> & dof_handler,
349 *   std::map<types::global_dof_index, double> &boundary_data,
350 *   const MappingQGeneric<dim> & mapping)
351 *   {
352 *   Assert(dim > 1, ExcNotImplemented());
353 *   AssertDimension(dim, precice.getMeshDimensions(mesh_name));
354 *  
355 * @endcode
356 *
 357 * Afterwards, we extract the number of interface nodes and the coupling DoFs
 358 * at the coupling interface from our deal.II solver via
 359 * `extract_boundary_dofs()`.
 360 *
361 * @code
362 *   std::set<types::boundary_id> couplingBoundary;
363 *   couplingBoundary.insert(dealii_boundary_interface_id);
364 *  
365 * @endcode
366 *
 367 * The `ComponentMask()` might be important in case we deal with vector-valued
 368 * problems, because vector-valued problems have a DoF for each component.
369 *
370 * @code
371 *   coupling_dofs = DoFTools::extract_boundary_dofs(dof_handler,
372 *   ComponentMask(),
373 *   couplingBoundary);
374 *  
375 * @endcode
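 *
 * As an illustration (not needed for this scalar program), for a vector-valued
 * problem one could restrict the coupling DoFs to a single component by passing
 * a non-default ComponentMask. A minimal sketch:
 *
 * @code
 *   // Sketch: select only component 0 of a vector-valued finite element.
 *   const FEValuesExtractors::Scalar first_component(0);
 *   coupling_dofs = DoFTools::extract_boundary_dofs(
 *     dof_handler,
 *     dof_handler.get_fe().component_mask(first_component),
 *     couplingBoundary);
 * @endcode
 *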
376 *
 377 * The coupling DoFs are used to set up the `boundary_data` map. In the end,
 378 * each coupling DoF is associated with its respective boundary value.
379 *
380 * @code
381 *   for (const auto i : coupling_dofs)
382 *   boundary_data[i] = 0.0;
383 *  
384 * @endcode
385 *
386 * Since we deal with a scalar problem, the number of DoFs at the particular
387 * interface corresponds to the number of interface nodes.
388 *
389 * @code
390 *   n_interface_nodes = coupling_dofs.n_elements();
391 *  
392 *   std::cout << "\t Number of coupling nodes: " << n_interface_nodes
393 *   << std::endl;
394 *  
395 * @endcode
396 *
397 * Now, we need to tell preCICE the coordinates of the interface nodes. Hence,
398 * we set up a std::vector to pass the node positions to preCICE. Each node is
399 * specified only once.
400 *
401 * @code
402 *   std::vector<double> interface_nodes_positions;
403 *   interface_nodes_positions.reserve(dim * n_interface_nodes);
404 *  
405 * @endcode
406 *
407 * Set up the appropriate size of the data container needed for data
408 * exchange. Here, we deal with a scalar problem, so that only a scalar value
409 * is read/written per interface node.
410 *
411 * @code
412 *   read_data_buffer.resize(n_interface_nodes);
413 * @endcode
414 *
 415 * The IDs are filled by preCICE during the initialization.
416 *
417 * @code
418 *   interface_nodes_ids.resize(n_interface_nodes);
419 *  
420 * @endcode
421 *
 422 * The node locations are obtained using `map_dofs_to_support_points()`.
423 *
424 * @code
425 *   std::map<types::global_dof_index, Point<dim>> support_points;
426 *   DoFTools::map_dofs_to_support_points(mapping, dof_handler, support_points);
427 *  
428 * @endcode
429 *
 430 * `support_points` now contains the coordinates of all DoFs. In the next
 431 * step, the relevant coordinates are extracted using the IndexSet of
 432 * coupling DoFs extracted above.
433 *
434 * @code
435 *   for (const auto element : coupling_dofs)
436 *   for (int i = 0; i < dim; ++i)
437 *   interface_nodes_positions.push_back(support_points[element][i]);
438 *  
439 * @endcode
440 *
 441 * Now we have all the information needed to define the coupling mesh and
 442 * pass it to preCICE.
443 *
444 * @code
445 *   precice.setMeshVertices(mesh_name,
446 *   interface_nodes_positions,
447 *   interface_nodes_ids);
448 *  
449 * @endcode
450 *
 451 * Then, we initialize preCICE internally by calling the API function
 452 * `initialize()`.
453 *
454 * @code
455 *   precice.initialize();
456 *   }
457 *  
458 *   template <int dim, typename ParameterClass>
459 *   void
460 *   Adapter<dim, ParameterClass>::read_data(
461 *   double relative_read_time,
462 *   std::map<types::global_dof_index, double> &boundary_data)
463 *   {
464 * @endcode
465 *
 466 * Here, we obtain data, i.e., the boundary condition, from the other
 467 * participant. We already have the vertex IDs and just need to convert the
 468 * obtained data to the deal.II-compatible 'boundary map', which is done in
 469 * the `format_precice_to_dealii()` function.
470 *
471 * @code
472 *   precice.readData(mesh_name,
473 *   read_data_name,
474 *   interface_nodes_ids,
475 *   relative_read_time,
476 *   read_data_buffer);
477 *  
478 * @endcode
479 *
480 * After receiving the coupling data in `read_data_buffer`, we convert it to
 481 * the std::map `boundary_data`, which is later needed in order to apply
 482 * Dirichlet boundary conditions.
483 *
484 * @code
485 *   format_precice_to_dealii(boundary_data);
486 *   }
487 *  
488 *  
489 * @endcode
490 *
491 * The function `advance()` is called in the main time loop after the
492 * computation in each time step. Here, preCICE exchanges the coupling data
 493 * internally and applies data mappings as well as acceleration methods.
494 *
495 * @code
496 *   template <int dim, typename ParameterClass>
497 *   void
498 *   Adapter<dim, ParameterClass>::advance(const double computed_timestep_length)
499 *   {
500 * @endcode
501 *
502 * We specify the computed time-step length and pass it to preCICE.
503 *
504 * @code
505 *   precice.advance(computed_timestep_length);
506 *   }
507 *  
508 *  
509 *  
510 * @endcode
511 *
 512 * This function takes the std::vector obtained by preCICE in `read_data_buffer` and
 513 * inserts the values at the right position in the boundary map used throughout
 514 * our deal.II solver for Dirichlet boundary conditions. The function is only
 515 * used internally in the Adapter class and not called in the solver itself. The
 516 * order in which preCICE sorts the data in the `read_data_buffer` vector is exactly
 517 * the same as the order of the initially passed vertex coordinates.
518 *
519 * @code
520 *   template <int dim, typename ParameterClass>
521 *   void
522 *   Adapter<dim, ParameterClass>::format_precice_to_dealii(
523 *   std::map<types::global_dof_index, double> &boundary_data) const
524 *   {
525 * @endcode
526 *
527 * We already stored the coupling DoF indices in the `boundary_data` map, so
528 * that we can simply iterate over all keys in the map.
529 *
530 * @code
531 *   auto dof_component = boundary_data.begin();
532 *   for (int i = 0; i < n_interface_nodes; ++i)
533 *   {
534 *   AssertIndexRange(i, read_data_buffer.size());
535 *   boundary_data[dof_component->first] = read_data_buffer[i];
536 *   ++dof_component;
537 *   }
538 *   }
539 *  
540 *  
541 * @endcode
542 *
543 * The solver class is essentially the same as in @ref step_4 "step-4". We only extend the
 544 * stationary problem to a time-dependent problem and introduce the coupling.
 545 * Comments are added at any point where the workflow differs from @ref step_4 "step-4".
546 *
547 * @code
548 *   template <int dim>
549 *   class CoupledLaplaceProblem
550 *   {
551 *   public:
552 *   CoupledLaplaceProblem();
553 *  
554 *   void
555 *   run();
556 *  
557 *   private:
558 *   void
559 *   make_grid();
560 *   void
561 *   setup_system();
562 *   void
563 *   assemble_system();
564 *   void
565 *   solve();
566 *   void
567 *   output_results() const;
568 *  
 569 *   Triangulation<dim> triangulation;
 570 *   FE_Q<dim> fe;
 571 *   DoFHandler<dim> dof_handler;
 572 *   MappingQ1<dim> mapping;
573 *  
574 *   SparsityPattern sparsity_pattern;
575 *   SparseMatrix<double> system_matrix;
576 *  
577 *   Vector<double> solution;
578 *   Vector<double> old_solution;
579 *   Vector<double> system_rhs;
580 *  
581 * @endcode
582 *
 583 * We allocate all structures required for the preCICE coupling: the map
 584 * is used to apply Dirichlet boundary conditions and is filled in the Adapter
 585 * class with data from the other participant. The CouplingParameters struct
 586 * holds the preCICE configuration as described above. The interface boundary
 587 * ID is the ID associated with our coupling interface and needs to be specified
 588 * when we set up the Adapter object, because we pass it directly to the
 589 * constructor of this class.
590 *
591 * @code
592 *   std::map<types::global_dof_index, double> boundary_data;
 593 *   CouplingParameters parameters;
 594 *   const types::boundary_id interface_boundary_id;
 595 *   Adapter<dim, CouplingParameters> adapter;
596 *  
597 * @endcode
598 *
 599 * The time-step size delta_t is the actual time-step size used for all
 600 * computations. The preCICE time-step size is obtained from preCICE in order to
 601 * ensure synchronization at all coupling time steps. The solver time-step
 602 * size is the desired time-step size of our individual solver. In more
603 * sophisticated computations, it might be determined adaptively. The
604 * `time_step` counter is just used for the time-step number.
605 *
606 * @code
607 *   double delta_t;
608 *   double precice_delta_t;
609 *   const double solver_delta_t = 0.1;
610 *   unsigned int time_step = 0;
611 *   };
612 *  
613 *  
614 *  
615 *   template <int dim>
616 *   class RightHandSide : public Function<dim>
617 *   {
618 *   public:
619 *   virtual double
620 *   value(const Point<dim> &p, const unsigned int component = 0) const override;
621 *   };
622 *  
623 *  
624 *  
625 *   template <int dim>
626 *   class BoundaryValues : public Function<dim>
627 *   {
628 *   public:
629 *   virtual double
630 *   value(const Point<dim> &p, const unsigned int component = 0) const override;
631 *   };
632 *  
633 *   template <int dim>
634 *   double
635 *   RightHandSide<dim>::value(const Point<dim> &p,
636 *   const unsigned int /*component*/) const
637 *   {
638 *   double return_value = 0.0;
639 *   for (unsigned int i = 0; i < dim; ++i)
640 *   return_value += 4.0 * std::pow(p(i), 4.0);
641 *  
642 *   return return_value;
643 *   }
644 *  
645 *  
646 *   template <int dim>
647 *   double
648 *   BoundaryValues<dim>::value(const Point<dim> &p,
649 *   const unsigned int /*component*/) const
650 *   {
651 *   return p.square();
652 *   }
653 *  
654 *  
655 *  
656 *   template <int dim>
657 *   CoupledLaplaceProblem<dim>::CoupledLaplaceProblem()
658 *   : fe(1)
659 *   , dof_handler(triangulation)
660 *   , interface_boundary_id(1)
661 *   , adapter(parameters, interface_boundary_id)
662 *   {}
663 *  
664 *  
665 *   template <int dim>
666 *   void
667 *   CoupledLaplaceProblem<dim>::make_grid()
668 *   {
 669 *   GridGenerator::hyper_cube(triangulation, -1, 1);
 670 *   triangulation.refine_global(4);
671 *  
672 *   for (const auto &cell : triangulation.active_cell_iterators())
673 *   for (const auto &face : cell->face_iterators())
674 *   {
675 * @endcode
676 *
 677 * We choose the boundary in the positive x-direction for the
 678 * interface coupling.
679 *
680 * @code
681 *   if (face->at_boundary() && (face->center()[0] == 1))
682 *   face->set_boundary_id(interface_boundary_id);
683 *   }
684 *  
685 *   std::cout << " Number of active cells: " << triangulation.n_active_cells()
686 *   << std::endl
687 *   << " Total number of cells: " << triangulation.n_cells()
688 *   << std::endl;
689 *   }
690 *  
691 *  
692 *   template <int dim>
693 *   void
694 *   CoupledLaplaceProblem<dim>::setup_system()
695 *   {
696 *   dof_handler.distribute_dofs(fe);
697 *  
698 *   std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
699 *   << std::endl;
700 *  
701 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
702 *   DoFTools::make_sparsity_pattern(dof_handler, dsp);
703 *   sparsity_pattern.copy_from(dsp);
704 *  
705 *   system_matrix.reinit(sparsity_pattern);
706 *  
707 *   solution.reinit(dof_handler.n_dofs());
708 *   old_solution.reinit(dof_handler.n_dofs());
709 *   system_rhs.reinit(dof_handler.n_dofs());
710 *   }
711 *  
712 *  
713 *  
714 *   template <int dim>
715 *   void
716 *   CoupledLaplaceProblem<dim>::assemble_system()
717 *   {
718 * @endcode
719 *
720 * Reset global structures
721 *
722 * @code
723 *   system_rhs = 0;
724 *   system_matrix = 0;
725 * @endcode
726 *
727 * Update old solution values
728 *
729 * @code
730 *   old_solution = solution;
731 *  
732 *   QGauss<dim> quadrature_formula(fe.degree + 1);
733 *  
734 *   RightHandSide<dim> right_hand_side;
735 *  
736 *   FEValues<dim> fe_values(fe,
 737 *   quadrature_formula,
 738 *   update_values | update_gradients |
 739 *   update_quadrature_points | update_JxW_values);
 740 *  
741 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
742 *  
743 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
744 *   Vector<double> cell_rhs(dofs_per_cell);
745 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
746 * @endcode
747 *
 748 * The solution values from the previous time step are stored for each
 749 * quadrature point
750 *
751 * @code
752 *   std::vector<double> local_values_old_solution(fe_values.n_quadrature_points);
753 *  
754 *   for (const auto &cell : dof_handler.active_cell_iterators())
755 *   {
756 *   fe_values.reinit(cell);
757 *   cell_matrix = 0;
758 *   cell_rhs = 0;
759 * @endcode
760 *
 761 * Get the local values from the `fe_values` object
762 *
763 * @code
764 *   fe_values.get_function_values(old_solution, local_values_old_solution);
765 *  
766 * @endcode
767 *
 768 * The system matrix additionally contains a mass matrix due to the time
769 * discretization. The RHS has contributions from the old solution values.
770 *
771 * @code
772 *   for (const unsigned int q_index : fe_values.quadrature_point_indices())
773 *   for (const unsigned int i : fe_values.dof_indices())
774 *   {
775 *   for (const unsigned int j : fe_values.dof_indices())
776 *   cell_matrix(i, j) +=
777 *   ((fe_values.shape_value(i, q_index) * // phi_i(x_q)
778 *   fe_values.shape_value(j, q_index)) + // phi_j(x_q)
779 *   (delta_t * // delta t
780 *   fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
781 *   fe_values.shape_grad(j, q_index))) * // grad phi_j(x_q)
782 *   fe_values.JxW(q_index); // dx
783 *  
784 *   const auto x_q = fe_values.quadrature_point(q_index);
785 *   const auto &local_value = local_values_old_solution[q_index];
786 *   cell_rhs(i) += ((delta_t * // delta t
787 *   fe_values.shape_value(i, q_index) * // phi_i(x_q)
788 *   right_hand_side.value(x_q)) + // f(x_q)
789 *   fe_values.shape_value(i, q_index) *
790 *   local_value) * // phi_i(x_q)*val
791 *   fe_values.JxW(q_index); // dx
792 *   }
793 *  
794 * @endcode
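 *
 * For reference, the local contributions assembled above correspond to one
 * backward Euler step of the heat-type equation @f$\partial_t u - \Delta u = f@f$
 * in weak form: find @f$u^n@f$ such that for all test functions @f$v@f$
 * @f[
 *   (u^n, v) + \Delta t \, (\nabla u^n, \nabla v)
 *     = (u^{n-1}, v) + \Delta t \, (f, v),
 * @f]
 * where the terms without @f$\Delta t@f$ stem from the time derivative and the
 * remaining terms are the usual Laplace and right-hand-side contributions.
 *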
795 *
796 * Copy local to global
797 *
798 * @code
799 *   cell->get_dof_indices(local_dof_indices);
800 *   for (const unsigned int i : fe_values.dof_indices())
801 *   {
802 *   for (const unsigned int j : fe_values.dof_indices())
803 *   system_matrix.add(local_dof_indices[i],
804 *   local_dof_indices[j],
805 *   cell_matrix(i, j));
806 *  
807 *   system_rhs(local_dof_indices[i]) += cell_rhs(i);
808 *   }
809 *   }
810 *   {
811 * @endcode
812 *
 813 * First, we apply the Dirichlet boundary condition from @ref step_4 "step-4", as
 814 * usual.
815 *
816 * @code
817 *   std::map<types::global_dof_index, double> boundary_values;
818 *   VectorTools::interpolate_boundary_values(dof_handler,
819 *   0,
820 *   BoundaryValues<dim>(),
821 *   boundary_values);
822 *   MatrixTools::apply_boundary_values(boundary_values,
823 *   system_matrix,
824 *   solution,
825 *   system_rhs);
826 *   }
827 *   {
828 * @endcode
829 *
830 * Afterwards, we apply the coupling boundary condition. The `boundary_data`
831 * has already been filled by preCICE.
832 *
833 * @code
834 *   MatrixTools::apply_boundary_values(boundary_data,
835 *   system_matrix,
836 *   solution,
837 *   system_rhs);
838 *   }
839 *   }
840 *  
841 *  
842 *  
843 *   template <int dim>
844 *   void
845 *   CoupledLaplaceProblem<dim>::solve()
846 *   {
847 *   SolverControl solver_control(1000, 1e-12);
848 *   SolverCG<Vector<double>> solver(solver_control);
849 *   solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());
850 *  
851 *   std::cout << " " << solver_control.last_step()
852 *   << " CG iterations needed to obtain convergence." << std::endl;
853 *   }
854 *  
855 *  
856 *  
857 *   template <int dim>
858 *   void
859 *   CoupledLaplaceProblem<dim>::output_results() const
860 *   {
861 *   DataOut<dim> data_out;
862 *  
863 *   data_out.attach_dof_handler(dof_handler);
864 *   data_out.add_data_vector(solution, "solution");
865 *  
866 *   data_out.build_patches(mapping);
867 *  
868 *   std::ofstream output("solution-" + std::to_string(time_step) + ".vtk");
869 *   data_out.write_vtk(output);
870 *   }
871 *  
872 *  
873 *  
874 *   template <int dim>
875 *   void
876 *   CoupledLaplaceProblem<dim>::run()
877 *   {
878 *   std::cout << "Solving problem in " << dim << " space dimensions."
879 *   << std::endl;
880 *  
881 *   make_grid();
882 *   setup_system();
883 *  
884 * @endcode
885 *
 886 * After we have set up the system, we initialize preCICE using the
 887 * functionality of the Adapter class.
888 *
889 * @code
890 *   adapter.initialize(dof_handler, boundary_data, mapping);
891 *  
892 * @endcode
893 *
894 * preCICE steers the coupled simulation: `isCouplingOngoing` is
895 * used to synchronize the end of the simulation with the coupling partner
896 *
897 * @code
898 *   while (adapter.precice.isCouplingOngoing())
899 *   {
900 * @endcode
901 *
902 * The time step number is solely used to generate unique output files
903 *
904 * @code
905 *   ++time_step;
906 * @endcode
907 *
908 * preCICE returns the maximum admissible time-step size,
909 * which needs to be compared to our desired solver time-step size.
910 *
911 * @code
912 *   precice_delta_t = adapter.precice.getMaxTimeStepSize();
913 *   delta_t = std::min(precice_delta_t, solver_delta_t);
914 * @endcode
915 *
 916 * Next, we read data. Since we use a fully implicit backward Euler method, we
 917 * want the data to be associated with the end of the current time step (delta_t).
 918 * Time-interpolation methods in preCICE allow us to read data at any
 919 * point in time, if the coupling scheme allows it.
920 *
921 * @code
922 *   adapter.read_data(delta_t, boundary_data);
923 *  
924 * @endcode
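 *
 * As a side note: because `read_data()` takes a relative read time, a solver
 * that subcycles within one coupling time window could also sample the
 * interpolated boundary data at intermediate times. A minimal sketch, not used
 * in this program and assuming the coupling scheme and waveform configuration
 * support time interpolation (the requested time must not exceed the maximum
 * admissible time-step size):
 *
 * @code
 *   // Sketch: sample the boundary data in the middle of the current window.
 *   adapter.read_data(0.5 * delta_t, boundary_data);
 * @endcode
 *
 * Subcycling would of course also require correspondingly smaller `advance()`
 * calls and a reduced time-step size in the assembly.
 *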
925 *
926 * In the time loop, we assemble the coupled system and solve it as
927 * usual.
928 *
929 * @code
930 *   assemble_system();
931 *   solve();
932 *  
933 * @endcode
934 *
 935 * After we have solved the system, we advance the coupling to the next time
 936 * level. In a bi-directionally coupled simulation, we would also pass our
 937 * computed data to preCICE here
938 *
939 * @code
940 *   adapter.advance(delta_t);
941 *  
942 * @endcode
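 *
 * In such a bi-directionally coupled setup, the Adapter would also call the
 * preCICE write API before advancing. A minimal sketch, not part of this
 * program; the data name `write_data_name` and the buffer `write_data_buffer`
 * are hypothetical counterparts of the read structures above:
 *
 * @code
 *   // Sketch: write the computed interface values, then advance the coupling.
 *   precice.writeData(mesh_name,
 *                     write_data_name,
 *                     interface_nodes_ids,
 *                     write_data_buffer);
 *   precice.advance(computed_timestep_length);
 * @endcode
 *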
943 *
 944 * Write an output file if the time step is completed. In case of an
 945 * implicit coupling, where individual time steps are computed more than
 946 * once, the function `isTimeWindowComplete` prevents unnecessary result
 947 * writing. For this simple tutorial configuration (explicit coupling),
 948 * the function always returns `true`.
949 *
950 * @code
951 *   if (adapter.precice.isTimeWindowComplete())
952 *   output_results();
953 *   }
954 *   }
955 *  
956 *  
957 *  
958 *   int
959 *   main()
960 *   {
961 *   CoupledLaplaceProblem<2> laplace_problem;
962 *   laplace_problem.run();
963 *  
964 *   return 0;
965 *   }
966 * @endcode
967
968
969<a name="ann-fancy_boundary_condition.cc"></a>
970<h1>Annotated version of fancy_boundary_condition.cc</h1>
971 *
972 *
973 *
974 *
975 * @code
976 *   /* -----------------------------------------------------------------------------
977 *   *
978 *   * SPDX-License-Identifier: LGPL-2.1-or-later
979 *   * Copyright (C) 2020 by David Schneider
980 *   * Copyright (C) 2020 by Benjamin Uekermann
981 *   *
982 *   * This file is part of the deal.II code gallery.
983 *   *
984 *   * -----------------------------------------------------------------------------
985 *   */
986 *  
987 * @endcode
988 *
989 * This program does not use any deal.II functionality and depends only on
990 * preCICE and the standard libraries.
991 *
992 * @code
993 *   #include <precice/precice.hpp>
994 *  
995 *   #include <iostream>
996 *   #include <sstream>
997 *   #include <vector>
998 *  
999 * @endcode
1000 *
1001 * The program computes a time-varying parabolic boundary condition, which is
1002 * passed to preCICE and serves as Dirichlet boundary condition for the other
1003 * coupling participant.
1004 *
1005
1006 *
1007 * Function to generate boundary values in each time step
1008 *
1009 * @code
1010 *   void
1011 *   define_boundary_values(std::vector<double> &boundary_data,
1012 *   const double time,
1013 *   const double end_time)
1014 *   {
1015 * @endcode
1016 *
1017 * Scale the current time value
1018 *
1019 * @code
1020 *   const double relative_time = time / end_time;
1021 * @endcode
1022 *
1023 * Define the amplitude. Values run from -0.5 to 0.5
1024 *
1025 * @code
1026 *   const double amplitude = (relative_time - 0.5);
1027 * @endcode
1028 *
1029 * Specify the actual data we want to pass to the other participant. Here, we
 1030 * choose a parabola with boundary values of 2 in order to enforce continuity
 1031 * with adjacent boundaries.
1032 *
1033 * @code
1034 *   const double n_elements = boundary_data.size();
1035 *   const double right_zero = boundary_data.size() - 1;
1036 *   const double left_zero = 0;
1037 *   const double offset = 2;
 1038 *   for (unsigned int i = 0; i < n_elements; ++i)
1039 *   boundary_data[i] =
1040 *   -amplitude * ((i - left_zero) * (i - right_zero)) + offset;
1041 *   }
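 *  
 *   // A quick check of the parabolic profile above: at the end points i = 0 and
 *   // i = n_elements - 1 it equals the offset 2. For the six vertices used in
 *   // this program, the largest deviation from the offset occurs at i = 2 and
 *   // i = 3, where the value is 2 + 6 * amplitude, i.e. between roughly -1 and 5
 *   // as the amplitude grows from -0.5 to 0.5 over the course of the simulation.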
1042 *  
1043 *  
1044 *   int
1045 *   main()
1046 *   {
1047 *   std::cout << "Boundary participant: starting... \n";
1048 *  
1049 * @endcode
1050 *
1051 * Configuration
1052 *
1053 * @code
1054 *   const std::string configFileName("precice-config.xml");
1055 *   const std::string solverName("boundary-participant");
1056 *   const std::string meshName("boundary-mesh");
1057 *   const std::string dataWriteName("boundary-data");
1058 *  
1059 * @endcode
1060 *
 1061 * For a parallel computation, adjust these values to the MPI rank and size
1062 *
1063 * @code
1064 *   const int commRank = 0;
1065 *   const int commSize = 1;
1066 *  
1067 *   precice::Participant precice(solverName, configFileName, commRank, commSize);
1068 *  
1069 *   const int dimensions = precice.getMeshDimensions(meshName);
1070 *   const int numberOfVertices = 6;
1071 *  
1072 * @endcode
1073 *
1074 * Set up data structures
1075 *
1076 * @code
1077 *   std::vector<double> writeData(numberOfVertices);
1078 *   std::vector<int> vertexIDs(numberOfVertices);
1079 *   std::vector<double> vertices(numberOfVertices * dimensions);
1080 *  
1081 * @endcode
1082 *
1083 * Define a boundary mesh
1084 *
1085 * @code
1086 *   std::cout << "Boundary participant: defining boundary mesh \n";
1087 *   const double length = 2;
1088 *   const double xCoord = 1;
1089 *   const double deltaY = length / (numberOfVertices - 1);
1090 *   for (int i = 0; i < numberOfVertices; ++i)
1091 *   for (int j = 0; j < dimensions; ++j)
1092 *   {
1093 *   const unsigned int index = dimensions * i + j;
1094 * @endcode
1095 *
1096 * The x-coordinate is always 1, i.e., the boundary is parallel to the
 1097 * y-axis. The y-coordinate descends from 1 to -1.
1098 *
1099 * @code
1100 *   if (j == 0)
1101 *   vertices[index] = xCoord;
1102 *   else
1103 *   vertices[index] = 1 - deltaY * i;
1104 *   }
1105 *  
1106 * @endcode
1107 *
1108 * Pass the vertices to preCICE
1109 *
1110 * @code
1111 *   precice.setMeshVertices(meshName, vertices, vertexIDs);
1112 *  
1113 * @endcode
1114 *
1115 * Variables for the time
1116 *
1117 * @code
1118 *   const double end_time = 1;
1119 *   double time = 0;
1120 *  
1121 * @endcode
1122 *
 1123 * Write initial data if preCICE requires it; not the case with the default configuration
1124 *
1125 * @code
1126 *   if (precice.requiresInitialData())
1127 *   {
1128 *   std::cout << "Boundary participant: writing initial data \n";
1129 *   define_boundary_values(writeData, time, end_time);
1130 *   precice.writeData(meshName, dataWriteName, vertexIDs, writeData);
1131 *   }
1132 *  
1133 * @endcode
1134 *
 1135 * Initialize the Participant
1136 *
1137 * @code
1138 *   precice.initialize();
1139 *  
1140 * @endcode
1141 *
1142 * Start time loop
1143 *
1144 * @code
1145 *   while (precice.isCouplingOngoing())
1146 *   {
1147 *   double dt = precice.getMaxTimeStepSize();
1148 *   time += dt;
1149 *  
1150 * @endcode
1151 *
1152 * Generate new boundary data
1153 *
1154 * @code
1155 *   define_boundary_values(writeData, time, end_time);
1156 *  
1157 *   std::cout << "Boundary participant: writing coupling data \n";
1158 *   precice.writeData(meshName, dataWriteName, vertexIDs, writeData);
1159 *  
1160 *   std::cout << "Boundary participant: advancing in time\n";
1161 *   precice.advance(dt);
1162 *   }
1163 *  
1164 *   std::cout << "Boundary participant: closing...\n";
1165 *  
1166 *   return 0;
1167 *   }
1168 * @endcode
1169
1170
1171*/