Reference documentation for deal.II version 9.6.0
time_dependent_navier_stokes.h
204 *  
205 *   #include <deal.II/base/function.h>
206 *   #include <deal.II/base/logstream.h>
207 *   #include <deal.II/base/quadrature_lib.h>
208 *   #include <deal.II/base/quadrature_point_data.h>
209 *   #include <deal.II/base/tensor.h>
210 *   #include <deal.II/base/timer.h>
211 *   #include <deal.II/base/utilities.h>
212 *  
213 *   #include <deal.II/lac/affine_constraints.h>
214 *   #include <deal.II/lac/block_sparse_matrix.h>
215 *   #include <deal.II/lac/block_vector.h>
216 *   #include <deal.II/lac/dynamic_sparsity_pattern.h>
217 *   #include <deal.II/lac/full_matrix.h>
218 *   #include <deal.II/lac/precondition.h>
219 *   #include <deal.II/lac/solver_cg.h>
220 *   #include <deal.II/lac/solver_gmres.h>
221 *   #include <deal.II/lac/sparse_direct.h>
222 *   #include <deal.II/lac/sparsity_tools.h>
223 *  
224 *   #include <deal.II/lac/petsc_block_sparse_matrix.h>
225 *   #include <deal.II/lac/petsc_sparse_matrix.h>
226 *   #include <deal.II/lac/petsc_vector.h>
227 *   #include <deal.II/lac/petsc_precondition.h>
228 *   #include <deal.II/lac/petsc_solver.h>
229 *  
230 *   #include <deal.II/grid/grid_generator.h>
231 *   #include <deal.II/grid/grid_refinement.h>
232 *   #include <deal.II/grid/grid_tools.h>
233 *   #include <deal.II/grid/manifold_lib.h>
234 *   #include <deal.II/grid/tria.h>
235 *   #include <deal.II/grid/tria_accessor.h>
236 *   #include <deal.II/grid/tria_iterator.h>
237 *  
238 *   #include <deal.II/dofs/dof_accessor.h>
239 *   #include <deal.II/dofs/dof_handler.h>
240 *   #include <deal.II/dofs/dof_renumbering.h>
241 *   #include <deal.II/dofs/dof_tools.h>
242 *  
243 *   #include <deal.II/fe/fe_q.h>
244 *   #include <deal.II/fe/fe_system.h>
245 *   #include <deal.II/fe/fe_values.h>
246 *  
247 *   #include <deal.II/numerics/data_out.h>
248 *   #include <deal.II/numerics/error_estimator.h>
249 *   #include <deal.II/numerics/matrix_tools.h>
250 *   #include <deal.II/numerics/vector_tools.h>
251 *  
252 *   #include <deal.II/distributed/grid_refinement.h>
253 *   #include <deal.II/distributed/solution_transfer.h>
254 *   #include <deal.II/distributed/tria.h>
255 *  
256 *   #include <fstream>
257 *   #include <iostream>
258 *   #include <sstream>
259 *  
260 *   namespace fluid
261 *   {
262 *   using namespace dealii;
263 *  
264 * @endcode
265 *
266 *
267 * <a name="time_dependent_navier_stokes.cc-Createthetriangulation"></a>
268 * <h3>Create the triangulation</h3>
269 * The code to create the triangulation is copied from
270 * [Martin Kronbichler's
271 * code](https://github.com/kronbichler/adaflo/blob/master/tests/flow_past_cylinder.cc)
272 * with very few modifications.
273 *
274
275 *
276 *
277 * <a name="time_dependent_navier_stokes.cc-Helperfunction"></a>
278 * <h4>Helper function</h4>
279 *
280 * @code
281 *   void create_triangulation_2d(Triangulation<2> &tria,
282 *   bool compute_in_2d = true)
283 *   {
284 *   SphericalManifold<2> boundary(Point<2>(0.5, 0.2));
285 *   Triangulation<2> left, middle, right, tmp, tmp2;
286 *   GridGenerator::subdivided_hyper_rectangle(
287 *   left,
288 *   std::vector<unsigned int>({3U, 4U}),
289 *   Point<2>(),
290 *   Point<2>(0.3, 0.41),
291 *   false);
292 *   GridGenerator::subdivided_hyper_rectangle(
293 *   right,
294 *   std::vector<unsigned int>({18U, 4U}),
295 *   Point<2>(0.7, 0),
296 *   Point<2>(2.5, 0.41),
297 *   false);
298 *  
299 * @endcode
300 *
301 * Create the middle part first as a hyper shell.
302 *
303 * @code
304 *   GridGenerator::hyper_shell(middle, Point<2>(0.5, 0.2), 0.05, 0.2, 4, true);
305 *   middle.reset_all_manifolds();
306 *   for (Triangulation<2>::cell_iterator cell = middle.begin();
307 *   cell != middle.end();
308 *   ++cell)
309 *   for (unsigned int f = 0; f < GeometryInfo<2>::faces_per_cell; ++f)
310 *   {
311 *   bool is_inner_rim = true;
312 *   for (unsigned int v = 0; v < GeometryInfo<2>::vertices_per_face; ++v)
313 *   {
314 *   Point<2> &vertex = cell->face(f)->vertex(v);
315 *   if (std::abs(vertex.distance(Point<2>(0.5, 0.2)) - 0.05) > 1e-10)
316 *   {
317 *   is_inner_rim = false;
318 *   break;
319 *   }
320 *   }
321 *   if (is_inner_rim)
322 *   cell->face(f)->set_manifold_id(1);
323 *   }
324 *   middle.set_manifold(1, boundary);
325 *   middle.refine_global(1);
326 *  
327 * @endcode
328 *
329 * Then move the vertices to the points where we want them to be to create a
330 * slightly asymmetric cube with a hole:
331 *
332 * @code
333 *   for (Triangulation<2>::cell_iterator cell = middle.begin();
334 *   cell != middle.end();
335 *   ++cell)
336 *   for (unsigned int v = 0; v < GeometryInfo<2>::vertices_per_cell; ++v)
337 *   {
338 *   Point<2> &vertex = cell->vertex(v);
339 *   if (std::abs(vertex[0] - 0.7) < 1e-10 &&
340 *   std::abs(vertex[1] - 0.2) < 1e-10)
341 *   vertex = Point<2>(0.7, 0.205);
342 *   else if (std::abs(vertex[0] - 0.6) < 1e-10 &&
343 *   std::abs(vertex[1] - 0.3) < 1e-10)
344 *   vertex = Point<2>(0.7, 0.41);
345 *   else if (std::abs(vertex[0] - 0.6) < 1e-10 &&
346 *   std::abs(vertex[1] - 0.1) < 1e-10)
347 *   vertex = Point<2>(0.7, 0);
348 *   else if (std::abs(vertex[0] - 0.5) < 1e-10 &&
349 *   std::abs(vertex[1] - 0.4) < 1e-10)
350 *   vertex = Point<2>(0.5, 0.41);
351 *   else if (std::abs(vertex[0] - 0.5) < 1e-10 &&
352 *   std::abs(vertex[1] - 0.0) < 1e-10)
353 *   vertex = Point<2>(0.5, 0.0);
354 *   else if (std::abs(vertex[0] - 0.4) < 1e-10 &&
355 *   std::abs(vertex[1] - 0.3) < 1e-10)
356 *   vertex = Point<2>(0.3, 0.41);
357 *   else if (std::abs(vertex[0] - 0.4) < 1e-10 &&
358 *   std::abs(vertex[1] - 0.1) < 1e-10)
359 *   vertex = Point<2>(0.3, 0);
360 *   else if (std::abs(vertex[0] - 0.3) < 1e-10 &&
361 *   std::abs(vertex[1] - 0.2) < 1e-10)
362 *   vertex = Point<2>(0.3, 0.205);
363 *   else if (std::abs(vertex[0] - 0.56379) < 1e-4 &&
364 *   std::abs(vertex[1] - 0.13621) < 1e-4)
365 *   vertex = Point<2>(0.59, 0.11);
366 *   else if (std::abs(vertex[0] - 0.56379) < 1e-4 &&
367 *   std::abs(vertex[1] - 0.26379) < 1e-4)
368 *   vertex = Point<2>(0.59, 0.29);
369 *   else if (std::abs(vertex[0] - 0.43621) < 1e-4 &&
370 *   std::abs(vertex[1] - 0.13621) < 1e-4)
371 *   vertex = Point<2>(0.41, 0.11);
372 *   else if (std::abs(vertex[0] - 0.43621) < 1e-4 &&
373 *   std::abs(vertex[1] - 0.26379) < 1e-4)
374 *   vertex = Point<2>(0.41, 0.29);
375 *   }
376 *  
377 * @endcode
378 *
379 * Refine once to create the same level of refinement as in the
380 * neighboring domains:
381 *
382 * @code
383 *   middle.refine_global(1);
384 *  
385 * @endcode
386 *
387 * We must copy the triangulation because triangulations with refinement
388 * cannot be merged; flatten_triangulation creates a new mesh made up of the active cells:
389 *
390 * @code
391 *   GridGenerator::flatten_triangulation(middle, tmp2);
392 *  
393 * @endcode
394 *
395 * The left domain is required in 3d only.
396 *
397 * @code
398 *   if (compute_in_2d)
399 *   {
400 *   GridGenerator::merge_triangulations(tmp2, right, tria);
401 *   }
402 *   else
403 *   {
404 *   GridGenerator::merge_triangulations(left, tmp2, tmp);
405 *   GridGenerator::merge_triangulations(tmp, right, tria);
406 *   }
407 *   }
408 *  
409 * @endcode
410 *
411 *
412 * <a name="time_dependent_navier_stokes.cc-2Dflowaroundcylindertriangulation"></a>
413 * <h4>2D flow around cylinder triangulation</h4>
414 *
415 * @code
416 *   void create_triangulation(Triangulation<2> &tria)
417 *   {
418 *   create_triangulation_2d(tria);
419 * @endcode
420 *
421 * Set the left boundary (inflow) to 0, the right boundary (outflow) to 1,
422 * the lower boundary to 2, the upper boundary to 3, and the cylindrical surface to 4.
423 *
424 * @code
425 *   for (Triangulation<2>::active_cell_iterator cell = tria.begin();
426 *   cell != tria.end();
427 *   ++cell)
428 *   {
429 *   for (unsigned int f = 0; f < GeometryInfo<2>::faces_per_cell; ++f)
430 *   {
431 *   if (cell->face(f)->at_boundary())
432 *   {
433 *   if (std::abs(cell->face(f)->center()[0] - 2.5) < 1e-12)
434 *   {
435 *   cell->face(f)->set_all_boundary_ids(1);
436 *   }
437 *   else if (std::abs(cell->face(f)->center()[0] - 0.3) < 1e-12)
438 *   {
439 *   cell->face(f)->set_all_boundary_ids(0);
440 *   }
441 *   else if (std::abs(cell->face(f)->center()[1] - 0.41) < 1e-12)
442 *   {
443 *   cell->face(f)->set_all_boundary_ids(3);
444 *   }
445 *   else if (std::abs(cell->face(f)->center()[1]) < 1e-12)
446 *   {
447 *   cell->face(f)->set_all_boundary_ids(2);
448 *   }
449 *   else
450 *   {
451 *   cell->face(f)->set_all_boundary_ids(4);
452 *   }
453 *   }
454 *   }
455 *   }
456 *   }
457 *  
458 * @endcode
459 *
460 *
461 * <a name="time_dependent_navier_stokes.cc-3Dflowaroundcylindertriangulation"></a>
462 * <h4>3D flow around cylinder triangulation</h4>
463 *
464 * @code
465 *   void create_triangulation(Triangulation<3> &tria)
466 *   {
467 *   Triangulation<2> tria_2d;
468 *   create_triangulation_2d(tria_2d, false);
469 *   GridGenerator::extrude_triangulation(tria_2d, 5, 0.41, tria);
470 * @endcode
471 *
472 * Set the ids of the boundaries in x direction to 0 and 1; y direction to 2 and 3;
473 * z direction to 4 and 5; and the cylindrical surface to 6.
474 *
475 * @code
476 *   for (Triangulation<3>::active_cell_iterator cell = tria.begin();
477 *   cell != tria.end();
478 *   ++cell)
479 *   {
480 *   for (unsigned int f = 0; f < GeometryInfo<3>::faces_per_cell; ++f)
481 *   {
482 *   if (cell->face(f)->at_boundary())
483 *   {
484 *   if (std::abs(cell->face(f)->center()[0] - 2.5) < 1e-12)
485 *   {
486 *   cell->face(f)->set_all_boundary_ids(1);
487 *   }
488 *   else if (std::abs(cell->face(f)->center()[0]) < 1e-12)
489 *   {
490 *   cell->face(f)->set_all_boundary_ids(0);
491 *   }
492 *   else if (std::abs(cell->face(f)->center()[1] - 0.41) < 1e-12)
493 *   {
494 *   cell->face(f)->set_all_boundary_ids(3);
495 *   }
496 *   else if (std::abs(cell->face(f)->center()[1]) < 1e-12)
497 *   {
498 *   cell->face(f)->set_all_boundary_ids(2);
499 *   }
500 *   else if (std::abs(cell->face(f)->center()[2] - 0.41) < 1e-12)
501 *   {
502 *   cell->face(f)->set_all_boundary_ids(5);
503 *   }
504 *   else if (std::abs(cell->face(f)->center()[2]) < 1e-12)
505 *   {
506 *   cell->face(f)->set_all_boundary_ids(4);
507 *   }
508 *   else
509 *   {
510 *   cell->face(f)->set_all_boundary_ids(6);
511 *   }
512 *   }
513 *   }
514 *   }
515 *   }
516 *  
517 * @endcode
518 *
519 *
520 * <a name="time_dependent_navier_stokes.cc-Timestepping"></a>
521 * <h3>Time stepping</h3>
522 * This class is pretty much self-explanatory.
523 *
524 * @code
525 *   class Time
526 *   {
527 *   public:
528 *   Time(const double time_end,
529 *   const double delta_t,
530 *   const double output_interval,
531 *   const double refinement_interval)
532 *   : timestep(0),
533 *   time_current(0.0),
534 *   time_end(time_end),
535 *   delta_t(delta_t),
536 *   output_interval(output_interval),
537 *   refinement_interval(refinement_interval)
538 *   {
539 *   }
540 *   double current() const { return time_current; }
541 *   double end() const { return time_end; }
542 *   double get_delta_t() const { return delta_t; }
543 *   unsigned int get_timestep() const { return timestep; }
544 *   bool time_to_output() const;
545 *   bool time_to_refine() const;
546 *   void increment();
547 *  
548 *   private:
549 *   unsigned int timestep;
550 *   double time_current;
551 *   const double time_end;
552 *   const double delta_t;
553 *   const double output_interval;
554 *   const double refinement_interval;
555 *   };
556 *  
557 *   bool Time::time_to_output() const
558 *   {
559 *   unsigned int delta = static_cast<unsigned int>(output_interval / delta_t);
560 *   return (timestep >= delta && timestep % delta == 0);
561 *   }
562 *  
563 *   bool Time::time_to_refine() const
564 *   {
565 *   unsigned int delta = static_cast<unsigned int>(refinement_interval / delta_t);
566 *   return (timestep >= delta && timestep % delta == 0);
567 *   }
568 *  
569 *   void Time::increment()
570 *   {
571 *   time_current += delta_t;
572 *   ++timestep;
573 *   }
574 *  
575 * @endcode
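 *
 * As a quick worked example (mine, based on the parameters passed to Time
 * later in InsIMEX): with @f$\Delta{t} = 10^{-3}@f$ and an output interval of
 * @f$10^{-2}@f$, output_interval / delta_t is nominally 10, so results are
 * written every tenth step. Note that the integer cast truncates: if the
 * floating-point quotient comes out slightly below the intended integer,
 * the interval shrinks by one step.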
576 *
577 *
578 * <a name="time_dependent_navier_stokes.cc-Boundaryvalues"></a>
579 * <h3>Boundary values</h3>
580 * Dirichlet boundary conditions for the velocity inlet and walls.
581 *
582 * @code
583 *   template <int dim>
584 *   class BoundaryValues : public Function<dim>
585 *   {
586 *   public:
587 *   BoundaryValues() : Function<dim>(dim + 1) {}
588 *   virtual double value(const Point<dim> &p,
589 *   const unsigned int component) const override;
590 *  
591 *   virtual void vector_value(const Point<dim> &p,
592 *   Vector<double> &values) const override;
593 *   };
594 *  
595 *   template <int dim>
596 *   double BoundaryValues<dim>::value(const Point<dim> &p,
597 *   const unsigned int component) const
598 *   {
599 *   Assert(component < this->n_components,
600 *   ExcIndexRange(component, 0, this->n_components));
601 *   double left_boundary = (dim == 2 ? 0.3 : 0.0);
602 *   if (component == 0 && std::abs(p[0] - left_boundary) < 1e-10)
603 *   {
604 * @endcode
605 *
606 * For a parabolic velocity profile, @f$U_\mathrm{avg} = 2/3
607 * U_\mathrm{max}@f$
608 * in 2D, and @f$U_\mathrm{avg} = 4/9 U_\mathrm{max}@f$ in 3D.
609 * If @f$\nu = 0.001@f$, @f$D = 0.1@f$, then @f$Re = 100 U_\mathrm{avg}@f$.
610 *
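 * To verify these factors (a quick check, not part of the original text):
 * integrating the 2D profile @f$u(y) = 4 U_\mathrm{max}\, y (H - y)/H^2@f$
 * over the channel height @f$H = 0.41@f$ gives
 * @f$U_\mathrm{avg} = \frac{1}{H}\int_0^H u(y)\,dy = \frac{2}{3} U_\mathrm{max}@f$;
 * in 3D the profile is multiplied by the same parabola in @f$z@f$, whose
 * average contributes another factor of @f$2/3@f$, giving @f$4/9@f$. With
 * @f$D = 0.1@f$ and @f$\nu = 0.001@f$,
 * @f$Re = U_\mathrm{avg} D / \nu = 100\, U_\mathrm{avg}@f$.
 *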
611 * @code
612 *   double Uavg = 1.0;
613 *   double Umax = (dim == 2 ? 3 * Uavg / 2 : 9 * Uavg / 4);
614 *   double value = 4 * Umax * p[1] * (0.41 - p[1]) / (0.41 * 0.41);
615 *   if (dim == 3)
616 *   {
617 *   value *= 4 * p[2] * (0.41 - p[2]) / (0.41 * 0.41);
618 *   }
619 *   return value;
620 *   }
621 *   return 0;
622 *   }
623 *  
624 *   template <int dim>
625 *   void BoundaryValues<dim>::vector_value(const Point<dim> &p,
626 *   Vector<double> &values) const
627 *   {
628 *   for (unsigned int c = 0; c < this->n_components; ++c)
629 *   values(c) = BoundaryValues<dim>::value(p, c);
630 *   }
631 *  
632 * @endcode
633 *
634 *
635 * <a name="time_dependent_navier_stokes.cc-Blockpreconditioner"></a>
636 * <h3>Block preconditioner</h3>
637 *
638
639 *
640 * The block Schur preconditioner can be written as the product of three
641 * matrices:
642 * @f$
643 * P^{-1} = \begin{pmatrix} \tilde{A}^{-1} & 0\\ 0 & I\end{pmatrix}
644 * \begin{pmatrix} I & -B^T\\ 0 & I\end{pmatrix}
645 * \begin{pmatrix} I & 0\\ 0 & \tilde{S}^{-1}\end{pmatrix}
646 * @f$
647 * @f$\tilde{A}@f$ is symmetric since the convection term is eliminated from the
648 * LHS.
649 * @f$\tilde{S}^{-1}@f$ is the inverse of the Schur complement of @f$\tilde{A}@f$,
650 * which consists of a reaction term, a diffusion term, a Grad-Div term
651 * and a convection term.
652 * In practice, the convection contribution is ignored, namely
653 * @f$\tilde{S}^{-1} = -(\nu + \gamma)M_p^{-1} -
654 * \frac{1}{\Delta{t}}{[B(diag(M_u))^{-1}B^T]}^{-1}@f$
655 * where @f$M_p@f$ is the pressure mass matrix, and
656 * @f${[B(diag(M_u))^{-1}B^T]}@f$ is an approximation of the Schur
657 * complement of the (velocity) mass matrix @f$BM_u^{-1}B^T@f$.
658 *
659
660 *
661 * As in the tutorials, we define a vmult operation for the block
662 * preconditioner instead of writing it out as a matrix. As can be seen
663 * from the definition above, the result of applying the block
664 * preconditioner to a vector can be obtained from the results of the
665 * vmult operations of @f$M_u^{-1}@f$, @f$M_p^{-1}@f$, and
666 * @f$\tilde{A}^{-1}@f$, each of which amounts to solving a symmetric
667 * linear system.
670 *
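 * Concretely, applying the three factors to @f$v = (v_0, v_1)^T@f$ from
 * right to left gives the two steps implemented in vmult below:
 * @f[
 * u_1 = \tilde{S}^{-1} v_1, \qquad
 * u_0 = \tilde{A}^{-1} \left(v_0 - B^T u_1\right).
 * @f]
 *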
671 * @code
672 *   class BlockSchurPreconditioner : public Subscriptor
673 *   {
674 *   public:
675 *   BlockSchurPreconditioner(
676 *   TimerOutput &timer,
677 *   double gamma,
678 *   double viscosity,
679 *   double dt,
680 *   const std::vector<IndexSet> &owned_partitioning,
681 *   const PETScWrappers::MPI::BlockSparseMatrix &system,
682 *   const PETScWrappers::MPI::BlockSparseMatrix &mass,
683 *   PETScWrappers::MPI::BlockSparseMatrix &schur);
684 *  
685 *   void vmult(PETScWrappers::MPI::BlockVector &dst,
686 *   const PETScWrappers::MPI::BlockVector &src) const;
687 *  
688 *   private:
689 *   TimerOutput &timer;
690 *   const double gamma;
691 *   const double viscosity;
692 *   const double dt;
693 *  
694 *   const SmartPointer<const PETScWrappers::MPI::BlockSparseMatrix>
695 *   system_matrix;
696 *   const SmartPointer<const PETScWrappers::MPI::BlockSparseMatrix> mass_matrix;
697 * @endcode
698 *
699 * As discussed, @f${[B(diag(M_u))^{-1}B^T]}@f$ and its inverse
700 * need to be computed.
701 * We can either compute it explicitly as a matrix, or define it as a
702 * class with a vmult operation.
703 * The second approach saves the computation needed to construct the matrix,
704 * but leads to slow convergence of the CG solver, because no preconditioner
705 * can be applied to it. We go with the first route.
706 *
707 * @code
708 *   const SmartPointer<PETScWrappers::MPI::BlockSparseMatrix> mass_schur;
709 *   };
710 *  
711 * @endcode
712 *
713 *
714 * <a name="time_dependent_navier_stokes.cc-BlockSchurPreconditionerBlockSchurPreconditioner"></a>
715 * <h4>BlockSchurPreconditioner::BlockSchurPreconditioner</h4>
716 *
717
718 *
719 * The input parameters, the system matrix, the mass matrix, and the mass
720 * Schur matrix are needed by the preconditioner. In addition, we pass the
721 * partitioning information into this class because we need to create some
722 * temporary block vectors inside it.
723 *
724 * @code
725 *   BlockSchurPreconditioner::BlockSchurPreconditioner(
726 *   TimerOutput &timer,
727 *   double gamma,
728 *   double viscosity,
729 *   double dt,
730 *   const std::vector<IndexSet> &owned_partitioning,
731 *   const PETScWrappers::MPI::BlockSparseMatrix &system,
732 *   const PETScWrappers::MPI::BlockSparseMatrix &mass,
733 *   PETScWrappers::MPI::BlockSparseMatrix &schur)
734 *   : timer(timer),
735 *   gamma(gamma),
736 *   viscosity(viscosity),
737 *   dt(dt),
738 *   system_matrix(&system),
739 *   mass_matrix(&mass),
740 *   mass_schur(&schur)
741 *   {
742 *   TimerOutput::Scope timer_section(timer, "CG for Sm");
743 * @endcode
744 *
745 * The Schur complement of the mass matrix is actually computed here.
746 *
747 * @code
748 *   PETScWrappers::MPI::BlockVector tmp1, tmp2;
749 *   tmp1.reinit(owned_partitioning, mass_matrix->get_mpi_communicator());
750 *   tmp2.reinit(owned_partitioning, mass_matrix->get_mpi_communicator());
751 *   tmp1 = 1;
752 *   tmp2 = 0;
753 * @endcode
754 *
755 * The Jacobi preconditioner of a matrix @f$A@f$ is by definition @f${diag(A)}^{-1}@f$,
756 * which is exactly what we want to compute here.
757 *
758 * @code
759 *   PETScWrappers::PreconditionJacobi jacobi(mass_matrix->block(0, 0));
760 *   jacobi.vmult(tmp2.block(0), tmp1.block(0));
761 *   system_matrix->block(1, 0).mmult(
762 *   mass_schur->block(1, 1), system_matrix->block(0, 1), tmp2.block(0));
763 *   }
764 *  
765 * @endcode
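 *
 * To unpack this trick (my reading, assuming the usual semantics of the
 * three-argument mmult in the PETSc wrappers): applying the Jacobi
 * preconditioner to a vector of all ones stores the entries of
 * @f$diag(M_u)^{-1}@f$ in tmp2, and A.mmult(C, B, V) computes
 * @f$C = A \, diag(V) \, B@f$, so the call above forms exactly
 * @f$B (diag(M_u))^{-1} B^T@f$.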
766 *
767 *
768 * <a name="time_dependent_navier_stokes.cc-BlockSchurPreconditionervmult"></a>
769 * <h4>BlockSchurPreconditioner::vmult</h4>
770 *
771
772 *
773 * The vmult operation strictly follows the definition of
774 * BlockSchurPreconditioner
775 * introduced above. Conceptually it computes @f$u = P^{-1}v@f$.
776 *
777 * @code
778 *   void BlockSchurPreconditioner::vmult(
779 *   PETScWrappers::MPI::BlockVector &dst,
780 *   const PETScWrappers::MPI::BlockVector &src) const
781 *   {
782 * @endcode
783 *
784 * Temporary vectors
785 *
786 * @code
787 *   PETScWrappers::MPI::Vector utmp(src.block(0));
788 *   PETScWrappers::MPI::Vector tmp(src.block(1));
789 *   tmp = 0;
790 * @endcode
791 *
792 * This block computes @f$u_1 = \tilde{S}^{-1} v_1@f$,
793 * where CG solvers are used for @f$M_p^{-1}@f$ and @f$S_m^{-1}@f$.
794 *
795 * @code
796 *   {
797 *   TimerOutput::Scope timer_section(timer, "CG for Mp");
798 *   SolverControl mp_control(src.block(1).size(),
799 *   1e-6 * src.block(1).l2_norm());
800 *   PETScWrappers::SolverCG cg_mp(mp_control,
801 *   mass_schur->get_mpi_communicator());
802 * @endcode
803 *
804 * @f$-(\nu + \gamma)M_p^{-1}v_1@f$
805 *
806 * @code
807 *   PETScWrappers::PreconditionBlockJacobi Mp_preconditioner;
808 *   Mp_preconditioner.initialize(mass_matrix->block(1, 1));
809 *   cg_mp.solve(
810 *   mass_matrix->block(1, 1), tmp, src.block(1), Mp_preconditioner);
811 *   tmp *= -(viscosity + gamma);
812 *   }
813 * @endcode
814 *
815 * @f$-\frac{1}{dt}S_m^{-1}v_1@f$
816 *
817 * @code
818 *   {
819 *   TimerOutput::Scope timer_section(timer, "CG for Sm");
820 *   SolverControl sm_control(src.block(1).size(),
821 *   1e-6 * src.block(1).l2_norm());
822 *   PETScWrappers::SolverCG cg_sm(sm_control,
823 *   mass_schur->get_mpi_communicator());
824 * @endcode
825 *
826 * PreconditionBlockJacobi works fine on @f$S_m@f$ as long as we do not
827 * refine the mesh. After refine_mesh is called, zero entries appear on
828 * the diagonal (it is not clear why), which prevents PreconditionBlockJacobi
829 * from being used.
830 *
831 * @code
832 *   PETScWrappers::PreconditionNone Sm_preconditioner;
833 *   Sm_preconditioner.initialize(mass_schur->block(1, 1));
834 *   cg_sm.solve(
835 *   mass_schur->block(1, 1), dst.block(1), src.block(1), Sm_preconditioner);
836 *   dst.block(1) *= -1 / dt;
837 *   }
838 * @endcode
839 *
840 * Adding up these two, we get @f$\tilde{S}^{-1}v_1@f$.
841 *
842 * @code
843 *   dst.block(1) += tmp;
844 * @endcode
845 *
846 * Compute @f$v_0 - B^T\tilde{S}^{-1}v_1@f$ based on @f$u_1@f$.
847 *
848 * @code
849 *   system_matrix->block(0, 1).vmult(utmp, dst.block(1));
850 *   utmp *= -1.0;
851 *   utmp += src.block(0);
852 * @endcode
853 *
854 * Finally, compute the product of @f$\tilde{A}^{-1}@f$ and utmp
855 * using another CG solver.
856 *
857 * @code
858 *   {
859 *   TimerOutput::Scope timer_section(timer, "CG for A");
860 *   SolverControl a_control(src.block(0).size(),
861 *   1e-6 * src.block(0).l2_norm());
862 *   PETScWrappers::SolverCG cg_a(a_control,
863 *   mass_schur->get_mpi_communicator());
864 * @endcode
865 *
866 * We do not use any preconditioner for this block, which is of course
867 * slow, simply because the two preconditioners available for it,
868 * PreconditionBlockJacobi and PreconditionBoomerAMG, perform even
869 * worse than using no preconditioner at all.
871 *
872 * @code
873 *   PETScWrappers::PreconditionNone A_preconditioner;
874 *   A_preconditioner.initialize(system_matrix->block(0, 0));
875 *   cg_a.solve(
876 *   system_matrix->block(0, 0), dst.block(0), utmp, A_preconditioner);
877 *   }
878 *   }
879 *  
880 * @endcode
881 *
882 *
883 * <a name="time_dependent_navier_stokes.cc-TheincompressibleNavierStokessolver"></a>
884 * <h3>The incompressible Navier-Stokes solver</h3>
885 *
886
887 *
888 * A parallel incompressible Navier-Stokes solver using an
889 * implicit-explicit (IMEX) time scheme.
890 * This program is built upon the deal.II tutorials @ref step_57 "step-57", @ref step_40 "step-40", @ref step_22 "step-22",
891 * and @ref step_20 "step-20".
892 * The system equation is written in incremental form, and we treat
893 * the convection term explicitly. Therefore the system equation is linear
894 * and symmetric, and does not need to be solved with Newton's iteration.
895 * The system is further stabilized and preconditioned with the Grad-Div
896 * method, with GMRES used as the outer solver.
897 *
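 * Schematically (in my notation, not from the original text), with @f$K@f$
 * the diffusion matrix, @f$G@f$ the Grad-Div matrix, and @f$C(u^n)u^n@f$
 * the explicit convection term, each time step solves for the increment
 * @f$(\delta u, \delta p)@f$:
 * @f[
 * \begin{pmatrix} \frac{1}{\Delta{t}} M_u + \nu K + \gamma G & -B^T \\
 * -B & 0 \end{pmatrix}
 * \begin{pmatrix} \delta u \\ \delta p \end{pmatrix}
 * = - \begin{pmatrix} \nu K u^n + \gamma G u^n - B^T p^n + C(u^n) u^n \\
 * -B u^n \end{pmatrix},
 * @f]
 * which matches the local_matrix and local_rhs contributions assembled in
 * InsIMEX::assemble below.
 *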
898 * @code
899 *   template <int dim>
900 *   class InsIMEX
901 *   {
902 *   public:
903 *   InsIMEX(parallel::distributed::Triangulation<dim> &tria);
904 *   void run();
905 *   ~InsIMEX() { timer.print_summary(); }
906 *  
907 *   private:
908 *   void setup_dofs();
909 *   void make_constraints();
910 *   void initialize_system();
911 *   void assemble(bool use_nonzero_constraints, bool assemble_system);
912 *   std::pair<unsigned int, double> solve(bool use_nonzero_constraints,
913 *   bool assemble_system);
914 *   void refine_mesh(const unsigned int, const unsigned int);
915 *   void output_results(const unsigned int) const;
916 *   double viscosity;
917 *   double gamma;
918 *   const unsigned int degree;
919 *   std::vector<types::global_dof_index> dofs_per_block;
920 *  
921 *   parallel::distributed::Triangulation<dim> &triangulation;
922 *   FESystem<dim> fe;
923 *   DoFHandler<dim> dof_handler;
924 *   QGauss<dim> volume_quad_formula;
925 *   QGauss<dim - 1> face_quad_formula;
926 *  
927 *   AffineConstraints<double> zero_constraints;
928 *   AffineConstraints<double> nonzero_constraints;
929 *  
930 *   BlockSparsityPattern sparsity_pattern;
931 * @endcode
932 *
933 * System matrix to be solved
934 *
935 * @code
936 *   PETScWrappers::MPI::BlockSparseMatrix system_matrix;
937 * @endcode
938 *
939 * The mass matrix is a block matrix that includes both the velocity
940 * mass matrix and the pressure mass matrix.
941 *
942 * @code
943 *   PETScWrappers::MPI::BlockSparseMatrix mass_matrix;
944 * @endcode
945 *
946 * The Schur complement of the mass matrix is not a block matrix.
947 * However, because we want to reuse the partitioning we created
948 * for the system matrix, it is defined as a block matrix
949 * in which only one block is actually used.
950 *
951 * @code
952 *   PETScWrappers::MPI::BlockSparseMatrix mass_schur;
953 * @endcode
954 *
955 * The latest known solution.
956 *
957 * @code
958 *   PETScWrappers::MPI::BlockVector present_solution;
959 * @endcode
960 *
961 * The increment at a certain time step.
962 *
963 * @code
964 *   PETScWrappers::MPI::BlockVector solution_increment;
965 * @endcode
966 *
967 * System RHS
968 *
969 * @code
970 *   PETScWrappers::MPI::BlockVector system_rhs;
971 *  
972 *   MPI_Comm mpi_communicator;
973 *  
974 *   ConditionalOStream pcout;
975 *  
976 * @endcode
977 *
978 * The IndexSets of the locally owned velocity and pressure DoFs, respectively.
979 *
980 * @code
981 *   std::vector<IndexSet> owned_partitioning;
982 *  
983 * @endcode
984 *
985 * The IndexSets of the locally relevant velocity and pressure DoFs, respectively.
986 *
987 * @code
988 *   std::vector<IndexSet> relevant_partitioning;
989 *  
990 * @endcode
991 *
992 * The IndexSet of all locally relevant DoFs.
993 *
994 * @code
995 *   IndexSet locally_relevant_dofs;
996 *  
997 * @endcode
998 *
999 * The BlockSchurPreconditioner for the entire system.
1000 *
1001 * @code
1002 *   std::shared_ptr<BlockSchurPreconditioner> preconditioner;
1003 *  
1004 *   Time time;
1005 *   mutable TimerOutput timer;
1006 *   };
1007 *  
1008 * @endcode
1009 *
1010 *
1011 * <a name="time_dependent_navier_stokes.cc-InsIMEXInsIMEX"></a>
1012 * <h4>InsIMEX::InsIMEX</h4>
1013 *
1014 * @code
1015 *   template <int dim>
1016 *   InsIMEX<dim>::InsIMEX(parallel::distributed::Triangulation<dim> &tria)
1017 *   : viscosity(0.001),
1018 *   gamma(0.1),
1019 *   degree(1),
1020 *   triangulation(tria),
1021 *   fe(FE_Q<dim>(degree + 1), dim, FE_Q<dim>(degree), 1),
1022 *   dof_handler(triangulation),
1023 *   volume_quad_formula(degree + 2),
1024 *   face_quad_formula(degree + 2),
1025 *   mpi_communicator(MPI_COMM_WORLD),
1026 *   pcout(std::cout, Utilities::MPI::this_mpi_process(mpi_communicator) == 0),
1027 *   time(1e0, 1e-3, 1e-2, 1e-2),
1028 *   timer(
1029 *   mpi_communicator, pcout, TimerOutput::never, TimerOutput::wall_times)
1030 *   {
1031 *   }
1032 *  
1033 * @endcode
1034 *
1035 *
1036 * <a name="time_dependent_navier_stokes.cc-InsIMEXsetup_dofs"></a>
1037 * <h4>InsIMEX::setup_dofs</h4>
1038 *
1039 * @code
1040 *   template <int dim>
1041 *   void InsIMEX<dim>::setup_dofs()
1042 *   {
1043 * @endcode
1044 *
1045 * The first step is to associate DoFs with a given mesh.
1046 *
1047 * @code
1048 *   dof_handler.distribute_dofs(fe);
1049 * @endcode
1050 *
1051 * We renumber the components so that all velocity DoFs come before
1052 * the pressure DoFs, in order to split the solution vector into two blocks
1053 * which are accessed separately in the block preconditioner.
1054 *
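 * With block_component set to @f$[0,\dots,0,1]@f$ below, the renumbered
 * system has the familiar saddle-point block structure already used in the
 * preconditioner section above (up to the sign conventions of the assembly):
 * @f[
 * \begin{pmatrix} A & B^T \\ B & 0 \end{pmatrix}
 * \begin{pmatrix} U \\ P \end{pmatrix} =
 * \begin{pmatrix} F \\ 0 \end{pmatrix}.
 * @f]
 *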
1055 * @code
1056 *   DoFRenumbering::Cuthill_McKee(dof_handler);
1057 *   std::vector<unsigned int> block_component(dim + 1, 0);
1058 *   block_component[dim] = 1;
1059 *   DoFRenumbering::component_wise(dof_handler, block_component);
1060 *   dofs_per_block = DoFTools::count_dofs_per_fe_block(dof_handler, block_component);
1061 * @endcode
1062 *
1063 * Partitioning.
1064 *
1065 * @code
1066 *   unsigned int dof_u = dofs_per_block[0];
1067 *   unsigned int dof_p = dofs_per_block[1];
1068 *   owned_partitioning.resize(2);
1069 *   owned_partitioning[0] = dof_handler.locally_owned_dofs().get_view(0, dof_u);
1070 *   owned_partitioning[1] =
1071 *   dof_handler.locally_owned_dofs().get_view(dof_u, dof_u + dof_p);
1072 *   locally_relevant_dofs = DoFTools::extract_locally_relevant_dofs(dof_handler);
1073 *   relevant_partitioning.resize(2);
1074 *   relevant_partitioning[0] = locally_relevant_dofs.get_view(0, dof_u);
1075 *   relevant_partitioning[1] =
1076 *   locally_relevant_dofs.get_view(dof_u, dof_u + dof_p);
1077 *   pcout << " Number of active fluid cells: "
1078 *   << triangulation.n_global_active_cells() << std::endl
1079 *   << " Number of degrees of freedom: " << dof_handler.n_dofs() << " ("
1080 *   << dof_u << '+' << dof_p << ')' << std::endl;
1081 *   }
1082 *  
1083 * @endcode
1084 *
1085 *
1086 * <a name="time_dependent_navier_stokes.cc-InsIMEXmake_constraints"></a>
1087 * <h4>InsIMEX::make_constraints</h4>
1088 *
1089 * @code
1090 *   template <int dim>
1091 *   void InsIMEX<dim>::make_constraints()
1092 *   {
1093 * @endcode
1094 *
1095 * Because the equation is written in incremental form, two sets of
1096 * constraints are needed: nonzero constraints and zero constraints.
1097 *
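 * To spell this out: at the first time step the boundary condition
 * @f$u|_\Gamma = g@f$ must be imposed on the solution itself, hence the
 * nonzero constraints; at later steps the previous solution already
 * satisfies the boundary condition, so the increment satisfies
 * @f$\delta u|_\Gamma = 0@f$, hence the zero constraints.
 *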
1098 * @code
1099 *   nonzero_constraints.clear();
1100 *   zero_constraints.clear();
1101 *   nonzero_constraints.reinit(locally_relevant_dofs);
1102 *   zero_constraints.reinit(locally_relevant_dofs);
1103 *   DoFTools::make_hanging_node_constraints(dof_handler, nonzero_constraints);
1104 *   DoFTools::make_hanging_node_constraints(dof_handler, zero_constraints);
1105 *  
1106 * @endcode
1107 *
1108 * Apply Dirichlet boundary conditions on all boundaries except for the
1109 * outlet.
1110 *
1111 * @code
1112 *   std::vector<unsigned int> dirichlet_bc_ids;
1113 *   if (dim == 2)
1114 *   dirichlet_bc_ids = std::vector<unsigned int>{0, 2, 3, 4};
1115 *   else
1116 *   dirichlet_bc_ids = std::vector<unsigned int>{0, 2, 3, 4, 5, 6};
1117 *  
1118 *   const FEValuesExtractors::Vector velocities(0);
1119 *   for (auto id : dirichlet_bc_ids)
1120 *   {
1121 *   VectorTools::interpolate_boundary_values(dof_handler,
1122 *   id,
1123 *   BoundaryValues<dim>(),
1124 *   nonzero_constraints,
1125 *   fe.component_mask(velocities));
1126 *   VectorTools::interpolate_boundary_values(
1127 *   dof_handler,
1128 *   id,
1129 *   Functions::ZeroFunction<dim>(dim + 1),
1130 *   zero_constraints,
1131 *   fe.component_mask(velocities));
1132 *   }
1133 *   nonzero_constraints.close();
1134 *   zero_constraints.close();
1135 *   }
1136 *  
1137 * @endcode
1138 *
1139 *
1140 * <a name="time_dependent_navier_stokes.cc-InsIMEXinitialize_system"></a>
1141 * <h4>InsIMEX::initialize_system</h4>
1142 *
1143 * @code
1144 *   template <int dim>
1145 *   void InsIMEX<dim>::initialize_system()
1146 *   {
1147 *   preconditioner.reset();
1148 *   system_matrix.clear();
1149 *   mass_matrix.clear();
1150 *   mass_schur.clear();
1151 *  
1152 *   BlockDynamicSparsityPattern dsp(dofs_per_block, dofs_per_block);
1153 *   DoFTools::make_sparsity_pattern(dof_handler, dsp, nonzero_constraints);
1154 *   sparsity_pattern.copy_from(dsp);
1155 *   SparsityTools::distribute_sparsity_pattern(
1156 *   dsp,
1157 *   dof_handler.locally_owned_dofs(),
1158 *   mpi_communicator,
1159 *   locally_relevant_dofs);
1160 *  
1161 *   system_matrix.reinit(owned_partitioning, dsp, mpi_communicator);
1162 *   mass_matrix.reinit(owned_partitioning, dsp, mpi_communicator);
1163 *  
1164 * @endcode
1165 *
1166 * Only the @f$(1, 1)@f$ block in the mass Schur matrix is used.
1167 * We compute its sparsity pattern in advance:
1168 * the only nonzero block has the same sparsity pattern as @f$BB^T@f$.
1169 *
1170 * @code
1171 *   BlockDynamicSparsityPattern schur_dsp(dofs_per_block, dofs_per_block);
1172 *   schur_dsp.block(1, 1).compute_mmult_pattern(sparsity_pattern.block(1, 0),
1173 *   sparsity_pattern.block(0, 1));
1174 *   mass_schur.reinit(owned_partitioning, schur_dsp, mpi_communicator);
1175 *  
1176 * @endcode
1177 *
1178 * present_solution is ghosted because it is used in the
1179 * output and mesh refinement functions.
1180 *
1181 * @code
1182 *   present_solution.reinit(
1183 *   owned_partitioning, relevant_partitioning, mpi_communicator);
1184 * @endcode
1185 *
1186 * solution_increment is non-ghosted because the linear solver needs
1187 * a completely distributed vector.
1188 *
1189 * @code
1190 *   solution_increment.reinit(owned_partitioning, mpi_communicator);
1191 * @endcode
1192 *
1193 * system_rhs is non-ghosted because it is only used in the linear
1194 * solver and residual evaluation.
1195 *
1196 * @code
1197 *   system_rhs.reinit(owned_partitioning, mpi_communicator);
1198 *   }
1199 *  
1200 * @endcode
1201 *
1202 *
1203 * <a name="time_dependent_navier_stokes.cc-InsIMEXassemble"></a>
1204 * <h4>InsIMEX::assemble</h4>
1205 *
1206
1207 *
1208 * Assemble the system matrix, the mass matrix, and the RHS.
1209 * This function can be used to assemble the entire system or only the RHS.
1210 * An additional option determines whether nonzero
1211 * constraints or zero constraints should be used.
1212 * Note that we only need to assemble the LHS twice: once with the nonzero
1213 * constraints
1214 * and once with the zero constraints. But we must assemble the RHS at every
1215 * time step.
1216 *
1217 * @code
1218 *   template <int dim>
1219 *   void InsIMEX<dim>::assemble(bool use_nonzero_constraints,
1220 *   bool assemble_system)
1221 *   {
1222 *   TimerOutput::Scope timer_section(timer, "Assemble system");
1223 *  
1224 *   if (assemble_system)
1225 *   {
1226 *   system_matrix = 0;
1227 *   mass_matrix = 0;
1228 *   }
1229 *   system_rhs = 0;
1230 *  
1231 *   FEValues<dim> fe_values(fe,
1232 *   volume_quad_formula,
1233 *   update_values | update_quadrature_points |
1234 *   update_JxW_values | update_gradients);
1235 *   FEFaceValues<dim> fe_face_values(fe,
1236 *   face_quad_formula,
1237 *   update_values | update_normal_vectors |
1238 *   update_quadrature_points |
1239 *   update_JxW_values);
1240 *  
1241 *   const unsigned int dofs_per_cell = fe.dofs_per_cell;
1242 *   const unsigned int n_q_points = volume_quad_formula.size();
1243 *  
1244 *   const FEValuesExtractors::Vector velocities(0);
1245 *   const FEValuesExtractors::Scalar pressure(dim);
1246 *  
1247 *   FullMatrix<double> local_matrix(dofs_per_cell, dofs_per_cell);
1248 *   FullMatrix<double> local_mass_matrix(dofs_per_cell, dofs_per_cell);
1249 *   Vector<double> local_rhs(dofs_per_cell);
1250 *  
1251 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1252 *  
1253 *   std::vector<Tensor<1, dim>> current_velocity_values(n_q_points);
1254 *   std::vector<Tensor<2, dim>> current_velocity_gradients(n_q_points);
1255 *   std::vector<double> current_velocity_divergences(n_q_points);
1256 *   std::vector<double> current_pressure_values(n_q_points);
1257 *  
1258 *   std::vector<double> div_phi_u(dofs_per_cell);
1259 *   std::vector<Tensor<1, dim>> phi_u(dofs_per_cell);
1260 *   std::vector<Tensor<2, dim>> grad_phi_u(dofs_per_cell);
1261 *   std::vector<double> phi_p(dofs_per_cell);
1262 *  
1263 *   for (auto cell = dof_handler.begin_active(); cell != dof_handler.end();
1264 *   ++cell)
1265 *   {
1266 *   if (cell->is_locally_owned())
1267 *   {
1268 *   fe_values.reinit(cell);
1269 *  
1270 *   if (assemble_system)
1271 *   {
1272 *   local_matrix = 0;
1273 *   local_mass_matrix = 0;
1274 *   }
1275 *   local_rhs = 0;
1276 *  
1277 *   fe_values[velocities].get_function_values(present_solution,
1278 *   current_velocity_values);
1279 *  
1280 *   fe_values[velocities].get_function_gradients(
1281 *   present_solution, current_velocity_gradients);
1282 *  
1283 *   fe_values[velocities].get_function_divergences(
1284 *   present_solution, current_velocity_divergences);
1285 *  
1286 *   fe_values[pressure].get_function_values(present_solution,
1287 *   current_pressure_values);
1288 *  
1289 * @endcode
1290 *
1291 * Assemble the system matrix and the mass matrix simultaneously.
1292 * The mass matrix only uses the @f$(0, 0)@f$ and @f$(1, 1)@f$ blocks.
1293 *
1294 * @code
1295 *   for (unsigned int q = 0; q < n_q_points; ++q)
1296 *   {
1297 *   for (unsigned int k = 0; k < dofs_per_cell; ++k)
1298 *   {
1299 *   div_phi_u[k] = fe_values[velocities].divergence(k, q);
1300 *   grad_phi_u[k] = fe_values[velocities].gradient(k, q);
1301 *   phi_u[k] = fe_values[velocities].value(k, q);
1302 *   phi_p[k] = fe_values[pressure].value(k, q);
1303 *   }
1304 *  
1305 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1306 *   {
1307 *   if (assemble_system)
1308 *   {
1309 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1310 *   {
1311 *   local_matrix(i, j) +=
1312 *   (viscosity *
1313 *   scalar_product(grad_phi_u[j], grad_phi_u[i]) -
1314 *   div_phi_u[i] * phi_p[j] -
1315 *   phi_p[i] * div_phi_u[j] +
1316 *   gamma * div_phi_u[j] * div_phi_u[i] +
1317 *   phi_u[i] * phi_u[j] / time.get_delta_t()) *
1318 *   fe_values.JxW(q);
1319 *   local_mass_matrix(i, j) +=
1320 *   (phi_u[i] * phi_u[j] + phi_p[i] * phi_p[j]) *
1321 *   fe_values.JxW(q);
1322 *   }
1323 *   }
1324 *   local_rhs(i) -=
1325 *   (viscosity * scalar_product(current_velocity_gradients[q],
1326 *   grad_phi_u[i]) -
1327 *   current_velocity_divergences[q] * phi_p[i] -
1328 *   current_pressure_values[q] * div_phi_u[i] +
1329 *   gamma * current_velocity_divergences[q] * div_phi_u[i] +
1330 *   current_velocity_gradients[q] *
1331 *   current_velocity_values[q] * phi_u[i]) *
1332 *   fe_values.JxW(q);
1333 *   }
1334 *   }
1335 *  
1336 *   cell->get_dof_indices(local_dof_indices);
1337 *  
1338 *   const AffineConstraints<double> &constraints_used =
1339 *   use_nonzero_constraints ? nonzero_constraints : zero_constraints;
1340 *   if (assemble_system)
1341 *   {
1342 *   constraints_used.distribute_local_to_global(local_matrix,
1343 *   local_rhs,
1344 *   local_dof_indices,
1345 *   system_matrix,
1346 *   system_rhs);
1347 *   constraints_used.distribute_local_to_global(
1348 *   local_mass_matrix, local_dof_indices, mass_matrix);
1349 *   }
1350 *   else
1351 *   {
1352 *   constraints_used.distribute_local_to_global(
1353 *   local_rhs, local_dof_indices, system_rhs);
1354 *   }
1355 *   }
1356 *   }
1357 *  
1358 *   if (assemble_system)
1359 *   {
1360 *   system_matrix.compress(VectorOperation::add);
1361 *   mass_matrix.compress(VectorOperation::add);
1362 *   }
1363 *   system_rhs.compress(VectorOperation::add);
1364 *   }
1365 *  
1366 * @endcode
1367 *
1368 *
1369 * <a name="time_dependent_navier_stokes.cc-InsIMEXsolve"></a>
1370 * <h4>InsIMEX::solve</h4>
1371 * Solve the linear system using the FGMRES solver with the block preconditioner.
1372 * After solving the linear system, the same AffineConstraints object as used
1373 * in assembly must be used again to set the constrained values. (Because the
1374 * preconditioner involves inner iterative solves, it is not a fixed linear
1375 * operator, so a flexible GMRES variant is appropriate.) The second argument determines whether the block preconditioner should be rebuilt.
1376 *
1377 * @code
1378 *   template <int dim>
1379 *   std::pair<unsigned int, double>
1380 *   InsIMEX<dim>::solve(bool use_nonzero_constraints, bool assemble_system)
1381 *   {
1382 *   if (assemble_system)
1383 *   {
1384 *   preconditioner.reset(new BlockSchurPreconditioner(timer,
1385 *   gamma,
1386 *   viscosity,
1387 *   time.get_delta_t(),
1388 *   owned_partitioning,
1389 *   system_matrix,
1390 *   mass_matrix,
1391 *   mass_schur));
1392 *   }
1393 *  
1394 *   SolverControl solver_control(
1395 *   system_matrix.m(), 1e-8 * system_rhs.l2_norm(), true);
1396 * @endcode
1397 *
1398 * Because PETScWrappers::SolverGMRES only accepts a preconditioner
1399 * derived from PETScWrappers::PreconditionBase,
1400 * we use the deal.II SolverFGMRES instead.
1401 *
1402 * @code
1403 *   GrowingVectorMemory<PETScWrappers::MPI::BlockVector> vector_memory;
1404 *   SolverFGMRES<PETScWrappers::MPI::BlockVector> gmres(solver_control,
1405 *   vector_memory);
1406 *  
1407 * @endcode
1408 *
1409 * The solution vector must be non-ghosted
1410 *
1411 * @code
1412 *   gmres.solve(system_matrix, solution_increment, system_rhs, *preconditioner);
1413 *  
1414 *   const AffineConstraints<double> &constraints_used =
1415 *   use_nonzero_constraints ? nonzero_constraints : zero_constraints;
1416 *   constraints_used.distribute(solution_increment);
1417 *  
1418 *   return {solver_control.last_step(), solver_control.last_value()};
1419 *   }
1420 *  
1421 * @endcode
1422 *
1423 *
1424 * <a name="time_dependent_navier_stokes.cc-InsIMEXrun"></a>
1425 * <h4>InsIMEX::run</h4>
1426 *
1427 * @code
1428 *   template <int dim>
1429 *   void InsIMEX<dim>::run()
1430 *   {
1431 *   pcout << "Running with PETSc on "
1432 *   << Utilities::MPI::n_mpi_processes(mpi_communicator)
1433 *   << " MPI rank(s)..." << std::endl;
1434 *  
1436 *   setup_dofs();
1437 *   make_constraints();
1438 *   initialize_system();
1439 *  
1440 * @endcode
1441 *
1442 * Time loop.
1443 *
1444 * @code
1445 *   bool refined = false;
1446 *   while (time.end() - time.current() > 1e-12)
1447 *   {
1448 *   if (time.get_timestep() == 0)
1449 *   {
1450 *   output_results(0);
1451 *   }
1452 *   time.increment();
1453 *   std::cout.precision(6);
1454 *   std::cout.width(12);
1455 *   pcout << std::string(96, '*') << std::endl
1456 *   << "Time step = " << time.get_timestep()
1457 *   << ", at t = " << std::scientific << time.current() << std::endl;
1458 * @endcode
1459 *
1460 * Resetting
1461 *
1462 * @code
1463 *   solution_increment = 0;
1464 * @endcode
1465 *
1466 * Only use nonzero constraints at the very first time step
1467 *
1468 * @code
1469 *   bool apply_nonzero_constraints = (time.get_timestep() == 1);
1470 * @endcode
1471 *
1472 * We have to assemble the LHS for the first two time steps:
1473 * once using nonzero_constraints, once using zero_constraints,
1474 * as well as in the steps immediately after mesh refinement.
1475 *
1476 * @code
1477 *   bool assemble_system = (time.get_timestep() < 3 || refined);
1478 *   refined = false;
1479 *   assemble(apply_nonzero_constraints, assemble_system);
1480 *   auto state = solve(apply_nonzero_constraints, assemble_system);
1481 * @endcode
1482 *
1483 * Note that we have to use a non-ghosted vector to do the addition.
1484 *
1485 * @code
1486 *   PETScWrappers::MPI::BlockVector tmp;
1487 *   tmp.reinit(owned_partitioning, mpi_communicator);
1488 *   tmp = present_solution;
1489 *   tmp += solution_increment;
1490 *   present_solution = tmp;
1491 *   pcout << std::scientific << std::left << " GMRES_ITR = " << std::setw(3)
1492 *   << state.first << " GMRES_RES = " << state.second << std::endl;
1493 * @endcode
1494 *
1495 * Output
1496 *
1497 * @code
1498 *   if (time.time_to_output())
1499 *   {
1500 *   output_results(time.get_timestep());
1501 *   }
1502 *   if (time.time_to_refine())
1503 *   {
1504 *   refine_mesh(0, 4);
1505 *   refined = true;
1506 *   }
1507 *   }
1508 *   }
1509 *  
1510 * @endcode
1511 *
1512 *
1513 * <a name="time_dependent_navier_stokes.cc-InsIMEXoutput_result"></a>
1514 * <h4>InsIMEX::output_result</h4>
1515 *
1516
1517 *
1518 *
1519 * @code
1520 *   template <int dim>
1521 *   void InsIMEX<dim>::output_results(const unsigned int output_index) const
1522 *   {
1523 *   TimerOutput::Scope timer_section(timer, "Output results");
1524 *   pcout << "Writing results..." << std::endl;
1525 *   std::vector<std::string> solution_names(dim, "velocity");
1526 *   solution_names.push_back("pressure");
1527 *  
1528 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
1529 *   data_component_interpretation(
1530 *   dim, DataComponentInterpretation::component_is_part_of_vector);
1531 *   data_component_interpretation.push_back(
1532 *   DataComponentInterpretation::component_is_scalar);
1533 *   DataOut<dim> data_out;
1534 *   data_out.attach_dof_handler(dof_handler);
1535 * @endcode
1536 *
1537 * The vector to be output must be ghosted.
1538 *
1539 * @code
1540 *   data_out.add_data_vector(present_solution,
1541 *   solution_names,
1542 *   DataOut<dim>::type_dof_data,
1543 *   data_component_interpretation);
1544 *  
1545 * @endcode
1546 *
1547 * Partition
1548 *
1549 * @code
1550 *   Vector<float> subdomain(triangulation.n_active_cells());
1551 *   for (unsigned int i = 0; i < subdomain.size(); ++i)
1552 *   {
1553 *   subdomain(i) = triangulation.locally_owned_subdomain();
1554 *   }
1555 *   data_out.add_data_vector(subdomain, "subdomain");
1556 *  
1557 *   data_out.build_patches(degree + 1);
1558 *  
1559 *   std::string basename =
1560 *   "navierstokes" + Utilities::int_to_string(output_index, 6) + "-";
1561 *  
1562 *   std::string filename =
1563 *   basename +
1564 *   Utilities::int_to_string(Utilities::MPI::this_mpi_process(mpi_communicator), 4) +
1565 *   ".vtu";
1566 *  
1567 *   std::ofstream output(filename);
1568 *   data_out.write_vtu(output);
1569 *  
1570 *   static std::vector<std::pair<double, std::string>> times_and_names;
1571 *   if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
1572 *   {
1573 *   for (unsigned int i = 0;
1574 *   i < Utilities::MPI::n_mpi_processes(mpi_communicator);
1575 *   ++i)
1576 *   {
1577 *   times_and_names.push_back(
1578 *   {time.current(),
1579 *   basename + Utilities::int_to_string(i, 4) + ".vtu"});
1580 *   }
1581 *   std::ofstream pvd_output("navierstokes.pvd");
1582 *   DataOutBase::write_pvd_record(pvd_output, times_and_names);
1583 *   }
1584 *   }
1585 *  
1586 * @endcode
1587 *
1588 *
1589 * <a name="time_dependent_navier_stokes.cc-InsIMEXrefine_mesh"></a>
1590 * <h4>InsIMEX::refine_mesh</h4>
1591 *
1592
1593 *
1594 *
1595 * @code
1596 *   template <int dim>
1597 *   void InsIMEX<dim>::refine_mesh(const unsigned int min_grid_level,
1598 *   const unsigned int max_grid_level)
1599 *   {
1600 *   TimerOutput::Scope timer_section(timer, "Refine mesh");
1601 *   pcout << "Refining mesh..." << std::endl;
1602 *  
1603 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
1604 *   const FEValuesExtractors::Vector velocity(0);
1605 *   KellyErrorEstimator<dim>::estimate(dof_handler,
1606 *   face_quad_formula,
1607 *   {},
1608 *   present_solution,
1609 *   estimated_error_per_cell,
1610 *   fe.component_mask(velocity));
1611 *   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
1612 *   triangulation, estimated_error_per_cell, 0.6, 0.4);
1613 *   if (triangulation.n_levels() > max_grid_level)
1614 *   {
1615 *   for (auto cell = triangulation.begin_active(max_grid_level);
1616 *   cell != triangulation.end();
1617 *   ++cell)
1618 *   {
1619 *   cell->clear_refine_flag();
1620 *   }
1621 *   }
1622 *   for (auto cell = triangulation.begin_active(min_grid_level);
1623 *   cell != triangulation.end_active(min_grid_level);
1624 *   ++cell)
1625 *   {
1626 *   cell->clear_coarsen_flag();
1627 *   }
1628 *  
1629 * @endcode
1630 *
1631 * Prepare to transfer
1632 *
1633 * @code
1634 *   parallel::distributed::SolutionTransfer<dim,
1635 *   PETScWrappers::MPI::BlockVector>
1636 *   trans(dof_handler);
1637 *  
1638 *   triangulation.prepare_coarsening_and_refinement();
1639 *  
1640 *   trans.prepare_for_coarsening_and_refinement(present_solution);
1641 *  
1642 * @endcode
1643 *
1644 * Refine the mesh
1645 *
1646 * @code
1647 *   triangulation.execute_coarsening_and_refinement();
1648 *  
1649 * @endcode
1650 *
1651 * Reinitialize the system
1652 *
1653 * @code
1654 *   setup_dofs();
1655 *   make_constraints();
1656 *   initialize_system();
1657 *  
1658 * @endcode
1659 *
1660 * Transfer solution
1661 * Need a non-ghosted vector for interpolation
1662 *
1663 * @code
1664 *   PETScWrappers::MPI::BlockVector tmp(solution_increment);
1665 *   tmp = 0;
1666 *   trans.interpolate(tmp);
1667 *   present_solution = tmp;
1668 *   }
1669 *   }
1670 *  
1671 * @endcode
1672 *
1673 *
1674 * <a name="time_dependent_navier_stokes.cc-mainfunction"></a>
1675 * <h3>main function</h3>
1676 *
1677
1678 *
1679 *
1680 * @code
1681 *   int main(int argc, char *argv[])
1682 *   {
1683 *   try
1684 *   {
1685 *   using namespace dealii;
1686 *   using namespace fluid;
1687 *  
1688 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
1689 *   parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
1690 *   create_triangulation(tria);
1691 *   InsIMEX<2> flow(tria);
1692 *   flow.run();
1693 *   }
1694 *   catch (std::exception &exc)
1695 *   {
1696 *   std::cerr << std::endl
1697 *   << std::endl
1698 *   << "----------------------------------------------------"
1699 *   << std::endl;
1700 *   std::cerr << "Exception on processing: " << std::endl
1701 *   << exc.what() << std::endl
1702 *   << "Aborting!" << std::endl
1703 *   << "----------------------------------------------------"
1704 *   << std::endl;
1705 *   return 1;
1706 *   }
1707 *   catch (...)
1708 *   {
1709 *   std::cerr << std::endl
1710 *   << std::endl
1711 *   << "----------------------------------------------------"
1712 *   << std::endl;
1713 *   std::cerr << "Unknown exception!" << std::endl
1714 *   << "Aborting!" << std::endl
1715 *   << "----------------------------------------------------"
1716 *   << std::endl;
1717 *   return 1;
1718 *   }
1719 *   return 0;
1720 *   }
1721 * @endcode
1722
1723
1724*/