Reference documentation for deal.II version GIT relicensing-218-g1f873f3929 2024-03-28 15:00:02+00:00
step-48.h
1) const override
551 *   {
552 *   double t = this->get_time();
553 *  
554 *   const double m = 0.5;
555 *   const double c1 = 0.;
556 *   const double c2 = 0.;
557 *   const double factor =
558 *   (m / std::sqrt(1. - m * m) * std::sin(std::sqrt(1. - m * m) * t + c2));
559 *   double result = 1.;
560 *   for (unsigned int d = 0; d < dim; ++d)
561 *   result *= -4. * std::atan(factor / std::cosh(m * p[d] + c1));
562 *   return result;
563 *   }
564 *   };
565 *  
566 *  
567 *  
568 * @endcode
569 *
570 *
571 * <a name="step_48-SineGordonProblemclass"></a>
572 * <h3>SineGordonProblem class</h3>
573 *
574
575 *
576 * This is the main class that builds on the class in @ref step_25 "step-25". However, we
577 * replaced the SparseMatrix<double> class by the MatrixFree class to store
578 * the geometry data. Also, we use a distributed triangulation in this
579 * example.
580 *
581 * @code
582 *   template <int dim>
583 *   class SineGordonProblem
584 *   {
585 *   public:
586 *   SineGordonProblem();
587 *   void run();
588 *  
589 *   private:
590 *   ConditionalOStream pcout;
591 *  
592 *   void make_grid_and_dofs();
593 *   void output_results(const unsigned int timestep_number);
594 *  
595 *   #ifdef DEAL_II_WITH_P4EST
596 *   parallel::distributed::Triangulation<dim> triangulation;
597 *   #else
598 *   Triangulation<dim> triangulation;
599 *   #endif
600 *   FE_Q<dim> fe;
601 *   DoFHandler<dim> dof_handler;
602 *  
603 *   MappingQ1<dim> mapping;
604 *  
605 *   AffineConstraints<double> constraints;
606 *   IndexSet locally_relevant_dofs;
607 *  
608 *   MatrixFree<dim, double> matrix_free_data;
609 *  
610 *   LinearAlgebra::distributed::Vector<double> solution, old_solution,
611 *   old_old_solution;
612 *  
613 *   const unsigned int n_global_refinements;
614 *   double time, time_step;
615 *   const double final_time;
616 *   const double cfl_number;
617 *   const unsigned int output_timestep_skip;
618 *   };
619 *  
620 *  
621 * @endcode
622 *
623 *
624 * <a name="step_48-SineGordonProblemSineGordonProblem"></a>
625 * <h4>SineGordonProblem::SineGordonProblem</h4>
626 *
627
628 *
629 * This is the constructor of the SineGordonProblem class. The time interval
630 * and time step size are defined here. Moreover, we use the degree of the
631 * finite element that we defined at the top of the program to initialize a
632 * FE_Q finite element based on Gauss-Lobatto support points. These points
633 * are convenient because in conjunction with a QGaussLobatto quadrature
634 * rule of the same order they give a diagonal mass matrix without
635 * compromising accuracy too much (note that the integration is inexact,
636 * though), see also the discussion in the introduction. Note that FE_Q
637 * selects the Gauss-Lobatto nodal points by default due to their improved
638 * conditioning versus equidistant points. To make things more explicit, we
639 * state the selection of the nodal points nonetheless.
640 *
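 *
 * A short way to see why this gives a diagonal mass matrix: since the
 * quadrature points @f$\xi_q@f$ of QGaussLobatto coincide with the nodal
 * points of the Lagrange basis functions @f$\varphi_i@f$, we have
 * @f$\varphi_i(\xi_q)=\delta_{iq}@f$ and therefore
 * @f[
 *   M_{ij} = \int \varphi_i \varphi_j \, dx \approx \sum_q w_q \,
 *   \varphi_i(\xi_q) \, \varphi_j(\xi_q) = w_i \, \delta_{ij},
 * @f]
 * so only the (positive) quadrature weights @f$w_i@f$ remain on the diagonal.
 *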
641 * @code
642 *   template <int dim>
643 *   SineGordonProblem<dim>::SineGordonProblem()
644 *   : pcout(std::cout, Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
645 *   #ifdef DEAL_II_WITH_P4EST
646 *   , triangulation(MPI_COMM_WORLD)
647 *   #endif
648 *   , fe(QGaussLobatto<1>(fe_degree + 1))
649 *   , dof_handler(triangulation)
650 *   , n_global_refinements(10 - 2 * dim)
651 *   , time(-10)
652 *   , time_step(10.)
653 *   , final_time(10.)
654 *   , cfl_number(.1 / fe_degree)
655 *   , output_timestep_skip(200)
656 *   {}
657 *  
658 * @endcode
659 *
660 *
661 * <a name="step_48-SineGordonProblemmake_grid_and_dofs"></a>
662 * <h4>SineGordonProblem::make_grid_and_dofs</h4>
663 *
664
665 *
666 * As in @ref step_25 "step-25", this function sets up a cube grid in <code>dim</code>
667 * dimensions of extent @f$[-15,15]@f$. We refine the mesh more in the center of
668 * the domain since the solution is concentrated there. We first refine all
669 * cells whose center is within a radius of 11, and then refine once more
670 * for a radius of 6. This simple ad hoc refinement could be done better by
671 * adapting the mesh to the solution using error estimators during the time
672 * stepping as done in other example programs, and using
673 * parallel::distributed::SolutionTransfer to transfer the solution to the
674 * new mesh.
675 *
676 * @code
677 *   template <int dim>
678 *   void SineGordonProblem<dim>::make_grid_and_dofs()
679 *   {
680 *   GridGenerator::hyper_cube(triangulation, -15, 15);
681 *   triangulation.refine_global(n_global_refinements);
682 *   {
683 *   for (const auto &cell : triangulation.active_cell_iterators())
684 *   if (cell->is_locally_owned())
685 *   if (cell->center().norm() < 11)
686 *   cell->set_refine_flag();
687 *   triangulation.execute_coarsening_and_refinement();
688 *  
689 *   for (const auto &cell : triangulation.active_cell_iterators())
690 *   if (cell->is_locally_owned())
691 *   if (cell->center().norm() < 6)
692 *   cell->set_refine_flag();
693 *   triangulation.execute_coarsening_and_refinement();
694 *   }
695 *  
696 *   pcout << " Number of global active cells: "
697 *   << triangulation.n_global_active_cells() << std::endl;
698 *  
699 *   dof_handler.distribute_dofs(fe);
700 *  
701 *   pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
702 *   << std::endl;
703 *  
704 *  
705 * @endcode
706 *
707 * We generate hanging node constraints for ensuring continuity of the
708 * solution. As in @ref step_40 "step-40", we need to equip the constraint matrix with
709 * the IndexSet of locally active and locally relevant degrees of freedom
710 * to avoid it consuming too much memory for big problems. Next, the
711 * MatrixFree object for the problem is set up. Note that we specify a
712 * particular scheme for shared-memory parallelization (hence one would
713 * use multithreading for intra-node parallelism and not MPI; we here
714 * choose the standard option &mdash; if we wanted to disable shared
715 * memory parallelization even in case where there is more than one TBB
716 * thread available in the program, we would choose
717 * MatrixFree::AdditionalData::TasksParallelScheme::none). Also note that,
718 * instead of using the default QGauss quadrature argument, we supply a
719 * QGaussLobatto quadrature formula to enable the desired
720 * behavior. Finally, three solution vectors are initialized. MatrixFree
721 * expects a particular layout of ghost indices (as it handles index
722 * access in MPI-local numbers that need to match between the vector and
723 * MatrixFree), so we just ask it to initialize the vectors to be sure the
724 * ghost exchange is properly handled.
725 *
726 * @code
727 *   locally_relevant_dofs =
728 *   DoFTools::extract_locally_relevant_dofs(dof_handler);
729 *   constraints.clear();
730 *   constraints.reinit(dof_handler.locally_owned_dofs(), locally_relevant_dofs);
731 *   DoFTools::make_hanging_node_constraints(dof_handler, constraints);
732 *   constraints.close();
733 *  
734 *   typename MatrixFree<dim>::AdditionalData additional_data;
735 *   additional_data.tasks_parallel_scheme =
736 *   MatrixFree<dim>::AdditionalData::TasksParallelScheme::partition_partition;
737 *  
738 *   matrix_free_data.reinit(mapping,
739 *   dof_handler,
740 *   constraints,
741 *   QGaussLobatto<1>(fe_degree + 1),
742 *   additional_data);
743 *  
744 *   matrix_free_data.initialize_dof_vector(solution);
745 *   old_solution.reinit(solution);
746 *   old_old_solution.reinit(solution);
747 *   }
748 *  
749 *  
750 *  
751 * @endcode
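 *
 * As an aside, if one wanted to disable the shared-memory parallelization
 * discussed above, only the scheme assignment would change. A minimal sketch
 * (not part of the tutorial program itself):
 *
 * @code
 *   typename MatrixFree<dim>::AdditionalData serial_data;
 *   serial_data.tasks_parallel_scheme =
 *     MatrixFree<dim>::AdditionalData::TasksParallelScheme::none;
 *   // then pass serial_data instead of additional_data to reinit()
 * @endcode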
752 *
753 *
754 * <a name="step_48-SineGordonProblemoutput_results"></a>
755 * <h4>SineGordonProblem::output_results</h4>
756 *
757
758 *
759 * This function prints the norm of the solution and writes the solution
760 * vector to a file. Computing the norm is standard (except for the fact that we need
761 * to accumulate the norms over all processors for the parallel grid, which
762 * we do via the VectorTools::compute_global_error() function), and writing the
763 * file is similar to what we did in @ref step_40 "step-40" or @ref step_37 "step-37". Note that we can
764 * use the same vector for output as the one used during computations: The
765 * vectors in the matrix-free framework always provide full information on
766 * all locally owned cells (this is what is needed in the local evaluations,
767 * too), including ghost vector entries on these cells. This is the only
768 * data that is needed in the VectorTools::integrate_difference() function
769 * as well as in DataOut. The only action to take at this point is to make
770 * sure that the vector updates its ghost values before we read from
771 * them, and to reset ghost values once done. This is a feature present only
772 * in the LinearAlgebra::distributed::Vector class. Distributed vectors with
773 * PETSc and Trilinos, on the other hand, need to be copied to special
774 * vectors including ghost values (see the relevant section in @ref step_40 "step-40"). If
775 * we also wanted to access all degrees of freedom on ghost cells (e.g. when
776 * computing error estimators that use the jump of solution over cell
777 * boundaries), we would need more information and create a vector
778 * initialized with locally relevant dofs just as in @ref step_40 "step-40". Observe also
779 * that we need to distribute constraints for output - they are not filled
780 * during computations (rather, they are interpolated on the fly in the
781 * matrix-free method FEEvaluation::read_dof_values()).
782 *
783 * @code
784 *   template <int dim>
785 *   void
786 *   SineGordonProblem<dim>::output_results(const unsigned int timestep_number)
787 *   {
788 *   constraints.distribute(solution);
789 *  
790 *   Vector<float> norm_per_cell(triangulation.n_active_cells());
791 *   solution.update_ghost_values();
792 *   VectorTools::integrate_difference(mapping,
793 *   dof_handler,
794 *   solution,
795 *   Functions::ZeroFunction<dim>(),
796 *   norm_per_cell,
797 *   QGauss<dim>(fe_degree + 1),
798 *   VectorTools::L2_norm);
799 *   const double solution_norm =
800 *   VectorTools::compute_global_error(triangulation,
801 *   norm_per_cell,
802 *   VectorTools::L2_norm);
803 *  
804 *   pcout << " Time:" << std::setw(8) << std::setprecision(3) << time
805 *   << ", solution norm: " << std::setprecision(5) << std::setw(7)
806 *   << solution_norm << std::endl;
807 *  
808 *   DataOut<dim> data_out;
809 *  
810 *   data_out.attach_dof_handler(dof_handler);
811 *   data_out.add_data_vector(solution, "solution");
812 *   data_out.build_patches(mapping);
813 *  
814 *   data_out.write_vtu_with_pvtu_record(
815 *   "./", "solution", timestep_number, MPI_COMM_WORLD, 3);
816 *  
817 *   solution.zero_out_ghost_values();
818 *   }
819 *  
820 *  
821 * @endcode
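 *
 * Stripped of the output machinery, the ghost-value discipline used above
 * boils down to the following pattern (a condensed sketch, not additional
 * program code):
 *
 * @code
 *   solution.update_ghost_values();   // make ghost entries readable
 *   // ... read-only accesses such as integrate_difference() and DataOut ...
 *   solution.zero_out_ghost_values(); // return to the non-ghosted state
 * @endcode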
822 *
823 *
824 * <a name="step_48-SineGordonProblemrun"></a>
825 * <h4>SineGordonProblem::run</h4>
826 *
827
828 *
829 * This function is called by the main function and steps into the
830 * subroutines of the class.
831 *
832
833 *
834 * After printing some information about the parallel setup, the first
835 * action is to set up the grid and the cell operator. Then, the time step
836 * is computed from the CFL number given in the constructor and the finest
837 * mesh size. The finest mesh size is computed as the diameter of the last
838 * cell in the triangulation, which is the last cell on the finest level of
839 * the mesh. This is only possible for meshes where all elements on a level
840 * have the same size; otherwise, one needs to loop over all cells. Note
841 * that we need to query all the processors for their finest cell since
842 * not all processors might hold a region where the mesh is at the finest
843 * level. Then, we readjust the time step a little to hit the final time
844 * exactly.
845 *
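 *
 * In formulas, the readjustment in the code below amounts to
 * @f[
 *   \Delta t = \frac{T - t_0}{\left\lfloor (T - t_0) / \Delta t_{\mathrm{CFL}} \right\rfloor},
 * @f]
 * i.e., the CFL-based step size @f$\Delta t_{\mathrm{CFL}}@f$ is slightly
 * enlarged so that an integer number of steps ends exactly at the final
 * time @f$T@f$.
 *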
846 * @code
847 *   template <int dim>
848 *   void SineGordonProblem<dim>::run()
849 *   {
850 *   {
851 *   pcout << "Number of MPI ranks: "
852 *   << Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) << std::endl;
853 *   pcout << "Number of threads on each rank: "
854 *   << MultithreadInfo::n_threads() << std::endl;
855 *   const unsigned int n_vect_doubles = VectorizedArray<double>::size();
856 *   const unsigned int n_vect_bits = 8 * sizeof(double) * n_vect_doubles;
857 *   pcout << "Vectorization over " << n_vect_doubles
858 *   << " doubles = " << n_vect_bits << " bits ("
860 *   << std::endl
861 *   << std::endl;
862 *   }
863 *   make_grid_and_dofs();
864 *  
865 *   const double local_min_cell_diameter =
866 *   triangulation.last()->diameter() / std::sqrt(dim);
867 *   const double global_min_cell_diameter =
868 *   -Utilities::MPI::max(-local_min_cell_diameter, MPI_COMM_WORLD);
869 *   time_step = cfl_number * global_min_cell_diameter;
870 *   time_step = (final_time - time) / (int((final_time - time) / time_step));
871 *   pcout << " Time step size: " << time_step
872 *   << ", finest cell: " << global_min_cell_diameter << std::endl
873 *   << std::endl;
874 *  
875 * @endcode
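 *
 * As a side remark, the negation trick used above to compute a global
 * minimum through Utilities::MPI::max() could equivalently be written with
 * the minimum reduction directly (a sketch):
 *
 * @code
 *   const double global_min_cell_diameter =
 *     Utilities::MPI::min(local_min_cell_diameter, MPI_COMM_WORLD);
 * @endcode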
876 *
877 * Next the initial value is set. Since we have a two-step time stepping
878 * method, we also need a value of the solution at time-time_step. For
879 * accurate results, one would need to compute this from the time
880 * derivative of the solution at initial time, but here we ignore this
881 * difficulty and just set it to the initial value function at that
882 * artificial time.
883 *
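 *
 * Recall that the update applied in each step is the standard second-order
 * leap-frog scheme which, written schematically in terms of the (diagonal)
 * mass matrix @f$M@f$, the stiffness matrix @f$A@f$, and the nonlinearity
 * @f$S(u)_i = \int \sin(u)\varphi_i \, dx@f$, reads
 * @f[
 *   M u^{n+1} = M\left(2 u^n - u^{n-1}\right)
 *   - \Delta t^2 \left(A u^n + S(u^n)\right).
 * @f]
 * This is why two previous solution vectors are needed to start the
 * iteration.
 *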
884
885 *
886 * We then go on by writing the initial state to file and collecting
887 * the two starting solutions in a <tt>std::vector</tt> of pointers that
888 * get later consumed by the SineGordonOperation::apply() function. Next,
889 * an instance of the <code>SineGordonOperation</code> class based on
890 * the finite element degree specified at the top of this file is set up.
891 *
892 * @code
893 *   VectorTools::interpolate(mapping,
894 *   dof_handler,
895 *   InitialCondition<dim>(1, time),
896 *   solution);
897 *   VectorTools::interpolate(mapping,
898 *   dof_handler,
899 *   InitialCondition<dim>(1, time - time_step),
900 *   old_solution);
901 *   output_results(0);
902 *  
903 *   std::vector<LinearAlgebra::distributed::Vector<double> *>
904 *   previous_solutions({&old_solution, &old_old_solution});
905 *  
906 *   SineGordonOperation<dim, fe_degree> sine_gordon_op(matrix_free_data,
907 *   time_step);
908 *  
909 * @endcode
910 *
911 * Now loop over the time steps. In each iteration, we shift the solution
912 * vectors by one and call the `apply` function of the
913 * `SineGordonOperation` class. Then, we write the solution to a file. We
914 * clock the wall times for the computational time needed as well as the
915 * time needed to create the output and report the numbers when the time
916 * stepping is finished.
917 *
918
919 *
920 * Note how this shift is implemented: We simply call the swap method on
921 * the two vectors, which exchanges only a few pointers; copying the data
922 * around would be a relatively expensive operation within an explicit time
923 * stepping method. Let us see what happens in more detail: First, we
924 * exchange <code>old_solution</code> with <code>old_old_solution</code>,
925 * which means that <code>old_old_solution</code> gets
926 * <code>old_solution</code>, which is what we expect. Similarly,
927 * <code>old_solution</code> gets the content from <code>solution</code>
928 * in the next step. After this, <code>solution</code> holds
929 * <code>old_old_solution</code>, but that will be overwritten during this
930 * step.
931 *
932 * @code
933 *   unsigned int timestep_number = 1;
934 *  
935 *   Timer timer;
936 *   double wtime = 0;
937 *   double output_time = 0;
938 *   for (time += time_step; time <= final_time;
939 *   time += time_step, ++timestep_number)
940 *   {
941 *   timer.restart();
942 *   old_old_solution.swap(old_solution);
943 *   old_solution.swap(solution);
944 *   sine_gordon_op.apply(solution, previous_solutions);
945 *   wtime += timer.wall_time();
946 *  
947 *   timer.restart();
948 *   if (timestep_number % output_timestep_skip == 0)
949 *   output_results(timestep_number / output_timestep_skip);
950 *  
951 *   output_time += timer.wall_time();
952 *   }
953 *   timer.restart();
954 *   output_results(timestep_number / output_timestep_skip + 1);
955 *   output_time += timer.wall_time();
956 *  
957 *   pcout << std::endl
958 *   << " Performed " << timestep_number << " time steps." << std::endl;
959 *  
960 *   pcout << " Average wallclock time per time step: "
961 *   << wtime / timestep_number << 's' << std::endl;
962 *  
963 *   pcout << " Spent " << output_time << "s on output and " << wtime
964 *   << "s on computations." << std::endl;
965 *   }
966 *   } // namespace Step48
967 *  
968 *  
969 *  
970 * @endcode
971 *
972 *
973 * <a name="step_48-Thecodemaincodefunction"></a>
974 * <h3>The <code>main</code> function</h3>
975 *
976
977 *
978 * As in @ref step_40 "step-40", we initialize MPI at the start of the program. Since we will
979 * in general mix MPI parallelization with threads, we also set the third
980 * argument in MPI_InitFinalize that controls the number of threads to an
981 * invalid number, which means that the TBB library chooses the number of
982 * threads automatically, typically to the number of available cores in the
983 * system. As an alternative, you can also set this number manually if you
984 * want to set a specific number of threads (e.g. when MPI-only is required).
985 *
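 *
 * For instance, restricting each MPI rank to a single thread (the MPI-only
 * mode mentioned above) would only require changing the last constructor
 * argument; a minimal sketch:
 *
 * @code
 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
 * @endcode
 *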
986 * @code
987 *   int main(int argc, char **argv)
988 *   {
989 *   using namespace Step48;
990 *   using namespace dealii;
991 *  
992 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(
993 *   argc, argv, numbers::invalid_unsigned_int);
994 *  
995 *   try
996 *   {
997 *   SineGordonProblem<dimension> sg_problem;
998 *   sg_problem.run();
999 *   }
1000 *   catch (std::exception &exc)
1001 *   {
1002 *   std::cerr << std::endl
1003 *   << std::endl
1004 *   << "----------------------------------------------------"
1005 *   << std::endl;
1006 *   std::cerr << "Exception on processing: " << std::endl
1007 *   << exc.what() << std::endl
1008 *   << "Aborting!" << std::endl
1009 *   << "----------------------------------------------------"
1010 *   << std::endl;
1011 *  
1012 *   return 1;
1013 *   }
1014 *   catch (...)
1015 *   {
1016 *   std::cerr << std::endl
1017 *   << std::endl
1018 *   << "----------------------------------------------------"
1019 *   << std::endl;
1020 *   std::cerr << "Unknown exception!" << std::endl
1021 *   << "Aborting!" << std::endl
1022 *   << "----------------------------------------------------"
1023 *   << std::endl;
1024 *   return 1;
1025 *   }
1026 *  
1027 *   return 0;
1028 *   }
1029 * @endcode
1030<a name="step_48-Results"></a><h1>Results</h1>
1031
1032
1033<a name="step_48-Comparisonwithasparsematrix"></a><h3>Comparison with a sparse matrix</h3>
1034
1035
1036In order to demonstrate the gain in using the MatrixFree class instead of
1037the standard <code>deal.II</code> assembly routines for evaluating the
1038information from old time steps, we study a simple serial run of the code on a
1039nonadaptive mesh. Since much time is spent on evaluating the sine function, we
1040show the numbers not only for the full sine-Gordon equation but also for the
1041wave equation (i.e., the sine-Gordon equation with the sine term dropped). We use
1042both second and fourth order elements. The results are summarized in the
1043following table.
1044
1045<table align="center" class="doxtable">
1046 <tr>
1047 <th>&nbsp;</th>
1048 <th colspan="3">wave equation</th>
1049 <th colspan="2">sine-Gordon</th>
1050 </tr>
1051 <tr>
1052 <th>&nbsp;</th>
1053 <th>MF</th>
1054 <th>SpMV</th>
1055 <th>dealii</th>
1056 <th>MF</th>
1057 <th>dealii</th>
1058 </tr>
1059 <tr>
1060 <td>2D, @f$\mathcal{Q}_2@f$</td>
1061 <td align="right"> 0.0106</td>
1062 <td align="right"> 0.00971</td>
1063 <td align="right"> 0.109</td>
1064 <td align="right"> 0.0243</td>
1065 <td align="right"> 0.124</td>
1066 </tr>
1067 <tr>
1068 <td>2D, @f$\mathcal{Q}_4@f$</td>
1069 <td align="right"> 0.0328</td>
1070 <td align="right"> 0.0706</td>
1071 <td align="right"> 0.528</td>
1072 <td align="right"> 0.0714</td>
1073 <td align="right"> 0.502</td>
1074 </tr>
1075 <tr>
1076 <td>3D, @f$\mathcal{Q}_2@f$</td>
1077 <td align="right"> 0.0151</td>
1078 <td align="right"> 0.0320</td>
1079 <td align="right"> 0.331</td>
1080 <td align="right"> 0.0376</td>
1081 <td align="right"> 0.364</td>
1082 </tr>
1083 <tr>
1084 <td>3D, @f$\mathcal{Q}_4@f$</td>
1085 <td align="right"> 0.0918</td>
1086 <td align="right"> 0.844</td>
1087 <td align="right"> 6.83</td>
1088 <td align="right"> 0.194</td>
1089 <td align="right"> 6.95</td>
1090 </tr>
1091</table>
1092
1093It is apparent that the matrix-free code outperforms the standard assembly
1094routines in deal.II by far. In 3D and for fourth order elements, one operator
1095evaluation is also almost ten times as fast as a sparse matrix-vector
1096product.
1097
1098<a name="step_48-Parallelrunin2Dand3D"></a><h3>Parallel run in 2D and 3D</h3>
1099
1100
1101We start with the program output obtained on a workstation with 12 cores / 24
1102threads (one Intel Xeon E5-2687W v4 CPU running at 3.2 GHz, hyperthreading
1103enabled), running the program in release mode:
1104@code
1105\$ make run
1106Number of MPI ranks: 1
1107Number of threads on each rank: 24
1108Vectorization over 4 doubles = 256 bits (AVX)
1109
1110 Number of global active cells: 15412
1111 Number of degrees of freedom: 249065
1112 Time step size: 0.00292997, finest cell: 0.117188
1113
1114 Time: -10, solution norm: 9.5599
1115 Time: -9.41, solution norm: 17.678
1116 Time: -8.83, solution norm: 23.504
1117 Time: -8.24, solution norm: 27.5
1118 Time: -7.66, solution norm: 29.513
1119 Time: -7.07, solution norm: 29.364
1120 Time: -6.48, solution norm: 27.23
1121 Time: -5.9, solution norm: 23.527
1122 Time: -5.31, solution norm: 18.439
1123 Time: -4.73, solution norm: 11.935
1124 Time: -4.14, solution norm: 5.5284
1125 Time: -3.55, solution norm: 8.0354
1126 Time: -2.97, solution norm: 14.707
1127 Time: -2.38, solution norm: 20
1128 Time: -1.8, solution norm: 22.834
1129 Time: -1.21, solution norm: 22.771
1130 Time: -0.624, solution norm: 20.488
1131 Time: -0.0381, solution norm: 16.697
1132 Time: 0.548, solution norm: 11.221
1133 Time: 1.13, solution norm: 5.3912
1134 Time: 1.72, solution norm: 8.4528
1135 Time: 2.31, solution norm: 14.335
1136 Time: 2.89, solution norm: 18.555
1137 Time: 3.48, solution norm: 20.894
1138 Time: 4.06, solution norm: 21.305
1139 Time: 4.65, solution norm: 19.903
1140 Time: 5.24, solution norm: 16.864
1141 Time: 5.82, solution norm: 12.223
1142 Time: 6.41, solution norm: 6.758
1143 Time: 6.99, solution norm: 7.2423
1144 Time: 7.58, solution norm: 12.888
1145 Time: 8.17, solution norm: 17.273
1146 Time: 8.75, solution norm: 19.654
1147 Time: 9.34, solution norm: 19.838
1148 Time: 9.92, solution norm: 17.964
1149 Time: 10, solution norm: 17.595
1150
1151 Performed 6826 time steps.
1152 Average wallclock time per time step: 0.0013453s
1153 Spent 14.976s on output and 9.1831s on computations.
1154@endcode
1155
1156In 3D, the respective output looks like
1157@code
1158\$ make run
1159Number of MPI ranks: 1
1160Number of threads on each rank: 24
1161Vectorization over 4 doubles = 256 bits (AVX)
1162
1163 Number of global active cells: 17592
1164 Number of degrees of freedom: 1193881
1165 Time step size: 0.0117233, finest cell: 0.46875
1166
1167 Time: -10, solution norm: 29.558
1168 Time: -7.66, solution norm: 129.13
1169 Time: -5.31, solution norm: 67.753
1170 Time: -2.97, solution norm: 79.245
1171 Time: -0.621, solution norm: 123.52
1172 Time: 1.72, solution norm: 43.525
1173 Time: 4.07, solution norm: 93.285
1174 Time: 6.41, solution norm: 97.722
1175 Time: 8.76, solution norm: 36.734
1176 Time: 10, solution norm: 94.115
1177
1178 Performed 1706 time steps.
1179 Average wallclock time per time step: 0.0084542s
1180 Spent 16.766s on output and 14.423s on computations.
1181@endcode
1182
1183It takes 0.008 seconds for one time step with more than a million
1184degrees of freedom (note that we would need many processors to reach such
1185numbers when solving linear systems).
1186
1187If we replace the thread-parallelization by a pure MPI parallelization, the
1188timings change into:
1189@code
1190\$ mpirun -n 24 ./step-48
1191Number of MPI ranks: 24
1192Number of threads on each rank: 1
1193Vectorization over 4 doubles = 256 bits (AVX)
1194...
1195 Performed 1706 time steps.
1196 Average wallclock time per time step: 0.0051747s
1197 Spent 2.0535s on output and 8.828s on computations.
1198@endcode
1199
1200We observe a dramatic speedup for the output (which makes sense, given that
1201most of the output code is not parallelized via threads, whereas it is for
1202MPI), but less than the theoretical factor of 12 we would expect from the
1203parallelism. More interestingly, the computations also get faster when
1204switching from the threads-only variant to the MPI-only variant. This is a
1205general observation for the MatrixFree framework (as of updating this data in
12062019). The main reason is that the decisions regarding work on conflicting
1207cell batches made to enable execution in parallel are overly pessimistic:
1208While they ensure that no work on neighboring cells is done on different
1209threads at the same time, this conservative setting implies that data from
1210neighboring cells is also evicted from caches by the time neighbors get
1211touched. Furthermore, the current scheme is not able to provide a constant
1212load for all 24 threads for the given mesh with 17,592 cells.
1213
1214The current program also allows mixing MPI parallelization with thread
1215parallelization. This is most beneficial when running programs on clusters
1216with multiple nodes, using MPI for the inter-node parallelization and threads
1217for the intra-node parallelization. On the workstation used above, we can run
1218threads in the hyperthreading region (i.e., using 2 threads for each of the 12
1219MPI ranks). An important setting for mixing MPI with threads is to ensure
1220proper pinning of tasks to CPUs. On many clusters the placement happens
1221automatically via the `mpirun/mpiexec` environment, or it can be set manually.
1222Here, we simply report the run times of the plain version of the
1223program (noting that things could be improved towards the timings of the
1224MPI-only program when proper pinning is done):
1225@code
1226\$ mpirun -n 12 ./step-48
1227Number of MPI ranks: 12
1228Number of threads on each rank: 2
1229Vectorization over 4 doubles = 256 bits (AVX)
1230...
1231 Performed 1706 time steps.
1232 Average wallclock time per time step: 0.0056651s
1233 Spent 2.5175s on output and 9.6646s on computations.
1234@endcode
1235
1236
1237
1238<a name="step_48-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
1239
1240
1241There are several things in this program that could be improved to make it
1242even more efficient (besides improved boundary conditions and physical
1243stuff as discussed in @ref step_25 "step-25"):
1244
1245<ul> <li> <b>Faster evaluation of sine terms:</b> As becomes obvious
1246 from the comparison of the plain wave equation and the sine-Gordon
1247 equation above, the evaluation of the sine terms dominates the total
1248 time for the finite element operator application. There are a few
1249 reasons for this: Firstly, the deal.II sine computation of a
1250 VectorizedArray field is not vectorized (as opposed to the rest of
1251 the operator application). This could be cured by handing the sine
1252 computation to a library with vectorized sine computations like
1253 Intel's Math Kernel Library (MKL); see the sketch after this list. By using
1254 the function <code>vdSin</code> in MKL, the program uses half the computing time
1255 in 2D and 40 percent less time in 3D. On the other hand, the sine
1256 computation is structurally much more complicated than the simple
1257 arithmetic operations like additions and multiplications in the rest
1258 of the local operation.
1259
1260 <li> <b>Higher order time stepping:</b> While the implementation allows for
1261 arbitrary order in the spatial part (by adjusting the degree of the finite
1262 element), the time stepping scheme is a standard second-order leap-frog
1263 scheme. Since solutions in wave propagation problems are usually very
1264 smooth, the error is likely dominated by the time stepping part. Of course,
1265 this could be cured by using smaller time steps (at a fixed spatial
1266 resolution), but it would be more efficient to use higher order time
1267 stepping as well. While it would be straightforward to do so for a
1268 first-order system (use some Runge&ndash;Kutta scheme of higher order,
1269 probably combined with adaptive time step selection like the <a
1270 href="http://en.wikipedia.org/wiki/Dormand%E2%80%93Prince_method">Dormand&ndash;Prince
1271 method</a>), it is more challenging for the second-order formulation. At
1272 least in the finite difference community, people usually use the PDE to find
1273 spatial correction terms that improve the temporal error.
1274
1275</ul>
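
As an illustration of the first item in the list above, a sketch of how the
sine evaluation of a VectorizedArray could be handed to MKL's vectorized
<code>vdSin</code> routine (a hypothetical helper, assuming MKL is available
and linked; this is not part of the tutorial program):

@code
#include <deal.II/base/vectorization.h>
#include <mkl.h>

dealii::VectorizedArray<double>
vectorized_sin(const dealii::VectorizedArray<double> &x)
{
  dealii::VectorizedArray<double> result;
  // The SIMD lanes of a VectorizedArray are stored contiguously, so all of
  // them can be handed to MKL's vector math routine in a single call.
  vdSin(dealii::VectorizedArray<double>::size(), &x[0], &result[0]);
  return result;
}
@endcode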
1276 *
1277 *
1278<a name="step_48-PlainProg"></a>
1279<h1> The plain program</h1>
1280@include "step-48.cc"
1281*/