step-48.h
548  * virtual double value(const Point<dim> &p,
549  * const unsigned int /*component*/ = 0) const override
550  * {
551  * double t = this->get_time();
552  *
553  * const double m = 0.5;
554  * const double c1 = 0.;
555  * const double c2 = 0.;
556  * const double factor =
557  * (m / std::sqrt(1. - m * m) * std::sin(std::sqrt(1. - m * m) * t + c2));
558  * double result = 1.;
559  * for (unsigned int d = 0; d < dim; ++d)
560  * result *= -4. * std::atan(factor / std::cosh(m * p[d] + c1));
561  * return result;
562  * }
563  * };
564  *
565  *
566  *
567  * @endcode
568  *
569  *
570  * <a name="SineGordonProblemclass"></a>
571  * <h3>SineGordonProblem class</h3>
572  *
573 
574  *
575  * This is the main class that builds on the class in @ref step_25 "step-25". However, we
576  * replaced the SparseMatrix<double> class by the MatrixFree class to store
577  * the geometry data. Also, we use a distributed triangulation in this
578  * example.
579  *
580  * @code
581  * template <int dim>
582  * class SineGordonProblem
583  * {
584  * public:
585  * SineGordonProblem();
586  * void run();
587  *
588  * private:
589  * ConditionalOStream pcout;
590  *
591  * void make_grid_and_dofs();
592  * void output_results(const unsigned int timestep_number);
593  *
594  * #ifdef DEAL_II_WITH_P4EST
595  * parallel::distributed::Triangulation<dim> triangulation;
596  * #else
597  * Triangulation<dim> triangulation;
598  * #endif
599  * FE_Q<dim> fe;
600  * DoFHandler<dim> dof_handler;
601  * AffineConstraints<double> constraints;
602  * IndexSet locally_relevant_dofs;
603  *
604  * MatrixFree<dim, double> matrix_free_data;
605  *
606  * LinearAlgebra::distributed::Vector<double> solution, old_solution,
607  * old_old_solution;
608  *
609  * const unsigned int n_global_refinements;
610  * double time, time_step;
611  * const double final_time;
612  * const double cfl_number;
613  * const unsigned int output_timestep_skip;
614  * };
615  *
616  *
617  * @endcode
618  *
619  *
620  * <a name="SineGordonProblemSineGordonProblem"></a>
621  * <h4>SineGordonProblem::SineGordonProblem</h4>
622  *
623 
624  *
625  * This is the constructor of the SineGordonProblem class. The time interval
626  * and time step size are defined here. Moreover, we use the degree of the
627  * finite element that we defined at the top of the program to initialize a
628  * FE_Q finite element based on Gauss-Lobatto support points. These points
629  * are convenient because in conjunction with a QGaussLobatto quadrature
630  * rule of the same order they give a diagonal mass matrix without
631  * compromising accuracy too much (note that the integration is inexact,
632  * though); see also the discussion in the introduction. Note that FE_Q
633  * selects the Gauss-Lobatto nodal points by default due to their improved
634  * conditioning versus equidistant points. To make things more explicit, we
635  * state the selection of the nodal points nonetheless.
636  *
637  * @code
638  * template <int dim>
639  * SineGordonProblem<dim>::SineGordonProblem()
640  * : pcout(std::cout, Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
641  * ,
642  * #ifdef DEAL_II_WITH_P4EST
643  * triangulation(MPI_COMM_WORLD)
644  * ,
645  * #endif
646  * fe(QGaussLobatto<1>(fe_degree + 1))
647  * , dof_handler(triangulation)
648  * , n_global_refinements(10 - 2 * dim)
649  * , time(-10)
650  * , time_step(10.)
651  * , final_time(10.)
652  * , cfl_number(.1 / fe_degree)
653  * , output_timestep_skip(200)
654  * {}
655  *
656  * @endcode
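 *
 * As a small aside, the (near-)diagonality of the mass matrix claimed above
 * is easy to verify by hand. The following stand-alone snippet is a minimal
 * sketch that is not part of this program: it assembles the mass matrix of a
 * single cell for an FE_Q element based on Gauss-Lobatto points, integrated
 * with a QGaussLobatto rule of matching order, and checks that all
 * off-diagonal entries vanish up to roundoff.
 *
 * @code
 * #include <deal.II/base/quadrature_lib.h>
 * #include <deal.II/fe/fe_q.h>
 * #include <deal.II/fe/fe_values.h>
 * #include <deal.II/grid/grid_generator.h>
 * #include <deal.II/grid/tria.h>
 * #include <deal.II/lac/full_matrix.h>
 *
 * #include <algorithm>
 * #include <cmath>
 * #include <iostream>
 *
 * int main()
 * {
 *   using namespace dealii;
 *
 *   const unsigned int degree = 4;
 *
 *   Triangulation<2> tria;
 *   GridGenerator::hyper_cube(tria);
 *
 *   // Element with Gauss-Lobatto support points and a co-located
 *   // Gauss-Lobatto quadrature rule of the same order.
 *   FE_Q<2>          fe(QGaussLobatto<1>(degree + 1));
 *   QGaussLobatto<2> quadrature(degree + 1);
 *   FEValues<2> fe_values(fe, quadrature, update_values | update_JxW_values);
 *   fe_values.reinit(tria.begin_active());
 *
 *   // Assemble the cell mass matrix.
 *   FullMatrix<double> cell_mass(fe.dofs_per_cell, fe.dofs_per_cell);
 *   for (unsigned int q = 0; q < quadrature.size(); ++q)
 *     for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
 *       for (unsigned int j = 0; j < fe.dofs_per_cell; ++j)
 *         cell_mass(i, j) += fe_values.shape_value(i, q) *
 *                            fe_values.shape_value(j, q) * fe_values.JxW(q);
 *
 *   // Shape function i equals one at quadrature point i and zero at all
 *   // others, so every off-diagonal entry should be zero up to roundoff.
 *   double max_off_diagonal = 0.;
 *   for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
 *     for (unsigned int j = 0; j < fe.dofs_per_cell; ++j)
 *       if (i != j)
 *         max_off_diagonal =
 *           std::max(max_off_diagonal, std::abs(cell_mass(i, j)));
 *
 *   std::cout << "Largest off-diagonal entry: " << max_off_diagonal
 *             << std::endl;
 * }
 * @endcode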
657  *
658  *
659  * <a name="SineGordonProblemmake_grid_and_dofs"></a>
660  * <h4>SineGordonProblem::make_grid_and_dofs</h4>
661  *
662 
663  *
664  * As in @ref step_25 "step-25", this function sets up a cube grid in <code>dim</code>
665  * dimensions of extent @f$[-15,15]@f$. We refine the mesh more in the center of
666  * the domain since the solution is concentrated there. We first refine all
667  * cells whose center is within a radius of 11, and then refine once more
668  * for a radius of 6. This simple ad hoc refinement could be done better by
669  * adapting the mesh to the solution using error estimators during the time
670  * stepping as done in other example programs, and using
671  * parallel::distributed::SolutionTransfer to transfer the solution to the
672  * new mesh.
673  *
674  * @code
675  * template <int dim>
676  * void SineGordonProblem<dim>::make_grid_and_dofs()
677  * {
678  * GridGenerator::hyper_cube(triangulation, -15, 15);
679  * triangulation.refine_global(n_global_refinements);
680  * {
681  * typename Triangulation<dim>::active_cell_iterator
682  * cell = triangulation.begin_active(),
683  * end_cell = triangulation.end();
684  * for (; cell != end_cell; ++cell)
685  * if (cell->is_locally_owned())
686  * if (cell->center().norm() < 11)
687  * cell->set_refine_flag();
688  * triangulation.execute_coarsening_and_refinement();
689  *
690  * cell = triangulation.begin_active();
691  * end_cell = triangulation.end();
692  * for (; cell != end_cell; ++cell)
693  * if (cell->is_locally_owned())
694  * if (cell->center().norm() < 6)
695  * cell->set_refine_flag();
696  * triangulation.execute_coarsening_and_refinement();
697  * }
698  *
699  * pcout << " Number of global active cells: "
700  * #ifdef DEAL_II_WITH_P4EST
701  * << triangulation.n_global_active_cells()
702  * #else
703  * << triangulation.n_active_cells()
704  * #endif
705  * << std::endl;
706  *
707  * dof_handler.distribute_dofs(fe);
708  *
709  * pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
710  * << std::endl;
711  *
712  *
713  * @endcode
714  *
715  * We generate hanging node constraints for ensuring continuity of the
716  * solution. As in @ref step_40 "step-40", we need to equip the constraint matrix with
717  * the IndexSet of locally relevant degrees of freedom so that it does not
718  * consume too much memory for big problems. Next, the
719  * <code>MatrixFree</code> object for the problem is set up. Note that we specify a
720  * particular scheme for shared-memory parallelization (hence one would
721  * use multithreading for intra-node parallelism and not MPI; we here
722  * choose the standard option &mdash; if we wanted to disable shared
723  * memory parallelization even in the case where there is more than one TBB
724  * thread available in the program, we would choose
725  * MatrixFree::AdditionalData::TasksParallelScheme::none). Also note that,
726  * instead of using the default QGauss quadrature argument, we supply a
727  * QGaussLobatto quadrature formula to enable the desired
728  * behavior. Finally, three solution vectors are initialized. MatrixFree
729  * expects a particular layout of ghost indices (as it handles index
730  * access in MPI-local numbers that need to match between the vector and
731  * MatrixFree), so we just ask it to initialize the vectors to be sure the
732  * ghost exchange is properly handled.
733  *
734  * @code
735  * DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
736  * constraints.clear();
737  * constraints.reinit(locally_relevant_dofs);
738  * DoFTools::make_hanging_node_constraints(dof_handler, constraints);
739  * constraints.close();
740  *
741  * typename MatrixFree<dim>::AdditionalData additional_data;
742  * additional_data.tasks_parallel_scheme =
743  * MatrixFree<dim>::AdditionalData::TasksParallelScheme::partition_partition;
744  *
745  * matrix_free_data.reinit(dof_handler,
746  * constraints,
747  * QGaussLobatto<1>(fe_degree + 1),
748  * additional_data);
749  *
750  * matrix_free_data.initialize_dof_vector(solution);
751  * old_solution.reinit(solution);
752  * old_old_solution.reinit(solution);
753  * }
754  *
755  *
756  *
757  * @endcode
758  *
759  *
760  * <a name="SineGordonProblemoutput_results"></a>
761  * <h4>SineGordonProblem::output_results</h4>
762  *
763 
764  *
765  * This function prints the norm of the solution and writes the solution
766  * vector to a file. The norm is standard (except for the fact that we need
767  * to accumulate the norms over all processors for the parallel grid which
768  * we do via the VectorTools::compute_global_error() function), and the
769  * output is similar to what we did in @ref step_40 "step-40" or @ref step_37 "step-37". Note that we can
770  * use the same vector for output as the one used during computations: The
771  * vectors in the matrix-free framework always provide full information on
772  * all locally owned cells (this is what is needed in the local evaluations,
773  * too), including ghost vector entries on these cells. This is the only
774  * data that is needed in the VectorTools::integrate_difference() function
775  * as well as in DataOut. The only action to take at this point is to make
776  * sure that the vector updates its ghost values before we read from
777  * them. This is a feature present only in the
778  * LinearAlgebra::distributed::Vector class. Distributed vectors with PETSc
779  * and Trilinos, on the other hand, need to be copied to special vectors
780  * including ghost values (see the relevant section in @ref step_40 "step-40"). If we also
781  * wanted to access all degrees of freedom on ghost cells (e.g. when
782  * computing error estimators that use the jump of solution over cell
783  * boundaries), we would need more information and create a vector
784  * initialized with locally relevant dofs just as in @ref step_40 "step-40". Observe also
785  * that we need to distribute constraints for output - they are not filled
786  * during computations (rather, they are interpolated on the fly in the
787  * matrix-free method FEEvaluation::read_dof_values()).
788  *
789  * @code
790  * template <int dim>
791  * void
792  * SineGordonProblem<dim>::output_results(const unsigned int timestep_number)
793  * {
794  * constraints.distribute(solution);
795  *
796  * Vector<float> norm_per_cell(triangulation.n_active_cells());
797  * solution.update_ghost_values();
798  * VectorTools::integrate_difference(dof_handler,
799  * solution,
800  * Functions::ZeroFunction<dim>(),
801  * norm_per_cell,
802  * QGauss<dim>(fe_degree + 1),
803  * VectorTools::L2_norm);
804  * const double solution_norm =
805  * VectorTools::compute_global_error(triangulation,
806  * norm_per_cell,
807  * VectorTools::L2_norm);
808  *
809  * pcout << " Time:" << std::setw(8) << std::setprecision(3) << time
810  * << ", solution norm: " << std::setprecision(5) << std::setw(7)
811  * << solution_norm << std::endl;
812  *
813  * DataOut<dim> data_out;
814  *
815  * data_out.attach_dof_handler(dof_handler);
816  * data_out.add_data_vector(solution, "solution");
817  * data_out.build_patches();
818  *
819  * data_out.write_vtu_with_pvtu_record(
820  * "./", "solution", timestep_number, MPI_COMM_WORLD, 3);
821  * }
822  *
823  *
824  * @endcode
825  *
826  *
827  * <a name="SineGordonProblemrun"></a>
828  * <h4>SineGordonProblem::run</h4>
829  *
830 
831  *
832  * This function is called by the main function and steps into the
833  * subroutines of the class.
834  *
835 
836  *
837  * After printing some information about the parallel setup, the first
838  * action is to set up the grid and the cell operator. Then, the time step
839  * is computed from the CFL number given in the constructor and the finest
840  * mesh size. The finest mesh size is computed as the diameter of the last
841  * cell in the triangulation (the last cell on the finest level of the mesh),
842  * divided by @f$\sqrt{\mathrm{dim}}@f$ since the diameter of a cube cell is @f$\sqrt{\mathrm{dim}}@f$ times its edge length. This is only possible for
843  * meshes where all elements on a level have the same size; otherwise, one needs to loop over all cells. Note
844  * that we need to query all the processors for their finest cell since
845  * not all processors might hold a region where the mesh is at the finest
846  * level. Then, we readjust the time step a little to hit the final time
847  * exactly.
848  *
849  * @code
850  * template <int dim>
851  * void SineGordonProblem<dim>::run()
852  * {
853  * {
854  * pcout << "Number of MPI ranks: "
855  * << Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) << std::endl;
856  * pcout << "Number of threads on each rank: "
857  * << MultithreadInfo::n_threads() << std::endl;
858  * const unsigned int n_vect_doubles = VectorizedArray<double>::size();
859  * const unsigned int n_vect_bits = 8 * sizeof(double) * n_vect_doubles;
860  * pcout << "Vectorization over " << n_vect_doubles
861  * << " doubles = " << n_vect_bits << " bits ("
862  * << Utilities::System::get_current_vectorization_level() << ")"
863  * << std::endl
864  * << std::endl;
865  * }
866  * make_grid_and_dofs();
867  *
868  * const double local_min_cell_diameter =
869  * triangulation.last()->diameter() / std::sqrt(dim);
870  * const double global_min_cell_diameter =
871  * -Utilities::MPI::max(-local_min_cell_diameter, MPI_COMM_WORLD);
872  * time_step = cfl_number * global_min_cell_diameter;
873  * time_step = (final_time - time) / (int((final_time - time) / time_step));
874  * pcout << " Time step size: " << time_step
875  * << ", finest cell: " << global_min_cell_diameter << std::endl
876  * << std::endl;
877  *
878  * @endcode
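 *
 * As a side note, the global minimum above is obtained through the identity
 * @f$\min_i x_i = -\max_i(-x_i)@f$. A minimal, equivalent alternative (assuming
 * one prefers the more direct reduction provided by Utilities::MPI::min)
 * would be:
 *
 * @code
 * const double global_min_cell_diameter =
 *   Utilities::MPI::min(local_min_cell_diameter, MPI_COMM_WORLD);
 * @endcode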
879  *
880  * Next the initial value is set. Since we have a two-step time stepping
881  * method, we also need a value of the solution at <code>time - time_step</code>. For
882  * accurate results, one would need to compute this from the time
883  * derivative of the solution at initial time, but here we ignore this
884  * difficulty and just set it to the initial value function at that
885  * artificial time.
886  *
887 
888  *
889  * We then go on by writing the initial state to file and collecting
890  * the two starting solutions in a <tt>std::vector</tt> of pointers that
891  * get later consumed by the SineGordonOperation::apply() function. Next,
892  * an instance of the <code>SineGordonOperation</code> class based on
893  * the finite element degree specified at the top of this file is set up.
894  *
895  * @code
896  * VectorTools::interpolate(dof_handler,
897  * InitialCondition<dim>(1, time),
898  * solution);
899  * VectorTools::interpolate(dof_handler,
900  * InitialCondition<dim>(1, time - time_step),
901  * old_solution);
902  * output_results(0);
903  *
904  * std::vector<LinearAlgebra::distributed::Vector<double> *>
905  * previous_solutions({&old_solution, &old_old_solution});
906  *
907  * SineGordonOperation<dim, fe_degree> sine_gordon_op(matrix_free_data,
908  * time_step);
909  *
910  * @endcode
911  *
912  * Now loop over the time steps. In each iteration, we shift the solution
913  * vectors by one and call the `apply` function of the
914  * `SineGordonOperation` class. Then, we write the solution to a file. We
915  * measure the wall time spent on the computations as well as the
916  * time needed to create the output and report the numbers when the time
917  * stepping is finished.
918  *
919 
920  *
921  * Note how this shift is implemented: We simply call the swap method on
922  * the two vectors, which only exchanges some pointers instead of copying
923  * data around; copying would be a relatively expensive operation within an explicit time
924  * stepping method. Let us see what happens in more detail: First, we
925  * exchange <code>old_solution</code> with <code>old_old_solution</code>,
926  * which means that <code>old_old_solution</code> gets
927  * <code>old_solution</code>, which is what we expect. Similarly,
928  * <code>old_solution</code> gets the content from <code>solution</code>
929  * in the next step. After this, <code>solution</code> holds
930  * <code>old_old_solution</code>, but that will be overwritten during this
931  * step.
932  *
933  * @code
934  * unsigned int timestep_number = 1;
935  *
936  * Timer timer;
937  * double wtime = 0;
938  * double output_time = 0;
939  * for (time += time_step; time <= final_time;
940  * time += time_step, ++timestep_number)
941  * {
942  * timer.restart();
943  * old_old_solution.swap(old_solution);
944  * old_solution.swap(solution);
945  * sine_gordon_op.apply(solution, previous_solutions);
946  * wtime += timer.wall_time();
947  *
948  * timer.restart();
949  * if (timestep_number % output_timestep_skip == 0)
950  * output_results(timestep_number / output_timestep_skip);
951  *
952  * output_time += timer.wall_time();
953  * }
954  * timer.restart();
955  * output_results(timestep_number / output_timestep_skip + 1);
956  * output_time += timer.wall_time();
957  *
958  * pcout << std::endl
959  * << " Performed " << timestep_number << " time steps." << std::endl;
960  *
961  * pcout << " Average wallclock time per time step: "
962  * << wtime / timestep_number << "s" << std::endl;
963  *
964  * pcout << " Spent " << output_time << "s on output and " << wtime
965  * << "s on computations." << std::endl;
966  * }
967  * } // namespace Step48
968  *
969  *
970  *
971  * @endcode
972  *
973  *
974  * <a name="Thecodemaincodefunction"></a>
975  * <h3>The <code>main</code> function</h3>
976  *
977 
978  *
979  * As in @ref step_40 "step-40", we initialize MPI at the start of the program. Since we will
980  * in general mix MPI parallelization with threads, we also set the third
981  * argument in MPI_InitFinalize that controls the number of threads to an
982  * invalid number, which means that the TBB library chooses the number of
983  * threads automatically, typically to the number of available cores in the
984  * system. As an alternative, you can also set this number manually if you
985  * want to set a specific number of threads (e.g. when MPI-only is required).
986  *
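 * For instance, a minimal way to enforce an MPI-only run (one thread per MPI
 * process) is to pass an explicit limit as the last constructor argument:
 *
 * @code
 * Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
 * @endcode
 *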
987  * @code
988  * int main(int argc, char **argv)
989  * {
990  * using namespace Step48;
991  * using namespace dealii;
992  *
993  * Utilities::MPI::MPI_InitFinalize mpi_initialization(
994  * argc, argv, numbers::invalid_unsigned_int);
995  *
996  * try
997  * {
998  * SineGordonProblem<dimension> sg_problem;
999  * sg_problem.run();
1000  * }
1001  * catch (std::exception &exc)
1002  * {
1003  * std::cerr << std::endl
1004  * << std::endl
1005  * << "----------------------------------------------------"
1006  * << std::endl;
1007  * std::cerr << "Exception on processing: " << std::endl
1008  * << exc.what() << std::endl
1009  * << "Aborting!" << std::endl
1010  * << "----------------------------------------------------"
1011  * << std::endl;
1012  *
1013  * return 1;
1014  * }
1015  * catch (...)
1016  * {
1017  * std::cerr << std::endl
1018  * << std::endl
1019  * << "----------------------------------------------------"
1020  * << std::endl;
1021  * std::cerr << "Unknown exception!" << std::endl
1022  * << "Aborting!" << std::endl
1023  * << "----------------------------------------------------"
1024  * << std::endl;
1025  * return 1;
1026  * }
1027  *
1028  * return 0;
1029  * }
1030  * @endcode
1031 <a name="Results"></a><h1>Results</h1>
1032 
1033 
1034 <a name="Comparisonwithasparsematrix"></a><h3>Comparison with a sparse matrix</h3>
1035 
1036 
1037 In order to demonstrate the gain in using the MatrixFree class instead of
1038 the standard <code>deal.II</code> assembly routines for evaluating the
1039 information from old time steps, we study a simple serial run of the code on a
1040 nonadaptive mesh. Since much time is spent on evaluating the sine function, we
1041 show the numbers not only for the full sine-Gordon equation but also for the
1042 wave equation (i.e., the sine-Gordon equation with the sine term dropped). We use
1043 both second and fourth order elements. The results, measured as the wall time
1044 for one operator evaluation, are summarized in the following table; "MF" refers to the matrix-free evaluation used in this program, "SpMV" to a sparse matrix-vector product with an assembled matrix, and "dealii" to the standard deal.II assembly routines.
1045 
1046 <table align="center" class="doxtable">
1047  <tr>
1048  <th>&nbsp;</th>
1049  <th colspan="3">wave equation</th>
1050  <th colspan="2">sine-Gordon</th>
1051  </tr>
1052  <tr>
1053  <th>&nbsp;</th>
1054  <th>MF</th>
1055  <th>SpMV</th>
1056  <th>dealii</th>
1057  <th>MF</th>
1058  <th>dealii</th>
1059  </tr>
1060  <tr>
1061  <td>2D, @f$\mathcal{Q}_2@f$</td>
1062  <td align="right"> 0.0106</td>
1063  <td align="right"> 0.00971</td>
1064  <td align="right"> 0.109</td>
1065  <td align="right"> 0.0243</td>
1066  <td align="right"> 0.124</td>
1067  </tr>
1068  <tr>
1069  <td>2D, @f$\mathcal{Q}_4@f$</td>
1070  <td align="right"> 0.0328</td>
1071  <td align="right"> 0.0706</td>
1072  <td align="right"> 0.528</td>
1073  <td align="right"> 0.0714</td>
1074  <td align="right"> 0.502</td>
1075  </tr>
1076  <tr>
1077  <td>3D, @f$\mathcal{Q}_2@f$</td>
1078  <td align="right"> 0.0151</td>
1079  <td align="right"> 0.0320</td>
1080  <td align="right"> 0.331</td>
1081  <td align="right"> 0.0376</td>
1082  <td align="right"> 0.364</td>
1083  </tr>
1084  <tr>
1085  <td>3D, @f$\mathcal{Q}_4@f$</td>
1086  <td align="right"> 0.0918</td>
1087  <td align="right"> 0.844</td>
1088  <td align="right"> 6.83</td>
1089  <td align="right"> 0.194</td>
1090  <td align="right"> 6.95</td>
1091  </tr>
1092 </table>
1093 
1094 It is apparent that the matrix-free code outperforms the standard assembly
1095 routines in deal.II by far. In 3D and for fourth order elements, one operator
1096 evaluation is also almost ten times as fast as a sparse matrix-vector
1097 product.
1098 
1099 <a name="Parallelrunin2Dand3D"></a><h3>Parallel run in 2D and 3D</h3>
1100 
1101 
1102 We start with the program output obtained on a workstation with 12 cores / 24
1103 threads (one Intel Xeon E5-2687W v4 CPU running at 3.2 GHz, hyperthreading
1104 enabled), running the program in release mode:
1105 @code
1106 $ make run
1107 Number of MPI ranks: 1
1108 Number of threads on each rank: 24
1109 Vectorization over 4 doubles = 256 bits (AVX)
1110 
1111  Number of global active cells: 15412
1112  Number of degrees of freedom: 249065
1113  Time step size: 0.00292997, finest cell: 0.117188
1114 
1115  Time: -10, solution norm: 9.5599
1116  Time: -9.41, solution norm: 17.678
1117  Time: -8.83, solution norm: 23.504
1118  Time: -8.24, solution norm: 27.5
1119  Time: -7.66, solution norm: 29.513
1120  Time: -7.07, solution norm: 29.364
1121  Time: -6.48, solution norm: 27.23
1122  Time: -5.9, solution norm: 23.527
1123  Time: -5.31, solution norm: 18.439
1124  Time: -4.73, solution norm: 11.935
1125  Time: -4.14, solution norm: 5.5284
1126  Time: -3.55, solution norm: 8.0354
1127  Time: -2.97, solution norm: 14.707
1128  Time: -2.38, solution norm: 20
1129  Time: -1.8, solution norm: 22.834
1130  Time: -1.21, solution norm: 22.771
1131  Time: -0.624, solution norm: 20.488
1132  Time: -0.0381, solution norm: 16.697
1133  Time: 0.548, solution norm: 11.221
1134  Time: 1.13, solution norm: 5.3912
1135  Time: 1.72, solution norm: 8.4528
1136  Time: 2.31, solution norm: 14.335
1137  Time: 2.89, solution norm: 18.555
1138  Time: 3.48, solution norm: 20.894
1139  Time: 4.06, solution norm: 21.305
1140  Time: 4.65, solution norm: 19.903
1141  Time: 5.24, solution norm: 16.864
1142  Time: 5.82, solution norm: 12.223
1143  Time: 6.41, solution norm: 6.758
1144  Time: 6.99, solution norm: 7.2423
1145  Time: 7.58, solution norm: 12.888
1146  Time: 8.17, solution norm: 17.273
1147  Time: 8.75, solution norm: 19.654
1148  Time: 9.34, solution norm: 19.838
1149  Time: 9.92, solution norm: 17.964
1150  Time: 10, solution norm: 17.595
1151 
1152  Performed 6826 time steps.
1153  Average wallclock time per time step: 0.0013453s
1154  Spent 14.976s on output and 9.1831s on computations.
1155 @endcode
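
These numbers can be traced back to the parameters of the program: with
<code>dimension = 2</code>, the constructor requests @f$10-2\cdot 2=6@f$ global
refinements, and the two adaptive refinement passes near the center add two
more levels, so the finest cells have edge length @f$30/2^8 = 0.117188@f$. With
<code>fe_degree = 4</code>, the CFL number is @f$0.1/4 = 0.025@f$, giving an
initial time step of @f$0.025\cdot 0.117188 \approx 0.0029297@f$; readjusting it
to hit the final time exactly yields @f$20/\lfloor 20/0.0029297\rfloor = 20/6826
= 0.00292997@f$, consistent with the 6826 time steps reported at the end.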
1156 
1157 In 3D, the respective output looks like
1158 @code
1159 $ make run
1160 Number of MPI ranks: 1
1161 Number of threads on each rank: 24
1162 Vectorization over 4 doubles = 256 bits (AVX)
1163 
1164  Number of global active cells: 17592
1165  Number of degrees of freedom: 1193881
1166  Time step size: 0.0117233, finest cell: 0.46875
1167 
1168  Time: -10, solution norm: 29.558
1169  Time: -7.66, solution norm: 129.13
1170  Time: -5.31, solution norm: 67.753
1171  Time: -2.97, solution norm: 79.245
1172  Time: -0.621, solution norm: 123.52
1173  Time: 1.72, solution norm: 43.525
1174  Time: 4.07, solution norm: 93.285
1175  Time: 6.41, solution norm: 97.722
1176  Time: 8.76, solution norm: 36.734
1177  Time: 10, solution norm: 94.115
1178 
1179  Performed 1706 time steps.
1180  Average wallclock time per time step: 0.0084542s
1181  Spent 16.766s on output and 14.423s on computations.
1182 @endcode
1183 
1184 It takes 0.008 seconds for one time step with more than a million
1185 degrees of freedom (note that we would need many processors to reach such
1186 numbers when solving linear systems).
1187 
1188 If we replace the thread-parallelization by a pure MPI parallelization, the
1189 timings change into:
1190 @code
1191 $ mpirun -n 24 ./step-48
1192 Number of MPI ranks: 24
1193 Number of threads on each rank: 1
1194 Vectorization over 4 doubles = 256 bits (AVX)
1195 ...
1196  Performed 1706 time steps.
1197  Average wallclock time per time step: 0.0051747s
1198  Spent 2.0535s on output and 8.828s on computations.
1199 @endcode
1200 
1201 We observe a dramatic speedup for the output (which makes sense, given that
1202 most of the output code is not parallelized via threads, whereas it is for
1203 MPI), but less than the theoretical factor of 12 we would expect from the
1204 parallelism. More interestingly, the computations also get faster when
1205 switching from the threads-only variant to the MPI-only variant. This is a
1206 general observation for the MatrixFree framework (as of updating this data in
1207 2019). The main reason is that the decisions regarding work on conflicting
1208 cell batches made to enable execution in parallel are overly pessimistic:
1209 While they ensure that no work on neighboring cells is done on different
1210 threads at the same time, this conservative setting implies that data from
1211 neighboring cells is also evicted from caches by the time neighbors get
1212 touched. Furthermore, the current scheme is not able to provide a constant
1213 load for all 24 threads for the given mesh with 17,592 cells.
1214 
1215 The current program also allows mixing MPI parallelization with thread
1216 parallelization. This is most beneficial when running programs on clusters
1217 with multiple nodes, using MPI for the inter-node parallelization and threads
1218 for the intra-node parallelization. On the workstation used above, we can run
1219 threads in the hyperthreading region (i.e., using 2 threads for each of the 12
1220 MPI ranks). An important setting for mixing MPI with threads is to ensure
1221 proper pinning of tasks to CPUs. On many clusters the placement happens either
1222 automatically via the `mpirun/mpiexec` environment, or it can be controlled by manual
1223 settings. Here, we simply report the run times of the plain version of the
1224 program (noting that things could be improved towards the timings of the
1225 MPI-only program when proper pinning is done):
1226 @code
1227 $ mpirun -n 12 ./step-48
1228 Number of MPI ranks: 12
1229 Number of threads on each rank: 2
1230 Vectorization over 4 doubles = 256 bits (AVX)
1231 ...
1232  Performed 1706 time steps.
1233  Average wallclock time per time step: 0.0056651s
1234  Spent 2.5175s on output and 9.6646s on computations.
1235 @endcode
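
As an example of what such manual settings might look like, with Open MPI one
could ask for two cores per rank along these lines (a sketch only; the exact
options depend on the MPI implementation and the machine at hand):
@code
$ mpirun -n 12 --map-by socket:PE=2 --bind-to core ./step-48
@endcode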
1236 
1237 
1238 
1239 <a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
1240 
1241 
1242 There are several things in this program that could be improved to make it
1243 even more efficient (besides improved boundary conditions and physical
1244 stuff as discussed in @ref step_25 "step-25"):
1245 
1246 <ul> <li> <b>Faster evaluation of sine terms:</b> As becomes obvious
1247  from the comparison of the plain wave equation and the sine-Gordon
1248  equation above, the evaluation of the sine terms dominates the total
1249  time for the finite element operator application. There are a few
1250  reasons for this: Firstly, the deal.II sine computation of a
1251  VectorizedArray field is not vectorized (as opposed to the rest of
1252  the operator application). This could be cured by handing the sine
1253  computation to a library with vectorized sine computations like
1254  Intel's math kernel library (MKL). By using the function
1255  <code>vdSin</code> in MKL (a sketch of this idea is shown after this list), the program uses half the computing time
1256  in 2D and 40 percent less time in 3D. On the other hand, the sine
1257  computation is structurally much more complicated than the simple
1258  arithmetic operations like additions and multiplications in the rest
1259  of the local operation.
1260 
1261  <li> <b>Higher order time stepping:</b> While the implementation allows for
1262  arbitrary order in the spatial part (by adjusting the degree of the finite
1263  element), the time stepping scheme is a standard second-order leap-frog
1264  scheme. Since solutions in wave propagation problems are usually very
1265  smooth, the error is likely dominated by the time stepping part. Of course,
1266  this could be cured by using smaller time steps (at a fixed spatial
1267  resolution), but it would be more efficient to use higher order time
1268  stepping as well. While it would be straightforward to do so for a
1269  first-order system (use some Runge&ndash;Kutta scheme of higher order,
1270  probably combined with adaptive time step selection like the <a
1271  href="http://en.wikipedia.org/wiki/Dormand%E2%80%93Prince_method">Dormand&ndash;Prince
1272  method</a>), it is more challenging for the second-order formulation. At
1273  least in the finite difference community, people usually use the PDE to find
1274  spatial correction terms that improve the temporal error.
1275 
1276 </ul>
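
As an illustration of the first item in the list above, the following sketch
shows how the sine of all SIMD lanes of a VectorizedArray could be handed to
MKL's vector math function <code>vdSin</code> in one call. This assumes that
Intel MKL is available and linked in; the helper <code>fast_sin</code> is
hypothetical and not part of deal.II or of this program:
@code
#include <deal.II/base/vectorization.h>

#include <mkl_vml_functions.h>

dealii::VectorizedArray<double>
fast_sin(const dealii::VectorizedArray<double> &x)
{
  constexpr unsigned int n_lanes = dealii::VectorizedArray<double>::size();

  // Copy the lanes into a plain array, evaluate all sines in a single MKL
  // call, and copy the results back into a VectorizedArray.
  double values[n_lanes], sines[n_lanes];
  for (unsigned int v = 0; v < n_lanes; ++v)
    values[v] = x[v];
  vdSin(n_lanes, values, sines);

  dealii::VectorizedArray<double> result;
  for (unsigned int v = 0; v < n_lanes; ++v)
    result[v] = sines[v];
  return result;
}
@endcode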
1277  *
1278  *
1279 <a name="PlainProg"></a>
1280 <h1> The plain program</h1>
1281 @include "step-48.cc"
1282 */