Reference documentation for deal.II version GIT relicensing-1062-gc06da148b8 2024-07-15 19:20:02+00:00
init_finalize.cc
Go to the documentation of this file.
// ---------------------------------------------------------------------
//
// Copyright (C) 2023 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/init_finalize.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/multithread_info.h>

#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/vector_memory.h>

#include <Kokkos_Core.hpp>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <deal.II/lac/trilinos_parallel_block_vector.h>
#    include <deal.II/lac/trilinos_vector.h>

#    include <Epetra_MpiComm.h>
#  endif
#endif

#ifdef DEAL_II_WITH_PETSC
#  include <deal.II/lac/petsc_block_vector.h>
#  include <deal.II/lac/petsc_vector.h>

#  include <petscsys.h>
#endif

#ifdef DEAL_II_WITH_SLEPC
#  include <deal.II/lac/slepc_solver.h>

#  include <slepcsys.h>
#endif

#ifdef DEAL_II_WITH_P4EST
#  include <p4est_bits.h>
#endif

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
#  include <zoltan_cpp.h>
#endif

DEAL_II_NAMESPACE_OPEN


/* Force initialization of static struct: */
InitFinalize::Signals InitFinalize::signals = InitFinalize::Signals();


InitFinalize::InitFinalize(int &argc,
                           char **&argv,
                           const InitializeLibrary &libraries,
                           const unsigned int max_num_threads)
  : libraries(libraries)
{
  static bool constructor_has_already_run = false;
  (void)constructor_has_already_run;
  Assert(constructor_has_already_run == false,
         ExcMessage("You can only create a single object of this class "
                    "in a program since it initializes the MPI system."));


  int ierr = 0;
#ifdef DEAL_II_WITH_MPI
  if (static_cast<bool>(libraries & InitializeLibrary::MPI))
    {
      // if we have PETSc, we will initialize it and let it handle MPI.
      // Otherwise, we will do it.
      int MPI_has_been_started = 0;
      ierr = MPI_Initialized(&MPI_has_been_started);
      AssertThrowMPI(ierr);
      AssertThrow(MPI_has_been_started == 0,
                  ExcMessage("MPI error. You can only start MPI once!"));

      int provided;
      // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
      // we might use several threads but never call two MPI functions at the
      // same time. For an explanation of why we do this, see
      // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
      int wanted = MPI_THREAD_SERIALIZED;
      ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
      AssertThrowMPI(ierr);

      // disable for now because at least some implementations always return
      // MPI_THREAD_SINGLE.
      // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
      //    ExcMessage("MPI reports that we are not allowed to use multiple
      //    threads."));
    }
#else
  // make sure the compiler doesn't warn about these variables
  (void)argc;
  (void)argv;
  (void)ierr;
#endif

  // we are allowed to call MPI_Init ourselves and PETScInitialize will
  // detect this. This allows us to use MPI_Init_thread instead.
#ifdef DEAL_II_WITH_PETSC
  PetscErrorCode pierr;
#  ifdef DEAL_II_WITH_SLEPC
  // Initialize SLEPc (with PETSc):
  if (static_cast<bool>(libraries & InitializeLibrary::SLEPc))
    {
      finalize_petscslepc = SlepcInitializeCalled ? false : true;
      pierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
      AssertThrow(pierr == 0, ExcSLEPcError(pierr));
    }
#  else
  // or just initialize PETSc alone:
  if (static_cast<bool>(libraries & InitializeLibrary::PETSc))
    {
      finalize_petscslepc = PetscInitializeCalled ? false : true;
      pierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
      AssertThrow(pierr == 0, ExcPETScError(pierr));
    }
#  endif

  // Disable PETSc exception handling. This just prints a large wall
  // of text that is not particularly helpful for what we do:
  if (static_cast<bool>(libraries & InitializeLibrary::SLEPc) ||
      static_cast<bool>(libraries & InitializeLibrary::PETSc))
    {
      pierr = PetscPopSignalHandler();
      AssertThrow(pierr == 0, ExcPETScError(pierr));
    }
#endif

  // Initialize zoltan
#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
  if (static_cast<bool>(libraries & InitializeLibrary::Zoltan))
    {
      float version;
      Zoltan_Initialize(argc, argv, &version);
    }
#endif

  // Initialize p4est and libsc components
#ifdef DEAL_II_WITH_P4EST
  if (static_cast<bool>(libraries & InitializeLibrary::P4EST))
    {
#  if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
      // This feature is broken in version 2.0.0 for calls to
      // MPI_Comm_create_group (see cburstedde/p4est#30).
      // Disabling it leads to more verbose p4est error messages
      // which should be fine.
      sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
#  endif
      p4est_init(nullptr, SC_LP_SILENT);
    }
#endif

  constructor_has_already_run = true;


  // Now also see how many threads we'd like to run
  if (max_num_threads != numbers::invalid_unsigned_int)
    {
      // set maximum number of threads (also respecting the environment
      // variable that the called function evaluates) based on what the
      // user asked
      MultithreadInfo::set_thread_limit(max_num_threads);
    }
  else
    // user wants automatic choice
    {
      unsigned int n_threads = MultithreadInfo::n_cores();
#ifdef DEAL_II_WITH_MPI
      if (static_cast<bool>(libraries & InitializeLibrary::MPI))
        {
          int MPI_has_been_started = 0;
          int ierr = MPI_Initialized(&MPI_has_been_started);
          AssertThrowMPI(ierr);

          // we need to figure out how many MPI processes there are on the
          // current node, as well as how many CPU cores we have. for the
          // first task, check what get_hostname() returns and then do an
          // allgather so each processor gets the answer
          //
          // in calculating the length of the string, don't forget the
          // terminating \0 on C-style strings
          const std::string hostname = Utilities::System::get_hostname();

          int my_hostname_size  = hostname.size() + 1;
          int max_hostname_size = -1;
          ierr = MPI_Allreduce(&my_hostname_size,
                               &max_hostname_size,
                               1,
                               MPI_INT,
                               MPI_MAX,
                               MPI_COMM_WORLD);
          AssertThrowMPI(ierr);
          std::vector<char> hostname_array(max_hostname_size);
          std::copy(hostname.c_str(),
                    hostname.c_str() + hostname.size() + 1,
                    hostname_array.begin());

          int n_mpi_processes = 1;
          if (MPI_has_been_started)
            {
              ierr = MPI_Comm_size(MPI_COMM_WORLD, &n_mpi_processes);
              AssertThrowMPI(ierr);
            }
          std::vector<char> all_hostnames(max_hostname_size * n_mpi_processes);
          ierr = MPI_Allgather(hostname_array.data(),
                               max_hostname_size,
                               MPI_CHAR,
                               all_hostnames.data(),
                               max_hostname_size,
                               MPI_CHAR,
                               MPI_COMM_WORLD);
          AssertThrowMPI(ierr);

          // count how often our own hostname appears, and which instance
          // among these the current process is
          unsigned int n_local_processes = 0;
          unsigned int nth_process_on_host = 0;
          int rank = 0;
          if (MPI_has_been_started)
            {
              ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
              AssertThrowMPI(ierr);
            }
          for (int i = 0; i < n_mpi_processes; ++i)
            if (std::string(all_hostnames.data() + i * max_hostname_size) ==
                hostname)
              {
                ++n_local_processes;
                if (i <= rank)
                  ++nth_process_on_host;
              }
          Assert(nth_process_on_host > 0, ExcInternalError());


          // compute how many cores each process gets. if the number does not
          // divide evenly, then we get one more core if we are among the
          // first few processes
          //
          // if the number would be zero, round up to one since every process
          // needs to have at least one thread
          n_threads =
            std::max(MultithreadInfo::n_cores() / n_local_processes +
                       (nth_process_on_host <=
                            MultithreadInfo::n_cores() % n_local_processes ?
                          1 :
                          0),
                     1U);
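          // (Example: with MultithreadInfo::n_cores() == 12 and
          // n_local_processes == 5, the integer division gives 2 and the
          // remainder is 2, so the first two processes on this host get
          // 3 threads each and the remaining three get 2.)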
        }
#endif

      // finally set this number of threads
      MultithreadInfo::set_thread_limit(n_threads);
    }

  // Initialize Kokkos
  if (static_cast<bool>(libraries & InitializeLibrary::Kokkos))
    {
      // argv has argc+1 elements and the last one is a nullptr. For appending
      // one element we thus create a new argv by copying the first argc
      // elements, append the new option, and then a nullptr.
      //
      // We do get in trouble, though, if a user program is called with
      // '--help' as a command line argument. This '--help' gets passed on to
      // Kokkos, which promptly responds with a lengthy message that the user
      // likely did not intend. As a consequence, filter out this specific
      // flag.
      std::vector<char *> argv_new;
      for (auto *const arg : make_array_view(&argv[0], &argv[0] + argc))
        if (strcmp(arg, "--help") != 0)
          argv_new.push_back(arg);

      std::stringstream threads_flag;
#if KOKKOS_VERSION >= 30700
      threads_flag << "--kokkos-num-threads=" << MultithreadInfo::n_threads();
#else
      threads_flag << "--kokkos-threads=" << MultithreadInfo::n_threads();
#endif
      const std::string threads_flag_string = threads_flag.str();
      argv_new.push_back(const_cast<char *>(threads_flag_string.c_str()));
      argv_new.push_back(nullptr);
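      // (For a hypothetical program invoked as "./app -p 2", argv_new now
      // holds {"./app", "-p", "2", "--kokkos-num-threads=<n>", nullptr},
      // or "--kokkos-threads=<n>" for Kokkos older than 3.7, with <n>
      // taken from MultithreadInfo::n_threads().)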

      // The first argument in Kokkos::initialize is of type int&. Hence, we
      // need to define a new variable to pass to it (instead of using argc+1
      // inline).
      int argc_new = argv_new.size() - 1;
      Kokkos::initialize(argc_new, argv_new.data());
    }

  // As a final step call the at_mpi_init() signal handler.
  signals.at_mpi_init();
}



void
InitFinalize::register_request(MPI_Request &request)
{
  // insert if it is not in the set already:
  requests.insert(&request);
}



void
InitFinalize::unregister_request(MPI_Request &request)
{
  Assert(requests.find(&request) != requests.end(),
         ExcMessage(
           "You tried to call unregister_request() with an invalid request."));

  requests.erase(&request);
}



std::set<MPI_Request *> InitFinalize::requests;



void
InitFinalize::finalize()
{
  if (!is_finalized)
    {
      // First, call the at_mpi_finalize() signal handler.
      signals.at_mpi_finalize();

      // make the memory pool release all PETSc/Trilinos/MPI-based vectors
      // that are no longer used at this point. this is relevant because the
      // destructors of the static objects holding these vectors would
      // otherwise only run after MPI_Finalize is called, leading to errors

#ifdef DEAL_II_WITH_MPI
      // Before exiting, wait for nonblocking communication to complete:
      for (auto *request : requests)
        {
          const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
          AssertThrowMPI(ierr);
        }

      // Start with deal.II MPI vectors and delete vectors from the pools:
      GrowingVectorMemory<
        LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
      GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
        release_unused_memory();
      GrowingVectorMemory<
        LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
      GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
        release_unused_memory();

      // Next with Trilinos:
#  ifdef DEAL_II_WITH_TRILINOS
      GrowingVectorMemory<
        TrilinosWrappers::MPI::Vector>::release_unused_memory();
      GrowingVectorMemory<
        TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
#  endif
#endif


      // Now deal with PETSc (with or without MPI). Only delete the vectors if
      // finalize hasn't been called yet, otherwise this will lead to errors.
#ifdef DEAL_II_WITH_PETSC
      if (!PetscFinalizeCalled)
        {
          GrowingVectorMemory<
            PETScWrappers::MPI::Vector>::release_unused_memory();
          GrowingVectorMemory<
            PETScWrappers::MPI::BlockVector>::release_unused_memory();
        }
#  ifdef DEAL_II_WITH_SLEPC
      // and now end SLEPc with PETSc if we did so
      if (static_cast<bool>(libraries & InitializeLibrary::SLEPc) &&
          finalize_petscslepc)
        {
          PetscErrorCode ierr = SlepcFinalize();
          AssertThrow(ierr == 0, ExcSLEPcError(ierr));
        }
#  else
      // or just end PETSc if we did so
      if (static_cast<bool>(libraries & InitializeLibrary::PETSc) &&
          finalize_petscslepc)
        {
          PetscErrorCode ierr = PetscFinalize();
          AssertThrow(ierr == 0, ExcPETScError(ierr));
        }
#  endif
#endif

#ifdef DEAL_II_WITH_P4EST
      // now end p4est and libsc
      // Note: p4est has no finalize function
      if (static_cast<bool>(libraries & InitializeLibrary::P4EST))
        sc_finalize();
#endif


      // Finalize Kokkos
      if (static_cast<bool>(libraries & InitializeLibrary::Kokkos))
        Kokkos::finalize();

      // only MPI_Finalize if we are running with MPI. We also need to do this
      // when running PETSc, because we initialize MPI ourselves before
      // calling PetscInitialize
#ifdef DEAL_II_WITH_MPI
      int MPI_has_been_started = 0;
      const int ierr = MPI_Initialized(&MPI_has_been_started);
      AssertThrowMPI(ierr);
      if (static_cast<bool>(libraries & InitializeLibrary::MPI) &&
          (MPI_has_been_started))
        {
#  if __cpp_lib_uncaught_exceptions >= 201411
          // std::uncaught_exception() is deprecated in c++17
          if (std::uncaught_exceptions() > 0)
#  else
          if (std::uncaught_exception() == true)
#  endif
            {
              // do not try to call MPI_Finalize to avoid a deadlock.
            }
          else
            {
              const int ierr = MPI_Finalize();
              (void)ierr;
              AssertNothrow(ierr == MPI_SUCCESS, dealii::ExcMPI(ierr));
            }
        }
#endif
      is_finalized = true;
    }
}



InitFinalize::~InitFinalize()
{
  finalize();
}


DEAL_II_NAMESPACE_CLOSE
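In application code the class defined in this file is usually not instantiated directly; deal.II programs typically go through Utilities::MPI::MPI_InitFinalize (declared in <deal.II/base/mpi.h>), which builds on the initialization and finalization steps shown above. A minimal usage sketch, assuming an MPI-enabled build of deal.II (the program body is only an illustration):

#include <deal.II/base/mpi.h>

#include <iostream>

int main(int argc, char **argv)
{
  using namespace dealii;

  // Initialize MPI (and, where configured, PETSc/SLEPc, p4est, Zoltan, and
  // Kokkos), limiting each MPI process to one thread. Everything is finalized
  // automatically when the object goes out of scope at the end of main().
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  std::cout << "This is process "
            << Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) << " of "
            << Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) << '.'
            << std::endl;

  return 0;
}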