Reference documentation for deal.II version 9.4.1
mpi_remote_point_evaluation.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2021 - 2022 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_mpi_mpi_remote_point_evaluation_h
#define dealii_mpi_mpi_remote_point_evaluation_h

#include <deal.II/base/config.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi_tags.h>

DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    // Helper class to access values at arbitrarily positioned points on a
    // distributed triangulation: the cells containing the points are searched
    // for, values are evaluated cell by cell, and the results are communicated
    // between the owning and the requesting MPI processes.
    template <int dim, int spacedim = dim>
    class RemotePointEvaluation
    {
    public:
      // Constructor. The tolerance (in reference-cell coordinates) is used
      // when searching for the cells that surround a given point.
      RemotePointEvaluation(
        const double       tolerance              = 1e-6,
        const bool         enforce_unique_mapping = false,
        const unsigned int rtree_level            = 0,
        const std::function<std::vector<bool>()> &marked_vertices = {});

      // Destructor.
      ~RemotePointEvaluation();

      // Set up the internal data structures and the communication pattern for
      // the given points, based on the triangulation and mapping. This is a
      // collective call that has to be executed by all MPI processes.
      void
      reinit(const std::vector<Point<spacedim>> &points,
             const Triangulation<dim, spacedim> &tria,
             const Mapping<dim, spacedim>       &mapping);

      // Data of the points located in the locally owned cells, sorted cell by
      // cell.
      struct CellData
      {
        // Level and index of the cells containing points.
        std::vector<std::pair<int, int>> cells;

        // CRS-like pointers: the points located in cells[i] occupy the range
        // [reference_point_ptrs[i], reference_point_ptrs[i + 1]).
        std::vector<unsigned int> reference_point_ptrs;

        // Coordinates of the points within the reference cell.
        std::vector<Point<dim>> reference_point_values;
      };
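
      // Illustrative sketch (not part of the original header): what an
      // evaluation_function handed to evaluate_and_process() typically looks
      // like. Here "tria" is assumed to be the Triangulation registered via
      // reinit() and "my_function" is a hypothetical callback evaluated on a
      // cell at a reference point.
      //
      // @code
      //   const auto evaluation_function =
      //     [&](const ArrayView<double> &values, const CellData &cell_data) {
      //       for (unsigned int i = 0; i < cell_data.cells.size(); ++i)
      //         {
      //           typename Triangulation<dim, spacedim>::cell_iterator cell(
      //             &tria, cell_data.cells[i].first, cell_data.cells[i].second);
      //
      //           for (unsigned int j = cell_data.reference_point_ptrs[i];
      //                j < cell_data.reference_point_ptrs[i + 1];
      //                ++j)
      //             values[j] =
      //               my_function(cell, cell_data.reference_point_values[j]);
      //         }
      //     };
      // @endcode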

      // Evaluate the given function at the registered points. The results,
      // sorted according to the points passed to reinit(), are returned in
      // output; buffer is used as scratch space. This is a collective call.
      template <typename T>
      void
      evaluate_and_process(
        std::vector<T> &output,
        std::vector<T> &buffer,
        const std::function<void(const ArrayView<T> &, const CellData &)>
          &evaluation_function) const;

      // The inverse of evaluate_and_process(): hand the values given in input
      // (one block of values per point, see get_point_ptrs()) to the
      // evaluation_function, sorted cell by cell. This is a collective call.
      template <typename T>
      void
      process_and_evaluate(
        const std::vector<T> &input,
        std::vector<T>       &buffer,
        const std::function<void(const ArrayView<const T> &, const CellData &)>
          &evaluation_function) const;

      // CRS-like pointers: the values belonging to point i are stored at
      // positions [ptrs[i], ptrs[i + 1]) of the output of
      // evaluate_and_process().
      const std::vector<unsigned int> &
      get_point_ptrs() const;

      // Return whether points and cells are in a one-to-one relation.
      bool
      is_map_unique() const;

      // Return whether all points passed to reinit() have been found.
      bool
      all_points_found() const;

      // Return whether point i has been found.
      bool
      point_found(const unsigned int i) const;

      // Return the Triangulation object used during reinit().
      const Triangulation<dim, spacedim> &
      get_triangulation() const;

      // Return the Mapping object used during reinit().
      const Mapping<dim, spacedim> &
      get_mapping() const;

      // Return whether the internal data structures have been set up and are
      // still valid.
      bool
      is_ready() const;

    private:
      // Tolerance (in reference-cell coordinates) used when searching for the
      // cells surrounding a point.
      const double tolerance;

      // If true, enforce a one-to-one relation of points and cells.
      const bool enforce_unique_mapping;

      // RTree level used during the construction of the bounding boxes.
      const unsigned int rtree_level;

      // Function that marks the vertices relevant for the cell search.
      const std::function<std::vector<bool>()> marked_vertices;

      // Connection to the triangulation signal that invalidates this object
      // when the triangulation changes.
      boost::signals2::connection tria_signal;

      // Flag indicating whether reinit() has been called and the internal
      // data structures are still valid.
      bool ready_flag;

      // Triangulation passed to reinit().
      SmartPointer<const Triangulation<dim, spacedim>> tria;

      // Mapping passed to reinit().
      SmartPointer<const Mapping<dim, spacedim>> mapping;

      // Whether points and cells are in a one-to-one relation.
      bool unique_mapping;

      // Whether all points passed to reinit() have been found.
      bool all_points_found_flag;

      // CRS-like pointers from points into the result/receive buffer.
      std::vector<unsigned int> point_ptrs;

      // Permutation index within the receive buffer.
      std::vector<unsigned int> recv_permutation;

      // Ranges within the receive buffer filled by the ranks in recv_ranks.
      std::vector<unsigned int> recv_ptrs;

      // Ranks from which data is received.
      std::vector<unsigned int> recv_ranks;

      // Point data sorted cell by cell, handed to the evaluation functions.
      CellData cell_data;

      // Permutation index within the send buffer.
      std::vector<unsigned int> send_permutation;

      // Ranks to which data is sent.
      std::vector<unsigned int> send_ranks;

      // Ranges within the send buffer sent to the ranks in send_ranks.
      std::vector<unsigned int> send_ptrs;
    };
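
    // Illustrative usage sketch (not part of the original header). It shows
    // the typical call sequence; "points", "tria", "mapping", and
    // "evaluation_function" are assumed to be provided by the caller, and the
    // result handling assumes a one-to-one mapping of points to cells.
    //
    // @code
    //   Utilities::MPI::RemotePointEvaluation<dim> rpe;
    //   rpe.reinit(points, tria, mapping);
    //
    //   Assert(rpe.all_points_found(), ExcMessage("Not all points found."));
    //
    //   std::vector<double> values, buffer;
    //   rpe.evaluate_and_process(values, buffer, evaluation_function);
    //
    //   // If is_map_unique() == false, use get_point_ptrs() to locate the
    //   // (possibly zero or multiple) values belonging to each point.
    // @endcode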


    template <int dim, int spacedim>
    template <typename T>
    void
    RemotePointEvaluation<dim, spacedim>::evaluate_and_process(
      std::vector<T> &output,
      std::vector<T> &buffer,
      const std::function<void(const ArrayView<T> &, const CellData &)>
        &evaluation_function) const
    {
#ifndef DEAL_II_WITH_MPI
      Assert(false, ExcNeedsMPI());
      (void)output;
      (void)buffer;
      (void)evaluation_function;
#else
      static CollectiveMutex      mutex;
      CollectiveMutex::ScopedLock lock(mutex, tria->get_communicator());

      output.resize(point_ptrs.back());
      buffer.resize(send_permutation.size() * 2);
      ArrayView<T> buffer_1(buffer.data(), buffer.size() / 2);
      ArrayView<T> buffer_2(buffer.data() + buffer.size() / 2,
                            buffer.size() / 2);

      // evaluate functions at points
      evaluation_function(buffer_1, cell_data);

      // sort for communication
      for (unsigned int i = 0; i < send_permutation.size(); ++i)
        buffer_2[send_permutation[i]] = buffer_1[i];

      // process remote quadrature points and send them away
      std::map<unsigned int, std::vector<char>> temp_map;

      std::vector<MPI_Request> requests;
      requests.reserve(send_ranks.size());

      const unsigned int my_rank =
        Utilities::MPI::this_mpi_process(tria->get_communicator());

      std::map<unsigned int, std::vector<T>> temp_recv_map;

      for (unsigned int i = 0; i < send_ranks.size(); ++i)
        {
          if (send_ranks[i] == my_rank)
            {
              // process locally-owned values
              temp_recv_map[my_rank] =
                std::vector<T>(buffer_2.begin() + send_ptrs[i],
                               buffer_2.begin() + send_ptrs[i + 1]);
              continue;
            }

          temp_map[send_ranks[i]] =
            Utilities::pack(std::vector<T>(buffer_2.begin() + send_ptrs[i],
                                           buffer_2.begin() + send_ptrs[i + 1]),
                            false);

          auto &buffer = temp_map[send_ranks[i]];

          requests.push_back(MPI_Request());

          const int ierr = MPI_Isend(buffer.data(),
                                     buffer.size(),
                                     MPI_CHAR,
                                     send_ranks[i],
                                     internal::Tags::remote_point_evaluation,
                                     tria->get_communicator(),
                                     &requests.back());
          AssertThrowMPI(ierr);
        }

      for (const auto recv_rank : recv_ranks)
        {
          if (recv_rank == my_rank)
            continue;

          MPI_Status status;
          int        ierr = MPI_Probe(MPI_ANY_SOURCE,
                                      internal::Tags::remote_point_evaluation,
                                      tria->get_communicator(),
                                      &status);
          AssertThrowMPI(ierr);

          int message_length;
          ierr = MPI_Get_count(&status, MPI_CHAR, &message_length);
          AssertThrowMPI(ierr);

          std::vector<char> buffer(message_length);

          ierr = MPI_Recv(buffer.data(),
                          buffer.size(),
                          MPI_CHAR,
                          status.MPI_SOURCE,
                          internal::Tags::remote_point_evaluation,
                          tria->get_communicator(),
                          MPI_STATUS_IGNORE);
          AssertThrowMPI(ierr);

          temp_recv_map[status.MPI_SOURCE] =
            Utilities::unpack<std::vector<T>>(buffer, false);
        }

      // make sure all messages have been sent
      const int ierr =
        MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);

      // copy received data into output vector
      auto it = recv_permutation.begin();
      for (const auto &j : temp_recv_map)
        for (const auto &i : j.second)
          {
            output[*it] = i;
            it++;
          }
#endif
    }


    template <int dim, int spacedim>
    template <typename T>
    void
    RemotePointEvaluation<dim, spacedim>::process_and_evaluate(
      const std::vector<T> &input,
      std::vector<T>       &buffer,
      const std::function<void(const ArrayView<const T> &, const CellData &)>
        &evaluation_function) const
    {
#ifndef DEAL_II_WITH_MPI
      Assert(false, ExcNeedsMPI());
      (void)input;
      (void)buffer;
      (void)evaluation_function;
#else
      static CollectiveMutex      mutex;
      CollectiveMutex::ScopedLock lock(mutex, tria->get_communicator());

      const auto &ptr = this->get_point_ptrs();

      std::map<unsigned int, std::vector<T>> temp_recv_map;

      for (unsigned int i = 0; i < recv_ranks.size(); ++i)
        temp_recv_map[recv_ranks[i]].resize(recv_ptrs[i + 1] - recv_ptrs[i]);

      const unsigned int my_rank =
        Utilities::MPI::this_mpi_process(tria->get_communicator());

# ifdef DEBUG
      {
        unsigned int i = 0;

        for (auto &j : temp_recv_map)
          i += j.second.size();

        AssertDimension(recv_permutation.size(), i);
      }
# endif

      {
        // duplicate data to be able to sort it more easily in the next step
        std::vector<T> buffer_(ptr.back());
        for (unsigned int i = 0, c = 0; i < ptr.size() - 1; ++i)
          {
            const auto n_entries = ptr[i + 1] - ptr[i];

            for (unsigned int j = 0; j < n_entries; ++j, ++c)
              buffer_[c] = input[i];
          }

        // sort data according to the ranks
        auto it = recv_permutation.begin();
        for (auto &j : temp_recv_map)
          for (auto &i : j.second)
            {
              i = buffer_[*it];
              it++;
            }
      }

      // buffer.resize(point_ptrs.back());
      buffer.resize(send_permutation.size() * 2);
      ArrayView<T> buffer_1(buffer.data(), buffer.size() / 2);
      ArrayView<T> buffer_2(buffer.data() + buffer.size() / 2,
                            buffer.size() / 2);

      // process remote quadrature points and send them away
      std::map<unsigned int, std::vector<char>> temp_map;

      std::vector<MPI_Request> requests;
      requests.reserve(recv_ranks.size());

      for (const auto recv_rank : recv_ranks)
        {
          if (recv_rank == my_rank)
            continue;

          temp_map[recv_rank] =
            Utilities::pack(temp_recv_map[recv_rank], false);

          auto &buffer_send = temp_map[recv_rank];

          requests.push_back(MPI_Request());

          const int ierr = MPI_Isend(buffer_send.data(),
                                     buffer_send.size(),
                                     MPI_CHAR,
                                     recv_rank,
                                     internal::Tags::remote_point_evaluation,
                                     tria->get_communicator(),
                                     &requests.back());
          AssertThrowMPI(ierr);
        }

      for (unsigned int i = 0; i < send_ranks.size(); ++i)
        {
          if (send_ranks[i] == my_rank)
            {
              const auto &buffer_send = temp_recv_map[send_ranks[i]];
              // process locally-owned values
              const unsigned int j = std::distance(send_ranks.begin(),
                                                   std::find(send_ranks.begin(),
                                                             send_ranks.end(),
                                                             my_rank));

              AssertDimension(buffer_send.size(),
                              send_ptrs[j + 1] - send_ptrs[j]);

              for (unsigned int i = send_ptrs[j], c = 0; i < send_ptrs[j + 1];
                   ++i, ++c)
                buffer_1[i] = buffer_send[c];

              continue;
            }

          MPI_Status status;
          int        ierr = MPI_Probe(MPI_ANY_SOURCE,
                                      internal::Tags::remote_point_evaluation,
                                      tria->get_communicator(),
                                      &status);
          AssertThrowMPI(ierr);

          int message_length;
          ierr = MPI_Get_count(&status, MPI_CHAR, &message_length);
          AssertThrowMPI(ierr);

          std::vector<char> recv_buffer(message_length);

          ierr = MPI_Recv(recv_buffer.data(),
                          recv_buffer.size(),
                          MPI_CHAR,
                          status.MPI_SOURCE,
                          internal::Tags::remote_point_evaluation,
                          tria->get_communicator(),
                          MPI_STATUS_IGNORE);
          AssertThrowMPI(ierr);

          const auto recv_buffer_unpacked =
            Utilities::unpack<std::vector<T>>(recv_buffer, false);

          auto ptr =
            std::find(send_ranks.begin(), send_ranks.end(), status.MPI_SOURCE);

          Assert(ptr != send_ranks.end(), ExcNotImplemented());

          const unsigned int j = std::distance(send_ranks.begin(), ptr);

          AssertDimension(recv_buffer_unpacked.size(),
                          send_ptrs[j + 1] - send_ptrs[j]);

          for (unsigned int i = send_ptrs[j], c = 0; i < send_ptrs[j + 1];
               ++i, ++c)
            {
              AssertIndexRange(i, buffer_1.size());
              AssertIndexRange(c, recv_buffer_unpacked.size());
              buffer_1[i] = recv_buffer_unpacked[c];
            }
        }

      const int ierr =
        MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);

      // sort for easy access during function call
      for (unsigned int i = 0; i < send_permutation.size(); ++i)
        buffer_2[i] = buffer_1[send_permutation[i]];

      // evaluate function at points
      evaluation_function(buffer_2, cell_data);
#endif
    }
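
    // Illustrative usage sketch (not part of the original header): sending
    // values attached to the points back to the owning cells, the inverse of
    // evaluate_and_process(). "rpe" and "point_values" are placeholders; the
    // lambda only prints the values it receives, cell by cell.
    //
    // @code
    //   std::vector<double> buffer;
    //   rpe.process_and_evaluate<double>(
    //     point_values,
    //     buffer,
    //     [](const ArrayView<const double> &values, const auto &cell_data) {
    //       for (unsigned int i = 0; i < cell_data.cells.size(); ++i)
    //         for (unsigned int j = cell_data.reference_point_ptrs[i];
    //              j < cell_data.reference_point_ptrs[i + 1];
    //              ++j)
    //           std::cout << values[j] << std::endl;
    //     });
    // @endcode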

  } // end of namespace MPI
} // end of namespace Utilities


DEAL_II_NAMESPACE_CLOSE

#endif