Reference documentation for deal.II version 9.4.1
tria_base.cc
Go to the documentation of this file.
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2015 - 2022 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16
19#include <deal.II/base/mpi.templates.h>
22
25
28#include <deal.II/grid/tria.h>
31
35
36#include <algorithm>
37#include <cstdint>
38#include <fstream>
39#include <iostream>
40#include <numeric>
41
42
43DEAL_II_NAMESPACE_OPEN
44
45namespace parallel
46{
47 template <int dim, int spacedim>
48 TriangulationBase<dim, spacedim>::TriangulationBase(
49 const MPI_Comm &mpi_communicator,
50 const typename ::Triangulation<dim, spacedim>::MeshSmoothing
51 smooth_grid,
52 const bool check_for_distorted_cells)
53 : ::Triangulation<dim, spacedim>(smooth_grid,
54 check_for_distorted_cells)
55 , mpi_communicator(mpi_communicator)
56 , my_subdomain(Utilities::MPI::this_mpi_process(this->mpi_communicator))
57 , n_subdomains(Utilities::MPI::n_mpi_processes(this->mpi_communicator))
58 {
59#ifndef DEAL_II_WITH_MPI
60 Assert(false, ExcNeedsMPI());
61#endif
62 }
63
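// Usage sketch (not from this file): TriangulationBase is only used through
// derived classes such as parallel::distributed::Triangulation or
// parallel::shared::Triangulation, which pass an MPI communicator down to the
// constructor above, e.g.
// @code
//   parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
//   GridGenerator::hyper_cube(tria);
//   tria.refine_global(3);
// @endcode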
64
65
66 template <int dim, int spacedim>
67 void
68 TriangulationBase<dim, spacedim>::copy_triangulation(
69 const ::Triangulation<dim, spacedim> &other_tria)
70 {
71#ifndef DEAL_II_WITH_MPI
72 (void)other_tria;
73 Assert(false, ExcNeedsMPI());
74#else
75 ::Triangulation<dim, spacedim>::copy_triangulation(other_tria);
76
77 if (const ::parallel::TriangulationBase<dim, spacedim> *other_tria_x =
78 dynamic_cast<const ::parallel::TriangulationBase<dim, spacedim>
79 *>(&other_tria))
80 {
81 // release unused vector memory because we will have very different
82 // vectors now
83 internal::GrowingVectorMemoryImplementation::
84 release_all_unused_memory();
85 }
86#endif
87 }
88
89
90
91 template <int dim, int spacedim>
92 std::size_t
93 TriangulationBase<dim, spacedim>::memory_consumption() const
94 {
95 std::size_t mem =
96 this->::Triangulation<dim, spacedim>::memory_consumption() +
97 MemoryConsumption::memory_consumption(this->mpi_communicator) +
98 MemoryConsumption::memory_consumption(my_subdomain) +
99 MemoryConsumption::memory_consumption(
100 number_cache.n_global_active_cells) +
101 MemoryConsumption::memory_consumption(number_cache.n_global_levels);
102 return mem;
103 }
104
105 template <int dim, int spacedim>
106 TriangulationBase<dim, spacedim>::~TriangulationBase()
107 {
108 // release unused vector memory because the vector layout is going to look
109 // very different now
110 internal::GrowingVectorMemoryImplementation::
111 release_all_unused_memory();
112 }
113
114 template <int dim, int spacedim>
115 TriangulationBase<dim, spacedim>::NumberCache::NumberCache()
116 : n_locally_owned_active_cells(0)
117 , number_of_global_coarse_cells(0)
118 , n_global_levels(0)
119 {}
120
121 template <int dim, int spacedim>
122 unsigned int
123 TriangulationBase<dim, spacedim>::n_locally_owned_active_cells() const
124 {
125 return number_cache.n_locally_owned_active_cells;
126 }
127
128 template <int dim, int spacedim>
129 unsigned int
130 TriangulationBase<dim, spacedim>::n_global_levels() const
131 {
132 return number_cache.n_global_levels;
133 }
134
135 template <int dim, int spacedim>
136 types::global_cell_index
137 TriangulationBase<dim, spacedim>::n_global_active_cells() const
138 {
139 return number_cache.n_global_active_cells;
140 }
141
142 template <int dim, int spacedim>
143 MPI_Comm
144 TriangulationBase<dim, spacedim>::get_communicator() const
145 {
146 return mpi_communicator;
147 }
148
149#ifdef DEAL_II_WITH_MPI
150 template <int dim, int spacedim>
151 void
152 TriangulationBase<dim, spacedim>::update_number_cache()
153 {
154 number_cache.ghost_owners.clear();
155 number_cache.level_ghost_owners.clear();
156 number_cache.n_locally_owned_active_cells = 0;
157
158 if (this->n_levels() == 0)
159 {
160 // Skip communication done below if we do not have any cells
161 // (meaning the Triangulation is empty on all processors). This will
162 // happen when called from the destructor of Triangulation, which
163 // can get called during exception handling causing a hang in this
164 // function.
165 number_cache.n_global_active_cells = 0;
166 number_cache.n_global_levels = 0;
167 return;
168 }
169
170
171 {
172 // find ghost owners
173 for (const auto &cell : this->active_cell_iterators())
174 if (cell->is_ghost())
175 number_cache.ghost_owners.insert(cell->subdomain_id());
176
177 Assert(number_cache.ghost_owners.size() <
178 Utilities::MPI::n_mpi_processes(this->mpi_communicator),
179 ExcInternalError());
180 }
181
182 if (this->n_levels() > 0)
183 number_cache.n_locally_owned_active_cells = std::count_if(
184 this->begin_active(),
185 typename Triangulation<dim, spacedim>::active_cell_iterator(
186 this->end()),
187 [](const auto &i) { return i.is_locally_owned(); });
188 else
189 number_cache.n_locally_owned_active_cells = 0;
190
191 // Potentially cast to a 64 bit type before accumulating to avoid
192 // overflow:
193 number_cache.n_global_active_cells =
194 Utilities::MPI::sum(static_cast<types::global_cell_index>(
195 number_cache.n_locally_owned_active_cells),
196 this->mpi_communicator);
197
198 number_cache.n_global_levels =
199 Utilities::MPI::max(this->n_levels(), this->mpi_communicator);
200
201 // Store MPI ranks of level ghost owners of this processor on all
202 // levels.
203 if (this->is_multilevel_hierarchy_constructed() == true)
204 {
205 number_cache.level_ghost_owners.clear();
206
207 // if there is nothing to do, then do nothing
208 if (this->n_levels() == 0)
209 return;
211 // find level ghost owners
212 for (const auto &cell : this->cell_iterators())
213 if (cell->level_subdomain_id() != numbers::artificial_subdomain_id &&
214 cell->level_subdomain_id() != this->locally_owned_subdomain())
215 this->number_cache.level_ghost_owners.insert(
216 cell->level_subdomain_id());
218# ifdef DEBUG
219 // Check that level_ghost_owners is symmetric by sending a message
220 // to everyone
221 {
222 int ierr = MPI_Barrier(this->mpi_communicator);
223 AssertThrowMPI(ierr);
224
225 const int mpi_tag = Utilities::MPI::internal::Tags::
226 triangulation_base_fill_level_ghost_owners;
227
228 // important: preallocate to avoid (re)allocation:
229 std::vector<MPI_Request> requests(
230 this->number_cache.level_ghost_owners.size());
231 unsigned int dummy = 0;
232 unsigned int req_counter = 0;
233
234 for (const auto &it : this->number_cache.level_ghost_owners)
235 {
236 ierr = MPI_Isend(&dummy,
237 1,
238 MPI_UNSIGNED,
239 it,
240 mpi_tag,
241 this->mpi_communicator,
242 &requests[req_counter]);
243 AssertThrowMPI(ierr);
244 ++req_counter;
245 }
246
247 for (const auto &it : this->number_cache.level_ghost_owners)
248 {
249 unsigned int dummy;
250 ierr = MPI_Recv(&dummy,
251 1,
252 MPI_UNSIGNED,
253 it,
254 mpi_tag,
255 this->mpi_communicator,
256 MPI_STATUS_IGNORE);
257 AssertThrowMPI(ierr);
258 }
259
260 if (requests.size() > 0)
261 {
262 ierr = MPI_Waitall(requests.size(),
263 requests.data(),
264 MPI_STATUSES_IGNORE);
265 AssertThrowMPI(ierr);
266 }
267
268 ierr = MPI_Barrier(this->mpi_communicator);
269 AssertThrowMPI(ierr);
270 }
271# endif
272
273 Assert(this->number_cache.level_ghost_owners.size() <
274 Utilities::MPI::n_mpi_processes(this->mpi_communicator),
275 ExcInternalError());
276 }
277
278 this->number_cache.number_of_global_coarse_cells = this->n_cells(0);
279
280 // reset global cell ids
281 this->reset_global_cell_indices();
282 }
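// Usage sketch (not from this file): the values cached by update_number_cache()
// are what the public queries of this class report on every rank, e.g.
// @code
//   // tria is any triangulation derived from parallel::TriangulationBase
//   const auto n_active = tria.n_global_active_cells();
//   const auto n_levels = tria.n_global_levels();
//   for (const types::subdomain_id owner : tria.ghost_owners())
//     deallog << "rank " << tria.locally_owned_subdomain()
//             << " has ghost cells owned by " << owner << std::endl;
// @endcode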
283
284#else
285
286 template <int dim, int spacedim>
287 void
288 TriangulationBase<dim, spacedim>::update_number_cache()
289 {
290 Assert(false, ExcNeedsMPI());
291 }
292
293#endif
295 template <int dim, int spacedim>
296 void
297 TriangulationBase<dim, spacedim>::update_reference_cells()
298 {
299 // run algorithm for locally-owned cells
300 ::Triangulation<dim, spacedim>::update_reference_cells();
301
302 // translate ReferenceCell to unsigned int (needed by
303 // Utilities::MPI::compute_set_union)
304 std::vector<unsigned int> reference_cells_ui;
305
306 for (const auto &i : this->reference_cells)
307 reference_cells_ui.push_back(static_cast<unsigned int>(i));
308
309 // create union
310 reference_cells_ui =
311 Utilities::MPI::compute_set_union(reference_cells_ui,
312 this->mpi_communicator);
313
314 // transform back and store result
315 this->reference_cells.clear();
316 for (const auto &i : reference_cells_ui)
317 this->reference_cells.emplace_back(
318 internal::make_reference_cell_from_int(i));
319 }
320
321
322
323 template <int dim, int spacedim>
324 types::subdomain_id
325 TriangulationBase<dim, spacedim>::locally_owned_subdomain() const
326 {
327 return my_subdomain;
328 }
329
330
331
332 template <int dim, int spacedim>
333 const std::set<types::subdomain_id> &
334 TriangulationBase<dim, spacedim>::ghost_owners() const
335 {
336 return number_cache.ghost_owners;
337 }
338
339
340
341 template <int dim, int spacedim>
342 const std::set<types::subdomain_id> &
343 TriangulationBase<dim, spacedim>::level_ghost_owners() const
344 {
345 return number_cache.level_ghost_owners;
346 }
347
348
349
350 template <int dim, int spacedim>
351 std::vector<types::boundary_id>
352 TriangulationBase<dim, spacedim>::get_boundary_ids() const
353 {
354 return Utilities::MPI::compute_set_union(
355 ::Triangulation<dim, spacedim>::get_boundary_ids(),
356 this->mpi_communicator);
357 }
358
359
360
361 template <int dim, int spacedim>
362 std::vector<types::manifold_id>
363 TriangulationBase<dim, spacedim>::get_manifold_ids() const
364 {
365 return Utilities::MPI::compute_set_union(
366 ::Triangulation<dim, spacedim>::get_manifold_ids(),
367 this->mpi_communicator);
368 }
369
370
371
372 template <int dim, int spacedim>
373 void
374 TriangulationBase<dim, spacedim>::reset_global_cell_indices()
375 {
376#ifndef DEAL_II_WITH_MPI
377 Assert(false, ExcNeedsMPI());
378#else
379 if (const auto pst =
380 dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
381 this))
382 if (pst->with_artificial_cells() == false)
383 {
384 // Specialization for parallel::shared::Triangulation without
385 // artificial cells. The code below only works if a halo of a single
386 // ghost cells is needed.
387
388 std::vector<unsigned int> cell_counter(n_subdomains + 1);
390 // count number of cells of each process
391 for (const auto &cell : this->active_cell_iterators())
392 cell_counter[cell->subdomain_id() + 1]++;
393
394 // take prefix sum to obtain offset of each process
395 for (unsigned int i = 0; i < n_subdomains; ++i)
396 cell_counter[i + 1] += cell_counter[i];
397
398 AssertDimension(cell_counter.back(), this->n_active_cells());
399
400 // create partitioners
401 IndexSet is_local(this->n_active_cells());
402 is_local.add_range(cell_counter[my_subdomain],
403 cell_counter[my_subdomain + 1]);
404 number_cache.active_cell_index_partitioner =
405 std::make_shared<const Utilities::MPI::Partitioner>(
406 is_local,
407 complete_index_set(this->n_active_cells()),
408 this->mpi_communicator);
409
410 // set global active cell indices and increment process-local counters
411 for (const auto &cell : this->active_cell_iterators())
412 cell->set_global_active_cell_index(
413 cell_counter[cell->subdomain_id()]++);
414
415 Assert(this->is_multilevel_hierarchy_constructed() == false,
416 ExcNotImplemented());
417
418 return;
419 }
420
421 // 1) determine number of active locally-owned cells
422 const types::global_cell_index n_locally_owned_cells =
423 this->n_locally_owned_active_cells();
424
425 // 2) determine the offset of each process
426 types::global_cell_index cell_index = 0;
427
428 const int ierr = MPI_Exscan(
429 &n_locally_owned_cells,
430 &cell_index,
431 1,
432 Utilities::MPI::mpi_type_id_for_type<decltype(n_locally_owned_cells)>,
433 MPI_SUM,
434 this->mpi_communicator);
435 AssertThrowMPI(ierr);
436
437 // 3) give global indices to locally-owned cells and mark all other cells as
438 // invalid
439 std::pair<types::global_cell_index, types::global_cell_index> my_range;
440 my_range.first = cell_index;
441
442 for (const auto &cell : this->active_cell_iterators())
443 if (cell->is_locally_owned())
444 cell->set_global_active_cell_index(cell_index++);
445 else
446 cell->set_global_active_cell_index(numbers::invalid_dof_index);
448 my_range.second = cell_index;
449
450 // 4) determine the global indices of ghost cells
451 std::vector<types::global_dof_index> is_ghost_vector;
452 GridTools::exchange_cell_data_to_ghosts<types::global_cell_index>(
453 *this,
454 [](const auto &cell) { return cell->global_active_cell_index(); },
455 [&is_ghost_vector](const auto &cell, const auto &id) {
456 cell->set_global_active_cell_index(id);
457 is_ghost_vector.push_back(id);
458 });
459
460 // 5) set up new partitioner
461 IndexSet is_local(this->n_global_active_cells());
462 is_local.add_range(my_range.first, my_range.second);
463
464 std::sort(is_ghost_vector.begin(), is_ghost_vector.end());
465 IndexSet is_ghost(this->n_global_active_cells());
466 is_ghost.add_indices(is_ghost_vector.begin(), is_ghost_vector.end());
467
468 number_cache.active_cell_index_partitioner =
469 std::make_shared<const Utilities::MPI::Partitioner>(
470 is_local, is_ghost, this->mpi_communicator);
471
472 // 6) proceed with multigrid levels if requested
473 if (this->is_multilevel_hierarchy_constructed() == true)
474 {
475 // 1) determine number of locally-owned cells on levels
476 std::vector<types::global_cell_index> n_cells_level(
477 this->n_global_levels(), 0);
478
479 for (auto cell : this->cell_iterators())
480 if (cell->level_subdomain_id() == this->locally_owned_subdomain())
481 n_cells_level[cell->level()]++;
482
483 // 2) determine the offset of each process
484 std::vector<types::global_cell_index> cell_index(
485 this->n_global_levels(), 0);
486
487 int ierr = MPI_Exscan(
488 n_cells_level.data(),
489 cell_index.data(),
490 this->n_global_levels(),
491 Utilities::MPI::mpi_type_id_for_type<decltype(*n_cells_level.data())>,
492 MPI_SUM,
493 this->mpi_communicator);
494 AssertThrowMPI(ierr);
495
496 // 3) determine global number of "active" cells on each level
497 Utilities::MPI::sum(n_cells_level,
498 this->mpi_communicator,
499 n_cells_level);
500
501 // 4) give global indices to locally-owned cells on level and mark
502 // all other cells as invalid
503 std::vector<
504 std::pair<types::global_cell_index, types::global_cell_index>>
505 my_ranges(this->n_global_levels());
506 for (unsigned int l = 0; l < this->n_global_levels(); ++l)
507 my_ranges[l].first = cell_index[l];
508
509 for (auto cell : this->cell_iterators())
510 if (cell->level_subdomain_id() == this->locally_owned_subdomain())
511 cell->set_global_level_cell_index(cell_index[cell->level()]++);
512 else
513 cell->set_global_level_cell_index(numbers::invalid_dof_index);
514
515 for (unsigned int l = 0; l < this->n_global_levels(); ++l)
516 my_ranges[l].second = cell_index[l];
517
518 // 5) update the numbers of ghost level cells
519 std::vector<std::vector<types::global_dof_index>> is_ghost_vectors(
520 this->n_global_levels());
521 GridTools::exchange_cell_data_to_level_ghosts<
522 types::global_cell_index,
523 Triangulation<dim, spacedim>>(
524 *this,
525 [](const auto &cell) { return cell->global_level_cell_index(); },
526 [&is_ghost_vectors](const auto &cell, const auto &id) {
527 cell->set_global_level_cell_index(id);
528 is_ghost_vectors[cell->level()].push_back(id);
529 });
530
531 number_cache.level_cell_index_partitioners.resize(
532 this->n_global_levels());
533
534 // 6) set up cell partitioners for each level
535 for (unsigned int l = 0; l < this->n_global_levels(); ++l)
536 {
537 IndexSet is_local(n_cells_level[l]);
538 is_local.add_range(my_ranges[l].first, my_ranges[l].second);
539
540 IndexSet is_ghost(n_cells_level[l]);
541 std::sort(is_ghost_vectors[l].begin(), is_ghost_vectors[l].end());
542 is_ghost.add_indices(is_ghost_vectors[l].begin(),
543 is_ghost_vectors[l].end());
544
545 number_cache.level_cell_index_partitioners[l] =
546 std::make_shared<const Utilities::MPI::Partitioner>(
547 is_local, is_ghost, this->mpi_communicator);
548 }
549 }
550
551#endif
552 }
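// Worked example (not from this file) of the MPI_Exscan based numbering above:
// if ranks 0, 1 and 2 own 3, 5 and 2 active cells, the exclusive prefix sum
// yields the offsets 0, 3 and 8, so rank 0 hands out global active cell
// indices [0,3), rank 1 [3,8) and rank 2 [8,10); ghost cells then receive
// their indices from their owning rank through
// GridTools::exchange_cell_data_to_ghosts(), and the same scheme is repeated
// level by level for the multigrid hierarchy.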
553
554
555
556 template <int dim, int spacedim>
557 void
558 TriangulationBase<dim, spacedim>::communicate_locally_moved_vertices(
559 const std::vector<bool> &vertex_locally_moved)
560 {
561 AssertDimension(vertex_locally_moved.size(), this->n_vertices());
562#ifdef DEBUG
563 {
564 const std::vector<bool> locally_owned_vertices =
565 GridTools::get_locally_owned_vertices(*this);
566 for (unsigned int i = 0; i < locally_owned_vertices.size(); ++i)
567 Assert((vertex_locally_moved[i] == false) ||
568 (locally_owned_vertices[i] == true),
569 ExcMessage("The vertex_locally_moved argument must not "
570 "contain vertices that are not locally owned"));
571 }
572#endif
573
574 Point<spacedim> invalid_point;
575 for (unsigned int d = 0; d < spacedim; ++d)
576 invalid_point[d] = std::numeric_limits<double>::quiet_NaN();
577
578 const auto pack = [&](const auto &cell) {
579 std::vector<Point<spacedim>> vertices(cell->n_vertices());
580
581 for (const auto v : cell->vertex_indices())
582 if (vertex_locally_moved[cell->vertex_index(v)])
583 vertices[v] = cell->vertex(v);
584 else
585 vertices[v] = invalid_point;
586
587 return vertices;
588 };
589
590 const auto unpack = [&](const auto &cell, const auto &vertices) {
591 for (const auto v : cell->vertex_indices())
592 if (std::isnan(vertices[v][0]) == false)
593 cell->vertex(v) = vertices[v];
594 };
595
596 if (this->is_multilevel_hierarchy_constructed())
597 GridTools::exchange_cell_data_to_level_ghosts<
598 std::vector<Point<spacedim>>>(*this, pack, unpack);
599 else
600 GridTools::exchange_cell_data_to_ghosts<std::vector<Point<spacedim>>>(
601 *this, pack, unpack);
602 }
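// Usage sketch (not from this file): move only locally owned vertices, flag
// them, and let the call above update the ghost copies on other ranks:
// @code
//   const std::vector<bool> owned_vertex =
//     GridTools::get_locally_owned_vertices(tria);
//   std::vector<bool> moved(tria.n_vertices(), false);
//   for (const auto &cell : tria.active_cell_iterators())
//     if (cell->is_locally_owned())
//       for (const auto v : cell->vertex_indices())
//         if (owned_vertex[cell->vertex_index(v)] &&
//             !moved[cell->vertex_index(v)])
//           {
//             cell->vertex(v)[0] += 0.01; // shift owned vertices only
//             moved[cell->vertex_index(v)] = true;
//           }
//   tria.communicate_locally_moved_vertices(moved);
// @endcode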
603
604
605
606 template <int dim, int spacedim>
607 const std::weak_ptr<const Utilities::MPI::Partitioner>
608 TriangulationBase<dim, spacedim>::global_active_cell_index_partitioner() const
609 {
610 return number_cache.active_cell_index_partitioner;
611 }
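// Usage sketch (not from this file, assumes la_parallel_vector.h): the
// returned partitioner can back a ghosted vector with one entry per global
// active cell:
// @code
//   LinearAlgebra::distributed::Vector<double> cell_data(
//     tria.global_active_cell_index_partitioner().lock());
//   for (const auto &cell : tria.active_cell_iterators())
//     if (cell->is_locally_owned())
//       cell_data[cell->global_active_cell_index()] = cell->measure();
//   cell_data.update_ghost_values();
// @endcode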
612
613
614
615 template <int dim, int spacedim>
616 const std::weak_ptr<const Utilities::MPI::Partitioner>
617 TriangulationBase<dim, spacedim>::global_level_cell_index_partitioner(
618 const unsigned int level) const
619 {
620 Assert(this->is_multilevel_hierarchy_constructed(), ExcNotImplemented());
621 AssertIndexRange(level, this->n_global_levels());
623 return number_cache.level_cell_index_partitioners[level];
624 }
625
626
627
628 template <int dim, int spacedim>
629 types::coarse_cell_id
630 TriangulationBase<dim, spacedim>::n_global_coarse_cells() const
631 {
632 return number_cache.number_of_global_coarse_cells;
633 }
634
635
636
637 template <int dim, int spacedim>
638 DistributedTriangulationBase<dim, spacedim>::DistributedTriangulationBase(
639 const MPI_Comm &mpi_communicator,
640 const typename ::Triangulation<dim, spacedim>::MeshSmoothing
641 smooth_grid,
642 const bool check_for_distorted_cells)
643 : ::parallel::TriangulationBase<dim, spacedim>(
644 mpi_communicator,
645 smooth_grid,
646 check_for_distorted_cells)
647 , cell_attached_data({0, 0, {}, {}})
648 , data_transfer(mpi_communicator)
649 {}
650
651
652
653 template <int dim, int spacedim>
654 void
655 DistributedTriangulationBase<dim, spacedim>::clear()
656 {
657 cell_attached_data = {0, 0, {}, {}};
658 data_transfer.clear();
659
661 }
662
663
664
665 template <int dim, int spacedim>
666 void
667 DistributedTriangulationBase<dim, spacedim>::save_attached_data(
668 const unsigned int global_first_cell,
669 const unsigned int global_num_cells,
670 const std::string &filename) const
671 {
672 // cast away constness
673 auto tria = const_cast<
674 parallel::DistributedTriangulationBase<dim, spacedim> *>(this);
675
676 if (this->cell_attached_data.n_attached_data_sets > 0)
677 {
678 // pack attached data first
679 tria->data_transfer.pack_data(
680 tria->local_cell_relations,
681 tria->cell_attached_data.pack_callbacks_fixed,
682 tria->cell_attached_data.pack_callbacks_variable);
683
684 // then store buffers in file
685 tria->data_transfer.save(global_first_cell, global_num_cells, filename);
686
687 // and release the memory afterwards
688 tria->data_transfer.clear();
689 }
690
691 // clear all of the callback data, as explained in the documentation of
692 // register_data_attach()
693 {
694 tria->cell_attached_data.n_attached_data_sets = 0;
695 tria->cell_attached_data.pack_callbacks_fixed.clear();
696 tria->cell_attached_data.pack_callbacks_variable.clear();
697 }
698 }
699
700
701
702 template <int dim, int spacedim>
703 bool
704 DistributedTriangulationBase<dim, spacedim>::has_hanging_nodes() const
705 {
706 if (this->n_global_levels() <= 1)
707 return false; // can not have hanging nodes without refined cells
708
709 // if there are any active cells with level less than n_global_levels()-1,
710 // then there is obviously also one with level n_global_levels()-1, and
711 // consequently there must be a hanging node somewhere.
712 //
713 // The problem is that we cannot just ask for the first active cell, but
714 // instead need to filter over locally owned cells.
715 const bool have_coarser_cell =
716 std::any_of(this->begin_active(this->n_global_levels() - 2),
717 this->end_active(this->n_global_levels() - 2),
718 [](const CellAccessor<dim, spacedim> &cell) {
719 return cell.is_locally_owned();
720 });
721
722 // return true if at least one process has a coarser cell
723 return Utilities::MPI::max(have_coarser_cell ? 1 : 0,
724 this->mpi_communicator) != 0;
725 }
726
727
728
729 template <int dim, int spacedim>
730 void
731 DistributedTriangulationBase<dim, spacedim>::load_attached_data(
732 const unsigned int global_first_cell,
733 const unsigned int global_num_cells,
734 const unsigned int local_num_cells,
735 const std::string &filename,
736 const unsigned int n_attached_deserialize_fixed,
737 const unsigned int n_attached_deserialize_variable)
738 {
739 // load saved data, if any was stored
740 if (this->cell_attached_data.n_attached_deserialize > 0)
741 {
742 this->data_transfer.load(global_first_cell,
743 global_num_cells,
744 local_num_cells,
745 filename,
746 n_attached_deserialize_fixed,
747 n_attached_deserialize_variable);
748
749 this->data_transfer.unpack_cell_status(this->local_cell_relations);
750
751 // the CellStatus of all stored cells should always be CELL_PERSIST.
752 for (const auto &cell_rel : this->local_cell_relations)
753 {
754 (void)cell_rel;
755 Assert(
756 (cell_rel.second == // cell_status
757 parallel::DistributedTriangulationBase<dim,
758 spacedim>::CELL_PERSIST),
759 ExcInternalError());
760
761 }
762 }
763
764
765
766 template <int dim, int spacedim>
767 unsigned int
768 DistributedTriangulationBase<dim, spacedim>::register_data_attach(
769 const std::function<std::vector<char>(const cell_iterator &,
770 const CellStatus)> &pack_callback,
771 const bool returns_variable_size_data)
772 {
773 unsigned int handle = numbers::invalid_unsigned_int;
774
775 // Add new callback function to the corresponding register.
776 // Encode handles according to returns_variable_size_data.
777 if (returns_variable_size_data)
778 {
779 handle = 2 * cell_attached_data.pack_callbacks_variable.size();
780 cell_attached_data.pack_callbacks_variable.push_back(pack_callback);
781 }
782 else
783 {
784 handle = 2 * cell_attached_data.pack_callbacks_fixed.size() + 1;
785 cell_attached_data.pack_callbacks_fixed.push_back(pack_callback);
786 }
787
788 // Increase overall counter.
789 ++cell_attached_data.n_attached_data_sets;
790
791 return handle;
792 }
793
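// Worked example (not from this file) of the handle encoding above: variable
// size callbacks receive the even handles 0, 2, 4, ... (twice their position
// in pack_callbacks_variable), fixed size callbacks the odd handles
// 1, 3, 5, ... (twice their position in pack_callbacks_fixed plus one);
// unpack_data() later inverts this via handle % 2 and handle / 2. For
// instance, registering a first fixed size callback returns 1:
// @code
//   const unsigned int handle = tria.register_data_attach(
//     [](const auto &cell, const auto status) {
//       return Utilities::pack(cell->material_id(),
//                              /*allow_compression=*/false);
//     },
//     /*returns_variable_size_data=*/false);
// @endcode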
794
796 template <int dim, int spacedim>
797 void
798 DistributedTriangulationBase<dim, spacedim>::notify_ready_to_unpack(
799 const unsigned int handle,
800 const std::function<
801 void(const cell_iterator &,
802 const CellStatus,
803 const boost::iterator_range<std::vector<char>::const_iterator> &)>
804 &unpack_callback)
805 {
806 // perform unpacking
807 data_transfer.unpack_data(local_cell_relations, handle, unpack_callback);
808
809 // decrease counters
810 --cell_attached_data.n_attached_data_sets;
811 if (cell_attached_data.n_attached_deserialize > 0)
812 --cell_attached_data.n_attached_deserialize;
813
814 // important: only remove data if we are not in the deserialization
815 // process. There, each SolutionTransfer registers and unpacks before
816 // the next one does this, so n_attached_data_sets is only 1 here. This
817 // would destroy the saved data before the second SolutionTransfer can
818 // get it. This created a bug that is documented in
819 // tests/mpi/p4est_save_03 with more than one SolutionTransfer.
820
821 if (cell_attached_data.n_attached_data_sets == 0 &&
822 cell_attached_data.n_attached_deserialize == 0)
823 {
824 // everybody got their data, time for cleanup!
825 cell_attached_data.pack_callbacks_fixed.clear();
826 cell_attached_data.pack_callbacks_variable.clear();
827 data_transfer.clear();
828
829 // reset all cell_status entries after coarsening/refinement
830 for (auto &cell_rel : local_cell_relations)
831 cell_rel.second =
832 parallel::DistributedTriangulationBase<dim, spacedim>::CELL_PERSIST;
833 }
834 }
835
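// Usage sketch (not from this file) of the round trip that classes like
// SolutionTransfer perform with register_data_attach() and
// notify_ready_to_unpack(), here with hypothetical callbacks pack_callback
// and unpack_callback matching the signatures above:
// @code
//   const unsigned int handle =
//     tria.register_data_attach(pack_callback,
//                               /*returns_variable_size_data=*/false);
//   tria.execute_coarsening_and_refinement(); // or save() / load()
//   tria.notify_ready_to_unpack(handle, unpack_callback);
// @endcode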
836
837
838 /* ------------------ class DataTransfer<dim,spacedim> ----------------- */
839
840
841 template <int dim, int spacedim>
842 DistributedTriangulationBase<dim, spacedim>::DataTransfer::DataTransfer(
843 const MPI_Comm &mpi_communicator)
844 : variable_size_data_stored(false)
845 , mpi_communicator(mpi_communicator)
846 {}
847
848
849
850 template <int dim, int spacedim>
851 void
852 DistributedTriangulationBase<dim, spacedim>::DataTransfer::pack_data(
853 const std::vector<cell_relation_t> &cell_relations,
854 const std::vector<typename CellAttachedData::pack_callback_t>
855 &pack_callbacks_fixed,
856 const std::vector<typename CellAttachedData::pack_callback_t>
857 &pack_callbacks_variable)
858 {
859 Assert(src_data_fixed.size() == 0,
860 ExcMessage("Previously packed data has not been released yet!"));
861 Assert(src_sizes_variable.size() == 0, ExcInternalError());
862
863 const unsigned int n_callbacks_fixed = pack_callbacks_fixed.size();
864 const unsigned int n_callbacks_variable = pack_callbacks_variable.size();
865
866 // Store information that we packed variable size data in
867 // a member variable for later.
868 variable_size_data_stored = (n_callbacks_variable > 0);
869
870 // If variable transfer is scheduled, we will store the data size that
871 // each variable size callback function writes in this auxiliary
872 // container. The information will be stored by each cell in this vector
873 // temporarily.
874 std::vector<unsigned int> cell_sizes_variable_cumulative(
875 n_callbacks_variable);
876
877 // Prepare the buffer structure, in which each callback function will
878 // store its data for each active cell.
879 // The outmost shell in this container construct corresponds to the
880 // data packed per cell. The next layer resembles the data that
881 // each callback function packs on the corresponding cell. These
882 // buffers are chains of chars stored in an std::vector<char>.
883 // A visualisation of the data structure:
884 /* clang-format off */
885 // | cell_1 | | cell_2 | ...
886 // || callback_1 || callback_2 |...| || callback_1 || callback_2 |...| ...
887 // |||char|char|...|||char|char|...|...| |||char|char|...|||char|char|...|...| ...
888 /* clang-format on */
889 std::vector<std::vector<std::vector<char>>> packed_fixed_size_data(
890 cell_relations.size());
891 std::vector<std::vector<std::vector<char>>> packed_variable_size_data(
892 variable_size_data_stored ? cell_relations.size() : 0);
893
894 //
895 // --------- Pack data for fixed and variable size transfer ---------
896 //
897 // Iterate over all cells, call all callback functions on each cell,
898 // and store their data in the corresponding buffer scope.
899 {
900 auto cell_rel_it = cell_relations.cbegin();
901 auto data_cell_fixed_it = packed_fixed_size_data.begin();
902 auto data_cell_variable_it = packed_variable_size_data.begin();
903 for (; cell_rel_it != cell_relations.cend(); ++cell_rel_it)
904 {
905 const auto &dealii_cell = cell_rel_it->first;
906 const auto &cell_status = cell_rel_it->second;
907
908 // Assertions about the tree structure.
909 switch (cell_status)
910 {
911 case parallel::DistributedTriangulationBase<dim,
912 spacedim>::CELL_PERSIST:
913 case parallel::DistributedTriangulationBase<dim,
914 spacedim>::CELL_REFINE:
915 // double check the condition that we will only ever attach
916 // data to active cells when we get here
917 Assert(dealii_cell->is_active(), ExcInternalError());
918 break;
919
920 case parallel::DistributedTriangulationBase<dim,
921 spacedim>::CELL_COARSEN:
922 // double check the condition that we will only ever attach
923 // data to cells with children when we get here. however, we
924 // can only tolerate one level of coarsening at a time, so
925 // check that the children are all active
926 Assert(dealii_cell->is_active() == false, ExcInternalError());
927 for (unsigned int c = 0;
928 c < GeometryInfo<dim>::max_children_per_cell;
929 ++c)
930 Assert(dealii_cell->child(c)->is_active(),
931 ExcInternalError());
932 break;
933
934 case parallel::DistributedTriangulationBase<dim,
935 spacedim>::CELL_INVALID:
936 // do nothing on invalid cells
937 break;
938
939 default:
940 Assert(false, ExcInternalError());
941 break;
942 }
943
944 // Reserve memory corresponding to the number of callback
945 // functions that will be called.
946 // If variable size transfer is scheduled, we need to leave
947 // room for an array that holds information about how many
948 // bytes each of the variable size callback functions will
949 // write.
950 // On cells flagged with CELL_INVALID, only its CellStatus
951 // will be stored.
952 const unsigned int n_fixed_size_data_sets_on_cell =
953 1 +
954 ((cell_status ==
955 parallel::DistributedTriangulationBase<dim,
956 spacedim>::CELL_INVALID) ?
957 0 :
958 ((variable_size_data_stored ? 1 : 0) + n_callbacks_fixed));
959 data_cell_fixed_it->resize(n_fixed_size_data_sets_on_cell);
960
961 // We continue with packing all data on this specific cell.
962 auto data_fixed_it = data_cell_fixed_it->begin();
963
964 // First, we pack the CellStatus information.
965 // to get consistent data sizes on each cell for the fixed size
966 // transfer, we won't allow compression
967 *data_fixed_it =
968 Utilities::pack(cell_status, /*allow_compression=*/false);
969 ++data_fixed_it;
970
971 // Proceed with all registered callback functions.
972 // Skip cells with the CELL_INVALID flag.
973 if (cell_status !=
974 parallel::DistributedTriangulationBase<dim,
975 spacedim>::CELL_INVALID)
976 {
977 // Pack fixed size data.
978 for (auto callback_it = pack_callbacks_fixed.cbegin();
979 callback_it != pack_callbacks_fixed.cend();
980 ++callback_it, ++data_fixed_it)
981 {
982 *data_fixed_it = (*callback_it)(dealii_cell, cell_status);
983 }
984
985 // Pack variable size data.
986 // If we store variable size data, we need to transfer
987 // the sizes of each corresponding callback function
988 // via fixed size transfer as well.
989 if (variable_size_data_stored)
990 {
991 const unsigned int n_variable_size_data_sets_on_cell =
992 ((cell_status ==
993 parallel::DistributedTriangulationBase<dim, spacedim>::
994 CELL_INVALID) ?
995 0 :
996 n_callbacks_variable);
997 data_cell_variable_it->resize(
998 n_variable_size_data_sets_on_cell);
999
1000 auto callback_it = pack_callbacks_variable.cbegin();
1001 auto data_variable_it = data_cell_variable_it->begin();
1002 auto sizes_variable_it =
1003 cell_sizes_variable_cumulative.begin();
1004 for (; callback_it != pack_callbacks_variable.cend();
1005 ++callback_it, ++data_variable_it, ++sizes_variable_it)
1006 {
1007 *data_variable_it =
1008 (*callback_it)(dealii_cell, cell_status);
1009
1010 // Store data sizes for each callback function first.
1011 // Make it cumulative below.
1012 *sizes_variable_it = data_variable_it->size();
1013 }
1014
1015 // Turn size vector into its cumulative representation.
1016 std::partial_sum(cell_sizes_variable_cumulative.begin(),
1017 cell_sizes_variable_cumulative.end(),
1018 cell_sizes_variable_cumulative.begin());
1019
1020 // Serialize cumulative variable size vector
1021 // value-by-value. This way we can circumvent the overhead
1022 // of storing the container object as a whole, since we
1023 // know its size by the number of registered callback
1024 // functions.
1025 data_fixed_it->resize(n_callbacks_variable *
1026 sizeof(unsigned int));
1027 for (unsigned int i = 0; i < n_callbacks_variable; ++i)
1028 std::memcpy(&(data_fixed_it->at(i * sizeof(unsigned int))),
1029 &(cell_sizes_variable_cumulative.at(i)),
1030 sizeof(unsigned int));
1031
1032 ++data_fixed_it;
1033 }
1034
1035 // Double check that we packed everything we wanted
1036 // in the fixed size buffers.
1037 Assert(data_fixed_it == data_cell_fixed_it->end(),
1038 ExcInternalError());
1039 }
1040
1041 ++data_cell_fixed_it;
1042
1043 // Increment the variable size data iterator
1044 // only if we actually pack this kind of data
1045 // to avoid getting out of bounds.
1046 if (variable_size_data_stored)
1047 ++data_cell_variable_it;
1048 } // loop over cell_relations
1049 }
1050
1051 //
1052 // ----------- Gather data sizes for fixed size transfer ------------
1053 //
1054 // Generate a vector which stores the sizes of each callback function,
1055 // including the packed CellStatus transfer.
1056 // Find the very first cell that we wrote to with all callback
1057 // functions (i.e. a cell that was not flagged with CELL_INVALID)
1058 // and store the sizes of each buffer.
1059 //
1060 // To deal with the case that at least one of the processors does not
1061 // own any cell at all, we will exchange the information about the data
1062 // sizes among them later. The code in between is still well-defined,
1063 // since the following loops will be skipped.
1064 std::vector<unsigned int> local_sizes_fixed(
1065 1 + n_callbacks_fixed + (variable_size_data_stored ? 1 : 0));
1066 for (const auto &data_cell : packed_fixed_size_data)
1067 {
1068 if (data_cell.size() == local_sizes_fixed.size())
1069 {
1070 auto sizes_fixed_it = local_sizes_fixed.begin();
1071 auto data_fixed_it = data_cell.cbegin();
1072 for (; data_fixed_it != data_cell.cend();
1073 ++data_fixed_it, ++sizes_fixed_it)
1074 {
1075 *sizes_fixed_it = data_fixed_it->size();
1076 }
1077
1078 break;
1079 }
1080 }
1081
1082 // Check if all cells have valid sizes.
1083 for (auto data_cell_fixed_it = packed_fixed_size_data.cbegin();
1084 data_cell_fixed_it != packed_fixed_size_data.cend();
1085 ++data_cell_fixed_it)
1086 {
1087 Assert((data_cell_fixed_it->size() == 1) ||
1088 (data_cell_fixed_it->size() == local_sizes_fixed.size()),
1089 ExcInternalError());
1090 }
1091
1092 // Share information about the packed data sizes
1093 // of all callback functions across all processors, in case one
1094 // of them does not own any cells at all.
1095 std::vector<unsigned int> global_sizes_fixed(local_sizes_fixed.size());
1096 Utilities::MPI::max(local_sizes_fixed,
1097 this->mpi_communicator,
1098 global_sizes_fixed);
1099
1100 // Construct cumulative sizes, since this is the only information
1101 // we need from now on.
1102 sizes_fixed_cumulative.resize(global_sizes_fixed.size());
1103 std::partial_sum(global_sizes_fixed.begin(),
1104 global_sizes_fixed.end(),
1105 sizes_fixed_cumulative.begin());
1106
1107 //
1108 // ---------- Gather data sizes for variable size transfer ----------
1109 //
1110 if (variable_size_data_stored)
1111 {
1112 src_sizes_variable.reserve(packed_variable_size_data.size());
1113 for (const auto &data_cell : packed_variable_size_data)
1114 {
1115 int variable_data_size_on_cell = 0;
1116
1117 for (const auto &data : data_cell)
1118 variable_data_size_on_cell += data.size();
1119
1120 src_sizes_variable.push_back(variable_data_size_on_cell);
1121 }
1122 }
1123
1124 //
1125 // ------------------------ Build buffers ---------------------------
1126 //
1127 const unsigned int expected_size_fixed =
1128 cell_relations.size() * sizes_fixed_cumulative.back();
1129 const unsigned int expected_size_variable =
1130 std::accumulate(src_sizes_variable.begin(),
1131 src_sizes_variable.end(),
1132 std::vector<int>::size_type(0));
1133
1134 // Move every piece of packed fixed size data into the consecutive
1135 // buffer.
1136 src_data_fixed.reserve(expected_size_fixed);
1137 for (const auto &data_cell_fixed : packed_fixed_size_data)
1138 {
1139 // Move every fraction of packed data into the buffer
1140 // reserved for this particular cell.
1141 for (const auto &data_fixed : data_cell_fixed)
1142 std::move(data_fixed.begin(),
1143 data_fixed.end(),
1144 std::back_inserter(src_data_fixed));
1145
1146 // If we only packed the CellStatus information
1147 // (i.e. encountered a cell flagged CELL_INVALID),
1148 // fill the remaining space with invalid entries.
1149 // We can skip this if there is nothing else to pack.
1150 if ((data_cell_fixed.size() == 1) &&
1151 (sizes_fixed_cumulative.size() > 1))
1152 {
1153 const std::size_t bytes_skipped =
1154 sizes_fixed_cumulative.back() - sizes_fixed_cumulative.front();
1155
1156 src_data_fixed.insert(src_data_fixed.end(),
1157 bytes_skipped,
1158 static_cast<char>(-1)); // invalid_char
1159 }
1160 }
1161
1162 // Move every piece of packed variable size data into the consecutive
1163 // buffer.
1164 if (variable_size_data_stored)
1165 {
1166 src_data_variable.reserve(expected_size_variable);
1167 for (const auto &data_cell : packed_variable_size_data)
1168 {
1169 // Move every fraction of packed data into the buffer
1170 // reserved for this particular cell.
1171 for (const auto &data : data_cell)
1172 std::move(data.begin(),
1173 data.end(),
1174 std::back_inserter(src_data_variable));
1175 }
1176 }
1177
1178 // Double check that we packed everything correctly.
1179 Assert(src_data_fixed.size() == expected_size_fixed, ExcInternalError());
1180 Assert(src_data_variable.size() == expected_size_variable,
1181 ExcInternalError());
1182 }
1183
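// Layout sketch (not from this file): with two fixed size callbacks writing,
// say, 8 and 4 bytes per cell plus one variable size callback,
// sizes_fixed_cumulative could end up as {4, 12, 16, 20} (packed CellStatus,
// the two callbacks, and the per-callback size table), and
// sizes_fixed_cumulative.back() is the fixed size stride per cell that
// save(), load() and unpack_data() rely on below.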
1184
1185
1186 template <int dim, int spacedim>
1187 void
1188 DistributedTriangulationBase<dim, spacedim>::DataTransfer::unpack_cell_status(
1189 std::vector<cell_relation_t> &cell_relations) const
1190 {
1191 Assert(sizes_fixed_cumulative.size() > 0,
1192 ExcMessage("No data has been packed!"));
1193 if (cell_relations.size() > 0)
1194 {
1195 Assert(dest_data_fixed.size() > 0,
1196 ExcMessage("No data has been received!"));
1197 }
1198
1199 // Size of CellStatus object that will be unpacked on each cell.
1200 const unsigned int size = sizes_fixed_cumulative.front();
1201
1202 // Iterate over all cells and overwrite the CellStatus
1203 // information from the transferred data.
1204 // Proceed buffer iterator position to next cell after
1205 // each iteration.
1206 auto cell_rel_it = cell_relations.begin();
1207 auto dest_fixed_it = dest_data_fixed.cbegin();
1208 for (; cell_rel_it != cell_relations.end();
1209 ++cell_rel_it, dest_fixed_it += sizes_fixed_cumulative.back())
1210 {
1211 cell_rel_it->second = // cell_status
1212 Utilities::unpack<typename parallel::DistributedTriangulationBase<
1213 dim,
1214 spacedim>::CellStatus>(dest_fixed_it,
1215 dest_fixed_it + size,
1216 /*allow_compression=*/false);
1217 }
1218 }
1219
1220
1221
1222 template <int dim, int spacedim>
1223 void
1224 DistributedTriangulationBase<dim, spacedim>::DataTransfer::unpack_data(
1225 const std::vector<cell_relation_t> &cell_relations,
1226 const unsigned int handle,
1227 const std::function<
1228 void(const typename ::Triangulation<dim, spacedim>::cell_iterator &,
1229 const typename ::Triangulation<dim, spacedim>::CellStatus &,
1230 const boost::iterator_range<std::vector<char>::const_iterator> &)>
1231 &unpack_callback) const
1232 {
1233 // We decode the handle returned by register_data_attach() back into
1234 // a format we can use. All even handles belong to those callback
1235 // functions which write/read variable size data, all odd handles
1236 // interact with fixed size buffers.
1237 const bool callback_variable_transfer = (handle % 2 == 0);
1238 const unsigned int callback_index = handle / 2;
1239
1240 // Cells will always receive fixed size data (i.e., CellStatus
1241 // information), but not necessarily variable size data (e.g., with a
1242 // ParticleHandler a cell might not contain any particle at all).
1243 // Thus it is sufficient to check if fixed size data has been received.
1244 Assert(sizes_fixed_cumulative.size() > 0,
1245 ExcMessage("No data has been packed!"));
1246 if (cell_relations.size() > 0)
1247 {
1248 Assert(dest_data_fixed.size() > 0,
1249 ExcMessage("No data has been received!"));
1250 }
1251
1252 std::vector<char>::const_iterator dest_data_it;
1253 std::vector<char>::const_iterator dest_sizes_cell_it;
1254
1255 // Depending on whether our callback function unpacks fixed or
1256 // variable size data, we have to pursue different approaches
1257 // to localize the correct fraction of the buffer from which
1258 // we are allowed to read.
1259 unsigned int offset = numbers::invalid_unsigned_int;
1260 unsigned int size = numbers::invalid_unsigned_int;
1261 unsigned int data_increment = numbers::invalid_unsigned_int;
1262
1263 if (callback_variable_transfer)
1264 {
1265 // For the variable size data, we need to extract the
1266 // data size from the fixed size buffer on each cell.
1267 //
1268 // We packed this information last, so the last packed
1269 // object in the fixed size buffer corresponds to the
1270 // variable data sizes.
1271 //
1272 // The last entry of sizes_fixed_cumulative corresponds
1273 // to the size of all fixed size data packed on the cell.
1274 // To get the offset for the last packed object, we need
1275 // to get the next-to-last entry.
1276 const unsigned int offset_variable_data_sizes =
1277 sizes_fixed_cumulative[sizes_fixed_cumulative.size() - 2];
1278
1279 // This iterator points to the data size that the
1280 // callback_function packed for each specific cell.
1281 // Adjust buffer iterator to the offset of the callback
1282 // function so that we only have to advance its position
1283 // to the next cell after each iteration.
1284 dest_sizes_cell_it = dest_data_fixed.cbegin() +
1285 offset_variable_data_sizes +
1286 callback_index * sizeof(unsigned int);
1287
1288 // Let the data iterator point to the correct buffer.
1289 dest_data_it = dest_data_variable.cbegin();
1290 }
1291 else
1292 {
1293 // For the fixed size data, we can get the information about
1294 // the buffer location on each cell directly from the
1295 // sizes_fixed_cumulative vector.
1296 offset = sizes_fixed_cumulative[callback_index];
1297 size = sizes_fixed_cumulative[callback_index + 1] - offset;
1298 data_increment = sizes_fixed_cumulative.back();
1299
1300 // Let the data iterator point to the correct buffer.
1301 // Adjust buffer iterator to the offset of the callback
1302 // function so that we only have to advance its position
1303 // to the next cell after each iteration.
1304 if (cell_relations.begin() != cell_relations.end())
1305 dest_data_it = dest_data_fixed.cbegin() + offset;
1306 }
1307
1308 // Iterate over all cells and unpack the transferred data.
1309 auto cell_rel_it = cell_relations.begin();
1310 auto dest_sizes_it = dest_sizes_variable.cbegin();
1311 for (; cell_rel_it != cell_relations.end(); ++cell_rel_it)
1312 {
1313 const auto &dealii_cell = cell_rel_it->first;
1314 const auto &cell_status = cell_rel_it->second;
1315
1316 if (callback_variable_transfer)
1317 {
1318 // Update the increment according to the whole data size
1319 // of the current cell.
1320 data_increment = *dest_sizes_it;
1321
1322 if (cell_status !=
1323 parallel::DistributedTriangulationBase<dim,
1324 spacedim>::CELL_INVALID)
1325 {
1326 // Extract the corresponding values for offset and size from
1327 // the cumulative sizes array stored in the fixed size
1328 // buffer.
1329 if (callback_index == 0)
1330 offset = 0;
1331 else
1332 std::memcpy(&offset,
1333 &(*(dest_sizes_cell_it - sizeof(unsigned int))),
1334 sizeof(unsigned int));
1335
1336 std::memcpy(&size,
1337 &(*dest_sizes_cell_it),
1338 sizeof(unsigned int));
1339
1340 size -= offset;
1341
1342 // Move the data iterator to the corresponding position
1343 // of the callback function and adjust the increment
1344 // accordingly.
1345 dest_data_it += offset;
1346 data_increment -= offset;
1347 }
1348
1349 // Advance data size iterators to the next cell, avoid iterating
1350 // past the end of dest_sizes_cell_it
1351 if (cell_rel_it != cell_relations.end() - 1)
1352 dest_sizes_cell_it += sizes_fixed_cumulative.back();
1353 ++dest_sizes_it;
1354 }
1355
1356 switch (cell_status)
1357 {
1358 case parallel::DistributedTriangulationBase<dim,
1359 spacedim>::CELL_PERSIST:
1360 case parallel::DistributedTriangulationBase<dim,
1361 spacedim>::CELL_COARSEN:
1362 unpack_callback(dealii_cell,
1363 cell_status,
1364 boost::make_iterator_range(dest_data_it,
1365 dest_data_it + size));
1366 break;
1367
1368 case parallel::DistributedTriangulationBase<dim,
1369 spacedim>::CELL_REFINE:
1370 unpack_callback(dealii_cell->parent(),
1371 cell_status,
1372 boost::make_iterator_range(dest_data_it,
1373 dest_data_it + size));
1374 break;
1375
1376 case parallel::DistributedTriangulationBase<dim,
1377 spacedim>::CELL_INVALID:
1378 // Skip this cell.
1379 break;
1380
1381 default:
1382 Assert(false, ExcInternalError());
1383 break;
1384 }
1385
1386 if (cell_rel_it != cell_relations.end() - 1)
1387 dest_data_it += data_increment;
1388 }
1389 }
1390
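// Worked example (not from this file), continuing the {4, 12, 16, 20} layout
// sketched after pack_data(): a fixed size callback with handle 3 decodes to
// callback_index = 1, so it reads size = 16 - 12 = 4 bytes at offset 12
// within each cell's block and advances by the per-cell stride of 20 bytes
// from one cell to the next.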
1391
1392
1393 template <int dim, int spacedim>
1394 void
1395 DistributedTriangulationBase<dim, spacedim>::DataTransfer::save(
1396 const unsigned int global_first_cell,
1397 const unsigned int global_num_cells,
1398 const std::string &filename) const
1399 {
1400#ifdef DEAL_II_WITH_MPI
1401 // Large fractions of this function have been copied from
1402 // DataOutInterface::write_vtu_in_parallel.
1403 // TODO: Write general MPIIO interface.
1404
1405 Assert(sizes_fixed_cumulative.size() > 0,
1406 ExcMessage("No data has been packed!"));
1407 const unsigned int myrank =
1408 Utilities::MPI::this_mpi_process(mpi_communicator);
1409
1410 const unsigned int bytes_per_cell = sizes_fixed_cumulative.back();
1411
1412 //
1413 // ---------- Fixed size data ----------
1414 //
1415 {
1416 const std::string fname_fixed = std::string(filename) + "_fixed.data";
1417
1418 MPI_Info info;
1419 int ierr = MPI_Info_create(&info);
1420 AssertThrowMPI(ierr);
1421
1422 MPI_File fh;
1423 ierr = MPI_File_open(mpi_communicator,
1424 fname_fixed.c_str(),
1425 MPI_MODE_CREATE | MPI_MODE_WRONLY,
1426 info,
1427 &fh);
1428 AssertThrowMPI(ierr);
1429
1430 ierr = MPI_File_set_size(fh, 0); // delete the file contents
1431 AssertThrowMPI(ierr);
1432 // this barrier is necessary, because otherwise others might already
1433 // write while one core is still setting the size to zero.
1434 ierr = MPI_Barrier(mpi_communicator);
1435 AssertThrowMPI(ierr);
1436 ierr = MPI_Info_free(&info);
1437 AssertThrowMPI(ierr);
1438 // ------------------
1439
1440 // Write cumulative sizes to file.
1441 // Since each processor owns the same information about the data
1442 // sizes, it is sufficient to let only the first processor perform
1443 // this task.
1444 if (myrank == 0)
1445 {
1446 ierr = Utilities::MPI::LargeCount::File_write_at_c(
1447 fh,
1448 0,
1449 sizes_fixed_cumulative.data(),
1450 sizes_fixed_cumulative.size(),
1451 MPI_UNSIGNED,
1452 MPI_STATUS_IGNORE);
1453 AssertThrowMPI(ierr);
1454 }
1455
1456 // Write packed data to file simultaneously.
1457 const MPI_Offset size_header =
1458 sizes_fixed_cumulative.size() * sizeof(unsigned int);
1459
1460 // Make sure we do the following computation in 64bit integers to be
1461 // able to handle 4GB+ files:
1462 const MPI_Offset my_global_file_position =
1463 size_header +
1464 static_cast<MPI_Offset>(global_first_cell) * bytes_per_cell;
1465
1466 ierr =
1467 Utilities::MPI::LargeCount::File_write_at_c(fh,
1468 my_global_file_position,
1469 src_data_fixed.data(),
1470 src_data_fixed.size(),
1471 MPI_BYTE,
1472 MPI_STATUS_IGNORE);
1473 AssertThrowMPI(ierr);
1474
1475 ierr = MPI_File_close(&fh);
1476 AssertThrowMPI(ierr);
1477 }
1478
1479
1480
1481 //
1482 // ---------- Variable size data ----------
1483 //
1484 if (variable_size_data_stored)
1485 {
1486 const std::string fname_variable =
1487 std::string(filename) + "_variable.data";
1488
1489 MPI_Info info;
1490 int ierr = MPI_Info_create(&info);
1491 AssertThrowMPI(ierr);
1492
1493 MPI_File fh;
1494 ierr = MPI_File_open(mpi_communicator,
1495 fname_variable.c_str(),
1496 MPI_MODE_CREATE | MPI_MODE_WRONLY,
1497 info,
1498 &fh);
1499 AssertThrowMPI(ierr);
1500
1501 ierr = MPI_File_set_size(fh, 0); // delete the file contents
1502 AssertThrowMPI(ierr);
1503 // this barrier is necessary, because otherwise others might already
1504 // write while one core is still setting the size to zero.
1505 ierr = MPI_Barrier(mpi_communicator);
1506 AssertThrowMPI(ierr);
1507 ierr = MPI_Info_free(&info);
1508 AssertThrowMPI(ierr);
1509
1510 // Write sizes of each cell into file simultaneously.
1511 {
1512 const MPI_Offset my_global_file_position =
1513 static_cast<MPI_Offset>(global_first_cell) * sizeof(unsigned int);
1514
1515 // It is very unlikely that a single process has more than
1516 // 2 billion cells, but we might as well check.
1517 AssertThrow(src_sizes_variable.size() <
1518 static_cast<std::size_t>(
1519 std::numeric_limits<int>::max()),
1520 ExcNotImplemented());
1521
1522 ierr = Utilities::MPI::LargeCount::File_write_at_c(
1523 fh,
1524 my_global_file_position,
1525 src_sizes_variable.data(),
1526 src_sizes_variable.size(),
1527 MPI_INT,
1528 MPI_STATUS_IGNORE);
1529 AssertThrowMPI(ierr);
1530 }
1531
1532 // Gather size of data in bytes we want to store from this
1533 // processor and compute the prefix sum. We do this in 64 bit
1534 // to avoid overflow for files larger than 4GB:
1535 const std::uint64_t size_on_proc = src_data_variable.size();
1536 std::uint64_t prefix_sum = 0;
1537 ierr = MPI_Exscan(&size_on_proc,
1538 &prefix_sum,
1539 1,
1540 MPI_UINT64_T,
1541 MPI_SUM,
1542 mpi_communicator);
1543 AssertThrowMPI(ierr);
1544
1545 const MPI_Offset my_global_file_position =
1546 static_cast<MPI_Offset>(global_num_cells) * sizeof(unsigned int) +
1547 prefix_sum;
1548
1549 // Write data consecutively into file.
1550 ierr =
1551 Utilities::MPI::LargeCount::File_write_at_c(fh,
1552 my_global_file_position,
1553 src_data_variable.data(),
1554 src_data_variable.size(),
1555 MPI_BYTE,
1556 MPI_STATUS_IGNORE);
1557 AssertThrowMPI(ierr);
1558
1559
1560 ierr = MPI_File_close(&fh);
1561 AssertThrowMPI(ierr);
1562 }
1563#else
1564 (void)global_first_cell;
1565 (void)global_num_cells;
1566 (void)filename;
1567
1568 AssertThrow(false, ExcNeedsMPI());
1569#endif
1570 }
1571
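// File layout sketch (not from this file): save() produces
//   <filename>_fixed.data    = [sizes_fixed_cumulative header]
//                              [bytes_per_cell bytes for cell 0][cell 1]...
//   <filename>_variable.data = [int size of cell 0]...[int size of cell N-1]
//                              [variable data of cell 0]...[cell N-1]
// with cells ordered by their global index, which is exactly what load()
// below seeks into using global_first_cell and global_num_cells.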
1572
1573
1574 template <int dim, int spacedim>
1575 void
1576 DistributedTriangulationBase<dim, spacedim>::DataTransfer::load(
1577 const unsigned int global_first_cell,
1578 const unsigned int global_num_cells,
1579 const unsigned int local_num_cells,
1580 const std::string &filename,
1581 const unsigned int n_attached_deserialize_fixed,
1582 const unsigned int n_attached_deserialize_variable)
1583 {
1584#ifdef DEAL_II_WITH_MPI
1585 // Large fractions of this function have been copied from
1586 // DataOutInterface::write_vtu_in_parallel.
1587 // TODO: Write general MPIIO interface.
1588
1589 Assert(dest_data_fixed.size() == 0,
1590 ExcMessage("Previously loaded data has not been released yet!"));
1591
1592 variable_size_data_stored = (n_attached_deserialize_variable > 0);
1593
1594 //
1595 // ---------- Fixed size data ----------
1596 //
1597 {
1598 const std::string fname_fixed = std::string(filename) + "_fixed.data";
1599
1600 MPI_Info info;
1601 int ierr = MPI_Info_create(&info);
1602 AssertThrowMPI(ierr);
1603
1604 MPI_File fh;
1605 ierr = MPI_File_open(
1606 mpi_communicator, fname_fixed.c_str(), MPI_MODE_RDONLY, info, &fh);
1607 AssertThrowMPI(ierr);
1608
1609 ierr = MPI_Info_free(&info);
1610 AssertThrowMPI(ierr);
1611
1612 // Read cumulative sizes from file.
1613 // Since all processors need the same information about the data
1614 // sizes, let each of them retrieve it by reading from the same
1615 // location in the file.
1616 sizes_fixed_cumulative.resize(1 + n_attached_deserialize_fixed +
1617 (variable_size_data_stored ? 1 : 0));
1618 ierr = Utilities::MPI::LargeCount::File_read_at_c(
1619 fh,
1620 0,
1621 sizes_fixed_cumulative.data(),
1622 sizes_fixed_cumulative.size(),
1623 MPI_UNSIGNED,
1624 MPI_STATUS_IGNORE);
1625 AssertThrowMPI(ierr);
1626
1627 // Allocate sufficient memory.
1628 const unsigned int bytes_per_cell = sizes_fixed_cumulative.back();
1629 dest_data_fixed.resize(static_cast<size_t>(local_num_cells) *
1630 bytes_per_cell);
1631
1632 // Read packed data from file simultaneously.
1633 const MPI_Offset size_header =
1634 sizes_fixed_cumulative.size() * sizeof(unsigned int);
1635
1636 // Make sure we do the following computation in 64bit integers to be
1637 // able to handle 4GB+ files:
1638 const MPI_Offset my_global_file_position =
1639 size_header +
1640 static_cast<MPI_Offset>(global_first_cell) * bytes_per_cell;
1641
1641
1642 ierr = Utilities::MPI::LargeCount::File_read_at_c(fh,
1643 my_global_file_position,
1644 dest_data_fixed.data(),
1645 dest_data_fixed.size(),
1646 MPI_BYTE,
1647 MPI_STATUS_IGNORE);
1648 AssertThrowMPI(ierr);
1649
1650
1651 ierr = MPI_File_close(&fh);
1652 AssertThrowMPI(ierr);
1653 }
1654
1655 //
1656 // ---------- Variable size data ----------
1657 //
1658 if (variable_size_data_stored)
1659 {
1660 const std::string fname_variable =
1661 std::string(filename) + "_variable.data";
1662
1663 MPI_Info info;
1664 int ierr = MPI_Info_create(&info);
1665 AssertThrowMPI(ierr);
1666
1667 MPI_File fh;
1668 ierr = MPI_File_open(
1669 mpi_communicator, fname_variable.c_str(), MPI_MODE_RDONLY, info, &fh);
1670 AssertThrowMPI(ierr);
1671
1672 ierr = MPI_Info_free(&info);
1673 AssertThrowMPI(ierr);
1674
1675 // Read sizes of all locally owned cells.
1676 dest_sizes_variable.resize(local_num_cells);
1677
1678 const MPI_Offset my_global_file_position_sizes =
1679 static_cast<MPI_Offset>(global_first_cell) * sizeof(unsigned int);
1680
1681 ierr = Utilities::MPI::LargeCount::File_read_at_c(
1682 fh,
1683 my_global_file_position_sizes,
1684 dest_sizes_variable.data(),
1685 dest_sizes_variable.size(),
1686 MPI_INT,
1687 MPI_STATUS_IGNORE);
1688 AssertThrowMPI(ierr);
1689
1690
1691 // Compute my data size in bytes and compute prefix sum. We do this
1692 // in 64 bit to avoid overflow for files larger than 4 GB:
1693 const std::uint64_t size_on_proc =
1694 std::accumulate(dest_sizes_variable.begin(),
1695 dest_sizes_variable.end(),
1696 0ULL);
1697
1698 std::uint64_t prefix_sum = 0;
1699 ierr = MPI_Exscan(&size_on_proc,
1700 &prefix_sum,
1701 1,
1702 MPI_UINT64_T,
1703 MPI_SUM,
1704 mpi_communicator);
1705 AssertThrowMPI(ierr);
1706
1707 const MPI_Offset my_global_file_position =
1708 static_cast<MPI_Offset>(global_num_cells) * sizeof(unsigned int) +
1709 prefix_sum;
1710
1711 dest_data_variable.resize(size_on_proc);
1712
1713 ierr =
1714 Utilities::MPI::LargeCount::File_read_at_c(fh,
1715 my_global_file_position,
1716 dest_data_variable.data(),
1717 dest_data_variable.size(),
1718 MPI_BYTE,
1719 MPI_STATUS_IGNORE);
1720 AssertThrowMPI(ierr);
1721
1722 ierr = MPI_File_close(&fh);
1723 AssertThrowMPI(ierr);
1724 }
1725#else
1726 (void)global_first_cell;
1727 (void)global_num_cells;
1728 (void)local_num_cells;
1729 (void)filename;
1730 (void)n_attached_deserialize_fixed;
1731 (void)n_attached_deserialize_variable;
1732
1733 AssertThrow(false, ExcNeedsMPI());
1734#endif
1735 }
1736
1737
1738
1739 template <int dim, int spacedim>
1740 void
1741 DistributedTriangulationBase<dim, spacedim>::DataTransfer::clear()
1742 {
1743 variable_size_data_stored = false;
1744
1745 // free information about data sizes
1746 sizes_fixed_cumulative.clear();
1747 sizes_fixed_cumulative.shrink_to_fit();
1748
1749 // free fixed size transfer data
1750 src_data_fixed.clear();
1751 src_data_fixed.shrink_to_fit();
1752
1753 dest_data_fixed.clear();
1754 dest_data_fixed.shrink_to_fit();
1755
1756 // free variable size transfer data
1757 src_sizes_variable.clear();
1758 src_sizes_variable.shrink_to_fit();
1759
1760 src_data_variable.clear();
1761 src_data_variable.shrink_to_fit();
1762
1763 dest_sizes_variable.clear();
1764 dest_sizes_variable.shrink_to_fit();
1765
1766 dest_data_variable.clear();
1767 dest_data_variable.shrink_to_fit();
1768 }
1769
1770} // end namespace parallel
1771
1772
1773
1774/*-------------- Explicit Instantiations -------------------------------*/
1775#include "tria_base.inst"
1776
1777DEAL_II_NAMESPACE_CLOSE