fully_distributed_tria.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2019 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------


#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>

#include <deal.II/distributed/fully_distributed_tria.h>

#include <deal.II/grid/grid_tools.h>

#include <fstream>
#include <memory>

DEAL_II_NAMESPACE_OPEN
30// Forward declarations
31namespace GridGenerator
32{
33 template <int dim, int spacedim>
34 void
36 const double left,
37 const double right,
38 const bool colorize);
39} // namespace GridGenerator
40
41namespace parallel
42{
43 namespace fullydistributed
44 {
    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    Triangulation<dim, spacedim>::Triangulation(const MPI_Comm mpi_communicator)
      : parallel::DistributedTriangulationBase<dim, spacedim>(mpi_communicator)
      , settings(TriangulationDescription::Settings::default_setting)
      , partitioner([](dealii::Triangulation<dim, spacedim> &tria,
                       const unsigned int                    n_partitions) {
          GridTools::partition_triangulation_zorder(n_partitions, tria);
        })
      , currently_processing_create_triangulation_for_internal_usage(false)
      , currently_processing_prepare_coarsening_and_refinement_for_internal_usage(
          false)
    {}
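
    // Usage sketch (illustrative, not part of the library source; assumes an
    // initialized MPI runtime): the constructor above installs
    // GridTools::partition_triangulation_zorder() as the default partitioner.
    //
    //   parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);
    //   // tria is empty here; it is filled later via
    //   // create_triangulation(construction_data) or copy_triangulation().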


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::create_triangulation(
      const TriangulationDescription::Description<dim, spacedim>
        &construction_data)
    {
      // check if the communicator of this parallel triangulation has been used
      // to construct the TriangulationDescription::Description
      Assert(construction_data.comm == this->mpi_communicator,
             ExcMessage("MPI communicators do not match!"));

      // store internally the settings
      settings = construction_data.settings;

      // set the smoothing properties
      if (settings &
          TriangulationDescription::Settings::construct_multigrid_hierarchy)
        this->set_mesh_smoothing(
          static_cast<
            typename dealii::Triangulation<dim, spacedim>::MeshSmoothing>(
            dealii::Triangulation<dim, spacedim>::none |
            Triangulation<dim, spacedim>::limit_level_difference_at_vertices));
      else
        this->set_mesh_smoothing(
          static_cast<
            typename dealii::Triangulation<dim, spacedim>::MeshSmoothing>(
            dealii::Triangulation<dim, spacedim>::none));

      this->set_mesh_smoothing(construction_data.smoothing);

      // clear internal data structures
      this->coarse_cell_id_to_coarse_cell_index_vector.clear();
      this->coarse_cell_index_to_coarse_cell_id_vector.clear();

      // check if no locally relevant coarse-grid cells have been provided
      if (construction_data.coarse_cell_vertices.empty())
        {
          // 1) create a dummy hypercube
          currently_processing_create_triangulation_for_internal_usage = true;
          GridGenerator::hyper_cube(*this, 0, 1, false);
          currently_processing_create_triangulation_for_internal_usage = false;

          // 2) mark cell as artificial
          auto cell = this->begin();
          cell->set_subdomain_id(dealii::numbers::artificial_subdomain_id);
          cell->set_level_subdomain_id(
            dealii::numbers::artificial_subdomain_id);

          // 3) set up dummy mapping between locally relevant coarse-grid cells
          //    and global cells
          this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(
            numbers::invalid_coarse_cell_id, 0);
          this->coarse_cell_index_to_coarse_cell_id_vector.emplace_back(
            numbers::invalid_coarse_cell_id);
        }
      else
        {
          // 1) store `coarse-cell index to coarse-cell id`-mapping
          this->coarse_cell_index_to_coarse_cell_id_vector =
            construction_data.coarse_cell_index_to_coarse_cell_id;

          // 2) set up `coarse-cell id to coarse-cell index`-mapping
          std::map<types::coarse_cell_id, unsigned int>
            coarse_cell_id_to_coarse_cell_index_vector;
          for (unsigned int i = 0;
               i < construction_data.coarse_cell_index_to_coarse_cell_id.size();
               ++i)
            coarse_cell_id_to_coarse_cell_index_vector
              [construction_data.coarse_cell_index_to_coarse_cell_id[i]] = i;

          for (auto i : coarse_cell_id_to_coarse_cell_index_vector)
            this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(i);

          // create the locally-relevant part of the triangulation
          currently_processing_prepare_coarsening_and_refinement_for_internal_usage =
            true;
          currently_processing_create_triangulation_for_internal_usage = true;
          dealii::Triangulation<dim, spacedim>::create_triangulation(
            construction_data);
          currently_processing_prepare_coarsening_and_refinement_for_internal_usage =
            false;
          currently_processing_create_triangulation_for_internal_usage = false;

          // create a copy of cell_infos such that we can sort them
          auto cell_infos = construction_data.cell_infos;

          // sort cell_infos on each level separately (as done in
          // dealii::Triangulation::create_triangulation())
          for (auto &cell_info : cell_infos)
            std::sort(cell_info.begin(),
                      cell_info.end(),
                      [&](TriangulationDescription::CellData<dim> a,
                          TriangulationDescription::CellData<dim> b) {
                        const CellId a_id(a.id);
                        const CellId b_id(b.id);

                        const auto a_coarse_cell_index =
                          this->coarse_cell_id_to_coarse_cell_index(
                            a_id.get_coarse_cell_id());
                        const auto b_coarse_cell_index =
                          this->coarse_cell_id_to_coarse_cell_index(
                            b_id.get_coarse_cell_id());

                        // according to their coarse-cell index and, if that is
                        // the same, according to their cell id (the result is
                        // that cells on each level are sorted according to
                        // their index on that level - what we need in the
                        // following operations)
                        if (a_coarse_cell_index != b_coarse_cell_index)
                          return a_coarse_cell_index < b_coarse_cell_index;
                        else
                          return a_id < b_id;
                      });

          // 4a) set all cells artificial (and set the actual
          //     (level_)subdomain_ids in the next step)
          for (const auto &cell : this->cell_iterators())
            {
              if (cell->is_active())
                cell->set_subdomain_id(
                  dealii::numbers::artificial_subdomain_id);

              cell->set_level_subdomain_id(
                dealii::numbers::artificial_subdomain_id);
            }

          // 4b) set actual (level_)subdomain_ids
          for (unsigned int level = 0;
               level < cell_infos.size() && !cell_infos[level].empty();
               ++level)
            {
              auto cell      = this->begin(level);
              auto cell_info = cell_infos[level].begin();
              for (; cell_info != cell_infos[level].end(); ++cell_info)
                {
                  // find the cell with the correct id
                  while (cell_info->id != cell->id().template to_binary<dim>())
                    ++cell;

                  // subdomain id
                  if (cell->is_active())
                    cell->set_subdomain_id(cell_info->subdomain_id);

                  // level subdomain id
                  if (settings & TriangulationDescription::Settings::
                                   construct_multigrid_hierarchy)
                    cell->set_level_subdomain_id(cell_info->level_subdomain_id);
                }
            }
        }

      this->update_number_cache();
      this->update_cell_relations();
    }
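
    // Usage sketch (illustrative, not part of the library source): the
    // documented way to obtain a Description for the function above is to
    // partition a mesh replicated on all processes and convert it, e.g.
    // (assuming an initialized MPI runtime):
    //
    //   dealii::Triangulation<2> serial_tria;
    //   GridGenerator::hyper_cube(serial_tria);
    //   serial_tria.refine_global(3);
    //   GridTools::partition_triangulation_zorder(
    //     Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD), serial_tria);
    //
    //   const auto description = TriangulationDescription::Utilities::
    //     create_description_from_triangulation(serial_tria, MPI_COMM_WORLD);
    //
    //   parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);
    //   tria.create_triangulation(description);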


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::create_triangulation(
      const std::vector<Point<spacedim>>       &vertices,
      const std::vector<dealii::CellData<dim>> &cells,
      const SubCellData                        &subcelldata)
    {
      Assert(
        currently_processing_create_triangulation_for_internal_usage,
        ExcMessage(
          "You have called the overload of\n"
          "\n"
          "  parallel::fullydistributed::Triangulation::"
          "create_triangulation()\n"
          "\n"
          "which takes 3 arguments. This function is not yet implemented for "
          "this class. If you have not called this function directly, it "
          "might have been called via a function from the GridGenerator or "
          "GridIn namespace. To set up a fully-distributed Triangulation with "
          "these utility functions, please start by using the same process to "
          "set up a serial Triangulation, parallel::shared::Triangulation, or "
          "a parallel::distributed::Triangulation. Once that is complete use "
          "the copy_triangulation() member function to finish setting up the "
          "original fully distributed Triangulation. Alternatively, you can "
          "use TriangulationDescription::Utilities::"
          "create_description_from_triangulation() or "
          "create_description_from_triangulation_in_groups() to create the "
          "description of the local partition, and pass that description to "
          "parallel::fullydistributed::Triangulation::create_triangulation()."));

      dealii::Triangulation<dim, spacedim>::create_triangulation(vertices,
                                                                 cells,
                                                                 subcelldata);
    }
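
    // Usage sketch (illustrative, not part of the library source): the
    // workflow recommended by the message above builds the mesh on a
    // different triangulation type first and then hands it over:
    //
    //   dealii::Triangulation<2> base_tria;
    //   GridGenerator::hyper_ball(base_tria); // any GridGenerator/GridIn mesh
    //   base_tria.refine_global(2);
    //
    //   parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);
    //   tria.copy_triangulation(base_tria);   // partitions and distributes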


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::copy_triangulation(
      const dealii::Triangulation<dim, spacedim> &other_tria)
    {
      // pointer to the triangulation for which the construction data
      // should be created (normally it is the input triangulation but
      // in the case of a serial triangulation we create a copy which should
      // be used)
      const dealii::Triangulation<dim, spacedim> *other_tria_ptr = &other_tria;

      // temporary serial triangulation (since the input triangulation is const
      // and we might modify its subdomain_ids and level_subdomain_ids during
      // partitioning)
      dealii::Triangulation<dim, spacedim> serial_tria;

      // check if other triangulation is not a parallel one, which needs to be
      // partitioned
      if (dynamic_cast<const dealii::parallel::TriangulationBase<dim, spacedim>
                         *>(&other_tria) == nullptr)
        {
          // actually copy the serial triangulation
          serial_tria.copy_triangulation(other_tria);

          // partition triangulation
          this->partitioner(serial_tria,
                            Utilities::MPI::n_mpi_processes(
                              this->mpi_communicator));

          // partition multigrid levels
          if (this->is_multilevel_hierarchy_constructed())
            GridTools::partition_multigrid_levels(serial_tria);

          // use the new serial triangulation to create the construction data
          other_tria_ptr = &serial_tria;
        }

      // create construction data
      const auto construction_data = TriangulationDescription::Utilities::
        create_description_from_triangulation(*other_tria_ptr,
                                              this->mpi_communicator,
                                              this->settings);

      // finally create triangulation
      this->create_triangulation(construction_data);
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::set_partitioner(
      const std::function<void(dealii::Triangulation<dim, spacedim> &,
                               const unsigned int)> &partitioner,
      const TriangulationDescription::Settings      &settings)
    {
      this->partitioner = partitioner;
      this->settings    = settings;
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::set_partitioner(
      const RepartitioningPolicyTools::Base<dim, spacedim> &partitioner,
      const TriangulationDescription::Settings             &settings)
    {
      this->partitioner_distributed = &partitioner;
      this->settings                = settings;
    }
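
    // Usage sketch (illustrative, not part of the library source): only a
    // pointer to the policy is stored above, so the policy object must
    // outlive its use by the triangulation. Assuming deal.II's
    // RepartitioningPolicyTools::MinimalGranularityPolicy with a minimum of
    // 64 cells per rank:
    //
    //   RepartitioningPolicyTools::MinimalGranularityPolicy<2> policy(64);
    //   tria.set_partitioner(
    //     policy, TriangulationDescription::Settings::default_setting);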


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::repartition()
    {
      // signal that repartitioning has started
      this->signals.pre_distributed_repartition();

      // create construction_data with the help of the partitioner
      const auto construction_data = TriangulationDescription::Utilities::
        create_description_from_triangulation(
          *this,
          this->partitioner_distributed->partition(*this),
          this->settings);

      // clear old content
      this->clear();
      this->coarse_cell_id_to_coarse_cell_index_vector.clear();
      this->coarse_cell_index_to_coarse_cell_id_vector.clear();

      // use construction_data to set up new triangulation
      this->create_triangulation(construction_data);

      // signal that repartitioning has completed
      this->signals.post_distributed_repartition();
    }
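
    // Usage sketch (illustrative, not part of the library source):
    // repartition() rebuilds the triangulation from a fresh description, so
    // existing iterators become invalid. The typical call sequence is:
    //
    //   tria.set_partitioner(policy, settings);
    //   tria.repartition();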


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
    {
      DEAL_II_NOT_IMPLEMENTED();
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::prepare_coarsening_and_refinement()
    {
      Assert(
        currently_processing_prepare_coarsening_and_refinement_for_internal_usage,
        ExcMessage("No coarsening and refinement is supported!"));

      return dealii::Triangulation<dim, spacedim>::
        prepare_coarsening_and_refinement();
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    std::size_t Triangulation<dim, spacedim>::memory_consumption() const
    {
      const std::size_t mem =
        this->dealii::parallel::TriangulationBase<dim, spacedim>::
          memory_consumption() +
        MemoryConsumption::memory_consumption(
          coarse_cell_id_to_coarse_cell_index_vector) +
        MemoryConsumption::memory_consumption(
          coarse_cell_index_to_coarse_cell_id_vector);
      return mem;
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed()
      const
    {
      return (
        settings &
        TriangulationDescription::Settings::construct_multigrid_hierarchy);
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    unsigned int Triangulation<dim, spacedim>::
      coarse_cell_id_to_coarse_cell_index(
        const types::coarse_cell_id coarse_cell_id) const
    {
      const auto coarse_cell_index = std::lower_bound(
        coarse_cell_id_to_coarse_cell_index_vector.begin(),
        coarse_cell_id_to_coarse_cell_index_vector.end(),
        coarse_cell_id,
        [](const std::pair<types::coarse_cell_id, unsigned int> &pair,
           const types::coarse_cell_id &val) { return pair.first < val; });
      if (coarse_cell_index !=
          coarse_cell_id_to_coarse_cell_index_vector.cend())
        return coarse_cell_index->second;
      else
        return numbers::invalid_unsigned_int; // cell could not be found
    }
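
    // Worked example (illustrative, not part of the library source): the
    // lookup above is a binary search on (id, index) pairs kept sorted by
    // id. With the stored vector {(2,0), (5,1), (9,2)}, querying id 5
    // returns index 1, and querying an id larger than 9 returns
    // numbers::invalid_unsigned_int. Querying an absent id below the
    // largest one (say 6) would return the index of the next larger id, so
    // callers are expected to pass only locally known ids.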


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    types::coarse_cell_id
      Triangulation<dim, spacedim>::coarse_cell_index_to_coarse_cell_id(
        const unsigned int coarse_cell_index) const
    {
      AssertIndexRange(coarse_cell_index,
                       coarse_cell_index_to_coarse_cell_id_vector.size());

      const auto coarse_cell_id =
        coarse_cell_index_to_coarse_cell_id_vector[coarse_cell_index];
      Assert(coarse_cell_id != numbers::invalid_coarse_cell_id,
             ExcMessage("You are trying to access a dummy cell!"));
      return coarse_cell_id;
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::update_cell_relations()
    {
      // Reorganize memory for local_cell_relations.
      this->local_cell_relations.clear();
      this->local_cell_relations.reserve(this->n_locally_owned_active_cells());

      for (const auto &cell : this->active_cell_iterators())
        if (cell->is_locally_owned())
          this->local_cell_relations.emplace_back(
            cell, CellStatus::cell_will_persist);
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::save(const std::string &filename) const
    {
#ifdef DEAL_II_WITH_MPI

      Assert(
        this->cell_attached_data.n_attached_deserialize == 0,
        ExcMessage(
          "Not all SolutionTransfer objects have been deserialized after the last call to load()."));
      Assert(this->n_cells() > 0,
             ExcMessage("Can not save() an empty Triangulation."));

      const int myrank =
        Utilities::MPI::this_mpi_process(this->mpi_communicator);
      const int mpisize =
        Utilities::MPI::n_mpi_processes(this->mpi_communicator);

      // Compute global offset for each rank.
      unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();

      unsigned int global_first_cell = 0;

      int ierr = MPI_Exscan(&n_locally_owned_cells,
                            &global_first_cell,
                            1,
                            MPI_UNSIGNED,
                            MPI_SUM,
                            this->mpi_communicator);
      AssertThrowMPI(ierr);

      global_first_cell *= sizeof(unsigned int);


      if (myrank == 0)
        {
          std::string   fname = std::string(filename) + ".info";
          std::ofstream f(fname);
          f << "version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_global_active_cells"
            << std::endl
            << dealii::internal::CellAttachedDataSerializer<dim, spacedim>::
                 version_number
            << " " << Utilities::MPI::n_mpi_processes(this->mpi_communicator)
            << " " << this->cell_attached_data.pack_callbacks_fixed.size()
            << " " << this->cell_attached_data.pack_callbacks_variable.size()
            << " " << this->n_global_active_cells() << std::endl;
        }

      // Save cell attached data.
      this->save_attached_data(global_first_cell,
                               this->n_global_active_cells(),
                               filename);

      // Save triangulation description.
      {
        MPI_Info info;
        int      ierr = MPI_Info_create(&info);
        AssertThrowMPI(ierr);

        const std::string fname_tria = filename + "_triangulation.data";

        // Open file.
        MPI_File fh;
        ierr = MPI_File_open(this->mpi_communicator,
                             fname_tria.c_str(),
                             MPI_MODE_CREATE | MPI_MODE_WRONLY,
                             info,
                             &fh);
        AssertThrowMPI(ierr);

        ierr = MPI_File_set_size(fh, 0); // delete the file contents
        AssertThrowMPI(ierr);
        // this barrier is necessary, because otherwise others might already
        // write while one core is still setting the size to zero.
        ierr = MPI_Barrier(this->mpi_communicator);
        AssertThrowMPI(ierr);
        ierr = MPI_Info_free(&info);
        AssertThrowMPI(ierr);
        // ------------------

        // Create construction data.
        const auto construction_data = TriangulationDescription::Utilities::
          create_description_from_triangulation(*this,
                                                this->mpi_communicator,
                                                this->settings);

        // Pack.
        std::vector<char> buffer;
        dealii::Utilities::pack(construction_data, buffer, false);

        // Write offsets to file.
        const std::uint64_t buffer_size = buffer.size();

        std::uint64_t offset = 0;

        ierr = MPI_Exscan(
          &buffer_size,
          &offset,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // Write offsets to file.
        ierr = MPI_File_write_at(
          fh,
          myrank * sizeof(std::uint64_t),
          &buffer_size,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        // global position in file
        const std::uint64_t global_position =
          mpisize * sizeof(std::uint64_t) + offset;

        // Write buffers to file.
        ierr = Utilities::MPI::LargeCount::File_write_at_c(
          fh,
          global_position,
          buffer.data(),
          buffer.size(), // local buffer
          MPI_CHAR,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        ierr = MPI_File_close(&fh);
        AssertThrowMPI(ierr);
      }
#else
      (void)filename;

      AssertThrow(false, ExcNeedsMPI());
#endif
    }
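
    // Usage sketch (illustrative, not part of the library source): save()
    // writes "<filename>.info" (the text header above), the attached-data
    // files, and "<filename>_triangulation.data", which holds one uint64
    // buffer size per rank followed by the concatenated per-rank buffers:
    //
    //   tria.save("checkpoint");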


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::load(const std::string &filename)
    {
#ifdef DEAL_II_WITH_MPI
      Assert(this->n_cells() == 0,
             ExcMessage("load() only works if the Triangulation is empty!"));


      unsigned int version, numcpus, attached_count_fixed,
        attached_count_variable, n_global_active_cells;
      {
        std::string   fname = std::string(filename) + ".info";
        std::ifstream f(fname);
        AssertThrow(f.fail() == false, ExcIO());
        std::string firstline;
        getline(f, firstline);
        f >> version >> numcpus >> attached_count_fixed >>
          attached_count_variable >> n_global_active_cells;
      }

      const auto expected_version = dealii::internal::
        CellAttachedDataSerializer<dim, spacedim>::version_number;

      AssertThrow(version == expected_version,
                  ExcMessage("Incompatible version found in .info file."));

      // Load description and construct the triangulation.
      {
        const int myrank =
          Utilities::MPI::this_mpi_process(this->mpi_communicator);
        const int mpisize =
          Utilities::MPI::n_mpi_processes(this->mpi_communicator);

        AssertDimension(numcpus, mpisize);

        // Open file.
        MPI_Info info;
        int      ierr = MPI_Info_create(&info);
        AssertThrowMPI(ierr);

        const std::string fname_tria = filename + "_triangulation.data";

        MPI_File fh;
        ierr = MPI_File_open(this->mpi_communicator,
                             fname_tria.c_str(),
                             MPI_MODE_RDONLY,
                             info,
                             &fh);
        AssertThrowMPI(ierr);

        ierr = MPI_Info_free(&info);
        AssertThrowMPI(ierr);

        // Read offsets from file.
        std::uint64_t buffer_size;

        ierr = MPI_File_read_at(
          fh,
          myrank * sizeof(std::uint64_t),
          &buffer_size,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        std::uint64_t offset = 0;

        ierr = MPI_Exscan(
          &buffer_size,
          &offset,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // global position in file
        const std::uint64_t global_position =
          mpisize * sizeof(std::uint64_t) + offset;

        // Read buffers from file.
        std::vector<char> buffer(buffer_size);
        ierr = Utilities::MPI::LargeCount::File_read_at_c(
          fh,
          global_position,
          buffer.data(),
          buffer.size(), // local buffer
          MPI_CHAR,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        ierr = MPI_File_close(&fh);
        AssertThrowMPI(ierr);

        auto construction_data = dealii::Utilities::template unpack<
          TriangulationDescription::Description<dim, spacedim>>(buffer, false);

        // WARNING: serialization cannot handle the MPI communicator
        // which is the reason why we have to set it here explicitly
        construction_data.comm = this->mpi_communicator;

        this->create_triangulation(construction_data);
      }

      // Compute global offset for each rank.
      unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();

      unsigned int global_first_cell = 0;

      int ierr = MPI_Exscan(&n_locally_owned_cells,
                            &global_first_cell,
                            1,
                            MPI_UNSIGNED,
                            MPI_SUM,
                            this->mpi_communicator);
      AssertThrowMPI(ierr);

      global_first_cell *= sizeof(unsigned int);

      Assert(this->n_global_active_cells() == n_global_active_cells,
             ExcMessage("Number of global active cells differ!"));

      // clear all of the callback data, as explained in the documentation of
      // register_data_attach()
      this->cell_attached_data.n_attached_data_sets = 0;
      this->cell_attached_data.n_attached_deserialize =
        attached_count_fixed + attached_count_variable;

      // Load attached cell data, if any was stored.
      this->load_attached_data(global_first_cell,
                               this->n_global_active_cells(),
                               this->n_locally_owned_active_cells(),
                               filename,
                               attached_count_fixed,
                               attached_count_variable);

      this->update_cell_relations();
      this->update_periodic_face_map();
      this->update_number_cache();
#else
      (void)filename;

      AssertThrow(false, ExcNeedsMPI());
#endif
    }
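
    // Usage sketch (illustrative, not part of the library source): load()
    // requires an empty triangulation and the same number of MPI ranks that
    // was used for save() (enforced by the AssertDimension above):
    //
    //   parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);
    //   tria.load("checkpoint");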


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::update_number_cache()
    {
      dealii::parallel::TriangulationBase<dim, spacedim>::update_number_cache();

      // additionally update the number of global coarse cells
      types::coarse_cell_id number_of_global_coarse_cells = 0;

      for (const auto &cell : this->active_cell_iterators())
        if (!cell->is_artificial())
          number_of_global_coarse_cells =
            std::max(number_of_global_coarse_cells,
                     cell->id().get_coarse_cell_id());

      number_of_global_coarse_cells =
        Utilities::MPI::max(number_of_global_coarse_cells,
                            this->mpi_communicator) +
        1;

      this->number_cache.number_of_global_coarse_cells =
        number_of_global_coarse_cells;
    }


  } // namespace fullydistributed
} // namespace parallel



/*-------------- Explicit Instantiations -------------------------------*/
#include "fully_distributed_tria.inst"


DEAL_II_NAMESPACE_CLOSE