Reference documentation for deal.II version GIT relicensing-426-g7976cfd195 2024-04-18 21:10:01+00:00
fully_distributed_tria.cc
Go to the documentation of this file.
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2019 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi_large_count.h>

#include <deal.II/distributed/fully_distributed_tria.h>
#include <deal.II/distributed/repartitioning_policy_tools.h>

#include <deal.II/grid/grid_tools.h>

#include <fstream>
#include <memory>

DEAL_II_NAMESPACE_OPEN
// Forward declarations
namespace GridGenerator
{
  template <int dim, int spacedim>
  void
  hyper_cube(Triangulation<dim, spacedim> &tria,
             const double                  left,
             const double                  right,
             const bool                    colorize);
} // namespace GridGenerator

namespace parallel
{
  namespace fullydistributed
  {
    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    Triangulation<dim, spacedim>::Triangulation(const MPI_Comm mpi_communicator)
      : parallel::DistributedTriangulationBase<dim, spacedim>(mpi_communicator)
      , settings(TriangulationDescription::Settings::default_setting)
      , partitioner([](dealii::Triangulation<dim, spacedim> &tria,
                       const unsigned int                    n_partitions) {
          GridTools::partition_triangulation_zorder(n_partitions, tria);
        })
      , currently_processing_create_triangulation_for_internal_usage(false)
      , currently_processing_prepare_coarsening_and_refinement_for_internal_usage(
          false)
    {}


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::create_triangulation(
      const TriangulationDescription::Description<dim, spacedim>
        &construction_data)
    {
      // check if the communicator of this parallel triangulation has been used
      // to construct the TriangulationDescription::Description
      Assert(construction_data.comm == this->mpi_communicator,
             ExcMessage("MPI communicators do not match!"));

      // store the settings internally
      settings = construction_data.settings;

      // set the smoothing properties
      if (settings &
          TriangulationDescription::Settings::construct_multigrid_hierarchy)
        this->set_mesh_smoothing(
          static_cast<
            typename dealii::Triangulation<dim, spacedim>::MeshSmoothing>(
            dealii::Triangulation<dim, spacedim>::none |
            Triangulation<dim, spacedim>::limit_level_difference_at_vertices));
      else
        this->set_mesh_smoothing(
          static_cast<
            typename dealii::Triangulation<dim, spacedim>::MeshSmoothing>(
            dealii::Triangulation<dim, spacedim>::none));

      this->set_mesh_smoothing(construction_data.smoothing);

      // clear internal data structures
      this->coarse_cell_id_to_coarse_cell_index_vector.clear();
      this->coarse_cell_index_to_coarse_cell_id_vector.clear();

      // check if no locally relevant coarse-grid cells have been provided
      if (construction_data.coarse_cell_vertices.empty())
        {
          // 1) create a dummy hypercube
          currently_processing_create_triangulation_for_internal_usage = true;
          GridGenerator::hyper_cube(*this, 0, 1, false);
          currently_processing_create_triangulation_for_internal_usage = false;

          // 2) mark the single cell as artificial
          auto cell = this->begin();
          cell->set_subdomain_id(dealii::numbers::artificial_subdomain_id);
          cell->set_level_subdomain_id(
            dealii::numbers::artificial_subdomain_id);

          // 3) set up a dummy mapping between locally relevant coarse-grid
          //    cells and global cells
          this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(
            numbers::invalid_coarse_cell_id, 0);
          this->coarse_cell_index_to_coarse_cell_id_vector.emplace_back(
            numbers::invalid_coarse_cell_id);
        }
      else
        {
          // 1) store the `coarse-cell index to coarse-cell id` mapping
          this->coarse_cell_index_to_coarse_cell_id_vector =
            construction_data.coarse_cell_index_to_coarse_cell_id;

          // 2) set up the `coarse-cell id to coarse-cell index` mapping
          std::map<types::coarse_cell_id, unsigned int>
            coarse_cell_id_to_coarse_cell_index_vector;
          for (unsigned int i = 0;
               i < construction_data.coarse_cell_index_to_coarse_cell_id.size();
               ++i)
            coarse_cell_id_to_coarse_cell_index_vector
              [construction_data.coarse_cell_index_to_coarse_cell_id[i]] = i;

          for (auto i : coarse_cell_id_to_coarse_cell_index_vector)
            this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(i);

          // 3) create the locally relevant part of the triangulation
          currently_processing_prepare_coarsening_and_refinement_for_internal_usage =
            true;
          currently_processing_create_triangulation_for_internal_usage = true;
          dealii::Triangulation<dim, spacedim>::create_triangulation(
            construction_data);
          currently_processing_prepare_coarsening_and_refinement_for_internal_usage =
            false;
          currently_processing_create_triangulation_for_internal_usage = false;

          // create a copy of cell_infos such that we can sort them
          auto cell_infos = construction_data.cell_infos;

          // sort cell_infos on each level separately (as done in
          // dealii::Triangulation::create_triangulation())
          for (auto &cell_info : cell_infos)
            std::sort(cell_info.begin(),
                      cell_info.end(),
                      [&](const auto &a, const auto &b) {
                        const CellId a_id(a.id);
                        const CellId b_id(b.id);

                        const auto a_coarse_cell_index =
                          this->coarse_cell_id_to_coarse_cell_index(
                            a_id.get_coarse_cell_id());
                        const auto b_coarse_cell_index =
                          this->coarse_cell_id_to_coarse_cell_index(
                            b_id.get_coarse_cell_id());

                        // sort according to the coarse-cell index and, if that
                        // is the same, according to the cell id (the result is
                        // that cells on each level are sorted according to
                        // their index on that level - which is what we need in
                        // the following operations)
                        if (a_coarse_cell_index != b_coarse_cell_index)
                          return a_coarse_cell_index < b_coarse_cell_index;
                        else
                          return a_id < b_id;
                      });

          // 4a) set all cells artificial (the actual (level_)subdomain_ids
          //     are set in the next step)
          for (const auto &cell : this->cell_iterators())
            {
              if (cell->is_active())
                cell->set_subdomain_id(
                  dealii::numbers::artificial_subdomain_id);

              cell->set_level_subdomain_id(
                dealii::numbers::artificial_subdomain_id);
            }

          // 4b) set the actual (level_)subdomain_ids
          for (unsigned int level = 0;
               level < cell_infos.size() && !cell_infos[level].empty();
               ++level)
            {
              auto cell      = this->begin(level);
              auto cell_info = cell_infos[level].begin();
              for (; cell_info != cell_infos[level].end(); ++cell_info)
                {
                  // find the cell that has the correct id
                  while (cell_info->id != cell->id().template to_binary<dim>())
                    ++cell;

                  // subdomain id
                  if (cell->is_active())
                    cell->set_subdomain_id(cell_info->subdomain_id);

                  // level subdomain id
                  if (settings & TriangulationDescription::Settings::
                                   construct_multigrid_hierarchy)
                    cell->set_level_subdomain_id(cell_info->level_subdomain_id);
                }
            }
        }

      this->update_number_cache();
      this->update_cell_relations();
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::create_triangulation(
      const std::vector<Point<spacedim>>       &vertices,
      const std::vector<dealii::CellData<dim>> &cells,
      const SubCellData                        &subcelldata)
    {
      Assert(
        currently_processing_create_triangulation_for_internal_usage,
        ExcMessage(
          "You have called the method parallel::fullydistributed::Triangulation::create_triangulation() \n"
          "that takes 3 arguments. If you have not called this function directly, \n"
          "it might have been called via a function from the GridGenerator or GridIn \n"
          "namespace. To be able to set up a fully-distributed Triangulation with these \n"
          "utility functions nevertheless, please follow these three steps:\n"
          "  1) call the utility function for a (serial) Triangulation, \n"
          "     a parallel::shared::Triangulation, or a parallel::distributed::Triangulation object,\n"
          "  2) use the functions TriangulationDescription::Utilities::create_description_from_triangulation() \n"
          "     or ::create_description_from_triangulation_in_groups() to create the \n"
          "     description of the local partition, and\n"
          "  3) pass the created description to parallel::fullydistributed::Triangulation::create_triangulation()."));

      dealii::Triangulation<dim, spacedim>::create_triangulation(vertices,
                                                                 cells,
                                                                 subcelldata);
    }
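    // A minimal usage sketch (not part of this file) of the three-step
    // workflow described in the message above; the names `base_tria`,
    // `description`, and `tria`, and the choice of dim = 2, are illustrative
    // assumptions:
    //
    //   parallel::distributed::Triangulation<2> base_tria(MPI_COMM_WORLD);
    //   GridGenerator::hyper_cube(base_tria);
    //   base_tria.refine_global(3);
    //
    //   const auto description = TriangulationDescription::Utilities::
    //     create_description_from_triangulation(base_tria, MPI_COMM_WORLD);
    //
    //   parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);
    //   tria.create_triangulation(description);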


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::copy_triangulation(
      const dealii::Triangulation<dim, spacedim> &other_tria)
    {
      // pointer to the triangulation for which the construction data
      // should be created (normally it is the input triangulation, but
      // in the case of a serial triangulation we create a copy which is
      // used instead)
      const dealii::Triangulation<dim, spacedim> *other_tria_ptr = &other_tria;

      // temporary serial triangulation (since the input triangulation is const
      // and we might modify its subdomain_ids and level_subdomain_ids during
      // partitioning)
      dealii::Triangulation<dim, spacedim> serial_tria;

      // if the other triangulation is not a parallel one, it still needs to
      // be partitioned
      if (dynamic_cast<const dealii::parallel::TriangulationBase<dim, spacedim>
                         *>(&other_tria) == nullptr)
        {
          // actually copy the serial triangulation
          serial_tria.copy_triangulation(other_tria);

          // partition triangulation
          this->partitioner(serial_tria,
                            dealii::Utilities::MPI::n_mpi_processes(
                              this->mpi_communicator));

          // partition multigrid levels
          if (this->is_multilevel_hierarchy_constructed())
            GridTools::partition_multigrid_levels(serial_tria);

          // use the new serial triangulation to create the construction data
          other_tria_ptr = &serial_tria;
        }

      // create construction data
      const auto construction_data = TriangulationDescription::Utilities::
        create_description_from_triangulation(*other_tria_ptr,
                                              this->mpi_communicator,
                                              this->settings);

      // finally create triangulation
      this->create_triangulation(construction_data);
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::set_partitioner(
      const std::function<void(dealii::Triangulation<dim, spacedim> &,
                               const unsigned int)> &partitioner,
      const TriangulationDescription::Settings      &settings)
    {
      this->partitioner = partitioner;
      this->settings    = settings;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::set_partitioner(
      const RepartitioningPolicyTools::Base<dim, spacedim> &partitioner,
      const TriangulationDescription::Settings             &settings)
    {
      this->partitioner_distributed = &partitioner;
      this->settings                = settings;
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::repartition()
    {
      // signal that repartitioning has started
      this->signals.pre_distributed_repartition();

      // create construction_data with the help of the partitioner
      const auto construction_data = TriangulationDescription::Utilities::
        create_description_from_triangulation(
          *this,
          this->partitioner_distributed->partition(*this),
          this->settings);

      // clear the old content
      this->clear();
      this->coarse_cell_id_to_coarse_cell_index_vector.clear();
      this->coarse_cell_index_to_coarse_cell_id_vector.clear();

      // use construction_data to set up the new triangulation
      this->create_triangulation(construction_data);

      // signal that repartitioning has completed
      this->signals.post_distributed_repartition();
    }
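    // A minimal sketch (not part of this file) of how repartition() is meant
    // to be used together with the policy-based set_partitioner() overload
    // above; the variable `tria` and the choice of DefaultPolicy are
    // illustrative assumptions. Note that only a pointer to the policy is
    // stored, so the policy object must stay alive while it is in use:
    //
    //   RepartitioningPolicyTools::DefaultPolicy<dim, spacedim> policy;
    //   tria.set_partitioner(
    //     policy, TriangulationDescription::Settings::default_setting);
    //   tria.repartition();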



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
    {
      DEAL_II_NOT_IMPLEMENTED();
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::prepare_coarsening_and_refinement()
    {
      Assert(
        currently_processing_prepare_coarsening_and_refinement_for_internal_usage,
        ExcMessage("No coarsening and refinement is supported!"));

      return dealii::Triangulation<dim, spacedim>::
        prepare_coarsening_and_refinement();
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    std::size_t Triangulation<dim, spacedim>::memory_consumption() const
    {
      const std::size_t mem =
        this->parallel::TriangulationBase<dim, spacedim>::memory_consumption() +
        MemoryConsumption::memory_consumption(
          coarse_cell_id_to_coarse_cell_index_vector) +
        MemoryConsumption::memory_consumption(
          coarse_cell_index_to_coarse_cell_id_vector);
      return mem;
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed()
      const
    {
      return (
        settings &
        TriangulationDescription::Settings::construct_multigrid_hierarchy);
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    unsigned int Triangulation<dim, spacedim>::
      coarse_cell_id_to_coarse_cell_index(
        const types::coarse_cell_id coarse_cell_id) const
    {
      // the vector is sorted by coarse-cell id, so a binary search suffices
      const auto coarse_cell_index = std::lower_bound(
        coarse_cell_id_to_coarse_cell_index_vector.begin(),
        coarse_cell_id_to_coarse_cell_index_vector.end(),
        coarse_cell_id,
        [](const std::pair<types::coarse_cell_id, unsigned int> &pair,
           const types::coarse_cell_id &val) { return pair.first < val; });
      if (coarse_cell_index !=
          coarse_cell_id_to_coarse_cell_index_vector.cend())
        return coarse_cell_index->second;
      else
        return numbers::invalid_unsigned_int; // cell could not be found
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    types::coarse_cell_id Triangulation<dim, spacedim>::
      coarse_cell_index_to_coarse_cell_id(
        const unsigned int coarse_cell_index) const
    {
      AssertIndexRange(coarse_cell_index,
                       coarse_cell_index_to_coarse_cell_id_vector.size());

      const auto coarse_cell_id =
        coarse_cell_index_to_coarse_cell_id_vector[coarse_cell_index];
      Assert(coarse_cell_id != numbers::invalid_coarse_cell_id,
             ExcMessage("You are trying to access a dummy cell!"));
      return coarse_cell_id;
    }

    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::update_cell_relations()
    {
      // Reorganize memory for local_cell_relations.
      this->local_cell_relations.clear();
      this->local_cell_relations.reserve(this->n_locally_owned_active_cells());

      // all locally owned cells are recorded as persisting, since this
      // triangulation does not perform coarsening or refinement
      for (const auto &cell : this->active_cell_iterators())
        if (cell->is_locally_owned())
          this->local_cell_relations.emplace_back(
            cell, CellStatus::cell_will_persist);
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::save(const std::string &filename) const
    {
#ifdef DEAL_II_WITH_MPI

      Assert(
        this->cell_attached_data.n_attached_deserialize == 0,
        ExcMessage(
          "Not all SolutionTransfer objects have been deserialized after the last call to load()."));
      Assert(this->n_cells() > 0,
             ExcMessage("Can not save() an empty Triangulation."));

      const int myrank =
        Utilities::MPI::this_mpi_process(this->mpi_communicator);
      const int mpisize =
        Utilities::MPI::n_mpi_processes(this->mpi_communicator);

      // Compute the global offset of each rank's locally owned cells.
      unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();

      unsigned int global_first_cell = 0;

      int ierr = MPI_Exscan(&n_locally_owned_cells,
                            &global_first_cell,
                            1,
                            MPI_UNSIGNED,
                            MPI_SUM,
                            this->mpi_communicator);
      AssertThrowMPI(ierr);

      global_first_cell *= sizeof(unsigned int);


      if (myrank == 0)
        {
          std::string   fname = std::string(filename) + ".info";
          std::ofstream f(fname);
          f << "version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_global_active_cells"
            << std::endl
            << 4 << " "
            << Utilities::MPI::n_mpi_processes(this->mpi_communicator) << " "
            << this->cell_attached_data.pack_callbacks_fixed.size() << " "
            << this->cell_attached_data.pack_callbacks_variable.size() << " "
            << this->n_global_active_cells() << std::endl;
        }

      // Save cell attached data.
      this->save_attached_data(global_first_cell,
                               this->n_global_active_cells(),
                               filename);

      // Save triangulation description.
      {
        MPI_Info info;
        int      ierr = MPI_Info_create(&info);
        AssertThrowMPI(ierr);

        const std::string fname_tria = filename + "_triangulation.data";

        // Open file.
        MPI_File fh;
        ierr = MPI_File_open(this->mpi_communicator,
                             fname_tria.c_str(),
                             MPI_MODE_CREATE | MPI_MODE_WRONLY,
                             info,
                             &fh);
        AssertThrowMPI(ierr);

        ierr = MPI_File_set_size(fh, 0); // delete the file contents
        AssertThrowMPI(ierr);
        // this barrier is necessary, because otherwise others might already
        // write while one core is still setting the size to zero.
        ierr = MPI_Barrier(this->mpi_communicator);
        AssertThrowMPI(ierr);
        ierr = MPI_Info_free(&info);
        AssertThrowMPI(ierr);
        // ------------------

        // Create construction data.
        const auto construction_data = TriangulationDescription::Utilities::
          create_description_from_triangulation(*this,
                                                this->mpi_communicator,
                                                this->settings);

        // Pack.
        std::vector<char> buffer;
        dealii::Utilities::pack(construction_data, buffer, false);

        // Compute the offset of this rank's buffer within the file.
        const std::uint64_t buffer_size = buffer.size();

        std::uint64_t offset = 0;

        ierr = MPI_Exscan(
          &buffer_size,
          &offset,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // Write the buffer sizes to the file.
        ierr = MPI_File_write_at(
          fh,
          myrank * sizeof(std::uint64_t),
          &buffer_size,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        // global position in file
        const std::uint64_t global_position =
          mpisize * sizeof(std::uint64_t) + offset;

        // Write buffers to file.
        ierr = Utilities::MPI::LargeCount::File_write_at_c(
          fh,
          global_position,
          buffer.data(),
          buffer.size(), // local buffer
          MPI_CHAR,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        ierr = MPI_File_close(&fh);
        AssertThrowMPI(ierr);
      }
#else
      (void)filename;

      AssertThrow(false, ExcNeedsMPI());
#endif
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::load(const std::string &filename)
    {
#ifdef DEAL_II_WITH_MPI
      Assert(this->n_cells() == 0,
             ExcMessage("load() only works if the Triangulation is empty!"));

      // Compute the global offset of each rank's locally owned cells.
      unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();

      unsigned int global_first_cell = 0;

      int ierr = MPI_Exscan(&n_locally_owned_cells,
                            &global_first_cell,
                            1,
                            MPI_UNSIGNED,
                            MPI_SUM,
                            this->mpi_communicator);
      AssertThrowMPI(ierr);

      global_first_cell *= sizeof(unsigned int);


      unsigned int version, numcpus, attached_count_fixed,
        attached_count_variable, n_global_active_cells;
      {
        std::string   fname = std::string(filename) + ".info";
        std::ifstream f(fname);
        AssertThrow(f.fail() == false, ExcIO());
        std::string firstline;
        getline(f, firstline); // skip first line
        f >> version >> numcpus >> attached_count_fixed >>
          attached_count_variable >> n_global_active_cells;
      }

      AssertThrow(version == 4,
                  ExcMessage("Incompatible version found in .info file."));
      Assert(this->n_global_active_cells() == n_global_active_cells,
             ExcMessage("Number of global active cells differ!"));

      // Load the description and construct the triangulation.
      {
        const int myrank =
          Utilities::MPI::this_mpi_process(this->mpi_communicator);
        const int mpisize =
          Utilities::MPI::n_mpi_processes(this->mpi_communicator);

        AssertDimension(numcpus, mpisize);

        // Open file.
        MPI_Info info;
        int      ierr = MPI_Info_create(&info);
        AssertThrowMPI(ierr);

        const std::string fname_tria = filename + "_triangulation.data";

        MPI_File fh;
        ierr = MPI_File_open(this->mpi_communicator,
                             fname_tria.c_str(),
                             MPI_MODE_RDONLY,
                             info,
                             &fh);
        AssertThrowMPI(ierr);

        ierr = MPI_Info_free(&info);
        AssertThrowMPI(ierr);

        // Read offsets from file.
        std::uint64_t buffer_size;

        ierr = MPI_File_read_at(
          fh,
          myrank * sizeof(std::uint64_t),
          &buffer_size,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        std::uint64_t offset = 0;

        ierr = MPI_Exscan(
          &buffer_size,
          &offset,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // global position in file
        const std::uint64_t global_position =
          mpisize * sizeof(std::uint64_t) + offset;

        // Read buffers from file.
        std::vector<char> buffer(buffer_size);
        ierr = Utilities::MPI::LargeCount::File_read_at_c(
          fh,
          global_position,
          buffer.data(),
          buffer.size(), // local buffer
          MPI_CHAR,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        ierr = MPI_File_close(&fh);
        AssertThrowMPI(ierr);

        auto construction_data = dealii::Utilities::template unpack<
          TriangulationDescription::Description<dim, spacedim>>(buffer, false);

        // WARNING: serialization cannot handle the MPI communicator
        // which is the reason why we have to set it here explicitly
        construction_data.comm = this->mpi_communicator;

        this->create_triangulation(construction_data);
      }

      // clear all of the callback data, as explained in the documentation of
      // register_data_attach()
      this->cell_attached_data.n_attached_data_sets = 0;
      this->cell_attached_data.n_attached_deserialize =
        attached_count_fixed + attached_count_variable;

      // Load attached cell data, if any was stored.
      this->load_attached_data(global_first_cell,
                               this->n_global_active_cells(),
                               this->n_locally_owned_active_cells(),
                               filename,
                               attached_count_fixed,
                               attached_count_variable);

      this->update_cell_relations();
      this->update_periodic_face_map();
      this->update_number_cache();
#else
      (void)filename;

      AssertThrow(false, ExcNeedsMPI());
#endif
    }
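    // A minimal checkpoint/restart sketch (not part of this file); `tria` and
    // the file name "checkpoint" are illustrative assumptions. save() writes
    // "checkpoint.info" and "checkpoint_triangulation.data", and load() must
    // be called on an empty triangulation with the same number of MPI ranks
    // that was used for save():
    //
    //   tria.save("checkpoint");
    //   ...
    //   parallel::fullydistributed::Triangulation<dim> tria2(MPI_COMM_WORLD);
    //   tria2.load("checkpoint");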


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::load(const std::string &filename,
                                            const bool         autopartition)
    {
      (void)autopartition;
      load(filename);
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::update_number_cache()
    {
      parallel::TriangulationBase<dim, spacedim>::update_number_cache();

      // additionally update the number of global coarse cells
      types::coarse_cell_id number_of_global_coarse_cells = 0;

      for (const auto &cell : this->active_cell_iterators())
        if (!cell->is_artificial())
          number_of_global_coarse_cells =
            std::max(number_of_global_coarse_cells,
                     cell->id().get_coarse_cell_id());

      number_of_global_coarse_cells =
        Utilities::MPI::max(number_of_global_coarse_cells,
                            this->mpi_communicator) +
        1;

      this->number_cache.number_of_global_coarse_cells =
        number_of_global_coarse_cells;
    }


  } // namespace fullydistributed
} // namespace parallel



/*-------------- Explicit Instantiations -------------------------------*/
#include "fully_distributed_tria.inst"


DEAL_II_NAMESPACE_CLOSE