fully_distributed_tria.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2019 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------


#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>

#include <deal.II/distributed/fully_distributed_tria.h>

#include <deal.II/grid/grid_tools.h>

#include <fstream>
#include <memory>

DEAL_II_NAMESPACE_OPEN
// Forward declarations
namespace GridGenerator
{
  template <int dim, int spacedim>
  void
  hyper_cube(Triangulation<dim, spacedim> &tria,
             const double                  left,
             const double                  right,
             const bool                    colorize);
} // namespace GridGenerator

namespace parallel
{
  namespace fullydistributed
  {
    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    Triangulation<dim, spacedim>::Triangulation(const MPI_Comm mpi_communicator)
      : parallel::DistributedTriangulationBase<dim, spacedim>(mpi_communicator)
      , settings(TriangulationDescription::Settings::default_setting)
      , partitioner([](dealii::Triangulation<dim, spacedim> &tria,
                       const unsigned int                    n_partitions) {
          GridTools::partition_triangulation_zorder(n_partitions, tria);
        })
      , currently_processing_create_triangulation_for_internal_usage(false)
      , currently_processing_prepare_coarsening_and_refinement_for_internal_usage(
          false)
    {}
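
    // The partitioner installed by default above distributes the active
    // cells among the requested number of subdomains along a Morton
    // (z-order) space-filling curve via
    // GridTools::partition_triangulation_zorder(); it can be replaced
    // through set_partitioner() below.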


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::create_triangulation(
      const TriangulationDescription::Description<dim, spacedim>
        &construction_data)
    {
      // check that the communicator of this parallel triangulation has been
      // used to construct the TriangulationDescription::Description
      Assert(construction_data.comm == this->mpi_communicator,
             ExcMessage("MPI communicators do not match!"));

      // store the settings internally
      settings = construction_data.settings;

      // set the smoothing properties
      if (settings &
          TriangulationDescription::Settings::construct_multigrid_hierarchy)
        this->set_mesh_smoothing(
          static_cast<
            typename dealii::Triangulation<dim, spacedim>::MeshSmoothing>(
            dealii::Triangulation<dim, spacedim>::none |
            Triangulation<dim, spacedim>::limit_level_difference_at_vertices));
      else
        this->set_mesh_smoothing(
          static_cast<
            typename dealii::Triangulation<dim, spacedim>::MeshSmoothing>(
            dealii::Triangulation<dim, spacedim>::none));

      this->set_mesh_smoothing(construction_data.smoothing);

      // clear internal data structures
      this->coarse_cell_id_to_coarse_cell_index_vector.clear();
      this->coarse_cell_index_to_coarse_cell_id_vector.clear();

      // check if no locally relevant coarse-grid cells have been provided
      if (construction_data.coarse_cell_vertices.empty())
        {
          // 1) create a dummy hypercube
          currently_processing_create_triangulation_for_internal_usage = true;
          GridGenerator::hyper_cube(*this, 0, 1, false);
          currently_processing_create_triangulation_for_internal_usage = false;

          // 2) mark the cell as artificial
          auto cell = this->begin();
          cell->set_subdomain_id(dealii::numbers::artificial_subdomain_id);
          cell->set_level_subdomain_id(
            dealii::numbers::artificial_subdomain_id);

          // 3) set up a dummy mapping between locally relevant coarse-grid
          //    cells and global cells
          this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(
            numbers::invalid_coarse_cell_id, 0);
          this->coarse_cell_index_to_coarse_cell_id_vector.emplace_back(
            numbers::invalid_coarse_cell_id);
        }
      else
        {
          // 1) store `coarse-cell index to coarse-cell id`-mapping
          this->coarse_cell_index_to_coarse_cell_id_vector =
            construction_data.coarse_cell_index_to_coarse_cell_id;

          // 2) set up `coarse-cell id to coarse-cell index`-mapping
          std::map<types::coarse_cell_id, unsigned int>
            coarse_cell_id_to_coarse_cell_index_vector;
          for (unsigned int i = 0;
               i < construction_data.coarse_cell_index_to_coarse_cell_id.size();
               ++i)
            coarse_cell_id_to_coarse_cell_index_vector
              [construction_data.coarse_cell_index_to_coarse_cell_id[i]] = i;

          for (auto i : coarse_cell_id_to_coarse_cell_index_vector)
            this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(i);
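
          // (iterating over the std::map yields the pairs sorted by
          // coarse-cell id, so the vector filled here can later be searched
          // with std::lower_bound in coarse_cell_id_to_coarse_cell_index())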

          // 3) create the locally relevant part of the triangulation
          currently_processing_prepare_coarsening_and_refinement_for_internal_usage =
            true;
          currently_processing_create_triangulation_for_internal_usage = true;
          dealii::Triangulation<dim, spacedim>::create_triangulation(
            construction_data);
          currently_processing_prepare_coarsening_and_refinement_for_internal_usage =
            false;
          currently_processing_create_triangulation_for_internal_usage = false;

          // create a copy of cell_infos such that we can sort them
          auto cell_infos = construction_data.cell_infos;

          // sort cell_infos on each level separately (as done in
          // dealii::Triangulation::create_triangulation())
          for (auto &cell_info : cell_infos)
            std::sort(cell_info.begin(),
                      cell_info.end(),
                      [&](const TriangulationDescription::CellData<dim> &a,
                          const TriangulationDescription::CellData<dim> &b) {
                        const CellId a_id(a.id);
                        const CellId b_id(b.id);

                        const auto a_coarse_cell_index =
                          this->coarse_cell_id_to_coarse_cell_index(
                            a_id.get_coarse_cell_id());
                        const auto b_coarse_cell_index =
                          this->coarse_cell_id_to_coarse_cell_index(
                            b_id.get_coarse_cell_id());

                        // compare cells according to their coarse-cell index
                        // and, if that is the same, according to their cell
                        // id (the result is that the cells on each level are
                        // sorted according to their index on that level -
                        // which is what we need in the following operations)
                        if (a_coarse_cell_index != b_coarse_cell_index)
                          return a_coarse_cell_index < b_coarse_cell_index;
                        else
                          return a_id < b_id;
                      });

          // 4a) set all cells artificial (the actual (level_)subdomain_ids
          //     are set in the next step)
          for (const auto &cell : this->cell_iterators())
            {
              if (cell->is_active())
                cell->set_subdomain_id(
                  dealii::numbers::artificial_subdomain_id);

              cell->set_level_subdomain_id(
                dealii::numbers::artificial_subdomain_id);
            }

          // 4b) set the actual (level_)subdomain_ids
          for (unsigned int level = 0;
               level < cell_infos.size() && !cell_infos[level].empty();
               ++level)
            {
              auto cell      = this->begin(level);
              auto cell_info = cell_infos[level].begin();
              for (; cell_info != cell_infos[level].end(); ++cell_info)
                {
                  // find the cell with the matching id
                  while (cell_info->id != cell->id().template to_binary<dim>())
                    ++cell;

                  // subdomain id
                  if (cell->is_active())
                    cell->set_subdomain_id(cell_info->subdomain_id);

                  // level subdomain id
                  if (settings & TriangulationDescription::Settings::
                                   construct_multigrid_hierarchy)
                    cell->set_level_subdomain_id(cell_info->level_subdomain_id);
                }
            }
        }

      this->update_number_cache();
      this->update_cell_relations();
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::create_triangulation(
      const std::vector<Point<spacedim>>       &vertices,
      const std::vector<dealii::CellData<dim>> &cells,
      const SubCellData                        &subcelldata)
    {
      Assert(
        currently_processing_create_triangulation_for_internal_usage,
        ExcMessage(
          "You have called the method parallel::fullydistributed::Triangulation::create_triangulation() \n"
          "that takes 3 arguments. If you have not called this function directly, \n"
          "it might have been called via a function from the GridGenerator or GridIn \n"
          "namespace. To be able to set up a fully-distributed Triangulation with these \n"
          "utility functions nevertheless, please follow these three steps:\n"
          "  1) call the utility function for a (serial) Triangulation, \n"
          "     a parallel::shared::Triangulation, or a parallel::distributed::Triangulation object,\n"
          "  2) use the functions TriangulationDescription::Utilities::create_description_from_triangulation() \n"
          "     or ::create_description_from_triangulation_in_groups() to create the \n"
          "     description of the local partition, and\n"
          "  3) pass the created description to parallel::fullydistributed::Triangulation::create_triangulation()."));

      dealii::Triangulation<dim, spacedim>::create_triangulation(vertices,
                                                                 cells,
                                                                 subcelldata);
    }
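
    // For illustration, the three steps listed in the message above might
    // look as follows in user code (a minimal sketch; the variable names
    // `serial_tria` and `fd_tria` are placeholders, not library names):
    //
    //   dealii::Triangulation<dim> serial_tria;
    //   GridGenerator::hyper_cube(serial_tria);
    //   serial_tria.refine_global(3);
    //   GridTools::partition_triangulation(
    //     Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD), serial_tria);
    //
    //   const auto description = TriangulationDescription::Utilities::
    //     create_description_from_triangulation(serial_tria, MPI_COMM_WORLD);
    //
    //   parallel::fullydistributed::Triangulation<dim> fd_tria(MPI_COMM_WORLD);
    //   fd_tria.create_triangulation(description);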



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::copy_triangulation(
      const dealii::Triangulation<dim, spacedim> &other_tria)
    {
      // pointer to the triangulation for which the construction data
      // should be created (normally it is the input triangulation, but
      // in the case of a serial triangulation we create a copy that is
      // used instead)
      const dealii::Triangulation<dim, spacedim> *other_tria_ptr = &other_tria;

      // temporary serial triangulation (since the input triangulation is const
      // and we might modify its subdomain_ids and level_subdomain_ids during
      // partitioning)
      dealii::Triangulation<dim, spacedim> serial_tria;

      // check if the other triangulation is a serial one, in which case it
      // still needs to be partitioned
      if (dynamic_cast<const dealii::parallel::TriangulationBase<dim, spacedim>
                         *>(&other_tria) == nullptr)
        {
          // actually copy the serial triangulation
          serial_tria.copy_triangulation(other_tria);

          // partition the triangulation
          this->partitioner(serial_tria,
                            dealii::Utilities::MPI::n_mpi_processes(
                              this->mpi_communicator));

          // partition the multigrid levels
          if (this->is_multilevel_hierarchy_constructed())
            GridTools::partition_multigrid_levels(serial_tria);

          // use the new serial triangulation to create the construction data
          other_tria_ptr = &serial_tria;
        }

      // create the construction data
      const auto construction_data = TriangulationDescription::Utilities::
        create_description_from_triangulation(*other_tria_ptr,
                                              this->mpi_communicator,
                                              this->settings);

      // finally create the triangulation
      this->create_triangulation(construction_data);
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::set_partitioner(
      const std::function<void(dealii::Triangulation<dim, spacedim> &,
                               const unsigned int)> &partitioner,
      const TriangulationDescription::Settings     &settings)
    {
      this->partitioner = partitioner;
      this->settings    = settings;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::set_partitioner(
      const RepartitioningPolicyTools::Base<dim, spacedim> &partitioner,
      const TriangulationDescription::Settings             &settings)
    {
      this->partitioner_distributed = &partitioner;
      this->settings                = settings;
    }
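
    // For example, a custom partitioner can be installed before calling
    // copy_triangulation() (a sketch; the lambda body is an illustration,
    // not the library default):
    //
    //   tria.set_partitioner(
    //     [](dealii::Triangulation<dim, spacedim> &t, const unsigned int n) {
    //       GridTools::partition_triangulation(n, t);
    //     },
    //     TriangulationDescription::Settings::default_setting);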



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::repartition()
    {
      // signal that repartitioning has started
      this->signals.pre_distributed_repartition();

      // create construction_data with the help of the partitioner
      const auto construction_data = TriangulationDescription::Utilities::
        create_description_from_triangulation(
          *this,
          this->partitioner_distributed->partition(*this),
          this->settings);

      // clear the old content
      this->clear();
      this->coarse_cell_id_to_coarse_cell_index_vector.clear();
      this->coarse_cell_index_to_coarse_cell_id_vector.clear();

      // use construction_data to set up the new triangulation
      this->create_triangulation(construction_data);

      // signal that repartitioning has completed
      this->signals.post_distributed_repartition();
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
    {
      DEAL_II_NOT_IMPLEMENTED();
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::prepare_coarsening_and_refinement()
    {
      Assert(
        currently_processing_prepare_coarsening_and_refinement_for_internal_usage,
        ExcMessage("No coarsening and refinement is supported!"));

      return dealii::Triangulation<dim, spacedim>::
        prepare_coarsening_and_refinement();
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    std::size_t Triangulation<dim, spacedim>::memory_consumption() const
    {
      const std::size_t mem =
        this->dealii::parallel::TriangulationBase<dim, spacedim>::
          memory_consumption() +
        MemoryConsumption::memory_consumption(
          coarse_cell_id_to_coarse_cell_index_vector) +
        MemoryConsumption::memory_consumption(
          coarse_cell_index_to_coarse_cell_id_vector);
      return mem;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed()
      const
    {
      return (
        settings &
        TriangulationDescription::Settings::construct_multigrid_hierarchy);
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    unsigned int Triangulation<dim, spacedim>::
      coarse_cell_id_to_coarse_cell_index(
        const types::coarse_cell_id coarse_cell_id) const
    {
      const auto coarse_cell_index = std::lower_bound(
        coarse_cell_id_to_coarse_cell_index_vector.begin(),
        coarse_cell_id_to_coarse_cell_index_vector.end(),
        coarse_cell_id,
        [](const std::pair<types::coarse_cell_id, unsigned int> &pair,
           const types::coarse_cell_id &val) { return pair.first < val; });
      if (coarse_cell_index !=
          coarse_cell_id_to_coarse_cell_index_vector.cend())
        return coarse_cell_index->second;
      else
        return numbers::invalid_unsigned_int; // cell could not be found
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    types::coarse_cell_id Triangulation<dim, spacedim>::
      coarse_cell_index_to_coarse_cell_id(
        const unsigned int coarse_cell_index) const
    {
      AssertIndexRange(coarse_cell_index,
                       coarse_cell_index_to_coarse_cell_id_vector.size());

      const auto coarse_cell_id =
        coarse_cell_index_to_coarse_cell_id_vector[coarse_cell_index];
      AssertThrow(coarse_cell_id != numbers::invalid_coarse_cell_id,
                  ExcMessage("You are trying to access a dummy cell!"));
      return coarse_cell_id;
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::update_cell_relations()
    {
      // Reorganize the memory for local_cell_relations.
      this->local_cell_relations.clear();
      this->local_cell_relations.reserve(this->n_locally_owned_active_cells());

      for (const auto &cell : this->active_cell_iterators())
        if (cell->is_locally_owned())
          this->local_cell_relations.emplace_back(
            cell, CellStatus::cell_will_persist);
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::save(const std::string &filename) const
    {
#ifdef DEAL_II_WITH_MPI

      Assert(
        this->cell_attached_data.n_attached_deserialize == 0,
        ExcMessage(
          "Not all SolutionTransfer objects have been deserialized after the last call to load()."));
      Assert(this->n_cells() > 0,
             ExcMessage("Can not save() an empty Triangulation."));

      const int myrank =
        Utilities::MPI::this_mpi_process(this->mpi_communicator);
      const int mpisize =
        Utilities::MPI::n_mpi_processes(this->mpi_communicator);

      // Compute the global offset of each rank as an exclusive prefix sum
      // over the locally owned cell counts.
      unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();

      unsigned int global_first_cell = 0;

      int ierr = MPI_Exscan(&n_locally_owned_cells,
                            &global_first_cell,
                            1,
                            MPI_UNSIGNED,
                            MPI_SUM,
                            this->mpi_communicator);
      AssertThrowMPI(ierr);

      global_first_cell *= sizeof(unsigned int);
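
      // (the cell offset is scaled to a byte offset here before being
      // handed to save_attached_data() below)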


      if (myrank == 0)
        {
          std::string fname = std::string(filename) + ".info";
          std::ofstream f(fname);
          f << "version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_global_active_cells"
            << std::endl
            << 4 << " "
            << Utilities::MPI::n_mpi_processes(this->mpi_communicator) << " "
            << this->cell_attached_data.pack_callbacks_fixed.size() << " "
            << this->cell_attached_data.pack_callbacks_variable.size() << " "
            << this->n_global_active_cells() << std::endl;
        }

      // Save cell attached data.
      this->save_attached_data(global_first_cell,
                               this->n_global_active_cells(),
                               filename);

      // Save the triangulation description.
      {
        MPI_Info info;
        int ierr = MPI_Info_create(&info);
        AssertThrowMPI(ierr);

        const std::string fname_tria = filename + "_triangulation.data";

        // Open the file.
        MPI_File fh;
        ierr = MPI_File_open(this->mpi_communicator,
                             fname_tria.c_str(),
                             MPI_MODE_CREATE | MPI_MODE_WRONLY,
                             info,
                             &fh);
        AssertThrowMPI(ierr);

        ierr = MPI_File_set_size(fh, 0); // delete the file contents
        AssertThrowMPI(ierr);
        // this barrier is necessary, because otherwise other ranks might
        // already write while one rank is still setting the size to zero
        ierr = MPI_Barrier(this->mpi_communicator);
        AssertThrowMPI(ierr);
        ierr = MPI_Info_free(&info);
        AssertThrowMPI(ierr);
        // ------------------

        // Create the construction data.
        const auto construction_data = TriangulationDescription::Utilities::
          create_description_from_triangulation(*this,
                                                this->mpi_communicator,
                                                this->settings);

        // Pack it into a buffer.
        std::vector<char> buffer;
        dealii::Utilities::pack(construction_data, buffer, false);

        // Compute this rank's offset within the data section of the file as
        // an exclusive prefix sum over the buffer sizes.
        const std::uint64_t buffer_size = buffer.size();

        std::uint64_t offset = 0;

        ierr = MPI_Exscan(
          &buffer_size,
          &offset,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // Write this rank's buffer size into the file header.
        ierr = MPI_File_write_at(
          fh,
          myrank * sizeof(std::uint64_t),
          &buffer_size,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        // global position in the file
        const std::uint64_t global_position =
          mpisize * sizeof(std::uint64_t) + offset;
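
        // (the resulting file layout is a header of `mpisize` std::uint64_t
        // entries holding the per-rank buffer sizes, followed by the
        // concatenated per-rank buffers)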

        // Write the buffers to the file.
        ierr = Utilities::MPI::LargeCount::File_write_at_c(
          fh,
          global_position,
          buffer.data(),
          buffer.size(), // local buffer
          MPI_CHAR,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        ierr = MPI_File_close(&fh);
        AssertThrowMPI(ierr);
      }
#else
      (void)filename;

      AssertThrow(false, ExcNeedsMPI());
#endif
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::load(const std::string &filename)
    {
#ifdef DEAL_II_WITH_MPI
      Assert(this->n_cells() == 0,
             ExcMessage("load() only works if the Triangulation is empty!"));


      unsigned int version, numcpus, attached_count_fixed,
        attached_count_variable, n_global_active_cells;
      {
        std::string fname = std::string(filename) + ".info";
        std::ifstream f(fname);
        AssertThrow(f.fail() == false, ExcIO());
        std::string firstline;
        getline(f, firstline); // skip the header line
        f >> version >> numcpus >> attached_count_fixed >>
          attached_count_variable >> n_global_active_cells;
      }

      AssertThrow(version == 4,
                  ExcMessage("Incompatible version found in .info file."));
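      // (this must match the version number written by save() above)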

      // Load the description and construct the triangulation.
      {
        const int myrank =
          Utilities::MPI::this_mpi_process(this->mpi_communicator);
        const int mpisize =
          Utilities::MPI::n_mpi_processes(this->mpi_communicator);

        AssertDimension(numcpus, mpisize);

        // Open the file.
        MPI_Info info;
        int ierr = MPI_Info_create(&info);
        AssertThrowMPI(ierr);

        const std::string fname_tria = filename + "_triangulation.data";

        MPI_File fh;
        ierr = MPI_File_open(this->mpi_communicator,
                             fname_tria.c_str(),
                             MPI_MODE_RDONLY,
                             info,
                             &fh);
        AssertThrowMPI(ierr);

        ierr = MPI_Info_free(&info);
        AssertThrowMPI(ierr);

        // Read this rank's buffer size from the file header.
        std::uint64_t buffer_size;

        ierr = MPI_File_read_at(
          fh,
          myrank * sizeof(std::uint64_t),
          &buffer_size,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        std::uint64_t offset = 0;

        ierr = MPI_Exscan(
          &buffer_size,
          &offset,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // global position in the file
        const std::uint64_t global_position =
          mpisize * sizeof(std::uint64_t) + offset;
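
        // (this mirrors the layout written by save(): a header of `mpisize`
        // std::uint64_t size entries, followed by the per-rank buffers)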

        // Read the buffers from the file.
        std::vector<char> buffer(buffer_size);
        ierr = Utilities::MPI::LargeCount::File_read_at_c(
          fh,
          global_position,
          buffer.data(),
          buffer.size(), // local buffer
          MPI_CHAR,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        ierr = MPI_File_close(&fh);
        AssertThrowMPI(ierr);

        auto construction_data = dealii::Utilities::template unpack<
          TriangulationDescription::Description<dim, spacedim>>(buffer, false);

        // WARNING: serialization cannot handle the MPI communicator,
        // which is the reason why we have to set it here explicitly
        construction_data.comm = this->mpi_communicator;

        this->create_triangulation(construction_data);
      }

      // Compute the global offset of each rank.
      unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();

      unsigned int global_first_cell = 0;

      int ierr = MPI_Exscan(&n_locally_owned_cells,
                            &global_first_cell,
                            1,
                            MPI_UNSIGNED,
                            MPI_SUM,
                            this->mpi_communicator);
      AssertThrowMPI(ierr);

      global_first_cell *= sizeof(unsigned int);

      Assert(this->n_global_active_cells() == n_global_active_cells,
             ExcMessage("The numbers of global active cells differ!"));

      // clear all of the callback data, as explained in the documentation of
      // register_data_attach()
      this->cell_attached_data.n_attached_data_sets = 0;
      this->cell_attached_data.n_attached_deserialize =
        attached_count_fixed + attached_count_variable;

      // Load the attached cell data, if any was stored.
      this->load_attached_data(global_first_cell,
                               this->n_global_active_cells(),
                               this->n_locally_owned_active_cells(),
                               filename,
                               attached_count_fixed,
                               attached_count_variable);

      this->update_cell_relations();
      this->update_periodic_face_map();
      this->update_number_cache();
#else
      (void)filename;

      AssertThrow(false, ExcNeedsMPI());
#endif
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::load(const std::string &filename,
                                            const bool         autopartition)
    {
      (void)autopartition;
      load(filename);
    }
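
    // (this overload ignores the autopartition flag and simply forwards to
    // load(filename); it is presumably retained for interface compatibility)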



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::update_number_cache()
    {
      parallel::TriangulationBase<dim, spacedim>::update_number_cache();

      // additionally update the number of global coarse cells
      types::coarse_cell_id number_of_global_coarse_cells = 0;

      for (const auto &cell : this->active_cell_iterators())
        if (!cell->is_artificial())
          number_of_global_coarse_cells =
            std::max(number_of_global_coarse_cells,
                     cell->id().get_coarse_cell_id());
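
      // the global number of coarse cells is the largest coarse-cell id over
      // all ranks, plus one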
      number_of_global_coarse_cells =
        Utilities::MPI::max(number_of_global_coarse_cells,
                            this->mpi_communicator) +
        1;

      this->number_cache.number_of_global_coarse_cells =
        number_of_global_coarse_cells;
    }


  } // namespace fullydistributed
} // namespace parallel



/*-------------- Explicit Instantiations -------------------------------*/
#include "fully_distributed_tria.inst"


DEAL_II_NAMESPACE_CLOSE