 Reference documentation for deal.II version 9.4.1
fully_distributed_tria.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2019 - 2022 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>

#include <deal.II/distributed/fully_distributed_tria.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>

#include <fstream>
#include <memory>

DEAL_II_NAMESPACE_OPEN

// Forward declarations
namespace GridGenerator
{
  template <int dim, int spacedim>
  void
  hyper_cube(Triangulation<dim, spacedim> &tria,
             const double                  left,
             const double                  right,
             const bool                    colorize);
} // namespace GridGenerator

namespace parallel
{
  namespace fullydistributed
  {
    template <int dim, int spacedim>
    Triangulation<dim, spacedim>::Triangulation(
      const MPI_Comm &mpi_communicator)
      : parallel::DistributedTriangulationBase<dim, spacedim>(mpi_communicator)
      , settings(TriangulationDescription::Settings::default_setting)
      , partitioner([](dealii::Triangulation<dim, spacedim> &tria,
                       const unsigned int                    n_partitions) {
          GridTools::partition_triangulation_zorder(n_partitions, tria);
        })
      , currently_processing_create_triangulation_for_internal_usage(false)
      , currently_processing_prepare_coarsening_and_refinement_for_internal_usage(
          false)
    {}
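
    // A minimal construction sketch (illustrative, not part of the original
    // file): the constructor only stores the communicator; unless a
    // different partitioner is installed later via set_partitioner(), the
    // default lambda above partitions serial meshes with
    // GridTools::partition_triangulation_zorder().
    //
    //   parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);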



    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::create_triangulation(
      const TriangulationDescription::Description<dim, spacedim>
        &construction_data)
    {
      // check that the communicator of this parallel triangulation is the
      // one that was used to construct the
      // TriangulationDescription::Description
      Assert(construction_data.comm == this->mpi_communicator,
             ExcMessage("MPI communicators do not match!"));

      // store the settings internally
      settings = construction_data.settings;

      // set the smoothing properties
      if (settings &
          TriangulationDescription::Settings::construct_multigrid_hierarchy)
        this->set_mesh_smoothing(
          static_cast<
            typename dealii::Triangulation<dim, spacedim>::MeshSmoothing>(
            dealii::Triangulation<dim, spacedim>::none |
            Triangulation<dim, spacedim>::limit_level_difference_at_vertices));
      else
        this->set_mesh_smoothing(
          static_cast<
            typename dealii::Triangulation<dim, spacedim>::MeshSmoothing>(
            dealii::Triangulation<dim, spacedim>::none));

      this->set_mesh_smoothing(construction_data.smoothing);

      // clear internal data structures
      this->coarse_cell_id_to_coarse_cell_index_vector.clear();
      this->coarse_cell_index_to_coarse_cell_id_vector.clear();

      // check if no locally relevant coarse-grid cells have been provided
      if (construction_data.coarse_cell_vertices.empty())
        {
          // 1) create a dummy hypercube
          currently_processing_create_triangulation_for_internal_usage = true;
          GridGenerator::hyper_cube(*this, 0, 1, false);
          currently_processing_create_triangulation_for_internal_usage = false;

          // 2) mark the cell as artificial
          auto cell = this->begin();
          cell->set_subdomain_id(dealii::numbers::artificial_subdomain_id);
          cell->set_level_subdomain_id(
            dealii::numbers::artificial_subdomain_id);

          // 3) set up a dummy mapping between locally relevant coarse-grid
          //    cells and global cells
          this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(
            numbers::invalid_coarse_cell_id, 0);
          this->coarse_cell_index_to_coarse_cell_id_vector.emplace_back(
            numbers::invalid_coarse_cell_id);
        }
      else
        {
          // 1) store the `coarse-cell index to coarse-cell id` mapping
          this->coarse_cell_index_to_coarse_cell_id_vector =
            construction_data.coarse_cell_index_to_coarse_cell_id;

          // 2) set up the `coarse-cell id to coarse-cell index` mapping
          std::map<types::coarse_cell_id, unsigned int>
            coarse_cell_id_to_coarse_cell_index_vector;
          for (unsigned int i = 0;
               i < construction_data.coarse_cell_index_to_coarse_cell_id.size();
               ++i)
            coarse_cell_id_to_coarse_cell_index_vector
              [construction_data.coarse_cell_index_to_coarse_cell_id[i]] = i;

          for (auto i : coarse_cell_id_to_coarse_cell_index_vector)
            this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(i);

          // 3) create the locally relevant part of the triangulation
          currently_processing_prepare_coarsening_and_refinement_for_internal_usage =
            true;
          currently_processing_create_triangulation_for_internal_usage = true;
          dealii::Triangulation<dim, spacedim>::create_triangulation(
            construction_data);
          currently_processing_prepare_coarsening_and_refinement_for_internal_usage =
            false;
          currently_processing_create_triangulation_for_internal_usage = false;

          // create a copy of cell_infos such that we can sort them
          auto cell_infos = construction_data.cell_infos;

          // sort cell_infos on each level separately (as done in
          // dealii::Triangulation::create_triangulation()): first according
          // to the coarse-cell index and, if that is the same, according to
          // the cell id; as a result, the cells on each level are sorted by
          // their index on that level, which the following operations rely on
          for (auto &cell_info : cell_infos)
            std::sort(cell_info.begin(),
                      cell_info.end(),
                      [&](TriangulationDescription::CellData<dim> a,
                          TriangulationDescription::CellData<dim> b) {
                        const CellId a_id(a.id);
                        const CellId b_id(b.id);

                        const auto a_coarse_cell_index =
                          this->coarse_cell_id_to_coarse_cell_index(
                            a_id.get_coarse_cell_id());
                        const auto b_coarse_cell_index =
                          this->coarse_cell_id_to_coarse_cell_index(
                            b_id.get_coarse_cell_id());

                        if (a_coarse_cell_index != b_coarse_cell_index)
                          return a_coarse_cell_index < b_coarse_cell_index;
                        else
                          return a_id < b_id;
                      });

          // 4a) set all cells artificial (the actual (level_)subdomain_ids
          //     are set in the next step)
          for (const auto &cell : this->cell_iterators())
            {
              if (cell->is_active())
                cell->set_subdomain_id(
                  dealii::numbers::artificial_subdomain_id);

              cell->set_level_subdomain_id(
                dealii::numbers::artificial_subdomain_id);
            }

          // 4b) set the actual (level_)subdomain_ids
          for (unsigned int level = 0;
               level < cell_infos.size() && !cell_infos[level].empty();
               ++level)
            {
              auto cell      = this->begin(level);
              auto cell_info = cell_infos[level].begin();
              for (; cell_info != cell_infos[level].end(); ++cell_info)
                {
                  // find the cell with the correct id
                  while (cell_info->id != cell->id().template to_binary<dim>())
                    ++cell;

                  // subdomain id
                  if (cell->is_active())
                    cell->set_subdomain_id(cell_info->subdomain_id);

                  // level subdomain id
                  if (settings & TriangulationDescription::Settings::
                                   construct_multigrid_hierarchy)
                    cell->set_level_subdomain_id(cell_info->level_subdomain_id);
                }
            }
        }

      this->update_number_cache();
      this->update_cell_relations();
    }



    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::create_triangulation(
      const std::vector<Point<spacedim>> &      vertices,
      const std::vector<dealii::CellData<dim>> &cells,
      const SubCellData &                       subcelldata)
    {
      Assert(
        currently_processing_create_triangulation_for_internal_usage,
        ExcMessage(
          "You have called the method parallel::fullydistributed::Triangulation::create_triangulation() \n"
          "that takes 3 arguments. If you have not called this function directly, \n"
          "it might have been called via a function from the GridGenerator or GridIn \n"
          "namespace. To be able to set up a fully-distributed Triangulation with these \n"
          "utility functions nevertheless, please follow the following three steps:\n"
          "  1) call the utility function for a (serial) Triangulation, \n"
          "     a parallel::shared::Triangulation, or a parallel::distributed::Triangulation object,\n"
          "  2) use the functions TriangulationDescription::Utilities::create_description_from_triangulation() \n"
          "     or ::create_description_from_triangulation_in_groups() to create the \n"
          "     description of the local partition, and\n"
          "  3) pass the created description to parallel::fullydistributed::Triangulation::create_triangulation()."));

      dealii::Triangulation<dim, spacedim>::create_triangulation(vertices,
                                                                 cells,
                                                                 subcelldata);
    }
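
    // The three steps from the message above, spelled out as a usage sketch
    // (illustrative, not part of the original file; `dim` and the
    // communicator `comm` are assumed to be defined by the caller):
    //
    //   // 1) build and partition a serial mesh
    //   dealii::Triangulation<dim> serial_tria;
    //   dealii::GridGenerator::hyper_cube(serial_tria);
    //   serial_tria.refine_global(3);
    //   dealii::GridTools::partition_triangulation_zorder(
    //     dealii::Utilities::MPI::n_mpi_processes(comm), serial_tria);
    //
    //   // 2) create the description of the local partition
    //   const auto description = dealii::TriangulationDescription::
    //     Utilities::create_description_from_triangulation(serial_tria, comm);
    //
    //   // 3) hand the description to the fully-distributed triangulation
    //   dealii::parallel::fullydistributed::Triangulation<dim> tria(comm);
    //   tria.create_triangulation(description);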



    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::copy_triangulation(
      const dealii::Triangulation<dim, spacedim> &other_tria)
    {
      // pointer to the triangulation for which the construction data
      // should be created (normally it is the input triangulation, but
      // in the case of a serial triangulation we create a copy that should
      // be used instead)
      const dealii::Triangulation<dim, spacedim> *other_tria_ptr = &other_tria;

      // temporary serial triangulation (since the input triangulation is
      // const and we might modify its subdomain_ids and level_subdomain_ids
      // during partitioning)
      dealii::Triangulation<dim, spacedim> serial_tria;

      // check whether the other triangulation is a serial one, which still
      // needs to be partitioned
      if (dynamic_cast<const dealii::parallel::TriangulationBase<dim, spacedim>
                         *>(&other_tria) == nullptr)
        {
          // actually copy the serial triangulation
          serial_tria.copy_triangulation(other_tria);

          // partition the triangulation
          this->partitioner(serial_tria,
                            Utilities::MPI::n_mpi_processes(
                              this->mpi_communicator));

          // partition the multigrid levels
          if (this->is_multilevel_hierarchy_constructed())
            GridTools::partition_multigrid_levels(serial_tria);

          // use the new serial triangulation to create the construction data
          other_tria_ptr = &serial_tria;
        }

      // create the construction data
      const auto construction_data = TriangulationDescription::Utilities::
        create_description_from_triangulation(*other_tria_ptr,
                                              this->mpi_communicator,
                                              this->settings);

      // finally create the triangulation
      this->create_triangulation(construction_data);
    }
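
    // Usage sketch (illustrative, not part of the original file): the
    // partition-describe-distribute sequence above is triggered by a single
    // call, e.g.
    //
    //   dealii::Triangulation<dim> serial_tria;
    //   dealii::GridGenerator::hyper_ball(serial_tria);
    //   dealii::parallel::fullydistributed::Triangulation<dim> tria(comm);
    //   tria.copy_triangulation(serial_tria);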



    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::set_partitioner(
      const std::function<void(dealii::Triangulation<dim, spacedim> &,
                               const unsigned int)> &partitioner,
      const TriangulationDescription::Settings &     settings)
    {
      this->partitioner = partitioner;
      this->settings    = settings;
    }



    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::set_partitioner(
      const RepartitioningPolicyTools::Base<dim, spacedim> &partitioner,
      const TriangulationDescription::Settings &            settings)
    {
      this->partitioner_distributed = &partitioner;
      this->settings                = settings;
    }
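
    // Sketch of installing a custom partitioner (illustrative, not part of
    // the original file; it assumes a triangulation `tria` of this class):
    //
    //   tria.set_partitioner(
    //     [](dealii::Triangulation<dim> &serial_tria,
    //        const unsigned int          n_partitions) {
    //       // any scheme that fills the subdomain_ids works here
    //       dealii::GridTools::partition_triangulation(n_partitions,
    //                                                  serial_tria);
    //     },
    //     dealii::TriangulationDescription::Settings::default_setting);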



    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::repartition()
    {
      // signal that repartitioning has started
      this->signals.pre_distributed_repartition();

      // create the construction data with the help of the partitioner
      const auto construction_data = TriangulationDescription::Utilities::
        create_description_from_triangulation(
          *this,
          this->partitioner_distributed->partition(*this),
          this->settings);

      // clear the old content
      this->clear();
      this->coarse_cell_id_to_coarse_cell_index_vector.clear();
      this->coarse_cell_index_to_coarse_cell_id_vector.clear();

      // use the construction data to set up the new triangulation
      this->create_triangulation(construction_data);

      // signal that repartitioning has completed
      this->signals.post_distributed_repartition();
    }
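
    // Sketch (illustrative, not part of the original file): repartition()
    // requires that the policy-based set_partitioner() overload has been
    // called before; the concrete policy class named here is an assumption,
    // any RepartitioningPolicyTools::Base implementation works:
    //
    //   const dealii::RepartitioningPolicyTools::DefaultPolicy<dim> policy;
    //   tria.set_partitioner(policy, settings);
    //   tria.repartition();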



    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
    {
      Assert(false, ExcNotImplemented());
    }



    template <int dim, int spacedim>
    bool
    Triangulation<dim, spacedim>::prepare_coarsening_and_refinement()
    {
      Assert(
        currently_processing_prepare_coarsening_and_refinement_for_internal_usage,
        ExcMessage("No coarsening and refinement is supported!"));

      return dealii::Triangulation<dim, spacedim>::
        prepare_coarsening_and_refinement();
    }



    template <int dim, int spacedim>
    std::size_t
    Triangulation<dim, spacedim>::memory_consumption() const
    {
      const std::size_t mem =
        this->dealii::parallel::TriangulationBase<dim, spacedim>::
          memory_consumption() +
        MemoryConsumption::memory_consumption(
          coarse_cell_id_to_coarse_cell_index_vector) +
        MemoryConsumption::memory_consumption(
          coarse_cell_index_to_coarse_cell_id_vector);
      return mem;
    }



    template <int dim, int spacedim>
    bool
    Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed() const
    {
      return (
        settings &
        TriangulationDescription::Settings::construct_multigrid_hierarchy);
    }



    template <int dim, int spacedim>
    unsigned int
    Triangulation<dim, spacedim>::coarse_cell_id_to_coarse_cell_index(
      const types::coarse_cell_id coarse_cell_id) const
    {
      // binary search in the (sorted) id-to-index vector
      const auto coarse_cell_index = std::lower_bound(
        coarse_cell_id_to_coarse_cell_index_vector.begin(),
        coarse_cell_id_to_coarse_cell_index_vector.end(),
        coarse_cell_id,
        [](const std::pair<types::coarse_cell_id, unsigned int> &pair,
           const types::coarse_cell_id &val) { return pair.first < val; });
      Assert(coarse_cell_index !=
               coarse_cell_id_to_coarse_cell_index_vector.cend(),
             ExcMessage("Coarse cell index not found!"));
      return coarse_cell_index->second;
    }



    template <int dim, int spacedim>
    types::coarse_cell_id
    Triangulation<dim, spacedim>::coarse_cell_index_to_coarse_cell_id(
      const unsigned int coarse_cell_index) const
    {
      AssertIndexRange(coarse_cell_index,
                       coarse_cell_index_to_coarse_cell_id_vector.size());

      const auto coarse_cell_id =
        coarse_cell_index_to_coarse_cell_id_vector[coarse_cell_index];
      AssertThrow(coarse_cell_id != numbers::invalid_coarse_cell_id,
                  ExcMessage("You are trying to access a dummy cell!"));
      return coarse_cell_id;
    }


    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::update_cell_relations()
    {
      // Reorganize memory for local_cell_relations.
      this->local_cell_relations.clear();
      this->local_cell_relations.reserve(this->n_locally_owned_active_cells());

      for (const auto &cell : this->active_cell_iterators())
        if (cell->is_locally_owned())
          this->local_cell_relations.emplace_back(
            cell, Triangulation<dim, spacedim>::CELL_PERSIST);
    }



    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::save(const std::string &filename) const
    {
#ifdef DEAL_II_WITH_MPI
      AssertThrow(this->cell_attached_data.pack_callbacks_variable.size() == 0,
                  ExcNotImplemented());

      Assert(
        this->cell_attached_data.n_attached_deserialize == 0,
        ExcMessage(
          "Not all SolutionTransfer objects have been deserialized after the last call to load()."));
      Assert(this->n_cells() > 0,
             ExcMessage("Can not save() an empty Triangulation."));

      const int myrank =
        Utilities::MPI::this_mpi_process(this->mpi_communicator);
      const int mpisize =
        Utilities::MPI::n_mpi_processes(this->mpi_communicator);

      // Compute the global offset of each rank.
      unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();

      unsigned int global_first_cell = 0;

      int ierr = MPI_Exscan(&n_locally_owned_cells,
                            &global_first_cell,
                            1,
                            MPI_UNSIGNED,
                            MPI_SUM,
                            this->mpi_communicator);
      AssertThrowMPI(ierr);

      global_first_cell *= sizeof(unsigned int);


      if (myrank == 0)
        {
          std::string   fname = std::string(filename) + ".info";
          std::ofstream f(fname.c_str());
          f << "version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_global_active_cells"
            << std::endl
            << 4 << " "
            << Utilities::MPI::n_mpi_processes(this->mpi_communicator) << " "
            << this->cell_attached_data.pack_callbacks_fixed.size() << " "
            << this->cell_attached_data.pack_callbacks_variable.size() << " "
            << this->n_global_active_cells() << std::endl;
        }

      // Save the cell-attached data.
      this->save_attached_data(global_first_cell,
                               this->n_global_active_cells(),
                               filename);

      // Save the triangulation description.
      {
        MPI_Info info;
        int      ierr = MPI_Info_create(&info);
        AssertThrowMPI(ierr);

        const std::string fname_tria = filename + "_triangulation.data";

        // Open the file.
        MPI_File fh;
        ierr = MPI_File_open(this->mpi_communicator,
                             fname_tria.c_str(),
                             MPI_MODE_CREATE | MPI_MODE_WRONLY,
                             info,
                             &fh);
        AssertThrowMPI(ierr);

        ierr = MPI_File_set_size(fh, 0); // delete the file contents
        AssertThrowMPI(ierr);
        // This barrier is necessary because otherwise other ranks might
        // already write while one rank is still setting the size to zero.
        ierr = MPI_Barrier(this->mpi_communicator);
        AssertThrowMPI(ierr);
        ierr = MPI_Info_free(&info);
        AssertThrowMPI(ierr);
        // ------------------

        // Create the construction data.
        const auto construction_data = TriangulationDescription::Utilities::
          create_description_from_triangulation(*this,
                                                this->mpi_communicator,
                                                this->settings);

        // Pack it into a buffer.
        std::vector<char> buffer;
        dealii::Utilities::pack(construction_data, buffer, false);

        // Compute the write offset of this rank via an exclusive scan over
        // the buffer sizes.
        const std::uint64_t buffer_size = buffer.size();

        std::uint64_t offset = 0;

        ierr = MPI_Exscan(
          &buffer_size,
          &offset,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // Write the buffer size of this rank into the header section of the
        // file.
        ierr = MPI_File_write_at(
          fh,
          myrank * sizeof(std::uint64_t),
          &buffer_size,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        // global position in the file
        const std::uint64_t global_position =
          mpisize * sizeof(std::uint64_t) + offset;

        // Write the buffer to the file.
        ierr = Utilities::MPI::LargeCount::File_write_at_c(
          fh,
          global_position,
          buffer.data(),
          buffer.size(), // local buffer
          MPI_CHAR,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        ierr = MPI_File_close(&fh);
        AssertThrowMPI(ierr);
      }
#else
      (void)filename;

      AssertThrow(false, ExcNeedsMPI());
#endif
    }
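
    // Note on the file layout (summarizing the code above; the example call
    // is illustrative and not part of the original file): save() produces
    // "<filename>.info" with the plain-text header written by rank 0, plus
    // "<filename>_triangulation.data" holding the per-rank buffer sizes
    // followed by the packed TriangulationDescription objects:
    //
    //   tria.save("checkpoint");
    //   // -> checkpoint.info, checkpoint_triangulation.data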



    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::load(const std::string &filename)
    {
#ifdef DEAL_II_WITH_MPI
      Assert(this->n_cells() == 0,
             ExcMessage("load() only works if the Triangulation is empty!"));

      // Compute the global offset of each rank.
      unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();

      unsigned int global_first_cell = 0;

      int ierr = MPI_Exscan(&n_locally_owned_cells,
                            &global_first_cell,
                            1,
                            MPI_UNSIGNED,
                            MPI_SUM,
                            this->mpi_communicator);
      AssertThrowMPI(ierr);

      global_first_cell *= sizeof(unsigned int);


      unsigned int version, numcpus, attached_count_fixed,
        attached_count_variable, n_global_active_cells;
      {
        std::string   fname = std::string(filename) + ".info";
        std::ifstream f(fname.c_str());
        AssertThrow(f.fail() == false, ExcIO());
        std::string firstline;
        getline(f, firstline); // skip the header line
        f >> version >> numcpus >> attached_count_fixed >>
          attached_count_variable >> n_global_active_cells;
      }

      AssertThrow(version == 4,
                  ExcMessage("Incompatible version found in .info file."));
      Assert(this->n_global_active_cells() == n_global_active_cells,
             ExcMessage("Number of global active cells differ!"));

      // Load the description and construct the triangulation.
      {
        const int myrank =
          Utilities::MPI::this_mpi_process(this->mpi_communicator);
        const int mpisize =
          Utilities::MPI::n_mpi_processes(this->mpi_communicator);

        AssertDimension(numcpus, mpisize);

        // Open the file.
        MPI_Info info;
        int      ierr = MPI_Info_create(&info);
        AssertThrowMPI(ierr);

        const std::string fname_tria = filename + "_triangulation.data";

        MPI_File fh;
        ierr = MPI_File_open(this->mpi_communicator,
                             fname_tria.c_str(),
                             MPI_MODE_RDONLY,
                             info,
                             &fh);
        AssertThrowMPI(ierr);

        ierr = MPI_Info_free(&info);
        AssertThrowMPI(ierr);

        // Read the buffer size of this rank from the header section of the
        // file.
        std::uint64_t buffer_size;

        ierr = MPI_File_read_at(
          fh,
          myrank * sizeof(std::uint64_t),
          &buffer_size,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        std::uint64_t offset = 0;

        ierr = MPI_Exscan(
          &buffer_size,
          &offset,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // global position in the file
        const std::uint64_t global_position =
          mpisize * sizeof(std::uint64_t) + offset;

        // Read the buffer from the file.
        std::vector<char> buffer(buffer_size);
        ierr = Utilities::MPI::LargeCount::File_read_at_c(
          fh,
          global_position,
          buffer.data(),
          buffer.size(), // local buffer
          MPI_CHAR,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        ierr = MPI_File_close(&fh);
        AssertThrowMPI(ierr);

        auto construction_data = dealii::Utilities::template unpack<
          TriangulationDescription::Description<dim, spacedim>>(buffer, false);

        // WARNING: serialization cannot handle the MPI communicator,
        // which is the reason why we have to set it here explicitly
        construction_data.comm = this->mpi_communicator;

        this->create_triangulation(construction_data);
      }

      // clear all of the callback data, as explained in the documentation of
      // register_data_attach()
      this->cell_attached_data.n_attached_data_sets = 0;
      this->cell_attached_data.n_attached_deserialize =
        attached_count_fixed + attached_count_variable;

      // Load the attached cell data, if any was stored.
      this->load_attached_data(global_first_cell,
                               this->n_global_active_cells(),
                               this->n_locally_owned_active_cells(),
                               filename,
                               attached_count_fixed,
                               attached_count_variable);

      this->update_cell_relations();
      this->update_periodic_face_map();
      this->update_number_cache();
#else
      (void)filename;

      AssertThrow(false, ExcNeedsMPI());
#endif
    }
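
    // Restart sketch (illustrative, not part of the original file): load()
    // expects the files written by save(), an empty triangulation, and the
    // same number of MPI processes as used for saving (enforced by the
    // AssertDimension(numcpus, mpisize) check above):
    //
    //   parallel::fullydistributed::Triangulation<dim> tria(comm);
    //   tria.load("checkpoint");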



    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::load(const std::string &filename,
                                       const bool         autopartition)
    {
      // the autopartition flag is ignored by this class
      (void)autopartition;
      load(filename);
    }



    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::update_number_cache()
    {
      parallel::TriangulationBase<dim, spacedim>::update_number_cache();

      // additionally update the number of global coarse cells
      types::coarse_cell_id number_of_global_coarse_cells = 0;

      for (const auto &cell : this->active_cell_iterators())
        if (!cell->is_artificial())
          number_of_global_coarse_cells =
            std::max(number_of_global_coarse_cells,
                     cell->id().get_coarse_cell_id());

      number_of_global_coarse_cells =
        Utilities::MPI::max(number_of_global_coarse_cells,
                            this->mpi_communicator) +
        1;

      this->number_cache.number_of_global_coarse_cells =
        number_of_global_coarse_cells;
    }


  } // namespace fullydistributed
} // namespace parallel



/*-------------- Explicit Instantiations -------------------------------*/
#include "fully_distributed_tria.inst"


DEAL_II_NAMESPACE_CLOSE