Reference documentation for deal.II version Git b0c9756717 2021-05-17 18:12:11 -0400
fully_distributed_tria.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2019 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/memory_consumption.h>
18 #include <deal.II/base/mpi.h>
19 
20 #include <deal.II/distributed/fully_distributed_tria.h>
21 
22 #include <deal.II/grid/grid_tools.h>
23 
24 #include <fstream>
25 #include <memory>
26 
27 DEAL_II_NAMESPACE_OPEN
28 
29 // Forward declarations
30 namespace GridGenerator
31 {
32  template <int dim, int spacedim>
33  void
34  hyper_cube(Triangulation<dim, spacedim> &tria,
35  const double left,
36  const double right,
37  const bool colorize);
38 } // namespace GridGenerator
39 
40 namespace parallel
41 {
42  namespace fullydistributed
43  {
44  template <int dim, int spacedim>
45  Triangulation<dim, spacedim>::Triangulation(
46  const MPI_Comm &mpi_communicator)
47  : parallel::DistributedTriangulationBase<dim, spacedim>(mpi_communicator)
48  , settings(TriangulationDescription::Settings::default_setting)
49  , partitioner([](::Triangulation<dim, spacedim> &tria,
50  const unsigned int n_partitions) {
51  GridTools::partition_triangulation_zorder(n_partitions, tria);
52  })
53  , currently_processing_create_triangulation_for_internal_usage(false)
54  , currently_processing_prepare_coarsening_and_refinement_for_internal_usage(
55  false)
56  {}
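A minimal construction sketch (assuming MPI has already been initialized, for example via Utilities::MPI::MPI_InitFinalize, and using dim = 2 purely for illustration). As the constructor above shows, the default partitioner is GridTools::partition_triangulation_zorder:

  #include <deal.II/distributed/fully_distributed_tria.h>

  parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);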
57 
58 
59 
60  template <int dim, int spacedim>
61  void
62  Triangulation<dim, spacedim>::create_triangulation(
63  const TriangulationDescription::Description<dim, spacedim>
64  &construction_data)
65  {
66  // check if the communicator of this parallel triangulation has been used
67  // to construct the TriangulationDescription::Description
68  Assert(construction_data.comm == this->mpi_communicator,
69  ExcMessage("MPI communicators do not match!"));
70 
71  // store internally the settings
72  settings = construction_data.settings;
73 
74  // set the smoothing properties
75  if (settings &
76  TriangulationDescription::Settings::construct_multigrid_hierarchy)
77  this->set_mesh_smoothing(
78  static_cast<
79  typename ::Triangulation<dim, spacedim>::MeshSmoothing>(
80  ::Triangulation<dim, spacedim>::none |
81  Triangulation<dim, spacedim>::limit_level_difference_at_vertices));
82  else
83  this->set_mesh_smoothing(
84  static_cast<
85  typename ::Triangulation<dim, spacedim>::MeshSmoothing>(
86  ::Triangulation<dim, spacedim>::none));
87 
88  this->set_mesh_smoothing(construction_data.smoothing);
89 
90  // clear internal data structures
91  this->coarse_cell_id_to_coarse_cell_index_vector.clear();
92  this->coarse_cell_index_to_coarse_cell_id_vector.clear();
93 
94  // check if no locally relevant coarse-grid cells have been provided
95  if (construction_data.coarse_cell_vertices.empty())
96  {
97  // 1) create a dummy hypercube
98  currently_processing_create_triangulation_for_internal_usage = true;
99  GridGenerator::hyper_cube(*this, 0, 1, false);
100  currently_processing_create_triangulation_for_internal_usage = false;
101 
102  // 2) mark cell as artificial
103  auto cell = this->begin();
104  cell->set_subdomain_id(::numbers::artificial_subdomain_id);
105  cell->set_level_subdomain_id(
106  ::numbers::artificial_subdomain_id);
107 
108  // 3) set up dummy mapping between locally relevant coarse-grid cells
109  // and global cells
110  this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(
111  numbers::invalid_coarse_cell_id, 0);
112  this->coarse_cell_index_to_coarse_cell_id_vector.emplace_back(
113  numbers::invalid_coarse_cell_id);
114  }
115  else
116  {
117  // 1) store `coarse-cell index to coarse-cell id`-mapping
118  this->coarse_cell_index_to_coarse_cell_id_vector =
119  construction_data.coarse_cell_index_to_coarse_cell_id;
120 
121  // 2) set up `coarse-cell id to coarse-cell index`-mapping
122  std::map<types::coarse_cell_id, unsigned int>
123  coarse_cell_id_to_coarse_cell_index_vector;
124  for (unsigned int i = 0;
125  i < construction_data.coarse_cell_index_to_coarse_cell_id.size();
126  ++i)
127  coarse_cell_id_to_coarse_cell_index_vector
128  [construction_data.coarse_cell_index_to_coarse_cell_id[i]] = i;
129 
130  for (auto i : coarse_cell_id_to_coarse_cell_index_vector)
131  this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(i);
132 
133  // create the locally-relevant part of the triangulation
134  currently_processing_prepare_coarsening_and_refinement_for_internal_usage =
135  true;
136  currently_processing_create_triangulation_for_internal_usage = true;
137  ::Triangulation<dim, spacedim>::create_triangulation(
138  construction_data);
139  currently_processing_prepare_coarsening_and_refinement_for_internal_usage =
140  false;
141  currently_processing_create_triangulation_for_internal_usage = false;
142 
143  // create a copy of cell_infos such that we can sort them
144  auto cell_infos = construction_data.cell_infos;
145 
146  // sort cell_infos on each level separately (as done in
147  // ::Triangulation::create_triangulation())
148  for (auto &cell_info : cell_infos)
149  std::sort(cell_info.begin(),
150  cell_info.end(),
151  [&](TriangulationDescription::CellData<dim> a,
152  TriangulationDescription::CellData<dim> b) {
153  const CellId a_id(a.id);
154  const CellId b_id(b.id);
155 
156  const auto a_coarse_cell_index =
157  this->coarse_cell_id_to_coarse_cell_index(
158  a_id.get_coarse_cell_id());
159  const auto b_coarse_cell_index =
160  this->coarse_cell_id_to_coarse_cell_index(
161  b_id.get_coarse_cell_id());
162 
163  // compare according to their coarse-cell index and, if that is
164  // the same, according to their cell id (the result is that
165  // cells on each level are sorted according to their index on
166  // that level - which is what we need in the following
167  // operations)
168  if (a_coarse_cell_index != b_coarse_cell_index)
169  return a_coarse_cell_index < b_coarse_cell_index;
170  else
171  return a_id < b_id;
172  });
173 
174  // 4a) set all cells artificial (and set the actual
175  // (level_)subdomain_ids in the next step)
176  for (auto cell = this->begin(); cell != this->end(); ++cell)
177  {
178  if (cell->is_active())
179  cell->set_subdomain_id(
180  ::numbers::artificial_subdomain_id);
181 
182  cell->set_level_subdomain_id(
183  ::numbers::artificial_subdomain_id);
184  }
185 
186  // 4b) set actual (level_)subdomain_ids
187  for (unsigned int level = 0;
188  level < cell_infos.size() && !cell_infos[level].empty();
189  ++level)
190  {
191  auto cell = this->begin(level);
192  auto cell_info = cell_infos[level].begin();
193  for (; cell_info != cell_infos[level].end(); ++cell_info)
194  {
195  // advance until we find the cell with the correct id
196  while (cell_info->id != cell->id().template to_binary<dim>())
197  ++cell;
198 
199  // subdomain id
200  if (cell->is_active())
201  cell->set_subdomain_id(cell_info->subdomain_id);
202 
203  // level subdomain id
204  if (settings &
205  TriangulationDescription::Settings::construct_multigrid_hierarchy)
206  cell->set_level_subdomain_id(cell_info->level_subdomain_id);
207  }
208  }
209  }
210 
211  this->update_number_cache();
212  this->update_cell_relations();
213  }
214 
215 
216 
217  template <int dim, int spacedim>
218  void
219  Triangulation<dim, spacedim>::create_triangulation(
220  const std::vector<Point<spacedim>> & vertices,
221  const std::vector<::CellData<dim>> &cells,
222  const SubCellData & subcelldata)
223  {
224  Assert(
225  currently_processing_create_triangulation_for_internal_usage,
226  ExcMessage(
227  "You have called the method parallel::fullydistributed::Triangulation::create_triangulation() \n"
228  "that takes 3 arguments. If you have not called this function directly, \n"
229  "it might have been called via a function from the GridGenerator or GridIn \n"
230  "namespace. To be able to set up a fully-distributed Triangulation with these \n"
231  "utility functions nevertheless, please follow these three steps:\n"
232  " 1) call the utility function for a (serial) Triangulation, \n"
233  " a parallel::shared::Triangulation, or a parallel::distributed::Triangulation object,\n"
234  " 2) use the functions TriangulationDescription::Utilities::create_description_from_triangulation() \n"
235  " or ::create_description_from_triangulation_in_groups() to create the \n"
236  " description of the local partition, and\n"
237  " 3) pass the created description to parallel::fullydistributed::Triangulation::create_triangulation()."));
238 
239  ::Triangulation<dim, spacedim>::create_triangulation(vertices,
240  cells,
241  subcelldata);
242  }
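A sketch of the three-step workflow described in the error message above; the mesh, its refinement level, and all variable names are chosen only for illustration:

  // 1) build and partition a serial mesh
  Triangulation<2> serial_tria;
  GridGenerator::hyper_cube(serial_tria);
  serial_tria.refine_global(4);
  GridTools::partition_triangulation_zorder(
    Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD), serial_tria);

  // 2) create the description of the local partition
  const auto description = TriangulationDescription::Utilities::
    create_description_from_triangulation(serial_tria, MPI_COMM_WORLD);

  // 3) hand the description to the fully-distributed triangulation
  parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);
  tria.create_triangulation(description);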
243 
244 
245 
246  template <int dim, int spacedim>
247  void
248  Triangulation<dim, spacedim>::copy_triangulation(
249  const ::Triangulation<dim, spacedim> &other_tria)
250  {
251  // pointer to the triangulation for which the construction data
252  // should be created (normally it is the input triangulation but
253  // in the case of a serial triangulation we create a copy which should
254  // be used)
255  const ::Triangulation<dim, spacedim> *other_tria_ptr = &other_tria;
256 
257  // temporary serial triangulation (since the input triangulation is const
258  // and we might modify its subdomain_ids
259  // and level_subdomain_ids during partitioning); this pointer only points
260  // to anything if the source triangulation is serial, and ensures that our
261  // copy is eventually deleted
262  std::unique_ptr<::Triangulation<dim, spacedim>> serial_tria;
263 
264  // check if other triangulation is not a parallel one, which needs to be
265  // partitioned
266  if (dynamic_cast<const ::parallel::TriangulationBase<dim, spacedim>
267  *>(&other_tria) == nullptr)
268  {
269  serial_tria =
270  std::make_unique<::Triangulation<dim, spacedim>>();
271 
272  // actually copy the serial triangulation
273  serial_tria->copy_triangulation(other_tria);
274 
275  // partition triangulation
276  this->partitioner(*serial_tria,
277  ::Utilities::MPI::n_mpi_processes(
278  this->mpi_communicator));
279 
280  // partition multigrid levels
281  if (this->is_multilevel_hierarchy_constructed())
282  GridTools::partition_multigrid_levels(*serial_tria);
283 
284  // use the new serial triangulation to create the construction data
285  other_tria_ptr = serial_tria.get();
286  }
287 
288  // create construction data
289  const auto construction_data = TriangulationDescription::Utilities::
290  create_description_from_triangulation(*other_tria_ptr,
291  this->mpi_communicator,
292  this->settings);
293 
294  // finally create triangulation
295  this->create_triangulation(construction_data);
296  }
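The same result can be obtained more compactly through copy_triangulation(), which performs the partitioning and the description step internally when it is given a serial mesh (again purely illustrative):

  Triangulation<2> serial_tria;
  GridGenerator::hyper_cube(serial_tria);
  serial_tria.refine_global(4);

  parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);
  tria.copy_triangulation(serial_tria);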
297 
298 
299 
300  template <int dim, int spacedim>
301  void
302  Triangulation<dim, spacedim>::set_partitioner(
303  const std::function<void(::Triangulation<dim, spacedim> &,
304  const unsigned int)> &partitioner,
305  const TriangulationDescription::Settings &settings)
306  {
307  this->partitioner = partitioner;
308  this->settings = settings;
309  }
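A sketch of installing a custom partitioner before calling copy_triangulation(); GridTools::partition_triangulation is used here as an arbitrary stand-in (it requires deal.II to be configured with a graph partitioner such as METIS), and any callable with the same signature would work:

  parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);
  tria.set_partitioner(
    [](Triangulation<2> &tria_to_partition, const unsigned int n_partitions) {
      GridTools::partition_triangulation(n_partitions, tria_to_partition);
    },
    TriangulationDescription::Settings::default_setting);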
310 
311 
312 
313  template <int dim, int spacedim>
314  void
315  Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
316  {
317  Assert(false, ExcNotImplemented());
318  }
319 
320 
321 
322  template <int dim, int spacedim>
323  bool
324  Triangulation<dim, spacedim>::prepare_coarsening_and_refinement()
325  {
326  Assert(
327  currently_processing_prepare_coarsening_and_refinement_for_internal_usage,
328  ExcMessage("No coarsening and refinement is supported!"));
329 
330  return ::Triangulation<dim, spacedim>::
331  prepare_coarsening_and_refinement();
332  }
333 
334 
335 
336  template <int dim, int spacedim>
337  bool
338  Triangulation<dim, spacedim>::has_hanging_nodes() const
339  {
340  Assert(false, ExcNotImplemented());
341  return false;
342  }
343 
344 
345 
346  template <int dim, int spacedim>
347  std::size_t
348  Triangulation<dim, spacedim>::memory_consumption() const
349  {
350  const std::size_t mem =
351  this->parallel::TriangulationBase<dim, spacedim>::
352  memory_consumption() +
353  MemoryConsumption::memory_consumption(
354  coarse_cell_id_to_coarse_cell_index_vector) +
355  MemoryConsumption::memory_consumption(
356  coarse_cell_index_to_coarse_cell_id_vector);
357  return mem;
358  }
359 
360 
361 
362  template <int dim, int spacedim>
363  bool
364  Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed() const
365  {
366  return (
367  settings &
368  TriangulationDescription::Settings::construct_multigrid_hierarchy);
369  }
370 
371 
372 
373  template <int dim, int spacedim>
374  unsigned int
375  Triangulation<dim, spacedim>::coarse_cell_id_to_coarse_cell_index(
376  const types::coarse_cell_id coarse_cell_id) const
377  {
378  const auto coarse_cell_index = std::lower_bound(
379  coarse_cell_id_to_coarse_cell_index_vector.begin(),
380  coarse_cell_id_to_coarse_cell_index_vector.end(),
381  coarse_cell_id,
382  [](const std::pair<types::coarse_cell_id, unsigned int> &pair,
383  const types::coarse_cell_id &val) { return pair.first < val; });
384  Assert(coarse_cell_index !=
385  coarse_cell_id_to_coarse_cell_index_vector.cend(),
386  ExcMessage("Coarse cell index not found!"));
387  return coarse_cell_index->second;
388  }
389 
390 
391 
392  template <int dim, int spacedim>
393  types::coarse_cell_id
394  Triangulation<dim, spacedim>::coarse_cell_index_to_coarse_cell_id(
395  const unsigned int coarse_cell_index) const
396  {
397  AssertIndexRange(coarse_cell_index,
398  coarse_cell_index_to_coarse_cell_id_vector.size());
399 
400  const auto coarse_cell_id =
401  coarse_cell_index_to_coarse_cell_id_vector[coarse_cell_index];
402  Assert(coarse_cell_id != numbers::invalid_coarse_cell_id,
403  ExcMessage("You are trying to access a dummy cell!"));
404  return coarse_cell_id;
405  }
406 
407 
408  template <int dim, int spacedim>
409  void
410  Triangulation<dim, spacedim>::update_cell_relations()
411  {
412  // Reorganize memory for local_cell_relations.
413  this->local_cell_relations.clear();
414  this->local_cell_relations.reserve(this->n_locally_owned_active_cells());
415 
416  for (const auto &cell : this->active_cell_iterators())
417  if (cell->is_locally_owned())
418  this->local_cell_relations.emplace_back(
419  cell, Triangulation<dim, spacedim>::CELL_PERSIST);
420  }
421 
422 
423 
424  template <int dim, int spacedim>
425  void
426  Triangulation<dim, spacedim>::save(const std::string &filename) const
427  {
428 #ifdef DEAL_II_WITH_MPI
431 
432  Assert(
433  this->cell_attached_data.n_attached_deserialize == 0,
434  ExcMessage(
435  "Not all SolutionTransfer objects have been deserialized after the last call to load()."));
436  Assert(this->n_cells() > 0,
437  ExcMessage("Can not save() an empty Triangulation."));
438 
439  const int myrank =
440  ::Utilities::MPI::this_mpi_process(this->mpi_communicator);
441  const int mpisize =
442  ::Utilities::MPI::n_mpi_processes(this->mpi_communicator);
443 
444  // Compute global offset for each rank.
445  unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();
446 
447  unsigned int global_first_cell = 0;
448 
449  int ierr = MPI_Exscan(&n_locally_owned_cells,
450  &global_first_cell,
451  1,
452  MPI_UNSIGNED,
453  MPI_SUM,
454  this->mpi_communicator);
455  AssertThrowMPI(ierr);
456 
457  global_first_cell *= sizeof(unsigned int);
458 
459 
460  if (myrank == 0)
461  {
462  std::string fname = std::string(filename) + ".info";
463  std::ofstream f(fname.c_str());
464  f << "version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_global_active_cells"
465  << std::endl
466  << 4 << " "
467  << ::Utilities::MPI::n_mpi_processes(this->mpi_communicator) << " "
468  << this->cell_attached_data.pack_callbacks_fixed.size() << " "
469  << this->cell_attached_data.pack_callbacks_variable.size() << " "
470  << this->n_global_active_cells() << std::endl;
471  }
472 
473  // Save cell attached data.
474  this->save_attached_data(global_first_cell,
475  this->n_global_active_cells(),
476  filename);
477 
478  // Save triangulation description.
479  {
480  MPI_Info info;
481  int ierr = MPI_Info_create(&info);
482  AssertThrowMPI(ierr);
483 
484  const std::string fname_tria = filename + "_triangulation.data";
485 
486  // Open file.
487  MPI_File fh;
488  ierr = MPI_File_open(this->mpi_communicator,
489  DEAL_II_MPI_CONST_CAST(fname_tria.c_str()),
490  MPI_MODE_CREATE | MPI_MODE_WRONLY,
491  info,
492  &fh);
493  AssertThrowMPI(ierr);
494 
495  ierr = MPI_File_set_size(fh, 0); // delete the file contents
496  AssertThrowMPI(ierr);
497  // this barrier is necessary, because otherwise others might already
498  // write while one core is still setting the size to zero.
499  ierr = MPI_Barrier(this->mpi_communicator);
500  AssertThrowMPI(ierr);
501  ierr = MPI_Info_free(&info);
502  AssertThrowMPI(ierr);
503  // ------------------
504 
505  // Create construction data.
506  const auto construction_data = TriangulationDescription::Utilities::
507  create_description_from_triangulation(*this,
508  this->mpi_communicator,
509  this->settings);
510 
511  // Pack.
512  std::vector<char> buffer;
513  ::Utilities::pack(construction_data, buffer, false);
514 
515  // Write offsets to file.
516  unsigned int buffer_size = buffer.size();
517 
518  unsigned int offset = 0;
519 
520  ierr = MPI_Exscan(&buffer_size,
521  &offset,
522  1,
523  MPI_UNSIGNED,
524  MPI_SUM,
525  this->mpi_communicator);
526  AssertThrowMPI(ierr);
527 
528  // Write offsets to file.
529  ierr = MPI_File_write_at(fh,
530  myrank * sizeof(unsigned int),
531  DEAL_II_MPI_CONST_CAST(&buffer_size),
532  1,
533  MPI_UNSIGNED,
534  MPI_STATUS_IGNORE);
535  AssertThrowMPI(ierr);
536 
537  // Write buffers to file.
538  ierr = MPI_File_write_at(fh,
539  mpisize * sizeof(unsigned int) +
540  offset, // global position in file
541  DEAL_II_MPI_CONST_CAST(buffer.data()),
542  buffer.size(), // local buffer
543  MPI_CHAR,
544  MPI_STATUS_IGNORE);
545  AssertThrowMPI(ierr);
546 
547  ierr = MPI_File_close(&fh);
548  AssertThrowMPI(ierr);
549  }
550 #else
551  (void)filename;
552 
553  AssertThrow(false, ExcNeedsMPI());
554 #endif
555  }
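A usage sketch for save(): given a fully set-up triangulation tria, the call below writes a small "checkpoint.info" header from rank 0 and a "checkpoint_triangulation.data" file collectively via MPI I/O, as implemented above (the base name "checkpoint" is arbitrary):

  tria.save("checkpoint");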
556 
557 
558 
559  template <int dim, int spacedim>
560  void
561  Triangulation<dim, spacedim>::load(const std::string &filename,
562  const bool autopartition)
563  {
564 #ifdef DEAL_II_WITH_MPI
565  AssertThrow(
566  autopartition == false,
567  ExcMessage(
568  "load() only works if run with the same number of MPI processes used for saving the triangulation, hence autopartition is disabled."));
569 
570  Assert(this->n_cells() == 0,
571  ExcMessage("load() only works if the Triangulation is empty!"));
572 
573  // Compute global offset for each rank.
574  unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();
575 
576  unsigned int global_first_cell = 0;
577 
578  int ierr = MPI_Exscan(&n_locally_owned_cells,
579  &global_first_cell,
580  1,
581  MPI_UNSIGNED,
582  MPI_SUM,
583  this->mpi_communicator);
584  AssertThrowMPI(ierr);
585 
586  global_first_cell *= sizeof(unsigned int);
587 
588 
589  unsigned int version, numcpus, attached_count_fixed,
590  attached_count_variable, n_global_active_cells;
591  {
592  std::string fname = std::string(filename) + ".info";
593  std::ifstream f(fname.c_str());
594  AssertThrow(f, ExcIO());
595  std::string firstline;
596  getline(f, firstline); // skip first line
597  f >> version >> numcpus >> attached_count_fixed >>
598  attached_count_variable >> n_global_active_cells;
599  }
600 
601  AssertThrow(version == 4,
602  ExcMessage("Incompatible version found in .info file."));
603  Assert(this->n_global_active_cells() == n_global_active_cells,
604  ExcMessage("Number of global active cells differ!"));
605 
606  // Load description and construct the triangulation.
607  {
608  const int myrank =
609  ::Utilities::MPI::this_mpi_process(this->mpi_communicator);
610  const int mpisize =
611  ::Utilities::MPI::n_mpi_processes(this->mpi_communicator);
612 
613  AssertDimension(numcpus, mpisize);
614 
615  // Open file.
616  MPI_Info info;
617  int ierr = MPI_Info_create(&info);
618  AssertThrowMPI(ierr);
619 
620  const std::string fname_tria = filename + "_triangulation.data";
621 
622  MPI_File fh;
623  ierr = MPI_File_open(this->mpi_communicator,
624  DEAL_II_MPI_CONST_CAST(fname_tria.c_str()),
625  MPI_MODE_RDONLY,
626  info,
627  &fh);
628  AssertThrowMPI(ierr);
629 
630  ierr = MPI_Info_free(&info);
631  AssertThrowMPI(ierr);
632 
633  // Read offsets from file.
634  unsigned int buffer_size;
635 
636  ierr = MPI_File_read_at(fh,
637  myrank * sizeof(unsigned int),
638  DEAL_II_MPI_CONST_CAST(&buffer_size),
639  1,
640  MPI_UNSIGNED,
641  MPI_STATUS_IGNORE);
642  AssertThrowMPI(ierr);
643 
644  unsigned int offset = 0;
645 
646  ierr = MPI_Exscan(&buffer_size,
647  &offset,
648  1,
649  MPI_UNSIGNED,
650  MPI_SUM,
651  this->mpi_communicator);
652  AssertThrowMPI(ierr);
653 
654  // Read buffers from file.
655  std::vector<char> buffer(buffer_size);
656  ierr = MPI_File_read_at(fh,
657  mpisize * sizeof(unsigned int) +
658  offset, // global position in file
659  DEAL_II_MPI_CONST_CAST(buffer.data()),
660  buffer.size(), // local buffer
661  MPI_CHAR,
662  MPI_STATUS_IGNORE);
663  AssertThrowMPI(ierr);
664 
665  ierr = MPI_File_close(&fh);
666  AssertThrowMPI(ierr);
667 
668  auto construction_data = ::Utilities::template unpack<
669  TriangulationDescription::Description<dim, spacedim>>(buffer, false);
670 
671  // WARNING: serialization cannot handle the MPI communicator
672  // which is the reason why we have to set it here explicitly
673  construction_data.comm = this->mpi_communicator;
674 
675  this->create_triangulation(construction_data);
676  }
677 
678  // clear all of the callback data, as explained in the documentation of
679  // register_data_attach()
680  this->cell_attached_data.n_attached_data_sets = 0;
681  this->cell_attached_data.n_attached_deserialize =
682  attached_count_fixed + attached_count_variable;
683 
684  // Load attached cell data, if any was stored.
685  this->load_attached_data(global_first_cell,
686  this->n_global_active_cells(),
687  this->n_locally_owned_active_cells(),
688  filename,
689  attached_count_fixed,
690  attached_count_variable);
691 
692  this->update_cell_relations();
693  this->update_periodic_face_map();
694  this->update_number_cache();
695 #else
696  (void)filename;
697  (void)autopartition;
698 
699  AssertThrow(false, ExcNeedsMPI());
700 #endif
701  }
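The matching load() call; as the assertions above require, it must be executed on an empty triangulation and with the same number of MPI processes that wrote the files:

  parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);
  tria.load("checkpoint");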
702 
703 
704  } // namespace fullydistributed
705 } // namespace parallel
706 
707 
708 
709 /*-------------- Explicit Instantiations -------------------------------*/
710 #include "fully_distributed_tria.inst"
711 
712 
713 DEAL_II_NAMESPACE_CLOSE