tria_base.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2015 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/logstream.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/partitioner.h>
#include <deal.II/base/utilities.h>

#include <deal.II/distributed/shared_tria.h>
#include <deal.II/distributed/tria_base.h>

#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>

#include <deal.II/lac/vector_memory.h>

#include <algorithm>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <limits>
#include <numeric>


DEAL_II_NAMESPACE_OPEN
namespace parallel
{
  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  TriangulationBase<dim, spacedim>::TriangulationBase(
    const MPI_Comm mpi_communicator,
    const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
      smooth_grid,
    const bool check_for_distorted_cells)
    : dealii::Triangulation<dim, spacedim>(smooth_grid,
                                           check_for_distorted_cells)
    , mpi_communicator(mpi_communicator)
    , my_subdomain(Utilities::MPI::this_mpi_process(this->mpi_communicator))
    , n_subdomains(Utilities::MPI::n_mpi_processes(this->mpi_communicator))
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
#endif
  }
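
  // Illustrative sketch, not part of this file: a concrete derived class such
  // as parallel::distributed::Triangulation forwards an MPI communicator to
  // this constructor, which then records the process rank (my_subdomain) and
  // the number of ranks (n_subdomains). The mesh set up below is only an
  // assumed example and requires MPI to have been initialized, e.g. via
  // Utilities::MPI::MPI_InitFinalize.
  //
  //   #include <deal.II/distributed/tria.h>
  //   #include <deal.II/grid/grid_generator.h>
  //
  //   parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
  //   GridGenerator::hyper_cube(tria);
  //   tria.refine_global(3); // each rank now owns a part of the cells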


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::copy_triangulation(
    const dealii::Triangulation<dim, spacedim> &other_tria)
  {
#ifndef DEAL_II_WITH_MPI
    (void)other_tria;
    Assert(false, ExcNeedsMPI());
#else
    dealii::Triangulation<dim, spacedim>::copy_triangulation(other_tria);

    if (const dealii::parallel::TriangulationBase<dim, spacedim> *other_tria_x =
          dynamic_cast<const dealii::parallel::TriangulationBase<dim, spacedim>
                         *>(&other_tria))
      {
        // release unused vector memory because we will have very different
        // vectors now
        dealii::internal::GrowingVectorMemoryImplementation::
          release_all_unused_memory();
      }
#endif
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  std::size_t TriangulationBase<dim, spacedim>::memory_consumption() const
  {
    std::size_t mem =
      this->dealii::Triangulation<dim, spacedim>::memory_consumption() +
      MemoryConsumption::memory_consumption(this->mpi_communicator) +
      MemoryConsumption::memory_consumption(my_subdomain) +
      MemoryConsumption::memory_consumption(
        number_cache.n_global_active_cells) +
      MemoryConsumption::memory_consumption(number_cache.n_global_levels);
    return mem;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  TriangulationBase<dim, spacedim>::~TriangulationBase()
  {
    // release unused vector memory because the vector layout is going to look
    // very different now
    dealii::internal::GrowingVectorMemoryImplementation::
      release_all_unused_memory();
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  TriangulationBase<dim, spacedim>::NumberCache::NumberCache()
    : n_locally_owned_active_cells(0)
    , number_of_global_coarse_cells(0)
    , n_global_levels(0)
  {}


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  unsigned int TriangulationBase<dim, spacedim>::n_locally_owned_active_cells()
    const
  {
    return number_cache.n_locally_owned_active_cells;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  unsigned int TriangulationBase<dim, spacedim>::n_global_levels() const
  {
    return number_cache.n_global_levels;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  types::global_cell_index
  TriangulationBase<dim, spacedim>::n_global_active_cells() const
  {
    return number_cache.n_global_active_cells;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  MPI_Comm TriangulationBase<dim, spacedim>::get_communicator() const
  {
    return mpi_communicator;
  }
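
  // Illustrative sketch, not part of this file: the accessors above report
  // process-local versus global quantities. On each rank,
  // n_locally_owned_active_cells() counts only the cells owned by that rank,
  // while n_global_active_cells() is the sum over all ranks; both are kept
  // consistent by update_number_cache(). The triangulation object below is an
  // assumed example.
  //
  //   const auto &p_tria = ...; // some parallel::TriangulationBase<dim>
  //   const unsigned int n_local  = p_tria.n_locally_owned_active_cells();
  //   const auto         n_global = p_tria.n_global_active_cells();
  //   Assert(Utilities::MPI::sum(static_cast<types::global_cell_index>(
  //                                n_local),
  //                              p_tria.get_communicator()) == n_global,
  //          ExcInternalError());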


#ifdef DEAL_II_WITH_MPI
  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::update_number_cache()
  {
    number_cache.ghost_owners.clear();
    number_cache.level_ghost_owners.clear();
    number_cache.n_locally_owned_active_cells = 0;

    if (this->n_levels() == 0)
      {
        // Skip communication done below if we do not have any cells
        // (meaning the Triangulation is empty on all processors). This will
        // happen when called from the destructor of Triangulation, which
        // can get called during exception handling causing a hang in this
        // function.
        number_cache.n_global_active_cells = 0;
        number_cache.n_global_levels       = 0;
        return;
      }


    {
      // find ghost owners
      for (const auto &cell : this->active_cell_iterators())
        if (cell->is_ghost())
          number_cache.ghost_owners.insert(cell->subdomain_id());

      Assert(number_cache.ghost_owners.size() <
               Utilities::MPI::n_mpi_processes(this->mpi_communicator),
             ExcInternalError());
    }

    if (this->n_levels() > 0)
      number_cache.n_locally_owned_active_cells = std::count_if(
        this->begin_active(),
        typename Triangulation<dim, spacedim>::active_cell_iterator(
          this->end()),
        [](const auto &i) { return i.is_locally_owned(); });
    else
      number_cache.n_locally_owned_active_cells = 0;

    // Potentially cast to a 64 bit type before accumulating to avoid
    // overflow:
    number_cache.n_global_active_cells =
      Utilities::MPI::sum(static_cast<types::global_cell_index>(
                            number_cache.n_locally_owned_active_cells),
                          this->mpi_communicator);

    number_cache.n_global_levels =
      Utilities::MPI::max(this->n_levels(), this->mpi_communicator);

    // Store MPI ranks of level ghost owners of this processor on all
    // levels.
    if (this->is_multilevel_hierarchy_constructed() == true)
      {
        number_cache.level_ghost_owners.clear();

        // if there is nothing to do, then do nothing
        if (this->n_levels() == 0)
          return;

        // find level ghost owners
        for (const auto &cell : this->cell_iterators())
          if (cell->level_subdomain_id() != numbers::artificial_subdomain_id &&
              cell->level_subdomain_id() != this->locally_owned_subdomain())
            this->number_cache.level_ghost_owners.insert(
              cell->level_subdomain_id());

#  ifdef DEBUG
        // Check that level_ghost_owners is symmetric by sending a message
        // to everyone
        {
          int ierr = MPI_Barrier(this->mpi_communicator);
          AssertThrowMPI(ierr);

          const int mpi_tag = Utilities::MPI::internal::Tags::
            triangulation_base_fill_level_ghost_owners;

          // important: preallocate to avoid (re)allocation:
          std::vector<MPI_Request> requests(
            this->number_cache.level_ghost_owners.size());
          unsigned int dummy       = 0;
          unsigned int req_counter = 0;

          for (const auto &it : this->number_cache.level_ghost_owners)
            {
              ierr = MPI_Isend(&dummy,
                               1,
                               MPI_UNSIGNED,
                               it,
                               mpi_tag,
                               this->mpi_communicator,
                               &requests[req_counter]);
              AssertThrowMPI(ierr);
              ++req_counter;
            }

          for (const auto &it : this->number_cache.level_ghost_owners)
            {
              unsigned int dummy;
              ierr = MPI_Recv(&dummy,
                              1,
                              MPI_UNSIGNED,
                              it,
                              mpi_tag,
                              this->mpi_communicator,
                              MPI_STATUS_IGNORE);
              AssertThrowMPI(ierr);
            }

          if (requests.size() > 0)
            {
              ierr = MPI_Waitall(requests.size(),
                                 requests.data(),
                                 MPI_STATUSES_IGNORE);
              AssertThrowMPI(ierr);
            }

          ierr = MPI_Barrier(this->mpi_communicator);
          AssertThrowMPI(ierr);
        }
#  endif

        Assert(this->number_cache.level_ghost_owners.size() <
                 Utilities::MPI::n_mpi_processes(this->mpi_communicator),
               ExcInternalError());
      }

    this->number_cache.number_of_global_coarse_cells = this->n_cells(0);

    // reset global cell ids
    this->reset_global_cell_indices();
  }

#else

  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::update_number_cache()
  {
    Assert(false, ExcNeedsMPI());
  }

#endif

  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::update_reference_cells()
  {
    // run algorithm for locally-owned cells
    dealii::Triangulation<dim, spacedim>::update_reference_cells();

    // translate ReferenceCell to unsigned int (needed by
    // Utilities::MPI::compute_set_union)
    std::vector<unsigned int> reference_cells_ui;

    reference_cells_ui.reserve(this->reference_cells.size());
    for (const auto &i : this->reference_cells)
      reference_cells_ui.push_back(static_cast<unsigned int>(i));

    // create union
    reference_cells_ui =
      Utilities::MPI::compute_set_union(reference_cells_ui,
                                        this->mpi_communicator);

    // transform back and store result
    this->reference_cells.clear();
    for (const auto &i : reference_cells_ui)
      this->reference_cells.emplace_back(
        dealii::internal::make_reference_cell_from_int(i));
  }
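
  // Illustrative sketch, not part of this file: the translate-to-int /
  // compute-union / translate-back pattern used above works for any enum-like
  // type that MPI cannot reduce directly. The enum and values below are
  // assumed examples; only Utilities::MPI::compute_set_union() is a real
  // deal.II function.
  //
  //   enum class MarkerKind : unsigned int { corner = 0, edge = 1, face = 2 };
  //
  //   std::vector<unsigned int> local_kinds = {0, 2}; // kinds seen locally
  //   const std::vector<unsigned int> global_kinds =
  //     Utilities::MPI::compute_set_union(local_kinds, MPI_COMM_WORLD);
  //   // global_kinds now holds every kind present on at least one rank.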


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  types::subdomain_id
  TriangulationBase<dim, spacedim>::locally_owned_subdomain() const
  {
    return my_subdomain;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  const std::set<types::subdomain_id>
    &TriangulationBase<dim, spacedim>::ghost_owners() const
  {
    return number_cache.ghost_owners;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  const std::set<types::subdomain_id>
    &TriangulationBase<dim, spacedim>::level_ghost_owners() const
  {
    return number_cache.level_ghost_owners;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  std::vector<types::boundary_id> TriangulationBase<dim, spacedim>::
    get_boundary_ids() const
  {
    return Utilities::MPI::compute_set_union(
      dealii::Triangulation<dim, spacedim>::get_boundary_ids(),
      this->mpi_communicator);
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  std::vector<types::manifold_id> TriangulationBase<dim, spacedim>::
    get_manifold_ids() const
  {
    return Utilities::MPI::compute_set_union(
      dealii::Triangulation<dim, spacedim>::get_manifold_ids(),
      this->mpi_communicator);
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::reset_global_cell_indices()
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
#else
    if (const auto pst =
          dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
            this))
      if (pst->with_artificial_cells() == false)
        {
          // Specialization for parallel::shared::Triangulation without
          // artificial cells. The code below only works if a halo of a single
          // layer of ghost cells is needed.

          std::vector<unsigned int> cell_counter(n_subdomains + 1);

          // count number of cells of each process
          for (const auto &cell : this->active_cell_iterators())
            cell_counter[cell->subdomain_id() + 1]++;

          // take prefix sum to obtain offset of each process
          for (unsigned int i = 0; i < n_subdomains; ++i)
            cell_counter[i + 1] += cell_counter[i];

          AssertDimension(cell_counter.back(), this->n_active_cells());

          // create partitioners
          IndexSet is_local(this->n_active_cells());
          is_local.add_range(cell_counter[my_subdomain],
                             cell_counter[my_subdomain + 1]);
          number_cache.active_cell_index_partitioner =
            std::make_shared<const Utilities::MPI::Partitioner>(
              is_local,
              complete_index_set(this->n_active_cells()),
              this->mpi_communicator);

          // set global active cell indices and increment process-local counters
          for (const auto &cell : this->active_cell_iterators())
            cell->set_global_active_cell_index(
              cell_counter[cell->subdomain_id()]++);

          Assert(this->is_multilevel_hierarchy_constructed() == false,
                 ExcNotImplemented());

          return;
        }

    // 1) determine number of active locally-owned cells
    const types::global_cell_index n_locally_owned_cells =
      this->n_locally_owned_active_cells();

    // 2) determine the offset of each process
    types::global_cell_index cell_index = 0;

    const int ierr = MPI_Exscan(
      &n_locally_owned_cells,
      &cell_index,
      1,
      Utilities::MPI::mpi_type_id_for_type<decltype(n_locally_owned_cells)>,
      MPI_SUM,
      this->mpi_communicator);
    AssertThrowMPI(ierr);

    // 3) give global indices to locally-owned cells and mark all other cells
    //    as invalid
    std::pair<types::global_cell_index, types::global_cell_index> my_range;
    my_range.first = cell_index;

    for (const auto &cell : this->active_cell_iterators())
      if (cell->is_locally_owned())
        cell->set_global_active_cell_index(cell_index++);
      else
        cell->set_global_active_cell_index(numbers::invalid_dof_index);

    my_range.second = cell_index;

    // 4) determine the global indices of ghost cells
    std::vector<types::global_dof_index> is_ghost_vector;
    GridTools::exchange_cell_data_to_ghosts<types::global_cell_index>(
      static_cast<dealii::Triangulation<dim, spacedim> &>(*this),
      [](const auto &cell) { return cell->global_active_cell_index(); },
      [&is_ghost_vector](const auto &cell, const auto &id) {
        cell->set_global_active_cell_index(id);
        is_ghost_vector.push_back(id);
      });

    // 5) set up new partitioner
    IndexSet is_local(this->n_global_active_cells());
    is_local.add_range(my_range.first, my_range.second);

    std::sort(is_ghost_vector.begin(), is_ghost_vector.end());
    IndexSet is_ghost(this->n_global_active_cells());
    is_ghost.add_indices(is_ghost_vector.begin(), is_ghost_vector.end());

    number_cache.active_cell_index_partitioner =
      std::make_shared<const Utilities::MPI::Partitioner>(
        is_local, is_ghost, this->mpi_communicator);

    // 6) proceed with multigrid levels if requested
    if (this->is_multilevel_hierarchy_constructed() == true)
      {
        // 1) determine number of locally-owned cells on levels
        std::vector<types::global_cell_index> n_cells_level(
          this->n_global_levels(), 0);

        for (auto cell : this->cell_iterators())
          if (cell->level_subdomain_id() == this->locally_owned_subdomain())
            n_cells_level[cell->level()]++;

        // 2) determine the offset of each process
        std::vector<types::global_cell_index> cell_index(
          this->n_global_levels(), 0);

        int ierr = MPI_Exscan(
          n_cells_level.data(),
          cell_index.data(),
          this->n_global_levels(),
          Utilities::MPI::mpi_type_id_for_type<decltype(*n_cells_level.data())>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // 3) determine global number of "active" cells on each level
        Utilities::MPI::sum(n_cells_level,
                            this->mpi_communicator,
                            n_cells_level);

        // 4) give global indices to locally-owned cells on level and mark
        //    all other cells as invalid
        std::vector<
          std::pair<types::global_cell_index, types::global_cell_index>>
          my_ranges(this->n_global_levels());
        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
          my_ranges[l].first = cell_index[l];

        for (auto cell : this->cell_iterators())
          if (cell->level_subdomain_id() == this->locally_owned_subdomain())
            cell->set_global_level_cell_index(cell_index[cell->level()]++);
          else
            cell->set_global_level_cell_index(numbers::invalid_dof_index);

        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
          my_ranges[l].second = cell_index[l];

        // 5) update the numbers of ghost level cells
        std::vector<std::vector<types::global_dof_index>> is_ghost_vectors(
          this->n_global_levels());
        GridTools::exchange_cell_data_to_level_ghosts<
          types::global_cell_index,
          dealii::Triangulation<dim, spacedim>>(
          *this,
          [](const auto &cell) { return cell->global_level_cell_index(); },
          [&is_ghost_vectors](const auto &cell, const auto &id) {
            cell->set_global_level_cell_index(id);
            is_ghost_vectors[cell->level()].push_back(id);
          });

        number_cache.level_cell_index_partitioners.resize(
          this->n_global_levels());

        // 6) set up cell partitioners for each level
        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
          {
            IndexSet is_local(n_cells_level[l]);
            is_local.add_range(my_ranges[l].first, my_ranges[l].second);

            IndexSet is_ghost(n_cells_level[l]);
            std::sort(is_ghost_vectors[l].begin(), is_ghost_vectors[l].end());
            is_ghost.add_indices(is_ghost_vectors[l].begin(),
                                 is_ghost_vectors[l].end());

            number_cache.level_cell_index_partitioners[l] =
              std::make_shared<const Utilities::MPI::Partitioner>(
                is_local, is_ghost, this->mpi_communicator);
          }
      }

#endif
  }
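
  // Illustrative sketch, not part of this file: the numbering above relies on
  // MPI_Exscan, an exclusive prefix sum. Each rank contributes its local cell
  // count and receives the sum of the counts of all lower ranks, i.e. the
  // first global index of its contiguous range; rank 0 keeps its initial
  // value of zero, the same convention the code above relies on. The variable
  // names below are assumed examples.
  //
  //   std::uint64_t n_local = /* locally owned cells */ 0, my_offset = 0;
  //   MPI_Exscan(&n_local, &my_offset, 1, MPI_UINT64_T, MPI_SUM, comm);
  //   // this rank may now hand out global indices
  //   // [my_offset, my_offset + n_local)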


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::communicate_locally_moved_vertices(
    const std::vector<bool> &vertex_locally_moved)
  {
    AssertDimension(vertex_locally_moved.size(), this->n_vertices());
#ifdef DEBUG
    {
      const std::vector<bool> locally_owned_vertices =
        GridTools::get_locally_owned_vertices(*this);
      for (unsigned int i = 0; i < locally_owned_vertices.size(); ++i)
        Assert((vertex_locally_moved[i] == false) ||
                 (locally_owned_vertices[i] == true),
               ExcMessage("The vertex_locally_moved argument must not "
                          "contain vertices that are not locally owned"));
    }
#endif

    Point<spacedim> invalid_point;
    for (unsigned int d = 0; d < spacedim; ++d)
      invalid_point[d] = std::numeric_limits<double>::quiet_NaN();

    const auto pack = [&](const auto &cell) {
      std::vector<Point<spacedim>> vertices(cell->n_vertices());

      for (const auto v : cell->vertex_indices())
        if (vertex_locally_moved[cell->vertex_index(v)])
          vertices[v] = cell->vertex(v);
        else
          vertices[v] = invalid_point;

      return vertices;
    };

    const auto unpack = [&](const auto &cell, const auto &vertices) {
      for (const auto v : cell->vertex_indices())
        if (numbers::is_nan(vertices[v][0]) == false)
          cell->vertex(v) = vertices[v];
    };

    if (this->is_multilevel_hierarchy_constructed())
      GridTools::exchange_cell_data_to_level_ghosts<
        std::vector<Point<spacedim>>>(
        static_cast<dealii::Triangulation<dim, spacedim> &>(*this),
        pack,
        unpack);
    else
      GridTools::exchange_cell_data_to_ghosts<std::vector<Point<spacedim>>>(
        static_cast<dealii::Triangulation<dim, spacedim> &>(*this),
        pack,
        unpack);
  }
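
  // Illustrative sketch, not part of this file: the pack/unpack lambdas above
  // use a NaN-filled point as an "ignore this vertex" sentinel, so no extra
  // per-vertex flags have to be communicated. A minimal stand-alone version
  // of the same idea, with assumed variable names:
  //
  //   #include <cmath>
  //   #include <limits>
  //
  //   double value = std::numeric_limits<double>::quiet_NaN(); // "not moved"
  //   if (/* the sender actually moved this vertex */ false)
  //     value = 1.25; // the new coordinate to transmit
  //   if (!std::isnan(value))
  //     { /* the receiver applies the update */ }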


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  std::weak_ptr<const Utilities::MPI::Partitioner>
  TriangulationBase<dim, spacedim>::global_active_cell_index_partitioner() const
  {
    return number_cache.active_cell_index_partitioner;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  std::weak_ptr<const Utilities::MPI::Partitioner>
  TriangulationBase<dim, spacedim>::global_level_cell_index_partitioner(
    const unsigned int level) const
  {
    Assert(this->is_multilevel_hierarchy_constructed(), ExcNotImplemented());
    AssertIndexRange(level, this->n_global_levels());

    return number_cache.level_cell_index_partitioners[level];
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  types::coarse_cell_id
  TriangulationBase<dim, spacedim>::n_global_coarse_cells() const
  {
    return number_cache.number_of_global_coarse_cells;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  DistributedTriangulationBase<dim, spacedim>::DistributedTriangulationBase(
    const MPI_Comm mpi_communicator,
    const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
      smooth_grid,
    const bool check_for_distorted_cells)
    : dealii::parallel::TriangulationBase<dim, spacedim>(
        mpi_communicator,
        smooth_grid,
        check_for_distorted_cells)
  {}


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void DistributedTriangulationBase<dim, spacedim>::clear()
  {
    dealii::Triangulation<dim, spacedim>::clear();
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  bool DistributedTriangulationBase<dim, spacedim>::has_hanging_nodes() const
  {
    if (this->n_global_levels() <= 1)
      return false; // can not have hanging nodes without refined cells

    // if there are any active cells with level less than n_global_levels()-1,
    // then there is obviously also one with level n_global_levels()-1, and
    // consequently there must be a hanging node somewhere.
    //
    // The problem is that we cannot just ask for the first active cell, but
    // instead need to filter over locally owned cells.
    const bool have_coarser_cell =
      std::any_of(this->begin_active(this->n_global_levels() - 2),
                  this->end_active(this->n_global_levels() - 2),
                  [](const CellAccessor<dim, spacedim> &cell) {
                    return cell.is_locally_owned();
                  });

    // return true if at least one process has a coarser cell
    return Utilities::MPI::max(have_coarser_cell ? 1 : 0,
                               this->mpi_communicator) != 0;
  }
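
  // Illustrative sketch, not part of this file: as in the return statement
  // above, a global "does any rank satisfy X?" query can be phrased as a max
  // reduction over 0/1 flags. The flag names below are assumed examples; only
  // Utilities::MPI::max() is a real deal.II function.
  //
  //   const bool locally_true = /* some rank-local predicate */ false;
  //   const bool anywhere_true =
  //     (Utilities::MPI::max(locally_true ? 1 : 0, MPI_COMM_WORLD) != 0);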


} // end namespace parallel



/*-------------- Explicit Instantiations -------------------------------*/
#include "tria_base.inst"

DEAL_II_NAMESPACE_CLOSE