tria_base.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2015 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------


#include <deal.II/base/logstream.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/utilities.h>

#include <deal.II/distributed/shared_tria.h>
#include <deal.II/distributed/tria_base.h>

#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>

#include <deal.II/lac/vector_memory.h>

#include <algorithm>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <limits>
#include <numeric>

DEAL_II_NAMESPACE_OPEN

namespace parallel
{
  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  TriangulationBase<dim, spacedim>::TriangulationBase(
    const MPI_Comm mpi_communicator,
    const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
                   smooth_grid,
    const bool     check_for_distorted_cells)
    : dealii::Triangulation<dim, spacedim>(smooth_grid,
                                           check_for_distorted_cells)
    , mpi_communicator(mpi_communicator)
    , my_subdomain(Utilities::MPI::this_mpi_process(this->mpi_communicator))
    , n_subdomains(Utilities::MPI::n_mpi_processes(this->mpi_communicator))
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
#endif
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::copy_triangulation(
    const dealii::Triangulation<dim, spacedim> &other_tria)
  {
#ifndef DEAL_II_WITH_MPI
    (void)other_tria;
    Assert(false, ExcNeedsMPI());
#else
    dealii::Triangulation<dim, spacedim>::copy_triangulation(other_tria);

    if (const dealii::parallel::TriangulationBase<dim, spacedim> *other_tria_x =
          dynamic_cast<const dealii::parallel::TriangulationBase<dim, spacedim>
                         *>(&other_tria))
      {
        // release unused vector memory because we will have very different
        // vectors now
        ::dealii::internal::GrowingVectorMemoryImplementation::
          release_all_unused_memory();
      }
#endif
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  std::size_t TriangulationBase<dim, spacedim>::memory_consumption() const
  {
    std::size_t mem =
      this->dealii::Triangulation<dim, spacedim>::memory_consumption() +
      MemoryConsumption::memory_consumption(this->mpi_communicator) +
      MemoryConsumption::memory_consumption(my_subdomain) +
      MemoryConsumption::memory_consumption(
        number_cache.n_global_active_cells) +
      MemoryConsumption::memory_consumption(number_cache.n_global_levels);
    return mem;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  TriangulationBase<dim, spacedim>::~TriangulationBase()
  {
    // release unused vector memory because the vector layout is going to look
    // very different now
    ::dealii::internal::GrowingVectorMemoryImplementation::
      release_all_unused_memory();
  }


  template <int dim, int spacedim>
  TriangulationBase<dim, spacedim>::NumberCache::NumberCache()
    : n_locally_owned_active_cells(0)
    , n_global_active_cells(0)
    , number_of_global_coarse_cells(0)
    , n_global_levels(0)
  {}


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  unsigned int TriangulationBase<dim, spacedim>::n_locally_owned_active_cells()
    const
  {
    return number_cache.n_locally_owned_active_cells;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  unsigned int TriangulationBase<dim, spacedim>::n_global_levels() const
  {
    return number_cache.n_global_levels;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  types::global_cell_index
    TriangulationBase<dim, spacedim>::n_global_active_cells() const
  {
    return number_cache.n_global_active_cells;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  MPI_Comm TriangulationBase<dim, spacedim>::get_mpi_communicator() const
  {
    return mpi_communicator;
  }
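
  // Example (added for illustration; a minimal sketch, not part of the
  // library): the accessors above are typically used to report parallel
  // mesh statistics, where 'tria' is assumed to be an object of a class
  // derived from parallel::TriangulationBase, e.g. a
  // parallel::distributed::Triangulation<2>:
  //
  // @code
  // const MPI_Comm comm = tria.get_mpi_communicator();
  // if (Utilities::MPI::this_mpi_process(comm) == 0)
  //   std::cout << "global active cells: " << tria.n_global_active_cells()
  //             << ", global levels: " << tria.n_global_levels() << std::endl;
  // std::cout << "locally owned cells on rank "
  //           << Utilities::MPI::this_mpi_process(comm) << ": "
  //           << tria.n_locally_owned_active_cells() << std::endl;
  // @endcode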


#ifdef DEAL_II_WITH_MPI
  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::update_number_cache()
  {
    number_cache.ghost_owners.clear();
    number_cache.level_ghost_owners.clear();
    number_cache.n_locally_owned_active_cells = 0;

    if (this->n_levels() == 0)
      {
        // Skip communication done below if we do not have any cells
        // (meaning the Triangulation is empty on all processors). This will
        // happen when called from the destructor of Triangulation, which
        // can get called during exception handling causing a hang in this
        // function.
        number_cache.n_global_active_cells = 0;
        number_cache.n_global_levels       = 0;
        return;
      }


    {
      // find ghost owners
      for (const auto &cell : this->active_cell_iterators())
        if (cell->is_ghost())
          number_cache.ghost_owners.insert(cell->subdomain_id());

      Assert(number_cache.ghost_owners.size() <
               Utilities::MPI::n_mpi_processes(this->mpi_communicator),
             ExcInternalError());
    }

    if (this->n_levels() > 0)
      number_cache.n_locally_owned_active_cells = std::count_if(
        this->begin_active(),
        typename Triangulation<dim, spacedim>::active_cell_iterator(
          this->end()),
        [](const auto &i) { return i.is_locally_owned(); });
    else
      number_cache.n_locally_owned_active_cells = 0;

    // Potentially cast to a 64 bit type before accumulating to avoid
    // overflow:
    number_cache.n_global_active_cells =
      Utilities::MPI::sum(static_cast<types::global_cell_index>(
                            number_cache.n_locally_owned_active_cells),
                          this->mpi_communicator);

    number_cache.n_global_levels =
      Utilities::MPI::max(this->n_levels(), this->mpi_communicator);

    // Store MPI ranks of level ghost owners of this processor on all
    // levels.
    if (this->is_multilevel_hierarchy_constructed() == true)
      {
        number_cache.level_ghost_owners.clear();

        // if there is nothing to do, then do nothing
        if (this->n_levels() == 0)
          return;

        // find level ghost owners
        for (const auto &cell : this->cell_iterators())
          if (cell->level_subdomain_id() != numbers::artificial_subdomain_id &&
              cell->level_subdomain_id() != this->locally_owned_subdomain())
            this->number_cache.level_ghost_owners.insert(
              cell->level_subdomain_id());

        if constexpr (running_in_debug_mode())
          {
            // Check that level_ghost_owners is symmetric by sending a message
            // to everyone
            {
              int ierr = MPI_Barrier(this->mpi_communicator);
              AssertThrowMPI(ierr);

              const int mpi_tag = Utilities::MPI::internal::Tags::
                triangulation_base_fill_level_ghost_owners;

              // important: preallocate to avoid (re)allocation:
              std::vector<MPI_Request> requests(
                this->number_cache.level_ghost_owners.size());
              unsigned int dummy       = 0;
              unsigned int req_counter = 0;

              for (const auto &it : this->number_cache.level_ghost_owners)
                {
                  ierr = MPI_Isend(&dummy,
                                   1,
                                   MPI_UNSIGNED,
                                   it,
                                   mpi_tag,
                                   this->mpi_communicator,
                                   &requests[req_counter]);
                  AssertThrowMPI(ierr);
                  ++req_counter;
                }

              for (const auto &it : this->number_cache.level_ghost_owners)
                {
                  unsigned int dummy;
                  ierr = MPI_Recv(&dummy,
                                  1,
                                  MPI_UNSIGNED,
                                  it,
                                  mpi_tag,
                                  this->mpi_communicator,
                                  MPI_STATUS_IGNORE);
                  AssertThrowMPI(ierr);
                }

              if (requests.size() > 0)
                {
                  ierr = MPI_Waitall(requests.size(),
                                     requests.data(),
                                     MPI_STATUSES_IGNORE);
                  AssertThrowMPI(ierr);
                }

              ierr = MPI_Barrier(this->mpi_communicator);
              AssertThrowMPI(ierr);
            }
          }

        Assert(this->number_cache.level_ghost_owners.size() <
                 Utilities::MPI::n_mpi_processes(this->mpi_communicator),
               ExcInternalError());
      }

    this->number_cache.number_of_global_coarse_cells = this->n_cells(0);

    // reset global cell ids
    this->reset_global_cell_indices();
  }

#else

  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::update_number_cache()
  {
    Assert(false, ExcNeedsMPI());
  }

#endif
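
  // The debug-mode block in update_number_cache() above verifies that the
  // level-ghost-owner relation is symmetric: if rank A lists rank B as an
  // owner, rank B must also list rank A, otherwise one of the MPI_Recv
  // calls never finds a matching send and the program deadlocks. A
  // stripped-down sketch of the same pattern (illustrative only; it assumes
  // a set 'partners' of peer ranks and a communicator 'comm'):
  //
  // @code
  // std::vector<MPI_Request> requests(partners.size());
  // unsigned int send_dummy = 0, req_counter = 0;
  // for (const auto rank : partners) // one nonblocking send per partner
  //   MPI_Isend(&send_dummy, 1, MPI_UNSIGNED, rank, /*tag=*/42, comm,
  //             &requests[req_counter++]);
  // for (const auto rank : partners) // blocks unless the relation is mutual
  //   {
  //     unsigned int recv_dummy;
  //     MPI_Recv(&recv_dummy, 1, MPI_UNSIGNED, rank, /*tag=*/42, comm,
  //              MPI_STATUS_IGNORE);
  //   }
  // MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
  // @endcode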

  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::update_reference_cells()
  {
    // run algorithm for locally-owned cells
    dealii::Triangulation<dim, spacedim>::update_reference_cells();

    // translate ReferenceCell to unsigned int (needed by
    // Utilities::MPI::compute_set_union)
    std::vector<unsigned int> reference_cells_ui;

    reference_cells_ui.reserve(this->reference_cells.size());
    for (const auto &i : this->reference_cells)
      reference_cells_ui.push_back(static_cast<unsigned int>(i));

    // create union
    reference_cells_ui =
      Utilities::MPI::compute_set_union(reference_cells_ui,
                                        this->mpi_communicator);

    // transform back and store result
    this->reference_cells.clear();
    for (const auto &i : reference_cells_ui)
      this->reference_cells.emplace_back(
        dealii::internal::make_reference_cell_from_int(i));
  }
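
  // Utilities::MPI::compute_set_union() used above returns, on every
  // process, the union of the entries contributed by all processes. A
  // worked example with illustrative values: if rank 0 locally holds the
  // kinds {0, 1} and rank 1 holds {1, 3}, both ranks receive {0, 1, 3}, so
  // each process ends up knowing every reference-cell kind present anywhere
  // in the distributed mesh, not just the kinds of its own cells.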


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  types::subdomain_id
    TriangulationBase<dim, spacedim>::locally_owned_subdomain() const
  {
    return my_subdomain;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  const std::set<types::subdomain_id>
    &TriangulationBase<dim, spacedim>::ghost_owners() const
  {
    return number_cache.ghost_owners;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  const std::set<types::subdomain_id>
    &TriangulationBase<dim, spacedim>::level_ghost_owners() const
  {
    return number_cache.level_ghost_owners;
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  std::vector<types::boundary_id> TriangulationBase<dim, spacedim>::
    get_boundary_ids() const
  {
    return Utilities::MPI::compute_set_union(
      dealii::Triangulation<dim, spacedim>::get_boundary_ids(),
      this->mpi_communicator);
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  std::vector<types::manifold_id> TriangulationBase<dim, spacedim>::
    get_manifold_ids() const
  {
    return Utilities::MPI::compute_set_union(
      dealii::Triangulation<dim, spacedim>::get_manifold_ids(),
      this->mpi_communicator);
  }

  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::reset_global_cell_indices()
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
#else
    if (const auto pst =
          dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
            this))
      if (pst->with_artificial_cells() == false)
        {
          // Specialization for parallel::shared::Triangulation without
          // artificial cells. The code below only works if a halo of a
          // single layer of ghost cells is needed.

          std::vector<unsigned int> cell_counter(n_subdomains + 1);

          // count number of cells of each process
          for (const auto &cell : this->active_cell_iterators())
            cell_counter[cell->subdomain_id() + 1]++;

          // take prefix sum to obtain offset of each process
          for (unsigned int i = 0; i < n_subdomains; ++i)
            cell_counter[i + 1] += cell_counter[i];
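
          // Worked example (added for illustration): with three processes
          // owning 3, 2, and 4 active cells, cell_counter starts as
          // [0, 3, 2, 4]; after the prefix sum it is [0, 3, 5, 9], so
          // process p numbers its cells starting at cell_counter[p], and
          // the last entry equals the total number of active cells.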

          AssertDimension(cell_counter.back(), this->n_active_cells());

          // create partitioners
          IndexSet is_local(this->n_active_cells());
          is_local.add_range(cell_counter[my_subdomain],
                             cell_counter[my_subdomain + 1]);
          number_cache.active_cell_index_partitioner =
            std::make_shared<const Utilities::MPI::Partitioner>(
              is_local,
              complete_index_set(this->n_active_cells()),
              this->mpi_communicator);

          // set global active cell indices and increment process-local
          // counters
          for (const auto &cell : this->active_cell_iterators())
            cell->set_global_active_cell_index(
              cell_counter[cell->subdomain_id()]++);

          Assert(this->is_multilevel_hierarchy_constructed() == false,
                 ExcNotImplemented());

          return;
        }

    // 1) determine number of active locally-owned cells
    const types::global_cell_index n_locally_owned_cells =
      this->n_locally_owned_active_cells();

    // 2) determine the offset of each process
    types::global_cell_index cell_index = 0;

    const int ierr = MPI_Exscan(
      &n_locally_owned_cells,
      &cell_index,
      1,
      Utilities::MPI::mpi_type_id_for_type<decltype(n_locally_owned_cells)>,
      MPI_SUM,
      this->mpi_communicator);
    AssertThrowMPI(ierr);
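
    // Note (added for clarity): MPI_Exscan computes an exclusive prefix sum
    // over ranks, so each process receives the sum of the values contributed
    // by all lower-ranked processes; e.g., with local counts [3, 2, 4] the
    // three ranks receive 0, 3, and 5. Rank 0 receives no well-defined
    // value from the standard, which is why cell_index is initialized to
    // zero above.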

    // 3) give global indices to locally-owned cells and mark all other cells
    // as invalid
    std::pair<types::global_cell_index, types::global_cell_index> my_range;
    my_range.first = cell_index;

    for (const auto &cell : this->active_cell_iterators())
      if (cell->is_locally_owned())
        cell->set_global_active_cell_index(cell_index++);
      else
        cell->set_global_active_cell_index(numbers::invalid_dof_index);

    my_range.second = cell_index;

    // 4) determine the global indices of ghost cells
    std::vector<types::global_dof_index> is_ghost_vector;
    GridTools::exchange_cell_data_to_ghosts<types::global_cell_index>(
      static_cast<dealii::Triangulation<dim, spacedim> &>(*this),
      [](const auto &cell) { return cell->global_active_cell_index(); },
      [&is_ghost_vector](const auto &cell, const auto &id) {
        cell->set_global_active_cell_index(id);
        is_ghost_vector.push_back(id);
      });

    // 5) set up new partitioner
    IndexSet is_local(this->n_global_active_cells());
    is_local.add_range(my_range.first, my_range.second);

    std::sort(is_ghost_vector.begin(), is_ghost_vector.end());
    IndexSet is_ghost(this->n_global_active_cells());
    is_ghost.add_indices(is_ghost_vector.begin(), is_ghost_vector.end());

    number_cache.active_cell_index_partitioner =
      std::make_shared<const Utilities::MPI::Partitioner>(
        is_local, is_ghost, this->mpi_communicator);

    // 6) proceed with multigrid levels if requested
    if (this->is_multilevel_hierarchy_constructed() == true)
      {
        // 1) determine number of locally-owned cells on levels
        std::vector<types::global_cell_index> n_cells_level(
          this->n_global_levels(), 0);

        for (auto cell : this->cell_iterators())
          if (cell->level_subdomain_id() == this->locally_owned_subdomain())
            n_cells_level[cell->level()]++;

        // 2) determine the offset of each process
        std::vector<types::global_cell_index> cell_index(
          this->n_global_levels(), 0);

        int ierr = MPI_Exscan(
          n_cells_level.data(),
          cell_index.data(),
          this->n_global_levels(),
          Utilities::MPI::mpi_type_id_for_type<decltype(*n_cells_level.data())>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // 3) determine global number of "active" cells on each level
        Utilities::MPI::sum(n_cells_level,
                            this->mpi_communicator,
                            n_cells_level);

        // 4) give global indices to locally-owned cells on level and mark
        // all other cells as invalid
        std::vector<
          std::pair<types::global_cell_index, types::global_cell_index>>
          my_ranges(this->n_global_levels());
        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
          my_ranges[l].first = cell_index[l];

        for (auto cell : this->cell_iterators())
          if (cell->level_subdomain_id() == this->locally_owned_subdomain())
            cell->set_global_level_cell_index(cell_index[cell->level()]++);
          else
            cell->set_global_level_cell_index(numbers::invalid_dof_index);

        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
          my_ranges[l].second = cell_index[l];

        // 5) determine the global indices of ghost cells on each level
        std::vector<std::vector<types::global_dof_index>> is_ghost_vectors(
          this->n_global_levels());
        GridTools::exchange_cell_data_to_level_ghosts<
          types::global_cell_index,
          dealii::Triangulation<dim, spacedim>>(
          *this,
          [](const auto &cell) { return cell->global_level_cell_index(); },
          [&is_ghost_vectors](const auto &cell, const auto &id) {
            cell->set_global_level_cell_index(id);
            is_ghost_vectors[cell->level()].push_back(id);
          });

        number_cache.level_cell_index_partitioners.resize(
          this->n_global_levels());

        // 6) set up cell partitioners for each level
        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
          {
            IndexSet is_local(n_cells_level[l]);
            is_local.add_range(my_ranges[l].first, my_ranges[l].second);

            IndexSet is_ghost(n_cells_level[l]);
            std::sort(is_ghost_vectors[l].begin(), is_ghost_vectors[l].end());
            is_ghost.add_indices(is_ghost_vectors[l].begin(),
                                 is_ghost_vectors[l].end());

            number_cache.level_cell_index_partitioners[l] =
              std::make_shared<const Utilities::MPI::Partitioner>(
                is_local, is_ghost, this->mpi_communicator);
          }
      }

#endif
  }


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::communicate_locally_moved_vertices(
    const std::vector<bool> &vertex_locally_moved)
  {
    AssertDimension(vertex_locally_moved.size(), this->n_vertices());
    if constexpr (running_in_debug_mode())
      {
        {
          const std::vector<bool> locally_owned_vertices =
            GridTools::get_locally_owned_vertices(*this);
          for (unsigned int i = 0; i < locally_owned_vertices.size(); ++i)
            Assert((vertex_locally_moved[i] == false) ||
                     (locally_owned_vertices[i] == true),
                   ExcMessage("The vertex_locally_moved argument must not "
                              "contain vertices that are not locally owned"));
        }
      }

    Point<spacedim> invalid_point;
    for (unsigned int d = 0; d < spacedim; ++d)
      invalid_point[d] = std::numeric_limits<double>::quiet_NaN();

    const auto pack = [&](const auto &cell) {
      std::vector<Point<spacedim>> vertices(cell->n_vertices());

      for (const auto v : cell->vertex_indices())
        if (vertex_locally_moved[cell->vertex_index(v)])
          vertices[v] = cell->vertex(v);
        else
          vertices[v] = invalid_point;

      return vertices;
    };

    const auto unpack = [&](const auto &cell, const auto &vertices) {
      for (const auto v : cell->vertex_indices())
        if (numbers::is_nan(vertices[v][0]) == false)
          cell->vertex(v) = vertices[v];
    };

    if (this->is_multilevel_hierarchy_constructed())
      GridTools::exchange_cell_data_to_level_ghosts<
        std::vector<Point<spacedim>>>(
        static_cast<dealii::Triangulation<dim, spacedim> &>(*this),
        pack,
        unpack);
    else
      GridTools::exchange_cell_data_to_ghosts<std::vector<Point<spacedim>>>(
        static_cast<dealii::Triangulation<dim, spacedim> &>(*this),
        pack,
        unpack);
  }
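
  // Example (added; a minimal usage sketch, not taken from the library
  // docs): after moving locally owned vertices, a caller forwards the new
  // locations to the ghost copies on other processes. 'tria' is assumed to
  // be a parallel triangulation object:
  //
  // @code
  // std::vector<bool> vertex_moved(tria.n_vertices(), false);
  // const std::vector<bool> locally_owned =
  //   GridTools::get_locally_owned_vertices(tria);
  //
  // for (const auto &cell : tria.active_cell_iterators())
  //   if (cell->is_locally_owned())
  //     for (const auto v : cell->vertex_indices())
  //       if (locally_owned[cell->vertex_index(v)] &&
  //           !vertex_moved[cell->vertex_index(v)])
  //         {
  //           cell->vertex(v)[0] += 0.01; // move the vertex somehow
  //           vertex_moved[cell->vertex_index(v)] = true;
  //         }
  //
  // tria.communicate_locally_moved_vertices(vertex_moved);
  // @endcode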


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  std::weak_ptr<const Utilities::MPI::Partitioner> TriangulationBase<
    dim,
    spacedim>::global_active_cell_index_partitioner() const
  {
    return number_cache.active_cell_index_partitioner;
  }



  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  std::weak_ptr<const Utilities::MPI::Partitioner> TriangulationBase<dim,
                                                                     spacedim>::
    global_level_cell_index_partitioner(const unsigned int level) const
  {
    Assert(this->is_multilevel_hierarchy_constructed(), ExcNotImplemented());
    AssertIndexRange(level, this->n_global_levels());

    return number_cache.level_cell_index_partitioners[level];
  }
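
  // Example (added; a minimal sketch under the assumption that 'tria' is a
  // parallel triangulation whose number cache is up to date, and that
  // deal.II/lac/la_parallel_vector.h is included): the returned
  // std::weak_ptr must be locked before use, and the resulting partitioner
  // can directly initialize a ghosted distributed vector with one entry per
  // global active cell:
  //
  // @code
  // if (const auto partitioner =
  //       tria.global_active_cell_index_partitioner().lock())
  //   {
  //     LinearAlgebra::distributed::Vector<double> cell_data(partitioner);
  //     // fill locally owned entries, addressed through
  //     // cell->global_active_cell_index(), then make ghosts consistent:
  //     cell_data.update_ghost_values();
  //   }
  // @endcode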


  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  types::coarse_cell_id
    TriangulationBase<dim, spacedim>::n_global_coarse_cells() const
  {
    return number_cache.number_of_global_coarse_cells;
  }



  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  DistributedTriangulationBase<dim, spacedim>::DistributedTriangulationBase(
    const MPI_Comm mpi_communicator,
    const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
                   smooth_grid,
    const bool     check_for_distorted_cells)
    : dealii::parallel::TriangulationBase<dim, spacedim>(
        mpi_communicator,
        smooth_grid,
        check_for_distorted_cells)
  {}



  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  void TriangulationBase<dim, spacedim>::clear()
  {
    dealii::Triangulation<dim, spacedim>::clear();

    number_cache = {};
  }



  template <int dim, int spacedim>
  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
  bool DistributedTriangulationBase<dim, spacedim>::has_hanging_nodes() const
  {
    if (this->n_global_levels() <= 1)
      return false; // cannot have hanging nodes without refined cells

    // If there are any active cells with level less than n_global_levels()-1,
    // then there is obviously also one with level n_global_levels()-1, and
    // consequently there must be a hanging node somewhere.
    //
    // The problem is that we cannot just ask for the first active cell, but
    // instead need to filter over locally owned cells.
    const bool have_coarser_cell =
      std::any_of(this->begin_active(this->n_global_levels() - 2),
                  this->end_active(this->n_global_levels() - 2),
                  [](const CellAccessor<dim, spacedim> &cell) {
                    return cell.is_locally_owned();
                  });

    // return true if at least one process has a coarser cell
    return Utilities::MPI::max(have_coarser_cell ? 1 : 0,
                               this->mpi_communicator) != 0;
  }
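
  // Design note (added): the final reduction above realizes a global
  // logical OR with an integer max, a common idiom when no boolean
  // reduction is set up. In isolation (illustrative; 'comm' is assumed to
  // be a valid communicator):
  //
  // @code
  // const bool locally_true  = false; // some local predicate per rank
  // const bool globally_true =
  //   (Utilities::MPI::max(locally_true ? 1 : 0, comm) != 0);
  // @endcode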

} // end namespace parallel



/*-------------- Explicit Instantiations -------------------------------*/
#include "distributed/tria_base.inst"

DEAL_II_NAMESPACE_CLOSE