dof_info.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2020 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------


#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/parallel.h>
#include <deal.II/base/partitioner.h>
#include <deal.II/base/utilities.h>

#include <deal.II/matrix_free/dof_info.h>
#include <deal.II/matrix_free/dof_info.templates.h>

#include <iostream>

DEAL_II_NAMESPACE_OPEN

namespace internal
{
  namespace MatrixFreeFunctions
  {
    // ensure that the type defined in both dof_info.h and
    // hanging_nodes_internal.h is consistent
    static_assert(std::is_same_v<compressed_constraint_kind, std::uint8_t>,
                  "Unexpected type for compressed hanging node indicators!");



    DoFInfo::DoFInfo()
    {
      clear();
    }


    void
    DoFInfo::clear()
    {
      row_starts.clear();
      dof_indices.clear();
      constraint_indicator.clear();
      vector_partitioner.reset();
      ghost_dofs.clear();
      dofs_per_cell.clear();
      dofs_per_face.clear();
      vectorization_length       = 1;
      global_base_element_offset = 0;
      n_base_elements            = 0;
      n_components.clear();
      start_components.clear();
      row_starts_plain_indices.clear();
      plain_dof_indices.clear();
      dof_indices_interleaved.clear();
      for (unsigned int i = 0; i < 3; ++i)
        {
          index_storage_variants[i].clear();
          dof_indices_contiguous[i].clear();
          dof_indices_interleave_strides[i].clear();
          n_vectorization_lanes_filled[i].clear();
        }
      store_plain_indices = false;
      cell_active_fe_index.clear();
      max_fe_index = 0;
      fe_index_conversion.clear();
    }


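    // Collect the dof indices of all lanes of the given cell batch into
    // my_rows, either with constraints resolved (the indices actually read
    // from a vector) or, if apply_constraints is false and the batch is
    // constrained, the plain unconstrained indices. A typical use, sketched
    // here for an initialized MatrixFree object 'matrix_free' (hypothetical
    // name), is
    //
    //   std::vector<unsigned int> rows;
    //   matrix_free.get_dof_info().get_dof_indices_on_cell_batch(rows, batch);
    //
    // to query which vector entries a cell batch touches.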
    void
    DoFInfo::get_dof_indices_on_cell_batch(std::vector<unsigned int> &my_rows,
                                           const unsigned int         cell,
                                           const bool apply_constraints) const
    {
      const unsigned int n_fe_components = start_components.back();
      const unsigned int fe_index =
        dofs_per_cell.size() == 1 ? 0 : cell_active_fe_index[cell];
      const unsigned int dofs_this_cell = dofs_per_cell[fe_index];

      const unsigned int n_vectorization  = vectorization_length;
      constexpr auto     dof_access_index = dof_access_cell;
      AssertIndexRange(cell,
                       n_vectorization_lanes_filled[dof_access_index].size());
      const unsigned int n_vectorization_actual =
        n_vectorization_lanes_filled[dof_access_index][cell];

      // we might have constraints, so the final number
      // of indices is not known a priori.
      // conservatively reserve the maximum without constraints
      my_rows.reserve(n_vectorization * dofs_this_cell);
      my_rows.resize(0);
      unsigned int total_size = 0;
      for (unsigned int v = 0; v < n_vectorization_actual; ++v)
        {
          const unsigned int ib =
            (cell * n_vectorization + v) * n_fe_components;
          const unsigned int ie =
            (cell * n_vectorization + v + 1) * n_fe_components;

          // figure out constraints by comparing constraint_indicator row
          // shift for this cell within the block as compared to the next
          // one
          const bool has_constraints =
            (hanging_node_constraint_masks.size() != 0 &&
             hanging_node_constraint_masks_comp.size() != 0 &&
             hanging_node_constraint_masks[cell * n_vectorization + v] !=
               unconstrained_compressed_constraint_kind &&
             hanging_node_constraint_masks_comp[fe_index][0 /*TODO*/]) ||
            (row_starts[ib].second != row_starts[ib + n_fe_components].second);

          auto do_copy = [&](const unsigned int *begin,
                             const unsigned int *end) {
            const unsigned int shift = total_size;
            total_size += (end - begin);
            my_rows.resize(total_size);
            std::copy(begin, end, my_rows.begin() + shift);
          };

          if (!has_constraints || apply_constraints)
            {
              const unsigned int *begin =
                dof_indices.data() + row_starts[ib].first;
              const unsigned int *end =
                dof_indices.data() + row_starts[ie].first;
              do_copy(begin, end);
            }
          else
            {
              Assert(row_starts_plain_indices[cell * n_vectorization + v] !=
                       numbers::invalid_unsigned_int,
                     ExcNotInitialized());
              const unsigned int *begin =
                plain_dof_indices.data() +
                row_starts_plain_indices[cell * n_vectorization + v];
              const unsigned int *end = begin + dofs_this_cell;
              do_copy(begin, end);
            }
        }
    }


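    // Assign the ghost dofs collected during index setup a contiguous local
    // numbering, rewrite the indices stored on the boundary cells
    // accordingly, and hand the resulting ghost index set to the vector
    // partitioner and its data exchanger.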
    void
    DoFInfo::assign_ghosts(const std::vector<unsigned int> &boundary_cells,
                           const MPI_Comm                   communicator_sm,
                           const bool use_vector_data_exchanger_full)
    {
      Assert(boundary_cells.size() < row_starts.size(), ExcInternalError());

      // sort ghost dofs and compress out duplicates
      const unsigned int n_owned  = (vector_partitioner->local_range().second -
                                    vector_partitioner->local_range().first);
      const std::size_t  n_ghosts = ghost_dofs.size();
#ifdef DEBUG
      for (const auto dof_index : dof_indices)
        AssertIndexRange(dof_index, n_owned + n_ghosts);
#endif

      const unsigned int        n_components = start_components.back();
      std::vector<unsigned int> ghost_numbering(n_ghosts);
      IndexSet                  ghost_indices(vector_partitioner->size());
      if (n_ghosts > 0)
        {
          unsigned int n_unique_ghosts = 0;
          // since we need to go back to the local_to_global indices and
          // replace the temporary numbering of ghosts by the real number in
          // the index set, we need to store these values
          std::vector<std::pair<types::global_dof_index, unsigned int>>
            ghost_origin(n_ghosts);
          for (std::size_t i = 0; i < n_ghosts; ++i)
            {
              ghost_origin[i].first  = ghost_dofs[i];
              ghost_origin[i].second = i;
            }
          std::sort(ghost_origin.begin(), ghost_origin.end());

          types::global_dof_index last_contiguous_start = ghost_origin[0].first;
          ghost_numbering[ghost_origin[0].second]       = 0;
          for (std::size_t i = 1; i < n_ghosts; ++i)
            {
              if (ghost_origin[i].first > ghost_origin[i - 1].first + 1)
                {
                  ghost_indices.add_range(last_contiguous_start,
                                          ghost_origin[i - 1].first + 1);
                  last_contiguous_start = ghost_origin[i].first;
                }
              if (ghost_origin[i].first > ghost_origin[i - 1].first)
                ++n_unique_ghosts;
              ghost_numbering[ghost_origin[i].second] = n_unique_ghosts;
            }
          ++n_unique_ghosts;
          ghost_indices.add_range(last_contiguous_start,
                                  ghost_origin.back().first + 1);
          ghost_indices.compress();

          // make sure that we got the correct local numbering of the ghost
          // dofs. the ghost index set should store the same number
          {
            AssertDimension(n_unique_ghosts, ghost_indices.n_elements());
            for (std::size_t i = 0; i < n_ghosts; ++i)
              Assert(ghost_numbering[i] ==
                       ghost_indices.index_within_set(ghost_dofs[i]),
                     ExcInternalError());
          }

          // apply correct numbering for ghost indices: We previously just
          // enumerated them according to their appearance in the
          // local_to_global structure. Above, we derived a relation between
          // this enumeration and the actual number
          const unsigned int n_boundary_cells = boundary_cells.size();
          for (unsigned int i = 0; i < n_boundary_cells; ++i)
            {
              unsigned int *data_ptr =
                dof_indices.data() +
                row_starts[boundary_cells[i] * n_components].first;
              const unsigned int *row_end =
                dof_indices.data() +
                row_starts[(boundary_cells[i] + 1) * n_components].first;
              for (; data_ptr != row_end; ++data_ptr)
                *data_ptr = ((*data_ptr < n_owned) ?
                               *data_ptr :
                               n_owned + ghost_numbering[*data_ptr - n_owned]);

              // now the same procedure for plain indices
              if (store_plain_indices == true)
                {
                  bool has_hanging_nodes = false;

                  const unsigned int fe_index =
                    (cell_active_fe_index.empty() ||
                     dofs_per_cell.size() == 1) ?
                      0 :
                      cell_active_fe_index[boundary_cells[i]];
                  AssertIndexRange(fe_index, dofs_per_cell.size());

                  if (hanging_node_constraint_masks.size() > 0 &&
                      hanging_node_constraint_masks_comp.size() > 0 &&
                      hanging_node_constraint_masks[boundary_cells[i]] !=
                        unconstrained_compressed_constraint_kind)
                    for (unsigned int comp = 0; comp < n_components; ++comp)
                      has_hanging_nodes |=
                        hanging_node_constraint_masks_comp[fe_index][comp];

                  if (has_hanging_nodes ||
                      row_starts[boundary_cells[i] * n_components].second !=
                        row_starts[(boundary_cells[i] + 1) * n_components]
                          .second)
                    {
                      unsigned int *data_ptr =
                        plain_dof_indices.data() +
                        row_starts_plain_indices[boundary_cells[i]];
                      const unsigned int *row_end =
                        data_ptr + dofs_per_cell[fe_index];
                      for (; data_ptr != row_end; ++data_ptr)
                        *data_ptr =
                          ((*data_ptr < n_owned) ?
                             *data_ptr :
                             n_owned + ghost_numbering[*data_ptr - n_owned]);
                    }
                }
            }
        }

      std::vector<types::global_dof_index> empty;
      ghost_dofs.swap(empty);

      // set the ghost indices now. need to cast away constness here, but that
      // is uncritical since we reset the Partitioner in the same initialize
      // call as this call here.
      Utilities::MPI::Partitioner *vec_part =
        const_cast<Utilities::MPI::Partitioner *>(vector_partitioner.get());
      vec_part->set_ghost_indices(ghost_indices);

      if (use_vector_data_exchanger_full == false)
        vector_exchanger = std::make_shared<
          MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
          vector_partitioner);
      else
        vector_exchanger =
          std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
            vector_partitioner, communicator_sm);
    }


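    // Reorder the cell-wise dof storage according to the renumbering of the
    // task scheduler, padding cell batches that are not completely filled
    // with invalid entries, and run consistency checks in debug mode.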
    void
    DoFInfo::reorder_cells(
      const TaskInfo                   &task_info,
      const std::vector<unsigned int>  &renumbering,
      const std::vector<unsigned int>  &constraint_pool_row_index,
      const std::vector<unsigned char> &irregular_cells)
    {
      (void)constraint_pool_row_index;

      // first reorder the active FE index.
      const bool have_hp = dofs_per_cell.size() > 1;
      if (cell_active_fe_index.size() > 0)
        {
          std::vector<unsigned int> new_active_fe_index;
          new_active_fe_index.reserve(task_info.cell_partition_data.back());
          unsigned int position_cell = 0;
          for (unsigned int cell = 0;
               cell < task_info.cell_partition_data.back();
               ++cell)
            {
              const unsigned int n_comp =
                (irregular_cells[cell] > 0 ? irregular_cells[cell] :
                                             vectorization_length);

              // take maximum FE index among the ones present (we might have
              // lumped some lower indices into higher ones)
              unsigned int fe_index =
                cell_active_fe_index[renumbering[position_cell]];
              for (unsigned int j = 1; j < n_comp; ++j)
                fe_index = std::max(
                  fe_index,
                  cell_active_fe_index[renumbering[position_cell + j]]);

              new_active_fe_index.push_back(fe_index);
              position_cell += n_comp;
            }
          std::swap(new_active_fe_index, cell_active_fe_index);
        }
      if (have_hp)
        AssertDimension(cell_active_fe_index.size(),
                        task_info.cell_partition_data.back());

      const unsigned int n_components = start_components.back();

      std::vector<std::pair<unsigned int, unsigned int>> new_row_starts(
        vectorization_length * n_components *
          task_info.cell_partition_data.back() +
        1);
      std::vector<unsigned int> new_dof_indices;
      std::vector<std::pair<unsigned short, unsigned short>>
        new_constraint_indicator;
      std::vector<unsigned int> new_plain_indices, new_rowstart_plain;
      unsigned int              position_cell = 0;
      new_dof_indices.reserve(dof_indices.size());
      new_constraint_indicator.reserve(constraint_indicator.size());

      std::vector<compressed_constraint_kind>
        new_hanging_node_constraint_masks;
      new_hanging_node_constraint_masks.reserve(
        hanging_node_constraint_masks.size());

      if (store_plain_indices == true)
        {
          new_rowstart_plain.resize(vectorization_length *
                                        task_info.cell_partition_data.back() +
                                      1,
                                    numbers::invalid_unsigned_int);
          new_plain_indices.reserve(plain_dof_indices.size());
        }

      // copy the indices and the constraint indicators to the new data field,
      // where we will go through the cells in the renumbered way. in case the
      // vectorization length does not exactly match up, we fill invalid
      // numbers to the rowstart data. for contiguous cell indices, we skip
      // the rowstarts field completely and directly go into the
      // new_dof_indices field (this layout is used in FEEvaluation).
      for (unsigned int i = 0; i < task_info.cell_partition_data.back(); ++i)
        {
          const unsigned int n_vect =
            (irregular_cells[i] > 0 ? irregular_cells[i] :
                                      vectorization_length);
          const unsigned int dofs_per_cell =
            have_hp ? this->dofs_per_cell[cell_active_fe_index[i]] :
                      this->dofs_per_cell[0];

          for (unsigned int j = 0; j < n_vect; ++j)
            {
              const unsigned int cell_no =
                renumbering[position_cell + j] * n_components;

              bool has_hanging_nodes = false;

              if (hanging_node_constraint_masks.size() > 0 &&
                  hanging_node_constraint_masks_comp.size() > 0)
                {
                  const auto mask =
                    hanging_node_constraint_masks[renumbering[position_cell +
                                                              j]];
                  new_hanging_node_constraint_masks.push_back(mask);

                  if (mask != unconstrained_compressed_constraint_kind)
                    for (unsigned int comp = 0; comp < n_components; ++comp)
                      has_hanging_nodes |= hanging_node_constraint_masks_comp
                        [have_hp ? cell_active_fe_index[i] : 0][comp];
                }

              for (unsigned int comp = 0; comp < n_components; ++comp)
                {
                  new_row_starts[(i * vectorization_length + j) *
                                   n_components +
                                 comp]
                    .first = new_dof_indices.size();
                  new_row_starts[(i * vectorization_length + j) *
                                   n_components +
                                 comp]
                    .second = new_constraint_indicator.size();

                  new_dof_indices.insert(
                    new_dof_indices.end(),
                    dof_indices.data() + row_starts[cell_no + comp].first,
                    dof_indices.data() + row_starts[cell_no + comp + 1].first);
                  for (unsigned int index = row_starts[cell_no + comp].second;
                       index != row_starts[cell_no + comp + 1].second;
                       ++index)
                    new_constraint_indicator.push_back(
                      constraint_indicator[index]);
                }
              if (store_plain_indices &&
                  ((row_starts[cell_no].second !=
                    row_starts[cell_no + n_components].second) ||
                   has_hanging_nodes))
                {
                  new_rowstart_plain[i * vectorization_length + j] =
                    new_plain_indices.size();
                  new_plain_indices.insert(
                    new_plain_indices.end(),
                    plain_dof_indices.data() +
                      row_starts_plain_indices[renumbering[position_cell + j]],
                    plain_dof_indices.data() +
                      row_starts_plain_indices[renumbering[position_cell +
                                                           j]] +
                      dofs_per_cell);
                }
            }
          for (unsigned int j = n_vect; j < vectorization_length; ++j)
            for (unsigned int comp = 0; comp < n_components; ++comp)
              {
                new_row_starts[(i * vectorization_length + j) * n_components +
                               comp]
                  .first = new_dof_indices.size();
                new_row_starts[(i * vectorization_length + j) * n_components +
                               comp]
                  .second = new_constraint_indicator.size();
              }

          for (unsigned int j = n_vect; j < vectorization_length; ++j)
            if (hanging_node_constraint_masks.size() > 0)
              new_hanging_node_constraint_masks.push_back(
                unconstrained_compressed_constraint_kind);

          position_cell += n_vect;
        }
      AssertDimension(position_cell * n_components + 1, row_starts.size());

      AssertDimension(dof_indices.size(), new_dof_indices.size());
      new_row_starts[task_info.cell_partition_data.back() *
                     vectorization_length * n_components]
        .first = new_dof_indices.size();
      new_row_starts[task_info.cell_partition_data.back() *
                     vectorization_length * n_components]
        .second = new_constraint_indicator.size();

      AssertDimension(constraint_indicator.size(),
                      new_constraint_indicator.size());

      new_row_starts.swap(row_starts);
      new_dof_indices.swap(dof_indices);
      new_constraint_indicator.swap(constraint_indicator);
      new_plain_indices.swap(plain_dof_indices);
      new_rowstart_plain.swap(row_starts_plain_indices);
      new_hanging_node_constraint_masks.swap(hanging_node_constraint_masks);

#ifdef DEBUG
      // sanity check 1: all indices should be smaller than the number of dofs
      // locally owned plus the number of ghosts
      const unsigned int index_range =
        (vector_partitioner->local_range().second -
         vector_partitioner->local_range().first) +
        vector_partitioner->ghost_indices().n_elements();
      for (const auto dof_index : dof_indices)
        AssertIndexRange(dof_index, index_range);

      // sanity check 2: for the constraint indicators, the first index should
      // be smaller than the number of indices in the row, and the second
      // index should be smaller than the number of constraints in the
      // constraint pool.
      for (unsigned int row = 0; row < task_info.cell_partition_data.back();
           ++row)
        {
          const unsigned int row_length_ind =
            row_starts[(row * vectorization_length + 1) * n_components].first -
            row_starts[row * vectorization_length * n_components].first;
          AssertIndexRange(
            row_starts[(row * vectorization_length + 1) * n_components].second,
            constraint_indicator.size() + 1);
          const std::pair<unsigned short, unsigned short>
            *con_it =
              constraint_indicator.data() +
              row_starts[row * vectorization_length * n_components].second,
            *end_con =
              constraint_indicator.data() +
              row_starts[(row * vectorization_length + 1) * n_components]
                .second;
          for (; con_it != end_con; ++con_it)
            {
              AssertIndexRange(con_it->first, row_length_ind + 1);
              AssertIndexRange(con_it->second,
                               constraint_pool_row_index.size() - 1);
            }
        }

      // sanity check 3: check the number of cells once again
      unsigned int n_active_cells = 0;
      for (unsigned int c = 0; c < *(task_info.cell_partition_data.end() - 2);
           ++c)
        if (irregular_cells[c] > 0)
          n_active_cells += irregular_cells[c];
        else
          n_active_cells += vectorization_length;
      AssertDimension(n_active_cells, task_info.n_active_cells);
#endif

      compute_cell_index_compression(irregular_cells);
    }


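    // Inspect the dof indices of each cell batch and classify the batch into
    // one of the IndexStorageVariants (full, contiguous, interleaved, ...),
    // which allows FEEvaluation to pick the cheapest read/write path later.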
    void
    DoFInfo::compute_cell_index_compression(
      const std::vector<unsigned char> &irregular_cells)
    {
      const bool         have_hp      = dofs_per_cell.size() > 1;
      const unsigned int n_components = start_components.back();

      Assert(vectorization_length == 1 ||
               row_starts.size() % vectorization_length == 1,
             ExcInternalError());
      if (vectorization_length > 1)
        AssertDimension(row_starts.size() / vectorization_length /
                          n_components,
                        irregular_cells.size());
      index_storage_variants[dof_access_cell].resize(
        irregular_cells.size(), IndexStorageVariants::full);
      n_vectorization_lanes_filled[dof_access_cell].resize(
        irregular_cells.size());
      for (unsigned int i = 0; i < irregular_cells.size(); ++i)
        if (irregular_cells[i] > 0)
          n_vectorization_lanes_filled[dof_access_cell][i] = irregular_cells[i];
        else
          n_vectorization_lanes_filled[dof_access_cell][i] =
            vectorization_length;

      dof_indices_contiguous[dof_access_cell].resize(
        irregular_cells.size() * vectorization_length,
        numbers::invalid_unsigned_int);
      dof_indices_interleave_strides[dof_access_cell].resize(
        irregular_cells.size() * vectorization_length,
        numbers::invalid_unsigned_int);

      std::vector<unsigned int> index_kinds(
        static_cast<unsigned int>(
          IndexStorageVariants::interleaved_contiguous_mixed_strides) +
        1);
      std::vector<unsigned int> offsets(vectorization_length);
      for (unsigned int i = 0; i < irregular_cells.size(); ++i)
        {
          const unsigned int ndofs =
            dofs_per_cell[have_hp ? cell_active_fe_index[i] : 0];
          const unsigned int n_comp =
            n_vectorization_lanes_filled[dof_access_cell][i];

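          // The checks below distinguish how the dof indices of the lanes of
          // one batch relate to each other. For example, with
          // vectorization_length == 4 and ndofs == 3, "contiguous" means each
          // lane j owns a consecutive range (c_j, c_j+1, c_j+2), while
          // "interleaved_contiguous" means dof k of lane j sits at position
          // start + k * 4 + j, i.e. the lanes are interleaved within a single
          // consecutive range.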
          // check 1: Check if there are constraints -> no compression possible
          bool has_constraints = false;
          for (unsigned int j = 0; j < n_comp; ++j)
            {
              const unsigned int cell_no = i * vectorization_length + j;
              if (row_starts[cell_no * n_components].second !=
                  row_starts[(cell_no + 1) * n_components].second)
                {
                  has_constraints = true;
                  break;
                }
            }
          if (has_constraints)
            index_storage_variants[dof_access_cell][i] =
              IndexStorageVariants::full;
          else
            {
              bool indices_are_contiguous = (ndofs > 0);
              for (unsigned int j = 0; j < n_comp; ++j)
                {
                  const unsigned int  cell_no = i * vectorization_length + j;
                  const unsigned int *dof_indices =
                    this->dof_indices.data() +
                    row_starts[cell_no * n_components].first;
                  AssertDimension(
                    ndofs,
                    row_starts[(cell_no + 1) * n_components].first -
                      row_starts[cell_no * n_components].first);
                  for (unsigned int i = 1; i < ndofs; ++i)
                    if (dof_indices[i] != dof_indices[0] + i)
                      {
                        indices_are_contiguous = false;
                        break;
                      }
                }

              bool indices_are_interleaved_and_contiguous =
                (ndofs > 1 && n_comp == vectorization_length);

              {
                const unsigned int *dof_indices =
                  this->dof_indices.data() +
                  row_starts[i * vectorization_length * n_components].first;
                for (unsigned int k = 0;
                     k < ndofs && indices_are_interleaved_and_contiguous;
                     ++k)
                  for (unsigned int j = 0; j < n_comp; ++j)
                    if (dof_indices[j * ndofs + k] !=
                        dof_indices[0] + k * n_comp + j)
                      {
                        indices_are_interleaved_and_contiguous = false;
                        break;
                      }
              }

              if (indices_are_contiguous ||
                  indices_are_interleaved_and_contiguous)
                {
                  for (unsigned int j = 0; j < n_comp; ++j)
                    {
                      const unsigned int start_index =
                        row_starts[(i * vectorization_length + j) *
                                   n_components]
                          .first;
                      AssertIndexRange(start_index, dof_indices.size());
                      dof_indices_contiguous[dof_access_cell]
                                            [i * vectorization_length + j] =
                                              this->dof_indices.empty() ?
                                                0 :
                                                this->dof_indices[start_index];
                    }
                }

              if (indices_are_interleaved_and_contiguous)
                {
                  index_storage_variants[dof_access_cell][i] =
                    IndexStorageVariants::interleaved_contiguous;
                  for (unsigned int j = 0; j < n_comp; ++j)
                    dof_indices_interleave_strides[2][i * vectorization_length +
                                                      j] = n_comp;
                }
              else if (indices_are_contiguous)
                {
                  index_storage_variants[dof_access_cell][i] =
                    IndexStorageVariants::contiguous;
                  for (unsigned int j = 0; j < n_comp; ++j)
                    dof_indices_interleave_strides[2][i * vectorization_length +
                                                      j] = 1;
                }
              else if (ndofs > 0)
                {
                  int                 indices_are_interleaved_and_mixed = 2;
                  const unsigned int *dof_indices =
                    &this->dof_indices[row_starts[i * vectorization_length *
                                                  n_components]
                                         .first];
                  for (unsigned int j = 0; j < n_comp; ++j)
                    offsets[j] =
                      dof_indices[j * ndofs + 1] - dof_indices[j * ndofs];
                  for (unsigned int k = 0;
                       k < ndofs && indices_are_interleaved_and_mixed != 0;
                       ++k)
                    for (unsigned int j = 0; j < n_comp; ++j)
                      // the first if case is to avoid negative offsets
                      // (invalid)
                      if (dof_indices[j * ndofs + 1] < dof_indices[j * ndofs] ||
                          dof_indices[j * ndofs + k] !=
                            dof_indices[j * ndofs] + k * offsets[j])
                        {
                          indices_are_interleaved_and_mixed = 0;
                          break;
                        }
                  if (indices_are_interleaved_and_mixed == 2)
                    {
                      for (unsigned int j = 0; j < n_comp; ++j)
                        dof_indices_interleave_strides
                          [dof_access_cell][i * vectorization_length + j] =
                            offsets[j];
                      for (unsigned int j = 0; j < n_comp; ++j)
                        dof_indices_contiguous[dof_access_cell]
                                              [i * vectorization_length + j] =
                                                dof_indices[j * ndofs];
                      for (unsigned int j = 0; j < n_comp; ++j)
                        if (offsets[j] != vectorization_length)
                          {
                            indices_are_interleaved_and_mixed = 1;
                            break;
                          }
                      if (indices_are_interleaved_and_mixed == 1 ||
                          n_comp != vectorization_length)
                        index_storage_variants[dof_access_cell][i] =
                          IndexStorageVariants::
                            interleaved_contiguous_mixed_strides;
                      else
                        index_storage_variants[dof_access_cell][i] =
                          IndexStorageVariants::interleaved_contiguous_strided;
                    }
                  else
                    {
                      const unsigned int *dof_indices =
                        this->dof_indices.data() +
                        row_starts[i * vectorization_length * n_components]
                          .first;
                      if (n_comp == vectorization_length)
                        index_storage_variants[dof_access_cell][i] =
                          IndexStorageVariants::interleaved;
                      else
                        index_storage_variants[dof_access_cell][i] =
                          IndexStorageVariants::full;

                      // do not use interleaved storage if two entries within
                      // vectorized array point to the same index (scatter not
                      // possible due to race condition)
                      for (const unsigned int *indices = dof_indices;
                           indices != dof_indices + ndofs;
                           ++indices)
                        {
                          bool         is_sorted = true;
                          unsigned int previous  = indices[0];
                          for (unsigned int l = 1; l < n_comp; ++l)
                            {
                              const unsigned int current = indices[l * ndofs];
                              if (current <= previous)
                                is_sorted = false;

                              // the simple check failed, must compare all
                              // indices manually - due to short sizes this
                              // O(n^2) algorithm is better than sorting
                              if (!is_sorted)
                                for (unsigned int j = 0; j < l; ++j)
                                  if (indices[j * ndofs] == current)
                                    {
                                      index_storage_variants
                                        [dof_access_cell][i] =
                                          IndexStorageVariants::full;
                                      break;
                                    }
                              previous = current;
                            }
                        }
                    }
                }
              else // ndofs == 0
                index_storage_variants[dof_access_cell][i] =
                  IndexStorageVariants::contiguous;
            }
          index_kinds[static_cast<unsigned int>(
            index_storage_variants[dof_access_cell][i])]++;
        }

      // Cleanup phase: we want to avoid single cells with different properties
      // than the bulk of the domain in order to avoid extra checks in the face
      // identification.

      // Step 1: check whether the interleaved indices were only assigned to
      // the single cell within a vectorized array.
      auto fix_single_interleaved_indices =
        [&](const IndexStorageVariants variant) {
          if (index_kinds[static_cast<unsigned int>(
                IndexStorageVariants::interleaved_contiguous_mixed_strides)] >
                0 &&
              index_kinds[static_cast<unsigned int>(variant)] > 0)
            for (unsigned int i = 0; i < irregular_cells.size(); ++i)
              {
                if (index_storage_variants[dof_access_cell][i] ==
                      IndexStorageVariants::
                        interleaved_contiguous_mixed_strides &&
                    n_vectorization_lanes_filled[dof_access_cell][i] == 1 &&
                    (variant != IndexStorageVariants::contiguous ||
                     dof_indices_interleave_strides[dof_access_cell]
                                                   [i * vectorization_length] ==
                       1))
                  {
                    index_storage_variants[dof_access_cell][i] = variant;
                    index_kinds[static_cast<unsigned int>(
                      IndexStorageVariants::
                        interleaved_contiguous_mixed_strides)]--;
                    index_kinds[static_cast<unsigned int>(variant)]++;
                  }
              }
        };

      fix_single_interleaved_indices(IndexStorageVariants::full);
      fix_single_interleaved_indices(IndexStorageVariants::contiguous);
      fix_single_interleaved_indices(IndexStorageVariants::interleaved);

      unsigned int n_interleaved =
        index_kinds[static_cast<unsigned int>(
          IndexStorageVariants::interleaved_contiguous)] +
        index_kinds[static_cast<unsigned int>(
          IndexStorageVariants::interleaved_contiguous_strided)] +
        index_kinds[static_cast<unsigned int>(
          IndexStorageVariants::interleaved_contiguous_mixed_strides)];

      // Step 2: fix single contiguous cell among others with interleaved
      // storage
      if (n_interleaved > 0 && index_kinds[static_cast<unsigned int>(
                                 IndexStorageVariants::contiguous)] == 1)
        for (unsigned int i = 0; i < irregular_cells.size(); ++i)
          if (index_storage_variants[dof_access_cell][i] ==
              IndexStorageVariants::contiguous)
            {
              index_storage_variants[dof_access_cell][i] =
                IndexStorageVariants::interleaved_contiguous_mixed_strides;
              index_kinds[static_cast<unsigned int>(
                IndexStorageVariants::contiguous)]--;
              index_kinds[static_cast<unsigned int>(
                IndexStorageVariants::interleaved_contiguous_mixed_strides)]++;
            }

      // Step 3: Interleaved cells are left but also some non-contiguous ones
      // -> revert all to full storage
      if (n_interleaved > 0 &&
          index_kinds[static_cast<unsigned int>(IndexStorageVariants::full)] +
              index_kinds[static_cast<unsigned int>(
                IndexStorageVariants::interleaved)] >
            0)
        for (unsigned int i = 0; i < irregular_cells.size(); ++i)
          if (index_storage_variants[dof_access_cell][i] >
              IndexStorageVariants::contiguous)
            {
              index_kinds[static_cast<unsigned int>(
                index_storage_variants[2][i])]--;
              if (n_vectorization_lanes_filled[dof_access_cell][i] ==
                  vectorization_length)
                index_storage_variants[dof_access_cell][i] =
                  IndexStorageVariants::interleaved;
              else
                index_storage_variants[dof_access_cell][i] =
                  IndexStorageVariants::full;
              index_kinds[static_cast<unsigned int>(
                index_storage_variants[dof_access_cell][i])]++;
            }

      // Step 4: Copy the interleaved indices into their own data structure
      for (unsigned int i = 0; i < irregular_cells.size(); ++i)
        if (index_storage_variants[dof_access_cell][i] ==
            IndexStorageVariants::interleaved)
          {
            if (n_vectorization_lanes_filled[dof_access_cell][i] <
                vectorization_length)
              {
                index_storage_variants[dof_access_cell][i] =
                  IndexStorageVariants::full;
                continue;
              }
            const unsigned int ndofs =
              dofs_per_cell[have_hp ? cell_active_fe_index[i] : 0];
            const unsigned int *dof_indices =
              &this->dof_indices
                 [row_starts[i * vectorization_length * n_components].first];
            unsigned int *interleaved_dof_indices =
              &this->dof_indices_interleaved
                 [row_starts[i * vectorization_length * n_components].first];
            AssertDimension(this->dof_indices.size(),
                            this->dof_indices_interleaved.size());
            AssertIndexRange(
              row_starts[i * vectorization_length * n_components].first,
              this->dof_indices_interleaved.size() + 1);
            AssertIndexRange(
              row_starts[i * vectorization_length * n_components].first +
                ndofs * vectorization_length,
              this->dof_indices_interleaved.size() + 1);
            for (unsigned int k = 0; k < ndofs; ++k)
              {
                const unsigned int *my_dof_indices = dof_indices + k;
                const unsigned int *end =
                  interleaved_dof_indices + vectorization_length;
                for (; interleaved_dof_indices != end;
                     ++interleaved_dof_indices, my_dof_indices += ndofs)
                  *interleaved_dof_indices = *my_dof_indices;
              }
          }
    }


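    // Build a hierarchy of partitioners with increasingly large ghost sets:
    // partitioner 0 covers the indices on locally owned cells, partitioners
    // 1 and 2 additionally the values respectively values and gradients on
    // inner faces, and partitioners 3 and 4 the same information on all
    // faces for cell-centric loops. Each loop type can then exchange only
    // the data it actually needs.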
    void
    DoFInfo::compute_tight_partitioners(
      const Table<2, ShapeInfo<double>>        &shape_info,
      const unsigned int                        n_owned_cells,
      const unsigned int                        n_lanes,
      const std::vector<FaceToCellTopology<1>> &inner_faces,
      const std::vector<FaceToCellTopology<1>> &ghosted_faces,
      const bool                                fill_cell_centric,
      const MPI_Comm                            communicator_sm,
      const bool                                use_vector_data_exchanger_full)
    {
      const Utilities::MPI::Partitioner &part = *vector_partitioner;

      // partitioner 0: no face integrals, simply use the indices present
      // on the cells
      std::vector<types::global_dof_index> ghost_indices;
      {
        const unsigned int n_components = start_components.back();
        for (unsigned int cell = 0; cell < n_owned_cells; ++cell)
          {
            for (unsigned int i = row_starts[cell * n_components].first;
                 i < row_starts[(cell + 1) * n_components].first;
                 ++i)
              if (dof_indices[i] >= part.locally_owned_size())
                ghost_indices.push_back(part.local_to_global(dof_indices[i]));

            const unsigned int fe_index =
              dofs_per_cell.size() == 1 ? 0 :
                                          cell_active_fe_index[cell / n_lanes];
            const unsigned int dofs_this_cell = dofs_per_cell[fe_index];

            for (unsigned int i = row_starts_plain_indices[cell];
                 i < row_starts_plain_indices[cell] + dofs_this_cell;
                 ++i)
              if (plain_dof_indices[i] >= part.locally_owned_size())
                ghost_indices.push_back(
                  part.local_to_global(plain_dof_indices[i]));
          }
        std::sort(ghost_indices.begin(), ghost_indices.end());
        IndexSet compressed_set(part.size());
        compressed_set.add_indices(ghost_indices.begin(), ghost_indices.end());
        compressed_set.subtract_set(part.locally_owned_range());
        const bool all_ghosts_equal =
          Utilities::MPI::min<int>(static_cast<int>(
                                     compressed_set.n_elements() ==
                                     part.ghost_indices().n_elements()),
                                   part.get_mpi_communicator()) != 0;

        std::shared_ptr<const Utilities::MPI::Partitioner> temp_0;

        if (all_ghosts_equal)
          temp_0 = vector_partitioner;
        else
          {
            temp_0 = std::make_shared<Utilities::MPI::Partitioner>(
              part.locally_owned_range(), part.get_mpi_communicator());
            const_cast<Utilities::MPI::Partitioner *>(temp_0.get())
              ->set_ghost_indices(compressed_set, part.ghost_indices());
          }

        if (use_vector_data_exchanger_full == false)
          vector_exchanger_face_variants[0] = std::make_shared<
            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
            temp_0);
        else
          vector_exchanger_face_variants[0] =
            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
              temp_0, communicator_sm);
      }

      // construct a numbering of faces
      std::vector<FaceToCellTopology<1>> all_faces(inner_faces);
      all_faces.insert(all_faces.end(),
                       ghosted_faces.begin(),
                       ghosted_faces.end());
      Table<2, unsigned int> cell_and_face_to_faces(
        (row_starts.size() - 1) / start_components.back(),
        2 * shape_info(0, 0).n_dimensions);
      cell_and_face_to_faces.fill(numbers::invalid_unsigned_int);
      for (unsigned int f = 0; f < all_faces.size(); ++f)
        {
          cell_and_face_to_faces(all_faces[f].cells_interior[0],
                                 all_faces[f].interior_face_no) = f;
          Assert(all_faces[f].cells_exterior[0] !=
                   numbers::invalid_unsigned_int,
                 ExcInternalError());
          cell_and_face_to_faces(all_faces[f].cells_exterior[0],
                                 all_faces[f].exterior_face_no) = f;
        }

      // lambda function to detect objects on face pairs
      const auto loop_over_faces =
        [&](const std::function<
            void(const unsigned int, const unsigned int, const bool)> &fu) {
          for (const auto &face : inner_faces)
            {
              AssertIndexRange(face.cells_interior[0], n_owned_cells);
              fu(face.cells_exterior[0], face.exterior_face_no, false /*flag*/);
            }
        };

      const auto loop_over_all_faces =
        [&](const std::function<
            void(const unsigned int, const unsigned int, const bool)> &fu) {
          for (unsigned int c = 0; c < cell_and_face_to_faces.size(0); ++c)
            for (unsigned int d = 0; d < cell_and_face_to_faces.size(1); ++d)
              {
                const unsigned int f = cell_and_face_to_faces(c, d);
                if (f == numbers::invalid_unsigned_int)
                  continue;

                const unsigned int cell_m = all_faces[f].cells_interior[0];
                const unsigned int cell_p = all_faces[f].cells_exterior[0];

                const bool ext = c == cell_m;

                if (ext && cell_p == numbers::invalid_unsigned_int)
                  continue;

                const unsigned int p       = ext ? cell_p : cell_m;
                const unsigned int face_no = ext ?
                                               all_faces[f].exterior_face_no :
                                               all_faces[f].interior_face_no;

                fu(p, face_no, true);
              }
        };

      const auto process_values =
        [&](
          std::shared_ptr<const Utilities::MPI::Partitioner>
            &vector_partitioner_values,
          const std::function<void(
            const std::function<void(
              const unsigned int, const unsigned int, const bool)> &)> &loop) {
          bool all_nodal_and_tensorial = shape_info.size(1) == 1;

          if (all_nodal_and_tensorial)
            for (unsigned int c = 0; c < n_base_elements; ++c)
              {
                const auto &si =
                  shape_info(global_base_element_offset + c, 0).data.front();
                if (!si.nodal_at_cell_boundaries ||
                    (si.element_type ==
                     MatrixFreeFunctions::tensor_symmetric_plus_dg0))
                  all_nodal_and_tensorial = false;
              }

          if (all_nodal_and_tensorial == false)
            vector_partitioner_values = vector_partitioner;
          else
            {
              bool has_noncontiguous_cell = false;

              loop([&](const unsigned int cell_no,
                       const unsigned int face_no,
                       const bool         flag) {
                const unsigned int index =
                  dof_indices_contiguous[dof_access_cell][cell_no];
                if (flag || (index != numbers::invalid_unsigned_int &&
                             index >= part.locally_owned_size()))
                  {
                    const unsigned int stride =
                      dof_indices_interleave_strides[dof_access_cell][cell_no];
                    unsigned int i = 0;
                    for (unsigned int e = 0; e < n_base_elements; ++e)
                      for (unsigned int c = 0; c < n_components[e]; ++c)
                        {
                          const ShapeInfo<double> &shape =
                            shape_info(global_base_element_offset + e, 0);
                          for (unsigned int j = 0;
                               j < shape.dofs_per_component_on_face;
                               ++j)
                            ghost_indices.push_back(part.local_to_global(
                              index + i +
                              shape.face_to_cell_index_nodal(face_no, j) *
                                stride));
                          i += shape.dofs_per_component_on_cell * stride;
                        }
                    AssertDimension(i, dofs_per_cell[0] * stride);
                  }
                else if (index == numbers::invalid_unsigned_int)
                  has_noncontiguous_cell = true;
              });
              has_noncontiguous_cell =
                Utilities::MPI::min<int>(static_cast<int>(
                                           has_noncontiguous_cell),
                                         part.get_mpi_communicator()) != 0;

              std::sort(ghost_indices.begin(), ghost_indices.end());
              IndexSet compressed_set(part.size());
              compressed_set.add_indices(ghost_indices.begin(),
                                         ghost_indices.end());
              compressed_set.subtract_set(part.locally_owned_range());
              const bool all_ghosts_equal =
                Utilities::MPI::min<int>(static_cast<int>(
                                           compressed_set.n_elements() ==
                                           part.ghost_indices().n_elements()),
                                         part.get_mpi_communicator()) != 0;
              if (all_ghosts_equal || has_noncontiguous_cell)
                vector_partitioner_values = vector_partitioner;
              else
                {
                  vector_partitioner_values =
                    std::make_shared<Utilities::MPI::Partitioner>(
                      part.locally_owned_range(), part.get_mpi_communicator());
                  const_cast<Utilities::MPI::Partitioner *>(
                    vector_partitioner_values.get())
                    ->set_ghost_indices(compressed_set, part.ghost_indices());
                }
            }
        };


      const auto process_gradients =
        [&](
          const std::shared_ptr<const Utilities::MPI::Partitioner>
            &vector_partitoner_values,
          std::shared_ptr<const Utilities::MPI::Partitioner>
            &vector_partitioner_gradients,
          const std::function<void(
            const std::function<void(
              const unsigned int, const unsigned int, const bool)> &)> &loop) {
          bool all_hermite = shape_info.size(1) == 1;

          if (all_hermite)
            for (unsigned int c = 0; c < n_base_elements; ++c)
              if (shape_info(global_base_element_offset + c, 0).element_type !=
                  MatrixFreeFunctions::tensor_symmetric_hermite)
                all_hermite = false;
          if (all_hermite == false ||
              vector_partitoner_values.get() == vector_partitioner.get())
            vector_partitioner_gradients = vector_partitioner;
          else
            {
              loop([&](const unsigned int cell_no,
                       const unsigned int face_no,
                       const bool         flag) {
                const unsigned int index =
                  dof_indices_contiguous[dof_access_cell][cell_no];
                if (flag || (index != numbers::invalid_unsigned_int &&
                             index >= part.locally_owned_size()))
                  {
                    const unsigned int stride =
                      dof_indices_interleave_strides[dof_access_cell][cell_no];
                    unsigned int i = 0;
                    for (unsigned int e = 0; e < n_base_elements; ++e)
                      for (unsigned int c = 0; c < n_components[e]; ++c)
                        {
                          const ShapeInfo<double> &shape =
                            shape_info(global_base_element_offset + e, 0);
                          for (unsigned int j = 0;
                               j < 2 * shape.dofs_per_component_on_face;
                               ++j)
                            ghost_indices.push_back(part.local_to_global(
                              index + i +
                              shape.face_to_cell_index_hermite(face_no, j) *
                                stride));
                          i += shape.dofs_per_component_on_cell * stride;
                        }
                    AssertDimension(i, dofs_per_cell[0] * stride);
                  }
              });
              std::sort(ghost_indices.begin(), ghost_indices.end());
              IndexSet compressed_set(part.size());
              compressed_set.add_indices(ghost_indices.begin(),
                                         ghost_indices.end());
              compressed_set.subtract_set(part.locally_owned_range());
              const bool all_ghosts_equal =
                Utilities::MPI::min<int>(static_cast<int>(
                                           compressed_set.n_elements() ==
                                           part.ghost_indices().n_elements()),
                                         part.get_mpi_communicator()) != 0;
              if (all_ghosts_equal)
                vector_partitioner_gradients = vector_partitioner;
              else
                {
                  vector_partitioner_gradients =
                    std::make_shared<Utilities::MPI::Partitioner>(
                      part.locally_owned_range(), part.get_mpi_communicator());
                  const_cast<Utilities::MPI::Partitioner *>(
                    vector_partitioner_gradients.get())
                    ->set_ghost_indices(compressed_set, part.ghost_indices());
                }
            }
        };

      std::shared_ptr<const Utilities::MPI::Partitioner> temp_1, temp_2, temp_3,
        temp_4;

      // partitioner 1: values on faces
      process_values(temp_1, loop_over_faces);

      // partitioner 2: values and gradients on faces
      process_gradients(temp_1, temp_2, loop_over_faces);

      if (fill_cell_centric)
        {
          ghost_indices.clear();
          // partitioner 3: values on all faces
          process_values(temp_3, loop_over_all_faces);
          // partitioner 4: values and gradients on faces
          process_gradients(temp_3, temp_4, loop_over_all_faces);
        }
      else
        {
          temp_3 = std::make_shared<Utilities::MPI::Partitioner>(
            part.locally_owned_range(), part.get_mpi_communicator());
          temp_4 = std::make_shared<Utilities::MPI::Partitioner>(
            part.locally_owned_range(), part.get_mpi_communicator());
        }

      if (use_vector_data_exchanger_full == false)
        {
          vector_exchanger_face_variants[1] = std::make_shared<
            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
            temp_1);
          vector_exchanger_face_variants[2] = std::make_shared<
            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
            temp_2);
          vector_exchanger_face_variants[3] = std::make_shared<
            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
            temp_3);
          vector_exchanger_face_variants[4] = std::make_shared<
            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
            temp_4);
        }
      else
        {
          vector_exchanger_face_variants[1] =
            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
              temp_1, communicator_sm);
          vector_exchanger_face_variants[2] =
            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
              temp_2, communicator_sm);
          vector_exchanger_face_variants[3] =
            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
              temp_3, communicator_sm);
          vector_exchanger_face_variants[4] =
            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
              temp_4, communicator_sm);
        }
    }


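    // Translate the cell-granular shared-memory indices (partner index and
    // cell number within that partner) into dof-granular ones by scaling the
    // cell number with the constant number of dofs per cell.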
    void
    DoFInfo::compute_shared_memory_contiguous_indices(
      std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
        &cell_indices_contiguous_sm)
    {
      AssertDimension(dofs_per_cell.size(), 1);

      for (unsigned int i = 0; i < 3; ++i)
        {
          dof_indices_contiguous_sm[i].resize(
            cell_indices_contiguous_sm[i].size());

          for (unsigned int j = 0; j < cell_indices_contiguous_sm[i].size();
               ++j)
            if (cell_indices_contiguous_sm[i][j].first !=
                numbers::invalid_unsigned_int)
              dof_indices_contiguous_sm[i][j] = {
                cell_indices_contiguous_sm[i][j].first,
                cell_indices_contiguous_sm[i][j].second * dofs_per_cell[0]};
            else
              dof_indices_contiguous_sm[i][j] = {
                numbers::invalid_unsigned_int, numbers::invalid_unsigned_int};
        }
    }



    namespace internal
    {
      // We construct the connectivity graph in parallel. we use one lock for
      // 256 degrees of freedom to keep the number of locks down to a
      // reasonable level and reduce the cost of locking to some extent.
      static constexpr unsigned int bucket_size_threading = 256;


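      // The three kernels below are applied to subranges of cells in
      // parallel: compute_row_lengths counts on how many cell blocks each
      // dof appears, fill_connectivity_dofs records these dof-to-cell
      // relations in a sparsity pattern, and fill_connectivity combines them
      // into cell-to-cell connectivity. Rows are protected by one mutex per
      // bucket of bucket_size_threading dofs; sorting the indices of a cell
      // first means each lock is taken only once per bucket range.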
      void
      compute_row_lengths(const unsigned int         begin,
                          const unsigned int         end,
                          const DoFInfo             &dof_info,
                          std::vector<std::mutex>   &mutexes,
                          std::vector<unsigned int> &row_lengths)
      {
        std::vector<unsigned int> scratch;
        const unsigned int n_components = dof_info.start_components.back();
        for (unsigned int block = begin; block < end; ++block)
          {
            scratch.assign(
              dof_info.dof_indices.data() +
                dof_info.row_starts[block * n_components].first,
              dof_info.dof_indices.data() +
                dof_info.row_starts[(block + 1) * n_components].first);
            std::sort(scratch.begin(), scratch.end());

            const std::vector<unsigned int>::const_iterator end_unique =
              std::unique(scratch.begin(), scratch.end());
            for (std::vector<unsigned int>::const_iterator it =
                   scratch.begin();
                 it != end_unique;
                 /* update in loop body */)
              {
                // In this code, the procedure is that we insert all elements
                // that are within the range of one lock at once
                const unsigned int next_bucket =
                  (*it / bucket_size_threading + 1) * bucket_size_threading;

                std::lock_guard<std::mutex> lock(
                  mutexes[*it / bucket_size_threading]);
                for (; it != end_unique && *it < next_bucket; ++it)
                  {
                    AssertIndexRange(*it, row_lengths.size());
                    ++row_lengths[*it];
                  }
              }
          }
      }



      void
      fill_connectivity_dofs(const unsigned int               begin,
                             const unsigned int               end,
                             const DoFInfo                   &dof_info,
                             const std::vector<unsigned int> &row_lengths,
                             std::vector<std::mutex>         &mutexes,
                             dealii::SparsityPattern         &connectivity_dof)
      {
        std::vector<unsigned int> scratch;
        const unsigned int n_components = dof_info.start_components.back();
        for (unsigned int block = begin; block < end; ++block)
          {
            scratch.assign(
              dof_info.dof_indices.data() +
                dof_info.row_starts[block * n_components].first,
              dof_info.dof_indices.data() +
                dof_info.row_starts[(block + 1) * n_components].first);
            std::sort(scratch.begin(), scratch.end());

            const std::vector<unsigned int>::const_iterator end_unique =
              std::unique(scratch.begin(), scratch.end());
            for (std::vector<unsigned int>::const_iterator it =
                   scratch.begin();
                 it != end_unique;
                 /* update in loop body */)
              {
                const unsigned int next_bucket =
                  (*it / bucket_size_threading + 1) * bucket_size_threading;

                std::lock_guard<std::mutex> lock(
                  mutexes[*it / bucket_size_threading]);
                for (; it != end_unique && *it < next_bucket; ++it)
                  if (row_lengths[*it] > 0)
                    connectivity_dof.add(*it, block);
              }
          }
      }



      void
      fill_connectivity(const unsigned int               begin,
                        const unsigned int               end,
                        const DoFInfo                   &dof_info,
                        const std::vector<unsigned int> &renumbering,
                        const dealii::SparsityPattern   &connectivity_dof,
                        DynamicSparsityPattern          &connectivity)
      {
        ordered_vector     row_entries;
        const unsigned int n_components = dof_info.start_components.back();
        for (unsigned int block = begin; block < end; ++block)
          {
            row_entries.clear();

            const unsigned int
              *it       = dof_info.dof_indices.data() +
                    dof_info.row_starts[block * n_components].first,
              *end_cell = dof_info.dof_indices.data() +
                          dof_info.row_starts[(block + 1) * n_components].first;
            for (; it != end_cell; ++it)
              {
                SparsityPattern::iterator sp = connectivity_dof.begin(*it);
                std::vector<types::global_dof_index>::iterator insert_pos =
                  row_entries.begin();
                for (; sp != connectivity_dof.end(*it); ++sp)
                  if (sp->column() != block)
                    row_entries.insert(renumbering[sp->column()], insert_pos);
              }
            connectivity.add_entries(renumbering[block],
                                     row_entries.begin(),
                                     row_entries.end());
          }
      }

    } // namespace internal

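    // Create the cell-to-cell connectivity graph used by the task scheduler:
    // two cells are connected whenever they share at least one degree of
    // freedom that appears on more than one cell.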
    void
    DoFInfo::make_connectivity_graph(
      const TaskInfo                  &task_info,
      const std::vector<unsigned int> &renumbering,
      DynamicSparsityPattern          &connectivity) const
    {
      unsigned int n_rows = (vector_partitioner->local_range().second -
                             vector_partitioner->local_range().first) +
                            vector_partitioner->ghost_indices().n_elements();

      // Avoid square sparsity patterns that allocate the diagonal entry
      if (n_rows == task_info.n_active_cells)
        ++n_rows;

      // first determine row lengths
      std::vector<unsigned int> row_lengths(n_rows);
      std::vector<std::mutex> mutexes(n_rows / internal::bucket_size_threading +
                                      1);
      parallel::apply_to_subranges(
        0,
        task_info.n_active_cells,
        [this, &mutexes, &row_lengths](const unsigned int begin,
                                       const unsigned int end) {
          internal::compute_row_lengths(
            begin, end, *this, mutexes, row_lengths);
        },
        20);

      // disregard dofs that only sit on a single cell because they cannot
      // couple
      for (unsigned int row = 0; row < n_rows; ++row)
        if (row_lengths[row] <= 1)
          row_lengths[row] = 0;

      // Create a temporary sparsity pattern that holds to each degree of
      // freedom on which cells it appears, i.e., store the connectivity
      // between cells and dofs
      SparsityPattern connectivity_dof(n_rows,
                                       task_info.n_active_cells,
                                       row_lengths);
      parallel::apply_to_subranges(
        0,
        task_info.n_active_cells,
        [this, &row_lengths, &mutexes, &connectivity_dof](
          const unsigned int begin, const unsigned int end) {
          internal::fill_connectivity_dofs(
            begin, end, *this, row_lengths, mutexes, connectivity_dof);
        },
        20);
      connectivity_dof.compress();


      // Invert renumbering for use in fill_connectivity.
      std::vector<unsigned int> reverse_numbering(task_info.n_active_cells);
      reverse_numbering = Utilities::invert_permutation(renumbering);

      // From the above connectivity between dofs and cells, we can finally
      // create a connectivity list between cells. The connectivity graph
      // should apply the renumbering, i.e., the entry for cell j is the entry
      // for cell renumbering[j] in the original ordering.
      parallel::apply_to_subranges(
        0,
        task_info.n_active_cells,
        [this, &reverse_numbering, &connectivity_dof, &connectivity](
          const unsigned int begin, const unsigned int end) {
          internal::fill_connectivity(begin,
                                      end,
                                      *this,
                                      reverse_numbering,
                                      connectivity_dof,
                                      connectivity);
        },
        20);
    }


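    // Propose a renumbering of the locally owned dofs that follows the order
    // in which the cell batches of a matrix-free loop first touch them; cell
    // batches with constraints are skipped in the first pass, and all dofs
    // not reached keep their relative order at the end. The result is
    // returned as global indices.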
    void
    DoFInfo::compute_dof_renumbering(
      std::vector<types::global_dof_index> &renumbering)
    {
      const unsigned int locally_owned_size =
        vector_partitioner->locally_owned_size();
      renumbering.resize(0);
      renumbering.resize(locally_owned_size, numbers::invalid_dof_index);

      types::global_dof_index counter      = 0;
      const unsigned int      n_components = start_components.back();
      const unsigned int      n_cell_batches =
        n_vectorization_lanes_filled[dof_access_cell].size();
      Assert(n_cell_batches <=
               (row_starts.size() - 1) / vectorization_length / n_components,
             ExcInternalError());
      for (unsigned int cell_no = 0; cell_no < n_cell_batches; ++cell_no)
        {
          // do not renumber in case we have constraints
          if (row_starts[cell_no * n_components * vectorization_length]
                .second ==
              row_starts[(cell_no + 1) * n_components * vectorization_length]
                .second)
            {
              const unsigned int ndofs =
                dofs_per_cell.size() == 1 ?
                  dofs_per_cell[0] :
                  (dofs_per_cell[cell_active_fe_index.size() > 0 ?
                                   cell_active_fe_index[cell_no] :
                                   0]);
              const unsigned int *dof_ind =
                dof_indices.data() +
                row_starts[cell_no * n_components * vectorization_length].first;
              for (unsigned int i = 0; i < ndofs; ++i)
                for (unsigned int j = 0;
                     j < n_vectorization_lanes_filled[dof_access_cell][cell_no];
                     ++j)
                  if (dof_ind[j * ndofs + i] < locally_owned_size)
                    if (renumbering[dof_ind[j * ndofs + i]] ==
                        numbers::invalid_dof_index)
                      renumbering[dof_ind[j * ndofs + i]] = counter++;
            }
        }

      AssertIndexRange(counter, locally_owned_size + 1);
      for (types::global_dof_index &dof_index : renumbering)
        if (dof_index == numbers::invalid_dof_index)
          dof_index = counter++;

      // transform indices to global index space
      for (types::global_dof_index &dof_index : renumbering)
        dof_index = vector_partitioner->local_to_global(dof_index);

      AssertDimension(counter, renumbering.size());
    }


    std::size_t
    DoFInfo::memory_consumption() const
    {
      std::size_t memory = sizeof(*this);
      for (const auto &storage : index_storage_variants)
        memory += storage.capacity() * sizeof(storage[0]);
      memory +=
        (row_starts.capacity() * sizeof(std::pair<unsigned int, unsigned int>));
      memory += MemoryConsumption::memory_consumption(dof_indices);
      memory += MemoryConsumption::memory_consumption(dof_indices_interleaved);
      memory += MemoryConsumption::memory_consumption(dof_indices_contiguous);
      memory +=
        MemoryConsumption::memory_consumption(dof_indices_contiguous_sm);
      memory +=
        MemoryConsumption::memory_consumption(dof_indices_interleave_strides);
      memory +=
        MemoryConsumption::memory_consumption(n_vectorization_lanes_filled);
      memory += MemoryConsumption::memory_consumption(
        hanging_node_constraint_masks_comp);
      memory +=
        MemoryConsumption::memory_consumption(hanging_node_constraint_masks);
      memory += MemoryConsumption::memory_consumption(constrained_dofs);
      memory += MemoryConsumption::memory_consumption(row_starts_plain_indices);
      memory += MemoryConsumption::memory_consumption(plain_dof_indices);
      memory += MemoryConsumption::memory_consumption(constraint_indicator);
      memory += MemoryConsumption::memory_consumption(*vector_partitioner);
      memory += MemoryConsumption::memory_consumption(n_components);
      memory += MemoryConsumption::memory_consumption(start_components);
      memory += MemoryConsumption::memory_consumption(component_to_base_index);
      memory +=
        MemoryConsumption::memory_consumption(component_dof_indices_offset);
      memory += MemoryConsumption::memory_consumption(dofs_per_cell);
      memory += MemoryConsumption::memory_consumption(dofs_per_face);
      memory += MemoryConsumption::memory_consumption(cell_active_fe_index);
      memory += MemoryConsumption::memory_consumption(fe_index_conversion);
      memory +=
        MemoryConsumption::memory_consumption(vector_zero_range_list_index);
      memory += MemoryConsumption::memory_consumption(vector_zero_range_list);
      memory += MemoryConsumption::memory_consumption(cell_loop_pre_list_index);
      memory += MemoryConsumption::memory_consumption(cell_loop_pre_list);
      memory +=
        MemoryConsumption::memory_consumption(cell_loop_post_list_index);
      memory += MemoryConsumption::memory_consumption(cell_loop_post_list);
      return memory;
    }
  } // namespace MatrixFreeFunctions
} // namespace internal

namespace internal
{
  namespace MatrixFreeFunctions
  {
    template void
    DoFInfo::read_dof_indices<double>(
      const std::vector<types::global_dof_index> &,
      const std::vector<types::global_dof_index> &,
      const bool,
      const dealii::AffineConstraints<double> &,
      const unsigned int,
      ConstraintValues<double> &,
      bool &);

    template void
    DoFInfo::read_dof_indices<float>(
      const std::vector<types::global_dof_index> &,
      const std::vector<types::global_dof_index> &,
      const bool,
      const dealii::AffineConstraints<float> &,
      const unsigned int,
      ConstraintValues<double> &,
      bool &);

    template bool
    DoFInfo::process_hanging_node_constraints<1>(
      const HangingNodes<1> &,
      const std::vector<std::vector<unsigned int>> &,
      const unsigned int,
      const TriaIterator<DoFCellAccessor<1, 1, false>> &,
      std::vector<types::global_dof_index> &);
    template bool
    DoFInfo::process_hanging_node_constraints<2>(
      const HangingNodes<2> &,
      const std::vector<std::vector<unsigned int>> &,
      const unsigned int,
      const TriaIterator<DoFCellAccessor<2, 2, false>> &,
      std::vector<types::global_dof_index> &);
    template bool
    DoFInfo::process_hanging_node_constraints<3>(
      const HangingNodes<3> &,
      const std::vector<std::vector<unsigned int>> &,
      const unsigned int,
      const TriaIterator<DoFCellAccessor<3, 3, false>> &,
      std::vector<types::global_dof_index> &);

    template void
    DoFInfo::compute_face_index_compression<1>(
      const std::vector<FaceToCellTopology<1>> &,
      bool);
    template void
    DoFInfo::compute_face_index_compression<2>(
      const std::vector<FaceToCellTopology<2>> &,
      bool);
    template void
    DoFInfo::compute_face_index_compression<4>(
      const std::vector<FaceToCellTopology<4>> &,
      bool);
    template void
    DoFInfo::compute_face_index_compression<8>(
      const std::vector<FaceToCellTopology<8>> &,
      bool);
    template void
    DoFInfo::compute_face_index_compression<16>(
      const std::vector<FaceToCellTopology<16>> &,
      bool);

    template void
    DoFInfo::compute_vector_zero_access_pattern<1>(
      const TaskInfo &,
      const std::vector<FaceToCellTopology<1>> &);
    template void
    DoFInfo::compute_vector_zero_access_pattern<2>(
      const TaskInfo &,
      const std::vector<FaceToCellTopology<2>> &);
    template void
    DoFInfo::compute_vector_zero_access_pattern<4>(
      const TaskInfo &,
      const std::vector<FaceToCellTopology<4>> &);
    template void
    DoFInfo::compute_vector_zero_access_pattern<8>(
      const TaskInfo &,
      const std::vector<FaceToCellTopology<8>> &);
    template void
    DoFInfo::compute_vector_zero_access_pattern<16>(
      const TaskInfo &,
      const std::vector<FaceToCellTopology<16>> &);

    template void
    DoFInfo::print_memory_consumption<std::ostream>(std::ostream &,
                                                    const TaskInfo &) const;
    template void
    DoFInfo::print_memory_consumption<ConditionalOStream>(
      ConditionalOStream &,
      const TaskInfo &) const;

    template void
    DoFInfo::print<double>(const std::vector<double> &,
                           const std::vector<unsigned int> &,
                           std::ostream &) const;

    template void
    DoFInfo::print<float>(const std::vector<float> &,
                          const std::vector<unsigned int> &,
                          std::ostream &) const;
  } // namespace MatrixFreeFunctions
} // namespace internal

DEAL_II_NAMESPACE_CLOSE