dof_info.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2020 - 2022 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/utilities.h>

#include <deal.II/matrix_free/dof_info.h>
#include <deal.II/matrix_free/dof_info.templates.h>

#include <iostream>

DEAL_II_NAMESPACE_OPEN
namespace internal
{
  namespace MatrixFreeFunctions
  {
    // ensure that the type defined in both dof_info.h and
    // hanging_nodes_internal.h is consistent
    static_assert(std::is_same<compressed_constraint_kind, std::uint8_t>::value,
                  "Unexpected type for compressed hanging node indicators!");



    DoFInfo::DoFInfo()
    {
      clear();
    }


    void
    DoFInfo::clear()
    {
      row_starts.clear();
      dof_indices.clear();
      constraint_indicator.clear();
      vector_partitioner.reset();
      ghost_dofs.clear();
      dofs_per_cell.clear();
      dofs_per_face.clear();
      vectorization_length       = 1;
      global_base_element_offset = 0;
      n_base_elements            = 0;
      n_components.clear();
      start_components.clear();
      row_starts_plain_indices.clear();
      plain_dof_indices.clear();
      dof_indices_interleaved.clear();
      for (unsigned int i = 0; i < 3; ++i)
        {
          index_storage_variants[i].clear();
          dof_indices_contiguous[i].clear();
          dof_indices_contiguous_sm[i].clear();
          dof_indices_interleave_strides[i].clear();
        }
      store_plain_indices = false;
      cell_active_fe_index.clear();
      max_fe_index = 0;
      fe_index_conversion.clear();
    }


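    // Note on the layout accessed below: for each cell batch, lane v, and
    // component c, row_starts[(cell * vectorization_length + v) *
    // n_components + c] stores a pair whose 'first' member points into
    // dof_indices and whose 'second' member points into constraint_indicator.
    // A cell's indices are the half-open range up to the next entry's
    // 'first'; equal 'second' members of consecutive entries indicate that
    // the range contains no constrained rows.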
    void
    DoFInfo::get_dof_indices_on_cell_batch(std::vector<unsigned int> &my_rows,
                                           const unsigned int         cell,
                                           const bool apply_constraints) const
    {
      const unsigned int n_fe_components = start_components.back();
      const unsigned int fe_index =
        dofs_per_cell.size() == 1 ? 0 : cell_active_fe_index[cell];
      const unsigned int dofs_this_cell = dofs_per_cell[fe_index];

      const unsigned int n_vectorization  = vectorization_length;
      constexpr auto     dof_access_index = dof_access_cell;
      AssertIndexRange(cell,
                       n_vectorization_lanes_filled[dof_access_index].size());
      const unsigned int n_vectorization_actual =
        n_vectorization_lanes_filled[dof_access_index][cell];

      // we might have constraints, so the final number
      // of indices is not known a priori.
      // conservatively reserve the maximum without constraints
      my_rows.reserve(n_vectorization * dofs_this_cell);
      my_rows.resize(0);
      unsigned int total_size = 0;
      for (unsigned int v = 0; v < n_vectorization_actual; ++v)
        {
          const unsigned int ib =
            (cell * n_vectorization + v) * n_fe_components;
          const unsigned int ie =
            (cell * n_vectorization + v + 1) * n_fe_components;

          // figure out constraints by comparing constraint_indicator row
          // shift for this cell within the block as compared to the next
          // one
          const bool has_constraints =
            (hanging_node_constraint_masks.size() != 0 &&
             hanging_node_constraint_masks_comp.size() != 0 &&
             hanging_node_constraint_masks[cell * n_vectorization + v] !=
               unconstrained_compressed_constraint_kind) ||
            (row_starts[ib].second != row_starts[ib + n_fe_components].second);

          auto do_copy = [&](const unsigned int *begin,
                             const unsigned int *end) {
            const unsigned int shift = total_size;
            total_size += (end - begin);
            my_rows.resize(total_size);
            std::copy(begin, end, my_rows.begin() + shift);
          };

          if (!has_constraints || apply_constraints)
            {
              const unsigned int *begin =
                dof_indices.data() + row_starts[ib].first;
              const unsigned int *end =
                dof_indices.data() + row_starts[ie].first;
              do_copy(begin, end);
            }
          else
            {
              Assert(row_starts_plain_indices[cell * n_vectorization + v] !=
                       numbers::invalid_unsigned_int,
                     ExcNotInitialized());
              const unsigned int *begin =
                plain_dof_indices.data() +
                row_starts_plain_indices[cell * n_vectorization + v];
              const unsigned int *end = begin + dofs_this_cell;
              do_copy(begin, end);
            }
        }
    }


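    // Illustrative example for the ghost compression performed below (not
    // taken from the library): if the ghost dofs recorded on boundary cells
    // are the global indices {105, 4, 106, 4}, the sorted unique set is
    // {4, 105, 106}, stored as the ranges [4, 5) and [105, 107) of an
    // IndexSet, and the local ghost numbering becomes 4 -> 0, 105 -> 1,
    // 106 -> 2, appended after the locally owned range.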
    void
    DoFInfo::assign_ghosts(const std::vector<unsigned int> &boundary_cells,
                           const MPI_Comm                   communicator_sm,
                           const bool use_vector_data_exchanger_full)
    {
      Assert(boundary_cells.size() < row_starts.size(), ExcInternalError());

      // sort ghost dofs and compress out duplicates
      const unsigned int n_owned  = (vector_partitioner->local_range().second -
                                    vector_partitioner->local_range().first);
      const std::size_t  n_ghosts = ghost_dofs.size();
#ifdef DEBUG
      for (const auto dof_index : dof_indices)
        AssertIndexRange(dof_index, n_owned + n_ghosts);
#endif

      const unsigned int        n_components = start_components.back();
      std::vector<unsigned int> ghost_numbering(n_ghosts);
      IndexSet                  ghost_indices(vector_partitioner->size());
      if (n_ghosts > 0)
        {
          unsigned int n_unique_ghosts = 0;
          // since we need to go back to the local_to_global indices and
          // replace the temporary numbering of ghosts by the real number in
          // the index set, we need to store these values
          std::vector<std::pair<types::global_dof_index, unsigned int>>
            ghost_origin(n_ghosts);
          for (std::size_t i = 0; i < n_ghosts; ++i)
            {
              ghost_origin[i].first  = ghost_dofs[i];
              ghost_origin[i].second = i;
            }
          std::sort(ghost_origin.begin(), ghost_origin.end());

          types::global_dof_index last_contiguous_start = ghost_origin[0].first;
          ghost_numbering[ghost_origin[0].second]       = 0;
          for (std::size_t i = 1; i < n_ghosts; ++i)
            {
              if (ghost_origin[i].first > ghost_origin[i - 1].first + 1)
                {
                  ghost_indices.add_range(last_contiguous_start,
                                          ghost_origin[i - 1].first + 1);
                  last_contiguous_start = ghost_origin[i].first;
                }
              if (ghost_origin[i].first > ghost_origin[i - 1].first)
                ++n_unique_ghosts;
              ghost_numbering[ghost_origin[i].second] = n_unique_ghosts;
            }
          ++n_unique_ghosts;
          ghost_indices.add_range(last_contiguous_start,
                                  ghost_origin.back().first + 1);
          ghost_indices.compress();

          // make sure that we got the correct local numbering of the ghost
          // dofs. the ghost index set should store the same number
          {
            AssertDimension(n_unique_ghosts, ghost_indices.n_elements());
            for (std::size_t i = 0; i < n_ghosts; ++i)
              Assert(ghost_numbering[i] ==
                       ghost_indices.index_within_set(ghost_dofs[i]),
                     ExcInternalError());
          }

          // apply correct numbering for ghost indices: We previously just
          // enumerated them according to their appearance in the
          // local_to_global structure. Above, we derived a relation between
          // this enumeration and the actual number
          const unsigned int n_boundary_cells = boundary_cells.size();
          for (unsigned int i = 0; i < n_boundary_cells; ++i)
            {
              unsigned int *data_ptr =
                dof_indices.data() +
                row_starts[boundary_cells[i] * n_components].first;
              const unsigned int *row_end =
                dof_indices.data() +
                row_starts[(boundary_cells[i] + 1) * n_components].first;
              for (; data_ptr != row_end; ++data_ptr)
                *data_ptr = ((*data_ptr < n_owned) ?
                               *data_ptr :
                               n_owned + ghost_numbering[*data_ptr - n_owned]);

              // now the same procedure for plain indices
              if (store_plain_indices == true)
                {
                  bool has_hanging_nodes = false;

                  const unsigned int fe_index =
                    (cell_active_fe_index.size() == 0 ||
                     dofs_per_cell.size() == 1) ?
                      0 :
                      cell_active_fe_index[boundary_cells[i]];
                  AssertIndexRange(fe_index, dofs_per_cell.size());

                  if (hanging_node_constraint_masks.size() > 0 &&
                      hanging_node_constraint_masks_comp.size() > 0 &&
                      hanging_node_constraint_masks[boundary_cells[i]] !=
                        unconstrained_compressed_constraint_kind)
                    for (unsigned int comp = 0; comp < n_components; ++comp)
                      has_hanging_nodes |=
                        hanging_node_constraint_masks_comp[fe_index][comp];

                  if (has_hanging_nodes ||
                      row_starts[boundary_cells[i] * n_components].second !=
                        row_starts[(boundary_cells[i] + 1) * n_components]
                          .second)
                    {
                      unsigned int *data_ptr =
                        plain_dof_indices.data() +
                        row_starts_plain_indices[boundary_cells[i]];
                      const unsigned int *row_end =
                        data_ptr + dofs_per_cell[fe_index];
                      for (; data_ptr != row_end; ++data_ptr)
                        *data_ptr =
                          ((*data_ptr < n_owned) ?
                             *data_ptr :
                             n_owned + ghost_numbering[*data_ptr - n_owned]);
                    }
                }
            }
        }

      std::vector<types::global_dof_index> empty;
      ghost_dofs.swap(empty);

      // set the ghost indices now. need to cast away constness here, but that
      // is uncritical since we reset the Partitioner in the same initialize
      // call as this call here.
      Utilities::MPI::Partitioner *vec_part =
        const_cast<Utilities::MPI::Partitioner *>(vector_partitioner.get());
      vec_part->set_ghost_indices(ghost_indices);

      if (use_vector_data_exchanger_full == false)
        vector_exchanger = std::make_shared<
          MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
          vector_partitioner);
      else
        vector_exchanger =
          std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
            vector_partitioner, communicator_sm);
    }


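    // In the following function, 'renumbering' maps a position in the new
    // cell order to the original cell number, and irregular_cells[i] holds
    // the number of filled lanes of batch i, with 0 denoting a completely
    // filled batch of vectorization_length cells.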
    void
    DoFInfo::reorder_cells(
      const TaskInfo &                  task_info,
      const std::vector<unsigned int> & renumbering,
      const std::vector<unsigned int> & constraint_pool_row_index,
      const std::vector<unsigned char> &irregular_cells)
    {
      (void)constraint_pool_row_index;

      // first reorder the active FE index.
      const bool have_hp = dofs_per_cell.size() > 1;
      if (cell_active_fe_index.size() > 0)
        {
          std::vector<unsigned int> new_active_fe_index;
          new_active_fe_index.reserve(task_info.cell_partition_data.back());
          unsigned int position_cell = 0;
          for (unsigned int cell = 0;
               cell < task_info.cell_partition_data.back();
               ++cell)
            {
              const unsigned int n_comp =
                (irregular_cells[cell] > 0 ? irregular_cells[cell] :
                                             vectorization_length);

              // take maximum FE index among the ones present (we might have
              // lumped some lower indices into higher ones)
              unsigned int fe_index =
                cell_active_fe_index[renumbering[position_cell]];
              for (unsigned int j = 1; j < n_comp; ++j)
                fe_index = std::max(
                  fe_index,
                  cell_active_fe_index[renumbering[position_cell + j]]);

              new_active_fe_index.push_back(fe_index);
              position_cell += n_comp;
            }
          std::swap(new_active_fe_index, cell_active_fe_index);
        }
      if (have_hp)
        AssertDimension(cell_active_fe_index.size(),
                        task_info.cell_partition_data.back());

      const unsigned int n_components = start_components.back();

      std::vector<std::pair<unsigned int, unsigned int>> new_row_starts(
        vectorization_length * n_components *
          task_info.cell_partition_data.back() +
        1);
      std::vector<unsigned int> new_dof_indices;
      std::vector<std::pair<unsigned short, unsigned short>>
        new_constraint_indicator;
      std::vector<unsigned int> new_plain_indices, new_rowstart_plain;
      unsigned int              position_cell = 0;
      new_dof_indices.reserve(dof_indices.size());
      new_constraint_indicator.reserve(constraint_indicator.size());

      std::vector<compressed_constraint_kind> new_hanging_node_constraint_masks;
      new_hanging_node_constraint_masks.reserve(
        hanging_node_constraint_masks.size());

      if (store_plain_indices == true)
        {
          new_rowstart_plain.resize(vectorization_length *
                                        task_info.cell_partition_data.back() +
                                      1,
                                    numbers::invalid_unsigned_int);
          new_plain_indices.reserve(plain_dof_indices.size());
        }

      // copy the indices and the constraint indicators to the new data field,
      // where we will go through the cells in the renumbered way. in case the
      // vectorization length does not exactly match up, we fill invalid
      // numbers to the rowstart data. for contiguous cell indices, we skip
      // the rowstarts field completely and directly go into the
      // new_dof_indices field (this layout is used in FEEvaluation).
      for (unsigned int i = 0; i < task_info.cell_partition_data.back(); ++i)
        {
          const unsigned int n_vect =
            (irregular_cells[i] > 0 ? irregular_cells[i] :
                                      vectorization_length);
          const unsigned int dofs_per_cell =
            have_hp ? this->dofs_per_cell[cell_active_fe_index[i]] :
                      this->dofs_per_cell[0];

          for (unsigned int j = 0; j < n_vect; ++j)
            {
              const unsigned int cell_no =
                renumbering[position_cell + j] * n_components;

              bool has_hanging_nodes = false;

              if (hanging_node_constraint_masks.size() > 0 &&
                  hanging_node_constraint_masks_comp.size() > 0)
                {
                  const auto mask =
                    hanging_node_constraint_masks[renumbering[position_cell +
                                                              j]];
                  new_hanging_node_constraint_masks.push_back(mask);

                  if (mask != unconstrained_compressed_constraint_kind)
                    for (unsigned int comp = 0; comp < n_components; ++comp)
                      has_hanging_nodes |= hanging_node_constraint_masks_comp
                        [have_hp ? cell_active_fe_index[i] : 0][comp];
                }

              for (unsigned int comp = 0; comp < n_components; ++comp)
                {
                  new_row_starts[(i * vectorization_length + j) * n_components +
                                 comp]
                    .first = new_dof_indices.size();
                  new_row_starts[(i * vectorization_length + j) * n_components +
                                 comp]
                    .second = new_constraint_indicator.size();

                  new_dof_indices.insert(
                    new_dof_indices.end(),
                    dof_indices.data() + row_starts[cell_no + comp].first,
                    dof_indices.data() + row_starts[cell_no + comp + 1].first);
                  for (unsigned int index = row_starts[cell_no + comp].second;
                       index != row_starts[cell_no + comp + 1].second;
                       ++index)
                    new_constraint_indicator.push_back(
                      constraint_indicator[index]);
                }
              if (store_plain_indices &&
                  ((row_starts[cell_no].second !=
                    row_starts[cell_no + n_components].second) ||
                   has_hanging_nodes))
                {
                  new_rowstart_plain[i * vectorization_length + j] =
                    new_plain_indices.size();
                  new_plain_indices.insert(
                    new_plain_indices.end(),
                    plain_dof_indices.data() +
                      row_starts_plain_indices[renumbering[position_cell + j]],
                    plain_dof_indices.data() +
                      row_starts_plain_indices[renumbering[position_cell + j]] +
                      dofs_per_cell);
                }
            }
          for (unsigned int j = n_vect; j < vectorization_length; ++j)
            for (unsigned int comp = 0; comp < n_components; ++comp)
              {
                new_row_starts[(i * vectorization_length + j) * n_components +
                               comp]
                  .first = new_dof_indices.size();
                new_row_starts[(i * vectorization_length + j) * n_components +
                               comp]
                  .second = new_constraint_indicator.size();
              }

          for (unsigned int j = n_vect; j < vectorization_length; ++j)
            if (hanging_node_constraint_masks.size() > 0)
              new_hanging_node_constraint_masks.push_back(
                unconstrained_compressed_constraint_kind);

          position_cell += n_vect;
        }
      AssertDimension(position_cell * n_components + 1, row_starts.size());

      AssertDimension(dof_indices.size(), new_dof_indices.size());
      new_row_starts[task_info.cell_partition_data.back() *
                     vectorization_length * n_components]
        .first = new_dof_indices.size();
      new_row_starts[task_info.cell_partition_data.back() *
                     vectorization_length * n_components]
        .second = new_constraint_indicator.size();

      AssertDimension(constraint_indicator.size(),
                      new_constraint_indicator.size());

      new_row_starts.swap(row_starts);
      new_dof_indices.swap(dof_indices);
      new_constraint_indicator.swap(constraint_indicator);
      new_plain_indices.swap(plain_dof_indices);
      new_rowstart_plain.swap(row_starts_plain_indices);
      new_hanging_node_constraint_masks.swap(hanging_node_constraint_masks);

#ifdef DEBUG
      // sanity check 1: all indices should be smaller than the number of dofs
      // locally owned plus the number of ghosts
      const unsigned int index_range =
        (vector_partitioner->local_range().second -
         vector_partitioner->local_range().first) +
        vector_partitioner->ghost_indices().n_elements();
      for (const auto dof_index : dof_indices)
        AssertIndexRange(dof_index, index_range);

      // sanity check 2: for the constraint indicators, the first index should
      // be smaller than the number of indices in the row, and the second
      // index should be smaller than the number of constraints in the
      // constraint pool.
      for (unsigned int row = 0; row < task_info.cell_partition_data.back();
           ++row)
        {
          const unsigned int row_length_ind =
            row_starts[(row * vectorization_length + 1) * n_components].first -
            row_starts[row * vectorization_length * n_components].first;
          AssertIndexRange(
            row_starts[(row * vectorization_length + 1) * n_components].second,
            constraint_indicator.size() + 1);
          const std::pair<unsigned short, unsigned short>
            *con_it =
               constraint_indicator.data() +
               row_starts[row * vectorization_length * n_components].second,
            *end_con =
               constraint_indicator.data() +
               row_starts[(row * vectorization_length + 1) * n_components]
                 .second;
          for (; con_it != end_con; ++con_it)
            {
              AssertIndexRange(con_it->first, row_length_ind + 1);
              AssertIndexRange(con_it->second,
                               constraint_pool_row_index.size() - 1);
            }
        }

      // sanity check 3: check the number of cells once again
      unsigned int n_active_cells = 0;
      for (unsigned int c = 0; c < *(task_info.cell_partition_data.end() - 2);
           ++c)
        if (irregular_cells[c] > 0)
          n_active_cells += irregular_cells[c];
        else
          n_active_cells += vectorization_length;
      AssertDimension(n_active_cells, task_info.n_active_cells);
#endif

      compute_cell_index_compression(irregular_cells);
    }


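    // The function below classifies every cell batch by the cheapest index
    // storage variant it admits. Illustrative example with 2 lanes and 3
    // dofs per cell: lane indices {20, 21, 22 | 35, 36, 37} are contiguous
    // (each lane is a consecutive run, so only the start index per lane
    // needs to be stored); {40, 42, 44 | 50, 52, 54} is interleaved with
    // stride 2 (lane j reads start_j + k * stride); batches without such
    // structure remain in 'full' storage.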
    void
    DoFInfo::compute_cell_index_compression(
      const std::vector<unsigned char> &irregular_cells)
    {
      const bool         have_hp      = dofs_per_cell.size() > 1;
      const unsigned int n_components = start_components.back();

      Assert(vectorization_length == 1 ||
               row_starts.size() % vectorization_length == 1,
             ExcInternalError());
      if (vectorization_length > 1)
        AssertDimension(row_starts.size() / vectorization_length /
                          n_components,
                        irregular_cells.size());
      index_storage_variants[dof_access_cell].resize(
        irregular_cells.size(), IndexStorageVariants::full);
      n_vectorization_lanes_filled[dof_access_cell].resize(
        irregular_cells.size());
      for (unsigned int i = 0; i < irregular_cells.size(); ++i)
        if (irregular_cells[i] > 0)
          n_vectorization_lanes_filled[dof_access_cell][i] = irregular_cells[i];
        else
          n_vectorization_lanes_filled[dof_access_cell][i] =
            vectorization_length;

      dof_indices_contiguous[dof_access_cell].resize(
        irregular_cells.size() * vectorization_length,
        numbers::invalid_unsigned_int);
      dof_indices_interleaved.resize(dof_indices.size(),
                                     numbers::invalid_unsigned_int);
      dof_indices_interleave_strides[dof_access_cell].resize(
        irregular_cells.size() * vectorization_length,
        numbers::invalid_unsigned_int);

      std::vector<unsigned int> index_kinds(
        static_cast<unsigned int>(
          IndexStorageVariants::interleaved_contiguous_mixed_strides) +
        1);
      std::vector<unsigned int> offsets(vectorization_length);
      for (unsigned int i = 0; i < irregular_cells.size(); ++i)
        {
          const unsigned int ndofs =
            dofs_per_cell[have_hp ? cell_active_fe_index[i] : 0];
          const unsigned int n_comp =
            n_vectorization_lanes_filled[dof_access_cell][i];

          // check 1: Check if there are constraints -> no compression possible
          bool has_constraints = false;
          for (unsigned int j = 0; j < n_comp; ++j)
            {
              const unsigned int cell_no = i * vectorization_length + j;
              if (row_starts[cell_no * n_components].second !=
                  row_starts[(cell_no + 1) * n_components].second)
                {
                  has_constraints = true;
                  break;
                }
            }
          if (has_constraints)
            index_storage_variants[dof_access_cell][i] =
              IndexStorageVariants::full;
          else
            {
              bool indices_are_contiguous = true;
              for (unsigned int j = 0; j < n_comp; ++j)
                {
                  const unsigned int  cell_no = i * vectorization_length + j;
                  const unsigned int *dof_indices =
                    this->dof_indices.data() +
                    row_starts[cell_no * n_components].first;
                  AssertDimension(
                    ndofs,
                    row_starts[(cell_no + 1) * n_components].first -
                      row_starts[cell_no * n_components].first);
                  for (unsigned int i = 1; i < ndofs; ++i)
                    if (dof_indices[i] != dof_indices[0] + i)
                      {
                        indices_are_contiguous = false;
                        break;
                      }
                }

              bool indices_are_interleaved_and_contiguous =
                (ndofs > 1 && n_comp == vectorization_length);

              {
                const unsigned int *dof_indices =
                  this->dof_indices.data() +
                  row_starts[i * vectorization_length * n_components].first;
                for (unsigned int k = 0;
                     k < ndofs && indices_are_interleaved_and_contiguous;
                     ++k)
                  for (unsigned int j = 0; j < n_comp; ++j)
                    if (dof_indices[j * ndofs + k] !=
                        dof_indices[0] + k * n_comp + j)
                      {
                        indices_are_interleaved_and_contiguous = false;
                        break;
                      }
              }

              if (indices_are_contiguous ||
                  indices_are_interleaved_and_contiguous)
                {
                  for (unsigned int j = 0; j < n_comp; ++j)
                    dof_indices_contiguous
                      [dof_access_cell][i * vectorization_length + j] =
                        this->dof_indices.size() == 0 ?
                          0 :
                          this->dof_indices
                            [row_starts[(i * vectorization_length + j) *
                                        n_components]
                               .first];
                }

              if (indices_are_interleaved_and_contiguous)
                {
                  AssertDimension(n_comp, vectorization_length);
                  index_storage_variants[dof_access_cell][i] =
                    IndexStorageVariants::interleaved_contiguous;
                  for (unsigned int j = 0; j < n_comp; ++j)
                    dof_indices_interleave_strides[2][i * vectorization_length +
                                                      j] = n_comp;
                }
              else if (indices_are_contiguous)
                {
                  index_storage_variants[dof_access_cell][i] =
                    IndexStorageVariants::contiguous;
                  for (unsigned int j = 0; j < n_comp; ++j)
                    dof_indices_interleave_strides[2][i * vectorization_length +
                                                      j] = 1;
                }
              else
                {
                  int                 indices_are_interleaved_and_mixed = 2;
                  const unsigned int *dof_indices =
                    &this->dof_indices[row_starts[i * vectorization_length *
                                                  n_components]
                                         .first];
                  for (unsigned int j = 0; j < n_comp; ++j)
                    offsets[j] =
                      dof_indices[j * ndofs + 1] - dof_indices[j * ndofs];
                  for (unsigned int k = 0;
                       k < ndofs && indices_are_interleaved_and_mixed != 0;
                       ++k)
                    for (unsigned int j = 0; j < n_comp; ++j)
                      // the first if case is to avoid negative offsets
                      // (invalid)
                      if (dof_indices[j * ndofs + 1] < dof_indices[j * ndofs] ||
                          dof_indices[j * ndofs + k] !=
                            dof_indices[j * ndofs] + k * offsets[j])
                        {
                          indices_are_interleaved_and_mixed = 0;
                          break;
                        }
                  if (indices_are_interleaved_and_mixed == 2)
                    {
                      for (unsigned int j = 0; j < n_comp; ++j)
                        dof_indices_interleave_strides
                          [dof_access_cell][i * vectorization_length + j] =
                            offsets[j];
                      for (unsigned int j = 0; j < n_comp; ++j)
                        dof_indices_contiguous[dof_access_cell]
                                              [i * vectorization_length + j] =
                                                dof_indices[j * ndofs];
                      for (unsigned int j = 0; j < n_comp; ++j)
                        if (offsets[j] != vectorization_length)
                          {
                            indices_are_interleaved_and_mixed = 1;
                            break;
                          }
                      if (indices_are_interleaved_and_mixed == 1 ||
                          n_comp != vectorization_length)
                        index_storage_variants[dof_access_cell][i] =
                          IndexStorageVariants::
                            interleaved_contiguous_mixed_strides;
                      else
                        index_storage_variants[dof_access_cell][i] =
                          IndexStorageVariants::interleaved_contiguous_strided;
                    }
                  else
                    {
                      const unsigned int *dof_indices =
                        this->dof_indices.data() +
                        row_starts[i * vectorization_length * n_components]
                          .first;
                      if (n_comp == vectorization_length)
                        index_storage_variants[dof_access_cell][i] =
                          IndexStorageVariants::interleaved;
                      else
                        index_storage_variants[dof_access_cell][i] =
                          IndexStorageVariants::full;

                      // do not use interleaved storage if two entries within
                      // vectorized array point to the same index (scatter not
                      // possible due to race condition)
                      for (const unsigned int *indices = dof_indices;
                           indices != dof_indices + ndofs;
                           ++indices)
                        {
                          bool         is_sorted = true;
                          unsigned int previous  = indices[0];
                          for (unsigned int l = 1; l < n_comp; ++l)
                            {
                              const unsigned int current = indices[l * ndofs];
                              if (current <= previous)
                                is_sorted = false;

                              // the simple check failed, must compare all
                              // indices manually - due to short sizes this
                              // O(n^2) algorithm is better than sorting
                              if (!is_sorted)
                                for (unsigned int j = 0; j < l; ++j)
                                  if (indices[j * ndofs] == current)
                                    {
                                      index_storage_variants
                                        [dof_access_cell][i] =
                                          IndexStorageVariants::full;
                                      break;
                                    }
                              previous = current;
                            }
                        }
                    }
                }
            }
          index_kinds[static_cast<unsigned int>(
            index_storage_variants[dof_access_cell][i])]++;
        }

      // Cleanup phase: we want to avoid single cells with different properties
      // than the bulk of the domain in order to avoid extra checks in the face
      // identification.

      // Step 1: check whether the interleaved indices were only assigned to
      // the single cell within a vectorized array.
      auto fix_single_interleaved_indices =
        [&](const IndexStorageVariants variant) {
          if (index_kinds[static_cast<unsigned int>(
                IndexStorageVariants::interleaved_contiguous_mixed_strides)] >
                0 &&
              index_kinds[static_cast<unsigned int>(variant)] > 0)
            for (unsigned int i = 0; i < irregular_cells.size(); ++i)
              {
                if (index_storage_variants[dof_access_cell][i] ==
                      IndexStorageVariants::
                        interleaved_contiguous_mixed_strides &&
                    n_vectorization_lanes_filled[dof_access_cell][i] == 1 &&
                    (variant != IndexStorageVariants::contiguous ||
                     dof_indices_interleave_strides[dof_access_cell]
                                                   [i * vectorization_length] ==
                       1))
                  {
                    index_storage_variants[dof_access_cell][i] = variant;
                    index_kinds[static_cast<unsigned int>(
                      IndexStorageVariants::
                        interleaved_contiguous_mixed_strides)]--;
                    index_kinds[static_cast<unsigned int>(variant)]++;
                  }
              }
        };

      fix_single_interleaved_indices(IndexStorageVariants::full);
      fix_single_interleaved_indices(IndexStorageVariants::contiguous);
      fix_single_interleaved_indices(IndexStorageVariants::interleaved);

      unsigned int n_interleaved =
        index_kinds[static_cast<unsigned int>(
          IndexStorageVariants::interleaved_contiguous)] +
        index_kinds[static_cast<unsigned int>(
          IndexStorageVariants::interleaved_contiguous_strided)] +
        index_kinds[static_cast<unsigned int>(
          IndexStorageVariants::interleaved_contiguous_mixed_strides)];

      // Step 2: fix single contiguous cell among others with interleaved
      // storage
      if (n_interleaved > 0 && index_kinds[static_cast<unsigned int>(
                                 IndexStorageVariants::contiguous)] == 1)
        for (unsigned int i = 0; i < irregular_cells.size(); ++i)
          if (index_storage_variants[dof_access_cell][i] ==
              IndexStorageVariants::contiguous)
            {
              index_storage_variants[dof_access_cell][i] =
                IndexStorageVariants::interleaved_contiguous_mixed_strides;
              index_kinds[static_cast<unsigned int>(
                IndexStorageVariants::contiguous)]--;
              index_kinds[static_cast<unsigned int>(
                IndexStorageVariants::interleaved_contiguous_mixed_strides)]++;
            }

      // Step 3: Interleaved cells are left but also some non-contiguous ones
      // -> revert all to full storage
      if (n_interleaved > 0 &&
          index_kinds[static_cast<unsigned int>(IndexStorageVariants::full)] +
              index_kinds[static_cast<unsigned int>(
                IndexStorageVariants::interleaved)] >
            0)
        for (unsigned int i = 0; i < irregular_cells.size(); ++i)
          if (index_storage_variants[dof_access_cell][i] >
              IndexStorageVariants::contiguous)
            {
              index_kinds[static_cast<unsigned int>(
                index_storage_variants[2][i])]--;
              if (n_vectorization_lanes_filled[dof_access_cell][i] ==
                  vectorization_length)
                index_storage_variants[dof_access_cell][i] =
                  IndexStorageVariants::interleaved;
              else
                index_storage_variants[dof_access_cell][i] =
                  IndexStorageVariants::full;
              index_kinds[static_cast<unsigned int>(
                index_storage_variants[dof_access_cell][i])]++;
            }

      // Step 4: Copy the interleaved indices into their own data structure
      for (unsigned int i = 0; i < irregular_cells.size(); ++i)
        if (index_storage_variants[dof_access_cell][i] ==
            IndexStorageVariants::interleaved)
          {
            if (n_vectorization_lanes_filled[dof_access_cell][i] <
                vectorization_length)
              {
                index_storage_variants[dof_access_cell][i] =
                  IndexStorageVariants::full;
                continue;
              }
            const unsigned int ndofs =
              dofs_per_cell[have_hp ? cell_active_fe_index[i] : 0];
            const unsigned int *dof_indices =
              &this->dof_indices
                 [row_starts[i * vectorization_length * n_components].first];
            unsigned int *interleaved_dof_indices =
              &this->dof_indices_interleaved
                 [row_starts[i * vectorization_length * n_components].first];
            AssertDimension(this->dof_indices.size(),
                            this->dof_indices_interleaved.size());
            AssertIndexRange(
              row_starts[i * vectorization_length * n_components].first,
              this->dof_indices_interleaved.size() + 1);
            AssertIndexRange(
              row_starts[i * vectorization_length * n_components].first +
                ndofs * vectorization_length,
              this->dof_indices_interleaved.size() + 1);
            for (unsigned int k = 0; k < ndofs; ++k)
              {
                const unsigned int *my_dof_indices = dof_indices + k;
                const unsigned int *end =
                  interleaved_dof_indices + vectorization_length;
                for (; interleaved_dof_indices != end;
                     ++interleaved_dof_indices, my_dof_indices += ndofs)
                  *interleaved_dof_indices = *my_dof_indices;
              }
          }
    }


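    // The following function builds a hierarchy of ghost index sets, stored
    // in vector_exchanger_face_variants: the plain cell partitioner (0),
    // values on inner faces (1), values and gradients on inner faces (2),
    // and, if requested, the cell-centric variants over all faces (3 and 4).
    // A tighter set contains only the dofs actually touched by the
    // respective face evaluation, which reduces the amount of MPI
    // communication.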
    void
    DoFInfo::compute_tight_partitioners(
      const Table<2, ShapeInfo<double>> &       shape_info,
      const unsigned int                        n_owned_cells,
      const unsigned int                        n_lanes,
      const std::vector<FaceToCellTopology<1>> &inner_faces,
      const std::vector<FaceToCellTopology<1>> &ghosted_faces,
      const bool                                fill_cell_centric,
      const MPI_Comm                            communicator_sm,
      const bool use_vector_data_exchanger_full)
    {
      const Utilities::MPI::Partitioner &part = *vector_partitioner;

      // partitioner 0: no face integrals, simply use the indices present
      // on the cells
      std::vector<types::global_dof_index> ghost_indices;
      {
        const unsigned int n_components = start_components.back();
        for (unsigned int cell = 0; cell < n_owned_cells; ++cell)
          {
            for (unsigned int i = row_starts[cell * n_components].first;
                 i < row_starts[(cell + 1) * n_components].first;
                 ++i)
              if (dof_indices[i] >= part.locally_owned_size())
                ghost_indices.push_back(part.local_to_global(dof_indices[i]));

            const unsigned int fe_index =
              dofs_per_cell.size() == 1 ? 0 :
                                          cell_active_fe_index[cell / n_lanes];
            const unsigned int dofs_this_cell = dofs_per_cell[fe_index];

            for (unsigned int i = row_starts_plain_indices[cell];
                 i < row_starts_plain_indices[cell] + dofs_this_cell;
                 ++i)
              if (plain_dof_indices[i] >= part.locally_owned_size())
                ghost_indices.push_back(
                  part.local_to_global(plain_dof_indices[i]));
          }
        std::sort(ghost_indices.begin(), ghost_indices.end());
        ghost_indices.erase(std::unique(ghost_indices.begin(),
                                        ghost_indices.end()),
                            ghost_indices.end());
        IndexSet compressed_set(part.size());
        compressed_set.add_indices(ghost_indices.begin(), ghost_indices.end());
        compressed_set.subtract_set(part.locally_owned_range());
        const bool all_ghosts_equal =
          Utilities::MPI::min<int>(static_cast<int>(
                                     compressed_set.n_elements() ==
                                     part.ghost_indices().n_elements()),
                                   part.get_mpi_communicator()) != 0;

        std::shared_ptr<const Utilities::MPI::Partitioner> temp_0;

        if (all_ghosts_equal)
          temp_0 = vector_partitioner;
        else
          {
            temp_0 = std::make_shared<Utilities::MPI::Partitioner>(
              part.locally_owned_range(), part.get_mpi_communicator());
            const_cast<Utilities::MPI::Partitioner *>(temp_0.get())
              ->set_ghost_indices(compressed_set, part.ghost_indices());
          }

        if (use_vector_data_exchanger_full == false)
          vector_exchanger_face_variants[0] = std::make_shared<
            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
            temp_0);
        else
          vector_exchanger_face_variants[0] =
            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
              temp_0, communicator_sm);
      }

      // construct a numbering of faces
      std::vector<FaceToCellTopology<1>> all_faces(inner_faces);
      all_faces.insert(all_faces.end(),
                       ghosted_faces.begin(),
                       ghosted_faces.end());
      Table<2, unsigned int> cell_and_face_to_faces(
        (row_starts.size() - 1) / start_components.back(),
        2 * shape_info(0, 0).n_dimensions);
      cell_and_face_to_faces.fill(numbers::invalid_unsigned_int);
      for (unsigned int f = 0; f < all_faces.size(); ++f)
        {
          cell_and_face_to_faces(all_faces[f].cells_interior[0],
                                 all_faces[f].interior_face_no) = f;
          Assert(all_faces[f].cells_exterior[0] !=
                   numbers::invalid_unsigned_int,
                 ExcInternalError());
          cell_and_face_to_faces(all_faces[f].cells_exterior[0],
                                 all_faces[f].exterior_face_no) = f;
        }

      // lambda function to detect objects on face pairs
      const auto loop_over_faces =
        [&](const std::function<
            void(const unsigned int, const unsigned int, const bool)> &fu) {
          for (const auto &face : inner_faces)
            {
              AssertIndexRange(face.cells_interior[0], n_owned_cells);
              fu(face.cells_exterior[0], face.exterior_face_no, false /*flag*/);
            }
        };

      const auto loop_over_all_faces =
        [&](const std::function<
            void(const unsigned int, const unsigned int, const bool)> &fu) {
          for (unsigned int c = 0; c < cell_and_face_to_faces.size(0); ++c)
            for (unsigned int d = 0; d < cell_and_face_to_faces.size(1); ++d)
              {
                const unsigned int f = cell_and_face_to_faces(c, d);
                if (f == numbers::invalid_unsigned_int)
                  continue;

                const unsigned int cell_m = all_faces[f].cells_interior[0];
                const unsigned int cell_p = all_faces[f].cells_exterior[0];

                const bool ext = c == cell_m;

                if (ext && cell_p == numbers::invalid_unsigned_int)
                  continue;

                const unsigned int p       = ext ? cell_p : cell_m;
                const unsigned int face_no = ext ?
                                               all_faces[f].exterior_face_no :
                                               all_faces[f].interior_face_no;

                fu(p, face_no, true);
              }
        };

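      // For elements whose shape functions are nodal at the cell boundary,
      // a face evaluation only touches the dofs located on that face. The
      // process_values lambda below exploits this: for each relevant face
      // it collects only the indices selected by face_to_cell_index_nodal
      // instead of all dofs of the neighboring cell; process_gradients does
      // the same with the two index layers of face_to_cell_index_hermite.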
      const auto process_values =
        [&](
          std::shared_ptr<const Utilities::MPI::Partitioner>
            &vector_partitioner_values,
          const std::function<void(
            const std::function<void(
              const unsigned int, const unsigned int, const bool)> &)> &loop) {
          bool all_nodal_and_tensorial = shape_info.size(1) == 1;

          if (all_nodal_and_tensorial)
            for (unsigned int c = 0; c < n_base_elements; ++c)
              {
                const auto &si =
                  shape_info(global_base_element_offset + c, 0).data.front();
                if (!si.nodal_at_cell_boundaries ||
                    (si.element_type ==
                     MatrixFreeFunctions::tensor_symmetric_plus_dg0))
                  all_nodal_and_tensorial = false;
              }

          if (all_nodal_and_tensorial == false)
            vector_partitioner_values = vector_partitioner;
          else
            {
              bool has_noncontiguous_cell = false;

              loop([&](const unsigned int cell_no,
                       const unsigned int face_no,
                       const bool         flag) {
                const unsigned int index =
                  dof_indices_contiguous[dof_access_cell][cell_no];
                if (flag || (index != numbers::invalid_unsigned_int &&
                             index >= part.locally_owned_size()))
                  {
                    const unsigned int stride =
                      dof_indices_interleave_strides[dof_access_cell][cell_no];
                    unsigned int i = 0;
                    for (unsigned int e = 0; e < n_base_elements; ++e)
                      for (unsigned int c = 0; c < n_components[e]; ++c)
                        {
                          const ShapeInfo<double> &shape =
                            shape_info(global_base_element_offset + e, 0);
                          for (unsigned int j = 0;
                               j < shape.dofs_per_component_on_face;
                               ++j)
                            ghost_indices.push_back(part.local_to_global(
                              index + i +
                              shape.face_to_cell_index_nodal(face_no, j) *
                                stride));
                          i += shape.dofs_per_component_on_cell * stride;
                        }
                    AssertDimension(i, dofs_per_cell[0] * stride);
                  }
                else if (index == numbers::invalid_unsigned_int)
                  has_noncontiguous_cell = true;
              });
              has_noncontiguous_cell =
                Utilities::MPI::min<int>(static_cast<int>(
                                           has_noncontiguous_cell),
                                         part.get_mpi_communicator()) != 0;

              std::sort(ghost_indices.begin(), ghost_indices.end());
              ghost_indices.erase(std::unique(ghost_indices.begin(),
                                              ghost_indices.end()),
                                  ghost_indices.end());
              IndexSet compressed_set(part.size());
              compressed_set.add_indices(ghost_indices.begin(),
                                         ghost_indices.end());
              compressed_set.subtract_set(part.locally_owned_range());
              const bool all_ghosts_equal =
                Utilities::MPI::min<int>(static_cast<int>(
                                           compressed_set.n_elements() ==
                                           part.ghost_indices().n_elements()),
                                         part.get_mpi_communicator()) != 0;
              if (all_ghosts_equal || has_noncontiguous_cell)
                vector_partitioner_values = vector_partitioner;
              else
                {
                  vector_partitioner_values =
                    std::make_shared<Utilities::MPI::Partitioner>(
                      part.locally_owned_range(), part.get_mpi_communicator());
                  const_cast<Utilities::MPI::Partitioner *>(
                    vector_partitioner_values.get())
                    ->set_ghost_indices(compressed_set, part.ghost_indices());
                }
            }
        };


      const auto process_gradients =
        [&](
          const std::shared_ptr<const Utilities::MPI::Partitioner>
            &vector_partitioner_values,
          std::shared_ptr<const Utilities::MPI::Partitioner>
            &vector_partitioner_gradients,
          const std::function<void(
            const std::function<void(
              const unsigned int, const unsigned int, const bool)> &)> &loop) {
          bool all_hermite = shape_info.size(1) == 1;

          if (all_hermite)
            for (unsigned int c = 0; c < n_base_elements; ++c)
              if (shape_info(global_base_element_offset + c, 0).element_type !=
                  MatrixFreeFunctions::tensor_symmetric_hermite)
                all_hermite = false;
          if (all_hermite == false ||
              vector_partitioner_values.get() == vector_partitioner.get())
            vector_partitioner_gradients = vector_partitioner;
          else
            {
              loop([&](const unsigned int cell_no,
                       const unsigned int face_no,
                       const bool         flag) {
                const unsigned int index =
                  dof_indices_contiguous[dof_access_cell][cell_no];
                if (flag || (index != numbers::invalid_unsigned_int &&
                             index >= part.locally_owned_size()))
                  {
                    const unsigned int stride =
                      dof_indices_interleave_strides[dof_access_cell][cell_no];
                    unsigned int i = 0;
                    for (unsigned int e = 0; e < n_base_elements; ++e)
                      for (unsigned int c = 0; c < n_components[e]; ++c)
                        {
                          const ShapeInfo<double> &shape =
                            shape_info(global_base_element_offset + e, 0);
                          for (unsigned int j = 0;
                               j < 2 * shape.dofs_per_component_on_face;
                               ++j)
                            ghost_indices.push_back(part.local_to_global(
                              index + i +
                              shape.face_to_cell_index_hermite(face_no, j) *
                                stride));
                          i += shape.dofs_per_component_on_cell * stride;
                        }
                    AssertDimension(i, dofs_per_cell[0] * stride);
                  }
              });
              std::sort(ghost_indices.begin(), ghost_indices.end());
              ghost_indices.erase(std::unique(ghost_indices.begin(),
                                              ghost_indices.end()),
                                  ghost_indices.end());
              IndexSet compressed_set(part.size());
              compressed_set.add_indices(ghost_indices.begin(),
                                         ghost_indices.end());
              compressed_set.subtract_set(part.locally_owned_range());
              const bool all_ghosts_equal =
                Utilities::MPI::min<int>(static_cast<int>(
                                           compressed_set.n_elements() ==
                                           part.ghost_indices().n_elements()),
                                         part.get_mpi_communicator()) != 0;
              if (all_ghosts_equal)
                vector_partitioner_gradients = vector_partitioner;
              else
                {
                  vector_partitioner_gradients =
                    std::make_shared<Utilities::MPI::Partitioner>(
                      part.locally_owned_range(), part.get_mpi_communicator());
                  const_cast<Utilities::MPI::Partitioner *>(
                    vector_partitioner_gradients.get())
                    ->set_ghost_indices(compressed_set, part.ghost_indices());
                }
            }
        };

      std::shared_ptr<const Utilities::MPI::Partitioner> temp_1, temp_2, temp_3,
        temp_4;

      // partitioner 1: values on faces
      process_values(temp_1, loop_over_faces);

      // partitioner 2: values and gradients on faces
      process_gradients(temp_1, temp_2, loop_over_faces);

      if (fill_cell_centric)
        {
          ghost_indices.clear();
          // partitioner 3: values on all faces
          process_values(temp_3, loop_over_all_faces);
          // partitioner 4: values and gradients on all faces
          process_gradients(temp_3, temp_4, loop_over_all_faces);
        }
      else
        {
          temp_3 = std::make_shared<Utilities::MPI::Partitioner>(
            part.locally_owned_range(), part.get_mpi_communicator());
          temp_4 = std::make_shared<Utilities::MPI::Partitioner>(
            part.locally_owned_range(), part.get_mpi_communicator());
        }

      if (use_vector_data_exchanger_full == false)
        {
          vector_exchanger_face_variants[1] = std::make_shared<
            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
            temp_1);
          vector_exchanger_face_variants[2] = std::make_shared<
            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
            temp_2);
          vector_exchanger_face_variants[3] = std::make_shared<
            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
            temp_3);
          vector_exchanger_face_variants[4] = std::make_shared<
            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
            temp_4);
        }
      else
        {
          vector_exchanger_face_variants[1] =
            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
              temp_1, communicator_sm);
          vector_exchanger_face_variants[2] =
            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
              temp_2, communicator_sm);
          vector_exchanger_face_variants[3] =
            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
              temp_3, communicator_sm);
          vector_exchanger_face_variants[4] =
            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
              temp_4, communicator_sm);
        }
    }


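    // The pairs computed below identify, for each cell, the owning process
    // within the shared-memory communicator and the offset of the cell's
    // first dof within that process's locally stored range; since all cells
    // share a single element type here, the offset is simply the contiguous
    // cell index times dofs_per_cell[0].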
    void
    DoFInfo::compute_shared_memory_contiguous_indices(
      std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
        &cell_indices_contiguous_sm)
    {
      AssertDimension(dofs_per_cell.size(), 1);

      for (unsigned int i = 0; i < 3; ++i)
        {
          dof_indices_contiguous_sm[i].resize(
            cell_indices_contiguous_sm[i].size());

          for (unsigned int j = 0; j < cell_indices_contiguous_sm[i].size();
               ++j)
            if (cell_indices_contiguous_sm[i][j].first !=
                numbers::invalid_unsigned_int)
              dof_indices_contiguous_sm[i][j] = {
                cell_indices_contiguous_sm[i][j].first,
                cell_indices_contiguous_sm[i][j].second * dofs_per_cell[0]};
            else
              dof_indices_contiguous_sm[i][j] = {numbers::invalid_unsigned_int,
                                                 numbers::invalid_unsigned_int};
        }
    }


    namespace internal
    {
      // We construct the connectivity graph in parallel. We use one lock
      // for 256 degrees of freedom to keep the number of locks down to a
      // reasonable level and reduce the cost of locking to some extent.
      static constexpr unsigned int bucket_size_threading = 256;


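      // The connectivity graph is built in three parallelized passes: first
      // count for every dof on how many cell blocks it appears
      // (compute_row_lengths), then record the dof-to-cell couplings in a
      // SparsityPattern (fill_connectivity_dofs), and finally transpose
      // that information into the cell-to-cell graph (fill_connectivity).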
      void
      compute_row_lengths(const unsigned int         begin,
                          const unsigned int         end,
                          const DoFInfo &            dof_info,
                          std::vector<std::mutex> &  mutexes,
                          std::vector<unsigned int> &row_lengths)
      {
        std::vector<unsigned int> scratch;
        const unsigned int n_components = dof_info.start_components.back();
        for (unsigned int block = begin; block < end; ++block)
          {
            scratch.clear();
            scratch.insert(
              scratch.end(),
              dof_info.dof_indices.data() +
                dof_info.row_starts[block * n_components].first,
              dof_info.dof_indices.data() +
                dof_info.row_starts[(block + 1) * n_components].first);
            std::sort(scratch.begin(), scratch.end());
            std::vector<unsigned int>::const_iterator end_unique =
              std::unique(scratch.begin(), scratch.end());
            std::vector<unsigned int>::const_iterator it = scratch.begin();
            while (it != end_unique)
              {
                // In this code, the procedure is that we insert all elements
                // that are within the range of one lock at once
                const unsigned int next_bucket =
                  (*it / bucket_size_threading + 1) * bucket_size_threading;
                std::lock_guard<std::mutex> lock(
                  mutexes[*it / bucket_size_threading]);
                for (; it != end_unique && *it < next_bucket; ++it)
                  {
                    AssertIndexRange(*it, row_lengths.size());
                    row_lengths[*it]++;
                  }
              }
          }
      }

      void
      fill_connectivity_dofs(const unsigned int               begin,
                             const unsigned int               end,
                             const DoFInfo &                  dof_info,
                             const std::vector<unsigned int> &row_lengths,
                             std::vector<std::mutex> &        mutexes,
                             dealii::SparsityPattern &        connectivity_dof)
      {
        std::vector<unsigned int> scratch;
        const unsigned int n_components = dof_info.start_components.back();
        for (unsigned int block = begin; block < end; ++block)
          {
            scratch.clear();
            scratch.insert(
              scratch.end(),
              dof_info.dof_indices.data() +
                dof_info.row_starts[block * n_components].first,
              dof_info.dof_indices.data() +
                dof_info.row_starts[(block + 1) * n_components].first);
            std::sort(scratch.begin(), scratch.end());
            std::vector<unsigned int>::const_iterator end_unique =
              std::unique(scratch.begin(), scratch.end());
            std::vector<unsigned int>::const_iterator it = scratch.begin();
            while (it != end_unique)
              {
                const unsigned int next_bucket =
                  (*it / bucket_size_threading + 1) * bucket_size_threading;
                std::lock_guard<std::mutex> lock(
                  mutexes[*it / bucket_size_threading]);
                for (; it != end_unique && *it < next_bucket; ++it)
                  if (row_lengths[*it] > 0)
                    connectivity_dof.add(*it, block);
              }
          }
      }



      void
      fill_connectivity(const unsigned int               begin,
                        const unsigned int               end,
                        const DoFInfo &                  dof_info,
                        const std::vector<unsigned int> &renumbering,
                        const dealii::SparsityPattern &  connectivity_dof,
                        DynamicSparsityPattern &         connectivity)
      {
        ordered_vector     row_entries;
        const unsigned int n_components = dof_info.start_components.back();
        for (unsigned int block = begin; block < end; ++block)
          {
            row_entries.clear();

            const unsigned int
              *it       = dof_info.dof_indices.data() +
                    dof_info.row_starts[block * n_components].first,
              *end_cell = dof_info.dof_indices.data() +
                          dof_info.row_starts[(block + 1) * n_components].first;
            for (; it != end_cell; ++it)
              {
                SparsityPattern::iterator sp = connectivity_dof.begin(*it);
                std::vector<types::global_dof_index>::iterator insert_pos =
                  row_entries.begin();
                for (; sp != connectivity_dof.end(*it); ++sp)
                  if (sp->column() != block)
                    row_entries.insert(renumbering[sp->column()], insert_pos);
              }
            connectivity.add_entries(renumbering[block],
                                     row_entries.begin(),
                                     row_entries.end());
          }
      }

    } // namespace internal

    void
    DoFInfo::make_connectivity_graph(
      const TaskInfo &                 task_info,
      const std::vector<unsigned int> &renumbering,
      DynamicSparsityPattern &         connectivity) const
    {
      unsigned int n_rows = (vector_partitioner->local_range().second -
                             vector_partitioner->local_range().first) +
                            vector_partitioner->ghost_indices().n_elements();

      // Avoid square sparsity patterns that allocate the diagonal entry
      if (n_rows == task_info.n_active_cells)
        ++n_rows;

      // first determine row lengths
      std::vector<unsigned int> row_lengths(n_rows);
      std::vector<std::mutex> mutexes(n_rows / internal::bucket_size_threading +
                                      1);
      parallel::apply_to_subranges(
        0,
        task_info.n_active_cells,
        [this, &mutexes, &row_lengths](const unsigned int begin,
                                       const unsigned int end) {
          internal::compute_row_lengths(
            begin, end, *this, mutexes, row_lengths);
        },
        20);

      // disregard dofs that only sit on a single cell because they cannot
      // couple
      for (unsigned int row = 0; row < n_rows; ++row)
        if (row_lengths[row] <= 1)
          row_lengths[row] = 0;

      // Create a temporary sparsity pattern that holds to each degree of
      // freedom on which cells it appears, i.e., store the connectivity
      // between cells and dofs
      SparsityPattern connectivity_dof(n_rows,
                                       task_info.n_active_cells,
                                       row_lengths);
      parallel::apply_to_subranges(
        0,
        task_info.n_active_cells,
        [this, &row_lengths, &mutexes, &connectivity_dof](
          const unsigned int begin, const unsigned int end) {
          internal::fill_connectivity_dofs(
            begin, end, *this, row_lengths, mutexes, connectivity_dof);
        },
        20);
      connectivity_dof.compress();


      // Invert renumbering for use in fill_connectivity.
      std::vector<unsigned int> reverse_numbering(task_info.n_active_cells);
      reverse_numbering = Utilities::invert_permutation(renumbering);

      // From the above connectivity between dofs and cells, we can finally
      // create a connectivity list between cells. The connectivity graph
      // should apply the renumbering, i.e., the entry for cell j is the entry
      // for cell renumbering[j] in the original ordering.
      parallel::apply_to_subranges(
        0,
        task_info.n_active_cells,
        [this, &reverse_numbering, &connectivity_dof, &connectivity](
          const unsigned int begin, const unsigned int end) {
          internal::fill_connectivity(begin,
                                      end,
                                      *this,
                                      reverse_numbering,
                                      connectivity_dof,
                                      connectivity);
        },
        20);
    }


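    // The renumbering computed below assigns new numbers to the locally
    // owned dofs in the order in which they are first touched when walking
    // through the cell batches lane by lane, which improves locality of
    // vector access in matrix-free loops; dofs that only appear on cell
    // batches with constraints are appended afterwards in their original
    // relative order.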
    void
    DoFInfo::compute_dof_renumbering(
      std::vector<types::global_dof_index> &renumbering)
    {
      const unsigned int locally_owned_size =
        vector_partitioner->locally_owned_size();
      renumbering.resize(0);
      renumbering.resize(locally_owned_size, numbers::invalid_dof_index);

      types::global_dof_index counter      = 0;
      const unsigned int      n_components = start_components.back();
      const unsigned int      n_cell_batches =
        n_vectorization_lanes_filled[dof_access_cell].size();
      Assert(n_cell_batches <=
               (row_starts.size() - 1) / vectorization_length / n_components,
             ExcInternalError());
      for (unsigned int cell_no = 0; cell_no < n_cell_batches; ++cell_no)
        {
          // do not renumber in case we have constraints
          if (row_starts[cell_no * n_components * vectorization_length]
                .second ==
              row_starts[(cell_no + 1) * n_components * vectorization_length]
                .second)
            {
              const unsigned int ndofs =
                dofs_per_cell.size() == 1 ?
                  dofs_per_cell[0] :
                  (dofs_per_cell[cell_active_fe_index.size() > 0 ?
                                   cell_active_fe_index[cell_no] :
                                   0]);
              const unsigned int *dof_ind =
                dof_indices.data() +
                row_starts[cell_no * n_components * vectorization_length].first;
              for (unsigned int i = 0; i < ndofs; ++i)
                for (unsigned int j = 0;
                     j < n_vectorization_lanes_filled[dof_access_cell][cell_no];
                     ++j)
                  if (dof_ind[j * ndofs + i] < locally_owned_size)
                    if (renumbering[dof_ind[j * ndofs + i]] ==
                        numbers::invalid_dof_index)
                      renumbering[dof_ind[j * ndofs + i]] = counter++;
            }
        }

      AssertIndexRange(counter, locally_owned_size + 1);
      for (types::global_dof_index &dof_index : renumbering)
        if (dof_index == numbers::invalid_dof_index)
          dof_index = counter++;

      // transform indices to global index space
      for (types::global_dof_index &dof_index : renumbering)
        dof_index = vector_partitioner->local_to_global(dof_index);

      AssertDimension(counter, renumbering.size());
    }



    std::size_t
    DoFInfo::memory_consumption() const
    {
      std::size_t memory = sizeof(*this);
      memory +=
        (row_starts.capacity() * sizeof(std::pair<unsigned int, unsigned int>));
      memory += MemoryConsumption::memory_consumption(dof_indices);
      memory +=
        MemoryConsumption::memory_consumption(hanging_node_constraint_masks);
      memory += MemoryConsumption::memory_consumption(row_starts_plain_indices);
      memory += MemoryConsumption::memory_consumption(plain_dof_indices);
      memory += MemoryConsumption::memory_consumption(constraint_indicator);
      memory += MemoryConsumption::memory_consumption(*vector_partitioner);
      return memory;
    }
  } // namespace MatrixFreeFunctions
} // namespace internal

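// Explicit instantiations of the templated DoFInfo member functions for the
// types used by the library.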
namespace internal
{
  namespace MatrixFreeFunctions
  {
    template void
    DoFInfo::read_dof_indices<double>(
      const std::vector<types::global_dof_index> &,
      const std::vector<types::global_dof_index> &,
      const bool,
      const dealii::AffineConstraints<double> &,
      const unsigned int,
      ConstraintValues<double> &,
      bool &);

    template void
    DoFInfo::read_dof_indices<float>(
      const std::vector<types::global_dof_index> &,
      const std::vector<types::global_dof_index> &,
      const bool,
      const dealii::AffineConstraints<float> &,
      const unsigned int,
      ConstraintValues<double> &,
      bool &);

    template bool
    DoFInfo::process_hanging_node_constraints<1>(
      const HangingNodes<1> &,
      const std::vector<std::vector<unsigned int>> &,
      const unsigned int,
      const TriaIterator<DoFCellAccessor<1, 1, false>> &,
      std::vector<types::global_dof_index> &);
    template bool
    DoFInfo::process_hanging_node_constraints<2>(
      const HangingNodes<2> &,
      const std::vector<std::vector<unsigned int>> &,
      const unsigned int,
      const TriaIterator<DoFCellAccessor<2, 2, false>> &,
      std::vector<types::global_dof_index> &);
    template bool
    DoFInfo::process_hanging_node_constraints<3>(
      const HangingNodes<3> &,
      const std::vector<std::vector<unsigned int>> &,
      const unsigned int,
      const TriaIterator<DoFCellAccessor<3, 3, false>> &,
      std::vector<types::global_dof_index> &);

    template void
    DoFInfo::compute_face_index_compression<1>(
      const std::vector<FaceToCellTopology<1>> &);
    template void
    DoFInfo::compute_face_index_compression<2>(
      const std::vector<FaceToCellTopology<2>> &);
    template void
    DoFInfo::compute_face_index_compression<4>(
      const std::vector<FaceToCellTopology<4>> &);
    template void
    DoFInfo::compute_face_index_compression<8>(
      const std::vector<FaceToCellTopology<8>> &);
    template void
    DoFInfo::compute_face_index_compression<16>(
      const std::vector<FaceToCellTopology<16>> &);

    template void
    DoFInfo::compute_vector_zero_access_pattern<1>(
      const TaskInfo &,
      const std::vector<FaceToCellTopology<1>> &);
    template void
    DoFInfo::compute_vector_zero_access_pattern<2>(
      const TaskInfo &,
      const std::vector<FaceToCellTopology<2>> &);
    template void
    DoFInfo::compute_vector_zero_access_pattern<4>(
      const TaskInfo &,
      const std::vector<FaceToCellTopology<4>> &);
    template void
    DoFInfo::compute_vector_zero_access_pattern<8>(
      const TaskInfo &,
      const std::vector<FaceToCellTopology<8>> &);
    template void
    DoFInfo::compute_vector_zero_access_pattern<16>(
      const TaskInfo &,
      const std::vector<FaceToCellTopology<16>> &);

    template void
    DoFInfo::print_memory_consumption<std::ostream>(std::ostream &,
                                                    const TaskInfo &) const;
    template void
    DoFInfo::print_memory_consumption<ConditionalOStream>(
      ConditionalOStream &,
      const TaskInfo &) const;

    template void
    DoFInfo::print<double>(const std::vector<double> &,
                           const std::vector<unsigned int> &,
                           std::ostream &) const;

    template void
    DoFInfo::print<float>(const std::vector<float> &,
                          const std::vector<unsigned int> &,
                          std::ostream &) const;
  } // namespace MatrixFreeFunctions
} // namespace internal

DEAL_II_NAMESPACE_CLOSE