Reference documentation for deal.II version GIT d77e5ebb0a 2023-01-27 22:35:02+00:00
dof_info.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2020 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/memory_consumption.h>
18 #include <deal.II/base/utilities.h>
19 #include <deal.II/base/vectorization.h>
20 
21 #include <deal.II/lac/dynamic_sparsity_pattern.h>
22 #include <deal.II/lac/sparsity_pattern.h>
23 
24 #include <deal.II/matrix_free/dof_info.templates.h>
25 
26 #include <iostream>
27 
28 DEAL_II_NAMESPACE_OPEN
29 
30 namespace internal
31 {
32  namespace MatrixFreeFunctions
33  {
34  // ensure that the type defined in both dof_info.h and
35  // hanging_nodes_internal.h is consistent
36  static_assert(std::is_same<compressed_constraint_kind, std::uint8_t>::value,
37  "Unexpected type for compressed hanging node indicators!");
38 
39 
40 
41  DoFInfo::DoFInfo()
42  {
43  clear();
44  }
45 
46 
47 
48  void
49  DoFInfo::clear()
50  {
51  row_starts.clear();
52  dof_indices.clear();
53  constraint_indicator.clear();
54  vector_partitioner.reset();
55  ghost_dofs.clear();
56  dofs_per_cell.clear();
57  dofs_per_face.clear();
58  vectorization_length = 1;
59  dimension = 2;
60  global_base_element_offset = 0;
61  n_base_elements = 0;
62  n_components.clear();
63  start_components.clear();
64  row_starts_plain_indices.clear();
65  plain_dof_indices.clear();
66  dof_indices_interleaved.clear();
67  for (unsigned int i = 0; i < 3; ++i)
68  {
69  index_storage_variants[i].clear();
70  dof_indices_contiguous[i].clear();
71  dof_indices_interleave_strides[i].clear();
72  n_vectorization_lanes_filled[i].clear();
73  }
74  store_plain_indices = false;
75  cell_active_fe_index.clear();
76  max_fe_index = 0;
77  fe_index_conversion.clear();
78  }
79 
80 
81 
82  void
83  DoFInfo::get_dof_indices_on_cell_batch(std::vector<unsigned int> &my_rows,
84  const unsigned int cell,
85  const bool apply_constraints) const
86  {
87  const unsigned int n_fe_components = start_components.back();
88  const unsigned int fe_index =
89  dofs_per_cell.size() == 1 ? 0 : cell_active_fe_index[cell];
90  const unsigned int dofs_this_cell = dofs_per_cell[fe_index];
91 
92  const unsigned int n_vectorization = vectorization_length;
93  constexpr auto dof_access_index = dof_access_cell;
94  AssertIndexRange(cell,
95  n_vectorization_lanes_filled[dof_access_index].size());
96  const unsigned int n_vectorization_actual =
97  n_vectorization_lanes_filled[dof_access_index][cell];
98 
99  // we might have constraints, so the final number
100  // of indices is not known a priori.
101  // conservatively reserve the maximum without constraints
102  my_rows.reserve(n_vectorization * dofs_this_cell);
103  my_rows.resize(0);
104  unsigned int total_size = 0;
105  for (unsigned int v = 0; v < n_vectorization_actual; ++v)
106  {
107  const unsigned int ib =
108  (cell * n_vectorization + v) * n_fe_components;
109  const unsigned int ie =
110  (cell * n_vectorization + v + 1) * n_fe_components;
111 
112  // figure out constraints by comparing constraint_indicator row
113  // shift for this cell within the block as compared to the next
114  // one
115  const bool has_constraints =
116  (hanging_node_constraint_masks.size() != 0 &&
117  hanging_node_constraint_masks_comp.size() != 0 &&
118  hanging_node_constraint_masks[cell * n_vectorization + v] !=
119  unconstrained_compressed_constraint_kind &&
120  hanging_node_constraint_masks_comp[fe_index][0]) ||
121  (row_starts[ib].second != row_starts[ib + n_fe_components].second);
122 
123  auto do_copy = [&](const unsigned int *begin,
124  const unsigned int *end) {
125  const unsigned int shift = total_size;
126  total_size += (end - begin);
127  my_rows.resize(total_size);
128  std::copy(begin, end, my_rows.begin() + shift);
129  };
130 
131  if (!has_constraints || apply_constraints)
132  {
133  const unsigned int *begin =
134  dof_indices.data() + row_starts[ib].first;
135  const unsigned int *end =
136  dof_indices.data() + row_starts[ie].first;
137  do_copy(begin, end);
138  }
139  else
140  {
141  Assert(row_starts_plain_indices[cell * n_vectorization + v] !=
142  numbers::invalid_unsigned_int,
143  ExcNotInitialized());
144  const unsigned int *begin =
145  plain_dof_indices.data() +
146  row_starts_plain_indices[cell * n_vectorization + v];
147  const unsigned int *end = begin + dofs_this_cell;
148  do_copy(begin, end);
149  }
150  }
151  }
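
For illustration, a minimal sketch of how this routine can be driven from user code; `matrix_free` is a hypothetical, already initialized dealii::MatrixFree<dim, double> object, and get_dof_info() and n_cell_batches() are its public accessors:

  std::vector<unsigned int> local_rows;
  const internal::MatrixFreeFunctions::DoFInfo &dof_info =
    matrix_free.get_dof_info(0);
  for (unsigned int batch = 0; batch < matrix_free.n_cell_batches(); ++batch)
    {
      dof_info.get_dof_indices_on_cell_batch(local_rows, batch, true);
      // local_rows now holds the local DoF indices touched by this batch
    }
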
152 
153 
154 
155  void
156  DoFInfo::assign_ghosts(const std::vector<unsigned int> &boundary_cells,
157  const MPI_Comm & communicator_sm,
158  const bool use_vector_data_exchanger_full)
159  {
160  Assert(boundary_cells.size() < row_starts.size(), ExcInternalError());
161 
162  // sort ghost dofs and compress out duplicates
163  const unsigned int n_owned = (vector_partitioner->local_range().second -
164  vector_partitioner->local_range().first);
165  const std::size_t n_ghosts = ghost_dofs.size();
166 #ifdef DEBUG
167  for (const auto dof_index : dof_indices)
168  AssertIndexRange(dof_index, n_owned + n_ghosts);
169 #endif
170 
171  const unsigned int n_components = start_components.back();
172  std::vector<unsigned int> ghost_numbering(n_ghosts);
173  IndexSet ghost_indices(vector_partitioner->size());
174  if (n_ghosts > 0)
175  {
176  unsigned int n_unique_ghosts = 0;
177  // since we need to go back to the local_to_global indices and
178  // replace the temporary numbering of ghosts by the real number in
179  // the index set, we need to store these values
180  std::vector<std::pair<types::global_dof_index, unsigned int>>
181  ghost_origin(n_ghosts);
182  for (std::size_t i = 0; i < n_ghosts; ++i)
183  {
184  ghost_origin[i].first = ghost_dofs[i];
185  ghost_origin[i].second = i;
186  }
187  std::sort(ghost_origin.begin(), ghost_origin.end());
188 
189  types::global_dof_index last_contiguous_start = ghost_origin[0].first;
190  ghost_numbering[ghost_origin[0].second] = 0;
191  for (std::size_t i = 1; i < n_ghosts; ++i)
192  {
193  if (ghost_origin[i].first > ghost_origin[i - 1].first + 1)
194  {
195  ghost_indices.add_range(last_contiguous_start,
196  ghost_origin[i - 1].first + 1);
197  last_contiguous_start = ghost_origin[i].first;
198  }
199  if (ghost_origin[i].first > ghost_origin[i - 1].first)
200  ++n_unique_ghosts;
201  ghost_numbering[ghost_origin[i].second] = n_unique_ghosts;
202  }
203  ++n_unique_ghosts;
204  ghost_indices.add_range(last_contiguous_start,
205  ghost_origin.back().first + 1);
206  ghost_indices.compress();
207 
208  // make sure that we got the correct local numbering of the ghost
209  // dofs. the ghost index set should store the same number
210  {
211  AssertDimension(n_unique_ghosts, ghost_indices.n_elements());
212  for (std::size_t i = 0; i < n_ghosts; ++i)
213  Assert(ghost_numbering[i] ==
214  ghost_indices.index_within_set(ghost_dofs[i]),
215  ExcInternalError());
216  }
217 
218  // apply correct numbering for ghost indices: We previously just
219  // enumerated them according to their appearance in the
220  // local_to_global structure. Above, we derived a relation between
221  // this enumeration and the actual numbering within the ghost index set
222  const unsigned int n_boundary_cells = boundary_cells.size();
223  for (unsigned int i = 0; i < n_boundary_cells; ++i)
224  {
225  unsigned int *data_ptr =
226  dof_indices.data() +
227  row_starts[boundary_cells[i] * n_components].first;
228  const unsigned int *row_end =
229  dof_indices.data() +
230  row_starts[(boundary_cells[i] + 1) * n_components].first;
231  for (; data_ptr != row_end; ++data_ptr)
232  *data_ptr = ((*data_ptr < n_owned) ?
233  *data_ptr :
234  n_owned + ghost_numbering[*data_ptr - n_owned]);
235 
236  // now the same procedure for plain indices
237  if (store_plain_indices == true)
238  {
239  bool has_hanging_nodes = false;
240 
241  const unsigned int fe_index =
242  (cell_active_fe_index.size() == 0 ||
243  dofs_per_cell.size() == 1) ?
244  0 :
245  cell_active_fe_index[boundary_cells[i]];
246  AssertIndexRange(fe_index, dofs_per_cell.size());
247 
248  if (hanging_node_constraint_masks.size() > 0 &&
249  hanging_node_constraint_masks_comp.size() > 0 &&
250  hanging_node_constraint_masks[boundary_cells[i]] !=
251  unconstrained_compressed_constraint_kind)
252  for (unsigned int comp = 0; comp < n_components; ++comp)
253  has_hanging_nodes |=
254  hanging_node_constraint_masks_comp[fe_index][comp];
255 
256  if (has_hanging_nodes ||
257  row_starts[boundary_cells[i] * n_components].second !=
258  row_starts[(boundary_cells[i] + 1) * n_components]
259  .second)
260  {
261  unsigned int *data_ptr =
262  plain_dof_indices.data() +
263  row_starts_plain_indices[boundary_cells[i]];
264  const unsigned int *row_end =
265  data_ptr + dofs_per_cell[fe_index];
266  for (; data_ptr != row_end; ++data_ptr)
267  *data_ptr =
268  ((*data_ptr < n_owned) ?
269  *data_ptr :
270  n_owned + ghost_numbering[*data_ptr - n_owned]);
271  }
272  }
273  }
274  }
275 
276  std::vector<types::global_dof_index> empty;
277  ghost_dofs.swap(empty);
278 
279  // set the ghost indices now. need to cast away constness here, but that
280  // is not critical since we reset the Partitioner in the same initialize
281  // call as this call here.
282  Utilities::MPI::Partitioner *vec_part =
283  const_cast<Utilities::MPI::Partitioner *>(vector_partitioner.get());
284  vec_part->set_ghost_indices(ghost_indices);
285 
286  if (use_vector_data_exchanger_full == false)
287  vector_exchanger = std::make_shared<
288  MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
289  vector_partitioner);
290  else
291  vector_exchanger =
292  std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
293  vector_partitioner, communicator_sm);
294  }
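
The core of the ghost handling above is a sort-and-compress idiom: pair each ghost index with its position of first appearance, sort by value, and hand out compressed numbers in sorted order. A self-contained sketch of that idiom (the helper name compress_ghost_numbering is invented for this example):

  #include <algorithm>
  #include <cstddef>
  #include <utility>
  #include <vector>

  // Returns, for each entry of `ghost_dofs`, its compressed number: equal
  // values share a number, and numbers increase with the sorted value order.
  std::vector<unsigned int>
  compress_ghost_numbering(const std::vector<std::size_t> &ghost_dofs)
  {
    std::vector<std::pair<std::size_t, unsigned int>> origin(ghost_dofs.size());
    for (unsigned int i = 0; i < ghost_dofs.size(); ++i)
      origin[i] = {ghost_dofs[i], i};
    std::sort(origin.begin(), origin.end());

    std::vector<unsigned int> numbering(ghost_dofs.size());
    unsigned int n_unique = 0;
    for (std::size_t i = 0; i < origin.size(); ++i)
      {
        if (i > 0 && origin[i].first > origin[i - 1].first)
          ++n_unique;
        numbering[origin[i].second] = n_unique;
      }
    return numbering;
  }
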
295 
296 
297 
298  void
299  DoFInfo::reorder_cells(
300  const TaskInfo & task_info,
301  const std::vector<unsigned int> & renumbering,
302  const std::vector<unsigned int> & constraint_pool_row_index,
303  const std::vector<unsigned char> &irregular_cells)
304  {
305  (void)constraint_pool_row_index;
306 
307  // first reorder the active FE index.
308  const bool have_hp = dofs_per_cell.size() > 1;
309  if (cell_active_fe_index.size() > 0)
310  {
311  std::vector<unsigned int> new_active_fe_index;
312  new_active_fe_index.reserve(task_info.cell_partition_data.back());
313  unsigned int position_cell = 0;
314  for (unsigned int cell = 0;
315  cell < task_info.cell_partition_data.back();
316  ++cell)
317  {
318  const unsigned int n_comp =
319  (irregular_cells[cell] > 0 ? irregular_cells[cell] :
320  vectorization_length);
321 
322  // take maximum FE index among the ones present (we might have
323  // lumped some lower indices into higher ones)
324  unsigned int fe_index =
325  cell_active_fe_index[renumbering[position_cell]];
326  for (unsigned int j = 1; j < n_comp; ++j)
327  fe_index = std::max(
328  fe_index,
329  cell_active_fe_index[renumbering[position_cell + j]]);
330 
331  new_active_fe_index.push_back(fe_index);
332  position_cell += n_comp;
333  }
334  std::swap(new_active_fe_index, cell_active_fe_index);
335  }
336  if (have_hp)
337  AssertDimension(cell_active_fe_index.size(),
338  task_info.cell_partition_data.back());
339 
340  const unsigned int n_components = start_components.back();
341 
342  std::vector<std::pair<unsigned int, unsigned int>> new_row_starts(
343  vectorization_length * n_components *
344  task_info.cell_partition_data.back() +
345  1);
346  std::vector<unsigned int> new_dof_indices;
347  std::vector<std::pair<unsigned short, unsigned short>>
348  new_constraint_indicator;
349  std::vector<unsigned int> new_plain_indices, new_rowstart_plain;
350  unsigned int position_cell = 0;
351  new_dof_indices.reserve(dof_indices.size());
352  new_constraint_indicator.reserve(constraint_indicator.size());
353 
354  std::vector<compressed_constraint_kind> new_hanging_node_constraint_masks;
355  new_hanging_node_constraint_masks.reserve(
356  hanging_node_constraint_masks.size());
357 
358  if (store_plain_indices == true)
359  {
360  new_rowstart_plain.resize(vectorization_length *
361  task_info.cell_partition_data.back() +
362  1,
363  numbers::invalid_unsigned_int);
364  new_plain_indices.reserve(plain_dof_indices.size());
365  }
366 
367  // copy the indices and the constraint indicators to the new data field,
368  // where we will go through the cells in the renumbered way. in case the
369  // vectorization length does not exactly match up, we fill invalid
370  // numbers to the rowstart data. for contiguous cell indices, we skip
371  // the rowstarts field completely and directly go into the
372  // new_dof_indices field (this layout is used in FEEvaluation).
373  for (unsigned int i = 0; i < task_info.cell_partition_data.back(); ++i)
374  {
375  const unsigned int n_vect =
376  (irregular_cells[i] > 0 ? irregular_cells[i] :
377  vectorization_length);
378  const unsigned int dofs_per_cell =
379  have_hp ? this->dofs_per_cell[cell_active_fe_index[i]] :
380  this->dofs_per_cell[0];
381 
382  for (unsigned int j = 0; j < n_vect; ++j)
383  {
384  const unsigned int cell_no =
385  renumbering[position_cell + j] * n_components;
386 
387  bool has_hanging_nodes = false;
388 
389  if (hanging_node_constraint_masks.size() > 0 &&
390  hanging_node_constraint_masks_comp.size() > 0)
391  {
392  const auto mask =
393  hanging_node_constraint_masks[renumbering[position_cell +
394  j]];
395  new_hanging_node_constraint_masks.push_back(mask);
396 
397  if (mask != unconstrained_compressed_constraint_kind)
398  for (unsigned int comp = 0; comp < n_components; ++comp)
399  has_hanging_nodes |= hanging_node_constraint_masks_comp
400  [have_hp ? cell_active_fe_index[i] : 0][comp];
401  }
402 
403  for (unsigned int comp = 0; comp < n_components; ++comp)
404  {
405  new_row_starts[(i * vectorization_length + j) * n_components +
406  comp]
407  .first = new_dof_indices.size();
408  new_row_starts[(i * vectorization_length + j) * n_components +
409  comp]
410  .second = new_constraint_indicator.size();
411 
412  new_dof_indices.insert(
413  new_dof_indices.end(),
414  dof_indices.data() + row_starts[cell_no + comp].first,
415  dof_indices.data() + row_starts[cell_no + comp + 1].first);
416  for (unsigned int index = row_starts[cell_no + comp].second;
417  index != row_starts[cell_no + comp + 1].second;
418  ++index)
419  new_constraint_indicator.push_back(
420  constraint_indicator[index]);
421  }
422  if (store_plain_indices &&
423  ((row_starts[cell_no].second !=
424  row_starts[cell_no + n_components].second) ||
425  has_hanging_nodes))
426  {
427  new_rowstart_plain[i * vectorization_length + j] =
428  new_plain_indices.size();
429  new_plain_indices.insert(
430  new_plain_indices.end(),
431  plain_dof_indices.data() +
432  row_starts_plain_indices[renumbering[position_cell + j]],
433  plain_dof_indices.data() +
434  row_starts_plain_indices[renumbering[position_cell + j]] +
435  dofs_per_cell);
436  }
437  }
438  for (unsigned int j = n_vect; j < vectorization_length; ++j)
439  for (unsigned int comp = 0; comp < n_components; ++comp)
440  {
441  new_row_starts[(i * vectorization_length + j) * n_components +
442  comp]
443  .first = new_dof_indices.size();
444  new_row_starts[(i * vectorization_length + j) * n_components +
445  comp]
446  .second = new_constraint_indicator.size();
447  }
448 
449  for (unsigned int j = n_vect; j < vectorization_length; ++j)
450  if (hanging_node_constraint_masks.size() > 0)
451  new_hanging_node_constraint_masks.push_back(
452  unconstrained_compressed_constraint_kind);
453 
454  position_cell += n_vect;
455  }
456  AssertDimension(position_cell * n_components + 1, row_starts.size());
457 
458  AssertDimension(dof_indices.size(), new_dof_indices.size());
459  new_row_starts[task_info.cell_partition_data.back() *
460  vectorization_length * n_components]
461  .first = new_dof_indices.size();
462  new_row_starts[task_info.cell_partition_data.back() *
463  vectorization_length * n_components]
464  .second = new_constraint_indicator.size();
465 
466  AssertDimension(constraint_indicator.size(),
467  new_constraint_indicator.size());
468 
469  new_row_starts.swap(row_starts);
470  new_dof_indices.swap(dof_indices);
471  new_constraint_indicator.swap(constraint_indicator);
472  new_plain_indices.swap(plain_dof_indices);
473  new_rowstart_plain.swap(row_starts_plain_indices);
474  new_hanging_node_constraint_masks.swap(hanging_node_constraint_masks);
475 
476 #ifdef DEBUG
477  // sanity check 1: all indices should be smaller than the number of dofs
478  // locally owned plus the number of ghosts
479  const unsigned int index_range =
480  (vector_partitioner->local_range().second -
481  vector_partitioner->local_range().first) +
482  vector_partitioner->ghost_indices().n_elements();
483  for (const auto dof_index : dof_indices)
484  AssertIndexRange(dof_index, index_range);
485 
486  // sanity check 2: for the constraint indicators, the first index should
487  // be smaller than the number of indices in the row, and the second
488  // index should be smaller than the number of constraints in the
489  // constraint pool.
490  for (unsigned int row = 0; row < task_info.cell_partition_data.back();
491  ++row)
492  {
493  const unsigned int row_length_ind =
494  row_starts[(row + 1) * n_components].first -
495  row_starts[row * n_components].first;
496  AssertIndexRange(
497  row_starts[(row + 1) * n_components].second,
498  constraint_indicator.size() + 1);
499  const std::pair<unsigned short, unsigned short> *
500  con_it =
501  constraint_indicator.data() +
502  row_starts[row * n_components].second,
503  *end_con =
504  constraint_indicator.data() +
505  row_starts[(row + 1) * n_components].second;
506  for (; con_it != end_con; ++con_it)
507  {
508  AssertIndexRange(con_it->first, row_length_ind + 1);
509  AssertIndexRange(con_it->second,
510  constraint_pool_row_index.size() - 1);
511  }
512  }
513 
514  // sanity check 3: check the number of cells once again
515  unsigned int n_active_cells = 0;
516  for (unsigned int c = 0; c < *(task_info.cell_partition_data.end() - 2);
517  ++c)
518  if (irregular_cells[c] > 0)
519  n_active_cells += irregular_cells[c];
520  else
521  n_active_cells += vectorization_length;
522  AssertDimension(n_active_cells, task_info.n_active_cells);
523 #endif
524 
525  compute_cell_index_compression(irregular_cells);
526  }
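
In essence, the loop above rebuilds a CSR-like index structure in a new cell order. Stripped of vectorization, constraints, and hp bookkeeping, the pattern reduces to the following standalone sketch (reorder_csr is a made-up name):

  #include <vector>

  void
  reorder_csr(const std::vector<unsigned int> &row_starts, // size n_cells + 1
              const std::vector<unsigned int> &indices,
              const std::vector<unsigned int> &renumbering, // new -> old
              std::vector<unsigned int> &      new_row_starts,
              std::vector<unsigned int> &      new_indices)
  {
    const unsigned int n_cells = renumbering.size();
    new_row_starts.assign(n_cells + 1, 0);
    new_indices.clear();
    new_indices.reserve(indices.size());
    for (unsigned int cell = 0; cell < n_cells; ++cell)
      {
        const unsigned int old_cell = renumbering[cell];
        new_row_starts[cell]        = new_indices.size();
        for (unsigned int i = row_starts[old_cell];
             i < row_starts[old_cell + 1];
             ++i)
          new_indices.push_back(indices[i]);
      }
    new_row_starts[n_cells] = new_indices.size();
  }
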
527 
528 
529 
530  void
531  DoFInfo::compute_cell_index_compression(
532  const std::vector<unsigned char> &irregular_cells)
533  {
534  const bool have_hp = dofs_per_cell.size() > 1;
535  const unsigned int n_components = start_components.back();
536 
537  Assert(vectorization_length == 1 ||
538  row_starts.size() % vectorization_length == 1,
539  ExcInternalError());
540  if (vectorization_length > 1)
541  AssertDimension(row_starts.size() / vectorization_length / n_components,
542  irregular_cells.size());
543  index_storage_variants[dof_access_cell].resize(
544  irregular_cells.size(), IndexStorageVariants::full);
545  n_vectorization_lanes_filled[dof_access_cell].resize(
546  irregular_cells.size());
547  for (unsigned int i = 0; i < irregular_cells.size(); ++i)
548  if (irregular_cells[i] > 0)
549  n_vectorization_lanes_filled[dof_access_cell][i] = irregular_cells[i];
550  else
551  n_vectorization_lanes_filled[dof_access_cell][i] =
552  vectorization_length;
553 
554  dof_indices_contiguous[dof_access_cell].resize(
555  irregular_cells.size() * vectorization_length,
556  numbers::invalid_unsigned_int);
557  dof_indices_interleaved.resize(dof_indices.size(),
558  numbers::invalid_unsigned_int);
559  dof_indices_interleave_strides[dof_access_cell].resize(
560  irregular_cells.size() * vectorization_length,
561  numbers::invalid_unsigned_int);
562 
563  std::vector<unsigned int> index_kinds(
564  static_cast<unsigned int>(
565  IndexStorageVariants::interleaved_contiguous_mixed_strides) +
566  1);
567  std::vector<unsigned int> offsets(vectorization_length);
568  for (unsigned int i = 0; i < irregular_cells.size(); ++i)
569  {
570  const unsigned int ndofs =
571  dofs_per_cell[have_hp ? cell_active_fe_index[i] : 0];
572  const unsigned int n_comp =
573  n_vectorization_lanes_filled[dof_access_cell][i];
574 
575  // check 1: Check if there are constraints -> no compression possible
576  bool has_constraints = false;
577  for (unsigned int j = 0; j < n_comp; ++j)
578  {
579  const unsigned int cell_no = i * vectorization_length + j;
580  if (row_starts[cell_no * n_components].second !=
581  row_starts[(cell_no + 1) * n_components].second)
582  {
583  has_constraints = true;
584  break;
585  }
586  }
587  if (has_constraints)
588  index_storage_variants[dof_access_cell][i] =
589  IndexStorageVariants::full;
590  else
591  {
592  bool indices_are_contiguous = true;
593  for (unsigned int j = 0; j < n_comp; ++j)
594  {
595  const unsigned int cell_no = i * vectorization_length + j;
596  const unsigned int *dof_indices =
597  this->dof_indices.data() +
598  row_starts[cell_no * n_components].first;
599  AssertDimension(
600  ndofs,
601  row_starts[(cell_no + 1) * n_components].first -
602  row_starts[cell_no * n_components].first);
603  for (unsigned int i = 1; i < ndofs; ++i)
604  if (dof_indices[i] != dof_indices[0] + i)
605  {
606  indices_are_contiguous = false;
607  break;
608  }
609  }
610 
611  bool indices_are_interleaved_and_contiguous =
612  (ndofs > 1 && n_comp == vectorization_length);
613 
614  {
615  const unsigned int *dof_indices =
616  this->dof_indices.data() +
617  row_starts[i * vectorization_length * n_components].first;
618  for (unsigned int k = 0;
619  k < ndofs && indices_are_interleaved_and_contiguous;
620  ++k)
621  for (unsigned int j = 0; j < n_comp; ++j)
622  if (dof_indices[j * ndofs + k] !=
623  dof_indices[0] + k * n_comp + j)
624  {
625  indices_are_interleaved_and_contiguous = false;
626  break;
627  }
628  }
629 
630  if (indices_are_contiguous ||
631  indices_are_interleaved_and_contiguous)
632  {
633  for (unsigned int j = 0; j < n_comp; ++j)
634  dof_indices_contiguous[dof_access_cell]
635  [i * vectorization_length + j] =
636  this->dof_indices.size() == 0 ?
637  0 :
638  this->dof_indices
639  [row_starts[(i * vectorization_length + j) *
640  n_components]
641  .first];
642  }
643 
644  if (indices_are_interleaved_and_contiguous)
645  {
646  index_storage_variants[dof_access_cell][i] =
647  IndexStorageVariants::
648  interleaved_contiguous;
649  for (unsigned int j = 0; j < n_comp; ++j)
650  dof_indices_interleave_strides[2][i * vectorization_length +
651  j] = n_comp;
652  }
653  else if (indices_are_contiguous)
654  {
655  index_storage_variants[dof_access_cell][i] =
656  IndexStorageVariants::contiguous;
657  for (unsigned int j = 0; j < n_comp; ++j)
658  dof_indices_interleave_strides[2][i * vectorization_length +
659  j] = 1;
660  }
661  else
662  {
663  int indices_are_interleaved_and_mixed = 2;
664  const unsigned int *dof_indices =
665  &this->dof_indices[row_starts[i * vectorization_length *
666  n_components]
667  .first];
668  for (unsigned int j = 0; j < n_comp; ++j)
669  offsets[j] =
670  dof_indices[j * ndofs + 1] - dof_indices[j * ndofs];
671  for (unsigned int k = 0;
672  k < ndofs && indices_are_interleaved_and_mixed != 0;
673  ++k)
674  for (unsigned int j = 0; j < n_comp; ++j)
675  // the first if case is to avoid negative offsets
676  // (invalid)
677  if (dof_indices[j * ndofs + 1] < dof_indices[j * ndofs] ||
678  dof_indices[j * ndofs + k] !=
679  dof_indices[j * ndofs] + k * offsets[j])
680  {
681  indices_are_interleaved_and_mixed = 0;
682  break;
683  }
684  if (indices_are_interleaved_and_mixed == 2)
685  {
686  for (unsigned int j = 0; j < n_comp; ++j)
687  dof_indices_interleave_strides
688  [dof_access_cell][i * vectorization_length + j] =
689  offsets[j];
690  for (unsigned int j = 0; j < n_comp; ++j)
691  dof_indices_contiguous[dof_access_cell]
692  [i * vectorization_length + j] =
693  dof_indices[j * ndofs];
694  for (unsigned int j = 0; j < n_comp; ++j)
695  if (offsets[j] != vectorization_length)
696  {
697  indices_are_interleaved_and_mixed = 1;
698  break;
699  }
700  if (indices_are_interleaved_and_mixed == 1 ||
701  n_comp != vectorization_length)
702  index_storage_variants[dof_access_cell][i] =
703  IndexStorageVariants::
704  interleaved_contiguous_mixed_strides;
705  else
706  index_storage_variants[dof_access_cell][i] =
707  IndexStorageVariants::interleaved_contiguous_strided;
708  }
709  else
710  {
711  const unsigned int *dof_indices =
712  this->dof_indices.data() +
713  row_starts[i * vectorization_length * n_components]
714  .first;
715  if (n_comp == vectorization_length)
716  index_storage_variants[dof_access_cell][i] =
717  IndexStorageVariants::interleaved;
718  else
719  index_storage_variants[dof_access_cell][i] =
720  IndexStorageVariants::full;
721 
722  // do not use interleaved storage if two entries within
723  // vectorized array point to the same index (scatter not
724  // possible due to race condition)
725  for (const unsigned int *indices = dof_indices;
726  indices != dof_indices + ndofs;
727  ++indices)
728  {
729  bool is_sorted = true;
730  unsigned int previous = indices[0];
731  for (unsigned int l = 1; l < n_comp; ++l)
732  {
733  const unsigned int current = indices[l * ndofs];
734  if (current <= previous)
735  is_sorted = false;
736 
737  // the simple check failed, must compare all
738  // indices manually - due to short sizes this
739  // O(n^2) algorithm is better than sorting
740  if (!is_sorted)
741  for (unsigned int j = 0; j < l; ++j)
742  if (indices[j * ndofs] == current)
743  {
744  index_storage_variants
745  [dof_access_cell][i] =
746  IndexStorageVariants::full;
747  break;
748  }
749  previous = current;
750  }
751  }
752  }
753  }
754  }
755  index_kinds[static_cast<unsigned int>(
756  index_storage_variants[dof_access_cell][i])]++;
757  }
758 
759  // Cleanup phase: we want to avoid single cells with different properties
760  // than the bulk of the domain in order to avoid extra checks in the face
761  // identification.
762 
763  // Step 1: check whether the interleaved indices were only assigned to
764  // the single cell within a vectorized array.
765  auto fix_single_interleaved_indices =
766  [&](const IndexStorageVariants variant) {
767  if (index_kinds[static_cast<unsigned int>(
768  IndexStorageVariants::interleaved_contiguous_mixed_strides)] ==
769  0 &&
770  index_kinds[static_cast<unsigned int>(variant)] > 0)
771  for (unsigned int i = 0; i < irregular_cells.size(); ++i)
772  {
773  if (index_storage_variants[dof_access_cell][i] ==
774  IndexStorageVariants::
775  interleaved_contiguous_mixed_strides &&
776  (variant != IndexStorageVariants::contiguous ||
777  dof_indices_interleave_strides
778  [dof_access_cell]
779  [i * vectorization_length] ==
780  1))
781  {
782  index_storage_variants[dof_access_cell][i] = variant;
783  index_kinds[static_cast<unsigned int>(
784  IndexStorageVariants::
785  interleaved_contiguous_mixed_strides)]--;
786  index_kinds[static_cast<unsigned int>(variant)]++;
787  }
788  }
789  };
790 
791  fix_single_interleaved_indices(IndexStorageVariants::full);
792  fix_single_interleaved_indices(IndexStorageVariants::contiguous);
793  fix_single_interleaved_indices(IndexStorageVariants::interleaved);
794 
795  unsigned int n_interleaved =
796  index_kinds[static_cast<unsigned int>(
797  IndexStorageVariants::interleaved)] +
798  index_kinds[static_cast<unsigned int>(
799  IndexStorageVariants::interleaved_contiguous)] +
800  index_kinds[static_cast<unsigned int>(
801  IndexStorageVariants::interleaved_contiguous_strided)];
802 
803  // Step 2: fix single contiguous cell among others with interleaved
804  // storage
805  if (n_interleaved > 0 && index_kinds[static_cast<unsigned int>(
806  IndexStorageVariants::contiguous)] == 1)
807  for (unsigned int i = 0; i < irregular_cells.size(); ++i)
808  if (index_storage_variants[dof_access_cell][i] ==
809  IndexStorageVariants::contiguous)
810  {
811  index_storage_variants[dof_access_cell][i] =
812  IndexStorageVariants::interleaved_contiguous_mixed_strides;
813  index_kinds[static_cast<unsigned int>(
814  IndexStorageVariants::contiguous)]--;
815  index_kinds[static_cast<unsigned int>(
816  IndexStorageVariants::interleaved_contiguous_mixed_strides)]++;
817  }
818 
819  // Step 3: Interleaved cells are left but also some non-contiguous ones
820  // -> revert all to full storage
821  if (n_interleaved > 0 &&
822  index_kinds[static_cast<unsigned int>(IndexStorageVariants::full)] +
823  index_kinds[static_cast<unsigned int>(
824  IndexStorageVariants::contiguous)] >
825  0)
826  for (unsigned int i = 0; i < irregular_cells.size(); ++i)
827  if (index_storage_variants[dof_access_cell][i] >
828  IndexStorageVariants::contiguous)
829  {
830  index_kinds[static_cast<unsigned int>(
831  index_storage_variants[2][i])]--;
832  if (n_vectorization_lanes_filled[dof_access_cell][i] ==
833  vectorization_length)
834  index_storage_variants[dof_access_cell][i] =
835  IndexStorageVariants::contiguous;
836  else
837  index_storage_variants[dof_access_cell][i] =
838  IndexStorageVariants::full;
839  index_kinds[static_cast<unsigned int>(
840  index_storage_variants[dof_access_cell][i])]++;
841  }
842 
843  // Step 4: Copy the interleaved indices into their own data structure
844  for (unsigned int i = 0; i < irregular_cells.size(); ++i)
845  if (index_storage_variants[dof_access_cell][i] ==
846  IndexStorageVariants::interleaved)
847  {
848  if (n_vectorization_lanes_filled[dof_access_cell][i] <
849  vectorization_length)
850  {
851  index_storage_variants[dof_access_cell][i] =
852  IndexStorageVariants::full;
853  continue;
854  }
855  const unsigned int ndofs =
856  dofs_per_cell[have_hp ? cell_active_fe_index[i] : 0];
857  const unsigned int *dof_indices =
858  &this->dof_indices
859  [row_starts[i * vectorization_length * n_components].first];
860  unsigned int *interleaved_dof_indices =
861  &this->dof_indices_interleaved
862  [row_starts[i * vectorization_length * n_components].first];
863  AssertDimension(this->dof_indices.size(),
864  this->dof_indices_interleaved.size());
865  AssertIndexRange(
866  row_starts[i * vectorization_length *
867  n_components]
868  .first,
869  this->dof_indices_interleaved.size() + 1);
870  AssertIndexRange(
871  row_starts[i * vectorization_length * n_components].first +
872  ndofs * vectorization_length,
873  this->dof_indices_interleaved.size() + 1);
874  for (unsigned int k = 0; k < ndofs; ++k)
875  {
876  const unsigned int *my_dof_indices = dof_indices + k;
877  const unsigned int *end =
878  interleaved_dof_indices + vectorization_length;
879  for (; interleaved_dof_indices != end;
880  ++interleaved_dof_indices, my_dof_indices += ndofs)
881  *interleaved_dof_indices = *my_dof_indices;
882  }
883  }
884  }
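
The two structural checks above reduce, for a single cell batch, to the following self-contained sketch (Storage and classify are invented for this example; dof_indices stores the lanes one after another):

  #include <vector>

  enum class Storage { full, contiguous, interleaved_contiguous };

  Storage
  classify(const std::vector<unsigned int> &dof_indices,
           const unsigned int               ndofs,
           const unsigned int               n_lanes)
  {
    // check 1: each lane holds an ascending run of consecutive indices
    bool contiguous = true;
    for (unsigned int j = 0; j < n_lanes && contiguous; ++j)
      for (unsigned int i = 1; i < ndofs; ++i)
        if (dof_indices[j * ndofs + i] != dof_indices[j * ndofs] + i)
          {
            contiguous = false;
            break;
          }

    // check 2: reading lane-by-lane gives one consecutive run overall
    bool interleaved_contiguous = ndofs > 1;
    for (unsigned int k = 0; k < ndofs && interleaved_contiguous; ++k)
      for (unsigned int j = 0; j < n_lanes; ++j)
        if (dof_indices[j * ndofs + k] != dof_indices[0] + k * n_lanes + j)
          {
            interleaved_contiguous = false;
            break;
          }

    if (interleaved_contiguous)
      return Storage::interleaved_contiguous;
    return contiguous ? Storage::contiguous : Storage::full;
  }
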
885 
886 
887 
888  void
889  DoFInfo::compute_tight_partitioners(
890  const Table<2, ShapeInfo<double>> & shape_info,
891  const unsigned int n_owned_cells,
892  const unsigned int n_lanes,
893  const std::vector<FaceToCellTopology<1>> &inner_faces,
894  const std::vector<FaceToCellTopology<1>> &ghosted_faces,
895  const bool fill_cell_centric,
896  const MPI_Comm & communicator_sm,
897  const bool use_vector_data_exchanger_full)
898  {
899  const Utilities::MPI::Partitioner &part = *vector_partitioner;
900 
901  // partitioner 0: no face integrals, simply use the indices present
902  // on the cells
903  std::vector<types::global_dof_index> ghost_indices;
904  {
905  const unsigned int n_components = start_components.back();
906  for (unsigned int cell = 0; cell < n_owned_cells; ++cell)
907  {
908  for (unsigned int i = row_starts[cell * n_components].first;
909  i < row_starts[(cell + 1) * n_components].first;
910  ++i)
911  if (dof_indices[i] >= part.locally_owned_size())
912  ghost_indices.push_back(part.local_to_global(dof_indices[i]));
913 
914  const unsigned int fe_index =
915  dofs_per_cell.size() == 1 ? 0 :
916  cell_active_fe_index[cell / n_lanes];
917  const unsigned int dofs_this_cell = dofs_per_cell[fe_index];
918 
919  for (unsigned int i = row_starts_plain_indices[cell];
920  i < row_starts_plain_indices[cell] + dofs_this_cell;
921  ++i)
922  if (plain_dof_indices[i] >= part.locally_owned_size())
923  ghost_indices.push_back(
924  part.local_to_global(plain_dof_indices[i]));
925  }
926  std::sort(ghost_indices.begin(), ghost_indices.end());
927  ghost_indices.erase(std::unique(ghost_indices.begin(),
928  ghost_indices.end()),
929  ghost_indices.end());
930  IndexSet compressed_set(part.size());
931  compressed_set.add_indices(ghost_indices.begin(), ghost_indices.end());
932  compressed_set.subtract_set(part.locally_owned_range());
933  const bool all_ghosts_equal =
934  Utilities::MPI::min<int>(static_cast<int>(
935  compressed_set.n_elements() ==
936  part.ghost_indices().n_elements()),
937  part.get_mpi_communicator()) != 0;
938 
939  std::shared_ptr<const Utilities::MPI::Partitioner> temp_0;
940 
941  if (all_ghosts_equal)
942  temp_0 = vector_partitioner;
943  else
944  {
945  temp_0 = std::make_shared<Utilities::MPI::Partitioner>(
946  part.locally_owned_range(), part.get_mpi_communicator());
947  const_cast<Utilities::MPI::Partitioner *>(temp_0.get())
948  ->set_ghost_indices(compressed_set, part.ghost_indices());
949  }
950 
951  if (use_vector_data_exchanger_full == false)
952  vector_exchanger_face_variants[0] = std::make_shared<
953  MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
954  temp_0);
955  else
956  vector_exchanger_face_variants[0] =
957  std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
958  temp_0, communicator_sm);
959  }
960 
961  // construct a numbering of faces
962  std::vector<FaceToCellTopology<1>> all_faces(inner_faces);
963  all_faces.insert(all_faces.end(),
964  ghosted_faces.begin(),
965  ghosted_faces.end());
966  Table<2, unsigned int> cell_and_face_to_faces(
967  (row_starts.size() - 1) / start_components.back(),
968  2 * shape_info(0, 0).n_dimensions);
969  cell_and_face_to_faces.fill(numbers::invalid_unsigned_int);
970  for (unsigned int f = 0; f < all_faces.size(); ++f)
971  {
972  cell_and_face_to_faces(all_faces[f].cells_interior[0],
973  all_faces[f].interior_face_no) = f;
974  Assert(all_faces[f].cells_exterior[0] !=
975  numbers::invalid_unsigned_int,
976  ExcInternalError());
977  cell_and_face_to_faces(all_faces[f].cells_exterior[0],
978  all_faces[f].exterior_face_no) = f;
979  }
980 
981  // lambda function to detect objects on face pairs
982  const auto loop_over_faces =
983  [&](const std::function<
984  void(const unsigned int, const unsigned int, const bool)> &fu) {
985  for (const auto &face : inner_faces)
986  {
987  AssertIndexRange(face.cells_interior[0], n_owned_cells);
988  fu(face.cells_exterior[0], face.exterior_face_no, false /*flag*/);
989  }
990  };
991 
992  const auto loop_over_all_faces =
993  [&](const std::function<
994  void(const unsigned int, const unsigned int, const bool)> &fu) {
995  for (unsigned int c = 0; c < cell_and_face_to_faces.size(0); ++c)
996  for (unsigned int d = 0; d < cell_and_face_to_faces.size(1); ++d)
997  {
998  const unsigned int f = cell_and_face_to_faces(c, d);
999  if (f == numbers::invalid_unsigned_int)
1000  continue;
1001 
1002  const unsigned int cell_m = all_faces[f].cells_interior[0];
1003  const unsigned int cell_p = all_faces[f].cells_exterior[0];
1004 
1005  const bool ext = c == cell_m;
1006 
1007  if (ext && cell_p == numbers::invalid_unsigned_int)
1008  continue;
1009 
1010  const unsigned int p = ext ? cell_p : cell_m;
1011  const unsigned int face_no = ext ?
1012  all_faces[f].exterior_face_no :
1013  all_faces[f].interior_face_no;
1014 
1015  fu(p, face_no, true);
1016  }
1017  };
1018 
1019  const auto process_values =
1020  [&](
1021  std::shared_ptr<const Utilities::MPI::Partitioner>
1022  &vector_partitioner_values,
1023  const std::function<void(
1024  const std::function<void(
1025  const unsigned int, const unsigned int, const bool)> &)> &loop) {
1026  bool all_nodal_and_tensorial = shape_info.size(1) == 1;
1027 
1028  if (all_nodal_and_tensorial)
1029  for (unsigned int c = 0; c < n_base_elements; ++c)
1030  {
1031  const auto &si =
1032  shape_info(global_base_element_offset + c, 0).data.front();
1033  if (!si.nodal_at_cell_boundaries ||
1034  (si.element_type ==
1035  MatrixFreeFunctions::tensor_symmetric_plus_dg0))
1036  all_nodal_and_tensorial = false;
1037  }
1038 
1039  if (all_nodal_and_tensorial == false)
1040  vector_partitioner_values = vector_partitioner;
1041  else
1042  {
1043  bool has_noncontiguous_cell = false;
1044 
1045  loop([&](const unsigned int cell_no,
1046  const unsigned int face_no,
1047  const bool flag) {
1048  const unsigned int index =
1049  dof_indices_contiguous[dof_access_cell][cell_no];
1050  if (flag || (index != numbers::invalid_unsigned_int &&
1051  index >= part.locally_owned_size()))
1052  {
1053  const unsigned int stride =
1054  dof_indices_interleave_strides[dof_access_cell][cell_no];
1055  unsigned int i = 0;
1056  for (unsigned int e = 0; e < n_base_elements; ++e)
1057  for (unsigned int c = 0; c < n_components[e]; ++c)
1058  {
1059  const ShapeInfo<double> &shape =
1060  shape_info(global_base_element_offset + e, 0);
1061  for (unsigned int j = 0;
1062  j < shape.dofs_per_component_on_face;
1063  ++j)
1064  ghost_indices.push_back(part.local_to_global(
1065  index + i +
1066  shape.face_to_cell_index_nodal(face_no, j) *
1067  stride));
1068  i += shape.dofs_per_component_on_cell * stride;
1069  }
1070  AssertDimension(i, dofs_per_cell[0] * stride);
1071  }
1072  else if (index == numbers::invalid_unsigned_int)
1073  has_noncontiguous_cell = true;
1074  });
1075  has_noncontiguous_cell =
1076  Utilities::MPI::min<int>(static_cast<int>(
1077  has_noncontiguous_cell),
1078  part.get_mpi_communicator()) != 0;
1079 
1080  std::sort(ghost_indices.begin(), ghost_indices.end());
1081  ghost_indices.erase(std::unique(ghost_indices.begin(),
1082  ghost_indices.end()),
1083  ghost_indices.end());
1084  IndexSet compressed_set(part.size());
1085  compressed_set.add_indices(ghost_indices.begin(),
1086  ghost_indices.end());
1087  compressed_set.subtract_set(part.locally_owned_range());
1088  const bool all_ghosts_equal =
1089  Utilities::MPI::min<int>(static_cast<int>(
1090  compressed_set.n_elements() ==
1091  part.ghost_indices().n_elements()),
1092  part.get_mpi_communicator()) != 0;
1093  if (all_ghosts_equal || has_noncontiguous_cell)
1094  vector_partitioner_values = vector_partitioner;
1095  else
1096  {
1097  vector_partitioner_values =
1098  std::make_shared<Utilities::MPI::Partitioner>(
1099  part.locally_owned_range(), part.get_mpi_communicator());
1100  const_cast<Utilities::MPI::Partitioner *>(
1101  vector_partitioner_values.get())
1102  ->set_ghost_indices(compressed_set, part.ghost_indices());
1103  }
1104  }
1105  };
1106 
1107 
1108  const auto process_gradients =
1109  [&](
1110  const std::shared_ptr<const Utilities::MPI::Partitioner>
1111  &vector_partitoner_values,
1112  std::shared_ptr<const Utilities::MPI::Partitioner>
1113  &vector_partitioner_gradients,
1114  const std::function<void(
1115  const std::function<void(
1116  const unsigned int, const unsigned int, const bool)> &)> &loop) {
1117  bool all_hermite = shape_info.size(1) == 1;
1118 
1119  if (all_hermite)
1120  for (unsigned int c = 0; c < n_base_elements; ++c)
1121  if (shape_info(global_base_element_offset + c, 0).element_type !=
1122  MatrixFreeFunctions::tensor_symmetric_hermite)
1123  all_hermite = false;
1124  if (all_hermite == false ||
1125  vector_partitoner_values.get() == vector_partitioner.get())
1126  vector_partitioner_gradients = vector_partitioner;
1127  else
1128  {
1129  loop([&](const unsigned int cell_no,
1130  const unsigned int face_no,
1131  const bool flag) {
1132  const unsigned int index =
1133  dof_indices_contiguous[dof_access_cell][cell_no];
1134  if (flag || (index != numbers::invalid_unsigned_int &&
1135  index >= part.locally_owned_size()))
1136  {
1137  const unsigned int stride =
1138  dof_indices_interleave_strides[dof_access_cell][cell_no];
1139  unsigned int i = 0;
1140  for (unsigned int e = 0; e < n_base_elements; ++e)
1141  for (unsigned int c = 0; c < n_components[e]; ++c)
1142  {
1143  const ShapeInfo<double> &shape =
1144  shape_info(global_base_element_offset + e, 0);
1145  for (unsigned int j = 0;
1146  j < 2 * shape.dofs_per_component_on_face;
1147  ++j)
1148  ghost_indices.push_back(part.local_to_global(
1149  index + i +
1150  shape.face_to_cell_index_hermite(face_no, j) *
1151  stride));
1152  i += shape.dofs_per_component_on_cell * stride;
1153  }
1154  AssertDimension(i, dofs_per_cell[0] * stride);
1155  }
1156  });
1157  std::sort(ghost_indices.begin(), ghost_indices.end());
1158  ghost_indices.erase(std::unique(ghost_indices.begin(),
1159  ghost_indices.end()),
1160  ghost_indices.end());
1161  IndexSet compressed_set(part.size());
1162  compressed_set.add_indices(ghost_indices.begin(),
1163  ghost_indices.end());
1164  compressed_set.subtract_set(part.locally_owned_range());
1165  const bool all_ghosts_equal =
1166  Utilities::MPI::min<int>(static_cast<int>(
1167  compressed_set.n_elements() ==
1168  part.ghost_indices().n_elements()),
1169  part.get_mpi_communicator()) != 0;
1170  if (all_ghosts_equal)
1171  vector_partitioner_gradients = vector_partitioner;
1172  else
1173  {
1174  vector_partitioner_gradients =
1175  std::make_shared<Utilities::MPI::Partitioner>(
1176  part.locally_owned_range(), part.get_mpi_communicator());
1177  const_cast<Utilities::MPI::Partitioner *>(
1178  vector_partitioner_gradients.get())
1179  ->set_ghost_indices(compressed_set, part.ghost_indices());
1180  }
1181  }
1182  };
1183 
1184  std::shared_ptr<const Utilities::MPI::Partitioner> temp_1, temp_2, temp_3,
1185  temp_4;
1186 
1187  // partitioner 1: values on faces
1188  process_values(temp_1, loop_over_faces);
1189 
1190  // partitioner 2: values and gradients on faces
1191  process_gradients(temp_1, temp_2, loop_over_faces);
1192 
1193  if (fill_cell_centric)
1194  {
1195  ghost_indices.clear();
1196  // partitioner 3: values on all faces
1197  process_values(temp_3, loop_over_all_faces);
1198  // partitioner 4: values and gradients on faces
1199  process_gradients(temp_3, temp_4, loop_over_all_faces);
1200  }
1201  else
1202  {
1203  temp_3 = std::make_shared<Utilities::MPI::Partitioner>(
1204  part.locally_owned_range(), part.get_mpi_communicator());
1205  temp_4 = std::make_shared<Utilities::MPI::Partitioner>(
1206  part.locally_owned_range(), part.get_mpi_communicator());
1207  }
1208 
1209  if (use_vector_data_exchanger_full == false)
1210  {
1211  vector_exchanger_face_variants[1] = std::make_shared<
1212  MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
1213  temp_1);
1214  vector_exchanger_face_variants[2] = std::make_shared<
1215  MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
1216  temp_2);
1217  vector_exchanger_face_variants[3] = std::make_shared<
1218  MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
1219  temp_3);
1220  vector_exchanger_face_variants[4] = std::make_shared<
1221  MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
1222  temp_4);
1223  }
1224  else
1225  {
1226  vector_exchanger_face_variants[1] =
1227  std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
1228  temp_1, communicator_sm);
1229  vector_exchanger_face_variants[2] =
1230  std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
1231  temp_2, communicator_sm);
1232  vector_exchanger_face_variants[3] =
1233  std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
1234  temp_3, communicator_sm);
1235  vector_exchanger_face_variants[4] =
1236  std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
1237  temp_4, communicator_sm);
1238  }
1239  }
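
Each of the tight partitioners above is derived with the same ghost-compression step. In isolation, and wrapped in a hypothetical free function for this sketch, it reads:

  #include <deal.II/base/index_set.h>
  #include <deal.II/base/partitioner.h>

  #include <algorithm>
  #include <vector>

  dealii::IndexSet
  compress_ghosts(std::vector<dealii::types::global_dof_index> ghost_indices,
                  const dealii::Utilities::MPI::Partitioner &  part)
  {
    // remove duplicates, then collect the survivors into an IndexSet that
    // excludes locally owned entries
    std::sort(ghost_indices.begin(), ghost_indices.end());
    ghost_indices.erase(std::unique(ghost_indices.begin(), ghost_indices.end()),
                        ghost_indices.end());
    dealii::IndexSet compressed_set(part.size());
    compressed_set.add_indices(ghost_indices.begin(), ghost_indices.end());
    compressed_set.subtract_set(part.locally_owned_range());
    compressed_set.compress();
    return compressed_set;
  }
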
1240 
1241 
1242 
1243  void
1244  DoFInfo::compute_shared_memory_contiguous_indices(
1245  std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
1246  &cell_indices_contiguous_sm)
1247  {
1248  AssertDimension(dofs_per_cell.size(), 1);
1249 
1250  for (unsigned int i = 0; i < 3; ++i)
1251  {
1252  dof_indices_contiguous_sm[i].resize(
1253  cell_indices_contiguous_sm[i].size());
1254 
1255  for (unsigned int j = 0; j < cell_indices_contiguous_sm[i].size();
1256  ++j)
1257  if (cell_indices_contiguous_sm[i][j].first !=
1258  numbers::invalid_unsigned_int)
1259  dof_indices_contiguous_sm[i][j] = {
1260  cell_indices_contiguous_sm[i][j].first,
1261  cell_indices_contiguous_sm[i][j].second * dofs_per_cell[0]};
1262  else
1263  dof_indices_contiguous_sm[i][j] = {numbers::invalid_unsigned_int,
1264  numbers::invalid_unsigned_int};
1265  }
1266  }
1267 
1268 
1269 
1270  namespace internal
1271  {
1272  // We construct the connectivity graph in parallel. We use one lock for
1273  // 256 degrees of freedom to keep the number of locks down to a
1274  // reasonable level and reduce the cost of locking to some extent.
1275  static constexpr unsigned int bucket_size_threading = 256;
1276 
1277 
1278 
1279  void
1280  compute_row_lengths(const unsigned int begin,
1281  const unsigned int end,
1282  const DoFInfo & dof_info,
1283  std::vector<std::mutex> & mutexes,
1284  std::vector<unsigned int> &row_lengths)
1285  {
1286  std::vector<unsigned int> scratch;
1287  const unsigned int n_components = dof_info.start_components.back();
1288  for (unsigned int block = begin; block < end; ++block)
1289  {
1290  scratch.clear();
1291  scratch.insert(
1292  scratch.end(),
1293  dof_info.dof_indices.data() +
1294  dof_info.row_starts[block * n_components].first,
1295  dof_info.dof_indices.data() +
1296  dof_info.row_starts[(block + 1) * n_components].first);
1297  std::sort(scratch.begin(), scratch.end());
1298  std::vector<unsigned int>::const_iterator end_unique =
1299  std::unique(scratch.begin(), scratch.end());
1300  std::vector<unsigned int>::const_iterator it = scratch.begin();
1301  while (it != end_unique)
1302  {
1303  // In this code, the procedure is that we insert all elements
1304  // that are within the range of one lock at once
1305  const unsigned int next_bucket =
1306  (*it / bucket_size_threading + 1) * bucket_size_threading;
1307  std::lock_guard<std::mutex> lock(
1308  mutexes[*it / bucket_size_threading]);
1309  for (; it != end_unique && *it < next_bucket; ++it)
1310  {
1311  AssertIndexRange(*it, row_lengths.size());
1312  row_lengths[*it]++;
1313  }
1314  }
1315  }
1316  }
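
Because the scratch indices are sorted, all updates that fall into the window guarded by one mutex are done under a single lock acquisition. A self-contained sketch of that bucketed-locking idiom (increment_counts is invented for this example):

  #include <mutex>
  #include <vector>

  // One mutex guards a window of `bucket_size` consecutive entries; since
  // the input is sorted, each window is locked exactly once per call.
  void
  increment_counts(const std::vector<unsigned int> &sorted_unique_indices,
                   std::vector<std::mutex> &        mutexes,
                   std::vector<unsigned int> &      counts)
  {
    constexpr unsigned int bucket_size = 256; // mirrors bucket_size_threading
    auto it = sorted_unique_indices.begin();
    while (it != sorted_unique_indices.end())
      {
        const unsigned int next_bucket = (*it / bucket_size + 1) * bucket_size;
        std::lock_guard<std::mutex> lock(mutexes[*it / bucket_size]);
        for (; it != sorted_unique_indices.end() && *it < next_bucket; ++it)
          ++counts[*it];
      }
  }
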
1317 
1318  void
1319  fill_connectivity_dofs(const unsigned int begin,
1320  const unsigned int end,
1321  const DoFInfo & dof_info,
1322  const std::vector<unsigned int> &row_lengths,
1323  std::vector<std::mutex> & mutexes,
1324  ::SparsityPattern & connectivity_dof)
1325  {
1326  std::vector<unsigned int> scratch;
1327  const unsigned int n_components = dof_info.start_components.back();
1328  for (unsigned int block = begin; block < end; ++block)
1329  {
1330  scratch.clear();
1331  scratch.insert(
1332  scratch.end(),
1333  dof_info.dof_indices.data() +
1334  dof_info.row_starts[block * n_components].first,
1335  dof_info.dof_indices.data() +
1336  dof_info.row_starts[(block + 1) * n_components].first);
1337  std::sort(scratch.begin(), scratch.end());
1338  std::vector<unsigned int>::const_iterator end_unique =
1339  std::unique(scratch.begin(), scratch.end());
1340  std::vector<unsigned int>::const_iterator it = scratch.begin();
1341  while (it != end_unique)
1342  {
1343  const unsigned int next_bucket =
1344  (*it / bucket_size_threading + 1) * bucket_size_threading;
1345  std::lock_guard<std::mutex> lock(
1346  mutexes[*it / bucket_size_threading]);
1347  for (; it != end_unique && *it < next_bucket; ++it)
1348  if (row_lengths[*it] > 0)
1349  connectivity_dof.add(*it, block);
1350  }
1351  }
1352  }
1353 
1354 
1355 
1356  void
1357  fill_connectivity(const unsigned int begin,
1358  const unsigned int end,
1359  const DoFInfo & dof_info,
1360  const std::vector<unsigned int> &renumbering,
1361  const ::SparsityPattern & connectivity_dof,
1362  DynamicSparsityPattern & connectivity)
1363  {
1364  ordered_vector row_entries;
1365  const unsigned int n_components = dof_info.start_components.back();
1366  for (unsigned int block = begin; block < end; ++block)
1367  {
1368  row_entries.clear();
1369 
1370  const unsigned int
1371  *it = dof_info.dof_indices.data() +
1372  dof_info.row_starts[block * n_components].first,
1373  *end_cell = dof_info.dof_indices.data() +
1374  dof_info.row_starts[(block + 1) * n_components].first;
1375  for (; it != end_cell; ++it)
1376  {
1377  SparsityPattern::iterator sp = connectivity_dof.begin(*it);
1378  std::vector<types::global_dof_index>::iterator insert_pos =
1379  row_entries.begin();
1380  for (; sp != connectivity_dof.end(*it); ++sp)
1381  if (sp->column() != block)
1382  row_entries.insert(renumbering[sp->column()], insert_pos);
1383  }
1384  connectivity.add_entries(renumbering[block],
1385  row_entries.begin(),
1386  row_entries.end());
1387  }
1388  }
1389 
1390  } // namespace internal
1391 
1392  void
1393  DoFInfo::make_connectivity_graph(
1394  const TaskInfo & task_info,
1395  const std::vector<unsigned int> &renumbering,
1396  DynamicSparsityPattern & connectivity) const
1397  {
1398  unsigned int n_rows = (vector_partitioner->local_range().second -
1399  vector_partitioner->local_range().first) +
1400  vector_partitioner->ghost_indices().n_elements();
1401 
1402  // Avoid square sparsity patterns that allocate the diagonal entry
1403  if (n_rows == task_info.n_active_cells)
1404  ++n_rows;
1405 
1406  // first determine row lengths
1407  std::vector<unsigned int> row_lengths(n_rows);
1408  std::vector<std::mutex> mutexes(n_rows / internal::bucket_size_threading +
1409  1);
1410  parallel::apply_to_subranges(
1411  0,
1412  task_info.n_active_cells,
1413  [this, &mutexes, &row_lengths](const unsigned int begin,
1414  const unsigned int end) {
1415  internal::compute_row_lengths(
1416  begin, end, *this, mutexes, row_lengths);
1417  },
1418  20);
1419 
1420  // disregard dofs that only sit on a single cell because they cannot
1421  // couple
1422  for (unsigned int row = 0; row < n_rows; ++row)
1423  if (row_lengths[row] <= 1)
1424  row_lengths[row] = 0;
1425 
1426  // Create a temporary sparsity pattern that holds to each degree of
1427  // freedom on which cells it appears, i.e., store the connectivity
1428  // between cells and dofs
1429  SparsityPattern connectivity_dof(n_rows,
1430  task_info.n_active_cells,
1431  row_lengths);
1432  parallel::apply_to_subranges(
1433  0,
1434  task_info.n_active_cells,
1435  [this, &row_lengths, &mutexes, &connectivity_dof](
1436  const unsigned int begin, const unsigned int end) {
1437  internal::fill_connectivity_dofs(
1438  begin, end, *this, row_lengths, mutexes, connectivity_dof);
1439  },
1440  20);
1441  connectivity_dof.compress();
1442 
1443 
1444  // Invert renumbering for use in fill_connectivity.
1445  std::vector<unsigned int> reverse_numbering(task_info.n_active_cells);
1446  reverse_numbering = Utilities::invert_permutation(renumbering);
1447 
1448  // From the above connectivity between dofs and cells, we can finally
1449  // create a connectivity list between cells. The connectivity graph
1450  // should apply the renumbering, i.e., the entry for cell j is the entry
1451  // for cell renumbering[j] in the original ordering.
1452  parallel::apply_to_subranges(
1453  0,
1454  task_info.n_active_cells,
1455  [this, &reverse_numbering, &connectivity_dof, &connectivity](
1456  const unsigned int begin, const unsigned int end) {
1457  internal::fill_connectivity(begin,
1458  end,
1459  *this,
1460  reverse_numbering,
1461  connectivity_dof,
1462  connectivity);
1463  },
1464  20);
1465  }
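
A hedged usage sketch: MatrixFree drives this internally during setup, but calling it by hand would look roughly like the following, given a DoFInfo object, its TaskInfo, and a cell renumbering (all assumed to be already set up):

  DynamicSparsityPattern connectivity(task_info.n_active_cells,
                                      task_info.n_active_cells);
  dof_info.make_connectivity_graph(task_info, renumbering, connectivity);
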
1466 
1467 
1468 
1469  void
1470  DoFInfo::compute_dof_renumbering(
1471  std::vector<types::global_dof_index> &renumbering)
1472  {
1473  const unsigned int locally_owned_size =
1474  vector_partitioner->locally_owned_size();
1475  renumbering.resize(0);
1476  renumbering.resize(locally_owned_size, numbers::invalid_dof_index);
1477 
1478  types::global_dof_index counter = 0;
1479  const unsigned int n_components = start_components.back();
1480  const unsigned int n_cell_batches =
1481  n_vectorization_lanes_filled[dof_access_cell].size();
1482  Assert(n_cell_batches <=
1483  (row_starts.size() - 1) / vectorization_length / n_components,
1484  ExcInternalError());
1485  for (unsigned int cell_no = 0; cell_no < n_cell_batches; ++cell_no)
1486  {
1487  // do not renumber in case we have constraints
1488  if (row_starts[cell_no * n_components * vectorization_length]
1489  .second ==
1490  row_starts[(cell_no + 1) * n_components * vectorization_length]
1491  .second)
1492  {
1493  const unsigned int ndofs =
1494  dofs_per_cell.size() == 1 ?
1495  dofs_per_cell[0] :
1496  (dofs_per_cell[cell_active_fe_index.size() > 0 ?
1497  cell_active_fe_index[cell_no] :
1498  0]);
1499  const unsigned int *dof_ind =
1500  dof_indices.data() +
1501  row_starts[cell_no * n_components * vectorization_length].first;
1502  for (unsigned int i = 0; i < ndofs; ++i)
1503  for (unsigned int j = 0;
1504  j < n_vectorization_lanes_filled[dof_access_cell][cell_no];
1505  ++j)
1506  if (dof_ind[j * ndofs + i] < locally_owned_size)
1507  if (renumbering[dof_ind[j * ndofs + i]] ==
1508  numbers::invalid_dof_index)
1509  renumbering[dof_ind[j * ndofs + i]] = counter++;
1510  }
1511  }
1512 
1513  AssertIndexRange(counter, locally_owned_size + 1);
1514  for (types::global_dof_index &dof_index : renumbering)
1515  if (dof_index == numbers::invalid_dof_index)
1516  dof_index = counter++;
1517 
1518  // transform indices to global index space
1519  for (types::global_dof_index &dof_index : renumbering)
1520  dof_index = vector_partitioner->local_to_global(dof_index);
1521 
1522  AssertDimension(counter, renumbering.size());
1523  }
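
In user code this renumbering is typically reached through MatrixFree::renumber_dofs() and then applied to the DoFHandler; a sketch, assuming already set-up matrix_free and dof_handler objects:

  std::vector<types::global_dof_index> renumbering;
  matrix_free.renumber_dofs(renumbering); // ends up in compute_dof_renumbering()
  dof_handler.renumber_dofs(renumbering);
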
1524 
1525 
1526 
1527  std::size_t
1528  DoFInfo::memory_consumption() const
1529  {
1530  std::size_t memory = sizeof(*this);
1531  memory +=
1532  (row_starts.capacity() * sizeof(std::pair<unsigned int, unsigned int>));
1533  memory += MemoryConsumption::memory_consumption(dof_indices);
1534  memory +=
1535  MemoryConsumption::memory_consumption(hanging_node_constraint_masks);
1536  memory += MemoryConsumption::memory_consumption(row_starts_plain_indices);
1537  memory += MemoryConsumption::memory_consumption(plain_dof_indices);
1538  memory += MemoryConsumption::memory_consumption(constraint_indicator);
1539  memory += MemoryConsumption::memory_consumption(*vector_partitioner);
1540  return memory;
1541  }
1542  } // namespace MatrixFreeFunctions
1543 } // namespace internal
1544 
1545 namespace internal
1546 {
1547  namespace MatrixFreeFunctions
1548  {
1549  template void
1550  DoFInfo::read_dof_indices<double>(
1551  const std::vector<types::global_dof_index> &,
1552  const std::vector<types::global_dof_index> &,
1553  const bool,
1554  const ::AffineConstraints<double> &,
1555  const unsigned int,
1556  ConstraintValues<double> &,
1557  bool &);
1558 
1559  template void
1560  DoFInfo::read_dof_indices<float>(
1561  const std::vector<types::global_dof_index> &,
1562  const std::vector<types::global_dof_index> &,
1563  const bool,
1564  const ::AffineConstraints<float> &,
1565  const unsigned int,
1566  ConstraintValues<double> &,
1567  bool &);
1568 
1569  template bool
1570  DoFInfo::process_hanging_node_constraints<1>(
1571  const HangingNodes<1> &,
1572  const std::vector<std::vector<unsigned int>> &,
1573  const unsigned int,
1574  const TriaIterator<DoFCellAccessor<1, 1, false>> &,
1575  std::vector<types::global_dof_index> &);
1576  template bool
1577  DoFInfo::process_hanging_node_constraints<2>(
1578  const HangingNodes<2> &,
1579  const std::vector<std::vector<unsigned int>> &,
1580  const unsigned int,
1581  const TriaIterator<DoFCellAccessor<2, 2, false>> &,
1582  std::vector<types::global_dof_index> &);
1583  template bool
1584  DoFInfo::process_hanging_node_constraints<3>(
1585  const HangingNodes<3> &,
1586  const std::vector<std::vector<unsigned int>> &,
1587  const unsigned int,
1588  const TriaIterator<DoFCellAccessor<3, 3, false>> &,
1589  std::vector<types::global_dof_index> &);
1590 
1591  template void
1592  DoFInfo::compute_face_index_compression<1>(
1593  const std::vector<FaceToCellTopology<1>> &);
1594  template void
1595  DoFInfo::compute_face_index_compression<2>(
1596  const std::vector<FaceToCellTopology<2>> &);
1597  template void
1598  DoFInfo::compute_face_index_compression<4>(
1599  const std::vector<FaceToCellTopology<4>> &);
1600  template void
1601  DoFInfo::compute_face_index_compression<8>(
1602  const std::vector<FaceToCellTopology<8>> &);
1603  template void
1604  DoFInfo::compute_face_index_compression<16>(
1605  const std::vector<FaceToCellTopology<16>> &);
1606 
1607  template void
1608  DoFInfo::compute_vector_zero_access_pattern<1>(
1609  const TaskInfo &,
1610  const std::vector<FaceToCellTopology<1>> &);
1611  template void
1612  DoFInfo::compute_vector_zero_access_pattern<2>(
1613  const TaskInfo &,
1614  const std::vector<FaceToCellTopology<2>> &);
1615  template void
1616  DoFInfo::compute_vector_zero_access_pattern<4>(
1617  const TaskInfo &,
1618  const std::vector<FaceToCellTopology<4>> &);
1619  template void
1620  DoFInfo::compute_vector_zero_access_pattern<8>(
1621  const TaskInfo &,
1622  const std::vector<FaceToCellTopology<8>> &);
1623  template void
1624  DoFInfo::compute_vector_zero_access_pattern<16>(
1625  const TaskInfo &,
1626  const std::vector<FaceToCellTopology<16>> &);
1627 
1628  template void
1629  DoFInfo::print_memory_consumption<std::ostream>(std::ostream &,
1630  const TaskInfo &) const;
1631  template void
1632  DoFInfo::print_memory_consumption<ConditionalOStream>(
1633  ConditionalOStream &,
1634  const TaskInfo &) const;
1635 
1636  template void
1637  DoFInfo::print<double>(const std::vector<double> &,
1638  const std::vector<unsigned int> &,
1639  std::ostream &) const;
1640 
1641  template void
1642  DoFInfo::print<float>(const std::vector<float> &,
1643  const std::vector<unsigned int> &,
1644  std::ostream &) const;
1645  } // namespace MatrixFreeFunctions
1646 } // namespace internal
1647 
1648 DEAL_II_NAMESPACE_CLOSE