Reference documentation for deal.II version 9.3.3
mg_transfer_internal.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2016 - 2021 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16
17#include <deal.II/distributed/tria_base.h>
18
19#include <deal.II/dofs/dof_tools.h>
20
21#include <deal.II/fe/fe_tools.h>
22
23#include <deal.II/matrix_free/shape_info.h>
24
25#include <deal.II/multigrid/mg_transfer_internal.h>
26
27DEAL_II_NAMESPACE_OPEN
28
29namespace internal
30{
31 namespace MGTransfer
32 {
33 // Internal data structure that is used in the MPI communication in
34 // fill_copy_indices(). It represents an entry in the copy_indices* map,
35 // that associates a level dof index with a global dof index.
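 // Note that DoFPair objects are later shipped between processes in
 // fill_copy_indices() as raw bytes (MPI_BYTE), so the struct has to stay
 // trivially copyable; if in doubt, this could be verified with, e.g.,
 // static_assert(std::is_trivially_copyable<DoFPair>::value, "plain data").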
36 struct DoFPair
37 {
38 unsigned int level;
39 types::global_dof_index global_dof_index;
40 types::global_dof_index level_dof_index;
41
42 DoFPair(const unsigned int level,
43 const types::global_dof_index global_dof_index,
44 const types::global_dof_index level_dof_index)
45 : level(level)
46 , global_dof_index(global_dof_index)
47 , level_dof_index(level_dof_index)
48 {}
49
50 DoFPair()
51 : level(numbers::invalid_unsigned_int)
52 , global_dof_index(numbers::invalid_dof_index)
53 , level_dof_index(numbers::invalid_dof_index)
54 {}
55 };
56
57 template <int dim, int spacedim>
58 void
59 fill_copy_indices(
60 const ::DoFHandler<dim, spacedim> &dof_handler,
61 const MGConstrainedDoFs * mg_constrained_dofs,
62 std::vector<std::vector<
63 std::pair<types::global_dof_index, types::global_dof_index>>>
64 &copy_indices,
65 std::vector<std::vector<
66 std::pair<types::global_dof_index, types::global_dof_index>>>
67 &copy_indices_global_mine,
68 std::vector<std::vector<
69 std::pair<types::global_dof_index, types::global_dof_index>>>
70 & copy_indices_level_mine,
71 const bool skip_interface_dofs)
72 {
73 // Now we are filling the variables copy_indices*, which are essentially
74 // maps from global to mgdof for each level stored as a std::vector of
75 // pairs. We need to split this map on each level depending on the
76 // ownership of the global and mgdof, so that we later do not access
77 // non-local elements in copy_to/from_mg.
78 // We use the bit vector dof_touched to keep track of which global dofs
79 // have already been processed (on the current level). This mirrors what
80 // happens when the multigrid runs in serial.
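 // The three lists are distinguished by the ownership of the two indices
 // of a pair: copy_indices holds pairs where both the global (active) dof
 // and the level dof are locally owned, copy_indices_global_mine holds
 // pairs where only the global dof is locally owned, and
 // copy_indices_level_mine holds pairs where only the level dof is locally
 // owned (these are the entries received from other processes below).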
81
82 // map cpu_index -> vector of data
83 // that will be copied into copy_indices_level_mine
84 std::vector<DoFPair> send_data_temp;
85
86 const unsigned int n_levels =
87 dof_handler.get_triangulation().n_global_levels();
88 copy_indices.resize(n_levels);
89 copy_indices_global_mine.resize(n_levels);
90 copy_indices_level_mine.resize(n_levels);
91 IndexSet globally_relevant;
92 DoFTools::extract_locally_relevant_dofs(dof_handler, globally_relevant);
93
94 const unsigned int dofs_per_cell = dof_handler.get_fe().n_dofs_per_cell();
95 std::vector<types::global_dof_index> global_dof_indices(dofs_per_cell);
96 std::vector<types::global_dof_index> level_dof_indices(dofs_per_cell);
97
98 for (unsigned int level = 0; level < n_levels; ++level)
99 {
100 std::vector<bool> dof_touched(globally_relevant.n_elements(), false);
101
102 // for the most common case where copy_indices are locally owned
103 // both globally and on the level, we want to skip collecting pairs
104 // and later sorting them. Instead, we insert these indices into a
105 // plain vector
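 // The vector is indexed by the position of the global dof within
 // locally_owned_dofs(); the stored value is the corresponding level dof
 // index. Entries that are never touched keep the value
 // numbers::invalid_dof_index and are skipped when the pairs are built
 // below.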
106 std::vector<types::global_dof_index> unrolled_copy_indices;
107
108 copy_indices_level_mine[level].clear();
109 copy_indices_global_mine[level].clear();
110
111 for (const auto &level_cell :
112 dof_handler.active_cell_iterators_on_level(level))
113 {
114 if (dof_handler.get_triangulation().locally_owned_subdomain() !=
115 numbers::invalid_subdomain_id &&
116 (level_cell->level_subdomain_id() ==
117 numbers::artificial_subdomain_id ||
118 level_cell->subdomain_id() ==
119 numbers::artificial_subdomain_id))
120 continue;
121
122 unrolled_copy_indices.resize(
123 dof_handler.locally_owned_dofs().n_elements(),
124 numbers::invalid_dof_index);
125
126 // get the dof numbers of this cell for the global and the
127 // level-wise numbering
128 level_cell->get_dof_indices(global_dof_indices);
129 level_cell->get_mg_dof_indices(level_dof_indices);
130
131 for (unsigned int i = 0; i < dofs_per_cell; ++i)
132 {
133 // we need to ignore if the DoF is on a refinement edge
134 // (hanging node)
135 if (skip_interface_dofs && mg_constrained_dofs != nullptr &&
136 mg_constrained_dofs->at_refinement_edge(
137 level, level_dof_indices[i]))
138 continue;
139
140 // First check whether we own the active dof index and the
141 // level one. This check involves locally owned
142 // indices which often consist only of a single range, so
143 // they are cheap to look up.
144 bool global_mine =
145 dof_handler.locally_owned_dofs().is_element(
146 global_dof_indices[i]);
147 bool level_mine =
148 dof_handler.locally_owned_mg_dofs(level).is_element(
149 level_dof_indices[i]);
150
151 if (global_mine && level_mine)
152 {
153 // we own both the active dof index and the level one ->
154 // set them into the vector, indexed by the local index
155 // range of the active dof
156 unrolled_copy_indices[dof_handler.locally_owned_dofs()
157 .index_within_set(
158 global_dof_indices[i])] =
159 level_dof_indices[i];
160 }
161 else
162 {
163 // Get the relevant dofs index - this might be more
164 // expensive to look up than the active indices, so we
165 // only do it for the local-remote case within this loop.
166 const types::global_dof_index relevant_idx =
167 globally_relevant.index_within_set(
168 global_dof_indices[i]);
169
170 // Work on this dof if we haven't already (on this or a
171 // coarser level)
172 if (dof_touched[relevant_idx] == false)
173 {
174 if (global_mine)
175 {
176 copy_indices_global_mine[level].emplace_back(
177 global_dof_indices[i], level_dof_indices[i]);
178
179 // send this to the owner of the level_dof:
180 send_data_temp.emplace_back(level,
181 global_dof_indices[i],
182 level_dof_indices[i]);
183 }
184 else
185 {
186 // somebody will send those to me
187 }
188
189 dof_touched[relevant_idx] = true;
190 }
191 }
192 }
193 }
194
195 // we now translate the plain vector for the copy_indices field into
196 // the expected format of a pair of indices
197 if (!unrolled_copy_indices.empty())
198 {
199 copy_indices[level].clear();
200
201 // reserve the full length in case we did not hit global-mine
202 // indices, so we expect all indices to come into copy_indices
203 if (copy_indices_global_mine[level].empty())
204 copy_indices[level].reserve(unrolled_copy_indices.size());
205
206 // locally_owned_dofs().nth_index_in_set(i) in this query is
207 // usually cheap to look up as there are few ranges in
208 // dof_handler.locally_owned_dofs()
209 for (unsigned int i = 0; i < unrolled_copy_indices.size(); ++i)
210 if (unrolled_copy_indices[i] != numbers::invalid_dof_index)
211 copy_indices[level].emplace_back(
212 dof_handler.locally_owned_dofs().nth_index_in_set(i),
213 unrolled_copy_indices[i]);
214 }
215 }
216
217 const ::parallel::TriangulationBase<dim, spacedim> *tria =
218 (dynamic_cast<const ::parallel::TriangulationBase<dim, spacedim>
219 *>(&dof_handler.get_triangulation()));
220 AssertThrow(
221 send_data_temp.size() == 0 || tria != nullptr,
222 ExcMessage(
223 "We should only be sending information with a parallel Triangulation!"));
224
225#ifdef DEAL_II_WITH_MPI
226 if (tria && Utilities::MPI::sum(send_data_temp.size(),
227 tria->get_communicator()) > 0)
228 {
229 const std::set<types::subdomain_id> &neighbors =
230 tria->level_ghost_owners();
231 std::map<int, std::vector<DoFPair>> send_data;
232
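 // The same (level, global dof, level dof) triple may have been recorded
 // from several cells sharing a dof, so sort the collected entries and
 // remove duplicates before distributing them to their owners.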
233 std::sort(send_data_temp.begin(),
234 send_data_temp.end(),
235 [](const DoFPair &lhs, const DoFPair &rhs) {
236 if (lhs.level < rhs.level)
237 return true;
238 if (lhs.level > rhs.level)
239 return false;
240
241 if (lhs.level_dof_index < rhs.level_dof_index)
242 return true;
243 if (lhs.level_dof_index > rhs.level_dof_index)
244 return false;
245
246 if (lhs.global_dof_index < rhs.global_dof_index)
247 return true;
248 else
249 return false;
250 });
251 send_data_temp.erase(
252 std::unique(send_data_temp.begin(),
253 send_data_temp.end(),
254 [](const DoFPair &lhs, const DoFPair &rhs) {
255 return (lhs.level == rhs.level) &&
256 (lhs.level_dof_index == rhs.level_dof_index) &&
257 (lhs.global_dof_index == rhs.global_dof_index);
258 }),
259 send_data_temp.end());
260
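 // For each level, collect the level dofs we could not resolve locally,
 // ask Utilities::MPI::compute_index_owner() which rank owns each of
 // them, and bucket the corresponding DoFPair entries in send_data by
 // destination rank.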
261 for (unsigned int level = 0; level < n_levels; ++level)
262 {
263 const IndexSet &is_local =
264 dof_handler.locally_owned_mg_dofs(level);
265
266 std::vector<types::global_dof_index> level_dof_indices;
267 std::vector<types::global_dof_index> global_dof_indices;
268 for (const auto &dofpair : send_data_temp)
269 if (dofpair.level == level)
270 {
271 level_dof_indices.push_back(dofpair.level_dof_index);
272 global_dof_indices.push_back(dofpair.global_dof_index);
273 }
274
275 IndexSet is_ghost(is_local.size());
276 is_ghost.add_indices(level_dof_indices.begin(),
277 level_dof_indices.end());
278
279 AssertThrow(level_dof_indices.size() == is_ghost.n_elements(),
280 ExcMessage("Size does not match!"));
281
282 const auto index_owner =
283 Utilities::MPI::compute_index_owner(is_local,
284 is_ghost,
285 tria->get_communicator());
286
287 AssertThrow(level_dof_indices.size() == index_owner.size(),
288 ExcMessage("Size does not match!"));
289
290 for (unsigned int i = 0; i < index_owner.size(); i++)
291 send_data[index_owner[i]].emplace_back(level,
292 global_dof_indices[i],
293 level_dof_indices[i]);
294 }
295
296
297 // Protect the send/recv logic with a mutex:
298 static Utilities::MPI::CollectiveMutex mutex;
299 Utilities::MPI::CollectiveMutex::ScopedLock lock(
300 mutex, tria->get_communicator());
301
302 const int mpi_tag =
303 Utilities::MPI::internal::Tags::mg_transfer_fill_copy_indices;
304
305 // * send
306 std::vector<MPI_Request> requests;
307 {
308 for (const auto dest : neighbors)
309 {
310 requests.push_back(MPI_Request());
311 std::vector<DoFPair> &data = send_data[dest];
312 // If there is nothing to send, we still need to send a message,
313 // because the receiving end will be waiting. In that case we
314 // just send an empty message.
315 if (data.size())
316 {
317 const int ierr = MPI_Isend(data.data(),
318 data.size() * sizeof(data[0]),
319 MPI_BYTE,
320 dest,
321 mpi_tag,
322 tria->get_communicator(),
323 &*requests.rbegin());
324 AssertThrowMPI(ierr);
325 }
326 else
327 {
328 const int ierr = MPI_Isend(nullptr,
329 0,
330 MPI_BYTE,
331 dest,
332 mpi_tag,
333 tria->get_communicator(),
334 &*requests.rbegin());
335 AssertThrowMPI(ierr);
336 }
337 }
338 }
339
340 // * receive
341 {
342 // We should get one message from each of our neighbors
343 std::vector<DoFPair> receive_buffer;
344 for (unsigned int counter = 0; counter < neighbors.size();
345 ++counter)
346 {
347 MPI_Status status;
348 int ierr = MPI_Probe(MPI_ANY_SOURCE,
349 mpi_tag,
350 tria->get_communicator(),
351 &status);
352 AssertThrowMPI(ierr);
353 int len;
354 ierr = MPI_Get_count(&status, MPI_BYTE, &len);
355 AssertThrowMPI(ierr);
356
357 if (len == 0)
358 {
359 ierr = MPI_Recv(nullptr,
360 0,
361 MPI_BYTE,
362 status.MPI_SOURCE,
363 status.MPI_TAG,
364 tria->get_communicator(),
365 &status);
366 AssertThrowMPI(ierr);
367 continue;
368 }
369
370 int count = len / sizeof(DoFPair);
371 Assert(static_cast<int>(count * sizeof(DoFPair)) == len,
372 ExcInternalError());
373 receive_buffer.resize(count);
374
375 void *ptr = receive_buffer.data();
376 ierr = MPI_Recv(ptr,
377 len,
378 MPI_BYTE,
379 status.MPI_SOURCE,
380 status.MPI_TAG,
381 tria->get_communicator(),
382 &status);
383 AssertThrowMPI(ierr);
384
385 for (const auto &dof_pair : receive_buffer)
386 {
387 copy_indices_level_mine[dof_pair.level].emplace_back(
388 dof_pair.global_dof_index, dof_pair.level_dof_index);
389 }
390 }
391 }
392
393 // * wait for all MPI_Isend to complete
394 if (requests.size() > 0)
395 {
396 const int ierr = MPI_Waitall(requests.size(),
397 requests.data(),
398 MPI_STATUSES_IGNORE);
399 AssertThrowMPI(ierr);
400 requests.clear();
401 }
402# ifdef DEBUG
403 // Make sure in debug mode that everybody sent/received all packages
404 // on this level. If a deadlock occurs here, the list of expected
405 // senders is not computed correctly.
406 const int ierr = MPI_Barrier(tria->get_communicator());
407 AssertThrowMPI(ierr);
408# endif
409 }
410#endif
411
412 // Sort the indices, except the copy_indices which already are
413 // sorted. This will produce more reliable debug output for regression
414 // tests and won't hurt performance even in release mode because the
415 // non-owned indices are a small subset of all unknowns.
416 std::less<std::pair<types::global_dof_index, types::global_dof_index>>
417 compare;
418 for (auto &level_indices : copy_indices_level_mine)
419 std::sort(level_indices.begin(), level_indices.end(), compare);
420 for (auto &level_indices : copy_indices_global_mine)
421 std::sort(level_indices.begin(), level_indices.end(), compare);
422 }
423
424
425
426 // initialize the vectors needed for the transfer (and merge with the
427 // content in copy_indices_global_mine)
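 // Two outcomes are possible: if the externally given partitioner already
 // contains all ghost indices required here on every rank (checked with an
 // MPI minimum), it is reused and the level-local numbers already recorded
 // in copy_indices_global_mine are translated to its numbering; otherwise a
 // new Utilities::MPI::Partitioner is built from the locally owned range
 // plus the collected ghost indices.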
428 void
429 reinit_level_partitioner(
430 const IndexSet & locally_owned,
431 std::vector<types::global_dof_index> &ghosted_level_dofs,
432 const std::shared_ptr<const Utilities::MPI::Partitioner>
433 & external_partitioner,
434 const MPI_Comm & communicator,
435 std::shared_ptr<const Utilities::MPI::Partitioner> &target_partitioner,
436 Table<2, unsigned int> &copy_indices_global_mine)
437 {
438 std::sort(ghosted_level_dofs.begin(), ghosted_level_dofs.end());
439 IndexSet ghosted_dofs(locally_owned.size());
440 ghosted_dofs.add_indices(ghosted_level_dofs.begin(),
441 std::unique(ghosted_level_dofs.begin(),
442 ghosted_level_dofs.end()));
443 ghosted_dofs.compress();
444
445 // Add possible ghosts from the previous content in the vector
446 if (target_partitioner.get() != nullptr &&
447 target_partitioner->size() == locally_owned.size())
448 {
449 ghosted_dofs.add_indices(target_partitioner->ghost_indices());
450 }
451
452 // check if the given partitioner's ghosts represent a superset of the
453 // ghosts we require in this function
454 const int ghosts_locally_contained =
455 (external_partitioner.get() != nullptr &&
456 (external_partitioner->ghost_indices() & ghosted_dofs) ==
457 ghosted_dofs) ?
458 1 :
459 0;
460 if (external_partitioner.get() != nullptr &&
461 Utilities::MPI::min(ghosts_locally_contained, communicator) == 1)
462 {
463 // shift the local number of the copy indices according to the new
464 // partitioner that we are going to use during the access to the
465 // entries
466 if (target_partitioner.get() != nullptr &&
467 target_partitioner->size() == locally_owned.size())
468 for (unsigned int i = 0; i < copy_indices_global_mine.n_cols(); ++i)
469 copy_indices_global_mine(1, i) =
470 external_partitioner->global_to_local(
471 target_partitioner->local_to_global(
472 copy_indices_global_mine(1, i)));
473 target_partitioner = external_partitioner;
474 }
475 else
476 {
477 if (target_partitioner.get() != nullptr &&
478 target_partitioner->size() == locally_owned.size())
479 for (unsigned int i = 0; i < copy_indices_global_mine.n_cols(); ++i)
480 copy_indices_global_mine(1, i) =
481 locally_owned.n_elements() +
482 ghosted_dofs.index_within_set(
483 target_partitioner->local_to_global(
484 copy_indices_global_mine(1, i)));
485 target_partitioner.reset(new Utilities::MPI::Partitioner(
486 locally_owned, ghosted_dofs, communicator));
487 }
488 }
489
490
491
492 // Transform the ghost indices to local index space for the vector
493 inline void
494 copy_indices_to_mpi_local_numbers(
495 const Utilities::MPI::Partitioner & part,
496 const std::vector<types::global_dof_index> &mine,
497 const std::vector<types::global_dof_index> &remote,
498 std::vector<unsigned int> & localized_indices)
499 {
500 localized_indices.resize(mine.size() + remote.size(),
501 numbers::invalid_unsigned_int);
502 for (unsigned int i = 0; i < mine.size(); ++i)
503 if (mine[i] != numbers::invalid_dof_index)
504 localized_indices[i] = part.global_to_local(mine[i]);
505
506 for (unsigned int i = 0; i < remote.size(); ++i)
507 if (remote[i] != numbers::invalid_dof_index)
508 localized_indices[i + mine.size()] = part.global_to_local(remote[i]);
509 }
510
511
512
513 // given the collection of child cells in lexicographic ordering as seen
514 // from the parent, compute the first index of the given child
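 // For example, for FE_Q(1) in 2D one has fe_degree = 1 and fe_shift_1d = 1,
 // so n_child_dofs_1d = 3 and the four children (in lexicographic order)
 // start at the flattened indices 0, 1, 3 and 4 within the 3x3 patch of
 // child dofs.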
515 template <int dim>
516 unsigned int
517 compute_shift_within_children(const unsigned int child,
518 const unsigned int fe_shift_1d,
519 const unsigned int fe_degree)
520 {
521 // we put the degrees of freedom of all child cells in lexicographic
522 // ordering
523 unsigned int c_tensor_index[dim];
524 unsigned int tmp = child;
525 for (unsigned int d = 0; d < dim; ++d)
526 {
527 c_tensor_index[d] = tmp % 2;
528 tmp /= 2;
529 }
530 const unsigned int n_child_dofs_1d = fe_degree + 1 + fe_shift_1d;
531 unsigned int factor = 1;
532 unsigned int shift = fe_shift_1d * c_tensor_index[0];
533 for (unsigned int d = 1; d < dim; ++d)
534 {
535 factor *= n_child_dofs_1d;
536 shift = shift + factor * fe_shift_1d * c_tensor_index[d];
537 }
538 return shift;
539 }
540
541
542
543 // puts the indices on the given child cell in lexicographic ordering with
544 // respect to the collection of all child cells as seen from the parent
545 template <int dim>
546 void
547 add_child_indices(
548 const unsigned int child,
549 const unsigned int fe_shift_1d,
550 const unsigned int fe_degree,
551 const std::vector<unsigned int> & lexicographic_numbering,
552 const std::vector<types::global_dof_index> &local_dof_indices,
553 types::global_dof_index * target_indices)
554 {
555 const unsigned int n_child_dofs_1d = fe_degree + 1 + fe_shift_1d;
556 const unsigned int shift =
557 compute_shift_within_children<dim>(child, fe_shift_1d, fe_degree);
558 const unsigned int n_components =
559 local_dof_indices.size() / Utilities::fixed_power<dim>(fe_degree + 1);
560 types::global_dof_index *indices = target_indices + shift;
561 const unsigned int n_scalar_cell_dofs =
562 Utilities::fixed_power<dim>(n_child_dofs_1d);
563 for (unsigned int c = 0, m = 0; c < n_components; ++c)
564 for (unsigned int k = 0; k < (dim > 2 ? (fe_degree + 1) : 1); ++k)
565 for (unsigned int j = 0; j < (dim > 1 ? (fe_degree + 1) : 1); ++j)
566 for (unsigned int i = 0; i < (fe_degree + 1); ++i, ++m)
567 {
568 const unsigned int index =
569 c * n_scalar_cell_dofs +
570 k * n_child_dofs_1d * n_child_dofs_1d + j * n_child_dofs_1d +
571 i;
572 Assert(indices[index] == numbers::invalid_dof_index ||
573 indices[index] ==
574 local_dof_indices[lexicographic_numbering[m]],
575 ExcInternalError());
576 indices[index] = local_dof_indices[lexicographic_numbering[m]];
577 }
578 }
579
580
581
582 template <int dim, typename Number>
583 void
584 setup_element_info(ElementInfo<Number> &elem_info,
585 const FiniteElement<1> & fe,
586 const ::DoFHandler<dim> &dof_handler)
587 {
588 // currently, we have only FE_Q and FE_DGQ type elements implemented
589 elem_info.n_components = dof_handler.get_fe().element_multiplicity(0);
590 AssertDimension(Utilities::fixed_power<dim>(fe.n_dofs_per_cell()) *
591 elem_info.n_components,
592 dof_handler.get_fe().n_dofs_per_cell());
593 AssertDimension(fe.degree, dof_handler.get_fe().degree);
594 elem_info.fe_degree = fe.degree;
595 elem_info.element_is_continuous = fe.n_dofs_per_vertex() > 0;
596 Assert(fe.n_dofs_per_vertex() < 2, ExcNotImplemented());
597
598 // step 1.2: get renumbering of 1D basis functions to lexicographic
599 // numbers. The distinction according to fe.n_dofs_per_vertex() is to
600 // support both continuous and discontinuous bases.
601 std::vector<unsigned int> renumbering(fe.n_dofs_per_cell());
602 {
603 AssertIndexRange(fe.n_dofs_per_vertex(), 2);
604 renumbering[0] = 0;
605 for (unsigned int i = 0; i < fe.n_dofs_per_line(); ++i)
606 renumbering[i + fe.n_dofs_per_vertex()] =
607 GeometryInfo<1>::vertices_per_cell * fe.n_dofs_per_vertex() + i;
608 if (fe.n_dofs_per_vertex() > 0)
609 renumbering[fe.n_dofs_per_cell() - fe.n_dofs_per_vertex()] =
610 fe.n_dofs_per_vertex();
611 }
612
613 // step 1.3: create a dummy 1D quadrature formula to extract the
614 // lexicographic numbering for the elements
615 Assert(fe.n_dofs_per_vertex() == 0 || fe.n_dofs_per_vertex() == 1,
616 ExcNotImplemented());
617 const unsigned int shift = fe.n_dofs_per_cell() - fe.n_dofs_per_vertex();
618 const unsigned int n_child_dofs_1d =
619 (fe.n_dofs_per_vertex() > 0 ? (2 * fe.n_dofs_per_cell() - 1) :
620 (2 * fe.n_dofs_per_cell()));
621
622 elem_info.n_child_cell_dofs =
623 elem_info.n_components * Utilities::fixed_power<dim>(n_child_dofs_1d);
624 const Quadrature<1> dummy_quadrature(
625 std::vector<Point<1>>(1, Point<1>()));
626 internal::MatrixFreeFunctions::ShapeInfo<Number> shape_info;
627 shape_info.reinit(dummy_quadrature, dof_handler.get_fe(), 0);
628 elem_info.lexicographic_numbering = shape_info.lexicographic_numbering;
629
630 // step 1.4: get the 1d prolongation matrix and combine from both children
631 elem_info.prolongation_matrix_1d.resize(fe.n_dofs_per_cell() *
632 n_child_dofs_1d);
633
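 // The combined matrix is stored per parent shape function: entry
 // [i * n_child_dofs_1d + j + c * shift] is the weight with which parent
 // shape function i contributes to dof j of child c in the 1D lexicographic
 // numbering; for continuous elements the two children share the central
 // dof, which is why the second block starts at the offset 'shift'.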
634 for (unsigned int c = 0; c < GeometryInfo<1>::max_children_per_cell; ++c)
635 for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
636 for (unsigned int j = 0; j < fe.n_dofs_per_cell(); ++j)
637 elem_info
638 .prolongation_matrix_1d[i * n_child_dofs_1d + j + c * shift] =
639 fe.get_prolongation_matrix(c)(renumbering[j], renumbering[i]);
640 }
641
642
643
644 namespace
645 {
646 /**
647 * Helper function for setup_transfer. Checks for identity constrained
648 * dofs and replace with the indices of the dofs to which they are
649 * constrained
650 */
651 void
652 replace(const MGConstrainedDoFs * mg_constrained_dofs,
653 const unsigned int level,
654 std::vector<types::global_dof_index> &dof_indices)
655 {
656 if (mg_constrained_dofs != nullptr &&
657 mg_constrained_dofs->get_level_constraints(level).n_constraints() >
658 0)
659 for (auto &ind : dof_indices)
660 if (mg_constrained_dofs->get_level_constraints(level)
661 .is_identity_constrained(ind))
662 {
663 Assert(mg_constrained_dofs->get_level_constraints(level)
664 .get_constraint_entries(ind)
665 ->size() == 1,
666 ExcInternalError());
667 ind = mg_constrained_dofs->get_level_constraints(level)
668 .get_constraint_entries(ind)
669 ->front()
670 .first;
671 }
672 }
673 } // namespace
674
675
676
677 // Sets up most of the internal data structures of the MGTransferMatrixFree
678 // class
679 template <int dim, typename Number>
680 void
681 setup_transfer(
682 const ::DoFHandler<dim> &dof_handler,
683 const MGConstrainedDoFs * mg_constrained_dofs,
684 const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
685 & external_partitioners,
686 ElementInfo<Number> & elem_info,
687 std::vector<std::vector<unsigned int>> &level_dof_indices,
688 std::vector<std::vector<std::pair<unsigned int, unsigned int>>>
689 & parent_child_connect,
690 std::vector<unsigned int> &n_owned_level_cells,
691 std::vector<std::vector<std::vector<unsigned short>>> &dirichlet_indices,
692 std::vector<std::vector<Number>> & weights_on_refined,
693 std::vector<Table<2, unsigned int>> &copy_indices_global_mine,
694 MGLevelObject<std::shared_ptr<const Utilities::MPI::Partitioner>>
695 &target_partitioners)
696 {
697 level_dof_indices.clear();
698 parent_child_connect.clear();
699 n_owned_level_cells.clear();
700 dirichlet_indices.clear();
701 weights_on_refined.clear();
702
703 // we collect all child DoFs of a mother cell together. For faster
704 // tensorized operations, we align the degrees of freedom
705 // lexicographically. We distinguish FE_Q elements and FE_DGQ elements
706
707 const ::Triangulation<dim> &tria = dof_handler.get_triangulation();
708
709 // ---------------------------- 1. Extract 1D info about the finite
710 // element step 1.1: create a 1D copy of the finite element from FETools
711 // where we substitute the template argument
712 AssertDimension(dof_handler.get_fe().n_base_elements(), 1);
713 std::string fe_name = dof_handler.get_fe().base_element(0).get_name();
714 {
715 const std::size_t template_starts = fe_name.find_first_of('<');
716 Assert(fe_name[template_starts + 1] ==
717 (dim == 1 ? '1' : (dim == 2 ? '2' : '3')),
718 ExcInternalError());
719 fe_name[template_starts + 1] = '1';
720 }
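 // At this point a name like "FE_Q<2>(4)" has been turned into
 // "FE_Q<1>(4)", which FETools::get_fe_by_name() below uses to build the
 // 1D element of the same polynomial degree.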
721 const std::unique_ptr<FiniteElement<1>> fe(
722 FETools::get_fe_by_name<1, 1>(fe_name));
723
724 setup_element_info(elem_info, *fe, dof_handler);
725
726
727 // -------------- 2. Extract and match dof indices between child and
728 // parent
729 const unsigned int n_levels = tria.n_global_levels();
730 level_dof_indices.resize(n_levels);
731 parent_child_connect.resize(n_levels - 1);
732 n_owned_level_cells.resize(n_levels - 1);
733 std::vector<std::vector<unsigned int>> coarse_level_indices(n_levels - 1);
734 for (unsigned int level = 0;
735 level < std::min(tria.n_levels(), n_levels - 1);
736 ++level)
737 coarse_level_indices[level].resize(tria.n_raw_cells(level),
738 numbers::invalid_unsigned_int);
739 std::vector<types::global_dof_index> local_dof_indices(
740 dof_handler.get_fe().n_dofs_per_cell());
741 dirichlet_indices.resize(n_levels - 1);
742
743 AssertDimension(target_partitioners.max_level(), n_levels - 1);
744 Assert(external_partitioners.empty() ||
745 external_partitioners.size() == n_levels,
746 ExcDimensionMismatch(external_partitioners.size(), n_levels));
747
748 for (unsigned int level = n_levels - 1; level > 0; --level)
749 {
750 unsigned int counter = 0;
751 std::vector<types::global_dof_index> global_level_dof_indices;
752 std::vector<types::global_dof_index> global_level_dof_indices_remote;
753 std::vector<types::global_dof_index> ghosted_level_dofs;
754 std::vector<types::global_dof_index> global_level_dof_indices_l0;
755 std::vector<types::global_dof_index> ghosted_level_dofs_l0;
756
757 // step 2.1: loop over the cells on the coarse side
758 typename ::DoFHandler<dim>::cell_iterator cell,
759 endc = dof_handler.end(level - 1);
760 for (cell = dof_handler.begin(level - 1); cell != endc; ++cell)
761 {
762 // need to look into a cell if it has children and it is locally
763 // owned
764 if (!cell->has_children())
765 continue;
766
767 bool consider_cell =
768 (tria.locally_owned_subdomain() ==
769 numbers::invalid_subdomain_id ||
770 cell->level_subdomain_id() == tria.locally_owned_subdomain());
771
772 // due to the particular way we store DoF indices (via children),
773 // we also need to add the DoF indices for coarse cells where we
774 // own at least one child
775 const bool cell_is_remote = !consider_cell;
776 for (unsigned int c = 0;
777 c < GeometryInfo<dim>::max_children_per_cell;
778 ++c)
779 if (cell->child(c)->level_subdomain_id() ==
780 tria.locally_owned_subdomain())
781 {
782 consider_cell = true;
783 break;
784 }
785
786 if (!consider_cell)
787 continue;
788
789 // step 2.2: loop through children and append the dof indices to
790 // the appropriate list. We need separate lists for the owned
791 // coarse cell case (which will be part of
792 // restriction/prolongation between level-1 and level) and the
793 // remote case (which needs to store DoF indices for the
794 // operations between level and level+1).
795 AssertDimension(cell->n_children(),
796 GeometryInfo<dim>::max_children_per_cell);
797 std::vector<types::global_dof_index> &next_indices =
798 cell_is_remote ? global_level_dof_indices_remote :
799 global_level_dof_indices;
800 const std::size_t start_index = next_indices.size();
801 next_indices.resize(start_index + elem_info.n_child_cell_dofs,
802 numbers::invalid_dof_index);
803 for (unsigned int c = 0;
804 c < GeometryInfo<dim>::max_children_per_cell;
805 ++c)
806 {
807 if (cell_is_remote && cell->child(c)->level_subdomain_id() !=
808 tria.locally_owned_subdomain())
809 continue;
810 cell->child(c)->get_mg_dof_indices(local_dof_indices);
811
812 replace(mg_constrained_dofs, level, local_dof_indices);
813
814 const IndexSet &owned_level_dofs =
815 dof_handler.locally_owned_mg_dofs(level);
816 for (const auto local_dof_index : local_dof_indices)
817 if (!owned_level_dofs.is_element(local_dof_index))
818 ghosted_level_dofs.push_back(local_dof_index);
819
820 add_child_indices<dim>(c,
821 fe->n_dofs_per_cell() -
822 fe->n_dofs_per_vertex(),
823 fe->degree,
824 elem_info.lexicographic_numbering,
825 local_dof_indices,
826 &next_indices[start_index]);
827
828 // step 2.3 store the connectivity to the parent
829 if (cell->child(c)->has_children() &&
830 (tria.locally_owned_subdomain() ==
831 numbers::invalid_subdomain_id ||
832 cell->child(c)->level_subdomain_id() ==
833 tria.locally_owned_subdomain()))
834 {
835 const unsigned int child_index =
836 coarse_level_indices[level][cell->child(c)->index()];
837 AssertIndexRange(child_index,
838 parent_child_connect[level].size());
839 unsigned int parent_index = counter;
840 // remote cells, i.e., cells where we work on a further
841 // level but are not treated on the current level, need to
842 // be placed at the end of the list; however, we do not
843 // yet know the exact position in the array, so shift
844 // their parent index by the number of cells so we can set
845 // the correct number after the end of this loop
846 if (cell_is_remote)
847 parent_index =
848 start_index / elem_info.n_child_cell_dofs +
849 tria.n_cells(level);
850 parent_child_connect[level][child_index] =
851 std::make_pair(parent_index, c);
852 AssertIndexRange(dof_handler.get_fe().n_dofs_per_cell(),
853 static_cast<unsigned short>(-1));
854
855 // set Dirichlet boundary conditions (as a list of
856 // constrained DoFs) for the child
857 if (mg_constrained_dofs != nullptr)
858 for (unsigned int i = 0;
859 i < dof_handler.get_fe().n_dofs_per_cell();
860 ++i)
861 if (mg_constrained_dofs->is_boundary_index(
862 level,
863 local_dof_indices
864 [elem_info.lexicographic_numbering[i]]))
865 dirichlet_indices[level][child_index].push_back(i);
866 }
867 }
868 if (!cell_is_remote)
869 {
870 AssertIndexRange(static_cast<unsigned int>(cell->index()),
871 coarse_level_indices[level - 1].size());
872 coarse_level_indices[level - 1][cell->index()] = counter++;
873 }
874
875 // step 2.4: include indices for the coarsest cells. we still
876 // insert the indices as if they were from a child in order to use
877 // the same code (the coarsest level does not matter much in terms
878 // of memory, so we gain in code simplicity)
879 if (level == 1 && !cell_is_remote)
880 {
881 cell->get_mg_dof_indices(local_dof_indices);
882
883 replace(mg_constrained_dofs, level - 1, local_dof_indices);
884
885 const IndexSet &owned_level_dofs_l0 =
886 dof_handler.locally_owned_mg_dofs(0);
887 for (const auto local_dof_index : local_dof_indices)
888 if (!owned_level_dofs_l0.is_element(local_dof_index))
889 ghosted_level_dofs_l0.push_back(local_dof_index);
890
891 const std::size_t start_index =
892 global_level_dof_indices_l0.size();
893 global_level_dof_indices_l0.resize(
894 start_index + elem_info.n_child_cell_dofs,
895 numbers::invalid_dof_index);
896 add_child_indices<dim>(
897 0,
898 fe->n_dofs_per_cell() - fe->n_dofs_per_vertex(),
899 fe->degree,
900 elem_info.lexicographic_numbering,
901 local_dof_indices,
902 &global_level_dof_indices_l0[start_index]);
903
904 dirichlet_indices[0].emplace_back();
905 if (mg_constrained_dofs != nullptr)
906 for (unsigned int i = 0;
907 i < dof_handler.get_fe().n_dofs_per_cell();
908 ++i)
909 if (mg_constrained_dofs->is_boundary_index(
910 0,
911 local_dof_indices[elem_info
912 .lexicographic_numbering[i]]))
913 dirichlet_indices[0].back().push_back(i);
914 }
915 }
916
917 // step 2.5: store information about the current level and prepare the
918 // Dirichlet indices and parent-child relationship for the next
919 // coarser level
920 AssertDimension(counter * elem_info.n_child_cell_dofs,
921 global_level_dof_indices.size());
922 n_owned_level_cells[level - 1] = counter;
923 dirichlet_indices[level - 1].resize(counter);
924 parent_child_connect[level - 1].resize(
925 counter,
926 std::make_pair(numbers::invalid_unsigned_int,
927 numbers::invalid_unsigned_int));
928
929 // step 2.6: put the cells with remotely owned parent to the end of
930 // the list (these are needed for the transfer from level to level+1
931 // but not for the transfer from level-1 to level).
932 if (level < n_levels - 1)
933 for (std::vector<std::pair<unsigned int, unsigned int>>::iterator
934 i = parent_child_connect[level].begin();
935 i != parent_child_connect[level].end();
936 ++i)
937 if (i->first >= tria.n_cells(level))
938 {
939 i->first -= tria.n_cells(level);
940 i->first += counter;
941 }
942
943 // step 2.7: Initialize the partitioner for the ghosted vector
944 //
945 // We use a vector based on the target partitioner handed in also in
946 // the base class for keeping ghosted transfer indices. To avoid
947 // keeping two very similar vectors, we keep one single ghosted
948 // vector that is augmented/filled here.
949 reinit_level_partitioner(dof_handler.locally_owned_mg_dofs(level),
950 ghosted_level_dofs,
951 external_partitioners.empty() ?
952 nullptr :
953 external_partitioners[level],
954 tria.get_communicator(),
955 target_partitioners[level],
956 copy_indices_global_mine[level]);
957
958 copy_indices_to_mpi_local_numbers(*target_partitioners[level],
959 global_level_dof_indices,
960 global_level_dof_indices_remote,
961 level_dof_indices[level]);
962 // step 2.8: Initialize the ghosted vector for level 0
963 if (level == 1)
964 {
965 for (unsigned int i = 0; i < parent_child_connect[0].size(); ++i)
966 parent_child_connect[0][i] = std::make_pair(i, 0U);
967
968 reinit_level_partitioner(dof_handler.locally_owned_mg_dofs(0),
969 ghosted_level_dofs_l0,
970 external_partitioners.empty() ?
971 nullptr :
972 external_partitioners[0],
973 tria.get_communicator(),
974 target_partitioners[0],
975 copy_indices_global_mine[0]);
976
977 copy_indices_to_mpi_local_numbers(
978 *target_partitioners[0],
979 global_level_dof_indices_l0,
980 std::vector<types::global_dof_index>(),
981 level_dof_indices[0]);
982 }
983 }
984
985 // ---------------------- 3. compute weights to make restriction additive
986
987 const unsigned int n_child_dofs_1d =
988 fe->degree + 1 + fe->n_dofs_per_cell() - fe->n_dofs_per_vertex();
989
990 // get the valence of the individual components and compute the weights as
991 // the inverse of the valence
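 // Concretely, touch_count below ends up holding, for each level dof, the
 // number of cells on that level that contain it; storing 1/touch_count per
 // cell and dof makes the cell-by-cell restriction count every dof exactly
 // once.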
992 weights_on_refined.resize(n_levels - 1);
993 for (unsigned int level = 1; level < n_levels; ++level)
994 {
995 LinearAlgebra::distributed::Vector<Number> touch_count(
996 target_partitioners[level]);
997 for (unsigned int c = 0; c < n_owned_level_cells[level - 1]; ++c)
998 for (unsigned int j = 0; j < elem_info.n_child_cell_dofs; ++j)
999 touch_count.local_element(
1000 level_dof_indices[level][elem_info.n_child_cell_dofs * c +
1001 j]) += Number(1.);
1002 touch_count.compress(VectorOperation::add);
1003 touch_count.update_ghost_values();
1004
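 // degree_to_3 maps a 1D index within the patch of child dofs to
 // 0 (first), 1 (interior) or 2 (last); the three classifications per
 // coordinate direction select one of the 3^dim weight slots per cell
 // used below.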
1005 std::vector<unsigned int> degree_to_3(n_child_dofs_1d);
1006 degree_to_3[0] = 0;
1007 for (unsigned int i = 1; i < n_child_dofs_1d - 1; ++i)
1008 degree_to_3[i] = 1;
1009 degree_to_3.back() = 2;
1010
1011 // we only store 3^dim weights because all dofs on a line have the
1012 // same valence, and all dofs on a quad have the same valence.
1013 weights_on_refined[level - 1].resize(n_owned_level_cells[level - 1] *
1014 Utilities::fixed_power<dim>(3));
1015 for (unsigned int c = 0; c < n_owned_level_cells[level - 1]; ++c)
1016 for (unsigned int k = 0, m = 0; k < (dim > 2 ? n_child_dofs_1d : 1);
1017 ++k)
1018 for (unsigned int j = 0; j < (dim > 1 ? n_child_dofs_1d : 1); ++j)
1019 {
1020 unsigned int shift = 9 * degree_to_3[k] + 3 * degree_to_3[j];
1021 for (unsigned int i = 0; i < n_child_dofs_1d; ++i, ++m)
1022 weights_on_refined[level -
1023 1][c * Utilities::fixed_power<dim>(3) +
1024 shift + degree_to_3[i]] =
1025 Number(1.) /
1026 touch_count.local_element(
1027 level_dof_indices[level]
1028 [elem_info.n_child_cell_dofs * c + m]);
1029 }
1030 }
1031 }
1032
1033 } // namespace MGTransfer
1034} // namespace internal
1035
1036// Explicit instantiations
1037
1038#include "mg_transfer_internal.inst"
1039
1040DEAL_II_NAMESPACE_CLOSE