Reference documentation for deal.II version 9.0.0
dof_handler_policy.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 1998 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/geometry_info.h>
18 #include <deal.II/base/work_stream.h>
19 #include <deal.II/base/utilities.h>
20 #include <deal.II/base/memory_consumption.h>
21 #include <deal.II/base/thread_management.h>
22 #include <deal.II/base/partitioner.h>
23 #include <deal.II/grid/grid_tools.h>
24 #include <deal.II/grid/tria.h>
25 #include <deal.II/grid/tria_iterator.h>
26 #include <deal.II/dofs/dof_handler.h>
27 #include <deal.II/dofs/dof_accessor.h>
28 #include <deal.II/dofs/dof_handler_policy.h>
29 #include <deal.II/fe/fe.h>
30 #include <deal.II/distributed/shared_tria.h>
31 #include <deal.II/distributed/tria.h>
32 
33 #include <boost/archive/binary_oarchive.hpp>
34 #include <boost/archive/binary_iarchive.hpp>
35 #ifdef DEAL_II_WITH_ZLIB
36 #include <boost/iostreams/stream.hpp>
37 #include <boost/iostreams/filtering_stream.hpp>
38 #include <boost/iostreams/device/back_inserter.hpp>
39 #include <boost/iostreams/filter/gzip.hpp>
40 #include <boost/serialization/array.hpp>
41 #endif
42 
43 #include <memory>
44 #include <set>
45 #include <algorithm>
46 #include <numeric>
47 
48 DEAL_II_NAMESPACE_OPEN
49 
50 
51 namespace internal
52 {
53  namespace DoFHandlerImplementation
54  {
55  namespace Policy
56  {
 57  // use class dealii::DoFHandler instead
 58  // of namespace internal::DoFHandler in
 59  // the following
 60  using dealii::DoFHandler;
61 
62  namespace hp
63  {
 64  using dealii::hp::DoFHandler;
65  }
66 
67 
68  namespace
69  {
74  template <class DoFHandlerType>
75  void
76  update_all_active_cell_dof_indices_caches (const DoFHandlerType &dof_handler)
77  {
78  typename DoFHandlerType::active_cell_iterator
79  beginc = dof_handler.begin_active(),
80  endc = dof_handler.end();
81 
82  auto worker
83  = [] (const typename DoFHandlerType::active_cell_iterator &cell,
84  void *,
85  void *)
86  {
87  if (!cell->is_artificial())
88  cell->update_cell_dof_indices_cache ();
89  };
90 
91  // parallelize filling all of the cell caches. by using
92  // WorkStream, we make sure that we only run through the
93  // range of iterators once, whereas a parallel_for loop
94  // for example has to split the range multiple times,
95  // which is expensive because cell iterators are not
96  // random access iterators with a cheap operator-
97  WorkStream::run (beginc, endc,
98  worker,
99  /* copier */ std::function<void (void *)>(),
100  /* scratch_data */ nullptr,
101  /* copy_data */ nullptr,
 102  /* queue_length = */ 2*MultithreadInfo::n_threads(),
 103  /* chunk_size = */ 32);
104  }
105 
106 
111  template <class DoFHandlerType>
112  void
113  update_all_level_cell_dof_indices_caches (const DoFHandlerType &dof_handler)
114  {
115  typename DoFHandlerType::level_cell_iterator
116  beginc = dof_handler.begin(),
117  endc = dof_handler.end();
118 
119  auto worker
120  = [] (const typename DoFHandlerType::level_cell_iterator &cell,
121  void *,
122  void *)
123  {
124  if (cell->has_children()
125  || !cell->is_artificial())
126  cell->update_cell_dof_indices_cache ();
127  };
128 
129  // parallelize filling all of the cell caches. by using
130  // WorkStream, we make sure that we only run through the
131  // range of iterators once, whereas a parallel_for loop
132  // for example has to split the range multiple times,
133  // which is expensive because cell iterators are not
134  // random access iterators with a cheap operator-
135  WorkStream::run (beginc, endc,
136  worker,
137  /* copier */ std::function<void (void *)>(),
138  /* scratch_data */ nullptr,
139  /* copy_data */ nullptr,
 140  /* queue_length = */ 2*MultithreadInfo::n_threads(),
 141  /* chunk_size = */ 32);
142  }
143 
144 
145  typedef
146  std::vector<std::pair<unsigned int, unsigned int> > DoFIdentities;
147 
148 
159  template <int structdim, int dim, int spacedim>
160  void
161  ensure_existence_of_dof_identities (const FiniteElement<dim,spacedim> &fe1,
162  const FiniteElement<dim,spacedim> &fe2,
163  std::unique_ptr<DoFIdentities> &identities)
164  {
165  // see if we need to fill this entry, or whether it already
166  // exists
167  if (identities.get() == nullptr)
168  {
169  switch (structdim)
170  {
171  case 0:
172  {
173  identities = std_cxx14::make_unique<DoFIdentities>
174  (fe1.hp_vertex_dof_identities(fe2));
175  break;
176  }
177 
178  case 1:
179  {
180  identities = std_cxx14::make_unique<DoFIdentities>
181  (fe1.hp_line_dof_identities(fe2));
182  break;
183  }
184 
185  case 2:
186  {
187  identities = std_cxx14::make_unique<DoFIdentities>
188  (fe1.hp_quad_dof_identities(fe2));
189  break;
190  }
191 
192  default:
193  Assert (false, ExcNotImplemented());
194  }
195 
196  // double check whether the newly created entries make
197  // any sense at all
198  for (unsigned int i=0; i<identities->size(); ++i)
199  {
200  Assert ((*identities)[i].first < fe1.template n_dofs_per_object<structdim>(),
201  ExcInternalError());
202  Assert ((*identities)[i].second < fe2.template n_dofs_per_object<structdim>(),
203  ExcInternalError());
204  }
205  }
206  }
207 
208 
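Aside (editor's illustration, not part of this file): the identity information that ensure_existence_of_dof_identities() caches can also be queried directly from any two finite elements via the public FiniteElement interface. A minimal sketch, assuming deal.II is available; the element pair is chosen only for illustration:

#include <deal.II/fe/fe_q.h>
#include <utility>
#include <vector>

// each pair (i,j) in the returned list states that line dof i of fe_a and
// line dof j of fe_b denote the same degree of freedom on a shared line
dealii::FE_Q<2> fe_a (2);
dealii::FE_Q<2> fe_b (2);
const std::vector<std::pair<unsigned int, unsigned int> >
  line_identities = fe_a.hp_line_dof_identities (fe_b);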
209 
217  template <int dim, int spacedim, typename iterator>
218  unsigned int
219  get_most_dominating_fe_index (const iterator &object)
220  {
221  unsigned int dominating_fe_index = 0;
222  for (; dominating_fe_index<object->n_active_fe_indices();
223  ++dominating_fe_index)
224  {
225  const FiniteElement<dim, spacedim> &this_fe
226  = object->get_fe (object->nth_active_fe_index(dominating_fe_index));
227 
 228  FiniteElementDomination::Domination
 229  domination = FiniteElementDomination::either_element_can_dominate;
 230  for (unsigned int other_fe_index=0;
231  other_fe_index<object->n_active_fe_indices();
232  ++other_fe_index)
233  if (other_fe_index != dominating_fe_index)
234  {
 235  const FiniteElement<dim, spacedim>
 236  &that_fe
237  = object->get_fe (object->nth_active_fe_index(other_fe_index));
238 
239  domination = domination &
240  this_fe.compare_for_face_domination(that_fe);
241  }
242 
243  // see if this element is able to dominate all the other
244  // ones, and if so take it
 245  if ((domination == FiniteElementDomination::this_element_dominates)
 246  ||
 247  (domination == FiniteElementDomination::either_element_can_dominate)
 248  ||
 249  (domination == FiniteElementDomination::no_requirements))
 250  break;
251  }
252 
253  // check that we have found one such fe
254  if (dominating_fe_index != object->n_active_fe_indices())
255  {
256  // return the finite element index used on it. note that
257  // only a single fe can be active on such subfaces
258  return object->nth_active_fe_index(dominating_fe_index);
259  }
260  else
261  {
262  // if we couldn't find the most dominating object
 263  return numbers::invalid_unsigned_int;
 264  }
265  }
266  }
267 
268 
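Aside (editor's illustration, not part of this file): the accumulation above relies on the public FiniteElement::compare_for_face_domination() predicate. A small sketch, assuming deal.II is available; for FE_Q(1) against FE_Q(2) the lower-degree element dominates the shared face:

#include <deal.II/fe/fe_q.h>

dealii::FE_Q<2> fe_low (1);
dealii::FE_Q<2> fe_high (2);
const dealii::FiniteElementDomination::Domination d
  = fe_low.compare_for_face_domination (fe_high);
// d is FiniteElementDomination::this_element_dominates here, i.e. the shared
// face ends up carrying the lower-order representation of both spaces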
269 
270  struct Implementation
271  {
272 
273  /* -------------- distribute_dofs functionality ------------- */
274 
282  template <int spacedim>
 283  static
 284  types::global_dof_index
 285  distribute_dofs_on_cell (const DoFHandler<1,spacedim> &dof_handler,
 286  const typename DoFHandler<1,spacedim>::active_cell_iterator &cell,
 287  types::global_dof_index next_free_dof)
288  {
289 
290  // distribute dofs of vertices
291  if (dof_handler.get_fe().dofs_per_vertex > 0)
292  for (unsigned int v=0; v<GeometryInfo<1>::vertices_per_cell; ++v)
293  {
294  if (cell->vertex_dof_index (v,0) == numbers::invalid_dof_index)
295  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_vertex; ++d)
296  {
 297  Assert ((cell->vertex_dof_index (v,d) ==
 298  numbers::invalid_dof_index),
 299  ExcInternalError());
300  cell->set_vertex_dof_index (v, d, next_free_dof++);
301  }
302  else
303  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_vertex; ++d)
 304  Assert ((cell->vertex_dof_index (v,d) !=
 305  numbers::invalid_dof_index),
 306  ExcInternalError());
307  }
308 
309  // dofs of line
310  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_line; ++d)
311  cell->set_dof_index (d, next_free_dof++);
312 
313  return next_free_dof;
314  }
315 
316 
317 
318  template <int spacedim>
 319  static
 320  types::global_dof_index
 321  distribute_dofs_on_cell (const DoFHandler<2,spacedim> &dof_handler,
 322  const typename DoFHandler<2,spacedim>::active_cell_iterator &cell,
 323  types::global_dof_index next_free_dof)
324  {
325  if (dof_handler.get_fe().dofs_per_vertex > 0)
326  // number dofs on vertices
327  for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex)
328  // check whether dofs for this vertex have been distributed
329  // (checking the first dof should be good enough)
330  if (cell->vertex_dof_index(vertex, 0) == numbers::invalid_dof_index)
331  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_vertex; ++d)
332  cell->set_vertex_dof_index (vertex, d, next_free_dof++);
333 
334  // for the four sides
335  if (dof_handler.get_fe().dofs_per_line > 0)
336  for (unsigned int side=0; side<GeometryInfo<2>::faces_per_cell; ++side)
337  {
338  const typename DoFHandler<2,spacedim>::line_iterator
339  line = cell->line(side);
340 
341  // distribute dofs if necessary: check whether line dof is already
342  // numbered (checking the first dof should be good enough)
343  if (line->dof_index(0) == numbers::invalid_dof_index)
344  // if not: distribute dofs
345  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_line; ++d)
346  line->set_dof_index (d, next_free_dof++);
347  }
348 
349 
350  // dofs of quad
351  if (dof_handler.get_fe().dofs_per_quad > 0)
352  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_quad; ++d)
353  cell->set_dof_index (d, next_free_dof++);
354 
355  return next_free_dof;
356  }
357 
358 
359 
360  template <int spacedim>
 361  static
 362  types::global_dof_index
 363  distribute_dofs_on_cell (const DoFHandler<3,spacedim> &dof_handler,
 364  const typename DoFHandler<3,spacedim>::active_cell_iterator &cell,
 365  types::global_dof_index next_free_dof)
366  {
367  if (dof_handler.get_fe().dofs_per_vertex > 0)
368  // number dofs on vertices
369  for (unsigned int vertex=0; vertex<GeometryInfo<3>::vertices_per_cell; ++vertex)
370  // check whether dofs for this vertex have been distributed
371  // (checking the first dof should be good enough)
372  if (cell->vertex_dof_index(vertex, 0) == numbers::invalid_dof_index)
373  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_vertex; ++d)
374  cell->set_vertex_dof_index (vertex, d, next_free_dof++);
375 
376  // for the lines
377  if (dof_handler.get_fe().dofs_per_line > 0)
378  for (unsigned int l=0; l<GeometryInfo<3>::lines_per_cell; ++l)
379  {
380  const typename DoFHandler<3,spacedim>::line_iterator
381  line = cell->line(l);
382 
383  // distribute dofs if necessary: check whether line dof is already
384  // numbered (checking the first dof should be good enough)
385  if (line->dof_index(0) == numbers::invalid_dof_index)
386  // if not: distribute dofs
387  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_line; ++d)
388  line->set_dof_index (d, next_free_dof++);
389  }
390 
391  // for the quads
392  if (dof_handler.get_fe().dofs_per_quad > 0)
393  for (unsigned int q=0; q<GeometryInfo<3>::quads_per_cell; ++q)
394  {
395  const typename DoFHandler<3,spacedim>::quad_iterator
396  quad = cell->quad(q);
397 
398  // distribute dofs if necessary: check whether line dof is already
399  // numbered (checking the first dof should be good enough)
400  if (quad->dof_index(0) == numbers::invalid_dof_index)
401  // if not: distribute dofs
402  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_quad; ++d)
403  quad->set_dof_index (d, next_free_dof++);
404  }
405 
406 
407  // dofs of hex
408  if (dof_handler.get_fe().dofs_per_hex > 0)
409  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_hex; ++d)
410  cell->set_dof_index (d, next_free_dof++);
411 
412  return next_free_dof;
413  }
414 
415 
416 
417  // same for the hp::DoFHandler
418  template <int spacedim>
 419  static
 420  types::global_dof_index
 421  distribute_dofs_on_cell (const hp::DoFHandler<1,spacedim> &,
 422  const typename hp::DoFHandler<1,spacedim>::active_cell_iterator &cell,
 423  types::global_dof_index next_free_dof)
424  {
425  const unsigned int dim = 1;
426 
427  const FiniteElement<dim,spacedim> &fe = cell->get_fe();
428  const unsigned int fe_index = cell->active_fe_index ();
429 
430  // number dofs on vertices. to do so, check whether dofs for
431  // this vertex have been distributed and for the present fe
432  // (only check the first dof), and if this isn't the case
433  // distribute new ones there
434  if (fe.dofs_per_vertex > 0)
435  for (unsigned int vertex=0; vertex<GeometryInfo<1>::vertices_per_cell; ++vertex)
 436  if (cell->vertex_dof_index(vertex, 0, fe_index) ==
 437  numbers::invalid_dof_index)
 438  for (unsigned int d=0; d<fe.dofs_per_vertex; ++d, ++next_free_dof)
439  cell->set_vertex_dof_index (vertex, d, next_free_dof, fe_index);
440 
441  // finally for the line. this one shouldn't be numbered yet
442  if (fe.dofs_per_line > 0)
443  {
 444  Assert ((cell->dof_index(0, fe_index) ==
 445  numbers::invalid_dof_index),
 446  ExcInternalError());
447 
448  for (unsigned int d=0; d<fe.dofs_per_line; ++d, ++next_free_dof)
449  cell->set_dof_index (d, next_free_dof, fe_index);
450  }
451 
452  // note that this cell has been processed
453  cell->set_user_flag ();
454 
455  return next_free_dof;
456  }
457 
458 
459 
460  template <int spacedim>
 461  static
 462  types::global_dof_index
 463  distribute_dofs_on_cell (const hp::DoFHandler<2,spacedim> &,
 464  const typename hp::DoFHandler<2,spacedim>::active_cell_iterator &cell,
 465  types::global_dof_index next_free_dof)
466  {
467  const unsigned int dim = 2;
468 
469  const FiniteElement<dim,spacedim> &fe = cell->get_fe();
470  const unsigned int fe_index = cell->active_fe_index ();
471 
472  // number dofs on vertices. to do so, check whether dofs for
473  // this vertex have been distributed and for the present fe
474  // (only check the first dof), and if this isn't the case
475  // distribute new ones there
476  if (fe.dofs_per_vertex > 0)
477  for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex)
 478  if (cell->vertex_dof_index(vertex, 0, fe_index) ==
 479  numbers::invalid_dof_index)
 480  for (unsigned int d=0; d<fe.dofs_per_vertex; ++d, ++next_free_dof)
481  cell->set_vertex_dof_index (vertex, d, next_free_dof, fe_index);
482 
483  // next the sides. do the same as above: check whether the
484  // line is already numbered for the present fe_index, and if
485  // not do it
486  if (fe.dofs_per_line > 0)
487  for (unsigned int l=0; l<GeometryInfo<2>::lines_per_cell; ++l)
488  {
489  typename hp::DoFHandler<dim,spacedim>::line_iterator
490  line = cell->line(l);
491 
 492  if (line->dof_index(0,fe_index) ==
 493  numbers::invalid_dof_index)
 494  for (unsigned int d=0; d<fe.dofs_per_line; ++d, ++next_free_dof)
495  line->set_dof_index (d, next_free_dof, fe_index);
496  }
497 
498 
499  // finally for the quad. this one shouldn't be numbered yet
500  if (fe.dofs_per_quad > 0)
501  {
 502  Assert ((cell->dof_index(0, fe_index) ==
 503  numbers::invalid_dof_index),
 504  ExcInternalError());
505 
506  for (unsigned int d=0; d<fe.dofs_per_quad; ++d, ++next_free_dof)
507  cell->set_dof_index (d, next_free_dof, fe_index);
508  }
509 
510  // note that this cell has been processed
511  cell->set_user_flag ();
512 
513  return next_free_dof;
514  }
515 
516 
517 
518  template <int spacedim>
 519  static
 520  types::global_dof_index
 521  distribute_dofs_on_cell (const hp::DoFHandler<3,spacedim> &,
 522  const typename hp::DoFHandler<3,spacedim>::active_cell_iterator &cell,
 523  types::global_dof_index next_free_dof)
524  {
525  const unsigned int dim = 3;
526 
527  const FiniteElement<dim,spacedim> &fe = cell->get_fe();
528  const unsigned int fe_index = cell->active_fe_index ();
529 
530  // number dofs on vertices. to do so, check whether dofs for
531  // this vertex have been distributed and for the present fe
532  // (only check the first dof), and if this isn't the case
533  // distribute new ones there
534  if (fe.dofs_per_vertex > 0)
535  for (unsigned int vertex=0; vertex<GeometryInfo<3>::vertices_per_cell; ++vertex)
 536  if (cell->vertex_dof_index(vertex, 0, fe_index) ==
 537  numbers::invalid_dof_index)
 538  for (unsigned int d=0; d<fe.dofs_per_vertex; ++d, ++next_free_dof)
539  cell->set_vertex_dof_index (vertex, d, next_free_dof, fe_index);
540 
541  // next the four lines. do the same as above: check whether
542  // the line is already numbered for the present fe_index,
543  // and if not do it
544  if (fe.dofs_per_line > 0)
545  for (unsigned int l=0; l<GeometryInfo<3>::lines_per_cell; ++l)
546  {
547  typename hp::DoFHandler<dim,spacedim>::line_iterator
548  line = cell->line(l);
549 
 550  if (line->dof_index(0,fe_index) ==
 551  numbers::invalid_dof_index)
 552  for (unsigned int d=0; d<fe.dofs_per_line; ++d, ++next_free_dof)
553  line->set_dof_index (d, next_free_dof, fe_index);
554  }
555 
556  // same for quads
557  if (fe.dofs_per_quad > 0)
558  for (unsigned int q=0; q<GeometryInfo<3>::quads_per_cell; ++q)
559  {
560  typename hp::DoFHandler<dim,spacedim>::quad_iterator
561  quad = cell->quad(q);
562 
 563  if (quad->dof_index(0,fe_index) ==
 564  numbers::invalid_dof_index)
 565  for (unsigned int d=0; d<fe.dofs_per_quad; ++d, ++next_free_dof)
566  quad->set_dof_index (d, next_free_dof, fe_index);
567  }
568 
569 
570  // finally for the hex. this one shouldn't be numbered yet
571  if (fe.dofs_per_hex > 0)
572  {
 573  Assert ((cell->dof_index(0, fe_index) ==
 574  numbers::invalid_dof_index),
 575  ExcInternalError());
576 
577  for (unsigned int d=0; d<fe.dofs_per_hex; ++d, ++next_free_dof)
578  cell->set_dof_index (d, next_free_dof, fe_index);
579  }
580 
581  // note that this cell has been processed
582  cell->set_user_flag ();
583 
584  return next_free_dof;
585  }
586 
587 
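Aside (a minimal usage sketch, not part of this file): the hp variants of distribute_dofs_on_cell() above only come into play when an hp::DoFHandler carries more than one element. Assuming an existing Triangulation<2> object named triangulation:

#include <deal.II/fe/fe_q.h>
#include <deal.II/hp/dof_handler.h>
#include <deal.II/hp/fe_collection.h>

dealii::hp::FECollection<2> fe_collection;
fe_collection.push_back (dealii::FE_Q<2>(2));
fe_collection.push_back (dealii::FE_Q<2>(4));

dealii::hp::DoFHandler<2> dof_handler (triangulation);
for (const auto &cell : dof_handler.active_cell_iterators())
  cell->set_active_fe_index (cell->center()[0] < 0.5 ? 0 : 1);
dof_handler.distribute_dofs (fe_collection);   // runs the hp code paths above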
588 
593  template <int dim, int spacedim>
594  static
595  std::map<types::global_dof_index, types::global_dof_index>
596  compute_vertex_dof_identities (hp::DoFHandler<dim,spacedim> &dof_handler)
597  {
598  std::map<types::global_dof_index, types::global_dof_index> dof_identities;
599 
600  // Note: we may wish to have something here similar to what
601  // we do for lines and quads, namely that we only identify
602  // dofs for any fe towards the most dominating one. however,
603  // it is not clear whether this is actually necessary for
604  // vertices at all, I can't think of a finite element that
605  // would make that necessary...
 606  Table<2,std::unique_ptr<DoFIdentities> >
 607  vertex_dof_identities (dof_handler.get_fe_collection().size(),
608  dof_handler.get_fe_collection().size());
609 
610  // first identify vertices we want to exclude from working on.
611  // specifically, these are the vertices of artificial and ghost
612  // cells because at the time when we get here, we do not yet
613  // know DoF indices on ghost cells (and we will never know
614  // them for artificial cells). this is, at least the case for
615  // parallel::distributed::Triangulations.
616  //
617  // this means that we will not unify DoF indices between locally
618  // owned cells and ghost cells, and this is different from what
619  // we would do if the triangulation were not split into subdomains.
620  // on the other hand, DoF unification is only an optimization: we
621  // will still record these identities when we compute hanging
622  // node constraints; we just end up with more DoFs than we would
623  // if we unified DoF indices also between locally owned and ghost
624  // cells, but we end up with a simpler algorithm in return.
625  std::vector<bool> include_vertex = dof_handler.get_triangulation().get_used_vertices();
 626  if (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
 627  (&dof_handler.get_triangulation())
628  != nullptr)
629  for (const auto &cell : dof_handler.active_cell_iterators())
630  if (! cell->is_locally_owned())
631  for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
632  include_vertex[cell->vertex_index(v)] = false;
633 
634  // loop over all vertices and see which one we need to work
635  // on
636  for (unsigned int vertex_index=0; vertex_index<dof_handler.get_triangulation().n_vertices();
637  ++vertex_index)
638  if ((dof_handler.get_triangulation().get_used_vertices()[vertex_index] == true)
639  &&
640  (include_vertex[vertex_index] == true))
641  {
642  const unsigned int n_active_fe_indices
 643  = dealii::internal::DoFAccessorImplementation::Implementation::
644  n_active_vertex_fe_indices (dof_handler, vertex_index);
645  if (n_active_fe_indices > 1)
646  {
647  const unsigned int
648  first_fe_index
 649  = dealii::internal::DoFAccessorImplementation::Implementation::
650  nth_active_vertex_fe_index (dof_handler, vertex_index, 0);
651 
652  // loop over all the other FEs with which we want
653  // to identify the DoF indices of the first FE of
654  for (unsigned int f=1; f<n_active_fe_indices; ++f)
655  {
656  const unsigned int
657  other_fe_index
 658  = dealii::internal::DoFAccessorImplementation::Implementation::
659  nth_active_vertex_fe_index (dof_handler, vertex_index, f);
660 
661  // make sure the entry in the equivalence
662  // table exists
663  ensure_existence_of_dof_identities<0>
664  (dof_handler.get_fe(first_fe_index),
665  dof_handler.get_fe(other_fe_index),
666  vertex_dof_identities[first_fe_index][other_fe_index]);
667 
668  // then loop through the identities we
669  // have. first get the global numbers of the
670  // dofs we want to identify and make sure they
671  // are not yet constrained to anything else,
672  // except for to each other. use the rule that
673  // we will always constrain the dof with the
674  // higher fe index to the one with the lower,
675  // to avoid circular reasoning.
676  DoFIdentities &identities
677  = *vertex_dof_identities[first_fe_index][other_fe_index];
678  for (unsigned int i=0; i<identities.size(); ++i)
679  {
680  const types::global_dof_index lower_dof_index
 681  = dealii::internal::DoFAccessorImplementation::Implementation::
682  get_vertex_dof_index (dof_handler,
683  vertex_index,
684  first_fe_index,
685  identities[i].first);
686  const types::global_dof_index higher_dof_index
 687  = dealii::internal::DoFAccessorImplementation::Implementation::
688  get_vertex_dof_index (dof_handler,
689  vertex_index,
690  other_fe_index,
691  identities[i].second);
692 
693  Assert ((dof_identities.find(higher_dof_index) == dof_identities.end())
694  ||
695  (dof_identities[higher_dof_index] ==
696  lower_dof_index),
697  ExcInternalError());
698 
699  dof_identities[higher_dof_index] = lower_dof_index;
700  }
701  }
702  }
703  }
704 
705  return dof_identities;
706  }
707 
708 
713  template <int spacedim>
714  static
715  std::map<types::global_dof_index, types::global_dof_index>
716  compute_line_dof_identities (hp::DoFHandler<1,spacedim> &)
717  {
718  return std::map<types::global_dof_index, types::global_dof_index>();
719  }
720 
721 
722  template <int dim, int spacedim>
723  static
724  std::map<types::global_dof_index, types::global_dof_index>
725  compute_line_dof_identities (hp::DoFHandler<dim,spacedim> &dof_handler)
726  {
727  std::map<types::global_dof_index, types::global_dof_index> dof_identities;
728 
729  // we will mark lines that we have already treated, so first save and clear
730  // the user flags on lines and later restore them
731  std::vector<bool> user_flags;
732  dof_handler.get_triangulation().save_user_flags_line(user_flags);
 733  const_cast<dealii::Triangulation<dim,spacedim> &>(dof_handler.get_triangulation()).clear_user_flags_line ();
734 
735  // exclude lines that bound cells we don't locally own, because
736  // we do not have information about their dofs at this point.
737  // this is, at least the case for parallel::distributed::Triangulations.
738  //
739  // this means that we will not unify DoF indices between locally
740  // owned cells and ghost cells, and this is different from what
741  // we would do if the triangulation were not split into subdomains.
742  // on the other hand, DoF unification is only an optimization: we
743  // will still record these identities when we compute hanging
744  // node constraints; we just end up with more DoFs than we would
745  // if we unified DoF indices also between locally owned and ghost
746  // cells, but we end up with a simpler algorithm in return.
 747  if (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
 748  (&dof_handler.get_triangulation())
 749  != nullptr)
 750  for (typename hp::DoFHandler<dim,spacedim>::active_cell_iterator
 751  cell=dof_handler.begin_active();
 752  cell!=dof_handler.end(); ++cell)
753  if (cell->is_locally_owned() == false)
754  for (unsigned int l=0; l<GeometryInfo<dim>::lines_per_cell; ++l)
755  cell->line(l)->set_user_flag();
756 
757  // An implementation of the algorithm described in the hp paper, including
758  // the modification mentioned later in the "complications in 3-d" subsections
759  //
760  // as explained there, we do something only if there are exactly 2 finite
761  // elements associated with an object. if there is only one, then there is
762  // nothing to do anyway, and if there are 3 or more, then we can get into
763  // trouble. note that this only happens for lines in 3d and higher, and for
764  // quads only in 4d and higher, so this isn't a particularly frequent case
765  //
766  // there is one case, however, that we would like to handle (see, for
767  // example, the hp/crash_15 testcase): if we have FESystem(FE_Q(2),FE_DGQ(i))
768  // elements for a bunch of values 'i', then we should be able to handle this
 769  because we can simply unify *all* dofs, not only some. so what we do
770  // is to first treat all pairs of finite elements that have *identical* dofs,
 771  and then only deal with those that are not identical, of which we can
772  // handle at most 2
 773  Table<2,std::unique_ptr<DoFIdentities> >
 774  line_dof_identities (dof_handler.fe_collection.size(),
 775  dof_handler.fe_collection.size());
 776 
 777  for (typename hp::DoFHandler<dim,spacedim>::active_cell_iterator
 778  cell=dof_handler.begin_active();
 779  cell!=dof_handler.end(); ++cell)
780  for (unsigned int l=0; l<GeometryInfo<dim>::lines_per_cell; ++l)
781  if (cell->line(l)->user_flag_set() == false)
782  {
783  const typename hp::DoFHandler<dim,spacedim>::line_iterator line = cell->line(l);
784  line->set_user_flag ();
785 
786  unsigned int unique_sets_of_dofs
787  = line->n_active_fe_indices();
788 
789  // do a first loop over all sets of dofs and do identity
790  // uniquification
791  const unsigned int n_active_fe_indices = line->n_active_fe_indices();
792  for (unsigned int f=0; f<n_active_fe_indices; ++f)
793  for (unsigned int g=f+1; g<n_active_fe_indices; ++g)
794  {
795  const unsigned int fe_index_1 = line->nth_active_fe_index (f),
796  fe_index_2 = line->nth_active_fe_index (g);
797 
798  if ((dof_handler.get_fe(fe_index_1).dofs_per_line
799  ==
800  dof_handler.get_fe(fe_index_2).dofs_per_line)
801  &&
802  (dof_handler.get_fe(fe_index_1).dofs_per_line > 0))
803  {
804  ensure_existence_of_dof_identities<1>
805  (dof_handler.get_fe(fe_index_1),
806  dof_handler.get_fe(fe_index_2),
807  line_dof_identities[fe_index_1][fe_index_2]);
808  // see if these sets of dofs are identical. the first
809  // condition for this is that indeed there are n identities
810  if (line_dof_identities[fe_index_1][fe_index_2]->size()
811  ==
812  dof_handler.get_fe(fe_index_1).dofs_per_line)
813  {
814  unsigned int i=0;
815  for (; i<dof_handler.get_fe(fe_index_1).dofs_per_line; ++i)
816  if (((*(line_dof_identities[fe_index_1][fe_index_2]))[i].first != i)
817  &&
818  ((*(line_dof_identities[fe_index_1][fe_index_2]))[i].second != i))
819  // not an identity
820  break;
821 
822  if (i == dof_handler.get_fe(fe_index_1).dofs_per_line)
823  {
824  // The line dofs (i.e., the ones interior to a line) of these two finite elements are identical.
825  // Note that there could be situations when one element still dominates another, e.g.:
826  // FE_Q(2) x FE_Nothing(dominate) vs
827  // FE_Q(2) x FE_Q(1)
828 
829  --unique_sets_of_dofs;
830 
831  for (unsigned int j=0; j<dof_handler.get_fe(fe_index_1).dofs_per_line; ++j)
832  {
833  const types::global_dof_index master_dof_index
834  = line->dof_index (j, fe_index_1);
835  const types::global_dof_index slave_dof_index
836  = line->dof_index (j, fe_index_2);
837 
838  // if master dof was already constrained,
839  // constrain to that one, otherwise constrain
840  // slave to master
841  if (dof_identities.find(master_dof_index) != dof_identities.end())
842  {
843  Assert (dof_identities.find(dof_identities[master_dof_index])
844  == dof_identities.end(),
845  ExcInternalError());
846 
847  dof_identities[slave_dof_index]
848  = dof_identities[master_dof_index];
849  }
850  else
851  {
852  Assert ((dof_identities.find(master_dof_index) == dof_identities.end())
853  ||
854  (dof_identities[slave_dof_index] ==
855  master_dof_index),
856  ExcInternalError());
857 
858  dof_identities[slave_dof_index] = master_dof_index;
859  }
860  }
861  }
862  }
863  }
864  }
865 
866  // if at this point, there is only one unique set of dofs left, then
867  // we have taken care of everything above. if there are two, then we
868  // need to deal with them here. if there are more, then we punt, as
869  // described in the paper (and mentioned above)
870  //TODO: The check for 'dim==2' was inserted by intuition. It fixes
871  // the previous problems with @ref step_27 "step-27" in 3D. But an explanation
872  // for this is still required, and what we do here is not what we
 873  // describe in the paper!
874  if ((unique_sets_of_dofs == 2) && (dim == 2))
875  {
876  // find out which is the most dominating finite element of the
877  // ones that are used on this line
878  const unsigned int most_dominating_fe_index
879  = get_most_dominating_fe_index<dim,spacedim> (line);
880 
881  // if we found the most dominating element, then use this to eliminate some of
882  // the degrees of freedom by identification. otherwise, the code that computes
883  // hanging node constraints will have to deal with it by computing
884  // appropriate constraints along this face/edge
885  if (most_dominating_fe_index != numbers::invalid_unsigned_int)
886  {
887  const unsigned int n_active_fe_indices
888  = line->n_active_fe_indices ();
889 
890  // loop over the indices of all the finite elements that are not
891  // dominating, and identify their dofs to the most dominating
892  // one
893  for (unsigned int f=0; f<n_active_fe_indices; ++f)
894  if (line->nth_active_fe_index (f) !=
895  most_dominating_fe_index)
896  {
897  const unsigned int
898  other_fe_index = line->nth_active_fe_index (f);
899 
900  ensure_existence_of_dof_identities<1>
901  (dof_handler.get_fe(most_dominating_fe_index),
902  dof_handler.get_fe(other_fe_index),
903  line_dof_identities[most_dominating_fe_index][other_fe_index]);
904 
905  DoFIdentities &identities
906  = *line_dof_identities[most_dominating_fe_index][other_fe_index];
907  for (unsigned int i=0; i<identities.size(); ++i)
908  {
909  const types::global_dof_index master_dof_index
910  = line->dof_index (identities[i].first, most_dominating_fe_index);
911  const types::global_dof_index slave_dof_index
912  = line->dof_index (identities[i].second, other_fe_index);
913 
914  Assert ((dof_identities.find(master_dof_index) == dof_identities.end())
915  ||
916  (dof_identities[slave_dof_index] ==
917  master_dof_index),
918  ExcInternalError());
919 
920  dof_identities[slave_dof_index] = master_dof_index;
921  }
922  }
923  }
924  }
925  }
926 
927  // finally restore the user flags
 928  const_cast<dealii::Triangulation<dim,spacedim> &>(dof_handler.get_triangulation())
929  .load_user_flags_line(user_flags);
930 
931  return dof_identities;
932  }
933 
934 
935 
940  template <int dim, int spacedim>
941  static
942  std::map<types::global_dof_index, types::global_dof_index>
943  compute_quad_dof_identities (hp::DoFHandler<dim,spacedim> &)
944  {
945  // this function should only be called for dim<3 where there are
 946  // no quad dof identities. for dim>=3, the specialization below should
947  // take care of it
948  Assert (dim < 3, ExcInternalError());
949 
950  return std::map<types::global_dof_index, types::global_dof_index>();
951  }
952 
953 
954  template <int spacedim>
955  static
956  std::map<types::global_dof_index, types::global_dof_index>
957  compute_quad_dof_identities (hp::DoFHandler<3,spacedim> &dof_handler)
958  {
959  const int dim = 3;
960 
961  std::map<types::global_dof_index, types::global_dof_index> dof_identities;
962 
963 
964  // we will mark quads that we have already treated, so first
965  // save and clear the user flags on quads and later restore
966  // them
967  std::vector<bool> user_flags;
968  dof_handler.get_triangulation().save_user_flags_quad(user_flags);
 969  const_cast<dealii::Triangulation<dim,spacedim> &>(dof_handler.get_triangulation()).clear_user_flags_quad ();
970 
971  // exclude quads that bound cells we don't locally own, because
972  // we do not have information about their dofs at this point.
973  // this is, at least the case for parallel::distributed::Triangulations.
974  //
975  // this means that we will not unify DoF indices between locally
976  // owned cells and ghost cells, and this is different from what
977  // we would do if the triangulation were not split into subdomains.
978  // on the other hand, DoF unification is only an optimization: we
979  // will still record these identities when we compute hanging
980  // node constraints; we just end up with more DoFs than we would
981  // if we unified DoF indices also between locally owned and ghost
982  // cells, but we end up with a simpler algorithm in return.
 983  if (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
 984  (&dof_handler.get_triangulation())
 985  != nullptr)
 986  for (typename hp::DoFHandler<dim,spacedim>::active_cell_iterator
 987  cell=dof_handler.begin_active();
 988  cell!=dof_handler.end(); ++cell)
989  if (cell->is_locally_owned() == false)
990  for (unsigned int q=0; q<GeometryInfo<dim>::quads_per_cell; ++q)
991  cell->quad(q)->set_user_flag();
992 
993 
994  // An implementation of the algorithm described in the hp
995  // paper, including the modification mentioned later in the
996  // "complications in 3-d" subsections
997  //
998  // as explained there, we do something only if there are
999  // exactly 2 finite elements associated with an object. if
1000  // there is only one, then there is nothing to do anyway,
1001  // and if there are 3 or more, then we can get into
1002  // trouble. note that this only happens for lines in 3d and
1003  // higher, and for quads only in 4d and higher, so this
1004  // isn't a particularly frequent case
 1005  Table<2,std::unique_ptr<DoFIdentities> >
 1006  quad_dof_identities (dof_handler.fe_collection.size(),
 1007  dof_handler.fe_collection.size());
 1008 
 1009  for (typename hp::DoFHandler<dim,spacedim>::active_cell_iterator
 1010  cell=dof_handler.begin_active();
 1011  cell!=dof_handler.end(); ++cell)
1012  for (unsigned int q=0; q<GeometryInfo<dim>::quads_per_cell; ++q)
1013  if ((cell->quad(q)->user_flag_set() == false)
1014  &&
1015  (cell->quad(q)->n_active_fe_indices() == 2))
1016  {
1017  const typename hp::DoFHandler<dim,spacedim>::quad_iterator quad = cell->quad(q);
1018  quad->set_user_flag ();
1019 
1020  // find out which is the most dominating finite
1021  // element of the ones that are used on this quad
1022  const unsigned int most_dominating_fe_index
1023  = get_most_dominating_fe_index<dim,spacedim> (quad);
1024 
1025  // if we found the most dominating element, then use
1026  // this to eliminate some of the degrees of freedom
1027  // by identification. otherwise, the code that
1028  // computes hanging node constraints will have to
1029  // deal with it by computing appropriate constraints
1030  // along this face/edge
1031  if (most_dominating_fe_index != numbers::invalid_unsigned_int)
1032  {
1033  const unsigned int n_active_fe_indices
1034  = quad->n_active_fe_indices ();
1035 
1036  // loop over the indices of all the finite
1037  // elements that are not dominating, and
1038  // identify their dofs to the most dominating
1039  // one
1040  for (unsigned int f=0; f<n_active_fe_indices; ++f)
1041  if (quad->nth_active_fe_index (f) !=
1042  most_dominating_fe_index)
1043  {
1044  const unsigned int
1045  other_fe_index = quad->nth_active_fe_index (f);
1046 
1047  ensure_existence_of_dof_identities<2>
1048  (dof_handler.get_fe(most_dominating_fe_index),
1049  dof_handler.get_fe(other_fe_index),
1050  quad_dof_identities[most_dominating_fe_index][other_fe_index]);
1051 
1052  DoFIdentities &identities
1053  = *quad_dof_identities[most_dominating_fe_index][other_fe_index];
1054  for (unsigned int i=0; i<identities.size(); ++i)
1055  {
1056  const types::global_dof_index master_dof_index
1057  = quad->dof_index (identities[i].first, most_dominating_fe_index);
1058  const types::global_dof_index slave_dof_index
1059  = quad->dof_index (identities[i].second, other_fe_index);
1060 
1061  Assert ((dof_identities.find(master_dof_index) == dof_identities.end())
1062  ||
1063  (dof_identities[slave_dof_index] ==
1064  master_dof_index),
1065  ExcInternalError());
1066 
1067  dof_identities[slave_dof_index] = master_dof_index;
1068  }
1069  }
1070  }
1071  }
1072 
1073  // finally restore the user flags
 1074  const_cast<dealii::Triangulation<dim,spacedim> &>(dof_handler.get_triangulation())
1075  .load_user_flags_quad(user_flags);
1076 
1077  return dof_identities;
1078  }
1079 
1080 
1081 
1091  template <int dim, int spacedim>
1092  static
1093  unsigned int
1094  unify_dof_indices (const DoFHandler<dim,spacedim> &,
1095  const unsigned int n_dofs_before_identification,
1096  const bool)
1097  {
1098  return n_dofs_before_identification;
1099  }
1100 
1101 
1102 
1103  template <int dim, int spacedim>
1104  static
1105  unsigned int
1106  unify_dof_indices (hp::DoFHandler<dim,spacedim> &dof_handler,
1107  const unsigned int n_dofs_before_identification,
1108  const bool check_validity)
1109  {
1110  // compute the constraints that correspond to unifying
1111  // dof indices on vertices, lines, and quads. do so
1112  // in parallel
1113  std::map<types::global_dof_index, types::global_dof_index> all_constrained_indices[dim];
1114 
1115  {
1116  Threads::TaskGroup<> tasks;
1117 
1118  unsigned int i=0;
1119  tasks += Threads::new_task ([ &,i] ()
1120  {
1121  all_constrained_indices[i] = compute_vertex_dof_identities (dof_handler);
1122  });
1123 
1124  if (dim > 1)
1125  {
1126  ++i;
1127  tasks += Threads::new_task ([ &,i] ()
1128  {
1129  all_constrained_indices[i] = compute_line_dof_identities (dof_handler);
1130  });
1131  }
1132 
1133  if (dim > 2)
1134  {
1135  ++i;
1136  tasks += Threads::new_task ([ &,i] ()
1137  {
1138  all_constrained_indices[i] = compute_quad_dof_identities (dof_handler);
1139  });
1140  }
1141 
1142  tasks.join_all ();
1143  }
1144 
1145  // create a vector that contains the new DoF indices; first preset the
1146  // ones that are identities as determined above, then enumerate the rest
1147  std::vector<types::global_dof_index>
1148  new_dof_indices (n_dofs_before_identification, numbers::invalid_dof_index);
1149 
1150  for (const auto &constrained_dof_indices : all_constrained_indices)
1151  for (const auto &p : constrained_dof_indices)
1152  {
1153  Assert (new_dof_indices[p.first] == numbers::invalid_dof_index,
1154  ExcInternalError());
1155  new_dof_indices[p.first] = p.second;
1156  }
1157 
1158  types::global_dof_index next_free_dof = 0;
1159  for (types::global_dof_index i=0; i<n_dofs_before_identification; ++i)
1160  if (new_dof_indices[i] == numbers::invalid_dof_index)
1161  {
1162  new_dof_indices[i] = next_free_dof;
1163  ++next_free_dof;
1164  }
1165 
1166  // then loop over all those that are constrained and record the
1167  // new dof number for those:
1168  for (const auto &constrained_dof_indices : all_constrained_indices)
1169  for (const auto &p : constrained_dof_indices)
1170  {
1171  Assert (new_dof_indices[p.first] != numbers::invalid_dof_index,
1172  ExcInternalError());
1173 
1174  new_dof_indices[p.first] = new_dof_indices[p.second];
1175  }
1176 
1177  for (types::global_dof_index i=0; i<n_dofs_before_identification; ++i)
1178  {
1179  Assert (new_dof_indices[i] != numbers::invalid_dof_index,
1180  ExcInternalError());
1181  Assert (new_dof_indices[i] < next_free_dof,
1182  ExcInternalError());
1183  }
1184 
1185  // finally, do the renumbering. verify that previous dof indices
1186  // were indeed all valid on all cells that we touch if we were
1187  // told to do so
1188  renumber_dofs (new_dof_indices,
1189  IndexSet(0),
1190  dof_handler,
1191  check_validity);
1192 
1193 
1194  return next_free_dof;
1195  }
1196 
1197 
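Aside (editor's sketch in plain C++, with a made-up identity map): the renumbering strategy implemented by the unify_dof_indices() overloads that follow can be summarized in three passes: preset identified dofs to point at their representative, number all remaining dofs consecutively, then let the identified dofs inherit the new number of their representative.

#include <limits>
#include <map>
#include <vector>

int main ()
{
  const unsigned int invalid = std::numeric_limits<unsigned int>::max();
  const unsigned int n_dofs  = 6;
  // hypothetical identities: dof 4 equals dof 1, dof 5 equals dof 2
  const std::map<unsigned int, unsigned int> identities = {{4,1}, {5,2}};

  std::vector<unsigned int> new_index (n_dofs, invalid);
  for (const auto &p : identities)            // pass 1: mark identified dofs
    new_index[p.first] = p.second;

  unsigned int next_free = 0;
  for (unsigned int i=0; i<n_dofs; ++i)       // pass 2: number the remaining dofs
    if (new_index[i] == invalid)
      new_index[i] = next_free++;

  for (const auto &p : identities)            // pass 3: inherit the representative's number
    new_index[p.first] = new_index[p.second];

  // new_index is now {0,1,2,3,1,2}, and next_free==4 unified dofs remain
}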
1198 
1205  template <class DoFHandlerType>
 1206  static
 1207  types::global_dof_index
 1208  distribute_dofs (const types::subdomain_id subdomain_id,
1209  DoFHandlerType &dof_handler)
1210  {
1211  Assert (dof_handler.get_triangulation().n_levels() > 0,
1212  ExcMessage("Empty triangulation"));
1213 
1214  // Step 1: distribute dofs on all cells, but definitely
1215  // exclude artificial cells
1216  types::global_dof_index next_free_dof = 0;
1217  typename DoFHandlerType::active_cell_iterator
1218  cell = dof_handler.begin_active(),
1219  endc = dof_handler.end();
1220 
1221  for (; cell != endc; ++cell)
1222  if (! cell->is_artificial())
1223  if ((subdomain_id == numbers::invalid_subdomain_id)
1224  ||
1225  (cell->subdomain_id() == subdomain_id))
1226  next_free_dof
1227  = Implementation::distribute_dofs_on_cell (dof_handler,
1228  cell,
1229  next_free_dof);
1230 
1231  // Step 2: unify dof indices in case this is an hp DoFHandler
1232  //
1233  // during unification, we need to renumber DoF indices. there,
1234  // we can check that all previous DoF indices were valid, but
1235  // this only makes sense if we really distributed DoFs on
1236  // all (non-artificial) cells above
1237  next_free_dof = unify_dof_indices (dof_handler, next_free_dof,
1238  /* check_validity = */ (subdomain_id == numbers::invalid_subdomain_id));
1239 
1240  update_all_active_cell_dof_indices_caches (dof_handler);
1241 
1242  return next_free_dof;
1243  }
1244 
1245 
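Aside (a minimal usage sketch, not part of this file): distribute_dofs() above is the code that ultimately runs when a DoFHandler is handed a finite element, for example:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

dealii::Triangulation<2> triangulation;
dealii::GridGenerator::hyper_cube (triangulation);
triangulation.refine_global (3);

dealii::FE_Q<2>       fe (2);
dealii::DoFHandler<2> dof_handler (triangulation);
dof_handler.distribute_dofs (fe);   // dispatches to the active Policy, which
                                    // ends up in distribute_dofs() above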
1246 
1247  /* -------------- distribute_mg_dofs functionality ------------- */
1248 
1249 
1257  template <int dim, int spacedim>
 1258  static
 1259  types::global_dof_index
 1260  distribute_mg_dofs_on_cell (const typename DoFHandler<dim,spacedim>::level_cell_iterator &cell,
1261  types::global_dof_index next_free_dof,
1262  const std::integral_constant<int, 1> &)
1263  {
1264  // distribute dofs of vertices
1265  if (cell->get_fe().dofs_per_vertex > 0)
1266  for (unsigned int v=0; v<GeometryInfo<1>::vertices_per_cell; ++v)
1267  {
1268  typename DoFHandler<dim,spacedim>::level_cell_iterator neighbor = cell->neighbor(v);
1269 
1270  if (neighbor.state() == IteratorState::valid)
1271  {
1272  // has neighbor already been processed?
1273  if (neighbor->user_flag_set() &&
1274  (neighbor->level() == cell->level()))
1275  // copy dofs if the neighbor is on the same level (only then are
1276  // mg dofs the same)
1277  {
1278  if (v==0)
1279  for (unsigned int d=0; d<cell->get_fe().dofs_per_vertex; ++d)
1280  cell->set_mg_vertex_dof_index (cell->level(), 0, d,
1281  neighbor->mg_vertex_dof_index (cell->level(), 1, d));
1282  else
1283  for (unsigned int d=0; d<cell->get_fe().dofs_per_vertex; ++d)
1284  cell->set_mg_vertex_dof_index (cell->level(), 1, d,
1285  neighbor->mg_vertex_dof_index (cell->level(), 0, d));
1286 
1287  // next neighbor
1288  continue;
1289  }
1290  }
1291 
1292  // otherwise: create dofs newly
1293  for (unsigned int d=0; d<cell->get_fe().dofs_per_vertex; ++d)
1294  cell->set_mg_vertex_dof_index (cell->level(), v, d, next_free_dof++);
1295  }
1296 
1297  // dofs of line
1298  if (cell->get_fe().dofs_per_line > 0)
1299  for (unsigned int d=0; d<cell->get_fe().dofs_per_line; ++d)
1300  cell->set_mg_dof_index (cell->level(), d, next_free_dof++);
1301 
1302  // note that this cell has been processed
1303  cell->set_user_flag ();
1304 
1305  return next_free_dof;
1306  }
1307 
1308 
1309 
1310  template <int dim, int spacedim>
 1311  static
 1312  types::global_dof_index
 1313  distribute_mg_dofs_on_cell (const typename DoFHandler<dim,spacedim>::level_cell_iterator &cell,
1314  types::global_dof_index next_free_dof,
1315  const std::integral_constant<int, 2> &)
1316  {
1317  if (cell->get_fe().dofs_per_vertex > 0)
1318  // number dofs on vertices
1319  for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex)
1320  // check whether dofs for this
1321  // vertex have been distributed
1322  // (only check the first dof)
1323  if (cell->mg_vertex_dof_index(cell->level(), vertex, 0) == numbers::invalid_dof_index)
1324  for (unsigned int d=0; d<cell->get_fe().dofs_per_vertex; ++d)
1325  cell->set_mg_vertex_dof_index (cell->level(), vertex, d, next_free_dof++);
1326 
1327  // for the four sides
1328  if (cell->get_fe().dofs_per_line > 0)
1329  for (unsigned int side=0; side<GeometryInfo<2>::faces_per_cell; ++side)
1330  {
1331  typename DoFHandler<dim,spacedim>::line_iterator line = cell->line(side);
1332 
1333  // distribute dofs if necessary: check whether line dof is already
1334  // numbered (check only first dof)
1335  if (line->mg_dof_index(cell->level(), 0) == numbers::invalid_dof_index)
1336  // if not: distribute dofs
1337  for (unsigned int d=0; d<cell->get_fe().dofs_per_line; ++d)
1338  line->set_mg_dof_index (cell->level(), d, next_free_dof++);
1339  }
1340 
1341 
1342  // dofs of quad
1343  if (cell->get_fe().dofs_per_quad > 0)
1344  for (unsigned int d=0; d<cell->get_fe().dofs_per_quad; ++d)
1345  cell->set_mg_dof_index (cell->level(), d, next_free_dof++);
1346 
1347 
1348  // note that this cell has been processed
1349  cell->set_user_flag ();
1350 
1351  return next_free_dof;
1352  }
1353 
1354 
1355 
1356  template <int dim, int spacedim>
 1357  static
 1358  types::global_dof_index
 1359  distribute_mg_dofs_on_cell (const typename DoFHandler<dim,spacedim>::level_cell_iterator &cell,
1360  types::global_dof_index next_free_dof,
1361  const std::integral_constant<int, 3> &)
1362  {
1363  if (cell->get_fe().dofs_per_vertex > 0)
1364  // number dofs on vertices
1365  for (unsigned int vertex=0; vertex<GeometryInfo<3>::vertices_per_cell; ++vertex)
1366  // check whether dofs for this vertex have been distributed
1367  // (only check the first dof)
1368  if (cell->mg_vertex_dof_index(cell->level(), vertex, 0) == numbers::invalid_dof_index)
1369  for (unsigned int d=0; d<cell->get_fe().dofs_per_vertex; ++d)
1370  cell->set_mg_vertex_dof_index (cell->level(), vertex, d, next_free_dof++);
1371 
1372  // for the lines
1373  if (cell->get_fe().dofs_per_line > 0)
1374  for (unsigned int l=0; l<GeometryInfo<3>::lines_per_cell; ++l)
1375  {
1376  typename DoFHandler<dim,spacedim>::line_iterator line = cell->line(l);
1377 
1378  // distribute dofs if necessary:
1379  // check whether line dof is already
1380  // numbered (check only first dof)
1381  if (line->mg_dof_index(cell->level(), 0) == numbers::invalid_dof_index)
1382  // if not: distribute dofs
1383  for (unsigned int d=0; d<cell->get_fe().dofs_per_line; ++d)
1384  line->set_mg_dof_index (cell->level(), d, next_free_dof++);
1385  }
1386 
1387  // for the quads
1388  if (cell->get_fe().dofs_per_quad > 0)
1389  for (unsigned int q=0; q<GeometryInfo<3>::quads_per_cell; ++q)
1390  {
1391  typename DoFHandler<dim,spacedim>::quad_iterator quad = cell->quad(q);
1392 
1393  // distribute dofs if necessary:
1394  // check whether line dof is already
1395  // numbered (check only first dof)
1396  if (quad->mg_dof_index(cell->level(), 0) == numbers::invalid_dof_index)
1397  // if not: distribute dofs
1398  for (unsigned int d=0; d<cell->get_fe().dofs_per_quad; ++d)
1399  quad->set_mg_dof_index (cell->level(), d, next_free_dof++);
1400  }
1401 
1402 
1403  // dofs of cell
1404  if (cell->get_fe().dofs_per_hex > 0)
1405  for (unsigned int d=0; d<cell->get_fe().dofs_per_hex; ++d)
1406  cell->set_mg_dof_index (cell->level(), d, next_free_dof++);
1407 
1408 
1409  // note that this cell has been processed
1410  cell->set_user_flag ();
1411 
1412  return next_free_dof;
1413  }
1414 
1415 
1416 
1417  // same for the hp::DoFHandler
1418  template <int spacedim>
 1419  static
 1420  types::global_dof_index
 1421  distribute_mg_dofs_on_cell (const hp::DoFHandler<1,spacedim> &dof_handler,
 1422  const typename hp::DoFHandler<1,spacedim>::level_cell_iterator &cell,
 1423  types::global_dof_index next_free_dof)
1424  {
1425  (void)dof_handler;
1426  (void)cell;
1427  (void)next_free_dof;
1428  return 0;
1429  }
1430 
1431 
1432 
1433  template <int spacedim>
 1434  static
 1435  types::global_dof_index
 1436  distribute_mg_dofs_on_cell (const hp::DoFHandler<2,spacedim> &dof_handler,
 1437  const typename hp::DoFHandler<2,spacedim>::level_cell_iterator &cell,
 1438  types::global_dof_index next_free_dof)
1439  {
1440  (void)dof_handler;
1441  (void)cell;
1442  (void)next_free_dof;
1443  return 0;
1444  }
1445 
1446 
1447 
1448  template <int spacedim>
 1449  static
 1450  types::global_dof_index
 1451  distribute_mg_dofs_on_cell (const hp::DoFHandler<3,spacedim> &dof_handler,
 1452  const typename hp::DoFHandler<3,spacedim>::level_cell_iterator &cell,
 1453  types::global_dof_index next_free_dof)
1454  {
1455  (void)dof_handler;
1456  (void)cell;
1457  (void)next_free_dof;
1458  return 0;
1459  }
1460 
1461 
1462 
1463  template <class DoFHandlerType>
 1464  static
 1465  types::global_dof_index
 1466  distribute_dofs_on_level (const types::subdomain_id level_subdomain_id,
1467  DoFHandlerType &dof_handler,
1468  const unsigned int level)
1469  {
1470  const unsigned int dim = DoFHandlerType::dimension;
1471  const unsigned int spacedim = DoFHandlerType::space_dimension;
1472 
 1473  const dealii::Triangulation<dim,spacedim> &tria
1474  = dof_handler.get_triangulation();
1475  Assert (tria.n_levels() > 0, ExcMessage("Empty triangulation"));
1476  if (level>=tria.n_levels())
1477  return 0; //this is allowed for multigrid
1478 
1479  // Clear user flags because we will need them. But first we save
1480  // them and make sure that we restore them later such that at
1481  // the end of this function the Triangulation will be in the
1482  // same state as it was at the beginning of this function.
1483  std::vector<bool> user_flags;
1484  tria.save_user_flags(user_flags);
 1485  const_cast<dealii::Triangulation<dim,spacedim> &>(tria).clear_user_flags ();
1486 
1487  types::global_dof_index next_free_dof = 0;
1488  typename DoFHandler<dim,spacedim>::level_cell_iterator
1489  cell = dof_handler.begin(level),
1490  endc = dof_handler.end(level);
1491 
1492  for (; cell != endc; ++cell)
1493  if ((level_subdomain_id == numbers::invalid_subdomain_id)
1494  ||
1495  (cell->level_subdomain_id() == level_subdomain_id))
1496  next_free_dof
1497  = Implementation::distribute_mg_dofs_on_cell<dim,spacedim> (cell, next_free_dof,
1498  std::integral_constant<int, dim>());
1499 
1500  // finally restore the user flags
 1501  const_cast<dealii::Triangulation<dim,spacedim> &>(tria).load_user_flags(user_flags);
1502 
1503  return next_free_dof;
1504  }
1505 
1506 
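Aside (a usage sketch, not part of this file; assumes a mesh constructed for multilevel use): distribute_dofs_on_level() above is called once per level when level (multigrid) dofs are requested:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

dealii::Triangulation<2> triangulation
  (dealii::Triangulation<2>::limit_level_difference_at_vertices);
dealii::GridGenerator::hyper_cube (triangulation);
triangulation.refine_global (3);

dealii::FE_Q<2>       fe (1);
dealii::DoFHandler<2> dof_handler (triangulation);
dof_handler.distribute_dofs (fe);
dof_handler.distribute_mg_dofs (fe);   // one distribute_dofs_on_level() call per level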
1507 
1508  /* --------------------- renumber_dofs functionality ---------------- */
1509 
1510 
1518  template <int dim, int spacedim>
1519  static
1520  void
1521  renumber_vertex_dofs (const std::vector<types::global_dof_index> &new_numbers,
1522  const IndexSet &indices,
1523  DoFHandler<dim,spacedim> &dof_handler,
1524  const bool check_validity)
1525  {
1526  // we can not use cell iterators in this function since then
1527  // we would renumber the dofs on the interface of two cells
1528  // more than once. Anyway, this way it's not only more
1529  // correct but also faster; note, however, that dof numbers
1530  // may be invalid_dof_index, namely when the appropriate
1531  // vertex/line/etc is unused
1532  for (std::vector<types::global_dof_index>::iterator
1533  i=dof_handler.vertex_dofs.begin();
1534  i!=dof_handler.vertex_dofs.end(); ++i)
1535  if (*i != numbers::invalid_dof_index)
1536  *i = (indices.size() == 0)?
1537  (new_numbers[*i]) :
1538  (new_numbers[indices.index_within_set(*i)]);
1539  else if (check_validity)
1540  // if index is invalid_dof_index: check if this one
1541  // really is unused
1542  Assert (dof_handler.get_triangulation()
1543  .vertex_used((i-dof_handler.vertex_dofs.begin()) /
1544  dof_handler.get_fe().dofs_per_vertex)
1545  == false,
1546  ExcInternalError ());
1547  }
1548 
1549 
1550 
1558  template <int dim, int spacedim>
1559  static
1560  void
1561  renumber_cell_dofs (const std::vector<types::global_dof_index> &new_numbers,
1562  const IndexSet &indices,
1563  DoFHandler<dim,spacedim> &dof_handler)
1564  {
1565  for (unsigned int level=0; level<dof_handler.levels.size(); ++level)
1566  for (std::vector<types::global_dof_index>::iterator
1567  i=dof_handler.levels[level]->dof_object.dofs.begin();
1568  i!=dof_handler.levels[level]->dof_object.dofs.end(); ++i)
1569  if (*i != numbers::invalid_dof_index)
1570  *i = ((indices.size() == 0) ?
1571  new_numbers[*i] :
1572  new_numbers[indices.index_within_set(*i)]);
1573  }
1574 
1575 
1576 
1584  template <int spacedim>
1585  static
1586  void
1587  renumber_face_dofs (const std::vector<types::global_dof_index> &/*new_numbers*/,
1588  const IndexSet &/*indices*/,
1589  DoFHandler<1,spacedim> &/*dof_handler*/)
1590  {
1591  // nothing to do in 1d since there are no separate faces
1592  }
1593 
1594 
1595 
1596  template <int spacedim>
1597  static
1598  void
1599  renumber_face_dofs (const std::vector<types::global_dof_index> &new_numbers,
1600  const IndexSet &indices,
1601  DoFHandler<2,spacedim> &dof_handler)
1602  {
1603  // treat dofs on lines
1604  for (std::vector<types::global_dof_index>::iterator
1605  i=dof_handler.faces->lines.dofs.begin();
1606  i!=dof_handler.faces->lines.dofs.end(); ++i)
1607  if (*i != numbers::invalid_dof_index)
1608  *i = ((indices.size() == 0) ?
1609  new_numbers[*i] :
1610  new_numbers[indices.index_within_set(*i)]);
1611  }
1612 
1613 
1614 
1615  template <int spacedim>
1616  static
1617  void
1618  renumber_face_dofs (const std::vector<types::global_dof_index> &new_numbers,
1619  const IndexSet &indices,
1620  DoFHandler<3,spacedim> &dof_handler)
1621  {
1622  // treat dofs on lines
1623  for (std::vector<types::global_dof_index>::iterator
1624  i=dof_handler.faces->lines.dofs.begin();
1625  i!=dof_handler.faces->lines.dofs.end(); ++i)
1626  if (*i != numbers::invalid_dof_index)
1627  *i = ((indices.size() == 0) ?
1628  new_numbers[*i] :
1629  new_numbers[indices.index_within_set(*i)]);
1630 
1631  // treat dofs on quads
1632  for (std::vector<types::global_dof_index>::iterator
1633  i=dof_handler.faces->quads.dofs.begin();
1634  i!=dof_handler.faces->quads.dofs.end(); ++i)
1635  if (*i != numbers::invalid_dof_index)
1636  *i = ((indices.size() == 0) ?
1637  new_numbers[*i] :
1638  new_numbers[indices.index_within_set(*i)]);
1639  }
1640 
1641 
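Aside (a usage sketch, not part of this file; assumes dof_handler is an already enumerated DoFHandler): the renumber_vertex/cell/face_dofs helpers above, and their hp counterparts below, are what executes whenever a global renumbering is applied, for example:

#include <deal.II/dofs/dof_renumbering.h>

// computes a permutation of the existing dof indices and hands it to the
// policy's renumber_dofs(), which uses the helpers in this file
dealii::DoFRenumbering::Cuthill_McKee (dof_handler);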
1642 
1643  template <int dim, int spacedim>
1644  static
1645  void
1646  renumber_vertex_dofs (const std::vector<types::global_dof_index> &new_numbers,
1647  const IndexSet &indices,
1648  hp::DoFHandler<dim,spacedim> &dof_handler,
1649  const bool check_validity)
1650  {
1651  for (unsigned int vertex_index=0; vertex_index<dof_handler.get_triangulation().n_vertices();
1652  ++vertex_index)
1653  {
1654  const unsigned int n_active_fe_indices
 1655  = dealii::internal::DoFAccessorImplementation::Implementation::
1656  n_active_vertex_fe_indices (dof_handler, vertex_index);
1657 
1658  // if this vertex is unused, then we really ought not to have allocated
1659  // any space for it, i.e., n_active_fe_indices should be zero, and
1660  // there is no space to actually store dof indices for this vertex
1661  if (dof_handler.get_triangulation().vertex_used(vertex_index) == false)
1662  Assert (n_active_fe_indices == 0,
1663  ExcInternalError());
1664 
1665  // otherwise the vertex is used; it may still not hold any dof indices
1666  // if it is located on an artificial cell and not adjacent to a ghost
1667  // cell, but in that case there is simply nothing for us to do
1668  for (unsigned int f=0; f<n_active_fe_indices; ++f)
1669  {
1670  const unsigned int fe_index
 1671  = dealii::internal::DoFAccessorImplementation::Implementation::
1672  nth_active_vertex_fe_index (dof_handler, vertex_index, f);
1673 
1674  for (unsigned int d=0; d<dof_handler.get_fe(fe_index).dofs_per_vertex; ++d)
1675  {
1676  const types::global_dof_index old_dof_index
 1677  = dealii::internal::DoFAccessorImplementation::Implementation::
1678  get_vertex_dof_index(dof_handler,
1679  vertex_index,
1680  fe_index,
1681  d);
1682 
1683  // if check_validity was set, then we are to verify that the
1684  // previous indices were all valid. this really should be
1685  // the case: we allocated space for these vertex dofs,
1686  // i.e., at least one adjacent cell has a valid
1687  // active_fe_index, so there are DoFs that really live
1688  // on this vertex. if check_validity is set, then we
1689  // must make sure that they have been set to something
1690  // useful
1691  if (check_validity)
1692  Assert (old_dof_index != numbers::invalid_dof_index,
1693  ExcInternalError());
1694 
1695  if (old_dof_index != numbers::invalid_dof_index)
 1696  dealii::internal::DoFAccessorImplementation::Implementation::
1697  set_vertex_dof_index (dof_handler,
1698  vertex_index,
1699  fe_index,
1700  d,
1701  (indices.size() == 0)?
1702  (new_numbers[old_dof_index]) :
1703  (new_numbers[indices.index_within_set(old_dof_index)]));
1704  }
1705  }
1706  }
1707  }
1708 
1709 
1710 
1711  template <int dim, int spacedim>
1712  static
1713  void
1714  renumber_cell_dofs (const std::vector<types::global_dof_index> &new_numbers,
1715  const IndexSet &indices,
1716  hp::DoFHandler<dim,spacedim> &dof_handler)
1717  {
 1718  for (typename hp::DoFHandler<dim,spacedim>::active_cell_iterator
 1719  cell=dof_handler.begin_active();
 1720  cell!=dof_handler.end(); ++cell)
1721  if (!cell->is_artificial())
1722  {
1723  const unsigned int fe_index = cell->active_fe_index ();
1724 
1725  for (unsigned int d=0; d<dof_handler.get_fe(fe_index).template n_dofs_per_object<dim>(); ++d)
1726  {
1727  const types::global_dof_index old_dof_index = cell->dof_index(d,fe_index);
1728  if (old_dof_index != numbers::invalid_dof_index)
1729  cell->set_dof_index (d,
1730  (indices.size() == 0)?
1731  (new_numbers[old_dof_index]) :
1732  (new_numbers[indices.index_within_set(old_dof_index)]),
1733  fe_index);
1734  }
1735  }
1736  }
1737 
1738 
1739 
1740  template <int spacedim>
1741  static
1742  void
1743  renumber_face_dofs (const std::vector<types::global_dof_index> &/*new_numbers*/,
1744  const IndexSet &/*indices*/,
1745  hp::DoFHandler<1,spacedim> &/*dof_handler*/)
1746  {
1747  // nothing to do in 1d since there are no separate faces -- we've already
1748  // taken care of this when dealing with the vertices
1749  }
1750 
1751 
1752 
1753  template <int spacedim>
1754  static
1755  void
1756  renumber_face_dofs (const std::vector<types::global_dof_index> &new_numbers,
1757  const IndexSet &indices,
1758  hp::DoFHandler<2,spacedim> &dof_handler)
1759  {
1760  const unsigned int dim = 2;
1761 
1762  // deal with DoFs on lines
1763  {
1764  // save user flags on lines so we can use them to mark lines
1765  // we've already treated
1766  std::vector<bool> saved_line_user_flags;
1767  const_cast<::Triangulation<dim,spacedim>&>(dof_handler.get_triangulation())
1768  .save_user_flags_line (saved_line_user_flags);
1769  const_cast<::Triangulation<dim,spacedim>&>(dof_handler.get_triangulation())
1770  .clear_user_flags_line ();
1771 
1772  for (typename hp::DoFHandler<dim,spacedim>::active_cell_iterator
1773  cell = dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
1774  if (!cell->is_artificial())
1775  for (unsigned int l=0; l<GeometryInfo<dim>::lines_per_cell; ++l)
1776  if (cell->line(l)->user_flag_set() == false)
1777  {
1778  const typename hp::DoFHandler<dim,spacedim>::line_iterator line = cell->line(l);
1779  line->set_user_flag();
1780 
1781  const unsigned int n_active_fe_indices
1782  = line->n_active_fe_indices ();
1783 
1784  for (unsigned int f=0; f<n_active_fe_indices; ++f)
1785  {
1786  const unsigned int fe_index
1787  = line->nth_active_fe_index (f);
1788 
1789  for (unsigned int d=0; d<dof_handler.get_fe(fe_index).dofs_per_line; ++d)
1790  {
1791  const types::global_dof_index old_dof_index = line->dof_index(d,fe_index);
1792  if (old_dof_index != numbers::invalid_dof_index)
1793  line->set_dof_index (d,
1794  (indices.size() == 0)?
1795  (new_numbers[old_dof_index]) :
1796  (new_numbers[indices.index_within_set(old_dof_index)]),
1797  fe_index);
1798  }
1799  }
1800  }
1801 
1802  // at the end, restore the user
1803  // flags for the lines
1804  const_cast<::Triangulation<dim,spacedim>&>(dof_handler.get_triangulation())
1805  .load_user_flags_line (saved_line_user_flags);
1806  }
1807  }
1808 
1809 
1810 
1811  template <int spacedim>
1812  static
1813  void
1814  renumber_face_dofs (const std::vector<types::global_dof_index> &new_numbers,
1815  const IndexSet &indices,
1816  hp::DoFHandler<3,spacedim> &dof_handler)
1817  {
1818  const unsigned int dim = 3;
1819 
1820  // deal with DoFs on lines
1821  {
1822  // save user flags on lines so we can use them to mark lines
1823  // we've already treated
1824  std::vector<bool> saved_line_user_flags;
1825  const_cast<::Triangulation<dim,spacedim>&>(dof_handler.get_triangulation())
1826  .save_user_flags_line (saved_line_user_flags);
1827  const_cast<::Triangulation<dim,spacedim>&>(dof_handler.get_triangulation())
1828  .clear_user_flags_line ();
1829 
1830  for (typename hp::DoFHandler<dim,spacedim>::active_cell_iterator
1831  cell = dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
1832  if (!cell->is_artificial())
1833  for (unsigned int l=0; l<GeometryInfo<dim>::lines_per_cell; ++l)
1834  if (cell->line(l)->user_flag_set() == false)
1835  {
1836  const typename hp::DoFHandler<dim,spacedim>::line_iterator line = cell->line(l);
1837  line->set_user_flag();
1838 
1839  const unsigned int n_active_fe_indices
1840  = line->n_active_fe_indices ();
1841 
1842  for (unsigned int f=0; f<n_active_fe_indices; ++f)
1843  {
1844  const unsigned int fe_index
1845  = line->nth_active_fe_index (f);
1846 
1847  for (unsigned int d=0; d<dof_handler.get_fe(fe_index).dofs_per_line; ++d)
1848  {
1849  const types::global_dof_index old_dof_index = line->dof_index(d,fe_index);
1850  if (old_dof_index != numbers::invalid_dof_index)
1851  line->set_dof_index (d,
1852  (indices.size() == 0)?
1853  (new_numbers[old_dof_index]) :
1854  (new_numbers[indices.index_within_set(old_dof_index)]),
1855  fe_index);
1856  }
1857  }
1858  }
1859 
1860  // at the end, restore the user
1861  // flags for the lines
1862  const_cast<::Triangulation<dim,spacedim>&>(dof_handler.get_triangulation())
1863  .load_user_flags_line (saved_line_user_flags);
1864  }
1865 
1866  // then deal with dofs on quads
1867  {
1868  std::vector<bool> saved_quad_user_flags;
1869  const_cast<::Triangulation<dim,spacedim>&>(dof_handler.get_triangulation())
1870  .save_user_flags_quad (saved_quad_user_flags);
1871  const_cast<::Triangulation<dim,spacedim>&>(dof_handler.get_triangulation())
1872  .clear_user_flags_quad ();
1873 
1874  for (typename hp::DoFHandler<dim,spacedim>::active_cell_iterator
1875  cell = dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
1876  if (!cell->is_artificial())
1877  for (unsigned int q=0; q<GeometryInfo<dim>::quads_per_cell; ++q)
1878  if (cell->quad(q)->user_flag_set() == false)
1879  {
1880  const typename hp::DoFHandler<dim,spacedim>::quad_iterator quad = cell->quad(q);
1881  quad->set_user_flag();
1882 
1883  const unsigned int n_active_fe_indices
1884  = quad->n_active_fe_indices ();
1885 
1886  for (unsigned int f=0; f<n_active_fe_indices; ++f)
1887  {
1888  const unsigned int fe_index
1889  = quad->nth_active_fe_index (f);
1890 
1891  for (unsigned int d=0; d<dof_handler.get_fe(fe_index).dofs_per_quad; ++d)
1892  {
1893  const types::global_dof_index old_dof_index = quad->dof_index(d,fe_index);
1894  if (old_dof_index != numbers::invalid_dof_index)
1895  quad->set_dof_index (d,
1896  (indices.size() == 0)?
1897  (new_numbers[old_dof_index]) :
1898  (new_numbers[indices.index_within_set(old_dof_index)]),
1899  fe_index);
1900  }
1901  }
1902  }
1903 
1904  // at the end, restore the user flags for the quads
1905  const_cast<::Triangulation<dim,spacedim>&>(dof_handler.get_triangulation())
1906  .load_user_flags_quad (saved_quad_user_flags);
1907  }
1908  }
1909 
1910 
1911 
1912 
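 // renumber all DoF indices of a DoFHandler: if 'indices' is empty, the
 // i-th entry of 'new_numbers' is the new index of the DoF that currently
 // has index i; otherwise 'new_numbers' is indexed by the position of the
 // old index within 'indices'. vertex, face, and cell data live in
 // separate data structures and are handled by the functions defined above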
1924  template <class DoFHandlerType>
1925  static
1926  void
1927  renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
1928  const IndexSet &indices,
1929  DoFHandlerType &dof_handler,
1930  const bool check_validity)
1931  {
1932  if (DoFHandlerType::dimension == 1)
1933  Assert (indices == IndexSet(0), ExcNotImplemented());
1934 
1935  // renumber DoF indices on vertices, cells, and faces. this
1936  // can be done in parallel because the respective functions
1937  // work on separate data structures
1938  Threads::TaskGroup<> tasks;
1939  tasks += Threads::new_task ([&]()
1940  {
1941  renumber_vertex_dofs (new_numbers, indices, dof_handler, check_validity);
1942  });
1943  tasks += Threads::new_task ([&]()
1944  {
1945  renumber_face_dofs (new_numbers, indices, dof_handler);
1946  });
1947  tasks += Threads::new_task ([&]()
1948  {
1949  renumber_cell_dofs (new_numbers, indices, dof_handler);
1950  });
1951  tasks.join_all ();
1952 
1953  // update the cache used for cell dof indices
1954  update_all_active_cell_dof_indices_caches (dof_handler);
1955  }
1956 
1957 
1958 
1959  /* --------------------- renumber_mg_dofs functionality ---------------- */
1960 
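 // renumber the multigrid DoF indices that live on the vertices of the
 // given level, using the same convention for 'new_numbers' and 'indices'
 // as the functions above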
1968  template <int dim, int spacedim>
1969  static
1970  void
1971  renumber_vertex_mg_dofs (const std::vector<::types::global_dof_index> &new_numbers,
1972  const IndexSet &indices,
1973  DoFHandler<dim,spacedim> &dof_handler,
1974  const unsigned int level,
1975  const bool check_validity)
1976  {
1977  Assert (level<dof_handler.get_triangulation().n_levels(),
1978  ExcInternalError());
1979 
1980  for (typename std::vector<typename DoFHandler<dim,spacedim>::MGVertexDoFs>::iterator
1981  i=dof_handler.mg_vertex_dofs.begin();
1982  i!=dof_handler.mg_vertex_dofs.end();
1983  ++i)
1984  // if the present vertex lives on the current level
1985  if ((i->get_coarsest_level() <= level) &&
1986  (i->get_finest_level() >= level))
1987  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_vertex; ++d)
1988  {
1989  const ::types::global_dof_index idx
1990  = i->get_index (level, d, dof_handler.get_fe().dofs_per_vertex);
1991 
1992  if (check_validity)
1993  Assert(idx != numbers::invalid_dof_index, ExcInternalError ());
1994 
1995  if (idx != numbers::invalid_dof_index)
1996  i->set_index (level, d,
1997  dof_handler.get_fe().dofs_per_vertex,
1998  (indices.size() == 0)?
1999  (new_numbers[idx]) :
2000  (new_numbers[indices.index_within_set(idx)]));
2001  }
2002  }
2003 
2004 
2005 
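 // renumber the multigrid DoF indices stored on the cell interiors of the
 // given level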
2013  template <int dim, int spacedim>
2014  static
2015  void
2016  renumber_cell_mg_dofs (const std::vector<::types::global_dof_index> &new_numbers,
2017  const IndexSet &indices,
2018  DoFHandler<dim,spacedim> &dof_handler,
2019  const unsigned int level)
2020  {
2021  for (std::vector<types::global_dof_index>::iterator
2022  i=dof_handler.mg_levels[level]->dof_object.dofs.begin();
2023  i!=dof_handler.mg_levels[level]->dof_object.dofs.end();
2024  ++i)
2025  {
2026  if (*i != numbers::invalid_dof_index)
2027  {
2028  Assert((indices.size() > 0 ?
2029  indices.is_element(*i) :
2030  (*i<new_numbers.size())), ExcInternalError());
2031  *i = (indices.size() == 0)?
2032  (new_numbers[*i]) :
2033  (new_numbers[indices.index_within_set(*i)]);
2034  }
2035  }
2036  }
2037 
2038 
2039 
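 // renumber the multigrid DoF indices located on faces of the given
 // level; the 1d, 2d, and 3d cases are handled by the three overloads
 // that follow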
2047  template <int spacedim>
2048  static
2049  void
2050  renumber_face_mg_dofs (const std::vector<types::global_dof_index> &/*new_numbers*/,
2051  const IndexSet &/*indices*/,
2052  DoFHandler<1,spacedim> &/*dof_handler*/,
2053  const unsigned int /*level*/,
2054  const bool /*check_validity*/)
2055  {
2056  // nothing to do in 1d because there are no separate faces
2057  }
2058 
2059 
2060 
2061  template <int spacedim>
2062  static
2063  void
2064  renumber_face_mg_dofs (const std::vector<::types::global_dof_index> &new_numbers,
2065  const IndexSet &indices,
2066  DoFHandler<2,spacedim> &dof_handler,
2067  const unsigned int level,
2068  const bool check_validity)
2069  {
2070  if (dof_handler.get_fe().dofs_per_line > 0)
2071  {
2072  // save user flags as they will be modified
2073  std::vector<bool> user_flags;
2074  dof_handler.get_triangulation().save_user_flags(user_flags);
2075  const_cast<::Triangulation<2,spacedim> &>(dof_handler.get_triangulation()).clear_user_flags ();
2076 
2077  // flag all lines adjacent to cells of the current
2078  // level, as those lines logically belong to the same
2079  // level as the cell, at least for isotropic
2080  // refinement
2081  typename DoFHandler<2,spacedim>::level_cell_iterator cell,
2082  endc = dof_handler.end(level);
2083  for (cell = dof_handler.begin(level); cell != endc; ++cell)
2084  for (unsigned int line=0; line < GeometryInfo<2>::faces_per_cell; ++line)
2085  cell->face(line)->set_user_flag();
2086 
2087  for (typename DoFHandler<2,spacedim>::cell_iterator cell = dof_handler.begin();
2088  cell != dof_handler.end(); ++cell)
2089  for (unsigned int l=0; l<GeometryInfo<2>::lines_per_cell; ++l)
2090  if (cell->line(l)->user_flag_set())
2091  {
2092  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_line; ++d)
2093  {
2094  const ::types::global_dof_index idx = cell->line(l)->mg_dof_index(level, d);
2095  if (check_validity)
2096  Assert(idx != numbers::invalid_dof_index, ExcInternalError ());
2097 
2098  if (idx != numbers::invalid_dof_index)
2099  cell->line(l)->set_mg_dof_index (level, d, ((indices.size() == 0) ?
2100  new_numbers[idx] :
2101  new_numbers[indices.index_within_set(idx)]));
2102  }
2103  cell->line(l)->clear_user_flag();
2104  }
2105  // finally, restore user flags
2106  const_cast<::Triangulation<2,spacedim> &>(dof_handler.get_triangulation()).load_user_flags (user_flags);
2107  }
2108  }
2109 
2110 
2111 
2112  template <int spacedim>
2113  static
2114  void
2115  renumber_face_mg_dofs (const std::vector<::types::global_dof_index> &new_numbers,
2116  const IndexSet &indices,
2117  DoFHandler<3,spacedim> &dof_handler,
2118  const unsigned int level,
2119  const bool check_validity)
2120  {
2121  if (dof_handler.get_fe().dofs_per_line > 0 ||
2122  dof_handler.get_fe().dofs_per_quad > 0)
2123  {
2124  // save user flags as they will be modified
2125  std::vector<bool> user_flags;
2126  dof_handler.get_triangulation().save_user_flags(user_flags);
2127  const_cast<::Triangulation<3,spacedim> &>(dof_handler.get_triangulation()).clear_user_flags ();
2128 
2129  // flag all lines adjacent to cells of the current
2130  // level, as those lines logically belong to the same
2131  // level as the cell, at least for isotropic refinement
2132  typename DoFHandler<3,spacedim>::level_cell_iterator cell,
2133  endc = dof_handler.end(level);
2134  for (cell = dof_handler.begin(level); cell != endc; ++cell)
2135  for (unsigned int line=0; line < GeometryInfo<3>::lines_per_cell; ++line)
2136  cell->line(line)->set_user_flag();
2137 
2138  for (typename DoFHandler<3,spacedim>::cell_iterator cell = dof_handler.begin();
2139  cell != dof_handler.end(); ++cell)
2140  for (unsigned int l=0; l<GeometryInfo<3>::lines_per_cell; ++l)
2141  if (cell->line(l)->user_flag_set())
2142  {
2143  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_line; ++d)
2144  {
2145  const ::types::global_dof_index idx = cell->line(l)->mg_dof_index(level, d);
2146  if (check_validity)
2147  Assert(idx != numbers::invalid_dof_index, ExcInternalError ());
2148 
2149  if (idx != numbers::invalid_dof_index)
2150  cell->line(l)->set_mg_dof_index (level, d, ((indices.size() == 0) ?
2151  new_numbers[idx] :
2152  new_numbers[indices.index_within_set(idx)]));
2153  }
2154  cell->line(l)->clear_user_flag();
2155  }
2156 
2157  // flag all quads adjacent to cells of the current level, as
2158  // those quads logically belong to the same level as the cell,
2159  // at least for isotropic refinement
2160  for (cell = dof_handler.begin(level); cell != endc; ++cell)
2161  for (unsigned int quad=0; quad < GeometryInfo<3>::quads_per_cell; ++quad)
2162  cell->quad(quad)->set_user_flag();
2163 
2164  for (typename DoFHandler<3,spacedim>::cell_iterator cell = dof_handler.begin();
2165  cell != dof_handler.end(); ++cell)
2166  for (unsigned int l=0; l<GeometryInfo<3>::quads_per_cell; ++l)
2167  if (cell->quad(l)->user_flag_set())
2168  {
2169  for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_quad; ++d)
2170  {
2171  const ::types::global_dof_index idx = cell->quad(l)->mg_dof_index(level, d);
2172  if (check_validity)
2173  Assert(idx != numbers::invalid_dof_index, ExcInternalError ());
2174 
2175  if (idx != numbers::invalid_dof_index)
2176  cell->quad(l)->set_mg_dof_index (level, d, ((indices.size() == 0) ?
2177  new_numbers[idx] :
2178  new_numbers[indices.index_within_set(idx)]));
2179  }
2180  cell->quad(l)->clear_user_flag();
2181  }
2182 
2183  // finally, restore user flags
2184  const_cast<::Triangulation<3,spacedim> &>(dof_handler.get_triangulation()).load_user_flags (user_flags);
2185  }
2186  }
2187 
2188 
2189 
2190  template <int dim, int spacedim>
2191  static
2192  void
2193  renumber_mg_dofs (const std::vector<::types::global_dof_index> &new_numbers,
2194  const IndexSet &indices,
2195  DoFHandler<dim,spacedim> &dof_handler,
2196  const unsigned int level,
2197  const bool check_validity)
2198  {
2199  Assert (level<dof_handler.get_triangulation().n_global_levels(),
2200  ExcInternalError());
2201 
2202  // renumber DoF indices on vertices, cells, and faces. this
2203  // can be done in parallel because the respective functions
2204  // work on separate data structures
2205  Threads::TaskGroup<> tasks;
2206  tasks += Threads::new_task ([&]()
2207  {
2208  renumber_vertex_mg_dofs (new_numbers, indices, dof_handler, level, check_validity);
2209  });
2210  tasks += Threads::new_task ([&]()
2211  {
2212  renumber_face_mg_dofs (new_numbers, indices, dof_handler, level, check_validity);
2213  });
2214  tasks += Threads::new_task ([&]()
2215  {
2216  renumber_cell_mg_dofs (new_numbers, indices, dof_handler, level);
2217  });
2218  tasks.join_all ();
2219  }
2220 
2221 
2222 
2223  template <int dim, int spacedim>
2224  static
2225  void
2226  renumber_mg_dofs (const std::vector<::types::global_dof_index> &/*new_numbers*/,
2227  const IndexSet &/*indices*/,
2228  hp::DoFHandler<dim,spacedim> &/*dof_handler*/,
2229  const unsigned int /*level*/,
2230  const bool /*check_validity*/)
2231  {
2232  Assert (false, ExcNotImplemented());
2233  }
2234 
2235  };
2236 
2237 
2238 
2239  /* --------------------- class Sequential ---------------- */
2240 
2241 
2242 
2243  template <class DoFHandlerType>
2244  Sequential<DoFHandlerType>::
2245  Sequential (DoFHandlerType &dof_handler)
2246  :
2247  dof_handler (&dof_handler)
2248  {}
2249 
2250 
2251 
2252  template <class DoFHandlerType>
2253  NumberCache
2254  Sequential<DoFHandlerType>::
2255  distribute_dofs () const
2256  {
2257  const types::global_dof_index n_dofs =
2258  Implementation::distribute_dofs (numbers::invalid_subdomain_id,
2259  *dof_handler);
2260 
2261  // return a sequential, complete index set
2262  return NumberCache (n_dofs);
2263  }
2264 
2265 
2266 
2267  template <class DoFHandlerType>
2268  std::vector<NumberCache>
2269  Sequential<DoFHandlerType>::
2270  distribute_mg_dofs () const
2271  {
2272  std::vector<bool> user_flags;
2273  dof_handler->get_triangulation().save_user_flags (user_flags);
2274 
2275  const_cast<::Triangulation<DoFHandlerType::dimension, DoFHandlerType::space_dimension>&>
2276  (dof_handler->get_triangulation()).clear_user_flags ();
2277 
2278  std::vector<NumberCache> number_caches;
2279  number_caches.reserve (dof_handler->get_triangulation().n_levels());
2280  for (unsigned int level = 0; level < dof_handler->get_triangulation().n_levels(); ++level)
2281  {
2282  // first distribute dofs on this level
2283  const types::global_dof_index n_level_dofs
2284  = Implementation::distribute_dofs_on_level(numbers::invalid_subdomain_id,
2285  *dof_handler, level);
2286 
2287  // then add a complete, sequential index set
2288  number_caches.emplace_back (NumberCache(n_level_dofs));
2289  }
2290 
2291  const_cast<::Triangulation<DoFHandlerType::dimension, DoFHandlerType::space_dimension>&>
2292  (dof_handler->get_triangulation()).load_user_flags (user_flags);
2293 
2294  return number_caches;
2295  }
2296 
2297 
2298 
2299  template <class DoFHandlerType>
2300  NumberCache
2301  Sequential<DoFHandlerType>::
2302  renumber_dofs (const std::vector<types::global_dof_index> &new_numbers) const
2303  {
2304  Implementation::renumber_dofs (new_numbers, IndexSet(0),
2305  *dof_handler, true);
2306 
2307  // return a sequential, complete index set. take into account that the
2308  // number of DoF indices may in fact be smaller than there were before
2309  // if some previously separately numbered dofs have been identified.
2310  // this is, for example, what the hp::DoFHandler does: it first
2311  // enumerates all DoFs on cells independently, and then unifies
2312  // some located at vertices or faces; this leaves us with fewer
2313  // DoFs than there were before, so use the largest index as
2314  // the one to determine the size of the index space
2315  return NumberCache (*std::max_element(new_numbers.begin(),
2316  new_numbers.end()) + 1);
2317  }
2318 
2319 
2320 
2321  template <class DoFHandlerType>
2322  NumberCache
2323  Sequential<DoFHandlerType>::
2324  renumber_mg_dofs (const unsigned int level,
2325  const std::vector<types::global_dof_index> &new_numbers) const
2326  {
2327  Implementation::renumber_mg_dofs (new_numbers, IndexSet(0),
2328  *dof_handler, level, true);
2329 
2330  // return a sequential, complete index set
2331  return NumberCache (new_numbers.size());
2332  }
2333 
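 // to put the functions above in context: for a plain Triangulation, a
 // DoFHandler selects this Sequential policy, so a purely illustrative
 // user-side sequence such as
 //
 //   Triangulation<2>  triangulation;
 //   GridGenerator::hyper_cube (triangulation);
 //   triangulation.refine_global (3);
 //
 //   FE_Q<2>           fe (1);
 //   DoFHandler<2>     dof_handler (triangulation);
 //   dof_handler.distribute_dofs (fe);
 //   DoFRenumbering::Cuthill_McKee (dof_handler);
 //
 // ends up in Sequential::distribute_dofs() and, via
 // DoFHandler::renumber_dofs(), in Sequential::renumber_dofs() above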
2334 
2335  /* --------------------- class ParallelShared ---------------- */
2336 
2337 
2338  template <class DoFHandlerType>
2339  ParallelShared<DoFHandlerType>::
2340  ParallelShared (DoFHandlerType &dof_handler)
2341  :
2342  dof_handler (&dof_handler)
2343  {}
2344 
2345 
2346 
2347  namespace
2348  {
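 // compute, for every currently enumerated degree of freedom, which
 // subdomain (i.e., which MPI process) it is associated with; DoFs on
 // interfaces between subdomains are given to the process with the
 // smaller subdomain id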
2357  template <class DoFHandlerType>
2358  std::vector<types::subdomain_id>
2359  get_dof_subdomain_association (const DoFHandlerType &dof_handler,
2360  const types::global_dof_index n_dofs,
2361  const unsigned int n_procs)
2362  {
2363  (void)n_procs;
2364  std::vector<types::subdomain_id> subdomain_association (n_dofs,
2365  numbers::invalid_subdomain_id);
2366  std::vector<types::global_dof_index> local_dof_indices;
2367  local_dof_indices.reserve (DoFTools::max_dofs_per_cell(dof_handler));
2368 
2369  // loop over all cells and record which subdomain a DoF belongs to.
2370  // give to the smaller subdomain_id in case it is on an interface
2371  typename DoFHandlerType::active_cell_iterator
2372  cell = dof_handler.begin_active(),
2373  endc = dof_handler.end();
2374  for (; cell!=endc; ++cell)
2375  {
2376  // get the owner of the cell; note that we have made sure above that
2377  // all cells are either locally owned or ghosts (not artificial), so
2378  // this call will always yield the true owner
2379  const types::subdomain_id subdomain_id = cell->subdomain_id();
2380  const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
2381  local_dof_indices.resize (dofs_per_cell);
2382  cell->get_dof_indices (local_dof_indices);
2383 
2384  // set subdomain ids. if dofs already have their values set then
2385  // they must be on partition interfaces. In that case assign them to the
2386  // processor with the smaller subdomain id.
2387  for (unsigned int i=0; i<dofs_per_cell; ++i)
2388  if (subdomain_association[local_dof_indices[i]] ==
2389  numbers::invalid_subdomain_id)
2390  subdomain_association[local_dof_indices[i]] = subdomain_id;
2391  else if (subdomain_association[local_dof_indices[i]] > subdomain_id)
2392  {
2393  subdomain_association[local_dof_indices[i]] = subdomain_id;
2394  }
2395  }
2396 
2397  Assert (std::find (subdomain_association.begin(),
2398  subdomain_association.end(),
2399  numbers::invalid_subdomain_id)
2400  == subdomain_association.end(),
2401  ExcInternalError());
2402 
2403  Assert (*std::max_element (subdomain_association.begin(),
2404  subdomain_association.end())
2405  < n_procs,
2406  ExcInternalError());
2407 
2408  return subdomain_association;
2409  }
2410 
2411 
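 // same idea as the function above, but for the multigrid DoFs of a
 // single level, using the cells' level subdomain ids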
2418  template <class DoFHandlerType>
2419  std::vector<types::subdomain_id>
2420  get_dof_level_subdomain_association (const DoFHandlerType &dof_handler,
2421  const types::global_dof_index n_dofs_on_level,
2422  const unsigned int n_procs,
2423  const unsigned int level)
2424  {
2425  (void)n_procs;
2426  std::vector<types::subdomain_id> level_subdomain_association (n_dofs_on_level,
2427  numbers::invalid_subdomain_id);
2428  std::vector<types::global_dof_index> local_dof_indices;
2429  local_dof_indices.reserve (DoFTools::max_dofs_per_cell(dof_handler));
2430 
2431  // loop over all cells and record which subdomain a DoF belongs to.
2432  // interface DoFs go to the processor with the smaller subdomain id
2433  typename DoFHandlerType::cell_iterator
2434  cell = dof_handler.begin(level),
2435  endc = dof_handler.end(level);
2436  for (; cell!=endc; ++cell)
2437  {
2438  // get the owner of the cell; note that we have made sure above that
2439  // all cells are either locally owned or ghosts (not artificial), so
2440  // this call will always yield the true owner
2441  const types::subdomain_id level_subdomain_id = cell->level_subdomain_id();
2442  const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
2443  local_dof_indices.resize (dofs_per_cell);
2444  cell->get_mg_dof_indices (local_dof_indices);
2445 
2446  // set level subdomain ids. if dofs already have their values set then
2447  // they must be on partition interfaces. In that case assign them to the
2448  // processor with the smaller subdomain id.
2449  for (unsigned int i=0; i<dofs_per_cell; ++i)
2450  if (level_subdomain_association[local_dof_indices[i]] ==
2451  numbers::invalid_subdomain_id)
2452  level_subdomain_association[local_dof_indices[i]] = level_subdomain_id;
2453  else if (level_subdomain_association[local_dof_indices[i]] > level_subdomain_id)
2454  {
2455  level_subdomain_association[local_dof_indices[i]] = level_subdomain_id;
2456  }
2457  }
2458 
2459  Assert (std::find (level_subdomain_association.begin(),
2460  level_subdomain_association.end(),
2461  numbers::invalid_subdomain_id)
2462  == level_subdomain_association.end(),
2463  ExcInternalError());
2464 
2465  Assert (*std::max_element (level_subdomain_association.begin(),
2466  level_subdomain_association.end())
2467  < n_procs,
2468  ExcInternalError());
2469 
2470  return level_subdomain_association;
2471  }
2472  }
2473 
2474 
2475 
2476  template <class DoFHandlerType>
2477  NumberCache
2478  ParallelShared<DoFHandlerType>::
2479  distribute_dofs () const
2480  {
2481  const unsigned int dim = DoFHandlerType::dimension;
2482  const unsigned int spacedim = DoFHandlerType::space_dimension;
2483 
2484  const parallel::shared::Triangulation<dim, spacedim> *tr =
2485  (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>*> (&this->dof_handler->get_triangulation()));
2486  Assert(tr != nullptr, ExcInternalError());
2487 
2488  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(tr->get_communicator());
2489 
2490  // If the underlying shared::Tria allows artificial cells,
2491  // then save the current set of subdomain ids, and set
2492  // subdomain ids to the "true" owner of each cell. we later
2493  // restore these flags
2494  std::vector<types::subdomain_id> saved_subdomain_ids;
2495  if (tr->with_artificial_cells())
2496  {
2497  saved_subdomain_ids.resize (tr->n_active_cells());
2498 
2499  typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator
2500  cell = this->dof_handler->get_triangulation().begin_active(),
2501  endc = this->dof_handler->get_triangulation().end();
2502 
2503  const std::vector<types::subdomain_id> &true_subdomain_ids
2504  = tr->get_true_subdomain_ids_of_cells();
2505 
2506  for (unsigned int index=0; cell != endc; ++cell, ++index)
2507  {
2508  saved_subdomain_ids[index] = cell->subdomain_id();
2509  cell->set_subdomain_id(true_subdomain_ids[index]);
2510  }
2511  }
2512 
2513  // first let the sequential algorithm do its magic. it is going to
2514  // enumerate DoFs on all cells, regardless of owner
2515  const types::global_dof_index n_dofs =
2516  Implementation::distribute_dofs (numbers::invalid_subdomain_id,
2517  *this->dof_handler);
2518 
2519  // then re-enumerate them based on their subdomain association.
2520  // for this, we first have to identify for each current DoF
2521  // index which subdomain they belong to. ideally, we would
2522  // like to call DoFRenumbering::subdomain_wise(), but
2523  // because the NumberCache of the current DoFHandler is not
2524  // fully set up yet, we can't quite do that. also, that
2525  // function has to deal with other kinds of triangulations as
2526  // well, whereas we here know what kind of triangulation
2527  // we have and can simplify the code accordingly
2528  std::vector<types::global_dof_index> new_dof_indices (n_dofs,
2529  numbers::invalid_dof_index);
2530  {
2531  // first get the association of each dof with a subdomain and
2532  // determine the total number of subdomain ids used
2533  const std::vector<types::subdomain_id> subdomain_association
2534  = get_dof_subdomain_association (*this->dof_handler, n_dofs, n_procs);
2535 
2536  // then renumber the subdomains by first looking at those belonging
2537  // to subdomain 0, then those of subdomain 1, etc. note that the
2538  // algorithm is stable, i.e. if two dofs i,j have i<j and belong to
2539  // the same subdomain, then they will be in this order also after
2540  // reordering
2541  types::global_dof_index next_free_index = 0;
2542  for (types::subdomain_id subdomain=0; subdomain<n_procs; ++subdomain)
2543  for (types::global_dof_index i=0; i<n_dofs; ++i)
2544  if (subdomain_association[i] == subdomain)
2545  {
2546  Assert (new_dof_indices[i] == numbers::invalid_dof_index,
2547  ExcInternalError());
2548  new_dof_indices[i] = next_free_index;
2549  ++next_free_index;
2550  }
2551 
2552  // we should have numbered all dofs
2553  Assert (next_free_index == n_dofs, ExcInternalError());
2554  Assert (std::find (new_dof_indices.begin(), new_dof_indices.end(),
2555  numbers::invalid_dof_index)
2556  == new_dof_indices.end(),
2557  ExcInternalError());
2558  }
2559  // finally do the renumbering. we can use the sequential
2560  // version of the function because we do things on all
2561  // cells and all cells have their subdomain ids and DoFs
2562  // correctly set
2563  Implementation::renumber_dofs (new_dof_indices, IndexSet(0),
2564  *this->dof_handler, true);
2565 
2566  // update the number cache. for this, we first have to find the subdomain
2567  // association for each DoF again following renumbering, from which we
2568  // can then compute the IndexSets of locally owned DoFs for all processors.
2569  // all other fields then follow from this
2570  //
2571  // given the way we enumerate degrees of freedom, the locally owned
2572  // ranges must all be contiguous and consecutive. this makes filling
2573  // the IndexSets cheap. an assertion at the top verifies that this
2574  // assumption is true
2575  const std::vector<types::subdomain_id> subdomain_association
2576  = get_dof_subdomain_association (*this->dof_handler, n_dofs, n_procs);
2577 
2578  for (unsigned int i=1; i<n_dofs; ++i)
2579  Assert (subdomain_association[i] >= subdomain_association[i-1],
2580  ExcInternalError());
2581 
2582  std::vector<IndexSet> locally_owned_dofs_per_processor (n_procs,
2583  IndexSet(n_dofs));
2584  {
2585  // we know that the set of subdomain indices is contiguous from
2586  // the assertion above; find the start and end index for each
2587  // processor, taking into account that sometimes a processor
2588  // may not in fact have any DoFs at all. we do the latter
2589  // by just identifying contiguous ranges of subdomain_ids
2590  // and filling IndexSets for those subdomains; subdomains
2591  // that don't appear will lead to IndexSets that are simply
2592  // never touched and remain empty as initialized above.
2593  unsigned int start_index = 0;
2594  unsigned int end_index = 0;
2595  while (start_index < n_dofs)
2596  {
2597  while ((end_index) < n_dofs &&
2598  (subdomain_association[end_index] == subdomain_association[start_index]))
2599  ++end_index;
2600 
2601  // we've now identified a range of same indices. set that
2602  // range in the corresponding IndexSet
2603  if (end_index > start_index)
2604  {
2605  const unsigned int subdomain_owner = subdomain_association[start_index];
2606  locally_owned_dofs_per_processor[subdomain_owner]
2607  .add_range (start_index, end_index);
2608  }
2609 
2610  // then move on to thinking about the next range
2611  start_index = end_index;
2612  }
2613  }
2614 
2615  // finally, restore current subdomain ids
2616  if (tr->with_artificial_cells())
2617  {
2618  typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator
2619  cell = this->dof_handler->get_triangulation().begin_active(),
2620  endc = this->dof_handler->get_triangulation().end();
2621 
2622  for (unsigned int index=0; cell != endc; ++cell, ++index)
2623  cell->set_subdomain_id(saved_subdomain_ids[index]);
2624  }
2625 
2626  // return a NumberCache object made up from the sets of locally
2627  // owned DoFs
2628  return NumberCache(locally_owned_dofs_per_processor,
2629  this->dof_handler->get_triangulation().locally_owned_subdomain ());
2630  }
2631 
2632 
2633 
2634  template <class DoFHandlerType>
2635  std::vector<NumberCache>
2636  ParallelShared<DoFHandlerType>::
2637  distribute_mg_dofs () const
2638  {
2639  const unsigned int dim = DoFHandlerType::dimension;
2640  const unsigned int spacedim = DoFHandlerType::space_dimension;
2641 
2642  const parallel::shared::Triangulation<dim, spacedim> *tr =
2643  (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>*> (&this->dof_handler->get_triangulation()));
2644  Assert(tr != nullptr, ExcInternalError());
2645 
2646  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(tr->get_communicator());
2647  const unsigned int n_levels = tr->n_global_levels();
2648 
2649  std::vector<NumberCache> number_caches;
2650  number_caches.reserve(n_levels);
2651 
2652  // We create an index set for each level
2653  for (unsigned int lvl=0; lvl<n_levels; ++lvl)
2654  {
2655  // If the underlying shared::Tria allows artificial cells,
2656  // then save the current set of level subdomain ids, and set
2657  // subdomain ids to the "true" owner of each cell. we later
2658  // restore these flags
2659  // Note: "allows_artificial_cells" is currently enforced for
2660  // MG computations.
2661  std::vector<types::subdomain_id> saved_level_subdomain_ids;
2662  saved_level_subdomain_ids.resize(tr->n_cells(lvl));
2663  {
2664  typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
2665  cell = this->dof_handler->get_triangulation().begin(lvl),
2666  endc = this->dof_handler->get_triangulation().end(lvl);
2667 
2668  const std::vector<types::subdomain_id> &true_level_subdomain_ids
2669  = tr->get_true_level_subdomain_ids_of_cells(lvl);
2670 
2671  for (unsigned int index=0; cell != endc; ++cell, ++index)
2672  {
2673  saved_level_subdomain_ids[index] = cell->level_subdomain_id();
2674  cell->set_level_subdomain_id(true_level_subdomain_ids[index]);
2675  }
2676  }
2677 
2678  // Next let the sequential algorithm do its magic. it is going to
2679  // enumerate DoFs on all cells on the given level, regardless of owner
2680  const types::global_dof_index n_dofs_on_level =
2681  Implementation::distribute_dofs_on_level(numbers::invalid_subdomain_id,
2682  *this->dof_handler,
2683  lvl);
2684 
2685  // then re-enumerate them based on their level subdomain association.
2686  // for this, we first have to identify for each current DoF
2687  // index which subdomain they belong to. ideally, we would
2688  // like to call DoFRenumbering::subdomain_wise(), but
2689  // because the NumberCache of the current DoFHandler is not
2690  // fully set up yet, we can't quite do that. also, that
2691  // function has to deal with other kinds of triangulations as
2692  // well, whereas we here know what kind of triangulation
2693  // we have and can simplify the code accordingly
2694  std::vector<types::global_dof_index> new_dof_indices (n_dofs_on_level,
2695  numbers::invalid_dof_index);
2696  {
2697  // first get the association of each dof with a subdomain and
2698  // determine the total number of subdomain ids used
2699  const std::vector<types::subdomain_id> level_subdomain_association
2700  = get_dof_level_subdomain_association (*this->dof_handler,
2701  n_dofs_on_level, n_procs, lvl);
2702 
2703  // then renumber the subdomains by first looking at those belonging
2704  // to subdomain 0, then those of subdomain 1, etc. note that the
2705  // algorithm is stable, i.e. if two dofs i,j have i<j and belong to
2706  // the same subdomain, then they will be in this order also after
2707  // reordering
2708  types::global_dof_index next_free_index = 0;
2709  for (types::subdomain_id level_subdomain=0; level_subdomain<n_procs; ++level_subdomain)
2710  for (types::global_dof_index i=0; i<n_dofs_on_level; ++i)
2711  if (level_subdomain_association[i] == level_subdomain)
2712  {
2713  Assert (new_dof_indices[i] == numbers::invalid_dof_index,
2714  ExcInternalError());
2715  new_dof_indices[i] = next_free_index;
2716  ++next_free_index;
2717  }
2718 
2719  // we should have numbered all dofs
2720  Assert (next_free_index == n_dofs_on_level, ExcInternalError());
2721  Assert (std::find (new_dof_indices.begin(), new_dof_indices.end(),
2722  numbers::invalid_dof_index)
2723  == new_dof_indices.end(),
2724  ExcInternalError());
2725  }
2726 
2727  // finally do the renumbering. we can use the sequential
2728  // version of the function because we do things on all
2729  // cells and all cells have their subdomain ids and DoFs
2730  // correctly set
2731  Implementation::renumber_mg_dofs (new_dof_indices, IndexSet(0),
2732  *this->dof_handler, lvl, true);
2733 
2734  // update the number cache. for this, we first have to find the level subdomain
2735  // association for each DoF again following renumbering, from which we
2736  // can then compute the IndexSets of locally owned DoFs for all processors.
2737  // all other fields then follow from this
2738  //
2739  // given the way we enumerate degrees of freedom, the locally owned
2740  // ranges must all be contiguous and consecutive. this makes filling
2741  // the IndexSets cheap. an assertion at the top verifies that this
2742  // assumption is true
2743  const std::vector<types::subdomain_id> level_subdomain_association
2744  = get_dof_level_subdomain_association (*this->dof_handler,
2745  n_dofs_on_level, n_procs, lvl);
2746 
2747  for (unsigned int i=1; i<n_dofs_on_level; ++i)
2748  Assert (level_subdomain_association[i] >= level_subdomain_association[i-1],
2749  ExcInternalError());
2750 
2751  std::vector<IndexSet> locally_owned_dofs_per_processor (n_procs,
2752  IndexSet(n_dofs_on_level));
2753  {
2754  // we know that the set of subdomain indices is contiguous from
2755  // the assertion above; find the start and end index for each
2756  // processor, taking into account that sometimes a processor
2757  // may not in fact have any DoFs at all. we do the latter
2758  // by just identifying contiguous ranges of level_subdomain_ids
2759  // and filling IndexSets for those subdomains; subdomains
2760  // that don't appear will lead to IndexSets that are simply
2761  // never touched and remain empty as initialized above.
2762  unsigned int start_index = 0;
2763  unsigned int end_index = 0;
2764  while (start_index < n_dofs_on_level)
2765  {
2766  while ((end_index) < n_dofs_on_level &&
2767  (level_subdomain_association[end_index] == level_subdomain_association[start_index]))
2768  ++end_index;
2769 
2770  // we've now identified a range of same indices. set that
2771  // range in the corresponding IndexSet
2772  if (end_index > start_index)
2773  {
2774  const unsigned int level_subdomain_owner = level_subdomain_association[start_index];
2775  locally_owned_dofs_per_processor[level_subdomain_owner]
2776  .add_range (start_index, end_index);
2777  }
2778 
2779  // then move on to thinking about the next range
2780  start_index = end_index;
2781  }
2782  }
2783 
2784  // finally, restore current level subdomain ids
2785  {
2786  typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
2787  cell = this->dof_handler->get_triangulation().begin(lvl),
2788  endc = this->dof_handler->get_triangulation().end(lvl);
2789 
2790  for (unsigned int index=0; cell != endc; ++cell, ++index)
2791  cell->set_level_subdomain_id(saved_level_subdomain_ids[index]);
2792 
2793  // add NumberCache for current level
2794  number_caches.emplace_back (NumberCache(locally_owned_dofs_per_processor,
2795  this->dof_handler->get_triangulation()
2796  .locally_owned_subdomain ()));
2797  }
2798  }
2799 
2800  return number_caches;
2801  }
2802 
2803 
2804 
2805 
2806 
2807  template <class DoFHandlerType>
2808  NumberCache
2809  ParallelShared<DoFHandlerType>::
2810  renumber_dofs (const std::vector<types::global_dof_index> &new_numbers) const
2811  {
2812 
2813 #ifndef DEAL_II_WITH_MPI
2814  (void)new_numbers;
2815  Assert (false, ExcNotImplemented());
2816  return NumberCache();
2817 #else
2818  const unsigned int dim = DoFHandlerType::dimension;
2819  const unsigned int spacedim = DoFHandlerType::space_dimension;
2820 
2821  // Similar to distribute_dofs() we need to have a special treatment in
2822  // case artificial cells are present.
2823  const parallel::shared::Triangulation<dim, spacedim> *tr =
2824  (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>*> (&this->dof_handler->get_triangulation()));
2825  Assert(tr != nullptr, ExcInternalError());
2826 
2827  typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator
2828  cell = this->dof_handler->get_triangulation().begin_active(),
2829  endc = this->dof_handler->get_triangulation().end();
2830  std::vector<types::subdomain_id> current_subdomain_ids(tr->n_active_cells());
2831  const std::vector<types::subdomain_id> &true_subdomain_ids = tr->get_true_subdomain_ids_of_cells();
2832  if (tr->with_artificial_cells())
2833  for (unsigned int index=0; cell != endc; cell++, index++)
2834  {
2835  current_subdomain_ids[index] = cell->subdomain_id();
2836  cell->set_subdomain_id(true_subdomain_ids[index]);
2837  }
2838 
2839  std::vector<types::global_dof_index> global_gathered_numbers (this->dof_handler->n_dofs (), 0);
2840  // as we call DoFRenumbering::subdomain_wise (*dof_handler) from distribute_dofs(),
2841  // we need to support sequential-like input.
2842  // Distributed-like input from, for example, component_wise renumbering is also supported.
2843  if (new_numbers.size () == this->dof_handler->n_dofs ())
2844  {
2845  global_gathered_numbers = new_numbers;
2846  }
2847  else
2848  {
2849  Assert(new_numbers.size() == this->dof_handler->locally_owned_dofs().n_elements(),
2850  ExcInternalError());
2851  const unsigned int n_cpu = Utilities::MPI::n_mpi_processes (tr->get_communicator ());
2852  std::vector<types::global_dof_index> gathered_new_numbers (this->dof_handler->n_dofs (), 0);
2853  Assert(Utilities::MPI::this_mpi_process (tr->get_communicator ()) ==
2854  this->dof_handler->get_triangulation().locally_owned_subdomain (),
2855  ExcInternalError())
2856 
2857  // gather new numbers among processors into one vector
2858  {
2859  std::vector<types::global_dof_index> new_numbers_copy (new_numbers);
2860 
2861  // store the number of elements that are to be received from each process
2862  std::vector<int> rcounts(n_cpu);
2863 
2864  types::global_dof_index shift = 0;
2865  // set rcounts based on new_numbers:
2866  int cur_count = new_numbers_copy.size ();
2867  int ierr = MPI_Allgather (&cur_count, 1, MPI_INT,
2868  rcounts.data(), 1, MPI_INT,
2869  tr->get_communicator ());
2870  AssertThrowMPI(ierr);
2871 
2872  // compute the displacements (relative to recvbuf)
2873  // at which to place the incoming data from process i
2874  std::vector<int> displacements(n_cpu);
2875  for (unsigned int i = 0; i < n_cpu; i++)
2876  {
2877  displacements[i] = shift;
2878  shift += rcounts[i];
2879  }
2880  Assert(((int)new_numbers_copy.size()) ==
2881  rcounts[Utilities::MPI::this_mpi_process (tr->get_communicator ())],
2882  ExcInternalError());
2883  ierr = MPI_Allgatherv (new_numbers_copy.data(), new_numbers_copy.size (),
2884  DEAL_II_DOF_INDEX_MPI_TYPE,
2885  gathered_new_numbers.data(), rcounts.data(),
2886  displacements.data(),
2887  DEAL_II_DOF_INDEX_MPI_TYPE,
2888  tr->get_communicator ());
2889  AssertThrowMPI(ierr);
2890  }
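 // at this point gathered_new_numbers contains the new indices provided
 // by every process for its locally owned DoFs, concatenated in rank
 // order; the loop below scatters them into global_gathered_numbers
 // using each process' locally owned IndexSet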
2891 
2892  // put new numbers according to the current locally_owned_dofs_per_processor IndexSets
2893  types::global_dof_index shift = 0;
2894  // flag_1 and flag_2 are
2895  // used to control that there is a
2896  // one-to-one relation between old and new DoFs.
2897  std::vector<unsigned int> flag_1 (this->dof_handler->n_dofs (), 0);
2898  std::vector<unsigned int> flag_2 (this->dof_handler->n_dofs (), 0);
2899  for (unsigned int i = 0; i < n_cpu; i++)
2900  {
2901  const IndexSet iset =
2902  this->dof_handler->locally_owned_dofs_per_processor()[i];
2903  for (types::global_dof_index ind = 0;
2904  ind < iset.n_elements (); ind++)
2905  {
2906  const types::global_dof_index target = iset.nth_index_in_set (ind);
2907  const types::global_dof_index value = gathered_new_numbers[shift + ind];
2908  Assert(target < this->dof_handler->n_dofs(), ExcInternalError());
2909  Assert(value < this->dof_handler->n_dofs(), ExcInternalError());
2910  global_gathered_numbers[target] = value;
2911  flag_1[target]++;
2912  flag_2[value]++;
2913  }
2914  shift += iset.n_elements ();
2915  }
2916 
2917  Assert(*std::max_element(flag_1.begin(), flag_1.end()) == 1,
2918  ExcInternalError());
2919  Assert(*std::min_element(flag_1.begin(), flag_1.end()) == 1,
2920  ExcInternalError());
2921  Assert((*std::max_element(flag_2.begin(), flag_2.end())) == 1,
2922  ExcInternalError());
2923  Assert((*std::min_element(flag_2.begin(), flag_2.end())) == 1,
2924  ExcInternalError());
2925  }
2926 
2927  // let the sequential algorithm do its magic; ignore the
2928  // return type, but reconstruct the number cache based on
2929  // which DoFs each process owns
2930  Implementation::renumber_dofs (global_gathered_numbers, IndexSet(0),
2931  *this->dof_handler, true);
2932 
2933  const NumberCache number_cache (DoFTools::locally_owned_dofs_per_subdomain (*this->dof_handler),
2934  this->dof_handler->get_triangulation().locally_owned_subdomain ());
2935 
2936  // restore artificial cells
2937  cell = tr->begin_active();
2938  if (tr->with_artificial_cells())
2939  for (unsigned int index=0; cell != endc; cell++, index++)
2940  cell->set_subdomain_id(current_subdomain_ids[index]);
2941 
2942  return number_cache;
2943 #endif
2944  }
2945 
2946 
2947 
2948  template <class DoFHandlerType>
2949  NumberCache
2950  ParallelShared<DoFHandlerType>::
2951  renumber_mg_dofs (const unsigned int /*level*/,
2952  const std::vector<types::global_dof_index> &/*new_numbers*/) const
2953  {
2954  // multigrid is not currently implemented for shared triangulations
2955  Assert(false, ExcNotImplemented());
2956 
2957  return NumberCache ();
2958  }
2959 
2960 
2961 
2962  /* --------------------- class ParallelDistributed ---------------- */
2963 
2964 #ifdef DEAL_II_WITH_P4EST
2965 
2966  namespace
2967  {
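 // a buffer class used to ship DoF indices of cells across process
 // boundaries: cells are identified by their coarse-grid tree index and
 // p4est quadrant, and for each cell the buffer stores the number of
 // DoFs followed by the global DoF indices themselves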
2983  template <int dim>
2984  struct CellDataTransferBuffer
2985  {
2986  std::vector<unsigned int> tree_indices;
2987  std::vector<typename ::internal::p4est::types<dim>::quadrant> quadrants;
2988  std::vector<::types::global_dof_index> dof_numbers_and_indices;
2989 
2990 
2995  template <class Archive>
2996  void save (Archive &ar,
2997  const unsigned int /*version*/) const
2998  {
2999  // we would like to directly serialize the 'quadrants' vector,
3000  // but the element type is internal to p4est and does not
3001  // know how to serialize itself. consequently, first copy it over
3002  // to an array of bytes, and then serialize that
3003  std::vector<char> quadrants_as_chars (sizeof(quadrants[0]) * quadrants.size());
3004  if (quadrants_as_chars.size()>0)
3005  {
3006  Assert(quadrants.data() != nullptr, ExcInternalError());
3007  std::memcpy(quadrants_as_chars.data(),
3008  quadrants.data(),
3009  quadrants_as_chars.size());
3010  }
3011 
3012  // now serialize everything
3013  ar &quadrants_as_chars
3014  &tree_indices
3015  &dof_numbers_and_indices;
3016  }
3017 
3022  template <class Archive>
3023  void load (Archive &ar,
3024  const unsigned int /*version*/)
3025  {
3026  // undo the copying trick from the 'save' function
3027  std::vector<char> quadrants_as_chars;
3028  ar &quadrants_as_chars
3029  &tree_indices
3030  &dof_numbers_and_indices;
3031 
3032  if (quadrants_as_chars.size()>0)
3033  {
3034  quadrants.resize (quadrants_as_chars.size() / sizeof(quadrants[0]));
3035  std::memcpy(quadrants.data(),
3036  quadrants_as_chars.data(),
3037  quadrants_as_chars.size());
3038  }
3039  else
3040  quadrants.clear();
3041  }
3042 
3043  BOOST_SERIALIZATION_SPLIT_MEMBER()
3044 
3045 
3046 
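 // pack the data of this object into a flat buffer of chars, compressing
 // it with gzip if deal.II was configured with zlib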
3050  std::vector<char>
3051  pack_data () const
3052  {
3053  // set up a buffer and then use it as the target of a compressing
3054  // stream into which we serialize the current object
3055  std::vector<char> buffer;
3056  {
3057 #ifdef DEAL_II_WITH_ZLIB
3058  boost::iostreams::filtering_ostream out;
3059  out.push(boost::iostreams::gzip_compressor
3060  (boost::iostreams::gzip_params
3061  (boost::iostreams::gzip::best_compression)));
3062  out.push(boost::iostreams::back_inserter(buffer));
3063 
3064  boost::archive::binary_oarchive archive(out);
3065 
3066  archive << *this;
3067  out.flush();
3068 #else
3069  std::ostringstream out;
3070  boost::archive::binary_oarchive archive(out);
3071  archive << *this;
3072  const std::string &s = out.str();
3073  buffer.reserve(s.size());
3074  buffer.assign(s.begin(), s.end());
3075 #endif
3076  }
3077 
3078  return buffer;
3079  }
3080 
3081 
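 // the inverse of pack_data(): decompress the given buffer (if necessary)
 // and restore the members of this object from it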
3087  void unpack_data (const std::vector<char> &buffer)
3088  {
3089  std::string decompressed_buffer;
3090 
3091  // first decompress the buffer
3092  {
3093 #ifdef DEAL_II_WITH_ZLIB
3094  boost::iostreams::filtering_ostream decompressing_stream;
3095  decompressing_stream.push(boost::iostreams::gzip_decompressor());
3096  decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
3097  decompressing_stream.write (buffer.data(), buffer.size());
3098 #else
3099  decompressed_buffer.assign (buffer.begin(), buffer.end());
3100 #endif
3101  }
3102 
3103  // then restore the object from the buffer
3104  std::istringstream in(decompressed_buffer);
3105  boost::archive::binary_iarchive archive(in);
3106 
3107  archive >> *this;
3108  }
3109  };
3110 
3111 
3112 
3113 
3114  template <int dim, int spacedim>
3115  void
3116  get_mg_dofindices_recursively (
3117  const typename parallel::distributed::Triangulation<dim,spacedim> &tria,
3118  const typename ::internal::p4est::types<dim>::quadrant &p4est_cell,
3119  const typename DoFHandler<dim,spacedim>::level_cell_iterator &dealii_cell,
3120  const typename ::internal::p4est::types<dim>::quadrant &quadrant,
3121  CellDataTransferBuffer<dim> &cell_data_transfer_buffer)
3122  {
3123  if (internal::p4est::quadrant_is_equal<dim>(p4est_cell, quadrant))
3124  {
3125  // why would somebody request a cell that is not ours?
3126  Assert(dealii_cell->level_subdomain_id()==tria.locally_owned_subdomain(), ExcInternalError());
3127 
3128 
3129  std::vector<::types::global_dof_index>
3130  local_dof_indices (dealii_cell->get_fe().dofs_per_cell);
3131  dealii_cell->get_mg_dof_indices (local_dof_indices);
3132 
3133  cell_data_transfer_buffer.dof_numbers_and_indices.push_back(dealii_cell->get_fe().dofs_per_cell);
3134  cell_data_transfer_buffer.dof_numbers_and_indices.insert(cell_data_transfer_buffer.dof_numbers_and_indices.end(),
3135  local_dof_indices.begin(),
3136  local_dof_indices.end());
3137  return; // we are done
3138  }
3139 
3140  if (! dealii_cell->has_children())
3141  return;
3142 
3143  if (! internal::p4est::quadrant_is_ancestor<dim> (p4est_cell, quadrant))
3144  return;
3145 
3146  typename ::internal::p4est::types<dim>::quadrant
3147  p4est_child[GeometryInfo<dim>::max_children_per_cell];
3148  internal::p4est::init_quadrant_children<dim>(p4est_cell, p4est_child);
3149 
3150  for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
3151  get_mg_dofindices_recursively<dim,spacedim> (tria, p4est_child[c],
3152  dealii_cell->child(c),
3153  quadrant, cell_data_transfer_buffer);
3154  }
3155 
3156 
3157  template <int dim, int spacedim>
3158  void
3159  find_marked_mg_ghost_cells_recursively(const typename parallel::distributed::Triangulation<dim,spacedim> &tria,
3160  const unsigned int tree_index,
3161  const typename DoFHandler<dim,spacedim>::level_cell_iterator &dealii_cell,
3162  const typename ::internal::p4est::types<dim>::quadrant &p4est_cell,
3163  std::map<::types::subdomain_id, CellDataTransferBuffer<dim>> &neighbor_cell_list)
3164  {
3165  // recurse...
3166  if (dealii_cell->has_children())
3167  {
3168  typename ::internal::p4est::types<dim>::quadrant
3169  p4est_child[GeometryInfo<dim>::max_children_per_cell];
3170  internal::p4est::init_quadrant_children<dim>(p4est_cell, p4est_child);
3171 
3172 
3173  for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
3174  find_marked_mg_ghost_cells_recursively<dim,spacedim>(tria,
3175  tree_index,
3176  dealii_cell->child(c),
3177  p4est_child[c],
3178  neighbor_cell_list);
3179  }
3180 
3181  if (dealii_cell->user_flag_set() && dealii_cell->level_subdomain_id() != tria.locally_owned_subdomain())
3182  {
3183  neighbor_cell_list[dealii_cell->level_subdomain_id()].tree_indices.push_back(tree_index);
3184  neighbor_cell_list[dealii_cell->level_subdomain_id()].quadrants.push_back(p4est_cell);
3185  }
3186  }
3187 
3188 
3189  template <int dim, int spacedim>
3190  void
3191  set_mg_dofindices_recursively (
3192  const typename parallel::distributed::Triangulation<dim,spacedim> &tria,
3193  const typename ::internal::p4est::types<dim>::quadrant &p4est_cell,
3194  const typename DoFHandler<dim,spacedim>::level_cell_iterator &dealii_cell,
3195  const typename ::internal::p4est::types<dim>::quadrant &quadrant,
3196  ::types::global_dof_index *dofs)
3197  {
3198  if (internal::p4est::quadrant_is_equal<dim>(p4est_cell, quadrant))
3199  {
3200  Assert(dealii_cell->level_subdomain_id()!=::numbers::artificial_subdomain_id, ExcInternalError());
3201 
3202  // update dof indices of cell
3203  std::vector<::types::global_dof_index>
3204  dof_indices (dealii_cell->get_fe().dofs_per_cell);
3205  dealii_cell->get_mg_dof_indices(dof_indices);
3206 
3207  bool complete = true;
3208  for (unsigned int i=0; i<dof_indices.size(); ++i)
3209  if (dofs[i] != numbers::invalid_dof_index)
3210  {
3211  Assert((dof_indices[i] ==
3212  numbers::invalid_dof_index)
3213  ||
3214  (dof_indices[i]==dofs[i]),
3215  ExcInternalError());
3216  dof_indices[i]=dofs[i];
3217  }
3218  else
3219  complete=false;
3220 
3221  if (!complete)
3222  const_cast
3223  <typename DoFHandler<dim,spacedim>::level_cell_iterator &>
3224  (dealii_cell)->set_user_flag();
3225  else
3226  const_cast
3227  <typename DoFHandler<dim,spacedim>::level_cell_iterator &>
3228  (dealii_cell)->clear_user_flag();
3229 
3230  const_cast
3231  <typename DoFHandler<dim,spacedim>::level_cell_iterator &>
3232  (dealii_cell)->set_mg_dof_indices(dof_indices);
3233  return;
3234  }
3235 
3236  if (! dealii_cell->has_children())
3237  return;
3238 
3239  if (! internal::p4est::quadrant_is_ancestor<dim> (p4est_cell, quadrant))
3240  return;
3241 
3242  typename ::internal::p4est::types<dim>::quadrant
3243  p4est_child[GeometryInfo<dim>::max_children_per_cell];
3244  internal::p4est::init_quadrant_children<dim>(p4est_cell, p4est_child);
3245 
3246  for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
3247  set_mg_dofindices_recursively<dim,spacedim> (tria, p4est_child[c],
3248  dealii_cell->child(c),
3249  quadrant, dofs);
3250 
3251  }
3252 
3253 
3254 
3255  template <int dim, int spacedim, class DoFHandlerType>
3256  void
3257  communicate_mg_ghost_cells(const typename parallel::distributed::Triangulation<dim,spacedim> &tria,
3258  DoFHandlerType &dof_handler,
3259  const std::vector<::types::global_dof_index> &coarse_cell_to_p4est_tree_permutation,
3260  const std::vector<::types::global_dof_index> &p4est_tree_to_coarse_cell_permutation)
3261  {
3262  // build list of cells to request for each neighbor
3263  std::set<::types::subdomain_id> level_ghost_owners = tria.level_ghost_owners();
3264  typedef std::map<::types::subdomain_id, CellDataTransferBuffer<dim>> cellmap_t;
3265  cellmap_t neighbor_cell_list;
3266  for (std::set<::types::subdomain_id>::iterator it = level_ghost_owners.begin();
3267  it != level_ghost_owners.end();
3268  ++it)
3269  neighbor_cell_list.insert(std::make_pair(*it, CellDataTransferBuffer<dim>()));
3270 
3271  for (typename DoFHandlerType::level_cell_iterator
3272  cell = dof_handler.begin(0);
3273  cell != dof_handler.end(0);
3274  ++cell)
3275  {
3276  typename ::internal::p4est::types<dim>::quadrant p4est_coarse_cell;
3277  internal::p4est::init_coarse_quadrant<dim>(p4est_coarse_cell);
3278 
3279  find_marked_mg_ghost_cells_recursively<dim,spacedim>
3280  (tria,
3281  coarse_cell_to_p4est_tree_permutation[cell->index()],
3282  cell,
3283  p4est_coarse_cell,
3284  neighbor_cell_list);
3285  }
3286  Assert(level_ghost_owners.size() == neighbor_cell_list.size(), ExcInternalError());
3287 
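 // the communication below proceeds in three stages: first send each
 // neighboring process the list of coarse-grid trees/quadrants for which
 // we need its level DoF indices, then answer the requests we receive
 // ourselves, and finally read the replies and copy the indices onto our
 // ghost cells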
3288  //* send our requests:
3289  std::vector<std::vector<char> > sendbuffers (level_ghost_owners.size());
3290  std::vector<MPI_Request> requests (level_ghost_owners.size());
3291 
3292  unsigned int idx=0;
3293  for (typename cellmap_t::iterator it = neighbor_cell_list.begin();
3294  it!=neighbor_cell_list.end();
3295  ++it, ++idx)
3296  {
3297  // pack all the data into the buffer for this recipient
3298  // and send it. keep data around till we can make sure
3299  // that the packet has been received
3300  sendbuffers[idx] = it->second.pack_data ();
3301  const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(),
3302  MPI_BYTE, it->first,
3303  1100101, tria.get_communicator(), &requests[idx]);
3304  AssertThrowMPI(ierr);
3305  }
3306 
3307  //* receive requests and reply
3308  std::vector<std::vector<char> > reply_buffers (level_ghost_owners.size());
3309  std::vector<MPI_Request> reply_requests (level_ghost_owners.size());
3310 
3311  for (unsigned int idx=0; idx<level_ghost_owners.size(); ++idx)
3312  {
3313  std::vector<char> receive;
3314  CellDataTransferBuffer<dim> cell_data_transfer_buffer;
3315 
3316  MPI_Status status;
3317  int len;
3318  int ierr = MPI_Probe(MPI_ANY_SOURCE, 1100101, tria.get_communicator(), &status);
3319  AssertThrowMPI(ierr);
3320  ierr = MPI_Get_count(&status, MPI_BYTE, &len);
3321  AssertThrowMPI(ierr);
3322  receive.resize(len);
3323 
3324  char *ptr = receive.data();
3325  ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
3326  tria.get_communicator(), &status);
3327  AssertThrowMPI(ierr);
3328 
3329  cell_data_transfer_buffer.unpack_data(receive);
3330 
3331  // store the dof indices for each cell
3332  for (unsigned int c=0; c<cell_data_transfer_buffer.tree_indices.size(); ++c)
3333  {
3334  typename DoFHandlerType::level_cell_iterator
3335  cell (&dof_handler.get_triangulation(),
3336  0,
3337  p4est_tree_to_coarse_cell_permutation[cell_data_transfer_buffer.tree_indices[c]],
3338  &dof_handler);
3339 
3340  typename ::internal::p4est::types<dim>::quadrant p4est_coarse_cell;
3341  internal::p4est::init_coarse_quadrant<dim>(p4est_coarse_cell);
3342 
3343  get_mg_dofindices_recursively<dim,spacedim> (tria,
3344  p4est_coarse_cell,
3345  cell,
3346  cell_data_transfer_buffer.quadrants[c],
3347  cell_data_transfer_buffer);
3348  }
3349 
3350  // send reply
3351  reply_buffers[idx] = cell_data_transfer_buffer.pack_data();
3352  ierr = MPI_Isend(&(reply_buffers[idx])[0], reply_buffers[idx].size(),
3353  MPI_BYTE, status.MPI_SOURCE,
3354  1100102, tria.get_communicator(), &reply_requests[idx]);
3355  AssertThrowMPI(ierr);
3356  }
3357 
3358  //* finally receive the replies
3359  for (unsigned int idx=0; idx<level_ghost_owners.size(); ++idx)
3360  {
3361  std::vector<char> receive;
3362  CellDataTransferBuffer<dim> cell_data_transfer_buffer;
3363 
3364  MPI_Status status;
3365  int len;
3366  int ierr = MPI_Probe(MPI_ANY_SOURCE, 1100102, tria.get_communicator(), &status);
3367  AssertThrowMPI(ierr);
3368  ierr = MPI_Get_count(&status, MPI_BYTE, &len);
3369  AssertThrowMPI(ierr);
3370  receive.resize(len);
3371 
3372  char *ptr = receive.data();
3373  ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
3374  tria.get_communicator(), &status);
3375  AssertThrowMPI(ierr);
3376 
3377  cell_data_transfer_buffer.unpack_data(receive);
3378  if (cell_data_transfer_buffer.tree_indices.size()==0)
3379  continue;
3380 
3381  // set the dof indices for each cell
3382  ::types::global_dof_index *dofs = cell_data_transfer_buffer.dof_numbers_and_indices.data();
3383  for (unsigned int c=0; c<cell_data_transfer_buffer.tree_indices.size(); ++c, dofs+=1+dofs[0])
3384  {
3385  typename DoFHandlerType::level_cell_iterator
3386  cell (&tria,
3387  0,
3388  p4est_tree_to_coarse_cell_permutation[cell_data_transfer_buffer.tree_indices[c]],
3389  &dof_handler);
3390 
3391  typename ::internal::p4est::types<dim>::quadrant p4est_coarse_cell;
3392  internal::p4est::init_coarse_quadrant<dim>(p4est_coarse_cell);
3393 
3394  Assert(cell->get_fe().dofs_per_cell==dofs[0], ExcInternalError());
3395 
3396  set_mg_dofindices_recursively<dim,spacedim> (tria,
3397  p4est_coarse_cell,
3398  cell,
3399  cell_data_transfer_buffer.quadrants[c],
3400  dofs+1);
3401  }
3402  }
3403 
3404  // complete all sends, so that we can safely destroy the
3405  // buffers.
3406  if (requests.size() > 0)
3407  {
3408  const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
3409  AssertThrowMPI(ierr);
3410  }
3411  if (reply_requests.size() > 0)
3412  {
3413  const int ierr = MPI_Waitall(reply_requests.size(), reply_requests.data(), MPI_STATUSES_IGNORE);
3414  AssertThrowMPI(ierr);
3415  }
3416  }
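        // for reference, the reply layout consumed by the loop above: for each
        // requested cell, dof_numbers_and_indices stores a count followed by
        // the indices themselves, i.e.
        //   { n_0, d_0_0, ..., d_0_(n_0-1), n_1, d_1_0, ... }
        // which is why the read pointer advances by 1+dofs[0] per cell and why
        // dofs[0] has to match get_fe().dofs_per_cell on the receiving cell.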
3417 
3418 
3419 
3420  template <int spacedim>
3421  void
3422  communicate_mg_ghost_cells(const typename parallel::distributed::Triangulation<1,spacedim> &,
 3423  DoFHandler<1,spacedim> &,
3424  const std::vector<::types::global_dof_index> &,
3425  const std::vector<::types::global_dof_index> &)
3426  {
3427  Assert (false, ExcNotImplemented());
3428  }
3429 
3430 
3431 
3432 
3433  template <int spacedim>
3434  void
3435  communicate_mg_ghost_cells(const typename parallel::distributed::Triangulation<1,spacedim> &,
 3436  hp::DoFHandler<1,spacedim> &,
3437  const std::vector<::types::global_dof_index> &,
3438  const std::vector<::types::global_dof_index> &)
3439  {
3440  Assert (false, ExcNotImplemented());
3441  }
3442 
3443 
3444 
3463  template <int spacedim>
3464  void
3465  communicate_dof_indices_on_marked_cells
3466  (const DoFHandler<1,spacedim> &,
3467  const std::map<unsigned int, std::set<::types::subdomain_id> > &,
3468  const std::vector<::types::global_dof_index> &,
3469  const std::vector<::types::global_dof_index> &)
3470  {
3471  Assert (false, ExcNotImplemented());
3472  }
3473 
3474 
3475 
3476  template <int spacedim>
3477  void
3478  communicate_dof_indices_on_marked_cells
3479  (const hp::DoFHandler<1,spacedim> &,
3480  const std::map<unsigned int, std::set<::types::subdomain_id> > &,
3481  const std::vector<::types::global_dof_index> &,
3482  const std::vector<::types::global_dof_index> &)
3483  {
3484  Assert (false, ExcNotImplemented());
3485  }
3486 
3487 
3488 
3489  template <class DoFHandlerType>
3490  void
3491  communicate_dof_indices_on_marked_cells
3492  (const DoFHandlerType &dof_handler,
3493  const std::map<unsigned int, std::set<::types::subdomain_id> > &,
3494  const std::vector<::types::global_dof_index> &,
3495  const std::vector<::types::global_dof_index> &)
3496  {
3497 #ifndef DEAL_II_WITH_MPI
 3498  (void)dof_handler;
3499  Assert (false, ExcNotImplemented());
3500 #else
3501  const unsigned int dim = DoFHandlerType::dimension;
3502  const unsigned int spacedim = DoFHandlerType::space_dimension;
3503 
3504  // define functions that pack data on cells that are ghost cells
3505  // somewhere else, and unpack data on cells where we get information
3506  // from elsewhere
3507  auto pack
3508  = [] (const typename DoFHandlerType::active_cell_iterator &cell) -> boost::optional<std::vector<types::global_dof_index>>
3509  {
3510  Assert (cell->is_locally_owned(), ExcInternalError());
3511 
3512  // first see whether we need to do anything at all on this cell.
3513  // this is determined by whether the user_flag is set on the
3514  // cell that indicates that the *complete* set of DoF indices
3515  // has not been sent
3516  if (cell->user_flag_set())
3517  {
3518  // get dof indices for the current cell
3519  std::vector<types::global_dof_index> local_dof_indices (cell->get_fe().dofs_per_cell);
3520  cell->get_dof_indices (local_dof_indices);
3521 
3522  // now see if there are dof indices that were previously
3523  // unknown. this can only happen in phase 1, and in
3524  // that case we know that the user flag must have been set
3525  //
3526  // in any case, if the cell *is* complete, we do not
3527  // need to send the data any more in the next phase. indicate
3528  // this by removing the user flag
3529  if (std::find (local_dof_indices.begin(),
3530  local_dof_indices.end(),
 3531  numbers::invalid_dof_index)
3532  !=
3533  local_dof_indices.end())
3534  {
3535  Assert (cell->user_flag_set(), ExcInternalError());
3536  }
3537  else
3538  cell->clear_user_flag();
3539 
3540  return local_dof_indices;
3541  }
3542  else
3543  {
3544  // the fact that the user flag wasn't set means that there is
3545  // nothing we need to send that hasn't been sent so far.
3546  // so return an empty array, but also verify that indeed
3547  // the cell is complete
3548 #ifdef DEBUG
3549  std::vector<types::global_dof_index> local_dof_indices (cell->get_fe().dofs_per_cell);
3550  cell->get_dof_indices (local_dof_indices);
3551 
3552  const bool is_complete
3553  = (std::find (local_dof_indices.begin(), local_dof_indices.end(), numbers::invalid_dof_index)
3554  == local_dof_indices.end());
3555  Assert (is_complete, ExcInternalError());
3556 #endif
3557  return boost::optional<std::vector<types::global_dof_index>>();
3558  }
3559  };
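        // note the protocol encoded in the return type of 'pack': returning the
        // (possibly still incomplete) vector of indices means "send this cell's
        // data", while returning an empty boost::optional means that nothing
        // needs to be sent for this cell in the current phase.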
3560 
3561  auto unpack
3562  = [] (const typename DoFHandlerType::active_cell_iterator &cell,
3563  const std::vector<types::global_dof_index> &received_dof_indices) -> void
3564  {
3565  // this function should only be called on ghost cells, and
3566  // on top of that, only on cells that have not been
3567  // completed -- which we indicate via the user flag.
3568  // check both
3569  Assert (cell->is_ghost(), ExcInternalError());
3570  Assert (cell->user_flag_set(), ExcInternalError());
3571 
3572  // if we just got an incomplete array of DoF indices, then we must
3573  // be in the first ghost exchange and the user flag must have been
3574  // set. we tested that already above.
3575  //
3576  // if we did get a complete array, then we may be in the first
3577  // or second ghost exchange, but in any case we need not exchange
3578  // another time. so delete the user flag
3579  const bool is_complete
3580  = (std::find (received_dof_indices.begin(), received_dof_indices.end(), numbers::invalid_dof_index)
3581  == received_dof_indices.end());
3582  if (is_complete)
3583  cell->clear_user_flag();
3584 
3585  // in any case, set the DoF indices on this cell. some
3586  // of the ones we received may still be invalid because
3587  // the sending processor did not know them yet, so we
3588  // need to merge the ones we get with those that are
3589  // already set here and may have already been known. for
3590  // those that we already know *and* get, they must obviously
3591  // agree
3592  //
3593  // before getting the local dof indices, we need to update the
3594  // cell dof indices cache because we may have set dof indices
3595  // on a neighboring ghost cell before this one, which may have
3596  // affected the dof indices we know about the current cell
3597  std::vector<types::global_dof_index> local_dof_indices (cell->get_fe().dofs_per_cell);
3598  cell->update_cell_dof_indices_cache();
3599  cell->get_dof_indices (local_dof_indices);
3600 
3601  for (unsigned int i=0; i<local_dof_indices.size(); ++i)
3602  if (local_dof_indices[i] == numbers::invalid_dof_index)
3603  local_dof_indices[i] = received_dof_indices[i];
3604  else
3605  // we already know the dof index. check that there
3606  // is no conflict
3607  Assert ((received_dof_indices[i] == numbers::invalid_dof_index)
3608  ||
3609  (received_dof_indices[i] == local_dof_indices[i]),
3610  ExcInternalError());
3611 
3612  const_cast<typename DoFHandlerType::active_cell_iterator &>(cell)->set_dof_indices(local_dof_indices);
3613  };
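        // a small worked example of the merge rule above, writing X for
        // numbers::invalid_dof_index:
        //   locally known : { 4, X, X, 9 }
        //   received      : { 4, 7, X, 9 }
        //   merged result : { 4, 7, X, 9 }
        // indices known on both sides have to agree; indices the sender did not
        // know remain invalid here and are completed in the second call to this
        // function.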
3614 
3615  GridTools::exchange_cell_data_to_ghosts<std::vector<types::global_dof_index>, DoFHandlerType>
3616  (dof_handler, pack, unpack);
3617 
3618  // finally update the cell DoF indices caches to make sure
3619  // our internal data structures are consistent
3620  update_all_active_cell_dof_indices_caches (dof_handler);
3621 
3622 
3623  // have a barrier so that sends between two calls to this
3624  // function are not mixed up.
3625  //
3626  // this is necessary because above we just see if there are
3627  // messages and then receive them, without discriminating
3628  // where they come from and whether they were sent in phase
3629  // 1 or 2 (the function is called twice in a row). the need
3630  // for a global communication step like this barrier could
3631  // be avoided by receiving messages specifically from those
3632  // processors from which we expect messages, and by using
3633  // different tags for phase 1 and 2, but the cost of a
3634  // barrier is negligible compared to everything else we do
3635  // here
3636  if (const auto *triangulation =
3637  dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>(&dof_handler.get_triangulation()))
3638  {
3639  const int ierr = MPI_Barrier(triangulation->get_communicator());
3640  AssertThrowMPI(ierr);
3641  }
3642  else
3643  {
3644  Assert(false, ExcMessage("The function communicate_dof_indices_on_marked_cells() "
3645  "only works with parallel distributed triangulations."));
3646  }
3647 #endif
3648  }
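        // a minimal sketch of how the pack/unpack pattern above maps onto
        // GridTools::exchange_cell_data_to_ghosts() in general (the payload type
        // MyData and the lambda bodies are placeholders, not part of this file):
        //
        //   GridTools::exchange_cell_data_to_ghosts<MyData, DoFHandlerType>
        //     (dof_handler,
        //      [] (const typename DoFHandlerType::active_cell_iterator &cell)
        //        -> boost::optional<MyData>
        //        { return MyData(/*...*/); },           // called on owned cells
        //      [] (const typename DoFHandlerType::active_cell_iterator &cell,
        //          const MyData &data)
        //        { /* consume data on the ghost cell */ });
        //
        // whatever the first lambda returns (unless the optional is empty) is
        // delivered to the second lambda on those processors where the same cell
        // is a ghost cell.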
3649 
3650 
3651 
3652 
3653 
3654 
3655 
3656  }
3657 
3658 #endif // DEAL_II_WITH_P4EST
3659 
3660 
3661 
3662  template <class DoFHandlerType>
 3663  ParallelDistributed<DoFHandlerType>::
3664  ParallelDistributed (DoFHandlerType &dof_handler)
3665  :
3666  dof_handler (&dof_handler)
3667  {}
3668 
3669 
3670 
3671 
3672 
3673 
3674  template <class DoFHandlerType>
3675  NumberCache
 3676  ParallelDistributed<DoFHandlerType>::
 3677  distribute_dofs () const
3678  {
3679 #ifndef DEAL_II_WITH_P4EST
3680  Assert (false, ExcNotImplemented());
3681  return NumberCache();
3682 #else
3683  const unsigned int dim = DoFHandlerType::dimension;
3684  const unsigned int spacedim = DoFHandlerType::space_dimension;
3685 
 3686  parallel::distributed::Triangulation< dim, spacedim > *triangulation
 3687  = (dynamic_cast<parallel::distributed::Triangulation<dim,spacedim>*>
3688  (const_cast<::Triangulation< dim, spacedim >*>
3689  (&dof_handler->get_triangulation())));
3690  Assert (triangulation != nullptr, ExcInternalError());
3691 
3692  const unsigned int
3693  n_cpus = Utilities::MPI::n_mpi_processes (triangulation->get_communicator());
3694 
3695 
3696  /*
3697  The following algorithm has a number of stages that are all documented
3698  in the paper that describes the parallel::distributed functionality:
3699 
3700  1/ locally enumerate dofs on locally owned cells
3701  2/ un-numerate those that are on interfaces with ghost
3702  cells and that we don't own based on the tie-breaking
3703  criterion; re-enumerate the remaining ones. the
 3704  end result is that we only enumerate locally owned
3705  DoFs
3706  3/ shift indices so that each processor has a unique
3707  range of indices
3708  4/ for all locally owned cells that are ghost
3709  cells somewhere else, send our own DoF indices
3710  to the appropriate set of other processors
3711  */
3712 
3713  // --------- Phase 1: enumerate dofs on locally owned cells
3714  const ::types::global_dof_index n_initial_local_dofs =
3715  Implementation::distribute_dofs (triangulation->locally_owned_subdomain(),
3716  *dof_handler);
3717 
3718  // --------- Phase 2: un-numerate dofs on interfaces to ghost cells
3719  // that we don't own; re-enumerate the remaining ones
3720 
3721  // start with the identity permutation of indices
3722  std::vector<::types::global_dof_index> renumbering(n_initial_local_dofs);
3723  for (::types::global_dof_index i=0; i<renumbering.size(); ++i)
3724  renumbering[i] = i;
3725 
3726  {
3727  std::vector<::types::global_dof_index> local_dof_indices;
3728 
3729  for (auto cell : dof_handler->active_cell_iterators())
3730  if (cell->is_ghost()
3731  &&
3732  (cell->subdomain_id() < triangulation->locally_owned_subdomain()))
3733  {
3734  // we found a neighboring ghost cell whose subdomain
3735  // is "stronger" than our own subdomain
3736 
3737  // delete all dofs that live there and that we have
3738  // previously assigned a number to (i.e. the ones on
3739  // the interface)
3740  local_dof_indices.resize (cell->get_fe().dofs_per_cell);
3741  cell->get_dof_indices (local_dof_indices);
3742  for (auto &local_dof_index : local_dof_indices)
3743  if (local_dof_index != numbers::invalid_dof_index)
3744  renumbering[local_dof_index]
 3745  = numbers::invalid_dof_index;
3746  }
3747  }
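      // the tie-breaking criterion used above, spelled out: on an interface
      // between two subdomains, the processor with the *smaller* subdomain id
      // keeps (and therefore owns) the shared DoFs. for example, on the
      // interface between subdomains 2 and 5, processor 5 runs through its
      // ghost cells of subdomain 2 in the loop above and un-numbers the shared
      // indices, while processor 2 leaves them alone.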
3748 
3749 
3750  // make the remaining indices consecutive
3751  ::types::global_dof_index n_locally_owned_dofs = 0;
3752  for (auto &new_index : renumbering)
3753  if (new_index != numbers::invalid_dof_index)
3754  new_index = n_locally_owned_dofs++;
3755 
3756  // --------- Phase 3: shift indices so that each processor has a unique
3757  // range of indices
3758  std::vector<::types::global_dof_index> n_locally_owned_dofs_per_processor(n_cpus);
3759 
3760  const int ierr = MPI_Allgather ( &n_locally_owned_dofs,
3761  1, DEAL_II_DOF_INDEX_MPI_TYPE,
3762  n_locally_owned_dofs_per_processor.data(),
3763  1, DEAL_II_DOF_INDEX_MPI_TYPE,
3764  triangulation->get_communicator());
3765  AssertThrowMPI(ierr);
3766 
3767  const ::types::global_dof_index
3768  my_shift = std::accumulate (n_locally_owned_dofs_per_processor.begin(),
3769  n_locally_owned_dofs_per_processor.begin()
3770  + triangulation->locally_owned_subdomain(),
3771  static_cast<::types::global_dof_index>(0));
3772  for (auto &new_index : renumbering)
3773  if (new_index != numbers::invalid_dof_index)
3774  new_index += my_shift;
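      // a worked example of the shift computation: with three processors owning
      // 5, 7, and 4 DoFs, MPI_Allgather yields
      // n_locally_owned_dofs_per_processor = {5, 7, 4} on every processor, and
      // the partial sums give my_shift = 0, 5, and 12, respectively, so that the
      // global enumeration ends up covering [0,16).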
3775 
3776  // now re-enumerate all dofs to this shifted and condensed
3777  // numbering form. we renumber some dofs as invalid, so
3778  // choose the nocheck-version.
3779  Implementation::renumber_dofs (renumbering, IndexSet(0),
3780  *dof_handler, false);
3781 
3782  // now a little bit of housekeeping
3783  const ::types::global_dof_index n_global_dofs
3784  = std::accumulate (n_locally_owned_dofs_per_processor.begin(),
3785  n_locally_owned_dofs_per_processor.end(),
 3786  static_cast<::types::global_dof_index>(0));
3787 
3788  std::vector<IndexSet> locally_owned_dofs_per_processor (n_cpus,
3789  IndexSet(n_global_dofs));
3790  {
3791  ::types::global_dof_index current_shift = 0;
3792  for (unsigned int i=0; i<n_cpus; ++i)
3793  {
3794  locally_owned_dofs_per_processor[i]
3795  .add_range(current_shift,
3796  current_shift +
3797  n_locally_owned_dofs_per_processor[i]);
3798  current_shift += n_locally_owned_dofs_per_processor[i];
3799  }
3800  }
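      // continuing the example above: with per-processor counts {5, 7, 4}, the
      // three IndexSets constructed here hold the half-open ranges [0,5),
      // [5,12), and [12,16), each within an IndexSet of global size 16
      // (IndexSet::add_range() takes a half-open interval).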
3801  NumberCache number_cache (locally_owned_dofs_per_processor,
3802  triangulation->locally_owned_subdomain());
3803  Assert(number_cache.locally_owned_dofs_per_processor
3804  [triangulation->locally_owned_subdomain()].n_elements()
3805  ==
3806  number_cache.n_locally_owned_dofs,
3807  ExcInternalError());
3808  Assert(!number_cache.locally_owned_dofs_per_processor
3809  [triangulation->locally_owned_subdomain()].n_elements()
3810  ||
3811  number_cache.locally_owned_dofs_per_processor
3812  [triangulation->locally_owned_subdomain()].nth_index_in_set(0)
3813  == my_shift,
3814  ExcInternalError());
3815 
3816  // this ends the phase where we enumerate degrees of freedom on
3817  // each processor. what is missing is communicating DoF indices
3818  // on ghost cells
3819 
3820  // --------- Phase 4: for all locally owned cells that are ghost
3821  // cells somewhere else, send our own DoF indices
3822  // to the appropriate set of other processors
3823  {
3824  std::vector<bool> user_flags;
3825  triangulation->save_user_flags(user_flags);
3826  triangulation->clear_user_flags ();
3827 
3828  // figure out which cells are ghost cells on which we have
3829  // to exchange DoF indices
3830  const std::map<unsigned int, std::set<::types::subdomain_id> >
3831  vertices_with_ghost_neighbors
3832  = triangulation->compute_vertices_with_ghost_neighbors ();
3833 
3834  // mark all cells that either have to send data (locally
3835  // owned cells that are adjacent to ghost neighbors in some
3836  // way) or receive data (all ghost cells) via the user flags
3837  for (auto cell : dof_handler->active_cell_iterators())
3838  if (cell->is_locally_owned())
3839  {
3840  for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
3841  if (vertices_with_ghost_neighbors.find (cell->vertex_index(v))
3842  != vertices_with_ghost_neighbors.end())
3843  {
3844  cell->set_user_flag();
3845  break;
3846  }
3847  }
3848  else if (cell->is_ghost())
3849  cell->set_user_flag();
3850 
3851 
3852 
 3853  // Send and receive cells. After this, only those local cells
 3854  // that received new data are still marked. This has to be
 3855  // communicated in a second communication step.
3856  //
3857  // as explained in the 'distributed' paper, this has to be
3858  // done twice
3859  communicate_dof_indices_on_marked_cells (*dof_handler,
3860  vertices_with_ghost_neighbors,
3861  triangulation->coarse_cell_to_p4est_tree_permutation,
3862  triangulation->p4est_tree_to_coarse_cell_permutation);
3863 
3864  communicate_dof_indices_on_marked_cells (*dof_handler,
3865  vertices_with_ghost_neighbors,
3866  triangulation->coarse_cell_to_p4est_tree_permutation,
3867  triangulation->p4est_tree_to_coarse_cell_permutation);
3868 
3869  // at this point, we must have taken care of the data transfer
3870  // on all cells we had previously marked. verify this
3871 #ifdef DEBUG
3872  for (auto cell : dof_handler->active_cell_iterators())
3873  Assert (cell->user_flag_set() == false,
3874  ExcInternalError());
3875 #endif
3876 
3877  triangulation->load_user_flags(user_flags);
3878  }
3879 
3880 #ifdef DEBUG
3881  // check that we are really done
3882  {
3883  std::vector<::types::global_dof_index> local_dof_indices;
3884 
3885  for (auto cell : dof_handler->active_cell_iterators())
3886  if (!cell->is_artificial())
3887  {
3888  local_dof_indices.resize (cell->get_fe().dofs_per_cell);
3889  cell->get_dof_indices (local_dof_indices);
3890  if (local_dof_indices.end() !=
3891  std::find (local_dof_indices.begin(),
3892  local_dof_indices.end(),
 3893  numbers::invalid_dof_index))
3894  {
3895  if (cell->is_ghost())
3896  {
3897  Assert(false,
3898  ExcMessage ("A ghost cell ended up with incomplete "
3899  "DoF index information. This should not "
3900  "have happened!"));
3901  }
3902  else
3903  {
3904  Assert(false,
3905  ExcMessage ("A locally owned cell ended up with incomplete "
3906  "DoF index information. This should not "
3907  "have happened!"));
3908  }
3909  }
3910  }
3911  }
3912 #endif // DEBUG
3913  return number_cache;
3914 #endif // DEAL_II_WITH_P4EST
3915  }
3916 
3917 
3918 
3919  template <class DoFHandlerType>
3920  std::vector<NumberCache>
 3921  ParallelDistributed<DoFHandlerType>::
 3922  distribute_mg_dofs () const
3923  {
3924 #ifndef DEAL_II_WITH_P4EST
3925  Assert (false, ExcNotImplemented());
3926  return std::vector<NumberCache>();
3927 #else
3928  const unsigned int dim = DoFHandlerType::dimension;
3929  const unsigned int spacedim = DoFHandlerType::space_dimension;
3930 
 3931  parallel::distributed::Triangulation< dim, spacedim > *triangulation
 3932  = (dynamic_cast<parallel::distributed::Triangulation<dim,spacedim>*>
3933  (const_cast<::Triangulation< dim, spacedim >*>
3934  (&dof_handler->get_triangulation())));
3935  Assert (triangulation != nullptr, ExcInternalError());
3936 
3937  AssertThrow(
3939  ExcMessage("Multigrid DoFs can only be distributed on a parallel "
3940  "Triangulation if the flag construct_multigrid_hierarchy "
3941  "is set in the constructor."));
3942 
3943 
3944  const unsigned int
3945  n_cpus = Utilities::MPI::n_mpi_processes (triangulation->get_communicator());
3946 
3947  // loop over all levels that exist globally (across all
3948  // processors), even if the current processor does not in fact
3949  // have any cells on that level or if the local part of the
3950  // Triangulation has fewer levels. we need to do this because
3951  // we need to communicate across all processors on all levels
3952  const unsigned int n_levels = triangulation->n_global_levels();
3953  std::vector<NumberCache> number_caches;
3954  number_caches.reserve(n_levels);
3955  for (unsigned int level = 0; level < n_levels; ++level)
3956  {
3957  NumberCache level_number_cache;
3958 
3959  //* 1. distribute on own subdomain
3960  const unsigned int n_initial_local_dofs =
3961  Implementation::distribute_dofs_on_level(triangulation->locally_owned_subdomain(),
3962  *dof_handler,
3963  level);
3964 
 3965  //* 2. iterate over ghost cells and kill dofs that are not
3966  // owned by us
3967  std::vector<::types::global_dof_index> renumbering(n_initial_local_dofs);
3968  for (::types::global_dof_index i=0; i<renumbering.size(); ++i)
3969  renumbering[i] = i;
3970 
3971  if (level < triangulation->n_levels())
3972  {
3973  std::vector<::types::global_dof_index> local_dof_indices;
3974 
3975  typename DoFHandlerType::level_cell_iterator
3976  cell = dof_handler->begin(level),
3977  endc = dof_handler->end(level);
3978 
3979  for (; cell != endc; ++cell)
3980  if (cell->level_subdomain_id()!=numbers::artificial_subdomain_id &&
3981  (cell->level_subdomain_id() < triangulation->locally_owned_subdomain()))
3982  {
3983  // we found a neighboring ghost cell whose
3984  // subdomain is "stronger" than our own
3985  // subdomain
3986 
3987  // delete all dofs that live there and that we
3988  // have previously assigned a number to
3989  // (i.e. the ones on the interface)
3990  local_dof_indices.resize (cell->get_fe().dofs_per_cell);
3991  cell->get_mg_dof_indices (local_dof_indices);
3992  for (unsigned int i=0; i<cell->get_fe().dofs_per_cell; ++i)
3993  if (local_dof_indices[i] != numbers::invalid_dof_index)
3994  renumbering[local_dof_indices[i]]
 3995  = numbers::invalid_dof_index;
3996  }
3997  }
3998 
3999 // TODO: make this code simpler with the new constructors of NumberCache
4000  // make indices consecutive
4001  level_number_cache.n_locally_owned_dofs = 0;
4002  for (std::vector<::types::global_dof_index>::iterator it=renumbering.begin();
4003  it!=renumbering.end(); ++it)
4004  if (*it != numbers::invalid_dof_index)
4005  *it = level_number_cache.n_locally_owned_dofs++;
4006 
4007  //* 3. communicate local dofcount and shift ids to make
4008  // them unique
4009  level_number_cache.n_locally_owned_dofs_per_processor.resize(n_cpus);
4010 
4011  int ierr = MPI_Allgather ( &level_number_cache.n_locally_owned_dofs,
4012  1, DEAL_II_DOF_INDEX_MPI_TYPE,
4013  &level_number_cache.n_locally_owned_dofs_per_processor[0],
4014  1, DEAL_II_DOF_INDEX_MPI_TYPE,
4015  triangulation->get_communicator());
4016  AssertThrowMPI(ierr);
4017 
4018  const ::types::global_dof_index
4019  shift = std::accumulate (level_number_cache
4020  .n_locally_owned_dofs_per_processor.begin(),
4021  level_number_cache
 4022  .n_locally_owned_dofs_per_processor.begin()
4023  + triangulation->locally_owned_subdomain(),
4024  static_cast<::types::global_dof_index>(0));
4025  for (std::vector<::types::global_dof_index>::iterator it=renumbering.begin();
4026  it!=renumbering.end(); ++it)
4027  if (*it != numbers::invalid_dof_index)
4028  (*it) += shift;
4029 
4030  // now re-enumerate all dofs to this shifted and condensed
4031  // numbering form. we renumber some dofs as invalid, so
4032  // choose the nocheck-version of the function
4033  //
4034  // of course there is nothing for us to renumber if the
4035  // level we are currently dealing with doesn't even exist
4036  // within the current triangulation, so skip renumbering
4037  // in that case
4038  if (level < triangulation->n_levels())
4039  Implementation::renumber_mg_dofs (renumbering, IndexSet(0),
4040  *dof_handler, level,
4041  false);
4042 
4043  // now a little bit of housekeeping
4044  level_number_cache.n_global_dofs
4045  = std::accumulate (level_number_cache
4046  .n_locally_owned_dofs_per_processor.begin(),
4047  level_number_cache
 4048  .n_locally_owned_dofs_per_processor.end(),
4049  static_cast<::types::global_dof_index>(0));
4050 
4051  level_number_cache.locally_owned_dofs = IndexSet(level_number_cache.n_global_dofs);
4052  level_number_cache.locally_owned_dofs
4053  .add_range(shift,
4054  shift+level_number_cache.n_locally_owned_dofs);
4055  level_number_cache.locally_owned_dofs.compress();
4056 
4057  // fill global_dof_indexsets
4058  level_number_cache.locally_owned_dofs_per_processor.resize(n_cpus);
4059  {
4060  ::types::global_dof_index current_shift = 0;
4061  for (unsigned int i=0; i<n_cpus; ++i)
4062  {
4063  level_number_cache.locally_owned_dofs_per_processor[i]
4064  = IndexSet(level_number_cache.n_global_dofs);
4065  level_number_cache.locally_owned_dofs_per_processor[i]
4066  .add_range(current_shift,
4067  current_shift +
4068  level_number_cache.n_locally_owned_dofs_per_processor[i]);
4069  current_shift += level_number_cache.n_locally_owned_dofs_per_processor[i];
4070  }
4071  }
4072  Assert(level_number_cache.locally_owned_dofs_per_processor
4073  [triangulation->locally_owned_subdomain()].n_elements()
4074  ==
4075  level_number_cache.n_locally_owned_dofs,
4076  ExcInternalError());
4077  Assert(!level_number_cache.locally_owned_dofs_per_processor
4078  [triangulation->locally_owned_subdomain()].n_elements()
4079  ||
4080  level_number_cache.locally_owned_dofs_per_processor
4081  [triangulation->locally_owned_subdomain()].nth_index_in_set(0)
4082  == shift,
4083  ExcInternalError());
4084 
4085  number_caches.emplace_back (level_number_cache);
4086  }
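        // at this point, number_caches holds one NumberCache per globally
        // existing level; within each level, processor p owns the contiguous
        // index range [shift_p, shift_p + n_p), computed by the same
        // Allgather-plus-partial-sum scheme as in distribute_dofs() above.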
4087 
4088 
4089  //* communicate ghost DoFs
 4090  // We mark all ghost cells by setting the user_flag and then request
 4091  // these cells from the corresponding owners. As this information
 4092  // can be incomplete, the exchange is performed in two phases (see below).
4093  {
4094  std::vector<bool> user_flags;
4095  triangulation->save_user_flags(user_flags);
4096  triangulation->clear_user_flags ();
4097 
4098  // mark all ghost cells for transfer
4099  {
4100  typename DoFHandlerType::level_cell_iterator
4101  cell, endc = dof_handler->end();
4102  for (cell = dof_handler->begin(); cell != endc; ++cell)
4103  if (cell->level_subdomain_id() != ::numbers::artificial_subdomain_id
4104  && !cell->is_locally_owned_on_level())
4105  cell->set_user_flag();
4106  }
4107 
4108  // Phase 1. Request all marked cells from corresponding owners. If we
4109  // managed to get every DoF, remove the user_flag, otherwise we
4110  // will request them again in the step below.
4111  communicate_mg_ghost_cells(*triangulation,
4112  *dof_handler,
4113  triangulation->coarse_cell_to_p4est_tree_permutation,
4114  triangulation->p4est_tree_to_coarse_cell_permutation);
4115 
4116  // have a barrier so that sends from above and below this
4117  // place are not mixed up.
4118  //
4119  // this is necessary because above we just see if there are
4120  // messages and then receive them, without discriminating
4121  // where they come from and whether they were sent in phase
4122  // 1 or 2 in communicate_mg_ghost_cells() on another
4123  // processor. the need for a global communication step like
4124  // this barrier could be avoided by receiving messages
4125  // specifically from those processors from which we expect
4126  // messages, and by using different tags for phase 1 and 2,
4127  // but the cost of a barrier is negligible compared to
4128  // everything else we do here
4129  const int ierr = MPI_Barrier(triangulation->get_communicator());
4130  AssertThrowMPI(ierr);
4131 
4132  // Phase 2, only request the cells that were not completed
4133  // in Phase 1.
4134  communicate_mg_ghost_cells(*triangulation,
4135  *dof_handler,
4136  triangulation->coarse_cell_to_p4est_tree_permutation,
4137  triangulation->p4est_tree_to_coarse_cell_permutation);
4138 
4139 #ifdef DEBUG
4140  // make sure we have removed all flags:
4141  {
4142  typename DoFHandlerType::level_cell_iterator
4143  cell, endc = dof_handler->end();
4144  for (cell = dof_handler->begin(); cell != endc; ++cell)
4145  if (cell->level_subdomain_id() != ::numbers::artificial_subdomain_id
4146  && !cell->is_locally_owned_on_level())
4147  Assert(cell->user_flag_set()==false, ExcInternalError());
4148  }
4149 #endif
4150 
4151  triangulation->load_user_flags(user_flags);
4152  }
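        // the reason for the two rounds above: the answer a cell's owner can
        // give in phase 1 may itself still contain invalid indices (e.g. for
        // DoFs the owner shares with yet another processor), in which case the
        // cell keeps its user flag and is requested once more in phase 2.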
4153 
4154 
4155 
4156 
4157 #ifdef DEBUG
4158  // check that we are really done
4159  {
4160  std::vector<::types::global_dof_index> local_dof_indices;
4161  typename DoFHandlerType::level_cell_iterator
4162  cell, endc = dof_handler->end();
4163 
4164  for (cell = dof_handler->begin(); cell != endc; ++cell)
4165  if (cell->level_subdomain_id() != ::numbers::artificial_subdomain_id)
4166  {
4167  local_dof_indices.resize (cell->get_fe().dofs_per_cell);
4168  cell->get_mg_dof_indices (local_dof_indices);
4169  if (local_dof_indices.end() !=
4170  std::find (local_dof_indices.begin(),
4171  local_dof_indices.end(),
 4172  numbers::invalid_dof_index))
4173  {
4174  Assert(false, ExcMessage ("not all DoFs got distributed!"));
4175  }
4176  }
4177  }
4178 #endif // DEBUG
4179 
4180  return number_caches;
4181 
4182 #endif // DEAL_II_WITH_P4EST
4183  }
4184 
4185 
4186  template <class DoFHandlerType>
4187  NumberCache
 4188  ParallelDistributed<DoFHandlerType>::
4189  renumber_dofs (const std::vector<::types::global_dof_index> &new_numbers) const
4190  {
4191  (void)new_numbers;
4192 
4193  Assert (new_numbers.size() == dof_handler->n_locally_owned_dofs(),
4194  ExcInternalError());
4195 
4196 #ifndef DEAL_II_WITH_P4EST
4197  Assert (false, ExcNotImplemented());
4198  return NumberCache();
4199 #else
4200  const unsigned int dim = DoFHandlerType::dimension;
4201  const unsigned int spacedim = DoFHandlerType::space_dimension;
4202 
 4203  parallel::distributed::Triangulation< dim, spacedim > *triangulation
 4204  = (dynamic_cast<parallel::distributed::Triangulation<dim,spacedim>*>
4205  (const_cast<::Triangulation< dim, spacedim >*>
4206  (&dof_handler->get_triangulation())));
4207  Assert (triangulation != nullptr, ExcInternalError());
4208 
4209 
4210  // First figure out the new set of locally owned DoF indices.
4211  // If we own no DoFs, we still need to go through this function,
4212  // but we can skip this calculation.
4213  //
4214  // The IndexSet::add_indices() function is substantially more
4215  // efficient if the set of indices is already sorted because
4216  // it can then insert ranges instead of individual elements.
4217  // consequently, pre-sort the array of new indices
4218  IndexSet my_locally_owned_new_dof_indices (dof_handler->n_dofs());
4219  if (dof_handler->n_locally_owned_dofs() > 0)
4220  {
4221  std::vector<::types::global_dof_index> new_numbers_sorted = new_numbers;
4222  std::sort(new_numbers_sorted.begin(), new_numbers_sorted.end());
4223 
4224  my_locally_owned_new_dof_indices.add_indices (new_numbers_sorted.begin(),
4225  new_numbers_sorted.end());
4226  my_locally_owned_new_dof_indices.compress();
4227 
4228  Assert (my_locally_owned_new_dof_indices.n_elements() == new_numbers.size(),
4229  ExcInternalError());
4230  }
4231 
4232  // delete all knowledge of DoF indices that are not locally
4233  // owned. we do so by getting DoF indices on cells, checking
4234  // whether they are locally owned, if not, setting them to
4235  // an invalid value, and then setting them again on the current
4236  // cell
4237  //
4238  // DoFs we (i) know about, and (ii) don't own locally must be located
4239  // either on ghost cells, or on the interface between a locally
4240  // owned cell and a ghost cell. In any case, it is sufficient
4241  // to kill them only from the ghost side cell, so loop only over
4242  // ghost cells
4243  {
4244  std::vector<::types::global_dof_index> local_dof_indices;
4245 
4246  for (auto cell : dof_handler->active_cell_iterators())
4247  if (cell->is_ghost())
4248  {
4249  local_dof_indices.resize (cell->get_fe().dofs_per_cell);
4250  cell->get_dof_indices (local_dof_indices);
4251 
4252  for (unsigned int i=0; i<cell->get_fe().dofs_per_cell; ++i)
4253  // delete a DoF index if it has not already been deleted
4254  // (e.g., by visiting a neighboring cell, if it is on the
4255  // boundary), and if we don't own it
4256  if ((local_dof_indices[i] != numbers::invalid_dof_index)
4257  &&
4258  (!dof_handler->locally_owned_dofs().is_element(local_dof_indices[i])))
4259  local_dof_indices[i] = numbers::invalid_dof_index;
4260 
4261  cell->set_dof_indices (local_dof_indices);
4262  }
4263  }
4264 
4265 
4266  // renumber. Skip when there is nothing to do because we own no DoF.
4267  if (dof_handler->locally_owned_dofs().n_elements() > 0)
4268  Implementation::renumber_dofs (new_numbers,
4269  dof_handler->locally_owned_dofs(),
4270  *dof_handler,
4271  false);
4272 
4273  // communicate newly assigned DoF indices to other processors
4274  // and get the same information for our own ghost cells.
4275  //
4276  // this is the same as phase 4 in the distribute_dofs() algorithm
4277  {
4278  std::vector<bool> user_flags;
4279  triangulation->save_user_flags(user_flags);
4280  triangulation->clear_user_flags ();
4281 
4282  // mark all own cells for transfer
4283  for (auto cell : dof_handler->active_cell_iterators())
4284  if (!cell->is_artificial())
4285  cell->set_user_flag();
4286 
4287  // figure out which cells are ghost cells on which we have
4288  // to exchange DoF indices
4289  const std::map<unsigned int, std::set<::types::subdomain_id> >
4290  vertices_with_ghost_neighbors
4291  = triangulation->compute_vertices_with_ghost_neighbors ();
4292 
4293 
 4294  // Send and receive cells. After this, only those local cells
 4295  // that received new data are still marked. This has to be
 4296  // communicated in a second communication step.
4297  //
4298  // as explained in the 'distributed' paper, this has to be
4299  // done twice
4300  communicate_dof_indices_on_marked_cells (*dof_handler,
4301  vertices_with_ghost_neighbors,
4302  triangulation->coarse_cell_to_p4est_tree_permutation,
4303  triangulation->p4est_tree_to_coarse_cell_permutation);
4304 
4305  communicate_dof_indices_on_marked_cells (*dof_handler,
4306  vertices_with_ghost_neighbors,
4307  triangulation->coarse_cell_to_p4est_tree_permutation,
4308  triangulation->p4est_tree_to_coarse_cell_permutation);
4309 
4310  triangulation->load_user_flags(user_flags);
4311  }
4312 
4313  // the last step is to update the NumberCache, including knowing which
4314  // processor owns which DoF index. this requires communication.
4315  //
4316  // this step is substantially more complicated than it is in
4317  // distribute_dofs() because the IndexSets of locally owned DoFs
4318  // after renumbering may not be contiguous any more. for
4319  // distribute_dofs() it was enough to exchange the starting
4320  // indices for each processor and the global number of DoFs,
4321  // but here we actually have to serialize the IndexSet
 4322  // objects and ship them across the network.
4323  const unsigned int n_cpus = Utilities::MPI::n_mpi_processes (triangulation->get_communicator());
4324  std::vector<IndexSet> locally_owned_dofs_per_processor(n_cpus,
4325  IndexSet(dof_handler->n_dofs()));
4326  {
4327  // serialize our own IndexSet
4328  std::vector<char> my_data;
4329  {
4330 #ifdef DEAL_II_WITH_ZLIB
4331 
4332  boost::iostreams::filtering_ostream out;
4333  out.push(boost::iostreams::gzip_compressor
4334  (boost::iostreams::gzip_params
4335  (boost::iostreams::gzip::best_compression)));
4336  out.push(boost::iostreams::back_inserter(my_data));
4337 
4338  boost::archive::binary_oarchive archive(out);
4339 
4340  archive << my_locally_owned_new_dof_indices;
4341  out.flush();
4342 #else
4343  std::ostringstream out;
4344  boost::archive::binary_oarchive archive(out);
4345  archive << my_locally_owned_new_dof_indices;
4346  const std::string &s = out.str();
4347  my_data.reserve(s.size());
4348  my_data.assign(s.begin(), s.end());
4349 #endif
4350  }
4351 
4352  // determine maximum size of IndexSet
4353  const unsigned int max_size
4354  = Utilities::MPI::max (my_data.size(), triangulation->get_communicator());
4355 
4356  // as the MPI_Allgather call will be reading max_size elements, and
4357  // as this may be past the end of my_data, we need to increase the
4358  // size of the local buffer. This is filled with zeros.
4359  my_data.resize(max_size);
4360 
4361  std::vector<char> buffer(max_size*n_cpus);
4362  const int ierr = MPI_Allgather(my_data.data(), max_size, MPI_BYTE,
4363  buffer.data(), max_size, MPI_BYTE,
4364  triangulation->get_communicator());
4365  AssertThrowMPI(ierr);
4366 
4367  for (unsigned int i=0; i<n_cpus; ++i)
4368  if (i == Utilities::MPI::this_mpi_process (triangulation->get_communicator()))
4369  locally_owned_dofs_per_processor[i] = my_locally_owned_new_dof_indices;
4370  else
4371  {
4372  // copy the data previously received into a stringstream
4373  // object and then read the IndexSet from it
4374  std::string decompressed_buffer;
4375 
4376  // first decompress the buffer
4377  {
4378 #ifdef DEAL_II_WITH_ZLIB
4379 
4380  boost::iostreams::filtering_ostream decompressing_stream;
4381  decompressing_stream.push(boost::iostreams::gzip_decompressor());
4382  decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
4383 
4384  decompressing_stream.write(&buffer[i*max_size], max_size);
4385 #else
4386  decompressed_buffer.assign (&buffer[i*max_size], max_size);
4387 #endif
4388  }
4389 
4390  // then restore the object from the buffer
4391  std::istringstream in(decompressed_buffer);
4392  boost::archive::binary_iarchive archive(in);
4393 
4394  archive >> locally_owned_dofs_per_processor[i];
4395  }
4396  }
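        // layout of the gathered buffer above: rank i's (possibly compressed,
        // zero-padded) serialization of its locally owned IndexSet occupies
        // bytes [i*max_size, (i+1)*max_size). padding every contribution to the
        // common max_size is what makes a plain MPI_Allgather with a fixed
        // count work here, at the cost of some wasted bandwidth.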
4397 
4398  return NumberCache (locally_owned_dofs_per_processor,
4399  Utilities::MPI::this_mpi_process (triangulation->get_communicator()));
4400 #endif
4401  }
4402 
4403 
4404 
4405  template <class DoFHandlerType>
4406  NumberCache
 4407  ParallelDistributed<DoFHandlerType>::
4408  renumber_mg_dofs (const unsigned int level,
4409  const std::vector<types::global_dof_index> &new_numbers) const
4410  {
4411  // we only implement the case where the multigrid numbers are
4412  // renumbered within the processor's partition, rather than the most
4413  // general case
4414  const std::vector<IndexSet> &index_sets = dof_handler->locally_owned_mg_dofs_per_processor(level);
4415 
4416  constexpr int dim = DoFHandlerType::dimension;
4417  constexpr int spacedim = DoFHandlerType::space_dimension;
 4418  const parallel::Triangulation<dim, spacedim> *tr =
4419  (dynamic_cast<const parallel::Triangulation<dim, spacedim>*>
4420  (&this->dof_handler->get_triangulation()));
4421  Assert(tr != nullptr, ExcInternalError());
4422 
4423 #ifdef DEAL_II_WITH_MPI
4424  const unsigned int my_rank = Utilities::MPI::this_mpi_process(tr->get_communicator());
4425 
4426 #ifdef DEBUG
4427  for (types::global_dof_index i : new_numbers)
4428  {
4429  Assert(index_sets[my_rank].is_element(i),
4430  ExcNotImplemented("Renumberings that change the locally owned mg dofs "
4431  "partitioning are currently not implemented for "
4432  "the multigrid levels"));
4433  }
4434 #endif
4435 
4436  // we need to access all locally relevant degrees of freedom. we
4437  // use Utilities::MPI::Partitioner for handling the data exchange
4438  // of the new numbers, which is simply the extraction of ghost data
4439  IndexSet relevant_dofs;
4440  DoFTools::extract_locally_relevant_level_dofs(*dof_handler, level, relevant_dofs);
4441  std::vector<types::global_dof_index> ghosted_new_numbers(relevant_dofs.n_elements());
4442  {
4443  Utilities::MPI::Partitioner partitioner(index_sets[my_rank],
4444  relevant_dofs, tr->get_communicator());
4445  std::vector<types::global_dof_index> temp_array(partitioner.n_import_indices());
4446  const unsigned int communication_channel = 17;
4447  std::vector<MPI_Request> requests;
4448  partitioner.export_to_ghosted_array_start
4449  (communication_channel,
4450  make_array_view(new_numbers),
4451  make_array_view(temp_array),
4452  ArrayView<types::global_dof_index>(ghosted_new_numbers.data()+
4453  new_numbers.size(), partitioner.n_ghost_indices()),
4454  requests);
4455  partitioner.export_to_ghosted_array_finish
4456  (ArrayView<types::global_dof_index>(ghosted_new_numbers.data()+
4457  new_numbers.size(), partitioner.n_ghost_indices()),
4458  requests);
4459 
4460  // we need to fill the indices of the locally owned part into the
4461  // new numbers array. their right position is somewhere in the
4462  // middle of the array, so we first copy the ghosted part from
4463  // smaller ranks to the front, then insert the data in the middle.
4464  unsigned int n_ghosts_on_smaller_ranks = 0;
4465  for (std::pair<unsigned int,unsigned int> t : partitioner.ghost_targets())
4466  {
4467  if (t.first > my_rank)
4468  break;
4469  n_ghosts_on_smaller_ranks += t.second;
4470  }
4471  if (n_ghosts_on_smaller_ranks>0)
4472  {
4473  Assert(ghosted_new_numbers.data()!=nullptr, ExcInternalError());
4474  std::memmove(ghosted_new_numbers.data(),
4475  ghosted_new_numbers.data()+new_numbers.size(),
4476  sizeof(types::global_dof_index)*n_ghosts_on_smaller_ranks);
4477  }
4478  if (new_numbers.size()>0)
4479  {
4480  Assert (new_numbers.data()!=nullptr, ExcInternalError());
4481  std::memcpy(ghosted_new_numbers.data()+n_ghosts_on_smaller_ranks,
4482  new_numbers.data(),
4483  sizeof(types::global_dof_index)*new_numbers.size());
4484  }
4485  }
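        // the resulting ghosted_new_numbers array is ordered by (old) global
        // DoF index within relevant_dofs: ghost entries owned by lower ranks,
        // then the locally owned block, then ghost entries owned by higher
        // ranks. the export above writes all ghost entries to the tail of the
        // array, and the memmove/memcpy calls shuffle the locally owned new
        // numbers into the middle slot.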
4486 
4487  // in case we do not own any of the given level (but only some remote
4488  // processor), we do not need to call the renumbering
4489  if (level < this->dof_handler->get_triangulation().n_levels())
4490  Implementation::renumber_mg_dofs (ghosted_new_numbers, relevant_dofs,
4491  *dof_handler, level, true);
4492 #else
4493  (void) new_numbers;
4494  Assert(false, ExcNotImplemented());
4495 #endif
4496 
4497  return NumberCache (index_sets,
 4498  Utilities::MPI::this_mpi_process(tr->get_communicator()));
4499  }
4500  }
4501  }
4502 }
4503 
4504 
4505 
4506 
4507 /*-------------- Explicit Instantiations -------------------------------*/
4508 #include "dof_handler_policy.inst"
4509 
4510 
4511 DEAL_II_NAMESPACE_CLOSE