Reference documentation for deal.II version 9.2.0
mg_level_global_transfer.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2003 - 2020 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#include <deal.II/base/function.h>
#include <deal.II/base/logstream.h>

#include <deal.II/dofs/dof_accessor.h>

#include <deal.II/fe/fe.h>

#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_iterator.h>

#include <deal.II/lac/block_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>

#include <deal.II/multigrid/mg_tools.h>
#include <deal.II/multigrid/mg_transfer.h>
#include <deal.II/multigrid/mg_transfer.templates.h>
#include <deal.II/multigrid/mg_transfer_internal.h>

#include <algorithm>

DEAL_II_NAMESPACE_OPEN

/* ------------------ MGLevelGlobalTransfer<VectorType> ----------------- */


template <typename VectorType>
template <int dim, int spacedim>
void
MGLevelGlobalTransfer<VectorType>::fill_and_communicate_copy_indices(
  const DoFHandler<dim, spacedim> &mg_dof)
{
  internal::MGTransfer::fill_copy_indices(mg_dof,
                                          mg_constrained_dofs,
                                          copy_indices,
                                          copy_indices_global_mine,
                                          copy_indices_level_mine);

  // check if we can run a plain copy operation between the global DoFs and
  // the finest level.
  bool my_perform_plain_copy =
    (copy_indices.back().size() == mg_dof.locally_owned_dofs().n_elements()) &&
    (mg_dof.locally_owned_dofs().n_elements() ==
     mg_dof
       .locally_owned_mg_dofs(mg_dof.get_triangulation().n_global_levels() - 1)
       .n_elements());
  if (my_perform_plain_copy)
    {
      AssertDimension(copy_indices_global_mine.back().size(), 0);
      AssertDimension(copy_indices_level_mine.back().size(), 0);

      // check whether there is a renumbering of degrees of freedom on
      // either the finest level or the global dofs, which means that we
      // cannot apply a plain copy
      for (const auto &copy_index : copy_indices.back())
        if (copy_index.first != copy_index.second)
          {
            my_perform_plain_copy = false;
            break;
          }
    }

  // now do a global reduction over all processors to see what operation
  // they can agree upon
  if (const parallel::TriangulationBase<dim, spacedim> *ptria =
        dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
          &mg_dof.get_triangulation()))
    perform_plain_copy = (Utilities::MPI::min(my_perform_plain_copy ? 1 : 0,
                                              ptria->get_communicator()) == 1);
  else
    perform_plain_copy = my_perform_plain_copy;
}
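
// Usage sketch (editorial illustration, not part of the deal.II sources):
// this routine is not normally called by user code; it runs during the setup
// of a derived transfer class such as MGTransferPrebuilt. Assuming a
// DoFHandler `dof_handler` on which distribute_dofs() and
// distribute_mg_dofs() have already been called:
//
//   MGTransferPrebuilt<Vector<double>> transfer;
//   transfer.build(dof_handler); // invokes fill_and_communicate_copy_indices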



template <typename VectorType>
void
MGLevelGlobalTransfer<VectorType>::clear()
{
  sizes.resize(0);
  copy_indices.clear();
  copy_indices_global_mine.clear();
  copy_indices_level_mine.clear();
  component_to_block_map.resize(0);
  mg_constrained_dofs = nullptr;
  perform_plain_copy  = false;
}



template <typename VectorType>
void
MGLevelGlobalTransfer<VectorType>::print_indices(std::ostream &os) const
{
  for (unsigned int level = 0; level < copy_indices.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices[level].size(); ++i)
        os << "copy_indices[" << level << "]\t" << copy_indices[level][i].first
           << '\t' << copy_indices[level][i].second << std::endl;
    }

  for (unsigned int level = 0; level < copy_indices_level_mine.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices_level_mine[level].size(); ++i)
        os << "copy_ifrom  [" << level << "]\t"
           << copy_indices_level_mine[level][i].first << '\t'
           << copy_indices_level_mine[level][i].second << std::endl;
    }
  for (unsigned int level = 0; level < copy_indices_global_mine.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices_global_mine[level].size(); ++i)
        os << "copy_ito    [" << level << "]\t"
           << copy_indices_global_mine[level][i].first << '\t'
           << copy_indices_global_mine[level][i].second << std::endl;
    }
}
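
// Illustration (editorial, not part of the deal.II sources): after setup the
// mapping can be dumped for debugging; each output line shows one
// (global index, level index) pair per level:
//
//   transfer.print_indices(std::cout);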



template <typename VectorType>
std::size_t
MGLevelGlobalTransfer<VectorType>::memory_consumption() const
{
  std::size_t result = sizeof(*this);
  result += MemoryConsumption::memory_consumption(sizes);
  result += MemoryConsumption::memory_consumption(copy_indices);
  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);

  return result;
}



/* ---- MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>> ---- */

namespace
{
  template <int dim, int spacedim, typename Number>
  void
  fill_internal(
    const DoFHandler<dim, spacedim> &mg_dof,
    SmartPointer<
      const MGConstrainedDoFs,
      MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>>
                   mg_constrained_dofs,
    const MPI_Comm mpi_communicator,
    const bool     transfer_solution_vectors,
    std::vector<Table<2, unsigned int>> &       copy_indices,
    std::vector<Table<2, unsigned int>> &       copy_indices_global_mine,
    std::vector<Table<2, unsigned int>> &       copy_indices_level_mine,
    LinearAlgebra::distributed::Vector<Number> &ghosted_global_vector,
    MGLevelObject<LinearAlgebra::distributed::Vector<Number>>
      &ghosted_level_vector)
  {
    // first go to the usual routine...
    std::vector<
      std::vector<std::pair<types::global_dof_index, types::global_dof_index>>>
      my_copy_indices;
    std::vector<
      std::vector<std::pair<types::global_dof_index, types::global_dof_index>>>
      my_copy_indices_global_mine;
    std::vector<
      std::vector<std::pair<types::global_dof_index, types::global_dof_index>>>
      my_copy_indices_level_mine;

    internal::MGTransfer::fill_copy_indices(mg_dof,
                                            mg_constrained_dofs,
                                            my_copy_indices,
                                            my_copy_indices_global_mine,
                                            my_copy_indices_level_mine,
                                            !transfer_solution_vectors);

    // get all degrees of freedom that we need read access to in copy_to_mg
    // and copy_from_mg, respectively. We fill an IndexSet once on each level
    // (for the global_mine indices accessing remote level indices) and once
    // globally (for the level_mine indices accessing remote global indices).

    // the variables index_set and level_index_set are going to define the
    // ghost indices of the respective vectors (due to construction, these are
    // precisely the indices that we need)

    IndexSet index_set(mg_dof.locally_owned_dofs().size());
    std::vector<types::global_dof_index> accessed_indices;
    ghosted_level_vector.resize(0,
                                mg_dof.get_triangulation().n_global_levels() -
                                  1);
    std::vector<IndexSet> level_index_set(
      mg_dof.get_triangulation().n_global_levels());
    for (unsigned int l = 0; l < mg_dof.get_triangulation().n_global_levels();
         ++l)
      {
        for (const auto &indices : my_copy_indices_level_mine[l])
          accessed_indices.push_back(indices.first);
        std::vector<types::global_dof_index> accessed_level_indices;
        for (const auto &indices : my_copy_indices_global_mine[l])
          accessed_level_indices.push_back(indices.second);
        std::sort(accessed_level_indices.begin(), accessed_level_indices.end());
        level_index_set[l].set_size(mg_dof.locally_owned_mg_dofs(l).size());
        level_index_set[l].add_indices(accessed_level_indices.begin(),
                                       accessed_level_indices.end());
        level_index_set[l].compress();
        ghosted_level_vector[l].reinit(mg_dof.locally_owned_mg_dofs(l),
                                       level_index_set[l],
                                       mpi_communicator);
      }
    std::sort(accessed_indices.begin(), accessed_indices.end());
    index_set.add_indices(accessed_indices.begin(), accessed_indices.end());
    index_set.compress();
    ghosted_global_vector.reinit(mg_dof.locally_owned_dofs(),
                                 index_set,
                                 mpi_communicator);

    // localize the copy indices for faster access. Since all access will be
    // through the ghosted vector in 'data', we can use this (much faster)
    // option
    copy_indices.resize(mg_dof.get_triangulation().n_global_levels());
    copy_indices_level_mine.resize(
      mg_dof.get_triangulation().n_global_levels());
    copy_indices_global_mine.resize(
      mg_dof.get_triangulation().n_global_levels());
    for (unsigned int level = 0;
         level < mg_dof.get_triangulation().n_global_levels();
         ++level)
      {
        const Utilities::MPI::Partitioner &global_partitioner =
          *ghosted_global_vector.get_partitioner();
        const Utilities::MPI::Partitioner &level_partitioner =
          *ghosted_level_vector[level].get_partitioner();

        auto translate_indices =
          [&](const std::vector<
                std::pair<types::global_dof_index, types::global_dof_index>>
                &                       global_copy_indices,
              Table<2, unsigned int> &local_copy_indices) {
            local_copy_indices.reinit(2, global_copy_indices.size());
            for (unsigned int i = 0; i < global_copy_indices.size(); ++i)
              {
                local_copy_indices(0, i) = global_partitioner.global_to_local(
                  global_copy_indices[i].first);
                local_copy_indices(1, i) = level_partitioner.global_to_local(
                  global_copy_indices[i].second);
              }
          };

        // owned-owned case
        translate_indices(my_copy_indices[level], copy_indices[level]);

        // remote-owned case
        translate_indices(my_copy_indices_level_mine[level],
                          copy_indices_level_mine[level]);

        // owned-remote case
        translate_indices(my_copy_indices_global_mine[level],
                          copy_indices_global_mine[level]);
      }
  }
} // namespace
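
// Editorial note on the layout produced by fill_internal(): row 0 of each
// Table<2, unsigned int> stores indices local to the partitioner of
// ghosted_global_vector, and row 1 stores indices local to the partitioner
// of ghosted_level_vector[level]. The copy_to_mg()/copy_from_mg() loops can
// therefore move entries with plain array accesses along the lines of
//
//   level_vector.local_element(copy_indices[level](1, i)) =
//     ghosted_global_vector.local_element(copy_indices[level](0, i));
//
// without any further global-to-local translation.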

template <typename Number>
template <int dim, int spacedim>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
  fill_and_communicate_copy_indices(const DoFHandler<dim, spacedim> &mg_dof)
{
  const parallel::TriangulationBase<dim, spacedim> *ptria =
    dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
      &mg_dof.get_triangulation());
  const MPI_Comm mpi_communicator =
    ptria != nullptr ? ptria->get_communicator() : MPI_COMM_SELF;

  fill_internal(mg_dof,
                mg_constrained_dofs,
                mpi_communicator,
                false,
                this->copy_indices,
                this->copy_indices_global_mine,
                this->copy_indices_level_mine,
                ghosted_global_vector,
                ghosted_level_vector);

  // in case we have hanging nodes which imply different ownership of
  // global and level dof indices, we must also fill the solution indices
  // which contain additional indices, otherwise they can re-use the
  // indices of the standard transfer.
  int have_refinement_edge_dofs = 0;
  if (mg_constrained_dofs != nullptr)
    for (unsigned int level = 0;
         level < mg_dof.get_triangulation().n_global_levels();
         ++level)
      if (mg_constrained_dofs->get_refinement_edge_indices(level).n_elements() >
          0)
        {
          have_refinement_edge_dofs = 1;
          break;
        }
  if (Utilities::MPI::max(have_refinement_edge_dofs, mpi_communicator) == 1)
    fill_internal(mg_dof,
                  mg_constrained_dofs,
                  mpi_communicator,
                  true,
                  this->solution_copy_indices,
                  this->solution_copy_indices_global_mine,
                  this->solution_copy_indices_level_mine,
                  solution_ghosted_global_vector,
                  solution_ghosted_level_vector);
  else
    {
      this->solution_copy_indices             = this->copy_indices;
      this->solution_copy_indices_global_mine = this->copy_indices_global_mine;
      this->solution_copy_indices_level_mine  = this->copy_indices_level_mine;
      solution_ghosted_global_vector          = ghosted_global_vector;
      solution_ghosted_level_vector           = ghosted_level_vector;
    }

  bool my_perform_renumbered_plain_copy =
    (this->copy_indices.back().n_cols() ==
     mg_dof.locally_owned_dofs().n_elements());
  bool my_perform_plain_copy = false;
  if (my_perform_renumbered_plain_copy)
    {
      my_perform_plain_copy = true;
      AssertDimension(this->copy_indices_global_mine.back().n_rows(), 0);
      AssertDimension(this->copy_indices_level_mine.back().n_rows(), 0);

      // check whether there is a renumbering of degrees of freedom on
      // either the finest level or the global dofs, which means that we
      // cannot apply a plain copy
      for (unsigned int i = 0; i < this->copy_indices.back().n_cols(); ++i)
        if (this->copy_indices.back()(0, i) != this->copy_indices.back()(1, i))
          {
            my_perform_plain_copy = false;
            break;
          }
    }

  // now do a global reduction over all processors to see what operation
  // they can agree upon
  perform_plain_copy =
    Utilities::MPI::min(static_cast<int>(my_perform_plain_copy),
                        mpi_communicator);
  perform_renumbered_plain_copy =
    Utilities::MPI::min(static_cast<int>(my_perform_renumbered_plain_copy),
                        mpi_communicator);

  // if we do a plain copy, no need to hold additional ghosted vectors
  if (perform_renumbered_plain_copy)
    {
      for (unsigned int i = 0; i < this->copy_indices.back().n_cols(); ++i)
        AssertDimension(this->copy_indices.back()(0, i), i);

      ghosted_global_vector.reinit(0);
      ghosted_level_vector.resize(0, 0);
      solution_ghosted_global_vector.reinit(0);
      solution_ghosted_level_vector.resize(0, 0);
    }
}
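
// Editorial note: when perform_plain_copy ends up true on all ranks, the
// copy routines can bypass the index tables entirely and copy the locally
// owned range in one call, roughly
//
//   dst[dst.max_level()].copy_locally_owned_data_from(src);
//
// (a sketch of the fast path; the actual call site is in
// mg_transfer.templates.h).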



template <typename Number>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::clear()
{
  sizes.resize(0);
  copy_indices.clear();
  copy_indices_global_mine.clear();
  copy_indices_level_mine.clear();
  component_to_block_map.resize(0);
  mg_constrained_dofs = nullptr;
  ghosted_global_vector.reinit(0);
  ghosted_level_vector.resize(0, 0);
  perform_plain_copy            = false;
  perform_renumbered_plain_copy = false;
}



template <typename Number>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
  print_indices(std::ostream &os) const
{
  for (unsigned int level = 0; level < copy_indices.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices[level].n_cols(); ++i)
        os << "copy_indices[" << level << "]\t" << copy_indices[level](0, i)
           << '\t' << copy_indices[level](1, i) << std::endl;
    }

  for (unsigned int level = 0; level < copy_indices_level_mine.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices_level_mine[level].n_cols(); ++i)
        os << "copy_ifrom  [" << level << "]\t"
           << copy_indices_level_mine[level](0, i) << '\t'
           << copy_indices_level_mine[level](1, i) << std::endl;
    }
  for (unsigned int level = 0; level < copy_indices_global_mine.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices_global_mine[level].n_cols();
           ++i)
        os << "copy_ito    [" << level << "]\t"
           << copy_indices_global_mine[level](0, i) << '\t'
           << copy_indices_global_mine[level](1, i) << std::endl;
    }
}



template <typename Number>
std::size_t
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
  memory_consumption() const
{
  std::size_t result = sizeof(*this);
  result += MemoryConsumption::memory_consumption(sizes);
  result += MemoryConsumption::memory_consumption(copy_indices);
  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);
  result += ghosted_global_vector.memory_consumption();
  for (unsigned int i = ghosted_level_vector.min_level();
       i <= ghosted_level_vector.max_level();
       ++i)
    result += ghosted_level_vector[i].memory_consumption();

  return result;
}
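
// Illustration (editorial): the returned byte count covers the index tables
// plus the ghosted global and level vectors, e.g.
//
//   std::cout << "transfer memory: " << transfer.memory_consumption()
//             << " bytes" << std::endl;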



// explicit instantiation
#include "mg_level_global_transfer.inst"

// create an additional instantiation currently not supported by the automatic
// template instantiation scheme
template class MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<float>>;


DEAL_II_NAMESPACE_CLOSE