Reference documentation for deal.II version 9.1.1
mg_level_global_transfer.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2003 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/function.h>
18 #include <deal.II/base/logstream.h>
19 
20 #include <deal.II/dofs/dof_accessor.h>
21 
22 #include <deal.II/fe/fe.h>
23 
24 #include <deal.II/grid/tria.h>
25 #include <deal.II/grid/tria_iterator.h>
26 
27 #include <deal.II/lac/block_vector.h>
28 #include <deal.II/lac/la_parallel_block_vector.h>
29 #include <deal.II/lac/la_parallel_vector.h>
30 #include <deal.II/lac/trilinos_epetra_vector.h>
31 #include <deal.II/lac/trilinos_parallel_block_vector.h>
32 #include <deal.II/lac/trilinos_vector.h>
33 #include <deal.II/lac/vector.h>
34 
35 #include <deal.II/multigrid/mg_tools.h>
36 #include <deal.II/multigrid/mg_transfer.h>
37 #include <deal.II/multigrid/mg_transfer.templates.h>
38 #include <deal.II/multigrid/mg_transfer_internal.h>
39 
40 #include <algorithm>
41 
42 DEAL_II_NAMESPACE_OPEN
43 
44 
45 /* ------------------ MGLevelGlobalTransfer<VectorType> ----------------- */
46 
47 
48 template <typename VectorType>
49 template <int dim, int spacedim>
50 void
51 MGLevelGlobalTransfer<VectorType>::fill_and_communicate_copy_indices(
52  const DoFHandler<dim, spacedim> &mg_dof)
53 {
54  internal::MGTransfer::fill_copy_indices(mg_dof,
55  mg_constrained_dofs,
56  copy_indices,
57  copy_indices_global_mine,
58  copy_indices_level_mine);
59 
60  // check if we can run a plain copy operation between the global DoFs and
61  // the finest level.
62  bool my_perform_plain_copy =
63  (copy_indices.back().size() == mg_dof.locally_owned_dofs().n_elements()) &&
64  (mg_dof.locally_owned_dofs().n_elements() ==
65  mg_dof
66  .locally_owned_mg_dofs(mg_dof.get_triangulation().n_global_levels() - 1)
67  .n_elements());
68  if (my_perform_plain_copy)
69  {
70  AssertDimension(copy_indices_global_mine.back().size(), 0);
71  AssertDimension(copy_indices_level_mine.back().size(), 0);
72 
73  // check whether there is a renumbering of degrees of freedom on
74  // either the finest level or the global dofs, which means that we
75  // cannot apply a plain copy
76  for (unsigned int i = 0; i < copy_indices.back().size(); ++i)
77  if (copy_indices.back()[i].first != copy_indices.back()[i].second)
78  {
79  my_perform_plain_copy = false;
80  break;
81  }
82  }
83 
84  // now do a global reduction over all processors to see what operation
85 //  they can agree upon
86  if (const parallel::Triangulation<dim, spacedim> *ptria =
87  dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
88  &mg_dof.get_triangulation()))
89  perform_plain_copy = (Utilities::MPI::min(my_perform_plain_copy ? 1 : 0,
90  ptria->get_communicator()) == 1);
91  else
92  perform_plain_copy = my_perform_plain_copy;
93 }
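The function above first decides on each process whether a plain copy between the global vector and the finest level is possible, and then agrees on that decision across all processes with a minimum reduction, so every rank later takes the same branch. A minimal sketch of that agreement pattern using Utilities::MPI::min; the helper name agree_on_plain_copy and its arguments are illustrative and not part of this file:

  #include <deal.II/base/mpi.h>

  // Every rank computes a local 0/1 flag; the minimum over the communicator
  // is 1 only if all ranks voted 1, i.e. the reduction acts as a logical AND.
  bool agree_on_plain_copy(const bool locally_possible, const MPI_Comm comm)
  {
    return dealii::Utilities::MPI::min(locally_possible ? 1 : 0, comm) == 1;
  }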
94 
95 
96 
97 template <typename VectorType>
98 void
99 MGLevelGlobalTransfer<VectorType>::clear()
100 {
101  sizes.resize(0);
102  copy_indices.clear();
103  copy_indices_global_mine.clear();
104  copy_indices_level_mine.clear();
105  component_to_block_map.resize(0);
106  mg_constrained_dofs = nullptr;
107  perform_plain_copy = false;
108 }
109 
110 
111 
112 template <typename VectorType>
113 void
114 MGLevelGlobalTransfer<VectorType>::print_indices(std::ostream &os) const
115 {
116  for (unsigned int level = 0; level < copy_indices.size(); ++level)
117  {
118  for (unsigned int i = 0; i < copy_indices[level].size(); ++i)
119  os << "copy_indices[" << level << "]\t" << copy_indices[level][i].first
120  << '\t' << copy_indices[level][i].second << std::endl;
121  }
122 
123  for (unsigned int level = 0; level < copy_indices_level_mine.size(); ++level)
124  {
125  for (unsigned int i = 0; i < copy_indices_level_mine[level].size(); ++i)
126  os << "copy_ifrom [" << level << "]\t"
127  << copy_indices_level_mine[level][i].first << '\t'
128  << copy_indices_level_mine[level][i].second << std::endl;
129  }
130  for (unsigned int level = 0; level < copy_indices_global_mine.size(); ++level)
131  {
132  for (unsigned int i = 0; i < copy_indices_global_mine[level].size(); ++i)
133  os << "copy_ito [" << level << "]\t"
134  << copy_indices_global_mine[level][i].first << '\t'
135  << copy_indices_global_mine[level][i].second << std::endl;
136  }
137 }
138 
139 
140 
141 template <typename VectorType>
142 std::size_t
143 MGLevelGlobalTransfer<VectorType>::memory_consumption() const
144 {
145  std::size_t result = sizeof(*this);
146  result += MemoryConsumption::memory_consumption(sizes);
147  result += MemoryConsumption::memory_consumption(copy_indices);
148  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
149  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);
150 
151  return result;
152 }
153 
154 
155 
156 /* ------------------ MGLevelGlobalTransfer<VectorType> ----------------- */
157 
158 namespace
159 {
160  template <int dim, int spacedim, typename Number>
161  void
162  fill_internal(
163  const DoFHandler<dim, spacedim> &mg_dof,
164  SmartPointer<
165  const MGConstrainedDoFs,
166  MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>>
167  mg_constrained_dofs,
168  const MPI_Comm mpi_communicator,
169  const bool transfer_solution_vectors,
170  std::vector<std::vector<std::pair<unsigned int, unsigned int>>>
171  &copy_indices,
172  std::vector<std::vector<std::pair<unsigned int, unsigned int>>>
173  &copy_indices_global_mine,
174  std::vector<std::vector<std::pair<unsigned int, unsigned int>>>
175  & copy_indices_level_mine,
176  LinearAlgebra::distributed::Vector<Number> &ghosted_global_vector,
177  MGLevelObject<LinearAlgebra::distributed::Vector<Number>>
178  &ghosted_level_vector)
179  {
180  // first go to the usual routine...
181  std::vector<
182  std::vector<std::pair<types::global_dof_index, types::global_dof_index>>>
183  my_copy_indices;
184  std::vector<
185  std::vector<std::pair<types::global_dof_index, types::global_dof_index>>>
186  my_copy_indices_global_mine;
187  std::vector<
188  std::vector<std::pair<types::global_dof_index, types::global_dof_index>>>
189  my_copy_indices_level_mine;
190 
191  internal::MGTransfer::fill_copy_indices(mg_dof,
192  mg_constrained_dofs,
193  my_copy_indices,
194  my_copy_indices_global_mine,
195  my_copy_indices_level_mine,
196  !transfer_solution_vectors);
197 
198  // get all degrees of freedom that we need read access to in copy_to_mg
199  // and copy_from_mg, respectively. We fill an IndexSet once on each level
200  // (for the global_mine indices accessing remote level indices) and once
201  // globally (for the level_mine indices accessing remote global indices).
202 
203  // the variables index_set and level_index_set are going to define the
204  // ghost indices of the respective vectors (due to construction, these are
205  // precisely the indices that we need)
206 
207  IndexSet index_set(mg_dof.locally_owned_dofs().size());
208  std::vector<types::global_dof_index> accessed_indices;
209  ghosted_level_vector.resize(0,
210  mg_dof.get_triangulation().n_global_levels() -
211  1);
212  std::vector<IndexSet> level_index_set(
213  mg_dof.get_triangulation().n_global_levels());
214  for (unsigned int l = 0; l < mg_dof.get_triangulation().n_global_levels();
215  ++l)
216  {
217  for (const auto &indices : my_copy_indices_level_mine[l])
218  accessed_indices.push_back(indices.first);
219  std::vector<types::global_dof_index> accessed_level_indices;
220  for (const auto &indices : my_copy_indices_global_mine[l])
221  accessed_level_indices.push_back(indices.second);
222  std::sort(accessed_level_indices.begin(), accessed_level_indices.end());
223  level_index_set[l].set_size(mg_dof.locally_owned_mg_dofs(l).size());
224  level_index_set[l].add_indices(accessed_level_indices.begin(),
225  accessed_level_indices.end());
226  level_index_set[l].compress();
227  ghosted_level_vector[l].reinit(mg_dof.locally_owned_mg_dofs(l),
228  level_index_set[l],
229  mpi_communicator);
230  }
231  std::sort(accessed_indices.begin(), accessed_indices.end());
232  index_set.add_indices(accessed_indices.begin(), accessed_indices.end());
233  index_set.compress();
234  ghosted_global_vector.reinit(mg_dof.locally_owned_dofs(),
235  index_set,
236  mpi_communicator);
237 
238  // localize the copy indices for faster access. Since all access will be
239  // through the ghosted vector in 'data', we can use this (much faster)
240  // option
241  copy_indices.resize(mg_dof.get_triangulation().n_global_levels());
242  copy_indices_level_mine.resize(
243  mg_dof.get_triangulation().n_global_levels());
244  copy_indices_global_mine.resize(
245  mg_dof.get_triangulation().n_global_levels());
246  for (unsigned int level = 0;
247  level < mg_dof.get_triangulation().n_global_levels();
248  ++level)
249  {
250  const Utilities::MPI::Partitioner &global_partitioner =
251  *ghosted_global_vector.get_partitioner();
252  const Utilities::MPI::Partitioner &level_partitioner =
253  *ghosted_level_vector[level].get_partitioner();
254  // owned-owned case: the locally owned indices are going to control
255  // the local index
256  copy_indices[level].resize(my_copy_indices[level].size());
257  for (unsigned int i = 0; i < my_copy_indices[level].size(); ++i)
258  copy_indices[level][i] = std::pair<unsigned int, unsigned int>(
259  global_partitioner.global_to_local(my_copy_indices[level][i].first),
260  level_partitioner.global_to_local(
261  my_copy_indices[level][i].second));
262 
263  // remote-owned case: the locally owned indices for the level and the
264  // ghost dofs for the global indices set the local index
265  copy_indices_level_mine[level].resize(
266  my_copy_indices_level_mine[level].size());
267  for (unsigned int i = 0; i < my_copy_indices_level_mine[level].size();
268  ++i)
269  copy_indices_level_mine[level][i] =
270  std::pair<unsigned int, unsigned int>(
271  global_partitioner.global_to_local(
272  my_copy_indices_level_mine[level][i].first),
273  level_partitioner.global_to_local(
274  my_copy_indices_level_mine[level][i].second));
275 
276  // owned-remote case: the locally owned indices for the global dofs
277  // and the ghost dofs for the level indices set the local index
278  copy_indices_global_mine[level].resize(
279  my_copy_indices_global_mine[level].size());
280  for (unsigned int i = 0; i < my_copy_indices_global_mine[level].size();
281  ++i)
282  copy_indices_global_mine[level][i] =
283  std::pair<unsigned int, unsigned int>(
284  global_partitioner.global_to_local(
285  my_copy_indices_global_mine[level][i].first),
286  level_partitioner.global_to_local(
287  my_copy_indices_global_mine[level][i].second));
288  }
289  }
290 } // namespace
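fill_internal collects, per level, the remotely owned indices that copy_to_mg and copy_from_mg need to read, makes them the ghost entries of the ghosted vectors, and then stores every copy-index pair as local indices through the vectors' partitioners. A minimal sketch of that ghosting and global-to-local translation, assuming the caller already knows its locally owned range; ghosting_sketch and its arguments are illustrative:

  #include <deal.II/base/index_set.h>
  #include <deal.II/lac/la_parallel_vector.h>

  void ghosting_sketch(const dealii::IndexSet &locally_owned,
                       const MPI_Comm          comm)
  {
    // Indices this rank must read but does not own become ghost entries.
    dealii::IndexSet ghost_indices(locally_owned.size());
    // ... ghost_indices.add_indices(...) with the accessed remote indices ...
    ghost_indices.compress();

    dealii::LinearAlgebra::distributed::Vector<double> ghosted;
    ghosted.reinit(locally_owned, ghost_indices, comm);

    // With the partition fixed, a global dof index maps to the local storage
    // index that the localized copy_indices arrays use.
    if (locally_owned.n_elements() > 0)
      {
        const unsigned int local = ghosted.get_partitioner()->global_to_local(
          locally_owned.nth_index_in_set(0));
        (void)local;
      }
  }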
291 
292 template <typename Number>
293 template <int dim, int spacedim>
294 void
295 MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
296  fill_and_communicate_copy_indices(const DoFHandler<dim, spacedim> &mg_dof)
297 {
298  const parallel::Triangulation<dim, spacedim> *ptria =
299  dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
300  &mg_dof.get_triangulation());
301  const MPI_Comm mpi_communicator =
302  ptria != nullptr ? ptria->get_communicator() : MPI_COMM_SELF;
303 
304  fill_internal(mg_dof,
305  mg_constrained_dofs,
306  mpi_communicator,
307  false,
308  this->copy_indices,
309  this->copy_indices_global_mine,
310  this->copy_indices_level_mine,
311  ghosted_global_vector,
312  ghosted_level_vector);
313 
314  fill_internal(mg_dof,
315  mg_constrained_dofs,
316  mpi_communicator,
317  true,
318  this->solution_copy_indices,
319  this->solution_copy_indices_global_mine,
320  this->solution_copy_indices_level_mine,
321  solution_ghosted_global_vector,
322  solution_ghosted_level_vector);
323 
324  bool my_perform_renumbered_plain_copy =
325  (this->copy_indices.back().size() ==
326  mg_dof.locally_owned_dofs().n_elements());
327  bool my_perform_plain_copy = false;
328  if (my_perform_renumbered_plain_copy)
329  {
330  my_perform_plain_copy = true;
331  AssertDimension(this->copy_indices_global_mine.back().size(), 0);
332  AssertDimension(this->copy_indices_level_mine.back().size(), 0);
333 
334  // check whether there is a renumbering of degrees of freedom on
335  // either the finest level or the global dofs, which means that we
336  // cannot apply a plain copy
337  for (unsigned int i = 0; i < this->copy_indices.back().size(); ++i)
338  if (this->copy_indices.back()[i].first !=
339  this->copy_indices.back()[i].second)
340  {
341  my_perform_plain_copy = false;
342  break;
343  }
344  }
345 
346  // now do a global reduction over all processors to see what operation
347  // they can agree upon
348  perform_plain_copy =
349  Utilities::MPI::min(static_cast<int>(my_perform_plain_copy),
350  mpi_communicator);
351  perform_renumbered_plain_copy =
352  Utilities::MPI::min(static_cast<int>(my_perform_renumbered_plain_copy),
353  mpi_communicator);
354 
355  // if we do a plain copy, no need to hold additional ghosted vectors
356  if (perform_renumbered_plain_copy)
357  {
358  ghosted_global_vector.reinit(0);
359  ghosted_level_vector.resize(0, 0);
360  solution_ghosted_global_vector.reinit(0);
361  solution_ghosted_level_vector.resize(0, 0);
362  }
363 }
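If perform_renumbered_plain_copy ends up true, the finest level holds exactly the locally owned dofs, only in a different local order, so moving data between the global vector and the finest level reduces to one loop over the localized index pairs. A sketch of that fast path; the function name and parameters are illustrative, not the library's implementation:

  #include <deal.II/lac/la_parallel_vector.h>

  #include <utility>
  #include <vector>

  void renumbered_plain_copy_sketch(
    const std::vector<std::pair<unsigned int, unsigned int>> &finest_copy_indices,
    const dealii::LinearAlgebra::distributed::Vector<double> &global_vector,
    dealii::LinearAlgebra::distributed::Vector<double>       &finest_level_vector)
  {
    // pair.first is the local index in the global vector, pair.second the
    // local index in the finest level vector (copy_to_mg direction).
    for (const auto &p : finest_copy_indices)
      finest_level_vector.local_element(p.second) =
        global_vector.local_element(p.first);
  }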
364 
365 
366 
367 template <typename Number>
368 void
369 MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::clear()
370 {
371  sizes.resize(0);
372  copy_indices.clear();
373  copy_indices_global_mine.clear();
374  copy_indices_level_mine.clear();
375  component_to_block_map.resize(0);
376  mg_constrained_dofs = nullptr;
377  ghosted_global_vector.reinit(0);
378  ghosted_level_vector.resize(0, 0);
379  perform_plain_copy = false;
380  perform_renumbered_plain_copy = false;
381 }
382 
383 
384 
385 template <typename Number>
386 void
387 MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
388  print_indices(std::ostream &os) const
389 {
390  for (unsigned int level = 0; level < copy_indices.size(); ++level)
391  {
392  for (unsigned int i = 0; i < copy_indices[level].size(); ++i)
393  os << "copy_indices[" << level << "]\t" << copy_indices[level][i].first
394  << '\t' << copy_indices[level][i].second << std::endl;
395  }
396 
397  for (unsigned int level = 0; level < copy_indices_level_mine.size(); ++level)
398  {
399  for (unsigned int i = 0; i < copy_indices_level_mine[level].size(); ++i)
400  os << "copy_ifrom [" << level << "]\t"
401  << copy_indices_level_mine[level][i].first << '\t'
402  << copy_indices_level_mine[level][i].second << std::endl;
403  }
404  for (unsigned int level = 0; level < copy_indices_global_mine.size(); ++level)
405  {
406  for (unsigned int i = 0; i < copy_indices_global_mine[level].size(); ++i)
407  os << "copy_ito [" << level << "]\t"
408  << copy_indices_global_mine[level][i].first << '\t'
409  << copy_indices_global_mine[level][i].second << std::endl;
410  }
411 }
412 
413 
414 
415 template <typename Number>
416 std::size_t
417 MGLevelGlobalTransfer<
418  LinearAlgebra::distributed::Vector<Number>>::memory_consumption() const
419 {
420  std::size_t result = sizeof(*this);
421  result += MemoryConsumption::memory_consumption(sizes);
422  result += MemoryConsumption::memory_consumption(copy_indices);
423  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
424  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);
425  result += ghosted_global_vector.memory_consumption();
426  for (unsigned int i = ghosted_level_vector.min_level();
427  i <= ghosted_level_vector.max_level();
428  ++i)
429  result += ghosted_level_vector[i].memory_consumption();
430 
431  return result;
432 }
433 
434 
435 
436 // explicit instantiation
437 #include "mg_level_global_transfer.inst"
438 
439 // create an additional instantiation currently not supported by the automatic
440 // template instantiation scheme
441 template class MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<float>>;
442 
443 
444 DEAL_II_NAMESPACE_CLOSE
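For context, this class is normally reached through derived transfer classes rather than used directly. Below is a hedged usage sketch with MGTransferMatrixFree, whose build() call ends up in fill_and_communicate_copy_indices above; the objects dof_handler, mg_constrained_dofs, and solution are assumed to be set up elsewhere (including distributed level dofs):

  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/lac/la_parallel_vector.h>
  #include <deal.II/multigrid/mg_constrained_dofs.h>
  #include <deal.II/multigrid/mg_level_object.h>
  #include <deal.II/multigrid/mg_transfer_matrix_free.h>

  template <int dim>
  void transfer_sketch(
    const dealii::DoFHandler<dim> &                            dof_handler,
    const dealii::MGConstrainedDoFs &                          mg_constrained_dofs,
    const dealii::LinearAlgebra::distributed::Vector<double> & solution)
  {
    dealii::MGTransferMatrixFree<dim, double> transfer(mg_constrained_dofs);
    transfer.build(dof_handler); // fills and communicates the copy indices

    const unsigned int max_level =
      dof_handler.get_triangulation().n_global_levels() - 1;
    dealii::MGLevelObject<dealii::LinearAlgebra::distributed::Vector<double>>
      level_vectors(0, max_level);

    // copy_to_mg() and copy_from_mg() use the copy indices (or the plain copy
    // fast path) prepared by fill_and_communicate_copy_indices().
    transfer.copy_to_mg(dof_handler, level_vectors, solution);
  }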