Reference documentation for deal.II version 9.0.0
mg_level_global_transfer.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2003 - 2017 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------


#include <deal.II/base/logstream.h>
#include <deal.II/base/function.h>

#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/fe/fe.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/multigrid/mg_tools.h>
#include <deal.II/multigrid/mg_transfer.h>
#include <deal.II/multigrid/mg_transfer.templates.h>
#include <deal.II/multigrid/mg_transfer_internal.h>

#include <algorithm>

DEAL_II_NAMESPACE_OPEN


/* ------------------ MGLevelGlobalTransfer<VectorType> ----------------- */


template <typename VectorType>
template <int dim, int spacedim>
void
MGLevelGlobalTransfer<VectorType>::fill_and_communicate_copy_indices
(const DoFHandler<dim,spacedim> &mg_dof)
{
  internal::MGTransfer::fill_copy_indices(mg_dof, mg_constrained_dofs, copy_indices,
                                          copy_indices_global_mine, copy_indices_level_mine);

  // check if we can run a plain copy operation between the global DoFs and
  // the finest level.
  bool
  my_perform_plain_copy =
    (copy_indices.back().size() == mg_dof.locally_owned_dofs().n_elements())
    &&
    (mg_dof.locally_owned_dofs().n_elements() ==
     mg_dof.locally_owned_mg_dofs(mg_dof.get_triangulation().n_global_levels()-1).n_elements());
  if (my_perform_plain_copy)
    {
      AssertDimension(copy_indices_global_mine.back().size(), 0);
      AssertDimension(copy_indices_level_mine.back().size(), 0);

      // check whether there is a renumbering of degrees of freedom on
      // either the finest level or the global dofs, which means that we
      // cannot apply a plain copy
      for (unsigned int i=0; i<copy_indices.back().size(); ++i)
        if (copy_indices.back()[i].first != copy_indices.back()[i].second)
          {
            my_perform_plain_copy = false;
            break;
          }
    }

  // now do a global reduction over all processors to see what operation
  // they can agree upon
  if (const parallel::Triangulation<dim, spacedim> *ptria =
        dynamic_cast<const parallel::Triangulation<dim, spacedim> *>
        (&mg_dof.get_triangulation()))
    perform_plain_copy =
      (Utilities::MPI::min(my_perform_plain_copy ? 1 : 0,
                           ptria->get_communicator())
       ==
       1);
  else
    perform_plain_copy = my_perform_plain_copy;
}


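// A note on the reduction above: Utilities::MPI::min() over a 0/1 flag acts
// as a logical AND across all ranks, so the plain-copy fast path is enabled
// only if every processor can use it. A minimal, self-contained sketch of
// the same idiom (not part of this file; 'my_flag' is a hypothetical
// stand-in for the per-rank predicate), assuming an MPI-enabled build:
//
//   #include <deal.II/base/mpi.h>
//   #include <iostream>
//
//   int main(int argc, char *argv[])
//   {
//     dealii::Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv);
//
//     const bool my_flag = true; // each rank evaluates its own condition
//     const bool all_agree =
//       (dealii::Utilities::MPI::min(my_flag ? 1 : 0, MPI_COMM_WORLD) == 1);
//
//     if (all_agree)
//       std::cout << "all ranks can use the fast path" << std::endl;
//   }

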
template <typename VectorType>
void
MGLevelGlobalTransfer<VectorType>::clear()
{
  sizes.resize(0);
  copy_indices.clear();
  copy_indices_global_mine.clear();
  copy_indices_level_mine.clear();
  component_to_block_map.resize(0);
  mg_constrained_dofs = nullptr;
  perform_plain_copy = false;
}


template <typename VectorType>
void
MGLevelGlobalTransfer<VectorType>::print_indices (std::ostream &os) const
{
  for (unsigned int level = 0; level<copy_indices.size(); ++level)
    {
      for (unsigned int i=0; i<copy_indices[level].size(); ++i)
        os << "copy_indices[" << level
           << "]\t" << copy_indices[level][i].first << '\t' << copy_indices[level][i].second << std::endl;
    }

  for (unsigned int level = 0; level<copy_indices_level_mine.size(); ++level)
    {
      for (unsigned int i=0; i<copy_indices_level_mine[level].size(); ++i)
        os << "copy_ifrom [" << level
           << "]\t" << copy_indices_level_mine[level][i].first << '\t' << copy_indices_level_mine[level][i].second << std::endl;
    }
  for (unsigned int level = 0; level<copy_indices_global_mine.size(); ++level)
    {
      for (unsigned int i=0; i<copy_indices_global_mine[level].size(); ++i)
        os << "copy_ito [" << level
           << "]\t" << copy_indices_global_mine[level][i].first << '\t' << copy_indices_global_mine[level][i].second << std::endl;
    }
}


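// print_indices() is a debugging aid: each line reports one (global dof,
// level dof) index pair per level. A sketch of how it might be called
// through a class derived from MGLevelGlobalTransfer -- MGTransferPrebuilt
// is used here as an assumed example:
//
//   MGTransferPrebuilt<Vector<double> > transfer;
//   transfer.build_matrices(mg_dof_handler); // fills the copy-index tables
//   transfer.print_indices(std::cout);
//
// where 'mg_dof_handler' is a DoFHandler whose level dofs have been
// distributed via distribute_mg_dofs().

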
template <typename VectorType>
std::size_t
MGLevelGlobalTransfer<VectorType>::memory_consumption () const
{
  std::size_t result = sizeof(*this);
  result += MemoryConsumption::memory_consumption(sizes);
  result += MemoryConsumption::memory_consumption(copy_indices);
  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);

  return result;
}


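// MemoryConsumption::memory_consumption() recurses into standard containers,
// so the nested vector-of-pairs tables above are measured including their
// heap storage. A small self-contained sketch of that utility (not part of
// this file):
//
//   #include <deal.II/base/memory_consumption.h>
//   #include <iostream>
//   #include <utility>
//   #include <vector>
//
//   int main()
//   {
//     std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
//       table(4, std::vector<std::pair<unsigned int, unsigned int> >(100));
//     std::cout << dealii::MemoryConsumption::memory_consumption(table)
//               << " bytes" << std::endl;
//   }

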

/* ------ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>> ------ */

namespace
{
  template <int dim, int spacedim, typename Number>
  void fill_internal(const DoFHandler<dim,spacedim> &mg_dof,
                     SmartPointer<const MGConstrainedDoFs, MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> > > mg_constrained_dofs,
                     const MPI_Comm mpi_communicator,
                     const bool transfer_solution_vectors,
                     std::vector<std::vector<std::pair<unsigned int, unsigned int> > > &copy_indices,
                     std::vector<std::vector<std::pair<unsigned int, unsigned int> > > &copy_indices_global_mine,
                     std::vector<std::vector<std::pair<unsigned int, unsigned int> > > &copy_indices_level_mine,
                     LinearAlgebra::distributed::Vector<Number> &ghosted_global_vector,
                     MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &ghosted_level_vector)
  {
    // first go to the usual routine...
    std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
    my_copy_indices;
    std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
    my_copy_indices_global_mine;
    std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
    my_copy_indices_level_mine;

    internal::MGTransfer::fill_copy_indices(mg_dof, mg_constrained_dofs, my_copy_indices,
                                            my_copy_indices_global_mine, my_copy_indices_level_mine,
                                            !transfer_solution_vectors);

    // get all degrees of freedom that we need read access to in copy_to_mg
    // and copy_from_mg, respectively. We fill an IndexSet once on each level
    // (for the global_mine indices accessing remote level indices) and once
    // globally (for the level_mine indices accessing remote global indices).

    // the variables index_set and level_index_set are going to define the
    // ghost indices of the respective vectors (due to construction, these are
    // precisely the indices that we need)

    IndexSet index_set(mg_dof.locally_owned_dofs().size());
    std::vector<types::global_dof_index> accessed_indices;
    ghosted_level_vector.resize(0, mg_dof.get_triangulation().n_global_levels()-1);
    std::vector<IndexSet> level_index_set(mg_dof.get_triangulation().n_global_levels());
    for (unsigned int l=0; l<mg_dof.get_triangulation().n_global_levels(); ++l)
      {
        for (unsigned int i=0; i<my_copy_indices_level_mine[l].size(); ++i)
          accessed_indices.push_back(my_copy_indices_level_mine[l][i].first);
        std::vector<types::global_dof_index> accessed_level_indices;
        for (unsigned int i=0; i<my_copy_indices_global_mine[l].size(); ++i)
          accessed_level_indices.push_back(my_copy_indices_global_mine[l][i].second);
        std::sort(accessed_level_indices.begin(), accessed_level_indices.end());
        level_index_set[l].set_size(mg_dof.locally_owned_mg_dofs(l).size());
        level_index_set[l].add_indices(accessed_level_indices.begin(),
                                       accessed_level_indices.end());
        level_index_set[l].compress();
        ghosted_level_vector[l].reinit(mg_dof.locally_owned_mg_dofs(l),
                                       level_index_set[l],
                                       mpi_communicator);
      }
    std::sort(accessed_indices.begin(), accessed_indices.end());
    index_set.add_indices(accessed_indices.begin(), accessed_indices.end());
    index_set.compress();
    ghosted_global_vector.reinit(mg_dof.locally_owned_dofs(),
                                 index_set,
                                 mpi_communicator);

    // localize the copy indices for faster access. Since all access will be
    // through the ghosted vector in 'data', we can use this (much faster)
    // option
    copy_indices.resize(mg_dof.get_triangulation().n_global_levels());
    copy_indices_level_mine.resize(mg_dof.get_triangulation().n_global_levels());
    copy_indices_global_mine.resize(mg_dof.get_triangulation().n_global_levels());
    for (unsigned int level=0; level<mg_dof.get_triangulation().n_global_levels(); ++level)
      {
        const Utilities::MPI::Partitioner &global_partitioner =
          *ghosted_global_vector.get_partitioner();
        const Utilities::MPI::Partitioner &level_partitioner =
          *ghosted_level_vector[level].get_partitioner();
        // owned-owned case: the locally owned indices are going to control
        // the local index
        copy_indices[level].resize(my_copy_indices[level].size());
        for (unsigned int i=0; i<my_copy_indices[level].size(); ++i)
          copy_indices[level][i] =
            std::pair<unsigned int,unsigned int>
            (global_partitioner.global_to_local(my_copy_indices[level][i].first),
             level_partitioner.global_to_local(my_copy_indices[level][i].second));

        // remote-owned case: the locally owned indices for the level and the
        // ghost dofs for the global indices set the local index
        copy_indices_level_mine[level].
        resize(my_copy_indices_level_mine[level].size());
        for (unsigned int i=0; i<my_copy_indices_level_mine[level].size(); ++i)
          copy_indices_level_mine[level][i] =
            std::pair<unsigned int,unsigned int>
            (global_partitioner.global_to_local(my_copy_indices_level_mine[level][i].first),
             level_partitioner.global_to_local(my_copy_indices_level_mine[level][i].second));

        // owned-remote case: the locally owned indices for the global dofs
        // and the ghost dofs for the level indices set the local index
        copy_indices_global_mine[level].
        resize(my_copy_indices_global_mine[level].size());
        for (unsigned int i=0; i<my_copy_indices_global_mine[level].size(); ++i)
          copy_indices_global_mine[level][i] =
            std::pair<unsigned int,unsigned int>
            (global_partitioner.global_to_local(my_copy_indices_global_mine[level][i].first),
             level_partitioner.global_to_local(my_copy_indices_global_mine[level][i].second));
      }
  }
}
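
// The core technique of fill_internal() above is to collect all remotely
// owned indices that must be readable, turn them into the ghost IndexSet of
// a distributed vector, and then translate global dof numbers into local
// array positions via the vector's partitioner. A minimal sketch of that
// pattern (the function and parameter names are hypothetical, not library
// API):
//
//   #include <deal.II/base/index_set.h>
//   #include <deal.II/lac/la_parallel_vector.h>
//   #include <algorithm>
//   #include <vector>
//
//   void make_ghosted(const dealii::IndexSet &locally_owned,
//                     std::vector<dealii::types::global_dof_index> needed,
//                     const MPI_Comm comm,
//                     dealii::LinearAlgebra::distributed::Vector<double> &vec)
//   {
//     // sorted indices make IndexSet::add_indices() cheap, as above
//     std::sort(needed.begin(), needed.end());
//     dealii::IndexSet ghost_set(locally_owned.size());
//     ghost_set.add_indices(needed.begin(), needed.end());
//     ghost_set.compress();
//
//     // owned range plus read-only ghost entries
//     vec.reinit(locally_owned, ghost_set, comm);
//
//     // global dof number -> position in the local array
//     if (!needed.empty())
//       {
//         const unsigned int local_pos =
//           vec.get_partitioner()->global_to_local(needed.front());
//         (void)local_pos;
//       }
//   }
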

template <typename Number>
template <int dim, int spacedim>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::fill_and_communicate_copy_indices
(const DoFHandler<dim,spacedim> &mg_dof)
{
  const parallel::Triangulation<dim, spacedim> *ptria =
    dynamic_cast<const parallel::Triangulation<dim, spacedim> *>
    (&mg_dof.get_triangulation());
  const MPI_Comm mpi_communicator = ptria != nullptr ? ptria->get_communicator() :
                                    MPI_COMM_SELF;

  fill_internal(mg_dof,
                mg_constrained_dofs,
                mpi_communicator,
                false,
                this->copy_indices,
                this->copy_indices_global_mine,
                this->copy_indices_level_mine,
                ghosted_global_vector,
                ghosted_level_vector);

  fill_internal(mg_dof,
                mg_constrained_dofs,
                mpi_communicator,
                true,
                this->solution_copy_indices,
                this->solution_copy_indices_global_mine,
                this->solution_copy_indices_level_mine,
                solution_ghosted_global_vector,
                solution_ghosted_level_vector);

  bool my_perform_renumbered_plain_copy = (this->copy_indices.back().size() ==
                                           mg_dof.locally_owned_dofs().n_elements());
  bool my_perform_plain_copy = false;
  if (my_perform_renumbered_plain_copy)
    {
      my_perform_plain_copy = true;
      AssertDimension(this->copy_indices_global_mine.back().size(), 0);
      AssertDimension(this->copy_indices_level_mine.back().size(), 0);

      // check whether there is a renumbering of degrees of freedom on
      // either the finest level or the global dofs, which means that we
      // cannot apply a plain copy
      for (unsigned int i=0; i<this->copy_indices.back().size(); ++i)
        if (this->copy_indices.back()[i].first !=
            this->copy_indices.back()[i].second)
          {
            my_perform_plain_copy = false;
            break;
          }
    }

  // now do a global reduction over all processors to see what operation
  // they can agree upon
  perform_plain_copy =
    Utilities::MPI::min(static_cast<int>(my_perform_plain_copy),
                        mpi_communicator);
  perform_renumbered_plain_copy =
    Utilities::MPI::min(static_cast<int>(my_perform_renumbered_plain_copy),
                        mpi_communicator);

  // if we do a plain copy, no need to hold additional ghosted vectors
  if (perform_renumbered_plain_copy)
    {
      ghosted_global_vector.reinit(0);
      ghosted_level_vector.resize(0, 0);
      solution_ghosted_global_vector.reinit(0);
      solution_ghosted_level_vector.resize(0, 0);
    }
}


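// The two flags computed above select between fast paths in copy_to_mg()
// and copy_from_mg(): with perform_plain_copy the global and finest-level
// vectors share ownership *and* numbering, so a bulk copy suffices; with
// only perform_renumbered_plain_copy the entries are owned on both sides
// but permuted, so the localized index pairs are applied instead. A sketch
// of the distinction ('copy_finest_level' is a hypothetical helper, not
// library API), assuming vectors partitioned as set up above:
//
//   using Vec = dealii::LinearAlgebra::distributed::Vector<double>;
//
//   void copy_finest_level(const bool plain, const bool renumbered_plain,
//                          const std::vector<std::pair<unsigned int, unsigned int> > &pairs,
//                          const Vec &global, Vec &level)
//   {
//     if (plain)
//       level.copy_locally_owned_data_from(global); // identical numbering
//     else if (renumbered_plain)
//       for (const auto &p : pairs) // owned-to-owned, but permuted
//         level.local_element(p.second) = global.local_element(p.first);
//   }

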
template <typename Number>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::clear()
{
  sizes.resize(0);
  copy_indices.clear();
  copy_indices_global_mine.clear();
  copy_indices_level_mine.clear();
  component_to_block_map.resize(0);
  mg_constrained_dofs = nullptr;
  ghosted_global_vector.reinit(0);
  ghosted_level_vector.resize(0, 0);
  perform_plain_copy = false;
  perform_renumbered_plain_copy = false;
}


template <typename Number>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::print_indices (std::ostream &os) const
{
  for (unsigned int level = 0; level<copy_indices.size(); ++level)
    {
      for (unsigned int i=0; i<copy_indices[level].size(); ++i)
        os << "copy_indices[" << level
           << "]\t" << copy_indices[level][i].first << '\t' << copy_indices[level][i].second << std::endl;
    }

  for (unsigned int level = 0; level<copy_indices_level_mine.size(); ++level)
    {
      for (unsigned int i=0; i<copy_indices_level_mine[level].size(); ++i)
        os << "copy_ifrom [" << level
           << "]\t" << copy_indices_level_mine[level][i].first << '\t' << copy_indices_level_mine[level][i].second << std::endl;
    }
  for (unsigned int level = 0; level<copy_indices_global_mine.size(); ++level)
    {
      for (unsigned int i=0; i<copy_indices_global_mine[level].size(); ++i)
        os << "copy_ito [" << level
           << "]\t" << copy_indices_global_mine[level][i].first << '\t' << copy_indices_global_mine[level][i].second << std::endl;
    }
}


template <typename Number>
std::size_t
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::memory_consumption () const
{
  std::size_t result = sizeof(*this);
  result += MemoryConsumption::memory_consumption(sizes);
  result += MemoryConsumption::memory_consumption(copy_indices);
  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);
  result += ghosted_global_vector.memory_consumption();
  for (unsigned int i=ghosted_level_vector.min_level();
       i<=ghosted_level_vector.max_level(); ++i)
    result += ghosted_level_vector[i].memory_consumption();

  return result;
}


// explicit instantiation
#include "mg_level_global_transfer.inst"

// create an additional instantiation currently not supported by the automatic
// template instantiation scheme
template class MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<float> >;


DEAL_II_NAMESPACE_CLOSE