Reference documentation for deal.II version 9.1.1
grid_refinement.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2000 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/utilities.h>
18 
19 #include <deal.II/lac/block_vector.h>
20 #include <deal.II/lac/trilinos_parallel_block_vector.h>
21 #include <deal.II/lac/trilinos_vector.h>
22 #include <deal.II/lac/vector.h>
23 
24 #ifdef DEAL_II_WITH_P4EST
25 
26 # include <deal.II/distributed/grid_refinement.h>
27 
28 # include <deal.II/grid/grid_refinement.h>
29 # include <deal.II/grid/tria.h>
30 # include <deal.II/grid/tria_accessor.h>
31 # include <deal.II/grid/tria_iterator.h>
32 
33 # include <algorithm>
34 # include <functional>
35 # include <limits>
36 # include <numeric>
37 
38 
39 DEAL_II_NAMESPACE_OPEN
40 
41 
42 namespace
43 {
44  template <typename number>
45  inline number
46  max_element(const ::Vector<number> &criteria)
47  {
48  return (criteria.size() > 0) ?
49  (*std::max_element(criteria.begin(), criteria.end())) :
50  std::numeric_limits<number>::min();
51  }
52 
53 
54 
55  template <typename number>
56  inline number
57  min_element(const ::Vector<number> &criteria)
58  {
59  return (criteria.size() > 0) ?
60  (*std::min_element(criteria.begin(), criteria.end())) :
61  std::numeric_limits<number>::max();
62  }
63 
64 
69  template <typename number>
70  std::pair<number, number>
71  compute_global_min_and_max_at_root(const ::Vector<number> &criteria,
72  MPI_Comm mpi_communicator)
73  {
74  // we'd like to compute the global max and min from the local ones in one
75  // MPI communication. we can do that by taking the elementwise minimum of
76  // the local min and the negative maximum over all processors
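// (illustration, not part of the original source: with two ranks whose local
// (min, max) pairs are (2, 5) and (1, 7), the packed arrays are {2, -5} and
// {1, -7}; an elementwise MPI_MIN gives {1, -7} on the root, which unpacks to
// the global minimum 1 and, after negating the second entry, the global
// maximum 7.)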
77 
78  const double local_min = min_element(criteria),
79  local_max = max_element(criteria);
80  double comp[2] = {local_min, -local_max};
81  double result[2] = {0, 0};
82 
83  // compute the minimum on processor zero
84  const int ierr =
85  MPI_Reduce(comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, mpi_communicator);
86  AssertThrowMPI(ierr);
87 
88  // make sure only processor zero got something
89  if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0)
90  Assert((result[0] == 0) && (result[1] == 0), ExcInternalError());
91 
92  return std::make_pair(result[0], -result[1]);
93  }
94 
95 
96 
102  template <typename number>
103  double
104  compute_global_sum(const ::Vector<number> &criteria,
105  MPI_Comm mpi_communicator)
106  {
107  double my_sum =
108  std::accumulate(criteria.begin(),
109  criteria.end(),
110  /* do accumulation in the correct data type: */
111  number());
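// (note added for clarity: std::accumulate performs the summation in the
// type of its initial value, so passing number() instead of the integer
// literal 0 keeps the accumulation in the vector's value type and avoids
// truncation to int.)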
112 
113  double result = 0;
114  // compute the sum on processor zero
115  const int ierr =
116  MPI_Reduce(&my_sum, &result, 1, MPI_DOUBLE, MPI_SUM, 0, mpi_communicator);
117  AssertThrowMPI(ierr);
118 
119  // make sure only processor zero got something
120  if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0)
121  Assert(result == 0, ExcInternalError());
122 
123  return result;
124  }
125 
126 
127 
132  template <int dim, int spacedim, typename Number>
133  void
134  get_locally_owned_indicators(
135  const parallel::distributed::Triangulation<dim, spacedim> &tria,
136  const ::Vector<Number> & criteria,
137  Vector<Number> &locally_owned_indicators)
138  {
139  Assert(locally_owned_indicators.size() ==
140  tria.n_locally_owned_active_cells(),
141  ExcInternalError());
142 
143  unsigned int owned_index = 0;
144  for (typename Triangulation<dim, spacedim>::active_cell_iterator cell =
145  tria.begin_active();
146  cell != tria.end();
147  ++cell)
148  if (cell->subdomain_id() == tria.locally_owned_subdomain())
149  {
150  locally_owned_indicators(owned_index) =
151  criteria(cell->active_cell_index());
152  ++owned_index;
153  }
154  Assert(owned_index == tria.n_locally_owned_active_cells(),
155  ExcInternalError());
156  }
157 
158 
159  // we compute refinement thresholds by bisection of the interval spanned by
160  // the smallest and largest error indicator. this leads to a small problem:
161  // if, for example, we want to coarsen zero per cent of the cells, then we
162  // need to pick a threshold equal to the smallest indicator, but of course
163  // the bisection algorithm can never find a threshold equal to one of the
164  // end points of the interval. So we slightly widen the interval before
165  // we even start.
166  void
167  adjust_interesting_range(double (&interesting_range)[2])
168  {
169  Assert(interesting_range[0] <= interesting_range[1], ExcInternalError());
170 
171  Assert(interesting_range[0] >= 0, ExcInternalError());
172 
173  // adjust the lower bound only if the end point is not equal to zero,
174  // otherwise the result could become negative
175  if (interesting_range[0] > 0)
176  interesting_range[0] *= 0.99;
177 
178  if (interesting_range[1] > 0)
179  interesting_range[1] *= 1.01;
180  else
181  interesting_range[1] +=
182  0.01 * (interesting_range[1] - interesting_range[0]);
183  }
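// (worked example of the adjustment above: an input range of {0.2, 3.0}
// becomes {0.198, 3.03}; an input range of {0, 3.0} becomes {0, 3.03},
// i.e. a zero lower bound is left untouched so that it cannot turn negative.)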
184 
185 
186 
192  template <int dim, int spacedim, typename Number>
193  void
194  mark_cells(parallel::distributed::Triangulation<dim, spacedim> &tria,
195  const ::Vector<Number> & criteria,
196  const double top_threshold,
197  const double bottom_threshold)
198  {
199  ::GridRefinement::refine(tria, criteria, top_threshold);
200  ::GridRefinement::coarsen(tria, criteria, bottom_threshold);
201 
202  // as a final good measure, delete all flags again from cells that we don't
203  // locally own
204  for (typename Triangulation<dim, spacedim>::active_cell_iterator cell =
205  tria.begin_active();
206  cell != tria.end();
207  ++cell)
208  if (cell->subdomain_id() != tria.locally_owned_subdomain())
209  {
210  cell->clear_refine_flag();
211  cell->clear_coarsen_flag();
212  }
213  }
214 
215 
216 
217  namespace RefineAndCoarsenFixedNumber
218  {
223  template <typename number>
224  number
225  compute_threshold(const ::Vector<number> & criteria,
226  const std::pair<double, double> &global_min_and_max,
227  const unsigned int n_target_cells,
228  MPI_Comm mpi_communicator)
229  {
230  double interesting_range[2] = {global_min_and_max.first,
231  global_min_and_max.second};
232  adjust_interesting_range(interesting_range);
233 
234  const unsigned int master_mpi_rank = 0;
235  unsigned int iteration = 0;
236 
237  do
238  {
239  int ierr = MPI_Bcast(interesting_range,
240  2,
241  MPI_DOUBLE,
242  master_mpi_rank,
243  mpi_communicator);
244  AssertThrowMPI(ierr);
245 
246  if (interesting_range[0] == interesting_range[1])
247  return interesting_range[0];
248 
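// (note added for clarity: when the lower bound is positive, the trial
// threshold below is the geometric mean of the interval end points, i.e.
// the interval is bisected on a logarithmic scale, which is useful because
// error indicators often spread over many orders of magnitude. The
// arithmetic mean is used as a fallback when the lower bound is zero,
// where the geometric mean would degenerate to zero and the interval
// could not shrink.)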
249  const double test_threshold =
250  (interesting_range[0] > 0 ?
251  std::sqrt(interesting_range[0] * interesting_range[1]) :
252  (interesting_range[0] + interesting_range[1]) / 2);
253 
254  // count how many of our own elements would be above this threshold
255  // and then add to it the counts from all the other processors
256  unsigned int my_count =
257  std::count_if(criteria.begin(),
258  criteria.end(),
259  [test_threshold](const double c) {
260  return c > test_threshold;
261  });
262 
263  unsigned int total_count = 0;
264 
265  ierr = MPI_Reduce(&my_count,
266  &total_count,
267  1,
268  MPI_UNSIGNED,
269  MPI_SUM,
270  master_mpi_rank,
271  mpi_communicator);
272  AssertThrowMPI(ierr);
273 
274  // now adjust the range. if we have too many cells, we take the upper
275  // half of the previous range, otherwise the lower half. if we have
276  // hit the right number, then set the range to the exact value.
277  // the non-master ranks also update their own interesting_range; however,
278  // their results are not significant since the values will be overwritten
279  // by the MPI_Bcast from the master rank in the next iteration.
280  if (total_count > n_target_cells)
281  interesting_range[0] = test_threshold;
282  else if (total_count < n_target_cells)
283  interesting_range[1] = test_threshold;
284  else
285  interesting_range[0] = interesting_range[1] = test_threshold;
286 
287  // terminate the iteration after 25 go-arounds. this is necessary
288  // because oftentimes error indicators on cells have exactly the
289  // same value, and so there may not be a particular value that cuts
290  // the indicators in such a way that we can achieve the desired
291  // number of cells. using a maximal number of iterations means that
292  // we terminate the iteration after a fixed number N of steps if the
293  // indicators were perfectly badly distributed, and we make at most
294  // a mistake of 1/2^N in the number of cells flagged if indicators
295  // are perfectly equidistributed
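// (for reference: 1/2^25 is roughly 3e-8, so after 25 iterations the
// relative error in the number of flagged cells is negligible in practice)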
296  ++iteration;
297  if (iteration == 25)
298  interesting_range[0] = interesting_range[1] = test_threshold;
299  }
300  while (true);
301 
302  Assert(false, ExcInternalError());
303  return -1;
304  }
305  } // namespace RefineAndCoarsenFixedNumber
306 
307 
308 
309  namespace RefineAndCoarsenFixedFraction
310  {
317  template <typename number>
318  number
319  compute_threshold(const ::Vector<number> & criteria,
320  const std::pair<double, double> &global_min_and_max,
321  const double target_error,
322  MPI_Comm mpi_communicator)
323  {
324  double interesting_range[2] = {global_min_and_max.first,
325  global_min_and_max.second};
326  adjust_interesting_range(interesting_range);
327 
328  const unsigned int master_mpi_rank = 0;
329  unsigned int iteration = 0;
330 
331  do
332  {
333  int ierr = MPI_Bcast(interesting_range,
334  2,
335  MPI_DOUBLE,
336  master_mpi_rank,
337  mpi_communicator);
338  AssertThrowMPI(ierr);
339 
340  if (interesting_range[0] == interesting_range[1])
341  {
342  // so we have found our threshold. since we adjust the range
343  // at the top of the function to be slightly larger than the
344  // actual extremes of the refinement criteria values, we can end
345  // up in a situation where the threshold is in fact larger than
346  // the maximal refinement indicator. in such cases, we get no
347  // refinement at all. thus, cap the threshold by the actual
348  // largest value
349  double final_threshold =
350  std::min(interesting_range[0], global_min_and_max.second);
351  ierr = MPI_Bcast(&final_threshold,
352  1,
353  MPI_DOUBLE,
354  master_mpi_rank,
355  mpi_communicator);
356  AssertThrowMPI(ierr);
357 
358  return final_threshold;
359  }
360 
361  const double test_threshold =
362  (interesting_range[0] > 0 ?
363  std::sqrt(interesting_range[0] * interesting_range[1]) :
364  (interesting_range[0] + interesting_range[1]) / 2);
365 
366  // accumulate the error of those of our own elements that are above this
367  // threshold, and then add to it the contributions of all other processors
368  double my_error = 0;
369  for (unsigned int i = 0; i < criteria.size(); ++i)
370  if (criteria(i) > test_threshold)
371  my_error += criteria(i);
372 
373  double total_error = 0.;
374 
375  ierr = MPI_Reduce(&my_error,
376  &total_error,
377  1,
378  MPI_DOUBLE,
379  MPI_SUM,
380  master_mpi_rank,
381  mpi_communicator);
382  AssertThrowMPI(ierr);
383 
384  // now adjust the range. if the accumulated error is too large, we take
385  // the upper half of the previous range, otherwise the lower half. if we
386  // have hit the target error exactly, then set the range to the exact value.
387  // the non-master ranks also update their own interesting_range; however,
388  // their results are not significant since the values will be overwritten
389  // by the MPI_Bcast from the master rank in the next iteration.
390  if (total_error > target_error)
391  interesting_range[0] = test_threshold;
392  else if (total_error < target_error)
393  interesting_range[1] = test_threshold;
394  else
395  interesting_range[0] = interesting_range[1] = test_threshold;
396 
397  // terminate the iteration after 25 go-arounds. this is
398  // necessary because oftentimes error indicators on cells
399  // have exactly the same value, and so there may not be a
400  // particular value that cuts the indicators in such a way
401  // that we can achieve the desired number of cells. using a
402  // max of 25 iterations means that we terminate the
403  // iteration after 25 steps if the indicators were perfectly
404  // badly distributed, and we make at most a mistake of
405  // 1/2^25 in the number of cells flagged if indicators are
406  // perfectly equidistributed
407  ++iteration;
408  if (iteration == 25)
409  interesting_range[0] = interesting_range[1] = test_threshold;
410  }
411  while (true);
412 
413  Assert(false, ExcInternalError());
414  return -1;
415  }
416  } // namespace RefineAndCoarsenFixedFraction
417 } // namespace
418 
419 
420 
421 namespace parallel
422 {
423  namespace distributed
424  {
425  namespace GridRefinement
426  {
427  template <int dim, typename Number, int spacedim>
428  void
429  refine_and_coarsen_fixed_number(
430  parallel::distributed::Triangulation<dim, spacedim> &tria,
431  const ::Vector<Number> & criteria,
432  const double top_fraction_of_cells,
433  const double bottom_fraction_of_cells,
434  const unsigned int max_n_cells)
435  {
436  Assert(criteria.size() == tria.n_active_cells(),
437  ExcDimensionMismatch(criteria.size(), tria.n_active_cells()));
438  Assert((top_fraction_of_cells >= 0) && (top_fraction_of_cells <= 1),
439  ::GridRefinement::ExcInvalidParameterValue());
440  Assert((bottom_fraction_of_cells >= 0) &&
441  (bottom_fraction_of_cells <= 1),
442  ::GridRefinement::ExcInvalidParameterValue());
443  Assert(top_fraction_of_cells + bottom_fraction_of_cells <= 1,
444  ::GridRefinement::ExcInvalidParameterValue());
445  Assert(criteria.is_non_negative(),
446  ::GridRefinement::ExcNegativeCriteria());
447 
448  const std::pair<double, double> adjusted_fractions =
449  ::GridRefinement::adjust_refine_and_coarsen_number_fraction<
450  dim>(tria.n_global_active_cells(),
451  max_n_cells,
452  top_fraction_of_cells,
453  bottom_fraction_of_cells);
454 
455  // first extract from the vector of indicators the ones that correspond
456  // to cells that we locally own
457  Vector<Number> locally_owned_indicators(
458  tria.n_locally_owned_active_cells());
459  get_locally_owned_indicators(tria, criteria, locally_owned_indicators);
460 
461  MPI_Comm mpi_communicator = tria.get_communicator();
462 
463  // figure out the global max and min of the indicators. this is a
464  // collective communication call; its result seeds the bisection below
465  const std::pair<Number, Number> global_min_and_max =
466  compute_global_min_and_max_at_root(locally_owned_indicators,
467  mpi_communicator);
468 
469 
470  double top_threshold, bottom_threshold;
471  top_threshold = RefineAndCoarsenFixedNumber::compute_threshold(
472  locally_owned_indicators,
473  global_min_and_max,
474  static_cast<unsigned int>(adjusted_fractions.first *
475  tria.n_global_active_cells()),
476  mpi_communicator);
477 
478  // compute bottom threshold only if necessary. otherwise use a threshold
479  // lower than the smallest value we have locally
480  if (adjusted_fractions.second > 0)
481  bottom_threshold = RefineAndCoarsenFixedNumber::compute_threshold(
482  locally_owned_indicators,
483  global_min_and_max,
484  static_cast<unsigned int>((1 - adjusted_fractions.second) *
485  tria.n_global_active_cells()),
486  mpi_communicator);
487  else
488  {
489  bottom_threshold =
490  *std::min_element(criteria.begin(), criteria.end());
491  bottom_threshold -= std::fabs(bottom_threshold);
492  }
493 
494  // now refine the mesh
495  mark_cells(tria, criteria, top_threshold, bottom_threshold);
496  }
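 
// (usage sketch, not part of this file: assuming a
// parallel::distributed::Triangulation<dim> named "triangulation" and a
// Vector<float> named "estimated_error_per_cell" filled, for example, by
// KellyErrorEstimator, the following flags roughly the 30% of cells with the
// largest indicators for refinement and the 3% with the smallest for
// coarsening, and then executes the refinement:
//
//   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
//     triangulation, estimated_error_per_cell, 0.3, 0.03);
//   triangulation.execute_coarsening_and_refinement();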
497 
498 
499  template <int dim, typename Number, int spacedim>
500  void
501  refine_and_coarsen_fixed_fraction(
502  parallel::distributed::Triangulation<dim, spacedim> &tria,
503  const ::Vector<Number> & criteria,
504  const double top_fraction_of_error,
505  const double bottom_fraction_of_error)
506  {
507  Assert(criteria.size() == tria.n_active_cells(),
508  ExcDimensionMismatch(criteria.size(), tria.n_active_cells()));
509  Assert((top_fraction_of_error >= 0) && (top_fraction_of_error <= 1),
510  ::GridRefinement::ExcInvalidParameterValue());
511  Assert((bottom_fraction_of_error >= 0) &&
512  (bottom_fraction_of_error <= 1),
513  ::GridRefinement::ExcInvalidParameterValue());
514  Assert(top_fraction_of_error + bottom_fraction_of_error <= 1,
515  ::GridRefinement::ExcInvalidParameterValue());
516  Assert(criteria.is_non_negative(),
517  ::GridRefinement::ExcNegativeCriteria());
518 
519  // first extract from the vector of indicators the ones that correspond
520  // to cells that we locally own
521  Vector<Number> locally_owned_indicators(
522  tria.n_locally_owned_active_cells());
523  get_locally_owned_indicators(tria, criteria, locally_owned_indicators);
524 
525  MPI_Comm mpi_communicator = tria.get_communicator();
526 
527  // figure out the global max and min of the indicators. this is a
528  // collective communication call; its result seeds the bisection below
529  const std::pair<double, double> global_min_and_max =
530  compute_global_min_and_max_at_root(locally_owned_indicators,
531  mpi_communicator);
532 
533  const double total_error =
534  compute_global_sum(locally_owned_indicators, mpi_communicator);
535  double top_threshold, bottom_threshold;
536  top_threshold = RefineAndCoarsenFixedFraction::compute_threshold(
537  locally_owned_indicators,
538  global_min_and_max,
539  top_fraction_of_error * total_error,
540  mpi_communicator);
541  // compute bottom threshold only if necessary. otherwise use a threshold
542  // lower than the smallest value we have locally
543  if (bottom_fraction_of_error > 0)
544  bottom_threshold = RefineAndCoarsenFixedFraction::compute_threshold(
545  locally_owned_indicators,
546  global_min_and_max,
547  (1 - bottom_fraction_of_error) * total_error,
548  mpi_communicator);
549  else
550  {
551  bottom_threshold =
552  *std::min_element(criteria.begin(), criteria.end());
553  bottom_threshold -= std::fabs(bottom_threshold);
554  }
555 
556  // now refine the mesh
557  mark_cells(tria, criteria, top_threshold, bottom_threshold);
558  }
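 
// (usage sketch, not part of this file: the call mirrors the
// refine_and_coarsen_fixed_number example above, but here the fractions
// refer to the total error rather than to the number of cells, i.e. the
// flagged cells together account for roughly 30% and 3%, respectively, of
// the summed indicators:
//
//   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
//     triangulation, estimated_error_per_cell, 0.3, 0.03);
//   triangulation.execute_coarsening_and_refinement();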
559  } // namespace GridRefinement
560  } // namespace distributed
561 } // namespace parallel
562 
563 
564 // explicit instantiations
565 # include "grid_refinement.inst"
566 
567 DEAL_II_NAMESPACE_CLOSE
568 
569 #endif