Reference documentation for deal.II version 9.2.0
grid_refinement.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2000 - 2020 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#include <deal.II/base/utilities.h>

#include <deal.II/lac/block_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>

#ifdef DEAL_II_WITH_P4EST

#  include <deal.II/distributed/grid_refinement.h>

#  include <deal.II/grid/grid_refinement.h>
#  include <deal.II/grid/tria.h>
#  include <deal.II/grid/tria_accessor.h>
#  include <deal.II/grid/tria_iterator.h>

#  include <algorithm>
#  include <functional>
#  include <limits>
#  include <numeric>


DEAL_II_NAMESPACE_OPEN

namespace
{
  template <typename number>
  inline number
  max_element(const dealii::Vector<number> &criteria)
  {
    return (criteria.size() > 0) ?
             (*std::max_element(criteria.begin(), criteria.end())) :
             std::numeric_limits<number>::min();
  }



  template <typename number>
  inline number
  min_element(const dealii::Vector<number> &criteria)
  {
    return (criteria.size() > 0) ?
             (*std::min_element(criteria.begin(), criteria.end())) :
             std::numeric_limits<number>::max();
  }


  /**
   * Compute the global sum over all processors of the elements of a
   * vector of refinement criteria. The result is only returned on the
   * processor with rank zero; all other processors receive zero.
   */
  template <typename number>
  double
  compute_global_sum(const dealii::Vector<number> &criteria,
                     MPI_Comm                      mpi_communicator)
  {
    double my_sum =
      std::accumulate(criteria.begin(),
                      criteria.end(),
                      /* do accumulation in the correct data type: */
                      number());

    double result = 0;
    // compute the sum on processor zero
    const int ierr =
      MPI_Reduce(&my_sum, &result, 1, MPI_DOUBLE, MPI_SUM, 0, mpi_communicator);
    AssertThrowMPI(ierr);

    // make sure only processor zero got something
    if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0)
      Assert(result == 0, ExcInternalError());

    return result;
  }



  /**
   * Given a vector of refinement criteria for all cells of a mesh (locally
   * owned or not), extract those that pertain to locally owned cells.
   */
  template <int dim, int spacedim, typename Number>
  void
  get_locally_owned_indicators(
    const parallel::distributed::Triangulation<dim, spacedim> &tria,
    const dealii::Vector<Number> &                             criteria,
    Vector<Number> &locally_owned_indicators)
  {
    Assert(locally_owned_indicators.size() ==
             tria.n_locally_owned_active_cells(),
           ExcInternalError());

    unsigned int owned_index = 0;
    for (const auto &cell : tria.active_cell_iterators())
      if (cell->subdomain_id() == tria.locally_owned_subdomain())
        {
          locally_owned_indicators(owned_index) =
            criteria(cell->active_cell_index());
          ++owned_index;
        }
    Assert(owned_index == tria.n_locally_owned_active_cells(),
           ExcInternalError());
  }

  // we compute refinement thresholds by bisection of the interval spanned by
  // the smallest and largest error indicator. this leads to a small problem:
  // if, for example, we want to coarsen zero percent of the cells, then we
  // need to pick a threshold equal to the smallest indicator, but of course
  // the bisection algorithm can never find a threshold equal to one of the
  // end points of the interval. So we slightly enlarge the interval before
  // we even start
  void
  adjust_interesting_range(double (&interesting_range)[2])
  {
    Assert(interesting_range[0] <= interesting_range[1], ExcInternalError());

    if (interesting_range[0] > 0)
      {
        // In this case, we calculate the first interval split point `m` in the
        // `compute_threshold` functions in the optimized way: We exploit that
        // the logarithms of all criteria are more uniformly distributed than
        // their actual values, i.e. m=sqrt(b*e).
        //
        // Both factors will modify the split point only slightly by a factor
        // of sqrt(1.01*0.99) = sqrt(0.9999) ~ 0.99995.
        interesting_range[0] *= 0.99;
        interesting_range[1] *= 1.01;
      }
    else
      {
        // In all other cases, we begin with the arithmetic mean as the
        // standard interval split point, i.e. m=(b+e)/2.
        //
        // Both increments will add up to zero when calculating the initial
        // split point in the `compute_threshold` functions.
        const double difference =
          std::abs(interesting_range[1] - interesting_range[0]);
        interesting_range[0] -= 0.01 * difference;
        interesting_range[1] += 0.01 * difference;
      }
  }
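
The two split-point rules in the comments above are easy to see on a toy range. The following standalone sketch is illustrative only (it is not part of the library, and split_point is a made-up name); it contrasts the geometric and the arithmetic mean for indicators spanning many orders of magnitude:

#include <cmath>
#include <cstdio>

// Mimic the split-point choice described above: for a strictly positive
// range [b, e], bisection uses the geometric mean sqrt(b * e); otherwise
// it falls back to the arithmetic mean (b + e) / 2.
double split_point(const double b, const double e)
{
  return (b > 0) ? std::sqrt(b * e) : (b + e) / 2;
}

int main()
{
  double range[2] = {1e-8, 1e2}; // error indicators often span many orders
  // widen the range slightly so the bisection can reach the end points
  range[0] *= 0.99;
  range[1] *= 1.01;
  // the geometric mean splits near 1e-3, the arithmetic mean near 50:
  std::printf("geometric: %g  arithmetic: %g\n",
              split_point(range[0], range[1]),
              (range[0] + range[1]) / 2);
}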


  /**
   * Given a vector of criteria and bottom and top thresholds for coarsening
   * and refinement, mark all those cells that we locally own as appropriate
   * for coarsening or refinement.
   */
  template <int dim, int spacedim, typename Number>
  void
  mark_cells(parallel::distributed::Triangulation<dim, spacedim> &tria,
             const dealii::Vector<Number> &                       criteria,
             const double                                         top_threshold,
             const double bottom_threshold)
  {
    dealii::GridRefinement::refine(tria, criteria, top_threshold);
    dealii::GridRefinement::coarsen(tria, criteria, bottom_threshold);

    // as a final good measure, delete all flags again from cells that we don't
    // locally own
    for (const auto &cell : tria.active_cell_iterators())
      if (cell->subdomain_id() != tria.locally_owned_subdomain())
        {
          cell->clear_refine_flag();
          cell->clear_coarsen_flag();
        }
  }
} // namespace


namespace internal
{
  namespace parallel
  {
    namespace distributed
    {
      namespace GridRefinement
      {
        template <typename number>
        std::pair<number, number>
        compute_global_min_and_max_at_root(
          const dealii::Vector<number> &criteria,
          MPI_Comm                      mpi_communicator)
        {
          // we'd like to compute the global max and min from the local ones in
          // one MPI communication. we can do that by taking the elementwise
          // minimum of the local min and the negative maximum over all
          // processors
          const double local_min = min_element(criteria),
                       local_max = max_element(criteria);
          double comp[2]   = {local_min, -local_max};
          double result[2] = {0, 0};

          // compute the minimum on processor zero
          const int ierr = MPI_Reduce(
            comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, mpi_communicator);
          AssertThrowMPI(ierr);

          // make sure only processor zero got something
          if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0)
            Assert((result[0] == 0) && (result[1] == 0), ExcInternalError());

          return std::make_pair(result[0], -result[1]);
        }
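
The negation trick above folds the min- and the max-reduction into a single MPI_Reduce call, using min(-x) = -max(x). A minimal self-contained demonstration of just this pattern, independent of deal.II:

#include <mpi.h>

#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // pretend each rank holds one local min/max pair
  const double local_min = 1.0 + rank, local_max = 10.0 * (rank + 1);

  // reduce {min, -max} with MPI_MIN: component 0 yields the global
  // minimum, component 1 the negated global maximum
  double comp[2] = {local_min, -local_max}, result[2] = {0, 0};
  MPI_Reduce(comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);

  if (rank == 0)
    std::printf("global min = %g, global max = %g\n", result[0], -result[1]);

  MPI_Finalize();
}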


        namespace RefineAndCoarsenFixedNumber
        {
          template <typename number>
          number
          compute_threshold(const dealii::Vector<number> &   criteria,
                            const std::pair<double, double> &global_min_and_max,
                            const types::global_cell_index   n_target_cells,
                            MPI_Comm                         mpi_communicator)
          {
            double interesting_range[2] = {global_min_and_max.first,
                                           global_min_and_max.second};
            adjust_interesting_range(interesting_range);

            const unsigned int master_mpi_rank = 0;
            unsigned int       iteration       = 0;

            do
              {
                int ierr = MPI_Bcast(interesting_range,
                                     2,
                                     MPI_DOUBLE,
                                     master_mpi_rank,
                                     mpi_communicator);
                AssertThrowMPI(ierr);

                if (interesting_range[0] == interesting_range[1])
                  return interesting_range[0];

                const double test_threshold =
                  (interesting_range[0] > 0 ?
                     std::sqrt(interesting_range[0] * interesting_range[1]) :
                     (interesting_range[0] + interesting_range[1]) / 2);

                // Count how many of our own elements would be above this
                // threshold. Use a 64bit result type if we are compiling with
                // 64bit indices to avoid an overflow when computing the sum
                // below.
                const types::global_cell_index my_count =
                  std::count_if(criteria.begin(),
                                criteria.end(),
                                [test_threshold](const double c) {
                                  return c > test_threshold;
                                });
                const types::global_cell_index total_count =
                  Utilities::MPI::sum(my_count, mpi_communicator);

                // now adjust the range. if we have too many cells, we take the
                // upper half of the previous range, otherwise the lower half.
                // if we have hit the right number, then set the range to the
                // exact value. non-root ranks also update their own
                // interesting_range, but their results do not matter since
                // they will be overwritten by the MPI_Bcast from the root
                // rank in the next iteration.
                if (total_count > n_target_cells)
                  interesting_range[0] = test_threshold;
                else if (total_count < n_target_cells)
                  interesting_range[1] = test_threshold;
                else
                  interesting_range[0] = interesting_range[1] = test_threshold;

                // terminate the iteration after 25 go-arounds. this is
                // necessary because oftentimes error indicators on cells have
                // exactly the same value, and so there may not be a particular
                // value that cuts the indicators in such a way that we can
                // achieve the desired number of cells. using a maximal number
                // of iterations means that we terminate the iteration after a
                // fixed number N of steps if the indicators were perfectly
                // badly distributed, and we make at most a mistake of 1/2^N in
                // the number of cells flagged if indicators are perfectly
                // equidistributed
                ++iteration;
                if (iteration == 25)
                  interesting_range[0] = interesting_range[1] = test_threshold;
              }
            while (true);

            Assert(false, ExcInternalError());
            return -1;
          }
        } // namespace RefineAndCoarsenFixedNumber
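
To put the iteration cap in perspective: each pass halves the candidate interval, so after N steps the bisection can misjudge the cut by at most a fraction 1/2^N; for N = 25 that is roughly 3e-8 of the cell count. The following serial sketch (hypothetical names, strictly positive indicators assumed) mirrors the loop structure above without the MPI communication:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Serial analogue of the fixed-number bisection above (illustrative):
// find a threshold such that roughly n_target of the indicators exceed it.
double fixed_number_threshold(const std::vector<double> &criteria,
                              const std::size_t          n_target)
{
  double lo = *std::min_element(criteria.begin(), criteria.end());
  double hi = *std::max_element(criteria.begin(), criteria.end());
  // widen the range slightly (positive indicators assumed), as
  // adjust_interesting_range() does above
  lo *= 0.99;
  hi *= 1.01;

  for (unsigned int iteration = 0; iteration < 25; ++iteration)
    {
      // geometric mean for positive ranges, cf. the comments above
      const double t = (lo > 0) ? std::sqrt(lo * hi) : (lo + hi) / 2;
      const std::size_t count = static_cast<std::size_t>(
        std::count_if(criteria.begin(), criteria.end(), [t](const double c) {
          return c > t;
        }));
      if (count > n_target)
        lo = t; // too many cells flagged: raise the threshold
      else if (count < n_target)
        hi = t; // too few: lower the threshold
      else
        return t;
    }
  return (lo + hi) / 2; // accept a 1/2^25 mistake after 25 halvings
}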


        namespace RefineAndCoarsenFixedFraction
        {
          template <typename number>
          number
          compute_threshold(const dealii::Vector<number> &   criteria,
                            const std::pair<double, double> &global_min_and_max,
                            const double                     target_error,
                            MPI_Comm                         mpi_communicator)
          {
            double interesting_range[2] = {global_min_and_max.first,
                                           global_min_and_max.second};
            adjust_interesting_range(interesting_range);

            const unsigned int master_mpi_rank = 0;
            unsigned int       iteration       = 0;

            do
              {
                int ierr = MPI_Bcast(interesting_range,
                                     2,
                                     MPI_DOUBLE,
                                     master_mpi_rank,
                                     mpi_communicator);
                AssertThrowMPI(ierr);

                if (interesting_range[0] == interesting_range[1])
                  {
                    // we have found our threshold. since we adjust the range
                    // at the top of the function to be slightly larger than
                    // the actual extremes of the refinement criteria values,
                    // we can end up in a situation where the threshold is in
                    // fact larger than the maximal refinement indicator. in
                    // such cases, we get no refinement at all. thus, cap the
                    // threshold by the actual largest value
                    double final_threshold =
                      std::min(interesting_range[0], global_min_and_max.second);
                    ierr = MPI_Bcast(&final_threshold,
                                     1,
                                     MPI_DOUBLE,
                                     master_mpi_rank,
                                     mpi_communicator);
                    AssertThrowMPI(ierr);

                    return final_threshold;
                  }

                const double test_threshold =
                  (interesting_range[0] > 0 ?
                     std::sqrt(interesting_range[0] * interesting_range[1]) :
                     (interesting_range[0] + interesting_range[1]) / 2);

                // accumulate the error of those of our own elements that lie
                // above this threshold, and then add up the contributions of
                // all processors
                double my_error = 0;
                for (unsigned int i = 0; i < criteria.size(); ++i)
                  if (criteria(i) > test_threshold)
                    my_error += criteria(i);

                double total_error = 0.;

                ierr = MPI_Reduce(&my_error,
                                  &total_error,
                                  1,
                                  MPI_DOUBLE,
                                  MPI_SUM,
                                  master_mpi_rank,
                                  mpi_communicator);
                AssertThrowMPI(ierr);

                // now adjust the range. if we have accumulated too much
                // error, we take the upper half of the previous range,
                // otherwise the lower half. if we have hit the target error
                // exactly, then set the range to that exact value. non-root
                // ranks also update their own interesting_range, but their
                // results do not matter since they will be overwritten by
                // the MPI_Bcast from the root rank in the next iteration.
                if (total_error > target_error)
                  interesting_range[0] = test_threshold;
                else if (total_error < target_error)
                  interesting_range[1] = test_threshold;
                else
                  interesting_range[0] = interesting_range[1] = test_threshold;

                // terminate the iteration after 25 go-arounds. this is
                // necessary because oftentimes error indicators on cells
                // have exactly the same value, and so there may not be a
                // particular value that cuts the indicators in such a way
                // that we can achieve the desired fraction of the error.
                // using a maximum of 25 iterations means that we terminate
                // the iteration after 25 steps if the indicators were
                // perfectly badly distributed, and we make at most a mistake
                // of 1/2^25 in the fraction of the error flagged if the
                // indicators are perfectly equidistributed
                ++iteration;
                if (iteration == 25)
                  interesting_range[0] = interesting_range[1] = test_threshold;
              }
            while (true);

            Assert(false, ExcInternalError());
            return -1;
          }
        } // namespace RefineAndCoarsenFixedFraction
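
The fixed-fraction variant runs the same bisection skeleton, but compares the sum of the indicators above the trial threshold against target_error rather than counting cells. A serial sketch under the same assumptions as before (illustrative names, strictly positive indicators):

#include <algorithm>
#include <cmath>
#include <numeric>
#include <vector>

// Serial analogue of the fixed-fraction bisection above (illustrative):
// find a threshold such that the indicators exceeding it sum to
// approximately `fraction` of the total error.
double fixed_fraction_threshold(const std::vector<double> &criteria,
                                const double               fraction)
{
  const double total  = std::accumulate(criteria.begin(), criteria.end(), 0.0);
  const double target = fraction * total;

  const double max_indicator =
    *std::max_element(criteria.begin(), criteria.end());
  double lo = *std::min_element(criteria.begin(), criteria.end()) * 0.99;
  double hi = max_indicator * 1.01;

  for (unsigned int iteration = 0; iteration < 25; ++iteration)
    {
      const double t = (lo > 0) ? std::sqrt(lo * hi) : (lo + hi) / 2;
      double above = 0;
      for (const double c : criteria)
        if (c > t)
          above += c;

      if (above > target)
        lo = t; // too much error above the threshold: raise it
      else if (above < target)
        hi = t; // too little: lower it
      else
        break;
    }
  // cap by the actual largest indicator, as the parallel code above does,
  // so that the slightly widened range cannot suppress refinement entirely
  return std::min((lo + hi) / 2, max_indicator);
}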
      } // namespace GridRefinement
    }   // namespace distributed
  }     // namespace parallel
} // namespace internal



namespace parallel
{
  namespace distributed
  {
    namespace GridRefinement
    {
      template <int dim, typename Number, int spacedim>
      void
      refine_and_coarsen_fixed_number(
        parallel::distributed::Triangulation<dim, spacedim> &tria,
        const dealii::Vector<Number> &                       criteria,
        const double                   top_fraction_of_cells,
        const double                   bottom_fraction_of_cells,
        const types::global_cell_index max_n_cells)
      {
        Assert(criteria.size() == tria.n_active_cells(),
               ExcDimensionMismatch(criteria.size(), tria.n_active_cells()));
        Assert((top_fraction_of_cells >= 0) && (top_fraction_of_cells <= 1),
               dealii::GridRefinement::ExcInvalidParameterValue());
        Assert((bottom_fraction_of_cells >= 0) &&
                 (bottom_fraction_of_cells <= 1),
               dealii::GridRefinement::ExcInvalidParameterValue());
        Assert(top_fraction_of_cells + bottom_fraction_of_cells <= 1,
               dealii::GridRefinement::ExcInvalidParameterValue());
        Assert(criteria.is_non_negative(),
               dealii::GridRefinement::ExcNegativeCriteria());

        const std::pair<double, double> adjusted_fractions =
          dealii::GridRefinement::adjust_refine_and_coarsen_number_fraction<
            dim>(tria.n_global_active_cells(),
                 max_n_cells,
                 top_fraction_of_cells,
                 bottom_fraction_of_cells);

        // first extract from the vector of indicators the ones that correspond
        // to cells that we locally own
        Vector<Number> locally_owned_indicators(
          tria.n_locally_owned_active_cells());
        get_locally_owned_indicators(tria, criteria, locally_owned_indicators);

        MPI_Comm mpi_communicator = tria.get_communicator();

        // figure out the global max and min of the indicators. we only need
        // these values on the root processor, but the function is a
        // collective call in which everybody has to participate
        const std::pair<Number, Number> global_min_and_max =
          dealii::internal::parallel::distributed::GridRefinement::
            compute_global_min_and_max_at_root(locally_owned_indicators,
                                               mpi_communicator);


        double top_threshold, bottom_threshold;
        top_threshold = dealii::internal::parallel::distributed::
          GridRefinement::RefineAndCoarsenFixedNumber::compute_threshold(
            locally_owned_indicators,
            global_min_and_max,
            static_cast<types::global_cell_index>(adjusted_fractions.first *
                                                  tria.n_global_active_cells()),
            mpi_communicator);

        // compute the bottom threshold only if necessary. otherwise use the
        // lowest threshold possible
        if (adjusted_fractions.second > 0)
          bottom_threshold = dealii::internal::parallel::distributed::
            GridRefinement::RefineAndCoarsenFixedNumber::compute_threshold(
              locally_owned_indicators,
              global_min_and_max,
              static_cast<types::global_cell_index>(
                std::ceil((1. - adjusted_fractions.second) *
                          tria.n_global_active_cells())),
              mpi_communicator);
        else
          bottom_threshold = std::numeric_limits<Number>::lowest();

        // now refine the mesh
        mark_cells(tria, criteria, top_threshold, bottom_threshold);
      }


      template <int dim, typename Number, int spacedim>
      void
      refine_and_coarsen_fixed_fraction(
        parallel::distributed::Triangulation<dim, spacedim> &tria,
        const dealii::Vector<Number> &                       criteria,
        const double top_fraction_of_error,
        const double bottom_fraction_of_error)
      {
        Assert(criteria.size() == tria.n_active_cells(),
               ExcDimensionMismatch(criteria.size(), tria.n_active_cells()));
        Assert((top_fraction_of_error >= 0) && (top_fraction_of_error <= 1),
               dealii::GridRefinement::ExcInvalidParameterValue());
        Assert((bottom_fraction_of_error >= 0) &&
                 (bottom_fraction_of_error <= 1),
               dealii::GridRefinement::ExcInvalidParameterValue());
        Assert(top_fraction_of_error + bottom_fraction_of_error <= 1,
               dealii::GridRefinement::ExcInvalidParameterValue());
        Assert(criteria.is_non_negative(),
               dealii::GridRefinement::ExcNegativeCriteria());

        // first extract from the vector of indicators the ones that correspond
        // to cells that we locally own
        Vector<Number> locally_owned_indicators(
          tria.n_locally_owned_active_cells());
        get_locally_owned_indicators(tria, criteria, locally_owned_indicators);

        MPI_Comm mpi_communicator = tria.get_communicator();

        // figure out the global max and min of the indicators. we only need
        // these values on the root processor, but the function is a
        // collective call in which everybody has to participate
        const std::pair<double, double> global_min_and_max =
          dealii::internal::parallel::distributed::GridRefinement::
            compute_global_min_and_max_at_root(locally_owned_indicators,
                                               mpi_communicator);

        const double total_error =
          compute_global_sum(locally_owned_indicators, mpi_communicator);

        double top_threshold, bottom_threshold;
        top_threshold = dealii::internal::parallel::distributed::
          GridRefinement::RefineAndCoarsenFixedFraction::compute_threshold(
            locally_owned_indicators,
            global_min_and_max,
            top_fraction_of_error * total_error,
            mpi_communicator);

        // compute the bottom threshold only if necessary. otherwise use the
        // lowest threshold possible
        if (bottom_fraction_of_error > 0)
          bottom_threshold = dealii::internal::parallel::distributed::
            GridRefinement::RefineAndCoarsenFixedFraction::compute_threshold(
              locally_owned_indicators,
              global_min_and_max,
              (1. - bottom_fraction_of_error) * total_error,
              mpi_communicator);
        else
          bottom_threshold = std::numeric_limits<Number>::lowest();

        // now refine the mesh
        mark_cells(tria, criteria, top_threshold, bottom_threshold);
      }
    } // namespace GridRefinement
  }   // namespace distributed
} // namespace parallel


// explicit instantiations
#  include "grid_refinement.inst"

DEAL_II_NAMESPACE_CLOSE

#endif
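
For context, a typical call site for the functions defined in this file looks as follows. This is a sketch assuming a deal.II build with p4est; the mesh setup and the placeholder indicator values stand in for a real error estimate (e.g. one computed by KellyErrorEstimator):

#include <deal.II/base/mpi.h>

#include <deal.II/distributed/grid_refinement.h>
#include <deal.II/distributed/tria.h>

#include <deal.II/grid/grid_generator.h>

#include <deal.II/lac/vector.h>

using namespace dealii;

int main(int argc, char **argv)
{
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
  GridGenerator::hyper_cube(tria);
  tria.refine_global(4);

  // one indicator per active cell; in a real program these would come
  // from an error estimator rather than a placeholder pattern
  Vector<float> estimated_error_per_cell(tria.n_active_cells());
  for (unsigned int i = 0; i < estimated_error_per_cell.size(); ++i)
    estimated_error_per_cell(i) = 1. + (i % 5);

  // flag ~30% of the cells for refinement and ~3% for coarsening,
  // then execute the refinement
  parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
    tria, estimated_error_per_cell, 0.3, 0.03);
  tria.execute_coarsening_and_refinement();
}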