Reference documentation for deal.II version 9.0.0
grid_refinement.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2000 - 2018 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------


#include <deal.II/base/utilities.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>

#ifdef DEAL_II_WITH_P4EST

#include <deal.II/grid/grid_refinement.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/tria.h>

#include <deal.II/distributed/grid_refinement.h>

#include <numeric>
#include <algorithm>
#include <limits>
#include <functional>


DEAL_II_NAMESPACE_OPEN


namespace
{
  template <typename number>
  inline
  number
  max_element (const dealii::Vector<number> &criteria)
  {
    return (criteria.size() > 0)
           ?
           (*std::max_element(criteria.begin(), criteria.end()))
           :
           std::numeric_limits<number>::min();
  }



  template <typename number>
  inline
  number
  min_element (const dealii::Vector<number> &criteria)
  {
    return (criteria.size() > 0)
           ?
           (*std::min_element(criteria.begin(), criteria.end()))
           :
           std::numeric_limits<number>::max();
  }


  // Compute the global minimum and maximum of the criteria vector. The pair
  // is only meaningful on the processor with rank zero; all other processors
  // receive a pair of zeros.
  template <typename number>
  std::pair<number,number>
  compute_global_min_and_max_at_root (const dealii::Vector<number> &criteria,
                                      MPI_Comm                      mpi_communicator)
  {
    // we'd like to compute the global max and min from the local ones in one
    // MPI communication. we can do that by taking the elementwise minimum of
    // the local min and the negative maximum over all processors

    const double local_min = min_element (criteria),
                 local_max = max_element (criteria);
    double comp[2]   = { local_min, -local_max };
    double result[2] = { 0, 0 };

    // compute the minimum on processor zero
    const int ierr = MPI_Reduce (comp, result, 2, MPI_DOUBLE,
                                 MPI_MIN, 0, mpi_communicator);
    AssertThrowMPI(ierr);

    // make sure only processor zero got something
    if (Utilities::MPI::this_mpi_process (mpi_communicator) != 0)
      Assert ((result[0] == 0) && (result[1] == 0),
              ExcInternalError());

    return std::make_pair (result[0], -result[1]);
  }
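  // Editorial note, a minimal illustration of the trick above (not part of
  // deal.II): reducing the pair (local_min, -local_max) with MPI_MIN yields
  // the minimum of the first entries and minus the maximum of the second
  // entries over all ranks, so negating the second result entry recovers the
  // global maximum. For example, with local (min,max) pairs (1,4) on rank 0
  // and (2,7) on rank 1, the reduced pair on rank 0 is (1,-7) and the
  // function returns (1,7) there. In isolation the pattern looks like this
  // (illustrative variable names, MPI_COMM_WORLD assumed):
  //
  //   double comp[2]   = { local_min, -local_max };
  //   double result[2] = { 0, 0 };
  //   MPI_Reduce (comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  //   // on rank 0: global_min = result[0], global_max = -result[1]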


  // Compute the global sum of the elements of the criteria vector over all
  // processors. Only the processor with rank zero receives the result; all
  // others receive zero.
  template <typename number>
  double
  compute_global_sum (const dealii::Vector<number> &criteria,
                      MPI_Comm                      mpi_communicator)
  {
    double my_sum = std::accumulate (criteria.begin(),
                                     criteria.end(),
                                     /* do accumulation in the correct data type: */
                                     number());

    double result = 0;
    // compute the sum on processor zero
    const int ierr = MPI_Reduce (&my_sum, &result, 1, MPI_DOUBLE,
                                 MPI_SUM, 0, mpi_communicator);
    AssertThrowMPI(ierr);

    // make sure only processor zero got something
    if (Utilities::MPI::this_mpi_process (mpi_communicator) != 0)
      Assert (result == 0, ExcInternalError());

    return result;
  }



  // Given a vector of refinement criteria for all cells of a mesh (whether
  // locally owned or not), extract those entries that correspond to cells
  // the current processor locally owns.
  template <int dim, int spacedim, typename Number>
  void
  get_locally_owned_indicators (const parallel::distributed::Triangulation<dim,spacedim> &tria,
                                const dealii::Vector<Number> &criteria,
                                Vector<Number>               &locally_owned_indicators)
  {
    Assert (locally_owned_indicators.size() == tria.n_locally_owned_active_cells(),
            ExcInternalError());

    unsigned int owned_index = 0;
    for (typename Triangulation<dim,spacedim>::active_cell_iterator
         cell = tria.begin_active();
         cell != tria.end(); ++cell)
      if (cell->subdomain_id() == tria.locally_owned_subdomain())
        {
          locally_owned_indicators(owned_index)
            = criteria(cell->active_cell_index());
          ++owned_index;
        }
    Assert (owned_index == tria.n_locally_owned_active_cells(),
            ExcInternalError());
  }
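  // Editorial note (not part of deal.II): for active cells, the subdomain id
  // comparison in the loop above is equivalent to the convenience predicate
  // cell->is_locally_owned(), so the loop could also be written as
  //
  //   for (auto cell = tria.begin_active(); cell != tria.end(); ++cell)
  //     if (cell->is_locally_owned())
  //       locally_owned_indicators(owned_index++) =
  //         criteria(cell->active_cell_index());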


  // we compute refinement thresholds by bisection of the interval spanned by
  // the smallest and largest error indicator. this leads to a small problem:
  // if, for example, we want to coarsen zero percent of the cells, then we
  // need to pick a threshold equal to the smallest indicator, but of course
  // the bisection algorithm can never find a threshold equal to one of the
  // end points of the interval. So we slightly enlarge the interval before
  // we even start
  void adjust_interesting_range (double (&interesting_range)[2])
  {
    Assert (interesting_range[0] <= interesting_range[1],
            ExcInternalError());

    Assert (interesting_range[0] >= 0,
            ExcInternalError());

    // adjust the lower bound only if the end point is not equal to zero,
    // otherwise it could happen that the result becomes negative
    if (interesting_range[0] > 0)
      interesting_range[0] *= 0.99;

    if (interesting_range[1] > 0)
      interesting_range[1] *= 1.01;
    else
      interesting_range[1]
        += 0.01 * (interesting_range[1] - interesting_range[0]);
  }
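  // Editorial note, a small worked example of the widening above: the range
  // [0.2, 0.8] becomes [0.198, 0.808]; the range [0, 0.8] keeps its lower
  // end point and becomes [0, 0.808]; and the degenerate all-zero range
  // [0, 0] stays [0, 0], because the else branch adds 0.01 * (0 - 0) = 0.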


  // Given a vector of criteria and bottom and top thresholds for coarsening
  // and refinement, mark all those cells that we locally own as appropriate
  // for coarsening or refinement.
  template <int dim, int spacedim, typename Number>
  void
  mark_cells (parallel::distributed::Triangulation<dim,spacedim> &tria,
              const dealii::Vector<Number> &criteria,
              const double                  top_threshold,
              const double                  bottom_threshold)
  {
    dealii::GridRefinement::refine (tria, criteria, top_threshold);
    dealii::GridRefinement::coarsen (tria, criteria, bottom_threshold);

    // as a final good measure, delete all flags again from cells that we don't
    // locally own
    for (typename Triangulation<dim,spacedim>::active_cell_iterator
         cell = tria.begin_active();
         cell != tria.end(); ++cell)
      if (cell->subdomain_id() != tria.locally_owned_subdomain())
        {
          cell->clear_refine_flag ();
          cell->clear_coarsen_flag ();
        }
  }




  namespace RefineAndCoarsenFixedNumber
  {
    // Compute a threshold value so that exactly n_target_cells of the
    // criteria are larger than it.
    template <typename number>
    number
    compute_threshold (const dealii::Vector<number>   &criteria,
                       const std::pair<double,double> &global_min_and_max,
                       const unsigned int              n_target_cells,
                       MPI_Comm                        mpi_communicator)
    {
      double interesting_range[2] = { global_min_and_max.first,
                                      global_min_and_max.second
                                    };
      adjust_interesting_range (interesting_range);

      const unsigned int master_mpi_rank = 0;
      unsigned int iteration = 0;

      do
        {
          int ierr = MPI_Bcast (interesting_range, 2, MPI_DOUBLE,
                                master_mpi_rank, mpi_communicator);
          AssertThrowMPI(ierr);

          if (interesting_range[0] == interesting_range[1])
            return interesting_range[0];

          const double test_threshold
            = (interesting_range[0] > 0
               ?
               std::sqrt(interesting_range[0] * interesting_range[1])
               :
               (interesting_range[0] + interesting_range[1]) / 2);

          // count how many of our own elements would be above this threshold
          // and then add to it the number for all the others
          unsigned int
          my_count = std::count_if (criteria.begin(),
                                    criteria.end(),
                                    [test_threshold](const double c)
          {
            return c > test_threshold;
          });

          unsigned int total_count;
          ierr = MPI_Reduce (&my_count, &total_count, 1, MPI_UNSIGNED,
                             MPI_SUM, master_mpi_rank, mpi_communicator);
          AssertThrowMPI(ierr);

          // now adjust the range. if we have too many cells, we take the
          // upper half of the previous range, otherwise the lower half. if we
          // have hit the right number, then set the range to the exact value.
          // non-master nodes also update their own interesting_range; their
          // results are not significant, however, since the values will be
          // overwritten by the MPI_Bcast from the master node in the next
          // loop iteration.
          if (total_count > n_target_cells)
            interesting_range[0] = test_threshold;
          else if (total_count < n_target_cells)
            interesting_range[1] = test_threshold;
          else
            interesting_range[0] = interesting_range[1] = test_threshold;

          // terminate the iteration after 25 go-arounds. this is necessary
          // because oftentimes error indicators on cells have exactly the
          // same value, and so there may not be a particular value that cuts
          // the indicators in such a way that we can achieve the desired
          // number of cells. using a maximal number of iterations means that
          // we terminate the iteration after a fixed number N of steps if the
          // indicators were perfectly badly distributed, and we make at most
          // a mistake of 1/2^N in the number of cells flagged if indicators
          // are perfectly equidistributed
          ++iteration;
          if (iteration == 25)
            interesting_range[0] = interesting_range[1] = test_threshold;
        }
      while (true);

      Assert (false, ExcInternalError());
      return -1;
    }
  }
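  // Editorial sketch (not part of deal.II): the same bisection written as a
  // small serial function, to show the two essential ingredients of the
  // parallel code above -- the geometric-mean split for strictly positive
  // ranges and the fixed 25-iteration cap -- without the MPI traffic. It
  // reuses adjust_interesting_range() from above, assumes a non-empty input,
  // and all names are illustrative only:
  //
  //   double serial_threshold (const std::vector<double> &criteria,
  //                            const unsigned int         n_target_cells)
  //   {
  //     double range[2] = { *std::min_element (criteria.begin(), criteria.end()),
  //                         *std::max_element (criteria.begin(), criteria.end()) };
  //     adjust_interesting_range (range);
  //
  //     for (unsigned int iteration = 0; iteration < 25; ++iteration)
  //       {
  //         if (range[0] == range[1])
  //           break;
  //
  //         // geometric mean on a strictly positive range, arithmetic mean
  //         // otherwise
  //         const double t = (range[0] > 0 ?
  //                           std::sqrt (range[0] * range[1]) :
  //                           (range[0] + range[1]) / 2);
  //         const unsigned int count
  //           = std::count_if (criteria.begin(), criteria.end(),
  //                            [t](const double c) { return c > t; });
  //
  //         if (count > n_target_cells)
  //           range[0] = t;      // too many cells flagged: raise the threshold
  //         else if (count < n_target_cells)
  //           range[1] = t;      // too few cells flagged: lower the threshold
  //         else
  //           range[0] = range[1] = t;
  //       }
  //     return range[0];
  //   }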




  namespace RefineAndCoarsenFixedFraction
  {
    // Compute a threshold value so that the error accumulated over all
    // criteria[i] with criteria[i] > threshold is larger than target_error.
    template <typename number>
    number
    compute_threshold (const dealii::Vector<number>   &criteria,
                       const std::pair<double,double> &global_min_and_max,
                       const double                    target_error,
                       MPI_Comm                        mpi_communicator)
    {
      double interesting_range[2] = { global_min_and_max.first,
                                      global_min_and_max.second
                                    };
      adjust_interesting_range (interesting_range);

      const unsigned int master_mpi_rank = 0;
      unsigned int iteration = 0;

      do
        {
          int ierr = MPI_Bcast (interesting_range, 2, MPI_DOUBLE,
                                master_mpi_rank, mpi_communicator);
          AssertThrowMPI(ierr);

          if (interesting_range[0] == interesting_range[1])
            {
              // so we have found our threshold. since we adjust the range
              // at the top of the function to be slightly larger than the
              // actual extremes of the refinement criteria values, we can end
              // up in a situation where the threshold is in fact larger than
              // the maximal refinement indicator. in such cases, we get no
              // refinement at all. thus, cap the threshold by the actual
              // largest value
              double final_threshold = std::min (interesting_range[0],
                                                 global_min_and_max.second);
              ierr = MPI_Bcast (&final_threshold, 1, MPI_DOUBLE,
                                master_mpi_rank, mpi_communicator);
              AssertThrowMPI(ierr);

              return final_threshold;
            }

          const double test_threshold
            = (interesting_range[0] > 0
               ?
               std::sqrt(interesting_range[0] * interesting_range[1])
               :
               (interesting_range[0] + interesting_range[1]) / 2);

          // accumulate the error of those of our own elements that lie above
          // this threshold, and then add to it the error for all the others
          double my_error = 0;
          for (unsigned int i=0; i<criteria.size(); ++i)
            if (criteria(i) > test_threshold)
              my_error += criteria(i);

          double total_error;
          ierr = MPI_Reduce (&my_error, &total_error, 1, MPI_DOUBLE,
                             MPI_SUM, master_mpi_rank, mpi_communicator);
          AssertThrowMPI(ierr);

          // now adjust the range. if we have accumulated too much error, we
          // take the upper half of the previous range, otherwise the lower
          // half. if we have hit the target error exactly, then set the range
          // to the exact value.
          // non-master nodes also update their own interesting_range; their
          // results are not significant, however, since the values will be
          // overwritten by the MPI_Bcast from the master node in the next
          // loop iteration.
          if (total_error > target_error)
            interesting_range[0] = test_threshold;
          else if (total_error < target_error)
            interesting_range[1] = test_threshold;
          else
            interesting_range[0] = interesting_range[1] = test_threshold;

          // terminate the iteration after 25 go-arounds. this is
          // necessary because oftentimes error indicators on cells
          // have exactly the same value, and so there may not be a
          // particular value that cuts the indicators in such a way
          // that we can achieve the desired number of cells. using a
          // max of 25 iterations means that we terminate the
          // iteration after 25 steps if the indicators were perfectly
          // badly distributed, and we make at most a mistake of
          // 1/2^25 in the number of cells flagged if indicators are
          // perfectly equidistributed
          ++iteration;
          if (iteration == 25)
            interesting_range[0] = interesting_range[1] = test_threshold;
        }
      while (true);

      Assert (false, ExcInternalError());
      return -1;
    }
  }
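  // Editorial note: structurally, the only differences from the fixed-number
  // bisection above are (a) the quantity driving the bisection -- the sum of
  // all criteria above the test threshold, compared against target_error,
  // instead of their count -- and (b) the final std::min against
  // global_min_and_max.second, which caps the returned threshold at the true
  // maximum of the indicators so that the subsequent refinement step does
  // not end up flagging no cells at all. In a serial sketch the counting
  // step would simply become
  //
  //   double error_above = 0;
  //   for (const double c : criteria)
  //     if (c > t)
  //       error_above += c;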
}



namespace parallel
{
  namespace distributed
  {
    namespace GridRefinement
    {
      template <int dim, typename Number, int spacedim>
      void
      refine_and_coarsen_fixed_number (parallel::distributed::Triangulation<dim,spacedim> &tria,
                                       const dealii::Vector<Number> &criteria,
                                       const double                  top_fraction_of_cells,
                                       const double                  bottom_fraction_of_cells,
                                       const unsigned int            max_n_cells)
      {
        Assert (criteria.size() == tria.n_active_cells(),
                ExcDimensionMismatch (criteria.size(), tria.n_active_cells()));
        Assert ((top_fraction_of_cells >= 0) && (top_fraction_of_cells <= 1),
                dealii::GridRefinement::ExcInvalidParameterValue());
        Assert ((bottom_fraction_of_cells >= 0) && (bottom_fraction_of_cells <= 1),
                dealii::GridRefinement::ExcInvalidParameterValue());
        Assert (top_fraction_of_cells + bottom_fraction_of_cells <= 1,
                dealii::GridRefinement::ExcInvalidParameterValue());
        Assert (criteria.is_non_negative (),
                dealii::GridRefinement::ExcNegativeCriteria());

        const std::pair<double, double> adjusted_fractions =
          dealii::GridRefinement::adjust_refine_and_coarsen_number_fraction<dim> (
            tria.n_global_active_cells(),
            max_n_cells,
            top_fraction_of_cells,
            bottom_fraction_of_cells);

        // first extract from the vector of indicators the ones that correspond
        // to cells that we locally own
        Vector<Number>
        locally_owned_indicators (tria.n_locally_owned_active_cells());
        get_locally_owned_indicators (tria,
                                      criteria,
                                      locally_owned_indicators);

        MPI_Comm mpi_communicator = tria.get_communicator ();

        // figure out the global max and min of the indicators. this is used
        // below to seed the bisection interval; only processor zero receives
        // meaningful values, but every processor has to participate since
        // this is a collective call
        const std::pair<Number,Number> global_min_and_max
          = compute_global_min_and_max_at_root (locally_owned_indicators,
                                                mpi_communicator);


        double top_threshold, bottom_threshold;
        top_threshold =
          RefineAndCoarsenFixedNumber::
          compute_threshold (locally_owned_indicators,
                             global_min_and_max,
                             static_cast<unsigned int>
                             (adjusted_fractions.first *
                              tria.n_global_active_cells()),
                             mpi_communicator);

        // compute bottom threshold only if necessary. otherwise use a threshold
        // lower than the smallest value we have locally
        if (adjusted_fractions.second > 0)
          bottom_threshold =
            RefineAndCoarsenFixedNumber::
            compute_threshold (locally_owned_indicators,
                               global_min_and_max,
                               static_cast<unsigned int>
                               ((1 - adjusted_fractions.second) *
                                tria.n_global_active_cells()),
                               mpi_communicator);
        else
          {
            bottom_threshold = *std::min_element (criteria.begin(),
                                                  criteria.end());
            bottom_threshold -= std::fabs(bottom_threshold);
          }

        // now refine the mesh
        mark_cells (tria, criteria, top_threshold, bottom_threshold);
      }
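      // A typical call from user code (editorial sketch, not taken from this
      // file; 'triangulation' and 'estimated_error_per_cell' are placeholder
      // names for a parallel::distributed::Triangulation<dim> and a
      // Vector<float> of per-cell error indicators, e.g. computed by
      // KellyErrorEstimator):
      //
      //   parallel::distributed::GridRefinement::
      //     refine_and_coarsen_fixed_number (triangulation,
      //                                      estimated_error_per_cell,
      //                                      0.3,    // refine ~30% of the cells
      //                                      0.03);  // coarsen ~3% of the cells
      //   triangulation.execute_coarsening_and_refinement ();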


      template <int dim, typename Number, int spacedim>
      void
      refine_and_coarsen_fixed_fraction (parallel::distributed::Triangulation<dim,spacedim> &tria,
                                         const dealii::Vector<Number> &criteria,
                                         const double                  top_fraction_of_error,
                                         const double                  bottom_fraction_of_error)
      {
        Assert (criteria.size() == tria.n_active_cells(),
                ExcDimensionMismatch (criteria.size(), tria.n_active_cells()));
        Assert ((top_fraction_of_error >= 0) && (top_fraction_of_error <= 1),
                dealii::GridRefinement::ExcInvalidParameterValue());
        Assert ((bottom_fraction_of_error >= 0) && (bottom_fraction_of_error <= 1),
                dealii::GridRefinement::ExcInvalidParameterValue());
        Assert (top_fraction_of_error + bottom_fraction_of_error <= 1,
                dealii::GridRefinement::ExcInvalidParameterValue());
        Assert (criteria.is_non_negative (),
                dealii::GridRefinement::ExcNegativeCriteria());

        // first extract from the vector of indicators the ones that correspond
        // to cells that we locally own
        Vector<Number> locally_owned_indicators (tria.n_locally_owned_active_cells());
        get_locally_owned_indicators (tria,
                                      criteria,
                                      locally_owned_indicators);

        MPI_Comm mpi_communicator = tria.get_communicator ();

        // figure out the global max and min of the indicators. this is used
        // below to seed the bisection interval; only processor zero receives
        // meaningful values, but every processor has to participate since
        // this is a collective call
        const std::pair<double,double> global_min_and_max
          = compute_global_min_and_max_at_root (locally_owned_indicators,
                                                mpi_communicator);

        const double total_error
          = compute_global_sum (locally_owned_indicators,
                                mpi_communicator);
        double top_threshold, bottom_threshold;
        top_threshold =
          RefineAndCoarsenFixedFraction::
          compute_threshold (locally_owned_indicators,
                             global_min_and_max,
                             top_fraction_of_error *
                             total_error,
                             mpi_communicator);
        // compute bottom threshold only if necessary. otherwise use a threshold
        // lower than the smallest value we have locally
        if (bottom_fraction_of_error > 0)
          bottom_threshold =
            RefineAndCoarsenFixedFraction::
            compute_threshold (locally_owned_indicators,
                               global_min_and_max,
                               (1 - bottom_fraction_of_error) *
                               total_error,
                               mpi_communicator);
        else
          {
            bottom_threshold = *std::min_element (criteria.begin(),
                                                  criteria.end());
            bottom_threshold -= std::fabs(bottom_threshold);
          }

        // now refine the mesh
        mark_cells (tria, criteria, top_threshold, bottom_threshold);
      }
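      // The fixed-fraction variant is used the same way (editorial sketch,
      // same placeholder names as above); the two fractions now refer to
      // shares of the total estimated error rather than to shares of the
      // number of cells:
      //
      //   parallel::distributed::GridRefinement::
      //     refine_and_coarsen_fixed_fraction (triangulation,
      //                                        estimated_error_per_cell,
      //                                        0.8,   // refine cells holding ~80% of the error
      //                                        0.1);  // coarsen cells holding ~10% of the error
      //   triangulation.execute_coarsening_and_refinement ();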
    }
  }
}



// explicit instantiations
#include "grid_refinement.inst"

DEAL_II_NAMESPACE_CLOSE

#endif