Reference documentation for deal.II version 9.3.3
grid_refinement.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2000 - 2020 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#include <deal.II/base/mpi.h>
#include <deal.II/base/utilities.h>

#include <deal.II/distributed/grid_refinement.h>

#include <deal.II/lac/vector.h>

#ifdef DEAL_II_WITH_P4EST

#  include <deal.II/distributed/tria.h>

#  include <deal.II/grid/grid_refinement.h>
#  include <deal.II/grid/tria.h>
#  include <deal.II/grid/tria_accessor.h>
#  include <deal.II/grid/tria_iterator.h>

#  include <algorithm>
#  include <functional>
#  include <limits>
#  include <numeric>


DEAL_II_NAMESPACE_OPEN

namespace
{
  template <typename number>
  inline number
  max_element(const ::Vector<number> &criteria)
  {
    return (criteria.size() > 0) ?
             (*std::max_element(criteria.begin(), criteria.end())) :
             std::numeric_limits<number>::min();
  }



  template <typename number>
  inline number
  min_element(const ::Vector<number> &criteria)
  {
    return (criteria.size() > 0) ?
             (*std::min_element(criteria.begin(), criteria.end())) :
             std::numeric_limits<number>::max();
  }


  /**
   * Compute the global sum over all MPI processes of the entries of the
   * given criteria vector. The result is only significant on processor
   * zero; all other processors return zero.
   */
  template <typename number>
  double
  compute_global_sum(const ::Vector<number> &criteria,
                     const MPI_Comm &        mpi_communicator)
  {
    double my_sum =
      std::accumulate(criteria.begin(),
                      criteria.end(),
                      /* do accumulation in the correct data type: */
                      number());

    double result = 0;
    // compute the sum on processor zero
    const int ierr =
      MPI_Reduce(&my_sum, &result, 1, MPI_DOUBLE, MPI_SUM, 0, mpi_communicator);
    AssertThrowMPI(ierr);

    // make sure only processor zero got something
    if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0)
      Assert(result == 0, ExcInternalError());

    return result;
  }
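
  // Behavior sketch for the reduction above (hypothetical values, not
  // library code): with per-processor sums {1.5, 2.5, 3.0} on ranks 0..2,
  // MPI_Reduce with MPI_SUM and root 0 leaves result == 7.0 on rank 0 and
  // result == 0.0 (untouched) on all other ranks, which is exactly what the
  // Assert at the end of compute_global_sum() verifies.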



  /**
   * Given a vector of refinement criteria for all cells of a mesh (locally
   * owned or not), extract those that correspond to locally owned cells.
   */
  template <int dim, int spacedim, typename Number>
  void
  get_locally_owned_indicators(
    const parallel::distributed::Triangulation<dim, spacedim> &tria,
    const ::Vector<Number> &                                   criteria,
    Vector<Number> &locally_owned_indicators)
  {
    Assert(locally_owned_indicators.size() ==
             tria.n_locally_owned_active_cells(),
           ExcInternalError());

    unsigned int owned_index = 0;
    for (const auto &cell : tria.active_cell_iterators())
      if (cell->subdomain_id() == tria.locally_owned_subdomain())
        {
          locally_owned_indicators(owned_index) =
            criteria(cell->active_cell_index());
          ++owned_index;
        }
    Assert(owned_index == tria.n_locally_owned_active_cells(),
           ExcInternalError());
  }


  // we compute refinement thresholds by bisection of the interval spanned by
  // the smallest and largest error indicator. this leads to a small problem:
  // if, for example, we want to coarsen zero per cent of the cells, then we
  // need to pick a threshold equal to the smallest indicator, but of course
  // the bisection algorithm can never find a threshold equal to one of the
  // end points of the interval. So we slightly enlarge the interval before
  // we even start
  void
  adjust_interesting_range(double (&interesting_range)[2])
  {
    Assert(interesting_range[0] <= interesting_range[1], ExcInternalError());

    if (interesting_range[0] > 0)
      {
        // In this case, the `compute_threshold` functions calculate the
        // first interval split point `m` in an optimized way: they exploit
        // that the logarithms of all criteria are more uniformly distributed
        // than their actual values, i.e., m = sqrt(b*e).
        //
        // Both factors modify the split point only slightly, by a factor of
        // sqrt(1.01*0.99) = sqrt(0.9999) ~ 0.99995.
        interesting_range[0] *= 0.99;
        interesting_range[1] *= 1.01;
      }
    else
      {
        // In all other cases, we begin with the arithmetic mean as the
        // standard interval split point, i.e., m = (b+e)/2.
        //
        // Both increments add up to zero when calculating the initial
        // split point in the `compute_threshold` functions.
        const double difference =
          std::abs(interesting_range[1] - interesting_range[0]);
        interesting_range[0] -= 0.01 * difference;
        interesting_range[1] += 0.01 * difference;
      }
  }
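
  // Illustrative sketch of how the widened range feeds the first bisection
  // step (hypothetical numbers, not library code):
  //
  //   double range[2] = {1e-8, 1e-2};  // strictly positive indicators
  //   adjust_interesting_range(range); // now {0.99e-8, 1.01e-2}
  //   const double m = std::sqrt(range[0] * range[1]); // geometric split
  //
  // For ranges whose lower end is zero or negative, the arithmetic mean
  // (range[0] + range[1]) / 2 is used instead; the +/- 0.01 * difference
  // adjustment leaves that initial split point unchanged.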



  /**
   * Given refinement and coarsening thresholds, flag the locally owned
   * cells of the triangulation for refinement or coarsening accordingly.
   */
  template <int dim, int spacedim, typename Number>
  void
  mark_cells(parallel::distributed::Triangulation<dim, spacedim> &tria,
             const ::Vector<Number> &                             criteria,
             const double top_threshold,
             const double bottom_threshold)
  {
    ::GridRefinement::refine(tria, criteria, top_threshold);
    ::GridRefinement::coarsen(tria, criteria, bottom_threshold);

    // as a final good measure, delete all flags again from cells that we
    // don't locally own
    for (const auto &cell : tria.active_cell_iterators())
      if (cell->subdomain_id() != tria.locally_owned_subdomain())
        {
          cell->clear_refine_flag();
          cell->clear_coarsen_flag();
        }
  }
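
  // Downstream usage sketch (hypothetical driver code, not part of this
  // file): mark_cells() only sets refinement and coarsening flags; the mesh
  // itself changes once the caller executes
  //
  //   tria.execute_coarsening_and_refinement();
  //
  // which is a collective operation over all participating processes.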


  /**
   * Fixed-fraction refinement and coarsening where the fractions refer to
   * the sum (i.e., the l1-norm) of the error indicators.
   */
  template <int dim, int spacedim, typename Number>
  void
  refine_and_coarsen_fixed_fraction_via_l1_norm(
    parallel::distributed::Triangulation<dim, spacedim> &tria,
    const ::Vector<Number> &                             criteria,
    const double top_fraction_of_error,
    const double bottom_fraction_of_error)
  {
    // first extract from the vector of indicators the ones that correspond
    // to cells that we locally own
    Vector<Number> locally_owned_indicators(
      tria.n_locally_owned_active_cells());
    get_locally_owned_indicators(tria, criteria, locally_owned_indicators);

    MPI_Comm mpi_communicator = tria.get_communicator();

    // figure out the global max and min of the indicators. we don't need
    // them here, but it's a collective communication call
    const std::pair<double, double> global_min_and_max =
      ::internal::parallel::distributed::GridRefinement::
        compute_global_min_and_max_at_root(locally_owned_indicators,
                                           mpi_communicator);

    const double total_error =
      compute_global_sum(locally_owned_indicators, mpi_communicator);

    double top_target_error    = top_fraction_of_error * total_error,
           bottom_target_error = (1. - bottom_fraction_of_error) * total_error;

    double top_threshold, bottom_threshold;
    top_threshold = ::internal::parallel::distributed::GridRefinement::
      RefineAndCoarsenFixedFraction::compute_threshold(
        locally_owned_indicators,
        global_min_and_max,
        top_target_error,
        mpi_communicator);

    // compute bottom threshold only if necessary. otherwise use the lowest
    // threshold possible
    if (bottom_fraction_of_error > 0)
      bottom_threshold = ::internal::parallel::distributed::
        GridRefinement::RefineAndCoarsenFixedFraction::compute_threshold(
          locally_owned_indicators,
          global_min_and_max,
          bottom_target_error,
          mpi_communicator);
    else
      bottom_threshold = std::numeric_limits<Number>::lowest();

    // now refine the mesh
    mark_cells(tria, criteria, top_threshold, bottom_threshold);
  }
} // namespace
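
// Illustrative arithmetic for the fixed-fraction targets computed above
// (hypothetical numbers, not library code): with total_error = 10,
// top_fraction_of_error = 0.3 and bottom_fraction_of_error = 0.1, the
// refinement threshold is chosen so that the cells above it carry
// top_target_error = 0.3 * 10 = 3, while the coarsening threshold is chosen
// so that the cells above it still carry bottom_target_error =
// (1 - 0.1) * 10 = 9; the remaining 10% of the error then sits on the cells
// flagged for coarsening.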



namespace internal
{
  namespace parallel
  {
    namespace distributed
    {
      namespace GridRefinement
      {
        template <typename number>
        std::pair<number, number>
        compute_global_min_and_max_at_root(
          const ::Vector<number> &criteria,
          const MPI_Comm &        mpi_communicator)
        {
          // we'd like to compute the global max and min from the local ones
          // in one MPI communication. we can do that by taking the
          // elementwise minimum of the local min and the negative local max
          // over all processors

          const double local_min = min_element(criteria),
                       local_max = max_element(criteria);
          double comp[2]   = {local_min, -local_max};
          double result[2] = {0, 0};

          // compute the minimum on processor zero
          const int ierr = MPI_Reduce(
            comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, mpi_communicator);
          AssertThrowMPI(ierr);

          // make sure only processor zero got something
          if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0)
            Assert((result[0] == 0) && (result[1] == 0), ExcInternalError());

          return std::make_pair(result[0], -result[1]);
        }
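
        // Worked example of the min/negated-max trick above (hypothetical
        // values, not library code): with local (min, max) pairs (1, 4) on
        // rank 0 and (2, 8) on rank 1, each rank contributes {min, -max}:
        //
        //   rank 0: {1, -4}   rank 1: {2, -8}
        //   MPI_MIN reduction        -> {1, -8}
        //   negate second component  -> (min, max) = (1, 8)
        //
        // so a single MPI_Reduce delivers both extrema at the root.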



        namespace RefineAndCoarsenFixedNumber
        {
          template <typename number>
          number
          compute_threshold(const ::Vector<number> &         criteria,
                            const std::pair<double, double> &global_min_and_max,
                            const types::global_cell_index   n_target_cells,
                            const MPI_Comm &                 mpi_communicator)
          {
            double interesting_range[2] = {global_min_and_max.first,
                                           global_min_and_max.second};
            adjust_interesting_range(interesting_range);

            const unsigned int root_mpi_rank = 0;
            unsigned int       iteration     = 0;

            do
              {
                int ierr = MPI_Bcast(interesting_range,
                                     2,
                                     MPI_DOUBLE,
                                     root_mpi_rank,
                                     mpi_communicator);
                AssertThrowMPI(ierr);

                if (interesting_range[0] == interesting_range[1])
                  return interesting_range[0];

                const double test_threshold =
                  (interesting_range[0] > 0 ?
                     std::sqrt(interesting_range[0] * interesting_range[1]) :
                     (interesting_range[0] + interesting_range[1]) / 2);

                // Count how many of our own elements would be above this
                // threshold. Use a 64bit result type if we are compiling with
                // 64bit indices to avoid an overflow when computing the sum
                // below.
                const types::global_cell_index my_count =
                  std::count_if(criteria.begin(),
                                criteria.end(),
                                [test_threshold](const double c) {
                                  return c > test_threshold;
                                });
                const types::global_cell_index total_count =
                  Utilities::MPI::sum(my_count, mpi_communicator);

                // now adjust the range. if we have too many cells, we take
                // the upper half of the previous range, otherwise the lower
                // half. if we have hit the right number, then set the range
                // to the exact value. non-root processes also update their
                // own interesting_range; their results are not significant,
                // though, since the values will be overwritten by the
                // MPI_Bcast from the root process in the next loop iteration.
                if (total_count > n_target_cells)
                  interesting_range[0] = test_threshold;
                else if (total_count < n_target_cells)
                  interesting_range[1] = test_threshold;
                else
                  interesting_range[0] = interesting_range[1] = test_threshold;

                // terminate the iteration after 25 go-arounds. this is
                // necessary because oftentimes error indicators on cells have
                // exactly the same value, and so there may not be a particular
                // value that cuts the indicators in such a way that we can
                // achieve the desired number of cells. using a maximal number
                // of iterations means that we terminate the iteration after a
                // fixed number N of steps if the indicators were perfectly
                // badly distributed, and we make at most a mistake of 1/2^N in
                // the number of cells flagged if indicators are perfectly
                // equidistributed.
                ++iteration;
                if (iteration == 25)
                  interesting_range[0] = interesting_range[1] = test_threshold;
              }
            while (true);

            Assert(false, ExcInternalError());
            return -1;
          }
        } // namespace RefineAndCoarsenFixedNumber
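
        // A minimal serial sketch of the same bisection idea (hypothetical
        // code, not part of the library), assuming strictly positive
        // indicators so the geometric split point applies:
        //
        //   std::vector<double> c = {0.1, 0.2, 0.4, 0.8};
        //   double lo = 0.99 * 0.1, hi = 1.01 * 0.8;
        //   for (unsigned int it = 0; it < 25; ++it)
        //     {
        //       const double m     = std::sqrt(lo * hi);
        //       const auto   above = std::count_if(
        //         c.begin(), c.end(), [m](double x) { return x > m; });
        //       if (above > 2)        // want n_target_cells == 2
        //         lo = m;
        //       else if (above < 2)
        //         hi = m;
        //       else
        //         break;
        //     }
        //
        // The parallel version above differs only in that the count is
        // summed over all processors and the root broadcasts the current
        // range.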



        namespace RefineAndCoarsenFixedFraction
        {
          template <typename number>
          number
          compute_threshold(const ::Vector<number> &         criteria,
                            const std::pair<double, double> &global_min_and_max,
                            const double                     target_error,
                            const MPI_Comm &                 mpi_communicator)
          {
            double interesting_range[2] = {global_min_and_max.first,
                                           global_min_and_max.second};
            adjust_interesting_range(interesting_range);

            const unsigned int root_mpi_rank = 0;
            unsigned int       iteration     = 0;

            do
              {
                int ierr = MPI_Bcast(interesting_range,
                                     2,
                                     MPI_DOUBLE,
                                     root_mpi_rank,
                                     mpi_communicator);
                AssertThrowMPI(ierr);

                if (interesting_range[0] == interesting_range[1])
                  {
                    // so we have found our threshold. since we adjust the
                    // range at the top of the function to be slightly larger
                    // than the actual extremes of the refinement criteria
                    // values, we can end up in a situation where the
                    // threshold is in fact larger than the maximal
                    // refinement indicator. in such cases, we get no
                    // refinement at all. thus, cap the threshold by the
                    // actual largest value
                    double final_threshold =
                      std::min(interesting_range[0], global_min_and_max.second);
                    ierr = MPI_Bcast(&final_threshold,
                                     1,
                                     MPI_DOUBLE,
                                     root_mpi_rank,
                                     mpi_communicator);
                    AssertThrowMPI(ierr);

                    return final_threshold;
                  }

                const double test_threshold =
                  (interesting_range[0] > 0 ?
                     std::sqrt(interesting_range[0] * interesting_range[1]) :
                     (interesting_range[0] + interesting_range[1]) / 2);

                // accumulate the error of our own elements above this
                // threshold, then sum the contributions of all processors
                // onto the root process
                double my_error = 0;
                for (unsigned int i = 0; i < criteria.size(); ++i)
                  if (criteria(i) > test_threshold)
                    my_error += criteria(i);

                double total_error = 0.;

                ierr = MPI_Reduce(&my_error,
                                  &total_error,
                                  1,
                                  MPI_DOUBLE,
                                  MPI_SUM,
                                  root_mpi_rank,
                                  mpi_communicator);
                AssertThrowMPI(ierr);

                // now adjust the range. if we have too much error, we take
                // the upper half of the previous range, otherwise the lower
                // half. if we have hit the right amount, then set the range
                // to the exact value. non-root processes also update their
                // own interesting_range; their results are not significant,
                // though, since the values will be overwritten by the
                // MPI_Bcast from the root process in the next loop iteration.
                if (total_error > target_error)
                  interesting_range[0] = test_threshold;
                else if (total_error < target_error)
                  interesting_range[1] = test_threshold;
                else
                  interesting_range[0] = interesting_range[1] = test_threshold;

                // terminate the iteration after 25 go-arounds. this is
                // necessary because oftentimes error indicators on cells
                // have exactly the same value, and so there may not be a
                // particular value that cuts the indicators in such a way
                // that we can achieve the desired fraction of the error.
                // using a max of 25 iterations means that we terminate the
                // iteration after 25 steps if the indicators were perfectly
                // badly distributed, and we make at most a mistake of
                // 1/2^25 in the error flagged if indicators are perfectly
                // equidistributed.
                ++iteration;
                if (iteration == 25)
                  interesting_range[0] = interesting_range[1] = test_threshold;
              }
            while (true);

            Assert(false, ExcInternalError());
            return -1;
          }
        } // namespace RefineAndCoarsenFixedFraction
      } // namespace GridRefinement
    }   // namespace distributed
  }     // namespace parallel
} // namespace internal



namespace parallel
{
  namespace distributed
  {
    namespace GridRefinement
    {
      template <int dim, typename Number, int spacedim>
      void
      refine_and_coarsen_fixed_number(
        parallel::distributed::Triangulation<dim, spacedim> &tria,
        const ::Vector<Number> &                             criteria,
        const double                   top_fraction_of_cells,
        const double                   bottom_fraction_of_cells,
        const types::global_cell_index max_n_cells)
      {
        Assert(criteria.size() == tria.n_active_cells(),
               ExcDimensionMismatch(criteria.size(), tria.n_active_cells()));
        Assert((top_fraction_of_cells >= 0) && (top_fraction_of_cells <= 1),
               ::GridRefinement::ExcInvalidParameterValue());
        Assert((bottom_fraction_of_cells >= 0) &&
                 (bottom_fraction_of_cells <= 1),
               ::GridRefinement::ExcInvalidParameterValue());
        Assert(top_fraction_of_cells + bottom_fraction_of_cells <= 1,
               ::GridRefinement::ExcInvalidParameterValue());
        Assert(criteria.is_non_negative(),
               ::GridRefinement::ExcNegativeCriteria());

        const std::pair<double, double> adjusted_fractions =
          ::GridRefinement::adjust_refine_and_coarsen_number_fraction<
            dim>(tria.n_global_active_cells(),
                 max_n_cells,
                 top_fraction_of_cells,
                 bottom_fraction_of_cells);

        // first extract from the vector of indicators the ones that correspond
        // to cells that we locally own
        Vector<Number> locally_owned_indicators(
          tria.n_locally_owned_active_cells());
        get_locally_owned_indicators(tria, criteria, locally_owned_indicators);

        MPI_Comm mpi_communicator = tria.get_communicator();

        // figure out the global max and min of the indicators. we don't need
        // them here, but it's a collective communication call
        const std::pair<Number, Number> global_min_and_max =
          ::internal::parallel::distributed::GridRefinement::
            compute_global_min_and_max_at_root(locally_owned_indicators,
                                               mpi_communicator);


        double top_threshold, bottom_threshold;
        top_threshold = ::internal::parallel::distributed::GridRefinement::
          RefineAndCoarsenFixedNumber::compute_threshold(
            locally_owned_indicators,
            global_min_and_max,
            static_cast<types::global_cell_index>(adjusted_fractions.first *
                                                  tria.n_global_active_cells()),
            mpi_communicator);

        // compute bottom threshold only if necessary. otherwise use the lowest
        // threshold possible
        if (adjusted_fractions.second > 0)
          bottom_threshold = ::internal::parallel::distributed::
            GridRefinement::RefineAndCoarsenFixedNumber::compute_threshold(
              locally_owned_indicators,
              global_min_and_max,
              static_cast<types::global_cell_index>(
                std::ceil((1. - adjusted_fractions.second) *
                          tria.n_global_active_cells())),
              mpi_communicator);
        else
          bottom_threshold = std::numeric_limits<Number>::lowest();

        // now refine the mesh
        mark_cells(tria, criteria, top_threshold, bottom_threshold);
      }
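
      // Typical usage from application code (a hedged sketch; `tria` and
      // `estimated_error_per_cell` stand for objects set up elsewhere):
      //
      //   parallel::distributed::GridRefinement::
      //     refine_and_coarsen_fixed_number(tria,
      //                                     estimated_error_per_cell,
      //                                     0.3,   // refine ~30% of cells
      //                                     0.03); // coarsen ~3% of cells
      //   tria.execute_coarsening_and_refinement();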


      template <int dim, typename Number, int spacedim>
      void
      refine_and_coarsen_fixed_fraction(
        parallel::distributed::Triangulation<dim, spacedim> &tria,
        const ::Vector<Number> &                             criteria,
        const double                top_fraction_of_error,
        const double                bottom_fraction_of_error,
        const VectorTools::NormType norm_type)
      {
        Assert(criteria.size() == tria.n_active_cells(),
               ExcDimensionMismatch(criteria.size(), tria.n_active_cells()));
        Assert((top_fraction_of_error >= 0) && (top_fraction_of_error <= 1),
               ::GridRefinement::ExcInvalidParameterValue());
        Assert((bottom_fraction_of_error >= 0) &&
                 (bottom_fraction_of_error <= 1),
               ::GridRefinement::ExcInvalidParameterValue());
        Assert(top_fraction_of_error + bottom_fraction_of_error <= 1,
               ::GridRefinement::ExcInvalidParameterValue());
        Assert(criteria.is_non_negative(),
               ::GridRefinement::ExcNegativeCriteria());

        switch (norm_type)
          {
            case VectorTools::L1_norm:
              // evaluate norms on subsets and compare them as
              //   c_0 + c_1 + ... < fraction * l1-norm(c)
              refine_and_coarsen_fixed_fraction_via_l1_norm(
                tria,
                criteria,
                top_fraction_of_error,
                bottom_fraction_of_error);
              break;

            case VectorTools::L2_norm:
              {
                // we do not want to evaluate norms on subsets as
                //   sqrt(c_0^2 + c_1^2 + ...) < fraction * l2-norm(c)
                // instead, take the square of both sides of the inequality
                // and evaluate
                //   c_0^2 + c_1^2 + ... < fraction^2 * l1-norm(c.c)
                // adjusting all parameters accordingly
                Vector<Number> criteria_squared(criteria.size());
                std::transform(criteria.begin(),
                               criteria.end(),
                               criteria_squared.begin(),
                               [](Number c) { return c * c; });

                refine_and_coarsen_fixed_fraction_via_l1_norm(
                  tria,
                  criteria_squared,
                  top_fraction_of_error * top_fraction_of_error,
                  bottom_fraction_of_error * bottom_fraction_of_error);
              }
              break;

            default:
              Assert(false, ExcNotImplemented());
              break;
          }
      }
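
      // Worked check of the squaring trick above (illustrative numbers
      // only): for c = (3, 4), l2-norm(c) = 5. Requesting the fraction 0.6
      // of the l2 norm,
      //   sqrt(c_0^2 + c_1^2) < 0.6 * 5   <=>   c_0^2 + c_1^2 < 9,
      // and 9 is exactly fraction^2 = 0.36 of l1-norm(c.c) = 9 + 16 = 25.
      // Hence the call above passes squared criteria and squared fractions.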
    } // namespace GridRefinement
  }   // namespace distributed
} // namespace parallel



// explicit instantiations
#  include "grid_refinement.inst"

DEAL_II_NAMESPACE_CLOSE

#endif