Reference documentation for deal.II version 9.6.0
grid_refinement.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2010 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
17
18#include <deal.II/base/mpi.h>
21#include <deal.II/lac/vector.h>
22
23#ifdef DEAL_II_WITH_P4EST
24
25# include <deal.II/distributed/grid_refinement.h>
26
27# include <deal.II/grid/filtered_iterator.h>
28# include <deal.II/grid/grid_refinement.h>
29# include <deal.II/grid/tria.h>
30# include <deal.II/grid/tria_accessor.h>
31# include <deal.II/grid/tria_iterator.h>
32
33# include <algorithm>
34# include <functional>
35# include <limits>
36# include <numeric>
37
38
40DEAL_II_NAMESPACE_OPEN
41
42namespace
43{
44 template <int dim, int spacedim>
45 unsigned int
46 n_locally_owned_active_cells(const Triangulation<dim, spacedim> &tria)
47 {
48 if (const auto parallel_tria =
49 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
50 &tria))
51 return parallel_tria->n_locally_owned_active_cells();
52 else
53 return tria.n_active_cells();
54 }
55
56 template <typename number>
57 inline number
58 max_element(const ::Vector<number> &criteria)
59 {
60 return (criteria.size() > 0) ?
61 (*std::max_element(criteria.begin(), criteria.end())) :
62 std::numeric_limits<number>::min();
63 }
64
65
66
67 template <typename number>
68 inline number
69 min_element(const ::Vector<number> &criteria)
70 {
71 return (criteria.size() > 0) ?
72 (*std::min_element(criteria.begin(), criteria.end())) :
73 std::numeric_limits<number>::max();
74 }
75
76
77
83 template <typename number>
84 double
85 compute_global_sum(const ::Vector<number> &criteria,
86 const MPI_Comm mpi_communicator)
87 {
88 double my_sum =
89 std::accumulate(criteria.begin(),
90 criteria.end(),
91 /* do accumulation in the correct data type: */
92 number());
93
94 double result = 0;
95 // compute the sum on processor zero
96 const int ierr =
97 MPI_Reduce(&my_sum, &result, 1, MPI_DOUBLE, MPI_SUM, 0, mpi_communicator);
98 AssertThrowMPI(ierr);
99
100 // make sure only processor zero got something
101 if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0)
102 Assert(result == 0, ExcInternalError());
103
104 return result;
105 }
106
107
108
113 template <int dim, int spacedim, typename Number>
114 void
115 get_locally_owned_indicators(const ::Triangulation<dim, spacedim> &tria,
116 const ::Vector<Number> &criteria,
117 Vector<Number> &locally_owned_indicators)
118 {
119 Assert(locally_owned_indicators.size() ==
120 n_locally_owned_active_cells(tria),
121 ExcInternalError());
122
123 unsigned int owned_index = 0;
124 for (const auto &cell :
125 tria.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
126 {
127 locally_owned_indicators(owned_index) =
128 criteria(cell->active_cell_index());
129 ++owned_index;
130 }
131 Assert(owned_index == n_locally_owned_active_cells(tria),
132 ExcInternalError());
133 }
134
135
136 // we compute refinement thresholds by bisection of the interval spanned by
137 // the smallest and largest error indicator. this leads to a small problem:
138 // if, for example, we want to coarsen zero per cent of the cells, then we
139 // need to pick a threshold equal to the smallest indicator, but of course
140 // the bisection algorithm can never find a threshold equal to one of the
141 // end points of the interval. So we slightly increase the interval before
142 // we even start
143 void
144 adjust_interesting_range(double (&interesting_range)[2])
145 {
146 Assert(interesting_range[0] <= interesting_range[1], ExcInternalError());
147
148 if (interesting_range[0] > 0)
149 {
150 // In this case, we calculate the first interval split point `m` in the
151 // `compute_threshold` functions in the optimized way: We exploit that
152 // the logarithms of all criteria are more uniformly distributed than
153 // their actual values, i.e. m=sqrt(b*e).
154 //
155 // Both factors will modify the split point only slightly by a factor of
156 // sqrt(1.01*0.99) = sqrt(0.9999) ~ 0.99995.
157 interesting_range[0] *= 0.99;
158 interesting_range[1] *= 1.01;
159 }
160 else
161 {
162 // In all other cases, we begin with the arithmetic mean as the
163 // standard interval split point, i.e. m=(b+e)/2.
164 //
165 // Both increments will add up to zero when calculating the initial
166 // split point in the `compute_threshold` functions.
167 const double difference =
168 std::abs(interesting_range[1] - interesting_range[0]);
169 interesting_range[0] -= 0.01 * difference;
170 interesting_range[1] += 0.01 * difference;
171 }
172 }
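 // Worked example (editorial illustration, not part of this file): for a
 // strictly positive range [b, e] = [0.2, 3.0], the adjustment above yields
 // [0.198, 3.03], and the first split point in the `compute_threshold`
 // functions becomes m = sqrt(0.198 * 3.03) ~ 0.7746, only a factor of
 // sqrt(0.99 * 1.01) ~ 0.99995 away from sqrt(0.2 * 3.0). For a range that
 // starts at zero, e.g. [0.0, 1.0], the widened range is [-0.01, 1.01] and
 // the arithmetic-mean split point remains (b + e) / 2 = 0.5.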
173
174
175
181 template <int dim, int spacedim, typename Number>
182 void
183 mark_cells(::Triangulation<dim, spacedim> &tria,
184 const ::Vector<Number> &criteria,
185 const double top_threshold,
186 const double bottom_threshold)
187 {
188 ::GridRefinement::refine(tria, criteria, top_threshold);
189 ::GridRefinement::coarsen(tria, criteria, bottom_threshold);
190
191 // as a final good measure, delete all flags again from cells that we don't
192 // locally own
193 for (const auto &cell : tria.active_cell_iterators())
194 if (cell->subdomain_id() != tria.locally_owned_subdomain())
195 {
196 cell->clear_refine_flag();
197 cell->clear_coarsen_flag();
198 }
199 }
200
201
202
210 template <int dim, int spacedim, typename Number>
211 void
212 refine_and_coarsen_fixed_fraction_via_l1_norm(
213 ::Triangulation<dim, spacedim> &tria,
214 const ::Vector<Number> &criteria,
215 const double top_fraction_of_error,
216 const double bottom_fraction_of_error)
217 {
218 // first extract from the vector of indicators the ones that correspond
219 // to cells that we locally own
220 Vector<Number> locally_owned_indicators(n_locally_owned_active_cells(tria));
221 get_locally_owned_indicators(tria, criteria, locally_owned_indicators);
222
223 MPI_Comm mpi_communicator = tria.get_communicator();
224
225 // figure out the global max and min of the indicators; these bounds seed
226 // the bisection below, and computing them is a collective communication call
227 const std::pair<double, double> global_min_and_max =
228 ::internal::parallel::distributed::GridRefinement::
229 compute_global_min_and_max_at_root(locally_owned_indicators,
230 mpi_communicator);
231
232 const double total_error =
233 compute_global_sum(locally_owned_indicators, mpi_communicator);
234
235 double top_target_error = top_fraction_of_error * total_error,
236 bottom_target_error = (1. - bottom_fraction_of_error) * total_error;
237
238 double top_threshold, bottom_threshold;
239 top_threshold = ::internal::parallel::distributed::GridRefinement::
240 RefineAndCoarsenFixedFraction::compute_threshold(locally_owned_indicators,
241 global_min_and_max,
242 top_target_error,
243 mpi_communicator);
244
245 // compute bottom threshold only if necessary. otherwise use the lowest
246 // threshold possible
247 if (bottom_fraction_of_error > 0)
248 bottom_threshold = ::internal::parallel::distributed::
249 GridRefinement::RefineAndCoarsenFixedFraction::compute_threshold(
250 locally_owned_indicators,
251 global_min_and_max,
252 bottom_target_error,
253 mpi_communicator);
254 else
255 bottom_threshold = std::numeric_limits<Number>::lowest();
256
257 // now refine the mesh
258 mark_cells(tria, criteria, top_threshold, bottom_threshold);
259 }
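 // Worked example (editorial illustration, not part of this file): with
 // total_error = 10, top_fraction_of_error = 0.3 and
 // bottom_fraction_of_error = 0.1, the targets above become
 // top_target_error = 0.3 * 10 = 3 (the cells flagged for refinement carry
 // about 30% of the total error) and bottom_target_error = (1 - 0.1) * 10 = 9
 // (the cells above bottom_threshold carry about 90% of the error, so the
 // cells below it, carrying the remaining 10%, are flagged for coarsening).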
260} // namespace
261
262
263
264namespace internal
265{
266 namespace parallel
267 {
268 namespace distributed
269 {
270 namespace GridRefinement
271 {
272 template <typename number>
273 std::pair<number, number>
274 compute_global_min_and_max_at_root(
275 const ::Vector<number> &criteria,
276 const MPI_Comm mpi_communicator)
277 {
278 // we'd like to compute the global max and min from the local ones in
279 // one MPI communication. we can do that by taking the elementwise
280 // minimum of the local min and the negative maximum over all
281 // processors
282
283 const double local_min = min_element(criteria),
284 local_max = max_element(criteria);
285 double comp[2] = {local_min, -local_max};
286 double result[2] = {0, 0};
287
288 // compute the minimum on processor zero
289 const int ierr = MPI_Reduce(
290 comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, mpi_communicator);
291 AssertThrowMPI(ierr);
292
293 // make sure only processor zero got something
294 if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0)
295 Assert((result[0] == 0) && (result[1] == 0), ExcInternalError());
296
297 return std::make_pair(result[0], -result[1]);
298 }
299
300
301
302 namespace RefineAndCoarsenFixedNumber
303 {
304 template <typename number>
305 number
306 compute_threshold(const ::Vector<number> &criteria,
307 const std::pair<double, double> &global_min_and_max,
308 const types::global_cell_index n_target_cells,
309 const MPI_Comm mpi_communicator)
310 {
311 double interesting_range[2] = {global_min_and_max.first,
312 global_min_and_max.second};
313 adjust_interesting_range(interesting_range);
314
315 const unsigned int root_mpi_rank = 0;
316 unsigned int iteration = 0;
317
318 do
319 {
320 int ierr = MPI_Bcast(interesting_range,
321 2,
322 MPI_DOUBLE,
323 root_mpi_rank,
324 mpi_communicator);
325 AssertThrowMPI(ierr);
326
327 if (interesting_range[0] == interesting_range[1])
328 return interesting_range[0];
329
330 const double test_threshold =
331 (interesting_range[0] > 0 ?
332 std::sqrt(interesting_range[0] * interesting_range[1]) :
333 (interesting_range[0] + interesting_range[1]) / 2);
334
335 // Count how many of our own elements would be above this
336 // threshold. Use a 64bit result type if we are compiling with
337 // 64bit indices to avoid an overflow when computing the sum
338 // below.
339 const types::global_cell_index my_count =
340 std::count_if(criteria.begin(),
341 criteria.end(),
342 [test_threshold](const double c) {
343 return c > test_threshold;
344 });
345 const types::global_cell_index total_count =
346 Utilities::MPI::sum(my_count, mpi_communicator);
347
348 // now adjust the range. if we have too many cells, we take the
349 // upper half of the previous range, otherwise the lower half.
350 // if we have hit the right number, then set the range to the
351 // exact value. non-root nodes also update their own
352 // interesting_range, however their results are not significant
353 // since the values will be overwritten by MPI_Bcast from the
354 // root node in the next iteration.
355 if (total_count > n_target_cells)
356 interesting_range[0] = test_threshold;
357 else if (total_count < n_target_cells)
358 interesting_range[1] = test_threshold;
359 else
360 interesting_range[0] = interesting_range[1] = test_threshold;
361
362 // terminate the iteration after 25 go-arounds. this is
363 // necessary because oftentimes error indicators on cells have
364 // exactly the same value, and so there may not be a particular
365 // value that cuts the indicators in such a way that we can
366 // achieve the desired number of cells. using a maximal number
367 // of iterations means that we terminate the iteration after a
368 // fixed number N of steps if the indicators were perfectly
369 // badly distributed, and we make at most a mistake of 1/2^N in
370 // the number of cells flagged if indicators are perfectly
371 // equidistributed
372 ++iteration;
373 if (iteration == 25)
374 interesting_range[0] = interesting_range[1] = test_threshold;
375 }
376 while (true);
377
378 DEAL_II_ASSERT_UNREACHABLE();
379 return -1;
380 }
381 } // namespace RefineAndCoarsenFixedNumber
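        // Editorial sketch (not part of this file; names are illustrative):
        // the same bisection as RefineAndCoarsenFixedNumber::compute_threshold()
        // above, but without the MPI communication, to make the control flow
        // easier to follow. It returns a threshold such that roughly
        // n_target_cells entries of `criteria` lie above it.
        template <typename number>
        number
        serial_fixed_number_threshold_sketch(
          const Vector<number>               &criteria,
          const types::global_cell_index      n_target_cells)
        {
          double range[2] = {min_element(criteria), max_element(criteria)};
          adjust_interesting_range(range);

          // bisect for at most 25 iterations; indicators with identical
          // values may make an exact split impossible
          for (unsigned int iteration = 0; iteration < 25; ++iteration)
            {
              if (range[0] == range[1])
                break;

              // geometric mean for positive ranges, arithmetic mean otherwise
              const double test_threshold =
                (range[0] > 0 ? std::sqrt(range[0] * range[1]) :
                                (range[0] + range[1]) / 2);

              // count the cells whose indicator exceeds the tentative threshold
              const types::global_cell_index count =
                std::count_if(criteria.begin(),
                              criteria.end(),
                              [test_threshold](const double c) {
                                return c > test_threshold;
                              });

              if (count > n_target_cells)
                range[0] = test_threshold; // too many cells: raise the threshold
              else if (count < n_target_cells)
                range[1] = test_threshold; // too few cells: lower the threshold
              else
                range[0] = range[1] = test_threshold; // exact match
            }

          return range[0];
        }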
382
383
384
385 namespace RefineAndCoarsenFixedFraction
386 {
387 template <typename number>
388 number
389 compute_threshold(const ::Vector<number> &criteria,
390 const std::pair<double, double> &global_min_and_max,
391 const double target_error,
392 const MPI_Comm mpi_communicator)
393 {
394 double interesting_range[2] = {global_min_and_max.first,
395 global_min_and_max.second};
396 adjust_interesting_range(interesting_range);
397
398 const unsigned int root_mpi_rank = 0;
399 unsigned int iteration = 0;
400
401 do
402 {
403 int ierr = MPI_Bcast(interesting_range,
404 2,
405 MPI_DOUBLE,
406 root_mpi_rank,
407 mpi_communicator);
408 AssertThrowMPI(ierr);
409
410 if (interesting_range[0] == interesting_range[1])
411 {
412 // so we have found our threshold. since we adjust the range
413 // at the top of the function to be slightly larger than the
414 // actual extremes of the refinement criteria values, we can
415 // end up in a situation where the threshold is in fact
416 // larger than the maximal refinement indicator. in such
417 // cases, we get no refinement at all. thus, cap the
418 // threshold by the actual largest value
419 double final_threshold =
420 std::min(interesting_range[0], global_min_and_max.second);
421 ierr = MPI_Bcast(&final_threshold,
422 1,
423 MPI_DOUBLE,
424 root_mpi_rank,
425 mpi_communicator);
426 AssertThrowMPI(ierr);
427
428 return final_threshold;
429 }
430
431 const double test_threshold =
432 (interesting_range[0] > 0 ?
433 std::sqrt(interesting_range[0] * interesting_range[1]) :
434 (interesting_range[0] + interesting_range[1]) / 2);
435
436 // accumulate the error of our own elements above this
437 // threshold, then sum the contributions from all processes
438 double my_error = 0;
439 for (unsigned int i = 0; i < criteria.size(); ++i)
440 if (criteria(i) > test_threshold)
441 my_error += criteria(i);
442
443 double total_error = 0.;
444
445 ierr = MPI_Reduce(&my_error,
446 &total_error,
447 1,
448 MPI_DOUBLE,
449 MPI_SUM,
450 root_mpi_rank,
451 mpi_communicator);
452 AssertThrowMPI(ierr);
453
454 // now adjust the range. if the error above the test threshold is too
455 // large, we take the upper half of the previous range, otherwise the
456 // lower half. if we have hit the target exactly, then set the range
457 // to the exact value. non-root nodes also update their own
458 // interesting_range, however their results are not significant
459 // since the values will be overwritten by MPI_Bcast from the
460 // root node in the next iteration.
461 if (total_error > target_error)
462 interesting_range[0] = test_threshold;
463 else if (total_error < target_error)
464 interesting_range[1] = test_threshold;
465 else
466 interesting_range[0] = interesting_range[1] = test_threshold;
467
468 // terminate the iteration after 25 go-arounds. this is
469 // necessary because oftentimes error indicators on cells
470 // have exactly the same value, and so there may not be a
471 // particular value that cuts the indicators in such a way
472 // that we can achieve the desired number of cells. using a
473 // max of 25 iterations means that we terminate the
474 // iteration after 25 steps if the indicators were perfectly
475 // badly distributed, and we make at most a mistake of
476 // 1/2^25 in the number of cells flagged if indicators are
477 // perfectly equidistributed
478 ++iteration;
479 if (iteration == 25)
480 interesting_range[0] = interesting_range[1] = test_threshold;
481 }
482 while (true);
483
484 DEAL_II_ASSERT_UNREACHABLE();
485 return -1;
486 }
487 } // namespace RefineAndCoarsenFixedFraction
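      // Note (editorial illustration, not part of this file): the two
      // compute_threshold() variants above share the same bisection loop and
      // differ only in the quantity compared against the target in each
      // iteration. RefineAndCoarsenFixedNumber counts the cells whose
      // indicator exceeds the tentative threshold, while
      // RefineAndCoarsenFixedFraction sums the indicators of those cells.
      // For criteria = (4, 3, 2, 1) and a tentative threshold of 2.5, the
      // first variant measures a count of 2, the second an error mass of
      // 4 + 3 = 7 out of a total of 10.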
488 } // namespace GridRefinement
489 } // namespace distributed
490 } // namespace parallel
491} // namespace internal
492
493
494
495namespace parallel
496{
497 namespace distributed
498 {
499 namespace GridRefinement
500 {
501 template <int dim, typename Number, int spacedim>
502 void
503 refine_and_coarsen_fixed_number(
504 ::Triangulation<dim, spacedim> &tria,
505 const ::Vector<Number> &criteria,
506 const double top_fraction_of_cells,
507 const double bottom_fraction_of_cells,
508 const types::global_cell_index max_n_cells)
509 {
510 Assert(criteria.size() == tria.n_active_cells(),
511 ExcDimensionMismatch(criteria.size(), tria.n_active_cells()));
512 Assert((top_fraction_of_cells >= 0) && (top_fraction_of_cells <= 1),
513 ::GridRefinement::ExcInvalidParameterValue());
514 Assert((bottom_fraction_of_cells >= 0) &&
515 (bottom_fraction_of_cells <= 1),
516 ::GridRefinement::ExcInvalidParameterValue());
517 Assert(top_fraction_of_cells + bottom_fraction_of_cells <= 1,
518 ::GridRefinement::ExcInvalidParameterValue());
519 Assert(criteria.is_non_negative(),
520 ::GridRefinement::ExcNegativeCriteria());
521
522 const std::pair<double, double> adjusted_fractions =
523 ::GridRefinement::adjust_refine_and_coarsen_number_fraction<
524 dim>(tria.n_global_active_cells(),
525 max_n_cells,
526 top_fraction_of_cells,
527 bottom_fraction_of_cells);
528
529 // first extract from the vector of indicators the ones that correspond
530 // to cells that we locally own
531 Vector<Number> locally_owned_indicators(
532 n_locally_owned_active_cells(tria));
533 get_locally_owned_indicators(tria, criteria, locally_owned_indicators);
534
535 MPI_Comm mpi_communicator = tria.get_communicator();
536
537 // figure out the global max and min of the indicators; these bounds seed
538 // the bisection below, and computing them is a collective communication call
539 const std::pair<Number, Number> global_min_and_max =
540 ::internal::parallel::distributed::GridRefinement::
541 compute_global_min_and_max_at_root(locally_owned_indicators,
542 mpi_communicator);
543
544
545 double top_threshold, bottom_threshold;
546 top_threshold = ::internal::parallel::distributed::
547 GridRefinement::RefineAndCoarsenFixedNumber::compute_threshold(
548 locally_owned_indicators,
549 global_min_and_max,
550 static_cast<types::global_cell_index>(adjusted_fractions.first *
551 tria.n_global_active_cells()),
552 mpi_communicator);
553
554 // compute bottom threshold only if necessary. otherwise use the lowest
555 // threshold possible
556 if (adjusted_fractions.second > 0)
557 bottom_threshold = ::internal::parallel::distributed::
558 GridRefinement::RefineAndCoarsenFixedNumber::compute_threshold(
559 locally_owned_indicators,
560 global_min_and_max,
561 static_cast<types::global_cell_index>(
562 std::ceil((1. - adjusted_fractions.second) *
563 tria.n_global_active_cells())),
564 mpi_communicator);
565 else
566 bottom_threshold = std::numeric_limits<Number>::lowest();
567
568 // now refine the mesh
569 mark_cells(tria, criteria, top_threshold, bottom_threshold);
570 }
571
572
573
574 template <int dim, typename Number, int spacedim>
575 void
576 refine_and_coarsen_fixed_fraction(
577 ::Triangulation<dim, spacedim> &tria,
578 const ::Vector<Number> &criteria,
579 const double top_fraction_of_error,
580 const double bottom_fraction_of_error,
581 const VectorTools::NormType norm_type)
582 {
583 Assert(criteria.size() == tria.n_active_cells(),
584 ExcDimensionMismatch(criteria.size(), tria.n_active_cells()));
585 Assert((top_fraction_of_error >= 0) && (top_fraction_of_error <= 1),
586 ::GridRefinement::ExcInvalidParameterValue());
587 Assert((bottom_fraction_of_error >= 0) &&
588 (bottom_fraction_of_error <= 1),
589 ::GridRefinement::ExcInvalidParameterValue());
590 Assert(top_fraction_of_error + bottom_fraction_of_error <= 1,
591 ::GridRefinement::ExcInvalidParameterValue());
592 Assert(criteria.is_non_negative(),
593 ::GridRefinement::ExcNegativeCriteria());
594
595 switch (norm_type)
596 {
597 case VectorTools::L1_norm:
598 // evaluate norms on subsets and compare them as
599 // c_0 + c_1 + ... < fraction * l1-norm(c)
600 refine_and_coarsen_fixed_fraction_via_l1_norm(
601 tria,
602 criteria,
603 top_fraction_of_error,
604 bottom_fraction_of_error);
605 break;
606
607 case VectorTools::L2_norm:
608 {
609 // we do not want to evaluate norms on subsets as:
610 // sqrt(c_0^2 + c_1^2 + ...) < fraction * l2-norm(c)
611 // instead take the square of both sides of the equation
612 // and evaluate:
613 // c_0^2 + c_1^2 + ... < fraction^2 * l1-norm(c.c)
614 // we adjust all parameters accordingly
615 Vector<Number> criteria_squared(criteria.size());
616 std::transform(criteria.begin(),
617 criteria.end(),
618 criteria_squared.begin(),
619 [](Number c) { return c * c; });
620
621 refine_and_coarsen_fixed_fraction_via_l1_norm(
622 tria,
623 criteria_squared,
624 top_fraction_of_error * top_fraction_of_error,
625 bottom_fraction_of_error * bottom_fraction_of_error);
626 }
627 break;
628
629 default:
630 DEAL_II_NOT_IMPLEMENTED();
631 break;
632 }
633 }
634 } // namespace GridRefinement
635 } // namespace distributed
636} // namespace parallel
637
638
639// explicit instantiations
640# include "grid_refinement.inst"
641
643DEAL_II_NAMESPACE_CLOSE
644#endif
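The following usage sketch is not part of grid_refinement.cc; it shows how the two public functions defined above are typically called from application code, assuming deal.II was configured with p4est and that the per-cell error indicators have already been computed (for instance with KellyErrorEstimator). The function and variable names (refine_mesh, estimated_error_per_cell) are illustrative only.

#include <deal.II/distributed/grid_refinement.h>
#include <deal.II/distributed/tria.h>

#include <deal.II/lac/vector.h>

using namespace dealii;

template <int dim>
void
refine_mesh(parallel::distributed::Triangulation<dim> &triangulation,
            const Vector<float>                       &estimated_error_per_cell)
{
  // Flag roughly the 30% of cells with the largest indicators for refinement
  // and the 3% with the smallest indicators for coarsening. Only locally
  // owned cells end up carrying flags; see mark_cells() above.
  parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
    triangulation, estimated_error_per_cell, 0.30, 0.03);

  // Alternatively, choose thresholds so that the flagged cells account for
  // fixed fractions of the total error (L1 norm by default):
  //
  //   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
  //     triangulation, estimated_error_per_cell, 0.30, 0.03);

  // The calls above only set flags; the mesh is actually changed here.
  triangulation.execute_coarsening_and_refinement();
}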