  template <int dim, int spacedim>
  void add_indices_recursively_for_first_child_policy(
    const TriaIterator<CellAccessor<dim, spacedim>> &cell,
    const internal::CellIDTranslator<dim> &cell_id_translator,
    IndexSet &is_fine)
  {
    is_fine.add_index(cell_id_translator.translate(cell));

    // Recurse to the parent as long as this cell is the parent's first child.
    if (cell->level() > 0 && cell->parent()->child(0) == cell)
      add_indices_recursively_for_first_child_policy(cell->parent(),
                                                     cell_id_translator,
                                                     is_fine);
  }
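  // Illustration (not from the original source): starting from a locally
  // owned active cell, the recursion marks the cell itself and then walks up
  // the refinement tree as long as each cell is the first (zeroth) child of
  // its parent. The rank owning such a "first child" chain thereby also
  // registers the coarser ancestor cells in the index set.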
  template <int dim, int spacedim>
  DefaultPolicy<dim, spacedim>::DefaultPolicy(const bool tighten)
    : tighten(tighten)
  {}
  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  DefaultPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
    if (tighten == false)
      return {}; // nothing to do

    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);
    if (tria == nullptr)
      return {}; // serial triangulation: nothing to do

#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
    return {};
#else
    const auto comm = tria->get_communicator();

    const unsigned int process_has_active_locally_owned_cells =
      tria->n_locally_owned_active_cells() > 0;
    const unsigned int n_processes_with_active_locally_owned_cells =
      Utilities::MPI::sum(process_has_active_locally_owned_cells, comm);

    // Nothing to do if every process already owns active cells.
    if (n_processes_with_active_locally_owned_cells ==
        Utilities::MPI::n_mpi_processes(comm))
      return {};

    // Exclusive scan: each process that owns cells is assigned, as its new
    // subdomain id, the number of non-empty processes preceding it.
    unsigned int offset = 0;

    const int ierr =
      MPI_Exscan(&process_has_active_locally_owned_cells,
                 &offset,
                 1,
                 Utilities::MPI::mpi_type_id_for_type<
                   decltype(process_has_active_locally_owned_cells)>,
                 MPI_SUM,
                 comm);
    AssertThrowMPI(ierr);

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());
    partition = offset;

    return partition;
#endif
  }
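  // Illustration (not from the original source): if the ownership flags on
  // four ranks are {1, 0, 1, 1}, the exclusive scan yields offsets
  // {0, 1, 1, 2}, so the processes that do own cells are renumbered to the
  // contiguous subdomains 0, 1, and 2, while the empty process receives no
  // cells.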
  template <int dim, int spacedim>
  FirstChildPolicy<dim, spacedim>::FirstChildPolicy(
    const Triangulation<dim, spacedim> &tria_fine)
    : n_coarse_cells(tria_fine.n_global_coarse_cells())
    , n_global_levels(tria_fine.n_global_levels())
  {
    Assert(
      tria_fine.all_reference_cells_are_hyper_cube(),
      ExcMessage(
        "FirstChildPolicy is only working for pure hex meshes at the moment."));

    const internal::CellIDTranslator<dim> cell_id_translator(n_coarse_cells,
                                                             n_global_levels);
    is_level_partitions.set_size(cell_id_translator.size());

    for (const auto &cell : tria_fine.active_cell_iterators())
      if (cell->is_locally_owned())
        add_indices_recursively_for_first_child_policy(cell,
                                                       cell_id_translator,
                                                       is_level_partitions);
  }
  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  FirstChildPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_coarse_in) const
  {
    const auto communicator = tria_coarse_in.get_communicator();

    const internal::CellIDTranslator<dim> cell_id_translator(n_coarse_cells,
                                                             n_global_levels);

    // Collect the translated IDs of the locally owned coarse cells.
    IndexSet is_coarse(cell_id_translator.size());
    for (const auto &cell : tria_coarse_in.active_cell_iterators())
      if (cell->is_locally_owned())
        is_coarse.add_index(cell_id_translator.translate(cell));

    // For each coarse cell, look up which rank has registered it in
    // is_level_partitions, i.e. which rank owns its first child.
    std::vector<unsigned int> owning_ranks_of_coarse_cells(
      is_coarse.n_elements());
    Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
      process(is_level_partitions,
              is_coarse,
              communicator,
              owning_ranks_of_coarse_cells,
              false);

    Utilities::MPI::ConsensusAlgorithms::Selector<
      std::vector<
        std::pair<types::global_cell_index, types::global_cell_index>>,
      std::vector<unsigned int>>
      consensus_algorithm;
    consensus_algorithm.run(process, communicator);

    // Write the rank of the new owner into the partition vector.
    LinearAlgebra::distributed::Vector<double> partition(
      tria_coarse_in.global_active_cell_index_partitioner().lock());

    for (const auto &cell : tria_coarse_in.active_cell_iterators())
      if (cell->is_locally_owned())
        partition[cell->global_active_cell_index()] =
          owning_ranks_of_coarse_cells[is_coarse.index_within_set(
            cell_id_translator.translate(cell))];

    return partition;
  }
  template <int dim, int spacedim>
  MinimalGranularityPolicy<dim, spacedim>::MinimalGranularityPolicy(
    const unsigned int n_min_cells)
    : n_min_cells(n_min_cells)
  {}
  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  MinimalGranularityPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
    // Step 1: check whether all processes already own enough cells.
    const unsigned int n_locally_owned_active_cells =
      std::count_if(tria_in.begin_active(),
                    typename Triangulation<dim, spacedim>::active_cell_iterator(
                      tria_in.end()),
                    [](const auto &cell) { return cell.is_locally_owned(); });

    const auto comm = tria_in.get_communicator();

    if (Utilities::MPI::min(n_locally_owned_active_cells, comm) >= n_min_cells)
      return {}; // every process already has at least n_min_cells cells

    // Step 2: distribute the cells among as many processes as possible while
    // giving each of them at least n_min_cells cells.
    const types::global_cell_index n_global_active_cells =
      tria_in.n_global_active_cells();

    const unsigned int n_partitions =
      std::max<unsigned int>(1,
                             std::min<types::global_cell_index>(
                               n_global_active_cells / n_min_cells,
                               Utilities::MPI::n_mpi_processes(comm)));

    const unsigned int min_cells = n_global_active_cells / n_partitions;
    // Map a global active cell index to its new owner: cells are distributed
    // as evenly as possible, with the first processes receiving one extra
    // cell each if the division leaves a remainder.
    const auto convert = [&](const unsigned int i) {
      const unsigned int n_partitions_with_additional_cell =
        n_global_active_cells - min_cells * n_partitions;

      const unsigned int rank =
        (i < (min_cells + 1) * n_partitions_with_additional_cell) ?
          (i / (min_cells + 1)) :
          ((i - n_partitions_with_additional_cell) / min_cells);
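      // Worked example (illustration, not from the original source): with
      // n_global_active_cells = 10 and n_partitions = 4 we get min_cells = 2
      // and n_partitions_with_additional_cell = 10 - 2 * 4 = 2. Cells 0..5
      // (i < 3 * 2) map to ranks i / 3, i.e. {0, 0, 0, 1, 1, 1}; cells 6..9
      // map to (i - 2) / 2, i.e. {2, 2, 3, 3}. The resulting cell counts per
      // rank are 3, 3, 2, 2, so they differ by at most one.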
      AssertIndexRange(rank, n_partitions);
      return rank;
    };

    LinearAlgebra::distributed::Vector<double> partition(
      tria_in.global_active_cell_index_partitioner().lock());
    for (const auto i : partition.locally_owned_elements())
      partition[i] = convert(i);

    return partition;
  }
  template <int dim, int spacedim>
  CellWeightPolicy<dim, spacedim>::CellWeightPolicy(
    const std::function<unsigned int(
      const typename Triangulation<dim, spacedim>::cell_iterator &,
      const typename Triangulation<dim, spacedim>::CellStatus)>
      &weighting_function)
    : weighting_function(weighting_function)
  {}
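  // Illustration (assumed usage, not part of the original source): a caller
  // could, for instance, weight boundary cells twice as much as interior
  // cells:
  //
  //   RepartitioningPolicyTools::CellWeightPolicy<dim> policy(
  //     [](const auto &cell, const auto /*status*/) -> unsigned int {
  //       return cell->at_boundary() ? 2000u : 1000u;
  //     });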
  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  CellWeightPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
#ifndef DEAL_II_WITH_MPI
    (void)tria_in;
    return {};
#else
    const auto partitioner =
      tria_in.global_active_cell_index_partitioner().lock();

    std::vector<unsigned int> weights(partitioner->locally_owned_size());

    const auto mpi_communicator = tria_in.get_communicator();
    const auto n_subdomains = Utilities::MPI::n_mpi_processes(mpi_communicator);

    // Evaluate the weighting function for each locally owned cell.
    for (const auto &cell : tria_in.active_cell_iterators())
      if (cell->is_locally_owned())
        weights[partitioner->global_to_local(
          cell->global_active_cell_index())] =
          weighting_function(cell,
                             Triangulation<dim, spacedim>::CELL_PERSIST);

    // Accumulate the weight of all cells owned by this process.
    std::uint64_t process_local_weight = 0;
    for (const auto &weight : weights)
      process_local_weight += weight;

    // Exclusive prefix sum of the per-process weights.
    std::uint64_t process_local_weight_offset = 0;

    int ierr = MPI_Exscan(
      &process_local_weight,
      &process_local_weight_offset,
      1,
      Utilities::MPI::mpi_type_id_for_type<decltype(process_local_weight)>,
      MPI_SUM,
      mpi_communicator);
    AssertThrowMPI(ierr);

    // The last process knows the total weight; make it available everywhere.
    std::uint64_t total_weight =
      process_local_weight_offset + process_local_weight;

    ierr = MPI_Bcast(&total_weight,
                     1,
                     Utilities::MPI::mpi_type_id_for_type<decltype(total_weight)>,
                     n_subdomains - 1,
                     mpi_communicator);
    AssertThrowMPI(ierr);

    // Assign each cell a rank proportional to the weight accumulated before it.
    LinearAlgebra::distributed::Vector<double> partition(partitioner);

    for (std::uint64_t i = 0, weight = process_local_weight_offset;
         i < partition.locally_owned_size();
         weight += weights[i], ++i)
      partition.local_element(i) =
        static_cast<double>(weight * n_subdomains / total_weight);

    return partition;
#endif
  }
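  // Worked example for the final assignment loop above (illustration, not
  // from the original source): with total_weight = 100 and n_subdomains = 4,
  // a cell preceded by an accumulated weight of 55 is assigned
  // 55 * 4 / 100 = 2, i.e. it moves to rank 2; cells with accumulated weights
  // 0..24 end up on rank 0, 25..49 on rank 1, and so on, so each rank
  // receives roughly the same total weight.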
#include "repartitioning_policy_tools.inst"
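// Usage sketch (not part of this file; assumes the overload of
// TriangulationDescription::Utilities::create_description_from_triangulation()
// that takes a partition vector, and parallel::fullydistributed::Triangulation):
//
//   RepartitioningPolicyTools::MinimalGranularityPolicy<dim> policy(64);
//   const auto partition = policy.partition(tria);
//
//   const auto description = TriangulationDescription::Utilities::
//     create_description_from_triangulation(tria, partition);
//
//   parallel::fullydistributed::Triangulation<dim> tria_repartitioned(comm);
//   tria_repartitioned.create_triangulation(description);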