#include <deal.II/base/mpi_compute_index_owner_internal.h>
#include <deal.II/base/mpi_consensus_algorithms.h>

#include <deal.II/distributed/fully_distributed_tria.h>
#include <deal.II/distributed/repartitioning_policy_tools.h>
#include <deal.II/distributed/tria.h>

#include <deal.II/grid/cell_id_translator.h>

DEAL_II_NAMESPACE_OPEN

namespace RepartitioningPolicyTools
{
  namespace
  {
    // Add the index of the given cell and, recursively, of its parent as
    // long as the cell is its parent's first child. The resulting index set
    // marks, for every locally owned fine cell, the coarser ancestors whose
    // ownership should follow that first child.
    template <int dim, int spacedim>
    void
    add_indices_recursively_for_first_child_policy(
      const TriaIterator<CellAccessor<dim, spacedim>> &cell,
      const internal::CellIDTranslator<dim>           &cell_id_translator,
      IndexSet                                        &is_fine)
    {
      is_fine.add_index(cell_id_translator.translate(cell));

      if (cell->level() > 0 && cell->parent()->child(0) == cell)
        add_indices_recursively_for_first_child_policy(cell->parent(),
                                                       cell_id_translator,
                                                       is_fine);
    }
  } // namespace

  template <int dim, int spacedim>
  DefaultPolicy<dim, spacedim>::DefaultPolicy(const bool tighten)
    : tighten(tighten)
  {}



  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  DefaultPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
    if (tighten == false)
      return {}; // nothing to do

    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);

    if (tria == nullptr)
      return {}; // nothing to do, since serial triangulation

#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
    return {};
#else
    const auto comm = tria->get_communicator();

    // nothing to do if all processes already own active cells
    const unsigned int process_has_active_locally_owned_cells =
      tria->n_locally_owned_active_cells() > 0;
    const unsigned int n_processes_with_active_locally_owned_cells =
      Utilities::MPI::sum(process_has_active_locally_owned_cells, comm);

    if (n_processes_with_active_locally_owned_cells ==
        Utilities::MPI::n_mpi_processes(comm))
      return {};

    // otherwise compress the ranks: the exclusive prefix sum over the 0/1
    // flags gives each non-empty process its position among the processes
    // that own cells
    unsigned int offset = 0;

    const int ierr =
      MPI_Exscan(&process_has_active_locally_owned_cells,
                 &offset,
                 1,
                 Utilities::MPI::mpi_type_id_for_type<
                   decltype(process_has_active_locally_owned_cells)>,
                 MPI_SUM,
                 comm);
    AssertThrowMPI(ierr);

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());

    partition = offset;

    return partition;
#endif
  }

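  // A minimal usage sketch (names below are hypothetical, not part of this
  // file): the vector returned by a policy's partition() function assigns
  // each locally owned active cell the rank that should own it; an empty
  // return value signals that the current partition can be kept. The vector
  // is typically passed on to
  // TriangulationDescription::Utilities::create_description_from_triangulation()
  // to build a repartitioned parallel::fullydistributed::Triangulation:
  //
  // @code
  // const RepartitioningPolicyTools::DefaultPolicy<dim> policy(/*tighten=*/true);
  // const auto partition = policy.partition(tria);
  //
  // const auto description = TriangulationDescription::Utilities::
  //   create_description_from_triangulation(tria, partition);
  //
  // parallel::fullydistributed::Triangulation<dim> tria_pft(comm);
  // tria_pft.create_triangulation(description);
  // @endcode
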
  template <int dim, int spacedim>
  FirstChildPolicy<dim, spacedim>::FirstChildPolicy(
    const Triangulation<dim, spacedim> &tria_fine)
    : n_coarse_cells(tria_fine.n_global_coarse_cells())
    , n_global_levels(tria_fine.n_global_levels())
  {
    Assert(
      tria_fine.all_reference_cells_are_hyper_cube(),
      ExcMessage(
        "FirstChildPolicy is only working for pure hex meshes at the moment."));

    const internal::CellIDTranslator<dim> cell_id_translator(n_coarse_cells,
                                                             n_global_levels);
    is_level_partitions.set_size(cell_id_translator.size());

    for (const auto &cell : tria_fine.active_cell_iterators())
      if (cell->is_locally_owned())
        add_indices_recursively_for_first_child_policy(cell,
                                                       cell_id_translator,
                                                       is_level_partitions);
  }

  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  FirstChildPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_coarse_in) const
  {
    const auto communicator = tria_coarse_in.get_communicator();

    const internal::CellIDTranslator<dim> cell_id_translator(n_coarse_cells,
                                                             n_global_levels);

    // collect the coarse cells owned by this process
    IndexSet is_coarse(cell_id_translator.size());

    for (const auto &cell : tria_coarse_in.active_cell_iterators())
      if (cell->is_locally_owned())
        is_coarse.add_index(cell_id_translator.translate(cell));

    // for each of these cells, determine the rank that owns the
    // corresponding entry of is_level_partitions, i.e., the rank that owns
    // the first child on the fine mesh
    std::vector<unsigned int> owning_ranks_of_coarse_cells(
      is_coarse.n_elements());
    {
      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
        process(is_level_partitions,
                is_coarse,
                communicator,
                owning_ranks_of_coarse_cells,
                false);

      Utilities::MPI::ConsensusAlgorithms::Selector<
        std::vector<
          std::pair<types::global_cell_index, types::global_cell_index>>,
        std::vector<unsigned int>>
        consensus_algorithm;
      consensus_algorithm.run(process, communicator);
    }

    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_coarse_in);

    Assert(tria, ExcNotImplemented());

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());

    for (const auto &cell : tria_coarse_in.active_cell_iterators())
      if (cell->is_locally_owned())
        partition[cell->global_active_cell_index()] =
          owning_ranks_of_coarse_cells[is_coarse.index_within_set(
            cell_id_translator.translate(cell))];

    return partition;
  }

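  // A minimal sketch of how FirstChildPolicy is typically used (assuming the
  // overload of
  // MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence()
  // that accepts a repartitioning policy): every coarse cell is assigned to
  // the rank that owns its first child on the fine mesh, so that parents and
  // their first children live on the same process in a global-coarsening
  // multigrid hierarchy.
  //
  // @code
  // const auto coarse_grid_triangulations =
  //   MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence(
  //     tria_fine,
  //     RepartitioningPolicyTools::FirstChildPolicy<dim>(tria_fine),
  //     /*preserve_fine_triangulation=*/true,
  //     /*repartition_fine_triangulation=*/true);
  // @endcode
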
  template <int dim, int spacedim>
  MinimalGranularityPolicy<dim, spacedim>::MinimalGranularityPolicy(
    const unsigned int n_min_cells)
    : n_min_cells(n_min_cells)
  {}

  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  MinimalGranularityPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);

    Assert(tria, ExcNotImplemented());

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());

    // step 1) check if all processes have enough cells
    const unsigned int n_locally_owned_active_cells =
      std::count_if(tria_in.begin_active(),
                    typename Triangulation<dim, spacedim>::
                      active_cell_iterator(tria_in.end()),
                    [](const auto &cell) { return cell.is_locally_owned(); });

    const auto comm = tria->get_communicator();

    if (Utilities::MPI::min(n_locally_owned_active_cells, comm) >= n_min_cells)
      return {}; // all processes have enough cells

    // step 2) some processes own too few cells: repartition onto the largest
    // number of ranks for which each rank still owns at least n_min_cells
    // cells
    const types::global_cell_index n_global_active_cells =
      tria_in.n_global_active_cells();

    const unsigned int n_partitions =
      std::max<unsigned int>(1,
                             std::min<types::global_cell_index>(
                               n_global_active_cells / n_min_cells,
                               Utilities::MPI::n_mpi_processes(comm)));

    const unsigned int min_cells = n_global_active_cells / n_partitions;

    const auto convert = [&](const unsigned int i) {
      // assign each rank the same number of cells; the remainder is given to
      // the first ranks, one additional cell each
      const unsigned int n_partitions_with_additional_cell =
        n_global_active_cells - min_cells * n_partitions;

      const unsigned int rank =
        (i < (min_cells + 1) * n_partitions_with_additional_cell) ?
          (i / (min_cells + 1)) :
          ((i - n_partitions_with_additional_cell) / min_cells);

      AssertIndexRange(rank, n_partitions);

      return rank;
    };

    for (const auto i : partition.locally_owned_elements())
      partition[i] = convert(i);

    return partition;
  }

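  // Worked example (hypothetical numbers): with n_global_active_cells = 10,
  // n_min_cells = 3, and 4 MPI ranks, convert() sees
  // n_partitions = min(10 / 3, 4) = 3, min_cells = 10 / 3 = 3, and
  // n_partitions_with_additional_cell = 10 - 3 * 3 = 1. Cell indices 0..3
  // (the first (3 + 1) * 1 indices) map to rank 0, indices 4..6 to rank 1,
  // and indices 7..9 to rank 2; rank 3 is left without cells, and every rank
  // that does own cells owns at least n_min_cells of them.
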
  template <int dim, int spacedim>
  CellWeightPolicy<dim, spacedim>::CellWeightPolicy(
    const std::function<unsigned int(
      const typename Triangulation<dim, spacedim>::cell_iterator &,
      const CellStatus)> &weighting_function)
    : weighting_function(weighting_function)
  {}



  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  CellWeightPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
#ifndef DEAL_II_WITH_MPI
    (void)tria_in;
    return {};
#else
    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);

    Assert(tria, ExcNotImplemented());

    const unsigned int n_subdomains =
      Utilities::MPI::n_mpi_processes(tria->get_communicator());

    // determine the weight of each locally owned cell
    const auto partitioner =
      tria->global_active_cell_index_partitioner().lock();

    std::vector<unsigned int> weights(partitioner->locally_owned_size());

    for (const auto &cell : tria_in.active_cell_iterators())
      if (cell->is_locally_owned())
        weights[partitioner->global_to_local(
          cell->global_active_cell_index())] =
          weighting_function(cell, CellStatus::cell_will_persist);

    // determine the weight of all cells owned by this process
    std::uint64_t process_local_weight = 0;
    for (const auto &weight : weights)
      process_local_weight += weight;

    // determine the prefix sum of the process-local weights as well as the
    // total weight over all processes
    const auto [process_local_weight_offset, total_weight] =
      Utilities::MPI::partial_and_total_sum(process_local_weight,
                                            tria->get_communicator());

    // assign each cell the rank given by its position in the cumulative
    // weight distribution
    LinearAlgebra::distributed::Vector<double> partition(partitioner);

    for (std::uint64_t i = 0, weight = process_local_weight_offset;
         i < partition.locally_owned_size();
         weight += weights[i], ++i)
      partition.local_element(i) =
        static_cast<double>(weight * n_subdomains / total_weight);

    return partition;
#endif
  }

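  // A minimal usage sketch (the weighting below is hypothetical): cells that
  // are more expensive to work on can be given larger weights, so that the
  // resulting partition balances work rather than cell counts.
  //
  // @code
  // RepartitioningPolicyTools::CellWeightPolicy<dim> policy(
  //   [](const auto &cell, const auto /*status*/) -> unsigned int {
  //     return cell->material_id() == 1 ? 10 : 1;
  //   });
  //
  // const auto partition = policy.partition(tria);
  // @endcode
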
} // namespace RepartitioningPolicyTools



/*-------------- Explicit Instantiations -------------------------------*/
#include "repartitioning_policy_tools.inst"

DEAL_II_NAMESPACE_CLOSE