#ifdef DEAL_II_WITH_MPI

#ifdef DEAL_II_WITH_METIS

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
#  include <zoltan_cpp.h>
                  const std::vector<unsigned int> &cell_weights,
                  const unsigned int               n_partitions,
                  std::vector<unsigned int>       &partition_indices)

#ifndef DEAL_II_WITH_METIS
      (void)sparsity_pattern;
      (void)partition_indices;
      AssertThrow(false, ExcMETISNotInstalled());
#else

      // Generate the data structures for METIS: the sparsity pattern is
      // converted into compressed row storage, which is exactly the graph
      // format METIS expects.
      idx_t n = static_cast<signed int>(sparsity_pattern.n_rows());

      // use default options for METIS
      idx_t options[METIS_NOPTIONS];
      METIS_SetDefaultOptions(options);

      std::vector<idx_t> int_rowstart(1);
      int_rowstart.reserve(sparsity_pattern.n_rows() + 1);
      std::vector<idx_t> int_colnums;
      for (SparsityPattern::size_type row = 0; row < sparsity_pattern.n_rows();
           ++row)
        {
          for (SparsityPattern::iterator col = sparsity_pattern.begin(row);
               col < sparsity_pattern.end(row);
               ++col)
            int_colnums.push_back(col->column());
          int_rowstart.push_back(int_colnums.size());
        }

      std::vector<idx_t> int_partition_indices(sparsity_pattern.n_rows());

      // copy the optional cell weights into METIS' integer type
      std::vector<idx_t> int_cell_weights;
      if (cell_weights.size() > 0)
        {
          Assert(cell_weights.size() == sparsity_pattern.n_rows(),
                 ExcDimensionMismatch(cell_weights.size(),
                                      sparsity_pattern.n_rows()));
          int_cell_weights.resize(cell_weights.size());
          std::copy(cell_weights.begin(),
                    cell_weights.end(),
                    int_cell_weights.begin());
        }
      // set a pointer to the optional cell weighting
      idx_t *const p_int_cell_weights =
        (cell_weights.size() > 0 ? int_cell_weights.data() : nullptr);

      // Use METIS' error code and partition the graph either by recursive
      // bisection ...
      int ierr;
      ierr = METIS_PartGraphRecursive(&n,
                                      /* ... */
                                      int_partition_indices.data());
      // ... or by multilevel k-way partitioning.
      ierr = METIS_PartGraphKway(&n,
                                 /* ... */
                                 int_partition_indices.data());

      // now copy back the generated indices into the output array
      std::copy(int_partition_indices.begin(),
                int_partition_indices.end(),
                partition_indices.begin());
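
      // --------------------------------------------------------------------
      // Illustration only (not part of the library source): a minimal sketch
      // of how CSR arrays such as int_rowstart/int_colnums above map onto
      // METIS' C API, assuming <metis.h> is available as included above. The
      // function name example_metis_kway() and the 4-vertex path graph are
      // hypothetical and chosen purely for this example.
      void example_metis_kway()
      {
        std::vector<idx_t> xadj   = {0, 1, 3, 5, 6};    // CSR row pointers
        std::vector<idx_t> adjncy = {1, 0, 2, 1, 3, 2}; // CSR column indices

        idx_t              n = 4, ncon = 1, nparts = 2, objval = 0;
        std::vector<idx_t> part(n);

        idx_t options[METIS_NOPTIONS];
        METIS_SetDefaultOptions(options);

        // k-way partitioning into 'nparts' parts; vertex and edge weights
        // are omitted (nullptr), as in the unweighted case above
        const int ierr = METIS_PartGraphKway(&n,
                                             &ncon,
                                             xadj.data(),
                                             adjncy.data(),
                                             nullptr,
                                             nullptr,
                                             nullptr,
                                             &nparts,
                                             nullptr,
                                             nullptr,
                                             options,
                                             &objval,
                                             part.data());
        (void)ierr; // METIS_OK on success; part[i] is the part of vertex i
      }
      // --------------------------------------------------------------------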
#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
    // Zoltan query callback: the number of graph vertices, i.e. the number
    // of rows of the sparsity pattern handed over as user data.
    int get_number_of_objects(void *data, int *ierr)

    // Zoltan query callback: fill the arrays of global and local vertex ids.
    void get_object_list(void *data,
                         ZOLTAN_ID_PTR globalID,
                         ZOLTAN_ID_PTR localID,
      auto n_dofs = graph->n_rows();

      for (unsigned int i = 0; i < n_dofs; ++i)

    // Zoltan query callback: the number of edges per vertex; the diagonal
    // entry of a row is not an edge, hence the "- 1".
    void get_num_edges_list(void *data,
                            ZOLTAN_ID_PTR globalID,
      for (int i = 0; i < num_obj; ++i)
        numEdges[i] = graph->row_length(globalID[i]) - 1;

    // Zoltan query callback: fill the adjacency (edge) lists, again skipping
    // the diagonal entry of each row.
    void get_edge_list(void *data,
                       ZOLTAN_ID_PTR nborGID,
      ZOLTAN_ID_PTR nextNborGID  = nborGID;
      int          *nextNborProc = nborProc;

        i < static_cast<SparsityPattern::size_type>(num_obj);

          if (i != col->column())
            *nextNborGID++ = col->column();
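
    // ----------------------------------------------------------------------
    // Illustration only (not part of the library source): Zoltan hands the
    // user-data pointer supplied at registration time back to every query
    // callback. A minimal sketch of the shape of the first callback above,
    // assuming the user data is the SparsityPattern being partitioned; the
    // name example_num_obj() is hypothetical.
    int example_num_obj(void *data, int *ierr)
    {
      const SparsityPattern *graph =
        static_cast<const SparsityPattern *>(data);

      *ierr = ZOLTAN_OK;
      // one Zoltan "object" per matrix row, i.e. per graph vertex
      return static_cast<int>(graph->n_rows());
    }
    // ----------------------------------------------------------------------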
                   const std::vector<unsigned int> &cell_weights,
                   const unsigned int               n_partitions,
                   std::vector<unsigned int>       &partition_indices)

#ifndef DEAL_II_TRILINOS_WITH_ZOLTAN
      (void)sparsity_pattern;
      (void)partition_indices;
      AssertThrow(false, ExcZOLTANNotInstalled());
#else
      Assert(
        cell_weights.empty(),
        ExcMessage(
          "The cell weighting functionality for Zoltan has not yet been implemented."));

      // Set up a Zoltan instance on a communicator that contains only this
      // process, since the whole graph is partitioned locally.
      std::unique_ptr<Zoltan> zz = std::make_unique<Zoltan>(MPI_COMM_SELF);

      zz->Set_Param("DEBUG_LEVEL", "0");
      zz->Set_Param("NUM_LOCAL_PARTS", std::to_string(n_partitions));
      zz->Set_Param("PHG_EDGE_SIZE_THRESHOLD", "0.5");

      // Register the graph query callbacks defined above; 'graph' points to
      // the sparsity pattern and is handed back to every callback.
      zz->Set_Num_Obj_Fn(get_number_of_objects, &graph);
      zz->Set_Obj_List_Fn(get_object_list, &graph);
      zz->Set_Num_Edges_Multi_Fn(get_num_edges_list, &graph);
      zz->Set_Edge_List_Multi_Fn(get_edge_list, &graph);

      int num_gid_entries = 1;
      int num_lid_entries = 1;

      ZOLTAN_ID_PTR import_global_ids = nullptr;
      ZOLTAN_ID_PTR import_local_ids  = nullptr;
      int          *import_procs      = nullptr;
      int          *import_to_part    = nullptr;

      ZOLTAN_ID_PTR export_global_ids = nullptr;
      ZOLTAN_ID_PTR export_local_ids  = nullptr;
      int          *export_procs      = nullptr;
      int          *export_to_part    = nullptr;

      const int rc = zz->LB_Partition(changes,
                                      /* ... */);

      // Every row starts out in part 0; the export list returned by Zoltan
      // records which rows are moved into which part.
      std::fill(partition_indices.begin(), partition_indices.end(), 0);

      for (int i = 0; i < num_export; ++i)
        partition_indices[export_local_ids[i]] = export_to_part[i];
              const unsigned int         n_partitions,
              std::vector<unsigned int> &partition_indices,

      // the unweighted variant forwards to the weighted one with an empty
      // weight vector
      std::vector<unsigned int> cell_weights;


              const std::vector<unsigned int> &cell_weights,
              const unsigned int               n_partitions,
              std::vector<unsigned int>       &partition_indices,

      Assert(partition_indices.size() == sparsity_pattern.n_rows(),
             ExcInvalidArraySize(partition_indices.size(),
                                 sparsity_pattern.n_rows()));

      // check for an easy return: a single partition (or a single row)
      // needs no external partitioner
      if (n_partitions == 1 || (sparsity_pattern.n_rows() == 1))
          std::fill_n(partition_indices.begin(), partition_indices.size(), 0U);

      // otherwise dispatch to the selected backend
        partition_metis(sparsity_pattern,
        partition_zoltan(sparsity_pattern,
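
      // --------------------------------------------------------------------
      // Illustration only (not part of the library source): a minimal sketch
      // of how the partition() entry point above is typically called. The
      // function name example_partition_usage(), the 8-row chain graph, and
      // the choice of two partitions are assumptions made for this example.
      void example_partition_usage()
      {
        DynamicSparsityPattern dsp(8, 8);
        for (unsigned int i = 0; i < 8; ++i)
          {
            dsp.add(i, i);
            if (i > 0)
              {
                dsp.add(i, i - 1);
                dsp.add(i - 1, i);
              }
          }

        SparsityPattern sp;
        sp.copy_from(dsp);

        std::vector<unsigned int> partition_indices(sp.n_rows());
        SparsityTools::partition(sp, 2, partition_indices);
        // partition_indices[row] now holds the partition assigned to 'row'
      }
      // --------------------------------------------------------------------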
                             std::vector<unsigned int> &color_indices)

#ifndef DEAL_II_TRILINOS_WITH_ZOLTAN
      (void)sparsity_pattern;
      AssertThrow(false, ExcZOLTANNotInstalled());
#else
      // Set up a serial Zoltan instance for distance-1 graph coloring.
      std::unique_ptr<Zoltan> zz = std::make_unique<Zoltan>(MPI_COMM_SELF);

      zz->Set_Param("DEBUG_LEVEL", "0");
      zz->Set_Param("COLORING_PROBLEM", "DISTANCE-1");
      zz->Set_Param("NUM_GID_ENTRIES", "1");
      zz->Set_Param("NUM_LID_ENTRIES", "1");
      zz->Set_Param("OBJ_WEIGHT_DIM", "0");
      zz->Set_Param("RECOLORING_NUM_OF_ITERATIONS", "0");

      // Register the same graph query callbacks that are used for
      // partitioning.
      zz->Set_Num_Obj_Fn(get_number_of_objects, &graph);
      zz->Set_Obj_List_Fn(get_object_list, &graph);
      zz->Set_Num_Edges_Multi_Fn(get_num_edges_list, &graph);
      zz->Set_Edge_List_Multi_Fn(get_edge_list, &graph);

      int       num_gid_entries = 1;
      const int num_objects     = graph.n_rows();

      std::vector<ZOLTAN_ID_TYPE> global_ids(num_objects);
      std::vector<int>            color_exp(num_objects);

      for (int i = 0; i < num_objects; ++i)
        global_ids[i] = i;

      // let Zoltan color the graph and copy the result into the output array
      int rc = zz->Color(num_gid_entries,
                         /* ... */);

      color_indices.resize(num_objects);
      Assert(color_exp.size() == color_indices.size(),
             /* ... */);

      std::copy(color_exp.begin(), color_exp.end(), color_indices.begin());

      // Zoltan's colors start at 1, so the largest color index equals the
      // number of colors used.
      unsigned int n_colors =
        *(std::max_element(color_indices.begin(), color_indices.end()));
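
      // --------------------------------------------------------------------
      // Illustration only (not part of the library source): a minimal sketch
      // of calling the coloring routine above. The function name
      // example_coloring_usage() and the 6-row chain graph are assumptions
      // made for this example.
      void example_coloring_usage()
      {
        DynamicSparsityPattern dsp(6, 6);
        for (unsigned int i = 0; i < 6; ++i)
          {
            dsp.add(i, i);
            if (i > 0)
              {
                dsp.add(i, i - 1);
                dsp.add(i - 1, i);
              }
          }

        SparsityPattern sp;
        sp.copy_from(dsp);

        std::vector<unsigned int> color_indices(sp.n_rows());
        const unsigned int        n_colors =
          SparsityTools::color_sparsity_pattern(sp, color_indices);
        (void)n_colors;
        // Rows that couple through 'sp' never share a color; the color
        // indices returned by Zoltan start at 1.
      }
      // --------------------------------------------------------------------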
                               const std::vector<DynamicSparsityPattern::size_type> &new_indices)

        // helper: among the rows not yet numbered, pick as starting point a
        // row of minimal coordination number (i.e. minimal row length)
        if (sparsity.row_length(row) < min_coordination)
            starting_point = row;

        return starting_point;



                                std::vector<DynamicSparsityPattern::size_type> &new_indices,
                                const std::vector<DynamicSparsityPattern::size_type> &starting_indices)

             "You can't specify more starting indices than there are rows"));
             "Only valid for sparsity patterns which store all rows."));

      for (const auto starting_index : starting_indices)
          (void)starting_index;
                 ExcMessage("Invalid starting index: All starting indices need "
                            "to be between zero and the number of rows in the "
                            "sparsity pattern."));

      // Number the user-supplied starting indices first; if none were given,
      // pick a starting row of minimal coordination number.
      std::vector<DynamicSparsityPattern::size_type> last_round_dofs(
      std::fill(new_indices.begin(),

      if (last_round_dofs.empty())
        last_round_dofs.push_back(

      for (const auto &last_round_dof : last_round_dofs)
        new_indices[last_round_dof] = next_free_number++;

      // Breadth-first sweep: in every round collect the yet unnumbered
      // neighbors of the rows numbered in the previous round.
      std::vector<DynamicSparsityPattern::size_type> next_round_dofs;

      std::vector<std::pair<unsigned int, DynamicSparsityPattern::size_type>>
        dofs_by_coordination;

          next_round_dofs.clear();

          for (const auto dof : last_round_dofs)
              const unsigned int row_length = sparsity.row_length(dof);
              for (unsigned int i = 0; i < row_length; ++i)
                    next_round_dofs.push_back(column);
                    new_indices[column] = 0;

          // If no neighbors are left but some rows are still unnumbered, the
          // graph has several connected components; restart from an
          // unnumbered row, which is only allowed if no starting indices
          // were given.
          if (next_round_dofs.empty())
              if (std::find(new_indices.begin(),
                  Assert(starting_indices.empty(),
                         ExcMessage("The input graph appears to have more than one "
                                    "component, but as stated in the documentation "
                                    "we only want to reorder such graphs if no "
                                    "starting indices are given. The function was "
                                    "called with starting indices, however."));

                  next_round_dofs.push_back(

          // number the rows collected in this round, sorted by ascending
          // coordination number
          dofs_by_coordination.clear();
            dofs_by_coordination.emplace_back(sparsity.row_length(next_round_dof),
          std::sort(dofs_by_coordination.begin(), dofs_by_coordination.end());

          for (const auto &i : dofs_by_coordination)
            new_indices[i.second] = next_free_number++;

          last_round_dofs.swap(next_round_dofs);

      Assert((std::find(new_indices.begin(),
              (next_free_number == sparsity.n_rows()),
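
      // --------------------------------------------------------------------
      // Illustration only (not part of the library source): a minimal sketch
      // of calling the Cuthill-McKee reordering above. The function name
      // example_cuthill_mckee_usage() and the 5-row chain graph are
      // assumptions made for this example.
      void example_cuthill_mckee_usage()
      {
        DynamicSparsityPattern dsp(5, 5);
        for (unsigned int i = 0; i < 5; ++i)
          {
            dsp.add(i, i);
            if (i > 0)
              {
                dsp.add(i, i - 1);
                dsp.add(i - 1, i);
              }
          }

        std::vector<DynamicSparsityPattern::size_type> new_indices(
          dsp.n_rows());
        SparsityTools::reorder_Cuthill_McKee(dsp, new_indices);
        // new_indices[old_row] is the position of 'old_row' in the new
        // ordering; pass a non-empty third argument to prescribe the rows
        // that are numbered first.
      }
      // --------------------------------------------------------------------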
                                 std::vector<DynamicSparsityPattern::size_type> &renumbering)

             "Only valid for sparsity patterns which store all rows."));

        std::vector<types::global_dof_index> touched_nodes(
        std::vector<unsigned int>            row_lengths(connectivity.n_rows());
        std::set<types::global_dof_index>    current_neighbors;
        std::vector<std::vector<types::global_dof_index>> groups;

          row_lengths[row] = connectivity.row_length(row);

        std::vector<unsigned int> n_remaining_neighbors(row_lengths);

        // find a node of minimal row length that has not been grouped yet
        std::pair<types::global_dof_index, types::global_dof_index>
            if (row_lengths[i] < min_neighbors.second)
                min_neighbors = std::make_pair(i, n_remaining_neighbors[i]);
                if (n_remaining_neighbors[i] <= 1)

        current_neighbors.clear();
        current_neighbors.insert(min_neighbors.first);
        while (!current_neighbors.empty())
            // Among the current neighbors, pick the node with the fewest
            // remaining ungrouped neighbors; ties are broken in favor of
            // longer rows.
            for (const auto current_neighbor : current_neighbors)
                Assert(touched_nodes[current_neighbor] ==
                if (n_remaining_neighbors[current_neighbor] <
                    min_neighbors.second)
                    std::make_pair(current_neighbor,
                                   n_remaining_neighbors[current_neighbor]);

              min_neighbors.second;
            for (const auto current_neighbor : current_neighbors)
              if (n_remaining_neighbors[current_neighbor] == best_row_length)
                if (row_lengths[current_neighbor] > min_neighbors.second)
                    std::make_pair(current_neighbor,
                                   row_lengths[current_neighbor]);

            // start a new group around the chosen node and add all of its
            // untouched neighbors to it
            groups.emplace_back();
            std::vector<types::global_dof_index> &next_group = groups.back();

            next_group.push_back(min_neighbors.first);
            touched_nodes[min_neighbors.first] = groups.size() - 1;
                 connectivity.begin(min_neighbors.first);
                 it != connectivity.end(min_neighbors.first);
                next_group.push_back(it->column());
                touched_nodes[it->column()] = groups.size() - 1;

            // update the neighbor set and the remaining-neighbor counters
            for (const auto index : next_group)
                     connectivity.begin(index);
                     it != connectivity.end(index);
                    if (touched_nodes[it->column()] ==
                      current_neighbors.insert(it->column());
                    n_remaining_neighbors[it->column()]--;
                current_neighbors.erase(index);

        // If the grouping coarsened the graph, build the connectivity between
        // the groups and recurse on it; otherwise flush the groups into the
        // final renumbering.
        if (groups.size() < connectivity.n_rows())
                   connectivity.begin(groups[i][col]);
                   it != connectivity.end(groups[i][col]);
                connectivity_next.add(i, touched_nodes[it->column()]);

            std::vector<types::global_dof_index> renumbering_next(groups.size());
                   col < groups[renumbering_next[i]].size();
                renumbering[count] = groups[renumbering_next[i]][col];

                renumbering[count] = groups[i][col];


                             std::vector<DynamicSparsityPattern::size_type> &renumbering)
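
      // --------------------------------------------------------------------
      // Illustration only (not part of the library source): a minimal sketch
      // of calling the hierarchical reordering, assuming it is exposed as
      // SparsityTools::reorder_hierarchical(). The function name
      // example_hierarchical_usage() and the 6-row chain graph are
      // assumptions made for this example.
      void example_hierarchical_usage()
      {
        DynamicSparsityPattern dsp(6, 6);
        for (unsigned int i = 0; i < 6; ++i)
          {
            dsp.add(i, i);
            if (i > 0)
              {
                dsp.add(i, i - 1);
                dsp.add(i - 1, i);
              }
          }

        std::vector<DynamicSparsityPattern::size_type> renumbering(
          dsp.n_rows());
        SparsityTools::reorder_hierarchical(dsp, renumbering);
        // renumbering lists the rows in their new, locality-preserving order
      }
      // --------------------------------------------------------------------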
#ifdef DEAL_II_WITH_MPI

                                 const IndexSet &locally_relevant_rows)

        std::map<unsigned int, std::vector<DynamicSparsityPattern::size_type>>;

      // figure out which process owns each of the rows we are interested in
      IndexSet requested_rows(locally_relevant_rows);

      std::vector<unsigned int> index_owner =

            rows_data[index_owner[i]].push_back(row);

      const auto rows_data_received =

      // Pack the rows requested from this process: for every row first its
      // index, then the number of entries, then the column indices.
      for (const auto &data : rows_data_received)
          for (const auto &row : data.second)
              send_data[data.first].push_back(row);
              send_data[data.first].push_back(rlen);
                send_data[data.first].push_back(

      const auto received_data =

      // unpack what the other processes sent back and add the entries to the
      // local sparsity pattern
      for (const auto &data : received_data)
          const auto &recv_buf = data.second;
          auto        ptr      = recv_buf.begin();
          const auto  end      = recv_buf.end();

              const auto row       = *(ptr++);
              const auto n_entries = *(ptr++);


                                     const std::vector<DynamicSparsityPattern::size_type> &rows_per_cpu,

      // convert the rows-per-process information into an IndexSet of locally
      // owned rows and forward to the IndexSet-based variant
      std::vector<DynamicSparsityPattern::size_type> start_index(
        rows_per_cpu.size() + 1);
        start_index[i + 1] = start_index[i] + rows_per_cpu[i];

      IndexSet owned(start_index.back());
      owned.add_range(start_index[myid], start_index[myid] + rows_per_cpu[myid]);


                                     const IndexSet &locally_owned_rows,
                                     const IndexSet &locally_relevant_rows)

          "The DynamicSparsityPattern must be initialized with an IndexSet that contains locally relevant indices."));

      IndexSet requested_rows(locally_relevant_rows);

      std::vector<unsigned int> index_owner =

        std::map<unsigned int, std::vector<DynamicSparsityPattern::size_type>>;
      map_vec_t send_data;

      // For every locally relevant but not locally owned row, pack the row
      // index, its length, and its column indices, addressed to the owning
      // process.
            send_data[index_owner[i]].push_back(row);
            send_data[index_owner[i]].push_back(rlen);
              send_data[index_owner[i]].push_back(column);

      // unpack the received rows into the local sparsity pattern
      for (const auto &data : receive_data)
          const auto &recv_buf = data.second;
          auto        ptr      = recv_buf.begin();
          const auto  end      = recv_buf.end();

              const auto row       = *(ptr++);
              const auto n_entries = *(ptr++);
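
      // --------------------------------------------------------------------
      // Illustration only (not part of the library source): a minimal sketch
      // of the typical call to distribute_sparsity_pattern(). The function
      // name example_distribute_sparsity(), the four-rows-per-process layout,
      // and the single off-process entry are assumptions made for this
      // example.
      void example_distribute_sparsity(const MPI_Comm mpi_comm)
      {
        const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
        const unsigned int my_rank = Utilities::MPI::this_mpi_process(mpi_comm);
        const unsigned int n_rows  = 4 * n_procs;

        // each process owns a contiguous block of four rows ...
        IndexSet locally_owned_rows(n_rows);
        locally_owned_rows.add_range(4 * my_rank, 4 * (my_rank + 1));

        // ... and is additionally interested in the first row of the next
        // process' block
        IndexSet locally_relevant_rows(locally_owned_rows);
        if (4 * (my_rank + 1) < n_rows)
          locally_relevant_rows.add_index(4 * (my_rank + 1));

        // the pattern must be initialized with the locally relevant rows, as
        // required by the assertion above
        DynamicSparsityPattern dsp(n_rows, n_rows, locally_relevant_rows);
        for (const auto row : locally_relevant_rows)
          dsp.add(row, row);

        // ship the entries of every non-owned row to the process that owns it
        SparsityTools::distribute_sparsity_pattern(dsp,
                                                   locally_owned_rows,
                                                   mpi_comm,
                                                   locally_relevant_rows);
      }
      // --------------------------------------------------------------------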
      // the variant taking one IndexSet per process forwards the current
      // process' set
                                     const std::vector<IndexSet> &owned_set_per_cpu,
                                     owned_set_per_cpu[myid],


                                     const IndexSet &locally_owned_rows,
                                     const IndexSet &locally_relevant_rows)

                        std::vector<BlockDynamicSparsityPattern::size_type>>;
      map_vec_t send_data;

      IndexSet requested_rows(locally_relevant_rows);

      std::vector<unsigned int> index_owner =

      // pack the non-owned rows, addressed to their owning process
          std::vector<BlockDynamicSparsityPattern::size_type> &dst =
            send_data[index_owner[i]];
          dst.push_back(rlen);
            dst.push_back(column);

      // determine how many messages this process will receive and whom it
      // sends to
      unsigned int num_receive = 0;

      std::vector<unsigned int> send_to;
      send_to.reserve(send_data.size());
      for (const auto &sparsity_line : send_data)
        send_to.push_back(sparsity_line.first);

      // post non-blocking sends for all packed row data
      std::vector<MPI_Request> requests(send_data.size());
      unsigned int             idx = 0;
      for (const auto &sparsity_line : send_data)
          const int ierr = MPI_Isend(sparsity_line.second.data(),
                                     sparsity_line.second.size(),
                                     DEAL_II_DOF_INDEX_MPI_TYPE,
                                     sparsity_line.first,

      // Receive the messages one by one, probing first so that the buffer
      // can be sized to the incoming message length.
      std::vector<BlockDynamicSparsityPattern::size_type> recv_buf;
      for (unsigned int index = 0; index < num_receive; ++index)
          int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, mpi_comm, &status);
          recv_buf.resize(len);
          ierr = MPI_Recv(recv_buf.data(),

          std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator
            ptr = recv_buf.begin();
          std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator
            end = recv_buf.end();

              for (unsigned int c = 0; c < num; ++c)

      // complete all outstanding sends before returning
      if (requests.size() > 0)
          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
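
      // --------------------------------------------------------------------
      // Illustration only (not part of the library source): the receive loop
      // above follows the common probe-then-receive MPI idiom for messages
      // of unknown length. A minimal sketch, assuming the element type
      // DEAL_II_DOF_INDEX_MPI_TYPE and the same tag as the matching sends:
      //
      //   MPI_Status status;
      //   MPI_Probe(MPI_ANY_SOURCE, mpi_tag, mpi_comm, &status);
      //
      //   int len;
      //   MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
      //   recv_buf.resize(len);
      //
      //   MPI_Recv(recv_buf.data(),
      //            len,
      //            DEAL_II_DOF_INDEX_MPI_TYPE,
      //            status.MPI_SOURCE,
      //            status.MPI_TAG,
      //            mpi_comm,
      //            MPI_STATUS_IGNORE);
      // --------------------------------------------------------------------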