Reference documentation for deal.II version GIT relicensing-399-g79d89019c5 2024-04-16 15:00:02+00:00
task_info.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2018 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
16#include <deal.II/base/conditional_ostream.h>
17#include <deal.II/base/memory_consumption.h>
18#include <deal.II/base/mpi.h>
19#include <deal.II/base/multithread_info.h>
20#include <deal.II/base/utilities.h>
22
23#include <deal.II/lac/dynamic_sparsity_pattern.h>
24
25#include <deal.II/matrix_free/task_info.h>
27
28
29#ifdef DEAL_II_WITH_TBB
30# include <tbb/blocked_range.h>
31# include <tbb/parallel_for.h>
32# include <tbb/task.h>
33# ifndef DEAL_II_TBB_WITH_ONEAPI
34# include <tbb/task_scheduler_init.h>
35# endif
36#endif
37
38#include <iostream>
39#include <set>
40
41//
42 // oneTBB (the TBB variant shipped with oneAPI) has deprecated and removed
43 // the <code>tbb::task</code> backend. Without a major porting effort it is
44 // therefore no longer possible to compile the following code, which builds
45 // a directed acyclic graph (DAG) of (thread-parallel) tasks. It turned out
46 // that such dynamic handling of dependencies and structures is not as
47 // competitive as initially assumed. Consequently, this part of the
48 // matrix-free infrastructure has seen less attention than the rest over the
49 // last years and is (presumably) not used that often.
50 //
51 // If the oneAPI backend is detected, we simply disable threading in the
52 // matrix-free backend for now.
53//
54// Matthias Maier, Martin Kronbichler, 2021
55//
56
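//
// [Editorial note, not part of task_info.cc] A minimal sketch of the guard
// pattern the note above refers to: the tbb::task-based scheduling below is
// only compiled with classic TBB, while oneTBB builds fall back to the
// serial code path at the end of TaskInfo::loop().
//
//   #if defined(DEAL_II_WITH_TBB) && !defined(DEAL_II_TBB_WITH_ONEAPI)
//     // build and execute the task graph with tbb::task (classes below)
//   #else
//     // fall through to the serial loop over partition_row_index
//   #endif
//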
57DEAL_II_NAMESPACE_OPEN
58
59
60
61/*-------------------- Implementation of the matrix-free loop --------------*/
62namespace internal
63{
64 namespace MatrixFreeFunctions
65 {
66#if defined(DEAL_II_WITH_TBB) && !defined(DEAL_II_TBB_WITH_ONEAPI)
67
68 // This defines the TBB data structures that are needed to schedule the
69 // partition-partition variant
70
71 namespace partition
72 {
73 class ActualCellWork
74 {
75 public:
84
85 ActualCellWork(MFWorkerInterface &worker,
86 const unsigned int partition,
87 const TaskInfo &task_info)
88 : worker(&worker)
89 , worker_pointer(nullptr)
90 , partition(partition)
91 , task_info(task_info)
92 {}
93
94 void
95 operator()() const
96 {
97 MFWorkerInterface *used_worker =
98 worker != nullptr ? worker : *worker_pointer;
99 Assert(used_worker != nullptr, ExcInternalError());
100 used_worker->cell(partition);
101
102 if (task_info.face_partition_data.empty() == false)
103 {
104 used_worker->face(partition);
105 used_worker->boundary(partition);
106 }
107 }
108
109 private:
110 MFWorkerInterface *worker;
111 MFWorkerInterface **worker_pointer;
112 const unsigned int partition;
113 const TaskInfo &task_info;
114 };
115
116 class CellWork : public tbb::task
117 {
118 public:
119 CellWork(MFWorkerInterface &worker,
120 const unsigned int partition,
121 const TaskInfo &task_info,
122 const bool is_blocked)
123 : dummy(nullptr)
124 , work(worker, partition, task_info)
125 , is_blocked(is_blocked)
126 {}
127
128 tbb::task *
129 execute() override
130 {
131 work();
132
133 if (is_blocked == true)
134 tbb::empty_task::spawn(*dummy);
135 return nullptr;
136 }
137
138 tbb::empty_task *dummy;
139
140 private:
141 ActualCellWork work;
142 const bool is_blocked;
143 };
144
145
146
147 class PartitionWork : public tbb::task
148 {
149 public:
150 PartitionWork(MFWorkerInterface &function_in,
151 const unsigned int partition_in,
152 const TaskInfo &task_info_in,
153 const bool is_blocked_in = false)
154 : dummy(nullptr)
155 , function(function_in)
156 , partition(partition_in)
157 , task_info(task_info_in)
158 , is_blocked(is_blocked_in)
159 {}
160
161 tbb::task *
162 execute() override
163 {
164 tbb::empty_task *root =
165 new (tbb::task::allocate_root()) tbb::empty_task;
166 const unsigned int evens = task_info.partition_evens[partition];
167 const unsigned int odds = task_info.partition_odds[partition];
168 const unsigned int n_blocked_workers =
169 task_info.partition_n_blocked_workers[partition];
170 const unsigned int n_workers =
171 task_info.partition_n_workers[partition];
172 std::vector<CellWork *> worker(n_workers);
173 std::vector<CellWork *> blocked_worker(n_blocked_workers);
174
175 root->set_ref_count(evens + 1);
176 for (unsigned int j = 0; j < evens; ++j)
177 {
178 worker[j] = new (root->allocate_child())
179 CellWork(function,
180 2 * j,
181 task_info,
182 false);
183 if (j > 0)
184 {
185 worker[j]->set_ref_count(2);
186 blocked_worker[j - 1]->dummy =
187 new (worker[j]->allocate_child()) tbb::empty_task;
188 tbb::task::spawn(*blocked_worker[j - 1]);
189 }
190 else
191 worker[j]->set_ref_count(1);
192 if (j < evens - 1)
193 {
194 blocked_worker[j] = new (worker[j]->allocate_child())
195 CellWork(function,
196 2 * j +
197 1,
198 task_info,
199 true);
200 }
201 else
202 {
203 if (odds == evens)
204 {
205 worker[evens] = new (worker[j]->allocate_child())
206 CellWork(
207 function,
208 2 * j + 1,
209 task_info,
210 false);
211 tbb::task::spawn(*worker[evens]);
212 }
213 else
214 {
215 tbb::empty_task *child =
216 new (worker[j]->allocate_child()) tbb::empty_task();
217 tbb::task::spawn(*child);
218 }
219 }
220 }
221
222 root->wait_for_all();
223 root->destroy(*root);
224 if (is_blocked == true)
225 tbb::empty_task::spawn(*dummy);
226 return nullptr;
227 }
228
229 tbb::empty_task *dummy;
230
231 private:
232 MFWorkerInterface &function;
233 const unsigned int partition;
234 const TaskInfo &task_info;
235 const bool is_blocked;
236 };
237
238 } // end of namespace partition
239
240
241
242 namespace color
243 {
244 class CellWork
245 {
246 public:
247 CellWork(MFWorkerInterface &worker_in,
248 const TaskInfo &task_info_in,
249 const unsigned int partition_in)
250 : worker(worker_in)
251 , task_info(task_info_in)
252 , partition(partition_in)
253 {}
254
255 void
256 operator()(const tbb::blocked_range<unsigned int> &r) const
257 {
258 const unsigned int start_index =
259 task_info.cell_partition_data[partition] +
260 task_info.block_size * r.begin();
261 const unsigned int end_index =
262 std::min(start_index + task_info.block_size * (r.end() - r.begin()),
263 task_info.cell_partition_data[partition + 1]);
264 worker.cell(std::make_pair(start_index, end_index));
265
266 if (task_info.face_partition_data.empty() == false)
267 {
269 }
270 }
271
272 private:
273 MFWorkerInterface &worker;
274 const TaskInfo &task_info;
275 const unsigned int partition;
276 };
277
278
279
280 class PartitionWork : public tbb::task
281 {
282 public:
283 PartitionWork(MFWorkerInterface &worker_in,
284 const unsigned int partition_in,
285 const TaskInfo &task_info_in,
286 const bool is_blocked_in)
287 : dummy(nullptr)
288 , worker(worker_in)
289 , partition(partition_in)
290 , task_info(task_info_in)
291 , is_blocked(is_blocked_in)
292 {}
293
294 tbb::task *
295 execute() override
296 {
297 const unsigned int n_chunks =
298 (task_info.cell_partition_data[partition + 1] -
299 task_info.cell_partition_data[partition] + task_info.block_size -
300 1) /
301 task_info.block_size;
302 parallel_for(tbb::blocked_range<unsigned int>(0, n_chunks, 1),
303 CellWork(worker, task_info, partition));
304 if (is_blocked == true)
305 tbb::empty_task::spawn(*dummy);
306 return nullptr;
307 }
308
309 tbb::empty_task *dummy;
310
311 private:
312 MFWorkerInterface &worker;
313 const unsigned int partition;
314 const TaskInfo &task_info;
315 const bool is_blocked;
316 };
317
318 } // end of namespace color
319
320
321
322 class MPICommunication : public tbb::task
323 {
324 public:
325 MPICommunication(MFWorkerInterface &worker_in, const bool do_compress)
326 : worker(worker_in)
327 , do_compress(do_compress)
328 {}
329
330 tbb::task *
331 execute() override
332 {
333 if (do_compress == false)
334 worker.vector_update_ghosts_finish();
335 else
336 worker.vector_compress_start();
337 return nullptr;
338 }
339
340 private:
341 MFWorkerInterface &worker;
342 const bool do_compress;
343 };
344
345#endif // DEAL_II_WITH_TBB
346
347
348
349 void
350 TaskInfo::loop(MFWorkerInterface &funct) const
351 {
352 // If we use thread parallelism, we do not currently support scheduling
353 // pieces of updates within the loop, so this index will collect all
354 // calls in that case and work like a single complete loop over all
355 // cells
356 if (scheme != none)
357 funct.cell_loop_pre_range(-1);
358 else
359 funct.cell_loop_pre_range(
360 partition_row_index[partition_row_index.size() - 2]);
361
362 funct.vector_update_ghosts_start();
363
364#if defined(DEAL_II_WITH_TBB) && !defined(DEAL_II_TBB_WITH_ONEAPI)
365
366 if (scheme != none)
367 {
368 funct.zero_dst_vector_range(numbers::invalid_unsigned_int);
369 if (scheme == partition_partition && evens > 0)
370 {
371 tbb::empty_task *root =
372 new (tbb::task::allocate_root()) tbb::empty_task;
373 root->set_ref_count(evens + 1);
374 std::vector<partition::PartitionWork *> worker(n_workers);
375 std::vector<partition::PartitionWork *> blocked_worker(
376 n_blocked_workers);
377 MPICommunication *worker_compr =
378 new (root->allocate_child()) MPICommunication(funct, true);
379 worker_compr->set_ref_count(1);
380 for (unsigned int j = 0; j < evens; ++j)
381 {
382 if (j > 0)
383 {
384 worker[j] = new (root->allocate_child())
385 partition::PartitionWork(funct, 2 * j, *this, false);
386 worker[j]->set_ref_count(2);
387 blocked_worker[j - 1]->dummy =
388 new (worker[j]->allocate_child()) tbb::empty_task;
389 tbb::task::spawn(*blocked_worker[j - 1]);
390 }
391 else
392 {
393 worker[j] = new (worker_compr->allocate_child())
394 partition::PartitionWork(funct, 2 * j, *this, false);
395 worker[j]->set_ref_count(2);
396 MPICommunication *worker_dist =
397 new (worker[j]->allocate_child())
398 MPICommunication(funct, false);
399 tbb::task::spawn(*worker_dist);
400 }
401 if (j < evens - 1)
402 {
403 blocked_worker[j] = new (worker[j]->allocate_child())
404 partition::PartitionWork(funct, 2 * j + 1, *this, true);
405 }
406 else
407 {
408 if (odds == evens)
409 {
410 worker[evens] = new (worker[j]->allocate_child())
411 partition::PartitionWork(funct,
412 2 * j + 1,
413 *this,
414 false);
415 tbb::task::spawn(*worker[evens]);
416 }
417 else
418 {
419 tbb::empty_task *child =
420 new (worker[j]->allocate_child()) tbb::empty_task();
421 tbb::task::spawn(*child);
422 }
423 }
424 }
425
426 root->wait_for_all();
427 root->destroy(*root);
428 }
429 else if (scheme == partition_partition)
430 {
431 // catch the case of empty partition list: we still need to call
432 // the vector communication routines to clean up and initiate
433 // things
434 funct.vector_update_ghosts_finish();
435 funct.vector_compress_start();
436 }
437 else // end of partition-partition, start of partition-color
438 {
439 // check whether there is only one partition. if not, build up the
440 // tree of partitions
441 if (odds > 0)
442 {
443 tbb::empty_task *root =
444 new (tbb::task::allocate_root()) tbb::empty_task;
445 root->set_ref_count(evens + 1);
446 const unsigned int n_blocked_workers =
447 odds - (odds + evens + 1) % 2;
448 const unsigned int n_workers =
449 cell_partition_data.size() - 1 - n_blocked_workers;
450 std::vector<color::PartitionWork *> worker(n_workers);
451 std::vector<color::PartitionWork *> blocked_worker(
452 n_blocked_workers);
453 unsigned int worker_index = 0, slice_index = 0;
454 int spawn_index_child = -2;
455 MPICommunication *worker_compr =
456 new (root->allocate_child()) MPICommunication(funct, true);
457 worker_compr->set_ref_count(1);
458 for (unsigned int part = 0;
459 part < partition_row_index.size() - 1;
460 part++)
461 {
462 if (part == 0)
463 worker[worker_index] =
464 new (worker_compr->allocate_child())
465 color::PartitionWork(funct,
466 slice_index,
467 *this,
468 false);
469 else
470 worker[worker_index] = new (root->allocate_child())
471 color::PartitionWork(funct,
472 slice_index,
473 *this,
474 false);
475 ++slice_index;
476 for (; slice_index < partition_row_index[part + 1];
477 slice_index++)
478 {
479 worker[worker_index]->set_ref_count(1);
480 ++worker_index;
481 worker[worker_index] =
482 new (worker[worker_index - 1]->allocate_child())
483 color::PartitionWork(funct,
484 slice_index,
485 *this,
486 false);
487 }
488 worker[worker_index]->set_ref_count(2);
489 if (part > 0)
490 {
491 blocked_worker[(part - 1) / 2]->dummy =
492 new (worker[worker_index]->allocate_child())
493 tbb::empty_task;
494 ++worker_index;
495 if (spawn_index_child == -1)
496 tbb::task::spawn(*blocked_worker[(part - 1) / 2]);
497 else
498 {
499 Assert(spawn_index_child >= 0,
500 ExcInternalError());
501 tbb::task::spawn(*worker[spawn_index_child]);
502 }
503 spawn_index_child = -2;
504 }
505 else
506 {
507 MPICommunication *worker_dist =
508 new (worker[worker_index]->allocate_child())
509 MPICommunication(funct, false);
510 tbb::task::spawn(*worker_dist);
511 ++worker_index;
512 }
513 part += 1;
514 if (part < partition_row_index.size() - 1)
515 {
516 if (part < partition_row_index.size() - 2)
517 {
518 blocked_worker[part / 2] =
519 new (worker[worker_index - 1]->allocate_child())
520 color::PartitionWork(funct,
521 slice_index,
522 *this,
523 true);
524 ++slice_index;
525 if (slice_index < partition_row_index[part + 1])
526 {
527 blocked_worker[part / 2]->set_ref_count(1);
528 worker[worker_index] = new (
529 blocked_worker[part / 2]->allocate_child())
530 color::PartitionWork(funct,
531 slice_index,
532 *this,
533 false);
534 ++slice_index;
535 }
536 else
537 {
538 spawn_index_child = -1;
539 continue;
540 }
541 }
542 for (; slice_index < partition_row_index[part + 1];
543 slice_index++)
544 {
545 if (slice_index > partition_row_index[part])
546 {
547 worker[worker_index]->set_ref_count(1);
548 ++worker_index;
549 }
550 worker[worker_index] =
551 new (worker[worker_index - 1]->allocate_child())
552 color::PartitionWork(funct,
553 slice_index,
554 *this,
555 false);
556 }
557 spawn_index_child = worker_index;
558 ++worker_index;
559 }
560 else
561 {
562 tbb::empty_task *final =
563 new (worker[worker_index - 1]->allocate_child())
564 tbb::empty_task;
565 tbb::task::spawn(*final);
566 spawn_index_child = worker_index - 1;
567 }
568 }
569 if (evens == odds)
570 {
571 Assert(spawn_index_child >= 0, ExcInternalError());
572 tbb::task::spawn(*worker[spawn_index_child]);
573 }
574 root->wait_for_all();
575 root->destroy(*root);
576 }
577 // case when we only have one partition: this is the usual
578 // coloring scheme, and we just schedule a parallel for loop for
579 // each color
580 else
581 {
584
585 for (unsigned int color = 0; color < partition_row_index[1];
586 ++color)
587 {
588 tbb::empty_task *root =
589 new (tbb::task::allocate_root()) tbb::empty_task;
590 root->set_ref_count(2);
591 color::PartitionWork *worker =
592 new (root->allocate_child())
593 color::PartitionWork(funct, color, *this, false);
594 tbb::empty_task::spawn(*worker);
595 root->wait_for_all();
596 root->destroy(*root);
597 }
598
599 funct.vector_compress_start();
600 }
601 }
602 }
603 else
604#endif
605 // serial loop, go through up to three times and do the MPI transfer at
606 // the beginning/end of the second part
607 {
608 for (unsigned int part = 0; part < partition_row_index.size() - 2;
609 ++part)
610 {
611 if (part == 1)
612 funct.vector_update_ghosts_finish();
613
614 for (unsigned int i = partition_row_index[part];
615 i < partition_row_index[part + 1];
616 ++i)
617 {
618 funct.cell_loop_pre_range(i);
619 funct.zero_dst_vector_range(i);
620 AssertIndexRange(i + 1, cell_partition_data.size());
621 if (cell_partition_data[i + 1] > cell_partition_data[i])
622 {
623 funct.cell(i);
624 }
625
626 if (face_partition_data.empty() == false)
627 {
628 if (face_partition_data[i + 1] > face_partition_data[i])
629 funct.face(i);
630 if (boundary_partition_data[i + 1] >
631 boundary_partition_data[i])
632 funct.boundary(i);
633 }
634 funct.cell_loop_post_range(i);
635 }
636
637 if (part == 1)
638 funct.vector_compress_start();
639 }
640 }
641 funct.vector_compress_finish();
642
643 if (scheme != none)
644 funct.cell_loop_post_range(-1);
645 else
646 funct.cell_loop_post_range(
647 partition_row_index[partition_row_index.size() - 2]);
648 }
649
650
651
652 TaskInfo::TaskInfo()
653 {
654 clear();
655 }
656
657
658
659 void
660 TaskInfo::clear()
661 {
662 n_active_cells = 0;
663 n_ghost_cells = 0;
664 vectorization_length = 0;
665 block_size = 0;
666 n_blocks = 0;
667 scheme = none;
668 partition_row_index.clear();
669 partition_row_index.resize(2);
670 cell_partition_data.clear();
671 face_partition_data.clear();
672 boundary_partition_data.clear();
673 evens = 0;
674 odds = 0;
675 n_blocked_workers = 0;
676 n_workers = 0;
677 partition_evens.clear();
678 partition_odds.clear();
679 partition_n_blocked_workers.clear();
680 partition_n_workers.clear();
681 communicator = MPI_COMM_SELF;
682 my_pid = 0;
683 n_procs = 1;
684 }
685
686
687
688 template <typename StreamType>
689 void
690 TaskInfo::print_memory_statistics(StreamType &out,
691 const std::size_t data_length) const
692 {
693 Utilities::MPI::MinMaxAvg memory_c =
694 Utilities::MPI::min_max_avg(1e-6 * data_length, communicator);
695 if (n_procs < 2)
696 out << memory_c.min;
697 else
698 out << memory_c.min << "/" << memory_c.avg << "/" << memory_c.max;
699 out << " MB" << std::endl;
700 }
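// [Editorial note, not part of task_info.cc] A hypothetical use of the
// function above: data_length is a byte count, so the factor 1e-6 converts
// it to MB, and with more than one MPI process the min/avg/max over all
// ranks is printed.
//
//   TaskInfo task_info;                      // assume this has been set up
//   const std::size_t bytes = task_info.memory_consumption();
//   task_info.print_memory_statistics(std::cout, bytes);
//   // possible output on 4 ranks: "1.2/1.4/1.6 MB"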
701
702
703
704 std::size_t
718
719
720
721 void
722 TaskInfo::make_boundary_cells_divisible(
723 std::vector<unsigned int> &boundary_cells)
724 {
725 // try to make the number of boundary cells divisible by the number of
726 // vectors in vectorization
727 unsigned int fillup_needed =
728 (vectorization_length - boundary_cells.size() % vectorization_length) %
729 vectorization_length;
730 if (fillup_needed > 0 && boundary_cells.size() < n_active_cells)
731 {
732 // fill additional cells into the list of boundary cells to get a
733 // balanced number. Go through the indices successively until we
734 // have found enough indices
735 std::vector<unsigned int> new_boundary_cells;
736 new_boundary_cells.reserve(boundary_cells.size());
737
738 unsigned int next_free_slot = 0, bound_index = 0;
739 while (fillup_needed > 0 && bound_index < boundary_cells.size())
740 {
741 if (next_free_slot < boundary_cells[bound_index])
742 {
743 // check if there are enough cells to fill with in the
744 // current slot
745 if (next_free_slot + fillup_needed <=
746 boundary_cells[bound_index])
747 {
748 for (unsigned int j =
749 boundary_cells[bound_index] - fillup_needed;
750 j < boundary_cells[bound_index];
751 ++j)
752 new_boundary_cells.push_back(j);
753 fillup_needed = 0;
754 }
755 // ok, not enough indices, so just take them all up to the
756 // next boundary cell
757 else
758 {
759 for (unsigned int j = next_free_slot;
760 j < boundary_cells[bound_index];
761 ++j)
762 new_boundary_cells.push_back(j);
763 fillup_needed -=
764 boundary_cells[bound_index] - next_free_slot;
765 }
766 }
767 new_boundary_cells.push_back(boundary_cells[bound_index]);
768 next_free_slot = boundary_cells[bound_index] + 1;
769 ++bound_index;
770 }
771 while (fillup_needed > 0 &&
772 (new_boundary_cells.empty() ||
773 new_boundary_cells.back() < n_active_cells - 1))
774 new_boundary_cells.push_back(new_boundary_cells.back() + 1);
775 while (bound_index < boundary_cells.size())
776 new_boundary_cells.push_back(boundary_cells[bound_index++]);
777
778 boundary_cells.swap(new_boundary_cells);
779 }
780
781 // set the number of cells
782 std::sort(boundary_cells.begin(), boundary_cells.end());
783
784 // check that number of boundary cells is divisible by
785 // vectorization_length or that it contains all cells
786 Assert(boundary_cells.size() % vectorization_length == 0 ||
787 boundary_cells.size() == n_active_cells,
788 ExcInternalError());
789 }
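// [Editorial note, not part of task_info.cc] Worked example for the routine
// above, assuming vectorization_length == 4: for boundary_cells = {3, 4, 10,
// 11, 12, 13} (6 entries) we get fillup_needed = (4 - 6 % 4) % 4 = 2. The
// free slot before cell 3 provides the two extra cells 1 and 2, so the list
// becomes {1, 2, 3, 4, 10, 11, 12, 13}, whose size 8 is divisible by 4.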
790
791
792
793 void
794 TaskInfo::create_blocks_serial(
795 const std::vector<unsigned int> &cells_with_comm,
796 const unsigned int dofs_per_cell,
797 const bool categories_are_hp,
798 const std::vector<unsigned int> &cell_vectorization_categories,
799 const bool cell_vectorization_categories_strict,
800 const std::vector<unsigned int> &parent_relation,
801 std::vector<unsigned int> &renumbering,
802 std::vector<unsigned char> &incompletely_filled_vectorization)
803 {
804 Assert(dofs_per_cell > 0, ExcInternalError());
805 // This function is decomposed into several steps to determine a good
806 // ordering that satisfies the following constraints:
807 // a. Only cells belonging to the same category (or next higher if the
808 // cell_vectorization_categories_strict is false) can be grouped into
809 // the same SIMD batch
810 // b. hp-adaptive computations must form contiguous ranges for the same
811 // degree (category) in cell_partition_data
812 // c. We want to group the cells with the same parent in the same SIMD
813 // lane if possible
814 // d. The cell order should be similar to the initial one
815 // e. Form sets of cells without MPI communication and sets with it, in
816 // order to overlap communication with computation
817 //
818 // These constraints are satisfied by first grouping by the categories
819 // and, within the groups, to distinguish between cells with a parent
820 // and those without. All of this is set up with batches of cells (with
821 // padding if the size does not match). Then we define a vector of
822 // arrays where we define sorting criteria for the cell batches to
823 // satisfy the items b and d together, split by different parts to
824 // satisfy item e.
825
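// [Editorial note, not part of task_info.cc] A minimal sketch of the batch
// padding used in the steps below (names are illustrative only): the cells
// of one category are appended to a global numbering, and the last batch is
// padded with numbers::invalid_unsigned_int so that every batch spans
// exactly n_lanes slots.
//
//   std::vector<unsigned int> numbering;            // hypothetical
//   for (const unsigned int cell : cells_of_category)
//     numbering.push_back(cell);
//   while (numbering.size() % n_lanes != 0)
//     numbering.push_back(numbers::invalid_unsigned_int);
//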
826 // Give the compiler a chance to detect that vectorization_length is a
827 // power of two, which allows it to replace integer divisions by shifts
828 const unsigned int n_lanes = indicate_power_of_two(vectorization_length);
829
830 // Step 1: find a tight map of categories so as not to take excessive
831 // amounts of memory below. Sort the new categories by the numbers in the
832 // old one to ensure we respect the given rules
833 unsigned int n_categories = 1;
834 std::vector<unsigned int> tight_category_map(n_active_cells, 0);
835 if (cell_vectorization_categories.empty() == false)
836 {
837 AssertDimension(cell_vectorization_categories.size(),
838 n_active_cells + n_ghost_cells);
839
840 std::set<unsigned int> used_categories;
841 for (unsigned int i = 0; i < n_active_cells; ++i)
842 used_categories.insert(cell_vectorization_categories[i]);
843 std::vector<unsigned int> used_categories_vector(
844 used_categories.size());
845 n_categories = 0;
846 for (const auto &it : used_categories)
847 used_categories_vector[n_categories++] = it;
848 for (unsigned int i = 0; i < n_active_cells; ++i)
849 {
850 const unsigned int index =
851 std::lower_bound(used_categories_vector.begin(),
852 used_categories_vector.end(),
853 cell_vectorization_categories[i]) -
854 used_categories_vector.begin();
855 AssertIndexRange(index, used_categories_vector.size());
856 tight_category_map[i] = index;
857 }
858 }
859
860 // Step 2: Sort the cells by the category. If we want to fill up the
861 // ranges in vectorization, promote some of the cells to a higher
862 // category
863 std::vector<std::vector<unsigned int>> renumbering_category(n_categories);
864 for (unsigned int i = 0; i < n_active_cells; ++i)
865 renumbering_category[tight_category_map[i]].push_back(i);
866
867 if (cell_vectorization_categories_strict == false && n_categories > 1)
868 for (unsigned int j = n_categories - 1; j > 0; --j)
869 {
870 unsigned int lower_index = j - 1;
871 while ((renumbering_category[j].size() % n_lanes) != 0u)
872 {
873 while (((renumbering_category[j].size() % n_lanes) != 0u) &&
874 !renumbering_category[lower_index].empty())
875 {
876 renumbering_category[j].push_back(
877 renumbering_category[lower_index].back());
878 renumbering_category[lower_index].pop_back();
879 }
880 if (lower_index == 0)
881 break;
882 else
883 --lower_index;
884 }
885 }
886
887 // Step 3: Use the parent relation to find a good grouping of cells. To
888 // do this, we first put cells of each category defined above into two
889 // bins, those which we know can be grouped together by the given parent
890 // relation and those which cannot
891 std::vector<unsigned int> temporary_numbering;
892 temporary_numbering.reserve(n_active_cells +
893 (n_lanes - 1) * n_categories);
894 const unsigned int n_cells_per_parent =
895 std::count(parent_relation.begin(), parent_relation.end(), 0);
896 std::vector<unsigned int> category_size;
897 for (unsigned int j = 0; j < n_categories; ++j)
898 {
899 std::vector<std::pair<unsigned int, unsigned int>> grouped_cells;
900 std::vector<unsigned int> other_cells;
901 for (const unsigned int cell : renumbering_category[j])
902 if (parent_relation.empty() ||
903 parent_relation[cell] == numbers::invalid_unsigned_int)
904 other_cells.push_back(cell);
905 else
906 grouped_cells.emplace_back(parent_relation[cell], cell);
907
908 // Compute the number of cells per group
909 std::sort(grouped_cells.begin(), grouped_cells.end());
910 std::vector<unsigned int> n_cells_per_group;
911 unsigned int length = 0;
912 for (unsigned int i = 0; i < grouped_cells.size(); ++i, ++length)
913 if (i > 0 && grouped_cells[i].first != grouped_cells[i - 1].first)
914 {
915 n_cells_per_group.push_back(length);
916 length = 0;
917 }
918 if (length > 0)
919 n_cells_per_group.push_back(length);
920
921 // Move groups that do not have the complete size (due to
922 // categories) to the 'other_cells'. The cells with correct group
923 // size are immediately appended to the temporary cell numbering
924 auto group_it = grouped_cells.begin();
925 for (const unsigned int length : n_cells_per_group)
926 if (length < n_cells_per_parent)
927 for (unsigned int j = 0; j < length; ++j)
928 other_cells.push_back((group_it++)->second);
929 else
930 {
931 // we should not have more cells in a group than in the first
932 // check we did above
933 AssertDimension(length, n_cells_per_parent);
934 for (unsigned int j = 0; j < length; ++j)
935 temporary_numbering.push_back((group_it++)->second);
936 }
937
938 // Sort the remaining cells and append them as well
939 std::sort(other_cells.begin(), other_cells.end());
940 temporary_numbering.insert(temporary_numbering.end(),
941 other_cells.begin(),
942 other_cells.end());
943
944 while (temporary_numbering.size() % n_lanes != 0)
945 temporary_numbering.push_back(numbers::invalid_unsigned_int);
946
947 category_size.push_back(temporary_numbering.size());
948 }
949
950 // Step 4: Identify the batches with cells marked as "comm"
951 std::vector<bool> batch_with_comm(temporary_numbering.size() / n_lanes,
952 false);
953 std::vector<unsigned int> temporary_numbering_inverse(n_active_cells);
954 for (unsigned int i = 0; i < temporary_numbering.size(); ++i)
955 if (temporary_numbering[i] != numbers::invalid_unsigned_int)
956 temporary_numbering_inverse[temporary_numbering[i]] = i;
957 for (const unsigned int cell : cells_with_comm)
958 batch_with_comm[temporary_numbering_inverse[cell] / n_lanes] = true;
959
960 // Step 5: Sort the batches of cells by their last cell index to get
961 // good locality, assuming that the initial cell order is of good
962 // locality. In case we have hp-calculations with categories, we need to
963 // sort also by the category.
964 std::vector<std::array<unsigned int, 3>> batch_order;
965 std::vector<std::array<unsigned int, 3>> batch_order_comm;
966 for (unsigned int i = 0; i < temporary_numbering.size(); i += n_lanes)
967 {
968 unsigned int max_index = 0;
969 for (unsigned int j = 0; j < n_lanes; ++j)
970 if (temporary_numbering[i + j] < numbers::invalid_unsigned_int)
971 max_index = std::max(temporary_numbering[i + j], max_index);
972 const unsigned int category_hp =
973 categories_are_hp ?
974 std::upper_bound(category_size.begin(), category_size.end(), i) -
975 category_size.begin() :
976 0;
977 const std::array<unsigned int, 3> next{{category_hp, max_index, i}};
978 if (batch_with_comm[i / n_lanes])
979 batch_order_comm.emplace_back(next);
980 else
981 batch_order.emplace_back(next);
982 }
983
984 std::sort(batch_order.begin(), batch_order.end());
985 std::sort(batch_order_comm.begin(), batch_order_comm.end());
986
987 // Step 6: Put the cells with communication in the middle of the cell
988 // range. For the MPI case, we need three groups to enable overlap for
989 // communication and computation (part before comm, part with comm, part
990 // after comm), whereas we need one for the other case. And in each
991 // case, we allow for a slot of "ghosted" cells.
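// [Editorial note, not part of task_info.cc] Example for the splitting
// described above with n_procs > 1: for 6 batches without communication and
// 4 batches with communication, comm_begin = 6 / 2 = 3 and comm_end = 3 + 4
// = 7, so blocks = {0, 3, 7, 10} and the batches with communication sit in
// the middle of the batch range.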
992 std::vector<unsigned int> blocks;
993 if (n_procs == 1)
994 {
995 if (batch_order.empty())
996 std::swap(batch_order_comm, batch_order);
997 Assert(batch_order_comm.empty(), ExcInternalError());
998 partition_row_index.resize(3);
999 blocks = {0, static_cast<unsigned int>(batch_order.size())};
1000 }
1001 else
1002 {
1003 partition_row_index.resize(5);
1004 const unsigned int comm_begin = batch_order.size() / 2;
1005 batch_order.insert(batch_order.begin() + comm_begin,
1006 batch_order_comm.begin(),
1007 batch_order_comm.end());
1008 const unsigned int comm_end = comm_begin + batch_order_comm.size();
1009 const unsigned int end = batch_order.size();
1010 blocks = {0, comm_begin, comm_end, end};
1011 }
1012
1013 // Step 7: sort ghost cells according to the category
1014 std::vector<std::array<unsigned int, 2>> tight_category_map_ghost;
1015
1016 if (cell_vectorization_categories.empty() == false)
1017 {
1018 tight_category_map_ghost.reserve(n_ghost_cells);
1019
1020 std::set<unsigned int> used_categories;
1021 for (unsigned int i = 0; i < n_ghost_cells; ++i)
1022 used_categories.insert(
1023 cell_vectorization_categories[i + n_active_cells]);
1024
1025 std::vector<unsigned int> used_categories_vector(
1026 used_categories.size());
1027 n_categories = 0;
1028 for (const auto &it : used_categories)
1029 used_categories_vector[n_categories++] = it;
1030
1031 std::vector<unsigned int> counters(n_categories, 0);
1032
1033 for (unsigned int i = 0; i < n_ghost_cells; ++i)
1034 {
1035 const unsigned int index =
1036 std::lower_bound(
1037 used_categories_vector.begin(),
1038 used_categories_vector.end(),
1039 cell_vectorization_categories[i + n_active_cells]) -
1040 used_categories_vector.begin();
1041 AssertIndexRange(index, used_categories_vector.size());
1042 tight_category_map_ghost.emplace_back(
1043 std::array<unsigned int, 2>{{index, i}});
1044
1045 // account for padding in the hp and strict case
1046 if (categories_are_hp || cell_vectorization_categories_strict)
1047 counters[index]++;
1048 }
1049
1050 // insert padding
1051 for (unsigned int i = 0; i < counters.size(); ++i)
1052 if (counters[i] % n_lanes != 0)
1053 for (unsigned int j = counters[i] % n_lanes; j < n_lanes; ++j)
1054 tight_category_map_ghost.emplace_back(
1055 std::array<unsigned int, 2>{
1056 {i, numbers::invalid_unsigned_int}});
1057
1058 std::sort(tight_category_map_ghost.begin(),
1059 tight_category_map_ghost.end());
1060 }
1061
1062 // Step 8: Fill in the data by batches for the locally owned cells.
1063 const unsigned int n_cell_batches = batch_order.size();
1064 const unsigned int n_ghost_batches =
1065 ((tight_category_map_ghost.empty() ? n_ghost_cells :
1066 tight_category_map_ghost.size()) +
1067 n_lanes - 1) /
1068 n_lanes;
1069 incompletely_filled_vectorization.resize(n_cell_batches +
1070 n_ghost_batches);
1071
1072 cell_partition_data.clear();
1073 cell_partition_data.resize(1, 0);
1074
1075 renumbering.clear();
1076 renumbering.resize(n_active_cells + n_ghost_cells,
1077 numbers::invalid_unsigned_int);
1078
1079 unsigned int counter = 0;
1080 for (unsigned int block = 0; block < blocks.size() - 1; ++block)
1081 {
1082 const unsigned int grain_size =
1083 std::max((2048U / dofs_per_cell) / 8 * 4, 2U);
1084 for (unsigned int k = blocks[block]; k < blocks[block + 1];
1085 k += grain_size)
1086 cell_partition_data.push_back(
1087 std::min(k + grain_size, blocks[block + 1]));
1088 partition_row_index[block + 1] = cell_partition_data.size() - 1;
1089
1090 // Set the numbering according to the reordered temporary one
1091 for (unsigned int k = blocks[block]; k < blocks[block + 1]; ++k)
1092 {
1093 const unsigned int pos = batch_order[k][2];
1094 unsigned int j = 0;
1095 for (; j < n_lanes && temporary_numbering[pos + j] !=
1096 numbers::invalid_unsigned_int;
1097 ++j)
1098 renumbering[counter++] = temporary_numbering[pos + j];
1099 if (j < n_lanes)
1100 incompletely_filled_vectorization[k] = j;
1101 }
1102 }
1103 AssertDimension(counter, n_active_cells);
1104
1105 // Step 9: Treat the ghost cells
1106 if (tight_category_map_ghost.empty())
1107 {
1108 for (unsigned int cell = 0; cell < n_ghost_cells; ++cell)
1109 renumbering[n_active_cells + cell] = n_active_cells + cell;
1110
1111 if ((n_ghost_cells % n_lanes) != 0u)
1112 incompletely_filled_vectorization.back() = n_ghost_cells % n_lanes;
1113 }
1114 else
1115 {
1116 for (unsigned int k = 0, ptr = 0; k < n_ghost_batches;
1117 ++k, ptr += n_lanes)
1118 {
1119 unsigned int j = 0;
1120
1121 for (;
1122 j < n_lanes && (ptr + j < tight_category_map_ghost.size()) &&
1123 (tight_category_map_ghost[ptr + j][1] !=
1124 numbers::invalid_unsigned_int);
1125 ++j)
1126 renumbering[counter++] =
1127 n_active_cells + tight_category_map_ghost[ptr + j][1];
1128
1129 if (j < n_lanes)
1130 incompletely_filled_vectorization[n_cell_batches + k] = j;
1131 }
1132
1133 AssertDimension(counter, n_active_cells + n_ghost_cells);
1134 }
1135
1136 cell_partition_data.push_back(n_cell_batches + n_ghost_batches);
1137 partition_row_index.back() = cell_partition_data.size() - 1;
1138
1139#ifdef DEBUG
1140 std::vector<unsigned int> renumber_cpy(renumbering);
1141 std::sort(renumber_cpy.begin(), renumber_cpy.end());
1142 for (unsigned int i = 0; i < renumber_cpy.size(); ++i)
1143 AssertDimension(i, renumber_cpy[i]);
1144#endif
1145 }
1146
1147
1148
1149 void
1150 TaskInfo::initial_setup_blocks_tasks(
1151 const std::vector<unsigned int> &boundary_cells,
1152 std::vector<unsigned int> &renumbering,
1153 std::vector<unsigned char> &incompletely_filled_vectorization)
1154 {
1155 const unsigned int n_cell_batches =
1156 (n_active_cells + vectorization_length - 1) / vectorization_length;
1157 const unsigned int n_ghost_slots =
1158 (n_ghost_cells + vectorization_length - 1) / vectorization_length;
1159 incompletely_filled_vectorization.resize(n_cell_batches + n_ghost_slots);
1160 if (n_cell_batches * vectorization_length > n_active_cells)
1161 incompletely_filled_vectorization[n_cell_batches - 1] =
1162 vectorization_length -
1163 (n_cell_batches * vectorization_length - n_active_cells);
1164 if (n_ghost_slots * vectorization_length > n_ghost_cells)
1165 incompletely_filled_vectorization[n_cell_batches + n_ghost_slots - 1] =
1166 vectorization_length -
1167 (n_ghost_slots * vectorization_length - n_ghost_cells);
1168
1169 std::vector<unsigned int> reverse_numbering(
1170 n_active_cells, numbers::invalid_unsigned_int);
1171 for (unsigned int j = 0; j < boundary_cells.size(); ++j)
1172 reverse_numbering[boundary_cells[j]] = j;
1173 unsigned int counter = boundary_cells.size();
1174 for (unsigned int j = 0; j < n_active_cells; ++j)
1175 if (reverse_numbering[j] == numbers::invalid_unsigned_int)
1176 reverse_numbering[j] = counter++;
1177
1178 AssertDimension(counter, n_active_cells);
1179 renumbering = Utilities::invert_permutation(reverse_numbering);
1180
1181 for (unsigned int j = n_active_cells; j < n_active_cells + n_ghost_cells;
1182 ++j)
1183 renumbering.push_back(j);
1184
1185 // TODO: might be able to simplify this code by not relying on the cell
1186 // partition data while computing the thread graph
1187 cell_partition_data.clear();
1188 cell_partition_data.push_back(0);
1189 if (n_procs > 1)
1190 {
1191 const unsigned int n_macro_boundary_cells =
1192 (boundary_cells.size() + vectorization_length - 1) /
1193 vectorization_length;
1194 cell_partition_data.push_back(
1195 (n_cell_batches - n_macro_boundary_cells) / 2);
1196 cell_partition_data.push_back(cell_partition_data[1] +
1197 n_macro_boundary_cells);
1198 }
1199 else
1200 AssertDimension(boundary_cells.size(), 0);
1201 cell_partition_data.push_back(n_cell_batches);
1202 cell_partition_data.push_back(cell_partition_data.back() + n_ghost_slots);
1203 partition_row_index.resize(n_procs > 1 ? 4 : 2);
1204 partition_row_index[0] = 0;
1205 partition_row_index[1] = 1;
1206 if (n_procs > 1)
1207 {
1208 partition_row_index[2] = 2;
1209 partition_row_index[3] = 3;
1210 }
1211 }
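// [Editorial note, not part of task_info.cc] Example for the setup above
// with vectorization_length == 4, n_active_cells == 100 (25 cell batches),
// n_ghost_cells == 12 (3 ghost slots), 20 boundary cells (5 batches) and
// n_procs > 1: cell_partition_data becomes {0, 10, 15, 25, 28} and
// partition_row_index becomes {0, 1, 2, 3}, i.e. the cells needing MPI data
// exchange are placed in the middle range [10, 15).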
1212
1213
1214
1215 void
1216 TaskInfo::guess_block_size(const unsigned int dofs_per_cell)
1217 {
1218 // user did not say a positive number, so we have to guess
1219 if (block_size == 0)
1220 {
1221 // we would like to have enough work to do, so as first guess, try
1222 // to get 16 times as many chunks as we have threads on the system.
1223 block_size = n_active_cells / (MultithreadInfo::n_threads() * 16 *
1224 vectorization_length);
1225
1226 // if there are too few degrees of freedom per cell, need to
1227 // increase the block size
1228 const unsigned int minimum_parallel_grain_size = 200;
1229 if (dofs_per_cell * block_size < minimum_parallel_grain_size)
1230 block_size = (minimum_parallel_grain_size / dofs_per_cell + 1);
1231 if (dofs_per_cell * block_size > 10000)
1232 block_size /= 4;
1233
1234 block_size =
1235 1 << static_cast<unsigned int>(std::log2(block_size + 1));
1236 }
1237 if (block_size > n_active_cells)
1238 block_size = std::max(1U, n_active_cells);
1239 }
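// [Editorial note, not part of task_info.cc] Worked example for the
// heuristic above: with n_active_cells == 100000, 8 threads,
// vectorization_length == 4 and dofs_per_cell == 27, the first guess is
// 100000 / (8 * 16 * 4) = 195; 27 * 195 lies between 200 and 10000, so no
// correction applies, and rounding down to a power of two via
// 1 << log2(195 + 1) gives block_size == 128.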
1240
1241
1242
1243 void
1244 TaskInfo::make_thread_graph_partition_color(
1245 DynamicSparsityPattern &connectivity_large,
1246 std::vector<unsigned int> &renumbering,
1247 std::vector<unsigned char> &irregular_cells,
1248 const bool)
1249 {
1250 const unsigned int n_cell_batches = *(cell_partition_data.end() - 2);
1251 if (n_cell_batches == 0)
1252 return;
1253
1254 Assert(vectorization_length > 0, ExcInternalError());
1255
1256 unsigned int partition = 0, counter = 0;
1257
1258 // Create connectivity graph for blocks based on connectivity graph for
1259 // cells.
1260 DynamicSparsityPattern connectivity(n_blocks, n_blocks);
1261 make_connectivity_cells_to_blocks(irregular_cells,
1262 connectivity_large,
1263 connectivity);
1264
1265 // Create cell-block partitioning.
1266
1267 // For each block of cells, this variable saves to which partitions the
1268 // block belongs. Initialize all to -1 to mark them as not yet assigned
1269 // a partition.
1270 std::vector<unsigned int> cell_partition(n_blocks,
1271 numbers::invalid_unsigned_int);
1272
1273 // In element j of this variable, one puts the old number of the block
1274 // that should be the jth block in the new numeration.
1275 std::vector<unsigned int> partition_list(n_blocks, 0);
1276 std::vector<unsigned int> partition_color_list(n_blocks, 0);
1277
1278 // This vector points to the start of each partition.
1279 std::vector<unsigned int> partition_size(2, 0);
1280
1281 // blocking_connectivity = true;
1282
1283 // The cluster_size in make_partitioning defines that the no. of cells
1284 // in each partition should be a multiple of cluster_size.
1285 unsigned int cluster_size = 1;
1286
1287 // Make the partitioning of the first layer of the blocks of cells.
1288 make_partitioning(connectivity,
1289 cluster_size,
1290 cell_partition,
1291 partition_list,
1292 partition_size,
1293 partition);
1294
1295 // Color the cells within each partition
1296 make_coloring_within_partitions_pre_blocked(connectivity,
1297 partition,
1298 cell_partition,
1299 partition_list,
1300 partition_size,
1301 partition_color_list);
1302
1303 partition_list = renumbering;
1304
1305#ifdef DEBUG
1306 // in debug mode, check that the partition color list is one-to-one
1307 {
1308 std::vector<unsigned int> sorted_pc_list(partition_color_list);
1309 std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
1310 for (unsigned int i = 0; i < sorted_pc_list.size(); ++i)
1311 Assert(sorted_pc_list[i] == i, ExcInternalError());
1312 }
1313#endif
1314
1315 // set the start list for each block and compute the renumbering of
1316 // cells
1317 std::vector<unsigned int> block_start(n_cell_batches + 1);
1318 std::vector<unsigned char> irregular(n_cell_batches);
1319
1320 unsigned int mcell_start = 0;
1321 block_start[0] = 0;
1322 for (unsigned int block = 0; block < n_blocks; ++block)
1323 {
1324 block_start[block + 1] = block_start[block];
1325 for (unsigned int mcell = mcell_start;
1326 mcell < std::min(mcell_start + block_size, n_cell_batches);
1327 ++mcell)
1328 {
1329 unsigned int n_comp = (irregular_cells[mcell] > 0) ?
1330 irregular_cells[mcell] :
1331 vectorization_length;
1332 block_start[block + 1] += n_comp;
1333 ++counter;
1334 }
1335 mcell_start += block_size;
1336 }
1337 counter = 0;
1338 unsigned int counter_macro = 0;
1339 unsigned int block_size_last =
1340 n_cell_batches - block_size * (n_blocks - 1);
1341 if (block_size_last == 0)
1342 block_size_last = block_size;
1343
1344 unsigned int tick = 0;
1345 for (unsigned int block = 0; block < n_blocks; ++block)
1346 {
1347 unsigned int present_block = partition_color_list[block];
1348 for (unsigned int cell = block_start[present_block];
1349 cell < block_start[present_block + 1];
1350 ++cell)
1351 renumbering[counter++] = partition_list[cell];
1352 unsigned int this_block_size =
1353 (present_block == n_blocks - 1) ? block_size_last : block_size;
1354
1355 // Also re-compute the content of cell_partition_data to
1356 // contain the numbers of cells, not blocks
1357 if (cell_partition_data[tick] == block)
1358 cell_partition_data[tick++] = counter_macro;
1359
1360 for (unsigned int j = 0; j < this_block_size; ++j)
1361 irregular[counter_macro++] =
1362 irregular_cells[present_block * block_size + j];
1363 }
1364 AssertDimension(tick + 1, cell_partition_data.size());
1365 cell_partition_data.back() = counter_macro;
1366
1367 irregular_cells.swap(irregular);
1368 AssertDimension(counter, n_active_cells);
1369 AssertDimension(counter_macro, n_cell_batches);
1370
1371 // check that the renumbering is one-to-one
1372#ifdef DEBUG
1373 {
1374 std::vector<unsigned int> sorted_renumbering(renumbering);
1375 std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
1376 for (unsigned int i = 0; i < sorted_renumbering.size(); ++i)
1377 Assert(sorted_renumbering[i] == i, ExcInternalError());
1378 }
1379#endif
1380
1381
1382 update_task_info(
1383 partition); // Actually sets too much for partition color case
1384
1385 AssertDimension(cell_partition_data.back(), n_cell_batches);
1386 }
1387
1388
1389
1390 void
1391 TaskInfo::make_thread_graph(
1392 const std::vector<unsigned int> &cell_active_fe_index,
1393 DynamicSparsityPattern &connectivity,
1394 std::vector<unsigned int> &renumbering,
1395 std::vector<unsigned char> &irregular_cells,
1396 const bool hp_bool)
1397 {
1398 const unsigned int n_cell_batches = *(cell_partition_data.end() - 2);
1399 if (n_cell_batches == 0)
1400 return;
1401
1402 Assert(vectorization_length > 0, ExcInternalError());
1403
1404 // if we want to block before partitioning, create connectivity graph
1405 // for blocks based on connectivity graph for cells.
1406 DynamicSparsityPattern connectivity_blocks(n_blocks, n_blocks);
1407 make_connectivity_cells_to_blocks(irregular_cells,
1408 connectivity,
1409 connectivity_blocks);
1410
1411 unsigned int n_blocks = 0;
1412 if (scheme == partition_color ||
1413 scheme == color) // blocking_connectivity == true
1414 n_blocks = this->n_blocks;
1415 else
1416 n_blocks = n_active_cells;
1417
1418 // For each block of cells, this variable saves to which partitions the
1419 // block belongs. Initialize all to -1 to mark them as not yet assigned
1420 // a partition.
1421 std::vector<unsigned int> cell_partition(n_blocks,
1422 numbers::invalid_unsigned_int);
1423
1424 // In element j of this variable, one puts the old number (but after
1425 // renumbering according to the input renumbering) of the block that
1426 // should be the jth block in the new numeration.
1427 std::vector<unsigned int> partition_list(n_blocks, 0);
1428 std::vector<unsigned int> partition_2layers_list(n_blocks, 0);
1429
1430 // This vector points to the start of each partition.
1431 std::vector<unsigned int> partition_size(2, 0);
1432
1433 unsigned int partition = 0;
1434
1435 // Within the partitions we want to be able to block for the case that
1436 // we do not block already in the connectivity. The cluster_size in
1437 // make_partitioning defines that the no. of cells in each partition
1438 // should be a multiple of cluster_size.
1439 unsigned int cluster_size = 1;
1440 if (scheme == partition_partition)
1441 cluster_size = block_size * vectorization_length;
1442
1443 // Make the partitioning of the first layer of the blocks of cells.
1444 if (scheme == partition_color || scheme == color)
1445 make_partitioning(connectivity_blocks,
1446 cluster_size,
1447 cell_partition,
1448 partition_list,
1449 partition_size,
1450 partition);
1451 else
1452 make_partitioning(connectivity,
1453 cluster_size,
1454 cell_partition,
1455 partition_list,
1456 partition_size,
1457 partition);
1458
1459 // Partition or color second layer
1460 if (scheme == partition_partition)
1461
1462 {
1463 // Partition within partitions.
1464 make_partitioning_within_partitions_post_blocked(
1465 connectivity,
1466 cell_active_fe_index,
1467 partition,
1468 cluster_size,
1469 hp_bool,
1470 cell_partition,
1471 partition_list,
1472 partition_size,
1473 partition_2layers_list,
1474 irregular_cells);
1475 }
1476 else if (scheme == partition_color || scheme == color)
1477 {
1478 make_coloring_within_partitions_pre_blocked(connectivity_blocks,
1479 partition,
1480 cell_partition,
1481 partition_list,
1482 partition_size,
1483 partition_2layers_list);
1484 }
1485
1486 // in debug mode, check that the partition_2layers_list is one-to-one
1487#ifdef DEBUG
1488 {
1489 std::vector<unsigned int> sorted_pc_list(partition_2layers_list);
1490 std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
1491 for (unsigned int i = 0; i < sorted_pc_list.size(); ++i)
1492 Assert(sorted_pc_list[i] == i, ExcInternalError());
1493 }
1494#endif
1495
1496 // Set the new renumbering
1497 std::vector<unsigned int> renumbering_in(n_active_cells, 0);
1498 renumbering_in.swap(renumbering);
1499 if (scheme == partition_partition) // blocking_connectivity == false
1500 {
1501 // This is the simple case. The renumbering is just a combination of
1502 // the renumbering that we were given as an input and the
1503 // renumbering of partition/coloring given in partition_2layers_list
1504 for (unsigned int j = 0; j < renumbering.size(); ++j)
1505 renumbering[j] = renumbering_in[partition_2layers_list[j]];
1506 // Account for the ghost cells, finally.
1507 for (unsigned int i = 0; i < n_ghost_cells; ++i)
1508 renumbering.push_back(i + n_active_cells);
1509 }
1510 else
1511 {
1512 // set the start list for each block and compute the renumbering of
1513 // cells
1514 std::vector<unsigned int> block_start(n_cell_batches + 1);
1515 std::vector<unsigned char> irregular(n_cell_batches);
1516
1517 unsigned int counter = 0;
1518 unsigned int mcell_start = 0;
1519 block_start[0] = 0;
1520 for (unsigned int block = 0; block < n_blocks; ++block)
1521 {
1522 block_start[block + 1] = block_start[block];
1523 for (unsigned int mcell = mcell_start;
1524 mcell < std::min(mcell_start + block_size, n_cell_batches);
1525 ++mcell)
1526 {
1527 unsigned int n_comp = (irregular_cells[mcell] > 0) ?
1528 irregular_cells[mcell] :
1529 vectorization_length;
1530 block_start[block + 1] += n_comp;
1531 ++counter;
1532 }
1533 mcell_start += block_size;
1534 }
1535 counter = 0;
1536 unsigned int counter_macro = 0;
1537 unsigned int block_size_last =
1538 n_cell_batches - block_size * (n_blocks - 1);
1539 if (block_size_last == 0)
1540 block_size_last = block_size;
1541
1542 unsigned int tick = 0;
1543 for (unsigned int block = 0; block < n_blocks; ++block)
1544 {
1545 unsigned int present_block = partition_2layers_list[block];
1546 for (unsigned int cell = block_start[present_block];
1547 cell < block_start[present_block + 1];
1548 ++cell)
1549 renumbering[counter++] = renumbering_in[cell];
1550 unsigned int this_block_size =
1551 (present_block == n_blocks - 1) ? block_size_last : block_size;
1552
1553 // Also re-compute the content of cell_partition_data to
1554 // contain the numbers of cells, not blocks
1555 if (cell_partition_data[tick] == block)
1556 cell_partition_data[tick++] = counter_macro;
1557
1558 for (unsigned int j = 0; j < this_block_size; ++j)
1559 irregular[counter_macro++] =
1560 irregular_cells[present_block * block_size + j];
1561 }
1562 AssertDimension(tick + 1, cell_partition_data.size());
1563 cell_partition_data.back() = counter_macro;
1564
1565 irregular_cells.swap(irregular);
1566 AssertDimension(counter, n_active_cells);
1567 AssertDimension(counter_macro, n_cell_batches);
1568 // check that the renumbering is one-to-one
1569#ifdef DEBUG
1570 {
1571 std::vector<unsigned int> sorted_renumbering(renumbering);
1572 std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
1573 for (unsigned int i = 0; i < sorted_renumbering.size(); ++i)
1574 Assert(sorted_renumbering[i] == i, ExcInternalError());
1575 }
1576#endif
1577 }
1578
1579 // Update the task_info with the additional information for the thread graph.
1580 update_task_info(partition);
1581 }
1582
1583
1584
1585 void
1586 TaskInfo::make_thread_graph_partition_partition(
1587 const std::vector<unsigned int> &cell_active_fe_index,
1588 DynamicSparsityPattern &connectivity,
1589 std::vector<unsigned int> &renumbering,
1590 std::vector<unsigned char> &irregular_cells,
1591 const bool hp_bool)
1592 {
1593 const unsigned int n_cell_batches = *(cell_partition_data.end() - 2);
1594 if (n_cell_batches == 0)
1595 return;
1596
1597 const unsigned int cluster_size = block_size * vectorization_length;
1598
1599 // Create cell-block partitioning.
1600
1601 // For each block of cells, this variable saves to which partitions the
1602 // block belongs. Initialize all to n_cell_batches to mark them as not
1603 // yet assigned a partition.
1604 std::vector<unsigned int> cell_partition(n_active_cells,
1605 numbers::invalid_unsigned_int);
1606
1607
1608 // In element j of this variable, one puts the old number of the block
1609 // that should be the jth block in the new numeration.
1610 std::vector<unsigned int> partition_list(n_active_cells, 0);
1611 std::vector<unsigned int> partition_partition_list(n_active_cells, 0);
1612
1613 // This vector points to the start of each partition.
1614 std::vector<unsigned int> partition_size(2, 0);
1615
1616 unsigned int partition = 0;
1617 // Here, we do not block inside the connectivity graph
1618 // blocking_connectivity = false;
1619
1620 // Make the partitioning of the first layer of the blocks of cells.
1621 make_partitioning(connectivity,
1622 cluster_size,
1623 cell_partition,
1624 partition_list,
1625 partition_size,
1626 partition);
1627
1628 // Partition within partitions.
1629 make_partitioning_within_partitions_post_blocked(connectivity,
1630 cell_active_fe_index,
1631 partition,
1632 cluster_size,
1633 hp_bool,
1634 cell_partition,
1635 partition_list,
1636 partition_size,
1637 partition_partition_list,
1638 irregular_cells);
1639
1640 partition_list.swap(renumbering);
1641
1642 for (unsigned int j = 0; j < renumbering.size(); ++j)
1643 renumbering[j] = partition_list[partition_partition_list[j]];
1644
1645 for (unsigned int i = 0; i < n_ghost_cells; ++i)
1646 renumbering.push_back(i + n_active_cells);
1647
1648 update_task_info(partition);
1649 }
1650
1651
1652
1653 void
1654 TaskInfo::make_connectivity_cells_to_blocks(
1655 const std::vector<unsigned char> &irregular_cells,
1656 const DynamicSparsityPattern &connectivity_cells,
1657 DynamicSparsityPattern &connectivity_blocks) const
1658 {
1659 std::vector<std::vector<unsigned int>> cell_blocks(n_blocks);
1660 std::vector<unsigned int> touched_cells(n_active_cells);
1661 unsigned int cell = 0;
1662 for (unsigned int i = 0, mcell = 0; i < n_blocks; ++i)
1663 {
1664 for (unsigned int c = 0;
1665 c < block_size && mcell < *(cell_partition_data.end() - 2);
1666 ++c, ++mcell)
1667 {
1668 unsigned int ncomp = (irregular_cells[mcell] > 0) ?
1669 irregular_cells[mcell] :
1670 vectorization_length;
1671 for (unsigned int c = 0; c < ncomp; ++c, ++cell)
1672 {
1673 cell_blocks[i].push_back(cell);
1674 touched_cells[cell] = i;
1675 }
1676 }
1677 }
1678 AssertDimension(cell, n_active_cells);
1679 for (unsigned int i = 0; i < cell_blocks.size(); ++i)
1680 for (unsigned int col = 0; col < cell_blocks[i].size(); ++col)
1681 {
1682 for (DynamicSparsityPattern::iterator it =
1683 connectivity_cells.begin(cell_blocks[i][col]);
1684 it != connectivity_cells.end(cell_blocks[i][col]);
1685 ++it)
1686 {
1687 if (touched_cells[it->column()] != i)
1688 connectivity_blocks.add(i, touched_cells[it->column()]);
1689 }
1690 }
1691 }
1692
1693
1694
1695 // Function to create partitioning on the second layer within each
1696 // partition. Version without preblocking.
1697 void
1698 TaskInfo::make_partitioning_within_partitions_post_blocked(
1699 const DynamicSparsityPattern &connectivity,
1700 const std::vector<unsigned int> &cell_active_fe_index,
1701 const unsigned int partition,
1702 const unsigned int cluster_size,
1703 const bool hp_bool,
1704 const std::vector<unsigned int> &cell_partition,
1705 const std::vector<unsigned int> &partition_list,
1706 const std::vector<unsigned int> &partition_size,
1707 std::vector<unsigned int> &partition_partition_list,
1708 std::vector<unsigned char> &irregular_cells)
1709 {
1710 const unsigned int n_cell_batches = *(cell_partition_data.end() - 2);
1711 const unsigned int n_ghost_slots =
1712 *(cell_partition_data.end() - 1) - n_cell_batches;
1713
1714 // List of cells in previous partition
1715 std::vector<unsigned int> neighbor_list;
1716 // List of cells in current partition for use as neighbors in next
1717 // partition
1718 std::vector<unsigned int> neighbor_neighbor_list;
1719
1720 std::vector<unsigned int> renumbering(n_active_cells);
1721
1722 irregular_cells.back() = 0;
1723 irregular_cells.resize(n_active_cells + n_ghost_slots);
1724
1725 unsigned int max_fe_index = 0;
1726 for (const unsigned int fe_index : cell_active_fe_index)
1727 max_fe_index = std::max(fe_index, max_fe_index);
1728
1729 Assert(!hp_bool || cell_active_fe_index.size() == n_active_cells,
1730 ExcInternalError());
1731
1732 {
1733 unsigned int n_cell_batches_before = 0;
1734 // Create partitioning within partitions.
1735
1736 // For each block of cells, this variable saves to which partitions
1737 // the block belongs. Initialize all to n_cell_batches to mark them as
1738 // not yet assigned a partition.
1739 std::vector<unsigned int> cell_partition_l2(
1740 n_active_cells, numbers::invalid_unsigned_int);
1741 partition_row_index.clear();
1742 partition_row_index.resize(partition + 1, 0);
1743 cell_partition_data.resize(1, 0);
1744
1745 unsigned int counter = 0;
1746 unsigned int missing_macros;
1747 for (unsigned int part = 0; part < partition; ++part)
1748 {
1749 neighbor_neighbor_list.resize(0);
1750 neighbor_list.resize(0);
1751 bool work = true;
1752 unsigned int partition_l2 = 0;
1753 unsigned int start_up = partition_size[part];
1754 unsigned int partition_counter = 0;
1755 while (work)
1756 {
1757 if (neighbor_list.empty())
1758 {
1759 work = false;
1760 partition_counter = 0;
1761 for (unsigned int j = start_up;
1762 j < partition_size[part + 1];
1763 ++j)
1764 if (cell_partition[partition_list[j]] == part &&
1765 cell_partition_l2[partition_list[j]] ==
1766 numbers::invalid_unsigned_int)
1767 {
1768 start_up = j;
1769 work = true;
1770 partition_counter = 1;
1771 // To start up, set the start_up cell to partition
1772 // and list all its neighbors.
1773 AssertIndexRange(start_up, partition_size[part + 1]);
1774 cell_partition_l2[partition_list[start_up]] =
1775 partition_l2;
1776 neighbor_neighbor_list.push_back(
1777 partition_list[start_up]);
1778 partition_partition_list[counter++] =
1779 partition_list[start_up];
1780 ++start_up;
1781 break;
1782 }
1783 }
1784 else
1785 {
1786 partition_counter = 0;
1787 for (const unsigned int neighbor : neighbor_list)
1788 {
1789 Assert(cell_partition[neighbor] == part,
1790 ExcInternalError());
1791 Assert(cell_partition_l2[neighbor] == partition_l2 - 1,
1792 ExcInternalError());
1793 auto neighbor_it = connectivity.begin(neighbor);
1794 const auto end_it = connectivity.end(neighbor);
1795 for (; neighbor_it != end_it; ++neighbor_it)
1796 {
1797 if (cell_partition[neighbor_it->column()] == part &&
1798 cell_partition_l2[neighbor_it->column()] ==
1799 numbers::invalid_unsigned_int)
1800 {
1801 cell_partition_l2[neighbor_it->column()] =
1802 partition_l2;
1803 neighbor_neighbor_list.push_back(
1804 neighbor_it->column());
1805 partition_partition_list[counter++] =
1806 neighbor_it->column();
1807 ++partition_counter;
1808 }
1809 }
1810 }
1811 }
1812 if (partition_counter > 0)
1813 {
1814 int index_before = neighbor_neighbor_list.size(),
1815 index = index_before;
1816 {
1817 // put the cells into separate lists for each FE index
1818 // within one partition-partition
1819 missing_macros = 0;
1820 std::vector<unsigned int> remaining_per_cell_batch(
1821 max_fe_index + 1);
1822 std::vector<std::vector<unsigned int>>
1823 renumbering_fe_index;
1824 unsigned int cell;
1825 bool filled = true;
1826 if (hp_bool == true)
1827 {
1828 renumbering_fe_index.resize(max_fe_index + 1);
1829 for (cell = counter - partition_counter;
1830 cell < counter;
1831 ++cell)
1832 {
1833 renumbering_fe_index
1834 [cell_active_fe_index.empty() ?
1835 0 :
1836 cell_active_fe_index
1837 [partition_partition_list[cell]]]
1838 .push_back(partition_partition_list[cell]);
1839 }
1840 // check how many more cells are needed in the lists
1841 for (unsigned int j = 0; j < max_fe_index + 1; ++j)
1842 {
1843 remaining_per_cell_batch[j] =
1844 renumbering_fe_index[j].size() %
1845 vectorization_length;
1846 if (remaining_per_cell_batch[j] != 0)
1847 filled = false;
1848 missing_macros +=
1849 ((renumbering_fe_index[j].size() +
1850 vectorization_length - 1) /
1851 vectorization_length);
1852 }
1853 }
1854 else
1855 {
1856 remaining_per_cell_batch.resize(1);
1857 remaining_per_cell_batch[0] =
1858 partition_counter % vectorization_length;
1859 missing_macros =
1860 partition_counter / vectorization_length;
1861 if (remaining_per_cell_batch[0] != 0)
1862 {
1863 filled = false;
1864 ++missing_macros;
1865 }
1866 }
1867 missing_macros =
1868 cluster_size - (missing_macros % cluster_size);
1869
1870 // now we realized that there are some cells missing.
1871 while (missing_macros > 0 || filled == false)
1872 {
1873 if (index == 0)
1874 {
1875 index = neighbor_neighbor_list.size();
1876 if (index == index_before)
1877 {
1878 if (missing_macros != 0)
1879 {
1880 neighbor_neighbor_list.resize(0);
1881 }
1882 start_up--;
1883 break; // not connected - start again
1884 }
1885 index_before = index;
1886 }
1887 index--;
1888 unsigned int additional =
1889 neighbor_neighbor_list[index];
1890
1891 // go through the neighbors of the last cell in the
1892 // current partition and check if we find some to
1893 // fill up with.
1894 DynamicSparsityPattern::iterator neighbor =
1895 connectivity.begin(
1896 additional),
1897 end =
1898 connectivity.end(
1899 additional);
1900 for (; neighbor != end; ++neighbor)
1901 {
1902 if (cell_partition[neighbor->column()] == part &&
1903 cell_partition_l2[neighbor->column()] ==
1904 numbers::invalid_unsigned_int)
1905 {
1906 unsigned int this_index = 0;
1907 if (hp_bool == true)
1908 this_index =
1909 cell_active_fe_index.empty() ?
1910 0 :
1911 cell_active_fe_index[neighbor
1912 ->column()];
1913
1914 // Only add this cell if we need more macro
1915 // cells in the current block or if there is
1916 // a macro cell with the FE index that is
1917 // not yet fully populated
1918 if (missing_macros > 0 ||
1919 remaining_per_cell_batch[this_index] > 0)
1920 {
1921 cell_partition_l2[neighbor->column()] =
1922 partition_l2;
1923 neighbor_neighbor_list.push_back(
1924 neighbor->column());
1925 if (hp_bool == true)
1926 renumbering_fe_index[this_index]
1927 .push_back(neighbor->column());
1928 partition_partition_list[counter] =
1929 neighbor->column();
1930 ++counter;
1931 ++partition_counter;
1932 if (remaining_per_cell_batch
1933 [this_index] == 0 &&
1934 missing_macros > 0)
1935 missing_macros--;
1936 remaining_per_cell_batch[this_index]++;
1937 if (remaining_per_cell_batch
1938 [this_index] ==
1939 vectorization_length)
1940 {
1941 remaining_per_cell_batch[this_index] =
1942 0;
1943 }
1944 if (missing_macros == 0)
1945 {
1946 filled = true;
1947 for (unsigned int fe_ind = 0;
1948 fe_ind < max_fe_index + 1;
1949 ++fe_ind)
1950 if (remaining_per_cell_batch
1951 [fe_ind] != 0)
1952 filled = false;
1953 }
1954 if (filled == true)
1955 break;
1956 }
1957 }
1958 }
1959 }
1960 if (hp_bool == true)
1961 {
1962 // set the renumbering according to the active FE
1963 // index within one partition-partition, as implicitly
1964 // assumed above
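// (In effect, this keeps cells with the same active FE index contiguous
// in the renumbering, so that each cell batch of length
// vectorization_length contains only a single FE index; a batch that is
// only partially filled is recorded in irregular_cells below.)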
1965 cell = counter - partition_counter;
1966 for (unsigned int j = 0; j < max_fe_index + 1; ++j)
1967 {
1968 for (const unsigned int jj :
1969 renumbering_fe_index[j])
1970 renumbering[cell++] = jj;
1971 if (renumbering_fe_index[j].size() %
1972 vectorization_length !=
1973 0)
1974 irregular_cells[renumbering_fe_index[j].size() /
1975 vectorization_length +
1976 n_cell_batches_before] =
1977 renumbering_fe_index[j].size() %
1978 vectorization_length;
1979 n_cell_batches_before +=
1980 (renumbering_fe_index[j].size() +
1981 vectorization_length - 1) /
1982 vectorization_length;
1983 renumbering_fe_index[j].resize(0);
1984 }
1985 }
1986 else
1987 {
1988 n_cell_batches_before +=
1989 partition_counter / vectorization_length;
1990 if (partition_counter % vectorization_length != 0)
1991 {
1992 irregular_cells[n_cell_batches_before] =
1993 partition_counter % vectorization_length;
1994 ++n_cell_batches_before;
1995 }
1996 }
1997 }
1998 cell_partition_data.push_back(n_cell_batches_before);
1999 partition_l2++;
2000 }
2001 neighbor_list = neighbor_neighbor_list;
2002 neighbor_neighbor_list.resize(0);
2003 }
2004 partition_row_index[part + 1] =
2005 partition_row_index[part] + partition_l2;
2006 }
2007 }
2008 if (hp_bool == true)
2009 {
2010 partition_partition_list.swap(renumbering);
2011 }
2012 }
2013
2014
2015
2016 // Function to create coloring on the second layer within each partition.
2017 // This version assumes pre-blocking.
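// (Rough sketch of the idea, not a literal restatement of the code
// below: each block is greedily assigned the smallest color not already
// used by one of its previously colored neighbors within the same
// partition. Blocks of equal color share no connectivity, so they can
// be worked on concurrently without conflicts.)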
2018 void
2019 TaskInfo::make_coloring_within_partitions_pre_blocked(
2020 const DynamicSparsityPattern &connectivity,
2021 const unsigned int partition,
2022 const std::vector<unsigned int> &cell_partition,
2023 const std::vector<unsigned int> &partition_list,
2024 const std::vector<unsigned int> &partition_size,
2025 std::vector<unsigned int> &partition_color_list)
2026 {
2027 const unsigned int n_cell_batches = *(cell_partition_data.end() - 2);
2028 std::vector<unsigned int> cell_color(n_blocks, n_cell_batches);
2029 std::vector<bool> color_finder;
2030
2031 partition_row_index.resize(partition + 1);
2032 cell_partition_data.clear();
2033 unsigned int color_counter = 0, index_counter = 0;
2034 for (unsigned int part = 0; part < partition; ++part)
2035 {
2036 partition_row_index[part] = index_counter;
2037 unsigned int max_color = 0;
2038 for (unsigned int k = partition_size[part];
2039 k < partition_size[part + 1];
2040 k++)
2041 {
2042 unsigned int cell = partition_list[k];
2043 unsigned int n_neighbors = connectivity.row_length(cell);
2044
2045 // In the worst case, each neighbor has a different color. So we
2046 // find at least one available color between 0 and n_neighbors.
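// (For example, a block whose three already colored neighbors carry the
// colors 0, 1, and 2 would at worst receive color 3.)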
2047 color_finder.resize(n_neighbors + 1);
2048 for (unsigned int j = 0; j <= n_neighbors; ++j)
2049 color_finder[j] = true;
2051 connectivity.begin(cell),
2052 end = connectivity.end(cell);
2053 for (; neighbor != end; ++neighbor)
2054 {
2055 // Mark the color that a neighbor within the partition has
2056 // as taken
2057 if (cell_partition[neighbor->column()] == part &&
2058 cell_color[neighbor->column()] <= n_neighbors)
2059 color_finder[cell_color[neighbor->column()]] = false;
2060 }
2061 // Choose the smallest color that is not taken for the block
2062 cell_color[cell] = 0;
2063 while (color_finder[cell_color[cell]] == false)
2064 cell_color[cell]++;
2065 if (cell_color[cell] > max_color)
2066 max_color = cell_color[cell];
2067 }
2068 // Reorder within the partition: first all blocks of color 0, then
2069 // color 1, and so on up to color max. (Note that the smaller the
2070 // color number, the larger the group of blocks.)
2071 for (unsigned int color = 0; color <= max_color; ++color)
2072 {
2073 cell_partition_data.push_back(color_counter);
2074 ++index_counter;
2075 for (unsigned int k = partition_size[part];
2076 k < partition_size[part + 1];
2077 k++)
2078 {
2079 unsigned int cell = partition_list[k];
2080 if (cell_color[cell] == color)
2081 {
2082 partition_color_list[color_counter++] = cell;
2083 }
2084 }
2085 }
2086 }
2087 cell_partition_data.push_back(n_blocks);
2088 partition_row_index[partition] = index_counter;
2089 AssertDimension(color_counter, n_blocks);
2090 }
2091
2092
2093 // Function to create partitioning on the first layer.
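// (Loosely speaking, the routine below seeds a partition, preferring
// cells that have neighbors on remote MPI processes, then grows it
// along the connectivity graph while padding its size to a multiple of
// cluster_size, and starts the next partition from the newly added
// layer of neighbors.)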
2094 void
2095 TaskInfo::make_partitioning(const DynamicSparsityPattern &connectivity,
2096 const unsigned int cluster_size,
2097 std::vector<unsigned int> &cell_partition,
2098 std::vector<unsigned int> &partition_list,
2099 std::vector<unsigned int> &partition_size,
2100 unsigned int &partition) const
2101
2102 {
2103 // For each block of cells, this variable stores which partition the
2104 // block belongs to. All entries are initialized to
2105 // numbers::invalid_unsigned_int to mark them as not yet assigned to a partition.
2106 // std::vector<unsigned int> cell_partition (n_active_cells,
2107 // numbers::invalid_unsigned_int);
2108 // List of cells in previous partition
2109 std::vector<unsigned int> neighbor_list;
2110 // List of cells in current partition for use as neighbors in next
2111 // partition
2112 std::vector<unsigned int> neighbor_neighbor_list;
2113
2114 // Element j of this variable holds the old number of the block that
2115 // should become the j-th block in the new numbering.
2116 // std::vector<unsigned int> partition_list(n_active_cells,0);
2117
2118 // This vector points to the start of each partition.
2119 // std::vector<unsigned int> partition_size(2,0);
2120
2121 partition = 0;
2122 unsigned int counter = 0;
2123 unsigned int start_nonboundary =
2124 cell_partition_data.size() == 5 ?
2125 vectorization_length *
2126 (cell_partition_data[2] - cell_partition_data[1]) :
2127 0;
2128
2129 const unsigned int n_cell_batches = *(cell_partition_data.end() - 2);
2130 if (n_cell_batches == 0)
2131 return;
2132 if (scheme == color)
2133 start_nonboundary = n_cell_batches;
2134 if (scheme == partition_color ||
2135 scheme == color) // blocking_connectivity == true
2136 start_nonboundary = ((start_nonboundary + block_size - 1) / block_size);
2137 unsigned int n_blocks;
2138 if (scheme == partition_color ||
2139 scheme == color) // blocking_connectivity == true
2140 n_blocks = this->n_blocks;
2141 else
2142 n_blocks = n_active_cells;
2143
2144 if (start_nonboundary > n_blocks)
2145 start_nonboundary = n_blocks;
2146
2147
2148 unsigned int start_up = 0;
2149 bool work = true;
2150 unsigned int remainder = cluster_size;
2151
2152 // this performs a classical breadth-first search in the connectivity
2153 // graph of the cells under the restriction that the size of the
2154 // partitions should be a multiple of the given block size
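// (Illustrative outline of the loop below, not a literal restatement:
// while unassigned cells remain, seed a new partition, pull neighbors
// of already assigned cells until the partition size is a multiple of
// cluster_size, then continue layer by layer; if the graph turns out to
// be disconnected, restart from the next unassigned cell.)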
2155 while (work)
2156 {
2157 // put the cells with neighbors on remote MPI processes up front
2158 if (start_nonboundary > 0)
2159 {
2160 for (unsigned int cell = 0; cell < start_nonboundary; ++cell)
2161 {
2162 const unsigned int cell_nn = cell;
2163 cell_partition[cell_nn] = partition;
2164 neighbor_list.push_back(cell_nn);
2165 partition_list[counter++] = cell_nn;
2166 partition_size.back()++;
2167 }
2168 start_nonboundary = 0;
2169 remainder -= (start_nonboundary % cluster_size);
2170 if (remainder == cluster_size)
2171 remainder = 0;
2172 }
2173 else
2174 {
2175 // To start up, assign the start_up cell to the current partition
2176 // and list all its neighbors.
2177 cell_partition[start_up] = partition;
2178 neighbor_list.push_back(start_up);
2179 partition_list[counter++] = start_up;
2180 partition_size.back()++;
2181 ++start_up;
2182 remainder--;
2183 if (remainder == cluster_size)
2184 remainder = 0;
2185 }
2186 int index_before = neighbor_list.size(), index = index_before,
2187 index_stop = 0;
2188 while (remainder > 0)
2189 {
2190 if (index == index_stop)
2191 {
2192 index = neighbor_list.size();
2193 if (index == index_before)
2194 {
2195 neighbor_list.resize(0);
2196 goto not_connect;
2197 }
2198 index_stop = index_before;
2199 index_before = index;
2200 }
2201 index--;
2202 unsigned int additional = neighbor_list[index];
2203 DynamicSparsityPattern::iterator neighbor =
2204 connectivity.begin(additional),
2205 end =
2206 connectivity.end(additional);
2207 for (; neighbor != end; ++neighbor)
2208 {
2209 if (cell_partition[neighbor->column()] ==
2210 numbers::invalid_unsigned_int)
2211 {
2212 partition_size.back()++;
2213 cell_partition[neighbor->column()] = partition;
2214 neighbor_list.push_back(neighbor->column());
2215 partition_list[counter++] = neighbor->column();
2216 remainder--;
2217 if (remainder == 0)
2218 break;
2219 }
2220 }
2221 }
2222
2223 while (neighbor_list.size() > 0)
2224 {
2225 ++partition;
2226
2227 // counter for number of cells so far in current partition
2228 unsigned int partition_counter = 0;
2229
2230 // Mark the start of the new partition
2231 partition_size.push_back(partition_size.back());
2232
2233 // Loop through the list of cells in previous partition and put
2234 // all their neighbors in current partition
2235 for (const unsigned int cell : neighbor_list)
2236 {
2237 Assert(cell_partition[cell] == partition - 1,
2238 ExcInternalError());
2239 auto neighbor = connectivity.begin(cell);
2240 const auto end = connectivity.end(cell);
2241 for (; neighbor != end; ++neighbor)
2242 {
2243 if (cell_partition[neighbor->column()] ==
2244 numbers::invalid_unsigned_int)
2245 {
2246 partition_size.back()++;
2247 cell_partition[neighbor->column()] = partition;
2248
2249 // collect the cells of the current partition for
2250 // use as neighbors in next partition
2251 neighbor_neighbor_list.push_back(neighbor->column());
2252 partition_list[counter++] = neighbor->column();
2253 ++partition_counter;
2254 }
2255 }
2256 }
2257 remainder = cluster_size - (partition_counter % cluster_size);
2258 if (remainder == cluster_size)
2259 remainder = 0;
2260 int index_stop = 0;
2261 int index_before = neighbor_neighbor_list.size(),
2262 index = index_before;
2263 while (remainder > 0)
2264 {
2265 if (index == index_stop)
2266 {
2267 index = neighbor_neighbor_list.size();
2268 if (index == index_before)
2269 {
2270 neighbor_neighbor_list.resize(0);
2271 break;
2272 }
2273 index_stop = index_before;
2274 index_before = index;
2275 }
2276 index--;
2277 unsigned int additional = neighbor_neighbor_list[index];
2278 DynamicSparsityPattern::iterator neighbor =
2279 connectivity.begin(
2280 additional),
2281 end = connectivity.end(
2282 additional);
2283 for (; neighbor != end; ++neighbor)
2284 {
2285 if (cell_partition[neighbor->column()] ==
2286 numbers::invalid_unsigned_int)
2287 {
2288 partition_size.back()++;
2289 cell_partition[neighbor->column()] = partition;
2290 neighbor_neighbor_list.push_back(neighbor->column());
2291 partition_list[counter++] = neighbor->column();
2292 remainder--;
2293 if (remainder == 0)
2294 break;
2295 }
2296 }
2297 }
2298
2299 neighbor_list = neighbor_neighbor_list;
2300 neighbor_neighbor_list.resize(0);
2301 }
2302 not_connect:
2303 // Check whether the graph is disconnected; if so, we must start
2304 // another partition from a cell that has not been assigned yet.
2305 work = false;
2306 for (unsigned int j = start_up; j < n_blocks; ++j)
2307 if (cell_partition[j] == numbers::invalid_unsigned_int)
2308 {
2309 start_up = j;
2310 work = true;
2311 if (remainder == 0)
2312 remainder = cluster_size;
2313 break;
2314 }
2315 }
2316 if (remainder != 0)
2317 ++partition;
2318
2319 AssertDimension(partition_size[partition], n_blocks);
2320 }
2321
2322
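// (The routine below derives the scheduling bookkeeping from the number
// of partitions: roughly half of them are counted as even and half as
// odd, and from these counts the number of blocked and non-blocked
// workers is determined, both globally and, for the partition-partition
// scheme, per partition row via partition_row_index.)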
2323 void
2324 TaskInfo::update_task_info(const unsigned int partition)
2325 {
2326 evens = (partition + 1) / 2;
2327 odds = partition / 2;
2328 n_blocked_workers = odds - (odds + evens + 1) % 2;
2329 n_workers = evens + odds - n_blocked_workers;
2330 // From here on, the quantities are only used for the partition-partition option.
2331 partition_evens.resize(partition);
2332 partition_odds.resize(partition);
2333 partition_n_blocked_workers.resize(partition);
2334 partition_n_workers.resize(partition);
2335 for (unsigned int part = 0; part < partition; ++part)
2336 {
2337 partition_evens[part] =
2338 (partition_row_index[part + 1] - partition_row_index[part] + 1) / 2;
2339 partition_odds[part] =
2340 (partition_row_index[part + 1] - partition_row_index[part]) / 2;
2341 partition_n_blocked_workers[part] =
2342 partition_odds[part] -
2343 (partition_odds[part] + partition_evens[part] + 1) % 2;
2344 partition_n_workers[part] = partition_evens[part] +
2345 partition_odds[part] -
2346 partition_n_blocked_workers[part];
2347 }
2348 }
2349 } // namespace MatrixFreeFunctions
2350} // namespace internal
2351
2352
2353
2354// explicit instantiations of template functions
2355template void
2356internal::MatrixFreeFunctions::TaskInfo::print_memory_statistics<std::ostream>(
2357 std::ostream &,
2358 const std::size_t) const;
2359 template void
2360 internal::MatrixFreeFunctions::TaskInfo::print_memory_statistics<
2361 ConditionalOStream>(ConditionalOStream &, const std::size_t) const;
2362
2363