work_stream.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2009 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#ifndef dealii_work_stream_h
16# define dealii_work_stream_h
17
18
19# include <deal.II/base/config.h>
20
28
29# ifdef DEAL_II_WITH_TBB
30# ifdef DEAL_II_TBB_WITH_ONEAPI
31# include <tbb/parallel_pipeline.h>
32# else
33# include <tbb/pipeline.h>
34# endif
35# endif
36
37# include <functional>
38# include <iterator>
39# include <memory>
40# include <utility>
41# include <vector>
42
43DEAL_II_NAMESPACE_OPEN
44
45
46
159namespace WorkStream
160{
165 namespace internal
166 {
172 template <typename Iterator, typename ScratchData, typename CopyData>
173 struct ScratchAndCopyDataObjects
174 {
175 std::unique_ptr<ScratchData> scratch_data;
176 std::unique_ptr<CopyData> copy_data;
177 bool currently_in_use;
178
185
186 ScratchAndCopyDataObjects(std::unique_ptr<ScratchData> &&p,
187 std::unique_ptr<CopyData> &&q,
188 const bool in_use)
189 : scratch_data(std::move(p))
190 , copy_data(std::move(q))
191 , currently_in_use(in_use)
192 {}
193
194 // Provide a copy constructor that actually doesn't copy the
195 // internal state. This makes ScratchAndCopyDataObjects easier
196 // to handle with STL containers.
197 ScratchAndCopyDataObjects(const ScratchAndCopyDataObjects &)
198 : currently_in_use(false)
199 {}
200 };
201
207 template <typename ScratchData>
208 struct ScratchDataObject
209 {
210 std::unique_ptr<ScratchData> scratch_data;
211 bool currently_in_use;
212
216 ScratchDataObject()
217 : currently_in_use(false)
218 {}
219
220 ScratchDataObject(std::unique_ptr<ScratchData> &&p, const bool in_use)
221 : scratch_data(std::move(p))
222 , currently_in_use(in_use)
223 {}
224
225 ScratchDataObject(ScratchData *p, const bool in_use)
226 : scratch_data(p)
227 , currently_in_use(in_use)
228 {}
229
230 // Provide a copy constructor that actually doesn't copy the
231 // internal state. This makes ScratchDataObject objects easier
232 // to handle with STL containers.
233 ScratchDataObject(const ScratchDataObject &)
234 : currently_in_use(false)
235 {}
236
237 ScratchDataObject(ScratchDataObject &&o) noexcept = default;
238 };
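 // The following is only an illustrative sketch, not part of this header:
 // it shows the acquire/release pattern that the run() implementations
 // below apply to lists of these objects. The names 'MyScratch' and
 // 'sample' are placeholders.
 //
 // @code
 // std::list<ScratchDataObject<MyScratch>> scratch_list;
 //
 // MyScratch *scratch = nullptr;
 // for (auto &p : scratch_list) // look for an entry nobody uses right now
 //   if (p.currently_in_use == false)
 //     {
 //       scratch            = p.scratch_data.get();
 //       p.currently_in_use = true;
 //       break;
 //     }
 // if (scratch == nullptr) // none found: create one, marked as used
 //   {
 //     scratch_list.emplace_back(std::make_unique<MyScratch>(sample), true);
 //     scratch = scratch_list.back().scratch_data.get();
 //   }
 // // ... work with *scratch, then reset currently_in_use to false ...
 // @endcode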
239
240# ifdef DEAL_II_WITH_TBB
255 namespace tbb_no_coloring
256 {
260 template <typename Iterator, typename ScratchData, typename CopyData>
261 class IteratorRangeToItemStream
262 {
263 public:
270 struct ItemType
271 {
276 using ScratchDataList = std::list<ScratchDataObject<ScratchData>>;
277
282 std::vector<Iterator> iterators;
283
289 std::vector<CopyData> copy_datas;
290
296 unsigned int n_iterators;
297
330 Threads::ThreadLocalStorage<ScratchDataList> *scratch_data;
335 const ScratchData *sample_scratch_data;
336
342 bool currently_in_use;
343
348 ItemType()
349 : n_iterators(0)
350 , scratch_data(nullptr)
351 , sample_scratch_data(nullptr)
352 , currently_in_use(false)
353 {}
354 };
355
356
362 IteratorRangeToItemStream(const Iterator &begin,
363 const Iterator &end,
364 const unsigned int buffer_size,
365 const unsigned int chunk_size,
366 const ScratchData &sample_scratch_data,
367 const CopyData &sample_copy_data)
368 : remaining_iterator_range(begin, end)
369 , item_buffer(buffer_size)
370 , sample_scratch_data(sample_scratch_data)
371 , chunk_size(chunk_size)
372 {
373 // initialize the elements of the ring buffer
374 for (auto &item : item_buffer)
375 {
376 Assert(item.n_iterators == 0, ExcInternalError());
377
378 item.iterators.resize(chunk_size,
379 remaining_iterator_range.second);
380 item.scratch_data = &thread_local_scratch;
381 item.sample_scratch_data = &sample_scratch_data;
382 item.copy_datas.resize(chunk_size, sample_copy_data);
383 item.currently_in_use = false;
384 }
385 }
386
387
391 ItemType *
392 get_item()
393 {
394 // find first unused item. we know that there must be one
395 // because we have set the maximal number of tokens in flight
396 // and have set the ring buffer to have exactly this size. so
397 // if this function is called, we know that less than the
398 // maximal number of items is currently in flight
399 //
400 // note that we need not lock access to this array since
401 // the current stage is run sequentially and we can therefore
402 // enter the following block only once at any given time.
403 // thus, there can be no race condition between checking that
404 // a flag is false and setting it to true. (there may be
405 // another thread where we release items and set 'false'
406 // flags to 'true', but that too does not produce any
407 // problems)
408 ItemType *current_item = nullptr;
409 for (unsigned int i = 0; i < item_buffer.size(); ++i)
410 if (item_buffer[i].currently_in_use == false)
411 {
412 item_buffer[i].currently_in_use = true;
413 current_item = &item_buffer[i];
414 break;
415 }
416 Assert(current_item != nullptr,
417 ExcMessage("This can't be. There must be a free item!"));
418
419 // initialize the next item. it may
420 // consist of at most chunk_size
421 // elements
422 current_item->n_iterators = 0;
423 while ((remaining_iterator_range.first !=
424 remaining_iterator_range.second) &&
425 (current_item->n_iterators < chunk_size))
426 {
427 current_item->iterators[current_item->n_iterators] =
428 remaining_iterator_range.first;
429
430 ++remaining_iterator_range.first;
431 ++current_item->n_iterators;
432 }
433
434 if (current_item->n_iterators == 0)
435 // there were no items
436 // left. terminate the pipeline
437 return nullptr;
438 else
439 return current_item;
440 }
441
442 private:
447 std::pair<Iterator, Iterator> remaining_iterator_range;
448
452 std::vector<ItemType> item_buffer;
453
486 Threads::ThreadLocalStorage<typename ItemType::ScratchDataList> thread_local_scratch;
492 const ScratchData &sample_scratch_data;
493
500 const unsigned int chunk_size;
501 };
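 // Illustrative sketch only (not part of this header): this is roughly how
 // the serial first stage of the pipeline below drives this class. The
 // names 'CellIterator', 'Scratch', 'Copy', and the variables passed to the
 // constructor are placeholders.
 //
 // @code
 // IteratorRangeToItemStream<CellIterator, Scratch, Copy> stream(
 //   begin, end, queue_length, chunk_size, sample_scratch, sample_copy);
 //
 // while (auto *item = stream.get_item()) // nullptr ends the stream
 //   {
 //     // 'item' bundles up to chunk_size iterators plus one CopyData each;
 //     // a worker processes it, and the ring-buffer slot is recycled once
 //     // item->currently_in_use is reset to false.
 //   }
 // @endcode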
502
503
504
505 template <typename Worker,
506 typename Copier,
507 typename Iterator,
508 typename ScratchData,
509 typename CopyData>
510 void
511 run(const Iterator &begin,
512 const std_cxx20::type_identity_t<Iterator> &end,
513 Worker worker,
514 Copier copier,
515 const ScratchData &sample_scratch_data,
516 const CopyData &sample_copy_data,
517 const unsigned int queue_length,
518 const unsigned int chunk_size)
519 {
520 using ItemType = typename IteratorRangeToItemStream<Iterator,
521 ScratchData,
522 CopyData>::ItemType;
523
524 // Define the three stages of the pipeline:
525
526 //
527 // ----- Stage 1 -----
528 //
529 // The first stage is the one that provides us with chunks of data
530 // to work on (the stream of "items"). This stage will run sequentially.
531 IteratorRangeToItemStream<Iterator, ScratchData, CopyData>
532 iterator_range_to_item_stream(begin,
533 end,
534 queue_length,
535 chunk_size,
536 sample_scratch_data,
537 sample_copy_data);
538 auto item_generator = [&](tbb::flow_control &fc) -> ItemType * {
539 if (const auto item = iterator_range_to_item_stream.get_item())
540 return item;
541 else
542 {
543 fc.stop();
544 return nullptr;
545 }
546 };
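 // A minimal, self-contained sketch of the same serial-parallel-serial
 // filter structure (illustration only, not part of this header; it
 // assumes the oneAPI TBB spelling tbb::filter_mode used in one of the
 // branches below):
 //
 // @code
 // std::vector<int> values(100, 1);
 // std::size_t      next = 0;
 // int              sum  = 0;
 // tbb::parallel_pipeline(
 //   /*max_number_of_live_tokens=*/4,
 //   tbb::make_filter<void, int *>( // stage 1: serial source
 //     tbb::filter_mode::serial_in_order,
 //     [&](tbb::flow_control &fc) -> int * {
 //       if (next == values.size())
 //         {
 //           fc.stop();
 //           return nullptr;
 //         }
 //       return &values[next++];
 //     }) &
 //   tbb::make_filter<int *, int *>( // stage 2: parallel work
 //     tbb::filter_mode::parallel,
 //     [](int *p) { *p *= 2; return p; }) &
 //   tbb::make_filter<int *, void>( // stage 3: serial sink
 //     tbb::filter_mode::serial_in_order,
 //     [&](int *p) { sum += *p; }));
 // @endcode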
547
548 //
549 // ----- Stage 2 -----
550 //
551 // The second stage is the one that does the actual work. This is the
552 // stage that runs in parallel
553 auto item_worker =
554 [worker =
555 std::function<void(const Iterator &, ScratchData &, CopyData &)>(
556 worker),
557 copier_exists =
558 static_cast<bool>(std::function<void(const CopyData &)>(copier))](
559 ItemType *current_item) {
560 // we need to find an unused scratch data object in the list that
561 // corresponds to the current thread and then mark it as used. if
562 // we can't find one, create one
563 //
564 // as discussed in the documentation of the
565 // IteratorRangeToItemStream::scratch_data variable, there is no
566 // need to synchronize access to this variable using a mutex
567 // as long as we have no yield-point in between. this means that
568 // we can't take an iterator into the list now and expect it to
569 // still be valid after calling the worker, but we at least do
570 // not have to lock the following section
571 ScratchData *scratch_data = nullptr;
572 {
573 // see if there is an unused object. if so, grab it and mark
574 // it as used
575 for (auto &p : current_item->scratch_data->get())
576 if (p.currently_in_use == false)
577 {
578 scratch_data = p.scratch_data.get();
579 p.currently_in_use = true;
580
581 break;
582 }
583
584 // if no object was found, create one and mark it as used
585 if (scratch_data == nullptr)
586 {
587 scratch_data =
588 new ScratchData(*current_item->sample_scratch_data);
589 current_item->scratch_data->get().emplace_back(scratch_data,
590 true);
591 }
592 };
593
594 // then call the worker function on each element of the chunk we
595 // were given. since these worker functions are called on separate
596 // threads, nothing good can happen if they throw an exception and
597 // we are best off catching it and showing an error message
598 for (unsigned int i = 0; i < current_item->n_iterators; ++i)
599 {
600 try
601 {
602 if (worker)
603 worker(current_item->iterators[i],
604 *scratch_data,
605 current_item->copy_datas[i]);
606 }
607 catch (const std::exception &exc)
608 {
609 Threads::internal::handle_std_exception(exc);
610 }
611 catch (...)
612 {
613 Threads::internal::handle_unknown_exception();
614 }
615 }
616
617 // finally mark the scratch object as unused again. as above, there
618 // is no need to lock anything here since the object we work on
619 // is thread-local
620 for (auto &p : current_item->scratch_data->get())
621 if (p.scratch_data.get() == scratch_data)
622 {
623 Assert(p.currently_in_use == true, ExcInternalError());
624 p.currently_in_use = false;
625
626 break;
627 }
628
629 // if there is no copier, mark current item as usable again
630 if (copier_exists == false)
631 current_item->currently_in_use = false;
632
633
634 // Then return the original pointer
635 // to the now modified object. The copier will work on it next.
636 return current_item;
637 };
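 // For illustration only (not part of this header): the reason the loops
 // above need no mutex is that Threads::ThreadLocalStorage hands every
 // thread its own copy of the stored object. A stripped-down version of
 // the pattern, with 'MyScratch' as a placeholder type:
 //
 // @code
 // Threads::ThreadLocalStorage<std::list<ScratchDataObject<MyScratch>>> tls;
 //
 // // inside a task running on some thread:
 // auto &my_list = tls.get(); // this thread's private list
 // // scan/modify my_list without locking, as long as the task does not
 // // yield to the scheduler while holding pointers into it
 // @endcode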
638
639 //
640 // ----- Stage 3 -----
641 //
642 // The last stage is the one that copies data from the CopyData objects
643 // to the final destination. This stage runs sequentially again.
644 auto item_copier = [copier = std::function<void(const CopyData &)>(
645 copier)](ItemType *current_item) {
646 if (copier)
647 {
648 // Initiate copying data. For the same reasons as in the worker
649 // class above, catch exceptions rather than letting them
650 // propagate into unknown territories:
651 for (unsigned int i = 0; i < current_item->n_iterators; ++i)
652 {
653 try
654 {
655 copier(current_item->copy_datas[i]);
656 }
657 catch (const std::exception &exc)
658 {
659 Threads::internal::handle_std_exception(exc);
660 }
661 catch (...)
662 {
663 Threads::internal::handle_unknown_exception();
664 }
665 }
666 }
667 // mark current item as usable again
668 current_item->currently_in_use = false;
669 };
670
671
672 // Now we just have to set up the pipeline and run it:
673 auto tbb_item_stream_filter = tbb::make_filter<void, ItemType *>(
674# ifdef DEAL_II_TBB_WITH_ONEAPI
675 tbb::filter_mode::serial_in_order,
676# else
677 tbb::filter::serial,
678# endif
679 item_generator);
680
681 auto tbb_worker_filter = tbb::make_filter<ItemType *, ItemType *>(
682# ifdef DEAL_II_TBB_WITH_ONEAPI
683 tbb::filter_mode::parallel,
684# else
685 tbb::filter::parallel,
686# endif
687 item_worker);
688
689 auto tbb_copier_filter = tbb::make_filter<ItemType *, void>(
690# ifdef DEAL_II_TBB_WITH_ONEAPI
691 tbb::filter_mode::serial_in_order,
692# else
693 tbb::filter::serial,
694# endif
695 item_copier);
696
697 tbb::parallel_pipeline(queue_length,
698 tbb_item_stream_filter & tbb_worker_filter &
699 tbb_copier_filter);
700 }
701
702 } // namespace tbb_no_coloring
703# endif // DEAL_II_WITH_TBB
704
705
706
707# ifdef DEAL_II_WITH_TASKFLOW
715 namespace taskflow_no_coloring
716 {
723 template <typename Worker,
724 typename Copier,
725 typename Iterator,
726 typename ScratchData,
727 typename CopyData>
728 void
729 run(const Iterator &begin,
730 const std_cxx20::type_identity_t<Iterator> &end,
731 Worker worker,
732 Copier copier,
733 const ScratchData &sample_scratch_data,
734 const CopyData &sample_copy_data,
735 const unsigned int /*queue_length*/ = 2 *
736 MultithreadInfo::n_threads(),
737 const unsigned int /*chunk_size*/ = 8)
738
739 {
740 tf::Executor &executor = MultithreadInfo::get_taskflow_executor();
741 tf::Taskflow taskflow;
742
743 using ScratchDataList = std::list<ScratchDataObject<ScratchData>>;
744
745 Threads::ThreadLocalStorage<ScratchDataList>
746 thread_safe_scratch_data_list;
747
748 tf::Task last_copier;
749
750 // idx is used to connect each worker to its copier, as direct
751 // communication between tasks is not supported. It does this by
752 // providing a unique index into the vector of pointers copy_datas
753 // at which the copy data object filled by worker task #idx is stored.
754 unsigned int idx = 0;
755
756 std::vector<std::unique_ptr<CopyData>> copy_datas;
757
758 // Generate a static task graph. Here we generate a task for each cell
759 // that will be worked on. The tasks are not executed until all of them
760 // are created; this code runs sequentially.
761 for (Iterator it = begin; it != end; ++it, ++idx)
762 {
763 copy_datas.emplace_back();
764 // Create a worker task.
765 auto worker_task =
766 taskflow
767 .emplace([it,
768 idx,
769 &thread_safe_scratch_data_list,
770 &sample_scratch_data,
771 &sample_copy_data,
772 &copy_datas,
773 &worker]() {
774 ScratchData *scratch_data = nullptr;
775
776 ScratchDataList &scratch_data_list =
777 thread_safe_scratch_data_list.get();
778 // See if there is an unused object. if so,
779 // grab it and mark it as used.
780 for (auto &p : scratch_data_list)
781 {
782 if (p.currently_in_use == false)
783 {
784 scratch_data = p.scratch_data.get();
785 p.currently_in_use = true;
786 break;
787 }
788 }
789 // If no element in the list was found, create
790 // one and mark it as used.
791 if (scratch_data == nullptr)
792 {
793 scratch_data_list.emplace_back(
794 std::make_unique<ScratchData>(sample_scratch_data),
795 true);
796 scratch_data =
797 scratch_data_list.back().scratch_data.get();
798 }
799
800 // Create a unique copy data object where this
801 // worker's work will be stored.
802 auto &copy = copy_datas[idx];
803 copy = std::make_unique<CopyData>(sample_copy_data);
804 worker(it, *scratch_data, *copy.get());
805
806 // Find our currently used scratch data and
807 // mark it as unused.
808 for (auto &p : scratch_data_list)
809 {
810 if (p.scratch_data.get() == scratch_data)
811 {
812 Assert(p.currently_in_use == true,
813 ExcInternalError());
814 p.currently_in_use = false;
815 }
816 }
817 })
818 .name("worker");
819
820 // Create a copier task. This task is a separate object from the
821 // worker task.
822 tf::Task copier_task = taskflow
823 .emplace([idx, &copy_datas, &copier]() {
824 copier(*copy_datas[idx].get());
825 copy_datas[idx].reset();
826 })
827 .name("copy");
828
829 // Ensure the copy task runs after the worker task.
830 worker_task.precede(copier_task);
831
832 // Ensure that only one copy task can run at a time. The code below
833 // makes each copy task wait until the previous one has finished
834 // before it can start
835 if (!last_copier.empty())
836 last_copier.precede(copier_task);
837
838 // Keep a handle to the last copier. Tasks in taskflow are
839 // basically handles to internally stored data, so this does not
840 // perform a copy:
841 last_copier = copier_task;
842 }
843
844 // Now we run all the tasks in the task graph. They will be run in
845 // parallel and are eligible to run when their dependencies established
846 // above are met.
847 executor.run(taskflow).wait();
848 }
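 // A minimal sketch of the Taskflow mechanics used above (illustration
 // only, not part of this header): tasks are first emplace()d into a
 // graph, ordered with precede(), and only executed once the graph is
 // handed to an executor.
 //
 // @code
 // tf::Executor executor;
 // tf::Taskflow taskflow;
 //
 // tf::Task work = taskflow.emplace([]() { /* compute something */ });
 // tf::Task copy = taskflow.emplace([]() { /* copy the result */ });
 // work.precede(copy); // 'copy' only starts once 'work' has finished
 //
 // executor.run(taskflow).wait(); // build first, then run the whole graph
 // @endcode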
849 } // namespace taskflow_no_coloring
850# endif
851
858 namespace sequential
859 {
863 template <typename Worker,
864 typename Copier,
865 typename Iterator,
866 typename ScratchData,
867 typename CopyData>
868 void
869 run(const Iterator &begin,
870 const std_cxx20::type_identity_t<Iterator> &end,
871 Worker worker,
872 Copier copier,
873 const ScratchData &sample_scratch_data,
874 const CopyData &sample_copy_data)
875 {
876 // need to copy the sample since it is marked const
877 ScratchData scratch_data = sample_scratch_data;
878 CopyData copy_data = sample_copy_data; // NOLINT
879
880 // Optimization: Check if the functions are not the zero function. To
881 // check zero-ness, create a C++ function out of it:
882 const bool have_worker =
883 (static_cast<const std::function<
884 void(const Iterator &, ScratchData &, CopyData &)> &>(worker)) !=
885 nullptr;
886 const bool have_copier =
887 (static_cast<const std::function<void(const CopyData &)> &>(
888 copier)) != nullptr;
889
890 // Finally loop over all items and perform the necessary work:
891 for (Iterator i = begin; i != end; ++i)
892 {
893 if (have_worker)
894 worker(i, scratch_data, copy_data);
895 if (have_copier)
896 copier(copy_data);
897 }
898 }
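 // Illustration of the "zero function" test used above (a sketch, not
 // part of this header): a default-constructed std::function compares
 // equal to nullptr, whereas one wrapping any callable -- even a no-op
 // lambda -- does not.
 //
 // @code
 // std::function<void(const int &)> empty_copier;
 // std::function<void(const int &)> real_copier = [](const int &) {};
 //
 // const bool have_empty = (empty_copier != nullptr); // false
 // const bool have_real  = (real_copier != nullptr);  // true
 // @endcode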
899
900
901
905 template <typename Worker,
906 typename Copier,
907 typename Iterator,
908 typename ScratchData,
909 typename CopyData>
910 void
911 run(const std::vector<std::vector<Iterator>> &colored_iterators,
912 Worker worker,
913 Copier copier,
914 const ScratchData &sample_scratch_data,
915 const CopyData &sample_copy_data)
916 {
917 // need to copy the sample since it is marked const
918 ScratchData scratch_data = sample_scratch_data;
919 CopyData copy_data = sample_copy_data; // NOLINT
920
921 // Optimization: Check if the functions are not the zero function. To
922 // check zero-ness, create a C++ function out of it:
923 const bool have_worker =
924 (static_cast<const std::function<
925 void(const Iterator &, ScratchData &, CopyData &)> &>(worker)) !=
926 nullptr;
927 const bool have_copier =
928 (static_cast<const std::function<void(const CopyData &)> &>(
929 copier)) != nullptr;
930
931 // Finally loop over all items and perform the necessary work:
932 for (unsigned int color = 0; color < colored_iterators.size(); ++color)
933 if (colored_iterators[color].size() > 0)
934 for (auto &it : colored_iterators[color])
935 {
936 if (have_worker)
937 worker(it, scratch_data, copy_data);
938 if (have_copier)
939 copier(copy_data);
940 }
941 }
942
943 } // namespace sequential
944
945
946
947# ifdef DEAL_II_WITH_TBB
955 namespace tbb_colored
956 {
962 template <typename Iterator, typename ScratchData, typename CopyData>
963 class WorkerAndCopier
964 {
965 public:
969 WorkerAndCopier(
970 const std::function<void(const Iterator &, ScratchData &, CopyData &)>
971 &worker,
972 const std::function<void(const CopyData &)> &copier,
973 const ScratchData &sample_scratch_data,
974 const CopyData &sample_copy_data)
975 : worker(worker)
976 , copier(copier)
977 , sample_scratch_data(sample_scratch_data)
978 , sample_copy_data(sample_copy_data)
979 {}
980
981
986 void
987 operator()(const tbb::blocked_range<
988 typename std::vector<Iterator>::const_iterator> &range)
989 {
990 // we need to find an unused scratch and corresponding copy
991 // data object in the list that corresponds to the current
992 // thread and then mark it as used. If we can't find one,
993 // create one. As discussed in the documentation of the
994 // IteratorRangeToItemStream::scratch_data variable,
995 // there is no need to synchronize access to this variable
996 // using a mutex as long as we have no yield-point in between.
997 // This means that we can't take an iterator into the list
998 // now and expect it to still be valid after calling the worker,
999 // but we at least do not have to lock the following section.
1000 ScratchData *scratch_data = nullptr;
1001 CopyData *copy_data = nullptr;
1002 {
1003 ScratchAndCopyDataList &scratch_and_copy_data_list = data.get();
1004
1005 // see if there is an unused object. if so, grab it and mark
1006 // it as used
1007 for (typename ScratchAndCopyDataList::iterator p =
1008 scratch_and_copy_data_list.begin();
1009 p != scratch_and_copy_data_list.end();
1010 ++p)
1011 if (p->currently_in_use == false)
1012 {
1013 scratch_data = p->scratch_data.get();
1014 copy_data = p->copy_data.get();
1015 p->currently_in_use = true;
1016 break;
1017 }
1018
1019 // if no element in the list was found, create one and mark it as
1020 // used
1021 if (scratch_data == nullptr)
1022 {
1023 Assert(copy_data == nullptr, ExcInternalError());
1024
1025 scratch_and_copy_data_list.emplace_back(
1026 std::make_unique<ScratchData>(sample_scratch_data),
1027 std::make_unique<CopyData>(sample_copy_data),
1028 true);
1029 scratch_data =
1030 scratch_and_copy_data_list.back().scratch_data.get();
1031 copy_data = scratch_and_copy_data_list.back().copy_data.get();
1032 }
1033 }
1034
1035 // then call the worker and copier functions on each
1036 // element of the chunk we were given.
1037 for (typename std::vector<Iterator>::const_iterator p = range.begin();
1038 p != range.end();
1039 ++p)
1040 {
1041 try
1042 {
1043 if (worker)
1044 worker(*p, *scratch_data, *copy_data);
1045 if (copier)
1046 copier(*copy_data);
1047 }
1048 catch (const std::exception &exc)
1049 {
1050 Threads::internal::handle_std_exception(exc);
1051 }
1052 catch (...)
1053 {
1054 Threads::internal::handle_unknown_exception();
1055 }
1056 }
1057
1058 // finally mark the scratch object as unused again. as above, there
1059 // is no need to lock anything here since the object we work on
1060 // is thread-local
1061 {
1062 ScratchAndCopyDataList &scratch_and_copy_data_list = data.get();
1063
1064 for (typename ScratchAndCopyDataList::iterator p =
1065 scratch_and_copy_data_list.begin();
1066 p != scratch_and_copy_data_list.end();
1067 ++p)
1068 if (p->scratch_data.get() == scratch_data)
1069 {
1070 Assert(p->currently_in_use == true, ExcInternalError());
1071 p->currently_in_use = false;
1072 }
1073 }
1074 }
1075
1076 private:
1077 using ScratchAndCopyDataObjects = typename internal::
1078 ScratchAndCopyDataObjects<Iterator, ScratchData, CopyData>;
1079
1084 using ScratchAndCopyDataList = std::list<ScratchAndCopyDataObjects>;
1085
1087 Threads::ThreadLocalStorage<ScratchAndCopyDataList> data;
1092 const std::function<void(const Iterator &, ScratchData &, CopyData &)>
1093 worker;
1094
1099 const std::function<void(const CopyData &)> copier;
1100
1104 const ScratchData &sample_scratch_data;
1105 const CopyData &sample_copy_data;
1106 };
1107
1111 template <typename Worker,
1112 typename Copier,
1113 typename Iterator,
1114 typename ScratchData,
1115 typename CopyData>
1116 void
1117 run(const std::vector<std::vector<Iterator>> &colored_iterators,
1118 Worker worker,
1119 Copier copier,
1120 const ScratchData &sample_scratch_data,
1121 const CopyData &sample_copy_data,
1122 const unsigned int chunk_size)
1123 {
1124 // loop over the various colors of what we're given
1125 for (unsigned int color = 0; color < colored_iterators.size(); ++color)
1126 if (colored_iterators[color].size() > 0)
1127 {
1128 using WorkerAndCopier = internal::tbb_colored::
1129 WorkerAndCopier<Iterator, ScratchData, CopyData>;
1130
1131 WorkerAndCopier worker_and_copier(worker,
1132 copier,
1133 sample_scratch_data,
1134 sample_copy_data);
1135
1136 parallel::internal::parallel_for(
1137 colored_iterators[color].begin(),
1138 colored_iterators[color].end(),
1139 [&worker_and_copier](
1140 const tbb::blocked_range<
1141 typename std::vector<Iterator>::const_iterator> &range) {
1142 worker_and_copier(range);
1143 },
1144 chunk_size);
1145 }
1146 }
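 // The call above ultimately relies on TBB's blocked-range machinery. A
 // self-contained sketch of that mechanism (illustration only, not part
 // of this header, and independent of the deal.II wrapper used above):
 //
 // @code
 // std::vector<unsigned int> cell_indices(64);
 //
 // tbb::parallel_for(
 //   tbb::blocked_range<std::vector<unsigned int>::const_iterator>(
 //     cell_indices.begin(), cell_indices.end(), /*grainsize=*/8),
 //   [](const tbb::blocked_range<
 //        std::vector<unsigned int>::const_iterator> &range) {
 //     for (auto it = range.begin(); it != range.end(); ++it)
 //       {
 //         // process *it with this thread's scratch/copy objects
 //       }
 //   });
 // @endcode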
1147
1148 } // namespace tbb_colored
1149# endif // DEAL_II_WITH_TBB
1150
1151
1152
1153# ifdef DEAL_II_WITH_TASKFLOW
1159 namespace taskflow_colored
1160 {
1167 template <typename Worker,
1168 typename Copier,
1169 typename Iterator,
1170 typename ScratchData,
1171 typename CopyData>
1172 void
1173 run(const std::vector<std::vector<Iterator>> &colored_iterators,
1174 Worker worker,
1175 Copier copier,
1176 const ScratchData &sample_scratch_data,
1177 const CopyData &sample_copy_data,
1178 const unsigned int /*queue_length*/ = 2 *
1179 MultithreadInfo::n_threads(),
1180 const unsigned int /*chunk_size*/ = 8)
1181
1182 {
1183 tf::Executor &executor = MultithreadInfo::get_taskflow_executor();
1184 using ScratchAndCopyDataObjects = typename internal::
1185 ScratchAndCopyDataObjects<Iterator, ScratchData, CopyData>;
1186
1187 using ScratchAndCopyDataList = std::list<ScratchAndCopyDataObjects>;
1188
1189 Threads::ThreadLocalStorage<ScratchAndCopyDataList>
1190 thread_safe_scratch_and_copy_data_list;
1191
1192 tf::Taskflow taskflow;
1193
1194 // Create a "future" object which eventually contains the execution
1195 // result of a taskflow graph and can be used to yield execution
1196 tf::Future<void> execution_future;
1197
1198 const bool have_worker =
1199 (static_cast<const std::function<
1200 void(const Iterator &, ScratchData &, CopyData &)> &>(worker)) !=
1201 nullptr;
1202 const bool have_copier =
1203 (static_cast<const std::function<void(const CopyData &)> &>(
1204 copier)) != nullptr;
1205
1206 // Generate a static task graph. Here we generate a task for each cell
1207 // that will be worked on. The tasks are not executed until all of them
1208 // are created; this code runs sequentially. Cells have been grouped
1209 // into "colors": data from cells in the same color are safe to copy in
1210 // parallel, so copying need not be sequential.
1211 for (unsigned int color = 0; color < colored_iterators.size(); ++color)
1212 // Ignore color blocks which are empty.
1213 if (colored_iterators[color].size() > 0)
1214 {
1215 // For each cell queue up a combined worker and copier task. These
1216 // are not yet run.
1217 for (const Iterator &it : colored_iterators[color])
1218 {
1219 taskflow
1220 .emplace(
1221 [it = it, // make a copy of the reference to the iterator
1222 have_worker,
1223 have_copier,
1224 &thread_safe_scratch_and_copy_data_list,
1225 &sample_scratch_data,
1226 &sample_copy_data,
1227 &worker,
1228 &copier]() {
1229 ScratchData *scratch_data = nullptr;
1230 CopyData *copy_data = nullptr;
1231
1232 ScratchAndCopyDataList &scratch_and_copy_data_list =
1233 thread_safe_scratch_and_copy_data_list.get();
1234 // See if there is an unused object. if so, grab it
1235 // and mark it as used.
1236 for (typename ScratchAndCopyDataList::iterator p =
1237 scratch_and_copy_data_list.begin();
1238 p != scratch_and_copy_data_list.end();
1239 ++p)
1240 {
1241 if (p->currently_in_use == false)
1242 {
1243 scratch_data = p->scratch_data.get();
1244 copy_data = p->copy_data.get();
1245 p->currently_in_use = true;
1246 break;
1247 }
1248 }
1249 // If no element in the list was found, create one and
1250 // mark it as used.
1251 if (scratch_data == nullptr)
1252 {
1253 Assert(copy_data == nullptr, ExcInternalError());
1254 scratch_and_copy_data_list.emplace_back(
1255 std::make_unique<ScratchData>(
1256 sample_scratch_data),
1257 std::make_unique<CopyData>(sample_copy_data),
1258 true);
1259 scratch_data = scratch_and_copy_data_list.back()
1260 .scratch_data.get();
1261 copy_data =
1262 scratch_and_copy_data_list.back().copy_data.get();
1263 }
1264 if (have_worker)
1265 worker(it, *scratch_data, *copy_data);
1266 if (have_copier)
1267 copier(*copy_data);
1268
1269 // Mark objects as free to be used again.
1270 for (typename ScratchAndCopyDataList::iterator p =
1271 scratch_and_copy_data_list.begin();
1272 p != scratch_and_copy_data_list.end();
1273 ++p)
1274 {
1275 if (p->scratch_data.get() == scratch_data)
1276 {
1277 Assert(p->currently_in_use == true,
1278 ExcInternalError());
1279 p->currently_in_use = false;
1280 }
1281 }
1282 })
1283 .name("worker_and_copier");
1284 }
1285 if (color > 0)
1286 // Wait for the previous color to finish executing
1287 execution_future.wait();
1288 execution_future = executor.run(std::move(taskflow));
1289 }
1290 // Wait for our final execution to finish
1291 if (colored_iterators.size() > 0)
1292 execution_future.wait();
1293 }
1294 } // namespace taskflow_colored
1295# endif // DEAL_II_WITH_TASKFLOW
1296
1297
1298 } // namespace internal
1299
1300
1301
1349 template <typename Worker,
1350 typename Copier,
1351 typename Iterator,
1352 typename ScratchData,
1353 typename CopyData>
1354 void
1355 run(const std::vector<std::vector<Iterator>> &colored_iterators,
1356 Worker worker,
1357 Copier copier,
1358 const ScratchData &sample_scratch_data,
1359 const CopyData &sample_copy_data,
1360 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1361 const unsigned int chunk_size = 8);
1362
1363
1413 template <typename Worker,
1414 typename Copier,
1415 typename Iterator,
1416 typename ScratchData,
1417 typename CopyData>
1418 void
1419 run(const Iterator &begin,
1420 const std_cxx20::type_identity_t<Iterator> &end,
1421 Worker worker,
1422 Copier copier,
1423 const ScratchData &sample_scratch_data,
1424 const CopyData &sample_copy_data,
1425 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1426 const unsigned int chunk_size = 8)
1427 {
1428 Assert(queue_length > 0,
1429 ExcMessage("The queue length must be at least one, and preferably "
1430 "larger than the number of processors on this system."));
1431 (void)queue_length; // removes -Wunused-parameter warning in optimized mode
1432 Assert(chunk_size > 0, ExcMessage("The chunk_size must be at least one."));
1433 (void)chunk_size; // removes -Wunused-parameter warning in optimized mode
1434
1435 // If no work then skip. (only use operator!= for iterators since we may
1436 // not have an equality comparison operator)
1437 if (!(begin != end))
1438 return;
1439
1440 if (MultithreadInfo::n_threads() > 1)
1441 {
1442# if defined(DEAL_II_WITH_TBB) || defined(DEAL_II_WITH_TASKFLOW)
1443 if (static_cast<const std::function<void(const CopyData &)> &>(copier))
1444 {
1445 // If we have a copier, run the algorithm:
1446# if defined(DEAL_II_WITH_TASKFLOW)
1447 internal::taskflow_no_coloring::run(begin,
1448 end,
1449 worker,
1450 copier,
1451 sample_scratch_data,
1452 sample_copy_data,
1453 queue_length,
1454 chunk_size);
1455# elif defined(DEAL_II_WITH_TBB)
1456 internal::tbb_no_coloring::run(begin,
1457 end,
1458 worker,
1459 copier,
1460 sample_scratch_data,
1461 sample_copy_data,
1462 queue_length,
1463 chunk_size);
1464# endif
1465 }
1466 else
1467 {
1468 // There is no copier function. In this case, we have an
1469 // embarrassingly parallel problem where we can
1470 // essentially apply parallel_for. Because parallel_for
1471 // requires subdividing the range, for which operator- between
1472 // iterators is necessary, it is often inefficient to
1473 // apply it directly to cell ranges and similar iterator
1474 // types for which operator- is expensive or, in fact,
1475 // nonexistent. rather, in that case, we simply copy the
1476 // iterators into a large array and use operator- on
1477 // iterators to this array of iterators.
1478 //
1479 // instead of duplicating code, this is essentially the
1480 // same situation we have in the colored implementation below, so we
1481 // just defer to that place
1482 std::vector<std::vector<Iterator>> all_iterators(1);
1483 for (Iterator p = begin; p != end; ++p)
1484 all_iterators[0].push_back(p);
1485
1486 run(all_iterators,
1487 worker,
1488 copier,
1489 sample_scratch_data,
1490 sample_copy_data,
1491 queue_length,
1492 chunk_size);
1493 }
1494
1495 // exit this function to not run the sequential version below:
1496 return;
1497# endif
1498 }
1499
1500 // no TBB or Taskflow installed or we are requested to run sequentially:
1501 internal::sequential::run(
1502 begin, end, worker, copier, sample_scratch_data, sample_copy_data);
1503 }
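 // A typical way to call the function above, as a hedged sketch: the
 // names dof_handler, fe, quadrature, MyScratch and MyCopy are
 // placeholders for whatever the calling code defines.
 //
 // @code
 // WorkStream::run(
 //   dof_handler.begin_active(),
 //   dof_handler.end(),
 //   [&](const auto &cell, MyScratch &scratch, MyCopy &copy) {
 //     // compute the local contribution of 'cell' into 'copy',
 //     // using 'scratch' for temporary arrays
 //   },
 //   [&](const MyCopy &copy) {
 //     // transfer 'copy' into the global matrix/right hand side;
 //     // this part runs sequentially
 //   },
 //   MyScratch(fe, quadrature),
 //   MyCopy());
 // @endcode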
1504
1505
1506
1514 template <typename Worker,
1515 typename Copier,
1516 typename IteratorRangeType,
1517 typename ScratchData,
1518 typename CopyData,
1519 typename = std::enable_if_t<has_begin_and_end<IteratorRangeType>>>
1520 void
1521 run(IteratorRangeType iterator_range,
1522 Worker worker,
1523 Copier copier,
1524 const ScratchData &sample_scratch_data,
1525 const CopyData &sample_copy_data,
1526 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1527 const unsigned int chunk_size = 8)
1528 {
1529 // Call the function above
1530 run(iterator_range.begin(),
1531 iterator_range.end(),
1532 worker,
1533 copier,
1534 sample_scratch_data,
1535 sample_copy_data,
1536 queue_length,
1537 chunk_size);
1538 }
1539
1540
1541
1545 template <typename Worker,
1546 typename Copier,
1547 typename Iterator,
1548 typename ScratchData,
1549 typename CopyData>
1550 void
1551 run(const IteratorRange<Iterator> &iterator_range,
1552 Worker worker,
1553 Copier copier,
1554 const ScratchData &sample_scratch_data,
1555 const CopyData &sample_copy_data,
1556 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1557 const unsigned int chunk_size = 8)
1558 {
1559 // Call the function above
1560 run(iterator_range.begin(),
1561 iterator_range.end(),
1562 worker,
1563 copier,
1564 sample_scratch_data,
1565 sample_copy_data,
1566 queue_length,
1567 chunk_size);
1568 }
1569
1570
1571
1572 template <typename Worker,
1573 typename Copier,
1574 typename Iterator,
1575 typename ScratchData,
1576 typename CopyData>
1577 void
1578 run(const std::vector<std::vector<Iterator>> &colored_iterators,
1579 Worker worker,
1580 Copier copier,
1581 const ScratchData &sample_scratch_data,
1582 const CopyData &sample_copy_data,
1583 const unsigned int queue_length,
1584 const unsigned int chunk_size)
1585 {
1586 Assert(queue_length > 0,
1587 ExcMessage("The queue length must be at least one, and preferably "
1588 "larger than the number of processors on this system."));
1589 (void)queue_length; // removes -Wunused-parameter warning in optimized mode
1590 Assert(chunk_size > 0, ExcMessage("The chunk_size must be at least one."));
1591 (void)chunk_size; // removes -Wunused-parameter warning in optimized mode
1592
1593
1594 if (MultithreadInfo::n_threads() > 1)
1595 {
1596# ifdef DEAL_II_WITH_TASKFLOW
1597 internal::taskflow_colored::run(colored_iterators,
1598 worker,
1599 copier,
1600 sample_scratch_data,
1601 sample_copy_data,
1602 chunk_size);
1603
1604 // exit this function to not run the sequential version below:
1605 return;
1606# elif defined(DEAL_II_WITH_TBB)
1607 internal::tbb_colored::run(colored_iterators,
1608 worker,
1609 copier,
1610 sample_scratch_data,
1611 sample_copy_data,
1612 chunk_size);
1613
1614 // exit this function to not run the sequential version below:
1615 return;
1616# endif
1617 }
1618
1619 // run all colors sequentially:
1620 {
1621 internal::sequential::run(colored_iterators,
1622 worker,
1623 copier,
1624 sample_scratch_data,
1625 sample_copy_data);
1626 }
1627 }
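 // A sketch of how the colored overload above is typically fed (not part
 // of this header): each inner vector holds iterators whose copy
 // operations do not conflict, so everything within one color may run --
 // and copy -- in parallel. In practice such a partition usually comes
 // from GraphColoring::make_graph_coloring(); here it is filled by hand
 // and 'CellIterator' is a placeholder type.
 //
 // @code
 // std::vector<std::vector<CellIterator>> colored_iterators(2);
 // // ... put mutually non-conflicting cells into colored_iterators[0],
 // // the remaining cells into colored_iterators[1] ...
 //
 // WorkStream::run(colored_iterators,
 //                 worker,
 //                 copier,
 //                 sample_scratch_data,
 //                 sample_copy_data);
 // @endcode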
1628
1629
1630
1672 template <typename MainClass,
1673 typename Iterator,
1674 typename ScratchData,
1675 typename CopyData>
1676 void
1677 run(const Iterator &begin,
1678 const std_cxx20::type_identity_t<Iterator> &end,
1679 MainClass &main_object,
1680 void (MainClass::*worker)(const Iterator &, ScratchData &, CopyData &),
1681 void (MainClass::*copier)(const CopyData &),
1682 const ScratchData &sample_scratch_data,
1683 const CopyData &sample_copy_data,
1684 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1685 const unsigned int chunk_size = 8)
1686 {
1687 // forward to the other function
1688 run(
1689 begin,
1690 end,
1691 [&main_object, worker](const Iterator &iterator,
1692 ScratchData &scratch_data,
1693 CopyData &copy_data) {
1694 (main_object.*worker)(iterator, scratch_data, copy_data);
1695 },
1696 [&main_object, copier](const CopyData &copy_data) {
1697 (main_object.*copier)(copy_data);
1698 },
1699 sample_scratch_data,
1700 sample_copy_data,
1701 queue_length,
1702 chunk_size);
1703 }
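 // A usage sketch for the member-function variant above (illustration
 // only; 'MyAssembler', its members, and the other names are
 // placeholders):
 //
 // @code
 // MyAssembler assembler;
 //
 // WorkStream::run(dof_handler.begin_active(),
 //                 dof_handler.end(),
 //                 assembler,
 //                 &MyAssembler::local_assemble,
 //                 &MyAssembler::copy_local_to_global,
 //                 sample_scratch_data,
 //                 sample_copy_data);
 // @endcode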
1704
1705
1706 template <typename MainClass,
1707 typename Iterator,
1708 typename ScratchData,
1709 typename CopyData>
1710 void
1711 run(const IteratorOverIterators<Iterator> &begin,
1712 const IteratorOverIterators<std_cxx20::type_identity_t<Iterator>> &end,
1713 MainClass &main_object,
1714 void (MainClass::*worker)(const Iterator &, ScratchData &, CopyData &),
1715 void (MainClass::*copier)(const CopyData &),
1716 const ScratchData &sample_scratch_data,
1717 const CopyData &sample_copy_data,
1718 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1719 const unsigned int chunk_size = 8)
1720 {
1721 // forward to the other function
1722 run(
1723 begin,
1724 end,
1725 [&main_object, worker](const Iterator &iterator,
1726 ScratchData &scratch_data,
1727 CopyData &copy_data) {
1728 (main_object.*worker)(iterator, scratch_data, copy_data);
1729 },
1730 [&main_object, copier](const CopyData &copy_data) {
1731 (main_object.*copier)(copy_data);
1732 },
1733 sample_scratch_data,
1734 sample_copy_data,
1735 queue_length,
1736 chunk_size);
1737 }
1738
1739
1740
1748 template <typename MainClass,
1749 typename IteratorRangeType,
1750 typename ScratchData,
1751 typename CopyData,
1752 typename = std::enable_if_t<has_begin_and_end<IteratorRangeType>>>
1753 void
1754 run(
1755 IteratorRangeType iterator_range,
1756 MainClass &main_object,
1757 void (MainClass::*worker)(
1758 const typename std_cxx20::type_identity_t<IteratorRangeType>::iterator &,
1759 ScratchData &,
1760 CopyData &),
1761 void (MainClass::*copier)(const CopyData &),
1762 const ScratchData &sample_scratch_data,
1763 const CopyData &sample_copy_data,
1764 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1765 const unsigned int chunk_size = 8)
1766 {
1767 // Call the function above
1768 run(std::begin(iterator_range),
1769 std::end(iterator_range),
1770 main_object,
1771 worker,
1772 copier,
1773 sample_scratch_data,
1774 sample_copy_data,
1775 queue_length,
1776 chunk_size);
1777 }
1778
1779
1780
1784 template <typename MainClass,
1785 typename Iterator,
1786 typename ScratchData,
1787 typename CopyData>
1788 void
1789 run(const IteratorRange<Iterator> &iterator_range,
1790 MainClass &main_object,
1791 void (MainClass::*worker)(const Iterator &, ScratchData &, CopyData &),
1792 void (MainClass::*copier)(const CopyData &),
1793 const ScratchData &sample_scratch_data,
1794 const CopyData &sample_copy_data,
1795 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1796 const unsigned int chunk_size = 8)
1797 {
1798 // Call the function above
1799 run(std::begin(iterator_range),
1800 std::end(iterator_range),
1801 main_object,
1802 worker,
1803 copier,
1804 sample_scratch_data,
1805 sample_copy_data,
1806 queue_length,
1807 chunk_size);
1808 }
1809
1810} // namespace WorkStream
1811
1812
1813
1814DEAL_II_NAMESPACE_CLOSE
1815
1816
1817
1818//---------------------------- work_stream.h ---------------------------
1819// end of #ifndef dealii_work_stream_h
1820#endif
1821//---------------------------- work_stream.h ---------------------------