Reference documentation for deal.II version 9.5.0
symengine_optimizer.h
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2020 - 2023 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE at
12// the top level of the deal.II distribution.
13//
14// ---------------------------------------------------------------------
15
16#ifndef dealii_differentiation_sd_symengine_optimizer_h
17#define dealii_differentiation_sd_symengine_optimizer_h
18
19#include <deal.II/base/config.h>
20
21#ifdef DEAL_II_WITH_SYMENGINE
22
23// Low level
24# include <symengine/basic.h>
25# include <symengine/dict.h>
26# include <symengine/symengine_exception.h>
27# include <symengine/symengine_rcp.h>
28
29// Optimization
30# include <symengine/lambda_double.h>
31# include <symengine/visitor.h>
32# ifdef HAVE_SYMENGINE_LLVM
33# include <symengine/llvm_double.h>
34# endif
35
39
45
46# include <boost/serialization/split_member.hpp>
47# include <boost/type_traits.hpp>
48
49# include <algorithm>
50# include <map>
51# include <memory>
52# include <type_traits>
53# include <utility>
54# include <vector>
55
56
57DEAL_II_NAMESPACE_OPEN
58
59
60namespace Differentiation
61{
62 namespace SD
63 {
74 "SymEngine has not been built with LLVM support.");
75
81 "The SymEngine LLVM optimizer does not (yet) support the "
82 "selected return type.");
83
87 // Forward declarations
88 template <typename ReturnType>
89 class BatchOptimizer;
90
91
97 enum class OptimizerType
98 {
102 dictionary,
107 lambda,
112 llvm
113 };
114
115
119 template <class StreamType>
120 inline StreamType &
121 operator<<(StreamType &s, OptimizerType o)
122 {
123 if (o == OptimizerType::dictionary)
124 s << "dictionary";
125 else if (o == OptimizerType::lambda)
126 s << "lambda";
127 else if (o == OptimizerType::llvm)
128 s << "llvm";
129 else
130 {
131 Assert(false, ExcMessage("Unknown optimization method."));
132 }
133
134 return s;
135 }
136
137
143 enum class OptimizationFlags : unsigned char
144 {
148 optimize_default = 0,
152 optimize_cse = 0x0001,
157 optimize_aggressive = 0x0002,
161 optimize_all = optimize_cse | optimize_aggressive
162 };
163
164
173 // This operator exists because, if it did not, the result of the bit-or
174 // <tt>operator |</tt> would be an integer, which would in turn trigger a
175 // compiler warning when we tried to assign it to an object of type
176 // OptimizationFlags.
177 inline OptimizationFlags
178 operator|(const OptimizationFlags f1, const OptimizationFlags f2)
179 {
180 return static_cast<OptimizationFlags>(static_cast<unsigned int>(f1) |
181 static_cast<unsigned int>(f2));
182 }
183
184
189 inline OptimizationFlags &
190 operator|=(OptimizationFlags &f1, const OptimizationFlags f2)
191 {
192 f1 = f1 | f2;
193 return f1;
194 }
195
196
205 // This operator exists because, if it did not, the result of the bit-and
206 // <tt>operator &</tt> would be an integer, which would in turn trigger a
207 // compiler warning when we tried to assign it to an object of type
208 // OptimizationFlags.
209 inline OptimizationFlags
210 operator&(const OptimizationFlags f1, const OptimizationFlags f2)
211 {
212 return static_cast<OptimizationFlags>(static_cast<unsigned int>(f1) &
213 static_cast<unsigned int>(f2));
214 }
215
216
221 inline OptimizationFlags &
222 operator&=(OptimizationFlags &f1, const OptimizationFlags f2)
223 {
224 f1 = f1 & f2;
225 return f1;
226 }
227
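    // A small usage sketch (illustrative): the operators above allow
    // individual flags to be combined and queried, e.g.
    //
    //   OptimizationFlags flags = OptimizationFlags::optimize_cse;
    //   flags |= OptimizationFlags::optimize_aggressive;
    //   const bool aggressive =
    //     static_cast<unsigned int>(
    //       flags & OptimizationFlags::optimize_aggressive) != 0;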
228
229 namespace internal
230 {
235 inline bool
236 use_symbolic_CSE(const enum OptimizationFlags &flags)
237 {
238 return static_cast<int>(flags & OptimizationFlags::optimize_cse);
239 }
240
245 inline int
246 get_LLVM_optimization_level(const enum OptimizationFlags &flags)
247 {
248 // With the LLVM compiler there exists the opportunity to tune
249 // the level of optimizations performed during compilation.
250 // By default SymEngine sets this at "opt_level=2", which one
251 // presumes targets -O2. Here we are a bit more specific about
252 // what we want it to do:
253 // - Normal compilation: -O2 (default settings)
254 // - Aggressive mode: -O3 (the whole lot!)
255 // In theory we could also target
256 // - Debug mode: -O0 (no optimizations)
257 // but this doesn't make much sense since SymEngine is a
258 // tested external library.
259 const bool use_agg_opt =
260 static_cast<int>(flags & OptimizationFlags::optimize_aggressive);
261 const int opt_level = (use_agg_opt ? 3 : 2);
262 return opt_level;
263 }
264 } // namespace internal
265
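    // For example (illustrative): with the helpers above,
    //
    //   internal::use_symbolic_CSE(OptimizationFlags::optimize_cse)            // -> true
    //   internal::get_LLVM_optimization_level(OptimizationFlags::optimize_all) // -> 3
    //
    // while flag combinations without optimize_aggressive map to LLVM
    // optimization level 2.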
266
271 template <class StreamType>
272 inline StreamType &
273 operator<<(StreamType &s, OptimizationFlags o)
274 {
275 s << " OptimizationFlags|";
276 if (static_cast<unsigned int>(o & OptimizationFlags::optimize_cse))
277 s << "cse|";
278
279 // LLVM optimization level
280 s << "-O" + std::to_string(internal::get_LLVM_optimization_level(o)) +
281 "|";
282
283 return s;
284 }
285
286
287 namespace internal
288 {
298 template <typename ReturnType, typename T = void>
299 struct DictionaryOptimizer;
300
301
311 template <typename ReturnType, typename T = void>
312 struct LambdaOptimizer;
313
314
315# ifdef HAVE_SYMENGINE_LLVM
325 template <typename ReturnType, typename T = void>
326 struct LLVMOptimizer;
327# endif // HAVE_SYMENGINE_LLVM
328
329
345 template <typename ReturnType, typename Optimizer, typename T = void>
346 struct OptimizerHelper;
347
348
349# ifndef DOXYGEN
350
351
352 /* ----------- Specializations for the Optimizers ----------- */
353
354
355 // A helper struct to type trait detection for the optimizers that
356 // will be defined next.
357 template <typename ReturnType_, typename T = void>
358 struct SupportedOptimizerTypeTraits
359 {
360 static const bool is_supported = false;
361
362 using ReturnType = void;
363 };
364
365
366
367 // Specialization for arithmetic types
368 template <typename ReturnType_>
369 struct SupportedOptimizerTypeTraits<
370 ReturnType_,
371 std::enable_if_t<std::is_arithmetic<ReturnType_>::value>>
372 {
373 static const bool is_supported = true;
374
375 using ReturnType =
376 typename std::conditional<std::is_same<ReturnType_, float>::value,
377 float,
378 double>::type;
379 };
380
381
382
383 // Specialization for complex arithmetic types
384 template <typename ReturnType_>
385 struct SupportedOptimizerTypeTraits<
386 ReturnType_,
387 std::enable_if_t<
388 boost::is_complex<ReturnType_>::value &&
389 std::is_arithmetic<typename ReturnType_::value_type>::value>>
390 {
391 static const bool is_supported = true;
392
393 using ReturnType = typename std::conditional<
394 std::is_same<ReturnType_, std::complex<float>>::value,
395 std::complex<float>,
396 std::complex<double>>::type;
397 };
398
399
400
401 template <typename ReturnType_>
402 struct DictionaryOptimizer<ReturnType_,
403 std::enable_if_t<SupportedOptimizerTypeTraits<
404 ReturnType_>::is_supported>>
405 {
406 using ReturnType =
407 typename SupportedOptimizerTypeTraits<ReturnType_>::ReturnType;
408 using OptimizerType =
409 internal::DictionarySubstitutionVisitor<ReturnType, SD::Expression>;
410
411
420 static void
421 initialize(OptimizerType & optimizer,
422 const SymEngine::vec_basic & independent_symbols,
423 const SymEngine::vec_basic & dependent_functions,
424 const enum OptimizationFlags &optimization_flags)
425 {
426 const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
427 optimizer.init(independent_symbols,
428 dependent_functions,
429 use_symbolic_cse);
430 }
431
432
433
438 template <class Archive>
439 static void
440 save(Archive & archive,
441 const unsigned int version,
442 OptimizerType & optimizer)
443 {
444 optimizer.save(archive, version);
445 }
446
447
448
453 template <class Archive>
454 static void
455 load(Archive & archive,
456 const unsigned int version,
457 OptimizerType & optimizer,
458 const SymEngine::vec_basic & /*independent_symbols*/,
459 const SymEngine::vec_basic & /*dependent_functions*/,
460 const enum OptimizationFlags & /*optimization_flags*/)
461 {
462 optimizer.load(archive, version);
463 }
464
465
466
482 template <typename Stream>
483 static void
484 print(Stream & stream,
485 const OptimizerType &optimizer,
486 const bool print_independent_symbols = false,
487 const bool print_dependent_functions = false,
488 const bool print_cse_reductions = true)
489 {
490 optimizer.print(stream,
491 print_independent_symbols,
492 print_dependent_functions,
493 print_cse_reductions);
494 }
495 };
496
497
498
499 template <typename ReturnType_>
500 struct LambdaOptimizer<ReturnType_,
501 std::enable_if_t<SupportedOptimizerTypeTraits<
502 ReturnType_>::is_supported>>
503 {
504 using ReturnType =
505 typename std::conditional<!boost::is_complex<ReturnType_>::value,
506 double,
507 std::complex<double>>::type;
508 using OptimizerType = typename std::conditional<
509 !boost::is_complex<ReturnType_>::value,
510 SymEngine::LambdaRealDoubleVisitor,
511 SymEngine::LambdaComplexDoubleVisitor>::type;
512
513
522 static void
523 initialize(OptimizerType & optimizer,
524 const SymEngine::vec_basic & independent_symbols,
525 const SymEngine::vec_basic & dependent_functions,
526 const enum OptimizationFlags &optimization_flags)
527 {
528 const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
529 optimizer.init(independent_symbols,
530 dependent_functions,
531 use_symbolic_cse);
532 }
533
534
535
540 template <class Archive>
541 static void
542 save(Archive & /*archive*/,
543 const unsigned int /*version*/,
544 OptimizerType & /*optimizer*/)
545 {}
546
547
552 template <class Archive>
553 static void
554 load(Archive & /*archive*/,
555 const unsigned int /*version*/,
556 OptimizerType & optimizer,
557 const SymEngine::vec_basic & independent_symbols,
558 const SymEngine::vec_basic & dependent_functions,
559 const enum OptimizationFlags &optimization_flags)
560 {
561 initialize(optimizer,
562 independent_symbols,
563 dependent_functions,
564 optimization_flags);
565 }
566
567
568
584 template <typename StreamType>
585 static void
586 print(StreamType & /*stream*/,
587 const OptimizerType & /*optimizer*/,
588 const bool /*print_independent_symbols*/ = false,
589 const bool /*print_dependent_functions*/ = false,
590 const bool /*print_cse_reductions*/ = true)
591 {
592 // No built-in print function
593 }
594 };
595
596
597
598# ifdef HAVE_SYMENGINE_LLVM
599 template <typename ReturnType_>
600 struct LLVMOptimizer<
601 ReturnType_,
602 std::enable_if_t<std::is_arithmetic<ReturnType_>::value>>
603 {
604 using ReturnType =
605 typename std::conditional<std::is_same<ReturnType_, float>::value,
606 float,
607 double>::type;
608 using OptimizerType =
609 typename std::conditional<std::is_same<ReturnType_, float>::value,
610 SymEngine::LLVMFloatVisitor,
611 SymEngine::LLVMDoubleVisitor>::type;
612
617 static const bool supported_by_LLVM = true;
618
619
628 static void
629 initialize(OptimizerType & optimizer,
630 const SymEngine::vec_basic & independent_symbols,
631 const SymEngine::vec_basic & dependent_functions,
632 const enum OptimizationFlags &optimization_flags)
633 {
634 const int opt_level = get_LLVM_optimization_level(optimization_flags);
635 const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
636 optimizer.init(independent_symbols,
637 dependent_functions,
638 use_symbolic_cse,
639 opt_level);
640 }
641
642
643
648 template <class Archive>
649 static void
650 save(Archive &archive,
651 const unsigned int /*version*/,
652 OptimizerType &optimizer)
653 {
654 const std::string llvm_compiled_function = optimizer.dumps();
655 archive & llvm_compiled_function;
656 }
657
658
659
664 template <class Archive>
665 static void
666 load(Archive &archive,
667 const unsigned int /*version*/,
668 OptimizerType &optimizer,
669 const SymEngine::vec_basic & /*independent_symbols*/,
670 const SymEngine::vec_basic & /*dependent_functions*/,
671 const enum OptimizationFlags & /*optimization_flags*/)
672 {
673 std::string llvm_compiled_function;
674 archive & llvm_compiled_function;
675 optimizer.loads(llvm_compiled_function);
676 }
677
678
679
695 template <typename StreamType>
696 static void
697 print(StreamType & /*stream*/,
698 const OptimizerType & /*optimizer*/,
699 const bool /*print_independent_symbols*/ = false,
700 const bool /*print_dependent_functions*/ = false,
701 const bool /*print_cse_reductions*/ = true)
702 {
703 // No built-in print function
704 }
705 };
706
707
708 // There is no LLVM optimizer built with complex number support.
709 // So we fall back to the LambdaDouble case as a type (required
710 // at compile time), but offer no implementation. We expect that
711 // the calling class does not create this type: This can be done by
712 // checking the `supported_by_LLVM` flag.
713 template <typename ReturnType_>
714 struct LLVMOptimizer<
715 ReturnType_,
716 std::enable_if_t<
717 boost::is_complex<ReturnType_>::value &&
718 std::is_arithmetic<typename ReturnType_::value_type>::value>>
719 {
720 // Since there is no working implementation, these are dummy types
721 // that help with templating in the calling function.
722 using ReturnType = typename LambdaOptimizer<ReturnType_>::ReturnType;
723 using OptimizerType =
724 typename LambdaOptimizer<ReturnType_>::OptimizerType;
725
730 static const bool supported_by_LLVM = false;
731
732
741 static void
742 initialize(OptimizerType & /*optimizer*/,
743 const SymEngine::vec_basic & /*independent_symbols*/,
744 const SymEngine::vec_basic & /*dependent_functions*/,
745 const enum OptimizationFlags & /*optimization_flags*/)
746 {
747 AssertThrow(false, ExcSymEngineLLVMReturnTypeNotSupported());
748 }
749
750
751
756 template <class Archive>
757 static void
758 save(Archive & /*archive*/,
759 const unsigned int /*version*/,
760 OptimizerType & /*optimizer*/)
761 {
762 AssertThrow(false, ExcSymEngineLLVMReturnTypeNotSupported());
763 }
764
765
766
771 template <class Archive>
772 static void
773 load(Archive & /*archive*/,
774 const unsigned int /*version*/,
775 OptimizerType & /*optimizer*/,
776 const SymEngine::vec_basic & /*independent_symbols*/,
777 const SymEngine::vec_basic & /*dependent_functions*/,
778 const enum OptimizationFlags & /*optimization_flags*/)
779 {
780 AssertThrow(false, ExcSymEngineLLVMReturnTypeNotSupported());
781 }
782
783
784
800 template <typename StreamType>
801 static void
802 print(StreamType & /*stream*/,
803 const OptimizerType & /*optimizer*/,
804 const bool /*print_independent_symbols*/ = false,
805 const bool /*print_dependent_functions*/ = false,
806 const bool /*print_cse_reductions*/ = true)
807 {
808 AssertThrow(false, ExcSymEngineLLVMReturnTypeNotSupported());
809 }
810 };
811# endif // HAVE_SYMENGINE_LLVM
812
813
814 /* ----------- Specializations for OptimizerHelper ----------- */
815
816
817 template <typename ReturnType, typename Optimizer>
818 struct OptimizerHelper<
819 ReturnType,
820 Optimizer,
821 std::enable_if_t<
822 std::is_same<ReturnType, typename Optimizer::ReturnType>::value>>
823 {
832 static void
833 initialize(typename Optimizer::OptimizerType *optimizer,
834 const SymEngine::vec_basic & independent_symbols,
835 const SymEngine::vec_basic & dependent_functions,
836 const enum OptimizationFlags & optimization_flags)
837 {
838 Assert(optimizer, ExcNotInitialized());
839
840 // Some optimizers don't have the same interface for
841 // initialization, so we filter them out through the specializations
842 // of the Optimizer class
843 Optimizer::initialize(*optimizer,
844 independent_symbols,
845 dependent_functions,
846 optimization_flags);
847 }
848
849
850
864 static void
865 substitute(typename Optimizer::OptimizerType *optimizer,
866 std::vector<ReturnType> & output_values,
867 const std::vector<ReturnType> & substitution_values)
868 {
869 Assert(optimizer, ExcNotInitialized());
870 optimizer->call(output_values.data(), substitution_values.data());
871 }
872
873
874
879 template <class Archive>
880 static void
881 save(Archive & archive,
882 const unsigned int version,
883 typename Optimizer::OptimizerType *optimizer)
884 {
885 Assert(optimizer, ExcNotInitialized());
886
887 // Some optimizers don't have the same interface for
888 // serialization, so we filter them out through the specializations
889 // of the Optimizer class
890 Optimizer::save(archive, version, *optimizer);
891 }
892
893
894
899 template <class Archive>
900 static void
901 load(Archive & archive,
902 const unsigned int version,
903 typename Optimizer::OptimizerType *optimizer,
904 const SymEngine::vec_basic & independent_symbols,
905 const SymEngine::vec_basic & dependent_functions,
906 const enum OptimizationFlags & optimization_flags)
907 {
908 Assert(optimizer, ExcNotInitialized());
909
910 // Some optimizers don't have the same interface for
911 // serialization, so we filter them out through the specializations
912 // of the Optimizer class
913 Optimizer::load(archive,
914 version,
915 *optimizer,
916 independent_symbols,
917 dependent_functions,
918 optimization_flags);
919 }
920
921
922
938 template <typename Stream>
939 static void
940 print(Stream & stream,
941 typename Optimizer::OptimizerType *optimizer,
942 const bool print_independent_symbols = false,
943 const bool print_dependent_functions = false,
944 const bool print_cse_reductions = true)
945 {
946 Assert(optimizer, ExcNotInitialized());
947
948 // Some optimizers don't have a print function, so
949 // we filter them out through the specializations of
950 // the Optimizer class
951 Optimizer::print(stream,
952 *optimizer,
953 print_independent_symbols,
954 print_dependent_functions,
955 print_cse_reductions);
956 }
957 };
958
959 template <typename ReturnType, typename Optimizer>
960 struct OptimizerHelper<
961 ReturnType,
962 Optimizer,
963 std::enable_if_t<
964 !std::is_same<ReturnType, typename Optimizer::ReturnType>::value>>
965 {
974 static void
975 initialize(typename Optimizer::OptimizerType *optimizer,
976 const SymEngine::vec_basic & independent_symbols,
977 const SymEngine::vec_basic & dependent_functions,
978 const enum OptimizationFlags & optimization_flags)
979 {
980 Assert(optimizer, ExcNotInitialized());
981
982 const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
983 optimizer->init(independent_symbols,
984 dependent_functions,
985 use_symbolic_cse);
986 }
987
988
989
1003 static void
1004 substitute(typename Optimizer::OptimizerType *optimizer,
1005 std::vector<ReturnType> & output_values,
1006 const std::vector<ReturnType> & substitution_values)
1007 {
1008 Assert(optimizer, ExcNotInitialized());
1009
1010 // Intermediate values to accommodate the difference in
1011 // value types.
1012 std::vector<typename Optimizer::ReturnType> int_outputs(
1013 output_values.size());
1014 std::vector<typename Optimizer::ReturnType> int_inputs(
1015 substitution_values.size());
1016
1017 std::copy(substitution_values.begin(),
1018 substitution_values.end(),
1019 int_inputs.begin());
1020 optimizer->call(int_outputs.data(), int_inputs.data());
1021 std::copy(int_outputs.begin(),
1022 int_outputs.end(),
1023 output_values.begin());
1024 }
1025
1026
1027
1032 template <class Archive>
1033 static void
1034 save(Archive & archive,
1035 const unsigned int version,
1036 typename Optimizer::OptimizerType *optimizer)
1037 {
1038 Assert(optimizer, ExcNotInitialized());
1039 Optimizer::save(archive, version, *optimizer);
1040 }
1041
1042
1043
1048 template <class Archive>
1049 static void
1050 load(Archive & archive,
1051 const unsigned int version,
1052 typename Optimizer::OptimizerType *optimizer,
1053 const SymEngine::vec_basic & independent_symbols,
1054 const SymEngine::vec_basic & dependent_functions,
1055 const enum OptimizationFlags & optimization_flags)
1056 {
1057 Assert(optimizer, ExcNotInitialized());
1058
1059 // Some optimizers don't have the same interface for
1060 // serialization, so we filter them out through the specializations
1061 // of the Optimizer class
1062 Optimizer::load(archive,
1063 version,
1064 *optimizer,
1065 independent_symbols,
1066 dependent_functions,
1067 optimization_flags);
1068 }
1069
1070
1071
1087 template <typename Stream>
1088 static void
1089 print(Stream & stream,
1090 typename Optimizer::OptimizerType *optimizer,
1091 const bool print_cse_reductions = true,
1092 const bool print_independent_symbols = false,
1093 const bool print_dependent_functions = false)
1094 {
1095 Assert(optimizer, ExcNotInitialized());
1096
1097 optimizer->print(stream,
1098 print_independent_symbols,
1099 print_dependent_functions,
1100 print_cse_reductions);
1101 }
1102 };
1103
1104# endif // DOXYGEN
1105
1106
1107 /* -------------------- Utility functions ---------------------- */
1108
1109
1131 template <typename NumberType,
1132 int rank,
1133 int dim,
1134 template <int, int, typename>
1135 class TensorType>
1136 TensorType<rank, dim, NumberType>
1137 tensor_evaluate_optimized(
1138 const TensorType<rank, dim, Expression> &symbol_tensor,
1139 const std::vector<NumberType> & cached_evaluation,
1140 const BatchOptimizer<NumberType> & optimizer)
1141 {
1142 TensorType<rank, dim, NumberType> out;
1143 for (unsigned int i = 0; i < out.n_independent_components; ++i)
1144 {
1145 const TableIndices<rank> indices(
1146 out.unrolled_to_component_indices(i));
1147 out[indices] =
1148 optimizer.extract(symbol_tensor[indices], cached_evaluation);
1149 }
1150 return out;
1151 }
1152
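      // Illustrative sketch (the tensor name is hypothetical): given a
      // Tensor<2, dim, Expression> `T_sym` whose components were registered
      // with a BatchOptimizer<double> `optimizer`, and the cached results of
      // optimizer.evaluate(), the helper above rebuilds the numerical tensor
      // component by component:
      //
      //   const Tensor<2, dim, double> T =
      //     tensor_evaluate_optimized(T_sym, cached_evaluation, optimizer);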
1153
1176 template <typename NumberType, int dim>
1177 SymmetricTensor<4, dim, NumberType>
1178 tensor_evaluate_optimized(
1179 const SymmetricTensor<4, dim, Expression> &symbol_tensor,
1180 const std::vector<NumberType> & cached_evaluation,
1181 const BatchOptimizer<NumberType> & optimizer)
1182 {
1183 SymmetricTensor<4, dim, NumberType> out;
1184 for (unsigned int i = 0;
1185 i < SymmetricTensor<2, dim>::n_independent_components;
1186 ++i)
1187 for (unsigned int j = 0;
1188 j < SymmetricTensor<2, dim>::n_independent_components;
1189 ++j)
1190 {
1191 const TableIndices<4> indices =
1192 make_rank_4_tensor_indices<dim>(i, j);
1193 out[indices] =
1194 optimizer.extract(symbol_tensor[indices], cached_evaluation);
1195 }
1196 return out;
1197 }
1198
1199
1217 template <typename NumberType, typename T>
1218 void
1219 register_functions(BatchOptimizer<NumberType> &optimizer,
1220 const T & function)
1221 {
1222 optimizer.register_function(function);
1223 }
1224
1225
1243 template <typename NumberType, typename T>
1244 void
1245 register_functions(BatchOptimizer<NumberType> &optimizer,
1246 const std::vector<T> & functions)
1247 {
1248 for (const auto &function : functions)
1249 register_functions(optimizer, function);
1250 }
1251
1252
1272 template <typename NumberType, typename T, typename... Args>
1273 void
1274 register_functions(BatchOptimizer<NumberType> &optimizer,
1275 const T & function,
1276 const Args &...other_functions)
1277 {
1278 register_functions(optimizer, function);
1279 register_functions(optimizer, other_functions...);
1280 }
1281
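      // Illustrative sketch (names are hypothetical): the recursive overloads
      // above let dependent functions of mixed type be registered in a single
      // call, e.g.
      //
      //   register_functions(optimizer, f, std::vector<Expression>{g, h}, T_sym);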
1282
1294 template <int rank,
1295 int dim,
1296 template <int, int, typename>
1297 class TensorType>
1298 types::symbol_vector
1299 unroll_to_expression_vector(
1300 const TensorType<rank, dim, Expression> &symbol_tensor)
1301 {
1302 types::symbol_vector out;
1303 out.reserve(symbol_tensor.n_independent_components);
1304 for (unsigned int i = 0; i < symbol_tensor.n_independent_components;
1305 ++i)
1306 {
1307 const TableIndices<rank> indices(
1308 symbol_tensor.unrolled_to_component_indices(i));
1309 out.push_back(symbol_tensor[indices].get_RCP());
1310 }
1311 return out;
1312 }
1313
1314
1324 template <int dim>
1325 types::symbol_vector
1326 unroll_to_expression_vector(
1327 const SymmetricTensor<4, dim, Expression> &symbol_tensor)
1328 {
1329 types::symbol_vector out;
1330 out.reserve(symbol_tensor.n_independent_components);
1331 for (unsigned int i = 0;
1332 i < SymmetricTensor<2, dim>::n_independent_components;
1333 ++i)
1334 for (unsigned int j = 0;
1335 j < SymmetricTensor<2, dim>::n_independent_components;
1336 ++j)
1337 {
1338 const TableIndices<4> indices =
1339 make_rank_4_tensor_indices<dim>(i, j);
1340 out.push_back(symbol_tensor[indices].get_RCP());
1341 }
1342 return out;
1343 }
1344
1345 } // namespace internal
1346
1347
1348
1439 template <typename ReturnType>
1440 class BatchOptimizer
1441 {
1442 public:
1450 BatchOptimizer();
1451
1469 BatchOptimizer(const enum OptimizerType &optimization_method,
1470 const enum OptimizationFlags &optimization_flags =
1471 OptimizationFlags::optimize_all);
1472
1482 BatchOptimizer(const BatchOptimizer &other);
1483
1487 BatchOptimizer(BatchOptimizer &&) noexcept = default;
1488
1492 ~BatchOptimizer() = default;
1493
1504 void
1505 copy_from(const BatchOptimizer &other);
1506
1516 template <typename Stream>
1517 void
1518 print(Stream &stream, const bool print_cse = false) const;
1519
1528 template <class Archive>
1529 void
1530 save(Archive &archive, const unsigned int version) const;
1531
1545 template <class Archive>
1546 void
1547 load(Archive &archive, const unsigned int version);
1548
1549# ifdef DOXYGEN
1571 template <class Archive>
1572 void
1573 serialize(Archive &archive, const unsigned int version);
1574# else
1575 // This macro defines the serialize() method that is compatible with
1576 // the templated save() and load() method that have been implemented.
1577 BOOST_SERIALIZATION_SPLIT_MEMBER()
1578# endif
1579
1590 void
1591 register_symbols(const types::substitution_map &substitution_map);
1592
1598 void
1599 register_symbols(const SymEngine::map_basic_basic &substitution_map);
1600
1611 void
1612 register_symbols(const types::symbol_vector &symbols);
1613
1624 void
1625 register_symbols(const SymEngine::vec_basic &symbols);
1626
1631 types::symbol_vector
1632 get_independent_symbols() const;
1633
1639 std::size_t
1640 n_independent_variables() const;
1641
1653 void
1654 register_function(const Expression &function);
1655
1660 template <int rank, int dim>
1661 void
1662 register_function(const Tensor<rank, dim, Expression> &function_tensor);
1663
1668 template <int rank, int dim>
1669 void
1670 register_function(
1671 const SymmetricTensor<rank, dim, Expression> &function_tensor);
1672
1677 void
1678 register_functions(const types::symbol_vector &functions);
1679
1684 void
1685 register_functions(const SymEngine::vec_basic &functions);
1686
1696 template <typename T>
1697 void
1698 register_functions(const std::vector<T> &functions);
1699
1713 template <typename T, typename... Args>
1714 void
1715 register_functions(const T &functions, const Args &...other_functions);
1716
1721 const types::symbol_vector &
1722 get_dependent_functions() const;
1723
1730 std::size_t
1731 n_dependent_variables() const;
1732
1751 void
1752 set_optimization_method(const enum OptimizerType &optimization_method,
1753 const enum OptimizationFlags &optimization_flags =
1754 OptimizationFlags::optimize_all);
1755
1760 enum OptimizerType
1761 optimization_method() const;
1762
1767 enum OptimizationFlags
1768 optimization_flags() const;
1769
1775 bool
1776 use_symbolic_CSE() const;
1777
1793 void
1794 optimize();
1795
1800 bool
1801 optimized() const;
1802
1819 void
1820 substitute(const types::substitution_map &substitution_map) const;
1821
1831 void
1832 substitute(const SymEngine::map_basic_basic &substitution_map) const;
1833
1844 void
1845 substitute(const types::symbol_vector & symbols,
1846 const std::vector<ReturnType> &values) const;
1847
1858 void
1859 substitute(const SymEngine::vec_basic & symbols,
1860 const std::vector<ReturnType> &values) const;
1861
1867 bool
1868 values_substituted() const;
1869
1899 const std::vector<ReturnType> &
1900 evaluate() const;
1901
1909 ReturnType
1910 evaluate(const Expression &func) const;
1911
1920 std::vector<ReturnType>
1921 evaluate(const std::vector<Expression> &funcs) const;
1922
1931 template <int rank, int dim>
1932 Tensor<rank, dim, ReturnType>
1933 evaluate(const Tensor<rank, dim, Expression> &funcs) const;
1934
1935
1944 template <int rank, int dim>
1945 SymmetricTensor<rank, dim, ReturnType>
1946 evaluate(const SymmetricTensor<rank, dim, Expression> &funcs) const;
1947
1948
1956 ReturnType
1957 extract(const Expression & func,
1958 const std::vector<ReturnType> &cached_evaluation) const;
1959
1960
1968 std::vector<ReturnType>
1969 extract(const std::vector<Expression> &funcs,
1970 const std::vector<ReturnType> &cached_evaluation) const;
1971
1972
1980 template <int rank, int dim>
1981 Tensor<rank, dim, ReturnType>
1982 extract(const Tensor<rank, dim, Expression> &funcs,
1983 const std::vector<ReturnType> & cached_evaluation) const;
1984
1985
1993 template <int rank, int dim>
1994 SymmetricTensor<rank, dim, ReturnType>
1995 extract(const SymmetricTensor<rank, dim, Expression> &funcs,
1996 const std::vector<ReturnType> &cached_evaluation) const;
1997
2000 private:
2004 enum OptimizerType method;
2005
2010 enum OptimizationFlags flags;
2011
2021 types::substitution_map independent_variables_symbols;
2022
2028 types::symbol_vector dependent_variables_functions;
2029
2034 bool
2035 is_valid_nonunique_dependent_variable(
2036 const SD::Expression &function) const;
2037
2042 bool
2043 is_valid_nonunique_dependent_variable(
2044 const SymEngine::RCP<const SymEngine::Basic> &function) const;
2045
2060 mutable std::vector<ReturnType> dependent_variables_output;
2061
2070 using map_dependent_expression_to_vector_entry_t =
2071 std::map<SD::Expression,
2072 std::size_t,
2073 SD::types::internal::ExpressionKeyLess>;
2074
2079 map_dependent_expression_to_vector_entry_t map_dep_expr_vec_entry;
2080
2087 mutable std::unique_ptr<SymEngine::Visitor> optimizer;
2088
2097 bool ready_for_value_extraction;
2098
2103 bool has_been_serialized;
2104
2108 void
2109 register_scalar_function(const SD::Expression &function);
2110
2115 void
2116 register_vector_functions(const types::symbol_vector &functions);
2117
2121 void
2122 create_optimizer(std::unique_ptr<SymEngine::Visitor> &optimizer);
2123
2140 void
2141 substitute(const std::vector<ReturnType> &substitution_values) const;
2142 };
2143
2144
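    // A minimal usage sketch for the BatchOptimizer class declared above.
    // The symbols, the example function, and the helpers make_symbol_map()
    // and make_substitution_map() (declared in symengine_scalar_operations.h)
    // are illustrative assumptions, not a prescription:
    //
    //   const Expression x("x"), y("y");
    //   const Expression f = (x + y) * (x + y);
    //
    //   BatchOptimizer<double> optimizer(OptimizerType::lambda,
    //                                    OptimizationFlags::optimize_all);
    //   optimizer.register_symbols(make_symbol_map(x, y));
    //   optimizer.register_functions(f);
    //   optimizer.optimize(); // Potentially expensive, but done only once.
    //
    //   // Evaluation for concrete values; this may be repeated many times.
    //   optimizer.substitute(make_substitution_map(std::make_pair(x, 1.0),
    //                                              std::make_pair(y, 2.5)));
    //   const double value = optimizer.evaluate(f);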
2145
2146 /* -------------------- inline and template functions ------------------ */
2147
2148
2149# ifndef DOXYGEN
2150
2151
2152 template <typename ReturnType>
2153 template <typename Stream>
2154 void
2155 BatchOptimizer<ReturnType>::print(Stream &stream,
2156 const bool /*print_cse*/) const
2157 {
2158 // Settings
2159 stream << "Method? " << optimization_method() << '\n';
2160 stream << "Flags: " << optimization_flags() << '\n';
2161 stream << "Optimized? " << (optimized() ? "Yes" : "No") << '\n';
2162 stream << "Values substituted? " << values_substituted() << "\n\n";
2163
2164 // Independent variables
2165 stream << "Symbols (" << n_independent_variables()
2166 << " independent variables):" << '\n';
2167 int cntr = 0;
2168 for (SD::types::substitution_map::const_iterator it =
2169 independent_variables_symbols.begin();
2170 it != independent_variables_symbols.end();
2171 ++it, ++cntr)
2172 {
2173 stream << cntr << ": " << it->first << '\n';
2174 }
2175 stream << '\n' << std::flush;
2176
2177 // Dependent functions
2178 stream << "Functions (" << n_dependent_variables()
2179 << " dependent variables):" << '\n';
2180 cntr = 0;
2181 for (typename SD::types::symbol_vector::const_iterator it =
2182 dependent_variables_functions.begin();
2183 it != dependent_variables_functions.end();
2184 ++it, ++cntr)
2185 {
2186 stream << cntr << ": " << (*it) << '\n';
2187 }
2188 stream << '\n' << std::flush;
2189
2190 // Common subexpression
2191 if (optimized() == true && use_symbolic_CSE() == true)
2192 {
2193 Assert(optimizer, ExcNotInitialized());
2194 const bool print_cse_reductions = true;
2195 const bool print_independent_symbols = false;
2196 const bool print_dependent_functions = false;
2197
2198 if (optimization_method() == OptimizerType::dictionary)
2199 {
2200 Assert(dynamic_cast<typename internal::DictionaryOptimizer<
2201 ReturnType>::OptimizerType *>(optimizer.get()),
2202 ExcMessage("Cannot cast optimizer to Dictionary type."));
2203
2204 internal::OptimizerHelper<
2205 ReturnType,
2206 internal::DictionaryOptimizer<ReturnType>>::
2207 print(stream,
2208 dynamic_cast<typename internal::DictionaryOptimizer<
2209 ReturnType>::OptimizerType *>(optimizer.get()),
2210 print_independent_symbols,
2211 print_dependent_functions,
2212 print_cse_reductions);
2213
2214 stream << '\n' << std::flush;
2215 }
2216 else if (optimization_method() == OptimizerType::lambda)
2217 {
2218 Assert(dynamic_cast<typename internal::LambdaOptimizer<
2219 ReturnType>::OptimizerType *>(optimizer.get()),
2220 ExcMessage("Cannot cast optimizer to Lambda type."));
2221
2222 internal::OptimizerHelper<ReturnType,
2223 internal::LambdaOptimizer<ReturnType>>::
2224 print(stream,
2225 dynamic_cast<typename internal::LambdaOptimizer<
2226 ReturnType>::OptimizerType *>(optimizer.get()),
2227 print_independent_symbols,
2228 print_dependent_functions,
2229 print_cse_reductions);
2230 }
2231# ifdef HAVE_SYMENGINE_LLVM
2232 else if (optimization_method() == OptimizerType::llvm)
2233 {
2234 Assert(dynamic_cast<typename internal::LLVMOptimizer<
2235 ReturnType>::OptimizerType *>(optimizer.get()),
2236 ExcMessage("Cannot cast optimizer to LLVM type."));
2237
2238 internal::OptimizerHelper<ReturnType,
2239 internal::LLVMOptimizer<ReturnType>>::
2240 print(stream,
2241 dynamic_cast<typename internal::LLVMOptimizer<
2242 ReturnType>::OptimizerType *>(optimizer.get()),
2243 print_independent_symbols,
2244 print_dependent_functions,
2245 print_cse_reductions);
2246 }
2247# endif // HAVE_SYMENGINE_LLVM
2248 else
2249 {
2250 AssertThrow(false, ExcMessage("Unknown optimizer type."));
2251 }
2252 }
2253
2254 if (values_substituted())
2255 {
2256 stream << "Evaluated functions:" << '\n';
2257 stream << std::flush;
2258 cntr = 0;
2259 for (typename std::vector<ReturnType>::const_iterator it =
2260 dependent_variables_output.begin();
2261 it != dependent_variables_output.end();
2262 ++it, ++cntr)
2263 {
2264 stream << cntr << ": " << (*it) << '\n';
2265 }
2266 stream << '\n' << std::flush;
2267 }
2268 }
2269
2270
2271
2272 template <typename ReturnType>
2273 template <class Archive>
2274 void
2275 BatchOptimizer<ReturnType>::save(Archive &ar,
2276 const unsigned int version) const
2277 {
2278 // Serialize enum classes...
2279 {
2280 const auto m =
2281 static_cast<typename std::underlying_type<OptimizerType>::type>(
2282 method);
2283 ar &m;
2284 }
2285 {
2286 const auto f =
2287 static_cast<typename std::underlying_type<OptimizationFlags>::type>(
2288 flags);
2289 ar &f;
2290 }
2291
2292 // Important: Independent variables must always be
2293 // serialized before the dependent variables.
2294 ar &independent_variables_symbols;
2295 ar &dependent_variables_functions;
2296
2297 ar &dependent_variables_output;
2298 ar &map_dep_expr_vec_entry;
2299 ar &ready_for_value_extraction;
2300
2301 // Mark that we've saved this class at some point.
2302 has_been_serialized = true;
2303 ar &has_been_serialized;
2304
2305 // When we serialize the optimizer itself, we have to (unfortunately)
2306 // provide it with sufficient information to rebuild itself from scratch.
2307 // This is because only two of the three optimization classes support
2308 // real serialization (i.e. have save/load capability).
2309 const SD::types::symbol_vector symbol_vec =
2310 Utilities::extract_symbols(independent_variables_symbols);
2311 if (typename internal::DictionaryOptimizer<ReturnType>::OptimizerType
2312 *opt = dynamic_cast<typename internal::DictionaryOptimizer<
2313 ReturnType>::OptimizerType *>(optimizer.get()))
2314 {
2315 Assert(optimization_method() == OptimizerType::dictionary,
2316 ExcInternalError());
2317 internal::OptimizerHelper<
2318 ReturnType,
2319 internal::DictionaryOptimizer<ReturnType>>::save(ar, version, opt);
2320 }
2321 else if (typename internal::LambdaOptimizer<ReturnType>::OptimizerType
2322 *opt = dynamic_cast<typename internal::LambdaOptimizer<
2323 ReturnType>::OptimizerType *>(optimizer.get()))
2324 {
2325 Assert(optimization_method() == OptimizerType::lambda,
2326 ExcInternalError());
2327 internal::OptimizerHelper<
2328 ReturnType,
2329 internal::LambdaOptimizer<ReturnType>>::save(ar, version, opt);
2330 }
2331# ifdef HAVE_SYMENGINE_LLVM
2332 else if (typename internal::LLVMOptimizer<ReturnType>::OptimizerType
2333 *opt = dynamic_cast<typename internal::LLVMOptimizer<
2334 ReturnType>::OptimizerType *>(optimizer.get()))
2335 {
2336 Assert(optimization_method() == OptimizerType::llvm,
2337 ExcInternalError());
2338 internal::OptimizerHelper<
2339 ReturnType,
2340 internal::LLVMOptimizer<ReturnType>>::save(ar, version, opt);
2341 }
2342# endif
2343 else
2344 {
2345 AssertThrow(false, ExcMessage("Unknown optimizer type."));
2346 }
2347 }
2348
2349
2350
2351 template <typename ReturnType>
2352 template <class Archive>
2353 void
2354 BatchOptimizer<ReturnType>::load(Archive &ar, const unsigned int version)
2355 {
2356 Assert(independent_variables_symbols.empty(), ExcInternalError());
2357 Assert(dependent_variables_functions.empty(), ExcInternalError());
2358 Assert(dependent_variables_output.empty(), ExcInternalError());
2359 Assert(map_dep_expr_vec_entry.empty(), ExcInternalError());
2360 Assert(ready_for_value_extraction == false, ExcInternalError());
2361
2362 // Deserialize enum classes...
2363 {
2364 typename std::underlying_type<OptimizerType>::type m;
2365 ar & m;
2366 method = static_cast<OptimizerType>(m);
2367 }
2368 {
2369 typename std::underlying_type<OptimizationFlags>::type f;
2370 ar & f;
2371 flags = static_cast<OptimizationFlags>(f);
2372 }
2373
2374 // Important: Independent variables must always be
2375 // deserialized before the dependent variables.
2376 ar &independent_variables_symbols;
2377 ar &dependent_variables_functions;
2378
2379 ar &dependent_variables_output;
2380 ar &map_dep_expr_vec_entry;
2381 ar &ready_for_value_extraction;
2382
2383 ar &has_been_serialized;
2384
2385 // If we're reading in data, then create the optimizer
2386 // and then deserialize it.
2387 Assert(!optimizer, ExcInternalError());
2388
2389 // Create and configure the optimizer
2390 create_optimizer(optimizer);
2391 Assert(optimizer, ExcNotInitialized());
2392
2393 // When we deserialize the optimizer itself, we have to (unfortunately)
2394 // provide it with sufficient information to rebuild itself from scratch.
2395 // This is because only two of the three optimization classes support
2396 // real serialization (i.e. have save/load capability).
2397 const SD::types::symbol_vector symbol_vec =
2398 Utilities::extract_symbols(independent_variables_symbols);
2399 if (typename internal::DictionaryOptimizer<ReturnType>::OptimizerType
2400 *opt = dynamic_cast<typename internal::DictionaryOptimizer<
2401 ReturnType>::OptimizerType *>(optimizer.get()))
2402 {
2403 Assert(optimization_method() == OptimizerType::dictionary,
2404 ExcInternalError());
2405 internal::OptimizerHelper<ReturnType,
2406 internal::DictionaryOptimizer<ReturnType>>::
2407 load(ar,
2408 version,
2409 opt,
2410 Utilities::convert_expression_vector_to_basic_vector(
2411 symbol_vec),
2412 Utilities::convert_expression_vector_to_basic_vector(
2413 dependent_variables_functions),
2414 optimization_flags());
2415 }
2416 else if (typename internal::LambdaOptimizer<ReturnType>::OptimizerType
2417 *opt = dynamic_cast<typename internal::LambdaOptimizer<
2418 ReturnType>::OptimizerType *>(optimizer.get()))
2419 {
2420 Assert(optimization_method() == OptimizerType::lambda,
2421 ExcInternalError());
2422 internal::OptimizerHelper<ReturnType,
2423 internal::LambdaOptimizer<ReturnType>>::
2424 load(ar,
2425 version,
2426 opt,
2427 Utilities::convert_expression_vector_to_basic_vector(
2428 symbol_vec),
2429 Utilities::convert_expression_vector_to_basic_vector(
2430 dependent_variables_functions),
2431 optimization_flags());
2432 }
2433# ifdef HAVE_SYMENGINE_LLVM
2434 else if (typename internal::LLVMOptimizer<ReturnType>::OptimizerType
2435 *opt = dynamic_cast<typename internal::LLVMOptimizer<
2436 ReturnType>::OptimizerType *>(optimizer.get()))
2437 {
2438 Assert(optimization_method() == OptimizerType::llvm,
2439 ExcInternalError());
2440 internal::OptimizerHelper<ReturnType,
2441 internal::LLVMOptimizer<ReturnType>>::
2442 load(ar,
2443 version,
2444 opt,
2445 Utilities::convert_expression_vector_to_basic_vector(
2446 symbol_vec),
2447 Utilities::convert_expression_vector_to_basic_vector(
2448 dependent_variables_functions),
2449 optimization_flags());
2450 }
2451# endif
2452 else
2453 {
2454 AssertThrow(false, ExcMessage("Unknown optimizer type."));
2455 }
2456 }
2457
2458
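    // Illustrative serialization sketch: the save()/load() members defined
    // above are driven through the usual Boost.Serialization archives (the
    // file name and archive type are assumptions):
    //
    //   {
    //     std::ofstream                 out("batch_optimizer.dat");
    //     boost::archive::text_oarchive oa(out);
    //     oa << optimizer; // invokes BatchOptimizer::save()
    //   }
    //   {
    //     BatchOptimizer<double>        loaded;
    //     std::ifstream                 in("batch_optimizer.dat");
    //     boost::archive::text_iarchive ia(in);
    //     ia >> loaded;    // invokes BatchOptimizer::load()
    //   }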
2459
2460 template <typename ReturnType>
2461 template <int rank, int dim>
2462 void
2463 BatchOptimizer<ReturnType>::register_function(
2464 const Tensor<rank, dim, Expression> &function_tensor)
2465 {
2466 Assert(optimized() == false,
2467 ExcMessage(
2468 "Cannot register functions once the optimizer is finalised."));
2469
2470 register_vector_functions(
2471 internal::unroll_to_expression_vector(function_tensor));
2472 }
2473
2474
2475
2476 template <typename ReturnType>
2477 template <int rank, int dim>
2478 void
2479 BatchOptimizer<ReturnType>::register_function(
2480 const SymmetricTensor<rank, dim, Expression> &function_tensor)
2481 {
2482 Assert(optimized() == false,
2483 ExcMessage(
2484 "Cannot register functions once the optimizer is finalised."));
2485
2486 register_vector_functions(
2487 internal::unroll_to_expression_vector(function_tensor));
2488 }
2489
2490
2491
2492 template <typename ReturnType>
2493 template <typename T, typename... Args>
2494 void
2495 BatchOptimizer<ReturnType>::register_functions(
2496 const T &functions,
2497 const Args &...other_functions)
2498 {
2499 internal::register_functions(*this, functions);
2500 internal::register_functions(*this, other_functions...);
2501 }
2502
2503
2504
2505 template <typename ReturnType>
2506 template <typename T>
2507 void
2508 BatchOptimizer<ReturnType>::register_functions(
2509 const std::vector<T> &functions)
2510 {
2511 internal::register_functions(*this, functions);
2512 }
2513
2514
2515
2516 template <typename ReturnType>
2517 template <int rank, int dim>
2518 Tensor<rank, dim, ReturnType>
2519 BatchOptimizer<ReturnType>::extract(
2520 const Tensor<rank, dim, Expression> &funcs,
2521 const std::vector<ReturnType> & cached_evaluation) const
2522 {
2523 return internal::tensor_evaluate_optimized(funcs,
2524 cached_evaluation,
2525 *this);
2526 }
2527
2528
2529
2530 template <typename ReturnType>
2531 template <int rank, int dim>
2532 Tensor<rank, dim, ReturnType>
2533 BatchOptimizer<ReturnType>::evaluate(
2534 const Tensor<rank, dim, Expression> &funcs) const
2535 {
2536 Assert(
2537 values_substituted() == true,
2538 ExcMessage(
2539 "The optimizer is not configured to perform evaluation. "
2540 "This action can only performed after substitute() has been called."));
2541
2542 return extract(funcs, dependent_variables_output);
2543 }
2544
2545
2546
2547 template <typename ReturnType>
2548 template <int rank, int dim>
2549 SymmetricTensor<rank, dim, ReturnType>
2550 BatchOptimizer<ReturnType>::extract(
2551 const SymmetricTensor<rank, dim, Expression> &funcs,
2552 const std::vector<ReturnType> & cached_evaluation) const
2553 {
2554 return internal::tensor_evaluate_optimized(funcs,
2555 cached_evaluation,
2556 *this);
2557 }
2558
2559
2560
2561 template <typename ReturnType>
2562 template <int rank, int dim>
2563 SymmetricTensor<rank, dim, ReturnType>
2564 BatchOptimizer<ReturnType>::evaluate(
2565 const SymmetricTensor<rank, dim, Expression> &funcs) const
2566 {
2567 Assert(
2568 values_substituted() == true,
2569 ExcMessage(
2570 "The optimizer is not configured to perform evaluation. "
2571 "This action can only performed after substitute() has been called."));
2572
2573 return extract(funcs, dependent_variables_output);
2574 }
2575
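    // Illustrative sketch (symbol names are hypothetical): after substitute()
    // has been called, all results can be fetched once and individual
    // tensor-valued functions extracted from that cache:
    //
    //   const std::vector<double> &cache = optimizer.evaluate();
    //   const Tensor<2, dim, double>          t  = optimizer.extract(t_sym,  cache);
    //   const SymmetricTensor<2, dim, double> st = optimizer.extract(st_sym, cache);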
2576# endif // DOXYGEN
2577
2578 } // namespace SD
2579} // namespace Differentiation
2580
2581
2582DEAL_II_NAMESPACE_CLOSE
2583
2584#endif // DEAL_II_WITH_SYMENGINE
2585
2586#endif