Reference documentation for deal.II version 9.2.0

Differentiation::SD::BatchOptimizer< ReturnType > Class Template Reference

#include <deal.II/differentiation/sd/symengine_optimizer.h>

Public Member Functions

 BatchOptimizer ()
 
 BatchOptimizer (const enum OptimizerType &optimization_method, const enum OptimizationFlags &optimization_flags=OptimizationFlags::optimize_all)
 
 BatchOptimizer (const BatchOptimizer &other)
 
 BatchOptimizer (BatchOptimizer &&)=default
 
 ~BatchOptimizer ()=default
 
template<typename Stream >
void print (Stream &stream, const bool print_cse=false) const
 
template<class Archive >
void save (Archive &archive, const unsigned int version) const
 
template<class Archive >
void load (Archive &archive, const unsigned int version)
 
template<class Archive >
void serialize (Archive &archive, const unsigned int version)
 
template<typename ReturnType >
void register_functions (const SD::types::symbol_vector &functions)
 
Independent variables
void register_symbols (const types::substitution_map &substitution_map)
 
void register_symbols (const SymEngine::map_basic_basic &substitution_map)
 
void register_symbols (const types::symbol_vector &symbols)
 
void register_symbols (const SymEngine::vec_basic &symbols)
 
types::symbol_vector get_independent_symbols () const
 
std::size_t n_independent_variables () const
 
Dependent variables
void register_function (const Expression &function)
 
template<int rank, int dim>
void register_function (const Tensor< rank, dim, Expression > &function_tensor)
 
template<int rank, int dim>
void register_function (const SymmetricTensor< rank, dim, Expression > &function_tensor)
 
void register_functions (const types::symbol_vector &functions)
 
void register_functions (const SymEngine::vec_basic &functions)
 
template<typename T >
void register_functions (const std::vector< T > &functions)
 
template<typename T , typename... Args>
void register_functions (const T &functions, const Args &... other_functions)
 
const types::symbol_vector & get_dependent_functions () const
 
std::size_t n_dependent_variables () const
 
Optimization
void set_optimization_method (const enum OptimizerType &optimization_method, const enum OptimizationFlags &optimization_flags=OptimizationFlags::optimize_all)
 
enum OptimizerType optimization_method () const
 
enum OptimizationFlags optimization_flags () const
 
bool use_symbolic_CSE () const
 
void optimize ()
 
bool optimized () const
 
Symbol substitution
void substitute (const types::substitution_map &substitution_map) const
 
void substitute (const SymEngine::map_basic_basic &substitution_map) const
 
void substitute (const types::symbol_vector &symbols, const std::vector< ReturnType > &values) const
 
void substitute (const SymEngine::vec_basic &symbols, const std::vector< ReturnType > &values) const
 
bool values_substituted () const
 

Evaluation / data extraction
const std::vector< ReturnType > & evaluate () const
 
ReturnType evaluate (const Expression &func) const
 
std::vector< ReturnType > evaluate (const std::vector< Expression > &funcs) const
 
template<int rank, int dim>
Tensor< rank, dim, ReturnType > evaluate (const Tensor< rank, dim, Expression > &funcs) const
 
template<int rank, int dim>
SymmetricTensor< rank, dim, ReturnType > evaluate (const SymmetricTensor< rank, dim, Expression > &funcs) const
 
Private Types
using map_dependent_expression_to_vector_entry_t = std::map< SD::Expression, std::size_t, SD::types::internal::ExpressionKeyLess >
 
Private Member Functions
bool is_valid_nonunique_dependent_variable (const SD::Expression &function) const
 
bool is_valid_nonunique_dependent_variable (const SymEngine::RCP< const SymEngine::Basic > &function) const
 
void register_scalar_function (const SD::Expression &function)
 
void register_vector_functions (const types::symbol_vector &functions)
 
void create_optimizer (std::unique_ptr< SymEngine::Visitor > &optimizer)
 
void substitute (const std::vector< ReturnType > &substitution_values) const
 
Private Attributes
enum OptimizerType method
 
enum OptimizationFlags flags
 
types::substitution_map independent_variables_symbols
 
types::symbol_vector dependent_variables_functions
 
std::vector< ReturnType > dependent_variables_output
 
map_dependent_expression_to_vector_entry_t map_dep_expr_vec_entry
 
std::unique_ptr< SymEngine::Visitor > optimizer
 
bool ready_for_value_extraction
 
bool has_been_serialized
 

Detailed Description

template<typename ReturnType>
class Differentiation::SD::BatchOptimizer< ReturnType >

A class that facilitates the optimization of symbolic expressions.

The expressions registered with this class will be optimized; that is to say, the code path taken to substitute the set of (independent) symbols into the collection of (dependent) symbolic functions will be optimized using a chosen approach.

This snippet of pseudo-code describes the general usage of this class:

// Define some independent variables
const Expression x("x");
const Expression y("y");
...
// Compute some symbolic expressions that are dependent on the
// independent variables. These could be, for example, scalar
// expressions or tensors of expressions.
const auto f = calculate_f(x, y, ...);
const auto g = calculate_g(x, y, ...);
...
// Now create an optimizer to evaluate the dependent functions.
// The numerical result will be of type double, and a "lambda" optimizer,
// which employs common subexpression elimination, will be used.
using ReturnType = double;
BatchOptimizer<ReturnType> optimizer (OptimizerType::lambda,
                                      OptimizationFlags::optimize_cse);
// Register symbols that represent independent variables...
optimizer.register_symbols(x, y, ...);
// ... and symbolic expressions that represent dependent functions.
optimizer.register_functions(f, g, ...);
// Now we determine an equivalent code path that will evaluate
// all of the dependent functions at once, but with less computational
// cost than when evaluating the symbolic expression directly.
optimizer.optimize(); // Note: This is an expensive call.
// Next we pass the optimizer the numeric values that we wish the
// independent variables to represent.
const auto substitution_map
= make_substitution_map({x, ...}, {y, ...}, ...);
// When making this next call, the call path used to (numerically)
// evaluate the dependent functions is quicker than dictionary
// substitution.
optimizer.substitute(substitution_map);
// Finally, we can get the numeric equivalent of the dependent functions
// from the optimizer.
const auto result_f = optimizer.evaluate(f);
const auto result_g = optimizer.evaluate(g);

Since the call to optimize() may be quite costly, there are a few "best practices" that can be adopted in order to mitigate this cost as much as possible:

  1. Reuse a single instance of the class as much as possible. The most obvious way that this can be achieved would be to place an instance of this class in a centralized location where it can potentially be used by multiple calling functions and objects, if contextually possible.
  2. Another form of reuse would entail generalizing the dependent functions/expressions to be evaluated by the optimizer as much as possible. For example, material coefficients need not necessarily be hard-coded, and one generalized statement of a constitutive law could then be broadly used in other material subdomains governed by the same class of constitutive law, but with different constitutive parameters. The same principle applies if using symbolic expressions to describe boundary conditions, systems of linear equations, etc.
  3. When possible, consider using serialization to save and load the state of an optimizer that has already been "optimized", i.e. one that has been placed in a state where it is ready to evaluate expressions. With the exception of "lambda" optimization, all other forms of optimization permit checkpointing, meaning that the optimization could be done up front before executing the main body of code. It could also be used to duplicate an optimizer in an efficient manner, should multiple instances of the same optimizer be required. A sketch of this workflow is given after this list.
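
The following sketch illustrates the checkpointing idea from point 3. It continues the pseudo-code example given earlier (reusing x, y, f, g and ReturnType), and it assumes that the standard Boost.Serialization workflow applies through this class's save(), load() and serialize() members; the file name, the chosen optimization method, and the re-registration of symbols and functions before loading are illustrative assumptions, not guarantees:

#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <fstream>

// Save the state of an optimizer on which optimize() has already been
// called. A serialization-capable method (i.e. not "lambda"), such as
// OptimizerType::llvm, is assumed here.
{
  std::ofstream output_stream("optimizer.ckpt");
  boost::archive::text_oarchive output_archive(output_stream);
  output_archive << optimizer; // Expected to dispatch to save().
}

// Later (possibly in another program run): construct a new optimizer,
// register the same symbols and dependent functions, and then load the
// checkpointed state instead of paying for optimize() again.
{
  BatchOptimizer<ReturnType> new_optimizer(OptimizerType::llvm,
                                           OptimizationFlags::optimize_all);
  new_optimizer.register_symbols(x, y /*, ...*/);
  new_optimizer.register_functions(f, g /*, ...*/);

  std::ifstream input_stream("optimizer.ckpt");
  boost::archive::text_iarchive input_archive(input_stream);
  input_archive >> new_optimizer; // Expected to dispatch to load().

  // The restored optimizer can now be used for substitution and
  // evaluation as usual.
}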
Template Parameters
ReturnType    The number type that is to be returned after value substitution and evaluation. Floating point and complex numbers are currently supported.
Warning
This class is not thread-safe.
The LLVM optimizer does not yet support complex numbers. If this incompatible combination of ReturnType and optimization method is selected, then an error will be thrown at run time.
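
As a brief, hypothetical illustration of the second warning: if complex values are required, an optimization method other than the LLVM one must be chosen, for example the "lambda" optimizer used in the snippet above:

#include <complex>

// Complex return types are supported, but not by the LLVM optimizer,
// so a non-LLVM optimization method is selected here.
using ReturnType = std::complex<double>;
BatchOptimizer<ReturnType> optimizer(OptimizerType::lambda,
                                     OptimizationFlags::optimize_all);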
Author
Jean-Paul Pelteret, Isuru Fernando, 2017, 2020

Definition at line 91 of file symengine_optimizer.h.

Member Function Documentation

◆ register_functions()

template<typename ReturnType >
void Differentiation::SD::BatchOptimizer< ReturnType >::register_functions ( const SD::types::symbol_vector &functions)

Definition at line 249 of file symengine_optimizer.cc.
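
As a short usage sketch (continuing the pseudo-code example from the detailed description, and assuming that SD::types::symbol_vector can be brace-initialized from Expression objects), a batch of dependent expressions can be registered through this overload in a single call rather than one at a time via register_function():

// Hypothetical dependent expressions f and g, built from previously
// registered independent symbols.
const SD::types::symbol_vector dependent_functions = {f, g};

// Register the whole collection with the optimizer at once.
optimizer.register_functions(dependent_functions);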


The documentation for this class was generated from the following files:

symengine_optimizer.h
symengine_optimizer.cc