Reference documentation for deal.II version 8.5.1
trilinos_precondition_muelu.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2008 - 2017 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/lac/trilinos_precondition.h>
17 
18 #ifdef DEAL_II_WITH_TRILINOS
19 #if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
20 
21 # include <deal.II/lac/vector.h>
22 # include <deal.II/lac/sparse_matrix.h>
23 # include <deal.II/lac/trilinos_sparse_matrix.h>
24 
25 DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
26 # include <Teuchos_ParameterList.hpp>
27 # include <Teuchos_RCP.hpp>
28 # include <Epetra_MultiVector.h>
29 # include <ml_include.h>
30 # include <ml_MultiLevelPreconditioner.h>
31 
32 # include <MueLu.hpp>
33 # include <MueLu_EpetraOperator.hpp>
34 # include <MueLu_MLParameterListInterpreter.hpp>
35 DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
36 
37 DEAL_II_NAMESPACE_OPEN
38 
39 namespace TrilinosWrappers
40 {
41  namespace
42  {
43 #ifndef DEAL_II_WITH_64BIT_INDICES
44  int n_global_rows (const Epetra_RowMatrix &matrix)
45  {
46  return matrix.NumGlobalRows();
47  }
48 
49  int global_length (const Epetra_MultiVector &vector)
50  {
51  return vector.GlobalLength();
52  }
53 
54  int gid(const Epetra_Map &map, unsigned int i)
55  {
56  return map.GID(i);
57  }
58 #else
59  long long int n_global_rows (const Epetra_RowMatrix &matrix)
60  {
61  return matrix.NumGlobalRows64();
62  }
63 
64  long long int global_length (const Epetra_MultiVector &vector)
65  {
66  return vector.GlobalLength64();
67  }
68 
69  long long int gid(const Epetra_Map &map, dealii::types::global_dof_index i)
70  {
71  return map.GID64(i);
72  }
73 #endif
74  }
75 
76 
77 
78  PreconditionAMGMueLu::AdditionalData::
79  AdditionalData (const bool elliptic,
80  const unsigned int n_cycles,
81  const bool w_cycle,
82  const double aggregation_threshold,
83  const std::vector<std::vector<bool> > &constant_modes,
84  const unsigned int smoother_sweeps,
85  const unsigned int smoother_overlap,
86  const bool output_details,
87  const char *smoother_type,
88  const char *coarse_type)
89  :
90  elliptic (elliptic),
91  n_cycles (n_cycles),
92  w_cycle (w_cycle),
93  aggregation_threshold (aggregation_threshold),
94  constant_modes (constant_modes),
95  smoother_sweeps (smoother_sweeps),
96  smoother_overlap (smoother_overlap),
97  output_details (output_details),
98  smoother_type (smoother_type),
99  coarse_type (coarse_type)
100  {}
101 
102 
103 
104  PreconditionAMGMueLu::PreconditionAMGMueLu ()
105  {
106 #ifdef DEAL_II_WITH_64BIT_INDICES
107  AssertThrow (false, ExcMessage("PreconditionAMGMueLu does not support 64bit-indices!"));
108 #endif
109  }
110 
111 
112 
113  PreconditionAMGMueLu::~PreconditionAMGMueLu ()
114  {
115  preconditioner.reset();
116  trilinos_matrix.reset();
117  }
118 
119 
120 
121  void
122  PreconditionAMGMueLu::initialize (const SparseMatrix &matrix,
123  const AdditionalData &additional_data)
124  {
125  initialize(matrix.trilinos_matrix(), additional_data);
126  }
127 
128 
129 
130  void
131  PreconditionAMGMueLu::initialize (const Epetra_CrsMatrix &matrix,
132  const AdditionalData &additional_data)
133  {
134  // Build the AMG preconditioner.
135  Teuchos::ParameterList parameter_list;
136 
137  if (additional_data.elliptic == true)
138  ML_Epetra::SetDefaults("SA",parameter_list);
139  else
140  {
141  ML_Epetra::SetDefaults("NSSA",parameter_list);
142  parameter_list.set("aggregation: block scaling", true);
143  }
144  // MIS does not exist anymore; the only choices are uncoupled and coupled. When using
145  // uncoupled, aggregates cannot span multiple processes. When using coupled,
146  // aggregates can span multiple processes.
147  parameter_list.set("aggregation: type", "Uncoupled");
148 
149  parameter_list.set("smoother: type", additional_data.smoother_type);
150  parameter_list.set("coarse: type", additional_data.coarse_type);
151 
152  parameter_list.set("smoother: sweeps",
153  static_cast<int>(additional_data.smoother_sweeps));
154  parameter_list.set("cycle applications",
155  static_cast<int>(additional_data.n_cycles));
156  if (additional_data.w_cycle == true)
157  parameter_list.set("prec type", "MGW");
158  else
159  parameter_list.set("prec type", "MGV");
160 
161  parameter_list.set("smoother: Chebyshev alpha",10.);
162  parameter_list.set("smoother: ifpack overlap",
163  static_cast<int>(additional_data.smoother_overlap));
164  parameter_list.set("aggregation: threshold",
165  additional_data.aggregation_threshold);
166  parameter_list.set("coarse: max size", 2000);
167 
168  if (additional_data.output_details)
169  parameter_list.set("ML output", 10);
170  else
171  parameter_list.set("ML output", 0);
172 
173  const Epetra_Map &domain_map = matrix.OperatorDomainMap();
174 
175  const size_type constant_modes_dimension =
176  additional_data.constant_modes.size();
177  Epetra_MultiVector distributed_constant_modes (domain_map,
178  constant_modes_dimension > 0 ?
179  constant_modes_dimension : 1);
180  std::vector<double> dummy (constant_modes_dimension);
181 
182  if (constant_modes_dimension > 0)
183  {
184  const size_type n_rows = n_global_rows(matrix);
185  const bool constant_modes_are_global =
186  additional_data.constant_modes[0].size() == n_rows;
187  const size_type n_relevant_rows =
188  constant_modes_are_global ? n_rows : additional_data.constant_modes[0].size();
189  const size_type my_size = domain_map.NumMyElements();
190  if (constant_modes_are_global == false)
191  Assert (n_relevant_rows == my_size,
192  ExcDimensionMismatch(n_relevant_rows, my_size));
193  Assert (n_rows ==
194  static_cast<size_type>(global_length(distributed_constant_modes)),
195  ExcDimensionMismatch(n_rows,
196  global_length(distributed_constant_modes)));
197 
198  (void)n_relevant_rows;
199  (void)global_length;
200 
201  // Reshape null space as a contiguous vector of doubles so that
202  // Trilinos can read from it.
203  for (size_type d=0; d<constant_modes_dimension; ++d)
204  for (size_type row=0; row<my_size; ++row)
205  {
206  TrilinosWrappers::types::int_type global_row_id =
207  constant_modes_are_global ? gid(domain_map,row) : row;
208  distributed_constant_modes[d][row] =
209  additional_data.constant_modes[d][global_row_id];
210  }
211 
212  parameter_list.set("null space: type", "pre-computed");
213  parameter_list.set("null space: dimension",
214  distributed_constant_modes.NumVectors());
215  if (my_size > 0)
216  parameter_list.set("null space: vectors",
217  distributed_constant_modes.Values());
218  // We need to set a valid pointer to data even if there is no data on
219  // the current processor. Therefore, pass a dummy in that case
220  else
221  parameter_list.set("null space: vectors",
222  &dummy[0]);
223  }
224 
225  initialize (matrix, parameter_list);
226  }
227 
228 
229 
230  void
231  PreconditionAMGMueLu::initialize (const SparseMatrix &matrix,
232  Teuchos::ParameterList &muelu_parameters)
233  {
234  initialize(matrix.trilinos_matrix(), muelu_parameters);
235  }
236 
237 
238 
239  void
240  PreconditionAMGMueLu::initialize (const Epetra_CrsMatrix &matrix,
241  Teuchos::ParameterList &muelu_parameters)
242  {
243  // We cannot use MueLu::CreateEpetraOperator directly because we cannot
244  // transfer ownership of MueLu::EpetraOperator from Teuchos::RCP to
245  // std::shared_ptr.
246 
247  // For now, just use a serial node, i.e., no multithreading or GPU.
248  typedef KokkosClassic::DefaultNode::DefaultNodeType node;
249  preconditioner.reset ();
250 
251  // Cast matrix into a MueLu::Matrix. The constness needs to be cast away.
252  // MueLu uses Teuchos::RCP, which is the Trilinos version of std::shared_ptr.
253  Teuchos::RCP<Epetra_CrsMatrix> rcp_matrix = Teuchos::rcpFromRef(
254  *(const_cast<Epetra_CrsMatrix *>(&matrix)));
255  Teuchos::RCP<Xpetra::CrsMatrix<double,int,int,node> > muelu_crs_matrix =
256  Teuchos::rcp(new Xpetra::EpetraCrsMatrix (rcp_matrix));
257  Teuchos::RCP<Xpetra::Matrix<double,int,int,node> > muelu_matrix =
258  Teuchos::rcp(new Xpetra::CrsMatrixWrap<double,int,int,node> (muelu_crs_matrix));
259 
260  // Create the multigrid hierarchy using ML parameters.
261  Teuchos::RCP<MueLu::HierarchyManager<double,int,int,node> > hierarchy_factory;
262  hierarchy_factory = Teuchos::rcp(
263  new MueLu::MLParameterListInterpreter<double,int,int,node> (muelu_parameters));
264  Teuchos::RCP<MueLu::Hierarchy<double,int,int,node> > hierarchy = hierarchy_factory->CreateHierarchy();
265  hierarchy->GetLevel(0)->Set("A",muelu_matrix);
266  hierarchy_factory->SetupHierarchy(*hierarchy);
267 
268  // MueLu::EpetraOperator is just a wrapper around a "standard"
269  // Epetra_Operator.
270  preconditioner.reset(new MueLu::EpetraOperator(hierarchy));
271  }
272 
273 
274 
275  template <typename number>
276  void
277  PreconditionAMGMueLu::
278  initialize (const ::dealii::SparseMatrix<number> &deal_ii_sparse_matrix,
279  const AdditionalData &additional_data,
280  const double drop_tolerance,
281  const ::dealii::SparsityPattern *use_this_sparsity)
282  {
283  preconditioner.reset();
284  const size_type n_rows = deal_ii_sparse_matrix.m();
285 
286  // Init Epetra Matrix using an
287  // equidistributed map; avoid
288  // storing the nonzero
289  // elements.
290  vector_distributor.reset (new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n_rows),
291  0, communicator));
292 
293  if (trilinos_matrix.get() == 0)
294  trilinos_matrix.reset (new SparseMatrix());
295 
295 
296  trilinos_matrix->reinit (*vector_distributor, *vector_distributor,
297  deal_ii_sparse_matrix, drop_tolerance, true,
298  use_this_sparsity);
299 
300  initialize (*trilinos_matrix, additional_data);
301  }
302 
303 
304 
305  void PreconditionAMGMueLu::clear ()
306  {
307  PreconditionBase::clear();
308  trilinos_matrix.reset();
309  }
310 
311 
312 
313  PreconditionAMGMueLu::size_type
314  PreconditionAMGMueLu::memory_consumption () const
315  {
316  unsigned int memory = sizeof(*this);
317 
318  // todo: find a way to read out ML's data
319  // sizes
320  if (trilinos_matrix.get() != 0)
321  memory += trilinos_matrix->memory_consumption();
322  return memory;
323  }
324 
325 
326 
327  // explicit instantiations
328  template void PreconditionAMGMueLu::initialize (const ::dealii::SparseMatrix<double> &,
329  const AdditionalData &, const double,
330  const ::dealii::SparsityPattern *);
331  template void PreconditionAMGMueLu::initialize (const ::dealii::SparseMatrix<float> &,
332  const AdditionalData &, const double,
333  const ::dealii::SparsityPattern *);
334 
335 }
336 
337 DEAL_II_NAMESPACE_CLOSE
338 
339 #endif // DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
340 #endif // DEAL_II_WITH_TRILINOS
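
Usage sketch (not part of this file): the listing above only contains the implementation, so the following minimal example illustrates how the AdditionalData-based initialize() overload is typically driven from user code. The helper name solve_with_amg_muelu and the objects handed to it (an assembled TrilinosWrappers::SparseMatrix, a DoFHandler, and Trilinos vectors) are assumptions made for the example.

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_vector.h>

using namespace dealii;

// Illustrative helper (not part of deal.II): build a MueLu-based AMG
// preconditioner for an already assembled Trilinos matrix and use it
// in a CG solve.
template <int dim>
void solve_with_amg_muelu (const TrilinosWrappers::SparseMatrix &system_matrix,
                           const DoFHandler<dim>                &dof_handler,
                           TrilinosWrappers::MPI::Vector        &solution,
                           const TrilinosWrappers::MPI::Vector  &system_rhs)
{
  TrilinosWrappers::PreconditionAMGMueLu::AdditionalData data;
  data.elliptic              = true;   // smoothed-aggregation ("SA") defaults
  data.n_cycles              = 1;      // one cycle per preconditioner application
  data.smoother_sweeps       = 2;
  data.aggregation_threshold = 1e-4;

  // Near-null space of the operator ("constant modes"): one mode per
  // selected component, extracted from the DoFHandler.
  DoFTools::extract_constant_modes (dof_handler,
                                    ComponentMask (),
                                    data.constant_modes);

  TrilinosWrappers::PreconditionAMGMueLu preconditioner;
  preconditioner.initialize (system_matrix, data);

  SolverControl solver_control (1000, 1e-12 * system_rhs.l2_norm ());
  SolverCG<TrilinosWrappers::MPI::Vector> solver (solver_control);
  solver.solve (system_matrix, solution, system_rhs, preconditioner);
}

For a scalar elliptic problem the extracted constant modes amount to a single constant vector, and leaving constant_modes empty is usually acceptable; vector-valued problems benefit from supplying one mode per component as done above.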
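The second initialize() overload accepts a Teuchos::ParameterList in the ML parameter format that MueLu::MLParameterListInterpreter understands, bypassing AdditionalData. A sketch of that route follows, again assuming an already assembled system_matrix; the particular parameter values are illustrative, and only parameter names that also appear in the listing above are used.

#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>

#include <Teuchos_ParameterList.hpp>
#include <ml_MultiLevelPreconditioner.h>   // for ML_Epetra::SetDefaults

// Illustrative helper (not part of deal.II): configure the preconditioner
// through an ML-style parameter list instead of AdditionalData.
void initialize_from_parameter_list (const dealii::TrilinosWrappers::SparseMatrix   &system_matrix,
                                     dealii::TrilinosWrappers::PreconditionAMGMueLu &preconditioner)
{
  Teuchos::ParameterList parameters;
  ML_Epetra::SetDefaults ("SA", parameters);        // same defaults as the elliptic branch above
  parameters.set ("smoother: type", "Chebyshev");
  parameters.set ("smoother: sweeps", 2);
  parameters.set ("cycle applications", 1);
  parameters.set ("ML output", 0);

  // Note: this overload takes the list by non-const reference, matching the
  // signature in the listing above.
  preconditioner.initialize (system_matrix, parameters);
}

This route is useful when settings that AdditionalData does not expose, such as the coarse-level size limit or the aggregation options set internally above, need to be tuned directly.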