Reference documentation for deal.II version 9.1.1
trilinos_sparsity_pattern.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2008 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/lac/trilinos_index_access.h>
17 #include <deal.II/lac/trilinos_sparsity_pattern.h>
18 
19 #ifdef DEAL_II_WITH_TRILINOS
20 
21 # include <deal.II/base/mpi.h>
22 # include <deal.II/base/utilities.h>
23 
24 # include <deal.II/lac/dynamic_sparsity_pattern.h>
25 # include <deal.II/lac/sparsity_pattern.h>
26 # include <deal.II/lac/trilinos_index_access.h>
27 
28 # include <Epetra_Export.h>
29 
30 DEAL_II_NAMESPACE_OPEN
31 
32 namespace TrilinosWrappers
33 {
34  namespace SparsityPatternIterators
35  {
36  void
37  Accessor::visit_present_row()
38  {
39  // if we are asked to visit the past-the-end line, then simply
40  // release all our caches and go on with life
41  if (this->a_row == sparsity_pattern->n_rows())
42  {
43  colnum_cache.reset();
44  return;
45  }
46 
47  // otherwise first flush Trilinos caches if necessary
48  if (!sparsity_pattern->is_compressed())
49  sparsity_pattern->compress();
50 
51  colnum_cache = std::make_shared<std::vector<size_type>>(
52  sparsity_pattern->row_length(this->a_row));
53 
54  if (colnum_cache->size() > 0)
55  {
56  // get a representation of the present row
57  int ncols;
58  const int ierr = sparsity_pattern->graph->ExtractGlobalRowCopy(
59  this->a_row,
60  colnum_cache->size(),
61  ncols,
62  reinterpret_cast<TrilinosWrappers::types::int_type *>(
63  const_cast<size_type *>(colnum_cache->data())));
64  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
65  AssertThrow(static_cast<std::vector<size_type>::size_type>(ncols) ==
66  colnum_cache->size(),
67  ExcInternalError());
68  }
69  }
70  } // namespace SparsityPatternIterators
71 
72 
73  // The constructor is actually the
74  // only point where we have to check
75  // whether we build a serial or a
76  // parallel Trilinos matrix.
77  // Actually, it does not even matter
78  // how many threads there are, but
79  // only if we use an MPI compiler or
80  // a standard compiler. So, even one
81  // thread on a configuration with
82  // MPI will still get a parallel
83  // interface.
84  SparsityPattern::SparsityPattern()
85  {
86  column_space_map =
87  std_cxx14::make_unique<Epetra_Map>(TrilinosWrappers::types::int_type(0),
88  TrilinosWrappers::types::int_type(0),
89  Utilities::Trilinos::comm_self());
90  graph = std_cxx14::make_unique<Epetra_FECrsGraph>(View,
91  *column_space_map,
92  *column_space_map,
93  0);
94  graph->FillComplete();
95  }
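// A usage sketch (illustrative; the IndexSet, the range bounds, and the
// entries-per-row hint are assumptions, not part of this file). Since a build
// with MPI always takes the parallel path described in the comment above,
// user code typically constructs the pattern from the set of locally owned
// rows and a communicator:
//
//   dealii::IndexSet locally_owned_rows(n_global_rows);
//   locally_owned_rows.add_range(my_first_row, my_last_row + 1);
//   dealii::TrilinosWrappers::SparsityPattern sp(locally_owned_rows,
//                                                MPI_COMM_WORLD,
//                                                /*n_entries_per_row=*/5);
//   for (const auto row : locally_owned_rows)
//     sp.add(row, row);           // add couplings, here just the diagonal
//   sp.compress();                // finalize before building a matrix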
96 
97 
98  SparsityPattern::SparsityPattern(const Epetra_Map &input_map,
99  const size_type n_entries_per_row)
100  {
101  reinit(input_map, input_map, n_entries_per_row);
102  }
103 
104 
105 
106  SparsityPattern::SparsityPattern(
107  const Epetra_Map & input_map,
108  const std::vector<size_type> &n_entries_per_row)
109  {
110  reinit(input_map, input_map, n_entries_per_row);
111  }
112 
113 
114 
115  SparsityPattern::SparsityPattern(const Epetra_Map &input_row_map,
116  const Epetra_Map &input_col_map,
117  const size_type n_entries_per_row)
118  {
119  reinit(input_row_map, input_col_map, n_entries_per_row);
120  }
121 
122 
123 
124  SparsityPattern::SparsityPattern(
125  const Epetra_Map & input_row_map,
126  const Epetra_Map & input_col_map,
127  const std::vector<size_type> &n_entries_per_row)
128  {
129  reinit(input_row_map, input_col_map, n_entries_per_row);
130  }
131 
132 
133 
134  SparsityPattern::SparsityPattern(const size_type m,
135  const size_type n,
136  const size_type n_entries_per_row)
137  {
138  reinit(m, n, n_entries_per_row);
139  }
140 
141 
142 
143  SparsityPattern::SparsityPattern(
144  const size_type m,
145  const size_type n,
146  const std::vector<size_type> &n_entries_per_row)
147  {
148  reinit(m, n, n_entries_per_row);
149  }
150 
151 
152 
153  SparsityPattern::SparsityPattern(SparsityPattern &&other) noexcept
154  : Subscriptor(std::move(other))
155  , column_space_map(std::move(other.column_space_map))
156  , graph(std::move(other.graph))
157  , nonlocal_graph(std::move(other.nonlocal_graph))
158  {}
159 
160 
161 
162  // Copy function only works if the
163  // sparsity pattern is empty.
164  SparsityPattern::SparsityPattern(const SparsityPattern &input_sparsity)
165  : Subscriptor()
166  , column_space_map(new Epetra_Map(TrilinosWrappers::types::int_type(0),
167  TrilinosWrappers::types::int_type(0),
168  Utilities::Trilinos::comm_self()))
169  , graph(
170  new Epetra_FECrsGraph(View, *column_space_map, *column_space_map, 0))
171  {
172  (void)input_sparsity;
173  Assert(input_sparsity.n_rows() == 0,
174  ExcMessage(
175  "Copy constructor only works for empty sparsity patterns."));
176  }
177 
178 
179 
180  SparsityPattern::SparsityPattern(const IndexSet &parallel_partitioning,
181  const MPI_Comm &communicator,
182  const size_type n_entries_per_row)
183  {
184  reinit(parallel_partitioning,
185  parallel_partitioning,
186  communicator,
187  n_entries_per_row);
188  }
189 
190 
191 
192  SparsityPattern::SparsityPattern(
193  const IndexSet & parallel_partitioning,
194  const MPI_Comm & communicator,
195  const std::vector<size_type> &n_entries_per_row)
196  {
197  reinit(parallel_partitioning,
198  parallel_partitioning,
199  communicator,
200  n_entries_per_row);
201  }
202 
203 
204 
205  SparsityPattern::SparsityPattern(const IndexSet &row_parallel_partitioning,
206  const IndexSet &col_parallel_partitioning,
207  const MPI_Comm &communicator,
208  const size_type n_entries_per_row)
209  {
210  reinit(row_parallel_partitioning,
211  col_parallel_partitioning,
212  communicator,
213  n_entries_per_row);
214  }
215 
216 
217 
218  SparsityPattern::SparsityPattern(
219  const IndexSet & row_parallel_partitioning,
220  const IndexSet & col_parallel_partitioning,
221  const MPI_Comm & communicator,
222  const std::vector<size_type> &n_entries_per_row)
223  {
224  reinit(row_parallel_partitioning,
225  col_parallel_partitioning,
226  communicator,
227  n_entries_per_row);
228  }
229 
230 
231 
232  SparsityPattern::SparsityPattern(const IndexSet &row_parallel_partitioning,
233  const IndexSet &col_parallel_partitioning,
234  const IndexSet &writable_rows,
235  const MPI_Comm &communicator,
236  const size_type n_max_entries_per_row)
237  {
238  reinit(row_parallel_partitioning,
239  col_parallel_partitioning,
240  writable_rows,
241  communicator,
242  n_max_entries_per_row);
243  }
244 
245 
246 
247  void
248  SparsityPattern::reinit(const size_type m,
249  const size_type n,
250  const size_type n_entries_per_row)
251  {
252  reinit(complete_index_set(m),
253  complete_index_set(n),
254  MPI_COMM_SELF,
255  n_entries_per_row);
256  }
257 
258 
259 
260  void
261  SparsityPattern::reinit(const size_type m,
262  const size_type n,
263  const std::vector<size_type> &n_entries_per_row)
264  {
265  reinit(complete_index_set(m),
266  complete_index_set(n),
267  MPI_COMM_SELF,
268  n_entries_per_row);
269  }
270 
271 
272 
273  namespace
274  {
275  using size_type = SparsityPattern::size_type;
276 
277  void
278  reinit_sp(const Epetra_Map & row_map,
279  const Epetra_Map & col_map,
280  const size_type n_entries_per_row,
281  std::unique_ptr<Epetra_Map> & column_space_map,
282  std::unique_ptr<Epetra_FECrsGraph> &graph,
283  std::unique_ptr<Epetra_CrsGraph> & nonlocal_graph)
284  {
285  Assert(row_map.IsOneToOne(),
286  ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
287  "the maps of different processors."));
288  Assert(col_map.IsOneToOne(),
289  ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
290  "the maps of different processors."));
291 
292  nonlocal_graph.reset();
293  graph.reset();
294  column_space_map = std_cxx14::make_unique<Epetra_Map>(col_map);
295 
296  // for more than one processor, need to specify only row map first and
297  // let the matrix entries decide about the column map (which says which
298  // columns are present in the matrix, not to be confused with the
299  // col_map that tells how the domain dofs of the matrix will be
300  // distributed). for only one processor, we can directly assign the
301  // columns as well. If we use a recent Trilinos version, we can also
302  // require building a non-local graph which gives us thread-safe
303  // initialization.
304  if (row_map.Comm().NumProc() > 1)
305  graph = std_cxx14::make_unique<Epetra_FECrsGraph>(
306  Copy, row_map, n_entries_per_row, false
307  // TODO: Check which new Trilinos version supports this...
308  // Remember to change tests/trilinos/assemble_matrix_parallel_07, too.
309  //#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
310  // , true
311  //#endif
312  );
313  else
314  graph = std_cxx14::make_unique<Epetra_FECrsGraph>(
315  Copy, row_map, col_map, n_entries_per_row, false);
316  }
317 
318 
319 
320  void
321  reinit_sp(const Epetra_Map & row_map,
322  const Epetra_Map & col_map,
323  const std::vector<size_type> & n_entries_per_row,
324  std::unique_ptr<Epetra_Map> & column_space_map,
325  std::unique_ptr<Epetra_FECrsGraph> &graph,
326  std::unique_ptr<Epetra_CrsGraph> & nonlocal_graph)
327  {
328  Assert(row_map.IsOneToOne(),
329  ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
330  "the maps of different processors."));
331  Assert(col_map.IsOneToOne(),
332  ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
333  "the maps of different processors."));
334 
335  // release memory before reallocation
336  nonlocal_graph.reset();
337  graph.reset();
338  AssertDimension(n_entries_per_row.size(),
339  TrilinosWrappers::n_global_elements(row_map));
340 
341  column_space_map = std_cxx14::make_unique<Epetra_Map>(col_map);
342  std::vector<int> local_entries_per_row(
343  TrilinosWrappers::max_my_gid(row_map) -
344  TrilinosWrappers::min_my_gid(row_map));
345  for (unsigned int i = 0; i < local_entries_per_row.size(); ++i)
346  local_entries_per_row[i] =
347  n_entries_per_row[TrilinosWrappers::min_my_gid(row_map) + i];
348 
349  if (row_map.Comm().NumProc() > 1)
350  graph = std_cxx14::make_unique<Epetra_FECrsGraph>(
351  Copy, row_map, local_entries_per_row.data(), false
352  // TODO: Check which new Trilinos version supports this...
353  // Remember to change tests/trilinos/assemble_matrix_parallel_07, too.
354  //#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
355  // , true
356  //#endif
357  );
358  else
359  graph = std_cxx14::make_unique<Epetra_FECrsGraph>(
360  Copy, row_map, col_map, local_entries_per_row.data(), false);
361  }
362 
363 
364 
365  template <typename SparsityPatternType>
366  void
367  reinit_sp(const Epetra_Map & row_map,
368  const Epetra_Map & col_map,
369  const SparsityPatternType & sp,
370  const bool exchange_data,
371  std::unique_ptr<Epetra_Map> & column_space_map,
372  std::unique_ptr<Epetra_FECrsGraph> &graph,
373  std::unique_ptr<Epetra_CrsGraph> & nonlocal_graph)
374  {
375  nonlocal_graph.reset();
376  graph.reset();
377 
378  AssertDimension(sp.n_rows(),
379  TrilinosWrappers::n_global_elements(row_map));
380  AssertDimension(sp.n_cols(),
381  TrilinosWrappers::n_global_elements(col_map));
382 
383  column_space_map = std_cxx14::make_unique<Epetra_Map>(col_map);
384 
385  Assert(row_map.LinearMap() == true,
386  ExcMessage(
387  "This function only works if the row map is contiguous."));
388 
389  const size_type first_row = TrilinosWrappers::min_my_gid(row_map),
390  last_row = TrilinosWrappers::max_my_gid(row_map) + 1;
391  std::vector<int> n_entries_per_row(last_row - first_row);
392 
393  // Trilinos wants the row length as an int; this is hopefully never going
394  // to be a problem.
395  for (size_type row = first_row; row < last_row; ++row)
396  n_entries_per_row[row - first_row] =
397  static_cast<int>(sp.row_length(row));
398 
399  if (row_map.Comm().NumProc() > 1)
400  graph = std_cxx14::make_unique<Epetra_FECrsGraph>(
401  Copy, row_map, n_entries_per_row.data(), false);
402  else
403  graph = std_cxx14::make_unique<Epetra_FECrsGraph>(
404  Copy, row_map, col_map, n_entries_per_row.data(), false);
405 
406  AssertDimension(sp.n_rows(), n_global_rows(*graph));
407 
408  std::vector<TrilinosWrappers::types::int_type> row_indices;
409 
410  // Include possibility to exchange data since DynamicSparsityPattern is
411  // able to do so
412  if (exchange_data == false)
413  for (size_type row = first_row; row < last_row; ++row)
414  {
415  const TrilinosWrappers::types::int_type row_length =
416  sp.row_length(row);
417  if (row_length == 0)
418  continue;
419 
420  row_indices.resize(row_length, -1);
421  {
422  typename SparsityPatternType::iterator p = sp.begin(row);
423  // avoid incrementing p over the end of the current row because
424  // it is slow for DynamicSparsityPattern in parallel
425  for (int col = 0; col < row_length;)
426  {
427  row_indices[col++] = p->column();
428  if (col < row_length)
429  ++p;
430  }
431  }
432  graph->Epetra_CrsGraph::InsertGlobalIndices(row,
433  row_length,
434  row_indices.data());
435  }
436  else
437  for (size_type row = 0; row < sp.n_rows(); ++row)
438  {
439  const TrilinosWrappers::types::int_type row_length =
440  sp.row_length(row);
441  if (row_length == 0)
442  continue;
443 
444  row_indices.resize(row_length, -1);
445  {
446  typename SparsityPatternType::iterator p = sp.begin(row);
447  // avoid incrementing p over the end of the current row because
448  // it is slow for DynamicSparsityPattern in parallel
449  for (int col = 0; col < row_length;)
450  {
451  row_indices[col++] = p->column();
452  if (col < row_length)
453  ++p;
454  }
455  }
456  const TrilinosWrappers::types::int_type trilinos_row = row;
457  graph->InsertGlobalIndices(1,
458  &trilinos_row,
459  row_length,
460  row_indices.data());
461  }
462 
463  // TODO A dynamic_cast fails here, this is suspicious.
464  const auto &range_map =
465  static_cast<const Epetra_Map &>(graph->RangeMap()); // NOLINT
466  int ierr = graph->GlobalAssemble(*column_space_map, range_map, true);
467  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
468 
469  ierr = graph->OptimizeStorage();
470  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
471  }
472  } // namespace
473 
474 
475  void
476  SparsityPattern::reinit(const Epetra_Map &input_map,
477  const size_type n_entries_per_row)
478  {
479  reinit_sp(input_map,
480  input_map,
481  n_entries_per_row,
482  column_space_map,
483  graph,
484  nonlocal_graph);
485  }
486 
487 
488 
489  void
490  SparsityPattern::reinit(const Epetra_Map &input_row_map,
491  const Epetra_Map &input_col_map,
492  const size_type n_entries_per_row)
493  {
494  reinit_sp(input_row_map,
495  input_col_map,
496  n_entries_per_row,
497  column_space_map,
498  graph,
499  nonlocal_graph);
500  }
501 
502 
503 
504  void
505  SparsityPattern::reinit(const Epetra_Map & input_map,
506  const std::vector<size_type> &n_entries_per_row)
507  {
508  reinit_sp(input_map,
509  input_map,
510  n_entries_per_row,
511  column_space_map,
512  graph,
513  nonlocal_graph);
514  }
515 
516 
517 
518  void
519  SparsityPattern::reinit(const Epetra_Map & input_row_map,
520  const Epetra_Map & input_col_map,
521  const std::vector<size_type> &n_entries_per_row)
522  {
523  reinit_sp(input_row_map,
524  input_col_map,
525  n_entries_per_row,
526  column_space_map,
527  graph,
528  nonlocal_graph);
529  }
530 
531 
532 
533  void
534  SparsityPattern::reinit(const IndexSet &parallel_partitioning,
535  const MPI_Comm &communicator,
536  const size_type n_entries_per_row)
537  {
538  Epetra_Map map =
539  parallel_partitioning.make_trilinos_map(communicator, false);
540  reinit_sp(
541  map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
542  }
543 
544 
545 
546  void
547  SparsityPattern::reinit(const IndexSet & parallel_partitioning,
548  const MPI_Comm & communicator,
549  const std::vector<size_type> &n_entries_per_row)
550  {
551  Epetra_Map map =
552  parallel_partitioning.make_trilinos_map(communicator, false);
553  reinit_sp(
554  map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
555  }
556 
557 
558 
559  void
560  SparsityPattern::reinit(const IndexSet &row_parallel_partitioning,
561  const IndexSet &col_parallel_partitioning,
562  const MPI_Comm &communicator,
563  const size_type n_entries_per_row)
564  {
565  Epetra_Map row_map =
566  row_parallel_partitioning.make_trilinos_map(communicator, false);
567  Epetra_Map col_map =
568  col_parallel_partitioning.make_trilinos_map(communicator, false);
569  reinit_sp(row_map,
570  col_map,
571  n_entries_per_row,
572  column_space_map,
573  graph,
574  nonlocal_graph);
575  }
576 
577 
578 
579  void
580  SparsityPattern::reinit(const IndexSet &row_parallel_partitioning,
581  const IndexSet &col_parallel_partitioning,
582  const MPI_Comm &communicator,
583  const std::vector<size_type> &n_entries_per_row)
584  {
585  Epetra_Map row_map =
586  row_parallel_partitioning.make_trilinos_map(communicator, false);
587  Epetra_Map col_map =
588  col_parallel_partitioning.make_trilinos_map(communicator, false);
589  reinit_sp(row_map,
590  col_map,
591  n_entries_per_row,
592  column_space_map,
593  graph,
594  nonlocal_graph);
595  }
596 
597 
598 
599  void
600  SparsityPattern::reinit(const IndexSet &row_parallel_partitioning,
601  const IndexSet &col_parallel_partitioning,
602  const IndexSet &writable_rows,
603  const MPI_Comm &communicator,
604  const size_type n_entries_per_row)
605  {
606  Epetra_Map row_map =
607  row_parallel_partitioning.make_trilinos_map(communicator, false);
608  Epetra_Map col_map =
609  col_parallel_partitioning.make_trilinos_map(communicator, false);
610  reinit_sp(row_map,
611  col_map,
612  n_entries_per_row,
613  column_space_map,
614  graph,
615  nonlocal_graph);
616 
617  IndexSet nonlocal_partitioner = writable_rows;
618  AssertDimension(nonlocal_partitioner.size(),
619  row_parallel_partitioning.size());
620 # ifdef DEBUG
621  {
622  IndexSet tmp = writable_rows & row_parallel_partitioning;
623  Assert(tmp == row_parallel_partitioning,
624  ExcMessage(
625  "The set of writable rows passed to this method does not "
626  "contain the locally owned rows, which is not allowed."));
627  }
628 # endif
629  nonlocal_partitioner.subtract_set(row_parallel_partitioning);
630  if (Utilities::MPI::n_mpi_processes(communicator) > 1)
631  {
632  Epetra_Map nonlocal_map =
633  nonlocal_partitioner.make_trilinos_map(communicator, true);
634  nonlocal_graph =
635  std_cxx14::make_unique<Epetra_CrsGraph>(Copy, nonlocal_map, 0);
636  }
637  else
638  Assert(nonlocal_partitioner.n_elements() == 0, ExcInternalError());
639  }
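// A usage sketch (illustrative; `locally_owned` and `locally_relevant` are
// assumed IndexSets holding one rank's owned and ghosted rows). Passing the
// larger set as writable_rows lets this rank add entries to ghost rows during
// assembly; compress() later exports those rows to their owners through the
// nonlocal graph allocated above:
//
//   dealii::TrilinosWrappers::SparsityPattern sp;
//   sp.reinit(locally_owned,     // row partitioning
//             locally_owned,     // column partitioning
//             locally_relevant,  // rows this rank may write into
//             MPI_COMM_WORLD);
//   // ... add entries, e.g. through DoFTools::make_sparsity_pattern() ...
//   sp.compress();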
640 
641 
642 
643  template <typename SparsityPatternType>
644  void
645  SparsityPattern::reinit(
646  const IndexSet & row_parallel_partitioning,
647  const IndexSet & col_parallel_partitioning,
648  const SparsityPatternType &nontrilinos_sparsity_pattern,
649  const MPI_Comm & communicator,
650  const bool exchange_data)
651  {
652  Epetra_Map row_map =
653  row_parallel_partitioning.make_trilinos_map(communicator, false);
654  Epetra_Map col_map =
655  col_parallel_partitioning.make_trilinos_map(communicator, false);
656  reinit_sp(row_map,
657  col_map,
658  nontrilinos_sparsity_pattern,
659  exchange_data,
660  column_space_map,
661  graph,
662  nonlocal_graph);
663  }
664 
665 
666 
667  template <typename SparsityPatternType>
668  void
669  SparsityPattern::reinit(
670  const IndexSet & parallel_partitioning,
671  const SparsityPatternType &nontrilinos_sparsity_pattern,
672  const MPI_Comm & communicator,
673  const bool exchange_data)
674  {
675  Epetra_Map map =
676  parallel_partitioning.make_trilinos_map(communicator, false);
677  reinit_sp(map,
678  map,
679  nontrilinos_sparsity_pattern,
680  exchange_data,
681  column_space_map,
682  graph,
683  nonlocal_graph);
684  }
685 
686 
687 
688  template <typename SparsityPatternType>
689  void
690  SparsityPattern::reinit(const Epetra_Map & input_map,
691  const SparsityPatternType &sp,
692  const bool exchange_data)
693  {
694  reinit_sp(input_map,
695  input_map,
696  sp,
697  exchange_data,
698  column_space_map,
699  graph,
700  nonlocal_graph);
701  }
702 
703 
704 
705  template <typename SparsityPatternType>
706  void
707  SparsityPattern::reinit(const Epetra_Map & input_row_map,
708  const Epetra_Map & input_col_map,
709  const SparsityPatternType &sp,
710  const bool exchange_data)
711  {
712  reinit_sp(input_row_map,
713  input_col_map,
714  sp,
715  exchange_data,
716  column_space_map,
717  graph,
718  nonlocal_graph);
719  compress();
720  }
721 
722 
723 
724  SparsityPattern &
725  SparsityPattern::operator=(const SparsityPattern &)
726  {
727  Assert(false, ExcNotImplemented());
728  return *this;
729  }
730 
731 
732 
733  void
734  SparsityPattern::copy_from(const SparsityPattern &sp)
735  {
736  column_space_map = std_cxx14::make_unique<Epetra_Map>(*sp.column_space_map);
737  graph = std_cxx14::make_unique<Epetra_FECrsGraph>(*sp.graph);
738 
739  if (sp.nonlocal_graph.get() != nullptr)
740  nonlocal_graph =
741  std_cxx14::make_unique<Epetra_CrsGraph>(*sp.nonlocal_graph);
742  else
743  nonlocal_graph.reset();
744  }
745 
746 
747 
748  template <typename SparsityPatternType>
749  void
750  SparsityPattern::copy_from(const SparsityPatternType &sp)
751  {
752  const Epetra_Map rows(TrilinosWrappers::types::int_type(sp.n_rows()),
753  0,
754  Utilities::Trilinos::comm_self());
755  const Epetra_Map columns(TrilinosWrappers::types::int_type(sp.n_cols()),
756  0,
757  Utilities::Trilinos::comm_self());
758 
759  reinit_sp(
760  rows, columns, sp, false, column_space_map, graph, nonlocal_graph);
761  }
762 
763 
764 
765  void
766  SparsityPattern::clear()
767  {
768  // When we clear the matrix, reset
769  // the pointer and generate an
770  // empty sparsity pattern.
771  column_space_map =
772  std_cxx14::make_unique<Epetra_Map>(TrilinosWrappers::types::int_type(0),
773  TrilinosWrappers::types::int_type(0),
774  Utilities::Trilinos::comm_self());
775  graph = std_cxx14::make_unique<Epetra_FECrsGraph>(View,
776  *column_space_map,
777  *column_space_map,
778  0);
779  graph->FillComplete();
780 
781  nonlocal_graph.reset();
782  }
783 
784 
785 
786  void
787  SparsityPattern::compress()
788  {
789  int ierr;
790  Assert(column_space_map.get() != nullptr, ExcInternalError());
791  if (nonlocal_graph.get() != nullptr)
792  {
793  if (nonlocal_graph->IndicesAreGlobal() == false &&
794  nonlocal_graph->RowMap().NumMyElements() > 0)
795  {
796  // Insert dummy element at (row, column) that corresponds to row 0
797  // in local index counting.
798  TrilinosWrappers::types::int_type row =
799  TrilinosWrappers::global_index(nonlocal_graph->RowMap(), 0);
800  TrilinosWrappers::types::int_type column = 0;
801 
802  // in case we have a square sparsity pattern, add the entry on the
803  // diagonal
804  if (TrilinosWrappers::n_global_elements(*column_space_map) ==
805  TrilinosWrappers::n_global_elements(graph->RangeMap()))
806  column = row;
807  // if not, take a column index that we have ourselves since we
808  // know for sure it is there (and it will not create spurious
809  // messages to many ranks like putting index 0 on many processors)
810  else if (column_space_map->NumMyElements() > 0)
811  column = TrilinosWrappers::global_index(*column_space_map, 0);
812  ierr = nonlocal_graph->InsertGlobalIndices(row, 1, &column);
813  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
814  }
815  Assert(nonlocal_graph->RowMap().NumMyElements() == 0 ||
816  nonlocal_graph->IndicesAreGlobal() == true,
817  ExcInternalError());
818 
819  ierr =
820  nonlocal_graph->FillComplete(*column_space_map, graph->RangeMap());
821  AssertThrow(ierr >= 0, ExcTrilinosError(ierr));
822  ierr = nonlocal_graph->OptimizeStorage();
823  AssertThrow(ierr >= 0, ExcTrilinosError(ierr));
824  Epetra_Export exporter(nonlocal_graph->RowMap(), graph->RowMap());
825  ierr = graph->Export(*nonlocal_graph, exporter, Add);
826  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
827  ierr = graph->FillComplete(*column_space_map, graph->RangeMap());
828  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
829  }
830  else
831  {
832  // TODO A dynamic_cast fails here, this is suspicious.
833  const auto &range_map =
834  static_cast<const Epetra_Map &>(graph->RangeMap()); // NOLINT
835  ierr = graph->GlobalAssemble(*column_space_map, range_map, true);
836  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
837  }
838 
839  ierr = graph->OptimizeStorage();
840  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
841  }
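// A usage sketch (illustrative): once all entries have been added, user code
// calls compress() to ship remotely added rows to their owners and
// fill-complete the graph; the finished pattern is then typically used to
// initialize a TrilinosWrappers::SparseMatrix:
//
//   sp.compress();
//   dealii::TrilinosWrappers::SparseMatrix matrix;
//   matrix.reinit(sp);   // the matrix builds its Epetra graph from `sp`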
842 
843 
844 
845  bool
846  SparsityPattern::row_is_stored_locally(const size_type i) const
847  {
848  return graph->RowMap().LID(
849  static_cast<TrilinosWrappers::types::int_type>(i)) != -1;
850  }
851 
852 
853 
854  bool
855  SparsityPattern::exists(const size_type i, const size_type j) const
856  {
857  if (!row_is_stored_locally(i))
858  {
859  return false;
860  }
861  else
862  {
863  // Extract local indices in
864  // the matrix.
865  int trilinos_i =
866  graph->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
867  trilinos_j =
868  graph->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
869 
870  // Check whether the matrix
871  // already is transformed to
872  // local indices.
873  if (graph->Filled() == false)
874  {
875  int nnz_present = graph->NumGlobalIndices(i);
876  int nnz_extracted;
877  TrilinosWrappers::types::int_type *col_indices;
878 
879  // Generate the view and make
880  // sure that we have not generated
881  // an error.
882  // TODO: trilinos_i is the local row index -> it is an int but
883  // ExtractGlobalRowView requires trilinos_i to be the global row
884  // index and thus it should be a long long int
885  int ierr = graph->ExtractGlobalRowView(trilinos_i,
886  nnz_extracted,
887  col_indices);
888  (void)ierr;
889  Assert(ierr == 0, ExcTrilinosError(ierr));
890  Assert(nnz_present == nnz_extracted,
891  ExcDimensionMismatch(nnz_present, nnz_extracted));
892 
893  // Search the index
894  const std::ptrdiff_t local_col_index =
895  std::find(col_indices, col_indices + nnz_present, trilinos_j) -
896  col_indices;
897 
898  if (local_col_index == nnz_present)
899  return false;
900  }
901  else
902  {
903  // Prepare pointers for extraction
904  // of a view of the row.
905  int nnz_present = graph->NumGlobalIndices(i);
906  int nnz_extracted;
907  int *col_indices;
908 
909  // Generate the view and make
910  // sure that we have not generated
911  // an error.
912  int ierr =
913  graph->ExtractMyRowView(trilinos_i, nnz_extracted, col_indices);
914  (void)ierr;
915  Assert(ierr == 0, ExcTrilinosError(ierr));
916 
917  Assert(nnz_present == nnz_extracted,
918  ExcDimensionMismatch(nnz_present, nnz_extracted));
919 
920  // Search the index
921  const std::ptrdiff_t local_col_index =
922  std::find(col_indices, col_indices + nnz_present, trilinos_j) -
923  col_indices;
924 
925  if (local_col_index == nnz_present)
926  return false;
927  }
928  }
929 
930  return true;
931  }
932 
933 
934 
935  SparsityPattern::size_type
936  SparsityPattern::bandwidth() const
937  {
938  size_type local_b = 0;
939  TrilinosWrappers::types::int_type global_b = 0;
940  for (int i = 0; i < static_cast<int>(local_size()); ++i)
941  {
942  int *indices;
943  int num_entries;
944  graph->ExtractMyRowView(i, num_entries, indices);
945  for (unsigned int j = 0; j < static_cast<unsigned int>(num_entries);
946  ++j)
947  {
948  if (static_cast<size_type>(std::abs(i - indices[j])) > local_b)
949  local_b = std::abs(i - indices[j]);
950  }
951  }
952  graph->Comm().MaxAll(reinterpret_cast<TrilinosWrappers::types::int_type *>(
953  &local_b),
954  &global_b,
955  1);
956  return static_cast<size_type>(global_b);
957  }
958 
959 
960 
961  SparsityPattern::size_type
962  SparsityPattern::n_rows() const
963  {
964  const TrilinosWrappers::types::int_type n_rows = n_global_rows(*graph);
965  return n_rows;
966  }
967 
968 
969 
970  SparsityPattern::size_type
971  SparsityPattern::n_cols() const
972  {
973  TrilinosWrappers::types::int_type n_cols;
974  if (graph->Filled() == true)
975  n_cols = n_global_cols(*graph);
976  else
977  n_cols = TrilinosWrappers::n_global_elements(*column_space_map);
978 
979  return n_cols;
980  }
981 
982 
983 
984  unsigned int
985  SparsityPattern::local_size() const
986  {
987  int n_rows = graph->NumMyRows();
988 
989  return n_rows;
990  }
991 
992 
993 
994  std::pair<SparsityPattern::size_type, SparsityPattern::size_type>
995  SparsityPattern::local_range() const
996  {
997  size_type begin, end;
998  begin = TrilinosWrappers::min_my_gid(graph->RowMap());
999  end = TrilinosWrappers::max_my_gid(graph->RowMap()) + 1;
1000 
1001  return std::make_pair(begin, end);
1002  }
1003 
1004 
1005 
1006  SparsityPattern::size_type
1007  SparsityPattern::n_nonzero_elements() const
1008  {
1009  TrilinosWrappers::types::int_type nnz = n_global_entries(*graph);
1010 
1011  return static_cast<size_type>(nnz);
1012  }
1013 
1014 
1015 
1016  unsigned int
1017  SparsityPattern::max_entries_per_row() const
1018  {
1019  int nnz = graph->MaxNumIndices();
1020 
1021  return static_cast<unsigned int>(nnz);
1022  }
1023 
1024 
1025 
1026  SparsityPattern::size_type
1027  SparsityPattern::row_length(const size_type row) const
1028  {
1029  Assert(row < n_rows(), ExcInternalError());
1030 
1031  // Get a representation of where the present row is located on
1032  // the current processor
1033  TrilinosWrappers::types::int_type local_row =
1034  graph->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
1035 
1036  // On the processor who owns this row, we'll have a non-negative
1037  // value for `local_row` and can ask for the length of the row.
1038  if (local_row >= 0)
1039  return graph->NumMyIndices(local_row);
1040  else
1041  return static_cast<size_type>(-1);
1042  }
1043 
1044 
1045 
1046  const Epetra_Map &
1047  SparsityPattern::domain_partitioner() const
1048  {
1049  // TODO A dynamic_cast fails here, this is suspicious.
1050  const auto &domain_map =
1051  static_cast<const Epetra_Map &>(graph->DomainMap()); // NOLINT
1052  return domain_map;
1053  }
1054 
1055 
1056 
1057  const Epetra_Map &
1058  SparsityPattern::range_partitioner() const
1059  {
1060  // TODO A dynamic_cast fails here, this is suspicious.
1061  const auto &range_map =
1062  static_cast<const Epetra_Map &>(graph->RangeMap()); // NOLINT
1063  return range_map;
1064  }
1065 
1066 
1067 
1068  const Epetra_Map &
1069  SparsityPattern::row_partitioner() const
1070  {
1071  // TODO A dynamic_cast fails here, this is suspicious.
1072  const auto &row_map =
1073  static_cast<const Epetra_Map &>(graph->RowMap()); // NOLINT
1074  return row_map;
1075  }
1076 
1077 
1078 
1079  const Epetra_Map &
1080  SparsityPattern::col_partitioner() const
1081  {
1082  // TODO A dynamic_cast fails here, this is suspicious.
1083  const auto &col_map =
1084  static_cast<const Epetra_Map &>(graph->ColMap()); // NOLINT
1085  return col_map;
1086  }
1087 
1088 
1089 
1090  const Epetra_Comm &
1091  SparsityPattern::trilinos_communicator() const
1092  {
1093  return graph->RangeMap().Comm();
1094  }
1095 
1096 
1097 
1098  MPI_Comm
1099  SparsityPattern::get_mpi_communicator() const
1100  {
1101 # ifdef DEAL_II_WITH_MPI
1102 
1103  const Epetra_MpiComm *mpi_comm =
1104  dynamic_cast<const Epetra_MpiComm *>(&graph->RangeMap().Comm());
1105  Assert(mpi_comm != nullptr, ExcInternalError());
1106  return mpi_comm->Comm();
1107 # else
1108 
1109  return MPI_COMM_SELF;
1110 
1111 # endif
1112  }
1113 
1114 
1115 
1116  void
1117  SparsityPattern::write_ascii()
1118  {
1119  Assert(false, ExcNotImplemented());
1120  }
1121 
1122 
1123 
1124  // As of now, no particularly neat
1125  // output is generated in case of
1126  // multiple processors.
1127  void
1128  SparsityPattern::print(std::ostream &out,
1129  const bool write_extended_trilinos_info) const
1130  {
1131  if (write_extended_trilinos_info)
1132  out << *graph;
1133  else
1134  {
1135  int *indices;
1136  int num_entries;
1137 
1138  for (int i = 0; i < graph->NumMyRows(); ++i)
1139  {
1140  graph->ExtractMyRowView(i, num_entries, indices);
1141  for (int j = 0; j < num_entries; ++j)
1142  out << "(" << TrilinosWrappers::global_index(graph->RowMap(), i)
1143  << ","
1144  << TrilinosWrappers::global_index(graph->ColMap(), indices[j])
1145  << ") " << std::endl;
1146  }
1147  }
1148 
1149  AssertThrow(out, ExcIO());
1150  }
1151 
1152 
1153 
1154  void
1155  SparsityPattern::print_gnuplot(std::ostream &out) const
1156  {
1157  Assert(graph->Filled() == true, ExcInternalError());
1158  for (::types::global_dof_index row = 0; row < local_size(); ++row)
1159  {
1160  int *indices;
1161  int num_entries;
1162  graph->ExtractMyRowView(row, num_entries, indices);
1163 
1164  Assert(num_entries >= 0, ExcInternalError());
1165  // avoid sign comparison warning
1166  const ::types::global_dof_index num_entries_ = num_entries;
1167  for (::types::global_dof_index j = 0; j < num_entries_; ++j)
1168  // while matrix entries are usually
1169  // written (i,j), with i vertical and
1170  // j horizontal, gnuplot output is
1171  // x-y, that is we have to exchange
1172  // the order of output
1173  out << static_cast<int>(
1174  TrilinosWrappers::global_index(graph->ColMap(), indices[j]))
1175  << " "
1176  << -static_cast<int>(
1177  TrilinosWrappers::global_index(graph->RowMap(), row))
1178  << std::endl;
1179  }
1180 
1181  AssertThrow(out, ExcIO());
1182  }
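// A usage sketch (illustrative; the file name is an assumption): each locally
// stored nonzero is written as a "column  -row" pair, so the pattern can be
// plotted directly, e.g. with gnuplot's `plot "sparsity.gpl" with points`:
//
//   std::ofstream out("sparsity.gpl");
//   sp.print_gnuplot(out);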
1183 
1184  // TODO: Implement!
1185  std::size_t
1186  SparsityPattern::memory_consumption() const
1187  {
1188  Assert(false, ExcNotImplemented());
1189  return 0;
1190  }
1191 
1192 
1193  // explicit instantiations
1194  //
1195  template void
1196  SparsityPattern::copy_from(const ::SparsityPattern &);
1197  template void
1198  SparsityPattern::copy_from(const ::DynamicSparsityPattern &);
1199 
1200 
1201  template void
1202  SparsityPattern::reinit(const Epetra_Map &,
1203  const ::SparsityPattern &,
1204  bool);
1205  template void
1206  SparsityPattern::reinit(const Epetra_Map &,
1207  const ::DynamicSparsityPattern &,
1208  bool);
1209 
1210  template void
1211  SparsityPattern::reinit(const Epetra_Map &,
1212  const Epetra_Map &,
1213  const ::SparsityPattern &,
1214  bool);
1215  template void
1216  SparsityPattern::reinit(const Epetra_Map &,
1217  const Epetra_Map &,
1218  const ::DynamicSparsityPattern &,
1219  bool);
1220 
1221 
1222  template void
1223  SparsityPattern::reinit(const IndexSet &,
1224  const ::SparsityPattern &,
1225  const MPI_Comm &,
1226  bool);
1227  template void
1228  SparsityPattern::reinit(const IndexSet &,
1229  const ::DynamicSparsityPattern &,
1230  const MPI_Comm &,
1231  bool);
1232 
1233 
1234  template void
1235  SparsityPattern::reinit(const IndexSet &,
1236  const IndexSet &,
1237  const ::SparsityPattern &,
1238  const MPI_Comm &,
1239  bool);
1240  template void
1241  SparsityPattern::reinit(const IndexSet &,
1242  const IndexSet &,
1243  const ::DynamicSparsityPattern &,
1244  const MPI_Comm &,
1245  bool);
1246 
1247 } // namespace TrilinosWrappers
1248 
1249 DEAL_II_NAMESPACE_CLOSE
1250 
1251 #endif // DEAL_II_WITH_TRILINOS