Reference documentation for deal.II version 8.5.1
trilinos_sparsity_pattern.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2008 - 2017 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/lac/trilinos_sparsity_pattern.h>
17 
18 #ifdef DEAL_II_WITH_TRILINOS
19 
20 # include <deal.II/base/utilities.h>
21 # include <deal.II/base/mpi.h>
22 # include <deal.II/lac/sparsity_pattern.h>
23 # include <deal.II/lac/dynamic_sparsity_pattern.h>
24 
25 DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
26 # include <Epetra_Export.h>
27 DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
28 
29 DEAL_II_NAMESPACE_OPEN
30 
31 namespace TrilinosWrappers
32 {
33  namespace
34  {
35  // define a helper function that queries the size of an Epetra_Map object
36  // by calling either the 32- or 64-bit function necessary, and returns the
37  // result in the correct data type so that we can use it in calling other
38  // Epetra member functions that are overloaded by index type
39 #ifndef DEAL_II_WITH_64BIT_INDICES
40  int n_global_elements (const Epetra_BlockMap &map)
41  {
42  return map.NumGlobalElements();
43  }
44 
45  int min_my_gid(const Epetra_BlockMap &map)
46  {
47  return map.MinMyGID();
48  }
49 
50  int max_my_gid(const Epetra_BlockMap &map)
51  {
52  return map.MaxMyGID();
53  }
54 
55  int n_global_rows(const Epetra_CrsGraph &graph)
56  {
57  return graph.NumGlobalRows();
58  }
59 
60  int n_global_cols(const Epetra_CrsGraph &graph)
61  {
62  return graph.NumGlobalCols();
63  }
64 
65  int n_global_entries(const Epetra_CrsGraph &graph)
66  {
67  return graph.NumGlobalEntries();
68  }
69 
70  int local_to_global_index(const Epetra_BlockMap &map, int i)
71  {
72  return map.GID(i);
73  }
74 #else
75  long long int n_global_elements (const Epetra_BlockMap &map)
76  {
77  return map.NumGlobalElements64();
78  }
79 
80  long long int min_my_gid(const Epetra_BlockMap &map)
81  {
82  return map.MinMyGID64();
83  }
84 
85  long long int max_my_gid(const Epetra_BlockMap &map)
86  {
87  return map.MaxMyGID64();
88  }
89 
90  long long int n_global_rows(const Epetra_CrsGraph &graph)
91  {
92  return graph.NumGlobalRows64();
93  }
94 
95  long long int n_global_cols(const Epetra_CrsGraph &graph)
96  {
97  return graph.NumGlobalCols64();
98  }
99 
100  long long int n_global_entries(const Epetra_CrsGraph &graph)
101  {
102  return graph.NumGlobalEntries64();
103  }
104 
105  long long int local_to_global_index(const Epetra_BlockMap &map, int i)
106  {
107  return map.GID64(i);
108  }
109 #endif
110  }
111 
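 // An illustrative sketch (not part of the library source) of how the
 // helpers above are meant to be used: because each helper returns the
 // index type matching the build configuration, calling code can be written
 // once for both the 32- and the 64-bit case, e.g.
 //
 //   const TrilinosWrappers::types::int_type n
 //     = n_global_elements (some_map);   // `some_map`: any Epetra_BlockMap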
112  namespace SparsityPatternIterators
113  {
 114  void
 115  Accessor::visit_present_row ()
 116  {
117  // if we are asked to visit the past-the-end line, then simply
118  // release all our caches and go on with life
119  if (this->a_row == sparsity_pattern->n_rows())
120  {
121  colnum_cache.reset ();
122 
123  return;
124  }
125 
126  // otherwise first flush Trilinos caches
 127  sparsity_pattern->compress ();
 128 
129  colnum_cache.reset (new std::vector<size_type> (sparsity_pattern->row_length(this->a_row)));
130 
131 
132  if (colnum_cache->size() > 0)
133  {
134  // get a representation of the present row
135  int ncols;
136  const int ierr = sparsity_pattern->graph->ExtractGlobalRowCopy
137  ((TrilinosWrappers::types::int_type)this->a_row,
138  colnum_cache->size(),
139  ncols,
140  (TrilinosWrappers::types::int_type *)&(*colnum_cache)[0]);
141  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
142  AssertThrow (static_cast<std::vector<size_type>::size_type>(ncols) == colnum_cache->size(),
143  ExcInternalError());
144  }
145  }
146  }
147 
148 
149  // The constructor is actually the
150  // only point where we have to check
151  // whether we build a serial or a
152  // parallel Trilinos matrix.
153  // Actually, it does not even matter
154  // how many threads there are, but
155  // only if we use an MPI compiler or
156  // a standard compiler. So, even one
157  // thread on a configuration with
158  // MPI will still get a parallel
159  // interface.
 160  SparsityPattern::SparsityPattern ()
 161  {
 162  column_space_map.reset(new Epetra_Map (TrilinosWrappers::types::int_type(0),
 163  TrilinosWrappers::types::int_type(0),
 164  Utilities::Trilinos::comm_self()));
 165  graph.reset (new Epetra_FECrsGraph(View,
 166  *column_space_map,
 167  *column_space_map,
 168  0));
169  graph->FillComplete();
170  }
171 
172 
173  SparsityPattern::SparsityPattern (const Epetra_Map &input_map,
174  const size_type n_entries_per_row)
175  {
176  reinit (input_map, input_map, n_entries_per_row);
177  }
178 
179 
180 
181  SparsityPattern::SparsityPattern (const Epetra_Map &input_map,
182  const std::vector<size_type> &n_entries_per_row)
183  {
184  reinit (input_map, input_map, n_entries_per_row);
185  }
186 
187 
188 
189  SparsityPattern::SparsityPattern (const Epetra_Map &input_row_map,
190  const Epetra_Map &input_col_map,
191  const size_type n_entries_per_row)
192  {
193  reinit (input_row_map, input_col_map, n_entries_per_row);
194  }
195 
196 
197 
198  SparsityPattern::SparsityPattern (const Epetra_Map &input_row_map,
199  const Epetra_Map &input_col_map,
200  const std::vector<size_type> &n_entries_per_row)
201  {
202  reinit (input_row_map, input_col_map, n_entries_per_row);
203  }
204 
205 
206 
 207  SparsityPattern::SparsityPattern (const size_type m,
 208  const size_type n,
209  const size_type n_entries_per_row)
210  {
211  reinit (m, n, n_entries_per_row);
212  }
213 
214 
215 
 216  SparsityPattern::SparsityPattern (const size_type m,
 217  const size_type n,
218  const std::vector<size_type> &n_entries_per_row)
219  {
220  reinit (m, n, n_entries_per_row);
221  }
222 
223 
224  // Copy function only works if the
225  // sparsity pattern is empty.
 226  SparsityPattern::SparsityPattern (const SparsityPattern &input_sparsity)
 227  :
228  Subscriptor(),
229  column_space_map (new Epetra_Map(TrilinosWrappers::types::int_type(0),
230  TrilinosWrappers::types::int_type(0),
231  Utilities::Trilinos::comm_self())),
232  graph (new Epetra_FECrsGraph(View,
233  *column_space_map,
234  *column_space_map,
235  0))
236  {
237  (void)input_sparsity;
238  Assert (input_sparsity.n_rows() == 0,
239  ExcMessage ("Copy constructor only works for empty sparsity patterns."));
240  }
241 
242 
243 
244  SparsityPattern::SparsityPattern (const IndexSet &parallel_partitioning,
245  const MPI_Comm &communicator,
246  const size_type n_entries_per_row)
247  {
248  reinit (parallel_partitioning, parallel_partitioning, communicator,
249  n_entries_per_row);
250  }
251 
252 
253 
254  SparsityPattern::SparsityPattern (const IndexSet &parallel_partitioning,
255  const MPI_Comm &communicator,
256  const std::vector<size_type> &n_entries_per_row)
257  {
258  reinit (parallel_partitioning, parallel_partitioning, communicator,
259  n_entries_per_row);
260  }
261 
262 
263 
264  SparsityPattern::SparsityPattern (const IndexSet &row_parallel_partitioning,
265  const IndexSet &col_parallel_partitioning,
266  const MPI_Comm &communicator,
267  const size_type n_entries_per_row)
268  {
269  reinit (row_parallel_partitioning, col_parallel_partitioning,
270  communicator, n_entries_per_row);
271  }
272 
273 
274 
 275  SparsityPattern::
 276  SparsityPattern (const IndexSet &row_parallel_partitioning,
277  const IndexSet &col_parallel_partitioning,
278  const MPI_Comm &communicator,
279  const std::vector<size_type> &n_entries_per_row)
280  {
281  reinit (row_parallel_partitioning, col_parallel_partitioning,
282  communicator, n_entries_per_row);
283  }
284 
285 
286 
 287  SparsityPattern::
 288  SparsityPattern (const IndexSet &row_parallel_partitioning,
289  const IndexSet &col_parallel_partitioning,
290  const IndexSet &writable_rows,
291  const MPI_Comm &communicator,
292  const size_type n_max_entries_per_row)
293  {
294  reinit (row_parallel_partitioning, col_parallel_partitioning,
295  writable_rows, communicator, n_max_entries_per_row);
296  }
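 // Illustrative usage sketch, not part of the library source: the IndexSet
 // constructors above are the usual entry point in an MPI program. Here
 // `locally_owned_dofs` and `dof_handler` are assumed to exist in user code.
 //
 //   TrilinosWrappers::SparsityPattern sp (locally_owned_dofs,
 //                                         MPI_COMM_WORLD);
 //   DoFTools::make_sparsity_pattern (dof_handler, sp);
 //   sp.compress ();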
297 
298 
299 
 300  SparsityPattern::~SparsityPattern ()
 301  {}
302 
303 
304 
305  void
 306  SparsityPattern::reinit (const size_type m,
 307  const size_type n,
308  const size_type n_entries_per_row)
309  {
310  reinit (complete_index_set(m), complete_index_set(n), MPI_COMM_SELF,
311  n_entries_per_row);
312  }
313 
314 
315 
316  void
 317  SparsityPattern::reinit (const size_type m,
 318  const size_type n,
319  const std::vector<size_type> &n_entries_per_row)
320  {
321  reinit (complete_index_set(m), complete_index_set(n), MPI_COMM_SELF,
322  n_entries_per_row);
323  }
324 
325 
326 
327  namespace
328  {
329  typedef SparsityPattern::size_type size_type;
330 
331  void
332  reinit_sp (const Epetra_Map &row_map,
333  const Epetra_Map &col_map,
334  const size_type n_entries_per_row,
335  std_cxx11::shared_ptr<Epetra_Map> &column_space_map,
336  std_cxx11::shared_ptr<Epetra_FECrsGraph> &graph,
337  std_cxx11::shared_ptr<Epetra_CrsGraph> &nonlocal_graph)
338  {
339  Assert(row_map.IsOneToOne(),
340  ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
341  "the maps of different processors."));
342  Assert(col_map.IsOneToOne(),
343  ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
344  "the maps of different processors."));
345 
346  nonlocal_graph.reset();
347  graph.reset ();
348  column_space_map.reset (new Epetra_Map (col_map));
349 
350  // for more than one processor, need to specify only row map first and
351  // let the matrix entries decide about the column map (which says which
352  // columns are present in the matrix, not to be confused with the
353  // col_map that tells how the domain dofs of the matrix will be
354  // distributed). for only one processor, we can directly assign the
355  // columns as well. If we use a recent Trilinos version, we can also
356  // require building a non-local graph which gives us thread-safe
357  // initialization.
358  if (row_map.Comm().NumProc() > 1)
359  graph.reset (new Epetra_FECrsGraph(Copy, row_map,
360  n_entries_per_row, false
361  // TODO: Check which new Trilinos
362  // version supports this... Remember
363  // to change tests/trilinos/assemble_matrix_parallel_07
364  // too.
365  //#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
366  //, true
367  //#endif
368  ));
369  else
370  graph.reset (new Epetra_FECrsGraph(Copy, row_map, col_map,
371  n_entries_per_row, false));
372  }
373 
374 
375 
376  void
377  reinit_sp (const Epetra_Map &row_map,
378  const Epetra_Map &col_map,
379  const std::vector<size_type> &n_entries_per_row,
380  std_cxx11::shared_ptr<Epetra_Map> &column_space_map,
381  std_cxx11::shared_ptr<Epetra_FECrsGraph> &graph,
382  std_cxx11::shared_ptr<Epetra_CrsGraph> &nonlocal_graph)
383  {
384  Assert(row_map.IsOneToOne(),
385  ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
386  "the maps of different processors."));
387  Assert(col_map.IsOneToOne(),
388  ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
389  "the maps of different processors."));
390 
391  // release memory before reallocation
392  nonlocal_graph.reset();
393  graph.reset ();
394  AssertDimension (n_entries_per_row.size(),
395  static_cast<size_type>(n_global_elements(row_map)));
396 
397  column_space_map.reset (new Epetra_Map (col_map));
398  std::vector<int> local_entries_per_row(max_my_gid(row_map)-
399  min_my_gid(row_map));
400  for (unsigned int i=0; i<local_entries_per_row.size(); ++i)
401  local_entries_per_row[i] = n_entries_per_row[min_my_gid(row_map)+i];
402 
403  if (row_map.Comm().NumProc() > 1)
404  graph.reset(new Epetra_FECrsGraph(Copy, row_map,
405  &local_entries_per_row[0],
406  false
407  // TODO: Check which new Trilinos
408  // version supports this... Remember
409  // to change tests/trilinos/assemble_matrix_parallel_07
410  // too.
411  //#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
412  //, true
413  //#endif
414  ));
415  else
416  graph.reset(new Epetra_FECrsGraph(Copy, row_map, col_map,
417  &local_entries_per_row[0],
418  false));
419  }
420 
421 
422 
423  template <typename SparsityPatternType>
424  void
425  reinit_sp (const Epetra_Map &row_map,
426  const Epetra_Map &col_map,
427  const SparsityPatternType &sp,
428  const bool exchange_data,
429  std_cxx11::shared_ptr<Epetra_Map> &column_space_map,
430  std_cxx11::shared_ptr<Epetra_FECrsGraph> &graph,
431  std_cxx11::shared_ptr<Epetra_CrsGraph> &nonlocal_graph)
432  {
433  nonlocal_graph.reset ();
434  graph.reset ();
435 
436  AssertDimension (sp.n_rows(),
437  static_cast<size_type>(n_global_elements(row_map)));
438  AssertDimension (sp.n_cols(),
439  static_cast<size_type>(n_global_elements(col_map)));
440 
441  column_space_map.reset (new Epetra_Map (col_map));
442 
443  Assert (row_map.LinearMap() == true,
444  ExcMessage ("This function only works if the row map is contiguous."));
445 
446  const size_type first_row = min_my_gid(row_map),
447  last_row = max_my_gid(row_map)+1;
448  std::vector<int> n_entries_per_row(last_row - first_row);
449 
 450  // Trilinos wants the row length as an int; this is hopefully never going
 451  // to be a problem.
452  for (size_type row=first_row; row<last_row; ++row)
453  n_entries_per_row[row-first_row] = static_cast<int>(sp.row_length(row));
454 
455  if (row_map.Comm().NumProc() > 1)
456  graph.reset(new Epetra_FECrsGraph(Copy, row_map,
457  &n_entries_per_row[0],
458  false));
459  else
460  graph.reset (new Epetra_FECrsGraph(Copy, row_map, col_map,
461  &n_entries_per_row[0],
462  false));
463 
464  AssertDimension (sp.n_rows(),
465  static_cast<size_type>(n_global_rows(*graph)));
466 
467  std::vector<TrilinosWrappers::types::int_type> row_indices;
468 
469  // Include possibility to exchange data since DynamicSparsityPattern is
470  // able to do so
471  if (exchange_data==false)
472  for (size_type row=first_row; row<last_row; ++row)
473  {
474  const TrilinosWrappers::types::int_type row_length = sp.row_length(row);
475  if (row_length == 0)
476  continue;
477 
478  row_indices.resize (row_length, -1);
479  {
480  typename SparsityPatternType::iterator p = sp.begin(row);
481  // avoid incrementing p over the end of the current row because
482  // it is slow for DynamicSparsityPattern in parallel
483  for (int col=0; col<row_length; )
484  {
485  row_indices[col++] = p->column();
486  if (col < row_length)
487  ++p;
488  }
489  }
490  graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
491  &row_indices[0]);
492  }
493  else
494  for (size_type row=0; row<sp.n_rows(); ++row)
495  {
496  const TrilinosWrappers::types::int_type row_length = sp.row_length(row);
497  if (row_length == 0)
498  continue;
499 
500  row_indices.resize (row_length, -1);
501  {
502  typename SparsityPatternType::iterator p = sp.begin(row);
503  // avoid incrementing p over the end of the current row because
504  // it is slow for DynamicSparsityPattern in parallel
505  for (int col=0; col<row_length; )
506  {
507  row_indices[col++] = p->column();
508  if (col < row_length)
509  ++p;
510  }
511  }
512  graph->InsertGlobalIndices (1,
513  reinterpret_cast<TrilinosWrappers::types::int_type *>(&row),
514  row_length, &row_indices[0]);
515  }
516 
517  int ierr =
518  graph->GlobalAssemble (*column_space_map,
519  static_cast<const Epetra_Map &>(graph->RangeMap()),
520  true);
521  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
522 
523  ierr = graph->OptimizeStorage ();
524  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
525  }
526  }
527 
528 
529  void
530  SparsityPattern::reinit (const Epetra_Map &input_map,
531  const size_type n_entries_per_row)
532  {
533  reinit_sp (input_map, input_map, n_entries_per_row,
 534  column_space_map, graph, nonlocal_graph);
 535  }
536 
537 
538 
539  void
540  SparsityPattern::reinit (const Epetra_Map &input_row_map,
541  const Epetra_Map &input_col_map,
542  const size_type n_entries_per_row)
543  {
544  reinit_sp (input_row_map, input_col_map, n_entries_per_row,
 545  column_space_map, graph, nonlocal_graph);
 546  }
547 
548 
549 
550  void
551  SparsityPattern::reinit (const Epetra_Map &input_map,
552  const std::vector<size_type> &n_entries_per_row)
553  {
554  reinit_sp (input_map, input_map, n_entries_per_row,
 555  column_space_map, graph, nonlocal_graph);
 556  }
557 
558 
559 
560  void
561  SparsityPattern::reinit (const Epetra_Map &input_row_map,
562  const Epetra_Map &input_col_map,
563  const std::vector<size_type> &n_entries_per_row)
564  {
565  reinit_sp (input_row_map, input_col_map, n_entries_per_row,
 566  column_space_map, graph, nonlocal_graph);
 567  }
568 
569 
570 
571  void
572  SparsityPattern::reinit (const IndexSet &parallel_partitioning,
573  const MPI_Comm &communicator,
574  const size_type n_entries_per_row)
575  {
576  Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
577  false);
578  reinit_sp (map, map, n_entries_per_row,
 579  column_space_map, graph, nonlocal_graph);
 580  }
581 
582 
583 
584  void SparsityPattern::reinit (const IndexSet &parallel_partitioning,
585  const MPI_Comm &communicator,
586  const std::vector<size_type> &n_entries_per_row)
587  {
588  Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
589  false);
590  reinit_sp (map, map, n_entries_per_row,
 591  column_space_map, graph, nonlocal_graph);
 592  }
593 
594 
595 
596  void SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
597  const IndexSet &col_parallel_partitioning,
598  const MPI_Comm &communicator,
599  const size_type n_entries_per_row)
600  {
601  Epetra_Map row_map =
602  row_parallel_partitioning.make_trilinos_map (communicator, false);
603  Epetra_Map col_map =
604  col_parallel_partitioning.make_trilinos_map (communicator, false);
605  reinit_sp (row_map, col_map, n_entries_per_row,
 606  column_space_map, graph, nonlocal_graph);
 607  }
608 
609 
610 
611  void
612  SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
613  const IndexSet &col_parallel_partitioning,
614  const MPI_Comm &communicator,
615  const std::vector<size_type> &n_entries_per_row)
616  {
617  Epetra_Map row_map =
618  row_parallel_partitioning.make_trilinos_map (communicator, false);
619  Epetra_Map col_map =
620  col_parallel_partitioning.make_trilinos_map (communicator, false);
621  reinit_sp (row_map, col_map, n_entries_per_row,
 622  column_space_map, graph, nonlocal_graph);
 623  }
624 
625 
626 
627  void
628  SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
629  const IndexSet &col_parallel_partitioning,
630  const IndexSet &writable_rows,
631  const MPI_Comm &communicator,
632  const size_type n_entries_per_row)
633  {
634  Epetra_Map row_map =
635  row_parallel_partitioning.make_trilinos_map (communicator, false);
636  Epetra_Map col_map =
637  col_parallel_partitioning.make_trilinos_map (communicator, false);
638  reinit_sp (row_map, col_map, n_entries_per_row,
 639  column_space_map, graph, nonlocal_graph);
 640 
641  IndexSet nonlocal_partitioner = writable_rows;
642  AssertDimension(nonlocal_partitioner.size(), row_parallel_partitioning.size());
643 #ifdef DEBUG
644  {
645  IndexSet tmp = writable_rows & row_parallel_partitioning;
646  Assert (tmp == row_parallel_partitioning,
647  ExcMessage("The set of writable rows passed to this method does not "
648  "contain the locally owned rows, which is not allowed."));
649  }
650 #endif
651  nonlocal_partitioner.subtract_set(row_parallel_partitioning);
652  if (Utilities::MPI::n_mpi_processes(communicator) > 1)
653  {
654  Epetra_Map nonlocal_map =
655  nonlocal_partitioner.make_trilinos_map(communicator, true);
656  nonlocal_graph.reset(new Epetra_CrsGraph(Copy, nonlocal_map, 0));
657  }
658  else
659  Assert(nonlocal_partitioner.n_elements() == 0, ExcInternalError());
660  }
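 // Illustrative usage sketch, not part of the library source: the variant
 // above with a writable-rows set is meant for assembly loops that also
 // touch rows owned by other processes. `locally_owned` and
 // `locally_relevant` are assumed to be the usual pair of IndexSets of a
 // parallel computation.
 //
 //   TrilinosWrappers::SparsityPattern sp;
 //   sp.reinit (locally_owned, locally_owned, locally_relevant,
 //              MPI_COMM_WORLD);
 //   ... add entries, possibly into non-owned but writable rows ...
 //   sp.compress ();   // ships the non-owned rows to their owners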
661 
662 
663 
664  template<typename SparsityPatternType>
665  void
666  SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
667  const IndexSet &col_parallel_partitioning,
668  const SparsityPatternType &nontrilinos_sparsity_pattern,
669  const MPI_Comm &communicator,
670  const bool exchange_data)
671  {
672  Epetra_Map row_map =
673  row_parallel_partitioning.make_trilinos_map (communicator, false);
674  Epetra_Map col_map =
675  col_parallel_partitioning.make_trilinos_map (communicator, false);
676  reinit_sp (row_map, col_map, nontrilinos_sparsity_pattern, exchange_data,
 677  column_space_map, graph, nonlocal_graph);
 678  }
679 
680 
681 
682  template<typename SparsityPatternType>
683  void
684  SparsityPattern::reinit (const IndexSet &parallel_partitioning,
685  const SparsityPatternType &nontrilinos_sparsity_pattern,
686  const MPI_Comm &communicator,
687  const bool exchange_data)
688  {
689  Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
690  false);
691  reinit_sp (map, map, nontrilinos_sparsity_pattern, exchange_data,
 692  column_space_map, graph, nonlocal_graph);
 693  }
694 
695 
696 
697  template <typename SparsityPatternType>
698  void
699  SparsityPattern::reinit (const Epetra_Map &input_map,
700  const SparsityPatternType &sp,
701  const bool exchange_data)
702  {
703  reinit_sp (input_map, input_map, sp, exchange_data,
 704  column_space_map, graph, nonlocal_graph);
 705  }
706 
707 
708 
709  template <typename SparsityPatternType>
710  void
711  SparsityPattern::reinit (const Epetra_Map &input_row_map,
712  const Epetra_Map &input_col_map,
713  const SparsityPatternType &sp,
714  const bool exchange_data)
715  {
716  reinit_sp (input_row_map, input_col_map, sp, exchange_data,
 717  column_space_map, graph, nonlocal_graph);
 718 
719  compress();
720  }
721 
722 
723 
 724  SparsityPattern &
 725  SparsityPattern::operator = (const SparsityPattern &)
 726  {
727  Assert (false, ExcNotImplemented());
728  return *this;
729  }
730 
731 
732 
733  template <typename SparsityPatternType>
734  void
735  SparsityPattern::copy_from (const SparsityPatternType &sp)
736  {
 737  const Epetra_Map rows (TrilinosWrappers::types::int_type(sp.n_rows()), 0,
 738  Utilities::Trilinos::comm_self());
 739  const Epetra_Map columns (TrilinosWrappers::types::int_type(sp.n_cols()), 0,
 740  Utilities::Trilinos::comm_self());
 741 
 742  reinit_sp (rows, columns, sp, false,
 743  column_space_map, graph, nonlocal_graph);
 744  }
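 // Illustrative usage sketch, not part of the library source: the usual way
 // to turn a deal.II pattern into a Trilinos one. `dsp` is assumed to be a
 // DynamicSparsityPattern that has already been filled.
 //
 //   TrilinosWrappers::SparsityPattern trilinos_sp;
 //   trilinos_sp.copy_from (dsp);   // builds and finalizes the Epetra graph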
745 
746 
747 
748  void
 749  SparsityPattern::clear ()
 750  {
751  // When we clear the matrix, reset
752  // the pointer and generate an
753  // empty sparsity pattern.
754  column_space_map.reset (new Epetra_Map (TrilinosWrappers::types::int_type(0),
755  TrilinosWrappers::types::int_type(0),
 756  Utilities::Trilinos::comm_self()));
 757  graph.reset (new Epetra_FECrsGraph(View, *column_space_map,
758  *column_space_map, 0));
759  graph->FillComplete();
760 
761  nonlocal_graph.reset();
762  }
763 
764 
765 
766  void
 767  SparsityPattern::compress ()
 768  {
769  int ierr;
770  Assert (column_space_map.get() != 0, ExcInternalError());
771  if (nonlocal_graph.get() != 0)
772  {
773  if (nonlocal_graph->IndicesAreGlobal() == false &&
774  nonlocal_graph->RowMap().NumMyElements() > 0)
775  {
776  // insert dummy element
777  TrilinosWrappers::types::int_type row = nonlocal_graph->RowMap().MyGID(
778  static_cast<TrilinosWrappers::types::int_type> (0));
779  nonlocal_graph->InsertGlobalIndices(row, 1, &row);
780  }
781  Assert(nonlocal_graph->RowMap().NumMyElements() == 0 ||
782  nonlocal_graph->IndicesAreGlobal() == true,
783  ExcInternalError());
784  nonlocal_graph->FillComplete(*column_space_map,
785  static_cast<const Epetra_Map &>(graph->RangeMap()));
786  nonlocal_graph->OptimizeStorage();
787  Epetra_Export exporter(nonlocal_graph->RowMap(), graph->RowMap());
788  ierr = graph->Export(*nonlocal_graph, exporter, Add);
789  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
790  ierr =
791  graph->FillComplete(*column_space_map,
792  static_cast<const Epetra_Map &>(graph->RangeMap()));
793  }
794  else
795  ierr = graph->GlobalAssemble (*column_space_map,
796  static_cast<const Epetra_Map &>(graph->RangeMap()),
797  true);
798 
799  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
800 
801  ierr = graph->OptimizeStorage ();
802  AssertThrow (ierr == 0, ExcTrilinosError(ierr));
803  }
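 // Illustrative usage sketch, not part of the library source: compress() is
 // collective and must be called on all processes once all entries have
 // been added, before the pattern is used to initialize a matrix.
 //
 //   sp.add (i, j);     // repeated during assembly
 //   sp.compress ();    // assembles the local and nonlocal graphs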
804 
805 
806 
807  bool
 808  SparsityPattern::exists (const size_type i,
 809  const size_type j) const
810  {
811  // Extract local indices in
812  // the matrix.
813  int trilinos_i = graph->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
814  trilinos_j = graph->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
815 
816  // If the data is not on the
817  // present processor, we throw
 818  // an exception. This is one of
819  // the two tiny differences to
820  // the el(i,j) call, which does
821  // not throw any assertions.
822  if (trilinos_i == -1)
823  {
824  return false;
825  }
826  else
827  {
828  // Check whether the matrix
829  // already is transformed to
830  // local indices.
831  if (graph->Filled() == false)
832  {
833  int nnz_present = graph->NumGlobalIndices(i);
834  int nnz_extracted;
835  TrilinosWrappers::types::int_type *col_indices;
836 
837  // Generate the view and make
838  // sure that we have not generated
839  // an error.
840  // TODO: trilinos_i is the local row index -> it is an int but
841  // ExtractGlobalRowView requires trilinos_i to be the global row
842  // index and thus it should be a long long int
843  int ierr = graph->ExtractGlobalRowView(
844  static_cast<TrilinosWrappers::types::int_type>(trilinos_i),
845  nnz_extracted, col_indices);
846  (void)ierr;
847  Assert (ierr==0, ExcTrilinosError(ierr));
848  Assert (nnz_present == nnz_extracted,
849  ExcDimensionMismatch(nnz_present, nnz_extracted));
850 
851  // Search the index
852  TrilinosWrappers::types::int_type *el_find =
853  std::find(col_indices, col_indices + nnz_present, trilinos_j);
854 
855  TrilinosWrappers::types::int_type local_col_index =
856  (TrilinosWrappers::types::int_type)(el_find - col_indices);
857 
858  if (local_col_index == nnz_present)
859  return false;
860  }
861  else
862  {
863  // Prepare pointers for extraction
864  // of a view of the row.
865  int nnz_present = graph->NumGlobalIndices(
866  static_cast<TrilinosWrappers::types::int_type>(i));
867  int nnz_extracted;
868  int *col_indices;
869 
870  // Generate the view and make
871  // sure that we have not generated
872  // an error.
873  int ierr = graph->ExtractMyRowView(trilinos_i,
874  nnz_extracted, col_indices);
875  (void)ierr;
876  Assert (ierr==0, ExcTrilinosError(ierr));
877 
878  Assert (nnz_present == nnz_extracted,
879  ExcDimensionMismatch(nnz_present, nnz_extracted));
880 
881  // Search the index
882  int *el_find = std::find(col_indices, col_indices + nnz_present,
883  static_cast<int>(trilinos_j));
884 
885  int local_col_index = (int)(el_find - col_indices);
886 
887  if (local_col_index == nnz_present)
888  return false;
889  }
890  }
891 
892  return true;
893  }
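 // Illustrative usage sketch, not part of the library source: exists() can
 // only give a reliable answer for rows that are stored on the calling
 // process.
 //
 //   if (sp.exists (i, j))
 //     ... the entry (i,j) is part of the pattern on this process ...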
894 
895 
896 
 897  SparsityPattern::size_type
 898  SparsityPattern::bandwidth () const
 899  {
900  size_type local_b=0;
901  TrilinosWrappers::types::int_type global_b=0;
902  for (int i=0; i<(int)local_size(); ++i)
903  {
904  int *indices;
905  int num_entries;
906  graph->ExtractMyRowView(i, num_entries, indices);
907  for (unsigned int j=0; j<(unsigned int)num_entries; ++j)
908  {
909  if (static_cast<size_type>(std::abs(static_cast<TrilinosWrappers::types::int_type>(i-indices[j]))) > local_b)
910  local_b = std::abs(static_cast<TrilinosWrappers::types::int_type>(i-indices[j]));
911  }
912  }
913  graph->Comm().MaxAll((TrilinosWrappers::types::int_type *)&local_b, &global_b, 1);
914  return static_cast<size_type>(global_b);
915  }
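 // Worked example (illustration only): with the definition used above, i.e.
 // the maximum of |i-j| over all entries (i,j) of the pattern, a tridiagonal
 // pattern has bandwidth 1 and a purely diagonal pattern has bandwidth 0.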
916 
917 
918 
 919  SparsityPattern::size_type
 920  SparsityPattern::n_rows () const
 921  {
922  const TrilinosWrappers::types::int_type n_rows = n_global_rows(*graph);
923  return n_rows;
924  }
925 
926 
927 
 928  SparsityPattern::size_type
 929  SparsityPattern::n_cols () const
 930  {
931  TrilinosWrappers::types::int_type n_cols;
932  if (graph->Filled() == true)
933  n_cols = n_global_cols(*graph);
934  else
935  n_cols = n_global_elements(*column_space_map);
936 
937  return n_cols;
938  }
939 
940 
941 
942  unsigned int
 943  SparsityPattern::local_size () const
 944  {
945  int n_rows = graph -> NumMyRows();
946 
947  return n_rows;
948  }
949 
950 
951 
952  std::pair<SparsityPattern::size_type, SparsityPattern::size_type>
 953  SparsityPattern::local_range () const
 954  {
 955  size_type begin, end;
 956  begin = min_my_gid(graph->RowMap());
957  end = max_my_gid(graph->RowMap())+1;
958 
959  return std::make_pair (begin, end);
960  }
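 // Illustrative usage sketch, not part of the library source: loop over the
 // rows stored locally on this process (assuming each process holds a
 // contiguous range of rows).
 //
 //   const std::pair<SparsityPattern::size_type,SparsityPattern::size_type>
 //     range = sp.local_range ();
 //   for (SparsityPattern::size_type row = range.first; row < range.second; ++row)
 //     n_local_entries += sp.row_length (row);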
961 
962 
963 
 964  SparsityPattern::size_type
 965  SparsityPattern::n_nonzero_elements () const
 966  {
967  TrilinosWrappers::types::int_type nnz = n_global_entries(*graph);
968 
969  return static_cast<size_type>(nnz);
970  }
971 
972 
973 
974  unsigned int
 975  SparsityPattern::max_entries_per_row () const
 976  {
977  int nnz = graph->MaxNumIndices();
978 
979  return static_cast<unsigned int>(nnz);
980  }
981 
982 
983 
 984  SparsityPattern::size_type
 985  SparsityPattern::row_length (const size_type row) const
 986  {
987  Assert (row < n_rows(), ExcInternalError());
988 
989  // get a representation of the
990  // present row
991  TrilinosWrappers::types::int_type ncols = -1;
992  TrilinosWrappers::types::int_type local_row =
993  graph->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
994 
995  // on the processor who owns this
996  // row, we'll have a non-negative
997  // value.
998  if (local_row >= 0)
999  ncols = graph->NumMyIndices (local_row);
1000 
1001  return static_cast<size_type>(ncols);
1002  }
1003 
1004 
1005 
1006  const Epetra_Map &
 1007  SparsityPattern::domain_partitioner () const
 1008  {
1009  return static_cast<const Epetra_Map &>(graph->DomainMap());
1010  }
1011 
1012 
1013 
1014  const Epetra_Map &
 1015  SparsityPattern::range_partitioner () const
 1016  {
1017  return static_cast<const Epetra_Map &>(graph->RangeMap());
1018  }
1019 
1020 
1021 
1022  const Epetra_Map &
 1023  SparsityPattern::row_partitioner () const
 1024  {
1025  return static_cast<const Epetra_Map &>(graph->RowMap());
1026  }
1027 
1028 
1029 
1030  const Epetra_Map &
 1031  SparsityPattern::col_partitioner () const
 1032  {
1033  return static_cast<const Epetra_Map &>(graph->ColMap());
1034  }
1035 
1036 
1037 
1038  const Epetra_Comm &
 1039  SparsityPattern::trilinos_communicator () const
 1040  {
1041  return graph->RangeMap().Comm();
1042  }
1043 
1044 
1045 
1046  MPI_Comm
 1047  SparsityPattern::get_mpi_communicator () const
 1048  {
1049 
1050 #ifdef DEAL_II_WITH_MPI
1051 
1052  const Epetra_MpiComm *mpi_comm
1053  = dynamic_cast<const Epetra_MpiComm *>(&graph->RangeMap().Comm());
1054  Assert (mpi_comm != 0, ExcInternalError());
1055  return mpi_comm->Comm();
1056 #else
1057 
1058  return MPI_COMM_SELF;
1059 
1060 #endif
1061 
1062  }
1063 
1064 
1065 
1066  void
 1067  SparsityPattern::write_ascii ()
 1068  {
1069  Assert (false, ExcNotImplemented());
1070  }
1071 
1072 
1073 
1074  // As of now, no particularly neat
 1075  // output is generated in case of
1076  // multiple processors.
1077  void
1078  SparsityPattern::print (std::ostream &out,
1079  const bool write_extended_trilinos_info) const
1080  {
1081  if (write_extended_trilinos_info)
1082  out << *graph;
1083  else
1084  {
1085  int *indices;
1086  int num_entries;
1087 
1088  for (int i=0; i<graph->NumMyRows(); ++i)
1089  {
1090  graph->ExtractMyRowView (i, num_entries, indices);
1091  for (int j=0; j<num_entries; ++j)
1092  out << "(" << local_to_global_index(graph->RowMap(), i)
1093  << "," << local_to_global_index(graph->ColMap(), indices[j]) << ") "
1094  << std::endl;
1095  }
1096  }
1097 
1098  AssertThrow (out, ExcIO());
1099  }
1100 
1101 
1102 
1103  void
1104  SparsityPattern::print_gnuplot (std::ostream &out) const
1105  {
1106  Assert (graph->Filled() == true, ExcInternalError());
1107  for (unsigned int row=0; row<local_size(); ++row)
1108  {
1109  int *indices;
1110  int num_entries;
1111  graph->ExtractMyRowView (row, num_entries, indices);
1112 
1113  for (int j=0; j<num_entries; ++j)
1114  // while matrix entries are usually
1115  // written (i,j), with i vertical and
1116  // j horizontal, gnuplot output is
1117  // x-y, that is we have to exchange
1118  // the order of output
1119  out << static_cast<int>(local_to_global_index(graph->ColMap(), indices[j]))
1120  << " " << -static_cast<int>(local_to_global_index(graph->RowMap(), row)) << std::endl;
1121  }
1122 
1123  AssertThrow (out, ExcIO());
1124  }
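 // Illustrative usage sketch, not part of the library source: write the
 // pattern to a file and visualize it with gnuplot.
 //
 //   std::ofstream out ("sparsity.gpl");
 //   sp.print_gnuplot (out);
 //
 // and then, within gnuplot:   plot "sparsity.gpl" with points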
1125 
1126 //TODO: Implement!
1127  std::size_t
 1128  SparsityPattern::memory_consumption () const
 1129  {
1130  Assert(false, ExcNotImplemented());
1131  return 0;
1132  }
1133 
1134 
1135  // explicit instantiations
1136  //
1137  template void
1138  SparsityPattern::copy_from (const ::SparsityPattern &);
1139  template void
1140  SparsityPattern::copy_from (const ::DynamicSparsityPattern &);
1141 
1142 
1143  template void
1144  SparsityPattern::reinit (const Epetra_Map &,
1145  const ::SparsityPattern &,
1146  bool);
1147  template void
1148  SparsityPattern::reinit (const Epetra_Map &,
1149  const ::DynamicSparsityPattern &,
1150  bool);
1151 
1152  template void
1153  SparsityPattern::reinit (const Epetra_Map &,
1154  const Epetra_Map &,
1155  const ::SparsityPattern &,
1156  bool);
1157  template void
1158  SparsityPattern::reinit (const Epetra_Map &,
1159  const Epetra_Map &,
1160  const ::DynamicSparsityPattern &,
1161  bool);
1162 
1163 
1164  template void
 1165  SparsityPattern::reinit (const IndexSet &,
 1166  const ::SparsityPattern &,
1167  const MPI_Comm &,
1168  bool);
1169  template void
 1170  SparsityPattern::reinit (const IndexSet &,
 1171  const ::DynamicSparsityPattern &,
1172  const MPI_Comm &,
1173  bool);
1174 
1175 
1176  template void
 1177  SparsityPattern::reinit (const IndexSet &,
 1178  const IndexSet &,
1179  const ::SparsityPattern &,
1180  const MPI_Comm &,
1181  bool);
1182  template void
 1183  SparsityPattern::reinit (const IndexSet &,
 1184  const IndexSet &,
1185  const ::DynamicSparsityPattern &,
1186  const MPI_Comm &,
1187  bool);
1188 
1189 }
1190 
1191 DEAL_II_NAMESPACE_CLOSE
1192 
1193 #endif // DEAL_II_WITH_TRILINOS