Reference documentation for deal.II version 8.5.1
constraint_matrix.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 1998 - 2017 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/lac/constraint_matrix.h>
17 #include <deal.II/lac/constraint_matrix.templates.h>
18 
19 #include <deal.II/base/memory_consumption.h>
20 #include <deal.II/lac/dynamic_sparsity_pattern.h>
21 #include <deal.II/lac/block_vector.h>
22 #include <deal.II/lac/block_sparse_matrix.h>
23 #include <deal.II/lac/sparse_matrix_ez.h>
24 #include <deal.II/lac/chunk_sparse_matrix.h>
25 #include <deal.II/lac/block_sparse_matrix_ez.h>
26 #include <deal.II/lac/la_vector.h>
27 #include <deal.II/lac/la_parallel_vector.h>
28 #include <deal.II/lac/la_parallel_block_vector.h>
29 #include <deal.II/lac/petsc_vector.h>
30 #include <deal.II/lac/petsc_block_vector.h>
31 #include <deal.II/lac/petsc_sparse_matrix.h>
32 #include <deal.II/lac/petsc_block_sparse_matrix.h>
33 #include <deal.II/lac/petsc_parallel_vector.h>
34 #include <deal.II/lac/petsc_parallel_block_vector.h>
35 #include <deal.II/lac/petsc_parallel_sparse_matrix.h>
36 #include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
37 #include <deal.II/lac/trilinos_vector.h>
38 #include <deal.II/lac/trilinos_block_vector.h>
39 #include <deal.II/lac/trilinos_sparse_matrix.h>
40 #include <deal.II/lac/trilinos_block_sparse_matrix.h>
41 #include <deal.II/lac/matrix_block.h>
42 #include <deal.II/lac/diagonal_matrix.h>
43 
44 #include <algorithm>
45 #include <numeric>
46 #include <set>
47 #include <ostream>
48 
49 DEAL_II_NAMESPACE_OPEN
50 
51 
52 
53 // Static member variable
54 const Table<2,bool> ConstraintMatrix::default_empty_table = Table<2,bool>();
55 
56 
57 
58 bool
59 ConstraintMatrix::check_zero_weight (const std::pair<size_type, double> &p)
60 {
61  return (p.second == 0);
62 }
63 
64 
65 
66 bool
67 ConstraintMatrix::ConstraintLine::operator < (const ConstraintLine &a) const
68 {
69  return line < a.line;
70 }
71 
72 
73 
74 bool
75 ConstraintMatrix::ConstraintLine::operator == (const ConstraintLine &a) const
76 {
77  return line == a.line;
78 }
79 
80 
81 
82 std::size_t
83 ConstraintMatrix::ConstraintLine::memory_consumption () const
84 {
85  return (MemoryConsumption::memory_consumption (line) +
86          MemoryConsumption::memory_consumption (inhomogeneity) +
87          MemoryConsumption::memory_consumption (entries));
88 }
89 
90 
91 
92 void
93 ConstraintMatrix::add_lines (const std::set<size_type> &lines)
94 {
95  for (std::set<size_type>::const_iterator
96  i = lines.begin(); i != lines.end(); ++i)
97  add_line (*i);
98 }
99 
100 
101 
102 void
103 ConstraintMatrix::add_lines (const std::vector<bool> &lines)
104 {
105  for (size_type i=0; i<lines.size(); ++i)
106  if (lines[i] == true)
107  add_line (i);
108 }
109 
110 
111 
112 void
113 ConstraintMatrix::add_lines (const IndexSet &lines)
114 {
115  for (size_type i=0; i<lines.n_elements(); ++i)
116  add_line (lines.nth_index_in_set(i));
117 }
118 
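// Usage sketch (editorial illustration, not part of the library source):
// the add_line()/add_entry() family above is typically used to build a
// constraint such as x_3 = 0.5*x_0 + 0.5*x_2 + 1 by hand. The DoF indices
// below are made up for the example.
//
//   ConstraintMatrix constraints;
//   constraints.add_line (3);
//   constraints.add_entry (3, 0, 0.5);
//   constraints.add_entry (3, 2, 0.5);
//   constraints.set_inhomogeneity (3, 1.0);
//   constraints.close ();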
119 
120 
121 void
122 ConstraintMatrix::add_entries
123 (const size_type line,
124  const std::vector<std::pair<size_type,double> > &col_val_pairs)
125 {
126  Assert (sorted==false, ExcMatrixIsClosed());
127  Assert (is_constrained(line), ExcLineInexistant(line));
128 
129  ConstraintLine *line_ptr = &lines[lines_cache[calculate_line_index(line)]];
130  Assert (line_ptr->line == line, ExcInternalError());
131 
132  // if in debug mode, check whether an entry for this column already
133 // exists and if it's the same as the one entered at present
134  //
135  // in any case: skip this entry if an entry for this column already
136  // exists, since we don't want to enter it twice
137  for (std::vector<std::pair<size_type,double> >::const_iterator
138  col_val_pair = col_val_pairs.begin();
139  col_val_pair!=col_val_pairs.end(); ++col_val_pair)
140  {
141  Assert (line != col_val_pair->first,
142  ExcMessage ("Can't constrain a degree of freedom to itself"));
143 
144  for (ConstraintLine::Entries::const_iterator
145  p=line_ptr->entries.begin();
146  p != line_ptr->entries.end(); ++p)
147  if (p->first == col_val_pair->first)
148  {
149  // entry exists, break innermost loop
150  Assert (p->second == col_val_pair->second,
151  ExcEntryAlreadyExists(line, col_val_pair->first,
152  p->second, col_val_pair->second));
153  break;
154  }
155 
156  line_ptr->entries.push_back (*col_val_pair);
157  }
158 }
159 
160 
161 
162 void ConstraintMatrix::add_selected_constraints
163 (const ConstraintMatrix &constraints,
164  const IndexSet &filter)
165 {
166  if (constraints.n_constraints() == 0)
167  return;
168 
169  Assert (filter.size() > constraints.lines.back().line,
170  ExcMessage ("Filter needs to be larger than constraint matrix size."));
171  for (std::vector<ConstraintLine>::const_iterator line=constraints.lines.begin();
172  line!=constraints.lines.end(); ++line)
173  if (filter.is_element(line->line))
174  {
175  const size_type row = filter.index_within_set (line->line);
176  add_line (row);
177  set_inhomogeneity (row, line->inhomogeneity);
178  for (size_type i=0; i<line->entries.size(); ++i)
179  if (filter.is_element(line->entries[i].first))
180  add_entry (row, filter.index_within_set (line->entries[i].first),
181  line->entries[i].second);
182  }
183 }
184 
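// Usage sketch (editorial illustration): add_selected_constraints() copies
// only those constraints of another, hypothetical object `all_constraints`
// whose constrained DoF lies in the filter IndexSet; note that all indices
// are renumbered to positions within the filter set.
//
//   IndexSet filter (n_dofs);          // n_dofs assumed known
//   filter.add_range (0, 100);         // keep constraints on DoFs 0..99
//   ConstraintMatrix subset;
//   subset.add_selected_constraints (all_constraints, filter);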
185 
186 
187 void ConstraintMatrix::close ()
188 {
189  if (sorted == true)
190  return;
191 
192  // sort the lines
193  std::sort (lines.begin(), lines.end());
194 
195  // update list of pointers and give the vector a sharp size since we
196  // won't modify the size any more after this point.
197  {
198  std::vector<size_type> new_lines (lines_cache.size(),
199                                    numbers::invalid_size_type);
200  size_type counter = 0;
201  for (std::vector<ConstraintLine>::const_iterator line=lines.begin();
202  line!=lines.end(); ++line, ++counter)
203  new_lines[calculate_line_index(line->line)] = counter;
204  std::swap (lines_cache, new_lines);
205  }
206 
207  // in debug mode: check whether we really set the pointers correctly.
208  for (size_type i=0; i<lines_cache.size(); ++i)
209  if (lines_cache[i] != numbers::invalid_size_type)
210  Assert (i == calculate_line_index(lines[lines_cache[i]].line),
211  ExcInternalError());
212 
213  // first, strip zero entries, as we have to do that only once
214  for (std::vector<ConstraintLine>::iterator line = lines.begin();
215  line!=lines.end(); ++line)
216  // first remove zero entries. that would mean that in the linear
217  // constraint for a node, x_i = ax_1 + bx_2 + ..., another node times 0
218  // appears. obviously, 0*something can be omitted
219  line->entries.erase (std::remove_if (line->entries.begin(),
220  line->entries.end(),
221  &check_zero_weight),
222  line->entries.end());
223 
224 
225 
226 #ifdef DEBUG
227  // In debug mode we are computing an estimate for the maximum number
228  // of constraints so that we can bail out if there is a cycle in the
229  // constraints (which is easier than searching for cycles in the graph).
230  //
231  // Let us figure out the largest dof index. This is an upper bound for the
232  // number of constraints because it is an approximation for the number of dofs
233  // in our system.
234  size_type largest_idx = 0;
235  for (std::vector<ConstraintLine>::iterator line = lines.begin();
236  line!=lines.end(); ++line)
237  {
238  for (ConstraintLine::Entries::iterator it = line->entries.begin(); it!=line->entries.end(); ++it)
239  {
240  largest_idx=std::max(largest_idx, it->first);
241  }
242  }
243 #endif
244 
245  // replace references to dofs that are themselves constrained. note that
246  // because we may replace references to other dofs that may themselves be
247  // constrained to third ones, we have to iterate over all this until we
248  // replace no chains of constraints any more
249  //
250  // the iteration replaces references to constrained degrees of freedom by
251  // second-order references. for example if x3=x0/2+x2/2 and x2=x0/2+x1/2,
252 // then the new list will be x3=x0/2+x0/4+x1/4. note that x0 appears
253  // twice. we will throw this duplicate out in the following step, where
254  // we sort the list so that throwing out duplicates becomes much more
255  // efficient. also, we have to do it only once, rather than in each
256  // iteration
257  size_type iteration = 0;
258  while (true)
259  {
260  bool chained_constraint_replaced = false;
261 
262  for (std::vector<ConstraintLine>::iterator line = lines.begin();
263  line!=lines.end(); ++line)
264  {
265 #ifdef DEBUG
266  // we need to keep track of how many replacements we do in this line, because we can
267  // end up in a cycle A->B->C->A without the number of entries growing.
268  size_type n_replacements = 0;
269 #endif
270 
271  // loop over all entries of this line (including ones that we
272  // have appended in this go around) and see whether they are
273  // further constrained. ignore elements that we don't store on
274  // the current processor
275  size_type entry = 0;
276  while (entry < line->entries.size())
277  if (((local_lines.size() == 0)
278  ||
279  (local_lines.is_element(line->entries[entry].first)))
280  &&
281  is_constrained (line->entries[entry].first))
282  {
283  // ok, this entry is further constrained:
284  chained_constraint_replaced = true;
285 
286  // look up the chain of constraints for this entry
287  const size_type dof_index = line->entries[entry].first;
288  const double weight = line->entries[entry].second;
289 
290  Assert (dof_index != line->line,
291  ExcMessage ("Cycle in constraints detected!"));
292 
293  const ConstraintLine *constrained_line =
294  &lines[lines_cache[calculate_line_index(dof_index)]];
295  Assert (constrained_line->line == dof_index,
296  ExcInternalError());
297 
298  // now we have to replace an entry by its expansion. we do
299  // that by overwriting the entry by the first entry of the
300  // expansion and adding the remaining ones to the end,
301  // where we will later process them once more
302  //
303  // we can of course only do that if the DoF that we are
304 // currently handling is constrained by a linear combination
305  // of other dofs:
306  if (constrained_line->entries.size() > 0)
307  {
308  for (size_type i=0; i<constrained_line->entries.size(); ++i)
309  Assert (dof_index != constrained_line->entries[i].first,
310  ExcMessage ("Cycle in constraints detected!"));
311 
312  // replace first entry, then tack the rest to the end
313  // of the list
314  line->entries[entry] =
315  std::make_pair (constrained_line->entries[0].first,
316  constrained_line->entries[0].second *
317  weight);
318 
319  for (size_type i=1; i<constrained_line->entries.size(); ++i)
320  line->entries
321  .push_back (std::make_pair (constrained_line->entries[i].first,
322  constrained_line->entries[i].second *
323  weight));
324 
325 #ifdef DEBUG
326  // keep track of how many entries we replace in this
327  // line. If we do more than there are constraints or
328  // dofs in our system, we must have a cycle.
329  ++n_replacements;
330  Assert(n_replacements/2<largest_idx, ExcMessage("Cycle in constraints detected!"));
331  if (n_replacements/2>=largest_idx)
332  return; // this enables us to test for this Exception.
333 #endif
334  }
335  else
336  // the DoF that we encountered is not constrained by a
337  // linear combination of other dofs but is equal to just
338  // the inhomogeneity (i.e. its chain of entries is
339  // empty). in that case, we can't just overwrite the
340  // current entry, but we have to actually eliminate it
341  {
342  line->entries.erase (line->entries.begin()+entry);
343  }
344 
345  line->inhomogeneity += constrained_line->inhomogeneity *
346  weight;
347 
348  // now that we're here, do not increase index by one but
349  // rather make another pass for the present entry because
350  // we have replaced the present entry by another one, or
351  // because we have deleted it and shifted all following
352  // ones one forward
353  }
354  else
355  // entry not further constrained. just move ahead by one
356  ++entry;
357  }
358 
359  // if we didn't do anything in this round, then quit the loop
360  if (chained_constraint_replaced == false)
361  break;
362 
363  // increase iteration count. note that we should not iterate more
364  // times than there are constraints, since this puts a natural upper
365  // bound on the length of constraint chains
366  ++iteration;
367  Assert (iteration <= lines.size(), ExcInternalError());
368  }
369 
370  // finally sort the entries and re-scale them if necessary. in this step,
371  // we also throw out duplicates as mentioned above. moreover, as some
372  // entries might have had zero weights, we replace them by a vector with
373  // sharp sizes.
374  for (std::vector<ConstraintLine>::iterator line = lines.begin();
375  line!=lines.end(); ++line)
376  {
377  std::sort (line->entries.begin(), line->entries.end());
378 
379  // loop over the now sorted list and see whether any of the entries
380  // references the same dofs more than once in order to find how many
381  // non-duplicate entries we have. This lets us allocate the correct
382  // amount of memory for the constraint entries.
383  size_type duplicates = 0;
384  for (size_type i=1; i<line->entries.size(); ++i)
385  if (line->entries[i].first == line->entries[i-1].first)
386  duplicates++;
387 
388  if (duplicates > 0 || line->entries.size() < line->entries.capacity())
389  {
390  ConstraintLine::Entries new_entries;
391 
392  // if we have no duplicates, copy verbatim the entries. this way,
393 // the final size of the vector is correct.
394  if (duplicates == 0)
395  new_entries = line->entries;
396  else
397  {
398 // otherwise, we need to go through the list one by one and
399  // resolve the duplicates
400  new_entries.reserve (line->entries.size() - duplicates);
401  new_entries.push_back(line->entries[0]);
402  for (size_type j=1; j<line->entries.size(); ++j)
403  if (line->entries[j].first == line->entries[j-1].first)
404  {
405  Assert (new_entries.back().first == line->entries[j].first,
406  ExcInternalError());
407  new_entries.back().second += line->entries[j].second;
408  }
409  else
410  new_entries.push_back (line->entries[j]);
411 
412  Assert (new_entries.size() == line->entries.size() - duplicates,
413  ExcInternalError());
414 
415  // make sure there are really no duplicates left and that the
416  // list is still sorted
417  for (size_type j=1; j<new_entries.size(); ++j)
418  {
419  Assert (new_entries[j].first != new_entries[j-1].first,
420  ExcInternalError());
421  Assert (new_entries[j].first > new_entries[j-1].first,
422  ExcInternalError());
423  }
424  }
425 
426  // replace old list of constraints for this dof by the new one
427  line->entries.swap (new_entries);
428  }
429 
430  // finally do the following check: if the sum of weights for the
431  // constraints is close to one, but not exactly one, then rescale all
432  // the weights so that they sum up to 1. this adds a little numerical
433  // stability and avoids all sorts of problems where the actual value
434  // is close to, but not quite what we expected
435  //
436  // the case where the weights don't quite sum up happens when we
437  // compute the interpolation weights "on the fly", i.e. not from
438  // precomputed tables. in this case, the interpolation weights are
439  // also subject to round-off
440  double sum = 0;
441  for (size_type i=0; i<line->entries.size(); ++i)
442  sum += line->entries[i].second;
443  if ((sum != 1.0) && (std::fabs (sum-1.) < 1.e-13))
444  {
445  for (size_type i=0; i<line->entries.size(); ++i)
446  line->entries[i].second /= sum;
447  line->inhomogeneity /= sum;
448  }
449  } // end of loop over all constraint lines
450 
451 #ifdef DEBUG
452  // if in debug mode: check that no dof is constrained to another dof that
453  // is also constrained. exclude dofs from this check whose constraint
454  // lines are not stored on the local processor
455  for (std::vector<ConstraintLine>::const_iterator line=lines.begin();
456  line!=lines.end(); ++line)
457  for (ConstraintLine::Entries::const_iterator
458  entry=line->entries.begin();
459  entry!=line->entries.end(); ++entry)
460  if ((local_lines.size() == 0)
461  ||
462  (local_lines.is_element(entry->first)))
463  {
464  // make sure that entry->first is not the index of a line itself
465  const bool is_circle = is_constrained(entry->first);
466  Assert (is_circle == false,
467  ExcDoFConstrainedToConstrainedDoF(line->line, entry->first));
468  }
469 #endif
470 
471  sorted = true;
472 }
473 
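// Illustration (editorial, not part of this file) of the chain resolution
// that close() performs, mirroring the x3/x2 example in the comments above:
// with x_2 = 0.5*x_0 + 0.5*x_1 and x_3 = 0.5*x_0 + 0.5*x_2, closing leaves
// x_3 = 0.75*x_0 + 0.25*x_1.
//
//   ConstraintMatrix constraints;
//   constraints.add_line (2);
//   constraints.add_entry (2, 0, 0.5);
//   constraints.add_entry (2, 1, 0.5);
//   constraints.add_line (3);
//   constraints.add_entry (3, 0, 0.5);
//   constraints.add_entry (3, 2, 0.5);
//   constraints.close ();
//   // constraints.get_constraint_entries(3) now lists (0,0.75) and (1,0.25)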
474 
475 
476 void
477 ConstraintMatrix::merge (const ConstraintMatrix &other_constraints,
478  const MergeConflictBehavior merge_conflict_behavior,
479  const bool allow_different_local_lines)
480 {
481  (void) allow_different_local_lines;
482  Assert(allow_different_local_lines ||
483  local_lines == other_constraints.local_lines,
484  ExcMessage("local_lines for this and the other objects are not the same "
485  "although allow_different_local_lines is false."));
486 
487  // store the previous state with respect to sorting
488  const bool object_was_sorted = sorted;
489  sorted = false;
490 
491  // first action is to fold into the present object possible constraints
492  // in the second object. we don't strictly need to do this any more since
493  // the ConstraintMatrix has learned to deal with chains of constraints in
494  // the close() function, but we have traditionally done this and it's not
495  // overly hard to do.
496  //
497  // for this, loop over all constraints and replace the constraint lines
498  // with a new one where constraints are replaced if necessary.
499  ConstraintLine::Entries tmp;
500  for (std::vector<ConstraintLine>::iterator line=lines.begin();
501  line!=lines.end(); ++line)
502  {
503  tmp.clear ();
504  for (size_type i=0; i<line->entries.size(); ++i)
505  {
506  // if the present dof is not stored, or not constrained, or if we won't take the
507  // constraint from the other object, then simply copy it over
508  if ((other_constraints.local_lines.size() != 0
509  && other_constraints.local_lines.is_element(line->entries[i].first) == false)
510  ||
511  other_constraints.is_constrained(line->entries[i].first) == false
512  ||
513  ((merge_conflict_behavior != right_object_wins)
514  && other_constraints.is_constrained(line->entries[i].first)
515  && this->is_constrained(line->entries[i].first)))
516  tmp.push_back(line->entries[i]);
517  else
518  // otherwise resolve further constraints by replacing the old
519  // entry by a sequence of new entries taken from the other
520  // object, but with multiplied weights
521  {
522  const ConstraintLine::Entries *other_line
523  = other_constraints.get_constraint_entries (line->entries[i].first);
524  Assert (other_line != 0,
525  ExcInternalError());
526 
527  const double weight = line->entries[i].second;
528 
529  for (ConstraintLine::Entries::const_iterator j=other_line->begin();
530  j!=other_line->end(); ++j)
531  tmp.push_back (std::pair<size_type,double>(j->first,
532  j->second*weight));
533 
534  line->inhomogeneity
535  += other_constraints.get_inhomogeneity(line->entries[i].first) *
536  weight;
537  }
538  }
539  // finally exchange old and newly resolved line
540  line->entries.swap (tmp);
541  }
542 
543  if (local_lines.size() != 0)
544  local_lines.add_indices(other_constraints.local_lines);
545 
546  {
547  // do not bother to resize the lines cache exactly since it is pretty
548  // cheap to adjust it along the way.
549  std::fill(lines_cache.begin(), lines_cache.end(), numbers::invalid_size_type);
550 
551  // reset lines_cache for our own constraints
552  size_type index = 0;
553  for (std::vector<ConstraintLine>::const_iterator line = lines.begin();
554  line != lines.end(); ++line)
555  {
556  size_type local_line_no = calculate_line_index(line->line);
557  if (local_line_no >= lines_cache.size())
558  lines_cache.resize(local_line_no+1, numbers::invalid_size_type);
559  lines_cache[local_line_no] = index++;
560  }
561 
562  // Add other_constraints to lines cache and our list of constraints
563  for (std::vector<ConstraintLine>::const_iterator line = other_constraints.lines.begin();
564  line != other_constraints.lines.end(); ++line)
565  {
566  const size_type local_line_no = calculate_line_index(line->line);
567  if (local_line_no >= lines_cache.size())
568  {
569  lines_cache.resize(local_line_no+1, numbers::invalid_size_type);
570  lines.push_back(*line);
571  lines_cache[local_line_no] = index++;
572  }
573  else if (lines_cache[local_line_no] == numbers::invalid_size_type)
574  {
575  // there are no constraints for that line yet
576  lines.push_back(*line);
577  AssertIndexRange(local_line_no, lines_cache.size());
578  lines_cache[local_line_no] = index++;
579  }
580  else
581  {
582  // we already store that line
583  switch (merge_conflict_behavior)
584  {
585  case no_conflicts_allowed:
586  AssertThrow (false,
587  ExcDoFIsConstrainedFromBothObjects (line->line));
588  break;
589 
590  case left_object_wins:
591  // ignore this constraint
592  break;
593 
594  case right_object_wins:
595  AssertIndexRange(local_line_no, lines_cache.size());
596  lines[lines_cache[local_line_no]] = *line;
597  break;
598 
599  default:
600  Assert (false, ExcNotImplemented());
601  }
602  }
603  }
604 
605  // check that we set the pointers correctly
606  for (size_type i=0; i<lines_cache.size(); ++i)
607  if (lines_cache[i] != numbers::invalid_size_type)
608  Assert (i == calculate_line_index(lines[lines_cache[i]].line),
609  ExcInternalError());
610  }
611 
612  // if the object was sorted before, then make sure it is so afterward as
613  // well. otherwise leave everything in the unsorted state
614  if (object_was_sorted == true)
615  close ();
616 }
617 
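// Usage sketch (editorial illustration): merging two constraint objects,
// e.g. hanging-node constraints with boundary constraints held in a second,
// hypothetical object `boundary_constraints`. With left_object_wins, lines
// already present in `constraints` take precedence on conflicting DoFs.
//
//   constraints.merge (boundary_constraints,
//                      ConstraintMatrix::left_object_wins);
//   constraints.close ();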
618 
619 
620 void ConstraintMatrix::shift (const size_type offset)
621 {
622  if (local_lines.size() == 0)
623  lines_cache.insert (lines_cache.begin(), offset,
624  numbers::invalid_size_type);
625  else
626  {
627  // shift local_lines
628  IndexSet new_local_lines(local_lines.size());
629  new_local_lines.add_indices(local_lines, offset);
630  std::swap(local_lines, new_local_lines);
631  }
632 
633  for (std::vector<ConstraintLine>::iterator i = lines.begin();
634  i != lines.end(); ++i)
635  {
636  i->line += offset;
637  for (ConstraintLine::Entries::iterator
638  j = i->entries.begin();
639  j != i->entries.end(); ++j)
640  j->first += offset;
641  }
642 
643 #ifdef DEBUG
644  // make sure that lines, lines_cache and local_lines
645  // are still linked correctly
646  for (size_type i=0; i<lines_cache.size(); ++i)
647  Assert(lines_cache[i] == numbers::invalid_size_type ||
648  calculate_line_index(lines[lines_cache[i]].line) == i,
649  ExcInternalError());
650 #endif
651 }
652 
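// Usage sketch (editorial illustration): shift() can translate constraints
// formulated for DoF indices 0..n-1 so that they apply to a range starting
// at some offset; `block_offset` below is a made-up value.
//
//   block_constraints.shift (block_offset);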
653 
654 
655 void ConstraintMatrix::clear ()
656 {
657  {
658  std::vector<ConstraintLine> tmp;
659  lines.swap (tmp);
660  }
661 
662  {
663  std::vector<size_type> tmp;
664  lines_cache.swap (tmp);
665  }
666 
667  sorted = false;
668 }
669 
670 
671 
672 void ConstraintMatrix::reinit (const IndexSet &local_constraints)
673 {
674  local_lines = local_constraints;
675 
676  // make sure the IndexSet is compressed. Otherwise this can lead to crashes
677  // that are hard to find (only happen in release mode).
678  // see tests/mpi/constraint_matrix_crash_01
679  local_lines.compress();
680 
681  clear();
682 }
683 
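// Usage sketch (editorial illustration) for the distributed case: restrict
// the constraint lines stored on this process to the locally relevant DoFs
// of an assumed `dof_handler`.
//
//   IndexSet locally_relevant_dofs;
//   DoFTools::extract_locally_relevant_dofs (dof_handler, locally_relevant_dofs);
//   ConstraintMatrix constraints;
//   constraints.reinit (locally_relevant_dofs);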
684 
685 
686 void ConstraintMatrix::condense (SparsityPattern &sparsity) const
687 {
688  Assert (sorted == true, ExcMatrixNotClosed());
689  Assert (sparsity.is_compressed() == false, ExcMatrixIsClosed());
690  Assert (sparsity.n_rows() == sparsity.n_cols(), ExcNotQuadratic());
691 
692  // store for each index whether it must be distributed or not. If entry
693 // is numbers::invalid_size_type, no distribution is necessary.
694  // otherwise, the number states which line in the constraint matrix
695  // handles this index
696  std::vector<size_type> distribute(sparsity.n_rows(),
697  numbers::invalid_size_type);
698 
699  for (size_type c=0; c<lines.size(); ++c)
700  distribute[lines[c].line] = c;
701 
702  const size_type n_rows = sparsity.n_rows();
703  for (size_type row=0; row<n_rows; ++row)
704  {
705  if (distribute[row] == numbers::invalid_size_type)
706  {
707  // regular line. loop over cols all valid cols. note that this
708  // changes the line we are presently working on: we add additional
709  // entries. these are put to the end of the row. however, as
710  // constrained nodes cannot be constrained to other constrained
711  // nodes, nothing will happen if we run into these added nodes, as
712  // they can't be distributed further. we might store the position of
713  // the last old entry and stop work there, but since operating on
714  // the newly added ones only takes two comparisons (column index
715  // valid, distribute[column] necessarily
716  // ==numbers::invalid_size_type), it is cheaper to not do so and
717  // run right until the end of the line
718  for (SparsityPattern::iterator entry = sparsity.begin(row);
719  ((entry != sparsity.end(row)) &&
720  entry->is_valid_entry());
721  ++entry)
722  {
723  const size_type column = entry->column();
724 
725  if (distribute[column] != numbers::invalid_size_type)
726  {
727  // distribute entry at regular row @p{row} and irregular
728  // column sparsity.colnums[j]
729  for (size_type q=0;
730  q!=lines[distribute[column]].entries.size();
731  ++q)
732  sparsity.add (row,
733  lines[distribute[column]].entries[q].first);
734  }
735  }
736  }
737  else
738  // row must be distributed. note that here the present row is not
739  // touched (unlike above)
740  {
741  for (SparsityPattern::iterator entry = sparsity.begin(row);
742  (entry != sparsity.end(row)) && entry->is_valid_entry(); ++entry)
743  {
744  const size_type column = entry->column();
745  if (distribute[column] == numbers::invalid_size_type)
746  // distribute entry at irregular row @p{row} and regular
747  // column sparsity.colnums[j]
748  for (size_type q=0;
749  q!=lines[distribute[row]].entries.size(); ++q)
750  sparsity.add (lines[distribute[row]].entries[q].first,
751  column);
752  else
753  // distribute entry at irregular row @p{row} and irregular
754  // column sparsity.get_column_numbers()[j]
755  for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
756  for (size_type q=0;
757  q!=lines[distribute[column]].entries.size(); ++q)
758  sparsity.add (lines[distribute[row]].entries[p].first,
759  lines[distribute[column]].entries[q].first);
760  }
761  }
762  }
763 
764  sparsity.compress();
765 }
766 
767 
768 
769 
770 void ConstraintMatrix::condense (DynamicSparsityPattern &sparsity) const
771 {
772  Assert (sorted == true, ExcMatrixNotClosed());
773  Assert (sparsity.n_rows() == sparsity.n_cols(),
774  ExcNotQuadratic());
775 
776  // store for each index whether it must be distributed or not. If entry
777 // is numbers::invalid_size_type, no distribution is necessary.
778  // otherwise, the number states which line in the constraint matrix
779  // handles this index
780  std::vector<size_type> distribute(sparsity.n_rows(),
781  numbers::invalid_size_type);
782 
783  for (size_type c=0; c<lines.size(); ++c)
784  distribute[lines[c].line] = c;
785 
786  const size_type n_rows = sparsity.n_rows();
787  for (size_type row=0; row<n_rows; ++row)
788  {
789  if (distribute[row] == numbers::invalid_size_type)
790  // regular line. loop over cols. note that as we proceed to
791  // distribute cols, the loop may get longer
792  for (size_type j=0; j<sparsity.row_length(row); ++j)
793  {
794  const size_type column = sparsity.column_number(row,j);
795 
796  if (distribute[column] != numbers::invalid_size_type)
797  {
798  // distribute entry at regular row @p{row} and irregular
799  // column column. note that this changes the line we are
800  // presently working on: we add additional entries. if we
801  // add another entry at a column behind the present one, we
802  // will encounter it later on (but since it can't be
803  // further constrained, won't have to do anything about
804  // it). if we add it up front of the present column, we
805  // will find the present column later on again as it was
806  // shifted back (again nothing happens, in particular no
807  // endless loop, as when we encounter it the second time we
808  // won't be able to add more entries as they all already
809  // exist, but we do the same work more often than
810  // necessary, and the loop gets longer), so move the cursor
811  // one to the right in the case that we add an entry up
812  // front that did not exist before. check whether it
813  // existed before by tracking the length of this row
814  size_type old_rowlength = sparsity.row_length(row);
815  for (size_type q=0;
816  q!=lines[distribute[column]].entries.size();
817  ++q)
818  {
819  const size_type
820  new_col = lines[distribute[column]].entries[q].first;
821 
822  sparsity.add (row, new_col);
823 
824  const size_type new_rowlength = sparsity.row_length(row);
825  if ((new_col < column) && (old_rowlength != new_rowlength))
826  ++j;
827  old_rowlength = new_rowlength;
828  };
829  };
830  }
831  else
832  // row must be distributed
833  for (size_type j=0; j<sparsity.row_length(row); ++j)
834  {
835  const size_type column = sparsity.column_number(row,j);
836 
837  if (distribute[column] == numbers::invalid_size_type)
838  // distribute entry at irregular row @p{row} and regular
839  // column sparsity.colnums[j]
840  for (size_type q=0;
841  q!=lines[distribute[row]].entries.size(); ++q)
842  sparsity.add (lines[distribute[row]].entries[q].first,
843  column);
844  else
845  // distribute entry at irregular row @p{row} and irregular
846  // column sparsity.get_column_numbers()[j]
847  for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
848  for (size_type q=0;
849  q!=lines[distribute[sparsity.column_number(row,j)]]
850  .entries.size(); ++q)
851  sparsity.add (lines[distribute[row]].entries[p].first,
852  lines[distribute[sparsity.column_number(row,j)]]
853  .entries[q].first);
854  };
855  };
856 }
857 
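// Usage sketch (editorial illustration): the common workflow is to build a
// DynamicSparsityPattern for an assumed `dof_handler`, condense it with the
// already closed constraints, and copy the result into a static pattern.
//
//   DynamicSparsityPattern dsp (dof_handler.n_dofs());
//   DoFTools::make_sparsity_pattern (dof_handler, dsp);
//   constraints.condense (dsp);
//   SparsityPattern sparsity_pattern;
//   sparsity_pattern.copy_from (dsp);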
858 
859 
860 void ConstraintMatrix::condense (BlockSparsityPattern &sparsity) const
861 {
862  Assert (sorted == true, ExcMatrixNotClosed());
863  Assert (sparsity.is_compressed() == false, ExcMatrixIsClosed());
864  Assert (sparsity.n_rows() == sparsity.n_cols(),
865  ExcNotQuadratic());
866  Assert (sparsity.n_block_rows() == sparsity.n_block_cols(),
867  ExcNotQuadratic());
868  Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
869  ExcNotQuadratic());
870 
871  const BlockIndices &
872  index_mapping = sparsity.get_column_indices();
873 
874  const size_type n_blocks = sparsity.n_block_rows();
875 
876  // store for each index whether it must be distributed or not. If entry
877 // is numbers::invalid_size_type, no distribution is necessary.
878  // otherwise, the number states which line in the constraint matrix
879  // handles this index
880  std::vector<size_type> distribute (sparsity.n_rows(),
881  numbers::invalid_size_type);
882 
883  for (size_type c=0; c<lines.size(); ++c)
884  distribute[lines[c].line] = c;
885 
886  const size_type n_rows = sparsity.n_rows();
887  for (size_type row=0; row<n_rows; ++row)
888  {
889  // get index of this row within the blocks
890  const std::pair<size_type,size_type>
891  block_index = index_mapping.global_to_local(row);
892  const size_type block_row = block_index.first;
893 
894  if (distribute[row] == numbers::invalid_size_type)
895  // regular line. loop over all columns and see whether this column
896  // must be distributed
897  {
898 
899  // to loop over all entries in this row, we have to loop over all
900  // blocks in this blockrow and the corresponding row therein
901  for (size_type block_col=0; block_col<n_blocks; ++block_col)
902  {
903  const SparsityPattern &
904  block_sparsity = sparsity.block(block_row, block_col);
905 
906  for (SparsityPattern::iterator
907  entry = block_sparsity.begin(block_index.second);
908  (entry != block_sparsity.end(block_index.second)) &&
909  entry->is_valid_entry();
910  ++entry)
911  {
912  const size_type global_col
913  = index_mapping.local_to_global(block_col, entry->column());
914 
915  if (distribute[global_col] != numbers::invalid_size_type)
916  // distribute entry at regular row @p{row} and
917  // irregular column global_col
918  {
919  for (size_type q=0;
920  q!=lines[distribute[global_col]].entries.size(); ++q)
921  sparsity.add (row,
922  lines[distribute[global_col]].entries[q].first);
923  }
924  }
925  }
926  }
927  else
928  {
929  // row must be distributed. split the whole row into the chunks
930  // defined by the blocks
931  for (size_type block_col=0; block_col<n_blocks; ++block_col)
932  {
933  const SparsityPattern &
934  block_sparsity = sparsity.block(block_row,block_col);
935 
937  for (SparsityPattern::iterator
938  entry = block_sparsity.begin(block_index.second);
938  (entry != block_sparsity.end(block_index.second)) &&
939  entry->is_valid_entry();
940  ++entry)
941  {
942  const size_type global_col
943  = index_mapping.local_to_global (block_col, entry->column());
944 
945  if (distribute[global_col] == numbers::invalid_size_type)
946  // distribute entry at irregular row @p{row} and
947  // regular column global_col.
948  {
949  for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
950  sparsity.add (lines[distribute[row]].entries[q].first, global_col);
951  }
952  else
953  // distribute entry at irregular row @p{row} and
954  // irregular column @p{global_col}
955  {
956  for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
957  for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
958  sparsity.add (lines[distribute[row]].entries[p].first,
959  lines[distribute[global_col]].entries[q].first);
960  }
961  }
962  }
963  }
964  }
965 
966  sparsity.compress();
967 }
968 
969 
970 
971 
972 void ConstraintMatrix::condense (BlockDynamicSparsityPattern &sparsity) const
973 {
974  Assert (sorted == true, ExcMatrixNotClosed());
975  Assert (sparsity.n_rows() == sparsity.n_cols(),
976  ExcNotQuadratic());
977  Assert (sparsity.n_block_rows() == sparsity.n_block_cols(),
978  ExcNotQuadratic());
979  Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
980  ExcNotQuadratic());
981 
982  const BlockIndices &
983  index_mapping = sparsity.get_column_indices();
984 
985  const size_type n_blocks = sparsity.n_block_rows();
986 
987  // store for each index whether it must be distributed or not. If entry
988 // is numbers::invalid_size_type, no distribution is necessary.
989  // otherwise, the number states which line in the constraint matrix
990  // handles this index
991  std::vector<size_type> distribute (sparsity.n_rows(),
992  numbers::invalid_size_type);
993 
994  for (size_type c=0; c<lines.size(); ++c)
995  distribute[lines[c].line] = static_cast<signed int>(c);
996 
997  const size_type n_rows = sparsity.n_rows();
998  for (size_type row=0; row<n_rows; ++row)
999  {
1000  // get index of this row within the blocks
1001  const std::pair<size_type,size_type>
1002  block_index = index_mapping.global_to_local(row);
1003  const size_type block_row = block_index.first;
1004  const size_type local_row = block_index.second;
1005 
1006  if (distribute[row] == numbers::invalid_size_type)
1007  // regular line. loop over all columns and see whether this column
1008  // must be distributed. note that as we proceed to distribute cols,
1009  // the loop over cols may get longer.
1010  //
1011  // don't try to be clever here as in the algorithm for the
1012  // DynamicSparsityPattern, as that would be much more
1013  // complicated here. after all, we know that compressed patterns
1014  // are inefficient...
1015  {
1016 
1017  // to loop over all entries in this row, we have to loop over all
1018  // blocks in this blockrow and the corresponding row therein
1019  for (size_type block_col=0; block_col<n_blocks; ++block_col)
1020  {
1021  const DynamicSparsityPattern &
1022  block_sparsity = sparsity.block(block_row, block_col);
1023 
1024  for (size_type j=0; j<block_sparsity.row_length(local_row); ++j)
1025  {
1026  const size_type global_col
1027  = index_mapping.local_to_global(block_col,
1028  block_sparsity.column_number(local_row,j));
1029 
1030  if (distribute[global_col] != numbers::invalid_size_type)
1031  // distribute entry at regular row @p{row} and
1032  // irregular column global_col
1033  {
1034  for (size_type q=0;
1035  q!=lines[distribute[global_col]]
1036  .entries.size(); ++q)
1037  sparsity.add (row,
1038  lines[distribute[global_col]].entries[q].first);
1039  };
1040  };
1041  };
1042  }
1043  else
1044  {
1045  // row must be distributed. split the whole row into the chunks
1046  // defined by the blocks
1047  for (size_type block_col=0; block_col<n_blocks; ++block_col)
1048  {
1049  const DynamicSparsityPattern &
1050  block_sparsity = sparsity.block(block_row,block_col);
1051 
1052  for (size_type j=0; j<block_sparsity.row_length(local_row); ++j)
1053  {
1054  const size_type global_col
1055  = index_mapping.local_to_global (block_col,
1056  block_sparsity.column_number(local_row,j));
1057 
1058  if (distribute[global_col] == numbers::invalid_size_type)
1059  // distribute entry at irregular row @p{row} and
1060  // regular column global_col.
1061  {
1062  for (size_type q=0;
1063  q!=lines[distribute[row]].entries.size(); ++q)
1064  sparsity.add (lines[distribute[row]].entries[q].first,
1065  global_col);
1066  }
1067  else
1068  // distribute entry at irregular row @p{row} and
1069  // irregular column @p{global_col}
1070  {
1071  for (size_type p=0;
1072  p!=lines[distribute[row]].entries.size(); ++p)
1073  for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
1074  sparsity.add (lines[distribute[row]].entries[p].first,
1075  lines[distribute[global_col]].entries[q].first);
1076  };
1077  };
1078  };
1079  };
1080  };
1081 }
1082 
1083 
1084 
1085 bool ConstraintMatrix::is_identity_constrained (const size_type index) const
1086 {
1087  if (is_constrained(index) == false)
1088  return false;
1089 
1090  const ConstraintLine &p = lines[lines_cache[calculate_line_index(index)]];
1091  Assert (p.line == index, ExcInternalError());
1092 
1093  // return if an entry for this line was found and if it has only one
1094  // entry equal to 1.0
1095  return ((p.entries.size() == 1) &&
1096  (p.entries[0].second == 1.0));
1097 }
1098 
1099 
1100 bool ConstraintMatrix::are_identity_constrained (const size_type index1,
1101  const size_type index2) const
1102 {
1103  if (is_constrained(index1) == true)
1104  {
1105  const ConstraintLine &p = lines[lines_cache[calculate_line_index(index1)]];
1106  Assert (p.line == index1, ExcInternalError());
1107 
1108  // return if an entry for this line was found and if it has only one
1109  // entry equal to 1.0 and that one is index2
1110  return ((p.entries.size() == 1) &&
1111  (p.entries[0].first == index2) &&
1112  (p.entries[0].second == 1.0));
1113  }
1114  else if (is_constrained(index2) == true)
1115  {
1116  const ConstraintLine &p = lines[lines_cache[calculate_line_index(index2)]];
1117  Assert (p.line == index2, ExcInternalError());
1118 
1119  // return if an entry for this line was found and if it has only one
1120  // entry equal to 1.0 and that one is index1
1121  return ((p.entries.size() == 1) &&
1122  (p.entries[0].first == index1) &&
1123  (p.entries[0].second == 1.0));
1124  }
1125  else
1126  return false;
1127 }
1128 
1129 
1130 
1131 ConstraintMatrix::size_type
1132 ConstraintMatrix::max_constraint_indirections () const
1133 {
1134  size_type return_value = 0;
1135  for (std::vector<ConstraintLine>::const_iterator i=lines.begin();
1136  i!=lines.end(); ++i)
1137  // use static cast, since typeof(size)==std::size_t, which is !=
1138  // size_type on AIX
1139  return_value = std::max(return_value,
1140  static_cast<size_type>(i->entries.size()));
1141 
1142  return return_value;
1143 }
1144 
1145 
1146 
1147 bool ConstraintMatrix::has_inhomogeneities () const
1148 {
1149  for (std::vector<ConstraintLine>::const_iterator i=lines.begin();
1150  i!=lines.end(); ++i)
1151  if (i->inhomogeneity != 0.)
1152  return true;
1153 
1154  return false;
1155 }
1156 
1157 
1158 void ConstraintMatrix::print (std::ostream &out) const
1159 {
1160  for (size_type i=0; i!=lines.size(); ++i)
1161  {
1162  // output the list of constraints as pairs of dofs and their weights
1163  if (lines[i].entries.size() > 0)
1164  {
1165  for (size_type j=0; j<lines[i].entries.size(); ++j)
1166  out << " " << lines[i].line
1167  << " " << lines[i].entries[j].first
1168  << ": " << lines[i].entries[j].second << "\n";
1169 
1170  // print out inhomogeneity.
1171  if (lines[i].inhomogeneity != 0)
1172  out << " " << lines[i].line
1173  << ": " << lines[i].inhomogeneity << "\n";
1174  }
1175  else
1176  // but also output something if the constraint simply reads
1177  // x[13]=0, i.e. where the right hand side is not a linear
1178  // combination of other dofs
1179  {
1180  if (lines[i].inhomogeneity != 0)
1181  out << " " << lines[i].line
1182  << " = " << lines[i].inhomogeneity
1183  << "\n";
1184  else
1185  out << " " << lines[i].line << " = 0\n";
1186  }
1187  }
1188 
1189  AssertThrow (out, ExcIO());
1190 }
1191 
1192 
1193 
1194 void
1195 ConstraintMatrix::write_dot (std::ostream &out) const
1196 {
1197  out << "digraph constraints {"
1198  << std::endl;
1199  for (size_type i=0; i!=lines.size(); ++i)
1200  {
1201  // same concept as in the previous function
1202  if (lines[i].entries.size() > 0)
1203  for (size_type j=0; j<lines[i].entries.size(); ++j)
1204  out << " " << lines[i].line << "->" << lines[i].entries[j].first
1205  << "; // weight: "
1206  << lines[i].entries[j].second
1207  << "\n";
1208  else
1209  out << " " << lines[i].line << "\n";
1210  }
1211  out << "}" << std::endl;
1212 }
1213 
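// Usage sketch (editorial illustration): write the constraint graph to a
// file (requires <fstream>) and render it with graphviz.
//
//   std::ofstream out ("constraints.dot");
//   constraints.write_dot (out);
//   // shell: dot -Tpng constraints.dot -o constraints.png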
1214 
1215 
1216 std::size_t
1217 ConstraintMatrix::memory_consumption () const
1218 {
1219  return (MemoryConsumption::memory_consumption (lines) +
1220          MemoryConsumption::memory_consumption (lines_cache) +
1221          MemoryConsumption::memory_consumption (sorted) +
1222          MemoryConsumption::memory_consumption (local_lines));
1223 }
1224 
1225 
1226 
1227 void
1228 ConstraintMatrix::resolve_indices (std::vector<types::global_dof_index> &indices) const
1229 {
1230  const unsigned int indices_size = indices.size();
1231  const std::vector<std::pair<types::global_dof_index,double> > *line_ptr;
1232  for (unsigned int i=0; i<indices_size; ++i)
1233  {
1234  line_ptr = get_constraint_entries(indices[i]);
1235 // if the index is constrained, the constraint's indices are added to the
1236  // indices vector
1237  if (line_ptr!=NULL)
1238  {
1239  const unsigned int line_size = line_ptr->size();
1240  for (unsigned int j=0; j<line_size; ++j)
1241  indices.push_back((*line_ptr)[j].first);
1242  }
1243  }
1244 
1245  // keep only the unique elements
1246  std::sort(indices.begin(),indices.end());
1247  std::vector<types::global_dof_index>::iterator it;
1248  it = std::unique(indices.begin(),indices.end());
1249  indices.resize(it-indices.begin());
1250 }
1251 
1252 
1253 
1254 // explicit instantiations
1255 //
1256 // define a list of functions for vectors and matrices, respectively, where
1257 // the vector/matrix can be replaced using a preprocessor variable
1258 // VectorType/MatrixType. note that we need a space between "VectorType" and
1259 // ">" to disambiguate ">>" when VectorType trails in an angle bracket
1260 
1261 // TODO: The way we define all the instantiations is probably not the very
1262 // best one. Try to find a better description.
1263 
1264 #define VECTOR_FUNCTIONS(VectorType) \
1265  template void ConstraintMatrix::condense<VectorType >(const VectorType &uncondensed,\
1266  VectorType &condensed) const;\
1267  template void ConstraintMatrix::condense<VectorType >(VectorType &vec) const;\
1268  template void ConstraintMatrix:: \
1269  distribute_local_to_global<VectorType > (const Vector<VectorType::value_type> &, \
1270  const std::vector<ConstraintMatrix::size_type> &, \
1271  VectorType &, \
1272  const FullMatrix<VectorType::value_type> &) const;\
1273  template void ConstraintMatrix:: \
1274  distribute_local_to_global<VectorType > (const Vector<VectorType::value_type> &, \
1275  const std::vector<ConstraintMatrix::size_type> &, \
1276  const std::vector<ConstraintMatrix::size_type> &, \
1277  VectorType &, \
1278  const FullMatrix<VectorType::value_type> &, \
1279  bool) const
1280 
1281 #define PARALLEL_VECTOR_FUNCTIONS(VectorType) \
1282  template void ConstraintMatrix:: \
1283  distribute_local_to_global<VectorType > (const Vector<VectorType::value_type> &, \
1284  const std::vector<ConstraintMatrix::size_type> &, \
1285  VectorType &, \
1286  const FullMatrix<VectorType::value_type> &) const;\
1287  template void ConstraintMatrix:: \
1288  distribute_local_to_global<VectorType > (const Vector<VectorType::value_type> &, \
1289  const std::vector<ConstraintMatrix::size_type> &, \
1290  const std::vector<ConstraintMatrix::size_type> &, \
1291  VectorType &, \
1292  const FullMatrix<VectorType::value_type> &, \
1293  bool) const
1294 
1295 #ifdef DEAL_II_WITH_PETSC
1296 VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
1297 VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector);
1298 #endif
1299 
1300 #ifdef DEAL_II_WITH_TRILINOS
1301 PARALLEL_VECTOR_FUNCTIONS(TrilinosWrappers::MPI::Vector);
1302 PARALLEL_VECTOR_FUNCTIONS(TrilinosWrappers::MPI::BlockVector);
1303 #endif
1304 
1305 #define MATRIX_VECTOR_FUNCTIONS(MatrixType, VectorType) \
1306  template void ConstraintMatrix:: \
1307  distribute_local_to_global<MatrixType,VectorType > (const FullMatrix<MatrixType::value_type> &, \
1308  const Vector<VectorType::value_type> &, \
1309  const std::vector<ConstraintMatrix::size_type> &, \
1310  MatrixType &, \
1311  VectorType &, \
1312  bool , \
1313  internal::bool2type<false>) const
1314 #define MATRIX_FUNCTIONS(MatrixType) \
1315  template void ConstraintMatrix:: \
1316  distribute_local_to_global<MatrixType,Vector<MatrixType::value_type> > (const FullMatrix<MatrixType::value_type> &, \
1317  const Vector<MatrixType::value_type> &, \
1318  const std::vector<ConstraintMatrix::size_type> &, \
1319  MatrixType &, \
1320  Vector<MatrixType::value_type> &, \
1321  bool , \
1322  internal::bool2type<false>) const
1323 #define BLOCK_MATRIX_VECTOR_FUNCTIONS(MatrixType, VectorType) \
1324  template void ConstraintMatrix:: \
1325  distribute_local_to_global<MatrixType,VectorType > (const FullMatrix<MatrixType::value_type> &, \
1326  const Vector<VectorType::value_type> &, \
1327  const std::vector<ConstraintMatrix::size_type> &, \
1328  MatrixType &, \
1329  VectorType &, \
1330  bool , \
1331  internal::bool2type<true>) const
1332 #define BLOCK_MATRIX_FUNCTIONS(MatrixType) \
1333  template void ConstraintMatrix:: \
1334  distribute_local_to_global<MatrixType,Vector<MatrixType::value_type> > (const FullMatrix<MatrixType::value_type> &, \
1335  const Vector<MatrixType::value_type> &, \
1336  const std::vector<ConstraintMatrix::size_type> &, \
1337  MatrixType &, \
1338  Vector<MatrixType::value_type> &, \
1339  bool , \
1340  internal::bool2type<true>) const
1341 
1342 MATRIX_FUNCTIONS(SparseMatrix<double>);
1343 MATRIX_FUNCTIONS(SparseMatrix<float>);
1344 MATRIX_FUNCTIONS(FullMatrix<double>);
1345 MATRIX_FUNCTIONS(FullMatrix<float>);
1346 MATRIX_FUNCTIONS(FullMatrix<std::complex<double> >);
1347 MATRIX_FUNCTIONS(SparseMatrix<std::complex<double> >);
1348 MATRIX_FUNCTIONS(SparseMatrix<std::complex<float> >);
1349 
1350 BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrix<double>);
1351 BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrix<float>);
1352 BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrix<double>, BlockVector<double>);
1353 BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrix<float>, BlockVector<float>);
1354 
1355 MATRIX_FUNCTIONS(SparseMatrixEZ<double>);
1356 MATRIX_FUNCTIONS(SparseMatrixEZ<float>);
1357 MATRIX_FUNCTIONS(ChunkSparseMatrix<double>);
1358 MATRIX_FUNCTIONS(ChunkSparseMatrix<float>);
1359 
1360 // BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrixEZ<double>);
1361 // BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrixEZ<float>, Vector<float>);
1362 
1363 #ifdef DEAL_II_WITH_PETSC
1364 MATRIX_FUNCTIONS(PETScWrappers::SparseMatrix);
1365 BLOCK_MATRIX_FUNCTIONS(PETScWrappers::BlockSparseMatrix);
1366 MATRIX_FUNCTIONS(PETScWrappers::MPI::SparseMatrix);
1367 BLOCK_MATRIX_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix);
1368 MATRIX_VECTOR_FUNCTIONS(PETScWrappers::SparseMatrix, PETScWrappers::Vector);
1369 BLOCK_MATRIX_VECTOR_FUNCTIONS(PETScWrappers::BlockSparseMatrix, PETScWrappers::BlockVector);
1372 #endif
1373 
1374 #ifdef DEAL_II_WITH_TRILINOS
1375 MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix);
1376 BLOCK_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix);
1381 #endif
1382 
1383 
1384 #define SPARSITY_FUNCTIONS(SparsityPatternType) \
1385  template void ConstraintMatrix::add_entries_local_to_global<SparsityPatternType> ( \
1386  const std::vector<ConstraintMatrix::size_type> &, \
1387  SparsityPatternType &, \
1388  const bool, \
1389  const Table<2,bool> &, \
1390  internal::bool2type<false>) const; \
1391  template void ConstraintMatrix::add_entries_local_to_global<SparsityPatternType> ( \
1392  const std::vector<ConstraintMatrix::size_type> &, \
1393  const std::vector<ConstraintMatrix::size_type> &, \
1394  SparsityPatternType &, \
1395  const bool, \
1396  const Table<2,bool> &) const
1397 #define BLOCK_SPARSITY_FUNCTIONS(SparsityPatternType) \
1398  template void ConstraintMatrix::add_entries_local_to_global<SparsityPatternType> ( \
1399  const std::vector<ConstraintMatrix::size_type> &, \
1400  SparsityPatternType &, \
1401  const bool, \
1402  const Table<2,bool> &, \
1403  internal::bool2type<true>) const; \
1404  template void ConstraintMatrix::add_entries_local_to_global<SparsityPatternType> ( \
1405  const std::vector<ConstraintMatrix::size_type> &, \
1406  const std::vector<ConstraintMatrix::size_type> &, \
1407  SparsityPatternType &, \
1408  const bool, \
1409  const Table<2,bool> &) const
1410 
1411 SPARSITY_FUNCTIONS(SparsityPattern);
1412 SPARSITY_FUNCTIONS(DynamicSparsityPattern);
1413 BLOCK_SPARSITY_FUNCTIONS(BlockSparsityPattern);
1414 BLOCK_SPARSITY_FUNCTIONS(BlockDynamicSparsityPattern);
1415 
1416 #ifdef DEAL_II_WITH_TRILINOS
1417 SPARSITY_FUNCTIONS(TrilinosWrappers::SparsityPattern);
1418 BLOCK_SPARSITY_FUNCTIONS(TrilinosWrappers::BlockSparsityPattern);
1419 #endif
1420 
1421 
1422 #define ONLY_MATRIX_FUNCTIONS(MatrixType) \
1423  template void ConstraintMatrix::distribute_local_to_global<MatrixType > (\
1424  const FullMatrix<MatrixType::value_type> &, \
1425  const std::vector<ConstraintMatrix::size_type> &, \
1426  const std::vector<ConstraintMatrix::size_type> &, \
1427  MatrixType &) const
1428 
1429 ONLY_MATRIX_FUNCTIONS(FullMatrix<float>);
1430 ONLY_MATRIX_FUNCTIONS(FullMatrix<double>);
1431 ONLY_MATRIX_FUNCTIONS(SparseMatrix<float>);
1432 ONLY_MATRIX_FUNCTIONS(SparseMatrix<double>);
1433 ONLY_MATRIX_FUNCTIONS(MatrixBlock<SparseMatrix<float> >);
1434 ONLY_MATRIX_FUNCTIONS(MatrixBlock<SparseMatrix<double> >);
1435 ONLY_MATRIX_FUNCTIONS(BlockSparseMatrix<float>);
1436 ONLY_MATRIX_FUNCTIONS(BlockSparseMatrix<double>);
1437 
1438 #ifdef DEAL_II_WITH_TRILINOS
1439 ONLY_MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix);
1440 ONLY_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix);
1441 #endif
1442 
1443 #ifdef DEAL_II_WITH_PETSC
1444 ONLY_MATRIX_FUNCTIONS(PETScWrappers::SparseMatrix);
1445 ONLY_MATRIX_FUNCTIONS(PETScWrappers::BlockSparseMatrix);
1446 ONLY_MATRIX_FUNCTIONS(PETScWrappers::MPI::SparseMatrix);
1447 ONLY_MATRIX_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix);
1448 #endif
1449 
1450 #include "constraint_matrix.inst"
1451 
1452 // allocate scratch data. Cannot use the generic template instantiation
1453 // because we need to provide an initializer object of type
1454 // internals::ConstraintMatrixData<Number> that can be passed to the
1455 // constructor of scratch_data (it won't allow one to be constructed in place).
1456 namespace internals
1457 {
1458 #define SCRATCH_INITIALIZER(Number,Name) \
1459  ConstraintMatrixData<Number>::ScratchData scratch_data_initializer_##Name; \
1460  template<> Threads::ThreadLocalStorage<ConstraintMatrixData<Number>::ScratchData> \
1461  ConstraintMatrixData<Number>::scratch_data(scratch_data_initializer_##Name)
1462 
1463  SCRATCH_INITIALIZER(double,double);
1464  SCRATCH_INITIALIZER(float,float);
1465  SCRATCH_INITIALIZER(std::complex<double>,cdouble);
1466  SCRATCH_INITIALIZER(std::complex<float>,cfloat);
1467 #undef SCRATCH_INITIALIZER
1468 }
1469 
1470 
1471 DEAL_II_NAMESPACE_CLOSE