#ifndef dealii_petsc_block_vector_h
#define dealii_petsc_block_vector_h

#include <deal.II/base/config.h>

#ifdef DEAL_II_WITH_PETSC

#  include <deal.II/lac/block_vector_base.h>
#  include <deal.II/lac/petsc_vector.h>

/** Constructor: one block per entry of @p block_sizes, each distributed
 *  across @p communicator and storing @p local_elements[i] elements locally. */
BlockVector(const std::vector<size_type> &block_sizes,
            const MPI_Comm               &communicator,
            const std::vector<size_type> &local_elements);

/** Constructor: one block per entry of @p parallel_partitioning, each
 *  initialized with the given IndexSet. */
explicit BlockVector(const std::vector<IndexSet> &parallel_partitioning,
                     const MPI_Comm              &communicator = MPI_COMM_WORLD);

/** Same as above, but additionally store the given ghost indices in each
 *  block. */
BlockVector(const std::vector<IndexSet> &parallel_partitioning,
            const std::vector<IndexSet> &ghost_indices,
            const MPI_Comm              &communicator);

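/*
 * A minimal usage sketch for the two IndexSet-based constructors above (not
 * part of the original interface documentation). The partitioning objects
 * and the block size of 100 are hypothetical placeholders:
 *
 * @code
 *   // Two blocks, each with 100 global entries; here every process owns
 *   // the complete range, as in a serial run.
 *   std::vector<IndexSet> owned(2, complete_index_set(100));
 *   std::vector<IndexSet> ghosts(2, IndexSet(100));
 *
 *   // Non-ghosted vector: one block per IndexSet.
 *   PETScWrappers::MPI::BlockVector v(owned, MPI_COMM_WORLD);
 *
 *   // Vector that additionally stores the (here empty) ghost entries.
 *   PETScWrappers::MPI::BlockVector v_ghosted(owned, ghosts, MPI_COMM_WORLD);
 * @endcode
 */
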
/** Reinitialize to @p n_blocks blocks of global size @p block_size, each
 *  storing @p local_size elements on the present process. */
void
reinit(const unsigned int n_blocks,
       const MPI_Comm    &communicator,
       const size_type    block_size,
       const size_type    local_size,
       const bool         omit_zeroing_entries = false);

/** Reinitialize to block_sizes.size() blocks, where block i has global size
 *  @p block_sizes[i] and stores @p local_sizes[i] elements locally. */
void
reinit(const std::vector<size_type> &block_sizes,
       const MPI_Comm               &communicator,
       const std::vector<size_type> &local_sizes,
       const bool                    omit_zeroing_entries = false);

/** Reinitialize with one block per entry of @p parallel_partitioning. */
void
reinit(const std::vector<IndexSet> &parallel_partitioning,
       const MPI_Comm              &communicator);

/** Same as above, but additionally store the given ghost entries. */
void
reinit(const std::vector<IndexSet> &parallel_partitioning,
       const std::vector<IndexSet> &ghost_entries,
       const MPI_Comm              &communicator);

/** Change the number of blocks to @p num_blocks; the individual blocks
 *  still have to be initialized afterwards. */
void
reinit(const unsigned int num_blocks);

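/*
 * A usage sketch for the reinit() overloads above (not part of the original
 * interface documentation); the partitioning is a hypothetical serial one,
 * and the per-block initialization follows the usual deal.II block-vector
 * pattern of reinitializing each block and then calling collect_sizes():
 *
 * @code
 *   std::vector<IndexSet> partitioning(2, complete_index_set(50));
 *
 *   PETScWrappers::MPI::BlockVector v;
 *
 *   // Initialize all blocks in one call from the IndexSets.
 *   v.reinit(partitioning, MPI_COMM_WORLD);
 *
 *   // Alternatively: set the number of blocks first, reinitialize each
 *   // block individually, and update the block indices afterwards.
 *   v.reinit(2);
 *   for (unsigned int b = 0; b < v.n_blocks(); ++b)
 *     v.block(b).reinit(partitioning[b], MPI_COMM_WORLD);
 *   v.collect_sizes();
 * @endcode
 */
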
/** Print to a stream. */
void
print(std::ostream      &out,
      const unsigned int precision  = 3,
      const bool         scientific = true,
      const bool         across     = true) const;

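/*
 * Example (not part of the original interface documentation) of writing a
 * previously initialized vector @p v to a stream:
 *
 * @code
 *   // 6 digits, fixed-point notation, each block's entries on separate lines.
 *   v.print(std::cout, 6, false, false);
 * @endcode
 */
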
/* ------------------------- inline functions ---------------------------- */

inline BlockVector::BlockVector(const std::vector<size_type> &block_sizes,
                                const MPI_Comm               &communicator,
                                const std::vector<size_type> &local_elements)
{
  reinit(block_sizes, communicator, local_elements, false);
}

inline BlockVector::BlockVector(const BlockVector &v)
{
  this->block_indices = v.block_indices;
  this->components.resize(this->n_blocks());

  // Copy the individual blocks of v.
  for (unsigned int i = 0; i < this->n_blocks(); ++i)
    this->components[i] = v.components[i];
}

inline BlockVector::BlockVector(const std::vector<IndexSet> &parallel_partitioning,
                                const MPI_Comm              &communicator)
{
  reinit(parallel_partitioning, communicator);
}

inline BlockVector::BlockVector(const std::vector<IndexSet> &parallel_partitioning,
                                const std::vector<IndexSet> &ghost_indices,
                                const MPI_Comm              &communicator)
{
  reinit(parallel_partitioning, ghost_indices, communicator);
}

inline void
BlockVector::reinit(const unsigned int n_blocks,
                    const MPI_Comm    &communicator,
                    const size_type    block_size,
                    const size_type    local_size,
                    const bool         omit_zeroing_entries)
{
  // Expand the scalar sizes into per-block vectors and forward to the
  // vector-based reinit() below.
  reinit(std::vector<size_type>(n_blocks, block_size),
         communicator,
         std::vector<size_type>(n_blocks, local_size),
         omit_zeroing_entries);
}

inline void
BlockVector::reinit(const std::vector<size_type> &block_sizes,
                    const MPI_Comm               &communicator,
                    const std::vector<size_type> &local_sizes,
                    const bool                    omit_zeroing_entries)
{
  // Set up the block structure, then reinitialize each block with its
  // global and local size.
  this->block_indices.reinit(block_sizes);
  if (this->components.size() != this->n_blocks())
    this->components.resize(this->n_blocks());

  for (unsigned int i = 0; i < this->n_blocks(); ++i)
    block(i).reinit(communicator,
                    block_sizes[i],
                    local_sizes[i],
                    omit_zeroing_entries);
}

inline void
BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries)
{
  // Copy the block structure of v and reinitialize each block accordingly.
  this->block_indices = v.get_block_indices();
  if (this->components.size() != this->n_blocks())
    this->components.resize(this->n_blocks());

  for (unsigned int i = 0; i < this->n_blocks(); ++i)
    block(i).reinit(v.block(i), omit_zeroing_entries);
}

inline void
BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
                    const MPI_Comm              &communicator)
{
  // Extract the global size of each block from its IndexSet.
  std::vector<size_type> sizes(parallel_partitioning.size());
  for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
    sizes[i] = parallel_partitioning[i].size();

  this->block_indices.reinit(sizes);
  if (this->components.size() != this->n_blocks())
    this->components.resize(this->n_blocks());

  for (unsigned int i = 0; i < this->n_blocks(); ++i)
    block(i).reinit(parallel_partitioning[i], communicator);
}

inline void
BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
                    const std::vector<IndexSet> &ghost_entries,
                    const MPI_Comm              &communicator)
{
  // Extract the global size of each block from its IndexSet.
  std::vector<types::global_dof_index> sizes(parallel_partitioning.size());
  for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
    sizes[i] = parallel_partitioning[i].size();

  this->block_indices.reinit(sizes);
  if (this->components.size() != this->n_blocks())
    this->components.resize(this->n_blocks());

  for (unsigned int i = 0; i < this->n_blocks(); ++i)
    block(i).reinit(parallel_partitioning[i], ghost_entries[i], communicator);
}

inline const MPI_Comm &
BlockVector::get_mpi_communicator() const
{
  return block(0).get_mpi_communicator();
}

inline bool
BlockVector::has_ghost_elements() const
{
  bool ghosted = block(0).has_ghost_elements();
  // All blocks are expected to agree on whether they store ghost elements.
  for (unsigned int i = 0; i < this->n_blocks(); ++i)
    Assert(block(i).has_ghost_elements() == ghosted, ExcInternalError());
  return ghosted;
}

inline void
BlockVector::print(std::ostream      &out,
                   const unsigned int precision,
                   const bool         scientific,
                   const bool         across) const
{
  for (unsigned int i = 0; i < this->n_blocks(); ++i)
    {
      // Label each block, either inline or on its own line, then delegate
      // to the block's own print().
      if (across)
        out << 'C' << i << ':';
      else
        out << "Component " << i << std::endl;
      this->components[i].print(out, precision, scientific, across);
    }
}

namespace LinearOperatorImplementation
{

template <typename Matrix>
static void
reinit_range_vector(const Matrix                    &matrix,
                    PETScWrappers::MPI::BlockVector &v,
                    bool /*omit_zeroing_entries*/)
{
  v.reinit(matrix.locally_owned_range_indices(),
           matrix.get_mpi_communicator());
}

template <typename Matrix>
static void
reinit_domain_vector(const Matrix                    &matrix,
                     PETScWrappers::MPI::BlockVector &v,
                     bool /*omit_zeroing_entries*/)
{
  v.reinit(matrix.locally_owned_domain_indices(),
           matrix.get_mpi_communicator());
}

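/*
 * Sketch (not part of the original header) of what the two helpers above
 * effectively do when a linear operator needs vectors compatible with a
 * block matrix; the matrix object A is assumed to exist and to have been
 * initialized elsewhere:
 *
 * @code
 *   PETScWrappers::MPI::BlockSparseMatrix A;
 *   // ... set up A ...
 *
 *   PETScWrappers::MPI::BlockVector range_v, domain_v;
 *   range_v.reinit(A.locally_owned_range_indices(), A.get_mpi_communicator());
 *   domain_v.reinit(A.locally_owned_domain_indices(), A.get_mpi_communicator());
 * @endcode
 */
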
#endif // DEAL_II_WITH_PETSC

#endif // dealii_petsc_block_vector_h