  SparsityTools::distribute_sparsity_pattern(dynamic_sparsity_pattern,
                                             dof_handler.locally_owned_dofs(),
                                             mpi_communicator,
                                             locally_relevant_dofs);

  system_rhs.reinit(locally_owned_dofs, mpi_communicator);
  system_matrix.reinit(locally_owned_dofs,
                       dynamic_sparsity_pattern,
                       mpi_communicator);

  CDR::create_system_matrix<dim>(dof_handler,
                                 quad,
                                 convection_function,
                                 parameters,
                                 time_step,
                                 constraints,
                                 system_matrix);
  system_matrix.compress(VectorOperation::add);
  preconditioner.initialize(system_matrix);
}


template <int dim>
void CDRProblem<dim>::time_iterate()
{
  double current_time = parameters.start_time;
  CDR::WritePVTUOutput pvtu_output(parameters.patch_level);
  for (unsigned int time_step_n = 0; time_step_n < parameters.n_time_steps;
       ++time_step_n)
    {
      current_time += time_step;

      system_rhs = 0.0;
      CDR::create_system_rhs<dim>(dof_handler,
                                  quad,
                                  convection_function,
                                  forcing_function,
                                  parameters,
                                  locally_relevant_solution,
                                  constraints,
                                  current_time,
                                  system_rhs);
      system_rhs.compress(VectorOperation::add);

      SolverControl solver_control(dof_handler.n_dofs(),
                                   1e-6 * system_rhs.l2_norm(),
                                   /*log_history = */ false,
                                   /*log_result = */ false);
      TrilinosWrappers::SolverGMRES solver(solver_control);
      solver.solve(system_matrix,
                   completely_distributed_solution,
                   system_rhs,
                   preconditioner);
      constraints.distribute(completely_distributed_solution);
      locally_relevant_solution = completely_distributed_solution;

      if (time_step_n % parameters.save_interval == 0)
        {
          pvtu_output.write_output(dof_handler,
                                   locally_relevant_solution,
                                   time_step_n,
                                   current_time);
        }

      refine_mesh();
    }
}


template <int dim>
void CDRProblem<dim>::refine_mesh()
{
  using FunctionMap = std::map<types::boundary_id, const Function<dim> *>;

  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
  KellyErrorEstimator<dim>::estimate(dof_handler,
                                     QGauss<dim - 1>(fe.degree + 1),
                                     FunctionMap(),
                                     locally_relevant_solution,
                                     estimated_error_per_cell);

This solver uses a crude refinement strategy where cells with relatively high errors are refined and cells with relatively low errors are coarsened. The maximum refinement level is capped to prevent run-away refinement. (A library-provided alternative to this hand-rolled marking is sketched below, after the level cap.)

  for (const auto &cell : triangulation.active_cell_iterators())
    {
      if (std::abs(estimated_error_per_cell[cell->active_cell_index()]) >=
          1e-3)
        {
          cell->set_refine_flag();
        }
      else if (std::abs(estimated_error_per_cell[cell->active_cell_index()]) <=
               1e-5)
        {
          cell->set_coarsen_flag();
        }
    }

  if (triangulation.n_levels() > parameters.max_refinement_level)
    {
      for (const auto &cell : triangulation.cell_iterators_on_level(
             parameters.max_refinement_level))
        {
          cell->clear_refine_flag();
        }
    }
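
As an aside: instead of hand-picked thresholds like 1e-3 and 1e-5, deal.II can mark cells by fixed fractions of the error distribution. The following is a minimal sketch (not part of this program) of what could replace the threshold loop above, assuming the same <code>triangulation</code> and <code>estimated_error_per_cell</code>; the fractions 0.3 and 0.1 are illustrative, not tuned:

  #include <deal.II/distributed/grid_refinement.h>

  // Flag the 30% of active cells with the largest error estimates for
  // refinement and the 10% with the smallest for coarsening. This only
  // sets flags, so the level cap above would still be needed.
  parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
    triangulation, estimated_error_per_cell, 0.3, 0.1);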

Transferring the solution between different grids is ultimately just a few function calls, but they must be made in exactly the right order.

  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    solution_transfer(dof_handler);

  triangulation.prepare_coarsening_and_refinement();
  solution_transfer.prepare_for_coarsening_and_refinement(
    locally_relevant_solution);
  triangulation.execute_coarsening_and_refinement();

  setup_dofs();

The <code>solution_transfer</code> object stores a pointer to <code>locally_relevant_solution</code>, so when parallel::distributed::SolutionTransfer::interpolate is called it uses those values to populate <code>temporary</code>.

  TrilinosWrappers::MPI::Vector temporary(locally_owned_dofs, mpi_communicator);
  solution_transfer.interpolate(temporary);

After <code>temporary</code> has the correct value, this call correctly populates <code>completely_distributed_solution</code>, which had its index set updated above with the call to <code>setup_dofs</code>.

  completely_distributed_solution = temporary;
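
For reference, here is the complete ordering that this function must respect, condensed from the calls above:

  // 1. Construct the SolutionTransfer against the still-unrefined DoFHandler.
  // 2. triangulation.prepare_coarsening_and_refinement();
  // 3. solution_transfer.prepare_for_coarsening_and_refinement(
  //      locally_relevant_solution);
  // 4. triangulation.execute_coarsening_and_refinement();
  // 5. setup_dofs();  -- rebuilds the locally owned and relevant index sets
  // 6. solution_transfer.interpolate(temporary);  -- temporary is non-ghosted
  // 7. completely_distributed_solution = temporary;

Swapping any two of these steps (for example, interpolating before redistributing the degrees of freedom) would leave the vectors sized for the wrong mesh.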

Constraints cannot be applied to
@ref GlossGhostedVector "vectors with ghost entries" since the ghost entries are write only, so this first goes through the completely distributed vector.

  constraints.distribute(completely_distributed_solution);
  locally_relevant_solution = completely_distributed_solution;

  setup_system();
}
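
This write-only restriction on ghost entries is a general property of deal.II's parallel vectors, not something specific to this program. A minimal sketch of the idiom, assuming the usual <code>locally_owned_dofs</code> and <code>locally_relevant_dofs</code> index sets (the names <code>distributed</code> and <code>ghosted</code> are hypothetical):

  // A non-ghosted vector: every entry is locally owned and writable.
  TrilinosWrappers::MPI::Vector distributed(locally_owned_dofs,
                                            mpi_communicator);
  // A ghosted vector: ghost entries can be read but not written, so
  // constraints.distribute() cannot act on it directly.
  TrilinosWrappers::MPI::Vector ghosted(locally_owned_dofs,
                                        locally_relevant_dofs,
                                        mpi_communicator);

  constraints.distribute(distributed); // fine: all entries are writable
  ghosted = distributed;               // assignment also updates ghost values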


template <int dim>
void CDRProblem<dim>::run()
{
  setup_geometry();
  setup_dofs();
  setup_system();
  time_iterate();
}


constexpr int dim{2};


int main(int argc, char *argv[])
{

One of the new features in C++11 is the <code>chrono</code> component of the standard library. This gives us an easy way to time the output; a stripped-down version of the same timing pattern appears after the program below.

  auto t0 = std::chrono::high_resolution_clock::now();

  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
  CDR::Parameters parameters;
  parameters.read_parameter_file("parameters.prm");
  CDRProblem<dim> cdr_problem(parameters);
  cdr_problem.run();

  auto t1 = std::chrono::high_resolution_clock::now();
  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
    {
      std::cout << "time elapsed: "
                << std::chrono::duration_cast<std::chrono::milliseconds>(t1 -
                                                                         t0)
                     .count()
                << " milliseconds." << std::endl;
    }

  return 0;
}