Reference documentation for deal.II version 9.4.1
cdr.h
 *     SparsityTools::distribute_sparsity_pattern(dynamic_sparsity_pattern,
 *                                                dof_handler.locally_owned_dofs(),
 *                                                mpi_communicator,
 *                                                locally_relevant_dofs);
 *
 *     system_rhs.reinit(locally_owned_dofs, mpi_communicator);
 *     system_matrix.reinit(locally_owned_dofs,
 *                          dynamic_sparsity_pattern,
 *                          mpi_communicator);
 *
 *     CDR::create_system_matrix<dim>(dof_handler,
 *                                    quad,
 *                                    convection_function,
 *                                    parameters,
 *                                    time_step,
 *                                    constraints,
 *                                    system_matrix);
 *     system_matrix.compress(VectorOperation::add);
 *     preconditioner.initialize(system_matrix);
 *   }
 *
 *
 *   template <int dim>
 *   void
 *   CDRProblem<dim>::time_iterate()
 *   {
 *     double               current_time = parameters.start_time;
 *     CDR::WritePVTUOutput pvtu_output(parameters.patch_level);
 *     for (unsigned int time_step_n = 0; time_step_n < parameters.n_time_steps;
 *          ++time_step_n)
 *       {
 *         current_time += time_step;
 *
 *         system_rhs = 0.0;
 *         CDR::create_system_rhs<dim>(dof_handler,
 *                                     quad,
 *                                     convection_function,
 *                                     forcing_function,
 *                                     parameters,
 *                                     locally_relevant_solution,
 *                                     constraints,
 *                                     current_time,
 *                                     system_rhs);
 *         system_rhs.compress(VectorOperation::add);
 *
 *         SolverControl solver_control(dof_handler.n_dofs(),
 *                                      1e-6 * system_rhs.l2_norm(),
 *                                      /*log_history = */ false,
 *                                      /*log_result = */ false);
 *         TrilinosWrappers::SolverGMRES solver(solver_control);
 *         solver.solve(system_matrix,
 *                      completely_distributed_solution,
 *                      system_rhs,
 *                      preconditioner);
 *         constraints.distribute(completely_distributed_solution);
 *         locally_relevant_solution = completely_distributed_solution;
 *
 *         if (time_step_n % parameters.save_interval == 0)
 *           {
 *             pvtu_output.write_output(dof_handler,
 *                                      locally_relevant_solution,
 *                                      time_step_n,
 *                                      current_time);
 *           }
 *
 *         refine_mesh();
 *       }
 *   }
 *
 *
 *   template <int dim>
 *   void
 *   CDRProblem<dim>::refine_mesh()
 *   {
 *     using FunctionMap = std::map<types::boundary_id, const Function<dim> *>;
 *
 *     Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
 *     KellyErrorEstimator<dim>::estimate(dof_handler,
 *                                        QGauss<dim - 1>(fe.degree + 1),
 *                                        FunctionMap(),
 *                                        locally_relevant_solution,
 *                                        estimated_error_per_cell);
 *
 * @endcode
 *
 * This solver uses a crude refinement strategy where cells with relatively
 * high errors are refined and cells with relatively low errors are
 * coarsened. The maximum refinement level is capped to prevent runaway
 * refinement.
 *
 * @code
 *     for (const auto &cell : triangulation.active_cell_iterators())
 *       {
 *         if (std::abs(estimated_error_per_cell[cell->active_cell_index()]) >=
 *             1e-3)
 *           {
 *             cell->set_refine_flag();
 *           }
 *         else if (std::abs(estimated_error_per_cell[cell->active_cell_index()]) <=
 *                  1e-5)
 *           {
 *             cell->set_coarsen_flag();
 *           }
 *       }
 *
 *     if (triangulation.n_levels() > parameters.max_refinement_level)
 *       {
 *         for (const auto &cell : triangulation.cell_iterators_on_level(
 *                parameters.max_refinement_level))
 *           {
 *             cell->clear_refine_flag();
 *           }
 *       }
 *
 * @endcode
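 *
 * As an aside (this is not what the program above does), other deal.II
 * tutorial programs such as step-40 refine fixed fractions of cells rather
 * than using absolute thresholds. A minimal sketch of that alternative,
 * reusing the <code>estimated_error_per_cell</code> vector computed above,
 * might look like this:
 *
 * @code
 *     // Flag the 30% of cells with the largest error indicators for
 *     // refinement and the 3% with the smallest for coarsening.
 *     parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
 *       triangulation, estimated_error_per_cell, 0.3, 0.03);
 * @endcode
 *
 * The fractions 0.3 and 0.03 are illustrative values, and the level cap
 * shown above would still be needed to prevent runaway refinement.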
 *
 * Transferring the solution between different grids is ultimately just a
 * few function calls, but they must be made in exactly the right order.
 *
 * @code
 *     parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
 *       solution_transfer(dof_handler);
 *
 *     triangulation.prepare_coarsening_and_refinement();
 *     solution_transfer.prepare_for_coarsening_and_refinement(
 *       locally_relevant_solution);
 *     triangulation.execute_coarsening_and_refinement();
 *
 *     setup_dofs();
 *
 * @endcode
 *
 * The <code>solution_transfer</code> object stores a pointer to
 * <code>locally_relevant_solution</code>, so when
 * parallel::distributed::SolutionTransfer::interpolate() is called it uses
 * those values to populate <code>temporary</code>.
 *
 * @code
 *     TrilinosWrappers::MPI::Vector temporary(locally_owned_dofs,
 *                                             mpi_communicator);
 *     solution_transfer.interpolate(temporary);
 * @endcode
 *
 * After <code>temporary</code> has the correct value, this call correctly
 * populates <code>completely_distributed_solution</code>, which had its
 * index set updated above with the call to <code>setup_dofs</code>.
 *
 * @code
 *     completely_distributed_solution = temporary;
 * @endcode
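 *
 * As a reminder of the parallel vector layout this relies on (the actual
 * calls live in <code>setup_dofs</code> and <code>setup_system</code>,
 * which are not shown in this excerpt, so treat this as a sketch):
 * <code>completely_distributed_solution</code> only stores locally owned
 * entries and is therefore writable, while
 * <code>locally_relevant_solution</code> also stores ghost entries. The two
 * vectors are typically reinitialized along these lines:
 *
 * @code
 *     // Writable vector: locally owned entries only.
 *     completely_distributed_solution.reinit(locally_owned_dofs,
 *                                            mpi_communicator);
 *     // Ghosted vector: locally owned plus locally relevant (ghost) entries.
 *     locally_relevant_solution.reinit(locally_owned_dofs,
 *                                      locally_relevant_dofs,
 *                                      mpi_communicator);
 * @endcode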
 *
 * Constraints cannot be applied to
 * @ref GlossGhostedVector "vectors with ghost entries" since the ghost
 * entries are write only, so this first goes through the completely
 * distributed vector.
 *
 * @code
 *     constraints.distribute(completely_distributed_solution);
 *     locally_relevant_solution = completely_distributed_solution;
 *     setup_system();
 *   }
 *
 *
 *   template <int dim>
 *   void
 *   CDRProblem<dim>::run()
 *   {
 *     setup_geometry();
 *     setup_dofs();
 *     setup_system();
 *     time_iterate();
 *   }
 *
 *
 *   constexpr int dim{2};
 *
 *
 *   int
 *   main(int argc, char *argv[])
 *   {
 * @endcode
 *
 * One of the new features in C++11 is the <code>chrono</code> component of
 * the standard library. This gives us an easy way to time how long the
 * program takes to run.
 *
 * @code
 *     auto t0 = std::chrono::high_resolution_clock::now();
 *
 *     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
 *     CDR::Parameters parameters;
 *     parameters.read_parameter_file("parameters.prm");
 *     CDRProblem<dim> cdr_problem(parameters);
 *     cdr_problem.run();
 *
 *     auto t1 = std::chrono::high_resolution_clock::now();
 *     if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
 *       {
 *         std::cout << "time elapsed: "
 *                   << std::chrono::duration_cast<std::chrono::milliseconds>(
 *                        t1 - t0)
 *                        .count()
 *                   << " milliseconds." << std::endl;
 *       }
 *
 *     return 0;
 *   }
 * @endcode
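 *
 * As an aside, deal.II also provides its own stopwatch classes,
 * <code>Timer</code> and <code>TimerOutput</code> (declared in
 * <code>deal.II/base/timer.h</code>). A minimal sketch of the same
 * measurement using <code>Timer</code> instead of <code>chrono</code>
 * would be:
 *
 * @code
 *     Timer timer; // starts timing on construction
 *
 *     // ... set up and run the problem as above ...
 *
 *     timer.stop();
 *     if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
 *       std::cout << "time elapsed: " << timer.wall_time() << " seconds."
 *                 << std::endl;
 * @endcode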
 *
 *
 */