 Reference documentation for deal.II version 9.3.3
cdr.h
1247 * SparsityTools::distribute_sparsity_pattern
1248 * (dynamic_sparsity_pattern, dof_handler.n_locally_owned_dofs_per_processor(),
1249 * mpi_communicator, locally_relevant_dofs);
1250 *
1251 * system_rhs.reinit(locally_owned_dofs, mpi_communicator);
1252 * system_matrix.reinit(locally_owned_dofs, dynamic_sparsity_pattern,
1253 * mpi_communicator);
1254 *
1255 * CDR::create_system_matrix<dim>
1256 * (dof_handler, quad, convection_function, parameters, time_step, constraints,
1257 * system_matrix);
1258 * system_matrix.compress(VectorOperation::add);
1259 * preconditioner.initialize(system_matrix);
1260 * }
1261 *
1262 *
1263 * template<int dim>
1264 * void CDRProblem<dim>::time_iterate()
1265 * {
1266 * double current_time = parameters.start_time;
1267 * CDR::WritePVTUOutput pvtu_output(parameters.patch_level);
1268 * for (unsigned int time_step_n = 0; time_step_n < parameters.n_time_steps;
1269 * ++time_step_n)
1270 * {
1271 * current_time += time_step;
1272 *
1273 * system_rhs = 0.0;
1274 * CDR::create_system_rhs<dim>
1275 * (dof_handler, quad, convection_function, forcing_function, parameters,
1276 * locally_relevant_solution, constraints, current_time, system_rhs);
1277 * system_rhs.compress(VectorOperation::add);
1278 *
1279 * SolverControl solver_control(dof_handler.n_dofs(),
1280 * 1e-6*system_rhs.l2_norm(),
1281 * /*log_history = */ false,
1282 * /*log_result = */ false);
1283 * TrilinosWrappers::SolverGMRES solver(solver_control);
1284 * solver.solve(system_matrix, completely_distributed_solution, system_rhs,
1285 * preconditioner);
1286 * constraints.distribute(completely_distributed_solution);
1287 * locally_relevant_solution = completely_distributed_solution;
1288 *
1289 * if (time_step_n % parameters.save_interval == 0)
1290 * {
1291 * pvtu_output.write_output(dof_handler, locally_relevant_solution,
1292 * time_step_n, current_time);
1293 * }
1294 *
1295 * refine_mesh();
1296 * }
1297 * }
1298 *
1299 *
1300 * template<int dim>
1301 * void CDRProblem<dim>::refine_mesh()
1302 * {
1303 * using FunctionMap =
1304 * std::map<types::boundary_id, const Function<dim> *>;
1305 *
1306 * Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
1307 * KellyErrorEstimator<dim>::estimate
1308 * (dof_handler, QGauss<dim - 1>(fe.degree + 1), FunctionMap(),
1309 * locally_relevant_solution, estimated_error_per_cell);
1310 *
1311 * @endcode
1312 *
1313 * This solver uses a crude refinement strategy: cells whose error indicator
1314 * exceeds a fixed threshold are refined and cells whose indicator falls below
1315 * a (much smaller) second threshold are coarsened. The maximum refinement
1316 * level is capped to prevent runaway refinement.
1317 *
1318 * @code
1319 * for (const auto &cell : triangulation.active_cell_iterators())
1320 * {
1321 * if (std::abs(estimated_error_per_cell[cell->active_cell_index()]) >= 1e-3)
1322 * {
1323 * cell->set_refine_flag();
1324 * }
1325 * else if (std::abs(estimated_error_per_cell[cell->active_cell_index()]) <= 1e-5)
1326 * {
1327 * cell->set_coarsen_flag();
1328 * }
1329 * }
1330 *
1331 * if (triangulation.n_levels() > parameters.max_refinement_level)
1332 * {
1333 * for (const auto &cell :
1334 * triangulation.cell_iterators_on_level(parameters.max_refinement_level))
1335 * {
1336 * cell->clear_refine_flag();
1337 * }
1338 * }
1339 *
1340 * @endcode
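As an aside (not part of this program), the deal.II tutorials more commonly flag a fixed fraction of cells rather than using absolute thresholds. A minimal sketch of that alternative, reusing the same <code>triangulation</code> and <code>estimated_error_per_cell</code> objects from above, would be:

@code
// requires <deal.II/distributed/grid_refinement.h>
// Flag the 30% of cells with the largest error indicators for refinement and
// the 10% with the smallest indicators for coarsening; the fractions here are
// illustrative, not taken from this program.
parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
  triangulation, estimated_error_per_cell, 0.3, 0.1);
@endcode

This keeps the number of flagged cells predictable regardless of the absolute size of the error indicators; the explicit cap on the refinement level shown above can still be applied afterwards in the same way.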
1341 *
1342 * Transferring the solution between different grids is ultimately just a
1343 * few function calls, but they must be made in exactly the right order.
1344 *
1345 * @code
1346 * parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
1347 * solution_transfer(dof_handler);
1348 *
1349 * triangulation.prepare_coarsening_and_refinement();
1350 * solution_transfer.prepare_for_coarsening_and_refinement
1351 * (locally_relevant_solution);
1352 * triangulation.execute_coarsening_and_refinement();
1353 *
1354 * setup_dofs();
1355 *
1356 * @endcode
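For reference, the complete sequence in <code>refine_mesh()</code>, combining the calls above with the steps discussed below, looks like this (a condensed restatement of the program's own code, not additional functionality):

@code
parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
  solution_transfer(dof_handler);

// 1. With the cells flagged, prepare the triangulation and the transfer
//    object while the old DoFHandler and solution are still valid.
triangulation.prepare_coarsening_and_refinement();
solution_transfer.prepare_for_coarsening_and_refinement(locally_relevant_solution);

// 2. Change the mesh and redistribute the degrees of freedom.
triangulation.execute_coarsening_and_refinement();
setup_dofs();

// 3. Interpolate into a vector without ghost entries, apply constraints,
//    and copy the result back into the ghosted solution vector.
TrilinosWrappers::MPI::Vector temporary(locally_owned_dofs, mpi_communicator);
solution_transfer.interpolate(temporary);
completely_distributed_solution = temporary;
constraints.distribute(completely_distributed_solution);
locally_relevant_solution = completely_distributed_solution;

// 4. Rebuild the system matrix and preconditioner on the new mesh.
setup_system();
@endcode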
1357 *
1358 * The <code>solution_transfer</code> object stores a pointer to
1359 * <code>locally_relevant_solution</code>, so when
1360 * <code>parallel::distributed::SolutionTransfer::interpolate</code> is called it uses
1361 * those values to populate <code>temporary</code>.
1362 *
1363 * @code
1364 * TrilinosWrappers::MPI::Vector temporary
1365 * (locally_owned_dofs, mpi_communicator);
1366 * solution_transfer.interpolate(temporary);
1367 * @endcode
1368 *
1369 * After <code>temporary</code> has the correct value, this call correctly
1370 * populates <code>completely_distributed_solution</code>, which had its
1371 * index set updated above with the call to <code>setup_dofs</code>.
1372 *
1373 * @code
1374 * completely_distributed_solution = temporary;
1375 * @endcode
1376 *
1377 * Constraints cannot be applied to
1378 * @ref GlossGhostedVector "vectors with ghost entries" since the ghost
1379 * entries are write-only, so this first goes through the completely
1380 * distributed vector.
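For context, the two kinds of vectors differ only in how they are reinitialized. A minimal sketch using the standard <code>TrilinosWrappers::MPI::Vector::reinit</code> overloads (the variable names here are illustrative, not from this program):

@code
// A completely distributed vector: each process stores only the entries it
// owns. Solvers write into vectors of this kind and constraints can be
// applied to them.
TrilinosWrappers::MPI::Vector distributed;
distributed.reinit(locally_owned_dofs, mpi_communicator);

// A ghosted vector: in addition to the owned entries it also stores copies
// of the "ghost" entries needed to evaluate the solution on locally owned
// cells, e.g. for output, error estimation, and assembling the right hand side.
TrilinosWrappers::MPI::Vector ghosted;
ghosted.reinit(locally_owned_dofs, locally_relevant_dofs, mpi_communicator);

// Assigning a distributed vector to a ghosted one communicates the ghost
// values; that is what locally_relevant_solution =
// completely_distributed_solution; does in the code below.
ghosted = distributed;
@endcode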
1381 *
1382 * @code
1383 * constraints.distribute(completely_distributed_solution);
1384 * locally_relevant_solution = completely_distributed_solution;
1385 * setup_system();
1386 * }
1387 *
1388 *
1389 * template<int dim>
1390 * void CDRProblem<dim>::run()
1391 * {
1392 * setup_geometry();
1393 * setup_dofs();
1394 * setup_system();
1395 * time_iterate();
1396 * }
1397 *
1398 *
1399 * constexpr int dim {2};
1400 *
1401 *
1402 * int main(int argc, char *argv[])
1403 * {
1404 * @endcode
1405 *
1406 * One of the new features in C++11 is the <code>chrono</code> component of
1407 * the standard library. This gives us an easy way to time the output.
1408 *
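As a self-contained aside (independent of deal.II), the same timing pattern looks like the sketch below; <code>std::chrono::steady_clock</code> is generally preferable to <code>high_resolution_clock</code> for measuring elapsed time, since it is guaranteed to be monotonic while <code>high_resolution_clock</code> is often just an alias for another clock:

@code
#include <chrono>
#include <iostream>

int main()
{
  const auto t0 = std::chrono::steady_clock::now();

  // ... work to be timed ...

  const auto t1 = std::chrono::steady_clock::now();
  std::cout << "time elapsed: "
            << std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count()
            << " milliseconds." << std::endl;
  return 0;
}
@endcode

The program itself wraps its entire run in the same pattern: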
1409 * @code
1410 * auto t0 = std::chrono::high_resolution_clock::now();
1411 *
1412 * Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
1413 * CDR::Parameters parameters;
1414 * parameters.read_parameter_file("parameters.prm");
1415 * CDRProblem<dim> cdr_problem(parameters);
1416 * cdr_problem.run();
1417 *
1418 * auto t1 = std::chrono::high_resolution_clock::now();
1419 * if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
1420 * {
1421 * std::cout << "time elapsed: "
1422 * << std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count()
1423 * << " milliseconds."
1424 * << std::endl;
1425 * }
1426 *
1427 * return 0;
1428 * }
1429 * @endcode
1430
1431
1432 */