 *     , particle_handler(triangulation, mapping, /*n_properties=*/dim)
 *     , next_unused_particle_id(0)
 *     , n_recently_lost_particles(0)
 *     , n_total_lost_particles(0)
 *     , n_particles_lost_through_anode(0)
 *     , time(0, 1e-4)
 *   {
 *     particle_handler.signals.particle_lost.connect(
 *       [this](const typename Particles::ParticleIterator<dim> &particle,
 *              const typename Triangulation<dim>::active_cell_iterator &cell) {
 *         this->track_lost_particle(particle, cell);
 *       });
 *   }
 *
 *
 *
 * @endcode
 *
 *
 * <a name="ThecodeCathodeRaySimulatormake_gridcodefunction"></a>
 * <h4>The <code>CathodeRaySimulator::make_grid</code> function</h4>
 *

 *
 * The next function is then responsible for generating the mesh on which
 * we want to solve. Recall what the domain looks like:
 * <p align="center">
 *   <img
 *     src="https://www.dealii.org/images/steps/developer/step-19.geometry.png"
 *     alt="The geometry used in this program"
 *     width="600">
 * </p>
 * We subdivide this geometry into a mesh of @f$4\times 2@f$ cells that looks
 * like this:
 * <div class=CodeFragmentInTutorialComment>
 * @code
 * *---*---*---*---*
 * \   |   |   |   |
 *  *--*---*---*---*
 * /   |   |   |   |
 * *---*---*---*---*
 * @endcode
 * </div>
 * The way this is done is by first defining where the @f$15=5\times 3@f$
 * vertices are located -- here, we say that they are on integer points
 * with the middle one on the left side moved to the right by a value of
 * `delta=0.5`.
 *

 *
 * In the following, we then have to say which vertices together form
 * the 8 cells. The following code is then entirely equivalent to what
 * we also do in @ref step_14 "step-14":
 *
 * @code
 *   template <int dim>
 *   void CathodeRaySimulator<dim>::make_grid()
 *   {
 *     static_assert(dim == 2,
 *                   "This function is currently only implemented for 2d.");
 *
 *     const double       delta = 0.5;
 *     const unsigned int nx    = 5;
 *     const unsigned int ny    = 3;
 *
 *     const std::vector<Point<dim>> vertices
 *       = {{0, 0},
 *          {1, 0},
 *          {2, 0},
 *          {3, 0},
 *          {4, 0},
 *          {delta, 1},
 *          {1, 1},
 *          {2, 1},
 *          {3, 1},
 *          {4, 1},
 *          {0, 2},
 *          {1, 2},
 *          {2, 2},
 *          {3, 2},
 *          {4, 2}};
 *     AssertDimension(vertices.size(), nx * ny);
 *
 *     const std::vector<unsigned int> cell_vertices[(nx - 1) * (ny - 1)] = {
 *       {0, 1, nx + 0, nx + 1},
 *       {1, 2, nx + 1, nx + 2},
 *       {2, 3, nx + 2, nx + 3},
 *       {3, 4, nx + 3, nx + 4},
 *
 *       {5, nx + 1, 2 * nx + 0, 2 * nx + 1},
 *       {nx + 1, nx + 2, 2 * nx + 1, 2 * nx + 2},
 *       {nx + 2, nx + 3, 2 * nx + 2, 2 * nx + 3},
 *       {nx + 3, nx + 4, 2 * nx + 3, 2 * nx + 4}};
 *
 * @endcode
 *
 * With these arrays out of the way, we can move to slightly
 * higher-level data structures. We create a vector of CellData
 * objects that store for each cell to be created the vertices in
 * question as well as the @ref GlossMaterialId "material id" (which
 * we will here simply set to zero since we don't use it in the program).
 *

 *
 * This information is then handed to the
 * Triangulation::create_triangulation() function, and the mesh is twice
 * globally refined.
 *
 * @code
 *     std::vector<CellData<dim>> cells((nx - 1) * (ny - 1), CellData<dim>());
 *     for (unsigned int i = 0; i < cells.size(); ++i)
 *       {
 *         cells[i].vertices    = cell_vertices[i];
 *         cells[i].material_id = 0;
 *       }
 *
 *     triangulation.create_triangulation(
 *       vertices,
 *       cells,
 *       SubCellData()); // No boundary information
 *
 *     triangulation.refine_global(2);
 *
 * @endcode
 *
 * The remaining part of the function loops over all cells and their faces,
 * and, if a face is at the boundary, determines which boundary indicator
 * should be applied to it. The various conditions should make sense if
 * you compare the code with the picture of the geometry above.
 *

 *
 * Once done with this step, we refine the mesh once more globally.
 *
 * @code
 *     for (auto &cell : triangulation.active_cell_iterators())
 *       for (auto &face : cell->face_iterators())
 *         if (face->at_boundary())
 *           {
 *             if ((face->center()[0] > 0) && (face->center()[0] < 0.5) &&
 *                 (face->center()[1] > 0) && (face->center()[1] < 2))
 *               face->set_boundary_id(BoundaryIds::cathode);
 *             else if ((face->center()[0] > 0) && (face->center()[0] < 2))
 *               face->set_boundary_id(BoundaryIds::focus_element);
 *             else if ((face->center()[0] > 4 - 1e-12) &&
 *                      ((face->center()[1] > 1.5) || (face->center()[1] < 0.5)))
 *               face->set_boundary_id(BoundaryIds::anode);
 *             else
 *               face->set_boundary_id(BoundaryIds::open);
 *           }
 *
 *     triangulation.refine_global(1);
 *   }
 *
 *
 * @endcode
 *
 *
 * <a name="ThecodeCathodeRaySimulatorsetup_systemcodefunction"></a>
 * <h4>The <code>CathodeRaySimulator::setup_system</code> function</h4>
 *

 *
 * The next function in this program deals with setting up the various
 * objects related to solving the partial differential equations. It is
 * in essence a copy of the corresponding function in @ref step_6 "step-6"
 * and requires no further discussion.
 *
 * @code
 *   template <int dim>
 *   void CathodeRaySimulator<dim>::setup_system()
 *   {
 *     dof_handler.distribute_dofs(fe);
 *
 *     solution.reinit(dof_handler.n_dofs());
 *     system_rhs.reinit(dof_handler.n_dofs());
 *
 *     constraints.clear();
 *     DoFTools::make_hanging_node_constraints(dof_handler, constraints);
 *
 *     VectorTools::interpolate_boundary_values(dof_handler,
 *                                              BoundaryIds::cathode,
 *                                              Functions::ConstantFunction<dim>(
 *                                                -Constants::V0),
 *                                              constraints);
 *     VectorTools::interpolate_boundary_values(dof_handler,
 *                                              BoundaryIds::focus_element,
 *                                              Functions::ConstantFunction<dim>(
 *                                                -Constants::V0),
 *                                              constraints);
 *     VectorTools::interpolate_boundary_values(dof_handler,
 *                                              BoundaryIds::anode,
 *                                              Functions::ConstantFunction<dim>(
 *                                                +Constants::V0),
 *                                              constraints);
 *     constraints.close();
 *
 *     DynamicSparsityPattern dsp(dof_handler.n_dofs());
 *     DoFTools::make_sparsity_pattern(dof_handler,
 *                                     dsp,
 *                                     constraints,
 *                                     /*keep_constrained_dofs = */ false);
 *     sparsity_pattern.copy_from(dsp);
 *
 *     system_matrix.reinit(sparsity_pattern);
 *   }
 *
 *
 * @endcode
 *
 *
 * <a name="ThecodeCathodeRaySimulatorassemble_systemcodefunction"></a>
 * <h4>The <code>CathodeRaySimulator::assemble_system</code> function</h4>
 *

 *
 * The function that computes
 * the matrix entries is again in essence a copy of the
 * corresponding function in @ref step_6 "step-6":
 *
 * @code
 *   template <int dim>
 *   void CathodeRaySimulator<dim>::assemble_system()
 *   {
 *     system_matrix = 0;
 *     system_rhs    = 0;
 *
 *     const QGauss<dim> quadrature_formula(fe.degree + 1);
 *
 *     FEValues<dim> fe_values(fe,
 *                             quadrature_formula,
 *                             update_values | update_gradients |
 *                               update_quadrature_points | update_JxW_values);
 *
 *     const unsigned int dofs_per_cell = fe.dofs_per_cell;
 *
 *     FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
 *     Vector<double>     cell_rhs(dofs_per_cell);
 *
 *     std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
 *
 *     for (const auto &cell : dof_handler.active_cell_iterators())
 *       {
 *         cell_matrix = 0;
 *         cell_rhs    = 0;
 *
 *         fe_values.reinit(cell);
 *
 *         for (const unsigned int q_index : fe_values.quadrature_point_indices())
 *           for (const unsigned int i : fe_values.dof_indices())
 *             {
 *               for (const unsigned int j : fe_values.dof_indices())
 *                 cell_matrix(i, j) +=
 *                   (fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
 *                    fe_values.shape_grad(j, q_index) * // grad phi_j(x_q)
 *                    fe_values.JxW(q_index));           // dx
 *             }
 *
 * @endcode
 *
 * The only interesting part of this function is how it forms the right
 * hand side of the linear system. Recall that the right hand side
 * of the PDE is
 * @f[
 *   \sum_p (N e)\delta(\mathbf x-\mathbf x_p),
 * @f]
 * where we have used @f$p@f$ to index the particles here to avoid
 * confusion with the shape function @f$\varphi_i@f$; @f$\mathbf x_p@f$
 * is the position of the @f$p@f$th particle.
 *

 *
 * When multiplied by a test function @f$\varphi_i@f$ and integrated over
 * the domain, this results in a right hand side vector
 * @f{align*}{
 *   F_i &= \int_\Omega \varphi_i (\mathbf x)\left[
 *     \sum_p (N e)\delta(\mathbf x-\mathbf x_p) \right] dx
 *   \\  &= \sum_p (N e) \varphi_i(\mathbf x_p).
 * @f}
 * Note that the final line no longer contains an integral, and
 * consequently also no occurrence of @f$dx@f$ which would require the
 * appearance of the `JxW` symbol in our code.
 *

 *
 * For a given cell @f$K@f$, this cell's contribution to the right hand
 * side is then
 * @f{align*}{
 *   F_i^K &= \sum_{p, \mathbf x_p\in K} (N e) \varphi_i(\mathbf x_p),
 * @f}
 * i.e., we only have to worry about those particles that are actually
 * located on the current cell @f$K@f$.
 *

 *
 * In practice, what we do here is the following: If there are any
 * particles on the current cell, then we first obtain an iterator range
 * pointing to the first particle of that cell as well as the particle
 * past the last one on this cell (or the end iterator) -- i.e., a
 * half-open range as is common for C++ functions. Knowing now the list
 * of particles, we query their reference locations (with respect to
 * the reference cell), evaluate the shape functions in these reference
 * locations, and compute the force according to the formula above
 * (without any FEValues::JxW).
 *

 *
 * @note It is worth pointing out that calling the
 * Particles::ParticleHandler::particles_in_cell() and
 * Particles::ParticleHandler::n_particles_in_cell() functions is not
 * very efficient on problems with a large number of particles. But it
 * illustrates the easiest way to write this algorithm, and so we are
 * willing to incur this cost for the moment for expository purposes.
 * We discuss the issue in more detail in the
 * <a href="#extensions">"possibilities for extensions" section</a>
 * below, and use a better approach in @ref step_70 "step-70", for example.
 *
 * @code
 *         if (particle_handler.n_particles_in_cell(cell) > 0)
 *           for (const auto &particle : particle_handler.particles_in_cell(cell))
 *             {
 *               const Point<dim> &reference_location =
 *                 particle.get_reference_location();
 *               for (const unsigned int i : fe_values.dof_indices())
 *                 cell_rhs(i) +=
 *                   (fe.shape_value(i, reference_location) * // phi_i(x_p)
 *                    (-Constants::electrons_per_particle *   // N
 *                     Constants::electron_charge));          // e
 *             }
 *
 * @endcode
 *
 * Finally, we can copy the contributions of this cell into
 * the global matrix and right hand side vector:
 *
 * @code
 *         cell->get_dof_indices(local_dof_indices);
 *         constraints.distribute_local_to_global(
 *           cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
 *       }
 *   }
 *
 *
 * @endcode
 *
 *
 * <a name="CathodeRaySimulatorsolve"></a>
 * <h4>CathodeRaySimulator::solve</h4>
 *

 *
 * The function that solves the linear system is then again exactly as in
 * @ref step_6 "step-6":
 *
 * @code
 *   template <int dim>
 *   void CathodeRaySimulator<dim>::solve_field()
 *   {
 *     SolverControl            solver_control(1000, 1e-12);
 *     SolverCG<Vector<double>> solver(solver_control);
 *
 *     PreconditionSSOR<SparseMatrix<double>> preconditioner;
 *     preconditioner.initialize(system_matrix, 1.2);
 *
 *     solver.solve(system_matrix, solution, system_rhs, preconditioner);
 *
 *     constraints.distribute(solution);
 *   }
 *
 *
 * @endcode
 *
 *
 * <a name="CathodeRaySimulatorrefine_grid"></a>
 * <h4>CathodeRaySimulator::refine_grid</h4>
 *

 *
 * The final field-related function is the one that refines the grid. We will
 * call it a number of times in the first time step to obtain a mesh that
 * is well-adapted to the structure of the solution and, in particular,
 * resolves the various singularities in the solution that are due to
 * re-entrant corners and places where the boundary condition type
 * changes. You might want to refer to @ref step_6 "step-6" again for more details:
 *
 * @code
 *   template <int dim>
 *   void CathodeRaySimulator<dim>::refine_grid()
 *   {
 *     Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
 *
 *     KellyErrorEstimator<dim>::estimate(dof_handler,
 *                                        QGauss<dim - 1>(fe.degree + 1),
 *                                        {},
 *                                        solution,
 *                                        estimated_error_per_cell);
 *
 *     GridRefinement::refine_and_coarsen_fixed_number(triangulation,
 *                                                     estimated_error_per_cell,
 *                                                     0.1,
 *                                                     0.03);
 *
 *     triangulation.execute_coarsening_and_refinement();
 *   }
 *
 *
 * @endcode
 *
 *
 * <a name="CathodeRaySimulatorcreate_particles"></a>
 * <h4>CathodeRaySimulator::create_particles</h4>
 *

 *
 * Let us now turn to the functions that deal with particles. The first one
 * is about the creation of particles. As mentioned in the introduction,
 * we want to create a particle at points of the cathode if the electric
 * field @f$\mathbf E=\nabla V@f$ exceeds a certain threshold, i.e., if
 * @f$|\mathbf E| \ge E_\text{threshold}@f$, and if furthermore the electric field
 * points into the domain (i.e., if @f$\mathbf E \cdot \mathbf n < 0@f$). As is
 * common in the finite element method, we evaluate fields (and their
 * derivatives) at specific evaluation points; typically, these are
 * "quadrature points", and so we create a "quadrature formula" that we will
 * use to designate the points at which we want to evaluate the solution.
 * Here, we will simply take QMidpoint, implying that we will only check the
 * threshold condition at the midpoints of faces. We then use this to
 * initialize an object of type FEFaceValues to evaluate the solution at these
 * points.
 *

 *
 * All of this will then be used in a loop over all cells, their faces, and
 * specifically those faces that are at the boundary and, moreover, the
 * cathode part of the boundary.
 *
 * @code
 *   template <int dim>
 *   void CathodeRaySimulator<dim>::create_particles()
 *   {
 *     FEFaceValues<dim> fe_face_values(fe,
 *                                      QMidpoint<dim - 1>(),
 *                                      update_quadrature_points |
 *                                        update_gradients |
 *                                        update_normal_vectors);
 *
 *     std::vector<Tensor<1, dim>> solution_gradients(
 *       fe_face_values.n_quadrature_points);
 *
 *     for (const auto &cell : dof_handler.active_cell_iterators())
 *       for (const auto &face : cell->face_iterators())
 *         if (face->at_boundary() &&
 *             (face->boundary_id() == BoundaryIds::cathode))
 *           {
 *             fe_face_values.reinit(cell, face);
 *
 * @endcode
 *
 * So we have found a face on the cathode. Next, we let the
 * FEFaceValues object compute the gradient of the solution at each
 * "quadrature" point, and extract the electric field vector from
 * the gradient in the form of a Tensor variable through the methods
 * discussed in the
 * @ref vector_valued "vector-valued problems" documentation module.
 *
 * @code
 *             const FEValuesExtractors::Scalar electric_potential(0);
 *             fe_face_values[electric_potential].get_function_gradients(
 *               solution, solution_gradients);
 *             for (const unsigned int q_point :
 *                  fe_face_values.quadrature_point_indices())
 *               {
 *                 const Tensor<1, dim> E = solution_gradients[q_point];
 *
 * @endcode
 *
 * Electrons can only escape the cathode if the electric field
 * strength exceeds a threshold and,
 * crucially, if the electric field points *into* the domain.
 * Once we have checked that, we create a new
 * Particles::Particle object at this location and insert it
 * into the Particles::ParticleHandler object with a unique ID.
 *

 *
 * The only thing that may be not obvious here is that we also
 * associate with this particle the location in the reference
 * coordinates of the cell we are currently on. This is done
 * because we will in downstream functions compute quantities
 * such as the electric field at the location of the particle
 * (e.g., to compute the forces that act on it when updating its
 * position in each time step). Evaluating a finite element
 * field at arbitrary coordinates is quite an expensive
 * operation because shape functions are really only defined on
 * the reference cell, and so asking for the electric field
 * at an arbitrary point requires us first to determine the
 * reference coordinates of that point. To avoid having to
 * do this over and over, we determine these coordinates once
 * and for all and then store these reference coordinates
 * directly with the particle.
 *
 * @code
 *                 if ((E * fe_face_values.normal_vector(q_point) < 0) &&
 *                     (E.norm() > Constants::E_threshold))
 *                   {
 *                     const Point<dim> &location =
 *                       fe_face_values.quadrature_point(q_point);
 *
 *                     Particles::Particle<dim> new_particle;
 *                     new_particle.set_location(location);
 *                     new_particle.set_reference_location(
 *                       mapping.transform_real_to_unit_cell(cell, location));
 *                     new_particle.set_id(next_unused_particle_id);
 *                     particle_handler.insert_particle(new_particle, cell);
 *
 *                     ++next_unused_particle_id;
 *                   }
 *               }
 *           }
 *
 * @endcode
 *
 * At the end of all of these insertions, we let the `particle_handler`
 * update some internal statistics about the particles it stores.
 *
 * @code
 *     particle_handler.update_cached_numbers();
 *   }
 *
 *
 * @endcode
 *
 *
 * <a name="CathodeRaySimulatormove_particles"></a>
 * <h4>CathodeRaySimulator::move_particles</h4>
 *

 *
 * The second particle-related function is the one that moves the particles
 * in each time step. To do this, we have to loop over all cells, the
 * particles in each cell, and evaluate the electric field at each of the
 * particles' positions.
 *

 *
 * The approach used here is conceptually the same as used in the
 * `assemble_system()` function: We loop over all cells, find the particles
 * located there (with the same caveat about the inefficiency of the algorithm
 * used here to find these particles), and use an FEPointEvaluation object to
 * evaluate the gradient at these positions:
 *
 * @code
 *   template <int dim>
 *   void CathodeRaySimulator<dim>::move_particles()
 *   {
 *     const double dt = time.get_next_step_size();
 *
 *     Vector<double>            solution_values(fe.n_dofs_per_cell());
 *     FEPointEvaluation<1, dim> evaluator(mapping, fe, update_gradients);
 *
 *     for (const auto &cell : dof_handler.active_cell_iterators())
 *       if (particle_handler.n_particles_in_cell(cell) > 0)
 *         {
 *           const typename Particles::ParticleHandler<
 *             dim>::particle_iterator_range particles_in_cell =
 *             particle_handler.particles_in_cell(cell);
 *
 *           std::vector<Point<dim>> particle_positions;
 *           for (const auto &particle : particles_in_cell)
 *             particle_positions.push_back(particle.get_reference_location());
 *
 *           cell->get_dof_values(solution, solution_values);
 *
 * @endcode
 *
 * Then we can ask the FEPointEvaluation object for the gradients of
 * the solution (i.e., the electric field @f$\mathbf E@f$) at these
 * locations and loop over the individual particles:
 *
 * @code
 *           evaluator.reinit(cell, particle_positions);
 *           evaluator.evaluate(make_array_view(solution_values),
 *                              EvaluationFlags::gradients);
 *
 *           {
 *             typename Particles::ParticleHandler<dim>::particle_iterator
 *               particle = particles_in_cell.begin();
 *             for (unsigned int particle_index = 0;
 *                  particle != particles_in_cell.end();
 *                  ++particle, ++particle_index)
 *               {
 *                 const Tensor<1, dim> &E =
 *                   evaluator.get_gradient(particle_index);
 *
 * @endcode
 *
 * Having now obtained the electric field at the location of one
 * of the particles, we use this to update first the velocity
 * and then the position. To do so, let us first get the old
 * velocity out of the properties stored with the particle,
 * compute the acceleration, update the velocity, and store this
 * new velocity again in the properties of the particle. Recall
 * that this corresponds to the first of the following set of
 * update equations discussed in the introduction:
 * @f{align*}{
 *     \frac{{\mathbf v}_i^{(n)}
 *           -{\mathbf v}_i^{(n-1)}}{\Delta t}
 *     &= \frac{e\nabla V^{(n)}}{m}
 *  \\ \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}
 *          {\Delta t} &= {\mathbf v}_i^{(n)}.
 * @f}
 *
 * @code
 *                 const Tensor<1, dim> old_velocity(particle->get_properties());
 *
 *                 const Tensor<1, dim> acceleration =
 *                   Constants::electron_charge / Constants::electron_mass * E;
 *
 *                 const Tensor<1, dim> new_velocity =
 *                   old_velocity + acceleration * dt;
 *
 *                 particle->set_properties(new_velocity);
 *
 * @endcode
 *
 * With the new velocity, we can then also update the location
 * of the particle and tell the particle about it.
 *
 * @code
 *                 const Point<dim> new_location =
 *                   particle->get_location() + dt * new_velocity;
 *                 particle->set_location(new_location);
 *               }
 *           }
 *         }
 *
 * @endcode
 *
 * Having updated the locations and properties (i.e., velocities) of all
 * particles, we need to make sure that the `particle_handler` again knows
 * which cells they are in, and what their locations in the coordinate
 * system of the reference cell are. The following function does that. (It
 * also makes sure that, in parallel computations, particles are moved from
 * one processor to another processor if a particle moves from the subdomain
 * owned by the former to the subdomain owned by the latter.)
 *
 * @code
 *     particle_handler.sort_particles_into_subdomains_and_cells();
 *   }
 *
 *
 * @endcode
 *
 *
 * <a name="CathodeRaySimulatortrack_lost_particle"></a>
 * <h4>CathodeRaySimulator::track_lost_particle</h4>
 *

 *
 * The final particle-related function is the one that is called whenever a
 * particle is lost from the simulation. This typically happens if it leaves
 * the domain. If that happens, this function is called with both the particle
 * (which we can ask for its new location) and the cell it was previously on.
 * The function then updates the number of particles lost in this
 * time step and the total number of lost particles, and then estimates whether
 * the particle left through the hole in the middle of the anode. We do so by
 * first checking whether the cell it was in last had an @f$x@f$ coordinate to the
 * left of the right boundary (located at @f$x=4@f$) and the particle now has a
 * position to the right of the right boundary. If that is so, we compute a
 * direction vector of its motion that is normalized so that the @f$x@f$ component
 * of the direction vector is equal to @f$1@f$. With this direction vector, we can
 * compute where it would have intersected the line @f$x=4@f$. If this intercept
 * is between @f$0.5@f$ and @f$1.5@f$, then we claim that the particle left through
 * the hole and increment a counter.
 *
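 * In formulas, writing @f$\mathbf x_{\text{prev}}@f$ for the (approximate)
 * previous position and @f$\mathbf x_{\text{now}}@f$ for the current one,
 * the code below computes
 * @f[
 *   \mathbf d = \frac{\mathbf x_{\text{now}}-\mathbf x_{\text{prev}}}
 *                    {x_{\text{now}}-x_{\text{prev}}},
 *   \qquad
 *   y_{\text{intercept}} = y_{\text{prev}} + (4-x_{\text{prev}})\, d_y,
 * @f]
 * and then tests whether @f$0.5 < y_{\text{intercept}} < 1.5@f$.
 *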
 * @code
 *   template <int dim>
 *   void CathodeRaySimulator<dim>::track_lost_particle(
 *     const typename Particles::ParticleIterator<dim> &        particle,
 *     const typename Triangulation<dim>::active_cell_iterator &cell)
 *   {
 *     ++n_recently_lost_particles;
 *     ++n_total_lost_particles;
 *
 *     const Point<dim> current_location              = particle->get_location();
 *     const Point<dim> approximate_previous_location = cell->center();
 *
 *     if ((approximate_previous_location[0] < 4) && (current_location[0] > 4))
 *       {
 *         const Tensor<1, dim> direction =
 *           (current_location - approximate_previous_location) /
 *           (current_location[0] - approximate_previous_location[0]);
 *
 *         const double right_boundary_intercept =
 *           approximate_previous_location[1] +
 *           (4 - approximate_previous_location[0]) * direction[1];
 *         if ((right_boundary_intercept > 0.5) &&
 *             (right_boundary_intercept < 1.5))
 *           ++n_particles_lost_through_anode;
 *       }
 *   }
 *
 *
 *
 * @endcode
 *
 *
 * <a name="CathodeRaySimulatorupdate_timestep_size"></a>
 * <h4>CathodeRaySimulator::update_timestep_size</h4>
 *

 *
 * As discussed at length in the introduction, we need to respect a time step
 * condition whereby particles cannot move further than one cell in one time
 * step. To ensure that this is the case, we again first compute the maximal
 * speed of all particles on each cell, and divide the cell size by that
 * speed. We then compute the next time step size as the minimum of this
 * quantity over all cells, using the safety factor discussed in the
 * introduction, and set this as the desired time step size using the
 * DiscreteTime::set_desired_next_step_size() function.
 *
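 * In formulas, with @f$h_K@f$ the minimal vertex distance of cell @f$K@f$
 * and the minimum taken over all cells that currently carry particles, the
 * code below chooses
 * @f[
 *   \Delta t = \frac{c_\text{safety}}{2}\,
 *              \min_K \frac{h_K}{\max_{p \in K} \|{\mathbf v}_p\|}.
 * @f]
 *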
 * @code
 *   template <int dim>
 *   void CathodeRaySimulator<dim>::update_timestep_size()
 *   {
 *     if (time.get_step_number() > 0)
 *       {
 *         double min_cell_size_over_velocity = std::numeric_limits<double>::max();
 *
 *         for (const auto &cell : dof_handler.active_cell_iterators())
 *           if (particle_handler.n_particles_in_cell(cell) > 0)
 *             {
 *               const double cell_size = cell->minimum_vertex_distance();
 *
 *               double max_particle_velocity(0.0);
 *
 *               for (const auto &particle :
 *                    particle_handler.particles_in_cell(cell))
 *                 {
 *                   const Tensor<1, dim> velocity(particle.get_properties());
 *                   max_particle_velocity =
 *                     std::max(max_particle_velocity, velocity.norm());
 *                 }
 *
 *               if (max_particle_velocity > 0)
 *                 min_cell_size_over_velocity =
 *                   std::min(min_cell_size_over_velocity,
 *                            cell_size / max_particle_velocity);
 *             }
 *
 *         constexpr double c_safety = 0.5;
 *         time.set_desired_next_step_size(c_safety * 0.5 *
 *                                         min_cell_size_over_velocity);
 *       }
 * @endcode
 *
 * As mentioned in the introduction, we have to treat the very first
 * time step differently since there, particles are not available yet or
 * do not yet have the information associated with them that we need for the
 * computation of a reasonable step length. The formulas below follow the
 * discussion in the introduction.
 *
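 * One way to read the formula in the code below: a particle that starts from
 * rest and is accelerated by a field of strength @f$E@f$ travels a distance
 * of @f$s=\frac{1}{2}\frac{eE}{m}\Delta t^2@f$ during one time step. Bounding
 * this distance by a quarter of the cell size @f$h_K@f$ -- a safety margin in
 * the same spirit as @f$c_\text{safety}@f$ above -- yields
 * @f[
 *   \Delta t \le \sqrt{\frac{h_K\, m}{2\, e\, E}},
 * @f]
 * which is what the call to `std::sqrt` below evaluates.
 *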
 * @code
 *     else
 *       {
 *         const QTrapezoid<dim> vertex_quadrature;
 *         FEValues<dim> fe_values(fe, vertex_quadrature, update_gradients);
 *
 *         std::vector<Tensor<1, dim>> field_gradients(vertex_quadrature.size());
 *
 *         double min_timestep = std::numeric_limits<double>::max();
 *
 *         for (const auto &cell : dof_handler.active_cell_iterators())
 *           if (particle_handler.n_particles_in_cell(cell) > 0)
 *             {
 *               const double cell_size = cell->minimum_vertex_distance();
 *
 *               fe_values.reinit(cell);
 *               fe_values.get_function_gradients(solution, field_gradients);
 *
 *               double max_E = 0;
 *               for (const auto q_point : fe_values.quadrature_point_indices())
 *                 max_E = std::max(max_E, field_gradients[q_point].norm());
 *
 *               if (max_E > 0)
 *                 min_timestep =
 *                   std::min(min_timestep,
 *                            std::sqrt(0.5 * cell_size *
 *                                      Constants::electron_mass /
 *                                      Constants::electron_charge / max_E));
 *             }
 *
 *         time.set_desired_next_step_size(min_timestep);
 *       }
 *   }
 *
 *
 *
 * @endcode
 *
 *
 * <a name="ThecodeCathodeRaySimulatoroutput_resultscodefunction"></a>
 * <h4>The <code>CathodeRaySimulator::output_results()</code> function</h4>
 *

 *
 * The final function implementing pieces of the overall algorithm is the one
 * that generates graphical output. In the current context, we want to output
 * both the electric potential field as well as the particle locations and
 * velocities. But we also want to output the electric field, i.e., the
 * gradient of the solution.
 *

 *
 * deal.II has a general way of computing derived quantities from
 * the solution and outputting those as well. Here, this is the electric
 * field, but it could also be some other quantity -- say, the norm of the
 * electric field, or in fact anything else one could want to compute from
 * the solution @f$V_h(\mathbf x)@f$ or its derivatives. This general solution
 * uses the DataPostprocessor class and, in cases like the one here where we
 * want to output a quantity that represents a vector field, the
 * DataPostprocessorVector class.
 *

 *
 * Rather than try to explain how this class works, let us simply refer to
 * the documentation of the DataPostprocessorVector class that has essentially
 * this case as a well-documented example.
 *
 * @code
 *   template <int dim>
 *   class ElectricFieldPostprocessor : public DataPostprocessorVector<dim>
 *   {
 *   public:
 *     ElectricFieldPostprocessor()
 *       : DataPostprocessorVector<dim>("electric_field", update_gradients)
 *     {}
 *
 *     virtual void evaluate_scalar_field(
 *       const DataPostprocessorInputs::Scalar<dim> &input_data,
 *       std::vector<Vector<double>> &computed_quantities) const override
 *     {
 *       AssertDimension(input_data.solution_gradients.size(),
 *                       computed_quantities.size());
 *
 *       for (unsigned int p = 0; p < input_data.solution_gradients.size(); ++p)
 *         {
 *           AssertDimension(computed_quantities[p].size(), dim);
 *           for (unsigned int d = 0; d < dim; ++d)
 *             computed_quantities[p][d] = input_data.solution_gradients[p][d];
 *         }
 *     }
 *   };
 *
 *
 *
 * @endcode
 *
 * With this, the `output_results()` function becomes relatively
 * straightforward: We use the DataOut class as we have in almost
 * every one of the previous tutorial programs to output the
 * solution (the "electric potential") and we use the postprocessor
 * defined above to also output its gradient (the "electric
 * field"). This all is then written into a file in VTU format after
 * also associating the current time and time step number with this
 * file, and providing physical units for the two fields to be
 * output.
 *
 * @code
 *   template <int dim>
 *   void CathodeRaySimulator<dim>::output_results() const
 *   {
 *     {
 *       ElectricFieldPostprocessor<dim> electric_field;
 *       DataOut<dim>                    data_out;
 *       data_out.attach_dof_handler(dof_handler);
 *       data_out.add_data_vector(solution, "electric_potential");
 *       data_out.add_data_vector(solution, electric_field);
 *       data_out.build_patches();
 *
 *       DataOutBase::VtkFlags output_flags;
 *       output_flags.time  = time.get_current_time();
 *       output_flags.cycle = time.get_step_number();
 *       output_flags.physical_units["electric_potential"] = "V";
 *       output_flags.physical_units["electric_field"]     = "V/m";
 *
 *       data_out.set_flags(output_flags);
 *
 *       std::ofstream output("solution-" +
 *                            Utilities::int_to_string(time.get_step_number(), 4) +
 *                            ".vtu");
 *       data_out.write_vtu(output);
 *     }
 *
 * @endcode
 *
 * Outputting the particle positions and properties is no more complicated. The
 * Particles::DataOut class plays the role of the DataOut class for
 * particles, and all we have to do is tell that class where to take
 * particles from and how to interpret the `dim` components of the
 * properties -- namely, as a single vector indicating the velocity, rather
 * than as `dim` scalar properties. The rest is then the same as above:
 *
 * @code
 *     {
 *       Particles::DataOut<dim> particle_out;
 *       particle_out.build_patches(
 *         particle_handler,
 *         std::vector<std::string>(dim, "velocity"),
 *         std::vector<DataComponentInterpretation::DataComponentInterpretation>(
 *           dim, DataComponentInterpretation::component_is_part_of_vector));
 *
 *       DataOutBase::VtkFlags output_flags;
 *       output_flags.time  = time.get_current_time();
 *       output_flags.cycle = time.get_step_number();
 *       output_flags.physical_units["velocity"] = "m/s";
 *
 *       particle_out.set_flags(output_flags);
 *
 *       std::ofstream output("particles-" +
 *                            Utilities::int_to_string(time.get_step_number(), 4) +
 *                            ".vtu");
 *       particle_out.write_vtu(output);
 *     }
 *   }
 *
 *
 * @endcode
 *
 *
 * <a name="CathodeRaySimulatorrun"></a>
 * <h4>CathodeRaySimulator::run</h4>
 *

 *
 * The last member function of the principal class of this program is then the
 * driver. At the top, it refines the mesh a number of times by solving the
 * problem (with no particles yet created) on a sequence of finer and finer
 * meshes.
 *
 * @code
 *   template <int dim>
 *   void CathodeRaySimulator<dim>::run()
 *   {
 *     make_grid();
 *
 * @endcode
 *
 * do a few refinement cycles up front
 *
 * @code
 *     const unsigned int n_pre_refinement_cycles = 3;
 *     for (unsigned int refinement_cycle = 0;
 *          refinement_cycle < n_pre_refinement_cycles;
 *          ++refinement_cycle)
 *       {
 *         setup_system();
 *         assemble_system();
 *         solve_field();
 *         refine_grid();
 *       }
 *
 *
 * @endcode
 *
 * Now do the loop over time. The sequence of steps follows closely the
 * outline of the algorithm discussed in the introduction. As discussed in
 * great detail in the documentation of the DiscreteTime class, while we
 * move the field and particle information forward by one time step, the
 * time stored in the `time` variable is not consistent with where (some of)
 * these quantities are (in the terminology of DiscreteTime, this is the
 * "update stage"). The call to `time.advance_time()` makes everything
 * consistent again by setting the `time` variable to the time at which the
 * field and particles already are, and once we are in this "consistent
 * stage", we can generate graphical output and write information about the
 * current state of the simulation to screen.
 *
 * @code
 *     setup_system();
 *     do
 *       {
 *         std::cout << "Timestep " << time.get_step_number() + 1 << std::endl;
 *         std::cout << "  Field degrees of freedom: "
 *                   << dof_handler.n_dofs() << std::endl;
 *
 *         assemble_system();
 *         solve_field();
 *
 *         create_particles();
 *         std::cout << "  Total number of particles in simulation: "
 *                   << particle_handler.n_global_particles() << std::endl;
 *
 *         n_recently_lost_particles = 0;
 *         update_timestep_size();
 *         move_particles();
 *
 *         time.advance_time();
 *
 *         output_results();
 *
 *         std::cout << "  Number of particles lost this time step: "
 *                   << n_recently_lost_particles << std::endl;
 *         if (n_total_lost_particles > 0)
 *           std::cout << "  Fraction of particles lost through anode: "
 *                     << 1. * n_particles_lost_through_anode /
 *                          n_total_lost_particles
 *                     << std::endl;
 *
 *         std::cout << std::endl
 *                   << "  Now at t=" << time.get_current_time()
 *                   << ", dt=" << time.get_previous_step_size() << '.'
 *                   << std::endl
 *                   << std::endl;
 *       }
 *     while (time.is_at_end() == false);
 *   }
 * } // namespace Step19
 *
 *
 *
 * @endcode
 *
 *
 * <a name="Thecodemaincodefunction"></a>
 * <h3>The <code>main</code> function</h3>
 *

 *
 * The final function of the program is then again the `main()` function. It is
 * unchanged in all tutorial programs since @ref step_6 "step-6" and so there
 * is nothing new to discuss:
 *
 * @code
 * int main()
 * {
 *   try
 *     {
 *       Step19::CathodeRaySimulator<2> cathode_ray_simulator_2d;
 *       cathode_ray_simulator_2d.run();
 *     }
 *   catch (std::exception &exc)
 *     {
 *       std::cerr << std::endl
 *                 << std::endl
 *                 << "----------------------------------------------------"
 *                 << std::endl;
 *       std::cerr << "Exception on processing: " << std::endl
 *                 << exc.what() << std::endl
 *                 << "Aborting!" << std::endl
 *                 << "----------------------------------------------------"
 *                 << std::endl;
 *
 *       return 1;
 *     }
 *   catch (...)
 *     {
 *       std::cerr << std::endl
 *                 << std::endl
 *                 << "----------------------------------------------------"
 *                 << std::endl;
 *       std::cerr << "Unknown exception!" << std::endl
 *                 << "Aborting!" << std::endl
 *                 << "----------------------------------------------------"
 *                 << std::endl;
 *       return 1;
 *     }
 *   return 0;
 * }
 * @endcode
<a name="Results"></a><h1>Results</h1>


When this program is run, it produces output that looks as follows:
```
Timestep 1
  Field degrees of freedom: 4989
  Total number of particles in simulation: 20
  Number of particles lost this time step: 0

  Now at t=2.12647e-07, dt=2.12647e-07.

Timestep 2
  Field degrees of freedom: 4989
  Total number of particles in simulation: 24
  Number of particles lost this time step: 0

  Now at t=4.14362e-07, dt=2.01715e-07.

Timestep 3
  Field degrees of freedom: 4989
  Total number of particles in simulation: 28
  Number of particles lost this time step: 0

  Now at t=5.96019e-07, dt=1.81657e-07.

Timestep 4
  Field degrees of freedom: 4989
  Total number of particles in simulation: 32
  Number of particles lost this time step: 0

  Now at t=7.42634e-07, dt=1.46614e-07.


...


Timestep 1000
  Field degrees of freedom: 4989
  Total number of particles in simulation: 44
  Number of particles lost this time step: 6
  Fraction of particles lost through anode: 0.0601266

  Now at t=4.93276e-05, dt=4.87463e-08.

Timestep 1001
  Field degrees of freedom: 4989
  Total number of particles in simulation: 44
  Number of particles lost this time step: 0
  Fraction of particles lost through anode: 0.0601266

  Now at t=4.93759e-05, dt=4.82873e-08.


...


Timestep 2091
  Field degrees of freedom: 4989
  Total number of particles in simulation: 44
  Number of particles lost this time step: 0
  Fraction of particles lost through anode: 0.0503338

  Now at t=9.99237e-05, dt=4.26254e-08.

Timestep 2092
  Field degrees of freedom: 4989
  Total number of particles in simulation: 44
  Number of particles lost this time step: 0
  Fraction of particles lost through anode: 0.0503338

  Now at t=9.99661e-05, dt=4.24442e-08.

Timestep 2093
  Field degrees of freedom: 4989
  Total number of particles in simulation: 44
  Number of particles lost this time step: 2
  Fraction of particles lost through anode: 0.050308

  Now at t=0.0001, dt=3.38577e-08.
```

Picking a random few time steps, we can visualize the solution in the
form of streamlines for the electric field and dots for the electrons:
<div class="twocolumn" style="width: 80%">
  <div>
    <img src="https://www.dealii.org/images/steps/developer/step-19.solution.0000.png"
         alt="The solution at time step 0 (t=0 seconds)."
         width="500">
    <br>
    Solution at time step 0 (t=0 seconds).
    <br>
  </div>
  <div>
    <img src="https://www.dealii.org/images/steps/developer/step-19.solution.1400.png"
         alt="The solution at time step 1400 (t=0.000068 seconds)."
         width="500">
    <br>
    Solution at time step 1400 (t=0.000068 seconds).
    <br>
  </div>
  <div>
    <img src="https://www.dealii.org/images/steps/developer/step-19.solution.0700.png"
         alt="The solution at time step 700 (t=0.000035 seconds)."
         width="500">
    <br>
    Solution at time step 700 (t=0.000035 seconds).
    <br>
  </div>
  <div>
    <img src="https://www.dealii.org/images/steps/developer/step-19.solution.2092.png"
         alt="The solution at time step 2092 (t=0.0001 seconds)."
         width="500">
    <br>
    Solution at time step 2092 (t=0.0001 seconds).
    <br>
  </div>
</div>

That said, a more appropriate way to visualize the results of this
program is by creating a video that shows how these electrons move, and how
the electric field changes in response to their motion:

@htmlonly
<p align="center">
  <iframe width="560" height="315" src="https://www.youtube.com/embed/HwUtE7xuteE"
   frameborder="0"
   allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
   allowfullscreen></iframe>
 </p>
@endhtmlonly

What you can see here is how the "focus element" of the boundary with its negative
voltage repels the electrons and makes sure that they do not just fly away
perpendicular to the cathode (as they do in the initial part of their
trajectories). It also shows how the electric field lines
move around over time, in response to the charges flying by -- in other words,
the feedback the particles have on the electric field that itself drives the
motion of the electrons.

The movie suggests that electrons move in "bunches" or "bursts". One element of
this appearance is an artifact of how the movie was created: Every frame of the
movie corresponds to one time step, but the time step length varies. More
specifically, the fastest particle moving through the smallest cell determines
the length of the time step (see the discussion in the introduction), and
consequently time steps are small whenever a (fast) particle moves through the
small cells at the right edge of the domain; time steps are longer again once
the particle has left the domain. This slowing-accelerating effect can easily
be visualized by plotting the time step length shown in the screen output, for
example with the small filter sketched below.

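A small, self-contained sketch of how one might extract these values (this
helper is an assumption of this write-up, not part of step-19): redirect the
screen output to a file, say `log.txt`, and run a filter like the following,
whose output can then be plotted, for example, with gnuplot:

@code
#include <fstream>
#include <iostream>
#include <string>

// Print one "step dt" pair per line for every "Now at t=..., dt=...." line
// found in the log file of this program:
int main()
{
  std::ifstream log_file("log.txt");
  std::string   line;
  unsigned int  step = 0;
  while (std::getline(log_file, line))
    {
      const auto pos = line.find("dt=");
      if (pos != std::string::npos)
        {
          // The value starts after "dt=" and ends just before the final '.':
          const std::string dt =
            line.substr(pos + 3, line.size() - (pos + 3) - 1);
          std::cout << ++step << ' ' << dt << '\n';
        }
    }
}
@endcode
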
The second part of this is real, however: The simulation creates a large group
of particles in the beginning, and fewer after about the 300th time step. This
is probably because of the negative charge of the particles in the simulation:
They reduce the magnitude of the electric field at the (also negatively
charged) electrode and consequently reduce the number of points on the cathode
at which the magnitude exceeds the threshold necessary to draw an electron out
of the electrode.


<a name="extensions"></a>
<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>



<a name="Morestatisticsaboutelectrons"></a><h4> More statistics about electrons </h4>


The program already computes the fraction of the electrons that leave the
domain through the hole in the anode. But there are other quantities one might
be interested in -- for example, the average velocity of these particles. It
would not be very difficult to obtain each particle's velocity from its
properties, in the same way as we do in the `move_particles()` function, and
compute statistics from it, as sketched below.
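
A minimal sketch of this idea, using only calls that already appear in the
program above but with a hypothetical helper name `compute_average_speed()`
(not part of step-19), could look like this in the serial setting of this
program:

@code
template <int dim>
double CathodeRaySimulator<dim>::compute_average_speed()
{
  double sum_of_speeds = 0;

  // ParticleHandler can be iterated over directly; each particle's
  // `dim` properties store its velocity, as in move_particles():
  for (const auto &particle : particle_handler)
    {
      const Tensor<1, dim> velocity(particle.get_properties());
      sum_of_speeds += velocity.norm();
    }

  const auto n_particles = particle_handler.n_global_particles();
  return (n_particles > 0 ? sum_of_speeds / n_particles : 0.);
}
@endcode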


<a name="Abettersynchronizedvisualization"></a><h4> A better-synchronized visualization </h4>


As discussed above, there is a varying time difference between different frames
of the video because we create output for every time step. A better way to
create movies would be to generate a new output file at fixed time intervals,
regardless of how many time steps lie between each such point; a sketch of this
idea follows below.
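
One way to do this, as a sketch: keep a hypothetical member variable
`next_output_time` (not part of step-19) together with an assumed output
interval, and in `run()` replace the unconditional call to `output_results()`
by something like the following (ignoring the finer point of interpolating the
state between two time steps):

@code
const double output_interval = 1e-6; // assumed value, not from the program

if (time.get_current_time() >= next_output_time)
  {
    output_results();
    next_output_time += output_interval;
  }
@endcode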


<a name="Abettertimeintegrator"></a><h4> A better time integrator </h4>


The problem we are considering in this program is a coupled, multiphysics
problem. But the way we solve it is by first computing the (electric) potential
field and then updating the particle locations. This is what is called an
"operator-splitting method", a concept we will investigate in more detail
in @ref step_58 "step-58".

While it is awkward to think of a way to solve this problem that does not involve
splitting the problem into a PDE piece and a particles piece, one
*can* (and probably should!) think of a better way to update the particle
locations. Specifically, the equations we use to update the particle location
are
@f{align*}{
  \frac{{\mathbf v}_i^{(n)}-{\mathbf v}_i^{(n-1)}}{\Delta t} &= \frac{e\nabla V^{(n)}}{m}
  \\
  \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} &= {\mathbf v}_i^{(n)}.
@f}
This corresponds to a simple forward-Euler time discretization -- a method of
first order accuracy in the time step size @f$\Delta t@f$ that we know we should
avoid because we can do better. Rather, one might want to consider a scheme such
as the
[leapfrog scheme](https://en.wikipedia.org/wiki/Leapfrog_integration)
or more generally
[symplectic integrators](https://en.wikipedia.org/wiki/Symplectic_integrator)
such as the
[Verlet scheme](https://en.wikipedia.org/wiki/Verlet_integration).
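
As a sketch of what this would mean for the inner update in `move_particles()`,
here is the "kick-drift-kick" form of the leapfrog scheme, using only calls
that already appear above. Note that a faithful implementation would have to
re-evaluate the field at the updated position for the second half-kick; the
operator splitting used in this program does not provide that value within the
same time step, so the old field value is reused here as an approximation:

@code
const Tensor<1, dim> acceleration =
  Constants::electron_charge / Constants::electron_mass * E;

// First "kick": advance the velocity by half a time step.
Tensor<1, dim> velocity(particle->get_properties());
velocity += 0.5 * dt * acceleration;

// "Drift": advance the position by a full time step using the
// half-stepped velocity.
particle->set_location(particle->get_location() + dt * velocity);

// Second "kick": the remaining half of the velocity update. This should
// use the acceleration at the *new* location; we reuse the old one as an
// approximation, as explained above.
velocity += 0.5 * dt * acceleration;
particle->set_properties(velocity);
@endcode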


<a name="Parallelization"></a><h4> Parallelization </h4>


In release mode, the program runs in about 3.5 minutes on one of the author's
laptops at the time of writing this. That's acceptable. But what if we wanted
to make the simulation three-dimensional? What if we wanted to work not with a
maximum of around 100 particles at any given time (as happens with the
parameters used here) but with 100,000? What if we needed a substantially finer
mesh?

In those cases, one would want to run the program not just on a single processor,
but in fact on as many as one has available. This requires parallelizing both
the PDE solution and the handling of particles. In practice, while there
are substantial challenges to making this efficient and scale well, these
challenges are all addressed in deal.II itself. For example, @ref step_40 "step-40" shows
how to parallelize the finite element part, and @ref step_70 "step-70" shows how one can
then also parallelize the particles part.
 *
 *
2056<a name="PlainProg"></a>
2057<h1> The plain program</h1>
2058@include "step-19.cc"
2059*/