Reference documentation for deal.II version GIT relicensing-1062-gc06da148b8 2024-07-15 19:20:02+00:00
particle_handler.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2017 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
 // (deal.II header #include directives elided in this listing)
22#include <limits>
23#include <memory>
24#include <utility>
25
 26 DEAL_II_NAMESPACE_OPEN
 27
28namespace Particles
29{
30 namespace
31 {
32 template <int dim, int spacedim>
33 std::vector<char>
34 pack_particles(std::vector<ParticleIterator<dim, spacedim>> &particles)
35 {
36 std::vector<char> buffer;
37
38 if (particles.empty())
39 return buffer;
40
41 buffer.resize(particles.size() *
42 particles.front()->serialized_size_in_bytes());
43 void *current_data = buffer.data();
44
45 for (const auto &particle : particles)
46 {
47 current_data = particle->write_particle_data_to_memory(current_data);
48 }
49
50 return buffer;
51 }
52 } // namespace
53
54
55
56 template <int dim, int spacedim>
 57 ParticleHandler<dim, spacedim>::ParticleHandler()
 58 : triangulation()
 59 , mapping()
60 , property_pool(std::make_unique<PropertyPool<dim, spacedim>>(0))
61 , global_number_of_particles(0)
62 , number_of_locally_owned_particles(0)
63 , global_max_particles_per_cell(0)
64 , next_free_particle_index(0)
65 , size_callback()
66 , store_callback()
67 , load_callback()
68 , handle(numbers::invalid_unsigned_int)
69 , tria_listeners()
70 {
 71 reset_particle_container(particles);
 72 }
73
74
75
76 template <int dim, int spacedim>
 77 ParticleHandler<dim, spacedim>::ParticleHandler(
 78 const Triangulation<dim, spacedim> &triangulation,
 79 const Mapping<dim, spacedim> &mapping,
80 const unsigned int n_properties)
81 : triangulation(&triangulation, typeid(*this).name())
82 , mapping(&mapping, typeid(*this).name())
83 , property_pool(std::make_unique<PropertyPool<dim, spacedim>>(n_properties))
84 , cells_to_particle_cache(triangulation.n_active_cells(), particles.end())
85 , global_number_of_particles(0)
86 , number_of_locally_owned_particles(0)
87 , global_max_particles_per_cell(0)
88 , next_free_particle_index(0)
89 , size_callback()
90 , store_callback()
91 , load_callback()
92 , handle(numbers::invalid_unsigned_int)
93 , triangulation_cache(
94 std::make_unique<GridTools::Cache<dim, spacedim>>(triangulation,
95 mapping))
96 , tria_listeners()
97 {
 98 reset_particle_container(particles);
 99 connect_to_triangulation_signals();
 100 }
101
102
103
104 template <int dim, int spacedim>
 105 ParticleHandler<dim, spacedim>::~ParticleHandler()
 106 {
107 clear_particles();
108
109 for (const auto &connection : tria_listeners)
110 connection.disconnect();
111 }
112
113
114
115 template <int dim, int spacedim>
116 void
 117 ParticleHandler<dim, spacedim>::initialize(
 118 const Triangulation<dim, spacedim> &new_triangulation,
119 const Mapping<dim, spacedim> &new_mapping,
120 const unsigned int n_properties)
121 {
122 clear();
123
124 triangulation = &new_triangulation;
125 mapping = &new_mapping;
126
127 reset_particle_container(particles);
128
129 // Create the memory pool that will store all particle properties
130 property_pool = std::make_unique<PropertyPool<dim, spacedim>>(n_properties);
131
132 // Create the grid cache to cache the information about the triangulation
133 // that is used to locate the particles into subdomains and cells
134 triangulation_cache =
135 std::make_unique<GridTools::Cache<dim, spacedim>>(new_triangulation,
136 new_mapping);
137
138 cells_to_particle_cache.resize(triangulation->n_active_cells(),
139 particles.end());
140
141 connect_to_triangulation_signals();
142 }
143
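 // --------------------------------------------------------------------------
 // [Editor's illustration, not part of particle_handler.cc] A minimal sketch of
 // how user code typically sets up a ParticleHandler through the constructor or
 // the initialize() member shown above. The triangulation and mapping objects
 // and the property count are assumptions made only for this example.
 #include <deal.II/fe/mapping.h>
 #include <deal.II/grid/tria.h>
 #include <deal.II/particles/particle_handler.h>

 template <int dim>
 void setup_particle_handler(const dealii::Triangulation<dim> &triangulation,
                             const dealii::Mapping<dim>       &mapping)
 {
   // Default-construct first and attach the mesh later, the two-step path
   // supported by initialize(); here two doubles of properties per particle.
   dealii::Particles::ParticleHandler<dim> particle_handler;
   particle_handler.initialize(triangulation, mapping, /*n_properties=*/2);
 }
 // --------------------------------------------------------------------------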
144
145
146 template <int dim, int spacedim>
147 void
 148 ParticleHandler<dim, spacedim>::copy_from(
 149 const ParticleHandler<dim, spacedim> &particle_handler)
150 {
151 const unsigned int n_properties =
152 particle_handler.property_pool->n_properties_per_slot();
153 initialize(*particle_handler.triangulation,
154 *particle_handler.mapping,
155 n_properties);
156
157 property_pool = std::make_unique<PropertyPool<dim, spacedim>>(
158 *(particle_handler.property_pool));
159
160 // copy static members
161 global_number_of_particles = particle_handler.global_number_of_particles;
162 number_of_locally_owned_particles =
163 particle_handler.number_of_locally_owned_particles;
164
165 global_max_particles_per_cell =
166 particle_handler.global_max_particles_per_cell;
167 next_free_particle_index = particle_handler.next_free_particle_index;
168
169 // Manually copy over the particles because we do not want to touch the
170 // anchor iterators set by initialize()
171 particles.insert(particle_container_owned_end(),
172 particle_handler.particle_container_owned_begin(),
173 particle_handler.particle_container_owned_end());
174 particles.insert(particle_container_ghost_end(),
175 particle_handler.particle_container_ghost_begin(),
176 particle_handler.particle_container_ghost_end());
177
178 for (auto it = particles.begin(); it != particles.end(); ++it)
179 if (!it->particles.empty())
180 cells_to_particle_cache[it->cell->active_cell_index()] = it;
181
182 ghost_particles_cache.ghost_particles_by_domain =
183 particle_handler.ghost_particles_cache.ghost_particles_by_domain;
184 handle = particle_handler.handle;
185 }
186
187
188
189 template <int dim, int spacedim>
190 void
 191 ParticleHandler<dim, spacedim>::clear()
 192 {
193 clear_particles();
194 global_number_of_particles = 0;
195 number_of_locally_owned_particles = 0;
196 next_free_particle_index = 0;
197 global_max_particles_per_cell = 0;
198 }
199
200
201
202 template <int dim, int spacedim>
203 void
 204 ParticleHandler<dim, spacedim>::clear_particles()
 205 {
206 for (auto &particles_in_cell : particles)
207 for (auto &particle : particles_in_cell.particles)
 208 if (particle != PropertyPool<dim, spacedim>::invalid_handle)
 209 property_pool->deregister_particle(particle);
210
211 cells_to_particle_cache.clear();
212 reset_particle_container(particles);
213 if (triangulation != nullptr)
214 cells_to_particle_cache.resize(triangulation->n_active_cells(),
215 particles.end());
216
217 // the particle properties have already been deleted by their destructor,
218 // but the memory is still allocated. Return the memory as well.
219 property_pool->clear();
220 }
221
222
223
224 template <int dim, int spacedim>
225 void
226 ParticleHandler<dim, spacedim>::reserve(const std::size_t n_particles)
227 {
228 property_pool->reserve(n_particles);
229 }
230
231
232
233 template <int dim, int spacedim>
234 void
 235 ParticleHandler<dim, spacedim>::reset_particle_container(
 236 particle_container &given_particles)
237 {
238 // Make sure to set a valid past-the-end iterator also in case we have no
239 // triangulation
 240 const typename Triangulation<dim, spacedim>::cell_iterator
 241 past_the_end_iterator =
242 triangulation != nullptr ?
243 triangulation->end() :
244 typename Triangulation<dim, spacedim>::cell_iterator(nullptr, -1, -1);
245
246 given_particles.clear();
247 for (unsigned int i = 0; i < 3; ++i)
248 given_particles.emplace_back(
249 std::vector<typename PropertyPool<dim, spacedim>::Handle>(),
250 past_the_end_iterator);
251
252 // Set the end of owned particles to the middle of the three elements
253 const_cast<typename particle_container::iterator &>(owned_particles_end) =
254 ++given_particles.begin();
255 }
256
257
258
259 template <int dim, int spacedim>
260 void
 261 ParticleHandler<dim, spacedim>::update_cached_numbers()
 262 {
263 // first sort the owned particles by the active cell index
264 bool sort_is_necessary = false;
265 {
266 auto previous = particle_container_owned_begin();
267 for (auto next = previous; next != particle_container_owned_end(); ++next)
268 {
269 if (previous->cell.state() == IteratorState::valid &&
270 next->cell.state() == IteratorState::valid &&
271 previous->cell > next->cell)
272 {
273 sort_is_necessary = true;
274 break;
275 }
276 previous = next;
277 }
278 }
279 if (sort_is_necessary)
280 {
281 // we could have tried to call std::list::sort with a custom
282 // comparator to get things sorted, but things are complicated by the
283 // three anchor entries that we do not want to move, and we would
284 // hence pay an O(N log(N)) algorithm with a large constant in front
285 // of it (on the order of 20+ instructions with many
286 // difficult-to-predict branches). Therefore, we simply copy the list
287 // into a new one (keeping alive the possibly large vectors with
 288 particles on cells).
289 particle_container sorted_particles;
290
291 // note that this call updates owned_particles_end, so that
292 // particle_container_owned_end() below already points to the
293 // new container
294 reset_particle_container(sorted_particles);
295
296 // iterate over cells and insert the entries in the new order
297 for (const auto &cell : triangulation->active_cell_iterators())
298 if (!cell->is_artificial())
299 if (cells_to_particle_cache[cell->active_cell_index()] !=
300 particles.end())
301 {
 302 // before we move the sorted_particles into particles,
303 // particle_container_ghost_end() still points to the
304 // old particles container. Therefore this condition looks
305 // quirky.
306 typename particle_container::iterator insert_position =
307 cell->is_locally_owned() ? particle_container_owned_end() :
308 --sorted_particles.end();
309 typename particle_container::iterator new_entry =
310 sorted_particles.insert(
311 insert_position, typename particle_container::value_type());
312 new_entry->cell = cell;
313 new_entry->particles =
314 std::move(cells_to_particle_cache[cell->active_cell_index()]
315 ->particles);
316 }
317 particles = std::move(sorted_particles);
318
319 // refresh cells_to_particle_cache
320 cells_to_particle_cache.clear();
321 cells_to_particle_cache.resize(triangulation->n_active_cells(),
322 particles.end());
323 for (auto it = particles.begin(); it != particles.end(); ++it)
324 if (!it->particles.empty())
325 cells_to_particle_cache[it->cell->active_cell_index()] = it;
326 }
327
328 // Ensure that we did not accidentally modify the anchor entries with
329 // special purpose.
330 Assert(particles.front().cell.state() == IteratorState::past_the_end &&
331 particles.front().particles.empty() &&
332 particles.back().cell.state() == IteratorState::past_the_end &&
333 particles.back().particles.empty() &&
334 owned_particles_end->cell.state() == IteratorState::past_the_end &&
335 owned_particles_end->particles.empty(),
 336 ExcInternalError());
 337
338#ifdef DEBUG
339 // check that no cache element hits the three anchor states in the list of
340 // particles
341 for (const auto &it : cells_to_particle_cache)
342 Assert(it != particles.begin() && it != owned_particles_end &&
343 it != --(particles.end()),
 344 ExcInternalError());
 345
346 // check that we only have locally owned particles in the first region of
347 // cells; note that we skip the very first anchor element
348 for (auto it = particle_container_owned_begin();
349 it != particle_container_owned_end();
350 ++it)
351 Assert(it->cell->is_locally_owned(), ExcInternalError());
352
353 // check that the cache is consistent with the iterators
354 std::vector<typename particle_container::iterator> verify_cache(
355 triangulation->n_active_cells(), particles.end());
356 for (auto it = particles.begin(); it != particles.end(); ++it)
357 if (!it->particles.empty())
358 verify_cache[it->cell->active_cell_index()] = it;
359
360 for (unsigned int i = 0; i < verify_cache.size(); ++i)
361 Assert(verify_cache[i] == cells_to_particle_cache[i], ExcInternalError());
362#endif
363
364 // now compute local result with the function above and then compute the
365 // collective results
366 number_of_locally_owned_particles = 0;
367
368 types::particle_index result[2] = {};
369 for (const auto &particles_in_cell : particles)
370 {
371 const types::particle_index n_particles_in_cell =
372 particles_in_cell.particles.size();
373
374 // local_max_particles_per_cell
375 result[0] = std::max(result[0], n_particles_in_cell);
376
377 // number of locally owned particles
378 if (n_particles_in_cell > 0 &&
379 particles_in_cell.cell->is_locally_owned())
380 number_of_locally_owned_particles += n_particles_in_cell;
381
382 // local_max_particle_index
383 for (const auto &particle : particles_in_cell.particles)
384 result[1] = std::max(result[1], property_pool->get_id(particle));
385 }
386
387 global_number_of_particles =
 388 dealii::Utilities::MPI::sum(number_of_locally_owned_particles,
389 triangulation->get_communicator());
390
391 if (global_number_of_particles == 0)
392 {
393 next_free_particle_index = 0;
394 global_max_particles_per_cell = 0;
395 }
396 else
397 {
398 Utilities::MPI::max(result, triangulation->get_communicator(), result);
399
400 next_free_particle_index = result[1] + 1;
401 global_max_particles_per_cell = result[0];
402 }
403 }
404
405
406
407 template <int dim, int spacedim>
 408 types::particle_index
 409 ParticleHandler<dim, spacedim>::n_particles_in_cell(
 410 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell)
 411 const
412 {
413 if (cells_to_particle_cache.empty())
414 return 0;
415
416 if (cell->is_artificial() == false)
417 {
418 return cells_to_particle_cache[cell->active_cell_index()] !=
419 particles.end() ?
420 cells_to_particle_cache[cell->active_cell_index()]
421 ->particles.size() :
422 0;
423 }
424 else
425 AssertThrow(false,
426 ExcMessage("You can't ask for the particles on an artificial "
427 "cell since we don't know what exists on these "
428 "kinds of cells."));
429
 430 return 0;
 431 }
432
433
434
435 template <int dim, int spacedim>
 436 typename ParticleHandler<dim, spacedim>::particle_iterator_range
 437 ParticleHandler<dim, spacedim>::particles_in_cell(
 438 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell)
 439 const
440 {
441 return (const_cast<ParticleHandler<dim, spacedim> *>(this))
442 ->particles_in_cell(cell);
443 }
444
445
446
447 template <int dim, int spacedim>
 448 typename ParticleHandler<dim, spacedim>::particle_iterator_range
 449 ParticleHandler<dim, spacedim>::particles_in_cell(
 450 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell)
 451 {
452 const unsigned int active_cell_index = cell->active_cell_index();
453
454 if (cell->is_artificial() == false)
455 {
456 if (cells_to_particle_cache[active_cell_index] == particles.end())
457 {
458 return boost::make_iterator_range(
459 particle_iterator(particles.begin(), *property_pool, 0),
460 particle_iterator(particles.begin(), *property_pool, 0));
461 }
462 else
463 {
464 const typename particle_container::iterator
465 particles_in_current_cell =
466 cells_to_particle_cache[active_cell_index];
467 typename particle_container::iterator particles_in_next_cell =
468 particles_in_current_cell;
469 ++particles_in_next_cell;
470 return boost::make_iterator_range(
471 particle_iterator(particles_in_current_cell, *property_pool, 0),
472 particle_iterator(particles_in_next_cell, *property_pool, 0));
473 }
474 }
475 else
476 AssertThrow(false,
477 ExcMessage("You can't ask for the particles on an artificial "
478 "cell since we don't know what exists on these "
479 "kinds of cells."));
480
481 return {};
482 }
483
484
485
486 template <int dim, int spacedim>
487 void
 488 ParticleHandler<dim, spacedim>::remove_particle(
 489 const ParticleHandler<dim, spacedim>::particle_iterator &particle)
 490 {
491 auto &particles_on_cell = particle->particles_in_cell->particles;
492
493 // if the particle has an invalid handle (e.g. because it has
494 // been duplicated before calling this function) do not try
495 // to deallocate its memory again
496 auto handle = particle->get_handle();
 497 if (handle != PropertyPool<dim, spacedim>::invalid_handle)
 498 property_pool->deregister_particle(handle);
499
500 // need to reduce the cached number before deleting, because the iterator
501 // may be invalid after removing the particle even if only
502 // accessing the cell
503 const auto cell = particle->get_surrounding_cell();
504 const bool owned_cell = cell->is_locally_owned();
505 if (owned_cell)
506 --number_of_locally_owned_particles;
507
508 if (particles_on_cell.size() > 1)
509 {
510 particles_on_cell[particle->particle_index_within_cell] =
511 std::move(particles_on_cell.back());
512 particles_on_cell.resize(particles_on_cell.size() - 1);
513 }
514 else
515 {
516 particles.erase(particle->particles_in_cell);
517 cells_to_particle_cache[cell->active_cell_index()] = particles.end();
518 }
519 }
520
521
522
523 template <int dim, int spacedim>
524 void
 525 ParticleHandler<dim, spacedim>::remove_particles(
 526 const std::vector<ParticleHandler<dim, spacedim>::particle_iterator>
 527 &particles_to_remove)
528 {
529 // We need to remove particles backwards on each cell to keep the particle
530 // iterators alive as we keep removing particles on the same cell. To
 531 // ensure that this is safe, we check whether the iterators are already
 532 // sorted or whether we need to sort them manually.
533 const auto check_greater = [](const particle_iterator &a,
534 const particle_iterator &b) {
535 return a->particles_in_cell->cell > b->particles_in_cell->cell ||
536 (a->particles_in_cell->cell == b->particles_in_cell->cell &&
537 a->particle_index_within_cell > b->particle_index_within_cell);
538 };
539
540 bool particles_are_sorted = true;
541 auto previous = particles_to_remove.begin();
542 for (auto next = previous; next != particles_to_remove.end(); ++next)
543 {
544 if (check_greater(*previous, *next))
545 {
546 particles_are_sorted = false;
547 break;
548 }
549 previous = next;
550 }
551 if (particles_are_sorted)
552 {
553 // pass along backwards in array
554 for (auto it = particles_to_remove.rbegin();
555 it != particles_to_remove.rend();
556 ++it)
557 remove_particle(*it);
558 }
559 else
560 {
561 std::vector<ParticleHandler<dim, spacedim>::particle_iterator>
562 sorted_particles(particles_to_remove);
563 std::sort(sorted_particles.begin(),
564 sorted_particles.end(),
565 check_greater);
566
567 for (const auto &particle : sorted_particles)
568 remove_particle(particle);
569 }
570
571 update_cached_numbers();
572 }
573
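 // --------------------------------------------------------------------------
 // [Editor's illustration, not part of particle_handler.cc] A sketch of calling
 // the remove_particles() overload above from user code: collect iterators to
 // all particles that satisfy some criterion, then remove them in one call.
 // The removal criterion used here is an arbitrary assumption.
 #include <deal.II/particles/particle_handler.h>

 #include <vector>

 template <int dim>
 void purge_escaped_particles(dealii::Particles::ParticleHandler<dim> &particle_handler)
 {
   std::vector<typename dealii::Particles::ParticleHandler<dim>::particle_iterator>
     to_remove;

   for (auto particle = particle_handler.begin();
        particle != particle_handler.end();
        ++particle)
     if (particle->get_location()[0] > 1.0) // assumed criterion
       to_remove.push_back(particle);

   // remove_particles() sorts the iterators if necessary (see above) and
   // finishes with update_cached_numbers().
   particle_handler.remove_particles(to_remove);
 }
 // --------------------------------------------------------------------------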
574
575
576 template <int dim, int spacedim>
 577 typename ParticleHandler<dim, spacedim>::particle_iterator
 578 ParticleHandler<dim, spacedim>::insert_particle(
 579 const Particle<dim, spacedim> &particle,
 580 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell)
 581 {
582 return insert_particle(particle.get_location(),
583 particle.get_reference_location(),
584 particle.get_id(),
585 cell,
586 particle.get_properties());
587 }
588
589
590
591 template <int dim, int spacedim>
 592 typename ParticleHandler<dim, spacedim>::particle_iterator
 593 ParticleHandler<dim, spacedim>::insert_particle(
 594 const typename PropertyPool<dim, spacedim>::Handle handle,
 595 const typename Triangulation<dim, spacedim>::cell_iterator &cell)
 596 {
597 const unsigned int active_cell_index = cell->active_cell_index();
598 typename particle_container::iterator &cache =
599 cells_to_particle_cache[active_cell_index];
600 if (cache == particles.end())
601 {
602 const typename particle_container::iterator insert_position =
603 cell->is_locally_owned() ? particle_container_owned_end() :
604 particle_container_ghost_end();
605 cache = particles.emplace(
606 insert_position,
607 std::vector<typename PropertyPool<dim, spacedim>::Handle>{handle},
608 cell);
609 }
610 else
611 {
612 cache->particles.push_back(handle);
613 Assert(cache->cell == cell, ExcInternalError());
614 }
615 return particle_iterator(cache,
616 *property_pool,
617 cache->particles.size() - 1);
618 }
619
620
621
622 template <int dim, int spacedim>
 623 typename ParticleHandler<dim, spacedim>::particle_iterator
 624 ParticleHandler<dim, spacedim>::insert_particle(
 625 const void *&data,
 626 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell)
 627 {
 628 Assert(triangulation != nullptr, ExcInternalError());
 629 Assert(cells_to_particle_cache.size() == triangulation->n_active_cells(),
 630 ExcInternalError());
631 Assert(cell->is_locally_owned(),
632 ExcMessage("You tried to insert particles into a cell that is not "
633 "locally owned. This is not supported."));
634
635 particle_iterator particle_it =
636 insert_particle(property_pool->register_particle(), cell);
637
638 data = particle_it->read_particle_data_from_memory(data);
639
640 ++number_of_locally_owned_particles;
641
642 return particle_it;
643 }
644
645
646
647 template <int dim, int spacedim>
 648 typename ParticleHandler<dim, spacedim>::particle_iterator
 649 ParticleHandler<dim, spacedim>::insert_particle(
 650 const Point<spacedim> &position,
 651 const Point<dim> &reference_position,
 652 const types::particle_index particle_index,
 653 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell,
 654 const ArrayView<const double> &properties)
655 {
 656 Assert(triangulation != nullptr, ExcInternalError());
 657 Assert(cells_to_particle_cache.size() == triangulation->n_active_cells(),
 658 ExcInternalError());
660 Assert(cell->is_locally_owned(),
661 ExcMessage("You tried to insert particles into a cell that is not "
662 "locally owned. This is not supported."));
663
664 particle_iterator particle_it =
665 insert_particle(property_pool->register_particle(), cell);
666
667 particle_it->set_location(position);
668 particle_it->set_reference_location(reference_position);
669 particle_it->set_id(particle_index);
670
671 if (properties.size() != 0)
672 particle_it->set_properties(properties);
673
674 ++number_of_locally_owned_particles;
675
676 return particle_it;
677 }
678
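 // --------------------------------------------------------------------------
 // [Editor's illustration, not part of particle_handler.cc] A sketch of calling
 // the point/reference-point/id overload of insert_particle() above when the
 // owning cell still has to be found. Using
 // GridTools::find_active_cell_around_point() and get_next_free_particle_index()
 // as the id are assumptions of this example, not requirements of the API.
 #include <deal.II/base/point.h>
 #include <deal.II/fe/mapping.h>
 #include <deal.II/grid/grid_tools.h>
 #include <deal.II/grid/tria.h>
 #include <deal.II/particles/particle_handler.h>

 template <int dim>
 void insert_one_particle(dealii::Particles::ParticleHandler<dim> &particle_handler,
                          const dealii::Mapping<dim>              &mapping,
                          const dealii::Triangulation<dim>        &triangulation,
                          const dealii::Point<dim>                &location)
 {
   // Locate the surrounding cell and the reference coordinates inside it.
   const auto cell_and_ref_point =
     dealii::GridTools::find_active_cell_around_point(mapping,
                                                      triangulation,
                                                      location);

   if (cell_and_ref_point.first.state() == dealii::IteratorState::valid &&
       cell_and_ref_point.first->is_locally_owned())
     {
       particle_handler.insert_particle(
         location,
         cell_and_ref_point.second,
         particle_handler.get_next_free_particle_index(),
         cell_and_ref_point.first);

       // Refresh cached global counts and the next free index afterwards.
       particle_handler.update_cached_numbers();
     }
 }
 // --------------------------------------------------------------------------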
679
680
681 template <int dim, int spacedim>
682 void
 683 ParticleHandler<dim, spacedim>::insert_particles(
 684 const std::multimap<
 685 typename Triangulation<dim, spacedim>::active_cell_iterator,
 686 Particle<dim, spacedim>> &new_particles)
687 {
688 reserve(n_locally_owned_particles() + new_particles.size());
689 for (const auto &cell_and_particle : new_particles)
690 insert_particle(cell_and_particle.second, cell_and_particle.first);
691
692 update_cached_numbers();
693 }
694
695
696
697 template <int dim, int spacedim>
698 void
 699 ParticleHandler<dim, spacedim>::insert_particles(
 700 const std::vector<Point<spacedim>> &positions)
701 {
 702 Assert(triangulation != nullptr, ExcInternalError());
 703
704 update_cached_numbers();
705 reserve(n_locally_owned_particles() + positions.size());
706
707 // Determine the starting particle index of this process, which
708 // is the highest currently existing particle index plus the sum
709 // of the number of newly generated particles of all
710 // processes with a lower rank if in a parallel computation.
711 const types::particle_index local_next_particle_index =
712 get_next_free_particle_index();
713 types::particle_index local_start_index = 0;
714
715#ifdef DEAL_II_WITH_MPI
716 if (const auto parallel_triangulation =
717 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
718 &*triangulation))
719 {
720 types::particle_index particles_to_add_locally = positions.size();
721 const int ierr = MPI_Scan(&particles_to_add_locally,
722 &local_start_index,
723 1,
 724 DEAL_II_PARTICLE_INDEX_MPI_TYPE,
 725 MPI_SUM,
726 parallel_triangulation->get_communicator());
727 AssertThrowMPI(ierr);
728 local_start_index -= particles_to_add_locally;
729 }
730#endif
731
732 local_start_index += local_next_particle_index;
733
734 auto point_locations =
 735 GridTools::compute_point_locations_try_all(*triangulation_cache,
 736 positions);
737
738 auto &cells = std::get<0>(point_locations);
739 auto &local_positions = std::get<1>(point_locations);
740 auto &index_map = std::get<2>(point_locations);
741 auto &missing_points = std::get<3>(point_locations);
 742 // If a point was not found, throw an error, as the old
 743 // implementation of compute_point_locations would have done
744 AssertThrow(missing_points.empty(),
 745 VectorTools::ExcPointNotAvailableHere());
 746
747 (void)missing_points;
748
749 for (unsigned int i = 0; i < cells.size(); ++i)
750 for (unsigned int p = 0; p < local_positions[i].size(); ++p)
751 insert_particle(positions[index_map[i][p]],
752 local_positions[i][p],
753 local_start_index + index_map[i][p],
754 cells[i]);
755
756 update_cached_numbers();
757 }
758
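 // --------------------------------------------------------------------------
 // [Editor's illustration, not part of particle_handler.cc] A sketch of the
 // insert_particles(positions) overload above: every process hands in only
 // points that lie inside cells it locally owns; points that cannot be located
 // would trigger the AssertThrow above. The sample coordinates are assumptions.
 #include <deal.II/base/point.h>
 #include <deal.II/particles/particle_handler.h>

 #include <vector>

 template <int dim>
 void seed_particles(dealii::Particles::ParticleHandler<dim> &particle_handler)
 {
   std::vector<dealii::Point<dim>> positions(1);
   for (unsigned int d = 0; d < dim; ++d)
     positions[0][d] = 0.25; // one particle at (0.25, 0.25[, 0.25])

   // Particle ids are assigned automatically, continuing after the current
   // get_next_free_particle_index() across all MPI ranks.
   particle_handler.insert_particles(positions);
 }
 // --------------------------------------------------------------------------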
759
760
761 template <int dim, int spacedim>
762 std::map<unsigned int, IndexSet>
 763 ParticleHandler<dim, spacedim>::insert_global_particles(
 764 const std::vector<Point<spacedim>> &positions,
765 const std::vector<std::vector<BoundingBox<spacedim>>>
766 &global_bounding_boxes,
767 const std::vector<std::vector<double>> &properties,
768 const std::vector<types::particle_index> &ids)
769 {
770 if (!properties.empty())
771 {
772 AssertDimension(properties.size(), positions.size());
773#ifdef DEBUG
774 for (const auto &p : properties)
775 AssertDimension(p.size(), n_properties_per_particle());
776#endif
777 }
778
779 if (!ids.empty())
780 AssertDimension(ids.size(), positions.size());
781
782 const auto comm = triangulation->get_communicator();
783
784 const auto n_mpi_processes = Utilities::MPI::n_mpi_processes(comm);
785
786 // Compute the global number of properties
787 const auto n_global_properties =
788 Utilities::MPI::sum(properties.size(), comm);
789
790 // Gather the number of points per processor
791 const auto n_particles_per_proc =
792 Utilities::MPI::all_gather(comm, positions.size());
793
794 // Calculate all starting points locally
795 std::vector<unsigned int> particle_start_indices(n_mpi_processes);
796
797 unsigned int particle_start_index = get_next_free_particle_index();
798 for (unsigned int process = 0; process < particle_start_indices.size();
799 ++process)
800 {
801 particle_start_indices[process] = particle_start_index;
802 particle_start_index += n_particles_per_proc[process];
803 }
804
805 // Get all local information
806 const auto cells_positions_and_index_maps =
 807 GridTools::distribute_compute_point_locations(*triangulation_cache,
 808 positions,
809 global_bounding_boxes);
810
811 // Unpack the information into several vectors:
812 // All cells that contain at least one particle
813 const auto &local_cells_containing_particles =
814 std::get<0>(cells_positions_and_index_maps);
815
816 // The reference position of every particle in the local part of the
817 // triangulation.
818 const auto &local_reference_positions =
819 std::get<1>(cells_positions_and_index_maps);
820 // The original index in the positions vector for each particle in the
821 // local part of the triangulation
822 const auto &original_indices_of_local_particles =
823 std::get<2>(cells_positions_and_index_maps);
824 // The real spatial position of every particle in the local part of the
825 // triangulation.
826 const auto &local_positions = std::get<3>(cells_positions_and_index_maps);
827 // The MPI process that inserted each particle
828 const auto &calling_process_indices =
829 std::get<4>(cells_positions_and_index_maps);
830
831 // Create the map of cpu to indices, indicating who sent us what particle
832 std::map<unsigned int, std::vector<unsigned int>>
833 original_process_to_local_particle_indices_tmp;
834 for (unsigned int i_cell = 0;
835 i_cell < local_cells_containing_particles.size();
836 ++i_cell)
837 {
838 for (unsigned int i_particle = 0;
839 i_particle < local_positions[i_cell].size();
840 ++i_particle)
841 {
842 const unsigned int local_id_on_calling_process =
843 original_indices_of_local_particles[i_cell][i_particle];
844 const unsigned int calling_process =
845 calling_process_indices[i_cell][i_particle];
846
847 original_process_to_local_particle_indices_tmp[calling_process]
848 .push_back(local_id_on_calling_process);
849 }
850 }
851 std::map<unsigned int, IndexSet> original_process_to_local_particle_indices;
852 for (auto &process_and_particle_indices :
853 original_process_to_local_particle_indices_tmp)
854 {
855 const unsigned int calling_process = process_and_particle_indices.first;
856 original_process_to_local_particle_indices.insert(
857 {calling_process, IndexSet(n_particles_per_proc[calling_process])});
858 std::sort(process_and_particle_indices.second.begin(),
859 process_and_particle_indices.second.end());
860 original_process_to_local_particle_indices[calling_process].add_indices(
861 process_and_particle_indices.second.begin(),
862 process_and_particle_indices.second.end());
863 original_process_to_local_particle_indices[calling_process].compress();
864 }
865
866 // A map from mpi process to properties, ordered as in the IndexSet.
867 // Notice that this ordering may be different from the ordering in the
868 // vectors above, since no local ordering is guaranteed by the
869 // distribute_compute_point_locations() call.
870 // This is only filled if n_global_properties is > 0
871 std::map<unsigned int, std::vector<std::vector<double>>>
872 locally_owned_properties_from_other_processes;
873
874 // A map from mpi process to ids, ordered as in the IndexSet.
875 // Notice that this ordering may be different from the ordering in the
876 // vectors above, since no local ordering is guaranteed by the
877 // distribute_compute_point_locations() call.
878 // This is only filled if ids.size() is > 0
879 std::map<unsigned int, std::vector<types::particle_index>>
880 locally_owned_ids_from_other_processes;
881
882 if (n_global_properties > 0 || !ids.empty())
883 {
884 // Gather whom I sent my own particles to, to decide whom to send
885 // the particle properties or the ids
886 auto send_to_cpu = Utilities::MPI::some_to_some(
887 comm, original_process_to_local_particle_indices);
888
889 // Prepare the vector of properties to send
890 if (n_global_properties > 0)
891 {
892 std::map<unsigned int, std::vector<std::vector<double>>>
893 non_locally_owned_properties;
894
895 for (const auto &it : send_to_cpu)
896 {
897 std::vector<std::vector<double>> properties_to_send(
898 it.second.n_elements(),
899 std::vector<double>(n_properties_per_particle()));
900 unsigned int index = 0;
901 for (const auto el : it.second)
902 properties_to_send[index++] = properties[el];
903 non_locally_owned_properties.insert(
904 {it.first, properties_to_send});
905 }
906
907 // Send the non locally owned properties to each mpi process
908 // that needs them
909 locally_owned_properties_from_other_processes =
910 Utilities::MPI::some_to_some(comm, non_locally_owned_properties);
911
 912 AssertDimension(
 913 locally_owned_properties_from_other_processes.size(),
914 original_process_to_local_particle_indices.size());
915 }
916
917 if (!ids.empty())
918 {
919 std::map<unsigned int, std::vector<types::particle_index>>
920 non_locally_owned_ids;
921 for (const auto &it : send_to_cpu)
922 {
923 std::vector<types::particle_index> ids_to_send(
924 it.second.n_elements());
925 unsigned int index = 0;
926 for (const auto el : it.second)
927 ids_to_send[index++] = ids[el];
928 non_locally_owned_ids.insert({it.first, ids_to_send});
929 }
930
931 // Send the non locally owned ids to each mpi process
932 // that needs them
933 locally_owned_ids_from_other_processes =
934 Utilities::MPI::some_to_some(comm, non_locally_owned_ids);
935
936 AssertDimension(locally_owned_ids_from_other_processes.size(),
937 original_process_to_local_particle_indices.size());
938 }
939 }
940
941 // Now fill up the actual particles
942 for (unsigned int i_cell = 0;
943 i_cell < local_cells_containing_particles.size();
944 ++i_cell)
945 {
946 for (unsigned int i_particle = 0;
947 i_particle < local_positions[i_cell].size();
948 ++i_particle)
949 {
950 const unsigned int local_id_on_calling_process =
951 original_indices_of_local_particles[i_cell][i_particle];
952
953 const unsigned int calling_process =
954 calling_process_indices[i_cell][i_particle];
955
956 const unsigned int index_within_set =
957 original_process_to_local_particle_indices[calling_process]
958 .index_within_set(local_id_on_calling_process);
959
960 const unsigned int particle_id =
961 ids.empty() ?
962 local_id_on_calling_process +
963 particle_start_indices[calling_process] :
964 locally_owned_ids_from_other_processes[calling_process]
965 [index_within_set];
966
967 auto particle_it =
968 insert_particle(local_positions[i_cell][i_particle],
969 local_reference_positions[i_cell][i_particle],
970 particle_id,
971 local_cells_containing_particles[i_cell]);
972
973 if (n_global_properties > 0)
974 {
975 particle_it->set_properties(
976 locally_owned_properties_from_other_processes
977 [calling_process][index_within_set]);
978 }
979 }
980 }
981
982 update_cached_numbers();
983
984 return original_process_to_local_particle_indices;
985 }
986
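 // --------------------------------------------------------------------------
 // [Editor's illustration, not part of particle_handler.cc] A sketch of calling
 // the insert_global_particles() overload above. Describing the locally owned
 // subdomain by bounding boxes and gathering them on all ranks with
 // GridTools::compute_mesh_predicate_bounding_box() and
 // Utilities::MPI::all_gather() follows the pattern used in the deal.II
 // tutorials; the particular predicate and the omitted properties/ids are
 // assumptions of this example.
 #include <deal.II/base/bounding_box.h>
 #include <deal.II/base/mpi.h>
 #include <deal.II/base/point.h>
 #include <deal.II/grid/filtered_iterator.h>
 #include <deal.II/grid/grid_tools.h>
 #include <deal.II/grid/tria.h>
 #include <deal.II/particles/particle_handler.h>

 #include <vector>

 template <int dim>
 void insert_from_anywhere(dealii::Particles::ParticleHandler<dim> &particle_handler,
                           const dealii::Triangulation<dim>        &triangulation,
                           const std::vector<dealii::Point<dim>>   &my_positions,
                           const MPI_Comm                           communicator)
 {
   // Cover the locally owned cells with a few bounding boxes ...
   const std::vector<dealii::BoundingBox<dim>> local_boxes =
     dealii::GridTools::compute_mesh_predicate_bounding_box(
       triangulation, dealii::IteratorFilters::LocallyOwnedCell());

   // ... and let every process know everyone else's boxes.
   const std::vector<std::vector<dealii::BoundingBox<dim>>> global_boxes =
     dealii::Utilities::MPI::all_gather(communicator, local_boxes);

   // The positions may lie anywhere in the global domain; the returned map
   // (ignored here) records which process received which of our points.
   particle_handler.insert_global_particles(my_positions, global_boxes);
 }
 // --------------------------------------------------------------------------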
987
988
989 template <int dim, int spacedim>
990 std::map<unsigned int, IndexSet>
 991 ParticleHandler<dim, spacedim>::insert_global_particles(
 992 const std::vector<Particle<dim, spacedim>> &particles,
993 const std::vector<std::vector<BoundingBox<spacedim>>>
994 &global_bounding_boxes)
995 {
996 // Store the positions in a vector of points, the ids in a vector of ids,
997 // and the properties, if any, in a vector of vector of properties.
998 std::vector<Point<spacedim>> positions;
999 std::vector<std::vector<double>> properties;
1000 std::vector<types::particle_index> ids;
1001 positions.resize(particles.size());
1002 ids.resize(particles.size());
1003 if (n_properties_per_particle() > 0)
1004 properties.resize(particles.size(),
1005 std::vector<double>(n_properties_per_particle()));
1006
1007 unsigned int i = 0;
1008 for (const auto &p : particles)
1009 {
1010 positions[i] = p.get_location();
1011 ids[i] = p.get_id();
1012 if (p.has_properties())
1013 properties[i] = {p.get_properties().begin(),
1014 p.get_properties().end()};
1015 ++i;
1016 }
1017
1018 return insert_global_particles(positions,
1019 global_bounding_boxes,
1020 properties,
1021 ids);
1022 }
1023
1024
1025
1026 template <int dim, int spacedim>
 1027 types::particle_index
 1028 ParticleHandler<dim, spacedim>::n_global_particles() const
 1029 {
1030 return global_number_of_particles;
1031 }
1032
1033
1034
1035 template <int dim, int spacedim>
 1036 types::particle_index
 1037 ParticleHandler<dim, spacedim>::n_global_max_particles_per_cell() const
 1038 {
1039 return global_max_particles_per_cell;
1040 }
1041
1042
1043
1044 template <int dim, int spacedim>
 1045 types::particle_index
 1046 ParticleHandler<dim, spacedim>::n_locally_owned_particles() const
 1047 {
1048 return number_of_locally_owned_particles;
1049 }
1050
1051
1052
1053 template <int dim, int spacedim>
1054 unsigned int
 1055 ParticleHandler<dim, spacedim>::n_properties_per_particle() const
 1056 {
1057 return property_pool->n_properties_per_slot();
1058 }
1059
1060
1061
1062 template <int dim, int spacedim>
 1063 types::particle_index
 1064 ParticleHandler<dim, spacedim>::get_next_free_particle_index() const
 1065 {
1066 return next_free_particle_index;
1067 }
1068
1069
1070
1071 template <int dim, int spacedim>
1072 IndexSet
 1073 ParticleHandler<dim, spacedim>::locally_owned_particle_ids() const
 1074 {
1075 IndexSet set(get_next_free_particle_index());
1076 std::vector<types::particle_index> indices;
1077 indices.reserve(n_locally_owned_particles());
1078 for (const auto &p : *this)
1079 indices.push_back(p.get_id());
1080 set.add_indices(indices.begin(), indices.end());
1081 set.compress();
1082 return set;
1083 }
1084
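 // --------------------------------------------------------------------------
 // [Editor's illustration, not part of particle_handler.cc] A sketch of using
 // the IndexSet returned by locally_owned_particle_ids() above to size a
 // parallel vector with one entry per particle, e.g. to store a scalar
 // quantity per particle. The vector type chosen here is an assumption of
 // this example.
 #include <deal.II/base/index_set.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/particles/particle_handler.h>

 template <int dim>
 dealii::LinearAlgebra::distributed::Vector<double>
 make_per_particle_vector(
   const dealii::Particles::ParticleHandler<dim> &particle_handler,
   const MPI_Comm                                 communicator)
 {
   const dealii::IndexSet locally_owned_ids =
     particle_handler.locally_owned_particle_ids();

   // One scalar unknown per locally owned particle id; no ghost entries here.
   return dealii::LinearAlgebra::distributed::Vector<double>(locally_owned_ids,
                                                             communicator);
 }
 // --------------------------------------------------------------------------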
1085
1086
1087 template <int dim, int spacedim>
1090 {
1091 return property_pool->n_slots();
1092 }
1093
1094
1095
1096 template <int dim, int spacedim>
1097 void
 1098 ParticleHandler<dim, spacedim>::get_particle_positions(
 1099 std::vector<Point<spacedim>> &positions,
1100 const bool add_to_output_vector)
1101 {
1102 // There should be one point per particle to gather
1103 AssertDimension(positions.size(), n_locally_owned_particles());
1104
1105 unsigned int i = 0;
1106 for (auto it = begin(); it != end(); ++it, ++i)
1107 {
1108 if (add_to_output_vector)
1109 positions[i] = positions[i] + it->get_location();
1110 else
1111 positions[i] = it->get_location();
1112 }
1113 }
1114
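 // --------------------------------------------------------------------------
 // [Editor's illustration, not part of particle_handler.cc] A sketch of the
 // gather/scatter pair formed by get_particle_positions() above and the vector
 // based set_particle_positions() below: read all locally owned particle
 // locations, modify them, and write them back. The uniform shift applied
 // here is an assumption of this example.
 #include <deal.II/base/point.h>
 #include <deal.II/particles/particle_handler.h>

 #include <vector>

 template <int dim>
 void shift_particles(dealii::Particles::ParticleHandler<dim> &particle_handler)
 {
   std::vector<dealii::Point<dim>> positions(
     particle_handler.n_locally_owned_particles());

   // Gather the current locations (overwrite rather than accumulate).
   particle_handler.get_particle_positions(positions,
                                           /*add_to_output_vector=*/false);

   // Compute new locations; here simply shift every particle in x-direction.
   for (auto &p : positions)
     p[0] += 0.1;

   // Interpret the vector as absolute positions and re-sort into cells.
   particle_handler.set_particle_positions(positions,
                                           /*displace_particles=*/false);
 }
 // --------------------------------------------------------------------------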
1115
1116
1117 template <int dim, int spacedim>
1118 void
 1119 ParticleHandler<dim, spacedim>::set_particle_positions(
 1120 const std::vector<Point<spacedim>> &new_positions,
1121 const bool displace_particles)
1122 {
1123 // There should be one point per particle to fix the new position
1124 AssertDimension(new_positions.size(), n_locally_owned_particles());
1125
1126 unsigned int i = 0;
1127 for (auto it = begin(); it != end(); ++it, ++i)
1128 {
1129 Point<spacedim> &location = it->get_location();
1130 if (displace_particles)
1131 location += new_positions[i];
1132 else
1133 location = new_positions[i];
1134 }
1135 sort_particles_into_subdomains_and_cells();
1136 }
1137
1138
1139
1140 template <int dim, int spacedim>
1141 void
 1142 ParticleHandler<dim, spacedim>::set_particle_positions(
 1143 const Function<spacedim> &function,
1144 const bool displace_particles)
1145 {
1146 // The function should have sufficient components to displace the
1147 // particles
1148 AssertDimension(function.n_components, spacedim);
1149
1150 Vector<double> new_position(spacedim);
1151 for (auto &particle : *this)
1152 {
1153 Point<spacedim> &particle_location = particle.get_location();
1154 function.vector_value(particle_location, new_position);
1155 if (displace_particles)
1156 for (unsigned int d = 0; d < spacedim; ++d)
1157 particle_location[d] += new_position[d];
1158 else
1159 for (unsigned int d = 0; d < spacedim; ++d)
1160 particle_location[d] = new_position[d];
1161 }
1162 sort_particles_into_subdomains_and_cells();
1163 }
1164
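 // --------------------------------------------------------------------------
 // [Editor's illustration, not part of particle_handler.cc] A sketch of the
 // Function based set_particle_positions() overload above, used to displace
 // every particle by a prescribed field. The constant drift field and the
 // time step are assumptions of this example.
 #include <deal.II/base/function.h>
 #include <deal.II/base/point.h>
 #include <deal.II/lac/vector.h>
 #include <deal.II/particles/particle_handler.h>

 template <int dim>
 class ConstantDrift : public dealii::Function<dim>
 {
 public:
   explicit ConstantDrift(const double step_length)
     : dealii::Function<dim>(dim)
     , step_length(step_length)
   {}

   virtual void vector_value(const dealii::Point<dim> &,
                             dealii::Vector<double> &values) const override
   {
     values    = 0.;
     values[0] = step_length; // move every particle in x-direction
   }

 private:
   const double step_length;
 };

 template <int dim>
 void drift_particles(dealii::Particles::ParticleHandler<dim> &particle_handler,
                      const double                             time_step)
 {
   // displace_particles == true adds the function value to each location and
   // afterwards calls sort_particles_into_subdomains_and_cells().
   particle_handler.set_particle_positions(ConstantDrift<dim>(time_step),
                                           /*displace_particles=*/true);
 }
 // --------------------------------------------------------------------------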
1165
1166
1167 template <int dim, int spacedim>
 1168 PropertyPool<dim, spacedim> &
 1169 ParticleHandler<dim, spacedim>::get_property_pool() const
 1170 {
1171 return *property_pool;
1172 }
1173
1174
1175
1176 namespace
1177 {
1187 template <int dim>
1188 bool
1189 compare_particle_association(
1190 const unsigned int a,
1191 const unsigned int b,
1192 const Tensor<1, dim> &particle_direction,
1193 const std::vector<Tensor<1, dim>> &center_directions)
1194 {
1195 const double scalar_product_a = center_directions[a] * particle_direction;
1196 const double scalar_product_b = center_directions[b] * particle_direction;
1197
 1198 // The function is supposed to return whether a comes before b. We are
 1199 // looking for the alignment of particle direction and center direction,
 1200 // so return whether the scalar product for a is the larger one.
1201 return (scalar_product_a > scalar_product_b);
1202 }
1203 } // namespace
1204
1205
1206
1207 template <int dim, int spacedim>
1208 void
 1209 ParticleHandler<dim, spacedim>::sort_particles_into_subdomains_and_cells()
 1210 {
1211 Assert(triangulation != nullptr, ExcInternalError());
1212 Assert(cells_to_particle_cache.size() == triangulation->n_active_cells(),
 1213 ExcInternalError());
 1214
1215 // TODO: The current algorithm only works for particles that are in
1216 // the local domain or in ghost cells, because it only knows the
1217 // subdomain_id of ghost cells, but not of artificial cells. This
1218 // can be extended using the distributed version of compute point
1219 // locations.
1220 // TODO: Extend this function to allow keeping particles on other
1221 // processes around (with an invalid cell).
1222
1223 std::vector<particle_iterator> particles_out_of_cell;
1224
1225 // Reserve some space for particles that need sorting to avoid frequent
1226 // re-allocation. Guess 25% of particles need sorting. Balance memory
1227 // overhead and performance.
1228 particles_out_of_cell.reserve(n_locally_owned_particles() / 4);
1229
1230 // Now update the reference locations of the moved particles
1231 std::vector<Point<spacedim>> real_locations;
1232 std::vector<Point<dim>> reference_locations;
1233 real_locations.reserve(global_max_particles_per_cell);
1234 reference_locations.reserve(global_max_particles_per_cell);
1235
1236 for (const auto &cell : triangulation->active_cell_iterators())
1237 {
1238 // Particles can be inserted into arbitrary cells, e.g. if their cell is
1239 // not known. However, for artificial cells we can not evaluate
1240 // the reference position of particles. Do not sort particles that are
1241 // not locally owned, because they will be sorted by the process that
1242 // owns them.
1243 if (cell->is_locally_owned() == false)
1244 {
1245 continue;
1246 }
1247
1248 const unsigned int n_pic = n_particles_in_cell(cell);
1249 auto pic = particles_in_cell(cell);
1250
1251 real_locations.clear();
1252 for (const auto &particle : pic)
1253 real_locations.push_back(particle.get_location());
1254
1255 reference_locations.resize(n_pic);
1256 mapping->transform_points_real_to_unit_cell(cell,
1257 real_locations,
1258 reference_locations);
1259
1260 auto particle = pic.begin();
1261 for (const auto &p_unit : reference_locations)
1262 {
1263 if (numbers::is_finite(p_unit[0]) &&
 1264 GeometryInfo<dim>::is_inside_unit_cell(p_unit,
 1265 tolerance_inside_cell))
1266 particle->set_reference_location(p_unit);
1267 else
1268 particles_out_of_cell.push_back(particle);
1269
1270 ++particle;
1271 }
1272 }
1273
1274 // There are three reasons why a particle is not in its old cell:
1275 // It moved to another cell, to another subdomain or it left the mesh.
1276 // Particles that moved to another cell are updated and moved inside the
1277 // particles vector, particles that moved to another domain are
1278 // collected in the moved_particles_domain vector. Particles that left
1279 // the mesh completely are ignored and removed.
1280 std::map<types::subdomain_id, std::vector<particle_iterator>>
1281 moved_particles;
1282 std::map<
 1283 types::subdomain_id,
 1284 std::vector<typename Triangulation<dim, spacedim>::active_cell_iterator>>
1285 moved_cells;
1286
1287 // We do not know exactly how many particles are lost, exchanged between
1288 // domains, or remain on this process. Therefore we pre-allocate
1289 // approximate sizes for these vectors. If more space is needed an
1290 // automatic and relatively fast (compared to other parts of this
1291 // algorithm) re-allocation will happen.
1292 using vector_size = typename std::vector<particle_iterator>::size_type;
1293
1294 std::set<types::subdomain_id> ghost_owners;
1295 if (const auto parallel_triangulation =
1296 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
1297 &*triangulation))
1298 ghost_owners = parallel_triangulation->ghost_owners();
1299
1300 // Reserve some space for particles that need communication to avoid
1301 // frequent re-allocation. Guess 25% of particles out of their old cell need
1302 // communication. Balance memory overhead and performance.
1303 for (const auto &ghost_owner : ghost_owners)
1304 moved_particles[ghost_owner].reserve(particles_out_of_cell.size() / 4);
1305 for (const auto &ghost_owner : ghost_owners)
1306 moved_cells[ghost_owner].reserve(particles_out_of_cell.size() / 4);
1307
1308 {
1309 // Create a map from vertices to adjacent cells using grid cache
1310 const std::vector<
1311 std::set<typename Triangulation<dim, spacedim>::active_cell_iterator>>
1312 &vertex_to_cells = triangulation_cache->get_vertex_to_cell_map();
1313
1314 // Create a corresponding map of vectors from vertex to cell center
1315 // using grid cache
1316 const std::vector<std::vector<Tensor<1, spacedim>>>
1317 &vertex_to_cell_centers =
1318 triangulation_cache->get_vertex_to_cell_centers_directions();
1319
1320 std::vector<unsigned int> search_order;
1321
1322 // Reuse these vectors below, but only with a single element.
1323 // Avoid resizing for every particle.
1324 Point<dim> invalid_reference_point;
1325 Point<spacedim> invalid_point;
1326 invalid_reference_point[0] = std::numeric_limits<double>::infinity();
1327 invalid_point[0] = std::numeric_limits<double>::infinity();
1328 reference_locations.resize(1, invalid_reference_point);
1329 real_locations.resize(1, invalid_point);
1330
1331 // Find the cells that the particles moved to.
1332 for (auto &out_particle : particles_out_of_cell)
1333 {
1334 // make a copy of the current cell, since we will modify the
1335 // variable current_cell below, but we need the original in
1336 // the case the particle is not found
1337 auto current_cell = out_particle->get_surrounding_cell();
1338
1339 real_locations[0] = out_particle->get_location();
1340
1341 // Record if the new cell was found
1342 bool found_cell = false;
1343
1344 // Check if the particle is in one of the old cell's neighbors
1345 // that are adjacent to the closest vertex
1346 const unsigned int closest_vertex =
1347 GridTools::find_closest_vertex_of_cell<dim, spacedim>(
1348 current_cell, out_particle->get_location(), *mapping);
1349 const unsigned int closest_vertex_index =
1350 current_cell->vertex_index(closest_vertex);
1351
1352 const auto &candidate_cells = vertex_to_cells[closest_vertex_index];
1353 const unsigned int n_candidate_cells = candidate_cells.size();
1354
1355 // The order of searching through the candidate cells matters for
1356 // performance reasons. Start with a simple order.
1357 search_order.resize(n_candidate_cells);
1358 for (unsigned int i = 0; i < n_candidate_cells; ++i)
1359 search_order[i] = i;
1360
1361 // If the particle is not on a vertex, we can do better by
1362 // sorting the candidate cells by alignment with
1363 // the vertex_to_particle direction.
1364 Tensor<1, spacedim> vertex_to_particle =
1365 out_particle->get_location() - current_cell->vertex(closest_vertex);
1366
1367 // Only do this if the particle is not on a vertex, otherwise we
1368 // cannot normalize
1369 if (vertex_to_particle.norm_square() >
1370 1e4 * std::numeric_limits<double>::epsilon() *
1371 std::numeric_limits<double>::epsilon() *
1372 vertex_to_cell_centers[closest_vertex_index][0].norm_square())
1373 {
1374 vertex_to_particle /= vertex_to_particle.norm();
1375 const auto &vertex_to_cells =
1376 vertex_to_cell_centers[closest_vertex_index];
1377
1378 std::sort(search_order.begin(),
1379 search_order.end(),
1380 [&vertex_to_particle,
1381 &vertex_to_cells](const unsigned int a,
1382 const unsigned int b) {
1383 return compare_particle_association(
1384 a, b, vertex_to_particle, vertex_to_cells);
1385 });
1386 }
1387
1388 // Search all of the candidate cells according to the determined
1389 // order. Most likely we will find the particle in them.
1390 for (unsigned int i = 0; i < n_candidate_cells; ++i)
1391 {
1392 typename std::set<
 1393 typename Triangulation<dim, spacedim>::active_cell_iterator>::
 1394 const_iterator candidate_cell = candidate_cells.begin();
1395
1396 std::advance(candidate_cell, search_order[i]);
1397 mapping->transform_points_real_to_unit_cell(*candidate_cell,
1398 real_locations,
1399 reference_locations);
1400
1401 if (GeometryInfo<dim>::is_inside_unit_cell(reference_locations[0],
1402 tolerance_inside_cell))
1403 {
1404 current_cell = *candidate_cell;
1405 found_cell = true;
1406 break;
1407 }
1408 }
1409
1410 // If we did not find a cell the particle is not in a neighbor of
1411 // its old cell. Look for the new cell in the whole local domain.
1412 // This case should be rare.
1413 if (!found_cell)
1414 {
1415 // For some clang-based compilers and boost versions the call to
1416 // RTree::query doesn't compile. We use a slower implementation as
1417 // workaround.
1418 // This is fixed in boost in
1419 // https://github.com/boostorg/numeric_conversion/commit/50a1eae942effb0a9b90724323ef8f2a67e7984a
1420#if defined(DEAL_II_WITH_BOOST_BUNDLED) || \
1421 !(defined(__clang_major__) && __clang_major__ >= 16) || \
1422 BOOST_VERSION >= 108100
1423
1424 std::vector<std::pair<Point<spacedim>, unsigned int>>
1425 closest_vertex_in_domain;
1426 triangulation_cache->get_used_vertices_rtree().query(
1427 boost::geometry::index::nearest(out_particle->get_location(),
1428 1),
1429 std::back_inserter(closest_vertex_in_domain));
1430
1431 // We should have one and only one result
1432 AssertDimension(closest_vertex_in_domain.size(), 1);
1433 const unsigned int closest_vertex_index_in_domain =
1434 closest_vertex_in_domain[0].second;
1435#else
1436 const unsigned int closest_vertex_index_in_domain =
 1437 GridTools::find_closest_vertex(*mapping,
 1438 *triangulation,
 1439 out_particle->get_location());
1440#endif
1441
1442 // Search all of the cells adjacent to the closest vertex of the
1443 // domain. Most likely we will find the particle in them.
1444 for (const auto &cell :
1445 vertex_to_cells[closest_vertex_index_in_domain])
1446 {
1447 mapping->transform_points_real_to_unit_cell(
1448 cell, real_locations, reference_locations);
1449
 1450 if (GeometryInfo<dim>::is_inside_unit_cell(
 1451 reference_locations[0], tolerance_inside_cell))
1452 {
1453 current_cell = cell;
1454 found_cell = true;
1455 break;
1456 }
1457 }
1458 }
1459
1460 if (!found_cell)
1461 {
1462 // We can find no cell for this particle. It has left the
1463 // domain due to an integration error or an open boundary.
1464 // Signal the loss and move on.
1465 signals.particle_lost(out_particle,
1466 out_particle->get_surrounding_cell());
1467 continue;
1468 }
1469
1470 // If we are here, we found a cell and reference position for this
1471 // particle
1472 out_particle->set_reference_location(reference_locations[0]);
1473
1474 // Reinsert the particle into our domain if we own its cell.
1475 // Mark it for MPI transfer otherwise
1476 if (current_cell->is_locally_owned())
1477 {
 1478 typename PropertyPool<dim, spacedim>::Handle &old =
 1479 out_particle->particles_in_cell
1480 ->particles[out_particle->particle_index_within_cell];
1481
1482 // Avoid deallocating the memory of this particle
1483 const auto old_value = old;
 1484 old = PropertyPool<dim, spacedim>::invalid_handle;
 1485
1486 // Allocate particle with the old handle
1487 insert_particle(old_value, current_cell);
1488 }
1489 else
1490 {
1491 moved_particles[current_cell->subdomain_id()].push_back(
1492 out_particle);
1493 moved_cells[current_cell->subdomain_id()].push_back(current_cell);
1494 }
1495 }
1496 }
1497
1498 // Exchange particles between processors if we have more than one process
1499#ifdef DEAL_II_WITH_MPI
1500 if (const auto parallel_triangulation =
1501 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
1502 &*triangulation))
1503 {
 1504 if (dealii::Utilities::MPI::n_mpi_processes(
 1505 parallel_triangulation->get_communicator()) > 1)
1506 send_recv_particles(moved_particles, moved_cells);
1507 }
1508#endif
1509
1510 // remove_particles also calls update_cached_numbers()
1511 remove_particles(particles_out_of_cell);
1512
1513 // now make sure particle data is sorted in order of iteration
1514 std::vector<typename PropertyPool<dim, spacedim>::Handle> unsorted_handles;
1515 unsorted_handles.reserve(property_pool->n_registered_slots());
1516
1517 typename PropertyPool<dim, spacedim>::Handle sorted_handle = 0;
1518 for (auto &particles_in_cell : particles)
1519 for (auto &particle : particles_in_cell.particles)
1520 {
1521 unsorted_handles.push_back(particle);
1522 particle = sorted_handle++;
1523 }
1524
1525 property_pool->sort_memory_slots(unsorted_handles);
1526
 1527 }
1528
1529
1530
1531 template <int dim, int spacedim>
1532 void
 1533 ParticleHandler<dim, spacedim>::exchange_ghost_particles(
 1534 const bool enable_cache)
1535 {
1536 // Nothing to do in serial computations
1537 const auto parallel_triangulation =
1538 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
1539 &*triangulation);
1540 if (parallel_triangulation != nullptr)
1541 {
 1542 if (dealii::Utilities::MPI::n_mpi_processes(
 1543 parallel_triangulation->get_communicator()) == 1)
1544 return;
1545 }
1546 else
1547 return;
1548
1549#ifndef DEAL_II_WITH_MPI
1550 (void)enable_cache;
1551#else
1552 // Clear ghost particles and their properties
1553 for (const auto &cell : triangulation->active_cell_iterators())
1554 if (cell->is_ghost() &&
1555 cells_to_particle_cache[cell->active_cell_index()] != particles.end())
1556 {
1557 Assert(cells_to_particle_cache[cell->active_cell_index()]->cell ==
1558 cell,
 1559 ExcInternalError());
 1560 // Clear particle properties
1561 for (auto &ghost_particle :
1562 cells_to_particle_cache[cell->active_cell_index()]->particles)
1563 property_pool->deregister_particle(ghost_particle);
1564
1565 // Clear particles themselves
1566 particles.erase(cells_to_particle_cache[cell->active_cell_index()]);
1567 cells_to_particle_cache[cell->active_cell_index()] = particles.end();
1568 }
1569
1570 // Clear ghost particles cache and invalidate it
1571 ghost_particles_cache.ghost_particles_by_domain.clear();
1572 ghost_particles_cache.valid = false;
1573
1574 // In the case of a parallel simulation with periodic boundary conditions
1575 // the vertices associated with periodic boundaries are not directly
1576 // connected to the ghost cells but they are connected to the ghost cells
1577 // through their coinciding vertices. We gather this information using the
1578 // vertices_with_ghost_neighbors map
1579 const std::map<unsigned int, std::set<types::subdomain_id>>
 1580 &vertices_with_ghost_neighbors =
 1581 triangulation_cache->get_vertices_with_ghost_neighbors();
1582
1583 const std::set<types::subdomain_id> ghost_owners =
1584 parallel_triangulation->ghost_owners();
1585 for (const auto ghost_owner : ghost_owners)
1586 ghost_particles_cache.ghost_particles_by_domain[ghost_owner].reserve(
1587 n_locally_owned_particles() / 4);
1588
1589 const std::vector<std::set<unsigned int>> vertex_to_neighbor_subdomain =
1590 triangulation_cache->get_vertex_to_neighbor_subdomain();
1591
1592 for (const auto &cell : triangulation->active_cell_iterators())
1593 {
1594 if (cell->is_locally_owned())
1595 {
1596 std::set<unsigned int> cell_to_neighbor_subdomain;
1597 for (const unsigned int v : cell->vertex_indices())
1598 {
1599 const auto vertex_ghost_neighbors =
1600 vertices_with_ghost_neighbors.find(cell->vertex_index(v));
1601 if (vertex_ghost_neighbors !=
 1602 vertices_with_ghost_neighbors.end())
 1603 {
1604 cell_to_neighbor_subdomain.insert(
1605 vertex_ghost_neighbors->second.begin(),
1606 vertex_ghost_neighbors->second.end());
1607 }
1608 }
1609
1610 if (cell_to_neighbor_subdomain.size() > 0)
1611 {
1612 const particle_iterator_range particle_range =
1613 particles_in_cell(cell);
1614
1615 for (const auto domain : cell_to_neighbor_subdomain)
1616 {
1617 for (typename particle_iterator_range::iterator particle =
1618 particle_range.begin();
1619 particle != particle_range.end();
1620 ++particle)
1621 ghost_particles_cache.ghost_particles_by_domain[domain]
1622 .push_back(particle);
1623 }
1624 }
1625 }
1626 }
1627
1628 send_recv_particles(
1629 ghost_particles_cache.ghost_particles_by_domain,
1630 std::map<
 1631 types::subdomain_id,
 1632 std::vector<
 1633 typename Triangulation<dim, spacedim>::active_cell_iterator>>(),
 1634 enable_cache);
1635#endif
1636 }
1637
1638
1639
1640 template <int dim, int spacedim>
1641 void
 1642 ParticleHandler<dim, spacedim>::update_ghost_particles()
 1643 {
1644 // Nothing to do in serial computations
1645 const auto parallel_triangulation =
1646 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
1647 &*triangulation);
1648 if (parallel_triangulation == nullptr ||
 1649 dealii::Utilities::MPI::n_mpi_processes(
 1650 parallel_triangulation->get_communicator()) == 1)
1651 {
1652 return;
1653 }
1654
1655
1656#ifdef DEAL_II_WITH_MPI
1657 // First clear the current ghost_particle information
1658 // ghost_particles.clear();
1659 Assert(ghost_particles_cache.valid,
1660 ExcMessage(
1661 "Ghost particles cannot be updated if they first have not been "
1662 "exchanged at least once with the cache enabled"));
1663
1664
1665 send_recv_particles_properties_and_location(
1666 ghost_particles_cache.ghost_particles_by_domain);
1667#endif
1668 }
1669
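 // --------------------------------------------------------------------------
 // [Editor's illustration, not part of particle_handler.cc] A sketch of the
 // intended call sequence for the two ghost-particle members above,
 // exchange_ghost_particles() and update_ghost_particles(), within a time
 // loop: build the ghost layer once with caching enabled, then only refresh
 // the already known ghost particles after each local update. The loop
 // structure is an assumption of this example.
 #include <deal.II/particles/particle_handler.h>

 template <int dim>
 void run_time_loop(dealii::Particles::ParticleHandler<dim> &particle_handler,
                    const unsigned int                       n_time_steps)
 {
   // Full exchange, storing the send/receive pattern in the cache.
   particle_handler.exchange_ghost_particles(/*enable_cache=*/true);

   for (unsigned int step = 0; step < n_time_steps; ++step)
     {
       // ... update locally owned particle properties/locations here,
       //     without letting them change cells (not shown) ...

       // Cheaper than a full exchange: only locations and properties of the
       // cached ghost particles are communicated again.
       particle_handler.update_ghost_particles();
     }
 }
 // --------------------------------------------------------------------------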
1670
1671
1672#ifdef DEAL_II_WITH_MPI
1673 template <int dim, int spacedim>
1674 void
 1675 ParticleHandler<dim, spacedim>::send_recv_particles(
 1676 const std::map<types::subdomain_id, std::vector<particle_iterator>>
1677 &particles_to_send,
1678 const std::map<
 1679 types::subdomain_id,
 1680 std::vector<typename Triangulation<dim, spacedim>::active_cell_iterator>>
 1681 &send_cells,
1682 const bool build_cache)
1683 {
1684 Assert(triangulation != nullptr, ExcInternalError());
1685 Assert(cells_to_particle_cache.size() == triangulation->n_active_cells(),
 1686 ExcInternalError());
 1687
1688 ghost_particles_cache.valid = build_cache;
1689
1690 const auto parallel_triangulation =
1691 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
1692 &*triangulation);
1693 Assert(parallel_triangulation,
1694 ExcMessage("This function is only implemented for "
1695 "parallel::TriangulationBase objects."));
1696
1697 // Determine the communication pattern
1698 const std::set<types::subdomain_id> ghost_owners =
1699 parallel_triangulation->ghost_owners();
1700 const std::vector<types::subdomain_id> neighbors(ghost_owners.begin(),
1701 ghost_owners.end());
1702 const unsigned int n_neighbors = neighbors.size();
1703
1704 if (send_cells.size() != 0)
1705 Assert(particles_to_send.size() == send_cells.size(), ExcInternalError());
1706
 1707 // If we do not know the subdomain this particle needs to be sent to,
1708 // throw an error
1709 Assert(particles_to_send.find(numbers::artificial_subdomain_id) ==
1710 particles_to_send.end(),
1712
1713 // TODO: Implement the shipping of particles to processes that are not
1714 // ghost owners of the local domain
1715 for (auto send_particles = particles_to_send.begin();
1716 send_particles != particles_to_send.end();
1717 ++send_particles)
1718 Assert(ghost_owners.find(send_particles->first) != ghost_owners.end(),
1720
1721 std::size_t n_send_particles = 0;
1722 for (auto send_particles = particles_to_send.begin();
1723 send_particles != particles_to_send.end();
1724 ++send_particles)
1725 n_send_particles += send_particles->second.size();
1726
1727 const unsigned int cellid_size = sizeof(CellId::binary_type);
1728
1729 // Containers for the amount and offsets of data we will send
1730 // to other processors and the data itself.
1731 std::vector<unsigned int> n_send_data(n_neighbors, 0);
1732 std::vector<unsigned int> send_offsets(n_neighbors, 0);
1733 std::vector<char> send_data;
1734
1735 Particle<dim, spacedim> test_particle;
1736 test_particle.set_property_pool(*property_pool);
1737
1738 const unsigned int individual_particle_data_size =
1739 test_particle.serialized_size_in_bytes() +
1740 (size_callback ? size_callback() : 0);
1741
1742 const unsigned int individual_total_particle_data_size =
1743 individual_particle_data_size + cellid_size;
1744
 1745 // Only serialize things if there are particles to be sent.
 1746 // We cannot return early even if no particles
 1747 // are sent, because we might receive particles from other processes
1748 if (n_send_particles > 0)
1749 {
1750 // Allocate space for sending particle data
1751 send_data.resize(n_send_particles *
1752 individual_total_particle_data_size);
1753
1754 void *data = static_cast<void *>(&send_data.front());
1755
1756 // Serialize the data sorted by receiving process
1757 for (unsigned int i = 0; i < n_neighbors; ++i)
1758 {
1759 send_offsets[i] = reinterpret_cast<std::size_t>(data) -
1760 reinterpret_cast<std::size_t>(&send_data.front());
1761
1762 const unsigned int n_particles_to_send =
1763 particles_to_send.at(neighbors[i]).size();
1764
1765 Assert(static_cast<std::size_t>(n_particles_to_send) *
1766 individual_total_particle_data_size ==
1767 static_cast<std::size_t>(
1768 n_particles_to_send *
1769 individual_total_particle_data_size),
1770 ExcMessage("Overflow when trying to send particle "
1771 "data"));
1772
1773 for (unsigned int j = 0; j < n_particles_to_send; ++j)
1774 {
1775 // If no target cells are given, use the iterator
1776 // information
 1777 typename Triangulation<dim, spacedim>::cell_iterator
 1778 cell;
1779 if (send_cells.empty())
1780 cell = particles_to_send.at(neighbors[i])[j]
1781 ->get_surrounding_cell();
1782 else
1783 cell = send_cells.at(neighbors[i])[j];
1784
1785 const CellId::binary_type cellid =
1786 cell->id().template to_binary<dim>();
1787 memcpy(data, &cellid, cellid_size);
1788 data = static_cast<char *>(data) + cellid_size;
1789
1790 data = particles_to_send.at(neighbors[i])[j]
1791 ->write_particle_data_to_memory(data);
1792 if (store_callback)
1793 data =
1794 store_callback(particles_to_send.at(neighbors[i])[j], data);
1795 }
1796 n_send_data[i] = n_particles_to_send;
1797 }
1798 }
1799
1800 // Containers for the data we will receive from other processors
1801 std::vector<unsigned int> n_recv_data(n_neighbors);
1802 std::vector<unsigned int> recv_offsets(n_neighbors);
1803
1804 {
1805 const int mpi_tag = Utilities::MPI::internal::Tags::
 1806 particle_handler_send_recv_particles_setup;
 1807
1808 std::vector<MPI_Request> n_requests(2 * n_neighbors);
1809 for (unsigned int i = 0; i < n_neighbors; ++i)
1810 {
1811 const int ierr = MPI_Irecv(&(n_recv_data[i]),
1812 1,
1813 MPI_UNSIGNED,
1814 neighbors[i],
1815 mpi_tag,
1816 parallel_triangulation->get_communicator(),
1817 &(n_requests[2 * i]));
1818 AssertThrowMPI(ierr);
1819 }
1820 for (unsigned int i = 0; i < n_neighbors; ++i)
1821 {
1822 const int ierr = MPI_Isend(&(n_send_data[i]),
1823 1,
1824 MPI_UNSIGNED,
1825 neighbors[i],
1826 mpi_tag,
1827 parallel_triangulation->get_communicator(),
1828 &(n_requests[2 * i + 1]));
1829 AssertThrowMPI(ierr);
1830 }
1831 const int ierr =
1832 MPI_Waitall(2 * n_neighbors, n_requests.data(), MPI_STATUSES_IGNORE);
1833 AssertThrowMPI(ierr);
1834 }
1835
1836 // Determine how many particles and data we will receive
1837 unsigned int total_recv_data = 0;
1838 for (unsigned int neighbor_id = 0; neighbor_id < n_neighbors; ++neighbor_id)
1839 {
1840 recv_offsets[neighbor_id] = total_recv_data;
1841 total_recv_data +=
1842 n_recv_data[neighbor_id] * individual_total_particle_data_size;
1843 }
1844
1845 // Set up the space for the received particle data
1846 std::vector<char> recv_data(total_recv_data);
1847
1848 // Exchange the particle data between domains
1849 {
1850 std::vector<MPI_Request> requests(2 * n_neighbors);
1851 unsigned int send_ops = 0;
1852 unsigned int recv_ops = 0;
1853
1854 const int mpi_tag = Utilities::MPI::internal::Tags::
1855 particle_handler_send_recv_particles_send;
1856
1857 for (unsigned int i = 0; i < n_neighbors; ++i)
1858 if (n_recv_data[i] > 0)
1859 {
1860 const int ierr =
1861 MPI_Irecv(&(recv_data[recv_offsets[i]]),
1862 n_recv_data[i] * individual_total_particle_data_size,
1863 MPI_CHAR,
1864 neighbors[i],
1865 mpi_tag,
1866 parallel_triangulation->get_communicator(),
1867 &(requests[send_ops]));
1868 AssertThrowMPI(ierr);
1869 ++send_ops;
1870 }
1871
1872 for (unsigned int i = 0; i < n_neighbors; ++i)
1873 if (n_send_data[i] > 0)
1874 {
1875 const int ierr =
1876 MPI_Isend(&(send_data[send_offsets[i]]),
1877 n_send_data[i] * individual_total_particle_data_size,
1878 MPI_CHAR,
1879 neighbors[i],
1880 mpi_tag,
1881 parallel_triangulation->get_communicator(),
1882 &(requests[send_ops + recv_ops]));
1883 AssertThrowMPI(ierr);
1884 ++recv_ops;
1885 }
1886 const int ierr =
1887 MPI_Waitall(send_ops + recv_ops, requests.data(), MPI_STATUSES_IGNORE);
1888 AssertThrowMPI(ierr);
1889 }
1890
1891 // Put the received particles into the domain if they are in the
1892 // triangulation
1893 const void *recv_data_it = static_cast<const void *>(recv_data.data());
1894
1895 // Store the particle iterators in the cache
1896 auto &ghost_particles_iterators =
1897 ghost_particles_cache.ghost_particles_iterators;
1898
1899 if (build_cache)
1900 {
1901 ghost_particles_iterators.clear();
1902
1903 auto &send_pointers_particles = ghost_particles_cache.send_pointers;
1904 send_pointers_particles.assign(n_neighbors + 1, 0);
1905
1906 for (unsigned int i = 0; i < n_neighbors; ++i)
1907 send_pointers_particles[i + 1] =
1908 send_pointers_particles[i] +
1909 n_send_data[i] * individual_particle_data_size;
1910
1911 auto &recv_pointers_particles = ghost_particles_cache.recv_pointers;
1912 recv_pointers_particles.assign(n_neighbors + 1, 0);
1913
1914 for (unsigned int i = 0; i < n_neighbors; ++i)
1915 recv_pointers_particles[i + 1] =
1916 recv_pointers_particles[i] +
1917 n_recv_data[i] * individual_particle_data_size;
1918
1919 ghost_particles_cache.neighbors = neighbors;
1920
1921 ghost_particles_cache.send_data.resize(
1922 ghost_particles_cache.send_pointers.back());
1923 ghost_particles_cache.recv_data.resize(
1924 ghost_particles_cache.recv_pointers.back());
1925 }
1926
1927 while (reinterpret_cast<std::size_t>(recv_data_it) -
1928 reinterpret_cast<std::size_t>(recv_data.data()) <
1929 total_recv_data)
1930 {
1931 CellId::binary_type binary_cellid;
1932 memcpy(&binary_cellid, recv_data_it, cellid_size);
1933 const CellId id(binary_cellid);
1934 recv_data_it = static_cast<const char *>(recv_data_it) + cellid_size;
1935
1936 const typename Triangulation<dim, spacedim>::active_cell_iterator cell =
1937 triangulation->create_cell_iterator(id);
1938
1939 insert_particle(property_pool->register_particle(), cell);
1940 const typename particle_container::iterator &cache =
1941 cells_to_particle_cache[cell->active_cell_index()];
1942 Assert(cache->cell == cell, ExcInternalError());
1943
1944 particle_iterator particle_it(cache,
1945 *property_pool,
1946 cache->particles.size() - 1);
1947
1948 recv_data_it =
1949 particle_it->read_particle_data_from_memory(recv_data_it);
1950
1951 if (load_callback)
1952 recv_data_it = load_callback(particle_it, recv_data_it);
1953
1954 if (build_cache) // TODO: is this safe?
1955 ghost_particles_iterators.push_back(particle_it);
1956 }
1957
1958 AssertThrow(recv_data_it == recv_data.data() + recv_data.size(),
1959 ExcMessage(
1960 "The amount of data that was read into new particles "
1961 "does not match the amount of data sent around."));
1962 }
1963#endif
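// The routine above follows the usual two-phase point-to-point pattern: first
// the per-neighbor particle counts are exchanged, then the variable-sized
// serialized payload. An illustrative sketch of that pattern (not taken from
// this file; `comm`, `neighbors`, `n_send`, and `tag` are assumed to exist):
//
//   std::vector<unsigned int> n_recv(neighbors.size());
//   std::vector<MPI_Request>  requests(2 * neighbors.size());
//   for (unsigned int i = 0; i < neighbors.size(); ++i)
//     MPI_Irecv(&n_recv[i], 1, MPI_UNSIGNED, neighbors[i], tag, comm,
//               &requests[2 * i]);
//   for (unsigned int i = 0; i < neighbors.size(); ++i)
//     MPI_Isend(&n_send[i], 1, MPI_UNSIGNED, neighbors[i], tag, comm,
//               &requests[2 * i + 1]);
//   MPI_Waitall(2 * neighbors.size(), requests.data(), MPI_STATUSES_IGNORE);
//   // ...then size the receive buffers from n_recv and run a second round
//   // of MPI_Irecv/MPI_Isend for the serialized particle data.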
1964
1965
1966
1967#ifdef DEAL_II_WITH_MPI
1968 template <int dim, int spacedim>
1969 void
1970 ParticleHandler<dim, spacedim>::send_recv_particles_properties_and_location(
1971 const std::map<types::subdomain_id, std::vector<particle_iterator>>
1972 &particles_to_send)
1973 {
1974 const auto parallel_triangulation =
1975 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
1976 &*triangulation);
1977 Assert(
1978 parallel_triangulation,
1979 ExcMessage(
1980 "This function is only implemented for parallel::TriangulationBase "
1981 "objects."));
1982
1983 const auto &neighbors = ghost_particles_cache.neighbors;
1984 const auto &send_pointers = ghost_particles_cache.send_pointers;
1985 const auto &recv_pointers = ghost_particles_cache.recv_pointers;
1986
1987 std::vector<char> &send_data = ghost_particles_cache.send_data;
1988
1989 // Fill data to send
1990 if (send_pointers.back() > 0)
1991 {
1992 void *data = static_cast<void *>(&send_data.front());
1993
1994 // Serialize the data sorted by receiving process
1995 for (const auto i : neighbors)
1996 for (const auto &p : particles_to_send.at(i))
1997 {
1998 data = p->write_particle_data_to_memory(data);
1999 if (store_callback)
2000 data = store_callback(p, data);
2001 }
2002 }
2003
2004 std::vector<char> &recv_data = ghost_particles_cache.recv_data;
2005
2006 // Exchange the particle data between domains
2007 {
2008 std::vector<MPI_Request> requests(2 * neighbors.size());
2009 unsigned int send_ops = 0;
2010 unsigned int recv_ops = 0;
2011
2012 const int mpi_tag = Utilities::MPI::internal::Tags::
2013 particle_handler_send_recv_particles_send;
2014
2015 for (unsigned int i = 0; i < neighbors.size(); ++i)
2016 if ((recv_pointers[i + 1] - recv_pointers[i]) > 0)
2017 {
2018 const int ierr =
2019 MPI_Irecv(recv_data.data() + recv_pointers[i],
2020 recv_pointers[i + 1] - recv_pointers[i],
2021 MPI_CHAR,
2022 neighbors[i],
2023 mpi_tag,
2024 parallel_triangulation->get_communicator(),
2025 &(requests[send_ops]));
2026 AssertThrowMPI(ierr);
2027 ++send_ops;
2028 }
2029
2030 for (unsigned int i = 0; i < neighbors.size(); ++i)
2031 if ((send_pointers[i + 1] - send_pointers[i]) > 0)
2032 {
2033 const int ierr =
2034 MPI_Isend(send_data.data() + send_pointers[i],
2035 send_pointers[i + 1] - send_pointers[i],
2036 MPI_CHAR,
2037 neighbors[i],
2038 mpi_tag,
2039 parallel_triangulation->get_communicator(),
2040 &(requests[send_ops + recv_ops]));
2041 AssertThrowMPI(ierr);
2042 ++recv_ops;
2043 }
2044 const int ierr =
2045 MPI_Waitall(send_ops + recv_ops, requests.data(), MPI_STATUSES_IGNORE);
2046 AssertThrowMPI(ierr);
2047 }
2048
2049 // Put the received particles into the domain if they are in the
2050 // triangulation
2051 const void *recv_data_it = static_cast<const void *>(recv_data.data());
2052
2053 // Gather ghost particle iterators from the cache
2054 auto &ghost_particles_iterators =
2055 ghost_particles_cache.ghost_particles_iterators;
2056
2057 for (auto &recv_particle : ghost_particles_iterators)
2058 {
2059 // Update particle data using previously allocated memory space
2060 // for efficiency reasons
2061 recv_data_it =
2062 recv_particle->read_particle_data_from_memory(recv_data_it);
2063
2064 Assert(recv_particle->particles_in_cell->cell->is_ghost(),
2065 ExcInternalError());
2066
2067 if (load_callback)
2068 recv_data_it = load_callback(
2069 particle_iterator(recv_particle->particles_in_cell,
2070 *property_pool,
2071 recv_particle->particle_index_within_cell),
2072 recv_data_it);
2073 }
2074
2075 AssertThrow(recv_data_it == recv_data.data() + recv_data.size(),
2076 ExcMessage(
2077 "The amount of data that was read into new particles "
2078 "does not match the amount of data sent around."));
2079 }
2080#endif
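// The cached variant above is normally reached through the public interface
// rather than called directly. Illustrative usage sketch (assuming a
// ParticleHandler `particle_handler` whose particles move between steps while
// the mesh partitioning stays fixed):
//
//   particle_handler.exchange_ghost_particles(/*enable_cache=*/true);
//   for (unsigned int step = 0; step < n_steps; ++step)
//     {
//       // ...advect locally owned particles...
//       particle_handler.update_ghost_particles(); // reuses the cache
//     }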
2081
2082 template <int dim, int spacedim>
2083 void
2084 ParticleHandler<dim, spacedim>::register_additional_store_load_functions(
2085 const std::function<std::size_t()> &size_callb,
2086 const std::function<void *(const particle_iterator &, void *)> &store_callb,
2087 const std::function<const void *(const particle_iterator &, const void *)>
2088 &load_callb)
2089 {
2090 size_callback = size_callb;
2091 store_callback = store_callb;
2092 load_callback = load_callb;
2093 }
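// Illustrative usage sketch for the callback registration above. The callbacks
// let user code append extra data to every particle that is shipped to another
// process; the single `double` used here is just a placeholder, and
// `compute_extra_quantity`/`use_extra_quantity` are hypothetical helpers:
//
//   particle_handler.register_additional_store_load_functions(
//     []() -> std::size_t { return sizeof(double); },
//     [](const typename ParticleHandler<dim>::particle_iterator &p,
//        void *data) -> void * {
//       const double extra = compute_extra_quantity(p); // hypothetical helper
//       std::memcpy(data, &extra, sizeof(double));
//       return static_cast<char *>(data) + sizeof(double);
//     },
//     [](const typename ParticleHandler<dim>::particle_iterator &p,
//        const void *data) -> const void * {
//       double extra;
//       std::memcpy(&extra, data, sizeof(double));
//       use_extra_quantity(p, extra); // hypothetical helper
//       return static_cast<const char *>(data) + sizeof(double);
//     });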
2094
2095
2096 template <int dim, int spacedim>
2097 void
2098 ParticleHandler<dim, spacedim>::connect_to_triangulation_signals()
2099 {
2100 // First disconnect existing connections
2101 for (const auto &connection : tria_listeners)
2102 connection.disconnect();
2103
2104 tria_listeners.clear();
2105
2106 tria_listeners.push_back(triangulation->signals.create.connect([&]() {
2107 this->initialize(*(this->triangulation),
2108 *(this->mapping),
2109 this->property_pool->n_properties_per_slot());
2110 }));
2111
2112 this->tria_listeners.push_back(
2113 this->triangulation->signals.clear.connect([&]() { this->clear(); }));
2114
2115 // for distributed triangulations, connect to distributed signals
2116 if (dynamic_cast<const parallel::DistributedTriangulationBase<dim, spacedim>
2117 *>(&(*triangulation)) != nullptr)
2118 {
2119 tria_listeners.push_back(
2120 triangulation->signals.post_distributed_refinement.connect(
2121 [&]() { this->post_mesh_change_action(); }));
2122 tria_listeners.push_back(
2123 triangulation->signals.post_distributed_repartition.connect(
2124 [&]() { this->post_mesh_change_action(); }));
2125 tria_listeners.push_back(
2126 triangulation->signals.post_distributed_load.connect(
2127 [&]() { this->post_mesh_change_action(); }));
2128 }
2129 else
2130 {
2131 tria_listeners.push_back(triangulation->signals.post_refinement.connect(
2132 [&]() { this->post_mesh_change_action(); }));
2133 }
2134 }
2135
2136
2137
2138 template <int dim, int spacedim>
2139 void
2140 ParticleHandler<dim, spacedim>::post_mesh_change_action()
2141 {
2142 Assert(triangulation != nullptr, ExcInternalError());
2143
2144 const bool distributed_triangulation =
2145 dynamic_cast<
2146 const parallel::DistributedTriangulationBase<dim, spacedim> *>(
2147 &(*triangulation)) != nullptr;
2148 (void)distributed_triangulation;
2149
2150 Assert(
2151 distributed_triangulation || number_of_locally_owned_particles == 0,
2152 ExcMessage(
2153 "Mesh refinement in a non-distributed triangulation is not supported "
2154 "by the ParticleHandler class. Either insert particles after mesh "
2155 "creation, or use a distributed triangulation."));
2156
2157 // Resize the container if it is possible without
2158 // transferring particles
2159 if (number_of_locally_owned_particles == 0)
2160 cells_to_particle_cache.resize(triangulation->n_active_cells(),
2161 particles.end());
2162 }
2163
2164
2165
2166 template <int dim, int spacedim>
2167 void
2168 ParticleHandler<dim, spacedim>::register_store_callback_function()
2169 {
2170 register_data_attach();
2171 }
2172
2173
2174
2175 template <int dim, int spacedim>
2176 void
2177 ParticleHandler<dim, spacedim>::prepare_for_coarsening_and_refinement()
2178 {
2179 register_data_attach();
2180 }
2181
2182
2183
2184 template <int dim, int spacedim>
2185 void
2186 ParticleHandler<dim, spacedim>::prepare_for_serialization()
2187 {
2188 register_data_attach();
2189 }
2190
2191
2192
2193 template <int dim, int spacedim>
2194 void
2195 ParticleHandler<dim, spacedim>::register_data_attach()
2196 {
2197 parallel::DistributedTriangulationBase<dim, spacedim>
2198 *distributed_triangulation =
2199 const_cast<parallel::DistributedTriangulationBase<dim, spacedim> *>(
2200 dynamic_cast<
2201 const parallel::DistributedTriangulationBase<dim, spacedim> *>(
2202 &(*triangulation)));
2203 (void)distributed_triangulation;
2204
2205 Assert(
2206 distributed_triangulation != nullptr,
2207 ExcMessage(
2208 "Mesh refinement in a non-distributed triangulation is not supported "
2209 "by the ParticleHandler class. Either insert particles after mesh "
2210 "creation and do not refine afterwards, or use a distributed triangulation."));
2211
2212 const auto callback_function =
2213 [this](const typename Triangulation<dim, spacedim>::cell_iterator
2214 &cell_iterator,
2215 const CellStatus cell_status) {
2216 return this->pack_callback(cell_iterator, cell_status);
2217 };
2218
2219 handle = distributed_triangulation->register_data_attach(
2220 callback_function, /*returns_variable_size_data=*/true);
2221 }
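// register_data_attach() hooks the particle data into the data-attach
// mechanism of distributed triangulations. Illustrative checkpointing sketch
// (assuming `tria` is a parallel::distributed::Triangulation and
// `particle_handler` is attached to it):
//
//   particle_handler.prepare_for_serialization();
//   tria.save("checkpoint");
//
//   // ...and after restarting:
//   tria.load("checkpoint");
//   particle_handler.deserialize();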
2222
2223
2224
2225 template <int dim, int spacedim>
2226 void
2227 ParticleHandler<dim, spacedim>::unpack_after_coarsening_and_refinement()
2228 {
2229 const bool serialization = false;
2230 notify_ready_to_unpack(serialization);
2231 }
2232
2233
2234
2235 template <int dim, int spacedim>
2236 void
2237 ParticleHandler<dim, spacedim>::deserialize()
2238 {
2239 const bool serialization = true;
2240 notify_ready_to_unpack(serialization);
2241 }
2242
2243
2244 template <int dim, int spacedim>
2245 void
2246 ParticleHandler<dim, spacedim>::register_load_callback_function(
2247 const bool serialization)
2248 {
2249 notify_ready_to_unpack(serialization);
2250 }
2251
2252
2253
2254 template <int dim, int spacedim>
2255 void
2256 ParticleHandler<dim, spacedim>::notify_ready_to_unpack(
2257 const bool serialization)
2258 {
2259 parallel::DistributedTriangulationBase<dim, spacedim>
2260 *distributed_triangulation =
2261 const_cast<parallel::DistributedTriangulationBase<dim, spacedim> *>(
2262 dynamic_cast<
2263 const parallel::DistributedTriangulationBase<dim, spacedim> *>(
2264 &(*triangulation)));
2265 (void)distributed_triangulation;
2266
2267 Assert(
2268 distributed_triangulation != nullptr,
2269 ExcMessage(
2270 "Mesh refinement in a non-distributed triangulation is not supported "
2271 "by the ParticleHandler class. Either insert particles after mesh "
2272 "creation and do not refine afterwards, or use a distributed triangulation."));
2273
2274 // First prepare container for insertion
2275 clear();
2276
2277 // If we are resuming from a checkpoint, we first have to register the
2278 // store function again, to set the triangulation to the same state as
2279 // before the serialization. Only afterwards do we know how to deserialize the
2280 // data correctly.
2281 if (serialization)
2282 register_data_attach();
2283
2284 // Check if something was stored and load it
2285 if (handle != numbers::invalid_unsigned_int)
2286 {
2287 const auto callback_function =
2288 [this](const typename Triangulation<dim, spacedim>::cell_iterator
2289 &cell_iterator,
2290 const CellStatus cell_status,
2291 const boost::iterator_range<std::vector<char>::const_iterator>
2292 &range_iterator) {
2293 this->unpack_callback(cell_iterator, cell_status, range_iterator);
2294 };
2295
2296 distributed_triangulation->notify_ready_to_unpack(handle,
2297 callback_function);
2298
2299 // Reset handle and update global numbers.
2300 handle = numbers::invalid_unsigned_int;
2301 update_cached_numbers();
2302 }
2303 }
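// The unpack path above is typically driven by mesh adaptation. Illustrative
// sketch (assuming refinement flags were already set on a distributed
// triangulation `tria`):
//
//   particle_handler.prepare_for_coarsening_and_refinement();
//   tria.execute_coarsening_and_refinement();
//   particle_handler.unpack_after_coarsening_and_refinement();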
2304
2305
2306
2307 template <int dim, int spacedim>
2308 std::vector<char>
2309 ParticleHandler<dim, spacedim>::pack_callback(
2310 const typename Triangulation<dim, spacedim>::cell_iterator &cell,
2311 const CellStatus status) const
2312 {
2313 std::vector<particle_iterator> stored_particles_on_cell;
2314
2315 switch (status)
2316 {
2317 case CellStatus::cell_will_persist:
2318 case CellStatus::cell_will_be_refined:
2319 // If the cell persists or is refined, store all particles of the
2320 // current cell.
2321 {
2322 const unsigned int n_particles = n_particles_in_cell(cell);
2323 stored_particles_on_cell.reserve(n_particles);
2324
2325 for (unsigned int i = 0; i < n_particles; ++i)
2326 stored_particles_on_cell.push_back(particle_iterator(
2327 cells_to_particle_cache[cell->active_cell_index()],
2328 *property_pool,
2329 i));
2330 }
2331 break;
2332
2333 case CellStatus::children_will_be_coarsened:
2334 // If this cell is the parent of children that will be coarsened,
2335 // collect the particles of all children.
2336 {
2337 for (const auto &child : cell->child_iterators())
2338 {
2339 const unsigned int n_particles = n_particles_in_cell(child);
2340
2341 stored_particles_on_cell.reserve(
2342 stored_particles_on_cell.size() + n_particles);
2343
2344 const typename particle_container::iterator &cache =
2345 cells_to_particle_cache[child->active_cell_index()];
2346 for (unsigned int i = 0; i < n_particles; ++i)
2347 stored_particles_on_cell.push_back(
2348 particle_iterator(cache, *property_pool, i));
2349 }
2350 }
2351 break;
2352
2353 default:
2354 DEAL_II_ASSERT_UNREACHABLE();
2355 break;
2356 }
2357
2358 return pack_particles(stored_particles_on_cell);
2359 }
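// The buffer returned above is simply the back-to-back serialization of the
// collected particles; no cell id is prepended here, because the data-attach
// mechanism already associates the buffer with the cell it was packed for.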
2360
2361
2362
2363 template <int dim, int spacedim>
2364 void
2365 ParticleHandler<dim, spacedim>::unpack_callback(
2366 const typename Triangulation<dim, spacedim>::cell_iterator &cell,
2367 const CellStatus status,
2368 const boost::iterator_range<std::vector<char>::const_iterator> &data_range)
2369 {
2370 if (data_range.begin() == data_range.end())
2371 return;
2372
2373 const auto cell_to_store_particles =
2374 (status != CellStatus::cell_will_be_refined) ? cell : cell->child(0);
2375
2376 // deserialize particles and insert into local storage
2377 if (data_range.begin() != data_range.end())
2378 {
2379 const void *data = static_cast<const void *>(&(*data_range.begin()));
2380 const void *end = static_cast<const void *>(
2381 &(*data_range.begin()) + (data_range.end() - data_range.begin()));
2382
2383 while (data < end)
2384 {
2385 const void *old_data = data;
2386 const auto x = insert_particle(data, cell_to_store_particles);
2387
2388 // Ensure that the particle read exactly as much data as
2389 // it promised it needs to store its data
2390 const void *new_data = data;
2391 (void)old_data;
2392 (void)new_data;
2393 (void)x;
2394 AssertDimension((const char *)new_data - (const char *)old_data,
2395 x->serialized_size_in_bytes());
2396 }
2397
2398 Assert(data == end,
2399 ExcMessage(
2400 "The particle data could not be deserialized successfully. "
2401 "Check that when deserializing the particles you expect "
2402 "the same number of properties that were serialized."));
2403 }
2404
2405 auto loaded_particles_on_cell = particles_in_cell(cell_to_store_particles);
2406
2407 // now update particle storage location and properties if necessary
2408 switch (status)
2409 {
2410 case CellStatus::cell_will_persist:
2411 {
2412 // all particles are correctly inserted
2413 }
2414 break;
2415
2416 case CellStatus::children_will_be_coarsened:
2417 {
2418 // all particles are in correct cell, but their reference location
2419 // has changed
2420 for (auto &particle : loaded_particles_on_cell)
2421 {
2422 const Point<dim> p_unit =
2423 mapping->transform_real_to_unit_cell(cell_to_store_particles,
2424 particle.get_location());
2425 particle.set_reference_location(p_unit);
2426 }
2427 }
2428 break;
2429
2430 case CellStatus::cell_will_be_refined:
2431 {
2432 // we need to find the correct child to store the particles and
2433 // their reference location has changed
2434 typename particle_container::iterator &cache =
2435 cells_to_particle_cache[cell_to_store_particles
2436 ->active_cell_index()];
2437
2438 // make sure that the call above has inserted an entry
2439 Assert(cache != particles.end(), ExcInternalError());
2440
2441 // Cannot use range-based loop, because number of particles in cell
2442 // is going to change
2443 auto particle = loaded_particles_on_cell.begin();
2444 for (unsigned int i = 0; i < cache->particles.size();)
2445 {
2446 bool found_new_cell = false;
2447
2448 for (const auto &child : cell->child_iterators())
2449 {
2450 Assert(child->is_locally_owned(), ExcInternalError());
2451
2452 try
2453 {
2454 const Point<dim> p_unit =
2455 mapping->transform_real_to_unit_cell(
2456 child, particle->get_location());
2457 if (GeometryInfo<dim>::is_inside_unit_cell(
2458 p_unit, tolerance_inside_cell))
2459 {
2460 found_new_cell = true;
2461 particle->set_reference_location(p_unit);
2462
2463 // if the particle is not in the cell we stored it
2464 // in above, its handle is in the wrong place
2465 if (child != cell_to_store_particles)
2466 {
2467 // move handle into correct cell
2468 insert_particle(cache->particles[i], child);
2469 // remove handle by replacing it with last one
2470 cache->particles[i] = cache->particles.back();
2471 cache->particles.pop_back();
2472 // no loop increment, we need to process
2473 // the new i-th particle.
2474 }
2475 else
2476 {
2477 // move on to next particle
2478 ++i;
2479 ++particle;
2480 }
2481 break;
2482 }
2483 }
2484 catch (typename Mapping<dim>::ExcTransformationFailed &)
2485 {}
2486 }
2487
2488 if (found_new_cell == false)
2489 {
2490 // If we get here, we did not find the particle in any
2491 // child. This case may happen for particles that are at the
2492 // boundary for strongly curved cells. We apply a tolerance
2493 // in the call to GeometryInfo<dim>::is_inside_unit_cell to
2494 // account for this, but if that is not enough, we still
2495 // need to prevent an endless loop here. Delete the particle
2496 // and move on.
2497 signals.particle_lost(particle,
2498 particle->get_surrounding_cell());
2499 if (cache->particles[i] !=
2500 PropertyPool<dim, spacedim>::invalid_handle)
2501 property_pool->deregister_particle(cache->particles[i]);
2502 cache->particles[i] = cache->particles.back();
2503 cache->particles.pop_back();
2504 }
2505 }
2506 // clean up in case child 0 has no particle left
2507 if (cache->particles.empty())
2508 {
2509 particles.erase(cache);
2510 cache = particles.end();
2511 }
2512 }
2513 break;
2514
2515 default:
2516 DEAL_II_ASSERT_UNREACHABLE();
2517 break;
2518 }
2519 }
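// unpack_callback() emits signals.particle_lost for particles that cannot be
// located in any child cell. Illustrative sketch of observing such events
// (the lambda body is a placeholder):
//
//   particle_handler.signals.particle_lost.connect(
//     [](const typename ParticleHandler<dim>::particle_iterator &particle,
//        const typename Triangulation<dim>::active_cell_iterator &cell) {
//       std::cerr << "lost particle with id " << particle->get_id()
//                 << std::endl;
//     });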
2520} // namespace Particles
2521
2522#include "particle_handler.inst"
2523