// NOTE(review): the following four lines were web-viewer residue
// ("Loading... / Searching... / No Matches / patch_kernel_parallel.cpp")
// left over from an HTML code-browser extraction; converted to a comment.
1/*---------------------------------------------------------------------------*\
2 *
3 * bitpit
4 *
5 * Copyright (C) 2015-2021 OPTIMAD engineering Srl
6 *
7 * -------------------------------------------------------------------------
8 * License
9 * This file is part of bitpit.
10 *
11 * bitpit is free software: you can redistribute it and/or modify it
12 * under the terms of the GNU Lesser General Public License v3 (LGPL)
13 * as published by the Free Software Foundation.
14 *
15 * bitpit is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
18 * License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with bitpit. If not, see <http://www.gnu.org/licenses/>.
22 *
23\*---------------------------------------------------------------------------*/
24#if BITPIT_ENABLE_MPI==1
25
26// ========================================================================== //
27// INCLUDES //
28// ========================================================================== //
29#if BITPIT_ENABLE_METIS==1
30#include <metis.h>
31#endif
32#include <mpi.h>
33
34#include <chrono>
35#include <unordered_set>
36#include <unordered_map>
37
38#include "bitpit_communications.hpp"
39
40#include "patch_kernel.hpp"
41
42// ========================================================================== //
43// NAMESPACES //
44// ========================================================================== //
45using namespace std;
46using namespace chrono;
47
48namespace bitpit {
49
54
61void PatchKernel::initializeCommunicator(MPI_Comm communicator)
62{
63 // Early return if setting a null communicator
64 if (communicator == MPI_COMM_NULL) {
65 m_communicator = communicator;
66
67 m_nProcessors = 1;
68 m_rank = 0;
69
70 return;
71 }
72
73 // Creat a copy of the user-specified communicator
74 //
75 // No library routine should use MPI_COMM_WORLD as the communicator;
76 // instead, a duplicate of a user-specified communicator should always
77 // be used.
78 MPI_Comm_dup(communicator, &m_communicator);
79
80 // Get MPI information
81 MPI_Comm_size(m_communicator, &m_nProcessors);
82 MPI_Comm_rank(m_communicator, &m_rank);
83
84 // Set parallel data for the VTK output
85 if (m_nProcessors > 1) {
86 m_vtk.setParallel(m_nProcessors, m_rank);
87 }
88}
89
96void PatchKernel::setCommunicator(MPI_Comm communicator)
97{
98 // Communication can be set just once
99 if (isCommunicatorSet()) {
100 throw std::runtime_error ("Patch communicator can be set just once");
101 }
102
103 // The communicator has to be valid
104 if (communicator == MPI_COMM_NULL) {
105 throw std::runtime_error ("Patch communicator is not valid");
106 }
107
108 // Set the communicator
109 initializeCommunicator(communicator);
110}
111
119{
120 return (getCommunicator() != MPI_COMM_NULL);
121}
122
128const MPI_Comm & PatchKernel::getCommunicator() const
129{
130 return m_communicator;
131}
132
136void PatchKernel::freeCommunicator()
137{
138 if (!isCommunicatorSet()) {
139 return;
140 }
141
142 int finalizedCalled;
143 MPI_Finalized(&finalizedCalled);
144 if (finalizedCalled) {
145 return;
146 }
147
148 MPI_Comm_free(&m_communicator);
149}
150
157{
158 return m_rank;
159}
160
168{
169 return m_nProcessors;
170}
171
187bool PatchKernel::isDistributed(bool allowDirty) const
188{
189 return (getOwner(allowDirty) < 0);
190}
191
211int PatchKernel::getOwner(bool allowDirty) const
212{
213 if (allowDirty) {
214 return evalOwner();
215 }
216
217 assert(!arePartitioningInfoDirty(false));
218
219 return m_owner;
220}
221
232int PatchKernel::evalOwner() const
233{
234 long nInternalCells = getInternalCellCount();
235 long nGlobalInternalCells = nInternalCells;
236 if (isPartitioned()) {
237 MPI_Allreduce(MPI_IN_PLACE, &nGlobalInternalCells, 1, MPI_LONG, MPI_SUM, getCommunicator());
238 }
239
240 int owner = -1;
241 if (nGlobalInternalCells > 0) {
242 if (nInternalCells == nGlobalInternalCells) {
243 owner = getRank();
244 }
245
246 if (isPartitioned()) {
247 MPI_Allreduce(MPI_IN_PLACE, &owner, 1, MPI_INT, MPI_MAX, getCommunicator());
248 }
249 }
250
251 return owner;
252}
253
261void PatchKernel::updateOwner()
262{
263 m_owner = evalOwner();
264}
265
274void PatchKernel::initializeHaloSize(std::size_t haloSize)
275{
276 m_haloSize = haloSize;
277}
278
285void PatchKernel::setHaloSize(std::size_t haloSize)
286{
287
288 if (isDistributed()) {
289 throw std::runtime_error ("Halo size can only be set when the patch is all on a single process");
290 }
291
292 if (m_haloSize == haloSize) {
293 return;
294 }
295
296 std::size_t maxHaloSize = _getMaxHaloSize();
297 if (haloSize > maxHaloSize) {
298 throw std::runtime_error ("Halo size exceeds the maximum allowed value.");
299 }
300
301 initializeHaloSize(haloSize);
302
303 _setHaloSize(haloSize);
304
305 if (isPartitioned()) {
306 updatePartitioningInfo(true);
307 }
308}
309
315std::size_t PatchKernel::getHaloSize() const
316{
317 return m_haloSize;
318}
319
328{
329 return 1;
330}
331
339void PatchKernel::_setHaloSize(std::size_t haloSize)
340{
341 BITPIT_UNUSED(haloSize);
342}
343
351{
353 return m_vertices.end();
354 }
355
356 // Swap the vertex with the last internal vertex
357 if (id != m_lastInternalVertexId) {
358 m_vertices.swap(id, m_lastInternalVertexId);
359 }
360
361 // Get the iterator pointing to the updated position of the vertex
362 VertexIterator iterator = m_vertices.find(id);
363
364 // Update the interior flag
365 iterator->setInterior(false);
366
367 // Update vertex counters
368 --m_nInternalVertices;
369 ++m_nGhostVertices;
370
371 // Update the last internal and first ghost markers
372 m_firstGhostVertexId = id;
373 if (m_nInternalVertices == 0) {
374 m_lastInternalVertexId = Vertex::NULL_ID;
375 } else {
376 m_lastInternalVertexId = m_vertices.getSizeMarker(m_nInternalVertices - 1, Vertex::NULL_ID);
377 }
378
379 // Set ghost owner
380 setGhostVertexInfo(id, owner);
381
382 // Return the iterator to the new position
383 return iterator;
384}
385
392{
394 return m_vertices.end();
395 }
396
397 // Swap the vertex with the first ghost
398 if (id != m_firstGhostVertexId) {
399 m_vertices.swap(id, m_firstGhostVertexId);
400 }
401
402 // Get the iterator pointing to the updated position of the vertex
403 VertexIterator iterator = m_vertices.find(id);
404
405 // Update the interior flag
406 iterator->setInterior(true);
407
408 // Update vertex counters
409 ++m_nInternalVertices;
410 --m_nGhostVertices;
411
412 // Update the last internal and first ghost markers
413 m_lastInternalVertexId = id;
414 if (m_nGhostVertices == 0) {
415 m_firstGhostVertexId = Vertex::NULL_ID;
416 } else {
417 VertexIterator firstGhostVertexIterator = iterator;
418 ++firstGhostVertexIterator;
419 m_firstGhostVertexId = firstGhostVertexIterator->getId();
420 }
421
422 // Unset ghost owner
423 unsetGhostVertexInfo(id);
424
425 // Return the iterator to the new position
426 return iterator;
427}
428
435{
436 return m_nGhostVertices;
437}
438
445{
446 return m_vertices[m_firstGhostVertexId];
447}
448
455{
456 return m_vertices[m_firstGhostVertexId];
457}
458
470PatchKernel::VertexIterator PatchKernel::restoreVertex(const std::array<double, 3> &coords, int owner, long id)
471{
473 return vertexEnd();
474 }
475
476 VertexIterator iterator = m_vertices.find(id);
477 if (iterator == m_vertices.end()) {
478 throw std::runtime_error("Unable to restore the specified vertex: the kernel doesn't contain an entry for that vertex.");
479 }
480
481 // There is not need to set the id of the vertex as assigned, because
482 // also the index generator will be restored.
483 if (owner == getRank()) {
484 _restoreInternalVertex(iterator, coords);
485 } else {
486 _restoreGhostVertex(iterator, coords, owner);
487 }
488
489 return iterator;
490}
491
502void PatchKernel::_restoreGhostVertex(const VertexIterator &iterator, const std::array<double, 3> &coords, int owner)
503{
504 // Restore the vertex
505 //
506 // There is no need to set the id of the vertex as assigned, because
507 // also the index generator will be restored.
508 Vertex &vertex = *iterator;
509 vertex.initialize(iterator.getId(), coords, false);
510 m_nGhostVertices++;
511
512 // Update the bounding box
514
515 // Set owner
516 setGhostVertexInfo(vertex.getId(), owner);
517}
518
524void PatchKernel::_deleteGhostVertex(long id)
525{
526 // Unset ghost owner
527 unsetGhostVertexInfo(id);
528
529 // Update the bounding box
530 const Vertex &vertex = m_vertices[id];
531 removePointFromBoundingBox(vertex.getCoords());
532
533 // Delete vertex
534 m_vertices.erase(id, true);
535 m_nGhostVertices--;
536 if (id == m_firstGhostVertexId) {
538 }
539
540 // Vertex id is no longer used
541 if (m_vertexIdGenerator) {
542 m_vertexIdGenerator->trash(id);
543 }
544}
545
552{
553 if (m_nGhostVertices > 0) {
554 return m_vertices.find(m_firstGhostVertexId);
555 } else {
556 return m_vertices.end();
557 }
558}
559
566{
567 return m_vertices.end();
568}
569
577{
578 if (m_nGhostVertices > 0) {
579 return m_vertices.find(m_firstGhostVertexId);
580 } else {
581 return m_vertices.cend();
582 }
583}
584
591{
592 return m_vertices.cend();
593}
594
599{
600 if (m_nGhostVertices == 0) {
601 m_firstGhostVertexId = Vertex::NULL_ID;
602 } else if (m_nInternalVertices == 0) {
603 VertexIterator firstGhostVertexItr = vertexBegin();
604 m_firstGhostVertexId = firstGhostVertexItr->getId();
605 } else {
606 m_firstGhostVertexId = m_vertices.getSizeMarker(m_nInternalVertices, Vertex::NULL_ID);
607 }
608}
609
618{
620 return m_cells.end();
621 }
622
623 // Swap the element with the last internal cell
624 if (id != m_lastInternalCellId) {
625 m_cells.swap(id, m_lastInternalCellId);
626 }
627
628 // Get the iterator pointing to the updated position of the element
629 CellIterator iterator = m_cells.find(id);
630
631 // Update the interior flag
632 iterator->setInterior(false);
633
634 // Update cell counters
635 --m_nInternalCells;
636 ++m_nGhostCells;
637
638 // Update the last internal and first ghost markers
639 m_firstGhostCellId = id;
640 if (m_nInternalCells == 0) {
641 m_lastInternalCellId = Cell::NULL_ID;
642 } else {
643 m_lastInternalCellId = m_cells.getSizeMarker(m_nInternalCells - 1, Cell::NULL_ID);
644 }
645
646 // Set ghost information
647 setGhostCellInfo(id, owner, haloLayer);
648
649 // Return the iterator to the new position
650 return iterator;
651}
652
659{
661 return m_cells.end();
662 }
663
664 // Swap the cell with the first ghost
665 if (id != m_firstGhostCellId) {
666 m_cells.swap(id, m_firstGhostCellId);
667 }
668
669 // Get the iterator pointing to the updated position of the element
670 CellIterator iterator = m_cells.find(id);
671
672 // Update the interior flag
673 iterator->setInterior(true);
674
675 // Update cell counters
676 ++m_nInternalCells;
677 --m_nGhostCells;
678
679 // Update the last internal and first ghost markers
680 m_lastInternalCellId = id;
681 if (m_nGhostCells == 0) {
682 m_firstGhostCellId = Cell::NULL_ID;
683 } else {
684 CellIterator firstGhostCellIterator = iterator;
685 ++firstGhostCellIterator;
686 m_firstGhostCellId = firstGhostCellIterator->getId();
687 }
688
689 // Unset ghost information
690 unsetGhostCellInfo(id);
691
692 // Return the iterator to the new position
693 return iterator;
694}
695
702{
703 return m_nGhostCells;
704}
705
712{
713 return getGhostCellCount();
714}
715
722{
723 return m_cells[m_firstGhostCellId];
724}
725
735
742{
743 return m_cells[m_firstGhostCellId];
744}
745
752{
753 return getFirstGhostCell();
754}
755
772PatchKernel::CellIterator PatchKernel::addCell(const Cell &source, int owner, long id)
773{
774 return addCell(source, owner, 0, id);
775}
776
793{
794 return addCell(source, owner, 0, id);
795}
796
814{
815 return addCell(type, owner, 0, id);
816}
817
835PatchKernel::CellIterator PatchKernel::addCell(ElementType type, const std::vector<long> &connectivity,
836 int owner, long id)
837{
838 return addCell(type, connectivity, owner, 0, id);
839}
840
859PatchKernel::CellIterator PatchKernel::addCell(ElementType type, std::unique_ptr<long[]> &&connectStorage,
860 int owner, long id)
861{
862 return addCell(type, std::move(connectStorage), owner, 0, id);
863}
864
883PatchKernel::CellIterator PatchKernel::addCell(const Cell &source, int owner, int haloLayer, long id)
884{
885 Cell cell = source;
886 cell.setId(id);
887
888 return addCell(std::move(cell), owner, haloLayer, id);
889}
890
908PatchKernel::CellIterator PatchKernel::addCell(Cell &&source, int owner, int haloLayer, long id)
909{
910 if (id < 0) {
911 id = source.getId();
912 }
913
914 int connectSize = source.getConnectSize();
915 std::unique_ptr<long[]> connectStorage = std::unique_ptr<long[]>(new long[connectSize]);
916 if (!source.hasInfo()){
917 std::copy(source.getConnect(), source.getConnect() + connectSize, connectStorage.get());
918 }
919
920 CellIterator iterator = addCell(source.getType(), std::move(connectStorage), owner, haloLayer, id);
921
922 Cell &cell = (*iterator);
923 id = cell.getId();
924 cell = std::move(source);
925 cell.setId(id);
926
927 return iterator;
928}
929
948PatchKernel::CellIterator PatchKernel::addCell(ElementType type, int owner, int haloLayer, long id)
949{
950 std::unique_ptr<long[]> connectStorage;
952 int connectSize = ReferenceElementInfo::getInfo(type).nVertices;
953 connectStorage = std::unique_ptr<long[]>(new long[connectSize]);
954 } else {
955 connectStorage = std::unique_ptr<long[]>(nullptr);
956 }
957
958 return addCell(type, std::move(connectStorage), owner, haloLayer, id);
959}
960
980PatchKernel::CellIterator PatchKernel::addCell(ElementType type, const std::vector<long> &connectivity,
981 int owner, int haloLayer, long id)
982{
983 int connectSize = connectivity.size();
984 std::unique_ptr<long[]> connectStorage = std::unique_ptr<long[]>(new long[connectSize]);
985 std::copy(connectivity.data(), connectivity.data() + connectSize, connectStorage.get());
986
987 return addCell(type, std::move(connectStorage), owner, haloLayer, id);
988}
989
1010PatchKernel::CellIterator PatchKernel::addCell(ElementType type, std::unique_ptr<long[]> &&connectStorage,
1011 int owner, int haloLayer, long id)
1012{
1014 return cellEnd();
1015 }
1016
1017 if (Cell::getDimension(type) > getDimension()) {
1018 return cellEnd();
1019 }
1020
1021 CellIterator iterator;
1022 if (owner == getRank()) {
1023 iterator = _addInternalCell(type, std::move(connectStorage), id);
1024 } else {
1025 iterator = _addGhostCell(type, std::move(connectStorage), owner, haloLayer, id);
1026 }
1027
1028 return iterator;
1029}
1030
1049PatchKernel::CellIterator PatchKernel::_addGhostCell(ElementType type, std::unique_ptr<long[]> &&connectStorage,
1050 int owner, int haloLayer, long id)
1051{
1052 // Get the id of the cell
1053 if (m_cellIdGenerator) {
1054 if (id < 0) {
1055 id = m_cellIdGenerator->generate();
1056 } else {
1057 m_cellIdGenerator->setAssigned(id);
1058 }
1059 } else if (id < 0) {
1060 throw std::runtime_error("No valid id has been provided for the cell.");
1061 }
1062
1063 // Create the cell
1064 //
1065 // If there are internal cells, the ghost cell should be inserted
1066 // after the last internal cell.
1067 bool storeInterfaces = (getInterfacesBuildStrategy() != INTERFACES_NONE);
1068 bool storeAdjacencies = storeInterfaces || (getAdjacenciesBuildStrategy() != ADJACENCIES_NONE);
1069
1070 CellIterator iterator;
1071 if (m_lastInternalCellId < 0) {
1072 iterator = m_cells.emreclaim(id, id, type, std::move(connectStorage), false, storeInterfaces, storeAdjacencies);
1073 } else {
1074 iterator = m_cells.emreclaimAfter(m_lastInternalCellId, id, id, type, std::move(connectStorage), false, storeInterfaces, storeAdjacencies);
1075 }
1076 m_nGhostCells++;
1077
1078 // Update the id of the first ghost cell
1079 if (m_firstGhostCellId < 0) {
1080 m_firstGhostCellId = id;
1081 } else if (m_cells.rawIndex(m_firstGhostCellId) > m_cells.rawIndex(id)) {
1082 m_firstGhostCellId = id;
1083 }
1084
1085 // Set ghost information
1086 setGhostCellInfo(id, owner, haloLayer);
1087
1088 // Set the alteration flags of the cell
1089 setAddedCellAlterationFlags(id);
1090
1091 return iterator;
1092}
1093
1109PatchKernel::CellIterator PatchKernel::restoreCell(ElementType type, std::unique_ptr<long[]> &&connectStorage,
1110 int owner, int haloLayer, long id)
1111{
1113 return cellEnd();
1114 }
1115
1116 if (Cell::getDimension(type) > getDimension()) {
1117 return cellEnd();
1118 }
1119
1120 CellIterator iterator = m_cells.find(id);
1121 if (iterator == m_cells.end()) {
1122 throw std::runtime_error("Unable to restore the specified cell: the kernel doesn't contain an entry for that cell.");
1123 }
1124
1125 // There is not need to set the id of the cell as assigned, because
1126 // also the index generator will be restored.
1127 if (owner == getRank()) {
1128 _restoreInternalCell(iterator, type, std::move(connectStorage));
1129 } else {
1130 _restoreGhostCell(iterator, type, std::move(connectStorage), owner, haloLayer);
1131 }
1132
1133 return iterator;
1134}
1135
1150void PatchKernel::_restoreGhostCell(const CellIterator &iterator, ElementType type,
1151 std::unique_ptr<long[]> &&connectStorage,
1152 int owner, int haloLayer)
1153{
1154 // Restore the cell
1155 //
1156 // There is no need to set the id of the cell as assigned, because
1157 // also the index generator will be restored.
1158 bool storeInterfaces = (getInterfacesBuildStrategy() != INTERFACES_NONE);
1159 bool storeAdjacencies = storeInterfaces || (getAdjacenciesBuildStrategy() != ADJACENCIES_NONE);
1160
1161 long cellId = iterator.getId();
1162 Cell &cell = *iterator;
1163 cell.initialize(iterator.getId(), type, std::move(connectStorage), false, storeInterfaces, storeAdjacencies);
1164 m_nGhostCells++;
1165
1166 // Set ghost information
1167 setGhostCellInfo(cellId, owner, haloLayer);
1168
1169 // Set the alteration flags of the cell
1170 setRestoredCellAlterationFlags(cellId);
1171}
1172
1178void PatchKernel::_deleteGhostCell(long id)
1179{
1180 // Unset ghost information
1181 unsetGhostCellInfo(id);
1182
1183 // Set the alteration flags of the cell
1184 setDeletedCellAlterationFlags(id);
1185
1186 // Delete cell
1187 m_cells.erase(id, true);
1188 m_nGhostCells--;
1189 if (id == m_firstGhostCellId) {
1191 }
1192
1193 // Cell id is no longer used
1194 if (m_cellIdGenerator) {
1195 m_cellIdGenerator->trash(id);
1196 }
1197}
1198
1205{
1206 if (m_nGhostCells > 0) {
1207 return m_cells.find(m_firstGhostCellId);
1208 } else {
1209 return m_cells.end();
1210 }
1211}
1212
1222
1229{
1230 return m_cells.end();
1231}
1232
1242
1249{
1250 if (m_nGhostCells > 0) {
1251 return m_cells.find(m_firstGhostCellId);
1252 } else {
1253 return m_cells.cend();
1254 }
1255}
1256
1266
1273{
1274 return m_cells.cend();
1275}
1276
1286
1291{
1292 if (m_nGhostCells == 0) {
1293 m_firstGhostCellId = Cell::NULL_ID;
1294 } else if (m_nInternalCells == 0) {
1295 CellIterator firstghostCellItr = cellBegin();
1296 m_firstGhostCellId = firstghostCellItr->getId();
1297 } else {
1298 m_firstGhostCellId = m_cells.getSizeMarker(m_nInternalCells, Cell::NULL_ID);
1299 }
1300}
1301
1317std::vector<adaption::Info> PatchKernel::partition(MPI_Comm communicator, const std::unordered_map<long, int> &cellRanks, bool trackPartitioning, bool squeezeStorage, std::size_t haloSize)
1318{
1319 setCommunicator(communicator);
1320
1321 setHaloSize(haloSize);
1322
1323 return partition(cellRanks, trackPartitioning, squeezeStorage);
1324}
1325
1338std::vector<adaption::Info> PatchKernel::partition(const std::unordered_map<long, int> &cellRanks, bool trackPartitioning, bool squeezeStorage)
1339{
1340 partitioningPrepare(cellRanks, false);
1341
1342 std::vector<adaption::Info> partitioningData = partitioningAlter(trackPartitioning, squeezeStorage);
1343
1345
1346 return partitioningData;
1347}
1348
1363std::vector<adaption::Info> PatchKernel::partition(MPI_Comm communicator, bool trackPartitioning, bool squeezeStorage, std::size_t haloSize)
1364{
1365 setCommunicator(communicator);
1366
1367 setHaloSize(haloSize);
1368
1369 std::unordered_map<long, double> dummyCellWeights;
1370
1371 return partition(dummyCellWeights, trackPartitioning, squeezeStorage);
1372}
1373
1385std::vector<adaption::Info> PatchKernel::partition(bool trackPartitioning, bool squeezeStorage)
1386{
1387 std::unordered_map<long, double> dummyCellWeights;
1388
1389 partitioningPrepare(dummyCellWeights, false);
1390
1391 std::vector<adaption::Info> partitioningData = partitioningAlter(trackPartitioning, squeezeStorage);
1392
1394
1395 return partitioningData;
1396}
1397
1415std::vector<adaption::Info> PatchKernel::partition(MPI_Comm communicator, const std::unordered_map<long, double> &cellWeights, bool trackPartitioning, bool squeezeStorage, std::size_t haloSize)
1416{
1417 setCommunicator(communicator);
1418
1419 setHaloSize(haloSize);
1420
1421 return partition(cellWeights, trackPartitioning, squeezeStorage);
1422}
1423
1438std::vector<adaption::Info> PatchKernel::partition(const std::unordered_map<long, double> &cellWeights, bool trackPartitioning, bool squeezeStorage)
1439{
1440 partitioningPrepare(cellWeights, false);
1441
1442 std::vector<adaption::Info> partitioningData = partitioningAlter(trackPartitioning, squeezeStorage);
1443
1445
1446 return partitioningData;
1447}
1448
1462std::vector<adaption::Info> PatchKernel::partitioningPrepare(MPI_Comm communicator, const std::unordered_map<long, int> &cellRanks, bool trackPartitioning, std::size_t haloSize)
1463{
1464 setCommunicator(communicator);
1465
1466 setHaloSize(haloSize);
1467
1468 return partitioningPrepare(cellRanks, trackPartitioning);
1469}
1470
1481std::vector<adaption::Info> PatchKernel::partitioningPrepare(const std::unordered_map<long, int> &cellRanks, bool trackPartitioning)
1482{
1483 // Patch needs to support partitioning
1484 if (!isPartitioningSupported()) {
1485 throw std::runtime_error ("The patch does not support partitioning!");
1486 }
1487
1488 // Communicator has to be set
1489 if (!isCommunicatorSet()) {
1490 throw std::runtime_error ("There is no communicator set for the patch.");
1491 }
1492
1493 // Check partitioning status
1494 PartitioningStatus partitioningStatus = getPartitioningStatus(true);
1495 if (partitioningStatus != PARTITIONING_CLEAN) {
1496 throw std::runtime_error ("A partitioning is already in progress.");
1497 }
1498
1499 std::vector<adaption::Info> partitioningData = _partitioningPrepare(cellRanks, trackPartitioning);
1500
1501 // Update the status
1502 setPartitioningStatus(PARTITIONING_PREPARED);
1503
1504 return partitioningData;
1505}
1506
1519std::vector<adaption::Info> PatchKernel::partitioningPrepare(MPI_Comm communicator, bool trackPartitioning, std::size_t haloSize)
1520{
1521 setCommunicator(communicator);
1522
1523 setHaloSize(haloSize);
1524
1525 std::unordered_map<long, double> dummyCellWeights;
1526
1527 return partitioningPrepare(dummyCellWeights, trackPartitioning);
1528}
1529
1539std::vector<adaption::Info> PatchKernel::partitioningPrepare(bool trackPartitioning)
1540{
1541 std::unordered_map<long, double> dummyCellWeights;
1542
1543 return partitioningPrepare(dummyCellWeights, trackPartitioning);
1544}
1545
1561std::vector<adaption::Info> PatchKernel::partitioningPrepare(MPI_Comm communicator, const std::unordered_map<long, double> &cellWeights, bool trackPartitioning, std::size_t haloSize)
1562{
1563 setCommunicator(communicator);
1564
1565 setHaloSize(haloSize);
1566
1567 return partitioningPrepare(cellWeights, trackPartitioning);
1568}
1569
1582std::vector<adaption::Info> PatchKernel::partitioningPrepare(const std::unordered_map<long, double> &cellWeights, bool trackPartitioning)
1583{
1584 // Patch needs to support partitioning
1585 if (!isPartitioningSupported()) {
1586 throw std::runtime_error ("The patch does not support partitioning!");
1587 }
1588
1589 // Check partitioning status
1590 PartitioningStatus partitioningStatus = getPartitioningStatus(true);
1591 if (partitioningStatus != PARTITIONING_CLEAN) {
1592 throw std::runtime_error ("A partitioning is already in progress.");
1593 }
1594
1595 // Execute the partitioning preparation
1596 std::vector<adaption::Info> partitioningData = _partitioningPrepare(cellWeights, DEFAULT_PARTITIONING_WEIGTH, trackPartitioning);
1597
1598 // Update the status
1599 setPartitioningStatus(PARTITIONING_PREPARED);
1600
1601 return partitioningData;
1602}
1603
1619std::vector<adaption::Info> PatchKernel::partitioningAlter(bool trackPartitioning, bool squeezeStorage)
1620{
1621 std::vector<adaption::Info> partitioningData;
1622 if (!isPartitioningSupported()) {
1623 return partitioningData;
1624 }
1625
1626 // Check partitioning status
1627 PartitioningStatus partitioningStatus = getPartitioningStatus();
1628 if (partitioningStatus == PARTITIONING_CLEAN) {
1629 return partitioningData;
1630 } else if (partitioningStatus != PARTITIONING_PREPARED) {
1631 throw std::runtime_error ("The prepare function has no been called.");
1632 }
1633
1634 // Partition the patch
1635 partitioningData = _partitioningAlter(trackPartitioning);
1636
1637 // Finalize patch alterations
1638 finalizeAlterations(squeezeStorage);
1639
1640 // Update the status
1641 setPartitioningStatus(PARTITIONING_ALTERED);
1642
1643 return partitioningData;
1644}
1645
1653{
1654 if (!isPartitioningSupported()) {
1655 return;
1656 }
1657
1658 PartitioningStatus partitioningStatus = getPartitioningStatus();
1659 if (partitioningStatus == PARTITIONING_CLEAN) {
1660 return;
1661 } else if (partitioningStatus == PARTITIONING_PREPARED) {
1662 throw std::runtime_error ("It is not yet possible to abort a partitioning.");
1663 } else if (partitioningStatus != PARTITIONING_ALTERED) {
1664 throw std::runtime_error ("The alter function has no been called.");
1665 }
1666
1667 // Clean-up the partitioning
1669
1670 if (m_partitioningOutgoings.size() > 0) {
1671 std::unordered_map<long, int>().swap(m_partitioningOutgoings);
1672 }
1673
1674 // Update the status
1675 setPartitioningStatus(PARTITIONING_CLEAN);
1676}
1677
1687{
1688 return (getProcessorCount() > 1);
1689}
1690
1697{
1698 return (getPartitioningMode() != PARTITIONING_DISABLED);
1699}
1700
1713{
1714 return m_partitioningMode;
1715}
1716
1726{
1727 m_partitioningMode = mode;
1728}
1729
1737{
1738 int partitioningStatus = static_cast<int>(m_partitioningStatus);
1739
1740 if (global && isPartitioned()) {
1741 const auto &communicator = getCommunicator();
1742 MPI_Allreduce(MPI_IN_PLACE, &partitioningStatus, 1, MPI_INT, MPI_MAX, communicator);
1743 }
1744
1745 return static_cast<PartitioningStatus>(partitioningStatus);
1746}
1747
1754{
1755 m_partitioningStatus = status;
1756}
1757
1758
1775{
1776 std::unordered_map<long, double> dummyCellWeights;
1777
1778 return evalPartitioningUnbalance(dummyCellWeights);
1779}
1780
1789double PatchKernel::evalPartitioningUnbalance(const std::unordered_map<long, double> &cellWeights) const
1790{
1791 if (!isPartitioned()) {
1792 return 0.;
1793 }
1794
1795 // Evaluate partition weight
1796 double partitionWeight;
1797 if (!cellWeights.empty()) {
1800
1801 partitionWeight = 0.;
1802 for (CellConstIterator cellItr = beginItr; cellItr != endItr; ++cellItr) {
1803 long cellId = cellItr.getId();
1804
1805 double cellWeight;
1806 auto weightItr = cellWeights.find(cellId);
1807 if (weightItr != cellWeights.end()) {
1808 cellWeight = weightItr->second;
1809 } else {
1810 cellWeight = DEFAULT_PARTITIONING_WEIGTH;
1811 }
1812
1813 partitionWeight += cellWeight;
1814 }
1815 } else {
1817 }
1818
1819 // Evalaute global weights
1820 double totalPartitionWeight;
1821 MPI_Allreduce(&partitionWeight, &totalPartitionWeight, 1, MPI_DOUBLE, MPI_SUM, getCommunicator());
1822
1823 double maximumPartitionWeight;
1824 MPI_Allreduce(&partitionWeight, &maximumPartitionWeight, 1, MPI_DOUBLE, MPI_MAX, getCommunicator());
1825
1826 double meanPartitionWeight = totalPartitionWeight / getProcessorCount();
1827
1828 // Evaluate the unbalance
1829 double unbalance = (maximumPartitionWeight / meanPartitionWeight - 1.);
1830
1831 return unbalance;
1832}
1833
1837#if BITPIT_ENABLE_METIS==1
1845#else
1850#endif
1863std::vector<adaption::Info> PatchKernel::_partitioningPrepare(const std::unordered_map<long, double> &cellWeights, double defaultWeight, bool trackPartitioning)
1864{
1865 // Check if the patch needs a non-trivial partitioning
1866 //
1867 // A non-trivial partitioning is needed only if the patch is not empty
1868 // and if there are multiple processes associated with the patch.
1869 bool isCellRankEvaluationNeeded = (!empty(true) && (getProcessorCount() > 1));
1870
1871 // Default partitioning can only be used when the patch is all on a single process
1872 if (isCellRankEvaluationNeeded) {
1873 if (isDistributed()) {
1874 throw std::runtime_error("Default partitioning can only be used when the patch is all on a single process");
1875 }
1876 }
1877
1878 // Evaluate partitioning
1879 //
1880 // Only the owner of the patch need to evaluate the partitioning.
1881#if BITPIT_ENABLE_METIS==1
1882 std::unordered_map<long, int> cellRanks;
1883 if (isCellRankEvaluationNeeded && getRank() == getOwner()) {
1884 // Initialize map between patch vertices and METIS nodes
1885 //
1886 // METIS node numbering needs to be contiguous and needs to start from zero.
1887 std::unordered_map<long, idx_t> vertexToNodeMap;
1888
1889 idx_t nodeId = 0;
1890 for (const Vertex &vertex : m_vertices) {
1891 vertexToNodeMap[vertex.getId()] = nodeId;
1892 ++nodeId;
1893 }
1894
1895 // Create METIS mesh
1896 idx_t nElements = getInternalCellCount();
1897 idx_t nNodes = getInternalVertexCount();
1898
1899 idx_t connectSize = 0;
1900 for (const Cell &cell : m_cells){
1901 connectSize += static_cast<idx_t>(cell.getVertexCount());
1902 }
1903
1904 std::vector<idx_t> connectStorage(connectSize);
1905 std::vector<idx_t> connectRanges(nElements + 1);
1906 std::vector<idx_t> elementWeights(nElements, static_cast<idx_t>(std::max(defaultWeight, 1.)));
1907
1908 idx_t i = 0;
1909 idx_t j = 0;
1910 connectRanges[0] = 0;
1911 for (const Cell &cell : m_cells) {
1912 // Element connectivity
1913 for (long vertex : cell.getVertexIds()) {
1914 connectStorage[j] = vertexToNodeMap[vertex];
1915 ++j;
1916 }
1917 connectRanges[i + 1] = j;
1918
1919 // Element weight
1920 auto weightItr = cellWeights.find(cell.getId());
1921 if (weightItr != cellWeights.end()) {
1922 elementWeights[i] = static_cast<idx_t>(std::max(std::round(weightItr->second), 1.));
1923 }
1924
1925 // Increase counters
1926 i++;
1927 }
1928
1929 // Get the number of common nodes two elements should share to be considered neighbours
1930 idx_t nCommonNodes = getDimension();
1931
1932 // Get the number of partitions the mesh should be split into
1933 idx_t nPartitions = getProcessorCount();
1934
1935 // Evaluate partitioning
1936 idx_t objectiveValue;
1937
1938 std::vector<idx_t> elementRanks(nElements);
1939 std::vector<idx_t> nodeRanks(nNodes);
1940
1941 int status = METIS_PartMeshDual(&nElements, &nNodes, connectRanges.data(), connectStorage.data(),
1942 elementWeights.data(), nullptr, &nCommonNodes, &nPartitions, nullptr,
1943 nullptr, &objectiveValue, elementRanks.data(), nodeRanks.data());
1944
1945 if (status != METIS_OK) {
1946 throw std::runtime_error("Error during partitioning (METIS error " + std::to_string(status) + ")");
1947 }
1948
1949 // Fill the cell ranks
1950 int metisId = 0;
1951 for (const Cell &cell : m_cells) {
1952 int metisRank = static_cast<int>(elementRanks[metisId]);
1953 if (metisRank != getRank()) {
1954 cellRanks[cell.getId()] = metisRank;
1955 }
1956 ++metisId;
1957 }
1958 }
1959
1960 return _partitioningPrepare(cellRanks, trackPartitioning);
1961#else
1962 BITPIT_UNUSED(cellWeights);
1963 BITPIT_UNUSED(defaultWeight);
1964 BITPIT_UNUSED(trackPartitioning);
1965
1966 throw std::runtime_error("METIS library is required for automatic patch partitioning.");
1967#endif
1968}
1969
1980std::vector<adaption::Info> PatchKernel::_partitioningPrepare(const std::unordered_map<long, int> &cellRanks, bool trackPartitioning)
1981{
1982 // Fill partitioning ranks
1983 int patchRank = getRank();
1984
1985 std::set<int> recvRanks;
1986 m_partitioningOutgoings.clear();
1987 for (auto &entry : cellRanks) {
1988 int recvRank = entry.second;
1989 if (recvRank == patchRank) {
1990 continue;
1991 }
1992
1993 long cellId = entry.first;
1994 if (m_ghostCellInfo.count(cellId) > 0) {
1995 continue;
1996 }
1997
1998 m_partitioningOutgoings.insert(entry);
1999 recvRanks.insert(recvRank);
2000 }
2001
2002 // Identify exchange entries
2003 int nRanks = getProcessorCount();
2004
2005 int nExchanges = recvRanks.size();
2006 std::vector<std::pair<int, int>> exchanges;
2007 exchanges.reserve(nExchanges);
2008 for (int recvRank : recvRanks) {
2009 exchanges.emplace_back(patchRank, recvRank);
2010 }
2011
2012 int exchangesGatherCount = 2 * nExchanges;
2013 std::vector<int> exchangeGatherCount(nRanks);
2014 MPI_Allgather(&exchangesGatherCount, 1, MPI_INT, exchangeGatherCount.data(), 1, MPI_INT, m_communicator);
2015
2016 std::vector<int> exchangesGatherDispls(nRanks, 0);
2017 for (int i = 1; i < nRanks; ++i) {
2018 exchangesGatherDispls[i] = exchangesGatherDispls[i - 1] + exchangeGatherCount[i - 1];
2019 }
2020
2021 int nGlobalExchanges = nExchanges;
2022 MPI_Allreduce(MPI_IN_PLACE, &nGlobalExchanges, 1, MPI_INT, MPI_SUM, m_communicator);
2023
2024 m_partitioningGlobalExchanges.resize(nGlobalExchanges);
2025 MPI_Allgatherv(exchanges.data(), exchangesGatherCount, MPI_INT, m_partitioningGlobalExchanges.data(),
2026 exchangeGatherCount.data(), exchangesGatherDispls.data(), MPI_INT, m_communicator);
2027
2028 std::sort(m_partitioningGlobalExchanges.begin(), m_partitioningGlobalExchanges.end(), greater<std::pair<int,int>>());
2029
2030 // Get global list of ranks that will send data
2031 std::unordered_set<int> globalSendRanks;
2032 for (const std::pair<int, int> &entry : m_partitioningGlobalExchanges) {
2033 globalSendRanks.insert(entry.first);
2034 }
2035
2036 // Get global list of ranks that will receive data
2037 std::unordered_set<int> globalRecvRanks;
2038 for (const std::pair<int, int> &entry : m_partitioningGlobalExchanges) {
2039 globalRecvRanks.insert(entry.second);
2040 }
2041
2042 // Identify if this is a serialization or a normal partitioning
2043 //
2044 // We are serializing the patch if all the processes are sending all
2045 // their cells to the same rank.
2046 m_partitioningSerialization = (globalRecvRanks.size() == 1);
2047 if (m_partitioningSerialization) {
2048 int receiverRank = *(globalRecvRanks.begin());
2049 if (patchRank != receiverRank) {
2050 if (m_partitioningOutgoings.size() != (std::size_t) getInternalCellCount()) {
2051 m_partitioningSerialization = false;
2052 }
2053 }
2054
2055 MPI_Allreduce(MPI_IN_PLACE, &m_partitioningSerialization, 1, MPI_C_BOOL, MPI_LAND, m_communicator);
2056 }
2057
2058 // Build the information on the cells that will be sent
2059 //
2060 // Only internal cells are tracked.
2061 std::vector<adaption::Info> partitioningData;
2062 if (trackPartitioning) {
2063 for (const std::pair<int, int> &entry : m_partitioningGlobalExchanges) {
2064 int sendRank = entry.first;
2065 if (sendRank != patchRank) {
2066 continue;
2067 }
2068
2069 int recvRank = entry.second;
2070
2071 std::vector<long> previous;
2072 for (const auto &entry : m_partitioningOutgoings) {
2073 int cellRank = entry.second;
2074 if (cellRank != recvRank) {
2075 continue;
2076 }
2077
2078 long cellId = entry.first;
2079 previous.push_back(cellId);
2080 }
2081
2082 if (!previous.empty()) {
2083 partitioningData.emplace_back();
2084 adaption::Info &partitioningInfo = partitioningData.back();
2085 partitioningInfo.entity = adaption::ENTITY_CELL;
2086 partitioningInfo.type = adaption::TYPE_PARTITION_SEND;
2087 partitioningInfo.rank = recvRank;
2088 partitioningInfo.previous = std::move(previous);
2089 }
2090 }
2091 }
2092
2093 return partitioningData;
2094}
2095
/*!
    Alters the patch to perform the prepared partitioning.

    The global exchanges identified by _partitioningPrepare() are processed
    in batches: within a batch a process can act either as a sender or as a
    receiver, never as both.

    This is a collective operation: it has to be called by all processes.

    \param trackPartitioning if set to true, the changes performed by the
    partitioning are tracked
    \result If partitioning is tracked, returns a vector of adaption::Info
    with the changes performed by the partitioning, otherwise an empty
    vector.
*/
std::vector<adaption::Info> PatchKernel::_partitioningAlter(bool trackPartitioning)
{
    // Adjacencies need to be up-to-date

    // Evaluate final ghost owners
    //
    // If we are serializing the patch, we need to delete all the ghost cells.
    //
    // If we are not serializing the patch, some of the cells that will be sent
    // during partitioning may be ghosts on other processes. Therefore, we
    // need to identify the owners of the ghosts after the partitioning.
    std::unordered_map<long, int> ghostCellOwnershipChanges;
    if (!m_partitioningSerialization) {
        if (isPartitioned()) {
            ghostCellOwnershipChanges = _partitioningAlter_evalGhostCellOwnershipChanges();
        }
    } else {
        _partitioningAlter_deleteGhosts();
    }

    // Communicate patch data structures
    //
    // Separate tags are generated for the cell and the vertex streams, so
    // the two communications cannot be mismatched.
    int patchRank = getRank();

    m_partitioningCellsTag = communications::tags().generate(m_communicator);
    m_partitioningVerticesTag = communications::tags().generate(m_communicator);

    std::unordered_set<int> batchSendRanks;
    std::unordered_set<int> batchRecvRanks;

    std::vector<adaption::Info> partitioningData;
    std::vector<adaption::Info> rankPartitioningData;
    while (!m_partitioningGlobalExchanges.empty()) {
        // Add exchanges to the batch
        //
        // Inside a batch we can mix send and receives, but a process can be
        // either a sender or a receiver, it cannot be both. Exchanges are
        // consumed from the back of the (descending-sorted) global list;
        // the batch is closed as soon as an exchange would give a rank
        // both roles.
        batchSendRanks.clear();
        batchRecvRanks.clear();
        while (!m_partitioningGlobalExchanges.empty()) {
            const std::pair<int, int> &entry = m_partitioningGlobalExchanges.back();

            int entrySendRank = entry.first;
            if (batchRecvRanks.count(entrySendRank)) {
                break;
            }

            int entryRecvRank = entry.second;
            if (batchSendRanks.count(entryRecvRank)) {
                break;
            }

            batchSendRanks.insert(entrySendRank);
            batchRecvRanks.insert(entryRecvRank);
            m_partitioningGlobalExchanges.pop_back();
        }

        // Detect the role of the current process
        bool isSender = (batchSendRanks.count(patchRank) > 0);
        bool isReceiver = (batchRecvRanks.count(patchRank) > 0);

        // Perform sends/receives
        if (isSender) {
            rankPartitioningData = _partitioningAlter_sendCells(batchRecvRanks, trackPartitioning, &ghostCellOwnershipChanges);
        } else if (isReceiver) {
            rankPartitioningData = _partitioningAlter_receiveCells(batchSendRanks, trackPartitioning, &ghostCellOwnershipChanges);
        }

        if (trackPartitioning) {
            for (adaption::Info &rankPartitioningInfo : rankPartitioningData) {
                partitioningData.emplace_back(std::move(rankPartitioningInfo));
            }
        }

        // Update ghost ownership
        //
        // Changes related to the senders of this batch are now final and
        // can be applied (and removed from the pending change list).
        for (int sendRank : batchSendRanks) {
            _partitioningAlter_applyGhostCellOwnershipChanges(sendRank, &ghostCellOwnershipChanges);
        }
    }

    // All ownership changes should have been consumed by the batches above
    assert(ghostCellOwnershipChanges.size() == 0);

    communications::tags().trash(m_partitioningCellsTag, m_communicator);
    communications::tags().trash(m_partitioningVerticesTag, m_communicator);

    return partitioningData;
}
2192
2204std::unordered_map<long, int> PatchKernel::_partitioningAlter_evalGhostCellOwnershipChanges()
2205{
2206 int patchRank = getRank();
2207
2208 // Initialize communications
2209 //
2210 // The communications will exchange the ranks on ghosts cells.
2211 DataCommunicator notificationCommunicator(getCommunicator());
2212 size_t notificationDataSize = sizeof(patchRank);
2213
2214 // Set and start the receives
2215 for (const auto &entry : getGhostCellExchangeTargets()) {
2216 const int rank = entry.first;
2217 const auto &targetCells = entry.second;
2218
2219 notificationCommunicator.setRecv(rank, targetCells.size() * notificationDataSize);
2220 notificationCommunicator.startRecv(rank);
2221 }
2222
2223 // Set and start the sends
2224 //
2225 // Data buffer is filled with the ranks that will own source cells after
2226 // the partitioning.
2227 for (const auto &entry : getGhostCellExchangeSources()) {
2228 const int rank = entry.first;
2229 auto &sourceCells = entry.second;
2230
2231 notificationCommunicator.setSend(rank, sourceCells.size() * notificationDataSize);
2232 SendBuffer &buffer = notificationCommunicator.getSendBuffer(rank);
2233 for (long id : sourceCells) {
2234 int finalOwner;
2235 if (m_partitioningOutgoings.count(id) > 0) {
2236 finalOwner = m_partitioningOutgoings.at(id);
2237 } else {
2238 finalOwner = patchRank;
2239 }
2240
2241 buffer << finalOwner;
2242 }
2243 notificationCommunicator.startSend(rank);
2244 }
2245
2246 // Receive the final owners of of the ghosts
2247 //
2248 // Data buffer contains the ranks that will own ghost cells after the
2249 // partitioning.
2250 std::unordered_map<long, int> ghostCellOwnershipChanges;
2251
2252 int nCompletedRecvs = 0;
2253 while (nCompletedRecvs < notificationCommunicator.getRecvCount()) {
2254 int rank = notificationCommunicator.waitAnyRecv();
2255 const auto &list = getGhostCellExchangeTargets(rank);
2256
2257 RecvBuffer &buffer = notificationCommunicator.getRecvBuffer(rank);
2258 for (long id : list) {
2259 int finalOwner;
2260 buffer >> finalOwner;
2261
2262 if (finalOwner != m_ghostCellInfo.at(id).owner) {
2263 ghostCellOwnershipChanges[id] = finalOwner;
2264 }
2265 }
2266
2267 ++nCompletedRecvs;
2268 }
2269
2270 // Wait for the sends to finish
2271 notificationCommunicator.waitAllSends();
2272
2273 return ghostCellOwnershipChanges;
2274}
2275
2287void PatchKernel::_partitioningAlter_applyGhostCellOwnershipChanges(int sendRank,
2288 std::unordered_map<long, int> *ghostCellOwnershipChanges)
2289{
2290 for (auto itr = ghostCellOwnershipChanges->begin(); itr != ghostCellOwnershipChanges->end();) {
2291 // Consider only ghosts previously owned by the sender
2292 long ghostCellId = itr->first;
2293 const GhostCellInfo &ghostCellInfo = m_ghostCellInfo.at(ghostCellId);
2294 int previousGhostCellOwner = ghostCellInfo.owner;
2295 if (previousGhostCellOwner != sendRank) {
2296 ++itr;
2297 continue;
2298 }
2299
2300 // Update ghost owner
2301 int finalGhostCellOwner = itr->second;
2302 setGhostCellInfo(ghostCellId, finalGhostCellOwner, ghostCellInfo.haloLayer);
2303 itr = ghostCellOwnershipChanges->erase(itr);
2304 }
2305}
2306
/*!
    Deletes the ghost entities of the patch.

    This is used when the patch is being serialized (see the serialization
    branch of _partitioningAlter()): after the serialization there is a
    single partition, hence no ghost entities are needed.
*/
void PatchKernel::_partitioningAlter_deleteGhosts()
{
    // Delete ghost cells
    //
    // The ids are collected first, because cells cannot be deleted while
    // iterating over the ghost-info container.
    std::vector<long> cellsDeleteList;
    cellsDeleteList.reserve(m_ghostCellInfo.size());
    for (const auto &entry : m_ghostCellInfo) {
        long cellId = entry.first;
        cellsDeleteList.emplace_back(cellId);
    }

    deleteCells(cellsDeleteList);

    // Prune stale adjacencies

    // Prune stale interfaces
    //
    // Interfaces will be re-created after the partitioning.

    // Delete vertices no longer used

    // Convert all ghost vertices to internal vertices
    //
    // Ghost vertices are promoted one at a time, always starting from the
    // first ghost vertex, until none is left.
    while (getGhostVertexCount() > 0) {
        ghostVertex2InternalVertex(m_firstGhostVertexId);
    }
}
2338
2354std::vector<adaption::Info> PatchKernel::_partitioningAlter_sendCells(const std::unordered_set<int> &recvRanks, bool trackPartitioning,
2355 std::unordered_map<long, int> *ghostCellOwnershipChanges)
2356{
2357 // Initialize adaption data
2358 std::vector<adaption::Info> partitioningData;
2359 if (trackPartitioning) {
2360 partitioningData.reserve(recvRanks.size());
2361 }
2362
2363 // Detect if the rank is sending all its cells
2364 std::size_t nOutgoingsOverall = m_partitioningOutgoings.size();
2365 bool sendingAllCells = (nOutgoingsOverall == (std::size_t) getInternalCellCount());
2366
2367 //
2368 // Send data to the receivers
2369 //
2370 const double ACTIVATE_MEMORY_LIMIT_THRESHOLD = 0.15;
2371
2372 int patchDimension = getDimension();
2373
2374 MPI_Request verticesSizeRequest = MPI_REQUEST_NULL;
2375 MPI_Request cellsSizeRequest = MPI_REQUEST_NULL;
2376
2377 MPI_Request verticesRequest = MPI_REQUEST_NULL;
2378 MPI_Request cellsRequest = MPI_REQUEST_NULL;
2379
2380 OBinaryStream verticesBuffer;
2381 OBinaryStream cellsBuffer;
2382
2383 std::vector<long> neighsList;
2384 std::unordered_set<long> frontierNeighs;
2385 std::unordered_set<long> frontierVertices;
2386 std::unordered_set<long> frontierFaceVertices;
2387 std::unordered_set<ConstCellHalfEdge, ConstCellHalfEdge::Hasher> frontierEdges;
2388
2389 std::unordered_set<long> outgoingCells;
2390 std::unordered_set<long> frameCells;
2391 std::unordered_map<long, std::size_t> haloCells;
2392 std::vector<long> firstHaloLayerCells;
2393
2394 std::unordered_set<long> frameVertices;
2395 std::unordered_set<long> haloVertices;
2396
2397 std::vector<long> cellSendList;
2398 std::unordered_set<long> vertexSendList;
2399
2400 std::vector<long> frameCellsOverall;
2401 std::unordered_set<long> ghostHaloCellsOverall;
2402 std::unordered_set<long> innerFrontierCellsOverall;
2403
2404 for (int recvRank : recvRanks) {
2405 //
2406 // Fill the list of cells explicitly marked for sending
2407 //
2408 outgoingCells.clear();
2409 for (const auto &rankEntry : m_partitioningOutgoings) {
2410 int rank = rankEntry.second;
2411 if (rank != recvRank) {
2412 continue;
2413 }
2414
2415 long cellId = rankEntry.first;
2416 outgoingCells.insert(cellId);
2417 }
2418
2419 //
2420 // Identify frame and halo
2421 //
2422 // Along with the cells explicitly marked for sending, we need to send
2423 // also a halo of surrounding cells. Those cells will be used by the
2424 // receiver to connect the cells it receives to the existing cells and
2425 // to generate the ghost cells.
2426 //
2427 // The faces that tell apart cells explicitly marked for sending from
2428 // halo cells are called "frontier faces". We can identify frontier
        // faces looping through the outgoing cells and looking for faces with
2430 // a neighbour not explicitly marked for being sent to the receiver
2431 // rank.
2432 //
2433 // Frame is made up by cells explicitly marked for being sent to the
2434 // receiver rank that have a face, an edge or a vertex on a frontier
2435 // face.
2436 //
2437 // The first layer of halo cells is made up by cells not explicitly
2438 // marked for being sent to the receiver that have a face, an edge
2439 // or a vertex on a frontier face. Cells on subsequent halo layers
        // are identified as cells not explicitly marked for being sent
        // that are neighbours of the previous halo layer. At this
2442 // stage we are only building the first layer of halo cells.
2443 //
2444 // We also identify outgoing cells that have a face/edge/vertex on
2445 // the inner frontier. A face/edge/vertex is on the inner frontier
2446 // if its on the frontier and one of the cells that contain that
2447 // item is not an outgoing cell (i.e., is not explicitly marked for
2448 // being sent to any rank). Cells on inner frontier will belong to
2449 // the first layer of ghost cells for the current rank.
2450 //
2451 // There are no halo nor frame cells if we are serializing a patch.
2452 //
2453 // NOTE: for three-dimensional unstructured non-conformal meshes we
2454 // need to explicitly search cells that have an edge on the frontier,
2455 // searching for cells that have a vertex on the frontier is not
2456 // enough (for unstructured meshes we may have a cell that touches the
2457 // frontier only through an edge).
2458 if (!m_partitioningSerialization) {
2459 haloCells.clear();
2460 frameCells.clear();
2461
2462 frontierVertices.clear();
2463 frontierEdges.clear();
2464 for (long cellId : outgoingCells) {
2465 Cell &cell = m_cells.at(cellId);
2466
2467 const int nFaces = cell.getFaceCount();
2468 for (int face = 0; face < nFaces; ++face) {
2469 int nFaceAdjacencies = cell.getAdjacencyCount(face);
2470 if (nFaceAdjacencies == 0) {
2471 continue;
2472 }
2473
2474 const long *faceAdjacencies = cell.getAdjacencies(face);
2475 for (int i = 0; i < nFaceAdjacencies; ++i) {
2476 // Check if the we are on the frontier
2477 long neighId = faceAdjacencies[i];
2478 if (outgoingCells.count(neighId) > 0) {
2479 continue;
2480 }
2481
2482 // Select the cell with only one adjacency
2483 long frontierCellId;
2484 long frontierFace;
2485 const Cell *frontierCell;
2486 if (nFaceAdjacencies == 1) {
2487 frontierCellId = cellId;
2488 frontierFace = face;
2489 frontierCell = &cell;
2490 } else {
2491 const Cell &neigh = getCell(neighId);
2492
2493 frontierCellId = neighId;
2494 frontierFace = findAdjoinNeighFace(cell, face, neigh);
2495 frontierCell = &neigh;
2496
2497 assert(frontierFace >= 0);
2498 }
2499
2500 // Clear neighbour list
2501 frontierNeighs.clear();
2502
2503 //
2504 // Add frontier face neighbours
2505 //
2506
2507 // Check if the face is on the inner frontier
2508 //
2509 // To be on the inner frontier, the face needs to be
2510 // on a cell not explicitly marked for being sent to
2511 // any rank. We are iterating on the cells explicitly
2512 // marked for being sent to the receiver rank, hence
2513 // the current cell is definitely an outgoing cell.
2514 // Therefore, the face will be on the inner frontier
2515 // if the neighbour is not an outgoing cell.
2516 bool innerFrontierFace = (m_partitioningOutgoings.count(neighId) == 0);
2517
2518 // Add the neighbours to the list
2519 //
2520 // If the face is on the inner frontier, the outgoing
2521 // cell is the current cell (neighbour cannot be an
2522 // outgoing cell, otherwise the face will not be on
2523 // the inner frontier).
2524 frontierNeighs.insert(cellId);
2525 frontierNeighs.insert(neighId);
2526
2527 if (innerFrontierFace) {
2528 innerFrontierCellsOverall.insert(cellId);
2529 }
2530
2531 //
2532 // Add frontier vertex neighbours
2533 //
2534 ConstProxyVector<int> frontierFaceLocalVerticesIds = frontierCell->getFaceLocalVertexIds(frontierFace);
2535 int nFrontierFaceVertices = frontierFaceLocalVerticesIds.size();
2536
2537 frontierFaceVertices.clear();
2538 for (int k = 0; k < nFrontierFaceVertices; ++k) {
2539 long vertexId = frontierCell->getFaceVertexId(frontierFace, k);
2540 frontierFaceVertices.insert(vertexId);
2541
2542 // Avoid adding duplicate vertices
2543 auto frontierVertexItr = frontierVertices.find(vertexId);
2544 if (frontierVertexItr != frontierVertices.end()) {
2545 continue;
2546 }
2547
2548 // Add vertex to the list of frontier vertices
2549 frontierVertices.insert(vertexId);
2550
2551 // Find vertex neighbours
2552 neighsList.clear();
2553 int vertex = frontierFaceLocalVerticesIds[k];
2554 findCellVertexNeighs(frontierCellId, vertex, &neighsList);
2555
2556 // Check if the vertex is on the inner frontier
2557 bool innerFrontierVertex = (m_partitioningOutgoings.count(frontierCellId) == 0);
2558 if (!innerFrontierVertex) {
2559 for (long frontierNeighId : neighsList) {
2560 if (m_partitioningOutgoings.count(frontierNeighId) == 0) {
2561 innerFrontierVertex = true;
2562 break;
2563 }
2564 }
2565 }
2566
2567 // Add neighbours to the list
2568 for (long frontierNeighId : neighsList) {
2569 frontierNeighs.insert(frontierNeighId);
2570 if (innerFrontierVertex) {
2571 if (m_partitioningOutgoings.count(frontierNeighId) > 0) {
2572 innerFrontierCellsOverall.insert(frontierNeighId);
2573 }
2574 }
2575 }
2576 }
2577
2578 //
2579 // Add frontier edge neighbours
2580 //
2581 if (patchDimension == 3) {
2582 int nFrontierCellEdges = frontierCell->getEdgeCount();
2583 for (int edge = 0; edge < nFrontierCellEdges; ++edge) {
2584 // Edge information
2585 int nEdgeVertices = frontierCell->getEdgeVertexCount(edge);
2586
2587 ConstCellHalfEdge frontierEdge = ConstCellHalfEdge(*frontierCell, edge, ConstCellHalfEdge::WINDING_NATURAL);
2588
2589 // Discard edges that do not belong to the
2590 // current frontier face
2591 ConstProxyVector<long> edgeVertexIds = frontierEdge.getVertexIds();
2592
2593 bool isFrontierFaceEdge = true;
2594 for (int k = 0; k < nEdgeVertices; ++k) {
2595 long edgeVertexId = edgeVertexIds[k];
2596 if (frontierFaceVertices.count(edgeVertexId) == 0) {
2597 isFrontierFaceEdge = false;
2598 break;
2599 }
2600 }
2601
2602 if (!isFrontierFaceEdge) {
2603 continue;
2604 }
2605
2606 // Avoid adding duplicate edges
2607 frontierEdge.setWinding(ConstCellHalfEdge::WINDING_REVERSE);
2608 auto frontierEdgeItr = frontierEdges.find(frontierEdge);
2609 if (frontierEdgeItr != frontierEdges.end()) {
2610 continue;
2611 } else {
2612 frontierEdge.setWinding(ConstCellHalfEdge::WINDING_NATURAL);
2613 frontierEdgeItr = frontierEdges.find(frontierEdge);
2614 if (frontierEdgeItr != frontierEdges.end()) {
2615 continue;
2616 }
2617 }
2618
2619 // Add edge to the list of frontier edges
2620 frontierEdges.insert(std::move(frontierEdge));
2621
2622 // Find edge neighbours
2623 neighsList.clear();
2624 findCellEdgeNeighs(frontierCellId, edge, &neighsList);
2625
2626 // Check if the vertex is on the inner frontier
2627 bool innerFrontierEdge = (m_partitioningOutgoings.count(frontierCellId) == 0);
2628 if (!innerFrontierEdge) {
2629 for (long frontierNeighId : neighsList) {
2630 if (m_partitioningOutgoings.count(frontierNeighId) == 0) {
2631 innerFrontierEdge = true;
2632 break;
2633 }
2634 }
2635 }
2636
2637 // Add neighbours to the list
2638 for (long frontierNeighId : neighsList) {
2639 frontierNeighs.insert(frontierNeighId);
2640 if (innerFrontierEdge) {
2641 if (m_partitioningOutgoings.count(frontierNeighId) > 0) {
2642 innerFrontierCellsOverall.insert(frontierNeighId);
2643 }
2644 }
2645 }
2646 }
2647 }
2648
2649 // Tell apart frame and halo cells
2650 for (long frontierNeighId : frontierNeighs) {
2651 if (outgoingCells.count(frontierNeighId) > 0) {
2652 frameCells.insert(frontierNeighId);
2653 } else {
2654 haloCells.insert({frontierNeighId, 0});
2655 }
2656 }
2657 }
2658 }
2659 }
2660
2661 // Identify cells on subsequent frame layers
2662 //
2663 // Only cells sent to the receiver rank can be frame cells.
2664 if (m_haloSize > 1) {
2665 auto frameSelector = [&outgoingCells](long cellId) {
2666 return (outgoingCells.count(cellId) != 0);
2667 };
2668
2669 auto frameBuilder = [&frameCells](long cellId, int layer) {
2670 BITPIT_UNUSED(layer);
2671
2672 frameCells.insert(cellId);
2673
2674 return false;
2675 };
2676
2677 processCellsNeighbours(frameCells, m_haloSize - 1, frameSelector, frameBuilder);
2678 }
2679
2680 // Identify cells on subsequent halo layers
2681 //
2682 // The selector should discard cells in the frame, this ensures that
2683 // the halo will not extend to the cells being sent to the receiver
2684 // rank.
2685 if (m_haloSize > 1) {
2686 firstHaloLayerCells.clear();
2687 firstHaloLayerCells.reserve(haloCells.size());
2688 for (const auto &haloEntry : haloCells) {
2689 firstHaloLayerCells.push_back(haloEntry.first);
2690 }
2691
2692 auto haloSelector = [&frameCells](long cellId) {
2693 return (frameCells.count(cellId) == 0);
2694 };
2695
2696 auto haloBuilder = [&haloCells](long cellId, int layer) {
2697 haloCells.insert({cellId, layer + 1});
2698
2699 return false;
2700 };
2701
2702 processCellsNeighbours(firstHaloLayerCells, m_haloSize - 1, haloSelector, haloBuilder);
2703 }
2704 }
2705
2706 // Order cells to send
2707 //
2708 // To allow the receiver to efficently store the cells, first we
2709 // send the cells that are internal for the receiver, then the
2710 // halo cells.
2711 std::size_t nOutgoingCells = outgoingCells.size();
2712 std::size_t nHaloCells = haloCells.size();
2713
2714 cellSendList.assign(outgoingCells.begin(), outgoingCells.end());
2715
2716 std::size_t haloIndex = cellSendList.size();
2717 cellSendList.resize(cellSendList.size() + nHaloCells);
2718 for (const auto &haloEntry : haloCells) {
2719 cellSendList[haloIndex] = haloEntry.first;
2720 ++haloIndex;
2721 }
2722
2723 // Update list of overall frame cells
2724 frameCellsOverall.insert(frameCellsOverall.end(), frameCells.begin(), frameCells.end());
2725
2726 // Update list of halo cells
2727 for (const auto &haloEntry : haloCells) {
2728 long cellId = haloEntry.first;
2729 const Cell &cell = getCell(cellId);
2730 if (cell.isInterior()) {
2731 continue;
2732 }
2733
2734 ghostHaloCellsOverall.insert(cellId);
2735 }
2736
2737 //
2738 // Communicate cell buffer size
2739 //
2740
2741 // Wait for previous comunications to finish
2742 if (cellsSizeRequest != MPI_REQUEST_NULL) {
2743 MPI_Wait(&cellsSizeRequest, MPI_STATUS_IGNORE);
2744 }
2745
2746 // Start the communication
2747 long cellsBufferSize = 2 * sizeof(long) + 3 * nHaloCells * sizeof(int) + 2 * (nOutgoingCells + nHaloCells) * sizeof(bool);
2748 for (const long cellId : cellSendList) {
2749 cellsBufferSize += m_cells.at(cellId).getBinarySize();
2750 }
2751
2752 MPI_Isend(&cellsBufferSize, 1, MPI_LONG, recvRank, m_partitioningCellsTag, m_communicator, &cellsSizeRequest);
2753
2754 //
2755 // Create the list of vertices to send
2756 //
2757 // On serialization there are no frame nor halo cells, however the
        // vertices on border faces are considered frame vertices, because
        // they will be used to link the cells on the receiver (they will
2760 // be duplicate vertices on the receiver).
2761 //
2762 frameVertices.clear();
2763 haloVertices.clear();
2764 vertexSendList.clear();
2765 for (const long cellId : cellSendList) {
2766 const Cell &cell = m_cells.at(cellId);
2767
2768 ConstProxyVector<long> cellVertexIds = cell.getVertexIds();
2769 int nCellVertices = cellVertexIds.size();
2770 for (int j = 0; j < nCellVertices; ++j) {
2771 long vertexId = cellVertexIds[j];
2772
2773 if (frameCells.count(cellId) > 0) {
2774 frameVertices.insert(vertexId);
2775 } else if (haloCells.count(cellId) > 0) {
2776 haloVertices.insert(vertexId);
2777 }
2778
2779 vertexSendList.insert(vertexId);
2780 }
2781 }
2782
2783 if (m_partitioningSerialization) {
2784 for (const long cellId : cellSendList) {
2785 const Cell &cell = m_cells.at(cellId);
2786
2787 int nCellFaces = cell.getFaceCount();
2788 for (int face = 0; face < nCellFaces; ++face) {
2789 if (!cell.isFaceBorder(face)) {
2790 continue;
2791 }
2792
2793 int nFaceVertices = cell.getFaceVertexCount(face);
2794 for (int k = 0; k < nFaceVertices; ++k) {
2795 long vertexId = cell.getFaceVertexId(face, k);
2796 frameVertices.insert(vertexId);
2797 }
2798 }
2799 }
2800 }
2801
2802 //
2803 // Communicate vertex buffer size
2804 //
2805
2806 // Wait for previous comunications to finish
2807 if (verticesSizeRequest != MPI_REQUEST_NULL) {
2808 MPI_Wait(&verticesSizeRequest, MPI_STATUS_IGNORE);
2809 }
2810
2811 // Start the communication
2812 long verticesBufferSize = sizeof(long) + 2 * vertexSendList.size() * sizeof(bool);
2813 for (long vertexId : vertexSendList) {
2814 verticesBufferSize += m_vertices[vertexId].getBinarySize();
2815 }
2816
2817 MPI_Isend(&verticesBufferSize, 1, MPI_LONG, recvRank, m_partitioningVerticesTag, m_communicator, &verticesSizeRequest);
2818
2819 //
2820 // Send vertex data
2821 //
2822
2823 // Wait for previous comunications to finish
2824 if (verticesRequest != MPI_REQUEST_NULL) {
2825 MPI_Wait(&verticesRequest, MPI_STATUS_IGNORE);
2826 }
2827
2828 // Fill buffer with vertex data
2829 verticesBuffer.setSize(verticesBufferSize);
2830 verticesBuffer.seekg(0);
2831
2832 verticesBuffer << (long) vertexSendList.size();
2833 for (long vertexId : vertexSendList) {
2834 // Vertex information
2835 bool isFrame = (frameVertices.count(vertexId) > 0);
2836 bool isHalo = (haloVertices.count(vertexId) > 0);
2837
2838 verticesBuffer << isFrame;
2839 verticesBuffer << isHalo;
2840
            // Vertex data
2842 verticesBuffer << m_vertices[vertexId];
2843 }
2844
2845 if (verticesBufferSize != (long) verticesBuffer.getSize()) {
2846 throw std::runtime_error ("Cell buffer size does not match calculated size");
2847 }
2848
2849 MPI_Isend(verticesBuffer.data(), verticesBuffer.getSize(), MPI_CHAR, recvRank, m_partitioningVerticesTag, m_communicator, &verticesRequest);
2850
2851 //
2852 // Send cell data
2853 //
2854
2855 // Wait for previous comunications to finish
2856 if (cellsRequest != MPI_REQUEST_NULL) {
2857 MPI_Wait(&cellsRequest, MPI_STATUS_IGNORE);
2858 }
2859
2860 // Fill the buffer with cell data
2861 cellsBuffer.setSize(cellsBufferSize);
2862 cellsBuffer.seekg(0);
2863
2864 cellsBuffer << (long) nOutgoingCells;
2865 cellsBuffer << (long) nHaloCells;
2866 for (long cellId : cellSendList) {
2867 // Cell information
2868 bool isFrame = (frameCells.count(cellId) > 0);
2869 bool isHalo = (haloCells.count(cellId) > 0);
2870
2871 cellsBuffer << isFrame;
2872 cellsBuffer << isHalo;
2873
2874 // Cell owner on receiver
2875 //
2876 // This is only needed if the cell is on the halo, in the other
2877 // case the owner is always the receiver itself.
2878 if (isHalo) {
2879 int cellOwnerOnReceiver;
2880 if (m_partitioningOutgoings.count(cellId) > 0) {
2881 cellOwnerOnReceiver = m_partitioningOutgoings.at(cellId);
2882 } else if (m_ghostCellInfo.count(cellId) > 0) {
2883 cellOwnerOnReceiver = m_ghostCellInfo.at(cellId).owner;
2884 } else {
2885 cellOwnerOnReceiver = m_rank;
2886 }
2887
2888 cellsBuffer << cellOwnerOnReceiver;
2889
2890 int cellHaloLayerOnReceiver;
2891 if (cellOwnerOnReceiver != recvRank) {
2892 cellHaloLayerOnReceiver = haloCells.at(cellId);
2893 } else {
2894 cellHaloLayerOnReceiver = -1;
2895 }
2896
2897 cellsBuffer << cellHaloLayerOnReceiver;
2898
2899 int ghostCellOwnershipChange;
2900 if (ghostCellOwnershipChanges->count(cellId) > 0) {
2901 ghostCellOwnershipChange = ghostCellOwnershipChanges->at(cellId);
2902 } else {
2903 ghostCellOwnershipChange = -1;
2904 }
2905
2906 cellsBuffer << ghostCellOwnershipChange;
2907 }
2908
2909 // Cell data
2910 const Cell &cell = m_cells.at(cellId);
2911
2912 cellsBuffer << cell;
2913 }
2914
2915 if (cellsBufferSize != (long) cellsBuffer.getSize()) {
2916 throw std::runtime_error ("Cell buffer size does not match calculated size");
2917 }
2918
2919 MPI_Isend(cellsBuffer.data(), cellsBuffer.getSize(), MPI_CHAR, recvRank, m_partitioningCellsTag, m_communicator, &cellsRequest);
2920
2921 //
2922 // Update adaption info
2923 //
2924 if (trackPartitioning) {
2925 // Update partition
2926 //
2927 // The ids of the cells send will be stored accordingly to the send
2928 // order, this is the same order that will be used on the process
2929 // that has received the cell. Since the order is the same, the
2930 // two processes are able to exchange cell data without additional
2931 // communications (they already know the list of cells for which
2932 // data is needed and the order in which these data will be sent).
2933 partitioningData.emplace_back();
2934 adaption::Info &partitioningInfo = partitioningData.back();
2935 partitioningInfo.entity = adaption::ENTITY_CELL;
2936 partitioningInfo.type = adaption::TYPE_PARTITION_SEND;
2937 partitioningInfo.rank = recvRank;
2938
2939 partitioningInfo.previous.resize(nOutgoingCells);
2940 for (std::size_t i = 0; i < nOutgoingCells; ++i) {
2941 long cellId = cellSendList[i];
2942 partitioningInfo.previous[i] = cellId;
2943 }
2944 }
2945
2946 // Delete outgoing cells not in the frame
2947 //
2948 // Frame cells cannot be deleted just now, because they may be ghost cells
2949 // for other processes.
2950 std::vector<long> deleteList;
2951
2952 deleteList.reserve(nOutgoingCells - frameCells.size());
2953 for (std::size_t i = 0; i < nOutgoingCells; ++i) {
2954 long cellId = cellSendList[i];
2955 if (frameCells.count(cellId) == 0) {
2956 deleteList.push_back(cellId);
2957 }
2958 }
2959
2960 deleteCells(deleteList);
2961
2962 // Prune cell adjacencies and interfaces
2963 //
2964 // At this stage we cannot fully update adjacencies and interfaces, but
2965 // we need to remove stale adjacencies and interfaces.
2967
2969
2970 // If we are sending many cells try to reduced the used memory
2971 bool keepMemoryLimited = (nOutgoingCells > ACTIVATE_MEMORY_LIMIT_THRESHOLD * getInternalCellCount());
2972 if (keepMemoryLimited) {
2973 // Squeeze cells
2974 squeezeCells();
2975
2976 // Squeeze interfaces
2978
2979 // Delete orphan vertices
2982 }
2983 }
2984
2985 //
2986 // Update ghost and frame cells
2987 //
2988
2989 // If the process is sending all its cells we can just clear the patch.
2990 if (!sendingAllCells) {
2991 std::vector<long> deleteList;
2992
2993 // Convert inner cells into ghosts
2994 //
2995 // All cells on the inner frontier should become ghost cells. These
2996 // cells belongs to the first halo layer and defines the seeds for
2997 // growing the subsequent halo layers.
2998 for (long cellId : innerFrontierCellsOverall) {
2999 const Cell &cell = getCell(cellId);
3000 std::size_t cellHaloLayer = 0;
3001 if (!confirmCellHaloLayer(cell, cellHaloLayer, m_partitioningOutgoings)) {
3002 continue;
3003 }
3004
3005 int cellOwner = m_partitioningOutgoings.at(cellId);
3006 internalCell2GhostCell(cellId, cellOwner, cellHaloLayer);
3007 }
3008
3009 auto ghostFrameSelector = [this](long cellId) {
3010 return (m_partitioningOutgoings.count(cellId) != 0);
3011 };
3012
3013 auto ghostFrameBuilder = [this](long cellId, int layer) {
3014 int cellOwner = m_partitioningOutgoings.at(cellId);
3015 internalCell2GhostCell(cellId, cellOwner, layer + 1);
3016
3017 return false;
3018 };
3019
3020 processCellsNeighbours(innerFrontierCellsOverall, m_haloSize - 1, ghostFrameSelector, ghostFrameBuilder);
3021
3022 // Delete frame cells that are not ghosts
3023 //
3024 // Now that the new ghosts have been created, we can delete all the
3025 // frontier cells that are not ghosts.
3026 deleteList.clear();
3027 for (long cellId : frameCellsOverall) {
3028 const Cell &cell = getCell(cellId);
3029 if (!cell.isInterior()) {
3030 continue;
3031 }
3032
3033 deleteList.push_back(cellId);
3034 ghostCellOwnershipChanges->erase(cellId);
3035 }
3036
3037 deleteCells(deleteList);
3038
3039 // Prune cell adjacencies
3040 //
3041 // At this stage we cannot fully update the adjacencies, but we need
3042 // to remove stale adjacencies.
3044
3045 // Delete or update ghosts
3046 //
3047 // Some of the halo cells may be stale ghosts. Ghosts in the first halo
3048 // layer are considered stale if they have has no inner neighbours. Ghost
3049 // on subsequent layers are considered stale if they have no neighbours
3050 // in the previous halo layer.
3051 //
3052 // A layer at a time, stale ghost cells are found and deleted, valid ghosts
3053 // are identified because we need to re-compute the layer layer associated
3054 // with those ghosts.
3055 std::vector<long> neighIds;
3056 std::vector<long> updateList;
3057
3058 for (int haloLayer = 0; haloLayer < static_cast<int>(m_haloSize); ++haloLayer) {
3059 deleteList.clear();
3060 for (long cellId : ghostHaloCellsOverall) {
3061 // Consider only cells in the current halo layer
3062 const GhostCellInfo &ghostInfo = m_ghostCellInfo.at(cellId);
3063 if (ghostInfo.haloLayer != haloLayer) {
3064 continue;
3065 }
3066
3067 // Cells for which the halo layer is not confirmed will be delete, cells
3068 // for which the halo layer is confirmed will be updated.
3069 const Cell &cell = getCell(cellId);
3070 if (confirmCellHaloLayer(cell, haloLayer, m_partitioningOutgoings)) {
3071 updateList.push_back(cellId);
3072 } else {
3073 deleteList.push_back(cellId);
3074 }
3075 }
3076
3077 // Delete stale ghost in this layer
3078 for (long cellId : deleteList) {
3079 ghostCellOwnershipChanges->erase(cellId);
3080 ghostHaloCellsOverall.erase(cellId);
3081 }
3082 deleteCells(deleteList);
3083
3084 // Prune stale adjacencies
3086 }
3087
3088 // Compute layer associated with ghosts in the halo of the outgoing cells
3089 //
3090 // Some cells have been deleted because they are now on a different processor,
3091 // we need to update the halo layer associated with the ghost cells left in
3092 // the halo of the outgoing cells.
3093 for (long ghostId : updateList) {
3094 computeCellHaloLayer(ghostId);
3095 }
3096
3097 // Prune stale interfaces
3099
3100 // Delete orphan vertices
3102 } else {
3103 // The process has sent all its cells, the patch is now empty
3104 reset();
3105 ghostCellOwnershipChanges->clear();
3106 }
3107 // Wait for previous communications to finish
3108 if (cellsSizeRequest != MPI_REQUEST_NULL) {
3109 MPI_Wait(&cellsSizeRequest, MPI_STATUS_IGNORE);
3110 }
3111
3112 if (verticesSizeRequest != MPI_REQUEST_NULL) {
3113 MPI_Wait(&verticesSizeRequest, MPI_STATUS_IGNORE);
3114 }
3115
3116 if (verticesRequest != MPI_REQUEST_NULL) {
3117 MPI_Wait(&verticesRequest, MPI_STATUS_IGNORE);
3118 }
3119
3120 if (cellsRequest != MPI_REQUEST_NULL) {
3121 MPI_Wait(&cellsRequest, MPI_STATUS_IGNORE);
3122 }
3123
3124 // Return adaption info
3125 return partitioningData;
3126}
3127
/*!
    Receives the cells sent to this process by the specified ranks during
    an alteration of the patch partitioning.

    \param sendRanks are the ranks of the processes that are sending cells
    to this process
    \param trackPartitioning if set to true, the changes performed to the
    patch will be tracked and returned
    \param ghostCellOwnershipChanges is the map of pending ghost-cell
    ownership changes; entries are added for received ghosts and erased
    for ghosts promoted to internal cells
    \result If the partitioning is tracked, the changes performed to the
    patch, otherwise an empty vector.
*/
std::vector<adaption::Info> PatchKernel::_partitioningAlter_receiveCells(const std::unordered_set<int> &sendRanks, bool trackPartitioning,
                                                                         std::unordered_map<long, int> *ghostCellOwnershipChanges)
{
    //
    // Start receiving buffer sizes
    //
    int nSendRanks = sendRanks.size();

    std::vector<MPI_Request> cellsSizeRequests(nSendRanks, MPI_REQUEST_NULL);
    std::vector<MPI_Request> verticesSizeRequests(nSendRanks, MPI_REQUEST_NULL);

    std::vector<long> cellsBufferSizes(nSendRanks);
    std::vector<long> verticesBufferSizes(nSendRanks);

    // Map each sender rank to a dense index used for all per-rank arrays
    std::unordered_map<int, int> sendRankIndexes;
    sendRankIndexes.reserve(nSendRanks);

    for (int sendRank : sendRanks) {
        int sendRankIndex = sendRankIndexes.size();
        sendRankIndexes[sendRank] = sendRankIndex;

        // Cell buffer size
        long &cellsBufferSize = cellsBufferSizes[sendRankIndex];
        MPI_Request &cellsSizeRequest = cellsSizeRequests[sendRankIndex];
        MPI_Irecv(&cellsBufferSize, 1, MPI_LONG, sendRank, m_partitioningCellsTag, m_communicator, &cellsSizeRequest);

        // Vertex buffer size
        long &verticesBufferSize = verticesBufferSizes[sendRankIndex];
        MPI_Request &verticesSizeRequest = verticesSizeRequests[sendRankIndex];
        MPI_Irecv(&verticesBufferSize, 1, MPI_LONG, sendRank, m_partitioningVerticesTag, m_communicator, &verticesSizeRequest);
    }

    // Initialize data structures for data exchange
    std::vector<MPI_Request> cellsRequests(nSendRanks, MPI_REQUEST_NULL);
    std::vector<MPI_Request> verticesRequests(nSendRanks, MPI_REQUEST_NULL);

    std::vector<std::unique_ptr<IBinaryStream>> cellsBuffers(nSendRanks);
    std::vector<std::unique_ptr<IBinaryStream>> verticesBuffers(nSendRanks);

    // Fill partitioning info
    std::vector<adaption::Info> partitioningData;
    if (trackPartitioning) {
        partitioningData.resize(nSendRanks);
        for (int sendRank : sendRanks) {
            int sendRankIndex = sendRankIndexes.at(sendRank);

            adaption::Info &partitioningInfo = partitioningData[sendRankIndex];
            partitioningInfo.entity = adaption::ENTITY_CELL;
            partitioningInfo.type = adaption::TYPE_PARTITION_RECV;
            partitioningInfo.rank = sendRank;
        }
    }

    // Mark border interfaces of ghost cells as dangling
    //
    // We may receive cells that connect to the existing mesh through one
    // of the faces that are now borders. Marking those border interfaces as
    // dangling allows to delete them and create new internal interfaces.
    if (getInterfacesBuildStrategy() != INTERFACES_NONE) {
        CellConstIterator endItr = ghostCellConstEnd();
        for (CellConstIterator itr = ghostCellConstBegin(); itr != endItr; ++itr) {
            const Cell &cell = *itr;
            const long *interfaces = cell.getInterfaces();
            const int nCellInterfaces = cell.getInterfaceCount();

            setCellAlterationFlags(cell.getId(), FLAG_INTERFACES_DIRTY);

            for (int k = 0; k < nCellInterfaces; ++k) {
                long interfaceId = interfaces[k];
                const Interface &interface = getInterface(interfaceId);
                if (interface.isBorder()) {
                    setInterfaceAlterationFlags(interfaceId, FLAG_DANGLING);
                }
            }
        }
    }

    // Receive data
    int patchRank = getRank();

    int nSendCompleted = 0;
    std::vector<int> sendCompletedIndexes(nSendRanks);
    std::vector<MPI_Status> sendCompletedStatuses(nSendRanks);

    // NOTE(review): neighIds appears unused in the visible body — confirm
    // against the upstream sources before removing.
    std::vector<long> neighIds;

    std::unordered_set<long> duplicateCellsCandidates;

    // Comparator for the duplicate-vertex set: a lookup with the key
    // Cell::NULL_ID (any negative id) compares against the coordinates
    // pointed to by candidateVertexCoords instead of a stored vertex.
    std::array<double, 3> *candidateVertexCoords;
    Vertex::Less vertexLess(10 * std::numeric_limits<double>::epsilon());
    auto vertexCoordsLess = [this, &vertexLess, &candidateVertexCoords](const long &id_1, const long &id_2)
    {
        const std::array<double, 3> &coords_1 = (id_1 >= 0) ? this->getVertex(id_1).getCoords() : *candidateVertexCoords;
        const std::array<double, 3> &coords_2 = (id_2 >= 0) ? this->getVertex(id_2).getCoords() : *candidateVertexCoords;

        return vertexLess(coords_1, coords_2);
    };
    std::set<long, decltype(vertexCoordsLess)> duplicateVerticesCandidates(vertexCoordsLess);

    std::unordered_map<long, long> verticesMap;

    std::unordered_set<long> validReceivedAdjacencies;
    std::vector<long> addedCells;
    std::unordered_map<long, FlatVector2D<long>> duplicateCellsReceivedAdjacencies;
    std::unordered_map<long, long> cellsMap;

    std::unordered_set<int> awaitingSendRanks = sendRanks;
    while (!awaitingSendRanks.empty()) {
        //
        // Duplicate cell and vertex candidates
        //

        // Duplicate cell candidates
        //
        // The sender is sending a halo of cells surrounding the cells explicitly
        // marked for sending. This halo will be used for connecting the received
        // cells to the existing ones and to build the ghosts.
        //
        // We may receive duplicate cells for ghost targets and ghost sources. The
        // data structures that contain the list of these cells may not be updated
        // so we need to build the list on the fly. The list will contain ghost
        // cells (targets) and their neighbours (sources).
        duplicateCellsCandidates.clear();
        std::vector<long> firstHaloLayerCells;
        for (const auto &ghostOwnerEntry : m_ghostCellInfo) {
            long ghostId = ghostOwnerEntry.first;

            duplicateCellsCandidates.insert(ghostId);
            if (getCellHaloLayer(ghostId) == 0) {
                firstHaloLayerCells.push_back(ghostId);
            }
        }

        // Select only neighbours that are not already ghosts
        auto duplicateCandidatesSelector = [this](long cellId) {
            BITPIT_UNUSED(cellId);

            return (m_ghostCellInfo.count(cellId) == 0);
        };

        auto duplicateCandidatesBuilder = [&duplicateCellsCandidates](long cellId, int layer) {
            BITPIT_UNUSED(layer);

            duplicateCellsCandidates.insert({cellId});

            return false;
        };

        processCellsNeighbours(firstHaloLayerCells, m_haloSize, duplicateCandidatesSelector, duplicateCandidatesBuilder);

        // Duplicate vertex candidates
        //
        // During normal partitioning, vertices of the duplicate cell candidates
        // are candidates for duplicate vertices.
        //
        // During mesh serialization, we will not receive duplicate cells, but in
        // order to link the received cells with the current ones we still need to
        // properly identify duplicate vertices. Since we have deleted all ghost
        // information, all the vertices of border faces need to be added to the
        // duplicate vertex candidates.
        duplicateVerticesCandidates.clear();
        if (!m_partitioningSerialization) {
            for (long cellId : duplicateCellsCandidates) {
                const Cell &cell = m_cells.at(cellId);
                ConstProxyVector<long> cellVertexIds = cell.getVertexIds();
                for (long vertexId : cellVertexIds) {
                    duplicateVerticesCandidates.insert(vertexId);
                }
            }
        } else {
            for (const Cell &cell : m_cells) {
                int nCellFaces = cell.getFaceCount();
                for (int face = 0; face < nCellFaces; ++face) {
                    if (!cell.isFaceBorder(face)) {
                        continue;
                    }

                    int nFaceVertices = cell.getFaceVertexCount(face);
                    for (int k = 0; k < nFaceVertices; ++k) {
                        long vertexId = cell.getFaceVertexId(face, k);
                        duplicateVerticesCandidates.insert(vertexId);
                    }
                }
            }
        }

        //
        // Identify rank that is sending data
        //
        // We need to start the receives and identify a rank for which both
        // cell and vertex receives are active.
        int sendRank = -1;
        while (sendRank < 0) {
            // Start receiving vertices
            MPI_Waitsome(nSendRanks, verticesSizeRequests.data(), &nSendCompleted, sendCompletedIndexes.data(), sendCompletedStatuses.data());
            for (int i = 0; i < nSendCompleted; ++i) {
                int sourceIndex = sendCompletedIndexes[i];
                int sourceRank = sendCompletedStatuses[i].MPI_SOURCE;

                std::size_t bufferSize = verticesBufferSizes[sourceIndex];
                std::unique_ptr<IBinaryStream> buffer = std::unique_ptr<IBinaryStream>(new IBinaryStream(bufferSize));
                MPI_Request *request = verticesRequests.data() + sourceIndex;
                MPI_Irecv(buffer->data(), buffer->getSize(), MPI_CHAR, sourceRank, m_partitioningVerticesTag, m_communicator, request);
                verticesBuffers[sourceIndex] = std::move(buffer);
            }

            // Start receiving cells
            MPI_Waitsome(nSendRanks, cellsSizeRequests.data(), &nSendCompleted, sendCompletedIndexes.data(), sendCompletedStatuses.data());
            for (int i = 0; i < nSendCompleted; ++i) {
                int sourceIndex = sendCompletedIndexes[i];
                int sourceRank = sendCompletedStatuses[i].MPI_SOURCE;

                std::size_t bufferSize = cellsBufferSizes[sourceIndex];
                std::unique_ptr<IBinaryStream> buffer = std::unique_ptr<IBinaryStream>(new IBinaryStream(bufferSize));
                MPI_Request *request = cellsRequests.data() + sourceIndex;
                MPI_Irecv(buffer->data(), buffer->getSize(), MPI_CHAR, sourceRank, m_partitioningCellsTag, m_communicator, request);
                cellsBuffers[sourceIndex] = std::move(buffer);
            }

            // Search for a rank that started sending both vertices and cells.
            // If there aren't any, wait for other requests to complete.
            for (int awaitingSendRank : awaitingSendRanks) {
                int sourceIndex = sendRankIndexes.at(awaitingSendRank);
                if (verticesRequests[sourceIndex] == MPI_REQUEST_NULL) {
                    continue;
                } else if (cellsRequests[sourceIndex] == MPI_REQUEST_NULL) {
                    continue;
                }

                sendRank = awaitingSendRank;
                break;
            }
        }

        int sendRankIndex = sendRankIndexes.at(sendRank);

        //
        // Process vertices
        //

        // Wait until vertex communication is completed
        MPI_Request *verticesRequest = verticesRequests.data() + sendRankIndex;
        MPI_Wait(verticesRequest, MPI_STATUS_IGNORE);

        // Add vertices
        IBinaryStream &verticesBuffer = *(verticesBuffers[sendRankIndex]);

        long nRecvVertices;
        verticesBuffer >> nRecvVertices;

        reserveVertices(getVertexCount() + nRecvVertices);

        verticesMap.clear();
        for (long i = 0; i < nRecvVertices; ++i) {
            // Vertex data
            bool isFrame;
            verticesBuffer >> isFrame;

            bool isHalo;
            verticesBuffer >> isHalo;

            Vertex vertex;
            verticesBuffer >> vertex;
            long originalVertexId = vertex.getId();

            // Set vertex interior flag
            //
            // All new vertices will be temporarily added as internal vertices,
            // if needed they will be converted to ghost vertices when updating
            // ghost information.
            vertex.setInterior(true);

            // Check if the vertex is a duplicate
            //
            // Only frame and halo vertices may be duplicates.
            //
            // If the vertex is a duplicate, it will be possible to obtain the
            // id of the coincident vertex already in the patch.
            long vertexId;

            bool isDuplicate = (isFrame || isHalo);
            if (isDuplicate) {
                // The container that stores the list of possible duplicate
                // vertices uses a customized comparator that allows to
                // compare the coordinates of the stored vertices with the
                // coordinates of an external vertex. To check if the external
                // vertex is among the vertices in the container, the search
                // should be performed using the NULL_ID as the key.
                candidateVertexCoords = &(vertex.getCoords());
                auto candidateVertexItr = duplicateVerticesCandidates.find(Cell::NULL_ID);
                isDuplicate = (candidateVertexItr != duplicateVerticesCandidates.end());
                if (isDuplicate) {
                    vertexId = *candidateVertexItr;
                }
            }

            // Add the vertex
            //
            // If the id of the received vertex is already assigned, let the
            // patch generate a new id. Otherwise, keep the id of the received
            // vertex.
            if (!isDuplicate) {
                if (m_vertexIdGenerator) {
                    if (m_vertexIdGenerator->isAssigned(originalVertexId)) {
                        vertex.setId(Vertex::NULL_ID);
                    }
                } else if (m_vertices.exists(originalVertexId)) {
                    throw std::runtime_error("A vertex with the same id of the received vertex already exists.");
                }

                VertexIterator vertexIterator = addVertex(std::move(vertex));
                vertexId = vertexIterator.getId();
            }

            // Record the id change so cell connectivity can be remapped
            if (originalVertexId != vertexId) {
                verticesMap.insert({{originalVertexId, vertexId}});
            }
        }

        // Cleanup
        verticesBuffers[sendRankIndex].reset();

        //
        // Process cells
        //

        // Wait until cell communication is completed
        MPI_Request *cellsRequest = cellsRequests.data() + sendRankIndex;
        MPI_Wait(cellsRequest, MPI_STATUS_IGNORE);

        // Receive cell data
        //
        // Internal cells will be sent first.
        IBinaryStream &cellsBuffer = *(cellsBuffers[sendRankIndex]);

        long nReceivedInternalCells;
        cellsBuffer >> nReceivedInternalCells;

        long nReceivedHalo;
        cellsBuffer >> nReceivedHalo;

        long nReceivedCells = nReceivedInternalCells + nReceivedHalo;

        reserveCells(getCellCount() + nReceivedCells);

        if (trackPartitioning) {
            // Only internal cells are tracked.
            partitioningData[sendRankIndex].current.reserve(nReceivedInternalCells);
        }

        validReceivedAdjacencies.clear();
        validReceivedAdjacencies.reserve(nReceivedCells);

        addedCells.clear();
        addedCells.reserve(nReceivedCells);

        duplicateCellsReceivedAdjacencies.clear();

        cellsMap.clear();

        for (long i = 0; i < nReceivedCells; ++i) {
            // Cell data
            bool isFrame;
            cellsBuffer >> isFrame;

            bool isHalo;
            cellsBuffer >> isHalo;

            // Halo cells carry explicit ownership information; cells marked
            // for this process are owned locally and belong to no halo layer.
            int cellOwner;
            int cellHaloLayer;
            int ghostCellOwnershipChange;
            if (isHalo) {
                cellsBuffer >> cellOwner;
                cellsBuffer >> cellHaloLayer;
                cellsBuffer >> ghostCellOwnershipChange;
            } else {
                cellOwner = patchRank;
                cellHaloLayer = -1;
                ghostCellOwnershipChange = -1;
            }

            Cell cell;
            cellsBuffer >> cell;

            long cellOriginalId = cell.getId();

            // Set cell interior flag
            bool isInterior = (cellOwner == patchRank);
            cell.setInterior(isInterior);

            // Remap connectivity
            if (!verticesMap.empty()) {
                cell.renumberVertices(verticesMap);
            }

            // Check if the cell is a duplicate
            //
            // The received cell may be one of the current ghosts.
            long cellId = Cell::NULL_ID;
            if (isHalo || isFrame) {
                for (long candidateId : duplicateCellsCandidates) {
                    const Cell &candidateCell = m_cells.at(candidateId);
                    if (cell.hasSameConnect(candidateCell)) {
                        cellId = candidateId;
                        break;
                    }
                }
            }

            // If the cell is not a duplicate add it in the cell data structure,
            // otherwise merge the connectivity of the duplicate cell to the
            // existing cell. This ensures that the received cell will be
            // properly connected to the received cells
            bool isTracked = false;
            if (cellId < 0) {
                // Reset the interfaces of the cell
                if (getInterfacesBuildStrategy() != INTERFACES_NONE) {
                    cell.resetInterfaces();
                }

                // Add cell
                //
                // If the id of the received cell is already assigned, let the
                // patch generate a new id. Otherwise, keep the id of the received
                // cell.

                if (m_cellIdGenerator) {
                    if (m_cellIdGenerator->isAssigned(cellOriginalId)) {
                        cell.setId(Cell::NULL_ID);
                    }
                } else if (m_cells.exists(cellOriginalId)) {
                    throw std::runtime_error("A cell with the same id of the received cell already exists.");
                }

                CellIterator cellIterator = addCell(std::move(cell), cellOwner, cellHaloLayer);
                cellId = cellIterator.getId();
                addedCells.push_back(cellId);

                // Setup ghost ownership changes
                if (!cellIterator->isInterior()) {
                    if (ghostCellOwnershipChange >= 0) {
                        ghostCellOwnershipChanges->insert({cellId, ghostCellOwnershipChange});
                    }
                }

                // Interior cells are tracked
                isTracked = (trackPartitioning && isInterior);
            } else {
                // Check if the existing cell needs to become an internal cell
                const Cell &localCell = m_cells[cellId];
                if (isInterior && !localCell.isInterior()) {
                    ghostCell2InternalCell(cellId);
                    ghostCellOwnershipChanges->erase(cellId);
                    isTracked = trackPartitioning;
                }

                // Update the halo layer associated with ghost cells
                //
                // The ghost is moved to the innermost (smallest) layer among
                // the current one and the received one.
                if (!isInterior) {
                    const GhostCellInfo &ghostInfo = m_ghostCellInfo.at(cellId);
                    if (ghostInfo.haloLayer > cellHaloLayer) {
                        setGhostCellInfo(cellId, ghostInfo.owner, cellHaloLayer);
                    }
                }

                // Save the adjacencies of the received cell, these adjacencies
                // will link together the received cell and the existing ones.
                FlatVector2D<long> &cellAdjacencies = duplicateCellsReceivedAdjacencies[cellId];

                int nCellFaces = cell.getFaceCount();
                int nCellAdjacencies = cell.getAdjacencyCount();
                cellAdjacencies.reserve(nCellFaces, nCellAdjacencies);
                for (int face = 0; face < nCellFaces; ++face) {
                    int nFaceAdjacencies = cell.getAdjacencyCount(face);
                    const long *faceAdjacencies = cell.getAdjacencies(face);
                    cellAdjacencies.pushBack(nFaceAdjacencies, faceAdjacencies);
                }
            }

            // The interfaces of the cell need to be updated
            if (getInterfacesBuildStrategy() != INTERFACES_NONE) {
                setCellAlterationFlags(cellId, FLAG_INTERFACES_DIRTY);
            }

            // Add the cell to the cell map
            if (cellOriginalId != cellId) {
                cellsMap.insert({{cellOriginalId, cellId}});
            }

            // Add original cell id to the list of valid received adjacencies
            validReceivedAdjacencies.insert(cellOriginalId);

            // Update tracking information
            //
            // Only new internal cells and internal cells that were
            // previously ghosts are tracked.
            //
            // The ids of the cells sent will be stored accordingly to the
            // receive order, this is the same order that will be used on the
            // process that has sent the cell. Since the order is the same,
            // the two processes are able to exchange cell data without
            // additional communications (they already know the list of cells
            // for which data is needed and the order in which these data will
            // be sent).
            if (isTracked) {
                partitioningData[sendRankIndex].current.emplace_back(cellId);
            }
        }

        // Cleanup
        cellsBuffers[sendRankIndex].reset();

        // Update adjacencies among added cells
        for (long cellId : addedCells) {
            Cell &cell = m_cells.at(cellId);

            // Remove stale adjacencies
            //
            // Adjacencies that reference cells the sender did not transmit
            // cannot be resolved locally and are dropped.
            int nCellFaces = cell.getFaceCount();
            for (int face = 0; face < nCellFaces; ++face) {
                int nFaceAdjacencies = cell.getAdjacencyCount(face);
                const long *faceAdjacencies = cell.getAdjacencies(face);

                int k = 0;
                while (k < nFaceAdjacencies) {
                    long senderAdjacencyId = faceAdjacencies[k];
                    if (validReceivedAdjacencies.count(senderAdjacencyId) == 0) {
                        cell.deleteAdjacency(face, k);
                        --nFaceAdjacencies;
                    } else {
                        ++k;
                    }
                }
            }

            // Remap adjacencies
            if (!cellsMap.empty()) {
                int nCellAdjacencies = cell.getAdjacencyCount();
                long *cellAdjacencies = cell.getAdjacencies();
                for (int k = 0; k < nCellAdjacencies; ++k) {
                    long &cellAdjacencyId = cellAdjacencies[k];
                    auto cellsMapItr = cellsMap.find(cellAdjacencyId);
                    if (cellsMapItr != cellsMap.end()) {
                        cellAdjacencyId = cellsMapItr->second;
                    }
                }
            }
        }

        // Link received cells with the initial cells
        //
        // If we are serializing the patch, we don't have enough information to
        // link the received cells with the initial cells (ghost cells have been
        // deleted before receiving the cells). Cells that need to be linked will
        // have some faces without any adjacency, therefore to link those cells
        // we rebuild the adjacencies of all the cells that have faces with no
        // adjacencies. Authentic border cells will have their adjacencies rebuilt
        // also if this may not be needed, but this is still the faster way to
        // link the received cells.
        if (!m_partitioningSerialization) {
            for (auto &entry : duplicateCellsReceivedAdjacencies) {
                long cellId = entry.first;
                Cell &cell = m_cells.at(cellId);

                int nCellFaces = cell.getFaceCount();
                FlatVector2D<long> &cellReceivedAdjacencies = entry.second;
                for (int face = 0; face < nCellFaces; ++face) {
                    int nFaceLinkAdjacencies = cellReceivedAdjacencies.getItemCount(face);
                    for (int k = 0; k < nFaceLinkAdjacencies; ++k) {
                        // We need to update the adjacencies only if they are
                        // cells that have been sent.
                        long receivedAdjacencyId = cellReceivedAdjacencies.getItem(face, k);
                        if (validReceivedAdjacencies.count(receivedAdjacencyId) == 0) {
                            continue;
                        }

                        // If the sent cell is already in the adjacency list
                        // there is nothing to update.
                        long localAdjacencyId;
                        auto ajacenciyCellMapItr = cellsMap.find(receivedAdjacencyId);
                        if (ajacenciyCellMapItr != cellsMap.end()) {
                            localAdjacencyId = ajacenciyCellMapItr->second;
                        } else {
                            localAdjacencyId = receivedAdjacencyId;
                        }

                        if (cell.findAdjacency(face, localAdjacencyId) >= 0) {
                            continue;
                        }

                        cell.pushAdjacency(face, localAdjacencyId);
                    }
                }

                unsetCellAlterationFlags(cellId, FLAG_ADJACENCIES_DIRTY);
            }
        } else {
            for (const Cell &cell : m_cells) {
                int nCellFaces = cell.getFaceCount();
                for (int face = 0; face < nCellFaces; ++face) {
                    if (cell.isFaceBorder(face)) {
                        long cellId = cell.getId();
                        setCellAlterationFlags(cellId, FLAG_ADJACENCIES_DIRTY);
                        break;
                    }
                }
            }
        }

        // Receive is now complete
        awaitingSendRanks.erase(sendRank);
    }

    // Update adjacencies
    if (getAdjacenciesBuildStrategy() == ADJACENCIES_AUTOMATIC) {
        // NOTE(review): the statement in this block was lost in extraction —
        // presumably a call that updates the adjacencies; confirm against the
        // upstream sources.
    }

    // Update interfaces
    if (getInterfacesBuildStrategy() == INTERFACES_AUTOMATIC) {
        // NOTE(review): the statement in this block was lost in extraction —
        // presumably a call that updates the interfaces; confirm against the
        // upstream sources.
    }

    // Return adaption data
    return partitioningData;
}
3767
3776
3784{
3785 auto cellInfoItr = m_ghostCellInfo.find(id);
3786 if (cellInfoItr == m_ghostCellInfo.end()) {
3787 return m_rank;
3788 } else {
3789 return cellInfoItr->second.owner;
3790 }
3791}
3792
3800{
3801 auto cellInfoItr = m_ghostCellInfo.find(id);
3802 if (cellInfoItr == m_ghostCellInfo.end()) {
3803 return m_rank;
3804 } else {
3805 return cellInfoItr->second.owner;
3806 }
3807}
3808
3816{
3817 auto cellInfoItr = m_ghostCellInfo.find(id);
3818 if (cellInfoItr == m_ghostCellInfo.end()) {
3819 return -1;
3820 } else {
3821 return cellInfoItr->second.haloLayer;
3822 }
3823}
3824
3832{
3833 auto vertexInfoItr = m_ghostVertexInfo.find(id);
3834 if (vertexInfoItr == m_ghostVertexInfo.end()) {
3835 return m_rank;
3836 } else {
3837 return vertexInfoItr->second.owner;
3838 }
3839}
3840
3848{
3849 auto vertexInfoItr = m_ghostVertexInfo.find(id);
3850 if (vertexInfoItr == m_ghostVertexInfo.end()) {
3851 return m_rank;
3852 } else {
3853 return vertexInfoItr->second.owner;
3854 }
3855}
3856
3864{
3865 return (m_ghostCellExchangeTargets.count(rank) > 0);
3866}
3867
3874{
3875 std::vector<int> neighRanks;
3876 neighRanks.reserve(m_ghostCellExchangeTargets.size());
3877 for (const auto &entry : m_ghostCellExchangeTargets) {
3878 neighRanks.push_back(entry.first);
3879 }
3880
3881 return neighRanks;
3882}
3883
/*!
    Gets a constant reference to the internal map of ghost vertex exchange
    targets (rank -> list of local vertex ids).
*/
const std::unordered_map<int, std::vector<long>> & PatchKernel::getGhostVertexExchangeTargets() const
{
    return m_ghostVertexExchangeTargets;
}
3904
/*!
    Gets a constant reference to the ghost vertex exchange targets associated
    with the specified rank.

    \param rank is the rank whose targets are requested
    \result The ghost vertex exchange targets for the rank. Throws
    std::out_of_range if the rank has no entry in the map.
*/
const std::vector<long> & PatchKernel::getGhostVertexExchangeTargets(int rank) const
{
    return m_ghostVertexExchangeTargets.at(rank);
}
3925
/*!
    Gets a constant reference to the internal map of ghost vertex exchange
    sources (rank -> list of local vertex ids).
*/
const std::unordered_map<int, std::vector<long>> & PatchKernel::getGhostVertexExchangeSources() const
{
    return m_ghostVertexExchangeSources;
}
3947
/*!
    Gets a constant reference to the ghost vertex exchange sources associated
    with the specified rank.

    \param rank is the rank whose sources are requested
    \result The ghost vertex exchange sources for the rank. Throws
    std::out_of_range if the rank has no entry in the map.
*/
const std::vector<long> & PatchKernel::getGhostVertexExchangeSources(int rank) const
{
    return m_ghostVertexExchangeSources.at(rank);
}
3969
/*!
    Gets a constant reference to the internal map of ghost cell exchange
    targets (rank -> list of local cell ids).
*/
const std::unordered_map<int, std::vector<long>> & PatchKernel::getGhostCellExchangeTargets() const
{
    return m_ghostCellExchangeTargets;
}
3990
4007const std::unordered_map<int, std::vector<long>> & PatchKernel::getGhostExchangeTargets() const
4008{
4010}
4011
/*!
    Gets a constant reference to the ghost cell exchange targets associated
    with the specified rank.

    \param rank is the rank whose targets are requested
    \result The ghost cell exchange targets for the rank. Throws
    std::out_of_range if the rank has no entry in the map.
*/
const std::vector<long> & PatchKernel::getGhostCellExchangeTargets(int rank) const
{
    return m_ghostCellExchangeTargets.at(rank);
}
4032
/*!
    Gets a constant reference to the ghost cell exchange targets associated
    with the specified rank.

    This overload simply delegates to getGhostCellExchangeTargets(int);
    presumably kept as a backward-compatibility alias.

    \param rank is the rank whose targets are requested
    \result The ghost cell exchange targets for the rank.
*/
const std::vector<long> & PatchKernel::getGhostExchangeTargets(int rank) const
{
    return getGhostCellExchangeTargets(rank);
}
4053
/*!
    Gets a constant reference to the internal map of ghost cell exchange
    sources (rank -> list of local cell ids).
*/
const std::unordered_map<int, std::vector<long>> & PatchKernel::getGhostCellExchangeSources() const
{
    return m_ghostCellExchangeSources;
}
4074
4091const std::unordered_map<int, std::vector<long>> & PatchKernel::getGhostExchangeSources() const
4092{
4094}
4095
/*!
    Gets a constant reference to the ghost cell exchange sources associated
    with the specified rank.

    \param rank is the rank whose sources are requested
    \result The ghost cell exchange sources for the rank. Throws
    std::out_of_range if the rank has no entry in the map.
*/
const std::vector<long> & PatchKernel::getGhostCellExchangeSources(int rank) const
{
    return m_ghostCellExchangeSources.at(rank);
}
4116
/*!
    Gets a constant reference to the ghost cell exchange sources associated
    with the specified rank.

    This overload simply delegates to getGhostCellExchangeSources(int);
    presumably kept as a backward-compatibility alias.

    \param rank is the rank whose sources are requested
    \result The ghost cell exchange sources for the rank.
*/
const std::vector<long> & PatchKernel::getGhostExchangeSources(int rank) const
{
    return getGhostCellExchangeSources(rank);
}
4137
4144void PatchKernel::setGhostVertexInfo(long id, int owner)
4145{
4146 auto ghostVertexInfoItr = m_ghostVertexInfo.find(id);
4147 if (ghostVertexInfoItr != m_ghostVertexInfo.end()) {
4148 ghostVertexInfoItr->second.owner = owner;
4149 } else {
4150 GhostVertexInfo ghostVertexInfo;
4151 ghostVertexInfo.owner = owner;
4152 m_ghostVertexInfo.insert({id, std::move(ghostVertexInfo)});
4153 }
4154
4155 setPartitioningInfoDirty(true);
4156}
4157
4163void PatchKernel::unsetGhostVertexInfo(long id)
4164{
4165 auto ghostVertexInfoItr = m_ghostVertexInfo.find(id);
4166 if (ghostVertexInfoItr == m_ghostVertexInfo.end()) {
4167 return;
4168 }
4169
4170 m_ghostVertexInfo.erase(ghostVertexInfoItr);
4171
4172 setPartitioningInfoDirty(true);
4173}
4174
/*!
    Clears the ghost information of all the vertices.

    Every entry is removed and the partitioning information is flagged
    as dirty.
*/
void PatchKernel::clearGhostVerticesInfo()
{
    m_ghostVertexInfo.clear();

    setPartitioningInfoDirty(true);
}
4184
4192void PatchKernel::setGhostCellInfo(long id, int owner, int haloLayer)
4193{
4194 auto ghostCellInfoItr = m_ghostCellInfo.find(id);
4195 if (ghostCellInfoItr != m_ghostCellInfo.end()) {
4196 ghostCellInfoItr->second.owner = owner;
4197 ghostCellInfoItr->second.haloLayer = haloLayer;
4198 } else {
4199 GhostCellInfo ghostCellInfo;
4200 ghostCellInfo.owner = owner;
4201 ghostCellInfo.haloLayer = haloLayer;
4202 m_ghostCellInfo.insert({id, std::move(ghostCellInfo)});
4203 }
4204
4205 setPartitioningInfoDirty(true);
4206}
4207
4213void PatchKernel::unsetGhostCellInfo(long id)
4214{
4215 auto ghostCellInfoItr = m_ghostCellInfo.find(id);
4216 if (ghostCellInfoItr == m_ghostCellInfo.end()) {
4217 return;
4218 }
4219
4220 m_ghostCellInfo.erase(ghostCellInfoItr);
4221
4222 setPartitioningInfoDirty(true);
4223}
4224
4234void PatchKernel::computeCellHaloLayer(int id)
4235{
4236 // Early return if the cell is interior
4237 if (getCell(id).isInterior()) {
4238 unsetGhostCellInfo(id);
4239 return;
4240 }
4241
4242 // Process the neighbours until we find an internal cell
4243 bool haloLayerIdentified = false;
4244
4245 std::array<long, 1> layerUpdateSeed = {{ id }};
4246
4247 auto layerUpdateSelector = [](long cellId) {
4248 BITPIT_UNUSED(cellId);
4249
4250 return true;
4251 };
4252
4253 auto layerUpdateProcessor = [this, id, &haloLayerIdentified](long cellId, int layer) {
4254 const Cell &cell = getCell(cellId);
4255 if (cell.isInterior()) {
4256 const GhostCellInfo &cellGhostInfo = m_ghostCellInfo.at(id);
4257 if (cellGhostInfo.haloLayer != layer) {
4258 setGhostCellInfo(id, cellGhostInfo.owner, layer);
4259 }
4260
4261 haloLayerIdentified = true;
4262
4263 return true;
4264 }
4265
4266 return false;
4267 };
4268
4269 processCellsNeighbours(layerUpdateSeed, m_haloSize, layerUpdateSelector, layerUpdateProcessor);
4270
4271 if (!haloLayerIdentified) {
4272 throw std::runtime_error ("Unable to identify the halo layer of the cell.");
4273 }
4274}
4275
/*!
    Clears the information associated with all the ghost cells.
*/
void PatchKernel::clearGhostCellsInfo()
{
    m_ghostCellInfo.clear();

    // Ghost bookkeeping changed, exchange lists have to be rebuilt
    setPartitioningInfoDirty(true);
}
4285
{
    // A patch that is not partitioned can never have stale partitioning
    // information
    if (!isPartitioned()) {
        return false;
    }

    bool partitioningInfoDirty = m_partitioningInfoDirty;
    if (global) {
        // Combine the dirty flag across all the processes: the information
        // is considered dirty if at least one process reports it as dirty
        // (logical OR reduction). Note this is a collective call, so every
        // process must invoke this function with global set to true.
        const auto &communicator = getCommunicator();
        MPI_Allreduce(MPI_IN_PLACE, &partitioningInfoDirty, 1, MPI_C_BOOL, MPI_LOR, communicator);
    }

    return partitioningInfoDirty;
}
4308
4314void PatchKernel::setPartitioningInfoDirty(bool dirty)
4315{
4316 if (dirty && !isPartitioned()) {
4317 return;
4318 }
4319
4320 m_partitioningInfoDirty = dirty;
4321}
4322
4329void PatchKernel::updatePartitioningInfo(bool forcedUpdated)
4330{
4331 // Check if ghost information are dirty
4332 if (!forcedUpdated && !arePartitioningInfoDirty()) {
4333 return;
4334 }
4335
4336 // Cell exchange data
4337 updateGhostCellExchangeInfo();
4338
4339 // Vertex exchange data
4340 updateGhostVertexExchangeInfo();
4341
4342 // Update patch owner
4343 updateOwner();
4344
4345 // Partitioning information are now up-to-date
4346 setPartitioningInfoDirty(false);
4347}
4348
4381void PatchKernel::updateGhostVertexExchangeInfo()
4382{
4383 // Patch information
4384 int patchRank = getRank();
4385
4386 // Idenfity owners of target and source vertices
4387 std::unordered_map<long, int> exchangeVertexOwners = evaluateExchangeVertexOwners();
4388
4389 //
4390 // Clear ghosts
4391 //
4392
4393 // List of vertices that are no more ghosts.
4394 //
4395 // Previous ghost vertices will be converted to internal vertices.
4396 VertexConstIterator previousBeginItr = ghostVertexConstBegin();
4397 VertexConstIterator previousEndItr = ghostVertexConstEnd();
4398
4399 std::vector<long> previousGhosts;
4400 previousGhosts.reserve(getGhostVertexCount());
4401 for (VertexConstIterator vertexItr = previousBeginItr; vertexItr != previousEndItr; ++vertexItr) {
4402 long vertexId = vertexItr.getId();
4403 if (exchangeVertexOwners.count(vertexId) != 0) {
4404 continue;
4405 }
4406
4407 previousGhosts.push_back(vertexId);
4408 }
4409
4410 // Convert previous ghosts to internal vertices
4411 for (long vertexId : previousGhosts) {
4413 }
4414
4415 //
4416 // Initialize ghost
4417 //
4418
4419 // List of new ghost vertices
4420 //
4421 // Vertices need to be sorted with their order in the storage.
4422 std::map<std::size_t, long> newGhosts;
4423 for (const auto &entry : exchangeVertexOwners) {
4424 int vertexOwner = entry.second;
4425 if (vertexOwner == patchRank) {
4426 continue;
4427 }
4428
4429 long vertexId = entry.first;
4430 VertexIterator vertexItr = m_vertices.find(vertexId);
4431 if (!vertexItr->isInterior()) {
4432 continue;
4433 }
4434
4435 newGhosts.insert({vertexItr.getRawIndex(), vertexId});
4436 }
4437
4438 // Set the owner of the existing ghosts
4439 VertexConstIterator beginItr = ghostVertexConstEnd();
4440 VertexConstIterator endItr = ghostVertexConstEnd();
4441
4442 for (VertexConstIterator vertexItr = beginItr; vertexItr != endItr; ++vertexItr) {
4443 long vertexId = vertexItr->getId();
4444 int vertexOwner = exchangeVertexOwners.at(vertexId);
4445 setGhostVertexInfo(vertexId, vertexOwner);
4446 }
4447
4448 // Create new ghosts
4449 //
4450 // The list of ghosts is processed backwards to reduce the number of
4451 // vertices that need to be swapped (there is a high probability that
4452 // most of the ghost vertices are already at the end of the storage).
4453 //
4454 // If a vertex is already a ghost, we still need to update its owner.
4455 for (auto iter = newGhosts.rbegin(); iter != newGhosts.rend(); ++iter) {
4456 long vertexId = iter->second;
4457 int vertexOwner = exchangeVertexOwners.at(vertexId);
4458 internalVertex2GhostVertex(vertexId, vertexOwner);
4459 }
4460
4461 //
4462 // Identify exchange targets
4463 //
4464 // See function Doxygen documentation for an explanation of which ranks are
4465 // involved in vertex data exchange.
4466
4467 // Clear targets
4468 m_ghostVertexExchangeTargets.clear();
4469
4470 // Update targets
4471 for (const auto &entry : m_ghostVertexInfo) {
4472 int ghostVertexOwner = entry.second.owner;
4473 long ghostVertexId = entry.first;
4474 m_ghostVertexExchangeTargets[ghostVertexOwner].push_back(ghostVertexId);
4475 }
4476
4477 // Sort the targets
4478 for (auto &entry : m_ghostVertexExchangeTargets) {
4479 std::vector<long> &rankTargets = entry.second;
4480 std::sort(rankTargets.begin(), rankTargets.end(), VertexPositionLess(*this));
4481 }
4482
4483 //
4484 // Identify exchange sources
4485 //
4486 // See function Doxygen documentation for an explanation of which ranks are
4487 // involved in vertex data exchange.
4488
4489 // Compute source distribution
4490 //
4491 // For each source cell, we need to know the ranks that have that cell
4492 // among their ghosts.
4493 std::unordered_map<long, std::unordered_set<int>> sourcesDistribution;
4494 for (const auto &entry : getGhostCellExchangeSources()) {
4495 const int rank = entry.first;
4496 const std::vector<long> &cellList = entry.second;
4497 for (long cellId : cellList) {
4498 sourcesDistribution[cellId].insert(rank);
4499 }
4500 }
4501
4502 // Initialize data communicator
4503 DataCommunicator sourceDataCommunicator(getCommunicator());
4504
4505 // Prepare the sends
4506 for (const auto &entry : getGhostCellExchangeSources()) {
4507 const int rank = entry.first;
4508 const std::vector<long> &cellList = entry.second;
4509
4510 // Set the sends
4511 std::size_t bufferSize = 0;
4512 for (long cellId : cellList) {
4513 bufferSize += sizeof(int) + sizeof(int) * sourcesDistribution[cellId].size();
4514 }
4515 sourceDataCommunicator.setSend(rank, bufferSize);
4516
4517 // Fill send buffer
4518 SendBuffer &buffer = sourceDataCommunicator.getSendBuffer(rank);
4519 for (long cellId : cellList) {
4520 std::unordered_set<int> &cellRankDistribution = sourcesDistribution.at(cellId);
4521 int cellRankDistributionSize = cellRankDistribution.size();
4522
4523 buffer << static_cast<int>(cellRankDistributionSize);
4524 for (int sourceRank : cellRankDistribution) {
4525 buffer << sourceRank;
4526 }
4527 }
4528 }
4529
4530 // Discover the receives
4531 sourceDataCommunicator.discoverRecvs();
4532
4533 // Start the communications
4534 sourceDataCommunicator.startAllRecvs();
4535 sourceDataCommunicator.startAllSends();
4536
4537 // Clear the sources
4538 m_ghostVertexExchangeSources.clear();
4539
4540 // Initialize list of unique sources
4541 std::unordered_map<long, std::unordered_set<int>> uniqueSources;
4542
4543 // Identifiy sources for ranks defined by source cells
4544 for (auto &entry : m_ghostCellExchangeSources) {
4545 int rank = entry.first;
4546
4547 // Identify unique sources
4548 for (long cellId : entry.second) {
4549 const Cell &cell = getCell(cellId);
4550 ConstProxyVector<long> cellVertexIds = cell.getVertexIds();
4551 for (long vertexId : cellVertexIds) {
4552 // Ghost vertices are not sources.
4553 if (m_ghostVertexInfo.count(vertexId) > 0) {
4554 continue;
4555 }
4556
4557 // Add the source to the list
4558 uniqueSources[rank].insert(vertexId);
4559 }
4560 }
4561 }
4562
4563 // Identifiy sources for ranks defined by target cells
4564 //
4565 // For each ghost we receive the list of ranks that have that cell among
4566 // their ghosts, then we loop thorugh the vertices of the ghost cell and
4567 // we add the vertices owned by this process to the sources of the all
4568 // the ranks that contain the ghost cell.
4569 std::vector<int> cellRankDistribution;
4570
4571 int nCompletedCellRecvs = 0;
4572 while (nCompletedCellRecvs < sourceDataCommunicator.getRecvCount()) {
4573 int rank = sourceDataCommunicator.waitAnyRecv();
4574 const std::vector<long> &cellList = getGhostCellExchangeTargets(rank);
4575
4576 RecvBuffer &buffer = sourceDataCommunicator.getRecvBuffer(rank);
4577 for (long cellId : cellList) {
4578 int cellRankDistributionSize;
4579 buffer >> cellRankDistributionSize;
4580 cellRankDistribution.resize(cellRankDistributionSize);
4581 for (int i = 0; i < cellRankDistributionSize; ++i) {
4582 buffer >> cellRankDistribution[i];
4583 }
4584
4585 const Cell &cell = getCell(cellId);
4586 ConstProxyVector<long> cellVertexIds = cell.getVertexIds();
4587 for (long vertexId : cellVertexIds) {
4588 // Ghost vertices are not sources.
4589 if (m_ghostVertexInfo.count(vertexId) > 0) {
4590 continue;
4591 }
4592
4593 // Add the source to the list
4594 for (int cellRank : cellRankDistribution) {
4595 if (cellRank == patchRank) {
4596 continue;
4597 }
4598
4599 uniqueSources[cellRank].insert(vertexId);
4600 }
4601 }
4602 }
4603
4604 ++nCompletedCellRecvs;
4605 }
4606
4607 // Store and sort sources
4608 for (const auto &entry : uniqueSources) {
4609 int rank = entry.first;
4610 const std::unordered_set<int> &rankUniqueSources = entry.second;
4611
4612 // Store sources
4613 std::vector<long> &rankSources = m_ghostVertexExchangeSources[rank];
4614 rankSources.assign(rankUniqueSources.begin(), rankUniqueSources.end());
4615
4616 // Sort sources
4617 std::sort(rankSources.begin(), rankSources.end(), VertexPositionLess(*this));
4618 }
4619}
4620
/*!
    Evaluates, for each vertex involved in ghost data exchange, the rank
    that owns it.

    Locally, a vertex reachable through source cells is provisionally owned
    by this process, while a vertex reachable only through target (ghost)
    cells is provisionally owned by the lowest rank among the owners of
    those cells. Provisional owners are then exchanged with the
    neighbouring partitions and the lowest rank always wins, so every
    process converges to the same owner for each shared vertex.

    \result A map that associates each exchange vertex with the rank that
    owns it.
*/
std::unordered_map<long, int> PatchKernel::evaluateExchangeVertexOwners() const
{
    // Patch information
    int patchRank = getRank();

    // Initialize communicator
    DataCommunicator vertexOwnerCommunicator(getCommunicator());

    // Start receives
    //
    // For each target cell we will receive one owner rank per vertex, in
    // the order the vertices appear in the cell connectivity.
    for (const auto &entry : m_ghostCellExchangeTargets) {
        int rank = entry.first;
        const std::vector<long> &cellIds = entry.second;

        std::size_t bufferSize = 0;
        for (long cellId : cellIds) {
            const Cell &cell = getCell(cellId);
            bufferSize += sizeof(int) * cell.getVertexCount();
        }
        vertexOwnerCommunicator.setRecv(rank, bufferSize);
        vertexOwnerCommunicator.startRecv(rank);
    }

    // Initialize owners using local information
    std::unordered_map<long, int> exchangeVertexOwners;

    // Vertices of source cells are provisionally owned by this process
    for (const auto &entry : m_ghostCellExchangeSources) {
        for (long cellId : entry.second) {
            const Cell &cell = getCell(cellId);
            for (long vertexId : cell.getVertexIds()) {
                auto exchangeVertexOwnerItr = exchangeVertexOwners.find(vertexId);
                if (exchangeVertexOwnerItr == exchangeVertexOwners.end()) {
                    exchangeVertexOwners.insert({vertexId, patchRank});
                }
            }
        }
    }

    // Vertices of target cells take the lowest rank among the owners of
    // the cells that contain them (existing entries are only lowered)
    for (const auto &entry : m_ghostCellExchangeTargets) {
        int targetCellOwner = entry.first;
        for (long cellId : entry.second) {
            const Cell &cell = getCell(cellId);
            for (long vertexId : cell.getVertexIds()) {
                auto exchangeVertexOwnerItr = exchangeVertexOwners.find(vertexId);
                if (exchangeVertexOwnerItr == exchangeVertexOwners.end()) {
                    exchangeVertexOwners.insert({vertexId, targetCellOwner});
                } else {
                    int &currentVertexOwner = exchangeVertexOwnerItr->second;
                    if (targetCellOwner < currentVertexOwner) {
                        currentVertexOwner = targetCellOwner;
                    }
                }
            }
        }
    }

    // Send local vertex owners to the neighbouring partitions
    for (const auto &entry : m_ghostCellExchangeSources) {
        int rank = entry.first;
        const std::vector<long> &cellIds = entry.second;

        std::size_t bufferSize = 0;
        for (long cellId : cellIds) {
            const Cell &cell = getCell(cellId);
            bufferSize += sizeof(int) * cell.getVertexCount();
        }
        vertexOwnerCommunicator.setSend(rank, bufferSize);

        SendBuffer &buffer = vertexOwnerCommunicator.getSendBuffer(rank);
        for (long cellId : cellIds) {
            const Cell &cell = getCell(cellId);
            for (long vertexId : cell.getVertexIds()) {
                buffer << exchangeVertexOwners.at(vertexId);
            }
        }
        vertexOwnerCommunicator.startSend(rank);
    }

    // Receive vertex owners from the neighbouring partitions
    //
    // Remote owners are merged with the local ones; the lowest rank wins.
    int nCompletedRecvs = 0;
    while (nCompletedRecvs < vertexOwnerCommunicator.getRecvCount()) {
        int rank = vertexOwnerCommunicator.waitAnyRecv();
        const std::vector<long> &cellIds = m_ghostCellExchangeTargets.at(rank);
        RecvBuffer &buffer = vertexOwnerCommunicator.getRecvBuffer(rank);

        for (long cellId : cellIds) {
            const Cell &cell = getCell(cellId);
            for (long vertexId : cell.getVertexIds()) {
                int remoteVertexOwner;
                buffer >> remoteVertexOwner;

                int &currentVertexOwner = exchangeVertexOwners.at(vertexId);
                if (remoteVertexOwner < currentVertexOwner) {
                    currentVertexOwner = remoteVertexOwner;
                }
            }
        }

        ++nCompletedRecvs;
    }

    // Wait until all sends are complete
    vertexOwnerCommunicator.waitAllSends();

    return exchangeVertexOwners;
}
4742
4746void PatchKernel::updateGhostCellExchangeInfo()
4747{
4748 // Clear targets
4749 m_ghostCellExchangeTargets.clear();
4750
4751 // Update targets
4752 for (const auto &entry : m_ghostCellInfo) {
4753 int ghostOwner = entry.second.owner;
4754 long ghostCellId = entry.first;
4755 m_ghostCellExchangeTargets[ghostOwner].push_back(ghostCellId);
4756 }
4757
4758 // Sort the targets
4759 for (auto &entry : m_ghostCellExchangeTargets) {
4760 std::vector<long> &rankTargets = entry.second;
4761 std::sort(rankTargets.begin(), rankTargets.end(), CellPositionLess(*this));
4762 }
4763
4764 // Clear the sources
4765 m_ghostCellExchangeSources.clear();
4766
4767 // Build the sources
4768 for (auto &entry : m_ghostCellExchangeTargets) {
4769 int rank = entry.first;
4770
4771 // Generate the source list
4772 std::vector<long> rankSources = _findGhostCellExchangeSources(rank);
4773 if (rankSources.empty()) {
4774 m_ghostCellExchangeSources.erase(rank);
4775 continue;
4776 }
4777
4778 // Sort the sources
4779 std::sort(rankSources.begin(), rankSources.end(), CellPositionLess(*this));
4780
4781 // Store list
4782 m_ghostCellExchangeSources[rank] = std::move(rankSources);
4783 }
4784}
4785
{
    // Get targets for the specified rank
    //
    // If there are no targets, there will be no sources either.
    auto ghostExchangeTargetsItr = m_ghostCellExchangeTargets.find(rank);
    if (ghostExchangeTargetsItr == m_ghostCellExchangeTargets.end()) {
        return std::vector<long>();
    }

    const std::vector<long> &rankTargets = ghostExchangeTargetsItr->second;

    // Generate sources from the targets
    //
    // Sources are the non-ghost cells found within "m_haloSize" layers of
    // neighbours around the target (ghost) cells of the given rank.
    std::vector<long> exchangeSources;

    // Every cell encountered during the neighbourhood walk is eligible
    auto sourceSelector = [](long cellId) {
        BITPIT_UNUSED(cellId);

        return true;
    };

    // Collect cells that are not ghosts; returning false keeps the walk
    // going through all the requested layers.
    auto sourceBuilder = [this, &exchangeSources](long cellId, int layer) {
        BITPIT_UNUSED(layer);

        if (m_ghostCellInfo.count(cellId) == 0) {
            exchangeSources.push_back(cellId);
        }

        return false;
    };

    processCellsNeighbours(rankTargets, m_haloSize, sourceSelector, sourceBuilder);

    return exchangeSources;
}
4828
4829}
4830
4831#endif
The Cell class defines the cells.
Definition cell.hpp:42
void initialize(long id, ElementType type, bool interior, bool storeNeighbourhood)
Definition cell.cpp:257
void trash(int tag, MPI_Comm communicator)
int generate(MPI_Comm communicator)
The DataCommunicator class provides the infrastructure needed to exchange data among processes.
long getId() const
Definition element.cpp:510
int getDimension() const
Definition element.cpp:1119
static ConstProxyVector< long > getVertexIds(ElementType type, const long *connectivity)
Definition element.cpp:1193
void setId(long id)
Definition element.cpp:498
int getVertexCount() const
Definition element.cpp:1152
VertexIterator vertexEnd()
void unsetCellAlterationFlags(AlterationFlags flags)
CellIterator cellBegin()
InterfacesBuildStrategy getInterfacesBuildStrategy() const
CellConstIterator internalCellConstEnd() const
AdjacenciesBuildStrategy getAdjacenciesBuildStrategy() const
int getVertexOwner(long id) const
const std::unordered_map< int, std::vector< long > > & getGhostCellExchangeSources() const
virtual std::vector< long > _findGhostCellExchangeSources(int rank)
VertexIterator restoreVertex(const std::array< double, 3 > &coords, int owner, long id)
AdaptionMode getAdaptionMode() const
CellIterator restoreCell(ElementType type, std::unique_ptr< long[]> &&connectStorage, int owner, int haloLayer, long id)
CellConstIterator internalCellConstBegin() const
VertexIterator vertexBegin()
virtual std::vector< adaption::Info > _partitioningPrepare(const std::unordered_map< long, double > &cellWeights, double defaultWeight, bool trackPartitioning)
virtual long getVertexCount() const
const std::unordered_map< int, std::vector< long > > & getGhostCellExchangeTargets() const
void addPointToBoundingBox(const std::array< double, 3 > &point)
VertexIterator addVertex(const Vertex &source, long id=Vertex::NULL_ID)
bool empty(bool global=true) const
PartitioningMode getPartitioningMode() const
void processCellsNeighbours(const SeedContainer &seedIds, int nLayers, Function function) const
VertexIterator internalVertex2GhostVertex(long id, int owner)
virtual std::vector< adaption::Info > _partitioningAlter(bool trackPartitioning)
Cell & getCell(long id)
int getVertexRank(long id) const
std::vector< adaption::Info > partition(MPI_Comm communicator, const std::unordered_map< long, int > &cellRanks, bool trackPartitioning, bool squeezeStorage=false, std::size_t haloSize=1)
CellIterator addCell(const Cell &source, long id=Element::NULL_ID)
PartitioningStatus getPartitioningStatus(bool global=false) const
void updateInterfaces(bool forcedUpdated=false)
CellIterator cellEnd()
const std::unordered_map< int BITPIT_COMMA std::vector< long > > & getGhostExchangeTargets() const
bool arePartitioningInfoDirty(bool global=true) const
CellIterator internalCell2GhostCell(long id, int owner, int haloLayer)
CellIterator ghostCell2InternalCell(long id)
VertexConstIterator ghostVertexConstEnd() const
int getOwner(bool allowDirty=false) const
double evalPartitioningUnbalance() const
CellConstIterator ghostCellConstEnd() const
static const double DEFAULT_PARTITIONING_WEIGTH
VertexIterator ghostVertexBegin()
const MPI_Comm & getCommunicator() const
std::size_t getHaloSize() const
virtual int findAdjoinNeighFace(const Cell &cell, int cellFace, const Cell &neigh) const
CellConstIterator ghostConstEnd() const
virtual void reset()
void setHaloSize(std::size_t haloSize)
Vertex & getVertex(long id)
virtual std::size_t _getMaxHaloSize()
std::vector< adaption::Info > partitioningPrepare(MPI_Comm communicator, const std::unordered_map< long, int > &cellRanks, bool trackPartitioning, std::size_t haloSize=1)
virtual long getCellCount() const
VertexIterator ghostVertex2InternalVertex(long id)
VertexConstIterator ghostVertexConstBegin() const
const std::unordered_map< int, std::vector< long > > & getGhostVertexExchangeTargets() const
void removePointFromBoundingBox(const std::array< double, 3 > &point)
std::vector< long > findCellVertexNeighs(long id, bool complete=true) const
bool isDistributed(bool allowDirty=false) const
std::vector< adaption::Info > partitioningAlter(bool trackPartitioning=true, bool squeezeStorage=false)
int getCellHaloLayer(long id) const
const std::unordered_map< int, std::vector< long > > & getGhostVertexExchangeSources() const
void updateAdjacencies(bool forcedUpdated=false)
void setPartitioningMode(PartitioningMode mode)
const std::unordered_map< int BITPIT_COMMA std::vector< long > > & getGhostExchangeSources() const
std::vector< long > findCellEdgeNeighs(long id, bool complete=true) const
long getInternalCellCount() const
void setPartitioningStatus(PartitioningStatus status)
CellConstIterator ghostCellConstBegin() const
long getInternalVertexCount() const
CellConstIterator ghostConstBegin() const
bool deleteCells(const IdStorage &ids)
bool reserveCells(size_t nCells)
std::vector< int > getNeighbourRanks()
void setCellAlterationFlags(AlterationFlags flags)
bool reserveVertices(size_t nVertices)
virtual void _setHaloSize(std::size_t haloSize)
int getCellOwner(long id) const
virtual void setCommunicator(MPI_Comm communicator)
id_t getId(const id_t &fallback=-1) const noexcept
Iterator for the class PiercedStorage.
static bool hasInfo(ElementType type)
static BITPIT_PUBLIC_API const ReferenceElementInfo & getInfo(ElementType type)
void setParallel(uint16_t, uint16_t)
Definition VTK.cpp:205
The Vertex class defines the vertexs.
Definition vertex.hpp:42
long getId() const
Definition vertex.cpp:226
void initialize(long id, const std::array< double, 3 > &coords, bool interior)
Definition vertex.cpp:131
std::array< double, 3 > & getCoords()
Definition vertex.cpp:246
#define BITPIT_UNUSED(variable)
Definition compiler.hpp:63
CommunicationTags & tags()
The Info struct defines the information associated to an adaption.
Definition adaption.hpp:63
--- layout: doxygen_footer ---