system_matrix.cpp
/*---------------------------------------------------------------------------*\
 *
 *  bitpit
 *
 *  Copyright (C) 2015-2021 OPTIMAD engineering Srl
 *
 *  -------------------------------------------------------------------------
 *  License
 *  This file is part of bitpit.
 *
 *  bitpit is free software: you can redistribute it and/or modify it
 *  under the terms of the GNU Lesser General Public License v3 (LGPL)
 *  as published by the Free Software Foundation.
 *
 *  bitpit is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
 *  License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public License
 *  along with bitpit. If not, see <http://www.gnu.org/licenses/>.
 *
\*---------------------------------------------------------------------------*/

#include "system_matrix.hpp"

namespace bitpit {

#if BITPIT_ENABLE_MPI==1
/*!
 * Default constructor.
 */
SparseMatrix::SparseMatrix()
    : SparseMatrix(MPI_COMM_SELF)
{
}

SparseMatrix::SparseMatrix(MPI_Comm communicator)
    : m_blockSize(0), m_nRows(0), m_nCols(0), m_nNZ(0), m_maxRowNZ(0), m_lastRow(-1),
      m_assembled(false),
      m_partitioned(false),
      m_global_nRows(0), m_global_nCols(0), m_global_nNZ(0),
      m_global_maxRowNZ(0), m_global_rowOffset(0), m_global_colOffset(0)
{
    // Set the communicator
    setCommunicator(communicator);
}
#else
/*!
 * Default constructor.
 */
SparseMatrix::SparseMatrix()
    : m_blockSize(0), m_nRows(0), m_nCols(0), m_nNZ(0), m_maxRowNZ(0), m_lastRow(-1),
      m_assembled(false)
{
}
#endif

#if BITPIT_ENABLE_MPI==1
SparseMatrix::SparseMatrix(long nRows, long nCols, long nNZ)
    : SparseMatrix(MPI_COMM_SELF, false, 1, nRows, nCols, nNZ)
{
}

SparseMatrix::SparseMatrix(int blockSize, long nRows, long nCols, long nNZ)
    : SparseMatrix(MPI_COMM_SELF, false, blockSize, nRows, nCols, nNZ)
{
}

SparseMatrix::SparseMatrix(MPI_Comm communicator, bool partitioned, long nRows, long nCols, long nNZ)
    : SparseMatrix(communicator)
{
    _initialize(partitioned, 1, nRows, nCols, nNZ);
}

SparseMatrix::SparseMatrix(MPI_Comm communicator, bool partitioned, int blockSize, long nRows, long nCols, long nNZ)
    : SparseMatrix(communicator)
{
    _initialize(partitioned, blockSize, nRows, nCols, nNZ);
}
#else
SparseMatrix::SparseMatrix(long nRows, long nCols, long nNZ)
    : SparseMatrix(1, nRows, nCols, nNZ)
{
}

SparseMatrix::SparseMatrix(int blockSize, long nRows, long nCols, long nNZ)
    : SparseMatrix()
{
    _initialize(blockSize, nRows, nCols, nNZ);
}
#endif

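/*!
 * Illustration (not part of the original file): typical construction, with
 * arbitrary example sizes; nNZ is the expected number of non-zero entries.
 * When MPI support is compiled in, the serial constructors delegate to the
 * MPI ones using MPI_COMM_SELF.
 * \code
 * bitpit::SparseMatrix serial(4, 4, 10);                    // 4x4, ~10 non-zeros
 *
 * #if BITPIT_ENABLE_MPI==1
 * // Partitioned matrix: each rank passes its local row/column counts
 * bitpit::SparseMatrix parallel(MPI_COMM_WORLD, true, 4, 4, 10);
 * #endif
 * \endcode
 */
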
#if BITPIT_ENABLE_MPI==1
/*!
 * Copy constructor.
 */
SparseMatrix::SparseMatrix(const SparseMatrix &other)
    : m_blockSize(other.m_blockSize),
      m_nRows(other.m_nRows),
      m_nCols(other.m_nCols),
      m_nNZ(other.m_nNZ),
      m_maxRowNZ(other.m_maxRowNZ),
      m_lastRow(other.m_lastRow),
      m_assembled(other.m_assembled),
      m_partitioned(other.m_partitioned),
      m_global_nRows(other.m_global_nRows),
      m_global_nCols(other.m_global_nCols),
      m_global_nNZ(other.m_global_nNZ),
      m_global_maxRowNZ(other.m_global_maxRowNZ),
      m_global_rowOffset(other.m_global_rowOffset),
      m_global_colOffset(other.m_global_colOffset),
      m_pattern(other.m_pattern),
      m_values(other.m_values)
{
    // Set the communicator
    setCommunicator(other.m_communicator);
}
#endif

/*!
 * Destructor.
 */
SparseMatrix::~SparseMatrix()
{
#if BITPIT_ENABLE_MPI==1
    // Free the MPI communicator
    freeCommunicator();
#endif
}

#if BITPIT_ENABLE_MPI==1
void SparseMatrix::initialize(bool partitioned, long nRows, long nCols, long nNZ)
{
    _initialize(partitioned, 1, nRows, nCols, nNZ);
}

void SparseMatrix::initialize(bool partitioned, int blockSize, long nRows, long nCols, long nNZ)
{
    _initialize(partitioned, blockSize, nRows, nCols, nNZ);
}
#endif

void SparseMatrix::initialize(long nRows, long nCols, long nNZ)
{
    initialize(1, nRows, nCols, nNZ);
}

void SparseMatrix::initialize(int blockSize, long nRows, long nCols, long nNZ)
{
#if BITPIT_ENABLE_MPI==1
    _initialize(false, blockSize, nRows, nCols, nNZ);
#else
    _initialize(blockSize, nRows, nCols, nNZ);
#endif
}

#if BITPIT_ENABLE_MPI==1
void SparseMatrix::_initialize(bool partitioned, int blockSize, long nRows, long nCols, long nNZ)
{
    // Serial initialization
    _initialize(blockSize, nRows, nCols, nNZ);

    // Parallel initialization
    m_partitioned = partitioned;

    m_global_nRows = m_nRows;
    m_global_nCols = m_nCols;
    if (m_partitioned) {
        MPI_Allreduce(MPI_IN_PLACE, &m_global_nRows, 1, MPI_LONG, MPI_SUM, m_communicator);
        MPI_Allreduce(MPI_IN_PLACE, &m_global_nCols, 1, MPI_LONG, MPI_SUM, m_communicator);
    }

    m_global_rowOffset = 0;
    m_global_colOffset = 0;
    if (m_partitioned) {
        int nProcessors;
        MPI_Comm_size(m_communicator, &nProcessors);

        std::vector<long> nGlobalRows(nProcessors);
        MPI_Allgather(&nRows, 1, MPI_LONG, nGlobalRows.data(), 1, MPI_LONG, m_communicator);

        std::vector<long> nGlobalCols(nProcessors);
        MPI_Allgather(&nCols, 1, MPI_LONG, nGlobalCols.data(), 1, MPI_LONG, m_communicator);

        int rank;
        MPI_Comm_rank(m_communicator, &rank);
        for (int i = 0; i < rank; ++i) {
            m_global_rowOffset += nGlobalRows[i];
            m_global_colOffset += nGlobalCols[i];
        }
    }
}
#endif

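/*!
 * Illustration (not part of the original file): with three ranks owning
 * 3, 5 and 4 rows respectively, the MPI_Allgather above yields
 * nGlobalRows = {3, 5, 4} on every rank, and the prefix sum over the ranks
 * that precede the current one gives m_global_rowOffset = 0 on rank 0,
 * 3 on rank 1, and 3 + 5 = 8 on rank 2. Column offsets are computed the
 * same way.
 */
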
void SparseMatrix::_initialize(int blockSize, long nRows, long nCols, long nNZ)
{
    assert(blockSize >= 0);
    assert(nRows >= 0);
    assert(nCols >= 0);
    assert(nNZ >= 0);

    clear();

    m_blockSize = blockSize;
    m_nRows = nRows;
    m_nCols = nCols;
}

void SparseMatrix::clear(bool release)
{
    clearPatternStorage(release);
    clearValueStorage(release);

    m_blockSize = 0;
    m_nRows = 0;
    m_nCols = 0;
    m_nNZ = 0;
    m_maxRowNZ = 0;
    m_lastRow = -1;

#if BITPIT_ENABLE_MPI==1
    m_partitioned = false;

    m_global_nRows = 0;
    m_global_nCols = 0;
    m_global_nNZ = 0;
    m_global_maxRowNZ = 0;
    m_global_rowOffset = 0;
    m_global_colOffset = 0;
#endif

    m_assembled = false;
}

void SparseMatrix::display(std::ostream &stream, double negligibility, int indent) const
{
    // Initialize padding
    std::string padding(indent, ' ');

    // Display base information
    int blockSize = getBlockSize();
    int nBlockElements = blockSize * blockSize;

    stream << padding << "General information" << std::endl;
    stream << padding << " Block size ............................................ " << blockSize << std::endl;

    // Gather local information
    long nRows = getRowCount();
    long nCols = getColCount();

    long nRowElements = getRowElementCount();
    long nColElements = getColElementCount();

    long nNZBlocks = getNZCount();
    long nNZElements = getNZElementCount();

    long maxRowNZBlocks = getMaxRowNZCount();
    long maxRowNZElements = getMaxRowNZElementCount();

    // Count local non-negligible blocks and elements
    int nZeroStreak = 0;
    long nNZBlockValues = 0;
    long nNZElementValues = 0;
    for (long k = 0; k < nNZElements; ++k) {
        double value = m_values[k];
        if (!bitpit::utils::DoubleFloatingEqual()(value, 0., negligibility, negligibility)) {
            ++nNZElementValues;
            nZeroStreak = 0;
        } else {
            ++nZeroStreak;
        }

        // At the end of each block, the block counts as non-zero if the
        // trailing streak of negligible elements is shorter than the block
        if ((k % nBlockElements) == (nBlockElements - 1)) {
            if (nZeroStreak < nBlockElements) {
                ++nNZBlockValues;
            }
            nZeroStreak = 0;
        }
    }

    // Evaluate local sparsity
    std::size_t nBlocks = nRows * nCols;
    std::size_t nElements = nRowElements * nColElements;

    double blockSparsity = static_cast<double>(nBlocks - nNZBlocks) / nBlocks;
    double elementSparsity = static_cast<double>(nElements - nNZElements) / nElements;

    double blockValueSparsity = static_cast<double>(nBlocks - nNZBlockValues) / nBlocks;
    double elementValueSparsity = static_cast<double>(nElements - nNZElementValues) / nElements;

    // Evaluate local memory usage
    double valueStorageMemory = static_cast<double>(nNZElements * sizeof(decltype(m_values)::value_type));

    // Display local information
    stream << padding << "Local information: " << std::endl;
    stream << padding << " Maximum number of non-zero blocks per row ............. " << maxRowNZBlocks << std::endl;
    stream << padding << " Maximum number of non-zero elements per row ........... " << maxRowNZElements << std::endl;
    stream << padding << " Number of block rows .................................. " << nRows << std::endl;
    stream << padding << " Number of block columns ............................... " << nCols << std::endl;
    stream << padding << " Number of non-zero blocks (pattern) ................... " << nNZBlocks << std::endl;
    stream << padding << " Number of non-zero elements (pattern) ................. " << nNZElements << std::endl;
    stream << padding << " Number of non-zero blocks (non-negligible values) ..... " << nNZBlockValues << std::endl;
    stream << padding << " Number of non-zero elements (non-negligible values) ... " << nNZElementValues << std::endl;
    stream << padding << " Sparsity of the blocks (pattern) ...................... " << blockSparsity << std::endl;
    stream << padding << " Sparsity of the elements (pattern) .................... " << elementSparsity << std::endl;
    stream << padding << " Sparsity of the blocks (non-negligible values) ........ " << blockValueSparsity << std::endl;
    stream << padding << " Sparsity of the elements (non-negligible values) ...... " << elementValueSparsity << std::endl;

    stream << padding << " Memory used by the value storage ...................... ";
    if (valueStorageMemory > 1024 * 1024 * 1024) {
        stream << valueStorageMemory / (1024 * 1024 * 1024) << " GB";
    } else if (valueStorageMemory > 1024 * 1024) {
        stream << valueStorageMemory / (1024 * 1024) << " MB";
    } else if (valueStorageMemory > 1024) {
        stream << valueStorageMemory / (1024) << " KB";
    } else {
        stream << valueStorageMemory << " B";
    }
    stream << std::endl;

#if BITPIT_ENABLE_MPI==1
    // Display global information
    if (isPartitioned()) {
        // Get MPI data types
        MPI_Datatype valueMPIDataType;
        if (std::is_same<decltype(m_values)::value_type, double>::value) {
            valueMPIDataType = MPI_DOUBLE;
        } else if (std::is_same<decltype(m_values)::value_type, float>::value) {
            valueMPIDataType = MPI_FLOAT;
        } else {
            throw std::runtime_error("Unable to identify the MPI data type of the matrix values.");
        }

        // Gather global information
        long nGlobalRows = getRowGlobalCount();
        long nGlobalCols = getColGlobalCount();

        long nGlobalRowElements = getRowGlobalElementCount();
        long nGlobalColElements = getColGlobalElementCount();

        long nGlobalNZBlocks = getNZGlobalCount();
        long nGlobalNZElements = getNZGlobalElementCount();

        long maxGlobalRowNZBlocks;
        long maxGlobalRowNZElements;
        MPI_Allreduce(&maxRowNZBlocks, &maxGlobalRowNZBlocks, 1, MPI_LONG, MPI_MAX, getCommunicator());
        MPI_Allreduce(&maxRowNZElements, &maxGlobalRowNZElements, 1, MPI_LONG, MPI_MAX, getCommunicator());

        // Count global non-negligible blocks and elements
        long nGlobalNZBlockValues;
        long nGlobalNZElementValues;
        MPI_Allreduce(&nNZBlockValues, &nGlobalNZBlockValues, 1, MPI_LONG, MPI_SUM, getCommunicator());
        MPI_Allreduce(&nNZElementValues, &nGlobalNZElementValues, 1, MPI_LONG, MPI_SUM, getCommunicator());

        // Evaluate global sparsity
        std::size_t nGlobalBlocks = nGlobalRows * nGlobalCols;
        std::size_t nGlobalElements = nGlobalRowElements * nGlobalColElements;

        double globalBlockSparsity = static_cast<double>(nGlobalBlocks - nGlobalNZBlocks) / nGlobalBlocks;
        double globalElementSparsity = static_cast<double>(nGlobalElements - nGlobalNZElements) / nGlobalElements;

        double globalBlockValueSparsity = static_cast<double>(nGlobalBlocks - nGlobalNZBlockValues) / nGlobalBlocks;
        double globalElementValueSparsity = static_cast<double>(nGlobalElements - nGlobalNZElementValues) / nGlobalElements;

        // Evaluate global memory usage
        double valueStorageGlobalMemory;
        MPI_Allreduce(&valueStorageMemory, &valueStorageGlobalMemory, 1, valueMPIDataType, MPI_SUM, getCommunicator());

        // Display information
        stream << padding << "Global information: " << std::endl;
        stream << padding << " Maximum number of non-zero blocks per row ............. " << maxGlobalRowNZBlocks << std::endl;
        stream << padding << " Maximum number of non-zero elements per row ........... " << maxGlobalRowNZElements << std::endl;
        stream << padding << " Number of block columns ............................... " << nGlobalCols << std::endl;
        stream << padding << " Number of block rows .................................. " << nGlobalRows << std::endl;
        stream << padding << " Number of non-zero blocks (pattern) ................... " << nGlobalNZBlocks << std::endl;
        stream << padding << " Number of non-zero elements (pattern) ................. " << nGlobalNZElements << std::endl;
        stream << padding << " Number of non-zero blocks (non-negligible values) ..... " << nGlobalNZBlockValues << std::endl;
        stream << padding << " Number of non-zero elements (non-negligible values) ... " << nGlobalNZElementValues << std::endl;
        stream << padding << " Sparsity of the blocks (pattern) ...................... " << globalBlockSparsity << std::endl;
        stream << padding << " Sparsity of the elements (pattern) .................... " << globalElementSparsity << std::endl;
        stream << padding << " Sparsity of the blocks (non-negligible values) ........ " << globalBlockValueSparsity << std::endl;
        stream << padding << " Sparsity of the elements (non-negligible values) ...... " << globalElementValueSparsity << std::endl;

        stream << padding << " Memory used by the value storage ...................... ";
        if (valueStorageGlobalMemory > 1024 * 1024 * 1024) {
            stream << valueStorageGlobalMemory / (1024 * 1024 * 1024) << " GB";
        } else if (valueStorageGlobalMemory > 1024 * 1024) {
            stream << valueStorageGlobalMemory / (1024 * 1024) << " MB";
        } else if (valueStorageGlobalMemory > 1024) {
            stream << valueStorageGlobalMemory / (1024) << " KB";
        } else {
            stream << valueStorageGlobalMemory << " B";
        }
        stream << std::endl;
    }
#endif

    // Display information
    stream << padding << "Display information: " << std::endl;
    stream << padding << " Negligibility threshold ............................... " << negligibility << std::endl;
}

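/*!
 * Illustration (not part of the original file): an assembled matrix can be
 * inspected with, e.g.,
 * \code
 * matrix.display(std::cout, 1.e-12);
 * \endcode
 * where every stored element that equals zero within the 1.e-12 tolerance
 * is treated as negligible by the counters above.
 */
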
/*!
 * Helpers for managing the pattern and value storage. The reserve and
 * shrink signatures in this block are reconstructed: their names are
 * assumed counterparts of clearPatternStorage() and clearValueStorage().
 */
void SparseMatrix::reservePatternStorage()
{
    long nNZ = getNZCount();

    m_pattern.reserve(nNZ);
}

void SparseMatrix::reservePatternStorage(long nNZ)
{
    long nRows = getRowCount();

    m_pattern.reserve(nRows, nNZ);
}

void SparseMatrix::shrinkPatternStorage()
{
    m_pattern.shrinkToFit();
}

void SparseMatrix::clearPatternStorage(bool release)
{
    m_pattern.clear(release);
}

void SparseMatrix::reserveValueStorage()
{
    long nNZ = getNZCount();

    reserveValueStorage(nNZ);
}

void SparseMatrix::reserveValueStorage(long nNZ)
{
    long nNZElements = getNZElementCount(nNZ);

    m_values.reserve(nNZElements);
}

void SparseMatrix::shrinkValueStorage()
{
    m_values.shrink_to_fit();
}

void SparseMatrix::clearValueStorage(bool release)
{
    m_values.clear();

    if (release) {
        m_values.shrink_to_fit();
    }
}

/*!
 * Assembles the matrix.
 *
 * Assembly can be performed only once all the rows have been added; for a
 * partitioned matrix it also synchronizes the global non-zero counters
 * across the processes of the communicator.
 */
void SparseMatrix::assembly()
{
    // Early return if the matrix is already assembled.
    if (isAssembled()) {
        return;
    }

    // Assembly can be called only after adding all the rows
    if (countMissingRows() != 0) {
        throw std::runtime_error("Assembly can be called only after adding all the rows.");
    }

#if BITPIT_ENABLE_MPI==1
    // Update global information of the non-zero elements
    if (m_partitioned) {
        MPI_Allreduce(&m_maxRowNZ, &m_global_maxRowNZ, 1, MPI_LONG, MPI_MAX, m_communicator);
        MPI_Allreduce(&m_nNZ, &m_global_nNZ, 1, MPI_LONG, MPI_SUM, m_communicator);
    } else {
        m_global_maxRowNZ = m_maxRowNZ;
        m_global_nNZ = m_nNZ;
    }
#endif

    // The matrix is now assembled
    m_assembled = true;
}

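/*!
 * A minimal build sketch (not part of the original file), assuming a serial
 * 2x2 matrix with one non-zero entry per row:
 * \code
 * bitpit::SparseMatrix A(2, 2, 2);
 * A.addRow({0}, {1.0});   // row 0: diagonal entry
 * A.addRow({1}, {2.0});   // row 1: diagonal entry
 * A.assembly();           // no rows are missing, so this succeeds
 * \endcode
 * Calling assembly() before both rows have been added would throw, since
 * countMissingRows() would still be non-zero.
 */
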
bool SparseMatrix::isAssembled() const
{
    return m_assembled;
}

long SparseMatrix::countMissingRows() const
{
    long nRows = getRowCount();
    long nAddedRows = countAddedRows();

    return (nRows - nAddedRows);
}

long SparseMatrix::countAddedRows() const
{
    return (m_lastRow + 1);
}

int SparseMatrix::getBlockSize() const
{
    return m_blockSize;
}

long SparseMatrix::getRowCount() const
{
    return m_nRows;
}

long SparseMatrix::getColCount() const
{
    return m_nCols;
}

long SparseMatrix::getRowElementCount() const
{
    long nElements = getBlockSize() * getRowCount();

    return nElements;
}

long SparseMatrix::getColElementCount() const
{
    long nElements = getBlockSize() * getColCount();

    return nElements;
}

long SparseMatrix::getNZCount() const
{
    return m_nNZ;
}

long SparseMatrix::getRowNZCount(long row) const
{
    return m_pattern.getItemCount(row);
}

long SparseMatrix::getMaxRowNZCount() const
{
    return m_maxRowNZ;
}

long SparseMatrix::getNZElementCount() const
{
    long nNZ = getNZCount();

    return getNZElementCount(nNZ);
}

long SparseMatrix::getNZElementCount(long nNZ) const
{
    int blockSize = getBlockSize();
    long nElements = blockSize * blockSize * nNZ;

    return nElements;
}

long SparseMatrix::getRowNZElementCount(long row) const
{
    long nElements = getBlockSize() * getRowNZCount(row);

    return nElements;
}

long SparseMatrix::getMaxRowNZElementCount() const
{
    long nElements = getBlockSize() * getMaxRowNZCount();

    return nElements;
}

#if BITPIT_ENABLE_MPI==1
bool SparseMatrix::isPartitioned() const
{
    return m_partitioned;
}

const MPI_Comm & SparseMatrix::getCommunicator() const
{
    return m_communicator;
}

void SparseMatrix::setCommunicator(MPI_Comm communicator)
{
    if ((communicator != MPI_COMM_NULL) && (communicator != MPI_COMM_SELF)) {
        MPI_Comm_dup(communicator, &m_communicator);
    } else {
        m_communicator = MPI_COMM_SELF;
    }
}

void SparseMatrix::freeCommunicator()
{
    if (m_communicator != MPI_COMM_SELF) {
        int finalizedCalled;
        MPI_Finalized(&finalizedCalled);
        if (!finalizedCalled) {
            MPI_Comm_free(&m_communicator);
        }
    }
}

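/*!
 * Note (not part of the original file): setCommunicator() duplicates any
 * non-trivial communicator it is given, so the caller keeps ownership of
 * the original handle. A minimal sketch, assuming an initialized MPI
 * environment:
 * \code
 * MPI_Comm comm;
 * MPI_Comm_dup(MPI_COMM_WORLD, &comm);
 * bitpit::SparseMatrix A(comm, true, 10, 10, 30);
 * MPI_Comm_free(&comm);   // safe: the matrix holds its own duplicate
 * \endcode
 */
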
long SparseMatrix::getRowGlobalCount() const
{
    return m_global_nRows;
}

long SparseMatrix::getRowGlobalOffset() const
{
    return m_global_rowOffset;
}

long SparseMatrix::getRowGlobalElementCount() const
{
    long nElements = getBlockSize() * getRowGlobalCount();

    return nElements;
}

long SparseMatrix::getRowGlobalElementOffset() const
{
    long offset = getBlockSize() * getRowGlobalOffset();

    return offset;
}

long SparseMatrix::getColGlobalCount() const
{
    return m_global_nCols;
}

long SparseMatrix::getColGlobalOffset() const
{
    return m_global_colOffset;
}

long SparseMatrix::getColGlobalElementCount() const
{
    long nElements = getBlockSize() * getColGlobalCount();

    return nElements;
}

long SparseMatrix::getColGlobalElementOffset() const
{
    long offset = getBlockSize() * getColGlobalOffset();

    return offset;
}

long SparseMatrix::getNZGlobalCount() const
{
    return m_global_nNZ;
}

long SparseMatrix::getMaxRowNZGlobalCount() const
{
    return m_global_maxRowNZ;
}

long SparseMatrix::getNZGlobalElementCount() const
{
    long nGlobalNZ = getNZGlobalCount();

    return getNZElementCount(nGlobalNZ);
}

long SparseMatrix::getMaxRowNZGlobalElementCount() const
{
    long nElement = getBlockSize() * getMaxRowNZGlobalCount();

    return nElement;
}

std::vector<long> SparseMatrix::extractLocalGlobalRows() const
{
    const std::size_t *rowExtents = m_pattern.indices();

    std::vector<long> localGlobalRows;
    localGlobalRows.reserve(m_nRows);
    for (long i = 0; i < m_nRows; ++i) {
        std::size_t nRowNZ = rowExtents[i + 1] - rowExtents[i];
        if (nRowNZ > 0) {
            localGlobalRows.push_back(m_global_rowOffset + i);
        }
    }

    return localGlobalRows;
}

std::vector<long> SparseMatrix::extractGhostGlobalRows() const
{
    // Rows are always owned by the local process, hence there are no
    // ghost rows
    return std::vector<long>();
}

std::vector<long> SparseMatrix::extractLocalGlobalCols() const
{
    long firstGlobalCol = m_global_colOffset;
    long lastGlobalCol = m_global_colOffset + m_nCols - 1;

    const long *globalCols = m_pattern.data();

    std::vector<long> localGlobalCols;
    localGlobalCols.reserve(m_nNZ);
    for (long k = 0; k < m_nNZ; ++k) {
        long globalCol = globalCols[k];
        if (globalCol >= firstGlobalCol && globalCol <= lastGlobalCol) {
            utils::addToOrderedVector<long>(globalCol, localGlobalCols);
        }
    }

    return localGlobalCols;
}

std::vector<long> SparseMatrix::extractGhostGlobalCols() const
{
    long firstGlobalCol = m_global_colOffset;
    long lastGlobalCol = m_global_colOffset + m_nCols - 1;

    const long *globalCols = m_pattern.data();

    std::vector<long> ghostGlobalCols;
    if (m_nNZ > 0) {
        ghostGlobalCols.reserve(m_nNZ);
        for (long k = 0; k < m_nNZ; ++k) {
            long globalCol = globalCols[k];
            if (globalCol < firstGlobalCol || globalCol > lastGlobalCol) {
                utils::addToOrderedVector<long>(globalCol, ghostGlobalCols);
            }
        }
    }

    return ghostGlobalCols;
}
#endif
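
/*!
 * Illustration (not part of the original file): if this rank owns the
 * columns with global indices 100..149 (m_global_colOffset = 100,
 * m_nCols = 50), a stored column index of 120 is local, while 95 or 150
 * would be collected by extractGhostGlobalCols(), i.e. they are entries
 * that couple this rank's rows to unknowns owned by other ranks.
 */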

void SparseMatrix::addRow(const std::vector<long> &rowPattern, const std::vector<double> &rowValues)
{
    addRow(rowPattern.size(), rowPattern.data(), rowValues.data());
}

void SparseMatrix::addRow(long nRowNZ, const long *rowPattern, const double *rowValues)
{
    if (countMissingRows() == 0) {
        throw std::runtime_error("Unable to add another row: all rows have already been defined.");
    }

    // Add the row pattern
    m_pattern.pushBack(nRowNZ, rowPattern);

    // Add row values
    const int blockSize = getBlockSize();
    const int nBlockElements = blockSize * blockSize;

    m_values.insert(m_values.end(), rowValues, rowValues + nBlockElements * nRowNZ);

    // Update the non-zero element counters
    m_maxRowNZ = std::max(nRowNZ, m_maxRowNZ);
    m_nNZ += nRowNZ;

    // Update the index of the last row
    m_lastRow++;
}

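/*!
 * Illustration (not part of the original file): for a matrix created with
 * blockSize = 2, each non-zero block carries blockSize * blockSize = 4
 * values, so a row with two non-zero blocks is added as
 * \code
 * std::vector<long> pattern = {0, 3};                 // block columns
 * std::vector<double> values = {1., 2., 3., 4.,       // block in column 0
 *                               5., 6., 7., 8.};      // block in column 3
 * matrix.addRow(pattern, values);
 * \endcode
 * i.e. rowValues must hold nBlockElements * nRowNZ doubles.
 */
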
ConstProxyVector<long> SparseMatrix::getRowPattern(long row) const
{
    ConstProxyVector<long> pattern;
    getRowPattern(row, &pattern);

    return pattern;
}

void SparseMatrix::getRowPattern(long row, ConstProxyVector<long> *pattern) const
{
    const std::size_t rowPatternSize = getRowNZCount(row);
    const long *rowPattern = getRowPatternData(row);
    pattern->set(rowPattern, rowPatternSize);
}

ConstProxyVector<double> SparseMatrix::getRowValues(long row) const
{
    ConstProxyVector<double> values;
    getRowValues(row, &values);

    return values;
}

void SparseMatrix::getRowValues(long row, ConstProxyVector<double> *values) const
{
    const int blockSize = getBlockSize();
    const int nBlockElements = blockSize * blockSize;

    const std::size_t nRowValues = nBlockElements * getRowNZCount(row);
    const double *rowValues = getRowValuesData(row);

    values->set(rowValues, nRowValues);
}

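/*!
 * Note (not part of the original file): the non-const accessors below
 * delegate to their const overloads through a const_cast, a common C++
 * idiom that keeps the row-indexing logic in a single place.
 */
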
long * SparseMatrix::getRowPatternData(long row)
{
    return const_cast<long *>(const_cast<const SparseMatrix *>(this)->getRowPatternData(row));
}

const long * SparseMatrix::getRowPatternData(long row) const
{
    const std::size_t *rowExtent = m_pattern.indices(row);
    const std::size_t rowPatternBegin = rowExtent[0];

    const long *rowPattern = m_pattern.data() + rowPatternBegin;

    return rowPattern;
}

double * SparseMatrix::getRowValuesData(long row)
{
    return const_cast<double *>(const_cast<const SparseMatrix *>(this)->getRowValuesData(row));
}

const double * SparseMatrix::getRowValuesData(long row) const
{
    const int blockSize = getBlockSize();
    const int nBlockElements = blockSize * blockSize;

    const std::size_t *rowExtent = m_pattern.indices(row);
    const std::size_t valuesBegin = nBlockElements * rowExtent[0];

    const double *rowValues = m_values.data() + valuesBegin;

    return rowValues;
}

std::unique_ptr<SparseMatrix> SparseMatrix::computeTranspose() const
{
    // Early return if the matrix is not assembled.
    if (!isAssembled()) {
        return nullptr;
    }

    // Create the transpose matrix
    std::unique_ptr<SparseMatrix> transpose;
#if BITPIT_ENABLE_MPI==1
    transpose = std::unique_ptr<SparseMatrix>(new SparseMatrix(m_communicator, m_partitioned, m_nCols, m_nRows, m_nNZ));
#else
    transpose = std::unique_ptr<SparseMatrix>(new SparseMatrix(m_nCols, m_nRows, m_nNZ));
#endif

    // Create an empty transpose pattern
    std::vector<std::size_t> transposeRowSizes(transpose->m_nRows, 0);
    for (long i = 0; i < m_nNZ; ++i) {
        long column = *(m_pattern.data() + i);
        ++transposeRowSizes[column];
    }

    transpose->m_pattern.initialize(transpose->m_nRows, transposeRowSizes.data(), static_cast<long>(0));

    // Create the empty storage for the values
    transpose->m_values.resize(m_blockSize * m_nNZ);

    // Set non-zero information
    transpose->m_nNZ = m_nNZ;

    transpose->m_maxRowNZ = 0;
    for (long i = 0; i < transpose->m_nRows; ++i) {
        transpose->m_maxRowNZ = std::max(static_cast<long>(transposeRowSizes[i]), transpose->m_maxRowNZ);
    }

    // Fill the pattern and the values of the transpose matrix
    const std::size_t *rowExtents = m_pattern.indices();
    std::fill(transposeRowSizes.begin(), transposeRowSizes.end(), 0);
    for (long row = 0; row < m_nRows; ++row) {
        const std::size_t rowPatternSize = rowExtents[row + 1] - rowExtents[row];
        const long *rowPattern = m_pattern.data() + rowExtents[row];
        const double *rowValues = m_values.data() + rowExtents[row];
        for (std::size_t k = 0; k < rowPatternSize; ++k) {
            long column = rowPattern[k];
            const std::size_t transposeRowLastIndex = *(transpose->m_pattern.indices(column)) + transposeRowSizes[column];

            long *transposeRowLastPattern = transpose->m_pattern.data() + transposeRowLastIndex;
            *transposeRowLastPattern = row;

            double *transposeRowLastValue = transpose->m_values.data() + transposeRowLastIndex;
            *transposeRowLastValue = rowValues[k];

            ++transposeRowSizes[column];
        }
    }
    transpose->m_lastRow = transpose->m_nRows - 1;

    // Assemble the transpose matrix
    transpose->assembly();

    return transpose;
}
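
/*!
 * Note (not part of the original file): the value indexing above maps one
 * value per pattern entry, so the transpose is meaningful for matrices
 * with a block size of one. A minimal sketch:
 * \code
 * bitpit::SparseMatrix A(2, 2, 3);
 * A.addRow({0, 1}, {1., 2.});
 * A.addRow({1}, {3.});
 * A.assembly();
 * std::unique_ptr<bitpit::SparseMatrix> At = A.computeTranspose();
 * // At row 0 holds {(0, 1.)}; At row 1 holds {(0, 2.), (1, 3.)}
 * \endcode
 */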

}