Parallel 2D adaptive mesh refinement (AMR) with data using PABLO.
This example is the parallel version of PABLO_example_00002.
Here one quadtree is instantiated in the same way on every process. Therefore, each process refines globally the initial quadrant 3 times. Up to this point there is no parallel paradigm in action; the code is simply replicated on every process.
At this point a load-balance method call is performed and the grid is partitioned and distributed among the processes of the world communicator. From now on, each process owns one portion of the grid.
The test continues as in example 00002, but after every refinement/coarsening a load-balance call is introduced to keep the computational burden well distributed.
To run: ./PABLO_example_00005
To see the result visit: PABLO website
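Before the full listing, here is a minimal stand-alone sketch of the distribution step described above. It is not part of the example source: the variable name tree, the console printout and the three global refinements are illustrative assumptions, while the PABLO calls themselves (PabloUniform, adaptGlobalRefine, loadBalance, getNumOctants) are the ones used in the example below.

#if BITPIT_ENABLE_MPI==1
#include <mpi.h>
#endif
#include "bitpit_PABLO.hpp"
#include <iostream>

int main(int argc, char *argv[])
{
#if BITPIT_ENABLE_MPI==1
    MPI_Init(&argc, &argv);
#endif
    {
        /**<One 2D quadtree, instantiated identically (replicated) on every process.*/
        bitpit::PabloUniform tree(2);

        /**<Serial-like stage: every process performs the same global refinements.*/
        for (int i = 0; i < 3; ++i) {
            tree.adaptGlobalRefine();
        }

#if BITPIT_ENABLE_MPI==1
        /**<Partition and distribute the grid among the processes of the world communicator.*/
        tree.loadBalance();

        /**<Each process now owns only its portion of the grid.*/
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        std::cout << "rank " << rank << " owns " << tree.getNumOctants() << " octants" << std::endl;
#endif
    }
#if BITPIT_ENABLE_MPI==1
    MPI_Finalize();
#endif
    return 0;
}

The complete example source follows; it applies the same pattern, but inserts a loadBalance() call after every local refinement or coarsening step.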
#if BITPIT_ENABLE_MPI==1
#include <mpi.h>
#endif
#include "bitpit_PABLO.hpp"
using namespace std;
using namespace bitpit;
void run()
{
    int iter = 0;

    /**<Instantiation of a 2D PABLO uniform object.*/
    PabloUniform pablo5(2);

    /**<Set NO 2:1 balance for the octree.*/
    uint32_t idx = 0;
    pablo5.setBalance(idx, false);

    /**<Compute the connectivity and write the initial octree.*/
    pablo5.computeConnectivity();
    pablo5.write("pablo00005_iter" + to_string(static_cast<unsigned long long>(iter)));
    /**<Refine globally two levels and write the octree.*/
    for (iter = 1; iter < 3; iter++) {
        pablo5.adaptGlobalRefine();
        pablo5.updateConnectivity();
        pablo5.write("pablo00005_iter" + to_string(static_cast<unsigned long long>(iter)));
    }
#if BITPIT_ENABLE_MPI==1
    /**<PARALLEL TEST: call loadBalance; the octree is now partitioned and distributed among the processes.*/
    pablo5.loadBalance();
#endif
    /**<Define a center point and a radius.*/
    double xc, yc;
    xc = yc = 0.5;
    double radius = 0.4;
    /**<Refine the octants with at least one node inside the circle, load balancing after every adaption.*/
    for (iter = 3; iter < 9; iter++) {
        uint32_t nocts = pablo5.getNumOctants();
        for (unsigned int i = 0; i < nocts; i++) {
            /**<Extract the nodes of the current octant.*/
            vector<array<double,3> > nodes = pablo5.getNodes(i);
            for (int j = 0; j < 4; j++) {
                double x = nodes[j][0];
                double y = nodes[j][1];
                /**<Set refinement marker = 1 for octants with at least one node inside the circle.*/
                if ((pow((x-xc),2.0) + pow((y-yc),2.0) <= pow(radius,2.0))) {
                    pablo5.setMarker(i, 1);
                }
            }
        }

        /**<Adapt the octree.*/
        pablo5.adapt();

#if BITPIT_ENABLE_MPI==1
        /**<(Load)Balance the octree over the processes.*/
        pablo5.loadBalance();
#endif

        /**<Update the connectivity and write the octree.*/
        pablo5.updateConnectivity();
        pablo5.write("pablo00005_iter" + to_string(static_cast<unsigned long long>(iter)));
    }
    /**<Coarsen globally one level and write the octree.*/
    pablo5.adaptGlobalCoarse();
    pablo5.updateConnectivity();
    pablo5.write("pablo00005_iter" + to_string(static_cast<unsigned long long>(iter)));
    /**<Define a new center point and a smaller radius.*/
    xc = yc = 0.35;
    radius = 0.15;
    /**<Coarsen the octants with at least one node inside the new circle, load balancing after every adaption.*/
    for (iter = 10; iter < 15; iter++) {
        uint32_t nocts = pablo5.getNumOctants();
        for (unsigned int i = 0; i < nocts; i++) {
            /**<Extract the nodes of the current octant.*/
            vector<array<double,3> > nodes = pablo5.getNodes(i);
            for (int j = 0; j < 4; j++) {
                double x = nodes[j][0];
                double y = nodes[j][1];
                /**<Set marker = -1 (coarsening) for octants with at least one node inside the circle.*/
                if ((pow((x-xc),2.0) + pow((y-yc),2.0) <= pow(radius,2.0))) {
                    pablo5.setMarker(i, -1);
                }
            }
        }

        /**<Adapt the octree.*/
        pablo5.adapt();

#if BITPIT_ENABLE_MPI==1
        /**<(Load)Balance the octree over the processes.*/
        pablo5.loadBalance();
#endif

        /**<Update the connectivity and write the octree.*/
        pablo5.updateConnectivity();
        pablo5.write("pablo00005_iter" + to_string(static_cast<unsigned long long>(iter)));
    }
    /**<Coarsen globally one level and write the final octree.*/
    pablo5.adaptGlobalCoarse();
    pablo5.updateConnectivity();
    pablo5.write("pablo00005_iter" + to_string(static_cast<unsigned long long>(iter)));
}
int main(int argc, char *argv[])
{
#if BITPIT_ENABLE_MPI==1
    /**<Initialize MPI.*/
    MPI_Init(&argc, &argv);
#else
    BITPIT_UNUSED(argc);
    BITPIT_UNUSED(argv);
#endif

    int nProcs;
    int rank;
#if BITPIT_ENABLE_MPI==1
    MPI_Comm_size(MPI_COMM_WORLD, &nProcs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#else
    nProcs = 1;
    rank   = 0;
#endif

    /**<Initialize the logger.*/
    log::manager().initialize(log::MODE_SEPARATE, false, nProcs, rank);
    log::cout() << log::fileVerbosity(log::LEVEL_INFO);
    log::cout() << log::disableConsole();

    /**<Run the example.*/
    try {
        run();
    } catch (const std::exception &exception) {
        log::cout() << exception.what();
        exit(1);
    }

#if BITPIT_ENABLE_MPI==1
    /**<Finalize MPI.*/
    MPI_Finalize();
#endif
}
Member functions and macros referenced in this example:

void setMarker(uint32_t idx, int8_t marker)
uint32_t getNumOctants() const
bool adaptGlobalCoarse(bool mapper_flag=false)
bool adapt(bool mapper_flag=false)
void loadBalance(const dvector *weight=NULL)
void computeConnectivity()
bool adaptGlobalRefine(bool mapper_flag=false)
void updateConnectivity()
void setBalance(uint32_t idx, bool balance)
std::array< T, d > pow(std::array< T, d > &x, double p)
#define BITPIT_UNUSED(variable)
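The loadBalance signature above also accepts an optional per-octant weight vector. As a hedged sketch, not taken from the example, the helper below assigns an invented cost to each local octant and hands the weights to loadBalance so that the summed cost per process, rather than the plain octant count, is balanced. It assumes dvector is bitpit's std::vector<double> alias and that ParaTree::getLevel(idx) is available; the function name balanceByLevel and the cost model are hypothetical.

#include "bitpit_PABLO.hpp"
#include <vector>

/**<Hypothetical helper: weight each local octant by an application-defined cost.*/
void balanceByLevel(bitpit::PabloUniform &tree)
{
    uint32_t nocts = tree.getNumOctants();
    std::vector<double> weights(nocts);
    for (uint32_t i = 0; i < nocts; ++i) {
        /**<Invented cost model: assume finer octants are more expensive to update.*/
        weights[i] = 1.0 + static_cast<double>(tree.getLevel(i));
    }

    /**<Passing NULL (the default) would reproduce the unweighted partitioning used in the example.*/
    tree.loadBalance(&weights);
}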