// Number of values transmitted per grid point
const int nvalues = 1 // level
+ dim // grid point index
+ dim // coordinates
+ groupdata0.numvars; // grid function values
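// i.e. for dim == 3 each record is: level, i, j, k, x, y, z,
// followed by the numvars variable values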
// Data transmitted from this process
vector<CCTK_REAL> data;
data.reserve(10000); // preallocate (a heuristic) to avoid early reallocations
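// Collect the output points owned by this process, over all
// refinement levels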
for (const auto &leveldata : ghext->leveldata) {
const auto &groupdata = *leveldata.groupdata.at(gi);
const int tl = 0;
const auto &geom = ghext->amrcore->Geom(leveldata.level);
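// Grid spacing and coordinate origin on this level; directions with
// indextype[d] == 1 (cell centres) are offset from the domain
// boundary by half a grid spacing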
vect<CCTK_REAL, dim> x0, dx;
for (int d = 0; d < dim; ++d) {
dx[d] = geom.CellSize(d);
x0[d] = geom.ProbLo(d) + 0.5 * groupdata.indextype[d] * dx[d];
}
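// Convert the requested output coordinates to grid point indices;
// directions along which everything is output are marked with
// INT_MIN and are not used below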
vect<int, dim> icoord;
for (int d = 0; d < dim; ++d) {
if (outdirs[d])
icoord[d] = INT_MIN;
else
icoord[d] = lrint((outcoords[d] - x0[d]) / dx[d]);
}
const auto &mfab = *groupdata.mfab.at(tl);
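// Loop over all grid boxes of this level owned by this process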
for (amrex::MFIter mfi(mfab); mfi.isValid(); ++mfi) {
const amrex::Array4<const CCTK_REAL> &vars = mfab.array(mfi);
const vect<int, dim> vmin{vars.begin.x, vars.begin.y, vars.begin.z};
const vect<int, dim> vmax{vars.end.x, vars.end.y, vars.end.z};
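// Intersect the requested output region with this box; if the
// intersection is empty in any direction, this box contributes no
// points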
bool output_something = true;
vect<int, dim> imin, imax;
for (int d = 0; d < dim; ++d) {
if (outdirs[d]) {
// output everything
imin[d] = vmin[d];
imax[d] = vmax[d];
} else if (icoord[d] >= vmin[d] && icoord[d] < vmax[d]) {
// output one point
imin[d] = icoord[d];
imax[d] = icoord[d] + 1;
} else {
// output nothing
output_something = false;
}
}
if (output_something) {
for (int k = imin[2]; k < imax[2]; ++k) {
for (int j = imin[1]; j < imax[1]; ++j) {
for (int i = imin[0]; i < imax[0]; ++i) {
const array<int, dim> I{i, j, k};
const auto old_size = data.size();
data.push_back(leveldata.level);
for (int d = 0; d < dim; ++d)
data.push_back(I[d]);
for (int d = 0; d < dim; ++d)
data.push_back(x0[d] + I[d] * dx[d]);
for (int vi = 0; vi < groupdata.numvars; ++vi)
data.push_back(vars(i, j, k, vi));
assert(data.size() == old_size + nvalues);
}
}
}
} // if output_something
} // for mfi
} // for leveldata
assert(data.size() % nvalues == 0);
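// Funnel all data to a single I/O process, which writes the file;
// a single writer keeps the records ordered and the file consistent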
const MPI_Comm comm = amrex::ParallelDescriptor::Communicator();
const int myproc = amrex::ParallelDescriptor::MyProc();
const int nprocs = amrex::ParallelDescriptor::NProcs();
const int ioproc = 0;
vector<string> varnames;
for (int vi = 0; vi < groupdata0.numvars; ++vi)
varnames.push_back(CCTK_VarName(groupdata0.firstvarindex + vi));
const string sep = "\t";
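// Only the I/O process opens the file; on all other processes the
// stream remains closed and unused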
ofstream file;
if (myproc == ioproc) {
file.open(filename);
// Use increased precision for floating-point output; see also
// https://stackoverflow.com/a/30968371
file << setprecision(numeric_limits<CCTK_REAL>::digits10 + 1) << scientific;
// Output header
int col = 0;
file << "# " << ++col << ":iteration";
file << sep << ++col << ":time";
file << sep << ++col << ":level";
for (int d = 0; d < dim; ++d)
file << sep << ++col << ":" << "ijk"[d];
for (int d = 0; d < dim; ++d)
file << sep << ++col << ":" << "xyz"[d];
// Use pre-increment here as well; post-increment would repeat the
// previous column number for the first variable
for (const auto &varname : varnames)
file << sep << ++col << ":" << varname;
file << "\n";
}
// Loop over all processes; each in turn transfers its data to the
// I/O process, which writes it
for (int proc = 0; proc < nprocs; ++proc) {
if (myproc == ioproc || myproc == proc) {
vector<CCTK_REAL> out_data;
if (proc == ioproc) {
// Optimize self-communication: move the local data instead of
// passing it through MPI
swap(out_data, data);
} else {
if (myproc == ioproc) {
// Receive the data; the count transmitted first is the number of
// CCTK_REAL values, not the number of grid points
int ndata;
MPI_Recv(&ndata, 1, MPI_INT, proc, 0, comm, MPI_STATUS_IGNORE);
out_data.resize(ndata);
static_assert(is_same_v<CCTK_REAL, double>);
MPI_Recv(out_data.data(), ndata, MPI_DOUBLE, proc, 0, comm,
MPI_STATUS_IGNORE);
} else {
// Send the data; the count goes first so that the receiver can
// allocate its buffer
assert(data.size() <= INT_MAX);
const int ndata = data.size();
MPI_Send(&ndata, 1, MPI_INT, ioproc, 0, comm);
static_assert(is_same_v<CCTK_REAL, double>);
MPI_Send(data.data(), ndata, MPI_DOUBLE, ioproc, 0, comm);
}
}
if (myproc == ioproc) {
// Output data
assert(out_data.size() % nvalues == 0);
size_t pos = 0;
while (pos < out_data.size()) {
file << cctkGH->cctk_iteration << sep << cctkGH->cctk_time;
// The first 1 + dim values of each record (level and grid point
// indices) are integers; print them as such
for (int v = 0; v < 1 + dim; ++v)
file << sep << int(out_data.at(pos++));
for (int v = 1 + dim; v < nvalues; ++v)
file << sep << out_data.at(pos++);
file << "\n";
}
}