Skip to content

Commit

Permalink
Make Serial Mesh files work without APF
Browse files Browse the repository at this point in the history
  • Loading branch information
davschneller committed Oct 28, 2023
1 parent 151c921 commit cef998a
Show file tree
Hide file tree
Showing 7 changed files with 196 additions and 92 deletions.
12 changes: 12 additions & 0 deletions src/aux/Distributor.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
#ifndef PUMGEN_AUX_DISTRIBUTOR_H_
#define PUMGEN_AUX_DISTRIBUTOR_H_

#include <cstdlib>

// no namespace for now

/// Returns the number of items assigned to `rank` when `total` items are
/// distributed as evenly as possible over `size` ranks: every rank gets
/// `total / size` items, and the first `total % size` ranks get one extra.
/// Summing the result over all ranks therefore yields exactly `total`.
///
/// @param total total number of items to distribute
/// @param rank  index of this rank, expected in [0, size)
/// @param size  number of ranks, expected > 0 (MPI communicator size)
/// @return number of items owned by `rank`
constexpr std::size_t getChunksize(std::size_t total, int rank, int size) {
  // Cast explicitly: rank/size come from MPI and are non-negative, and the
  // explicit casts avoid implicit signed/unsigned comparison (-Wsign-compare).
  const auto urank = static_cast<std::size_t>(rank);
  const auto usize = static_cast<std::size_t>(size);
  return total / usize + (total % usize > urank ? 1 : 0);
}

#endif // PUMGEN_AUX_DISTRIBUTOR_H_
86 changes: 19 additions & 67 deletions src/input/SerialMeshFile.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,18 +18,20 @@
#endif // PARALLEL

#include "ApfConvertWrapper.h"
#include "aux/Distributor.h"
#include <PCU.h>
#include <apfMDS.h>
#include <apfMesh2.h>
#include <gmi_null.h>
#include <vector>

#include "MeshInput.h"
#include "MeshData.h"
#include "utils/logger.h"

/**
* Read a mesh from a serial file
*/
template <typename T> class SerialMeshFile : public MeshInput {
template <typename T> class SerialMeshFile : public FullStorageMeshData {
private:
#ifdef PARALLEL
MPI_Comm m_comm;
Expand Down Expand Up @@ -73,77 +75,27 @@ template <typename T> class SerialMeshFile : public MeshInput {

const std::size_t nVertices = m_meshReader.nVertices();
const std::size_t nElements = m_meshReader.nElements();
std::size_t nLocalVertices = (nVertices + m_nProcs - 1) / m_nProcs;
std::size_t nLocalElements = (nElements + m_nProcs - 1) / m_nProcs;
if (m_rank == m_nProcs - 1) {
nLocalVertices = nVertices - (m_nProcs - 1) * nLocalVertices;
nLocalElements = nElements - (m_nProcs - 1) * nLocalElements;
}
const std::size_t nLocalVertices = getChunksize(nVertices, m_rank, m_nProcs);
const std::size_t nLocalElements = getChunksize(nElements, m_rank, m_nProcs);

gmi_register_null();
gmi_model* model = gmi_load(".null");
m_mesh = apf::makeEmptyMdsMesh(model, 3, false);

// Create elements
apf::GlobalToVert vertMap;
{
std::vector<ElementID> elements;
{
std::vector<std::size_t> elementsInt(nLocalElements * 4);
m_meshReader.readElements(elementsInt.data());
elements = std::vector<ElementID>(elementsInt.begin(), elementsInt.end());
}
logInfo(m_rank) << "Create APF connectivity";
apf::construct(m_mesh, elements.data(), nLocalElements, apf::Mesh::TET, vertMap);
}
setup(nLocalElements, nLocalVertices);

apf::alignMdsRemotes(m_mesh);
apf::deriveMdsModel(m_mesh);
logInfo(m_rank) << "Read vertex coordinates";
m_meshReader.readVertices(geometryData.data());

// Set vertices
{
std::vector<double> vertices(nLocalVertices * 3);
m_meshReader.readVertices(vertices.data());
logInfo(m_rank) << "Set coordinates in APF";
apf::setCoords(m_mesh, vertices.data(), nLocalVertices, vertMap);
}
logInfo(m_rank) << "Read cell vertices";
m_meshReader.readElements(connectivityData.data());

// Set boundaries
apf::MeshTag* boundaryTag = m_mesh->createIntTag("boundary condition", 1);
{
std::vector<int> boundaries(nLocalElements * 4);
m_meshReader.readBoundaries(boundaries.data());
apf::MeshIterator* it = m_mesh->begin(3);
std::size_t i = 0;
while (apf::MeshEntity* element = m_mesh->iterate(it)) {
apf::Adjacent adjacent;
m_mesh->getAdjacent(element, 2, adjacent);

for (int j = 0; j < 4; j++) {
if (!boundaries[i * 4 + j]) {
continue;
}

m_mesh->setIntTag(adjacent[j], boundaryTag, &boundaries[i * 4 + j]);
}

i++;
}
m_mesh->end(it);
}
logInfo(m_rank) << "Read cell groups";
m_meshReader.readGroups(groupData.data());

// Set groups
apf::MeshTag* groupTag = m_mesh->createIntTag("group", 1);
{
std::vector<int> groups(nLocalElements);
m_meshReader.readGroups(groups.data());
apf::MeshIterator* it = m_mesh->begin(3);
std::size_t i = 0;
while (apf::MeshEntity* element = m_mesh->iterate(it)) {
m_mesh->setIntTag(element, groupTag, &groups[i]);
i++;
logInfo(m_rank) << "Read boundary conditions";
std::vector<int> preBoundaryData(nLocalElements * 4);
m_meshReader.readBoundaries(preBoundaryData.data());
for (std::size_t i = 0; i < nLocalElements; ++i) {
for (int j = 0; j < 4; ++j) {
setBoundary(i, j, preBoundaryData[4 * i + j]);
}
m_mesh->end(it);
}
}
};
Expand Down
151 changes: 151 additions & 0 deletions src/input/SerialMeshFileApf.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,151 @@
/**
* @file
* This file is part of PUMGen
*
* For conditions of distribution and use, please see the copyright
* notice in the file 'COPYING' at the root directory of this package
* and the copyright notice at https://github.com/SeisSol/PUMGen
*
* @copyright 2017 Technical University of Munich
* @author Sebastian Rettenberger <sebastian.rettenberger@tum.de>
*/

#ifndef SERIAL_MESH_FILE_H
#define SERIAL_MESH_FILE_H

#ifdef PARALLEL
#include <mpi.h>
#endif // PARALLEL

#include "ApfConvertWrapper.h"
#include "aux/Distributor.h"
#include <PCU.h>
#include <apfMDS.h>
#include <apfMesh2.h>
#include <gmi_null.h>
#include <vector>

#include "MeshInput.h"

/**
* Read a mesh from a serial file
*/
/**
 * Reads a mesh from a serial file through the reader type T and builds a
 * distributed APF (PUMI) mesh from it: cell connectivity, vertex
 * coordinates, boundary-condition tags, and cell-group tags.
 *
 * @tparam T mesh reader type; must provide open(), nVertices(),
 *           nElements(), readElements(), readVertices(),
 *           readBoundaries() and readGroups() (see the calls below)
 */
template <typename T> class SerialMeshFile : public MeshInput {
  private:
#ifdef PARALLEL
  MPI_Comm m_comm; // communicator over which the mesh is distributed
#endif // PARALLEL

  int m_rank;   // rank of this process within the communicator
  int m_nProcs; // total number of processes

  T m_meshReader; // underlying mesh-file reader

  public:
#ifdef PARALLEL
  /**
   * Opens the given mesh file and builds the APF mesh collectively over
   * `comm`.
   *
   * @param meshFile path to the mesh file
   * @param comm     communicator used for reading and distribution
   */
  SerialMeshFile(const char* meshFile, MPI_Comm comm = MPI_COMM_WORLD)
      : m_comm(comm), m_meshReader(comm) {
    init();
    open(meshFile);
  }
#else // PARALLEL
  /**
   * Opens the given mesh file and builds the APF mesh (serial build).
   *
   * @param meshFile path to the mesh file
   */
  SerialMeshFile(const char* meshFile) {
    init();
    open(meshFile);
  }
#endif // PARALLEL

  private:
  /**
   * Sets some parameters (called from the constructor): rank and number
   * of processes, from MPI when built with PARALLEL, otherwise 0 and 1.
   */
  void init() {
#ifdef PARALLEL
    MPI_Comm_rank(m_comm, &m_rank);
    MPI_Comm_size(m_comm, &m_nProcs);
#else // PARALLEL
    m_rank = 0;
    m_nProcs = 1;
#endif // PARALLEL
  }

  /**
   * Opens the mesh file and builds the APF mesh in m_mesh (presumably
   * declared in the MeshInput base class — not visible in this file).
   *
   * Collective when built with PARALLEL: every rank reads its own chunk of
   * elements and vertices (see getChunksize) and the chunks are stitched
   * together by apf::construct / apf::setCoords.
   *
   * @param meshFile path to the mesh file
   */
  void open(const char* meshFile) {
    m_meshReader.open(meshFile);

    const std::size_t nVertices = m_meshReader.nVertices();
    const std::size_t nElements = m_meshReader.nElements();
    // Even distribution: the first (n % nProcs) ranks get one extra item.
    const std::size_t nLocalVertices = getChunksize(nVertices, m_rank, m_nProcs);
    const std::size_t nLocalElements = getChunksize(nElements, m_rank, m_nProcs);

    // ".null" is an empty geometric model; the model is derived from the
    // mesh itself below (apf::deriveMdsModel).
    gmi_register_null();
    gmi_model* model = gmi_load(".null");
    m_mesh = apf::makeEmptyMdsMesh(model, 3, false);

    // Create elements
    logInfo(m_rank) << "Handle cell connectivity";
    apf::GlobalToVert vertMap;
    {
      std::vector<ElementID> elements;
      {
        // The reader delivers vertex indices as std::size_t; convert them
        // to ElementID (the APF connectivity type) before construction.
        std::vector<std::size_t> elementsInt(nLocalElements * 4);
        m_meshReader.readElements(elementsInt.data());
        elements = std::vector<ElementID>(elementsInt.begin(), elementsInt.end());
      }
      logInfo(m_rank) << "Create APF connectivity";
      // Tetrahedral meshes only: 4 vertices per element (apf::Mesh::TET).
      apf::construct(m_mesh, elements.data(), nLocalElements, apf::Mesh::TET, vertMap);
    }

    apf::alignMdsRemotes(m_mesh);
    apf::deriveMdsModel(m_mesh);

    // Set vertices
    logInfo(m_rank) << "Handle vertex coordinates";
    {
      std::vector<double> vertices(nLocalVertices * 3);
      m_meshReader.readVertices(vertices.data());
      apf::setCoords(m_mesh, vertices.data(), nLocalVertices, vertMap);
    }

    // Set boundaries
    // NOTE(review): the loops below assume that iterating the freshly
    // constructed mesh's regions (dimension 3) visits elements in the same
    // order they were read from the file, and that the local face order
    // returned by getAdjacent matches the reader's face numbering — confirm
    // that APF/MDS guarantees both for meshes built via apf::construct.
    logInfo(m_rank) << "Handle boundary conditions";
    apf::MeshTag* boundaryTag = m_mesh->createIntTag("boundary condition", 1);
    {
      std::vector<int> boundaries(nLocalElements * 4);
      m_meshReader.readBoundaries(boundaries.data());
      apf::MeshIterator* it = m_mesh->begin(3);
      std::size_t i = 0;
      while (apf::MeshEntity* element = m_mesh->iterate(it)) {
        apf::Adjacent adjacent;
        // Faces (dimension 2) of the current tetrahedron.
        m_mesh->getAdjacent(element, 2, adjacent);

        for (int j = 0; j < 4; j++) {
          if (!boundaries[i * 4 + j]) {
            // 0 means no boundary condition on this face: leave it untagged.
            continue;
          }

          m_mesh->setIntTag(adjacent[j], boundaryTag, &boundaries[i * 4 + j]);
        }

        i++;
      }
      m_mesh->end(it);
    }

    // Set groups
    logInfo(m_rank) << "Handle cell groups";
    apf::MeshTag* groupTag = m_mesh->createIntTag("group", 1);
    {
      std::vector<int> groups(nLocalElements);
      m_meshReader.readGroups(groups.data());
      apf::MeshIterator* it = m_mesh->begin(3);
      std::size_t i = 0;
      while (apf::MeshEntity* element = m_mesh->iterate(it)) {
        // One group id per element, tagged onto the region entity.
        m_mesh->setIntTag(element, groupTag, &groups[i]);
        i++;
      }
      m_mesh->end(it);
    }
  }
};

#endif // SERIAL_MESH_FILE_H
2 changes: 1 addition & 1 deletion src/meshreader/GMSHBuilder.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ template <std::size_t D> class GMSHBuilder : public tndm::GMSHMeshBuilder {
std::vector<int> bcs;

void setNumVertices(std::size_t numVertices) { vertices.resize(numVertices); }
void setVertex(long id, std::array<double, 3> const& x) {
void setVertex(long id, std::array<double, D> const& x) {
for (std::size_t i = 0; i < D; ++i) {
vertices[id][i] = x[i];
}
Expand Down
8 changes: 4 additions & 4 deletions src/meshreader/ParallelGMSHReader.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@
#include <cstddef>
#include <vector>

#include "aux/Distributor.h"

namespace puml {

template <typename P> class ParallelGMSHReader {
Expand Down Expand Up @@ -40,7 +42,7 @@ template <typename P> class ParallelGMSHReader {
}

MPI_Bcast(&nVertices_, 1, tndm::mpi_type_t<decltype(nVertices_)>(), 0, comm_);
MPI_Bcast(&nElements_, 1, tndm::mpi_type_t<decltype(nVertices_)>(), 0, comm_);
MPI_Bcast(&nElements_, 1, tndm::mpi_type_t<decltype(nElements_)>(), 0, comm_);
}

[[nodiscard]] std::size_t nVertices() const { return nVertices_; }
Expand Down Expand Up @@ -140,13 +142,11 @@ template <typename P> class ParallelGMSHReader {
MPI_Comm_rank(comm_, &rank);
MPI_Comm_size(comm_, &procs);

const std::size_t chunkSize = (numElements + procs - 1) / procs;
auto sendcounts = std::vector<int>(procs);
auto displs = std::vector<int>(procs + 1);
std::fill(sendcounts.begin(), sendcounts.end(), numPerElement * chunkSize);
sendcounts.back() = numPerElement * (numElements - (procs - 1u) * chunkSize);
displs[0] = 0;
for (int i = 0; i < procs; ++i) {
sendcounts[i] = numPerElement * getChunksize(numElements, i, procs);
displs[i + 1] = displs[i] + sendcounts[i];
}

Expand Down
21 changes: 5 additions & 16 deletions src/meshreader/ParallelGambitReader.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@

#include "GambitReader.h"
#include "ParallelMeshReader.h"
#include "aux/Distributor.h"

#include "third_party/MPITraits.h"

Expand All @@ -37,7 +38,7 @@ class ParallelGambitReader : public ParallelMeshReader<GambitReader> {
* This is a collective operation.
*/
void readGroups(int* groups) {
std::size_t chunkSize = (nElements() + m_nProcs - 1) / m_nProcs;
std::size_t chunkSize = getChunksize(nElements(), m_rank, m_nProcs);

if (m_rank == 0) {
std::size_t maxChunkSize = chunkSize;
Expand All @@ -50,9 +51,7 @@ class ParallelGambitReader : public ParallelMeshReader<GambitReader> {
for (int i = 0; i < m_nProcs; i++) {
logInfo() << "Reading group information part" << (i + 1) << "of" << m_nProcs;

if (i == m_nProcs - 1) {
chunkSize = nElements() - (m_nProcs - 1) * chunkSize;
}
chunkSize = getChunksize(nElements(), i, m_nProcs);

m_serialReader.readGroups(i * maxChunkSize, chunkSize, map.data());

Expand Down Expand Up @@ -92,10 +91,6 @@ class ParallelGambitReader : public ParallelMeshReader<GambitReader> {

MPI_Waitall((m_nProcs - 1) * 2, requests.data(), MPI_STATUSES_IGNORE);
} else {
if (m_rank == m_nProcs - 1) {
chunkSize = nElements() - (m_nProcs - 1) * chunkSize;
}

// Allocate enough memory
std::vector<int> buf(chunkSize * 2);

Expand Down Expand Up @@ -126,7 +121,7 @@ class ParallelGambitReader : public ParallelMeshReader<GambitReader> {
* @todo Only tetrahedral meshes are supported
*/
void readBoundaries(int* boundaries) {
std::size_t chunkSize = (nElements() + m_nProcs - 1) / m_nProcs;
std::size_t chunkSize = getChunksize(nBoundaries(), m_rank, m_nProcs);

if (m_rank == 0) {
std::size_t maxChunkSize = chunkSize;
Expand All @@ -140,9 +135,7 @@ class ParallelGambitReader : public ParallelMeshReader<GambitReader> {
for (std::size_t i = 0; i < nChunks; i++) {
logInfo() << "Reading boundary conditions part" << (i + 1) << "of" << nChunks;

if (i == nChunks - 1) {
chunkSize = nBoundaries() - (nChunks - 1) * chunkSize;
}
chunkSize = getChunksize(nBoundaries(), i, m_nProcs);

m_serialReader.readBoundaries(i * maxChunkSize, chunkSize, faces.data());

Expand Down Expand Up @@ -188,10 +181,6 @@ class ParallelGambitReader : public ParallelMeshReader<GambitReader> {
}
MPI_Waitall(m_nProcs - 1, requests.data(), MPI_STATUSES_IGNORE);
} else {
if (m_rank == m_nProcs - 1) {
chunkSize = nElements() - (m_nProcs - 1) * chunkSize;
}

// Allocate enough memory
std::vector<int> buf(chunkSize * 2);

Expand Down
Loading

0 comments on commit cef998a

Please sign in to comment.