From cef998a6885df61f8776834281fa97167d5151c3 Mon Sep 17 00:00:00 2001
From: David Schneller
Date: Sat, 28 Oct 2023 16:36:33 +0200
Subject: [PATCH] Make Serial Mesh files work without APF

---
 src/aux/Distributor.h                 |  12 ++
 src/input/SerialMeshFile.h            |  86 ++++-----------
 src/input/SerialMeshFileApf.h         | 151 ++++++++++++++++++++++++++
 src/meshreader/GMSHBuilder.h          |   2 +-
 src/meshreader/ParallelGMSHReader.h   |   8 +-
 src/meshreader/ParallelGambitReader.h |  21 +---
 src/pumgen.cpp                        |   8 +-
 7 files changed, 196 insertions(+), 92 deletions(-)
 create mode 100644 src/aux/Distributor.h
 create mode 100644 src/input/SerialMeshFileApf.h

diff --git a/src/aux/Distributor.h b/src/aux/Distributor.h
new file mode 100644
index 0000000..b5918a9
--- /dev/null
+++ b/src/aux/Distributor.h
@@ -0,0 +1,12 @@
+#ifndef PUMGEN_AUX_DISTRIBUTOR_H_
+#define PUMGEN_AUX_DISTRIBUTOR_H_
+
+#include <cstddef>
+
+// no namespace for now
+
+constexpr std::size_t getChunksize(std::size_t total, int rank, int size) {
+  return (total / size) + (total % size > rank);
+}
+
+#endif // PUMGEN_AUX_DISTRIBUTOR_H_
diff --git a/src/input/SerialMeshFile.h b/src/input/SerialMeshFile.h
index 5377314..3332710 100644
--- a/src/input/SerialMeshFile.h
+++ b/src/input/SerialMeshFile.h
@@ -18,18 +18,20 @@
 #endif // PARALLEL
 
 #include "ApfConvertWrapper.h"
+#include "aux/Distributor.h"
 #include <apfMDS.h>
 #include <apfMesh2.h>
 #include <gmi_mesh.h>
 #include <gmi_null.h>
 #include <vector>
 
-#include "MeshInput.h"
+#include "MeshData.h"
+#include "utils/logger.h"
 
 /**
  * Read a mesh from a serial file
  */
-template <typename T> class SerialMeshFile : public MeshInput {
+template <typename T> class SerialMeshFile : public FullStorageMeshData {
   private:
 #ifdef PARALLEL
   MPI_Comm m_comm;
@@ -73,77 +75,27 @@ template <typename T> class SerialMeshFile : public FullStorageMeshData {
     const std::size_t nVertices = m_meshReader.nVertices();
     const std::size_t nElements = m_meshReader.nElements();
 
-    std::size_t nLocalVertices = (nVertices + m_nProcs - 1) / m_nProcs;
-    std::size_t nLocalElements = (nElements + m_nProcs - 1) / m_nProcs;
-    if (m_rank == m_nProcs - 1) {
-      nLocalVertices = nVertices - (m_nProcs - 1) * nLocalVertices;
-      nLocalElements = nElements - (m_nProcs - 1) * nLocalElements;
-    }
+    const std::size_t nLocalVertices = getChunksize(nVertices, m_rank, m_nProcs);
+    const std::size_t nLocalElements = getChunksize(nElements, m_rank, m_nProcs);
 
-    gmi_register_null();
-    gmi_model* model = gmi_load(".null");
-    m_mesh = apf::makeEmptyMdsMesh(model, 3, false);
-
-    // Create elements
-    apf::GlobalToVert vertMap;
-    {
-      std::vector<ElementID> elements;
-      {
-        std::vector<int> elementsInt(nLocalElements * 4);
-        m_meshReader.readElements(elementsInt.data());
-        elements = std::vector<ElementID>(elementsInt.begin(), elementsInt.end());
-      }
-      logInfo(m_rank) << "Create APF connectivity";
-      apf::construct(m_mesh, elements.data(), nLocalElements, apf::Mesh::TET, vertMap);
-    }
+    setup(nLocalElements, nLocalVertices);
 
-    apf::alignMdsRemotes(m_mesh);
-    apf::deriveMdsModel(m_mesh);
+    logInfo(m_rank) << "Read vertex coordinates";
+    m_meshReader.readVertices(geometryData.data());
 
-    // Set vertices
-    {
-      std::vector<double> vertices(nLocalVertices * 3);
-      m_meshReader.readVertices(vertices.data());
-      logInfo(m_rank) << "Set coordinates in APF";
-      apf::setCoords(m_mesh, vertices.data(), nLocalVertices, vertMap);
-    }
+    logInfo(m_rank) << "Read cell vertices";
+    m_meshReader.readElements(connectivityData.data());
 
-    // Set boundaries
-    apf::MeshTag* boundaryTag = m_mesh->createIntTag("boundary condition", 1);
-    {
-      std::vector<int> boundaries(nLocalElements * 4);
-      m_meshReader.readBoundaries(boundaries.data());
-      apf::MeshIterator* it = m_mesh->begin(3);
-      std::size_t i = 0;
-      while (apf::MeshEntity* element = m_mesh->iterate(it)) {
-        apf::Adjacent adjacent;
-        m_mesh->getAdjacent(element, 2, adjacent);
-
-        for (int j = 0; j < 4; j++) {
-          if (!boundaries[i * 4 + j]) {
-            continue;
-          }
-
-          m_mesh->setIntTag(adjacent[j], boundaryTag, &boundaries[i * 4 + j]);
-        }
-
-        i++;
-      }
-      m_mesh->end(it);
-    }
+    logInfo(m_rank) << "Read cell groups";
+    m_meshReader.readGroups(groupData.data());
 
-    // Set groups
-    apf::MeshTag* groupTag = m_mesh->createIntTag("group", 1);
-    {
-      std::vector<int> groups(nLocalElements);
-      m_meshReader.readGroups(groups.data());
-      apf::MeshIterator* it = m_mesh->begin(3);
-      std::size_t i = 0;
-      while (apf::MeshEntity* element = m_mesh->iterate(it)) {
-        m_mesh->setIntTag(element, groupTag, &groups[i]);
-        i++;
+    logInfo(m_rank) << "Read boundary conditions";
+    std::vector<int> preBoundaryData(nLocalElements * 4);
+    m_meshReader.readBoundaries(preBoundaryData.data());
+    for (std::size_t i = 0; i < nLocalElements; ++i) {
+      for (int j = 0; j < 4; ++j) {
+        setBoundary(i, j, preBoundaryData[4 * i + j]);
       }
-      m_mesh->end(it);
     }
   }
 };
diff --git a/src/input/SerialMeshFileApf.h b/src/input/SerialMeshFileApf.h
new file mode 100644
index 0000000..10ed374
--- /dev/null
+++ b/src/input/SerialMeshFileApf.h
@@ -0,0 +1,151 @@
+/**
+ * @file
+ * This file is part of PUMGen
+ *
+ * For conditions of distribution and use, please see the copyright
+ * notice in the file 'COPYING' at the root directory of this package
+ * and the copyright notice at https://github.com/SeisSol/PUMGen
+ *
+ * @copyright 2017 Technical University of Munich
+ * @author Sebastian Rettenberger
+ */
+
+#ifndef SERIAL_MESH_FILE_H
+#define SERIAL_MESH_FILE_H
+
+#ifdef PARALLEL
+#include <mpi.h>
+#endif // PARALLEL
+
+#include "ApfConvertWrapper.h"
+#include "aux/Distributor.h"
+#include <apfMDS.h>
+#include <apfMesh2.h>
+#include <gmi_mesh.h>
+#include <gmi_null.h>
+#include <vector>
+
+#include "MeshInput.h"
+
+/**
+ * Read a mesh from a serial file
+ */
+template <typename T> class SerialMeshFile : public MeshInput {
+  private:
+#ifdef PARALLEL
+  MPI_Comm m_comm;
+#endif // PARALLEL
+
+  int m_rank;
+  int m_nProcs;
+
+  T m_meshReader;
+
+  public:
+#ifdef PARALLEL
+  SerialMeshFile(const char* meshFile, MPI_Comm comm = MPI_COMM_WORLD)
+      : m_comm(comm), m_meshReader(comm) {
+    init();
+    open(meshFile);
+  }
+#else // PARALLEL
+  SerialMeshFile(const char* meshFile) {
+    init();
+    open(meshFile);
+  }
+#endif // PARALLEL
+
+  private:
+  /**
+   * Sets some parameters (called from the constructor)
+   */
+  void init() {
+#ifdef PARALLEL
+    MPI_Comm_rank(m_comm, &m_rank);
+    MPI_Comm_size(m_comm, &m_nProcs);
+#else // PARALLEL
+    m_rank = 0;
+    m_nProcs = 1;
+#endif // PARALLEL
+  }
+
+  void open(const char* meshFile) {
+    m_meshReader.open(meshFile);
+
+    const std::size_t nVertices = m_meshReader.nVertices();
+    const std::size_t nElements = m_meshReader.nElements();
+    const std::size_t nLocalVertices = getChunksize(nVertices, m_rank, m_nProcs);
+    const std::size_t nLocalElements = getChunksize(nElements, m_rank, m_nProcs);
+
+    gmi_register_null();
+    gmi_model* model = gmi_load(".null");
+    m_mesh = apf::makeEmptyMdsMesh(model, 3, false);
+
+    // Create elements
+    logInfo(m_rank) << "Handle cell connectivity";
+    apf::GlobalToVert vertMap;
+    {
+      std::vector<ElementID> elements;
+      {
+        std::vector<int> elementsInt(nLocalElements * 4);
+        m_meshReader.readElements(elementsInt.data());
+        elements = std::vector<ElementID>(elementsInt.begin(), elementsInt.end());
+      }
+      logInfo(m_rank) << "Create APF connectivity";
+      apf::construct(m_mesh, elements.data(), nLocalElements, apf::Mesh::TET, vertMap);
+    }
+
+    apf::alignMdsRemotes(m_mesh);
+    apf::deriveMdsModel(m_mesh);
+
+    // Set vertices
+    logInfo(m_rank) << "Handle vertex coordinates";
+    {
+      std::vector<double> vertices(nLocalVertices * 3);
+      m_meshReader.readVertices(vertices.data());
+      apf::setCoords(m_mesh, vertices.data(), nLocalVertices, vertMap);
+    }
+
+    // Set boundaries
+    logInfo(m_rank) << "Handle boundary conditions";
+    apf::MeshTag* boundaryTag = m_mesh->createIntTag("boundary condition", 1);
+    {
+      std::vector<int> boundaries(nLocalElements * 4);
+      m_meshReader.readBoundaries(boundaries.data());
+      apf::MeshIterator* it = m_mesh->begin(3);
+      std::size_t i = 0;
+      while (apf::MeshEntity* element = m_mesh->iterate(it)) {
+        apf::Adjacent adjacent;
+        m_mesh->getAdjacent(element, 2, adjacent);
+
+        for (int j = 0; j < 4; j++) {
+          if (!boundaries[i * 4 + j]) {
+            continue;
+          }
+
+          m_mesh->setIntTag(adjacent[j], boundaryTag, &boundaries[i * 4 + j]);
+        }
+
+        i++;
+      }
+      m_mesh->end(it);
+    }
+
+    // Set groups
+    logInfo(m_rank) << "Handle cell groups";
+    apf::MeshTag* groupTag = m_mesh->createIntTag("group", 1);
+    {
+      std::vector<int> groups(nLocalElements);
+      m_meshReader.readGroups(groups.data());
+      apf::MeshIterator* it = m_mesh->begin(3);
+      std::size_t i = 0;
+      while (apf::MeshEntity* element = m_mesh->iterate(it)) {
+        m_mesh->setIntTag(element, groupTag, &groups[i]);
+        i++;
+      }
+      m_mesh->end(it);
+    }
+  }
+};
+
+#endif // SERIAL_MESH_FILE_H
diff --git a/src/meshreader/GMSHBuilder.h b/src/meshreader/GMSHBuilder.h
index 1224920..4bc11be 100644
--- a/src/meshreader/GMSHBuilder.h
+++ b/src/meshreader/GMSHBuilder.h
@@ -36,7 +36,7 @@ template <std::size_t D> class GMSHBuilder : public tndm::GMSHMeshBuilder {
   std::vector<int> bcs;
 
   void setNumVertices(std::size_t numVertices) { vertices.resize(numVertices); }
-  void setVertex(long id, std::array<double, 3> const& x) {
+  void setVertex(long id, std::array<double, D> const& x) {
     for (std::size_t i = 0; i < D; ++i) {
       vertices[id][i] = x[i];
     }
diff --git a/src/meshreader/ParallelGMSHReader.h b/src/meshreader/ParallelGMSHReader.h
index 66b2902..eed6677 100644
--- a/src/meshreader/ParallelGMSHReader.h
+++ b/src/meshreader/ParallelGMSHReader.h
@@ -11,6 +11,8 @@
 #include <cstddef>
 #include <vector>
 
+#include "aux/Distributor.h"
+
 namespace puml {
 
 template <typename P> class ParallelGMSHReader {
@@ -40,7 +42,7 @@
     }
 
     MPI_Bcast(&nVertices_, 1, tndm::mpi_type_t<std::size_t>(), 0, comm_);
-    MPI_Bcast(&nElements_, 1, tndm::mpi_type_t<unsigned long>(), 0, comm_);
+    MPI_Bcast(&nElements_, 1, tndm::mpi_type_t<std::size_t>(), 0, comm_);
   }
 
   [[nodiscard]] std::size_t nVertices() const { return nVertices_; }
@@ -140,13 +142,11 @@
     MPI_Comm_rank(comm_, &rank);
     MPI_Comm_size(comm_, &procs);
 
-    const std::size_t chunkSize = (numElements + procs - 1) / procs;
     auto sendcounts = std::vector<int>(procs);
     auto displs = std::vector<int>(procs + 1);
-    std::fill(sendcounts.begin(), sendcounts.end(), numPerElement * chunkSize);
-    sendcounts.back() = numPerElement * (numElements - (procs - 1u) * chunkSize);
     displs[0] = 0;
     for (int i = 0; i < procs; ++i) {
+      sendcounts[i] = numPerElement * getChunksize(numElements, i, procs);
       displs[i + 1] = displs[i] + sendcounts[i];
     }
 
diff --git a/src/meshreader/ParallelGambitReader.h b/src/meshreader/ParallelGambitReader.h
index 2bfc970..b5b3d8c 100644
--- a/src/meshreader/ParallelGambitReader.h
+++ b/src/meshreader/ParallelGambitReader.h
@@ -17,6 +17,7 @@
 
 #include "GambitReader.h"
 #include "ParallelMeshReader.h"
+#include "aux/Distributor.h"
 
 #include "third_party/MPITraits.h"
 
@@ -37,7 +38,7 @@ class ParallelGambitReader : public ParallelMeshReader<GambitReader> {
    * This is a collective operation.
    */
   void readGroups(int* groups) {
-    std::size_t chunkSize = (nElements() + m_nProcs - 1) / m_nProcs;
+    std::size_t chunkSize = getChunksize(nElements(), m_rank, m_nProcs);
 
     if (m_rank == 0) {
       std::size_t maxChunkSize = chunkSize;
@@ -50,9 +51,7 @@
       for (int i = 0; i < m_nProcs; i++) {
         logInfo() << "Reading group information part" << (i + 1) << "of" << m_nProcs;
 
-        if (i == m_nProcs - 1) {
-          chunkSize = nElements() - (m_nProcs - 1) * chunkSize;
-        }
+        chunkSize = getChunksize(nElements(), i, m_nProcs);
 
         m_serialReader.readGroups(i * maxChunkSize, chunkSize, map.data());
 
@@ -92,10 +91,6 @@
 
       MPI_Waitall((m_nProcs - 1) * 2, requests.data(), MPI_STATUSES_IGNORE);
     } else {
-      if (m_rank == m_nProcs - 1) {
-        chunkSize = nElements() - (m_nProcs - 1) * chunkSize;
-      }
-
       // Allocate enough memory
       std::vector<int> buf(chunkSize * 2);
 
@@ -126,7 +121,7 @@
    * @todo Only tetrahedral meshes are supported
    */
   void readBoundaries(int* boundaries) {
-    std::size_t chunkSize = (nElements() + m_nProcs - 1) / m_nProcs;
+    std::size_t chunkSize = getChunksize(nBoundaries(), m_rank, m_nProcs);
 
     if (m_rank == 0) {
       std::size_t maxChunkSize = chunkSize;
@@ -140,9 +135,7 @@
       for (std::size_t i = 0; i < nChunks; i++) {
         logInfo() << "Reading boundary conditions part" << (i + 1) << "of" << nChunks;
 
-        if (i == nChunks - 1) {
-          chunkSize = nBoundaries() - (nChunks - 1) * chunkSize;
-        }
+        chunkSize = getChunksize(nBoundaries(), i, m_nProcs);
 
         m_serialReader.readBoundaries(i * maxChunkSize, chunkSize, faces.data());
 
@@ -188,10 +181,6 @@
       }
       MPI_Waitall(m_nProcs - 1, requests.data(), MPI_STATUSES_IGNORE);
     } else {
-      if (m_rank == m_nProcs - 1) {
-        chunkSize = nElements() - (m_nProcs - 1) * chunkSize;
-      }
-
       // Allocate enough memory
       std::vector<int> buf(chunkSize * 2);
 
diff --git a/src/pumgen.cpp b/src/pumgen.cpp
index e202192..b3b7fc3 100644
--- a/src/pumgen.cpp
+++ b/src/pumgen.cpp
@@ -300,19 +300,19 @@
   switch (args.getArgument<int>("source", 0)) {
   case 0:
     logInfo(rank) << "Using Gambit mesh";
-    // meshInput = new SerialMeshFile<ParallelGambitReader>(inputFile);
+    meshInput = new SerialMeshFile<ParallelGambitReader>(inputFile);
     break;
   case 1:
     logInfo(rank) << "Using Fidap mesh";
-    // meshInput = new SerialMeshFile<ParallelFidapReader>(inputFile);
+    meshInput = new SerialMeshFile<ParallelFidapReader>(inputFile);
     break;
   case 2:
     logInfo(rank) << "Using GMSH mesh format 2 (msh2) mesh";
-    // meshInput = new SerialMeshFile<puml::ParallelGMSHReader<tndm::GMSHParser>>(inputFile);
+    meshInput = new SerialMeshFile<puml::ParallelGMSHReader<tndm::GMSHParser>>(inputFile);
    break;
   case 3:
     logInfo(rank) << "Using GMSH mesh format 4 (msh4) mesh";
-    // meshInput = new SerialMeshFile<puml::ParallelGMSHReader<puml::GMSH4Parser>>(inputFile);
+    meshInput = new SerialMeshFile<puml::ParallelGMSHReader<puml::GMSH4Parser>>(inputFile);
     break;
   case 4:
 #ifdef USE_NETCDF
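
A note on the chunking scheme this patch introduces (editorial illustration, not part of the patch): getChunksize() in src/aux/Distributor.h replaces the ceil-division pattern `chunkSize = (total + size - 1) / size` followed by a last-rank remainder fixup. The old pattern can misbehave when the item count is small relative to the rank count: for total = 4 and size = 6, the ceil division yields 1, and the last rank's remainder 4 - 5 * 1 underflows the unsigned type. getChunksize() instead gives the first (total % size) ranks one extra item, so every chunk is computed directly from the rank index, chunks differ by at most one, and they always sum to the total. The standalone sketch below demonstrates this; the main() driver is illustrative only, while getChunksize() itself is copied verbatim from the patch.

#include <cstddef>
#include <iostream>

// Copied from src/aux/Distributor.h: ranks below (total % size)
// receive one extra item each.
constexpr std::size_t getChunksize(std::size_t total, int rank, int size) {
  return (total / size) + (total % size > rank);
}

int main() {
  const std::size_t total = 10;
  const int size = 4;

  // Expected chunks: 3 3 2 2 -- sizes differ by at most one.
  std::size_t sum = 0;
  for (int rank = 0; rank < size; ++rank) {
    const std::size_t chunk = getChunksize(total, rank, size);
    sum += chunk;
    std::cout << "rank " << rank << ": " << chunk << " items\n";
  }

  // The chunks always cover all items exactly: sum == total.
  std::cout << "sum = " << sum << " (total = " << total << ")\n";
  return 0;
}

This property is also what lets ParallelGMSHReader.h compute the MPI_Scatterv send counts in a single loop: sendcounts[i] = numPerElement * getChunksize(numElements, i, procs) is exact for every rank, so the old special case for the final rank disappears.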