src/TNL/Communicators/MpiCommunicator.h  +87 −19

@@ -10,16 +10,17 @@
 #pragma once

-#ifdef HAVE_MPI
 #include <iostream>
 #include <fstream>

+#ifdef HAVE_MPI
 #include <mpi.h>
+#endif

 #include <TNL/String.h>
 #include <TNL/Logger.h>
 #include <TNL/Config/ConfigDescription.h>

 namespace TNL {
 namespace Communicators {
@@ -27,7 +28,7 @@ class MpiCommunicator
 {
    public: // TODO: this was private

+#ifdef HAVE_MPI
       inline static MPI_Datatype MPIDataType( const signed char* ) { return MPI_CHAR; };
       inline static MPI_Datatype MPIDataType( const signed short int* ) { return MPI_SHORT; };
       inline static MPI_Datatype MPIDataType( const signed int* ) { return MPI_INT; };
@@ -41,6 +42,9 @@ class MpiCommunicator
       inline static MPI_Datatype MPIDataType( const long double* ) { return MPI_LONG_DOUBLE; };

       using Request = MPI::Request;
+#else
+      using Request = int;
+#endif

       static bool isDistributed()
       {
@@ -58,8 +62,11 @@ class MpiCommunicator
                          const String& prefix = "" )
       {
+#ifdef HAVE_MPI
+         if(IsInitialized()) //i.e. - isUsed
          {
             redirect = parameters.getParameter< bool >( "redirect-mpi-output" );
             setupRedirection();
          }
+#endif
          return true;
       }
@@ -80,6 +87,7 @@ class MpiCommunicator
       static void setupRedirection()
       {
+#ifdef HAVE_MPI
          if(isDistributed() && redirect )
         {
            //redirect all stdout to files; only rank 0 keeps writing to the console
@@ -96,10 +104,12 @@ class MpiCommunicator
            std::cout.rdbuf(psbuf);
         }
      }
+#endif
      };

      static void Finalize()
      {
+#ifdef HAVE_MPI
         if(isDistributed())
         {
            if(MPI::COMM_WORLD.Get_rank()!=0)
@@ -109,21 +119,39 @@ class MpiCommunicator
            }
         }
         MPI::Finalize();
+#endif
      };

      static bool IsInitialized()
      {
-        return MPI::Is_initialized();
+#ifdef HAVE_MPI
+        return MPI::Is_initialized() && !MPI::Is_finalized();
+#else
+        return false;
+#endif
      };

      static int GetRank()
      {
-        //CHECK_INICIALIZED_RET(MPI::COMM_WORLD.Get_rank());
+#ifdef HAVE_MPI
+        TNL_ASSERT_TRUE(IsInitialized(), "Fatal Error - MPI communicator is not initialized");
         return MPI::COMM_WORLD.Get_rank();
+#else
+        TNL_ASSERT_TRUE(false, "Fatal Error - MPI is not compiled");
+        return 0;
+#endif
      };

      static int GetSize()
      {
+#ifdef HAVE_MPI
+        TNL_ASSERT_TRUE(IsInitialized(), "Fatal Error - MPI communicator is not initialized");
         return MPI::COMM_WORLD.Get_size();
+#else
+        TNL_ASSERT_TRUE(false, "Fatal Error - MPI is not compiled");
+        return 0;
+#endif
      };

      //dim - number of dimensions, distr - array with the suggested distribution, 0 means compute it
@@ -131,6 +159,7 @@ class MpiCommunicator
      //more information in the MPI documentation
      static void DimsCreate(int nproc, int dim, int *distr)
      {
+#ifdef HAVE_MPI
         /***HACK for linear distribution***/
         int sum=0;
         for(int i=0;i<dim;i++)
@@ -147,34 +176,63 @@ class MpiCommunicator
         /***END OF HACK***/

         MPI_Dims_create(nproc, dim, distr);
+#endif
      };

      static void Barrier()
      {
-        MPI::COMM_WORLD.Barrier();
+#ifdef HAVE_MPI
+        TNL_ASSERT_TRUE(IsInitialized(), "Fatal Error - MPI communicator is not initialized");
+        MPI::COMM_WORLD.Barrier();
+#else
+        TNL_ASSERT_TRUE(false, "Fatal Error - MPI is not compiled");
+#endif
      };

      template <typename T>
      static Request ISend( const T *data, int count, int dest)
      {
+#ifdef HAVE_MPI
+        TNL_ASSERT_TRUE(IsInitialized(), "Fatal Error - MPI communicator is not initialized");
         return MPI::COMM_WORLD.Isend((void*) data, count, MPIDataType(data), dest, 0);
+#else
+        TNL_ASSERT_TRUE(false, "Fatal Error - MPI is not compiled");
+        return 0;
+#endif
      }

      template <typename T>
      static Request IRecv( const T *data, int count, int src)
      {
+#ifdef HAVE_MPI
+        TNL_ASSERT_TRUE(IsInitialized(), "Fatal Error - MPI communicator is not initialized");
         return MPI::COMM_WORLD.Irecv((void*) data, count, MPIDataType(data), src, 0);
+#else
+        TNL_ASSERT_TRUE(false, "Fatal Error - MPI is not compiled");
+        return 0;
+#endif
      }

      static void WaitAll(Request *reqs, int length)
      {
+#ifdef HAVE_MPI
+        TNL_ASSERT_TRUE(IsInitialized(), "Fatal Error - MPI communicator is not initialized");
         MPI::Request::Waitall(length, reqs);
+#else
+        TNL_ASSERT_TRUE(false, "Fatal Error - MPI is not compiled");
+#endif
      };

      template< typename T >
      static void Bcast( T& data, int count, int root)
      {
+#ifdef HAVE_MPI
+        TNL_ASSERT_TRUE(IsInitialized(), "Fatal Error - MPI communicator is not initialized");
         MPI::COMM_WORLD.Bcast((void*) &data, count, MPIDataType(data), root);
+#else
+        TNL_ASSERT_TRUE(false, "Fatal Error - MPI is not compiled");
+#endif
      }

      /* template< typename T >
@@ -204,22 +262,32 @@ class MpiCommunicator
         }
      }

+#ifdef HAVE_MPI
      static MPI::Request NullRequest;
+#else
+     static int NullRequest;
+#endif
      static std::streambuf *psbuf;
      static std::streambuf *backup;
      static std::ofstream filestr;
      static bool redirect;
      static bool inited;
 };

+#ifdef HAVE_MPI
 MPI::Request MpiCommunicator::NullRequest;
+#else
+int MpiCommunicator::NullRequest;
+#endif
 std::streambuf *MpiCommunicator::psbuf;
 std::streambuf *MpiCommunicator::backup;
 std::ofstream MpiCommunicator::filestr;
 bool MpiCommunicator::redirect;
 bool MpiCommunicator::inited;

 } // namespace Communicators
 } // namespace TNL

-#endif
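Note: the point of the guards above is that calling code keeps a single code path whether or not TNL is built with MPI — Request falls back to int and every wrapper asserts instead of breaking the build. A minimal sketch of the intended call pattern (the helper and buffer names are hypothetical, not part of this diff; in a non-MPI build it still compiles, though the asserts fire if it is actually reached):

   #include <TNL/Communicators/MpiCommunicator.h>

   using Communicator = TNL::Communicators::MpiCommunicator;

   // Swap 'count' doubles with the process 'neighbour'; both transfers are
   // non-blocking, so a symmetric exchange cannot deadlock.
   void swapWithNeighbour( const double* sendBuf, double* recvBuf,
                           int count, int neighbour )
   {
      Communicator::Request requests[ 2 ];
      requests[ 0 ] = Communicator::ISend( sendBuf, count, neighbour );
      requests[ 1 ] = Communicator::IRecv( recvBuf, count, neighbour );
      Communicator::WaitAll( requests, 2 );  // blocks until both complete
   }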
src/TNL/Meshes/DistributedMeshes/DistributedGridIO.h  +12 −9

@@ -11,7 +11,12 @@
 #pragma once

 #include <iostream>

+#ifdef HAVE_MPI
+#ifdef MPIIO
+#include <mpi.h>
+#endif
+#endif

 #include <TNL/File.h>
 #include <TNL/Communicators/MpiCommunicator.h>
@@ -153,6 +158,8 @@ class DistributedGridIO<MeshFunctionType,LocalCopy>
  * BAD IMPLEMENTATION, creating MPI types at every save! -- I don't want to contaminate more places with MPI...
  */
+#ifdef HAVE_MPI
+#ifdef MPIIO
 template<typename MeshFunctionType>
 class DistributedGridIO<MeshFunctionType,MpiIO>
 {
@@ -167,7 +174,7 @@ class DistributedGridIO<MeshFunctionType,MpiIO>
       static bool save(const String& fileName, MeshFunctionType &meshFunction)
       {
-#ifdef MPIIO
          auto *distrGrid=meshFunction.getMesh().getDistributedMesh();

          if(distrGrid==NULL) //not distributed
@@ -191,8 +198,7 @@ class DistributedGridIO<MeshFunctionType,MpiIO>
          int headerSize=0;

-         using Comm = typename TNL::Communicators::MpiCommunicator;
-         if(Comm::GetRank()==0)
+         if(Communicators::MpiCommunicator::GetRank()==0)
          {
             headerSize=writeMeshFunctionHeader(file,meshFunction,dataCount);
          }
@@ -212,10 +218,6 @@ class DistributedGridIO<MeshFunctionType,MpiIO>
          MPI_Type_free(&atype);
          MPI_Type_free(&ftype);
          return true;
-#else
-         std::cerr << "MPI-IO is not supported by your system." << std::endl;
-         return false;
-#endif
       };

       template<typename DitsributedGridType>
@@ -401,7 +403,8 @@ class DistributedGridIO<MeshFunctionType,MpiIO>
       };
 };
+#endif
+#endif

 }
 }
 }
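Note: since the MpiIO specialization is now compiled out entirely when HAVE_MPI or MPIIO is missing, the old runtime "MPI-IO is not supported" branch is gone, and a caller that wants a fallback has to pick one at compile time. A sketch of one way a call site could do that (saveMeshFunction is a hypothetical helper, not part of this diff):

   #include <TNL/Meshes/DistributedMeshes/DistributedGridIO.h>

   using namespace TNL;
   using namespace TNL::Meshes::DistributedMeshes;

   // Save a mesh function, preferring parallel MPI-IO when it is available.
   template< typename MeshFunction >
   bool saveMeshFunction( const String& fileName, MeshFunction& meshFunction )
   {
   #if defined( HAVE_MPI ) && defined( MPIIO )
      // Parallel path: every rank writes its own block through MPI-IO.
      return DistributedGridIO< MeshFunction, MpiIO >::save( fileName, meshFunction );
   #else
      // Fallback: copy to a local mesh function and write it sequentially.
      return DistributedGridIO< MeshFunction, LocalCopy >::save( fileName, meshFunction );
   #endif
   }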
src/TNL/Meshes/DistributedMeshes/DistributedGrid_2D.h  +1 −1

@@ -33,7 +33,7 @@ class DistributedMesh<Grid< 2, RealType, Device, Index >>
    public:

       DistributedMesh()
-      : isSet( false ), domainDecomposition( 0 ) {};
+      : domainDecomposition( 0 ), isSet( false ) {};

       void setDomainDecomposition( const CoordinatesType& domainDecomposition )
       {

src/TNL/Problems/HeatEquationProblem_impl.h  +2 −4

@@ -24,7 +24,7 @@
 #include "HeatEquationProblem.h"

-//#define MPIIO
+#define MPIIO
 #include <TNL/Meshes/DistributedMeshes/DistributedGridIO.h>
@@ -149,7 +149,7 @@ setInitialCondition( const Config::ParameterContainer& parameters,
    if(CommunicatorType::isDistributed())
    {
       std::cout<<"Nodes Distribution: " << uPointer->getMesh().getDistributedMesh()->printProcessDistr() << std::endl;
-      Meshes::DistributedMeshes::DistributedGridIO<MeshFunctionType,Meshes::DistributedMeshes::LocalCopy> ::load(initialConditionFile, *uPointer );
+      Meshes::DistributedMeshes::DistributedGridIO<MeshFunctionType,Meshes::DistributedMeshes::MpiIO> ::load(initialConditionFile, *uPointer );
       uPointer->template synchronize<CommunicatorType>();
    }
    else
@@ -252,9 +252,7 @@ getExplicitUpdate( const RealType& time,
    this->explicitUpdater.setDifferentialOperator( this->differentialOperatorPointer );
    this->explicitUpdater.setBoundaryConditions( this->boundaryConditionPointer );
    this->explicitUpdater.setRightHandSide( this->rightHandSidePointer );
-   std::cerr << "Starting updater ... " << std::endl;
    this->explicitUpdater.template update< typename Mesh::Cell, CommType >( time, tau, meshPointer, this->uPointer, fuPointer );
-   std::cerr << "Updater done ... " << std::endl;
 }

src/TNL/Solvers/PDE/ExplicitUpdater.h  +0 −2

@@ -149,10 +149,8 @@ class ExplicitUpdater
             ( meshPointer, userDataPointer );

-      std::cerr << __FILE__ << ":" << __LINE__ << "Starting synchronization..." << std::endl;
       if(CommunicatorType::isDistributed())
          fuPointer->template synchronize<CommunicatorType>();
-      std::cerr << __FILE__ << ":" << __LINE__ << "Synchronization done..." << std::endl;
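Note on the DistributedGrid_2D.h one-liner: member initializers always run in declaration order, not in the order they are written in the constructor, so the change presumably just matches the initializer list to the declarations and silences the compiler's -Wreorder warning. A standalone illustration (hypothetical struct, not from TNL):

   #include <iostream>

   struct Mesh
   {
      int domainDecomposition;   // declared first, so initialized first
      bool isSet;

      // Writing the list as ": isSet( false ), domainDecomposition( 0 )"
      // would behave identically but draws -Wreorder under -Wall.
      Mesh() : domainDecomposition( 0 ), isSet( false ) {}
   };

   int main()
   {
      Mesh m;
      std::cout << m.domainDecomposition << " " << m.isSet << "\n";  // prints "0 0"
   }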