Loading src/TNL/MPI.h +1 −0 Original line number Diff line number Diff line Loading @@ -22,6 +22,7 @@ #include "MPI/selectGPU.h" #include "MPI/Wrappers.h" #include "MPI/Utils.h" #include "MPI/Comm.h" #include "MPI/ScopedInitializer.h" #include "MPI/Config.h" #include "MPI/Print.h" src/TNL/MPI/Comm.h 0 → 100644 +249 −0 Original line number Diff line number Diff line // Copyright (c) 2004-2022 Tomáš Oberhuber et al. // // This file is part of TNL - Template Numerical Library (https://tnl-project.org/) // // SPDX-License-Identifier: MIT #pragma once #include <stdexcept> #include <memory> #include "Wrappers.h" namespace TNL { namespace MPI { /** * \brief An RAII wrapper for custom MPI communicators. * * This is an RAII wrapper for custom MPI communicators created by calls to * \ref MPI_Comm_create, \ref MPI_Comm_split, or similar functions. It is based * on \ref std::shared_ptr so it is copy-constructible and copy-assignable, copies of * the object represent the same communicator that is deallocated only when the * internal reference counter drops to zero. * * Note that predefined communicators (i.e. \ref MPI_COMM_WORLD, * \ref MPI_COMM_NULL and \ref MPI_COMM_SELF) can be used to initialize this * class, but other handles of the \ref MPI_Comm type _cannot_ be used to * initialize this class. * * This class follows the factory pattern, i.e. it provides static methods such * as \ref Comm::duplicate or \ref Comm::split that return an instance of a new * communicator. 
*/ class Comm { private: struct Wrapper { MPI_Comm comm = MPI_COMM_NULL; Wrapper() = default; Wrapper( const Wrapper& other ) = delete; Wrapper( Wrapper&& other ) = default; Wrapper& operator=( const Wrapper& other ) = delete; Wrapper& operator=( Wrapper&& other ) = default; Wrapper( MPI_Comm comm ) : comm( comm ) {} ~Wrapper() { #ifdef HAVE_MPI // cannot free a predefined handle if( comm != MPI_COMM_NULL && comm != MPI_COMM_WORLD && comm != MPI_COMM_SELF ) MPI_Comm_free( &comm ); #endif } }; std::shared_ptr< Wrapper > wrapper; //! \brief Internal constructor for the factory methods - initialization by the wrapper. Comm( std::shared_ptr< Wrapper >&& wrapper ) : wrapper( std::move( wrapper ) ) {} public: //! \brief Constructs an empty communicator with a null handle (`MPI_COMM_NULL`). Comm() = default; //! \brief Deleted copy-constructor. Comm( const Comm& other ) = default; //! \brief Default move-constructor. Comm( Comm&& other ) = default; //! \brief Deleted copy-assignment operator. Comm& operator=( const Comm& other ) = default; //! \brief Default move-assignment operator. Comm& operator=( Comm&& other ) = default; /** * \brief Constructs a communicator initialized by given predefined communicator. * * Note that only predefined communicators (i.e. \ref MPI_COMM_WORLD, * \ref MPI_COMM_NULL and \ref MPI_COMM_SELF) can be used to initialize this * class. Other handles of the \ref MPI_Comm type _cannot_ be used to * initialize this class. * * \throws std::logic_error when the \e comm handle is not a predefined * communicator. */ Comm( MPI_Comm comm ) { if( comm != MPI_COMM_NULL && comm != MPI_COMM_WORLD && comm != MPI_COMM_SELF ) throw std::logic_error( "Only predefined communicators (MPI_COMM_WORLD, MPI_COMM_NULL and " "MPI_COMM_SELF) can be used to initialize this class. Other " "handles of the MPI_Comm type *cannot* be used to initialize " "the TNL::MPI::Comm class." ); wrapper = std::make_shared< Wrapper >( comm ); } //! 
\brief Factory method – wrapper for \ref MPI_Comm_dup static Comm duplicate( MPI_Comm comm ) { #ifdef HAVE_MPI MPI_Comm newcomm; MPI_Comm_dup( comm, &newcomm ); return { std::make_shared< Wrapper >( newcomm ) }; #else return { std::make_shared< Wrapper >( comm ) }; #endif } //! \brief Non-static factory method – wrapper for \ref MPI_Comm_dup Comm duplicate() const { return duplicate( *this ); } //! \brief Factory method – wrapper for \ref MPI_Comm_split static Comm split( MPI_Comm comm, int color, int key ) { #ifdef HAVE_MPI MPI_Comm newcomm; MPI_Comm_split( comm, color, key, &newcomm ); return { std::make_shared< Wrapper >( newcomm ) }; #else return { std::make_shared< Wrapper >( comm ) }; #endif } //! \brief Non-static factory method – wrapper for \ref MPI_Comm_split Comm split( int color, int key ) const { return split( *this, color, key ); } //! \brief Factory method – wrapper for \ref MPI_Comm_split_type static Comm split_type( MPI_Comm comm, int split_type, int key, MPI_Info info ) { #ifdef HAVE_MPI MPI_Comm newcomm; MPI_Comm_split_type( comm, split_type, key, info, &newcomm ); return { std::make_shared< Wrapper >( newcomm ) }; #else return { std::make_shared< Wrapper >( comm ) }; #endif } //! \brief Non-static factory method – wrapper for \ref MPI_Comm_split_type Comm split_type( int split_type, int key, MPI_Info info ) const { return Comm::split_type( *this, split_type, key, info ); } /** * \brief Access the MPI communicator associated with this object. * * This routine permits the implicit conversion from \ref Comm to * \ref MPI_Comm. * * \b Warning: The obtained \ref MPI_Comm handle becomes invalid when the * originating \ref Comm object is destroyed. 
For example, the following * code is invalid, because the \ref Comm object managing the lifetime of * the communicator is destroyed as soon as it is cast to \ref MPI_Comm: * * \code{.cpp} * const MPI_Comm comm = MPI::Comm::duplicate( MPI_COMM_WORLD ); * const int nproc = MPI::GetSize( comm ); * \endcode */ operator const MPI_Comm&() const { return wrapper->comm; } //! \brief Determines the rank of the calling process in the communicator. int rank() const { return GetRank( *this ); } //! \brief Returns the size of the group associated with a communicator. int size() const { return GetSize( *this ); } //! \brief Compares two communicators – wrapper for \ref MPI_Comm_compare. int compare( MPI_Comm comm2 ) const { #ifdef HAVE_MPI int result; MPI_Comm_compare( *this, comm2, &result ); return result; #else return MPI_IDENT; #endif } /** * \brief Wait for all processes within a communicator to reach the barrier. * * This routine is a collective operation that blocks each process until all * processes have entered it, then releases all of the processes * "simultaneously". It is equivalent to calling \ref MPI_Barrier with the * MPI communicator associated with this object. */ void barrier() const { Barrier( *this ); } }; /** * \brief Returns a local rank ID of the current process within a group of * processes running on a shared-memory node. * * The given MPI communicator is split into groups according to the * `MPI_COMM_TYPE_SHARED` type (from MPI-3) and the rank ID of the process * within the group is returned. 
*/ inline int getRankOnNode( MPI_Comm communicator = MPI_COMM_WORLD ) { #ifdef HAVE_MPI const int rank = GetRank( communicator ); const MPI::Comm local_comm = MPI::Comm::split_type( communicator, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL ); return local_comm.rank(); #else return 0; #endif } } // namespace MPI } // namespace TNL src/TNL/MPI/DummyDefs.h +10 −0 Original line number Diff line number Diff line Loading @@ -9,6 +9,7 @@ #ifndef HAVE_MPI using MPI_Request = int; using MPI_Comm = int; using MPI_Info = int; enum MPI_Op { Loading @@ -26,6 +27,15 @@ enum MPI_Op MPI_MAXLOC, }; // Comparison results enum { MPI_IDENT, MPI_CONGRUENT, MPI_SIMILAR, MPI_UNEQUAL }; // MPI_Init_thread constants enum { Loading src/TNL/MPI/Utils.h +0 −27 Original line number Diff line number Diff line Loading @@ -42,33 +42,6 @@ restoreRedirection() } } /** * \brief Returns a local rank ID of the current process within a group of * processes running on a shared-memory node. * * The given MPI communicator is split into groups according to the * `MPI_COMM_TYPE_SHARED` type (from MPI-3) and the rank ID of the process * within the group is returned. */ inline int getRankOnNode( MPI_Comm communicator = MPI_COMM_WORLD ) { #ifdef HAVE_MPI const int rank = GetRank( communicator ); MPI_Comm local_comm; MPI_Comm_split_type( communicator, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL, &local_comm ); const int local_rank = GetRank( local_comm ); MPI_Comm_free( &local_comm ); return local_rank; #else return 0; #endif } /** * \brief Applies the given reduction operation to the values among all ranks * in the given communicator. 
Loading src/TNL/MPI/Wrappers.h +0 −12 Original line number Diff line number Diff line Loading @@ -134,18 +134,6 @@ GetSize( MPI_Comm communicator = MPI_COMM_WORLD ) // wrappers for MPI helper functions inline MPI_Comm Comm_split( MPI_Comm comm, int color, int key ) { #ifdef HAVE_MPI MPI_Comm newcomm; MPI_Comm_split( comm, color, key, &newcomm ); return newcomm; #else return comm; #endif } /** * \brief Wrapper for \ref MPI_Dims_create. * Loading Loading
src/TNL/MPI.h +1 −0 Original line number Diff line number Diff line Loading @@ -22,6 +22,7 @@ #include "MPI/selectGPU.h" #include "MPI/Wrappers.h" #include "MPI/Utils.h" #include "MPI/Comm.h" #include "MPI/ScopedInitializer.h" #include "MPI/Config.h" #include "MPI/Print.h"
src/TNL/MPI/Comm.h 0 → 100644 +249 −0 Original line number Diff line number Diff line // Copyright (c) 2004-2022 Tomáš Oberhuber et al. // // This file is part of TNL - Template Numerical Library (https://tnl-project.org/) // // SPDX-License-Identifier: MIT #pragma once #include <stdexcept> #include <memory> #include "Wrappers.h" namespace TNL { namespace MPI { /** * \brief An RAII wrapper for custom MPI communicators. * * This is an RAII wrapper for custom MPI communicators created by calls to * \ref MPI_Comm_create, \ref MPI_Comm_split, or similar functions. It is based * on \ref std::shared_ptr so copy-constructible and copy-assignable, copies of * the object represent the same communicator that is deallocated only when the * internal reference counter drops to zero. * * Note that predefined communicators (i.e. \ref MPI_COMM_WORLD, * \ref MPI_COMM_NULL and \ref MPI_COMM_SELF) can be used to initialize this * class, but other handles of the \ref MPI_Comm type _cannot_ be used to * initialize this class. * * This class follows the factory pattern, i.e. it provides static methods such * as \ref Comm::create or \ref Comm::split that return an instance of a new * communicator. */ class Comm { private: struct Wrapper { MPI_Comm comm = MPI_COMM_NULL; Wrapper() = default; Wrapper( const Wrapper& other ) = delete; Wrapper( Wrapper&& other ) = default; Wrapper& operator=( const Wrapper& other ) = delete; Wrapper& operator=( Wrapper&& other ) = default; Wrapper( MPI_Comm comm ) : comm( comm ) {} ~Wrapper() { #ifdef HAVE_MPI // cannot free a predefined handle if( comm != MPI_COMM_NULL && comm != MPI_COMM_WORLD && comm != MPI_COMM_SELF ) MPI_Comm_free( &comm ); #endif } }; std::shared_ptr< Wrapper > wrapper; //! \brief Internal constructor for the factory methods - initialization by the wrapper. Comm( std::shared_ptr< Wrapper >&& wrapper ) : wrapper( std::move( wrapper ) ) {} public: //! \brief Constructs an empty communicator with a null handle (`MPI_COMM_NULL`). 
Comm() = default; //! \brief Deleted copy-constructor. Comm( const Comm& other ) = default; //! \brief Default move-constructor. Comm( Comm&& other ) = default; //! \brief Deleted copy-assignment operator. Comm& operator=( const Comm& other ) = default; //! \brief Default move-assignment operator. Comm& operator=( Comm&& other ) = default; /** * \brief Constructs a communicator initialized by given predefined communicator. * * Note that only predefined communicators (i.e. \ref MPI_COMM_WORLD, * \ref MPI_COMM_NULL and \ref MPI_COMM_SELF) can be used to initialize this * class. Other handles of the \ref MPI_Comm type _cannot_ be used to * initialize this class. * * \throws std::logic_error when the \e comm handle is not a predefined * communicator. */ Comm( MPI_Comm comm ) { if( comm != MPI_COMM_NULL && comm != MPI_COMM_WORLD && comm != MPI_COMM_SELF ) throw std::logic_error( "Only predefined communicators (MPI_COMM_WORLD, MPI_COMM_NULL and " "MPI_COMM_SELF) can be used to initialize this class. Other " "handles of the MPI_Comm type *cannot* be used to initialize " "the TNL::MPI::Comm class." ); wrapper = std::make_shared< Wrapper >( comm ); } //! \brief Factory method – wrapper for \ref MPI_Comm_dup static Comm duplicate( MPI_Comm comm ) { #ifdef HAVE_MPI MPI_Comm newcomm; MPI_Comm_dup( comm, &newcomm ); return { std::make_shared< Wrapper >( newcomm ) }; #else return { std::make_shared< Wrapper >( comm ) }; #endif } //! \brief Non-static factory method – wrapper for \ref MPI_Comm_dup Comm duplicate() const { return duplicate( *this ); } //! \brief Factory method – wrapper for \ref MPI_Comm_split static Comm split( MPI_Comm comm, int color, int key ) { #ifdef HAVE_MPI MPI_Comm newcomm; MPI_Comm_split( comm, color, key, &newcomm ); return { std::make_shared< Wrapper >( newcomm ) }; #else return { std::make_shared< Wrapper >( comm ) }; #endif } //! 
\brief Non-static factory method – wrapper for \ref MPI_Comm_split Comm split( int color, int key ) const { return split( *this, color, key ); } //! \brief Factory method – wrapper for \ref MPI_Comm_split_type static Comm split_type( MPI_Comm comm, int split_type, int key, MPI_Info info ) { #ifdef HAVE_MPI MPI_Comm newcomm; MPI_Comm_split_type( comm, split_type, key, info, &newcomm ); return { std::make_shared< Wrapper >( newcomm ) }; #else return { std::make_shared< Wrapper >( comm ) }; #endif } //! \brief Non-static factory method – wrapper for \ref MPI_Comm_split_type Comm split_type( int split_type, int key, MPI_Info info ) const { return Comm::split_type( *this, split_type, key, info ); } /** * \brief Access the MPI communicator associated with this object. * * This routine permits the implicit conversion from \ref Comm to * \ref MPI_Comm. * * \b Warning: The obtained \ref MPI_Comm handle becomes invalid when the * originating \ref Comm object is destroyed. For example, the following * code is invalid, because the \ref Comm object managing the lifetime of * the communicator is destroyed as soon as it is cast to \ref MPI_Comm: * * \code{.cpp} * const MPI_Comm comm = MPI::Comm::duplicate( MPI_COMM_WORLD ); * const int nproc = MPI::GetSize( comm ); * \endcode */ operator const MPI_Comm&() const { return wrapper->comm; } //! \brief Determines the rank of the calling process in the communicator. int rank() const { return GetRank( *this ); } //! \brief Returns the size of the group associated with a communicator. int size() const { return GetSize( *this ); } //! \brief Compares two communicators – wrapper for \ref MPI_Comm_compare. int compare( MPI_Comm comm2 ) const { #ifdef HAVE_MPI int result; MPI_Comm_compare( *this, comm2, &result ); return result; #else return MPI_IDENT; #endif } /** * \brief Wait for all processes within a communicator to reach the barrier. 
* * This routine is a collective operation that blocks each process until all * processes have entered it, then releases all of the processes * "simultaneously". It is equivalent to calling \ref MPI_Barrier with the * MPI communicator associated with this object. */ void barrier() const { Barrier( *this ); } }; /** * \brief Returns a local rank ID of the current process within a group of * processes running on a shared-memory node. * * The given MPI communicator is split into groups according to the * `MPI_COMM_TYPE_SHARED` type (from MPI-3) and the rank ID of the process * within the group is returned. */ inline int getRankOnNode( MPI_Comm communicator = MPI_COMM_WORLD ) { #ifdef HAVE_MPI const int rank = GetRank( communicator ); const MPI::Comm local_comm = MPI::Comm::split_type( communicator, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL ); return local_comm.rank(); #else return 0; #endif } } // namespace MPI } // namespace TNL
src/TNL/MPI/DummyDefs.h +10 −0 Original line number Diff line number Diff line Loading @@ -9,6 +9,7 @@ #ifndef HAVE_MPI using MPI_Request = int; using MPI_Comm = int; using MPI_Info = int; enum MPI_Op { Loading @@ -26,6 +27,15 @@ enum MPI_Op MPI_MAXLOC, }; // Comparison results enum { MPI_IDENT, MPI_CONGRUENT, MPI_SIMILAR, MPI_UNEQUAL }; // MPI_Init_thread constants enum { Loading
src/TNL/MPI/Utils.h +0 −27 Original line number Diff line number Diff line Loading @@ -42,33 +42,6 @@ restoreRedirection() } } /** * \brief Returns a local rank ID of the current process within a group of * processes running on a shared-memory node. * * The given MPI communicator is split into groups according to the * `MPI_COMM_TYPE_SHARED` type (from MPI-3) and the rank ID of the process * within the group is returned. */ inline int getRankOnNode( MPI_Comm communicator = MPI_COMM_WORLD ) { #ifdef HAVE_MPI const int rank = GetRank( communicator ); MPI_Comm local_comm; MPI_Comm_split_type( communicator, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL, &local_comm ); const int local_rank = GetRank( local_comm ); MPI_Comm_free( &local_comm ); return local_rank; #else return 0; #endif } /** * \brief Applies the given reduction operation to the values among all ranks * in the given communicator. Loading
src/TNL/MPI/Wrappers.h +0 −12 Original line number Diff line number Diff line Loading @@ -134,18 +134,6 @@ GetSize( MPI_Comm communicator = MPI_COMM_WORLD ) // wrappers for MPI helper functions inline MPI_Comm Comm_split( MPI_Comm comm, int color, int key ) { #ifdef HAVE_MPI MPI_Comm newcomm; MPI_Comm_split( comm, color, key, &newcomm ); return newcomm; #else return comm; #endif } /** * \brief Wrapper for \ref MPI_Dims_create. * Loading