From 7c9f7acf506ba9d07d4ffb16bb82928b50104415 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Klinkovsk=C3=BD?= Date: Wed, 7 Jul 2021 09:54:30 +0200 Subject: [PATCH 01/10] MPI: removed default values for MPI_Comm parameters and moved NullGroup checking outside HAVE_MPI - The "NullGroup" should not be used even when built without MPI, otherwise the behaviour is very bug-prone because "NullGroup" usage is not caught and changing the build type leads to a different semantics. - "AllGroup" is not a good default value for the parameters, considering that the class attributes are initialized to "NullGroup". --- src/TNL/Containers/DistributedArray.h | 4 ++-- src/TNL/MPI/Wrappers.h | 26 +++++++++++++------------- src/TNL/Matrices/DistributedMatrix.h | 4 ++-- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/TNL/Containers/DistributedArray.h b/src/TNL/Containers/DistributedArray.h index 2c2690acd..ef43741c5 100644 --- a/src/TNL/Containers/DistributedArray.h +++ b/src/TNL/Containers/DistributedArray.h @@ -77,9 +77,9 @@ public: */ explicit DistributedArray( const DistributedArray& array, const AllocatorType& allocator ); - DistributedArray( LocalRangeType localRange, Index ghosts, Index globalSize, MPI_Comm group = MPI::AllGroup(), const AllocatorType& allocator = AllocatorType() ); + DistributedArray( LocalRangeType localRange, Index ghosts, Index globalSize, MPI_Comm group, const AllocatorType& allocator = AllocatorType() ); - void setDistribution( LocalRangeType localRange, Index ghosts, Index globalSize, MPI_Comm group = MPI::AllGroup() ); + void setDistribution( LocalRangeType localRange, Index ghosts, Index globalSize, MPI_Comm group ); const LocalRangeType& getLocalRange() const; diff --git a/src/TNL/MPI/Wrappers.h b/src/TNL/MPI/Wrappers.h index b2c97e076..828f2ad45 100644 --- a/src/TNL/MPI/Wrappers.h +++ b/src/TNL/MPI/Wrappers.h @@ -133,9 +133,9 @@ inline bool Finalized() inline int GetRank( MPI_Comm group = AllGroup() ) { + TNL_ASSERT_NE( group, 
NullGroup(), "GetRank cannot be called with NullGroup" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! Finalized(), "Fatal Error - MPI is not initialized" ); - TNL_ASSERT_NE( group, NullGroup(), "GetRank cannot be called with NullGroup" ); int rank; MPI_Comm_rank( group, &rank ); return rank; @@ -146,9 +146,9 @@ inline int GetRank( MPI_Comm group = AllGroup() ) inline int GetSize( MPI_Comm group = AllGroup() ) { + TNL_ASSERT_NE( group, NullGroup(), "GetSize cannot be called with NullGroup" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! Finalized(), "Fatal Error - MPI is not initialized" ); - TNL_ASSERT_NE( group, NullGroup(), "GetSize cannot be called with NullGroup" ); int size; MPI_Comm_size( group, &size ); return size; @@ -210,9 +210,9 @@ inline void Compute_dims( int nproc, int ndims, int* dims ) inline void Barrier( MPI_Comm group = AllGroup() ) { + TNL_ASSERT_NE( group, NullGroup(), "Barrier cannot be called with NullGroup" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! Finalized(), "Fatal Error - MPI is not initialized" ); - TNL_ASSERT_NE( group, NullGroup(), "Barrier cannot be called with NullGroup" ); MPI_Barrier(group); #endif } @@ -232,9 +232,9 @@ void Send( const T* data, int tag, MPI_Comm group = AllGroup() ) { + TNL_ASSERT_NE( group, NullGroup(), "Send cannot be called with NullGroup" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! Finalized(), "Fatal Error - MPI is not initialized" ); - TNL_ASSERT_NE( group, NullGroup(), "Send cannot be called with NullGroup" ); MPI_Send( (const void*) data, count, getDataType(), dest, tag, group ); #endif } @@ -246,9 +246,9 @@ void Recv( T* data, int tag, MPI_Comm group = AllGroup() ) { + TNL_ASSERT_NE( group, NullGroup(), "Recv cannot be called with NullGroup" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! 
Finalized(), "Fatal Error - MPI is not initialized" ); - TNL_ASSERT_NE( group, NullGroup(), "Recv cannot be called with NullGroup" ); MPI_Recv( (void*) data, count, getDataType(), src, tag, group, MPI_STATUS_IGNORE ); #endif } @@ -264,9 +264,9 @@ void Sendrecv( const T* sendData, int receiveTag, MPI_Comm group = AllGroup() ) { + TNL_ASSERT_NE( group, NullGroup(), "Sendrecv cannot be called with NullGroup" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! Finalized(), "Fatal Error - MPI is not initialized" ); - TNL_ASSERT_NE( group, NullGroup(), "Sendrecv cannot be called with NullGroup" ); MPI_Sendrecv( (void*) sendData, sendCount, getDataType(), @@ -291,9 +291,9 @@ MPI_Request Isend( const T* data, int tag, MPI_Comm group = AllGroup() ) { + TNL_ASSERT_NE( group, NullGroup(), "Isend cannot be called with NullGroup" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! Finalized(), "Fatal Error - MPI is not initialized" ); - TNL_ASSERT_NE( group, NullGroup(), "Isend cannot be called with NullGroup" ); MPI_Request req; MPI_Isend( (const void*) data, count, getDataType(), dest, tag, group, &req ); return req; @@ -309,9 +309,9 @@ MPI_Request Irecv( T* data, int tag, MPI_Comm group = AllGroup() ) { + TNL_ASSERT_NE( group, NullGroup(), "Irecv cannot be called with NullGroup" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! 
Finalized(), "Fatal Error - MPI is not initialized" ); - TNL_ASSERT_NE( group, NullGroup(), "Irecv cannot be called with NullGroup" ); MPI_Request req; MPI_Irecv( (void*) data, count, getDataType(), src, tag, group, &req ); return req; @@ -327,8 +327,8 @@ void Allreduce( const T* data, const MPI_Op& op, MPI_Comm group) { -#ifdef HAVE_MPI TNL_ASSERT_NE( group, NullGroup(), "Allreduce cannot be called with NullGroup" ); +#ifdef HAVE_MPI getTimerAllreduce().start(); MPI_Allreduce( (const void*) data, (void*) reduced_data, count, getDataType(), op, group ); getTimerAllreduce().stop(); @@ -344,8 +344,8 @@ void Allreduce( T* data, const MPI_Op& op, MPI_Comm group) { -#ifdef HAVE_MPI TNL_ASSERT_NE( group, NullGroup(), "Allreduce cannot be called with NullGroup" ); +#ifdef HAVE_MPI getTimerAllreduce().start(); MPI_Allreduce( MPI_IN_PLACE, (void*) data, count, getDataType(), op, group ); getTimerAllreduce().stop(); @@ -360,8 +360,8 @@ void Reduce( const T* data, int root, MPI_Comm group) { -#ifdef HAVE_MPI TNL_ASSERT_NE( group, NullGroup(), "Reduce cannot be called with NullGroup" ); +#ifdef HAVE_MPI MPI_Reduce( (const void*) data, (void*) reduced_data, count, getDataType(), op, root, group ); #else std::memcpy( (void*) reduced_data, (void*) data, count * sizeof(T) ); @@ -371,9 +371,9 @@ void Reduce( const T* data, template< typename T > void Bcast( T* data, int count, int root, MPI_Comm group) { + TNL_ASSERT_NE( group, NullGroup(), "Bcast cannot be called with NullGroup" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! 
Finalized(), "Fatal Error - MPI is not initialized" ); - TNL_ASSERT_NE( group, NullGroup(), "Bcast cannot be called with NullGroup" ); MPI_Bcast( (void*) data, count, getDataType(), root, group ); #endif } @@ -385,8 +385,8 @@ void Alltoall( const T* sendData, int receiveCount, MPI_Comm group ) { -#ifdef HAVE_MPI TNL_ASSERT_NE( group, NullGroup(), "Alltoall cannot be called with NullGroup" ); +#ifdef HAVE_MPI MPI_Alltoall( (const void*) sendData, sendCount, getDataType(), diff --git a/src/TNL/Matrices/DistributedMatrix.h b/src/TNL/Matrices/DistributedMatrix.h index 2deed3abf..b274699af 100644 --- a/src/TNL/Matrices/DistributedMatrix.h +++ b/src/TNL/Matrices/DistributedMatrix.h @@ -48,9 +48,9 @@ public: DistributedMatrix( DistributedMatrix& ) = default; - DistributedMatrix( LocalRangeType localRowRange, IndexType rows, IndexType columns, MPI_Comm group = MPI::AllGroup() ); + DistributedMatrix( LocalRangeType localRowRange, IndexType rows, IndexType columns, MPI_Comm group ); - void setDistribution( LocalRangeType localRowRange, IndexType rows, IndexType columns, MPI_Comm group = MPI::AllGroup() ); + void setDistribution( LocalRangeType localRowRange, IndexType rows, IndexType columns, MPI_Comm group ); const LocalRangeType& getLocalRowRange() const; -- GitLab From 6019eedf3ffc4bb9ed5bbfa55e503d4706f840e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Klinkovsk=C3=BD?= Date: Tue, 31 Aug 2021 13:58:04 +0200 Subject: [PATCH 02/10] Removed __cuda_callable__ from methods in DistributedNDArray and DistributedNDArrayView CUDA kernels should not ever work with distributed data structures, they should always get the underlying *local* data structure. 
--- src/TNL/Containers/DistributedNDArray.h | 12 ---------- src/TNL/Containers/DistributedNDArrayView.h | 25 +-------------------- 2 files changed, 1 insertion(+), 36 deletions(-) diff --git a/src/TNL/Containers/DistributedNDArray.h b/src/TNL/Containers/DistributedNDArray.h index 22e67a36e..8f1c213d9 100644 --- a/src/TNL/Containers/DistributedNDArray.h +++ b/src/TNL/Containers/DistributedNDArray.h @@ -83,14 +83,12 @@ public: return localArray.getAllocator(); } - __cuda_callable__ MPI_Comm getCommunicationGroup() const { return group; } // Returns the *global* sizes - __cuda_callable__ const SizesHolderType& getSizes() const { return globalSizes; @@ -98,33 +96,28 @@ public: // Returns the *global* size template< std::size_t level > - __cuda_callable__ IndexType getSize() const { return globalSizes.template getSize< level >(); } - __cuda_callable__ LocalBeginsType getLocalBegins() const { return localBegins; } - __cuda_callable__ SizesHolderType getLocalEnds() const { return localEnds; } template< std::size_t level > - __cuda_callable__ LocalRangeType getLocalRange() const { return LocalRangeType( localBegins.template getSize< level >(), localEnds.template getSize< level >() ); } // returns the local storage size - __cuda_callable__ IndexType getLocalStorageSize() const { return localArray.getStorageSize(); @@ -142,7 +135,6 @@ public: // returns the *local* storage index for given *global* indices template< typename... IndexTypes > - __cuda_callable__ IndexType getStorageIndex( IndexTypes&&... indices ) const { @@ -155,13 +147,11 @@ public: return __ndarray_impl::call_with_unshifted_indices< LocalBeginsType >( localBegins, getStorageIndex, std::forward< IndexTypes >( indices )... 
); } - __cuda_callable__ ValueType* getData() { return localArray.getData(); } - __cuda_callable__ std::add_const_t< ValueType >* getData() const { return localArray.getData(); @@ -207,13 +197,11 @@ public: return localArray[ index - localBegins.template getSize< 0 >() ]; } - __cuda_callable__ ViewType getView() { return ViewType( localArray.getView(), globalSizes, localBegins, localEnds, group ); } - __cuda_callable__ ConstViewType getConstView() const { return ConstViewType( localArray.getConstView(), globalSizes, localBegins, localEnds, group ); diff --git a/src/TNL/Containers/DistributedNDArrayView.h b/src/TNL/Containers/DistributedNDArrayView.h index f3f672fa8..13d9cf6dd 100644 --- a/src/TNL/Containers/DistributedNDArrayView.h +++ b/src/TNL/Containers/DistributedNDArrayView.h @@ -37,27 +37,20 @@ public: using LocalViewType = NDArrayView; using ConstLocalViewType = typename NDArrayView::ConstViewType; - __cuda_callable__ DistributedNDArrayView() = default; // explicit initialization by local array view, global sizes and local begins and ends - __cuda_callable__ DistributedNDArrayView( NDArrayView localView, SizesHolderType globalSizes, LocalBeginsType localBegins, SizesHolderType localEnds, MPI_Comm group ) : localView(localView), group(group), globalSizes(globalSizes), localBegins(localBegins), localEnds(localEnds) {} - // Copy-constructor does shallow copy, so views can be passed-by-value into - // CUDA kernels and they can be captured-by-value in __cuda_callable__ - // lambda functions. - __cuda_callable__ + // copy-constructor does shallow copy DistributedNDArrayView( const DistributedNDArrayView& ) = default; // default move-constructor - __cuda_callable__ DistributedNDArrayView( DistributedNDArrayView&& ) = default; // Copy-assignment does deep copy, just like regular array, but the sizes // must match (i.e. copy-assignment cannot resize). 
- __cuda_callable__ DistributedNDArrayView& operator=( const DistributedNDArrayView& other ) = default; // There is no move-assignment operator, so expressions like `a = b.getView()` @@ -76,7 +69,6 @@ public: } // methods for rebinding (reinitialization) - __cuda_callable__ void bind( DistributedNDArrayView view ) { localView.bind( view.localView ); @@ -87,20 +79,17 @@ public: } // binds to the given raw pointer and changes the indexer - __cuda_callable__ void bind( ValueType* data, typename LocalViewType::IndexerType indexer ) { localView.bind( data, indexer ); } // binds to the given raw pointer and preserves the current indexer - __cuda_callable__ void bind( ValueType* data ) { localView.bind( data ); } - __cuda_callable__ void reset() { localView.reset(); @@ -115,14 +104,12 @@ public: return NDArrayView::getDimension(); } - __cuda_callable__ MPI_Comm getCommunicationGroup() const { return group; } // Returns the *global* sizes - __cuda_callable__ const SizesHolderType& getSizes() const { return globalSizes; @@ -130,33 +117,28 @@ public: // Returns the *global* size template< std::size_t level > - __cuda_callable__ IndexType getSize() const { return globalSizes.template getSize< level >(); } - __cuda_callable__ LocalBeginsType getLocalBegins() const { return localBegins; } - __cuda_callable__ SizesHolderType getLocalEnds() const { return localEnds; } template< std::size_t level > - __cuda_callable__ LocalRangeType getLocalRange() const { return LocalRangeType( localBegins.template getSize< level >(), localEnds.template getSize< level >() ); } // returns the local storage size - __cuda_callable__ IndexType getLocalStorageSize() const { return localView.getStorageSize(); @@ -174,7 +156,6 @@ public: // returns the *local* storage index for given *global* indices template< typename... IndexTypes > - __cuda_callable__ IndexType getStorageIndex( IndexTypes&&... 
indices ) const { @@ -187,13 +168,11 @@ public: return __ndarray_impl::call_with_unshifted_indices< LocalBeginsType >( localBegins, getStorageIndex, std::forward< IndexTypes >( indices )... ); } - __cuda_callable__ ValueType* getData() { return localView.getData(); } - __cuda_callable__ std::add_const_t< ValueType >* getData() const { return localView.getData(); @@ -239,13 +218,11 @@ public: return localView[ index - localBegins.template getSize< 0 >() ]; } - __cuda_callable__ ViewType getView() { return ViewType( *this ); } - __cuda_callable__ ConstViewType getConstView() const { return ConstViewType( localView, globalSizes, localBegins, localEnds, group ); -- GitLab From 55d3074b62a676ab55f46f9eaec62c5ac268c112 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Klinkovsk=C3=BD?= Date: Tue, 31 Aug 2021 14:50:32 +0200 Subject: [PATCH 03/10] Removed MpiCommunicator --- .../HeatEquationBenchmarkProblem.h | 24 +-- .../HeatEquationBenchmarkProblem_impl.h | 69 +++--- .../tnl-benchmark-heat-equation.h | 11 +- .../tnl-direct-eikonal-solver.h | 5 +- .../hamilton-jacobi/tnlDirectEikonalProblem.h | 6 +- .../tnlDirectEikonalProblem_impl.h | 38 ++-- .../hamilton-jacobi/tnlFastSweepingMethod.h | 16 +- .../tnlFastSweepingMethod1D_impl.h | 12 +- .../tnlFastSweepingMethod2D_impl.h | 54 +++-- .../tnlFastSweepingMethod3D_impl.h | 65 +++--- src/Examples/flow-sw/navierStokes.h | 9 +- src/Examples/flow-sw/navierStokesProblem.h | 25 +-- .../flow-sw/navierStokesProblem_impl.h | 67 +++--- src/Examples/flow-vl/navierStokes.h | 9 +- src/Examples/flow-vl/navierStokesProblem.h | 25 +-- .../flow-vl/navierStokesProblem_impl.h | 67 +++--- src/Examples/flow/navierStokes.h | 9 +- src/Examples/flow/navierStokesProblem.h | 23 +- src/Examples/flow/navierStokesProblem_impl.h | 67 +++--- .../heat-equation/tnl-heat-equation-eoc.h | 5 +- .../heat-equation/tnl-heat-equation.h | 11 +- src/Examples/inviscid-flow-sw/euler.h | 11 +- src/Examples/inviscid-flow-sw/eulerProblem.h | 25 +-- 
.../inviscid-flow-sw/eulerProblem_impl.h | 67 +++--- src/Examples/inviscid-flow-vl/euler.h | 11 +- src/Examples/inviscid-flow-vl/eulerProblem.h | 25 +-- .../inviscid-flow-vl/eulerProblem_impl.h | 67 +++--- src/Examples/inviscid-flow/euler.h | 11 +- src/Examples/inviscid-flow/eulerProblem.h | 24 +-- .../inviscid-flow/eulerProblem_impl.h | 48 ++--- .../tnl-transport-equation-eoc.h | 25 ++- .../tnl-transport-equation.h | 23 +- .../transportEquationProblem.h | 16 +- .../transportEquationProblemEoc.h | 12 +- .../transportEquationProblemEoc_impl.h | 9 +- .../transportEquationProblem_impl.h | 37 ++-- src/TNL/Communicators/MpiCommunicator.h | 196 ------------------ src/TNL/Problems/HeatEquationEocProblem.h | 9 +- .../Problems/HeatEquationEocProblem_impl.h | 7 +- src/TNL/Problems/HeatEquationProblem.h | 6 +- src/TNL/Problems/HeatEquationProblem_impl.h | 45 ++-- src/TNL/Problems/MeanCurvatureFlowProblem.h | 8 +- src/TNL/Problems/PDEProblem.h | 2 - src/TNL/Problems/PDEProblem_impl.h | 68 +++--- src/TNL/Solvers/PDE/ExplicitTimeStepper.h | 1 - src/TNL/Solvers/PDE/ExplicitUpdater.h | 41 ++-- .../Solvers/PDE/TimeDependentPDESolver_impl.h | 9 +- .../PDE/TimeIndependentPDESolver_impl.h | 6 +- src/TNL/Solvers/Solver.h | 5 +- src/TNL/Solvers/SolverConfig_impl.h | 2 +- src/TNL/Solvers/SolverInitiator.h | 2 +- src/TNL/Solvers/SolverInitiator_impl.h | 35 ++-- src/TNL/Solvers/SolverStarter_impl.h | 4 +- src/TNL/Solvers/Solver_impl.h | 2 +- src/Tools/tnl-quickstart/main.h.in | 39 ++-- src/Tools/tnl-quickstart/problem.h.in | 12 +- src/Tools/tnl-quickstart/problem_impl.h.in | 50 ++--- 57 files changed, 577 insertions(+), 1000 deletions(-) delete mode 100644 src/TNL/Communicators/MpiCommunicator.h diff --git a/src/Benchmarks/HeatEquation/HeatEquationBenchmarkProblem.h b/src/Benchmarks/HeatEquation/HeatEquationBenchmarkProblem.h index 70c0d2b90..6389161cc 100644 --- a/src/Benchmarks/HeatEquation/HeatEquationBenchmarkProblem.h +++ b/src/Benchmarks/HeatEquation/HeatEquationBenchmarkProblem.h @@ 
-12,11 +12,9 @@ using namespace TNL::Problems; template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > + typename DifferentialOperator > class HeatEquationBenchmarkProblem: public PDEProblem< Mesh, - Communicator, typename DifferentialOperator::RealType, typename Mesh::DeviceType, typename DifferentialOperator::IndexType > @@ -28,12 +26,10 @@ class HeatEquationBenchmarkProblem: typedef typename DifferentialOperator::IndexType IndexType; typedef Functions::MeshFunctionView< Mesh > MeshFunctionViewType; typedef Pointers::SharedPointer< MeshFunctionViewType, DeviceType > MeshFunctionViewPointer; - typedef PDEProblem< Mesh, Communicator, RealType, DeviceType, IndexType > BaseType; + typedef PDEProblem< Mesh, RealType, DeviceType, IndexType > BaseType; typedef Pointers::SharedPointer< DifferentialOperator > DifferentialOperatorPointer; typedef Pointers::SharedPointer< BoundaryCondition > BoundaryConditionPointer; typedef Pointers::SharedPointer< RightHandSide, DeviceType > RightHandSidePointer; - - typedef Communicator CommunicatorType; using typename BaseType::MeshType; using typename BaseType::MeshPointer; @@ -67,9 +63,9 @@ class HeatEquationBenchmarkProblem: const RealType& tau, DofVectorPointer& _uPointer, DofVectorPointer& _fuPointer ); - + void applyBoundaryConditions( const RealType& time, - DofVectorPointer& dofs ); + DofVectorPointer& dofs ); template< typename MatrixPointer > void assemblyLinearSystem( const RealType& time, @@ -77,7 +73,7 @@ class HeatEquationBenchmarkProblem: DofVectorPointer& dofs, MatrixPointer& matrix, DofVectorPointer& rightHandSide ); - + ~HeatEquationBenchmarkProblem(); protected: @@ -85,19 +81,19 @@ class HeatEquationBenchmarkProblem: DifferentialOperatorPointer differentialOperatorPointer; BoundaryConditionPointer boundaryConditionPointer; RightHandSidePointer rightHandSidePointer; - + MeshFunctionViewPointer fu, u; - + String cudaKernelType; - + MeshType* 
cudaMesh; BoundaryCondition* cudaBoundaryConditions; RightHandSide* cudaRightHandSide; DifferentialOperator* cudaDifferentialOperator; - + TNL::ExplicitUpdater< Mesh, MeshFunctionViewType, DifferentialOperator, BoundaryCondition, RightHandSide > tuningExplicitUpdater; TNL::Solvers::PDE::ExplicitUpdater< Mesh, MeshFunctionViewType, DifferentialOperator, BoundaryCondition, RightHandSide > explicitUpdater; - + }; #include "HeatEquationBenchmarkProblem_impl.h" diff --git a/src/Benchmarks/HeatEquation/HeatEquationBenchmarkProblem_impl.h b/src/Benchmarks/HeatEquation/HeatEquationBenchmarkProblem_impl.h index a1eb09a36..9283a8c02 100644 --- a/src/Benchmarks/HeatEquation/HeatEquationBenchmarkProblem_impl.h +++ b/src/Benchmarks/HeatEquation/HeatEquationBenchmarkProblem_impl.h @@ -19,9 +19,8 @@ template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > -HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: + typename DifferentialOperator > +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: HeatEquationBenchmarkProblem() : cudaMesh( 0 ), cudaBoundaryConditions( 0 ), @@ -33,10 +32,9 @@ HeatEquationBenchmarkProblem() template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > + typename DifferentialOperator > String -HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getPrologHeader() const { if( this->cudaKernelType == "pure-c" ) @@ -53,10 +51,9 @@ getPrologHeader() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > + typename DifferentialOperator > void -HeatEquationBenchmarkProblem< 
Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) const { /**** @@ -68,10 +65,9 @@ writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) cons template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > + typename DifferentialOperator > bool -HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setup( const Config::ParameterContainer& parameters, const String& prefix ) { @@ -95,10 +91,9 @@ setup( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > -typename HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >::IndexType -HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: + typename DifferentialOperator > +typename HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >::IndexType +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getDofs() const { /**** @@ -111,10 +106,9 @@ getDofs() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > + typename DifferentialOperator > void -HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: bindDofs( DofVectorPointer& dofsPointer ) { 
this->u->bind( this->getMesh(), *dofsPointer ); @@ -123,10 +117,9 @@ bindDofs( DofVectorPointer& dofsPointer ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > + typename DifferentialOperator > bool -HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setInitialCondition( const Config::ParameterContainer& parameters, DofVectorPointer& dofsPointer ) { @@ -144,11 +137,10 @@ setInitialCondition( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > + typename DifferentialOperator > template< typename Matrix > bool -HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setupLinearSystem( Matrix& matrix ) { const IndexType dofs = this->getDofs(); @@ -170,10 +162,9 @@ setupLinearSystem( Matrix& matrix ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > + typename DifferentialOperator > bool -HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: makeSnapshot( const RealType& time, const IndexType& step, DofVectorPointer& dofsPointer ) @@ -383,10 +374,9 @@ heatEquationTemplatedCompact( const GridType* grid, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > + typename DifferentialOperator > void -HeatEquationBenchmarkProblem< Mesh, 
BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getExplicitUpdate( const RealType& time, const RealType& tau, DofVectorPointer& uDofs, @@ -532,7 +522,7 @@ getExplicitUpdate( const RealType& time, this->u->bind( mesh, *uDofs ); this->fu->bind( mesh, *fuDofs ); //explicitUpdater.setGPUTransferTimer( this->gpuTransferTimer ); - this->explicitUpdater.template update< typename Mesh::Cell, CommunicatorType >( time, tau, mesh, this->u, this->fu ); + this->explicitUpdater.template update< typename Mesh::Cell >( time, tau, mesh, this->u, this->fu ); } if( this->cudaKernelType == "tunning" ) { @@ -636,10 +626,9 @@ getExplicitUpdate( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > + typename DifferentialOperator > void -HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: applyBoundaryConditions( const RealType& time, DofVectorPointer& uDofs ) { @@ -719,11 +708,10 @@ applyBoundaryConditions( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator > + typename DifferentialOperator > template< typename MatrixPointer > void -HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: assemblyLinearSystem( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -750,9 +738,8 @@ assemblyLinearSystem( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename DifferentialOperator, - typename Communicator 
> -HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator, Communicator >:: + typename DifferentialOperator > +HeatEquationBenchmarkProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: ~HeatEquationBenchmarkProblem() { if( this->cudaMesh ) Cuda::freeFromDevice( this->cudaMesh ); diff --git a/src/Benchmarks/HeatEquation/tnl-benchmark-heat-equation.h b/src/Benchmarks/HeatEquation/tnl-benchmark-heat-equation.h index e0a3318c5..8ed3472e5 100644 --- a/src/Benchmarks/HeatEquation/tnl-benchmark-heat-equation.h +++ b/src/Benchmarks/HeatEquation/tnl-benchmark-heat-equation.h @@ -49,8 +49,7 @@ template< typename Real, typename Index, typename MeshType, typename ConfigTag, - typename SolverStarter, - typename CommunicatorType > + typename SolverStarter > class HeatEquationBenchmarkSetter { public: @@ -78,12 +77,12 @@ class HeatEquationBenchmarkSetter if( boundaryConditionsType == "dirichlet" ) { typedef Operators::DirichletBoundaryConditions< MeshType, Constant, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef HeatEquationBenchmarkProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, CommunicatorType > Problem; + typedef HeatEquationBenchmarkProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } /*typedef Operators::NeumannBoundaryConditions< MeshType, Constant, Real, Index > BoundaryConditions; - typedef HeatEquationBenchmarkProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, CommunicatorType > Problem; + typedef HeatEquationBenchmarkProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters );*/ } @@ -91,12 +90,12 @@ class HeatEquationBenchmarkSetter if( boundaryConditionsType == "dirichlet" ) { typedef 
Operators::DirichletBoundaryConditions< MeshType, MeshFunction, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef HeatEquationBenchmarkProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, CommunicatorType > Problem; + typedef HeatEquationBenchmarkProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } typedef Operators::NeumannBoundaryConditions< MeshType, MeshFunction, Real, Index > BoundaryConditions; - typedef HeatEquationBenchmarkProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, CommunicatorType > Problem; + typedef HeatEquationBenchmarkProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters );*/ return false; diff --git a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnl-direct-eikonal-solver.h b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnl-direct-eikonal-solver.h index 95bbb30fa..557f074f3 100644 --- a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnl-direct-eikonal-solver.h +++ b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnl-direct-eikonal-solver.h @@ -44,8 +44,7 @@ template< typename Real, typename Index, typename MeshType, typename MeshConfig, - typename SolverStarter, - typename CommunicatorType > + typename SolverStarter > class DirectEikonalSolverSetter { public: @@ -60,7 +59,7 @@ class DirectEikonalSolverSetter { static const int Dimension = MeshType::getMeshDimension(); typedef Functions::Analytic::Constant< Dimension, Real > Anisotropy; - typedef tnlDirectEikonalProblem< MeshType, CommunicatorType, Anisotropy > Problem; + typedef tnlDirectEikonalProblem< MeshType, Anisotropy > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); }; diff --git 
a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlDirectEikonalProblem.h b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlDirectEikonalProblem.h index 4706e1ab5..0abcbaa36 100644 --- a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlDirectEikonalProblem.h +++ b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlDirectEikonalProblem.h @@ -21,13 +21,11 @@ #include template< typename Mesh, - typename Communicator, typename Anisotropy, typename Real = typename Mesh::RealType, typename Index = typename Mesh::IndexType > class tnlDirectEikonalProblem : public Problems::PDEProblem< Mesh, - Communicator, Real, typename Mesh::DeviceType, Index > @@ -38,7 +36,7 @@ class tnlDirectEikonalProblem typedef typename Mesh::DeviceType DeviceType; typedef Index IndexType; typedef Functions::MeshFunctionView< Mesh > MeshFunctionType; - typedef Problems::PDEProblem< Mesh, Communicator, RealType, DeviceType, IndexType > BaseType; + typedef Problems::PDEProblem< Mesh, RealType, DeviceType, IndexType > BaseType; using AnisotropyType = Anisotropy; using AnisotropyPointer = Pointers::SharedPointer< AnisotropyType, DeviceType >; using MeshFunctionPointer = Pointers::SharedPointer< MeshFunctionType >; @@ -48,8 +46,6 @@ class tnlDirectEikonalProblem using MeshPointer = Pointers::SharedPointer< MeshType >; using DofVectorPointer = Pointers::SharedPointer< DofVectorType >; - typedef Communicator CommunicatorType; - static constexpr bool isTimeDependent() { return false; }; static String getType(); diff --git a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlDirectEikonalProblem_impl.h b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlDirectEikonalProblem_impl.h index c21d3deb3..2c37cd58a 100644 --- a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlDirectEikonalProblem_impl.h +++ b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlDirectEikonalProblem_impl.h @@ -12,17 +12,18 @@ */ #pragma once + #include +#include #include 
"tnlDirectEikonalProblem.h" template< typename Mesh, - typename Communicator, typename Anisotropy, typename Real, typename Index > String -tnlDirectEikonalProblem< Mesh, Communicator, Anisotropy, Real, Index >:: +tnlDirectEikonalProblem< Mesh, Anisotropy, Real, Index >:: getType() { return String( "DirectEikonalProblem< " + @@ -33,24 +34,22 @@ getType() } template< typename Mesh, - typename Communicator, typename Anisotropy, typename Real, typename Index > String -tnlDirectEikonalProblem< Mesh, Communicator, Anisotropy, Real, Index >:: +tnlDirectEikonalProblem< Mesh, Anisotropy, Real, Index >:: getPrologHeader() const { return String( "Direct eikonal solver" ); } template< typename Mesh, - typename Communicator, typename Anisotropy, typename Real, typename Index > void -tnlDirectEikonalProblem< Mesh, Communicator, Anisotropy, Real, Index >:: +tnlDirectEikonalProblem< Mesh, Anisotropy, Real, Index >:: writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) const { @@ -58,24 +57,22 @@ writeProlog( Logger& logger, } template< typename Mesh, - typename Communicator, typename Anisotropy, typename Real, typename Index > bool -tnlDirectEikonalProblem< Mesh, Communicator, Anisotropy, Real, Index >:: +tnlDirectEikonalProblem< Mesh, Anisotropy, Real, Index >:: writeEpilog( Logger& logger ) { return true; } template< typename Mesh, - typename Communicator, typename Anisotropy, typename Real, typename Index > bool -tnlDirectEikonalProblem< Mesh, Communicator, Anisotropy, Real, Index >:: +tnlDirectEikonalProblem< Mesh, Anisotropy, Real, Index >:: setup( const Config::ParameterContainer& parameters, const String& prefix ) { @@ -83,43 +80,40 @@ setup( const Config::ParameterContainer& parameters, } template< typename Mesh, - typename Communicator, typename Anisotropy, typename Real, typename Index > Index -tnlDirectEikonalProblem< Mesh, Communicator, Anisotropy, Real, Index >:: +tnlDirectEikonalProblem< Mesh, Anisotropy, Real, Index >:: getDofs() const { 
return this->getMesh()->template getEntitiesCount< typename MeshType::Cell >(); } template< typename Mesh, - typename Communicator, typename Anisotropy, typename Real, typename Index > void -tnlDirectEikonalProblem< Mesh, Communicator, Anisotropy, Real, Index >:: +tnlDirectEikonalProblem< Mesh, Anisotropy, Real, Index >:: bindDofs( DofVectorPointer& dofs ) { this->u->bind( this->getMesh(), *dofs ); } template< typename Mesh, - typename Communicator, typename Anisotropy, typename Real, typename Index > bool -tnlDirectEikonalProblem< Mesh, Communicator, Anisotropy, Real, Index >:: +tnlDirectEikonalProblem< Mesh, Anisotropy, Real, Index >:: setInitialCondition( const Config::ParameterContainer& parameters, DofVectorPointer& dofs ) { this->bindDofs( dofs ); String inputFile = parameters.getParameter< String >( "input-file" ); this->initialData->setMesh( this->getMesh() ); - if( CommunicatorType::isDistributed() ) + if( TNL::MPI::GetSize() > 1 ) { std::cout<<"Nodes Distribution: " << this->distributedMeshPointer->printProcessDistr() << std::endl; if( ! Functions::readDistributedMeshFunction( *this->distributedMeshPointer, *this->initialData, "u", inputFile ) ) @@ -142,12 +136,11 @@ setInitialCondition( const Config::ParameterContainer& parameters, } template< typename Mesh, - typename Communicator, typename Anisotropy, typename Real, typename Index > bool -tnlDirectEikonalProblem< Mesh, Communicator, Anisotropy, Real, Index >:: +tnlDirectEikonalProblem< Mesh, Anisotropy, Real, Index >:: makeSnapshot( ) { std::cout << std::endl << "Writing output." 
<< std::endl; @@ -157,7 +150,7 @@ makeSnapshot( ) FileName fileName; fileName.setFileNameBase( "u-" ); - if(CommunicatorType::isDistributed()) + if( TNL::MPI::GetSize() > 1 ) { fileName.setExtension( "pvti" ); Functions::writeDistributedMeshFunction( *this->distributedMeshPointer, *this->initialData, "u", fileName.getFileName() ); @@ -171,15 +164,14 @@ makeSnapshot( ) template< typename Mesh, - typename Communicator, typename Anisotropy, typename Real, typename Index > bool -tnlDirectEikonalProblem< Mesh, Communicator, Anisotropy, Real, Index >:: +tnlDirectEikonalProblem< Mesh, Anisotropy, Real, Index >:: solve( DofVectorPointer& dofs ) { - FastSweepingMethod< MeshType, Communicator,AnisotropyType > fsm; + FastSweepingMethod< MeshType, AnisotropyType > fsm; fsm.solve( *this->getDistributedMesh(), this->getMesh(), u, anisotropy, initialData ); makeSnapshot(); diff --git a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod.h b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod.h index 2cc5d4073..df5e90d1a 100644 --- a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod.h +++ b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod.h @@ -18,8 +18,7 @@ template< typename Mesh, - typename Communicator, - typename Anisotropy = Functions::Analytic::Constant< Mesh::getMeshDimension(), typename Mesh::RealType > > + typename Anisotropy = Functions::Analytic::Constant< Mesh::getMeshDimension(), typename Mesh::RealType > > class FastSweepingMethod { }; @@ -27,9 +26,8 @@ class FastSweepingMethod template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > -class FastSweepingMethod< Meshes::Grid< 1, Real, Device, Index >, Communicator, Anisotropy > +class FastSweepingMethod< Meshes::Grid< 1, Real, Device, Index >, Anisotropy > : public tnlDirectEikonalMethodsBase< Meshes::Grid< 1, Real, Device, Index > > { //static_assert( std::is_same< Device, 
TNL::Devices::Host >::value, "The fast sweeping method works only on CPU." ); @@ -73,9 +71,8 @@ class FastSweepingMethod< Meshes::Grid< 1, Real, Device, Index >, Communicator, template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > -class FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Communicator, Anisotropy > +class FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Anisotropy > : public tnlDirectEikonalMethodsBase< Meshes::Grid< 2, Real, Device, Index > > { //static_assert( std::is_same< Device, TNL::Devices::Host >::value, "The fast sweeping method works only on CPU." ); @@ -88,12 +85,10 @@ class FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Communicator, typedef Index IndexType; typedef Anisotropy AnisotropyType; typedef tnlDirectEikonalMethodsBase< Meshes::Grid< 2, Real, Device, Index > > BaseType; - typedef Communicator CommunicatorType; typedef Containers::StaticVector< 2, Index > StaticVector; using MeshPointer = Pointers::SharedPointer< MeshType >; using AnisotropyPointer = Pointers::SharedPointer< AnisotropyType, DeviceType >; - using MPI = Communicators::MpiCommunicator; using typename BaseType::InterfaceMapType; using typename BaseType::MeshFunctionType; @@ -131,9 +126,8 @@ class FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Communicator, template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > -class FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Communicator, Anisotropy > +class FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Anisotropy > : public tnlDirectEikonalMethodsBase< Meshes::Grid< 3, Real, Device, Index > > { //static_assert( std::is_same< Device, TNL::Devices::Host >::value, "The fast sweeping method works only on CPU." 
); @@ -146,12 +140,10 @@ class FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Communicator, typedef Index IndexType; typedef Anisotropy AnisotropyType; typedef tnlDirectEikonalMethodsBase< Meshes::Grid< 3, Real, Device, Index > > BaseType; - typedef Communicator CommunicatorType; typedef Containers::StaticVector< 3, Index > StaticVector; using MeshPointer = Pointers::SharedPointer< MeshType >; using AnisotropyPointer = Pointers::SharedPointer< AnisotropyType, DeviceType >; - using MPI = Communicators::MpiCommunicator; using typename BaseType::InterfaceMapType; using typename BaseType::MeshFunctionType; diff --git a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod1D_impl.h b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod1D_impl.h index fda3141f4..11fa3464e 100644 --- a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod1D_impl.h +++ b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod1D_impl.h @@ -18,9 +18,8 @@ template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > -FastSweepingMethod< Meshes::Grid< 1, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 1, Real, Device, Index >, Anisotropy >:: FastSweepingMethod() : maxIterations( 1 ) { @@ -30,10 +29,9 @@ FastSweepingMethod() template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > const Index& -FastSweepingMethod< Meshes::Grid< 1, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 1, Real, Device, Index >, Anisotropy >:: getMaxIterations() const { @@ -42,10 +40,9 @@ getMaxIterations() const template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > void -FastSweepingMethod< Meshes::Grid< 1, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 1, Real, Device, 
Index >, Anisotropy >:: setMaxIterations( const IndexType& maxIterations ) { @@ -54,10 +51,9 @@ setMaxIterations( const IndexType& maxIterations ) template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > void -FastSweepingMethod< Meshes::Grid< 1, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 1, Real, Device, Index >, Anisotropy >:: solve( const Meshes::DistributedMeshes::DistributedMesh< MeshType >& distributedMesh, const MeshPointer& mesh, MeshFunctionPointer& Aux, diff --git a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod2D_impl.h b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod2D_impl.h index 01a5307d7..a119c8c7a 100644 --- a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod2D_impl.h +++ b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod2D_impl.h @@ -15,13 +15,13 @@ #include #include +#include template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > -FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Anisotropy >:: FastSweepingMethod() : maxIterations( 1 ) { @@ -31,10 +31,9 @@ FastSweepingMethod() template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > const Index& -FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Anisotropy >:: getMaxIterations() const { @@ -43,10 +42,9 @@ getMaxIterations() const template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > void -FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Anisotropy >:: 
setMaxIterations( const IndexType& maxIterations ) { @@ -55,10 +53,9 @@ setMaxIterations( const IndexType& maxIterations ) template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > void -FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Anisotropy >:: solve( const Meshes::DistributedMeshes::DistributedMesh< MeshType >& distributedMesh, const MeshPointer& mesh, MeshFunctionPointer& Aux, @@ -73,7 +70,7 @@ solve( const Meshes::DistributedMeshes::DistributedMesh< MeshType >& distributed // Setting overlaps ( WITHOUT MPI SHOULD BE 0 ) StaticVector vecLowerOverlaps = 0; StaticVector vecUpperOverlaps = 0; - if( CommunicatorType::isDistributed() ) + if( TNL::MPI::GetSize() > 1 ) { //Distributed mesh for MPI overlaps (without MPI null pointer) vecLowerOverlaps = distributedMesh.getLowerOverlap(); @@ -375,13 +372,14 @@ solve( const Meshes::DistributedMeshes::DistributedMesh< MeshType >& distributed /**----------------------MPI-TO-DO---------------------------------------------**/ #ifdef HAVE_MPI - if( CommunicatorType::isDistributed() ){ + if( TNL::MPI::GetSize() > 1 ) { getInfoFromNeighbours( calculatedBefore, calculateMPIAgain, distributedMesh ); synchronizer.synchronize( aux ); } #endif - if( !CommunicatorType::isDistributed() ) // If we start the solver without MPI, we need calculated 0! + // If we start the solver without MPI, we need calculated 0! 
+ if( TNL::MPI::GetSize() == 1 ) calculatedBefore = 0; } iteration++; @@ -392,10 +390,9 @@ solve( const Meshes::DistributedMeshes::DistributedMesh< MeshType >& distributed // PROTECTED FUNCTIONS: -template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > +template< typename Real, typename Device, typename Index, typename Anisotropy > bool -FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Anisotropy >:: goThroughSweep( const StaticVector boundsFrom, const StaticVector boundsTo, MeshFunctionType& aux, const InterfaceMapType& interfaceMap, const AnisotropyPointer& anisotropy ) @@ -430,54 +427,53 @@ goThroughSweep( const StaticVector boundsFrom, const StaticVector boundsTo, #ifdef HAVE_MPI -template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > +template< typename Real, typename Device, typename Index, typename Anisotropy > void -FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 2, Real, Device, Index >, Anisotropy >:: getInfoFromNeighbours( int& calculatedBefore, int& calculateMPIAgain, const Meshes::DistributedMeshes::DistributedMesh< MeshType >& distributedMesh ) { int calculateFromNeighbours[4] = {0,0,0,0}; const int *neighbours = distributedMesh.getNeighbors(); // Getting neighbors of distributed mesh - MPI::Request *requestsInformation; - requestsInformation = new MPI::Request[ distributedMesh.getNeighborsCount() ]; + MPI_Request *requestsInformation; + requestsInformation = new MPI_Request[ distributedMesh.getNeighborsCount() ]; int neighCount = 0; // should this thread calculate again? 
if( neighbours[0] != -1 ) // LEFT { requestsInformation[neighCount++] = - MPI::ISend( &calculatedBefore, 1, neighbours[0], 0, MPI::AllGroup ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[0], 0, TNL::MPI::AllGroup() ); requestsInformation[neighCount++] = - MPI::IRecv( &calculateFromNeighbours[0], 1, neighbours[0], 0, MPI::AllGroup ); + TNL::MPI::Irecv( &calculateFromNeighbours[0], 1, neighbours[0], 0, TNL::MPI::AllGroup() ); } if( neighbours[1] != -1 ) // RIGHT { requestsInformation[neighCount++] = - MPI::ISend( &calculatedBefore, 1, neighbours[1], 0, MPI::AllGroup ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[1], 0, TNL::MPI::AllGroup() ); requestsInformation[neighCount++] = - MPI::IRecv( &calculateFromNeighbours[1], 1, neighbours[1], 0, MPI::AllGroup ); + TNL::MPI::Irecv( &calculateFromNeighbours[1], 1, neighbours[1], 0, TNL::MPI::AllGroup() ); } if( neighbours[2] != -1 ) //UP { requestsInformation[neighCount++] = - MPI::ISend( &calculatedBefore, 1, neighbours[2], 0, MPI::AllGroup ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[2], 0, TNL::MPI::AllGroup() ); requestsInformation[neighCount++] = - MPI::IRecv( &calculateFromNeighbours[2], 1, neighbours[2], 0, MPI::AllGroup ); + TNL::MPI::Irecv( &calculateFromNeighbours[2], 1, neighbours[2], 0, TNL::MPI::AllGroup() ); } if( neighbours[5] != -1 ) //DOWN { requestsInformation[neighCount++] = - MPI::ISend( &calculatedBefore, 1, neighbours[5], 0, MPI::AllGroup ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[5], 0, TNL::MPI::AllGroup() ); requestsInformation[neighCount++] = - MPI::IRecv( &calculateFromNeighbours[3], 1, neighbours[5], 0, MPI::AllGroup ); + TNL::MPI::Irecv( &calculateFromNeighbours[3], 1, neighbours[5], 0, TNL::MPI::AllGroup() ); } - MPI::WaitAll( requestsInformation, neighCount ); + TNL::MPI::Waitall( requestsInformation, neighCount ); - MPI::Allreduce( &calculatedBefore, &calculatedBefore, 1, MPI_LOR, MPI::AllGroup ); + TNL::MPI::Allreduce( &calculatedBefore, 
&calculatedBefore, 1, MPI_LOR, TNL::MPI::AllGroup() ); calculateMPIAgain = calculateFromNeighbours[0] || calculateFromNeighbours[1] || calculateFromNeighbours[2] || calculateFromNeighbours[3]; } diff --git a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod3D_impl.h b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod3D_impl.h index e7f82880c..21d0e5a67 100644 --- a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod3D_impl.h +++ b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod3D_impl.h @@ -15,13 +15,13 @@ #include #include +#include template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > -FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Anisotropy >:: FastSweepingMethod() : maxIterations( 1 ) { @@ -31,10 +31,9 @@ FastSweepingMethod() template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > const Index& -FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Anisotropy >:: getMaxIterations() const { @@ -43,10 +42,9 @@ getMaxIterations() const template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > void -FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Anisotropy >:: setMaxIterations( const IndexType& maxIterations ) { @@ -55,10 +53,9 @@ setMaxIterations( const IndexType& maxIterations ) template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > void -FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 3, Real, 
Device, Index >, Anisotropy >:: solve( const Meshes::DistributedMeshes::DistributedMesh< MeshType >& distributedMesh, const MeshPointer& mesh, MeshFunctionPointer& Aux, @@ -73,7 +70,7 @@ solve( const Meshes::DistributedMeshes::DistributedMesh< MeshType >& distributed // getting overlaps ( WITHOUT MPI SHOULD BE 0 ) StaticVector vecLowerOverlaps = 0; StaticVector vecUpperOverlaps = 0; - if( CommunicatorType::isDistributed() ) + if( TNL::MPI::GetSize() > 1 ) { //Distributed mesh for MPI overlaps (without MPI null pointer) vecLowerOverlaps = distributedMesh.getLowerOverlap(); @@ -363,7 +360,7 @@ solve( const Meshes::DistributedMeshes::DistributedMesh< MeshType >& distributed } #ifdef HAVE_MPI - if( CommunicatorType::isDistributed() ) + if( TNL::MPI::GetSize() > 1 ) { getInfoFromNeighbours( calculatedBefore, calculateMPIAgain, distributedMesh ); @@ -373,8 +370,10 @@ solve( const Meshes::DistributedMeshes::DistributedMesh< MeshType >& distributed } #endif - if( !CommunicatorType::isDistributed() ) // If we start the solver without MPI, we need calculatedBefore 0! - calculatedBefore = 0; //otherwise we would go throw the FSM code and CUDA FSM code again uselessly + // If we start the solver without MPI, we need calculatedBefore 0! 
+ // otherwise we would go throw the FSM code and CUDA FSM code again uselessly + if( TNL::MPI::GetSize() == 1 ) + calculatedBefore = 0; } //aux.write( "aux", "aux-8.vti" ); iteration++; @@ -387,10 +386,9 @@ solve( const Meshes::DistributedMeshes::DistributedMesh< MeshType >& distributed // PROTECTED FUNCTIONS: -template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > +template< typename Real, typename Device, typename Index, typename Anisotropy > bool -FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Anisotropy >:: goThroughSweep( const StaticVector boundsFrom, const StaticVector boundsTo, MeshFunctionType& aux, const InterfaceMapType& interfaceMap, const AnisotropyPointer& anisotropy ) @@ -431,72 +429,71 @@ goThroughSweep( const StaticVector boundsFrom, const StaticVector boundsTo, #ifdef HAVE_MPI -template< typename Real, typename Device, typename Index, - typename Communicator, typename Anisotropy > +template< typename Real, typename Device, typename Index, typename Anisotropy > void -FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Communicator, Anisotropy >:: +FastSweepingMethod< Meshes::Grid< 3, Real, Device, Index >, Anisotropy >:: getInfoFromNeighbours( int& calculatedBefore, int& calculateMPIAgain, const Meshes::DistributedMeshes::DistributedMesh< MeshType >& distributedMesh ) { int calculateFromNeighbours[6] = {0,0,0,0,0,0}; const int *neighbours = distributedMesh.getNeighbors(); // Getting neighbors of distributed mesh - MPI::Request *requestsInformation; - requestsInformation = new MPI::Request[ distributedMesh.getNeighborsCount() ]; + MPI_Request *requestsInformation; + requestsInformation = new MPI_Request[ distributedMesh.getNeighborsCount() ]; int neighCount = 0; // should this thread calculate again? 
if( neighbours[0] != -1 ) // WEST { requestsInformation[neighCount++] = - MPI::ISend( &calculatedBefore, 1, neighbours[0], 0, MPI::AllGroup ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[0], 0, TNL::MPI::AllGroup() ); requestsInformation[neighCount++] = - MPI::IRecv( &calculateFromNeighbours[0], 1, neighbours[0], 0, MPI::AllGroup ); + TNL::MPI::Irecv( &calculateFromNeighbours[0], 1, neighbours[0], 0, TNL::MPI::AllGroup() ); } if( neighbours[1] != -1 ) // EAST { requestsInformation[neighCount++] = - MPI::ISend( &calculatedBefore, 1, neighbours[1], 0, MPI::AllGroup ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[1], 0, TNL::MPI::AllGroup() ); requestsInformation[neighCount++] = - MPI::IRecv( &calculateFromNeighbours[1], 1, neighbours[1], 0, MPI::AllGroup ); + TNL::MPI::Irecv( &calculateFromNeighbours[1], 1, neighbours[1], 0, TNL::MPI::AllGroup() ); } if( neighbours[2] != -1 ) //NORTH { requestsInformation[neighCount++] = - MPI::ISend( &calculatedBefore, 1, neighbours[2], 0, MPI::AllGroup ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[2], 0, TNL::MPI::AllGroup() ); requestsInformation[neighCount++] = - MPI::IRecv( &calculateFromNeighbours[2], 1, neighbours[2], 0, MPI::AllGroup ); + TNL::MPI::Irecv( &calculateFromNeighbours[2], 1, neighbours[2], 0, TNL::MPI::AllGroup() ); } if( neighbours[5] != -1 ) //SOUTH { requestsInformation[neighCount++] = - MPI::ISend( &calculatedBefore, 1, neighbours[5], 0, MPI::AllGroup ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[5], 0, TNL::MPI::AllGroup() ); requestsInformation[neighCount++] = - MPI::IRecv( &calculateFromNeighbours[3], 1, neighbours[5], 0, MPI::AllGroup ); + TNL::MPI::Irecv( &calculateFromNeighbours[3], 1, neighbours[5], 0, TNL::MPI::AllGroup() ); } if( neighbours[8] != -1 ) // TOP { requestsInformation[neighCount++] = - MPI::ISend( &calculatedBefore, 1, neighbours[8], 0, MPI::AllGroup ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[8], 0, TNL::MPI::AllGroup() ); 
requestsInformation[neighCount++] = - MPI::IRecv( &calculateFromNeighbours[4], 1, neighbours[8], 0, MPI::AllGroup ); + TNL::MPI::Irecv( &calculateFromNeighbours[4], 1, neighbours[8], 0, TNL::MPI::AllGroup() ); } if( neighbours[17] != -1 ) //BOTTOM { requestsInformation[neighCount++] = - MPI::ISend( &calculatedBefore, 1, neighbours[17], 0, MPI::AllGroup ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[17], 0, TNL::MPI::AllGroup() ); requestsInformation[neighCount++] = - MPI::IRecv( &calculateFromNeighbours[5], 1, neighbours[17], 0, MPI::AllGroup ); + TNL::MPI::Irecv( &calculateFromNeighbours[5], 1, neighbours[17], 0, TNL::MPI::AllGroup() ); } - MPI::WaitAll( requestsInformation, neighCount ); + TNL::MPI::Waitall( requestsInformation, neighCount ); - MPI::Allreduce( &calculatedBefore, &calculatedBefore, 1, MPI_LOR, MPI::AllGroup ); + TNL::MPI::Allreduce( &calculatedBefore, &calculatedBefore, 1, MPI_LOR, TNL::MPI::AllGroup() ); calculateMPIAgain = calculateFromNeighbours[0] || calculateFromNeighbours[1] || calculateFromNeighbours[2] || calculateFromNeighbours[3] || calculateFromNeighbours[4] || calculateFromNeighbours[5]; diff --git a/src/Examples/flow-sw/navierStokes.h b/src/Examples/flow-sw/navierStokes.h index 0d37ad41c..d26045c91 100644 --- a/src/Examples/flow-sw/navierStokes.h +++ b/src/Examples/flow-sw/navierStokes.h @@ -55,8 +55,7 @@ template< typename Real, typename Index, typename MeshType, typename ConfigTag, - typename SolverStarter, - typename Communicator > + typename SolverStarter > class navierStokesSetter { public: @@ -82,17 +81,17 @@ class navierStokesSetter if( boundaryConditionsType == "cavity" ) { typedef BoundaryConditionsCavity< MeshType, Constant, Real, Index > BoundaryConditions; - typedef navierStokesProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef navierStokesProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return 
solverStarter.template run< Problem >( parameters ); } if( boundaryConditionsType == "boiler" ) { typedef BoundaryConditionsBoiler< MeshType, Constant, Real, Index > BoundaryConditions; - typedef navierStokesProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef navierStokesProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); - } + } return true;} diff --git a/src/Examples/flow-sw/navierStokesProblem.h b/src/Examples/flow-sw/navierStokesProblem.h index 2d9fca570..39bea30d6 100644 --- a/src/Examples/flow-sw/navierStokesProblem.h +++ b/src/Examples/flow-sw/navierStokesProblem.h @@ -23,27 +23,25 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > class navierStokesProblem: public PDEProblem< Mesh, - Communicator, typename InviscidOperators::RealType, typename Mesh::DeviceType, typename InviscidOperators::IndexType > { public: - + typedef typename InviscidOperators::RealType RealType; typedef typename Mesh::DeviceType DeviceType; typedef typename InviscidOperators::IndexType IndexType; - typedef PDEProblem< Mesh, Communicator, RealType, DeviceType, IndexType > BaseType; - + typedef PDEProblem< Mesh, RealType, DeviceType, IndexType > BaseType; + using typename BaseType::MeshType; using typename BaseType::MeshPointer; using typename BaseType::DofVectorType; using typename BaseType::DofVectorPointer; - static const int Dimensions = Mesh::getMeshDimension(); + static const int Dimensions = Mesh::getMeshDimension(); typedef Functions::MeshFunctionView< Mesh > MeshFunctionType; typedef CompressibleConservativeVariables< MeshType > ConservativeVariablesType; @@ -54,7 +52,6 @@ class navierStokesProblem: typedef Pointers::SharedPointer< InviscidOperators > 
InviscidOperatorsPointer; typedef Pointers::SharedPointer< BoundaryCondition > BoundaryConditionPointer; typedef Pointers::SharedPointer< RightHandSide, DeviceType > RightHandSidePointer; - using CommunicatorType = Communicator; String getPrologHeader() const; @@ -82,7 +79,7 @@ class navierStokesProblem: const RealType& tau, DofVectorPointer& _u, DofVectorPointer& _fu ); - + void applyBoundaryConditions( const RealType& time, DofVectorPointer& dofs ) { @@ -103,20 +100,20 @@ class navierStokesProblem: protected: InviscidOperatorsPointer inviscidOperatorsPointer; - + BoundaryConditionPointer boundaryConditionPointer; RightHandSidePointer rightHandSidePointer; - + ConservativeVariablesPointer conservativeVariables, conservativeVariablesRHS; - + VelocityFieldPointer velocity; MeshFunctionPointer pressure; - + RealType gamma; RealType speedIncrement; RealType cavitySpeed; - RealType speedIncrementUntil; + RealType speedIncrementUntil; }; } // namespace TNL diff --git a/src/Examples/flow-sw/navierStokesProblem_impl.h b/src/Examples/flow-sw/navierStokesProblem_impl.h index e42c80894..24aaf5a80 100644 --- a/src/Examples/flow-sw/navierStokesProblem_impl.h +++ b/src/Examples/flow-sw/navierStokesProblem_impl.h @@ -33,10 +33,9 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > String -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getPrologHeader() const { return String( "Inviscid flow solver" ); @@ -45,10 +44,9 @@ getPrologHeader() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: 
+navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) const { /**** @@ -60,10 +58,9 @@ writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) cons template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setup( const Config::ParameterContainer& parameters, const String& prefix ) { @@ -80,10 +77,9 @@ setup( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > -typename navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >::IndexType -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: + typename InviscidOperators > +typename navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >::IndexType +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getDofs() const { /**** @@ -96,10 +92,9 @@ getDofs() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: bindDofs( DofVectorPointer& dofVector ) { this->conservativeVariables->bind( this->getMesh(), *dofVector ); @@ -108,10 +103,9 @@ bindDofs( DofVectorPointer& dofVector ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - 
typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setInitialCondition( const Config::ParameterContainer& parameters, DofVectorPointer& dofs ) { @@ -136,11 +130,10 @@ setInitialCondition( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > template< typename Matrix > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setupLinearSystem( Matrix& matrix ) { /* const IndexType dofs = this->getDofs( mesh ); @@ -162,10 +155,9 @@ setupLinearSystem( Matrix& matrix ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: makeSnapshot( const RealType& time, const IndexType& step, DofVectorPointer& dofs ) @@ -201,10 +193,9 @@ makeSnapshot( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getExplicitUpdate( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -279,7 +270,7 @@ getExplicitUpdate( const RealType& time, 
explicitUpdaterContinuity.setDifferentialOperator( this->inviscidOperatorsPointer->getContinuityOperator() ); explicitUpdaterContinuity.setBoundaryConditions( this->boundaryConditionPointer->getDensityBoundaryCondition() ); explicitUpdaterContinuity.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterContinuity.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterContinuity.template update< typename Mesh::Cell >( time, tau, mesh, this->conservativeVariables->getDensity(), this->conservativeVariablesRHS->getDensity() ); @@ -291,7 +282,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumX.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumXOperator() ); explicitUpdaterMomentumX.setBoundaryConditions( this->boundaryConditionPointer->getMomentumXBoundaryCondition() ); explicitUpdaterMomentumX.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumX.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumX.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 0 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 0 ] ); //, fuRhoVelocityX ); @@ -302,7 +293,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumY.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumYOperator() ); explicitUpdaterMomentumY.setBoundaryConditions( this->boundaryConditionPointer->getMomentumYBoundaryCondition() ); explicitUpdaterMomentumY.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumY.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumY.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 1 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 1 ] ); //, fuRhoVelocityX ); @@ -314,7 +305,7 @@ getExplicitUpdate( const RealType& time, 
explicitUpdaterMomentumZ.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumZOperator() ); explicitUpdaterMomentumZ.setBoundaryConditions( this->boundaryConditionPointer->getMomentumZBoundaryCondition() ); explicitUpdaterMomentumZ.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumZ.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumZ.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 2 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 2 ] ); //, fuRhoVelocityX ); @@ -328,7 +319,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterEnergy.setDifferentialOperator( this->inviscidOperatorsPointer->getEnergyOperator() ); explicitUpdaterEnergy.setBoundaryConditions( this->boundaryConditionPointer->getEnergyBoundaryCondition() ); explicitUpdaterEnergy.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterEnergy.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterEnergy.template update< typename Mesh::Cell >( time, tau, mesh, this->conservativeVariablesRHS->getEnergy(), // uRhoVelocityX, this->conservativeVariablesRHS->getEnergy() ); //, fuRhoVelocityX ); @@ -345,11 +336,10 @@ getExplicitUpdate( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > template< typename Matrix > void -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: assemblyLinearSystem( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -380,10 +370,9 @@ assemblyLinearSystem( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename 
InviscidOperators > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: postIterate( const RealType& time, const RealType& tau, DofVectorPointer& dofs ) diff --git a/src/Examples/flow-vl/navierStokes.h b/src/Examples/flow-vl/navierStokes.h index 0d37ad41c..d26045c91 100644 --- a/src/Examples/flow-vl/navierStokes.h +++ b/src/Examples/flow-vl/navierStokes.h @@ -55,8 +55,7 @@ template< typename Real, typename Index, typename MeshType, typename ConfigTag, - typename SolverStarter, - typename Communicator > + typename SolverStarter > class navierStokesSetter { public: @@ -82,17 +81,17 @@ class navierStokesSetter if( boundaryConditionsType == "cavity" ) { typedef BoundaryConditionsCavity< MeshType, Constant, Real, Index > BoundaryConditions; - typedef navierStokesProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef navierStokesProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } if( boundaryConditionsType == "boiler" ) { typedef BoundaryConditionsBoiler< MeshType, Constant, Real, Index > BoundaryConditions; - typedef navierStokesProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef navierStokesProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); - } + } return true;} diff --git a/src/Examples/flow-vl/navierStokesProblem.h b/src/Examples/flow-vl/navierStokesProblem.h index f0a979325..537986093 100644 --- a/src/Examples/flow-vl/navierStokesProblem.h +++ b/src/Examples/flow-vl/navierStokesProblem.h @@ -23,28 +23,26 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, 
typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > class navierStokesProblem: public PDEProblem< Mesh, - Communicator, typename InviscidOperators::RealType, typename Mesh::DeviceType, typename InviscidOperators::IndexType > { public: - + typedef typename InviscidOperators::RealType RealType; typedef typename Mesh::DeviceType DeviceType; typedef typename InviscidOperators::IndexType IndexType; - typedef PDEProblem< Mesh, Communicator, RealType, DeviceType, IndexType > BaseType; - + typedef PDEProblem< Mesh, RealType, DeviceType, IndexType > BaseType; + using typename BaseType::MeshType; using typename BaseType::MeshPointer; using typename BaseType::DofVectorType; using typename BaseType::DofVectorPointer; - static const int Dimensions = Mesh::getMeshDimension(); + static const int Dimensions = Mesh::getMeshDimension(); typedef Functions::MeshFunctionView< Mesh > MeshFunctionType; typedef CompressibleConservativeVariables< MeshType > ConservativeVariablesType; @@ -55,7 +53,6 @@ class navierStokesProblem: typedef Pointers::SharedPointer< InviscidOperators > InviscidOperatorsPointer; typedef Pointers::SharedPointer< BoundaryCondition > BoundaryConditionPointer; typedef Pointers::SharedPointer< RightHandSide, DeviceType > RightHandSidePointer; - using CommunicatorType = Communicator; String getPrologHeader() const; @@ -83,7 +80,7 @@ class navierStokesProblem: const RealType& tau, DofVectorPointer& _u, DofVectorPointer& _fu ); - + void applyBoundaryConditions( const RealType& time, DofVectorPointer& dofs ) { @@ -104,20 +101,20 @@ class navierStokesProblem: protected: InviscidOperatorsPointer inviscidOperatorsPointer; - + BoundaryConditionPointer boundaryConditionPointer; RightHandSidePointer rightHandSidePointer; - + ConservativeVariablesPointer conservativeVariables, conservativeVariablesRHS; - + VelocityFieldPointer velocity; MeshFunctionPointer pressure; - + RealType gamma; RealType speedIncrement; RealType 
cavitySpeed; - RealType speedIncrementUntil; + RealType speedIncrementUntil; }; } // namespace TNL diff --git a/src/Examples/flow-vl/navierStokesProblem_impl.h b/src/Examples/flow-vl/navierStokesProblem_impl.h index e42c80894..24aaf5a80 100644 --- a/src/Examples/flow-vl/navierStokesProblem_impl.h +++ b/src/Examples/flow-vl/navierStokesProblem_impl.h @@ -33,10 +33,9 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > String -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getPrologHeader() const { return String( "Inviscid flow solver" ); @@ -45,10 +44,9 @@ getPrologHeader() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) const { /**** @@ -60,10 +58,9 @@ writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) cons template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setup( const Config::ParameterContainer& parameters, const String& prefix ) { @@ -80,10 +77,9 @@ setup( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, 
- typename Communicator > -typename navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >::IndexType -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: + typename InviscidOperators > +typename navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >::IndexType +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getDofs() const { /**** @@ -96,10 +92,9 @@ getDofs() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: bindDofs( DofVectorPointer& dofVector ) { this->conservativeVariables->bind( this->getMesh(), *dofVector ); @@ -108,10 +103,9 @@ bindDofs( DofVectorPointer& dofVector ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setInitialCondition( const Config::ParameterContainer& parameters, DofVectorPointer& dofs ) { @@ -136,11 +130,10 @@ setInitialCondition( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > template< typename Matrix > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setupLinearSystem( Matrix& matrix ) { /* const 
IndexType dofs = this->getDofs( mesh ); @@ -162,10 +155,9 @@ setupLinearSystem( Matrix& matrix ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: makeSnapshot( const RealType& time, const IndexType& step, DofVectorPointer& dofs ) @@ -201,10 +193,9 @@ makeSnapshot( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getExplicitUpdate( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -279,7 +270,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterContinuity.setDifferentialOperator( this->inviscidOperatorsPointer->getContinuityOperator() ); explicitUpdaterContinuity.setBoundaryConditions( this->boundaryConditionPointer->getDensityBoundaryCondition() ); explicitUpdaterContinuity.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterContinuity.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterContinuity.template update< typename Mesh::Cell >( time, tau, mesh, this->conservativeVariables->getDensity(), this->conservativeVariablesRHS->getDensity() ); @@ -291,7 +282,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumX.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumXOperator() ); explicitUpdaterMomentumX.setBoundaryConditions( this->boundaryConditionPointer->getMomentumXBoundaryCondition() ); explicitUpdaterMomentumX.setRightHandSide( this->rightHandSidePointer 
); - explicitUpdaterMomentumX.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumX.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 0 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 0 ] ); //, fuRhoVelocityX ); @@ -302,7 +293,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumY.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumYOperator() ); explicitUpdaterMomentumY.setBoundaryConditions( this->boundaryConditionPointer->getMomentumYBoundaryCondition() ); explicitUpdaterMomentumY.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumY.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumY.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 1 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 1 ] ); //, fuRhoVelocityX ); @@ -314,7 +305,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumZ.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumZOperator() ); explicitUpdaterMomentumZ.setBoundaryConditions( this->boundaryConditionPointer->getMomentumZBoundaryCondition() ); explicitUpdaterMomentumZ.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumZ.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumZ.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 2 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 2 ] ); //, fuRhoVelocityX ); @@ -328,7 +319,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterEnergy.setDifferentialOperator( this->inviscidOperatorsPointer->getEnergyOperator() ); explicitUpdaterEnergy.setBoundaryConditions( this->boundaryConditionPointer->getEnergyBoundaryCondition() ); explicitUpdaterEnergy.setRightHandSide( 
this->rightHandSidePointer ); - explicitUpdaterEnergy.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterEnergy.template update< typename Mesh::Cell >( time, tau, mesh, this->conservativeVariablesRHS->getEnergy(), // uRhoVelocityX, this->conservativeVariablesRHS->getEnergy() ); //, fuRhoVelocityX ); @@ -345,11 +336,10 @@ getExplicitUpdate( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > template< typename Matrix > void -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: assemblyLinearSystem( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -380,10 +370,9 @@ assemblyLinearSystem( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: postIterate( const RealType& time, const RealType& tau, DofVectorPointer& dofs ) diff --git a/src/Examples/flow/navierStokes.h b/src/Examples/flow/navierStokes.h index 5b37345bc..ad2621349 100644 --- a/src/Examples/flow/navierStokes.h +++ b/src/Examples/flow/navierStokes.h @@ -55,8 +55,7 @@ template< typename Real, typename Index, typename MeshType, typename ConfigTag, - typename SolverStarter, - typename Communicator > + typename SolverStarter > class navierStokesSetter { public: @@ -83,17 +82,17 @@ class navierStokesSetter if( boundaryConditionsType == "cavity" ) { typedef BoundaryConditionsCavity< MeshType, Constant, Real, Index > BoundaryConditions; - typedef navierStokesProblem< MeshType, BoundaryConditions, 
RightHandSide, ApproximateOperator, Communicator > Problem; + typedef navierStokesProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } if( boundaryConditionsType == "boiler" ) { typedef BoundaryConditionsBoiler< MeshType, Constant, Real, Index > BoundaryConditions; - typedef navierStokesProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef navierStokesProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); - } + } return true;} diff --git a/src/Examples/flow/navierStokesProblem.h b/src/Examples/flow/navierStokesProblem.h index cf68ab805..4d9785c65 100644 --- a/src/Examples/flow/navierStokesProblem.h +++ b/src/Examples/flow/navierStokesProblem.h @@ -23,28 +23,26 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > class navierStokesProblem: public PDEProblem< Mesh, - Communicator, typename InviscidOperators::RealType, typename Mesh::DeviceType, typename InviscidOperators::IndexType > { public: - + typedef typename InviscidOperators::RealType RealType; typedef typename Mesh::DeviceType DeviceType; typedef typename InviscidOperators::IndexType IndexType; - typedef PDEProblem< Mesh, Communicator, RealType, DeviceType, IndexType > BaseType; - + typedef PDEProblem< Mesh, RealType, DeviceType, IndexType > BaseType; + using typename BaseType::MeshType; using typename BaseType::MeshPointer; using typename BaseType::DofVectorType; using typename BaseType::DofVectorPointer; - static const int Dimensions = Mesh::getMeshDimension(); + static const int Dimensions = Mesh::getMeshDimension(); typedef Functions::MeshFunctionView< Mesh > MeshFunctionType; typedef 
CompressibleConservativeVariables< MeshType > ConservativeVariablesType; @@ -55,7 +53,6 @@ class navierStokesProblem: typedef Pointers::SharedPointer< InviscidOperators > InviscidOperatorsPointer; typedef Pointers::SharedPointer< BoundaryCondition > BoundaryConditionPointer; typedef Pointers::SharedPointer< RightHandSide, DeviceType > RightHandSidePointer; - using CommunicatorType = Communicator; String getPrologHeader() const; @@ -83,7 +80,7 @@ class navierStokesProblem: const RealType& tau, DofVectorPointer& _u, DofVectorPointer& _fu ); - + void applyBoundaryConditions( const RealType& time, DofVectorPointer& dofs ) { @@ -104,16 +101,16 @@ class navierStokesProblem: protected: InviscidOperatorsPointer inviscidOperatorsPointer; - + BoundaryConditionPointer boundaryConditionPointer; RightHandSidePointer rightHandSidePointer; - + ConservativeVariablesPointer conservativeVariables, conservativeVariablesRHS; - + VelocityFieldPointer velocity; MeshFunctionPointer pressure; - + RealType gamma; RealType speedIncrement; RealType cavitySpeed; diff --git a/src/Examples/flow/navierStokesProblem_impl.h b/src/Examples/flow/navierStokesProblem_impl.h index c4c5795c8..c02927f90 100644 --- a/src/Examples/flow/navierStokesProblem_impl.h +++ b/src/Examples/flow/navierStokesProblem_impl.h @@ -45,10 +45,9 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > String -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getPrologHeader() const { return String( "Inviscid flow solver" ); @@ -57,10 +56,9 @@ getPrologHeader() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -navierStokesProblem< Mesh, 
BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) const { /**** @@ -72,10 +70,9 @@ writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) cons template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setup( const Config::ParameterContainer& parameters, const String& prefix ) { @@ -92,10 +89,9 @@ setup( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > -typename navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >::IndexType -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: + typename InviscidOperators > +typename navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >::IndexType +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getDofs() const { /**** @@ -108,10 +104,9 @@ getDofs() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: bindDofs( DofVectorPointer& dofVector ) { this->conservativeVariables->bind( this->getMesh(), *dofVector ); @@ -120,10 +115,9 @@ bindDofs( DofVectorPointer& dofVector ) template< 
typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setInitialCondition( const Config::ParameterContainer& parameters, DofVectorPointer& dofs ) { @@ -148,11 +142,10 @@ setInitialCondition( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > template< typename Matrix > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setupLinearSystem( Matrix& matrix ) { /* const IndexType dofs = this->getDofs( mesh ); @@ -174,10 +167,9 @@ setupLinearSystem( Matrix& matrix ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: makeSnapshot( const RealType& time, const IndexType& step, DofVectorPointer& dofs ) @@ -213,10 +205,9 @@ makeSnapshot( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getExplicitUpdate( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -286,7 +277,7 @@ 
getExplicitUpdate( const RealType& time, explicitUpdaterContinuity.setDifferentialOperator( this->inviscidOperatorsPointer->getContinuityOperator() ); explicitUpdaterContinuity.setBoundaryConditions( this->boundaryConditionPointer->getDensityBoundaryCondition() ); explicitUpdaterContinuity.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterContinuity.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterContinuity.template update< typename Mesh::Cell >( time, tau, mesh, this->conservativeVariables->getDensity(), this->conservativeVariablesRHS->getDensity() ); @@ -297,7 +288,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumX.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumXOperator() ); explicitUpdaterMomentumX.setBoundaryConditions( this->boundaryConditionPointer->getMomentumXBoundaryCondition() ); explicitUpdaterMomentumX.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumX.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumX.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 0 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 0 ] ); //, fuRhoVelocityX ); @@ -308,7 +299,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumY.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumYOperator() ); explicitUpdaterMomentumY.setBoundaryConditions( this->boundaryConditionPointer->getMomentumYBoundaryCondition() ); explicitUpdaterMomentumY.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumY.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumY.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 1 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 1 ] ); //, fuRhoVelocityX ); @@ -320,7 +311,7 @@ getExplicitUpdate( 
const RealType& time, explicitUpdaterMomentumZ.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumZOperator() ); explicitUpdaterMomentumZ.setBoundaryConditions( this->boundaryConditionPointer->getMomentumZBoundaryCondition() ); explicitUpdaterMomentumZ.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumZ.template update< typename Mesh::Cell, CommunicatorType >( time, tau, mesh, + explicitUpdaterMomentumZ.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 2 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 2 ] ); //, fuRhoVelocityX ); } @@ -332,7 +323,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterEnergy.setDifferentialOperator( this->inviscidOperatorsPointer->getEnergyOperator() ); explicitUpdaterEnergy.setBoundaryConditions( this->boundaryConditionPointer->getEnergyBoundaryCondition() ); explicitUpdaterEnergy.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterEnergy.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterEnergy.template update< typename Mesh::Cell >( time, tau, mesh, this->conservativeVariables->getEnergy(), // uRhoVelocityX, this->conservativeVariablesRHS->getEnergy() ); //, fuRhoVelocityX ); @@ -349,11 +340,10 @@ getExplicitUpdate( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > template< typename Matrix > void -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: assemblyLinearSystem( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -384,10 +374,9 @@ assemblyLinearSystem( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - 
typename Communicator > + typename InviscidOperators > bool -navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +navierStokesProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: postIterate( const RealType& time, const RealType& tau, DofVectorPointer& dofs ) diff --git a/src/Examples/heat-equation/tnl-heat-equation-eoc.h b/src/Examples/heat-equation/tnl-heat-equation-eoc.h index 4d585d941..4fe7f01c9 100644 --- a/src/Examples/heat-equation/tnl-heat-equation-eoc.h +++ b/src/Examples/heat-equation/tnl-heat-equation-eoc.h @@ -43,8 +43,7 @@ template< typename Real, typename Index, typename MeshType, typename MeshConfig, - typename SolverStarter, - typename CommunicatorType > + typename SolverStarter > class heatEquationSetter { public: @@ -64,7 +63,7 @@ class heatEquationSetter typedef HeatEquationEocRhs< ExactOperator, TestFunction > RightHandSide; typedef Containers::StaticVector < MeshType::getMeshDimension(), Real > Point; typedef Operators::DirichletBoundaryConditions< MeshType, TestFunction, Dimension, Real, Index > BoundaryConditions; - typedef HeatEquationEocProblem< MeshType, BoundaryConditions, RightHandSide, CommunicatorType, ApproximateOperator > Solver; + typedef HeatEquationEocProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Solver; SolverStarter solverStarter; return solverStarter.template run< Solver >( parameters ); }; diff --git a/src/Examples/heat-equation/tnl-heat-equation.h b/src/Examples/heat-equation/tnl-heat-equation.h index d4e3b1c93..429f28906 100644 --- a/src/Examples/heat-equation/tnl-heat-equation.h +++ b/src/Examples/heat-equation/tnl-heat-equation.h @@ -55,8 +55,7 @@ template< typename Real, typename Index, typename MeshType, typename MeshConfig, - typename SolverStarter, - typename CommunicatorType > + typename SolverStarter > class heatEquationSetter { public: @@ -78,12 +77,12 @@ class heatEquationSetter if( boundaryConditionsType == "dirichlet" ) 
{ typedef Operators::DirichletBoundaryConditions< MeshType, Constant > BoundaryConditions; - typedef HeatEquationProblem< MeshType, BoundaryConditions, RightHandSide,CommunicatorType, ApproximateOperator > Problem; + typedef HeatEquationProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } typedef Operators::NeumannBoundaryConditions< MeshType, Constant, Real, Index > BoundaryConditions; - typedef HeatEquationProblem< MeshType, BoundaryConditions, RightHandSide, CommunicatorType, ApproximateOperator > Problem; + typedef HeatEquationProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } @@ -91,12 +90,12 @@ class heatEquationSetter if( boundaryConditionsType == "dirichlet" ) { typedef Operators::DirichletBoundaryConditions< MeshType, MeshFunction > BoundaryConditions; - typedef HeatEquationProblem< MeshType, BoundaryConditions, RightHandSide,CommunicatorType, ApproximateOperator > Problem; + typedef HeatEquationProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } typedef Operators::NeumannBoundaryConditions< MeshType, MeshFunction, Real, Index > BoundaryConditions; - typedef HeatEquationProblem< MeshType, BoundaryConditions, RightHandSide,CommunicatorType, ApproximateOperator > Problem; + typedef HeatEquationProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); }; diff --git a/src/Examples/inviscid-flow-sw/euler.h b/src/Examples/inviscid-flow-sw/euler.h index d5e13808d..7f7923213 100644 --- a/src/Examples/inviscid-flow-sw/euler.h +++ b/src/Examples/inviscid-flow-sw/euler.h @@ -49,8 +49,7 @@ 
template< typename Real, typename Index, typename MeshType, typename ConfigTag, - typename SolverStarter, - typename Communicator > + typename SolverStarter > class eulerSetter { public: @@ -78,12 +77,12 @@ class eulerSetter if( boundaryConditionsType == "dirichlet" ) { typedef Operators::DirichletBoundaryConditions< MeshType, Constant, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } typedef Operators::NeumannBoundaryConditions< MeshType, Constant, Real, Index > BoundaryConditions; - typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } @@ -91,14 +90,14 @@ class eulerSetter if( boundaryConditionsType == "dirichlet" ) { typedef Operators::DirichletBoundaryConditions< MeshType, MeshFunction, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } if( boundaryConditionsType == "neumann" ) { typedef Operators::NeumannBoundaryConditions< MeshType, MeshFunction, Real, Index > BoundaryConditions; - typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; 
SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } diff --git a/src/Examples/inviscid-flow-sw/eulerProblem.h b/src/Examples/inviscid-flow-sw/eulerProblem.h index 5de25b8c0..2a5ef790c 100644 --- a/src/Examples/inviscid-flow-sw/eulerProblem.h +++ b/src/Examples/inviscid-flow-sw/eulerProblem.h @@ -23,28 +23,26 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > class eulerProblem: public PDEProblem< Mesh, - Communicator, typename InviscidOperators::RealType, typename Mesh::DeviceType, typename InviscidOperators::IndexType > { public: - + typedef typename InviscidOperators::RealType RealType; typedef typename Mesh::DeviceType DeviceType; typedef typename InviscidOperators::IndexType IndexType; - typedef PDEProblem< Mesh, Communicator, RealType, DeviceType, IndexType > BaseType; - + typedef PDEProblem< Mesh, RealType, DeviceType, IndexType > BaseType; + using typename BaseType::MeshType; using typename BaseType::MeshPointer; using typename BaseType::DofVectorType; using typename BaseType::DofVectorPointer; - static const int Dimensions = Mesh::getMeshDimension(); + static const int Dimensions = Mesh::getMeshDimension(); typedef Functions::MeshFunctionView< Mesh > MeshFunctionType; typedef CompressibleConservativeVariables< MeshType > ConservativeVariablesType; @@ -55,7 +53,6 @@ class eulerProblem: typedef Pointers::SharedPointer< InviscidOperators > InviscidOperatorsPointer; typedef Pointers::SharedPointer< BoundaryCondition > BoundaryConditionPointer; typedef Pointers::SharedPointer< RightHandSide, DeviceType > RightHandSidePointer; - using CommunicatorType = Communicator; String getPrologHeader() const; @@ -83,7 +80,7 @@ class eulerProblem: const RealType& tau, DofVectorPointer& _u, DofVectorPointer& _fu ); - + void applyBoundaryConditions( const RealType& time, DofVectorPointer& dofs ) { @@ -104,17 
+101,17 @@ class eulerProblem: protected: InviscidOperatorsPointer inviscidOperatorsPointer; - + BoundaryConditionPointer boundaryConditionPointer; RightHandSidePointer rightHandSidePointer; - + ConservativeVariablesPointer conservativeVariables, conservativeVariablesRHS; - + VelocityFieldPointer velocity; MeshFunctionPointer pressure; - - RealType gamma; + + RealType gamma; }; } // namespace TNL diff --git a/src/Examples/inviscid-flow-sw/eulerProblem_impl.h b/src/Examples/inviscid-flow-sw/eulerProblem_impl.h index f56fb295a..86d4201c6 100644 --- a/src/Examples/inviscid-flow-sw/eulerProblem_impl.h +++ b/src/Examples/inviscid-flow-sw/eulerProblem_impl.h @@ -33,10 +33,9 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > String -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getPrologHeader() const { return String( "Inviscid flow solver" ); @@ -45,10 +44,9 @@ getPrologHeader() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) const { /**** @@ -60,10 +58,9 @@ writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) cons template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, 
InviscidOperators >:: setup( const Config::ParameterContainer& parameters, const String& prefix ) { @@ -80,10 +77,9 @@ setup( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > -typename eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >::IndexType -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: + typename InviscidOperators > +typename eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >::IndexType +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getDofs() const { /**** @@ -96,10 +92,9 @@ getDofs() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: bindDofs( DofVectorPointer& dofVector ) { this->conservativeVariables->bind( this->getMesh(), *dofVector ); @@ -108,10 +103,9 @@ bindDofs( DofVectorPointer& dofVector ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setInitialCondition( const Config::ParameterContainer& parameters, DofVectorPointer& dofs ) { @@ -133,11 +127,10 @@ setInitialCondition( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > template< typename Matrix > bool 
-eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setupLinearSystem( Matrix& matrix ) { /* const IndexType dofs = this->getDofs( mesh ); @@ -159,10 +152,9 @@ setupLinearSystem( Matrix& matrix ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: makeSnapshot( const RealType& time, const IndexType& step, DofVectorPointer& dofs ) @@ -198,10 +190,9 @@ makeSnapshot( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getExplicitUpdate( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -251,7 +242,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterContinuity.setDifferentialOperator( this->inviscidOperatorsPointer->getContinuityOperator() ); explicitUpdaterContinuity.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterContinuity.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterContinuity.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterContinuity.template update< typename Mesh::Cell >( time, tau, mesh, this->conservativeVariables->getDensity(), this->conservativeVariablesRHS->getDensity() ); @@ -263,7 +254,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumX.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumXOperator() ); 
explicitUpdaterMomentumX.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterMomentumX.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumX.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumX.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 0 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 0 ] ); //, fuRhoVelocityX ); @@ -274,7 +265,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumY.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumYOperator() ); explicitUpdaterMomentumY.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterMomentumY.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumY.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumY.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 1 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 1 ] ); //, fuRhoVelocityX ); @@ -286,7 +277,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumZ.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumZOperator() ); explicitUpdaterMomentumZ.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterMomentumZ.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumZ.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumZ.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 2 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 2 ] ); //, fuRhoVelocityX ); @@ -300,7 +291,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterEnergy.setDifferentialOperator( this->inviscidOperatorsPointer->getEnergyOperator() ); explicitUpdaterEnergy.setBoundaryConditions( 
this->boundaryConditionPointer ); explicitUpdaterEnergy.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterEnergy.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterEnergy.template update< typename Mesh::Cell >( time, tau, mesh, this->conservativeVariablesRHS->getEnergy(), // uRhoVelocityX, this->conservativeVariablesRHS->getEnergy() ); //, fuRhoVelocityX ); @@ -317,11 +308,10 @@ getExplicitUpdate( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > template< typename Matrix > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: assemblyLinearSystem( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -352,10 +342,9 @@ assemblyLinearSystem( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: postIterate( const RealType& time, const RealType& tau, DofVectorPointer& dofs ) diff --git a/src/Examples/inviscid-flow-vl/euler.h b/src/Examples/inviscid-flow-vl/euler.h index d5e13808d..7f7923213 100644 --- a/src/Examples/inviscid-flow-vl/euler.h +++ b/src/Examples/inviscid-flow-vl/euler.h @@ -49,8 +49,7 @@ template< typename Real, typename Index, typename MeshType, typename ConfigTag, - typename SolverStarter, - typename Communicator > + typename SolverStarter > class eulerSetter { public: @@ -78,12 +77,12 @@ class eulerSetter if( boundaryConditionsType == "dirichlet" ) { typedef Operators::DirichletBoundaryConditions< MeshType, Constant, MeshType::getMeshDimension(), 
Real, Index > BoundaryConditions; - typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } typedef Operators::NeumannBoundaryConditions< MeshType, Constant, Real, Index > BoundaryConditions; - typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } @@ -91,14 +90,14 @@ class eulerSetter if( boundaryConditionsType == "dirichlet" ) { typedef Operators::DirichletBoundaryConditions< MeshType, MeshFunction, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } if( boundaryConditionsType == "neumann" ) { typedef Operators::NeumannBoundaryConditions< MeshType, MeshFunction, Real, Index > BoundaryConditions; - typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator, Communicator > Problem; + typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } diff --git a/src/Examples/inviscid-flow-vl/eulerProblem.h b/src/Examples/inviscid-flow-vl/eulerProblem.h index 5de25b8c0..2a5ef790c 100644 --- a/src/Examples/inviscid-flow-vl/eulerProblem.h +++ b/src/Examples/inviscid-flow-vl/eulerProblem.h @@ -23,28 +23,26 @@ namespace TNL { 
template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > class eulerProblem: public PDEProblem< Mesh, - Communicator, typename InviscidOperators::RealType, typename Mesh::DeviceType, typename InviscidOperators::IndexType > { public: - + typedef typename InviscidOperators::RealType RealType; typedef typename Mesh::DeviceType DeviceType; typedef typename InviscidOperators::IndexType IndexType; - typedef PDEProblem< Mesh, Communicator, RealType, DeviceType, IndexType > BaseType; - + typedef PDEProblem< Mesh, RealType, DeviceType, IndexType > BaseType; + using typename BaseType::MeshType; using typename BaseType::MeshPointer; using typename BaseType::DofVectorType; using typename BaseType::DofVectorPointer; - static const int Dimensions = Mesh::getMeshDimension(); + static const int Dimensions = Mesh::getMeshDimension(); typedef Functions::MeshFunctionView< Mesh > MeshFunctionType; typedef CompressibleConservativeVariables< MeshType > ConservativeVariablesType; @@ -55,7 +53,6 @@ class eulerProblem: typedef Pointers::SharedPointer< InviscidOperators > InviscidOperatorsPointer; typedef Pointers::SharedPointer< BoundaryCondition > BoundaryConditionPointer; typedef Pointers::SharedPointer< RightHandSide, DeviceType > RightHandSidePointer; - using CommunicatorType = Communicator; String getPrologHeader() const; @@ -83,7 +80,7 @@ class eulerProblem: const RealType& tau, DofVectorPointer& _u, DofVectorPointer& _fu ); - + void applyBoundaryConditions( const RealType& time, DofVectorPointer& dofs ) { @@ -104,17 +101,17 @@ class eulerProblem: protected: InviscidOperatorsPointer inviscidOperatorsPointer; - + BoundaryConditionPointer boundaryConditionPointer; RightHandSidePointer rightHandSidePointer; - + ConservativeVariablesPointer conservativeVariables, conservativeVariablesRHS; - + VelocityFieldPointer velocity; MeshFunctionPointer pressure; - - RealType gamma; + + 
RealType gamma; }; } // namespace TNL diff --git a/src/Examples/inviscid-flow-vl/eulerProblem_impl.h b/src/Examples/inviscid-flow-vl/eulerProblem_impl.h index f56fb295a..86d4201c6 100644 --- a/src/Examples/inviscid-flow-vl/eulerProblem_impl.h +++ b/src/Examples/inviscid-flow-vl/eulerProblem_impl.h @@ -33,10 +33,9 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > String -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getPrologHeader() const { return String( "Inviscid flow solver" ); @@ -45,10 +44,9 @@ getPrologHeader() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) const { /**** @@ -60,10 +58,9 @@ writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) cons template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setup( const Config::ParameterContainer& parameters, const String& prefix ) { @@ -80,10 +77,9 @@ setup( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > -typename eulerProblem< Mesh, BoundaryCondition, RightHandSide, 
InviscidOperators, Communicator >::IndexType -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: + typename InviscidOperators > +typename eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >::IndexType +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getDofs() const { /**** @@ -96,10 +92,9 @@ getDofs() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: bindDofs( DofVectorPointer& dofVector ) { this->conservativeVariables->bind( this->getMesh(), *dofVector ); @@ -108,10 +103,9 @@ bindDofs( DofVectorPointer& dofVector ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setInitialCondition( const Config::ParameterContainer& parameters, DofVectorPointer& dofs ) { @@ -133,11 +127,10 @@ setInitialCondition( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > template< typename Matrix > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setupLinearSystem( Matrix& matrix ) { /* const IndexType dofs = this->getDofs( mesh ); @@ -159,10 +152,9 @@ setupLinearSystem( Matrix& matrix ) template< typename Mesh, typename BoundaryCondition, typename 
RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: makeSnapshot( const RealType& time, const IndexType& step, DofVectorPointer& dofs ) @@ -198,10 +190,9 @@ makeSnapshot( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getExplicitUpdate( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -251,7 +242,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterContinuity.setDifferentialOperator( this->inviscidOperatorsPointer->getContinuityOperator() ); explicitUpdaterContinuity.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterContinuity.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterContinuity.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterContinuity.template update< typename Mesh::Cell >( time, tau, mesh, this->conservativeVariables->getDensity(), this->conservativeVariablesRHS->getDensity() ); @@ -263,7 +254,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumX.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumXOperator() ); explicitUpdaterMomentumX.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterMomentumX.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumX.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumX.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 0 ], // uRhoVelocityX, ( 
*this->conservativeVariablesRHS->getMomentum() )[ 0 ] ); //, fuRhoVelocityX ); @@ -274,7 +265,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumY.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumYOperator() ); explicitUpdaterMomentumY.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterMomentumY.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumY.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumY.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 1 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 1 ] ); //, fuRhoVelocityX ); @@ -286,7 +277,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumZ.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumZOperator() ); explicitUpdaterMomentumZ.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterMomentumZ.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumZ.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterMomentumZ.template update< typename Mesh::Cell >( time, tau, mesh, ( *this->conservativeVariables->getMomentum() )[ 2 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 2 ] ); //, fuRhoVelocityX ); @@ -300,7 +291,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterEnergy.setDifferentialOperator( this->inviscidOperatorsPointer->getEnergyOperator() ); explicitUpdaterEnergy.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterEnergy.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterEnergy.template update< typename Mesh::Cell, CommunicatorType >( + explicitUpdaterEnergy.template update< typename Mesh::Cell >( time, tau, mesh, this->conservativeVariablesRHS->getEnergy(), // uRhoVelocityX, this->conservativeVariablesRHS->getEnergy() ); //, fuRhoVelocityX ); @@ -317,11 +308,10 @@ 
getExplicitUpdate( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > template< typename Matrix > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: assemblyLinearSystem( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -352,10 +342,9 @@ assemblyLinearSystem( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename InviscidOperators, - typename Communicator > + typename InviscidOperators > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators, Communicator >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: postIterate( const RealType& time, const RealType& tau, DofVectorPointer& dofs ) diff --git a/src/Examples/inviscid-flow/euler.h b/src/Examples/inviscid-flow/euler.h index ea361c1b4..107b2a742 100644 --- a/src/Examples/inviscid-flow/euler.h +++ b/src/Examples/inviscid-flow/euler.h @@ -50,8 +50,7 @@ template< typename Real, typename Index, typename MeshType, typename ConfigTag, - typename SolverStarter, - typename CommunicatorType > + typename SolverStarter > class eulerSetter { public: @@ -79,14 +78,14 @@ class eulerSetter if( boundaryConditionsType == "dirichlet" ) { typedef Operators::DirichletBoundaryConditions< MeshType, Constant, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, CommunicatorType, ApproximateOperator > Problem; + typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } if( boundaryConditionsType == "neumann" ) { typedef 
Operators::NeumannBoundaryConditions< MeshType, Constant, Real, Index > BoundaryConditions; - typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, CommunicatorType, ApproximateOperator > Problem; + typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } @@ -95,14 +94,14 @@ class eulerSetter if( boundaryConditionsType == "dirichlet" ) { typedef Operators::DirichletBoundaryConditions< MeshType, MeshFunction, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, CommunicatorType, ApproximateOperator> Problem; + typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator> Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } if( boundaryConditionsType == "neumann" ) { typedef Operators::NeumannBoundaryConditions< MeshType, MeshFunction, Real, Index > BoundaryConditions; - typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, CommunicatorType, ApproximateOperator > Problem; + typedef eulerProblem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } diff --git a/src/Examples/inviscid-flow/eulerProblem.h b/src/Examples/inviscid-flow/eulerProblem.h index 7b26ee007..d205a3ac2 100644 --- a/src/Examples/inviscid-flow/eulerProblem.h +++ b/src/Examples/inviscid-flow/eulerProblem.h @@ -22,29 +22,25 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > class eulerProblem: public PDEProblem< Mesh, - Communicator, typename InviscidOperators::RealType, typename Mesh::DeviceType, typename InviscidOperators::IndexType > { public: - + typedef typename 
InviscidOperators::RealType RealType; typedef typename Mesh::DeviceType DeviceType; typedef typename InviscidOperators::IndexType IndexType; - typedef PDEProblem< Mesh, Communicator, RealType, DeviceType, IndexType > BaseType; + typedef PDEProblem< Mesh, RealType, DeviceType, IndexType > BaseType; - typedef Communicator CommunicatorType; - using typename BaseType::MeshType; using typename BaseType::MeshPointer; using typename BaseType::DofVectorType; using typename BaseType::DofVectorPointer; - static const int Dimensions = Mesh::getMeshDimension(); + static const int Dimensions = Mesh::getMeshDimension(); typedef Functions::MeshFunctionView< Mesh > MeshFunctionType; typedef CompressibleConservativeVariables< MeshType > ConservativeVariablesType; @@ -82,9 +78,9 @@ class eulerProblem: const RealType& tau, DofVectorPointer& _u, DofVectorPointer& _fu ); - + void applyBoundaryConditions( const RealType& time, - DofVectorPointer& dofs ); + DofVectorPointer& dofs ); template< typename Matrix > void assemblyLinearSystem( const RealType& time, @@ -100,17 +96,17 @@ class eulerProblem: protected: InviscidOperatorsPointer inviscidOperatorsPointer; - + BoundaryConditionPointer boundaryConditionPointer; RightHandSidePointer rightHandSidePointer; - + ConservativeVariablesPointer conservativeVariables, conservativeVariablesRHS; - + VelocityFieldPointer velocity; MeshFunctionPointer pressure; - - RealType gamma; + + RealType gamma; }; } // namespace TNL diff --git a/src/Examples/inviscid-flow/eulerProblem_impl.h b/src/Examples/inviscid-flow/eulerProblem_impl.h index 5a7a42d1e..78e19e0b3 100644 --- a/src/Examples/inviscid-flow/eulerProblem_impl.h +++ b/src/Examples/inviscid-flow/eulerProblem_impl.h @@ -34,10 +34,9 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > String -eulerProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, InviscidOperators >:: +eulerProblem< Mesh, 
BoundaryCondition, RightHandSide, InviscidOperators >:: getPrologHeader() const { return String( "Inviscid flow solver" ); @@ -46,10 +45,9 @@ getPrologHeader() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, InviscidOperators >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) const { /**** @@ -61,10 +59,9 @@ writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) cons template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, InviscidOperators >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setup( const Config::ParameterContainer& parameters, const String& prefix ) { @@ -81,10 +78,9 @@ setup( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > -typename eulerProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, InviscidOperators >::IndexType -eulerProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, InviscidOperators >:: +typename eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >::IndexType +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getDofs() const { /**** @@ -97,10 +93,9 @@ getDofs() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, InviscidOperators >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: bindDofs( 
DofVectorPointer& dofVector ) { this->conservativeVariables->bind( this->getMesh(), *dofVector ); @@ -109,10 +104,9 @@ bindDofs( DofVectorPointer& dofVector ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, InviscidOperators >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setInitialCondition( const Config::ParameterContainer& parameters, DofVectorPointer& dofs ) { @@ -134,11 +128,10 @@ setInitialCondition( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > template< typename Matrix > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, InviscidOperators >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: setupLinearSystem( Matrix& matrix ) { /* const IndexType dofs = this->getDofs(); @@ -160,10 +153,9 @@ setupLinearSystem( Matrix& matrix ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, InviscidOperators >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: makeSnapshot( const RealType& time, const IndexType& step, DofVectorPointer& dofs ) @@ -196,10 +188,9 @@ makeSnapshot( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, InviscidOperators >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: getExplicitUpdate( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -242,7 +233,7 @@ getExplicitUpdate( const 
RealType& time, explicitUpdaterContinuity.setDifferentialOperator( this->inviscidOperatorsPointer->getContinuityOperator() ); explicitUpdaterContinuity.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterContinuity.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterContinuity.template update< typename Mesh::Cell, Communicator >( time, tau, this->getMesh(), + explicitUpdaterContinuity.template update< typename Mesh::Cell >( time, tau, this->getMesh(), this->conservativeVariables->getDensity(), this->conservativeVariablesRHS->getDensity() ); @@ -253,7 +244,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumX.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumXOperator() ); explicitUpdaterMomentumX.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterMomentumX.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumX.template update< typename Mesh::Cell, Communicator >( time, tau, this->getMesh(), + explicitUpdaterMomentumX.template update< typename Mesh::Cell >( time, tau, this->getMesh(), ( *this->conservativeVariables->getMomentum() )[ 0 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 0 ] ); //, fuRhoVelocityX ); @@ -263,7 +254,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterMomentumY.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumYOperator() ); explicitUpdaterMomentumY.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterMomentumY.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumY.template update< typename Mesh::Cell, Communicator >( time, tau, this->getMesh(), + explicitUpdaterMomentumY.template update< typename Mesh::Cell >( time, tau, this->getMesh(), ( *this->conservativeVariables->getMomentum() )[ 1 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 1 ] ); //, fuRhoVelocityX ); } @@ -274,7 +265,7 @@ getExplicitUpdate( const RealType& 
time, explicitUpdaterMomentumZ.setDifferentialOperator( this->inviscidOperatorsPointer->getMomentumZOperator() ); explicitUpdaterMomentumZ.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterMomentumZ.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterMomentumZ.template update< typename Mesh::Cell, Communicator >( time, tau, this->getMesh(), + explicitUpdaterMomentumZ.template update< typename Mesh::Cell >( time, tau, this->getMesh(), ( *this->conservativeVariables->getMomentum() )[ 2 ], // uRhoVelocityX, ( *this->conservativeVariablesRHS->getMomentum() )[ 2 ] ); //, fuRhoVelocityX ); } @@ -287,7 +278,7 @@ getExplicitUpdate( const RealType& time, explicitUpdaterEnergy.setDifferentialOperator( this->inviscidOperatorsPointer->getEnergyOperator() ); explicitUpdaterEnergy.setBoundaryConditions( this->boundaryConditionPointer ); explicitUpdaterEnergy.setRightHandSide( this->rightHandSidePointer ); - explicitUpdaterEnergy.template update< typename Mesh::Cell, Communicator >( time, tau, this->getMesh(), + explicitUpdaterEnergy.template update< typename Mesh::Cell >( time, tau, this->getMesh(), this->conservativeVariables->getEnergy(), // uRhoVelocityX, this->conservativeVariablesRHS->getEnergy() ); //, fuRhoVelocityX ); @@ -301,10 +292,9 @@ getExplicitUpdate( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, InviscidOperators >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: applyBoundaryConditions( const RealType& time, DofVectorPointer& dofs ) { @@ -315,11 +305,10 @@ applyBoundaryConditions( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > template< typename Matrix > void -eulerProblem< Mesh, BoundaryCondition, RightHandSide, 
Communicator, InviscidOperators >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: assemblyLinearSystem( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -349,10 +338,9 @@ assemblyLinearSystem( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename InviscidOperators > bool -eulerProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, InviscidOperators >:: +eulerProblem< Mesh, BoundaryCondition, RightHandSide, InviscidOperators >:: postIterate( const RealType& time, const RealType& tau, DofVectorPointer& dofs ) diff --git a/src/Examples/transport-equation/tnl-transport-equation-eoc.h b/src/Examples/transport-equation/tnl-transport-equation-eoc.h index dbf7610ef..f619c25bb 100644 --- a/src/Examples/transport-equation/tnl-transport-equation-eoc.h +++ b/src/Examples/transport-equation/tnl-transport-equation-eoc.h @@ -48,16 +48,16 @@ template< typename ConfigTag >class advectionConfig Functions::Analytic::VectorNorm< 3, double >::configSetup( config, "vector-norm-" ); Operators::Analytic::Heaviside< 3, double >::configSetup( config, "heaviside-" ); Operators::Analytic::Shift< 3, double >::configSetup( config, "heaviside-" ); - + config.addDelimiter( "Velocity field" ); config.addEntry< String >( "velocity-field", "Type of velocity field.", "constant" ); config.addEntryEnum< String >( "constant" ); Functions::VectorField< 3, Functions::Analytic::Constant< 3 > >::configSetup( config, "velocity-field-" ); - + config.addDelimiter( "Numerical scheme" ); typedef Meshes::Grid< 3 > MeshType; Operators::Advection::LaxFridrichs< MeshType >::configSetup( config ); - + config.addDelimiter( "Boundary conditions" ); config.addEntry< String >( "boundary-conditions-type", "Choose the boundary conditions type.", "dirichlet"); config.addEntryEnum< String >( "dirichlet" ); @@ -71,8 +71,7 @@ template< typename Real, typename Index, typename MeshType, 
typename ConfigTag, - typename SolverStarter, - typename CommunicatorType > + typename SolverStarter > class advectionSetter { public: @@ -80,16 +79,16 @@ class advectionSetter typedef Real RealType; typedef Device DeviceType; typedef Index IndexType; - + static const int Dimensions = MeshType::getMeshDimension(); - + template< typename Problem > static bool callSolverStarter( const Config::ParameterContainer& parameters ) { SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } - + template< typename DifferentialOperatorType > static bool setBoundaryConditionsType( const Config::ParameterContainer& parameters ) { @@ -98,26 +97,26 @@ class advectionSetter if( boundaryConditionsType == "dirichlet" ) { typedef Operators::DirichletBoundaryConditions< MeshType, ConstantFunctionType, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef transportEquationProblemEoc< MeshType, BoundaryConditions, ConstantFunctionType, CommunicatorType, DifferentialOperatorType > Problem; + typedef transportEquationProblemEoc< MeshType, BoundaryConditions, ConstantFunctionType, DifferentialOperatorType > Problem; return callSolverStarter< Problem >( parameters ); } if( boundaryConditionsType == "neumann" ) { typedef Operators::DirichletBoundaryConditions< MeshType, ConstantFunctionType, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef transportEquationProblemEoc< MeshType, BoundaryConditions, ConstantFunctionType, CommunicatorType, DifferentialOperatorType > Problem; + typedef transportEquationProblemEoc< MeshType, BoundaryConditions, ConstantFunctionType, DifferentialOperatorType > Problem; return callSolverStarter< Problem >( parameters ); } std::cerr << "Unknown boundary conditions type: " << boundaryConditionsType << "." 
<< std::endl; return false; } - + template< typename VelocityFieldType > static bool setDifferentialOperatorType( const Config::ParameterContainer& parameters ) { typedef Operators::Advection::LaxFridrichs< MeshType, Real, Index, VelocityFieldType > DifferentialOperatorType; return setBoundaryConditionsType< DifferentialOperatorType >( parameters ); } - + static bool setVelocityFieldType( const Config::ParameterContainer& parameters ) { String velocityFieldType = parameters.getParameter< String >( "velocity-field" ); @@ -132,7 +131,7 @@ class advectionSetter static bool run( const Config::ParameterContainer& parameters ) { return setVelocityFieldType( parameters ); - } + } }; int main( int argc, char* argv[] ) diff --git a/src/Examples/transport-equation/tnl-transport-equation.h b/src/Examples/transport-equation/tnl-transport-equation.h index 9e669033a..a8ad7062f 100644 --- a/src/Examples/transport-equation/tnl-transport-equation.h +++ b/src/Examples/transport-equation/tnl-transport-equation.h @@ -43,10 +43,10 @@ template< typename ConfigTag >class advectionConfig config.addEntry< String >( "velocity-field", "Type of velocity field.", "constant" ); config.addEntryEnum< String >( "constant" ); Functions::VectorField< 3, Functions::Analytic::Constant< 3 > >::configSetup( config, "velocity-field-" ); - + typedef Meshes::Grid< 3 > MeshType; Operators::Advection::LaxFridrichs< MeshType >::configSetup( config ); - + config.addEntry< String >( "boundary-conditions-type", "Choose the boundary conditions type.", "dirichlet"); config.addEntryEnum< String >( "dirichlet" ); config.addEntryEnum< String >( "neumann" ); @@ -62,8 +62,7 @@ template< typename Real, typename Index, typename MeshType, typename ConfigTag, - typename SolverStarter, - typename CommunicatorType > + typename SolverStarter > class advectionSetter { public: @@ -71,16 +70,16 @@ class advectionSetter typedef Real RealType; typedef Device DeviceType; typedef Index IndexType; - + static const int Dimensions = 
MeshType::getMeshDimension(); - + template< typename Problem > static bool callSolverStarter( const Config::ParameterContainer& parameters ) { SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); } - + template< typename DifferentialOperatorType > static bool setBoundaryConditionsType( const Config::ParameterContainer& parameters ) { @@ -89,19 +88,19 @@ class advectionSetter if( boundaryConditionsType == "dirichlet" ) { typedef Operators::DirichletBoundaryConditions< MeshType, ConstantFunctionType, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef transportEquationProblem< MeshType, BoundaryConditions, ConstantFunctionType, CommunicatorType, DifferentialOperatorType > Problem; + typedef transportEquationProblem< MeshType, BoundaryConditions, ConstantFunctionType, DifferentialOperatorType > Problem; return callSolverStarter< Problem >( parameters ); } if( boundaryConditionsType == "neumann" ) { typedef Operators::DirichletBoundaryConditions< MeshType, ConstantFunctionType, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef transportEquationProblem< MeshType, BoundaryConditions, ConstantFunctionType, CommunicatorType, DifferentialOperatorType > Problem; + typedef transportEquationProblem< MeshType, BoundaryConditions, ConstantFunctionType, DifferentialOperatorType > Problem; return callSolverStarter< Problem >( parameters ); } std::cerr << "Unknown boundary conditions type: " << boundaryConditionsType << "." 
<< std::endl; return false; } - + template< typename VelocityFieldType > static bool setDifferentialOperatorType( const Config::ParameterContainer& parameters ) { @@ -113,7 +112,7 @@ class advectionSetter typedef Operators::Advection::LaxFridrichs< MeshType, Real, Index, VelocityFieldType > DifferentialOperatorType; return setBoundaryConditionsType< DifferentialOperatorType >( parameters ); } - + static bool setVelocityFieldType( const Config::ParameterContainer& parameters ) { String velocityFieldType = parameters.getParameter< String >( "velocity-field" ); @@ -128,7 +127,7 @@ class advectionSetter static bool run( const Config::ParameterContainer& parameters ) { return setVelocityFieldType( parameters ); - } + } }; int main( int argc, char* argv[] ) diff --git a/src/Examples/transport-equation/transportEquationProblem.h b/src/Examples/transport-equation/transportEquationProblem.h index a4f932dfb..07f8d4a96 100644 --- a/src/Examples/transport-equation/transportEquationProblem.h +++ b/src/Examples/transport-equation/transportEquationProblem.h @@ -21,11 +21,9 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > class transportEquationProblem: public PDEProblem< Mesh, - Communicator, typename DifferentialOperator::RealType, typename Mesh::DeviceType, typename DifferentialOperator::IndexType > @@ -36,7 +34,7 @@ public PDEProblem< Mesh, typedef typename Mesh::DeviceType DeviceType; typedef typename DifferentialOperator::IndexType IndexType; typedef Functions::MeshFunctionView< Mesh > MeshFunctionType; - typedef PDEProblem< Mesh, Communicator, RealType, DeviceType, IndexType > BaseType; + typedef PDEProblem< Mesh, RealType, DeviceType, IndexType > BaseType; typedef Pointers::SharedPointer< MeshFunctionType, DeviceType > MeshFunctionPointer; typedef Pointers::SharedPointer< DifferentialOperator > DifferentialOperatorPointer; typedef Pointers::SharedPointer< 
BoundaryCondition > BoundaryConditionPointer; @@ -44,8 +42,6 @@ public PDEProblem< Mesh, typedef typename DifferentialOperator::VelocityFieldType VelocityFieldType; typedef Pointers::SharedPointer< VelocityFieldType, DeviceType > VelocityFieldPointer; - typedef Communicator CommunicatorType; - using typename BaseType::MeshType; using typename BaseType::MeshPointer; using typename BaseType::DofVectorType; @@ -77,9 +73,9 @@ public PDEProblem< Mesh, const RealType& tau, DofVectorPointer& _u, DofVectorPointer& _fu ); - + void applyBoundaryConditions( const RealType& time, - DofVectorPointer& dofs ); + DofVectorPointer& dofs ); template< typename Matrix > void assemblyLinearSystem( const RealType& time, @@ -97,9 +93,9 @@ public PDEProblem< Mesh, BoundaryConditionPointer boundaryConditionPointer; RightHandSidePointer rightHandSidePointer; - + VelocityFieldPointer velocityField; - + int dimension; String choice; RealType size; @@ -108,7 +104,7 @@ public PDEProblem< Mesh, RealType speedX; RealType speedY; RealType speedZ; - RealType schemeSize; + RealType schemeSize; }; } // namespace TNL diff --git a/src/Examples/transport-equation/transportEquationProblemEoc.h b/src/Examples/transport-equation/transportEquationProblemEoc.h index 88ef03a89..b0af429f1 100644 --- a/src/Examples/transport-equation/transportEquationProblemEoc.h +++ b/src/Examples/transport-equation/transportEquationProblemEoc.h @@ -22,10 +22,9 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > class transportEquationProblemEoc: -public transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator > +public transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator > { public: @@ -33,21 +32,20 @@ public transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communi typedef typename Mesh::DeviceType DeviceType; typedef typename 
DifferentialOperator::IndexType IndexType; typedef Functions::MeshFunctionView< Mesh > MeshFunctionType; - typedef transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator > BaseType; + typedef transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator > BaseType; typedef Pointers::SharedPointer< MeshFunctionType, DeviceType > MeshFunctionPointer; typedef Pointers::SharedPointer< DifferentialOperator > DifferentialOperatorPointer; typedef Pointers::SharedPointer< BoundaryCondition > BoundaryConditionPointer; typedef Pointers::SharedPointer< RightHandSide, DeviceType > RightHandSidePointer; typedef typename DifferentialOperator::VelocityFieldType VelocityFieldType; typedef Pointers::SharedPointer< VelocityFieldType, DeviceType > VelocityFieldPointer; - - - typedef Communicator CommunicatorType; + + using typename BaseType::MeshType; using typename BaseType::MeshPointer; using typename BaseType::DofVectorType; using typename BaseType::DofVectorPointer; - + //using BaseType::getExplicitUpdate; String getPrologHeader() const; diff --git a/src/Examples/transport-equation/transportEquationProblemEoc_impl.h b/src/Examples/transport-equation/transportEquationProblemEoc_impl.h index 5f5bd575f..46a54a1e5 100644 --- a/src/Examples/transport-equation/transportEquationProblemEoc_impl.h +++ b/src/Examples/transport-equation/transportEquationProblemEoc_impl.h @@ -27,10 +27,9 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > String -transportEquationProblemEoc< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblemEoc< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getPrologHeader() const { return String( "Transport Equation EOC" ); @@ -40,10 +39,9 @@ getPrologHeader() const template< typename Mesh, typename BoundaryCondition, typename 
RightHandSide, - typename Communicator, typename DifferentialOperator > bool -transportEquationProblemEoc< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblemEoc< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setup( const Config::ParameterContainer& parameters, const String& prefix ) { @@ -115,10 +113,9 @@ setup( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > bool -transportEquationProblemEoc< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblemEoc< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setInitialCondition( const Config::ParameterContainer& parameters, DofVectorPointer& dofs ) { diff --git a/src/Examples/transport-equation/transportEquationProblem_impl.h b/src/Examples/transport-equation/transportEquationProblem_impl.h index 26d5ee9a5..012c1754b 100644 --- a/src/Examples/transport-equation/transportEquationProblem_impl.h +++ b/src/Examples/transport-equation/transportEquationProblem_impl.h @@ -24,10 +24,9 @@ namespace TNL { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > String -transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getPrologHeader() const { return String( "Transport Equation" ); @@ -36,10 +35,9 @@ getPrologHeader() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > void -transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, 
DifferentialOperator >:: writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) const { /**** @@ -51,10 +49,9 @@ writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) cons template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > bool -transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setup( const Config::ParameterContainer& parameters, const String& prefix ) { @@ -68,10 +65,9 @@ setup( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > -typename transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >::IndexType -transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +typename transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >::IndexType +transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getDofs() const { /**** @@ -84,10 +80,9 @@ getDofs() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > void -transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: bindDofs( DofVectorPointer& dofVector ) { //const IndexType dofs = this->getMesh()->template getEntitiesCount< typename MeshType::Cell >(); @@ -97,10 +92,9 @@ bindDofs( DofVectorPointer& dofVector ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > 
bool -transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setInitialCondition( const Config::ParameterContainer& parameters, DofVectorPointer& dofs ) { @@ -117,11 +111,10 @@ setInitialCondition( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > template< typename Matrix > bool -transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setupLinearSystem( Matrix& matrix ) { /*const IndexType dofs = this->getDofs(); @@ -143,10 +136,9 @@ setupLinearSystem( Matrix& matrix ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > bool -transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: makeSnapshot( const RealType& time, const IndexType& step, DofVectorPointer& dofs ) @@ -166,10 +158,9 @@ makeSnapshot( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > void -transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getExplicitUpdate( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -197,16 +188,15 @@ getExplicitUpdate( const RealType& time, explicitUpdater.setDifferentialOperator( this->differentialOperatorPointer ); explicitUpdater.setBoundaryConditions( this->boundaryConditionPointer 
); explicitUpdater.setRightHandSide( this->rightHandSidePointer ); - explicitUpdater.template update< typename Mesh::Cell, Communicator >( time, tau, mesh, u, fu ); + explicitUpdater.template update< typename Mesh::Cell >( time, tau, mesh, u, fu ); } template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > void -transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: applyBoundaryConditions( const RealType& time, DofVectorPointer& dofs ) { @@ -217,11 +207,10 @@ applyBoundaryConditions( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > template< typename Matrix > void -transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +transportEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: assemblyLinearSystem( const RealType& time, const RealType& tau, DofVectorPointer& _u, diff --git a/src/TNL/Communicators/MpiCommunicator.h b/src/TNL/Communicators/MpiCommunicator.h deleted file mode 100644 index cd5162968..000000000 --- a/src/TNL/Communicators/MpiCommunicator.h +++ /dev/null @@ -1,196 +0,0 @@ -/*************************************************************************** - MpiCommunicator.h - description - ------------------- - begin : Apr 23, 2005 - copyright : (C) 2005 by Tomas Oberhuber et al. - email : tomas.oberhuber@fjfi.cvut.cz - ***************************************************************************/ - -/* See Copyright Notice in tnl/Copyright */ - -#pragma once - -#include -#include -#include -#include - -namespace TNL { -//! \brief Namespace for TNL communicators. -namespace Communicators { -namespace { - -//! \brief MPI communicator. 
-class [[deprecated("use the functions in the TNL::MPI namespace instead")]] -MpiCommunicator -{ - public: -#ifdef HAVE_MPI - using Request = MPI_Request; - using CommunicationGroup = MPI_Comm; -#else - using Request = int; - using CommunicationGroup = int; -#endif - - static bool isDistributed() - { -#ifdef HAVE_MPI - return GetSize(AllGroup)>1; -#else - return false; -#endif - } - - static void configSetup( Config::ConfigDescription& config, const String& prefix = "" ) - { - MPI::configSetup( config, prefix ); - } - - static bool setup( const Config::ParameterContainer& parameters, - const String& prefix = "" ) - { - return MPI::setup( parameters, prefix ); - } - - static void Init( int& argc, char**& argv, int required_thread_level = MPI_THREAD_SINGLE ) - { - MPI::Init( argc, argv, required_thread_level ); - - // silence warnings about (potentially) unused variables - (void) NullGroup; - } - - static void Finalize() - { - MPI::Finalize(); - } - - static bool IsInitialized() - { - return MPI::isInitialized(); - } - - static int GetRank(CommunicationGroup group = AllGroup ) - { - return MPI::GetRank( group ); - } - - static int GetSize(CommunicationGroup group = AllGroup ) - { - return MPI::GetSize( group ); - } - - static void Barrier( CommunicationGroup group = AllGroup ) - { - MPI::Barrier( group ); - } - - template - static void Send( const T* data, int count, int dest, int tag, CommunicationGroup group = AllGroup ) - { - MPI::Send( data, count, dest, tag, group ); - } - - template - static void Recv( T* data, int count, int src, int tag, CommunicationGroup group = AllGroup ) - { - MPI::Recv( data, count, src, tag, group ); - } - - template - static Request ISend( const T* data, int count, int dest, int tag, CommunicationGroup group = AllGroup ) - { - return MPI::Isend( data, count, dest, tag, group ); - } - - template - static Request IRecv( T* data, int count, int src, int tag, CommunicationGroup group = AllGroup ) - { - return MPI::Irecv( data, count, src, 
tag, group ); - } - - static void WaitAll(Request *reqs, int length) - { - MPI::Waitall( reqs, length ); - } - - template< typename T > - static void Bcast( T* data, int count, int root, CommunicationGroup group) - { - MPI::Bcast( data, count, root, group ); - } - - template< typename T > - static void Allreduce( const T* data, - T* reduced_data, - int count, - const MPI_Op &op, - CommunicationGroup group) - { - MPI::Allreduce( data, reduced_data, count, op, group ); - } - - // in-place variant of Allreduce - template< typename T > - static void Allreduce( T* data, - int count, - const MPI_Op &op, - CommunicationGroup group) - { - MPI::Allreduce( data, count, op, group ); - } - - template< typename T > - static void Reduce( const T* data, - T* reduced_data, - int count, - const MPI_Op &op, - int root, - CommunicationGroup group) - { - MPI::Reduce( data, reduced_data, count, op, root, group ); - } - - template< typename T > - static void SendReceive( const T* sendData, - int sendCount, - int destination, - int sendTag, - T* receiveData, - int receiveCount, - int source, - int receiveTag, - CommunicationGroup group ) - { - MPI::Sendrecv( sendData, sendCount, destination, sendTag, receiveData, receiveCount, source, receiveTag, group ); - } - - template< typename T > - static void Alltoall( const T* sendData, - int sendCount, - T* receiveData, - int receiveCount, - CommunicationGroup group ) - { - MPI::Alltoall( sendData, sendCount, receiveData, receiveCount, group ); - } - -#ifdef HAVE_MPI - static MPI_Comm AllGroup; - static MPI_Comm NullGroup; -#else - static constexpr int AllGroup = 1; - static constexpr int NullGroup = 0; -#endif - private: -}; - -#ifdef HAVE_MPI -MPI_Comm MpiCommunicator::AllGroup = MPI_COMM_WORLD; -MPI_Comm MpiCommunicator::NullGroup = MPI_COMM_NULL; -#endif - -} // namespace -} // namespace Communicators -} // namespace TNL diff --git a/src/TNL/Problems/HeatEquationEocProblem.h b/src/TNL/Problems/HeatEquationEocProblem.h index 
78dd640b3..2f624bc4f 100644 --- a/src/TNL/Problems/HeatEquationEocProblem.h +++ b/src/TNL/Problems/HeatEquationEocProblem.h @@ -25,15 +25,14 @@ namespace Problems { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator = Operators::LinearDiffusion< Mesh, typename BoundaryCondition::RealType > > -class HeatEquationEocProblem : public HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator > +class HeatEquationEocProblem : public HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator > { public: - - typedef HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator > BaseType; - + + typedef HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator > BaseType; + using typename BaseType::MeshPointer; bool setup( const Config::ParameterContainer& parameters, diff --git a/src/TNL/Problems/HeatEquationEocProblem_impl.h b/src/TNL/Problems/HeatEquationEocProblem_impl.h index f7c7aea5c..4284e1043 100644 --- a/src/TNL/Problems/HeatEquationEocProblem_impl.h +++ b/src/TNL/Problems/HeatEquationEocProblem_impl.h @@ -17,7 +17,7 @@ #pragma once -#include "HeatEquationProblem.h" +#include "HeatEquationEocProblem.h" namespace TNL { namespace Problems { @@ -25,10 +25,9 @@ namespace Problems { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > bool -HeatEquationEocProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +HeatEquationEocProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setup( const Config::ParameterContainer& parameters, const String& prefix ) { @@ -40,7 +39,7 @@ setup( const Config::ParameterContainer& parameters, this->explicitUpdater.setRightHandSide( this->rightHandSidePointer ); this->systemAssembler.setDifferentialOperator( 
this->differentialOperatorPointer ); this->systemAssembler.setBoundaryConditions( this->boundaryConditionPointer ); - this->systemAssembler.setRightHandSide( this->rightHandSidePointer ); + this->systemAssembler.setRightHandSide( this->rightHandSidePointer ); return true; } diff --git a/src/TNL/Problems/HeatEquationProblem.h b/src/TNL/Problems/HeatEquationProblem.h index 0c8cb1026..c0fdbebb7 100644 --- a/src/TNL/Problems/HeatEquationProblem.h +++ b/src/TNL/Problems/HeatEquationProblem.h @@ -32,11 +32,9 @@ namespace Problems { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator = Operators::LinearDiffusion< Mesh, typename BoundaryCondition::RealType > > class HeatEquationProblem : public PDEProblem< Mesh, - Communicator, typename Mesh::RealType, typename Mesh::DeviceType, typename Mesh::IndexType > @@ -48,7 +46,7 @@ class HeatEquationProblem : public PDEProblem< Mesh, typedef typename Mesh::IndexType IndexType; typedef Functions::MeshFunctionView< Mesh > MeshFunctionType; typedef Pointers::SharedPointer< MeshFunctionType, DeviceType > MeshFunctionPointer; - typedef PDEProblem< Mesh, Communicator, RealType, DeviceType, IndexType > BaseType; + typedef PDEProblem< Mesh, RealType, DeviceType, IndexType > BaseType; typedef Pointers::SharedPointer< DifferentialOperator > DifferentialOperatorPointer; typedef Pointers::SharedPointer< BoundaryCondition > BoundaryConditionPointer; typedef Pointers::SharedPointer< RightHandSide, DeviceType > RightHandSidePointer; @@ -59,8 +57,6 @@ class HeatEquationProblem : public PDEProblem< Mesh, using typename BaseType::DofVectorPointer; using typename BaseType::MatrixType; - typedef Communicator CommunicatorType; - String getPrologHeader() const; void writeProlog( Logger& logger, diff --git a/src/TNL/Problems/HeatEquationProblem_impl.h b/src/TNL/Problems/HeatEquationProblem_impl.h index 097161b8b..12f2e5bc6 100644 --- 
a/src/TNL/Problems/HeatEquationProblem_impl.h +++ b/src/TNL/Problems/HeatEquationProblem_impl.h @@ -20,6 +20,7 @@ #include #include #include +#include #include "HeatEquationProblem.h" @@ -29,10 +30,9 @@ namespace Problems { template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > String -HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getPrologHeader() const { return String( "Heat equation" ); @@ -41,10 +41,9 @@ getPrologHeader() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > void -HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) const { } @@ -52,10 +51,9 @@ writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) cons template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > bool -HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: writeEpilog( Logger& logger ) { return true; @@ -64,10 +62,9 @@ writeEpilog( Logger& logger ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > bool -HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setup( const Config::ParameterContainer& parameters, const String& prefix ) { @@ 
-96,10 +93,9 @@ setup( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > -typename HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >::IndexType -HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +typename HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >::IndexType +HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getDofs() const { /**** @@ -111,10 +107,9 @@ getDofs() const template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > void -HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: bindDofs( DofVectorPointer& dofVector ) { this->uPointer->bind( this->getMesh(), *dofVector ); @@ -123,16 +118,15 @@ bindDofs( DofVectorPointer& dofVector ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > bool -HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setInitialCondition( const Config::ParameterContainer& parameters, DofVectorPointer& dofs ) { this->bindDofs( dofs ); const String& initialConditionFile = parameters.getParameter< String >( "initial-condition" ); - if(CommunicatorType::isDistributed()) + if( MPI::GetSize() > 1 ) { std::cout<<"Nodes Distribution: " << this->distributedMeshPointer->printProcessDistr() << std::endl; if( ! 
Functions::readDistributedMeshFunction( *this->distributedMeshPointer, *this->uPointer, "u", initialConditionFile ) ) @@ -157,11 +151,10 @@ setInitialCondition( const Config::ParameterContainer& parameters, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > template< typename MatrixPointer > bool -HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setupLinearSystem( MatrixPointer& matrixPointer ) { const IndexType dofs = this->getDofs(); @@ -182,10 +175,9 @@ setupLinearSystem( MatrixPointer& matrixPointer ) template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > bool -HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: makeSnapshot( const RealType& time, const IndexType& step, DofVectorPointer& dofs ) @@ -198,7 +190,7 @@ makeSnapshot( const RealType& time, fileName.setFileNameBase( "u-" ); fileName.setIndex( step ); - if(CommunicatorType::isDistributed()) + if( MPI::GetSize() > 1 ) { fileName.setExtension( "pvti" ); Functions::writeDistributedMeshFunction( *this->distributedMeshPointer, *this->uPointer, "u", fileName.getFileName() ); @@ -214,10 +206,9 @@ makeSnapshot( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > void -HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getExplicitUpdate( const RealType& time, const RealType& tau, DofVectorPointer& uDofs, @@ -234,16 +225,15 @@ getExplicitUpdate( const 
RealType& time, this->bindDofs( uDofs ); this->fuPointer->bind( this->getMesh(), *fuDofs ); - this->explicitUpdater.template update< typename Mesh::Cell, Communicator >( time, tau, this->getMesh(), this->uPointer, this->fuPointer ); + this->explicitUpdater.template update< typename Mesh::Cell >( time, tau, this->getMesh(), this->uPointer, this->fuPointer ); } template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > void -HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: applyBoundaryConditions( const RealType& time, DofVectorPointer& uDofs ) { @@ -254,11 +244,10 @@ applyBoundaryConditions( const RealType& time, template< typename Mesh, typename BoundaryCondition, typename RightHandSide, - typename Communicator, typename DifferentialOperator > template< typename MatrixPointer > void -HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, Communicator, DifferentialOperator >:: +HeatEquationProblem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: assemblyLinearSystem( const RealType& time, const RealType& tau, DofVectorPointer& dofsPointer, diff --git a/src/TNL/Problems/MeanCurvatureFlowProblem.h b/src/TNL/Problems/MeanCurvatureFlowProblem.h index 415216dce..a2a94cf44 100644 --- a/src/TNL/Problems/MeanCurvatureFlowProblem.h +++ b/src/TNL/Problems/MeanCurvatureFlowProblem.h @@ -26,7 +26,6 @@ namespace TNL { namespace Problems { template< typename Mesh, - typename Communicator, typename BoundaryCondition, typename RightHandSide, typename DifferentialOperator = @@ -35,7 +34,6 @@ template< typename Mesh, typename Mesh::GlobalIndexType, false > > class MeanCurvatureFlowProblem : public PDEProblem< Mesh, - Communicator, typename DifferentialOperator::RealType, typename Mesh::DeviceType, typename DifferentialOperator::IndexType > @@ -46,7 
+44,7 @@ class MeanCurvatureFlowProblem : public PDEProblem< Mesh, typedef typename Mesh::DeviceType DeviceType; typedef typename DifferentialOperator::IndexType IndexType; typedef Functions::MeshFunction< Mesh > MeshFunctionType; - typedef PDEProblem< Mesh, Communicator, RealType, DeviceType, IndexType > BaseType; + typedef PDEProblem< Mesh, RealType, DeviceType, IndexType > BaseType; typedef CSR< RealType, DeviceType, IndexType> MatrixType; using typename BaseType::MeshType; @@ -69,7 +67,7 @@ class MeanCurvatureFlowProblem : public PDEProblem< Mesh, template< typename Matrix > bool setupLinearSystem( const MeshType& mesh, Matrix& matrix ); - + bool makeSnapshot( const RealType& time, const IndexType& step, const MeshType& mesh, @@ -104,7 +102,7 @@ class MeanCurvatureFlowProblem : public PDEProblem< Mesh, DifferentialOperator differentialOperator; BoundaryCondition boundaryCondition; - + RightHandSide rightHandSide; }; diff --git a/src/TNL/Problems/PDEProblem.h b/src/TNL/Problems/PDEProblem.h index 96c2db251..47848fdf4 100644 --- a/src/TNL/Problems/PDEProblem.h +++ b/src/TNL/Problems/PDEProblem.h @@ -21,7 +21,6 @@ namespace TNL { namespace Problems { template< typename Mesh, - typename Communicator, typename Real = typename Mesh::RealType, typename Device = typename Mesh::DeviceType, typename Index = typename Mesh::GlobalIndexType > @@ -49,7 +48,6 @@ class PDEProblem : public Problem< Real, Device, Index > TNL::Matrices::GeneralMatrix, SegmentsType >; - using CommunicatorType = Communicator; using CommonDataType = CommonData; using CommonDataPointer = Pointers::SharedPointer< CommonDataType, DeviceType >; diff --git a/src/TNL/Problems/PDEProblem_impl.h b/src/TNL/Problems/PDEProblem_impl.h index 0eaf4df06..a30528b14 100644 --- a/src/TNL/Problems/PDEProblem_impl.h +++ b/src/TNL/Problems/PDEProblem_impl.h @@ -17,59 +17,54 @@ namespace TNL { namespace Problems { template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > String 
-PDEProblem< Mesh, Communicator, Real, Device, Index >:: +PDEProblem< Mesh, Real, Device, Index >:: getPrologHeader() const { return String( "General PDE Problem" ); } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > void -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +PDEProblem< Mesh, Real, Device, Index >:: writeProlog( Logger& logger, const Config::ParameterContainer& parameters ) const { } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > bool -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +PDEProblem< Mesh, Real, Device, Index >:: writeEpilog( Logger& logger ) const { return true; } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > -typename PDEProblem< Mesh, Communicator, Real, Device, Index >::IndexType -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +typename PDEProblem< Mesh, Real, Device, Index >::IndexType +PDEProblem< Mesh, Real, Device, Index >:: subdomainOverlapSize() { return 1; } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > void -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +PDEProblem< Mesh, Real, Device, Index >:: getSubdomainOverlaps( const Config::ParameterContainer& parameters, const String& prefix, const MeshType& mesh, @@ -81,24 +76,22 @@ getSubdomainOverlaps( const Config::ParameterContainer& parameters, } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > void -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +PDEProblem< Mesh, Real, Device, Index >:: setMesh( MeshPointer& meshPointer) { this->meshPointer = meshPointer; } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > void -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +PDEProblem< Mesh, Real, Device, Index >:: setMesh( 
DistributedMeshPointer& distributedMeshPointer) { this->distributedMeshPointer = distributedMeshPointer; @@ -108,92 +101,84 @@ setMesh( DistributedMeshPointer& distributedMeshPointer) } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > -const typename PDEProblem< Mesh, Communicator, Real, Device, Index >::MeshPointer& -PDEProblem< Mesh, Communicator, Real, Device, Index >::getMesh() const +const typename PDEProblem< Mesh, Real, Device, Index >::MeshPointer& +PDEProblem< Mesh, Real, Device, Index >::getMesh() const { return this->meshPointer; } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > -typename PDEProblem< Mesh, Communicator, Real, Device, Index >::MeshPointer& -PDEProblem< Mesh, Communicator, Real, Device, Index >::getMesh() +typename PDEProblem< Mesh, Real, Device, Index >::MeshPointer& +PDEProblem< Mesh, Real, Device, Index >::getMesh() { return this->meshPointer; } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > -const typename PDEProblem< Mesh, Communicator, Real, Device, Index >::DistributedMeshPointer& -PDEProblem< Mesh, Communicator, Real, Device, Index >::getDistributedMesh() const +const typename PDEProblem< Mesh, Real, Device, Index >::DistributedMeshPointer& +PDEProblem< Mesh, Real, Device, Index >::getDistributedMesh() const { return this->distributedMeshPointer; } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > -typename PDEProblem< Mesh, Communicator, Real, Device, Index >::DistributedMeshPointer& -PDEProblem< Mesh, Communicator, Real, Device, Index >::getDistributedMesh() +typename PDEProblem< Mesh, Real, Device, Index >::DistributedMeshPointer& +PDEProblem< Mesh, Real, Device, Index >::getDistributedMesh() { return this->distributedMeshPointer; } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > 
void -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +PDEProblem< Mesh, Real, Device, Index >:: setCommonData( CommonDataPointer& commonData ) { this->commonDataPointer = commonData; } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > -const typename PDEProblem< Mesh, Communicator, Real, Device, Index >::CommonDataPointer& -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +const typename PDEProblem< Mesh, Real, Device, Index >::CommonDataPointer& +PDEProblem< Mesh, Real, Device, Index >:: getCommonData() const { return this->commonDataPointer; } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > -typename PDEProblem< Mesh, Communicator, Real, Device, Index >::CommonDataPointer& -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +typename PDEProblem< Mesh, Real, Device, Index >::CommonDataPointer& +PDEProblem< Mesh, Real, Device, Index >:: getCommonData() { return this->commonDataPointer; } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > bool -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +PDEProblem< Mesh, Real, Device, Index >:: preIterate( const RealType& time, const RealType& tau, DofVectorPointer& dofs ) @@ -202,13 +187,12 @@ preIterate( const RealType& time, } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > template< typename Matrix > void -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +PDEProblem< Mesh, Real, Device, Index >:: saveFailedLinearSystem( const Matrix& matrix, const DofVectorType& dofs, const DofVectorType& rhs ) const @@ -220,12 +204,11 @@ saveFailedLinearSystem( const Matrix& matrix, } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > bool -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +PDEProblem< Mesh, Real, Device, Index >:: postIterate( const 
RealType& time, const RealType& tau, DofVectorPointer& dofs ) @@ -234,12 +217,11 @@ postIterate( const RealType& time, } template< typename Mesh, - typename Communicator, typename Real, typename Device, typename Index > Solvers::SolverMonitor* -PDEProblem< Mesh, Communicator, Real, Device, Index >:: +PDEProblem< Mesh, Real, Device, Index >:: getSolverMonitor() { return 0; diff --git a/src/TNL/Solvers/PDE/ExplicitTimeStepper.h b/src/TNL/Solvers/PDE/ExplicitTimeStepper.h index d6fd8a053..be65a9512 100644 --- a/src/TNL/Solvers/PDE/ExplicitTimeStepper.h +++ b/src/TNL/Solvers/PDE/ExplicitTimeStepper.h @@ -35,7 +35,6 @@ class ExplicitTimeStepper using DofVectorType = typename ProblemType::DofVectorType; using DofVectorPointer = Pointers::SharedPointer< DofVectorType, DeviceType >; using SolverMonitorType = IterativeSolverMonitor< RealType, IndexType >; - using CommunicatorType = typename Problem::CommunicatorType; using OdeSolverType = OdeSolver< ExplicitTimeStepper< Problem, OdeSolver >, SolverMonitorType >; using OdeSolverPointer = Pointers::SharedPointer< OdeSolverType, DeviceType >; diff --git a/src/TNL/Solvers/PDE/ExplicitUpdater.h b/src/TNL/Solvers/PDE/ExplicitUpdater.h index 16a661d22..c6cbcce12 100644 --- a/src/TNL/Solvers/PDE/ExplicitUpdater.h +++ b/src/TNL/Solvers/PDE/ExplicitUpdater.h @@ -20,7 +20,7 @@ namespace TNL { namespace Solvers { -namespace PDE { +namespace PDE { template< typename Real, typename MeshFunction, @@ -30,7 +30,7 @@ template< typename Real, class ExplicitUpdaterTraverserUserData { public: - + Real time; const DifferentialOperator* differentialOperator; @@ -40,7 +40,7 @@ class ExplicitUpdaterTraverserUserData const RightHandSide* rightHandSide; MeshFunction *u, *fu; - + ExplicitUpdaterTraverserUserData() : time( 0.0 ), differentialOperator( NULL ), @@ -49,8 +49,8 @@ class ExplicitUpdaterTraverserUserData u( NULL ), fu( NULL ) {} - - + + /*void setUserData( const Real& time, const DifferentialOperator* differentialOperator, const 
BoundaryConditions* boundaryConditions, @@ -91,24 +91,23 @@ class ExplicitUpdater typedef Pointers::SharedPointer< RightHandSide, DeviceType > RightHandSidePointer; typedef Pointers::SharedPointer< MeshFunction, DeviceType > MeshFunctionPointer; typedef Pointers::SharedPointer< TraverserUserData, DeviceType > TraverserUserDataPointer; - + void setDifferentialOperator( const DifferentialOperatorPointer& differentialOperatorPointer ) { this->userData.differentialOperator = &differentialOperatorPointer.template getData< DeviceType >(); } - + void setBoundaryConditions( const BoundaryConditionsPointer& boundaryConditionsPointer ) { this->userData.boundaryConditions = &boundaryConditionsPointer.template getData< DeviceType >(); } - + void setRightHandSide( const RightHandSidePointer& rightHandSidePointer ) { this->userData.rightHandSide = &rightHandSidePointer.template getData< DeviceType >(); } - - template< typename EntityType, - typename CommunicatorType > + + template< typename EntityType > void update( const RealType& time, const RealType& tau, const MeshPointer& meshPointer, @@ -127,7 +126,7 @@ class ExplicitUpdater "The first MeshFunction in the parameters was not bound properly." ); TNL_ASSERT_EQ( fuPointer->getData().getSize(), meshPointer->template getEntitiesCount< EntityType >(), "The second MeshFunction in the parameters was not bound properly." ); - + TNL_ASSERT_TRUE( this->userData.differentialOperator, "The differential operator is not correctly set-up. Use method setDifferentialOperator() to do it." ); TNL_ASSERT_TRUE( this->userData.rightHandSide, @@ -141,30 +140,30 @@ class ExplicitUpdater ( meshPointer, userData ); } - + template< typename EntityType > void applyBoundaryConditions( const MeshPointer& meshPointer, const RealType& time, MeshFunctionPointer& uPointer ) { TNL_ASSERT_TRUE( this->userData.boundaryConditions, - "The boundary conditions are not correctly set-up. Use method setBoundaryCondtions() to do it." 
); + "The boundary conditions are not correctly set-up. Use method setBoundaryCondtions() to do it." ); TNL_ASSERT_TRUE( &uPointer.template modifyData< DeviceType >(), "The function u is not correctly set-up. It was not bound probably with DOFs." ); this->userData.time = time; - this->userData.u = &uPointer.template modifyData< DeviceType >(); + this->userData.u = &uPointer.template modifyData< DeviceType >(); Meshes::Traverser< MeshType, EntityType > meshTraverser; meshTraverser.template processBoundaryEntities< TraverserBoundaryEntitiesProcessor > ( meshPointer, userData ); // TODO: I think that this is not necessary - /*if(CommunicatorType::isDistributed()) - fuPointer->template synchronize();*/ + /*if( MPI::GetSize() > 1 ) + fuPointer->template synchronize();*/ } - + class TraverserBoundaryEntitiesProcessor { public: @@ -194,12 +193,12 @@ class ExplicitUpdater const EntityType& entity ) { typedef Functions::FunctionAdapter< MeshType, RightHandSide > FunctionAdapter; - ( *userData.fu )( entity ) = + ( *userData.fu )( entity ) = ( *userData.differentialOperator )( *userData.u, entity, userData.time ) + FunctionAdapter::getValue( *userData.rightHandSide, entity, userData.time ); - + } - }; + }; protected: diff --git a/src/TNL/Solvers/PDE/TimeDependentPDESolver_impl.h b/src/TNL/Solvers/PDE/TimeDependentPDESolver_impl.h index 9b8530348..7bf2ca33c 100644 --- a/src/TNL/Solvers/PDE/TimeDependentPDESolver_impl.h +++ b/src/TNL/Solvers/PDE/TimeDependentPDESolver_impl.h @@ -13,6 +13,7 @@ #include "TimeDependentPDESolver.h" #include #include +#include namespace TNL { namespace Solvers { @@ -61,7 +62,7 @@ setup( const Config::ParameterContainer& parameters, // const String& meshFile = parameters.getParameter< String >( "mesh" ); const String& meshFileFormat = parameters.getParameter< String >( "mesh-format" ); - if( Problem::CommunicatorType::isDistributed() ) { + if( MPI::GetSize() > 1 ) { if( ! 
Meshes::loadDistributedMesh( *distributedMeshPointer, meshFile, meshFileFormat ) ) return false; problem->setMesh( distributedMeshPointer ); @@ -138,13 +139,13 @@ writeProlog( Logger& logger, logger.writeHeader( problem->getPrologHeader() ); problem->writeProlog( logger, parameters ); logger.writeSeparator(); - if( Problem::CommunicatorType::isDistributed() ) + if( MPI::GetSize() > 1 ) distributedMeshPointer->writeProlog( logger ); else meshPointer->writeProlog( logger ); logger.writeSeparator(); logger.writeParameter< String >( "Time discretisation:", "time-discretisation", parameters ); - if( Problem::CommunicatorType::isDistributed() ) + if( MPI::GetSize() > 1 ) logger.writeParameter< double >( "Initial time step:", this->getRefinedTimeStep( distributedMeshPointer->getLocalMesh(), this->timeStep ) ); else logger.writeParameter< double >( "Initial time step:", this->getRefinedTimeStep( *meshPointer, this->timeStep ) ); @@ -303,7 +304,7 @@ solve() * Initialize the time stepper */ this->timeStepper.setProblem( * ( this->problem ) ); - if( Problem::CommunicatorType::isDistributed() ) { + if( MPI::GetSize() > 1 ) { this->timeStepper.init( distributedMeshPointer->getLocalMesh() ); this->timeStepper.setTimeStep( this->getRefinedTimeStep( distributedMeshPointer->getLocalMesh(), this->timeStep ) ); } diff --git a/src/TNL/Solvers/PDE/TimeIndependentPDESolver_impl.h b/src/TNL/Solvers/PDE/TimeIndependentPDESolver_impl.h index bd1ccf02b..edc7086ac 100644 --- a/src/TNL/Solvers/PDE/TimeIndependentPDESolver_impl.h +++ b/src/TNL/Solvers/PDE/TimeIndependentPDESolver_impl.h @@ -20,6 +20,7 @@ #include #include #include +#include namespace TNL { namespace Solvers { @@ -52,7 +53,7 @@ setup( const Config::ParameterContainer& parameters, // const String& meshFile = parameters.getParameter< String >( "mesh" ); const String& meshFileFormat = parameters.getParameter< String >( "mesh-format" ); - if( Problem::CommunicatorType::isDistributed() ) { + if( MPI::GetSize() > 1 ) { if( ! 
Meshes::loadDistributedMesh( *distributedMeshPointer, meshFile, meshFileFormat ) ) return false; problem->setMesh( distributedMeshPointer ); @@ -95,7 +96,6 @@ setup( const Config::ParameterContainer& parameters, * Set-up the initial condition */ std::cout << "Setting up the initial condition ... "; - typedef typename Problem :: DofVectorType DofVectorType; if( ! this->problem->setInitialCondition( parameters, this->dofs ) ) return false; std::cout << " [ OK ]" << std::endl; @@ -112,7 +112,7 @@ writeProlog( Logger& logger, logger.writeHeader( problem->getPrologHeader() ); problem->writeProlog( logger, parameters ); logger.writeSeparator(); - if( Problem::CommunicatorType::isDistributed() ) + if( MPI::GetSize() > 1 ) distributedMeshPointer->writeProlog( logger ); else meshPointer->writeProlog( logger ); diff --git a/src/TNL/Solvers/Solver.h b/src/TNL/Solvers/Solver.h index 446569993..8f1d7070f 100644 --- a/src/TNL/Solvers/Solver.h +++ b/src/TNL/Solvers/Solver.h @@ -13,9 +13,9 @@ #include namespace TNL { -namespace Solvers { +namespace Solvers { -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, template< typename ConfTag > class ProblemConfig, typename ConfigTag = DefaultBuildConfigTag > class Solver @@ -30,4 +30,3 @@ class Solver } // namespace TNL #include - diff --git a/src/TNL/Solvers/SolverConfig_impl.h b/src/TNL/Solvers/SolverConfig_impl.h index aa8f6181a..3e4e70522 100644 --- a/src/TNL/Solvers/SolverConfig_impl.h +++ b/src/TNL/Solvers/SolverConfig_impl.h @@ -82,7 +82,7 @@ bool SolverConfig< ConfigTag, ProblemConfig >::configSetup( Config::ConfigDescri * Time discretisation */ config.addDelimiter( " === Time discretisation parameters ==== " ); - using PDEProblem = 
Problems::PDEProblem< Meshes::Grid<1, double, Devices::Host, int>, Communicators::MpiCommunicator >; + using PDEProblem = Problems::PDEProblem< Meshes::Grid<1, double, Devices::Host, int> >; using ExplicitTimeStepper = PDE::ExplicitTimeStepper< PDEProblem, ODE::Euler >; PDE::TimeDependentPDESolver< PDEProblem, ExplicitTimeStepper >::configSetup( config ); ExplicitTimeStepper::configSetup( config ); diff --git a/src/TNL/Solvers/SolverInitiator.h b/src/TNL/Solvers/SolverInitiator.h index 062857520..dec9d2e11 100644 --- a/src/TNL/Solvers/SolverInitiator.h +++ b/src/TNL/Solvers/SolverInitiator.h @@ -16,7 +16,7 @@ namespace TNL { namespace Solvers { -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename ConfigTag > class SolverInitiator { diff --git a/src/TNL/Solvers/SolverInitiator_impl.h b/src/TNL/Solvers/SolverInitiator_impl.h index debac52af..1678a8a30 100644 --- a/src/TNL/Solvers/SolverInitiator_impl.h +++ b/src/TNL/Solvers/SolverInitiator_impl.h @@ -18,25 +18,23 @@ #include #include -#include - namespace TNL { namespace Solvers { -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename Real, typename ConfigTag, bool enabled = ConfigTagReal< ConfigTag, Real >::enabled > class SolverInitiatorRealResolver {}; -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< 
template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename Real, typename Device, typename ConfigTag, bool enabled = ConfigTagDevice< ConfigTag, Device >::enabled > class SolverInitiatorDeviceResolver {}; -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename Real, typename Device, typename Index, @@ -44,7 +42,7 @@ template< template< typename Real, typename Device, typename Index, typename Mes bool enabled = ConfigTagIndex< ConfigTag, Index >::enabled > class SolverInitiatorIndexResolver {}; -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename Real, typename Device, typename Index, @@ -53,7 +51,7 @@ template< template< typename Real, typename Device, typename Index, typename Mes class SolverInitiatorMeshResolver {}; -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename ConfigTag > bool SolverInitiator< ProblemSetter, ConfigTag > :: run( const Config::ParameterContainer& parameters ) { @@ -68,7 +66,7 @@ bool SolverInitiator< ProblemSetter, ConfigTag > :: run( const Config::Parameter return false; }; -template< template< 
typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename Real, typename ConfigTag > class SolverInitiatorRealResolver< ProblemSetter, Real, ConfigTag, true > @@ -86,7 +84,7 @@ class SolverInitiatorRealResolver< ProblemSetter, Real, ConfigTag, true > } }; -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename Real, typename ConfigTag > class SolverInitiatorRealResolver< ProblemSetter, Real, ConfigTag, false > @@ -99,7 +97,7 @@ class SolverInitiatorRealResolver< ProblemSetter, Real, ConfigTag, false > } }; -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename Real, typename Device, typename ConfigTag > @@ -120,7 +118,7 @@ class SolverInitiatorDeviceResolver< ProblemSetter, Real, Device, ConfigTag, tru } }; -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename Real, typename Device, typename ConfigTag > @@ -134,7 +132,7 @@ class 
SolverInitiatorDeviceResolver< ProblemSetter, Real, Device, ConfigTag, fal } }; -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename Real, typename Device, typename Index, @@ -149,7 +147,7 @@ class SolverInitiatorIndexResolver< ProblemSetter, Real, Device, Index, ConfigTa } }; -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename Real, typename Device, typename Index, @@ -163,7 +161,7 @@ class SolverInitiatorIndexResolver< ProblemSetter, Real, Device, Index, ConfigTa } }; -template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename Real, typename Device, typename Index, @@ -173,28 +171,25 @@ class SolverInitiatorMeshResolver< ProblemSetter, Real, Device, Index, ConfigTag public: static bool run( const Config::ParameterContainer& parameters ) { - using CommunicatorType = Communicators::MpiCommunicator; return ProblemSetter< Real, Device, Index, Meshes::DummyMesh< Real, Device, Index >, ConfigTag, - SolverStarter< ConfigTag >, CommunicatorType >::template run< Real, Device, Index, ConfigTag >( parameters ); + SolverStarter< ConfigTag > >::template run< Real, Device, Index, ConfigTag >( parameters ); } }; -template< 
template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename ConfigTag, typename SolverStarter > class ProblemSetter, typename Real, typename Device, typename Index, typename ConfigTag > class SolverInitiatorMeshResolver< ProblemSetter, Real, Device, Index, ConfigTag, true > { - using CommunicatorType = Communicators::MpiCommunicator; - // wrapper for MeshTypeResolver template< typename MeshType > - using ProblemSetterWrapper = ProblemSetter< Real, Device, Index, MeshType, ConfigTag, SolverStarter< ConfigTag >, CommunicatorType >; + using ProblemSetterWrapper = ProblemSetter< Real, Device, Index, MeshType, ConfigTag, SolverStarter< ConfigTag > >; public: static bool run( const Config::ParameterContainer& parameters ) diff --git a/src/TNL/Solvers/SolverStarter_impl.h b/src/TNL/Solvers/SolverStarter_impl.h index dbecdaad9..49a43f8c8 100644 --- a/src/TNL/Solvers/SolverStarter_impl.h +++ b/src/TNL/Solvers/SolverStarter_impl.h @@ -133,7 +133,7 @@ class UserDefinedTimeDiscretisationSetter< Problem, ConfigTag, void > return SolverStarterTimeDiscretisationSetter< Problem, ExplicitTimeDiscretisationTag, ConfigTag >::run( problem, parameters ); if( timeDiscretisation == "semi-implicit" ) { - if( Problem::CommunicatorType::isDistributed() ) + if( MPI::GetSize() > 1 ) { std::cerr << "TNL currently does not support semi-implicit solvers with MPI." << std::endl; return false; @@ -142,7 +142,7 @@ class UserDefinedTimeDiscretisationSetter< Problem, ConfigTag, void > } if( timeDiscretisation == "implicit" ) { - if( Problem::CommunicatorType::isDistributed() ) + if( MPI::GetSize() > 1 ) { std::cerr << "TNL currently does not support implicit solvers with MPI." 
<< std::endl; return false; diff --git a/src/TNL/Solvers/Solver_impl.h b/src/TNL/Solvers/Solver_impl.h index bc1f43c77..a054ad7d9 100644 --- a/src/TNL/Solvers/Solver_impl.h +++ b/src/TNL/Solvers/Solver_impl.h @@ -21,7 +21,7 @@ namespace TNL { namespace Solvers { -template< template< typename Real, typename Device, typename Index, typename MeshType, typename MeshConfig, typename SolverStarter, typename CommunicatorType > class ProblemSetter, +template< template< typename Real, typename Device, typename Index, typename MeshType, typename MeshConfig, typename SolverStarter > class ProblemSetter, template< typename MeshConfig > class ProblemConfig, typename MeshConfig > bool diff --git a/src/Tools/tnl-quickstart/main.h.in b/src/Tools/tnl-quickstart/main.h.in index 305f59e1a..52c9475cf 100644 --- a/src/Tools/tnl-quickstart/main.h.in +++ b/src/Tools/tnl-quickstart/main.h.in @@ -12,16 +12,16 @@ using namespace TNL; typedef {problemBaseName}BuildConfigTag BuildConfig; - -/**** - * Uncomment the following (and comment the previous line) for the complete build. - * This will include support for all floating point precisions, all indexing types - * and more solvers. You may then choose between them from the command line. - * The compile time may, however, take tens of minutes or even several hours, - * especially if CUDA is enabled. Use this, if you want, only for the final build, - * not in the development phase. - */ -//typedef TNL::Solvers::DefaultConfigTag BuildConfig; + +/**** + * Uncomment the following (and comment the previous line) for the complete build. + * This will include support for all floating point precisions, all indexing types + * and more solvers. You may then choose between them from the command line. + * The compile time may, however, take tens of minutes or even several hours, + * especially if CUDA is enabled. Use this, if you want, only for the final build, + * not in the development phase. 
+ */ +//typedef TNL::Solvers::DefaultConfigTag BuildConfig; template< typename ConfigTag > class {problemBaseName}Config @@ -47,8 +47,7 @@ template< typename Real, typename Index, typename MeshType, typename ConfigTag, - typename SolverStarter, - typename Communicator > + typename SolverStarter > class {problemBaseName}Setter {{ public: @@ -61,14 +60,14 @@ class {problemBaseName}Setter {{ enum {{ Dimension = MeshType::getMeshDimension() }}; typedef {operatorName}< MeshType, Real, Index > ApproximateOperator; - typedef {problemBaseName}Rhs< MeshType, Real > RightHandSide; + typedef {problemBaseName}Rhs< MeshType, Real > RightHandSide; typedef Containers::StaticVector < MeshType::getMeshDimension(), Real > Vertex; /**** * Resolve the template arguments of your solver here. * The following code is for the Dirichlet and the Neumann boundary conditions. - * Both can be constant or defined as discrete functions as Functions::MeshFunction. - */ + * Both can be constant or defined as discrete functions as Functions::MeshFunction. 
+ */ String boundaryConditionsType = parameters.getParameter< String >( "boundary-conditions-type" ); if( parameters.checkParameter( "boundary-conditions-constant" ) ) {{ @@ -76,25 +75,25 @@ class {problemBaseName}Setter if( boundaryConditionsType == "dirichlet" ) {{ typedef Operators::DirichletBoundaryConditions< MeshType, ConstantFunction, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef {problemBaseName}Problem< MeshType, Communicator, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; + typedef {problemBaseName}Problem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); }} typedef Operators::NeumannBoundaryConditions< MeshType, ConstantFunction, Real, Index > BoundaryConditions; - typedef {problemBaseName}Problem< MeshType, Communicator, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; + typedef {problemBaseName}Problem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); }} typedef Functions::MeshFunction< MeshType > MeshFunction; if( boundaryConditionsType == "dirichlet" ) - {{ + {{ typedef Operators::DirichletBoundaryConditions< MeshType, MeshFunction, MeshType::getMeshDimension(), Real, Index > BoundaryConditions; - typedef {problemBaseName}Problem< MeshType, Communicator, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; + typedef {problemBaseName}Problem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); }} typedef Operators::NeumannBoundaryConditions< MeshType, MeshFunction, Real, Index > BoundaryConditions; - typedef {problemBaseName}Problem< MeshType, Communicator, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; + typedef 
{problemBaseName}Problem< MeshType, BoundaryConditions, RightHandSide, ApproximateOperator > Problem; SolverStarter solverStarter; return solverStarter.template run< Problem >( parameters ); }} diff --git a/src/Tools/tnl-quickstart/problem.h.in b/src/Tools/tnl-quickstart/problem.h.in index d72120c1f..270580a0e 100644 --- a/src/Tools/tnl-quickstart/problem.h.in +++ b/src/Tools/tnl-quickstart/problem.h.in @@ -8,13 +8,11 @@ template< typename Mesh, - typename Communicator, typename BoundaryCondition, typename RightHandSide, typename DifferentialOperator > class {problemBaseName}Problem: public TNL::Problems::PDEProblem< Mesh, - Communicator, typename DifferentialOperator::RealType, typename Mesh::DeviceType, typename Mesh::IndexType > @@ -25,7 +23,7 @@ class {problemBaseName}Problem: typedef typename Mesh::DeviceType DeviceType; typedef typename Mesh::IndexType IndexType; typedef TNL::Functions::MeshFunction< Mesh, Mesh::getMeshDimension(), RealType > MeshFunctionType; - typedef TNL::Problems::PDEProblem< Mesh, Communicator, RealType, DeviceType, IndexType > BaseType; + typedef TNL::Problems::PDEProblem< Mesh, RealType, DeviceType, IndexType > BaseType; typedef TNL::Pointers::SharedPointer< MeshFunctionType > MeshFunctionPointer; typedef TNL::Pointers::SharedPointer< DifferentialOperator > DifferentialOperatorPointer; typedef TNL::Pointers::SharedPointer< BoundaryCondition > BoundaryConditionPointer; @@ -36,8 +34,6 @@ class {problemBaseName}Problem: using typename BaseType::DofVectorType; using typename BaseType::DofVectorPointer; - using CommunicatorType = Communicator; - TNL::String getPrologHeader() const; void writeProlog( TNL::Logger& logger, @@ -74,16 +70,16 @@ class {problemBaseName}Problem: DofVectorPointer& rightHandSide ); protected: - + DifferentialOperatorPointer differentialOperator; BoundaryConditionPointer boundaryCondition; RightHandSidePointer rightHandSide; - + TNL::Solvers::PDE::ExplicitUpdater< Mesh, MeshFunctionType, DifferentialOperator, 
BoundaryCondition, RightHandSide > explicitUpdater; - TNL::Solvers::PDE::LinearSystemAssembler< Mesh, + TNL::Solvers::PDE::LinearSystemAssembler< Mesh, MeshFunctionType, DifferentialOperator, BoundaryCondition, diff --git a/src/Tools/tnl-quickstart/problem_impl.h.in b/src/Tools/tnl-quickstart/problem_impl.h.in index 64db5682c..4bec09a35 100644 --- a/src/Tools/tnl-quickstart/problem_impl.h.in +++ b/src/Tools/tnl-quickstart/problem_impl.h.in @@ -8,24 +8,22 @@ #include template< typename Mesh, - typename Communicator, typename BoundaryCondition, typename RightHandSide, typename DifferentialOperator > -TNL::String -{problemBaseName}Problem< Mesh, Communicator, BoundaryCondition, RightHandSide, DifferentialOperator >:: +TNL::String +{problemBaseName}Problem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getPrologHeader() const -{{ +{{ return TNL::String( "{problemName}" ); }} template< typename Mesh, - typename Communicator, typename BoundaryCondition, typename RightHandSide, typename DifferentialOperator > void -{problemBaseName}Problem< Mesh, Communicator, BoundaryCondition, RightHandSide, DifferentialOperator >:: +{problemBaseName}Problem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: writeProlog( TNL::Logger& logger, const TNL::Config::ParameterContainer& parameters ) const {{ /**** @@ -35,12 +33,11 @@ writeProlog( TNL::Logger& logger, const TNL::Config::ParameterContainer& paramet }} template< typename Mesh, - typename Communicator, typename BoundaryCondition, typename RightHandSide, typename DifferentialOperator > bool -{problemBaseName}Problem< Mesh, Communicator, BoundaryCondition, RightHandSide, DifferentialOperator >:: +{problemBaseName}Problem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setup( const TNL::Config::ParameterContainer& parameters, const TNL::String& prefix ) {{ @@ -51,12 +48,11 @@ setup( const TNL::Config::ParameterContainer& parameters, }} template< typename Mesh, - typename 
Communicator, typename BoundaryCondition, typename RightHandSide, typename DifferentialOperator > -typename {problemBaseName}Problem< Mesh, Communicator, BoundaryCondition, RightHandSide, DifferentialOperator >::IndexType - {problemBaseName}Problem< Mesh, Communicator, BoundaryCondition, RightHandSide, DifferentialOperator >:: +typename {problemBaseName}Problem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >::IndexType + {problemBaseName}Problem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getDofs() const {{ /**** @@ -67,24 +63,22 @@ getDofs() const }} template< typename Mesh, - typename Communicator, typename BoundaryCondition, typename RightHandSide, typename DifferentialOperator > void -{problemBaseName}Problem< Mesh, Communicator, BoundaryCondition, RightHandSide, DifferentialOperator >:: -bindDofs( DofVectorPointer& dofVector ) +{problemBaseName}Problem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: +bindDofs( DofVectorPointer& dofVector ) {{ }} template< typename Mesh, - typename Communicator, typename BoundaryCondition, typename RightHandSide, typename DifferentialOperator > bool -{problemBaseName}Problem< Mesh, Communicator, BoundaryCondition, RightHandSide, DifferentialOperator >:: -setInitialCondition( const TNL::Config::ParameterContainer& parameters, +{problemBaseName}Problem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: +setInitialCondition( const TNL::Config::ParameterContainer& parameters, DofVectorPointer& dofs ) {{ const TNL::String& initialConditionFile = parameters.getParameter< TNL::String >( "initial-condition" ); @@ -94,17 +88,16 @@ setInitialCondition( const TNL::Config::ParameterContainer& parameters, std::cerr << "I am not able to load the initial condition from the file " << initialConditionFile << "." 
<< std::endl; return false; }} - return true; + return true; }} template< typename Mesh, - typename Communicator, typename BoundaryCondition, typename RightHandSide, typename DifferentialOperator > template< typename MatrixPointer > bool -{problemBaseName}Problem< Mesh, Communicator, BoundaryCondition, RightHandSide, DifferentialOperator >:: +{problemBaseName}Problem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: setupLinearSystem( MatrixPointer& matrixPointer ) {{ const IndexType dofs = this->getDofs(); @@ -122,12 +115,11 @@ setupLinearSystem( MatrixPointer& matrixPointer ) }} template< typename Mesh, - typename Communicator, typename BoundaryCondition, typename RightHandSide, typename DifferentialOperator > bool -{problemBaseName}Problem< Mesh, Communicator, BoundaryCondition, RightHandSide, DifferentialOperator >:: +{problemBaseName}Problem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: makeSnapshot( const RealType& time, const IndexType& step, DofVectorPointer& dofs ) @@ -144,12 +136,11 @@ makeSnapshot( const RealType& time, }} template< typename Mesh, - typename Communicator, typename BoundaryCondition, typename RightHandSide, typename DifferentialOperator > void -{problemBaseName}Problem< Mesh, Communicator, BoundaryCondition, RightHandSide, DifferentialOperator >:: +{problemBaseName}Problem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: getExplicitUpdate( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -164,22 +155,21 @@ getExplicitUpdate( const RealType& time, * You may use supporting mesh dependent data if you need. 
*/ - TNL::Pointers::SharedPointer< MeshFunctionType > uPointer( this->getMesh(), _u ); - TNL::Pointers::SharedPointer< MeshFunctionType > fuPointer( this->getMesh(), _fu ); + TNL::Pointers::SharedPointer< MeshFunctionType > uPointer( this->getMesh(), _u ); + TNL::Pointers::SharedPointer< MeshFunctionType > fuPointer( this->getMesh(), _fu ); this->explicitUpdater.setDifferentialOperator( this->differentialOperator ); this->explicitUpdater.setBoundaryConditions( this->boundaryCondition ); this->explicitUpdater.setRightHandSide( this->rightHandSide ); - this->explicitUpdater.template update< typename Mesh::Cell, CommunicatorType >( time, tau, this->getMesh(), uPointer, fuPointer ); + this->explicitUpdater.template update< typename Mesh::Cell >( time, tau, this->getMesh(), uPointer, fuPointer ); }} template< typename Mesh, - typename Communicator, typename BoundaryCondition, typename RightHandSide, typename DifferentialOperator > template< typename MatrixPointer > void -{problemBaseName}Problem< Mesh, Communicator, BoundaryCondition, RightHandSide, DifferentialOperator >:: +{problemBaseName}Problem< Mesh, BoundaryCondition, RightHandSide, DifferentialOperator >:: assemblyLinearSystem( const RealType& time, const RealType& tau, DofVectorPointer& _u, @@ -196,7 +186,7 @@ assemblyLinearSystem( const RealType& time, this->systemAssembler.setDifferentialOperator( this->differentialOperator ); this->systemAssembler.setBoundaryConditions( this->boundaryCondition ); this->systemAssembler.setRightHandSide( this->rightHandSide ); - this->systemAssembler.template assembly< typename Mesh::Cell, typename MatrixPointer::ObjectType >( + this->systemAssembler.template assembly< typename Mesh::Cell, typename MatrixPointer::ObjectType >( time, tau, this->getMesh(), -- GitLab From fe38b075b3225fb727bbdf99ef9412d42610d3fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Klinkovsk=C3=BD?= Date: Tue, 31 Aug 2021 14:54:08 +0200 Subject: [PATCH 04/10] MPI refactoring - removed wrapper 
functions: AllGroup, NullGroup, NullRequest - added MPI_COMM_WORLD and other handles to MPI/DummyDefs.h - renamed getCommunicationGroup to getCommunicator in all data structures - improved naming to match the MPI terminology: communicator instead of group --- .../DistSpMV/tnl-benchmark-distributed-spmv.h | 16 +- src/Benchmarks/LinearSolvers/benchmarks.h | 2 +- .../tnl-benchmark-linear-solvers.h | 12 +- src/Benchmarks/ODESolvers/Euler.hpp | 2 +- src/Benchmarks/ODESolvers/Merson.hpp | 4 +- .../ODESolvers/tnl-benchmark-ode-solvers.h | 2 - .../tnlFastSweepingMethod2D_impl.h | 18 +-- .../tnlFastSweepingMethod3D_impl.h | 26 ++-- src/Python/pytnl/tnl_mpi/DistributedMesh.h | 4 +- src/TNL/Algorithms/detail/DistributedScan.h | 10 +- src/TNL/Algorithms/distributedScan.h | 4 +- src/TNL/Containers/Array.h | 4 +- src/TNL/Containers/ArrayView.h | 2 +- src/TNL/Containers/DistributedArray.h | 6 +- src/TNL/Containers/DistributedArray.hpp | 16 +- src/TNL/Containers/DistributedArrayView.h | 12 +- src/TNL/Containers/DistributedArrayView.hpp | 38 ++--- src/TNL/Containers/DistributedNDArray.h | 32 ++-- .../DistributedNDArraySynchronizer.h | 30 ++-- src/TNL/Containers/DistributedNDArrayView.h | 26 ++-- src/TNL/Containers/DistributedVectorView.hpp | 48 +++--- .../Expressions/DistributedComparison.h | 80 +++++----- .../DistributedExpressionTemplates.h | 20 +-- .../DistributedVerticalOperations.h | 56 +++---- src/TNL/Containers/Partitioner.h | 26 ++-- src/TNL/Functions/MeshFunctionIO.h | 4 +- src/TNL/MPI/DummyDefs.h | 17 +++ src/TNL/MPI/Utils.h | 14 +- src/TNL/MPI/Wrappers.h | 139 +++++++----------- src/TNL/Matrices/DistributedMatrix.h | 12 +- src/TNL/Matrices/DistributedMatrix_impl.h | 44 +++--- src/TNL/Matrices/DistributedSpMV.h | 24 +-- .../DistributedMeshes/DistributedGrid.h | 9 +- .../DistributedMeshes/DistributedGrid.hpp | 26 ++-- .../DistributedGridSynchronizer.h | 12 +- .../DistributedMeshes/DistributedMesh.h | 14 +- .../DistributedMeshSynchronizer.h | 38 ++--- 
.../DistributedMeshes/distributeSubentities.h | 40 ++--- src/TNL/Meshes/Readers/PVTIReader.h | 16 +- src/TNL/Meshes/Readers/PVTUReader.h | 20 +-- src/TNL/Meshes/Writers/PVTIWriter.hpp | 16 +- src/TNL/Meshes/Writers/PVTUWriter.h | 2 +- src/TNL/Meshes/Writers/PVTUWriter.hpp | 6 +- src/TNL/Solvers/Linear/BICGStabL_impl.h | 2 +- src/TNL/Solvers/Linear/GMRES_impl.h | 16 +- src/TNL/Solvers/Linear/Traits.h | 4 +- src/TNL/Solvers/ODE/Merson_impl.h | 2 +- src/TNL/String.h | 64 ++++---- src/TNL/TypeTraits.h | 6 +- src/Tools/tnl-game-of-life.cpp | 6 +- src/Tools/tnl-test-distributed-mesh.h | 4 +- .../Algorithms/distributedScanTest.h | 24 +-- .../Containers/DistributedArrayTest.h | 18 +-- .../Containers/VectorBinaryOperationsTest.h | 18 +-- .../Containers/VectorUnaryOperationsTest.h | 22 +-- .../Containers/VectorVerticalOperationsTest.h | 12 +- .../DistributedNDArrayOverlaps_1D_test.h | 16 +- .../DistributedNDArrayOverlaps_semi1D_test.h | 16 +- .../ndarray/DistributedNDArray_1D_test.h | 16 +- .../ndarray/DistributedNDArray_semi1D_test.h | 16 +- .../Matrices/DistributedMatrixTest.h | 24 +-- .../CutDistributedMeshFunctionTest.cpp | 4 +- .../DistributedMeshes/DistributedMeshTest.h | 30 ++-- 63 files changed, 627 insertions(+), 642 deletions(-) diff --git a/src/Benchmarks/DistSpMV/tnl-benchmark-distributed-spmv.h b/src/Benchmarks/DistSpMV/tnl-benchmark-distributed-spmv.h index b1f6bca03..29b464755 100644 --- a/src/Benchmarks/DistSpMV/tnl-benchmark-distributed-spmv.h +++ b/src/Benchmarks/DistSpMV/tnl-benchmark-distributed-spmv.h @@ -108,7 +108,7 @@ benchmarkDistributedSpmv( Benchmark& benchmark, // benchmark function auto compute = [&]() { matrix.vectorProduct( x, y ); - TNL::MPI::Barrier( matrix.getCommunicationGroup() ); + TNL::MPI::Barrier( matrix.getCommunicator() ); }; benchmark.time< typename Matrix::DeviceType >( reset, performer, compute ); @@ -223,13 +223,13 @@ struct SpmvBenchmark VectorType& vector ) { // set up the distributed matrix - const auto group = 
TNL::MPI::AllGroup(); - const auto localRange = Partitioner::splitRange( matrix.getRows(), group ); - DistributedMatrix distributedMatrix( localRange, matrix.getRows(), matrix.getColumns(), group ); - DistributedVector distributedVector( localRange, 0, matrix.getRows(), group ); + const auto communicator = MPI_COMM_WORLD; + const auto localRange = Partitioner::splitRange( matrix.getRows(), communicator ); + DistributedMatrix distributedMatrix( localRange, matrix.getRows(), matrix.getColumns(), communicator ); + DistributedVector distributedVector( localRange, 0, matrix.getRows(), communicator ); // copy the row lengths from the global matrix to the distributed matrix - DistributedRowLengths distributedRowLengths( localRange, 0, matrix.getRows(), group ); + DistributedRowLengths distributedRowLengths( localRange, 0, matrix.getRows(), communicator ); for( IndexType i = 0; i < distributedMatrix.getLocalMatrix().getRows(); i++ ) { const auto gi = distributedMatrix.getLocalRowRange().getGlobalIndex( i ); distributedRowLengths[ gi ] = matrix.getRowCapacity( gi ); @@ -265,8 +265,8 @@ struct SpmvBenchmark DistributedVector distributedY; distributedY.setLike( distributedVector ); distributedMatrix.vectorProduct( distributedVector, distributedY ); - const int rank = TNL::MPI::GetRank( distributedMatrix.getCommunicationGroup() ); - const int nproc = TNL::MPI::GetSize( distributedMatrix.getCommunicationGroup() ); + const int rank = TNL::MPI::GetRank( distributedMatrix.getCommunicator() ); + const int nproc = TNL::MPI::GetSize( distributedMatrix.getCommunicator() ); typename VectorType::ViewType subY( &y[ Partitioner::getOffset( matrix.getRows(), rank, nproc ) ], Partitioner::getSizeForRank( matrix.getRows(), rank, nproc ) ); TNL_ASSERT_EQ( distributedY.getLocalView(), subY, "WRONG RESULT !!!" 
); diff --git a/src/Benchmarks/LinearSolvers/benchmarks.h b/src/Benchmarks/LinearSolvers/benchmarks.h index c10c996e3..b9e130c39 100644 --- a/src/Benchmarks/LinearSolvers/benchmarks.h +++ b/src/Benchmarks/LinearSolvers/benchmarks.h @@ -36,7 +36,7 @@ void barrier( const Matrix& matrix ) template< typename Matrix > void barrier( const Matrices::DistributedMatrix< Matrix >& matrix ) { - TNL::MPI::Barrier( matrix.getCommunicationGroup() ); + TNL::MPI::Barrier( matrix.getCommunicator() ); } template< typename Device > diff --git a/src/Benchmarks/LinearSolvers/tnl-benchmark-linear-solvers.h b/src/Benchmarks/LinearSolvers/tnl-benchmark-linear-solvers.h index 35d63bca6..393fafb49 100644 --- a/src/Benchmarks/LinearSolvers/tnl-benchmark-linear-solvers.h +++ b/src/Benchmarks/LinearSolvers/tnl-benchmark-linear-solvers.h @@ -430,14 +430,14 @@ struct LinearSolversBenchmark const VectorType& b ) { // set up the distributed matrix - const auto group = TNL::MPI::AllGroup(); - const auto localRange = Partitioner::splitRange( matrixPointer->getRows(), group ); - SharedPointer< DistributedMatrix > distMatrixPointer( localRange, matrixPointer->getRows(), matrixPointer->getColumns(), group ); - DistributedVector dist_x0( localRange, 0, matrixPointer->getRows(), group ); - DistributedVector dist_b( localRange, 0, matrixPointer->getRows(), group ); + const auto communicator = MPI_COMM_WORLD; + const auto localRange = Partitioner::splitRange( matrixPointer->getRows(), communicator ); + SharedPointer< DistributedMatrix > distMatrixPointer( localRange, matrixPointer->getRows(), matrixPointer->getColumns(), communicator ); + DistributedVector dist_x0( localRange, 0, matrixPointer->getRows(), communicator ); + DistributedVector dist_b( localRange, 0, matrixPointer->getRows(), communicator ); // copy the row capacities from the global matrix to the distributed matrix - DistributedRowLengths distributedRowLengths( localRange, 0, matrixPointer->getRows(), group ); + DistributedRowLengths 
distributedRowLengths( localRange, 0, matrixPointer->getRows(), communicator ); for( IndexType i = 0; i < distMatrixPointer->getLocalMatrix().getRows(); i++ ) { const auto gi = distMatrixPointer->getLocalRowRange().getGlobalIndex( i ); distributedRowLengths[ gi ] = matrixPointer->getRowCapacity( gi ); diff --git a/src/Benchmarks/ODESolvers/Euler.hpp b/src/Benchmarks/ODESolvers/Euler.hpp index fcc8654be..840c0a45a 100644 --- a/src/Benchmarks/ODESolvers/Euler.hpp +++ b/src/Benchmarks/ODESolvers/Euler.hpp @@ -200,7 +200,7 @@ void Euler< Problem, SolverMonitor >::computeNewTimeLevel( DofVectorPointer& u, } localResidue /= tau * ( RealType ) size; - TNL::MPI::Allreduce( &localResidue, ¤tResidue, 1, MPI_SUM, TNL::MPI::AllGroup() ); + TNL::MPI::Allreduce( &localResidue, ¤tResidue, 1, MPI_SUM, MPI_COMM_WORLD ); //std::cerr << "Local residue = " << localResidue << " - globalResidue = " << currentResidue << std::endl; } diff --git a/src/Benchmarks/ODESolvers/Merson.hpp b/src/Benchmarks/ODESolvers/Merson.hpp index b45faa1b4..3bb6f9c96 100644 --- a/src/Benchmarks/ODESolvers/Merson.hpp +++ b/src/Benchmarks/ODESolvers/Merson.hpp @@ -403,7 +403,7 @@ typename Problem :: RealType Merson< Problem, SolverMonitor >::computeError( con } #endif } - TNL::MPI::Allreduce( &eps, &maxEps, 1, MPI_MAX, TNL::MPI::AllGroup() ); + TNL::MPI::Allreduce( &eps, &maxEps, 1, MPI_MAX, MPI_COMM_WORLD ); return maxEps; } @@ -465,7 +465,7 @@ void Merson< Problem, SolverMonitor >::computeNewTimeLevel( const RealType time, } localResidue /= tau * ( RealType ) size; - TNL::MPI::Allreduce( &localResidue, ¤tResidue, 1, MPI_SUM, TNL::MPI::AllGroup() ); + TNL::MPI::Allreduce( &localResidue, ¤tResidue, 1, MPI_SUM, MPI_COMM_WORLD ); /*#ifdef USE_MPI TNLMPI::Allreduce( localResidue, currentResidue, 1, MPI_SUM); #else diff --git a/src/Benchmarks/ODESolvers/tnl-benchmark-ode-solvers.h b/src/Benchmarks/ODESolvers/tnl-benchmark-ode-solvers.h index 0d8d3c04e..4def52d52 100644 --- 
a/src/Benchmarks/ODESolvers/tnl-benchmark-ode-solvers.h +++ b/src/Benchmarks/ODESolvers/tnl-benchmark-ode-solvers.h @@ -134,8 +134,6 @@ struct ODESolversBenchmark const Config::ParameterContainer& parameters, size_t dofs ) { - //const auto group = TNL::MPI::AllGroup(); - std::cout << "Iterative solvers:" << std::endl; benchmarkODESolvers< Real, Index >( benchmark, parameters, dofs ); } diff --git a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod2D_impl.h b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod2D_impl.h index a119c8c7a..856b39c39 100644 --- a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod2D_impl.h +++ b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod2D_impl.h @@ -443,37 +443,37 @@ getInfoFromNeighbours( int& calculatedBefore, int& calculateMPIAgain, if( neighbours[0] != -1 ) // LEFT { requestsInformation[neighCount++] = - TNL::MPI::Isend( &calculatedBefore, 1, neighbours[0], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[0], 0, MPI_COMM_WORLD ); requestsInformation[neighCount++] = - TNL::MPI::Irecv( &calculateFromNeighbours[0], 1, neighbours[0], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Irecv( &calculateFromNeighbours[0], 1, neighbours[0], 0, MPI_COMM_WORLD ); } if( neighbours[1] != -1 ) // RIGHT { requestsInformation[neighCount++] = - TNL::MPI::Isend( &calculatedBefore, 1, neighbours[1], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[1], 0, MPI_COMM_WORLD ); requestsInformation[neighCount++] = - TNL::MPI::Irecv( &calculateFromNeighbours[1], 1, neighbours[1], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Irecv( &calculateFromNeighbours[1], 1, neighbours[1], 0, MPI_COMM_WORLD ); } if( neighbours[2] != -1 ) //UP { requestsInformation[neighCount++] = - TNL::MPI::Isend( &calculatedBefore, 1, neighbours[2], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[2], 0, 
MPI_COMM_WORLD ); requestsInformation[neighCount++] = - TNL::MPI::Irecv( &calculateFromNeighbours[2], 1, neighbours[2], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Irecv( &calculateFromNeighbours[2], 1, neighbours[2], 0, MPI_COMM_WORLD ); } if( neighbours[5] != -1 ) //DOWN { requestsInformation[neighCount++] = - TNL::MPI::Isend( &calculatedBefore, 1, neighbours[5], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[5], 0, MPI_COMM_WORLD ); requestsInformation[neighCount++] = - TNL::MPI::Irecv( &calculateFromNeighbours[3], 1, neighbours[5], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Irecv( &calculateFromNeighbours[3], 1, neighbours[5], 0, MPI_COMM_WORLD ); } TNL::MPI::Waitall( requestsInformation, neighCount ); - TNL::MPI::Allreduce( &calculatedBefore, &calculatedBefore, 1, MPI_LOR, TNL::MPI::AllGroup() ); + TNL::MPI::Allreduce( &calculatedBefore, &calculatedBefore, 1, MPI_LOR, MPI_COMM_WORLD ); calculateMPIAgain = calculateFromNeighbours[0] || calculateFromNeighbours[1] || calculateFromNeighbours[2] || calculateFromNeighbours[3]; } diff --git a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod3D_impl.h b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod3D_impl.h index 21d0e5a67..64dd1d9a3 100644 --- a/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod3D_impl.h +++ b/src/Examples/Hamilton-Jacobi/Solvers/hamilton-jacobi/tnlFastSweepingMethod3D_impl.h @@ -446,54 +446,54 @@ getInfoFromNeighbours( int& calculatedBefore, int& calculateMPIAgain, if( neighbours[0] != -1 ) // WEST { requestsInformation[neighCount++] = - TNL::MPI::Isend( &calculatedBefore, 1, neighbours[0], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[0], 0, MPI_COMM_WORLD ); requestsInformation[neighCount++] = - TNL::MPI::Irecv( &calculateFromNeighbours[0], 1, neighbours[0], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Irecv( &calculateFromNeighbours[0], 1, neighbours[0], 0, 
MPI_COMM_WORLD ); } if( neighbours[1] != -1 ) // EAST { requestsInformation[neighCount++] = - TNL::MPI::Isend( &calculatedBefore, 1, neighbours[1], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[1], 0, MPI_COMM_WORLD ); requestsInformation[neighCount++] = - TNL::MPI::Irecv( &calculateFromNeighbours[1], 1, neighbours[1], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Irecv( &calculateFromNeighbours[1], 1, neighbours[1], 0, MPI_COMM_WORLD ); } if( neighbours[2] != -1 ) //NORTH { requestsInformation[neighCount++] = - TNL::MPI::Isend( &calculatedBefore, 1, neighbours[2], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[2], 0, MPI_COMM_WORLD ); requestsInformation[neighCount++] = - TNL::MPI::Irecv( &calculateFromNeighbours[2], 1, neighbours[2], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Irecv( &calculateFromNeighbours[2], 1, neighbours[2], 0, MPI_COMM_WORLD ); } if( neighbours[5] != -1 ) //SOUTH { requestsInformation[neighCount++] = - TNL::MPI::Isend( &calculatedBefore, 1, neighbours[5], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[5], 0, MPI_COMM_WORLD ); requestsInformation[neighCount++] = - TNL::MPI::Irecv( &calculateFromNeighbours[3], 1, neighbours[5], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Irecv( &calculateFromNeighbours[3], 1, neighbours[5], 0, MPI_COMM_WORLD ); } if( neighbours[8] != -1 ) // TOP { requestsInformation[neighCount++] = - TNL::MPI::Isend( &calculatedBefore, 1, neighbours[8], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Isend( &calculatedBefore, 1, neighbours[8], 0, MPI_COMM_WORLD ); requestsInformation[neighCount++] = - TNL::MPI::Irecv( &calculateFromNeighbours[4], 1, neighbours[8], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Irecv( &calculateFromNeighbours[4], 1, neighbours[8], 0, MPI_COMM_WORLD ); } if( neighbours[17] != -1 ) //BOTTOM { requestsInformation[neighCount++] = - TNL::MPI::Isend( &calculatedBefore, 1, neighbours[17], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Isend( 
&calculatedBefore, 1, neighbours[17], 0, MPI_COMM_WORLD ); requestsInformation[neighCount++] = - TNL::MPI::Irecv( &calculateFromNeighbours[5], 1, neighbours[17], 0, TNL::MPI::AllGroup() ); + TNL::MPI::Irecv( &calculateFromNeighbours[5], 1, neighbours[17], 0, MPI_COMM_WORLD ); } TNL::MPI::Waitall( requestsInformation, neighCount ); - TNL::MPI::Allreduce( &calculatedBefore, &calculatedBefore, 1, MPI_LOR, TNL::MPI::AllGroup() ); + TNL::MPI::Allreduce( &calculatedBefore, &calculatedBefore, 1, MPI_LOR, MPI_COMM_WORLD ); calculateMPIAgain = calculateFromNeighbours[0] || calculateFromNeighbours[1] || calculateFromNeighbours[2] || calculateFromNeighbours[3] || calculateFromNeighbours[4] || calculateFromNeighbours[5]; diff --git a/src/Python/pytnl/tnl_mpi/DistributedMesh.h b/src/Python/pytnl/tnl_mpi/DistributedMesh.h index 64afe5978..7bb4440eb 100644 --- a/src/Python/pytnl/tnl_mpi/DistributedMesh.h +++ b/src/Python/pytnl/tnl_mpi/DistributedMesh.h @@ -9,8 +9,8 @@ void export_DistributedMesh( py::module & m, const char* name ) auto mesh = py::class_< Mesh >( m, name ) .def(py::init<>()) .def_static("getMeshDimension", &Mesh::getMeshDimension) -// .def("setCommunicationGroup", &Mesh::setCommunicationGroup) -// .def("getCommunicationGroup", &Mesh::getCommunicationGroup) +// .def("setmunicationGroup", &Mesh::setCommunicationGroup) +// .def("getmunicationGroup", &Mesh::getCommunicationGroup) .def("getLocalMesh", py::overload_cast<>(&Mesh::getLocalMesh), py::return_value_policy::reference_internal) .def("setGhostLevels", &Mesh::setGhostLevels) .def("getGhostLevels", &Mesh::getGhostLevels) diff --git a/src/TNL/Algorithms/detail/DistributedScan.h b/src/TNL/Algorithms/detail/DistributedScan.h index 933056d92..fdb756c79 100644 --- a/src/TNL/Algorithms/detail/DistributedScan.h +++ b/src/TNL/Algorithms/detail/DistributedScan.h @@ -38,8 +38,8 @@ struct DistributedScan using ValueType = typename OutputDistributedArray::ValueType; using DeviceType = typename 
OutputDistributedArray::DeviceType; - const auto group = input.getCommunicationGroup(); - if( group != MPI::NullGroup() ) { + const auto communicator = input.getCommunicator(); + if( communicator != MPI_COMM_NULL ) { // adjust begin and end for the local range const auto localRange = input.getLocalRange(); begin = min( max( begin, localRange.getBegin() ), localRange.getEnd() ) - localRange.getBegin(); @@ -52,18 +52,18 @@ struct DistributedScan const ValueType local_result = block_results.getElement( block_results.getSize() - 1 ); // exchange local results between ranks - const int nproc = MPI::GetSize( group ); + const int nproc = MPI::GetSize( communicator ); ValueType dataForScatter[ nproc ]; for( int i = 0; i < nproc; i++ ) dataForScatter[ i ] = local_result; Containers::Array< ValueType, Devices::Host > rank_results( nproc ); // NOTE: exchanging general data types does not work with MPI - MPI::Alltoall( dataForScatter, 1, rank_results.getData(), 1, group ); + MPI::Alltoall( dataForScatter, 1, rank_results.getData(), 1, communicator ); // compute the scan of the per-rank results Scan< Devices::Host, ScanType::Exclusive, ScanPhaseType::WriteInSecondPhase >::perform( rank_results, rank_results, 0, nproc, 0, reduction, identity ); // perform the second phase, using the per-block and per-rank results - const int rank = MPI::GetRank( group ); + const int rank = MPI::GetRank( communicator ); Scan< DeviceType, Type, PhaseType >::performSecondPhase( inputLocalView, outputLocalView, block_results, begin, end, begin, reduction, identity, rank_results[ rank ] ); } } diff --git a/src/TNL/Algorithms/distributedScan.h b/src/TNL/Algorithms/distributedScan.h index 39724f10a..e6b997fb8 100644 --- a/src/TNL/Algorithms/distributedScan.h +++ b/src/TNL/Algorithms/distributedScan.h @@ -61,7 +61,7 @@ distributedInclusiveScan( const InputDistributedArray& input, { static_assert( std::is_same< typename InputDistributedArray::DeviceType, typename OutputDistributedArray::DeviceType 
>::value, "The input and output arrays must have the same device type." ); - TNL_ASSERT_EQ( input.getCommunicationGroup(), output.getCommunicationGroup(), + TNL_ASSERT_EQ( input.getCommunicator(), output.getCommunicator(), "The input and output arrays must have the same MPI communicator." ); TNL_ASSERT_EQ( input.getLocalRange(), output.getLocalRange(), "The input and output arrays must have the same local range on all ranks." ); @@ -136,7 +136,7 @@ distributedExclusiveScan( const InputDistributedArray& input, { static_assert( std::is_same< typename InputDistributedArray::DeviceType, typename OutputDistributedArray::DeviceType >::value, "The input and output arrays must have the same device type." ); - TNL_ASSERT_EQ( input.getCommunicationGroup(), output.getCommunicationGroup(), + TNL_ASSERT_EQ( input.getCommunicator(), output.getCommunicator(), "The input and output arrays must have the same MPI communicator." ); TNL_ASSERT_EQ( input.getLocalRange(), output.getLocalRange(), "The input and output arrays must have the same local range on all ranks." 
); diff --git a/src/TNL/Containers/Array.h b/src/TNL/Containers/Array.h index 0fd1f3365..8e76ea9f3 100644 --- a/src/TNL/Containers/Array.h +++ b/src/TNL/Containers/Array.h @@ -794,10 +794,10 @@ template< typename Value, typename Device, typename Index, typename Allocator > File& operator>>( File&& file, Array< Value, Device, Index, Allocator >& array ); template< typename Value, typename Device, typename Index, typename Allocator > -void send( const Array< Value, Device, Index, Allocator >& array, int dest, int tag = 0, MPI_Comm comm = MPI::AllGroup() ); +void send( const Array< Value, Device, Index, Allocator >& array, int dest, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD ); template< typename Value, typename Device, typename Index, typename Allocator > -void receive( Array< Value, Device, Index, Allocator >& array, int src, int tag = 0, MPI_Comm comm = MPI::AllGroup() ); +void receive( Array< Value, Device, Index, Allocator >& array, int src, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD ); } // namespace Containers diff --git a/src/TNL/Containers/ArrayView.h b/src/TNL/Containers/ArrayView.h index 12d259818..ad032491d 100644 --- a/src/TNL/Containers/ArrayView.h +++ b/src/TNL/Containers/ArrayView.h @@ -597,7 +597,7 @@ template< typename Value, typename Device, typename Index > File& operator>>( File&& file, ArrayView< Value, Device, Index > view ); template< typename Value, typename Device, typename Index > -void send( const ArrayView< Value, Device, Index >& view, int dest, int tag = 0, MPI_Comm comm = MPI::AllGroup() ); +void send( const ArrayView< Value, Device, Index >& view, int dest, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD ); } // namespace Containers diff --git a/src/TNL/Containers/DistributedArray.h b/src/TNL/Containers/DistributedArray.h index ef43741c5..07b681f27 100644 --- a/src/TNL/Containers/DistributedArray.h +++ b/src/TNL/Containers/DistributedArray.h @@ -77,15 +77,15 @@ public: */ explicit DistributedArray( const DistributedArray& array, const 
AllocatorType& allocator ); - DistributedArray( LocalRangeType localRange, Index ghosts, Index globalSize, MPI_Comm group, const AllocatorType& allocator = AllocatorType() ); + DistributedArray( LocalRangeType localRange, Index ghosts, Index globalSize, MPI_Comm communicator, const AllocatorType& allocator = AllocatorType() ); - void setDistribution( LocalRangeType localRange, Index ghosts, Index globalSize, MPI_Comm group ); + void setDistribution( LocalRangeType localRange, Index ghosts, Index globalSize, MPI_Comm communicator ); const LocalRangeType& getLocalRange() const; IndexType getGhosts() const; - MPI_Comm getCommunicationGroup() const; + MPI_Comm getCommunicator() const; AllocatorType getAllocator() const; diff --git a/src/TNL/Containers/DistributedArray.hpp b/src/TNL/Containers/DistributedArray.hpp index bda82c8bd..b09443c65 100644 --- a/src/TNL/Containers/DistributedArray.hpp +++ b/src/TNL/Containers/DistributedArray.hpp @@ -69,10 +69,10 @@ template< typename Value, typename Index, typename Allocator > DistributedArray< Value, Device, Index, Allocator >:: -DistributedArray( LocalRangeType localRange, IndexType ghosts, IndexType globalSize, MPI_Comm group, const Allocator& allocator ) +DistributedArray( LocalRangeType localRange, IndexType ghosts, IndexType globalSize, MPI_Comm communicator, const Allocator& allocator ) : localData( allocator ) { - setDistribution( localRange, ghosts, globalSize, group ); + setDistribution( localRange, ghosts, globalSize, communicator ); } template< typename Value, @@ -81,12 +81,12 @@ template< typename Value, typename Allocator > void DistributedArray< Value, Device, Index, Allocator >:: -setDistribution( LocalRangeType localRange, IndexType ghosts, IndexType globalSize, MPI_Comm group ) +setDistribution( LocalRangeType localRange, IndexType ghosts, IndexType globalSize, MPI_Comm communicator ) { TNL_ASSERT_LE( localRange.getEnd(), globalSize, "end of the local range is outside of the global range" ); - if( group != 
MPI::NullGroup() ) + if( communicator != MPI_COMM_NULL ) localData.setSize( localRange.getSize() + ghosts ); - view.bind( localRange, ghosts, globalSize, group, localData.getView() ); + view.bind( localRange, ghosts, globalSize, communicator, localData.getView() ); } template< typename Value, @@ -117,9 +117,9 @@ template< typename Value, typename Allocator > MPI_Comm DistributedArray< Value, Device, Index, Allocator >:: -getCommunicationGroup() const +getCommunicator() const { - return view.getCommunicationGroup(); + return view.getCommunicator(); } template< typename Value, @@ -301,7 +301,7 @@ DistributedArray< Value, Device, Index, Allocator >:: setLike( const Array& array ) { localData.setLike( array.getConstLocalViewWithGhosts() ); - view.bind( array.getLocalRange(), array.getGhosts(), array.getSize(), array.getCommunicationGroup(), localData.getView() ); + view.bind( array.getLocalRange(), array.getGhosts(), array.getSize(), array.getCommunicator(), localData.getView() ); // set, but do not unset, the synchronizer if( array.getSynchronizer() ) setSynchronizerHelper( view, array ); diff --git a/src/TNL/Containers/DistributedArrayView.h b/src/TNL/Containers/DistributedArrayView.h index b99d08076..25398d6d9 100644 --- a/src/TNL/Containers/DistributedArrayView.h +++ b/src/TNL/Containers/DistributedArrayView.h @@ -50,8 +50,8 @@ public: ~DistributedArrayView(); // Initialization by raw data - DistributedArrayView( const LocalRangeType& localRange, IndexType ghosts, IndexType globalSize, MPI_Comm group, LocalViewType localData ) - : localRange(localRange), ghosts(ghosts), globalSize(globalSize), group(group), localData(localData) + DistributedArrayView( const LocalRangeType& localRange, IndexType ghosts, IndexType globalSize, MPI_Comm communicator, LocalViewType localData ) + : localRange(localRange), ghosts(ghosts), globalSize(globalSize), communicator(communicator), localData(localData) { TNL_ASSERT_EQ( localData.getSize(), localRange.getSize() + ghosts, "The local 
array size does not match the local range of the distributed array." ); @@ -71,14 +71,14 @@ public: DistributedArrayView( DistributedArrayView&& ) = default; // method for rebinding (reinitialization) to raw data - void bind( const LocalRangeType& localRange, IndexType ghosts, IndexType globalSize, MPI_Comm group, LocalViewType localData ); + void bind( const LocalRangeType& localRange, IndexType ghosts, IndexType globalSize, MPI_Comm communicator, LocalViewType localData ); // Note that you can also bind directly to DistributedArray and other types implicitly // convertible to DistributedArrayView. void bind( DistributedArrayView view ); // binding to local array via raw pointer - // (local range, ghosts, global size and communication group are preserved) + // (local range, ghosts, global size and communicators are preserved) template< typename Value_ > void bind( Value_* data, IndexType localSize ); @@ -86,7 +86,7 @@ public: IndexType getGhosts() const; - MPI_Comm getCommunicationGroup() const; + MPI_Comm getCommunicator() const; LocalViewType getLocalView(); @@ -235,7 +235,7 @@ protected: LocalRangeType localRange; IndexType ghosts = 0; IndexType globalSize = 0; - MPI_Comm group = MPI::NullGroup(); + MPI_Comm communicator = MPI_COMM_NULL; LocalViewType localData; std::shared_ptr< SynchronizerType > synchronizer = nullptr; diff --git a/src/TNL/Containers/DistributedArrayView.hpp b/src/TNL/Containers/DistributedArrayView.hpp index cb9edba19..c3f0d02b5 100644 --- a/src/TNL/Containers/DistributedArrayView.hpp +++ b/src/TNL/Containers/DistributedArrayView.hpp @@ -39,7 +39,7 @@ DistributedArrayView( const DistributedArrayView< Value_, Device, Index >& view : localRange( view.getLocalRange() ), ghosts( view.getGhosts() ), globalSize( view.getSize() ), - group( view.getCommunicationGroup() ), + communicator( view.getCommunicator() ), localData( view.getConstLocalViewWithGhosts() ), synchronizer( view.getSynchronizer() ), valuesPerElement( view.getValuesPerElement() ) @@ 
-50,7 +50,7 @@ template< typename Value, typename Index > void DistributedArrayView< Value, Device, Index >:: -bind( const LocalRangeType& localRange, IndexType ghosts, IndexType globalSize, MPI_Comm group, LocalViewType localData ) +bind( const LocalRangeType& localRange, IndexType ghosts, IndexType globalSize, MPI_Comm communicator, LocalViewType localData ) { TNL_ASSERT_EQ( localData.getSize(), localRange.getSize() + ghosts, "The local array size does not match the local range of the distributed array." ); @@ -59,7 +59,7 @@ bind( const LocalRangeType& localRange, IndexType ghosts, IndexType globalSize, this->localRange = localRange; this->ghosts = ghosts; this->globalSize = globalSize; - this->group = group; + this->communicator = communicator; this->localData.bind( localData ); } @@ -73,7 +73,7 @@ bind( DistributedArrayView view ) localRange = view.getLocalRange(); ghosts = view.getGhosts(); globalSize = view.getSize(); - group = view.getCommunicationGroup(); + communicator = view.getCommunicator(); localData.bind( view.getLocalViewWithGhosts() ); // set, but do not unset, the synchronizer if( view.getSynchronizer() ) @@ -118,9 +118,9 @@ template< typename Value, typename Index > MPI_Comm DistributedArrayView< Value, Device, Index >:: -getCommunicationGroup() const +getCommunicator() const { - return group; + return communicator; } template< typename Value, @@ -281,7 +281,7 @@ reset() localRange.reset(); ghosts = 0; globalSize = 0; - group = MPI::NullGroup(); + communicator = MPI_COMM_NULL; localData.reset(); } @@ -374,9 +374,9 @@ operator=( const DistributedArrayView& view ) TNL_ASSERT_EQ( getSize(), view.getSize(), "The sizes of the array views must be equal, views are not resizable." ); TNL_ASSERT_EQ( getLocalRange(), view.getLocalRange(), "The local ranges must be equal, views are not resizable." ); TNL_ASSERT_EQ( getGhosts(), view.getGhosts(), "Ghosts must be equal, views are not resizable." 
); - TNL_ASSERT_EQ( getCommunicationGroup(), view.getCommunicationGroup(), "The communication groups of the array views must be equal." ); + TNL_ASSERT_EQ( getCommunicator(), view.getCommunicator(), "The communicators of the array views must be equal." ); - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { // TODO: it might be better to split the local and ghost parts and synchronize in the middle this->waitForSynchronization(); view.waitForSynchronization(); @@ -396,9 +396,9 @@ operator=( const Array& array ) TNL_ASSERT_EQ( getSize(), array.getSize(), "The global sizes must be equal, views are not resizable." ); TNL_ASSERT_EQ( getLocalRange(), array.getLocalRange(), "The local ranges must be equal, views are not resizable." ); TNL_ASSERT_EQ( getGhosts(), array.getGhosts(), "Ghosts must be equal, views are not resizable." ); - TNL_ASSERT_EQ( getCommunicationGroup(), array.getCommunicationGroup(), "The communication groups must be equal." ); + TNL_ASSERT_EQ( getCommunicator(), array.getCommunicator(), "The communicators must be equal." 
); - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { // TODO: it might be better to split the local and ghost parts and synchronize in the middle this->waitForSynchronization(); array.waitForSynchronization(); @@ -415,8 +415,8 @@ bool DistributedArrayView< Value, Device, Index >:: operator==( const Array& array ) const { - // we can't run allreduce if the communication groups are different - if( group != array.getCommunicationGroup() ) + // we can't run allreduce if the communicators are different + if( communicator != array.getCommunicator() ) return false; const bool localResult = localRange == array.getLocalRange() && @@ -425,8 +425,8 @@ operator==( const Array& array ) const // compare without ghosts getConstLocalView() == array.getConstLocalView(); bool result = true; - if( group != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, group ); + if( communicator != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, communicator ); return result; } @@ -480,21 +480,21 @@ DistributedArrayView< Value, Device, Index >:: print( std::ostream& str ) const { // The following does not work properly - /*if( MPI::GetRank( group ) == 0 ) + /*if( MPI::GetRank( communicator ) == 0 ) { str << "[ "; for( IndexType i = 0; i < localData.getSize(); i++ ) str << ", " << localData.getElement( i ); - for( int proc = 1; proc < MPI::GetSize( group ); proc++ ) + for( int proc = 1; proc < MPI::GetSize( communicator ); proc++ ) { Array< std::remove_const_t< Value >, Device, Index > localArray; - receive( localArray, proc, 0, group ); + receive( localArray, proc, 0, communicator ); for( IndexType i = 0; i < localArray.getSize(); i++ ) str << ", " << localArray.getElement( i ); } str << " ]"; } - else send( this->localData, 0, 0, this->group );*/ + else send( this->localData, 0, 0, this->communicator );*/ return str; } diff --git a/src/TNL/Containers/DistributedNDArray.h 
b/src/TNL/Containers/DistributedNDArray.h index 8f1c213d9..c42d16668 100644 --- a/src/TNL/Containers/DistributedNDArray.h +++ b/src/TNL/Containers/DistributedNDArray.h @@ -68,7 +68,7 @@ public: globalSizes = other.getSizes(); localBegins = other.getLocalBegins(); localEnds = other.getLocalEnds(); - group = other.getCommunicationGroup(); + communicator = other.getCommunicator(); localArray = other.getConstLocalView(); return *this; } @@ -83,9 +83,9 @@ public: return localArray.getAllocator(); } - MPI_Comm getCommunicationGroup() const + MPI_Comm getCommunicator() const { - return group; + return communicator; } // Returns the *global* sizes @@ -199,19 +199,19 @@ public: ViewType getView() { - return ViewType( localArray.getView(), globalSizes, localBegins, localEnds, group ); + return ViewType( localArray.getView(), globalSizes, localBegins, localEnds, communicator ); } ConstViewType getConstView() const { - return ConstViewType( localArray.getConstView(), globalSizes, localBegins, localEnds, group ); + return ConstViewType( localArray.getConstView(), globalSizes, localBegins, localEnds, communicator ); } // TODO: overlaps should be skipped, otherwise it works only after synchronization bool operator==( const DistributedNDArray& other ) const { - // we can't run allreduce if the communication groups are different - if( group != other.getCommunicationGroup() ) + // we can't run allreduce if the communicators are different + if( communicator != other.getCommunicator() ) return false; const bool localResult = globalSizes == other.globalSizes && @@ -219,8 +219,8 @@ public: localEnds == other.localEnds && localArray == other.localArray; bool result = true; - if( group != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, group ); + if( communicator != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, communicator ); return result; } @@ -362,7 +362,7 @@ public: } template< std::size_t level > - void setDistribution( IndexType 
begin, IndexType end, MPI_Comm group = MPI::AllGroup() ) + void setDistribution( IndexType begin, IndexType end, MPI_Comm communicator = MPI_COMM_WORLD ) { static_assert( SizesHolderType::template getStaticSize< level >() == 0, "NDArray cannot be distributed in static dimensions." ); TNL_ASSERT_GE( begin, 0, "begin must be non-negative" ); @@ -370,9 +370,9 @@ public: TNL_ASSERT_LT( begin, end, "begin must be lesser than end" ); localBegins.template setSize< level >( begin ); localEnds.template setSize< level >( end ); - TNL_ASSERT( this->group == MPI::NullGroup() || this->group == group, - std::cerr << "different groups cannot be combined for different dimensions" ); - this->group = group; + TNL_ASSERT( this->communicator == MPI_COMM_NULL || this->communicator == communicator, + std::cerr << "different communicators cannot be combined for different dimensions" ); + this->communicator = communicator; } // Computes the distributed storage size and allocates the local array @@ -401,7 +401,7 @@ public: void setLike( const DistributedNDArray& other ) { localArray.setLike( other.localArray ); - group = other.getCommunicationGroup(); + communicator = other.getCommunicator(); globalSizes = other.getSizes(); localBegins = other.localBegins; localEnds = other.localEnds; @@ -410,7 +410,7 @@ public: void reset() { localArray.reset(); - group = MPI::NullGroup(); + communicator = MPI_COMM_NULL; globalSizes = SizesHolderType{}; localBegins = LocalBeginsType{}; localEnds = SizesHolderType{}; @@ -437,7 +437,7 @@ public: protected: NDArray localArray; - MPI_Comm group = MPI::NullGroup(); + MPI_Comm communicator = MPI_COMM_NULL; SizesHolderType globalSizes; // static sizes should have different type: localBegin is always 0, localEnd is always the full size LocalBeginsType localBegins; diff --git a/src/TNL/Containers/DistributedNDArraySynchronizer.h b/src/TNL/Containers/DistributedNDArraySynchronizer.h index 73cdda7a0..0993ba148 100644 --- 
a/src/TNL/Containers/DistributedNDArraySynchronizer.h +++ b/src/TNL/Containers/DistributedNDArraySynchronizer.h @@ -147,7 +147,7 @@ public: #endif // skip allocation on repeated calls - compare only sizes, not the actual data - if( array_view.getCommunicationGroup() != array.getCommunicationGroup() || + if( array_view.getCommunicator() != array.getCommunicator() || array_view.getSizes() != array.getSizes() || array_view.getLocalBegins() != array.getLocalBegins() || array_view.getLocalEnds() != array.getLocalEnds() ) @@ -251,10 +251,10 @@ protected: // issue all send and receive async operations RequestsVector requests; - const MPI_Comm group = array_view.getCommunicationGroup(); + const MPI_Comm communicator = array_view.getCommunicator(); Algorithms::staticFor< std::size_t, 0, DistributedNDArray::getDimension() >( [&] ( auto dim ) { - sendHelper< dim >( buffers, requests, group, tag_offset, mask ); + sendHelper< dim >( buffers, requests, communicator, tag_offset, mask ); } ); @@ -320,9 +320,9 @@ protected: dim_buffers.right_recv_offsets.template setSize< dim >( localEnds.template getSize< dim >() ); // FIXME: set proper neighbor IDs !!! 
- const MPI_Comm group = array_view.getCommunicationGroup(); - const int rank = MPI::GetRank(group); - const int nproc = MPI::GetSize(group); + const MPI_Comm communicator = array_view.getCommunicator(); + const int rank = MPI::GetRank(communicator); + const int nproc = MPI::GetSize(communicator); dim_buffers.left_neighbor = (rank + nproc - 1) % nproc; dim_buffers.right_neighbor = (rank + 1) % nproc; } @@ -381,7 +381,7 @@ protected: } template< std::size_t dim > - static void sendHelper( Buffers& buffers, RequestsVector& requests, MPI_Comm group, int tag_offset, SyncDirection mask ) + static void sendHelper( Buffers& buffers, RequestsVector& requests, MPI_Comm communicator, int tag_offset, SyncDirection mask ) { constexpr std::size_t overlap = DistributedNDArrayView::LocalViewType::IndexerType::template getOverlap< dim >(); if( overlap == 0 ) @@ -393,33 +393,33 @@ protected: if( mask & SyncDirection::Left ) { requests.push_back( MPI::Isend( dim_buffers.left_send_view.getData(), dim_buffers.left_send_view.getStorageSize(), - dim_buffers.left_neighbor, tag_offset + 0, group ) ); + dim_buffers.left_neighbor, tag_offset + 0, communicator ) ); requests.push_back( MPI::Irecv( dim_buffers.right_recv_view.getData(), dim_buffers.right_recv_view.getStorageSize(), - dim_buffers.right_neighbor, tag_offset + 0, group ) ); + dim_buffers.right_neighbor, tag_offset + 0, communicator ) ); } if( mask & SyncDirection::Right ) { requests.push_back( MPI::Isend( dim_buffers.right_send_view.getData(), dim_buffers.right_send_view.getStorageSize(), - dim_buffers.right_neighbor, tag_offset + 1, group ) ); + dim_buffers.right_neighbor, tag_offset + 1, communicator ) ); requests.push_back( MPI::Irecv( dim_buffers.left_recv_view.getData(), dim_buffers.left_recv_view.getStorageSize(), - dim_buffers.left_neighbor, tag_offset + 1, group ) ); + dim_buffers.left_neighbor, tag_offset + 1, communicator ) ); } } else { requests.push_back( MPI::Isend( dim_buffers.left_send_view.getData() + 0, 
dim_buffers.left_send_view.getStorageSize() / 27 * 9, - dim_buffers.left_neighbor, tag_offset + 0, group ) ); + dim_buffers.left_neighbor, tag_offset + 0, communicator ) ); requests.push_back( MPI::Irecv( dim_buffers.left_recv_view.getData() + dim_buffers.left_recv_view.getStorageSize() / 27 * 18, dim_buffers.left_recv_view.getStorageSize() / 27 * 9, - dim_buffers.left_neighbor, tag_offset + 1, group ) ); + dim_buffers.left_neighbor, tag_offset + 1, communicator ) ); requests.push_back( MPI::Isend( dim_buffers.right_send_view.getData() + dim_buffers.left_recv_view.getStorageSize() / 27 * 18, dim_buffers.right_send_view.getStorageSize() / 27 * 9, - dim_buffers.right_neighbor, tag_offset + 1, group ) ); + dim_buffers.right_neighbor, tag_offset + 1, communicator ) ); requests.push_back( MPI::Irecv( dim_buffers.right_recv_view.getData() + 0, dim_buffers.right_recv_view.getStorageSize() / 27 * 9, - dim_buffers.right_neighbor, tag_offset + 0, group ) ); + dim_buffers.right_neighbor, tag_offset + 0, communicator ) ); } } diff --git a/src/TNL/Containers/DistributedNDArrayView.h b/src/TNL/Containers/DistributedNDArrayView.h index 13d9cf6dd..7ee0a58d0 100644 --- a/src/TNL/Containers/DistributedNDArrayView.h +++ b/src/TNL/Containers/DistributedNDArrayView.h @@ -40,8 +40,8 @@ public: DistributedNDArrayView() = default; // explicit initialization by local array view, global sizes and local begins and ends - DistributedNDArrayView( NDArrayView localView, SizesHolderType globalSizes, LocalBeginsType localBegins, SizesHolderType localEnds, MPI_Comm group ) - : localView(localView), group(group), globalSizes(globalSizes), localBegins(localBegins), localEnds(localEnds) {} + DistributedNDArrayView( NDArrayView localView, SizesHolderType globalSizes, LocalBeginsType localBegins, SizesHolderType localEnds, MPI_Comm communicator ) + : localView(localView), communicator(communicator), globalSizes(globalSizes), localBegins(localBegins), localEnds(localEnds) {} // copy-constructor does 
shallow copy DistributedNDArrayView( const DistributedNDArrayView& ) = default; @@ -63,7 +63,7 @@ public: globalSizes = other.getSizes(); localBegins = other.getLocalBegins(); localEnds = other.getLocalEnds(); - group = other.getCommunicationGroup(); + communicator = other.getCommunicator(); localView = other.getConstLocalView(); return *this; } @@ -72,7 +72,7 @@ public: void bind( DistributedNDArrayView view ) { localView.bind( view.localView ); - group = view.group; + communicator = view.communicator; globalSizes = view.globalSizes; localBegins = view.localBegins; localEnds = view.localEnds; @@ -93,7 +93,7 @@ public: void reset() { localView.reset(); - group = MPI::NullGroup(); + communicator = MPI_COMM_NULL; globalSizes = SizesHolderType{}; localBegins = LocalBeginsType{}; localEnds = SizesHolderType{}; @@ -104,9 +104,9 @@ public: return NDArrayView::getDimension(); } - MPI_Comm getCommunicationGroup() const + MPI_Comm getCommunicator() const { - return group; + return communicator; } // Returns the *global* sizes @@ -225,14 +225,14 @@ public: ConstViewType getConstView() const { - return ConstViewType( localView, globalSizes, localBegins, localEnds, group ); + return ConstViewType( localView, globalSizes, localBegins, localEnds, communicator ); } // TODO: overlaps should be skipped, otherwise it works only after synchronization bool operator==( const DistributedNDArrayView& other ) const { - // we can't run allreduce if the communication groups are different - if( group != other.getCommunicationGroup() ) + // we can't run allreduce if the communicators are different + if( communicator != other.getCommunicator() ) return false; const bool localResult = globalSizes == other.globalSizes && @@ -240,8 +240,8 @@ public: localEnds == other.localEnds && localView == other.localView; bool result = true; - if( group != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, group ); + if( communicator != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, 
&result, 1, MPI_LAND, communicator ); return result; } @@ -370,7 +370,7 @@ public: protected: NDArrayView localView; - MPI_Comm group = MPI::NullGroup(); + MPI_Comm communicator = MPI_COMM_NULL; SizesHolderType globalSizes; // static sizes should have different type: localBegin is always 0, localEnd is always the full size LocalBeginsType localBegins; diff --git a/src/TNL/Containers/DistributedVectorView.hpp b/src/TNL/Containers/DistributedVectorView.hpp index 69ad4c74b..5dc4b7d65 100644 --- a/src/TNL/Containers/DistributedVectorView.hpp +++ b/src/TNL/Containers/DistributedVectorView.hpp @@ -96,10 +96,10 @@ operator=( const Vector& vector ) "The local ranges must be equal, views are not resizable." ); TNL_ASSERT_EQ( this->getGhosts(), vector.getGhosts(), "Ghosts must be equal, views are not resizable." ); - TNL_ASSERT_EQ( this->getCommunicationGroup(), vector.getCommunicationGroup(), - "The communication groups of the array views must be equal." ); + TNL_ASSERT_EQ( this->getCommunicator(), vector.getCommunicator(), + "The communicators of the array views must be equal." ); - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { // TODO: it might be better to split the local and ghost parts and synchronize in the middle this->waitForSynchronization(); vector.waitForSynchronization(); @@ -122,10 +122,10 @@ operator+=( const Vector& vector ) "Multiary operations are supported only on vectors which are distributed the same way." ); TNL_ASSERT_EQ( this->getGhosts(), vector.getGhosts(), "Ghosts must be equal, views are not resizable." ); - TNL_ASSERT_EQ( this->getCommunicationGroup(), vector.getCommunicationGroup(), - "Multiary operations are supported only on vectors within the same communication group." ); + TNL_ASSERT_EQ( this->getCommunicator(), vector.getCommunicator(), + "Multiary operations are supported only on vectors within the same communicator." 
); - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { // TODO: it might be better to split the local and ghost parts and synchronize in the middle this->waitForSynchronization(); vector.waitForSynchronization(); @@ -148,10 +148,10 @@ operator-=( const Vector& vector ) "Multiary operations are supported only on vectors which are distributed the same way." ); TNL_ASSERT_EQ( this->getGhosts(), vector.getGhosts(), "Ghosts must be equal, views are not resizable." ); - TNL_ASSERT_EQ( this->getCommunicationGroup(), vector.getCommunicationGroup(), - "Multiary operations are supported only on vectors within the same communication group." ); + TNL_ASSERT_EQ( this->getCommunicator(), vector.getCommunicator(), + "Multiary operations are supported only on vectors within the same communicator." ); - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { // TODO: it might be better to split the local and ghost parts and synchronize in the middle this->waitForSynchronization(); vector.waitForSynchronization(); @@ -174,10 +174,10 @@ operator*=( const Vector& vector ) "Multiary operations are supported only on vectors which are distributed the same way." ); TNL_ASSERT_EQ( this->getGhosts(), vector.getGhosts(), "Ghosts must be equal, views are not resizable." ); - TNL_ASSERT_EQ( this->getCommunicationGroup(), vector.getCommunicationGroup(), - "Multiary operations are supported only on vectors within the same communication group." ); + TNL_ASSERT_EQ( this->getCommunicator(), vector.getCommunicator(), + "Multiary operations are supported only on vectors within the same communicator." 
); - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { // TODO: it might be better to split the local and ghost parts and synchronize in the middle this->waitForSynchronization(); vector.waitForSynchronization(); @@ -200,10 +200,10 @@ operator/=( const Vector& vector ) "Multiary operations are supported only on vectors which are distributed the same way." ); TNL_ASSERT_EQ( this->getGhosts(), vector.getGhosts(), "Ghosts must be equal, views are not resizable." ); - TNL_ASSERT_EQ( this->getCommunicationGroup(), vector.getCommunicationGroup(), - "Multiary operations are supported only on vectors within the same communication group." ); + TNL_ASSERT_EQ( this->getCommunicator(), vector.getCommunicator(), + "Multiary operations are supported only on vectors within the same communicator." ); - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { // TODO: it might be better to split the local and ghost parts and synchronize in the middle this->waitForSynchronization(); vector.waitForSynchronization(); @@ -226,10 +226,10 @@ operator%=( const Vector& vector ) "Multiary operations are supported only on vectors which are distributed the same way." ); TNL_ASSERT_EQ( this->getGhosts(), vector.getGhosts(), "Ghosts must be equal, views are not resizable." ); - TNL_ASSERT_EQ( this->getCommunicationGroup(), vector.getCommunicationGroup(), - "Multiary operations are supported only on vectors within the same communication group." ); + TNL_ASSERT_EQ( this->getCommunicator(), vector.getCommunicator(), + "Multiary operations are supported only on vectors within the same communicator." 
); - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { // TODO: it might be better to split the local and ghost parts and synchronize in the middle this->waitForSynchronization(); vector.waitForSynchronization(); @@ -246,7 +246,7 @@ DistributedVectorView< Real, Device, Index >& DistributedVectorView< Real, Device, Index >:: operator=( Scalar c ) { - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { getLocalView() = c; this->startSynchronization(); } @@ -261,7 +261,7 @@ DistributedVectorView< Real, Device, Index >& DistributedVectorView< Real, Device, Index >:: operator+=( Scalar c ) { - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { getLocalView() += c; this->startSynchronization(); } @@ -276,7 +276,7 @@ DistributedVectorView< Real, Device, Index >& DistributedVectorView< Real, Device, Index >:: operator-=( Scalar c ) { - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { getLocalView() -= c; this->startSynchronization(); } @@ -291,7 +291,7 @@ DistributedVectorView< Real, Device, Index >& DistributedVectorView< Real, Device, Index >:: operator*=( Scalar c ) { - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { getLocalView() *= c; this->startSynchronization(); } @@ -306,7 +306,7 @@ DistributedVectorView< Real, Device, Index >& DistributedVectorView< Real, Device, Index >:: operator/=( Scalar c ) { - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( this->getCommunicator() != MPI_COMM_NULL ) { getLocalView() /= c; this->startSynchronization(); } @@ -321,7 +321,7 @@ DistributedVectorView< Real, Device, Index >& DistributedVectorView< Real, Device, Index >:: operator%=( Scalar c ) { - if( this->getCommunicationGroup() != MPI::NullGroup() ) { + if( 
this->getCommunicator() != MPI_COMM_NULL ) { getLocalView() %= c; this->startSynchronization(); } diff --git a/src/TNL/Containers/Expressions/DistributedComparison.h b/src/TNL/Containers/Expressions/DistributedComparison.h index 10bf2d117..2b13722d9 100644 --- a/src/TNL/Containers/Expressions/DistributedComparison.h +++ b/src/TNL/Containers/Expressions/DistributedComparison.h @@ -33,8 +33,8 @@ struct DistributedComparison< T1, T2, VectorExpressionVariable, VectorExpression { static bool EQ( const T1& a, const T2& b ) { - // we can't run allreduce if the communication groups are different - if( a.getCommunicationGroup() != b.getCommunicationGroup() ) + // we can't run allreduce if the communicators are different + if( a.getCommunicator() != b.getCommunicator() ) return false; const bool localResult = a.getLocalRange() == b.getLocalRange() && @@ -43,8 +43,8 @@ struct DistributedComparison< T1, T2, VectorExpressionVariable, VectorExpression // compare without ghosts a.getConstLocalView() == b.getConstLocalView(); bool result = true; - if( a.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicationGroup() ); + if( a.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicator() ); return result; } @@ -59,13 +59,13 @@ struct DistributedComparison< T1, T2, VectorExpressionVariable, VectorExpression TNL_ASSERT_EQ( a.getLocalRange(), b.getLocalRange(), "Local ranges of expressions to be compared do not match." ); TNL_ASSERT_EQ( a.getGhosts(), b.getGhosts(), "Ghosts of expressions to be compared do not match." 
); - // we can't run allreduce if the communication groups are different - if( a.getCommunicationGroup() != b.getCommunicationGroup() ) + // we can't run allreduce if the communicators are different + if( a.getCommunicator() != b.getCommunicator() ) return false; const bool localResult = a.getConstLocalView() < b.getConstLocalView(); bool result = true; - if( a.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicationGroup() ); + if( a.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicator() ); return result; } @@ -75,13 +75,13 @@ struct DistributedComparison< T1, T2, VectorExpressionVariable, VectorExpression TNL_ASSERT_EQ( a.getLocalRange(), b.getLocalRange(), "Local ranges of expressions to be compared do not match." ); TNL_ASSERT_EQ( a.getGhosts(), b.getGhosts(), "Ghosts of expressions to be compared do not match." ); - // we can't run allreduce if the communication groups are different - if( a.getCommunicationGroup() != b.getCommunicationGroup() ) + // we can't run allreduce if the communicators are different + if( a.getCommunicator() != b.getCommunicator() ) return false; const bool localResult = a.getConstLocalView() <= b.getConstLocalView(); bool result = true; - if( a.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicationGroup() ); + if( a.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicator() ); return result; } @@ -91,13 +91,13 @@ struct DistributedComparison< T1, T2, VectorExpressionVariable, VectorExpression TNL_ASSERT_EQ( a.getLocalRange(), b.getLocalRange(), "Local ranges of expressions to be compared do not match." ); TNL_ASSERT_EQ( a.getGhosts(), b.getGhosts(), "Ghosts of expressions to be compared do not match." 
); - // we can't run allreduce if the communication groups are different - if( a.getCommunicationGroup() != b.getCommunicationGroup() ) + // we can't run allreduce if the communicators are different + if( a.getCommunicator() != b.getCommunicator() ) return false; const bool localResult = a.getConstLocalView() > b.getConstLocalView(); bool result = true; - if( a.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicationGroup() ); + if( a.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicator() ); return result; } @@ -107,13 +107,13 @@ struct DistributedComparison< T1, T2, VectorExpressionVariable, VectorExpression TNL_ASSERT_EQ( a.getLocalRange(), b.getLocalRange(), "Local ranges of expressions to be compared do not match." ); TNL_ASSERT_EQ( a.getGhosts(), b.getGhosts(), "Ghosts of expressions to be compared do not match." ); - // we can't run allreduce if the communication groups are different - if( a.getCommunicationGroup() != b.getCommunicationGroup() ) + // we can't run allreduce if the communicators are different + if( a.getCommunicator() != b.getCommunicator() ) return false; const bool localResult = a.getConstLocalView() >= b.getConstLocalView(); bool result = true; - if( a.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicationGroup() ); + if( a.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicator() ); return result; } }; @@ -128,8 +128,8 @@ struct DistributedComparison< T1, T2, ArithmeticVariable, VectorExpressionVariab { const bool localResult = a == b.getConstLocalView(); bool result = true; - if( b.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, b.getCommunicationGroup() ); + if( b.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, 
b.getCommunicator() ); return result; } @@ -142,8 +142,8 @@ struct DistributedComparison< T1, T2, ArithmeticVariable, VectorExpressionVariab { const bool localResult = a < b.getConstLocalView(); bool result = true; - if( b.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, b.getCommunicationGroup() ); + if( b.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, b.getCommunicator() ); return result; } @@ -151,8 +151,8 @@ struct DistributedComparison< T1, T2, ArithmeticVariable, VectorExpressionVariab { const bool localResult = a <= b.getConstLocalView(); bool result = true; - if( b.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, b.getCommunicationGroup() ); + if( b.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, b.getCommunicator() ); return result; } @@ -160,8 +160,8 @@ struct DistributedComparison< T1, T2, ArithmeticVariable, VectorExpressionVariab { const bool localResult = a > b.getConstLocalView(); bool result = true; - if( b.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, b.getCommunicationGroup() ); + if( b.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, b.getCommunicator() ); return result; } @@ -169,8 +169,8 @@ struct DistributedComparison< T1, T2, ArithmeticVariable, VectorExpressionVariab { const bool localResult = a >= b.getConstLocalView(); bool result = true; - if( b.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, b.getCommunicationGroup() ); + if( b.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, b.getCommunicator() ); return result; } }; @@ -185,8 +185,8 @@ struct DistributedComparison< T1, T2, VectorExpressionVariable, ArithmeticVariab { const bool localResult = a.getConstLocalView() 
== b; bool result = true; - if( a.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicationGroup() ); + if( a.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicator() ); return result; } @@ -199,8 +199,8 @@ struct DistributedComparison< T1, T2, VectorExpressionVariable, ArithmeticVariab { const bool localResult = a.getConstLocalView() < b; bool result = true; - if( a.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicationGroup() ); + if( a.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicator() ); return result; } @@ -208,8 +208,8 @@ struct DistributedComparison< T1, T2, VectorExpressionVariable, ArithmeticVariab { const bool localResult = a.getConstLocalView() <= b; bool result = true; - if( a.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicationGroup() ); + if( a.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicator() ); return result; } @@ -217,8 +217,8 @@ struct DistributedComparison< T1, T2, VectorExpressionVariable, ArithmeticVariab { const bool localResult = a.getConstLocalView() > b; bool result = true; - if( a.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicationGroup() ); + if( a.getCommunicator() != MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicator() ); return result; } @@ -226,8 +226,8 @@ struct DistributedComparison< T1, T2, VectorExpressionVariable, ArithmeticVariab { const bool localResult = a.getConstLocalView() >= b; bool result = true; - if( a.getCommunicationGroup() != MPI::NullGroup() ) - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicationGroup() ); + if( a.getCommunicator() != 
MPI_COMM_NULL ) + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, a.getCommunicator() ); return result; } }; diff --git a/src/TNL/Containers/Expressions/DistributedExpressionTemplates.h b/src/TNL/Containers/Expressions/DistributedExpressionTemplates.h index a713b00d4..1a5fea876 100644 --- a/src/TNL/Containers/Expressions/DistributedExpressionTemplates.h +++ b/src/TNL/Containers/Expressions/DistributedExpressionTemplates.h @@ -82,8 +82,8 @@ struct DistributedBinaryExpressionTemplate< T1, T2, Operation, VectorExpressionV "Distributed expressions are supported only on vectors which are distributed the same way." ); TNL_ASSERT_EQ( op1.getGhosts(), op2.getGhosts(), "Distributed expressions are supported only on vectors which are distributed the same way." ); - TNL_ASSERT_EQ( op1.getCommunicationGroup(), op2.getCommunicationGroup(), - "Distributed expressions are supported only on vectors within the same communication group." ); + TNL_ASSERT_EQ( op1.getCommunicator(), op2.getCommunicator(), + "Distributed expressions are supported only on vectors within the same communicator." 
); } RealType getElement( const IndexType i ) const @@ -114,9 +114,9 @@ struct DistributedBinaryExpressionTemplate< T1, T2, Operation, VectorExpressionV return op1.getGhosts(); } - MPI_Comm getCommunicationGroup() const + MPI_Comm getCommunicator() const { - return op1.getCommunicationGroup(); + return op1.getCommunicator(); } ConstLocalViewType getConstLocalView() const @@ -197,9 +197,9 @@ struct DistributedBinaryExpressionTemplate< T1, T2, Operation, VectorExpressionV return op1.getGhosts(); } - MPI_Comm getCommunicationGroup() const + MPI_Comm getCommunicator() const { - return op1.getCommunicationGroup(); + return op1.getCommunicator(); } ConstLocalViewType getConstLocalView() const @@ -279,9 +279,9 @@ struct DistributedBinaryExpressionTemplate< T1, T2, Operation, ArithmeticVariabl return op2.getGhosts(); } - MPI_Comm getCommunicationGroup() const + MPI_Comm getCommunicator() const { - return op2.getCommunicationGroup(); + return op2.getCommunicator(); } ConstLocalViewType getConstLocalView() const @@ -362,9 +362,9 @@ struct DistributedUnaryExpressionTemplate return operand.getGhosts(); } - MPI_Comm getCommunicationGroup() const + MPI_Comm getCommunicator() const { - return operand.getCommunicationGroup(); + return operand.getCommunicator(); } ConstLocalViewType getConstLocalView() const diff --git a/src/TNL/Containers/Expressions/DistributedVerticalOperations.h b/src/TNL/Containers/Expressions/DistributedVerticalOperations.h index f3b826a9f..de11d7e29 100644 --- a/src/TNL/Containers/Expressions/DistributedVerticalOperations.h +++ b/src/TNL/Containers/Expressions/DistributedVerticalOperations.h @@ -25,9 +25,9 @@ auto DistributedExpressionMin( const Expression& expression ) -> std::decay_t< d static_assert( std::numeric_limits< ResultType >::is_specialized, "std::numeric_limits is not specialized for the reduction's result type" ); ResultType result = std::numeric_limits< ResultType >::max(); - if( expression.getCommunicationGroup() != MPI::NullGroup() ) { + if( 
expression.getCommunicator() != MPI_COMM_NULL ) { const ResultType localResult = Algorithms::reduce( expression.getConstLocalView(), TNL::Min{} ); - MPI::Allreduce( &localResult, &result, 1, MPI_MIN, expression.getCommunicationGroup() ); + MPI::Allreduce( &localResult, &result, 1, MPI_MIN, expression.getCommunicator() ); } return result; } @@ -43,21 +43,21 @@ auto DistributedExpressionArgMin( const Expression& expression ) static_assert( std::numeric_limits< RealType >::is_specialized, "std::numeric_limits is not specialized for the reduction's real type" ); ResultType result( -1, std::numeric_limits< RealType >::max() ); - const auto group = expression.getCommunicationGroup(); - if( group != MPI::NullGroup() ) { + const MPI_Comm communicator = expression.getCommunicator(); + if( communicator != MPI_COMM_NULL ) { // compute local argMin ResultType localResult = Algorithms::reduceWithArgument( expression.getConstLocalView(), TNL::MinWithArg{} ); // transform local index to global index localResult.second += expression.getLocalRange().getBegin(); // scatter local result to all processes and gather their results - const int nproc = MPI::GetSize( group ); + const int nproc = MPI::GetSize( communicator ); ResultType dataForScatter[ nproc ]; for( int i = 0; i < nproc; i++ ) dataForScatter[ i ] = localResult; ResultType gatheredResults[ nproc ]; // NOTE: exchanging general data types does not work with MPI - //MPI::Alltoall( dataForScatter, 1, gatheredResults, 1, group ); - MPI::Alltoall( (char*) dataForScatter, sizeof(ResultType), (char*) gatheredResults, sizeof(ResultType), group ); + //MPI::Alltoall( dataForScatter, 1, gatheredResults, 1, communicator ); + MPI::Alltoall( (char*) dataForScatter, sizeof(ResultType), (char*) gatheredResults, sizeof(ResultType), communicator ); // reduce the gathered data const auto* _data = gatheredResults; // workaround for nvcc which does not allow to capture variable-length arrays (even in pure host code!) 
@@ -76,9 +76,9 @@ auto DistributedExpressionMax( const Expression& expression ) -> std::decay_t< d static_assert( std::numeric_limits< ResultType >::is_specialized, "std::numeric_limits is not specialized for the reduction's result type" ); ResultType result = std::numeric_limits< ResultType >::lowest(); - if( expression.getCommunicationGroup() != MPI::NullGroup() ) { + if( expression.getCommunicator() != MPI_COMM_NULL ) { const ResultType localResult = Algorithms::reduce( expression.getConstLocalView(), TNL::Max{} ); - MPI::Allreduce( &localResult, &result, 1, MPI_MAX, expression.getCommunicationGroup() ); + MPI::Allreduce( &localResult, &result, 1, MPI_MAX, expression.getCommunicator() ); } return result; } @@ -94,21 +94,21 @@ auto DistributedExpressionArgMax( const Expression& expression ) static_assert( std::numeric_limits< RealType >::is_specialized, "std::numeric_limits is not specialized for the reduction's real type" ); ResultType result( -1, std::numeric_limits< RealType >::lowest() ); - const auto group = expression.getCommunicationGroup(); - if( group != MPI::NullGroup() ) { + const MPI_Comm communicator = expression.getCommunicator(); + if( communicator != MPI_COMM_NULL ) { // compute local argMax ResultType localResult = Algorithms::reduceWithArgument( expression.getConstLocalView(), TNL::MaxWithArg{} ); // transform local index to global index localResult.second += expression.getLocalRange().getBegin(); // scatter local result to all processes and gather their results - const int nproc = MPI::GetSize( group ); + const int nproc = MPI::GetSize( communicator ); ResultType dataForScatter[ nproc ]; for( int i = 0; i < nproc; i++ ) dataForScatter[ i ] = localResult; ResultType gatheredResults[ nproc ]; // NOTE: exchanging general data types does not work with MPI - //MPI::Alltoall( dataForScatter, 1, gatheredResults, 1, group ); - MPI::Alltoall( (char*) dataForScatter, sizeof(ResultType), (char*) gatheredResults, sizeof(ResultType), group ); + 
//MPI::Alltoall( dataForScatter, 1, gatheredResults, 1, communicator ); + MPI::Alltoall( (char*) dataForScatter, sizeof(ResultType), (char*) gatheredResults, sizeof(ResultType), communicator ); // reduce the gathered data const auto* _data = gatheredResults; // workaround for nvcc which does not allow to capture variable-length arrays (even in pure host code!) @@ -125,9 +125,9 @@ auto DistributedExpressionSum( const Expression& expression ) -> std::decay_t< d using ResultType = std::decay_t< decltype( expression[0] ) >; ResultType result = 0; - if( expression.getCommunicationGroup() != MPI::NullGroup() ) { + if( expression.getCommunicator() != MPI_COMM_NULL ) { const ResultType localResult = Algorithms::reduce( expression.getConstLocalView(), TNL::Plus{} ); - MPI::Allreduce( &localResult, &result, 1, MPI_SUM, expression.getCommunicationGroup() ); + MPI::Allreduce( &localResult, &result, 1, MPI_SUM, expression.getCommunicator() ); } return result; } @@ -138,9 +138,9 @@ auto DistributedExpressionProduct( const Expression& expression ) -> std::decay_ using ResultType = std::decay_t< decltype( expression[0] ) >; ResultType result = 1; - if( expression.getCommunicationGroup() != MPI::NullGroup() ) { + if( expression.getCommunicator() != MPI_COMM_NULL ) { const ResultType localResult = Algorithms::reduce( expression.getConstLocalView(), TNL::Multiplies{} ); - MPI::Allreduce( &localResult, &result, 1, MPI_PROD, expression.getCommunicationGroup() ); + MPI::Allreduce( &localResult, &result, 1, MPI_PROD, expression.getCommunicator() ); } return result; } @@ -153,9 +153,9 @@ auto DistributedExpressionLogicalAnd( const Expression& expression ) -> std::dec static_assert( std::numeric_limits< ResultType >::is_specialized, "std::numeric_limits is not specialized for the reduction's result type" ); ResultType result = std::numeric_limits< ResultType >::max(); - if( expression.getCommunicationGroup() != MPI::NullGroup() ) { + if( expression.getCommunicator() != MPI_COMM_NULL ) { 
const ResultType localResult = Algorithms::reduce( expression.getConstLocalView(), TNL::LogicalAnd{} ); - MPI::Allreduce( &localResult, &result, 1, MPI_LAND, expression.getCommunicationGroup() ); + MPI::Allreduce( &localResult, &result, 1, MPI_LAND, expression.getCommunicator() ); } return result; } @@ -166,9 +166,9 @@ auto DistributedExpressionLogicalOr( const Expression& expression ) -> std::deca using ResultType = std::decay_t< decltype( expression[0] || expression[0] ) >; ResultType result = 0; - if( expression.getCommunicationGroup() != MPI::NullGroup() ) { + if( expression.getCommunicator() != MPI_COMM_NULL ) { const ResultType localResult = Algorithms::reduce( expression.getConstLocalView(), TNL::LogicalOr{} ); - MPI::Allreduce( &localResult, &result, 1, MPI_LOR, expression.getCommunicationGroup() ); + MPI::Allreduce( &localResult, &result, 1, MPI_LOR, expression.getCommunicator() ); } return result; } @@ -181,9 +181,9 @@ auto DistributedExpressionBinaryAnd( const Expression& expression ) -> std::deca static_assert( std::numeric_limits< ResultType >::is_specialized, "std::numeric_limits is not specialized for the reduction's result type" ); ResultType result = std::numeric_limits< ResultType >::max(); - if( expression.getCommunicationGroup() != MPI::NullGroup() ) { + if( expression.getCommunicator() != MPI_COMM_NULL ) { const ResultType localResult = Algorithms::reduce( expression.getConstLocalView(), TNL::BitAnd{} ); - MPI::Allreduce( &localResult, &result, 1, MPI_BAND, expression.getCommunicationGroup() ); + MPI::Allreduce( &localResult, &result, 1, MPI_BAND, expression.getCommunicator() ); } return result; } @@ -194,9 +194,9 @@ auto DistributedExpressionBinaryOr( const Expression& expression ) -> std::decay using ResultType = std::decay_t< decltype( expression[0] | expression[0] ) >; ResultType result = 0; - if( expression.getCommunicationGroup() != MPI::NullGroup() ) { + if( expression.getCommunicator() != MPI_COMM_NULL ) { const ResultType localResult = 
Algorithms::reduce( expression.getConstLocalView(), TNL::BitOr{} ); - MPI::Allreduce( &localResult, &result, 1, MPI_BOR, expression.getCommunicationGroup() ); + MPI::Allreduce( &localResult, &result, 1, MPI_BOR, expression.getCommunicator() ); } return result; } @@ -207,9 +207,9 @@ auto DistributedExpressionBinaryXor( const Expression& expression ) -> std::deca using ResultType = std::decay_t< decltype( expression[0] ^ expression[0] ) >; ResultType result = 0; - if( expression.getCommunicationGroup() != MPI::NullGroup() ) { + if( expression.getCommunicator() != MPI_COMM_NULL ) { const ResultType localResult = Algorithms::reduce( expression.getConstLocalView(), TNL::BitXor{} ); - MPI::Allreduce( &localResult, &result, 1, MPI_BXOR, expression.getCommunicationGroup() ); + MPI::Allreduce( &localResult, &result, 1, MPI_BXOR, expression.getCommunicator() ); } return result; } diff --git a/src/TNL/Containers/Partitioner.h b/src/TNL/Containers/Partitioner.h index 6d3605b5a..8001dfdce 100644 --- a/src/TNL/Containers/Partitioner.h +++ b/src/TNL/Containers/Partitioner.h @@ -28,11 +28,11 @@ class Partitioner public: using SubrangeType = Subrange< Index >; - static SubrangeType splitRange( Index globalSize, MPI_Comm group ) + static SubrangeType splitRange( Index globalSize, MPI_Comm communicator ) { - if( group != MPI::NullGroup() ) { - const int rank = MPI::GetRank( group ); - const int partitions = MPI::GetSize( group ); + if( communicator != MPI_COMM_NULL ) { + const int rank = MPI::GetRank( communicator ); + const int partitions = MPI::GetSize( communicator ); const Index begin = TNL::min( globalSize, rank * globalSize / partitions ); const Index end = TNL::min( globalSize, (rank + 1) * globalSize / partitions ); return SubrangeType( begin, end ); @@ -77,7 +77,7 @@ public: SubrangeType localRange; int overlaps; - MPI_Comm group; + MPI_Comm communicator; public: using ByteArrayView = typename Base::ByteArrayView; @@ -92,8 +92,8 @@ public: ArraySynchronizer() = delete; - 
ArraySynchronizer( SubrangeType localRange, int overlaps, MPI_Comm group ) - : localRange(localRange), overlaps(overlaps), group(group) + ArraySynchronizer( SubrangeType localRange, int overlaps, MPI_Comm communicator ) + : localRange(localRange), overlaps(overlaps), communicator(communicator) {} virtual void synchronizeByteArray( ByteArrayView array, int bytesPerValue ) override @@ -107,8 +107,8 @@ public: TNL_ASSERT_EQ( array.getSize(), bytesPerValue * (localRange.getSize() + 2 * overlaps), "unexpected array size" ); - const int rank = MPI::GetRank( group ); - const int nproc = MPI::GetSize( group ); + const int rank = MPI::GetRank( communicator ); + const int nproc = MPI::GetSize( communicator ); const int left = (rank > 0) ? rank - 1 : nproc - 1; const int right = (rank < nproc - 1) ? rank + 1 : 0; @@ -119,21 +119,21 @@ public: requests.push_back( MPI::Irecv( array.getData() + bytesPerValue * localRange.getSize(), bytesPerValue * overlaps, - left, 0, group ) ); + left, 0, communicator ) ); requests.push_back( MPI::Irecv( array.getData() + bytesPerValue * (localRange.getSize() + overlaps), bytesPerValue * overlaps, - right, 0, group ) ); + right, 0, communicator ) ); // issue all async send operations requests.push_back( MPI::Isend( array.getData(), bytesPerValue * overlaps, - left, 0, group ) ); + left, 0, communicator ) ); requests.push_back( MPI::Isend( array.getData() + bytesPerValue * (localRange.getSize() - overlaps), bytesPerValue * overlaps, - right, 0, group ) ); + right, 0, communicator ) ); return requests; } diff --git a/src/TNL/Functions/MeshFunctionIO.h b/src/TNL/Functions/MeshFunctionIO.h index 35ed81897..46962df90 100644 --- a/src/TNL/Functions/MeshFunctionIO.h +++ b/src/TNL/Functions/MeshFunctionIO.h @@ -230,9 +230,9 @@ writeDistributedMeshFunction( const Meshes::DistributedMeshes::DistributedMesh< } if( format == "pvti" ) { - const MPI_Comm group = distributedMesh.getCommunicationGroup(); + const MPI_Comm communicator = 
distributedMesh.getCommunicator(); std::ofstream file; - if( TNL::MPI::GetRank( group ) == 0 ) + if( TNL::MPI::GetRank( communicator ) == 0 ) file.open( fileName ); using PVTI = Meshes::Writers::PVTIWriter< typename MeshFunction::MeshType >; diff --git a/src/TNL/MPI/DummyDefs.h b/src/TNL/MPI/DummyDefs.h index 578e46dfe..7109275dc 100644 --- a/src/TNL/MPI/DummyDefs.h +++ b/src/TNL/MPI/DummyDefs.h @@ -48,4 +48,21 @@ enum { #define MPI_GRAPH 2 /* graph topology */ #define MPI_KEYVAL_INVALID -1 /* invalid key value */ +// MPI handles +// (According to the MPI standard, they are only link-time constants (not +// compile-time constants). OpenMPI implements them as global variables.) +#define MPI_COMM_WORLD 1 +#define MPI_COMM_SELF MPI_COMM_WORLD +// NULL handles +#define MPI_GROUP_NULL 0 +#define MPI_COMM_NULL 0 +#define MPI_REQUEST_NULL 0 +#define MPI_MESSAGE_NULL 0 +#define MPI_OP_NULL 0 +#define MPI_ERRHANDLER_NULL 0 +#define MPI_INFO_NULL 0 +#define MPI_WIN_NULL 0 +#define MPI_FILE_NULL 0 +#define MPI_T_ENUM_NULL 0 + #endif diff --git a/src/TNL/MPI/Utils.h b/src/TNL/MPI/Utils.h index bb58a1a12..2cc848124 100644 --- a/src/TNL/MPI/Utils.h +++ b/src/TNL/MPI/Utils.h @@ -50,16 +50,16 @@ inline void restoreRedirection() * `MPI_COMM_TYPE_SHARED` type (from MPI-3) and the rank ID of the process * within the group is returned. */ -inline int getRankOnNode( MPI_Comm group = AllGroup() ) +inline int getRankOnNode( MPI_Comm communicator = MPI_COMM_WORLD ) { #ifdef HAVE_MPI - const int rank = GetRank(group); + const int rank = GetRank( communicator ); MPI_Info info; MPI_Info_create( &info ); MPI_Comm local_comm; - MPI_Comm_split_type( group, MPI_COMM_TYPE_SHARED, rank, info, &local_comm ); + MPI_Comm_split_type( communicator, MPI_COMM_TYPE_SHARED, rank, info, &local_comm ); const int local_rank = GetRank( local_comm ); @@ -83,16 +83,16 @@ inline int getRankOnNode( MPI_Comm group = AllGroup() ) * * \param value Value of the current rank to be reduced. 
* \param op The reduction operation to be applied. - * \param group The communicator comprising ranks that participate in the - * collective operation. + * \param communicator The communicator comprising ranks that participate in the + * collective operation. * \return The reduced value (it is ensured that all ranks receive the same * value). */ template< typename T > -T reduce( T value, const MPI_Op& op, MPI_Comm group = AllGroup() ) +T reduce( T value, const MPI_Op& op, MPI_Comm communicator = MPI_COMM_WORLD ) { // call the in-place variant of Allreduce - Allreduce( &value, 1, op, group ); + Allreduce( &value, 1, op, communicator ); // return the reduced value return value; } diff --git a/src/TNL/MPI/Wrappers.h b/src/TNL/MPI/Wrappers.h index 828f2ad45..dfaf4e4f4 100644 --- a/src/TNL/MPI/Wrappers.h +++ b/src/TNL/MPI/Wrappers.h @@ -31,35 +31,6 @@ namespace MPI { // forward declaration to break cyclic inclusion inline void selectGPU(); -// function wrappers for MPI constants - -inline MPI_Comm AllGroup() -{ -#ifdef HAVE_MPI - return MPI_COMM_WORLD; -#else - return 1; -#endif -} - -inline MPI_Comm NullGroup() -{ -#ifdef HAVE_MPI - return MPI_COMM_NULL; -#else - return 0; -#endif -} - -inline MPI_Request NullRequest() -{ -#ifdef HAVE_MPI - return MPI_REQUEST_NULL; -#else - return 0; -#endif -} - // wrappers for basic MPI functions inline void Init( int& argc, char**& argv, int required_thread_level = MPI_THREAD_SINGLE ) @@ -112,45 +83,45 @@ inline void Finalize() inline bool Initialized() { #ifdef HAVE_MPI - int flag; - MPI_Initialized(&flag); - return flag; + int flag; + MPI_Initialized( &flag ); + return flag; #else - return true; + return true; #endif } inline bool Finalized() { #ifdef HAVE_MPI - int flag; - MPI_Finalized(&flag); - return flag; + int flag; + MPI_Finalized( &flag ); + return flag; #else - return false; + return false; #endif } -inline int GetRank( MPI_Comm group = AllGroup() ) +inline int GetRank( MPI_Comm communicator = MPI_COMM_WORLD ) { - 
TNL_ASSERT_NE( group, NullGroup(), "GetRank cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "GetRank cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! Finalized(), "Fatal Error - MPI is not initialized" ); int rank; - MPI_Comm_rank( group, &rank ); + MPI_Comm_rank( communicator, &rank ); return rank; #else return 0; #endif } -inline int GetSize( MPI_Comm group = AllGroup() ) +inline int GetSize( MPI_Comm communicator = MPI_COMM_WORLD ) { - TNL_ASSERT_NE( group, NullGroup(), "GetSize cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "GetSize cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! Finalized(), "Fatal Error - MPI is not initialized" ); int size; - MPI_Comm_size( group, &size ); + MPI_Comm_size( communicator, &size ); return size; #else return 1; @@ -173,17 +144,17 @@ inline MPI_Comm Comm_split( MPI_Comm comm, int color, int key ) /** * \brief Wrapper for \ref MPI_Dims_create. * - * \param nproc - number of processes in the group to be distributed + * \param nnodes - number of nodes in the grid * \param ndims - number of dimensions of the Cartesian grid * \param dims - distribution of processes into the \e dim-dimensional * Cartesian grid (array of length \e ndims) * * Negative input values of \e dims[i] are erroneous. An error will occur if - * \e nproc is not a multiple of the product of all non-zero values \e dims[i]. + * \e nnodes is not a multiple of the product of all non-zero values \e dims[i]. * * See the MPI documentation for more information. 
*/ -inline void Compute_dims( int nproc, int ndims, int* dims ) +inline void Compute_dims( int nnodes, int ndims, int* dims ) { #ifdef HAVE_MPI int prod = 1; @@ -194,12 +165,12 @@ inline void Compute_dims( int nproc, int ndims, int* dims ) prod *= dims[ i ]; } - if( nproc % prod != 0 ) + if( nnodes % prod != 0 ) throw std::logic_error( "The program tries to call MPI_Dims_create with wrong dimensions." "The product of the non-zero values dims[i] is " + std::to_string(prod) + " and the " - "number of processes (" + std::to_string(nproc) + ") is not a multiple of the product." ); + "number of processes (" + std::to_string(nnodes) + ") is not a multiple of the product." ); - MPI_Dims_create( nproc, ndims, dims ); + MPI_Dims_create( nnodes, ndims, dims ); #else for( int i = 0; i < ndims; i++) dims[ i ] = 1; @@ -208,12 +179,12 @@ inline void Compute_dims( int nproc, int ndims, int* dims ) // wrappers for MPI communication functions -inline void Barrier( MPI_Comm group = AllGroup() ) +inline void Barrier( MPI_Comm communicator = MPI_COMM_WORLD ) { - TNL_ASSERT_NE( group, NullGroup(), "Barrier cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Barrier cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! Finalized(), "Fatal Error - MPI is not initialized" ); - MPI_Barrier(group); + MPI_Barrier( communicator ); #endif } @@ -230,12 +201,12 @@ void Send( const T* data, int count, int dest, int tag, - MPI_Comm group = AllGroup() ) + MPI_Comm communicator = MPI_COMM_WORLD ) { - TNL_ASSERT_NE( group, NullGroup(), "Send cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Send cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! 
Finalized(), "Fatal Error - MPI is not initialized" ); - MPI_Send( (const void*) data, count, getDataType(), dest, tag, group ); + MPI_Send( (const void*) data, count, getDataType(), dest, tag, communicator ); #endif } @@ -244,12 +215,12 @@ void Recv( T* data, int count, int src, int tag, - MPI_Comm group = AllGroup() ) + MPI_Comm communicator = MPI_COMM_WORLD ) { - TNL_ASSERT_NE( group, NullGroup(), "Recv cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Recv cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! Finalized(), "Fatal Error - MPI is not initialized" ); - MPI_Recv( (void*) data, count, getDataType(), src, tag, group, MPI_STATUS_IGNORE ); + MPI_Recv( (void*) data, count, getDataType(), src, tag, communicator, MPI_STATUS_IGNORE ); #endif } @@ -262,9 +233,9 @@ void Sendrecv( const T* sendData, int receiveCount, int source, int receiveTag, - MPI_Comm group = AllGroup() ) + MPI_Comm communicator = MPI_COMM_WORLD ) { - TNL_ASSERT_NE( group, NullGroup(), "Sendrecv cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Sendrecv cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! Finalized(), "Fatal Error - MPI is not initialized" ); MPI_Sendrecv( (void*) sendData, @@ -277,7 +248,7 @@ void Sendrecv( const T* sendData, getDataType(), source, receiveTag, - group, + communicator, MPI_STATUS_IGNORE ); #else throw Exceptions::MPISupportMissing(); @@ -289,16 +260,16 @@ MPI_Request Isend( const T* data, int count, int dest, int tag, - MPI_Comm group = AllGroup() ) + MPI_Comm communicator = MPI_COMM_WORLD ) { - TNL_ASSERT_NE( group, NullGroup(), "Isend cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Isend cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! 
Finalized(), "Fatal Error - MPI is not initialized" ); MPI_Request req; - MPI_Isend( (const void*) data, count, getDataType(), dest, tag, group, &req ); + MPI_Isend( (const void*) data, count, getDataType(), dest, tag, communicator, &req ); return req; #else - return NullRequest(); + return MPI_REQUEST_NULL; #endif } @@ -307,16 +278,16 @@ MPI_Request Irecv( T* data, int count, int src, int tag, - MPI_Comm group = AllGroup() ) + MPI_Comm communicator = MPI_COMM_WORLD ) { - TNL_ASSERT_NE( group, NullGroup(), "Irecv cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Irecv cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! Finalized(), "Fatal Error - MPI is not initialized" ); MPI_Request req; - MPI_Irecv( (void*) data, count, getDataType(), src, tag, group, &req ); + MPI_Irecv( (void*) data, count, getDataType(), src, tag, communicator, &req ); return req; #else - return NullRequest(); + return MPI_REQUEST_NULL; #endif } @@ -325,12 +296,12 @@ void Allreduce( const T* data, T* reduced_data, int count, const MPI_Op& op, - MPI_Comm group) + MPI_Comm communicator) { - TNL_ASSERT_NE( group, NullGroup(), "Allreduce cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Allreduce cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI getTimerAllreduce().start(); - MPI_Allreduce( (const void*) data, (void*) reduced_data, count, getDataType(), op, group ); + MPI_Allreduce( (const void*) data, (void*) reduced_data, count, getDataType(), op, communicator ); getTimerAllreduce().stop(); #else std::memcpy( (void*) reduced_data, (const void*) data, count * sizeof(T) ); @@ -342,12 +313,12 @@ template< typename T > void Allreduce( T* data, int count, const MPI_Op& op, - MPI_Comm group) + MPI_Comm communicator) { - TNL_ASSERT_NE( group, NullGroup(), "Allreduce cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Allreduce cannot be called with 
MPI_COMM_NULL" ); #ifdef HAVE_MPI getTimerAllreduce().start(); - MPI_Allreduce( MPI_IN_PLACE, (void*) data, count, getDataType(), op, group ); + MPI_Allreduce( MPI_IN_PLACE, (void*) data, count, getDataType(), op, communicator ); getTimerAllreduce().stop(); #endif } @@ -358,23 +329,23 @@ void Reduce( const T* data, int count, const MPI_Op& op, int root, - MPI_Comm group) + MPI_Comm communicator) { - TNL_ASSERT_NE( group, NullGroup(), "Reduce cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Reduce cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI - MPI_Reduce( (const void*) data, (void*) reduced_data, count, getDataType(), op, root, group ); + MPI_Reduce( (const void*) data, (void*) reduced_data, count, getDataType(), op, root, communicator ); #else std::memcpy( (void*) reduced_data, (void*) data, count * sizeof(T) ); #endif } template< typename T > -void Bcast( T* data, int count, int root, MPI_Comm group) +void Bcast( T* data, int count, int root, MPI_Comm communicator) { - TNL_ASSERT_NE( group, NullGroup(), "Bcast cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Bcast cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI TNL_ASSERT_TRUE( Initialized() && ! 
Finalized(), "Fatal Error - MPI is not initialized" ); - MPI_Bcast( (void*) data, count, getDataType(), root, group ); + MPI_Bcast( (void*) data, count, getDataType(), root, communicator ); #endif } @@ -383,9 +354,9 @@ void Alltoall( const T* sendData, int sendCount, T* receiveData, int receiveCount, - MPI_Comm group ) + MPI_Comm communicator ) { - TNL_ASSERT_NE( group, NullGroup(), "Alltoall cannot be called with NullGroup" ); + TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Alltoall cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI MPI_Alltoall( (const void*) sendData, sendCount, @@ -393,7 +364,7 @@ void Alltoall( const T* sendData, (void*) receiveData, receiveCount, getDataType(), - group ); + communicator ); #else TNL_ASSERT_EQ( sendCount, receiveCount, "sendCount must be equal to receiveCount when running without MPI." ); std::memcpy( (void*) receiveData, (const void*) sendData, sendCount * sizeof(T) ); diff --git a/src/TNL/Matrices/DistributedMatrix.h b/src/TNL/Matrices/DistributedMatrix.h index b274699af..205ec0580 100644 --- a/src/TNL/Matrices/DistributedMatrix.h +++ b/src/TNL/Matrices/DistributedMatrix.h @@ -48,13 +48,13 @@ public: DistributedMatrix( DistributedMatrix& ) = default; - DistributedMatrix( LocalRangeType localRowRange, IndexType rows, IndexType columns, MPI_Comm group ); + DistributedMatrix( LocalRangeType localRowRange, IndexType rows, IndexType columns, MPI_Comm communicator ); - void setDistribution( LocalRangeType localRowRange, IndexType rows, IndexType columns, MPI_Comm group ); + void setDistribution( LocalRangeType localRowRange, IndexType rows, IndexType columns, MPI_Comm communicator ); const LocalRangeType& getLocalRowRange() const; - MPI_Comm getCommunicationGroup() const; + MPI_Comm getCommunicator() const; const Matrix& getLocalMatrix() const; @@ -104,7 +104,7 @@ public: // multiplication with a global vector template< typename InVector, typename OutVector > - typename std::enable_if< ! 
HasGetCommunicationGroupMethod< InVector >::value >::type + typename std::enable_if< ! HasGetCommunicatorMethod< InVector >::value >::type vectorProduct( const InVector& inVector, OutVector& outVector ) const; @@ -115,7 +115,7 @@ public: // (not const because it modifies internal bufers) template< typename InVector, typename OutVector > - typename std::enable_if< HasGetCommunicationGroupMethod< InVector >::value >::type + typename std::enable_if< HasGetCommunicatorMethod< InVector >::value >::type vectorProduct( const InVector& inVector, OutVector& outVector ) const; @@ -129,7 +129,7 @@ public: protected: LocalRangeType localRowRange; IndexType rows = 0; // global rows count - MPI_Comm group = MPI::NullGroup(); + MPI_Comm communicator = MPI_COMM_NULL; Matrix localMatrix; DistributedSpMV< Matrix > spmv; diff --git a/src/TNL/Matrices/DistributedMatrix_impl.h b/src/TNL/Matrices/DistributedMatrix_impl.h index 8bc5d0982..be5103220 100644 --- a/src/TNL/Matrices/DistributedMatrix_impl.h +++ b/src/TNL/Matrices/DistributedMatrix_impl.h @@ -19,20 +19,20 @@ namespace Matrices { template< typename Matrix > DistributedMatrix< Matrix >:: -DistributedMatrix( LocalRangeType localRowRange, IndexType rows, IndexType columns, MPI_Comm group ) +DistributedMatrix( LocalRangeType localRowRange, IndexType rows, IndexType columns, MPI_Comm communicator ) { - setDistribution( localRowRange, rows, columns, group ); + setDistribution( localRowRange, rows, columns, communicator ); } template< typename Matrix > void DistributedMatrix< Matrix >:: -setDistribution( LocalRangeType localRowRange, IndexType rows, IndexType columns, MPI_Comm group ) +setDistribution( LocalRangeType localRowRange, IndexType rows, IndexType columns, MPI_Comm communicator ) { this->localRowRange = localRowRange; this->rows = rows; - this->group = group; - if( group != MPI::NullGroup() ) + this->communicator = communicator; + if( communicator != MPI_COMM_NULL ) localMatrix.setDimensions( localRowRange.getSize(), columns 
); spmv.reset(); @@ -49,9 +49,9 @@ getLocalRowRange() const template< typename Matrix > MPI_Comm DistributedMatrix< Matrix >:: -getCommunicationGroup() const +getCommunicator() const { - return group; + return communicator; } template< typename Matrix > @@ -104,7 +104,7 @@ setLike( const MatrixT& matrix ) { localRowRange = matrix.getLocalRowRange(); rows = matrix.getRows(); - group = matrix.getCommunicationGroup(); + communicator = matrix.getCommunicator(); localMatrix.setLike( matrix.getLocalMatrix() ); spmv.reset(); @@ -117,7 +117,7 @@ reset() { localRowRange.reset(); rows = 0; - group = MPI::NullGroup(); + communicator = MPI_COMM_NULL; localMatrix.reset(); spmv.reset(); @@ -147,9 +147,9 @@ setRowCapacities( const RowCapacitiesVector& rowCapacities ) { TNL_ASSERT_EQ( rowCapacities.getSize(), getRows(), "row lengths vector has wrong size" ); TNL_ASSERT_EQ( rowCapacities.getLocalRange(), getLocalRowRange(), "row lengths vector has wrong distribution" ); - TNL_ASSERT_EQ( rowCapacities.getCommunicationGroup(), getCommunicationGroup(), "row lengths vector has wrong communication group" ); + TNL_ASSERT_EQ( rowCapacities.getCommunicator(), getCommunicator(), "row lengths vector has wrong communicator" ); - if( getCommunicationGroup() != MPI::NullGroup() ) { + if( getCommunicator() != MPI_COMM_NULL ) { localMatrix.setRowCapacities( rowCapacities.getConstLocalView() ); spmv.reset(); @@ -162,8 +162,8 @@ void DistributedMatrix< Matrix >:: getCompressedRowLengths( Vector& rowLengths ) const { - if( getCommunicationGroup() != MPI::NullGroup() ) { - rowLengths.setDistribution( getLocalRowRange(), 0, getRows(), getCommunicationGroup() ); + if( getCommunicator() != MPI_COMM_NULL ) { + rowLengths.setDistribution( getLocalRowRange(), 0, getRows(), getCommunicator() ); auto localRowLengths = rowLengths.getLocalView(); localMatrix.getCompressedRowLengths( localRowLengths ); } @@ -230,7 +230,7 @@ getRow( IndexType row ) const template< typename Matrix > template< typename InVector, 
typename OutVector > -typename std::enable_if< ! HasGetCommunicationGroupMethod< InVector >::value >::type +typename std::enable_if< ! HasGetCommunicatorMethod< InVector >::value >::type DistributedMatrix< Matrix >:: vectorProduct( const InVector& inVector, OutVector& outVector ) const @@ -238,7 +238,7 @@ vectorProduct( const InVector& inVector, TNL_ASSERT_EQ( inVector.getSize(), getColumns(), "input vector has wrong size" ); TNL_ASSERT_EQ( outVector.getSize(), getRows(), "output vector has wrong size" ); TNL_ASSERT_EQ( outVector.getLocalRange(), getLocalRowRange(), "output vector has wrong distribution" ); - TNL_ASSERT_EQ( outVector.getCommunicationGroup(), getCommunicationGroup(), "output vector has wrong communication group" ); + TNL_ASSERT_EQ( outVector.getCommunicator(), getCommunicator(), "output vector has wrong communicator" ); auto outView = outVector.getLocalView(); localMatrix.vectorProduct( inVector, outView ); @@ -249,32 +249,32 @@ void DistributedMatrix< Matrix >:: updateVectorProductCommunicationPattern() { - if( getCommunicationGroup() == MPI::NullGroup() ) + if( getCommunicator() == MPI_COMM_NULL ) return; - spmv.updateCommunicationPattern( getLocalMatrix(), getCommunicationGroup() ); + spmv.updateCommunicationPattern( getLocalMatrix(), getCommunicator() ); } template< typename Matrix > template< typename InVector, typename OutVector > -typename std::enable_if< HasGetCommunicationGroupMethod< InVector >::value >::type +typename std::enable_if< HasGetCommunicatorMethod< InVector >::value >::type DistributedMatrix< Matrix >:: vectorProduct( const InVector& inVector, OutVector& outVector ) const { TNL_ASSERT_EQ( inVector.getLocalRange(), getLocalRowRange(), "input vector has wrong distribution" ); - TNL_ASSERT_EQ( inVector.getCommunicationGroup(), getCommunicationGroup(), "input vector has wrong communication group" ); + TNL_ASSERT_EQ( inVector.getCommunicator(), getCommunicator(), "input vector has wrong communicator" ); TNL_ASSERT_EQ( 
outVector.getSize(), getRows(), "output vector has wrong size" ); TNL_ASSERT_EQ( outVector.getLocalRange(), getLocalRowRange(), "output vector has wrong distribution" ); - TNL_ASSERT_EQ( outVector.getCommunicationGroup(), getCommunicationGroup(), "output vector has wrong communication group" ); + TNL_ASSERT_EQ( outVector.getCommunicator(), getCommunicator(), "output vector has wrong communicator" ); - if( getCommunicationGroup() == MPI::NullGroup() ) + if( getCommunicator() == MPI_COMM_NULL ) return; if( inVector.getGhosts() == 0 ) { // NOTE: this branch is deprecated and kept only due to existing benchmarks TNL_ASSERT_EQ( inVector.getSize(), getColumns(), "input vector has wrong size" ); - const_cast< DistributedMatrix* >( this )->spmv.vectorProduct( outVector, localMatrix, localRowRange, inVector, getCommunicationGroup() ); + const_cast< DistributedMatrix* >( this )->spmv.vectorProduct( outVector, localMatrix, localRowRange, inVector, getCommunicator() ); } else { TNL_ASSERT_EQ( inVector.getConstLocalViewWithGhosts().getSize(), localMatrix.getColumns(), "the matrix uses non-local and non-ghost column indices" ); diff --git a/src/TNL/Matrices/DistributedSpMV.h b/src/TNL/Matrices/DistributedSpMV.h index bea864ead..e9e49a025 100644 --- a/src/TNL/Matrices/DistributedSpMV.h +++ b/src/TNL/Matrices/DistributedSpMV.h @@ -53,10 +53,10 @@ public: // - assembly of the i-th row involves traversal of the local matrix stored // in the i-th process // - assembly of the full matrix needs all-to-all communication - void updateCommunicationPattern( const MatrixType& localMatrix, const LocalRangeType& localRowRange, MPI_Comm group ) + void updateCommunicationPattern( const MatrixType& localMatrix, const LocalRangeType& localRowRange, MPI_Comm communicator ) { - const int rank = MPI::GetRank( group ); - const int nproc = MPI::GetSize( group ); + const int rank = MPI::GetRank( communicator ); + const int nproc = MPI::GetSize( communicator ); commPatternStarts.setDimensions( nproc, 
nproc ); commPatternEnds.setDimensions( nproc, nproc ); @@ -67,7 +67,7 @@ public: sendbuf.setValue( localRowRange.getBegin() ); MPI::Alltoall( sendbuf.getData(), 1, globalOffsets.getData(), 1, - group ); + communicator ); } const auto globalOffsetsView = globalOffsets.getConstView(); auto getOwner = [=] __cuda_callable__ ( IndexType global_idx ) -> int @@ -150,10 +150,10 @@ public: // assemble the commPattern* matrices MPI::Alltoall( &preCommPatternStarts(0, 0), nproc, &commPatternStarts(0, 0), nproc, - group ); + communicator ); MPI::Alltoall( &preCommPatternEnds(0, 0), nproc, &commPatternEnds(0, 0), nproc, - group ); + communicator ); } template< typename InVector, @@ -162,10 +162,10 @@ public: const MatrixType& localMatrix, const LocalRangeType& localRowRange, const InVector& inVector, - MPI_Comm group ) + MPI_Comm communicator ) { - const int rank = MPI::GetRank( group ); - const int nproc = MPI::GetSize( group ); + const int rank = MPI::GetRank( communicator ); + const int nproc = MPI::GetSize( communicator ); // handle trivial case if( nproc == 1 ) { @@ -177,7 +177,7 @@ public: // update communication pattern if( commPatternStarts.getRows() != nproc || commPatternEnds.getRows() != nproc ) - updateCommunicationPattern( localMatrix, localRowRange, group ); + updateCommunicationPattern( localMatrix, localRowRange, communicator ); // prepare buffers globalBuffer.init( localRowRange.getBegin(), @@ -198,7 +198,7 @@ public: commRequests.push_back( MPI::Isend( inVector.getConstLocalView().getData() + commPatternStarts( i, rank ) - localRowRange.getBegin(), commPatternEnds( i, rank ) - commPatternStarts( i, rank ), - i, 0, group ) ); + i, 0, communicator ) ); } // receive data that we need @@ -209,7 +209,7 @@ public: commRequests.push_back( MPI::Irecv( globalBuffer.getPointer( commPatternStarts( rank, j ) ), commPatternEnds( rank, j ) - commPatternStarts( rank, j ), - j, 0, group ) ); + j, 0, communicator ) ); } // general variant diff --git 
a/src/TNL/Meshes/DistributedMeshes/DistributedGrid.h b/src/TNL/Meshes/DistributedMeshes/DistributedGrid.h index c112b8d65..92a569c5f 100644 --- a/src/TNL/Meshes/DistributedMeshes/DistributedGrid.h +++ b/src/TNL/Meshes/DistributedMeshes/DistributedGrid.h @@ -84,9 +84,9 @@ class DistributedMesh< Grid< Dimension, Real, Device, Index > > const CoordinatesType& getSubdomainCoordinates() const; - //aka MPI-communcicator - void setCommunicationGroup( MPI_Comm group ); - MPI_Comm getCommunicationGroup() const; + void setCommunicator( MPI_Comm communicator ); + + MPI_Comm getCommunicator() const; template< int EntityDimension > IndexType getEntitiesCount() const; @@ -140,8 +140,7 @@ class DistributedMesh< Grid< Dimension, Real, Device, Index > > bool isSet = false; - //aka MPI-communicator - MPI_Comm group = MPI::AllGroup(); + MPI_Comm communicator = MPI_COMM_WORLD; }; template< int Dimension, typename Real, typename Device, typename Index > diff --git a/src/TNL/Meshes/DistributedMeshes/DistributedGrid.hpp b/src/TNL/Meshes/DistributedMeshes/DistributedGrid.hpp index 043c61c74..6426ba3e6 100644 --- a/src/TNL/Meshes/DistributedMeshes/DistributedGrid.hpp +++ b/src/TNL/Meshes/DistributedMeshes/DistributedGrid.hpp @@ -48,7 +48,7 @@ setGlobalGrid( const GridType& globalGrid ) this->neighbors[ i ] = -1; // use MPI only if have more than one process - this->distributed = MPI::GetSize(group) > 1; + this->distributed = MPI::GetSize( communicator ) > 1; if( !this->distributed ) { @@ -66,12 +66,12 @@ setGlobalGrid( const GridType& globalGrid ) int dims[ Dimension ]; for( int i = 0; i < Dimension; i++ ) dims[ i ] = this->domainDecomposition[ i ]; - MPI::Compute_dims( MPI::GetSize(group), Dimension, dims ); + MPI::Compute_dims( MPI::GetSize( communicator ), Dimension, dims ); for( int i = 0; i < Dimension; i++ ) this->domainDecomposition[ i ] = dims[ i ]; - int size = MPI::GetSize(group); - int tmp = MPI::GetRank(group); + int size = MPI::GetSize( communicator ); + int tmp = 
MPI::GetRank( communicator ); for( int i = Dimension - 1; i >= 0; i-- ) { size = size / this->domainDecomposition[ i ]; @@ -262,17 +262,17 @@ getEntitiesCount() const template< int Dimension, typename Real, typename Device, typename Index > void DistributedMesh< Grid< Dimension, Real, Device, Index > >:: -setCommunicationGroup( MPI_Comm group ) +setCommunicator( MPI_Comm communicator ) { - this->group = group; + this->communicator = communicator; } template< int Dimension, typename Real, typename Device, typename Index > MPI_Comm DistributedMesh< Grid< Dimension, Real, Device, Index > >:: -getCommunicationGroup() const +getCommunicator() const { - return this->group; + return this->communicator; } template< int Dimension, typename Real, typename Device, typename Index > @@ -372,8 +372,8 @@ SetupByCut( DistributedGridType &inputDistributedGrid, isInCut &= fixedIndexs[i] > begin[reducedDimensions[i]] && fixedIndexs[i] < begin[reducedDimensions[i]] + size[reducedDimensions[i]]; } - // create new group with used nodes - const MPI_Comm oldGroup = inputDistributedGrid.getCommunicationGroup(); + // create new communicator with used nodes + const MPI_Comm oldCommunicator = inputDistributedGrid.getCommunicator(); if(isInCut) { this->isSet=true; @@ -420,7 +420,7 @@ SetupByCut( DistributedGridType &inputDistributedGrid, // TODO: set interiorBegin, interiorEnd const int newRank = getRankOfProcCoord(this->subdomainCoordinates); - this->group = MPI::Comm_split( oldGroup, 1, newRank ); + this->communicator = MPI::Comm_split( oldCommunicator, 1, newRank ); setupNeighbors(); @@ -435,7 +435,7 @@ SetupByCut( DistributedGridType &inputDistributedGrid, } else { - this->group = MPI::Comm_split( oldGroup, MPI_UNDEFINED, 0 ); + this->communicator = MPI::Comm_split( oldCommunicator, MPI_UNDEFINED, 0 ); return false; } } @@ -485,7 +485,7 @@ operator==( const DistributedMesh& other ) const && subdomainCoordinates == other.subdomainCoordinates && distributed == other.distributed && isSet == 
other.isSet - && group == other.group; + && communicator == other.communicator; } template< int Dimension, typename Real, typename Device, typename Index > diff --git a/src/TNL/Meshes/DistributedMeshes/DistributedGridSynchronizer.h b/src/TNL/Meshes/DistributedMeshes/DistributedGridSynchronizer.h index afc5c6c77..d33c2c618 100644 --- a/src/TNL/Meshes/DistributedMeshes/DistributedGridSynchronizer.h +++ b/src/TNL/Meshes/DistributedMeshes/DistributedGridSynchronizer.h @@ -144,7 +144,7 @@ class DistributedMeshSynchronizer< DistributedMesh< Grid< MeshDimension, GridRea //async send and receive MPI_Request requests[2*this->getNeighborsCount()]; - MPI_Comm group = distributedGrid->getCommunicationGroup(); + MPI_Comm communicator = distributedGrid->getCommunicator(); int requestsCount( 0 ); //send everything, recieve everything @@ -156,16 +156,16 @@ class DistributedMeshSynchronizer< DistributedMesh< Grid< MeshDimension, GridRea if( neighbors[ i ] != -1 ) { //TNL_MPI_PRINT( "Sending data to node " << neighbors[ i ] ); - requests[ requestsCount++ ] = MPI::Isend( reinterpret_cast( sendBuffers[ i ].getData() ), sendSizes[ i ], neighbors[ i ], 0, group ); + requests[ requestsCount++ ] = MPI::Isend( reinterpret_cast( sendBuffers[ i ].getData() ), sendSizes[ i ], neighbors[ i ], 0, communicator ); //TNL_MPI_PRINT( "Receiving data from node " << neighbors[ i ] ); - requests[ requestsCount++ ] = MPI::Irecv( reinterpret_cast( recieveBuffers[ i ].getData() ), sendSizes[ i ], neighbors[ i ], 0, group ); + requests[ requestsCount++ ] = MPI::Irecv( reinterpret_cast( recieveBuffers[ i ].getData() ), sendSizes[ i ], neighbors[ i ], 0, communicator ); } else if( periodicBoundaries && sendSizes[ i ] !=0 ) - { + { //TNL_MPI_PRINT( "Sending data to node " << periodicNeighbors[ i ] ); - requests[ requestsCount++ ] = MPI::Isend( reinterpret_cast( sendBuffers[ i ].getData() ), sendSizes[ i ], periodicNeighbors[ i ], 1, group ); + requests[ requestsCount++ ] = MPI::Isend( reinterpret_cast( 
sendBuffers[ i ].getData() ), sendSizes[ i ], periodicNeighbors[ i ], 1, communicator ); //TNL_MPI_PRINT( "Receiving data to node " << periodicNeighbors[ i ] ); - requests[ requestsCount++ ] = MPI::Irecv( reinterpret_cast( recieveBuffers[ i ].getData() ), sendSizes[ i ], periodicNeighbors[ i ], 1, group ); + requests[ requestsCount++ ] = MPI::Irecv( reinterpret_cast( recieveBuffers[ i ].getData() ), sendSizes[ i ], periodicNeighbors[ i ], 1, communicator ); } } diff --git a/src/TNL/Meshes/DistributedMeshes/DistributedMesh.h b/src/TNL/Meshes/DistributedMeshes/DistributedMesh.h index 21116d357..210b6fa00 100644 --- a/src/TNL/Meshes/DistributedMeshes/DistributedMesh.h +++ b/src/TNL/Meshes/DistributedMeshes/DistributedMesh.h @@ -55,7 +55,7 @@ public: { GlobalIndexStorageFamily< Mesh >::operator=( other ); localMesh = other.getLocalMesh(); - group = other.getCommunicationGroup(); + communicator = other.getCommunicator(); ghostLevels = other.getGhostLevels(); vtkPointGhostTypesArray = other.vtkPointGhostTypes(); vtkCellGhostTypesArray = other.vtkCellGhostTypes(); @@ -66,7 +66,7 @@ public: { return ( GlobalIndexStorageFamily< Mesh, DeviceType >::operator==( other ) && localMesh == other.getLocalMesh() && - group == other.getCommunicationGroup() && + communicator == other.getCommunicator() && ghostLevels == other.getGhostLevels() && vtkPointGhostTypesArray == other.vtkPointGhostTypes() && vtkCellGhostTypesArray == other.vtkCellGhostTypes() ); @@ -99,14 +99,14 @@ public: /** * Methods specific to the distributed mesh */ - void setCommunicationGroup( MPI_Comm group ) + void setCommunicator( MPI_Comm communicator ) { - this->group = group; + this->communicator = communicator; } - MPI_Comm getCommunicationGroup() const + MPI_Comm getCommunicator() const { - return group; + return communicator; } const MeshType& getLocalMesh() const @@ -234,7 +234,7 @@ public: protected: MeshType localMesh; - MPI_Comm group = MPI::NullGroup(); + MPI_Comm communicator = MPI_COMM_NULL; int 
ghostLevels = 0; // vtkGhostType arrays for points and cells (cached for output into VTK formats) diff --git a/src/TNL/Meshes/DistributedMeshes/DistributedMeshSynchronizer.h b/src/TNL/Meshes/DistributedMeshes/DistributedMeshSynchronizer.h index 0353ded09..4a75d8ee0 100644 --- a/src/TNL/Meshes/DistributedMeshes/DistributedMeshSynchronizer.h +++ b/src/TNL/Meshes/DistributedMeshes/DistributedMeshSynchronizer.h @@ -61,9 +61,9 @@ public: TNL_ASSERT_EQ( mesh.template getGlobalIndices< EntityDimension >().getSize(), mesh.getLocalMesh().template getEntitiesCount< EntityDimension >(), "Global indices are not allocated properly." ); - group = mesh.getCommunicationGroup(); - const int rank = MPI::GetRank( group ); - const int nproc = MPI::GetSize( group ); + communicator = mesh.getCommunicator(); + const int rank = MPI::GetRank( communicator ); + const int nproc = MPI::GetSize( communicator ); // exchange the global index offsets so that each rank can determine the // owner of every entity by its global index @@ -74,7 +74,7 @@ public: sendbuf.setValue( ownStart ); MPI::Alltoall( sendbuf.getData(), 1, globalOffsets.getData(), 1, - group ); + communicator ); } // count local ghost entities for each rank @@ -113,7 +113,7 @@ public: sendbuf.setElement( j, i, localGhostCounts[ i ] ); MPI::Alltoall( &sendbuf(0, 0), nproc, &ghostEntitiesCounts(0, 0), nproc, - group ); + communicator ); } // allocate ghost offsets @@ -140,7 +140,7 @@ public: requests.push_back( MPI::Isend( mesh.template getGlobalIndices< EntityDimension >().getData() + ghostOffset, ghostEntitiesCounts( rank, i ), - i, 0, group ) ); + i, 0, communicator ) ); ghostOffset += ghostEntitiesCounts( rank, i ); } // update ghost offsets @@ -155,7 +155,7 @@ public: requests.push_back( MPI::Irecv( ghostNeighbors.getData() + ghostNeighborOffsets[ j ], ghostEntitiesCounts( j, rank ), - j, 0, group ) ); + j, 0, communicator ) ); } } @@ -210,8 +210,8 @@ public: TNL_ASSERT_EQ( array.getSize(), bytesPerValue * ghostOffsets[ 
ghostOffsets.getSize() - 1 ], "The array does not have the expected size." ); - const int rank = MPI::GetRank( group ); - const int nproc = MPI::GetSize( group ); + const int rank = MPI::GetRank( communicator ); + const int nproc = MPI::GetSize( communicator ); // allocate send buffers (setSize does nothing if the array size is already correct) sendBuffers.setSize( bytesPerValue * ghostNeighborOffsets[ nproc ] ); @@ -225,7 +225,7 @@ public: requests.push_back( MPI::Irecv( array.getData() + bytesPerValue * ghostOffsets[ j ], bytesPerValue * ghostEntitiesCounts( rank, j ), - j, 0, group ) ); + j, 0, communicator ) ); } } @@ -249,7 +249,7 @@ public: requests.push_back( MPI::Isend( sendBuffersView.getData() + bytesPerValue * ghostNeighborOffsets[ i ], bytesPerValue * ghostEntitiesCounts( i, rank ), - i, 0, group ) ); + i, 0, communicator ) ); } } @@ -269,8 +269,8 @@ public: { TNL_ASSERT_EQ( pattern.getRows(), ghostOffsets[ ghostOffsets.getSize() - 1 ], "invalid sparse pattern matrix" ); - const int rank = MPI::GetRank( group ); - const int nproc = MPI::GetSize( group ); + const int rank = MPI::GetRank( communicator ); + const int nproc = MPI::GetSize( communicator ); // buffer for asynchronous communication requests RequestsVector requests; @@ -310,7 +310,7 @@ public: requests.push_back( MPI::Isend( send_rowCapacities.getData() + send_rankOffsets[ i ], ghostNeighborOffsets[ i + 1 ] - ghostNeighborOffsets[ i ], - i, 1, group ) ); + i, 1, communicator ) ); } // allocate column indices @@ -338,7 +338,7 @@ public: requests.push_back( MPI::Isend( send_columnIndices.getData() + send_rowPointers[ send_rankOffsets[ i ] ], send_rowPointers[ send_rankOffsets[ i + 1 ] ] - send_rowPointers[ send_rankOffsets[ i ] ], - i, 0, group ) ); + i, 0, communicator ) ); } } @@ -373,7 +373,7 @@ public: row_lengths_requests.push_back( MPI::Irecv( recv_rowPointers.getData() + recv_rankOffsets[ i ], ghostOffsets[ i + 1 ] - ghostOffsets[ i ], - i, 1, group ) ); + i, 1, communicator ) ); } } @@ 
-397,7 +397,7 @@ public: requests.push_back( MPI::Irecv( recv_columnIndices.getData() + recv_rowPointers[ recv_rankOffsets[ i ] ], recv_rowPointers[ recv_rankOffsets[ i + 1 ] ] - recv_rowPointers[ recv_rankOffsets[ i ] ], - i, 0, group ) ); + i, 0, communicator ) ); } } @@ -445,8 +445,8 @@ public: } protected: - // communication group taken from the distributed mesh - MPI_Comm group; + // communicator taken from the distributed mesh + MPI_Comm communicator; /** * Global offsets: array of size nproc where the i-th value is the lowest diff --git a/src/TNL/Meshes/DistributedMeshes/distributeSubentities.h b/src/TNL/Meshes/DistributedMeshes/distributeSubentities.h index cfe5a9246..5982ca369 100644 --- a/src/TNL/Meshes/DistributedMeshes/distributeSubentities.h +++ b/src/TNL/Meshes/DistributedMeshes/distributeSubentities.h @@ -23,12 +23,12 @@ namespace DistributedMeshes { template< typename GlobalIndexType > auto -exchangeGhostEntitySeeds( MPI_Comm group, +exchangeGhostEntitySeeds( MPI_Comm communicator, const std::vector< std::vector< GlobalIndexType > >& seeds_vertex_indices, const std::vector< std::vector< GlobalIndexType > >& seeds_entity_offsets ) { - const int rank = MPI::GetRank( group ); - const int nproc = MPI::GetSize( group ); + const int rank = MPI::GetRank( communicator ); + const int nproc = MPI::GetSize( communicator ); // exchange sizes of the arrays Containers::Array< GlobalIndexType, Devices::Host, int > sizes_vertex_indices( nproc ), sizes_entity_offsets( nproc ); @@ -40,10 +40,10 @@ exchangeGhostEntitySeeds( MPI_Comm group, } MPI::Alltoall( sendbuf_indices.getData(), 1, sizes_vertex_indices.getData(), 1, - group ); + communicator ); MPI::Alltoall( sendbuf_offsets.getData(), 1, sizes_entity_offsets.getData(), 1, - group ); + communicator ); } // allocate arrays for the results @@ -65,11 +65,11 @@ exchangeGhostEntitySeeds( MPI_Comm group, requests.push_back( MPI::Irecv( foreign_seeds_vertex_indices[ j ].data(), foreign_seeds_vertex_indices[ j ].size(), - 
j, 0, group ) ); + j, 0, communicator ) ); requests.push_back( MPI::Irecv( foreign_seeds_entity_offsets[ j ].data(), foreign_seeds_entity_offsets[ j ].size(), - j, 1, group ) ); + j, 1, communicator ) ); } // issue all async send operations @@ -79,11 +79,11 @@ exchangeGhostEntitySeeds( MPI_Comm group, requests.push_back( MPI::Isend( seeds_vertex_indices[ i ].data(), seeds_vertex_indices[ i ].size(), - i, 0, group ) ); + i, 0, communicator ) ); requests.push_back( MPI::Isend( seeds_entity_offsets[ i ].data(), seeds_entity_offsets[ i ].size(), - i, 1, group ) ); + i, 1, communicator ) ); } // wait for all communications to finish @@ -94,12 +94,12 @@ exchangeGhostEntitySeeds( MPI_Comm group, template< typename GlobalIndexType > auto -exchangeGhostIndices( MPI_Comm group, +exchangeGhostIndices( MPI_Comm communicator, const std::vector< std::vector< GlobalIndexType > >& foreign_ghost_indices, const std::vector< std::vector< GlobalIndexType > >& seeds_local_indices ) { - const int rank = MPI::GetRank( group ); - const int nproc = MPI::GetSize( group ); + const int rank = MPI::GetRank( communicator ); + const int nproc = MPI::GetSize( communicator ); // allocate arrays for the results std::vector< std::vector< GlobalIndexType > > ghost_indices; @@ -117,7 +117,7 @@ exchangeGhostIndices( MPI_Comm group, requests.push_back( MPI::Irecv( ghost_indices[ j ].data(), ghost_indices[ j ].size(), - j, 0, group ) ); + j, 0, communicator ) ); } // issue all async send operations @@ -127,7 +127,7 @@ exchangeGhostIndices( MPI_Comm group, requests.push_back( MPI::Isend( foreign_ghost_indices[ i ].data(), foreign_ghost_indices[ i ].size(), - i, 0, group ) ); + i, 0, communicator ) ); } // wait for all communications to finish @@ -155,8 +155,8 @@ distributeSubentities( DistributedMesh& mesh, bool preferHighRanks = true ) if( mesh.getGhostLevels() <= 0 ) throw std::logic_error( "There are no ghost levels on the distributed mesh." 
); - const int rank = MPI::GetRank( mesh.getCommunicationGroup() ); - const int nproc = MPI::GetSize( mesh.getCommunicationGroup() ); + const int rank = MPI::GetRank( mesh.getCommunicator() ); + const int nproc = MPI::GetSize( mesh.getCommunicator() ); // 0. exchange cell data to prepare getCellOwner for use in getEntityOwner DistributedMeshSynchronizer< DistributedMesh, DistributedMesh::getMeshDimension() > cell_synchronizer; @@ -238,7 +238,7 @@ distributeSubentities( DistributedMesh& mesh, bool preferHighRanks = true ) sendbuf.setValue( localEntitiesCount ); MPI::Alltoall( sendbuf.getData(), 1, globalOffsets.getData(), 1, - mesh.getCommunicationGroup() ); + mesh.getCommunicator() ); } Algorithms::inplaceExclusiveScan( globalOffsets ); @@ -289,7 +289,7 @@ distributeSubentities( DistributedMesh& mesh, bool preferHighRanks = true ) } // 5. exchange seeds for ghost entities - const auto foreign_seeds = exchangeGhostEntitySeeds( mesh.getCommunicationGroup(), seeds_vertex_indices, seeds_entity_offsets ); + const auto foreign_seeds = exchangeGhostEntitySeeds( mesh.getCommunicator(), seeds_vertex_indices, seeds_entity_offsets ); const auto& foreign_seeds_vertex_indices = std::get< 0 >( foreign_seeds ); const auto& foreign_seeds_entity_offsets = std::get< 1 >( foreign_seeds ); @@ -374,7 +374,7 @@ distributeSubentities( DistributedMesh& mesh, bool preferHighRanks = true ) }); // 6b. exchange global ghost indices - const auto ghost_indices = exchangeGhostIndices( mesh.getCommunicationGroup(), foreign_ghost_indices, seeds_local_indices ); + const auto ghost_indices = exchangeGhostIndices( mesh.getCommunicator(), foreign_ghost_indices, seeds_local_indices ); // 6c. set the global indices of our ghost entities bool done = true; @@ -388,7 +388,7 @@ distributeSubentities( DistributedMesh& mesh, bool preferHighRanks = true ) // 6d. 
check if finished bool all_done = false; - MPI::Allreduce( &done, &all_done, 1, MPI_LAND, mesh.getCommunicationGroup() ); + MPI::Allreduce( &done, &all_done, 1, MPI_LAND, mesh.getCommunicator() ); if( all_done ) break; } diff --git a/src/TNL/Meshes/Readers/PVTIReader.h b/src/TNL/Meshes/Readers/PVTIReader.h index 742578dfc..2d53908e4 100644 --- a/src/TNL/Meshes/Readers/PVTIReader.h +++ b/src/TNL/Meshes/Readers/PVTIReader.h @@ -142,13 +142,13 @@ class PVTIReader throw MeshReaderError( "PVTIReader", "the file does not contain any element." ); // check that the number of pieces matches the number of MPI ranks - const int nproc = MPI::GetSize( group ); + const int nproc = MPI::GetSize( communicator ); if( (int) pieceSources.size() != nproc ) throw MeshReaderError( "PVTIReader", "the number of subdomains does not match the number of MPI ranks (" + std::to_string(pieceSources.size()) + " vs " + std::to_string(nproc) + ")." ); // read the local piece source - const int rank = MPI::GetRank( group ); + const int rank = MPI::GetRank( communicator ); localReader.setFileName( pieceSources[ rank ] ); localReader.detectMesh(); @@ -178,8 +178,8 @@ class PVTIReader public: PVTIReader() = default; - PVTIReader( const std::string& fileName, MPI_Comm group = MPI::AllGroup() ) - : XMLVTK( fileName ), group( group ) + PVTIReader( const std::string& fileName, MPI_Comm communicator = MPI_COMM_WORLD ) + : XMLVTK( fileName ), communicator( communicator ) {} virtual void detectMesh() override @@ -219,8 +219,8 @@ public: if( meshType != "Meshes::DistributedGrid" ) throw MeshReaderError( "MeshReader", "the file does not contain a distributed structured grid, it is " + meshType ); - // set the communication group - mesh.setCommunicationGroup( group ); + // set the communicator + mesh.setCommunicator( communicator ); // TODO: set the domain decomposition // mesh.setDomainDecomposition( decomposition ); @@ -250,7 +250,7 @@ public: localReader.loadMesh( localMesh ); if( localMesh != 
mesh.getLocalMesh() ) { std::stringstream msg; - msg << "The grid from the " << MPI::GetRank( group ) << "-th subdomain .vti file does not match the local grid of the DistributedGrid." + msg << "The grid from the " << MPI::GetRank( communicator ) << "-th subdomain .vti file does not match the local grid of the DistributedGrid." << "\n- Grid from the .vti file:\n" << localMesh << "\n- Local grid from the DistributedGrid:\n" << mesh.getLocalMesh(); throw MeshReaderError( "PVTIReader", msg.str() ); @@ -318,7 +318,7 @@ public: } protected: - MPI_Comm group; + MPI_Comm communicator; int ghostLevels = 0; int minCommonVertices = 0; diff --git a/src/TNL/Meshes/Readers/PVTUReader.h b/src/TNL/Meshes/Readers/PVTUReader.h index 99b05b53c..827f2c579 100644 --- a/src/TNL/Meshes/Readers/PVTUReader.h +++ b/src/TNL/Meshes/Readers/PVTUReader.h @@ -68,13 +68,13 @@ class PVTUReader throw MeshReaderError( "PVTUReader", "the file does not contain any element." ); // check that the number of pieces matches the number of MPI ranks - const int nproc = MPI::GetSize( group ); + const int nproc = MPI::GetSize( communicator ); if( (int) pieceSources.size() != nproc ) throw MeshReaderError( "PVTUReader", "the number of subdomains does not match the number of MPI ranks (" + std::to_string(pieceSources.size()) + " vs " + std::to_string(nproc) + ")." 
); // read the local piece source - const int rank = MPI::GetRank( group ); + const int rank = MPI::GetRank( communicator ); localReader.setFileName( pieceSources[ rank ] ); localReader.detectMesh(); @@ -103,8 +103,8 @@ class PVTUReader public: PVTUReader() = default; - PVTUReader( const std::string& fileName, MPI_Comm group = MPI::AllGroup() ) - : XMLVTK( fileName ), group( group ) + PVTUReader( const std::string& fileName, MPI_Comm communicator = MPI_COMM_WORLD ) + : XMLVTK( fileName ), communicator( communicator ) {} virtual void detectMesh() override @@ -222,14 +222,14 @@ public: if( minCount == 0 ) { // split the communicator, remove the ranks which did not get a subdomain const int color = (pointsCount > 0 && cellsCount > 0) ? 0 : MPI_UNDEFINED; - const MPI_Comm subgroup = MPI::Comm_split( group, color, 0 ); + const MPI_Comm subCommunicator = MPI::Comm_split( communicator, color, 0 ); - // set the communication group - mesh.setCommunicationGroup( subgroup ); + // set the communicator + mesh.setCommunicator( subCommunicator ); } else { - // set the communication group - mesh.setCommunicationGroup( group ); + // set the communicator + mesh.setCommunicator( communicator ); } } @@ -255,7 +255,7 @@ public: } protected: - MPI_Comm group; + MPI_Comm communicator; int ghostLevels = 0; int minCommonVertices = 0; diff --git a/src/TNL/Meshes/Writers/PVTIWriter.hpp b/src/TNL/Meshes/Writers/PVTIWriter.hpp index 9fa1293fe..3922aa2f4 100644 --- a/src/TNL/Meshes/Writers/PVTIWriter.hpp +++ b/src/TNL/Meshes/Writers/PVTIWriter.hpp @@ -207,12 +207,12 @@ std::string PVTIWriter< Grid >::addPiece( const String& mainFileName, const DistributedMeshes::DistributedMesh< Grid >& distributedMesh ) { - const MPI_Comm group = distributedMesh.getCommunicationGroup(); + const MPI_Comm communicator = distributedMesh.getCommunicator(); const typename Grid::CoordinatesType& globalBegin = distributedMesh.getGlobalBegin() - distributedMesh.getLowerOverlap(); const typename Grid::CoordinatesType& 
globalEnd = globalBegin + distributedMesh.getLocalSize() + distributedMesh.getUpperOverlap(); // exchange globalBegin and globalEnd among the ranks - const int nproc = MPI::GetSize( group ); + const int nproc = MPI::GetSize( communicator ); typename Grid::CoordinatesType beginsForScatter[ nproc ]; typename Grid::CoordinatesType endsForScatter[ nproc ]; for( int i = 0; i < nproc; i++ ) { @@ -222,16 +222,16 @@ PVTIWriter< Grid >::addPiece( const String& mainFileName, typename Grid::CoordinatesType globalBegins[ nproc ]; typename Grid::CoordinatesType globalEnds[ nproc ]; // NOTE: exchanging general data types does not work with MPI - //MPI::Alltoall( beginsForScatter, 1, globalBegins, 1, group ); - //MPI::Alltoall( endsForScatter, 1, globalEnds, 1, group ); - MPI::Alltoall( (char*) beginsForScatter, sizeof(typename Grid::CoordinatesType), (char*) globalBegins, sizeof(typename Grid::CoordinatesType), group ); - MPI::Alltoall( (char*) endsForScatter, sizeof(typename Grid::CoordinatesType), (char*) globalEnds, sizeof(typename Grid::CoordinatesType), group ); + //MPI::Alltoall( beginsForScatter, 1, globalBegins, 1, communicator ); + //MPI::Alltoall( endsForScatter, 1, globalEnds, 1, communicator ); + MPI::Alltoall( (char*) beginsForScatter, sizeof(typename Grid::CoordinatesType), (char*) globalBegins, sizeof(typename Grid::CoordinatesType), communicator ); + MPI::Alltoall( (char*) endsForScatter, sizeof(typename Grid::CoordinatesType), (char*) globalEnds, sizeof(typename Grid::CoordinatesType), communicator ); // add pieces for all ranks, return the source for the current rank std::string source; - for( int i = 0; i < MPI::GetSize( group ); i++ ) { + for( int i = 0; i < MPI::GetSize( communicator ); i++ ) { const std::string s = addPiece( mainFileName, i, globalBegins[ i ], globalEnds[ i ] ); - if( i == MPI::GetRank( group ) ) + if( i == MPI::GetRank( communicator ) ) source = s; } return source; diff --git a/src/TNL/Meshes/Writers/PVTUWriter.h 
b/src/TNL/Meshes/Writers/PVTUWriter.h index 2f332d20e..92167c6fd 100644 --- a/src/TNL/Meshes/Writers/PVTUWriter.h +++ b/src/TNL/Meshes/Writers/PVTUWriter.h @@ -66,7 +66,7 @@ public: // add all pieces and return the source path for the current rank // (useful for parallel writing) std::string addPiece( const String& mainFileName, - const MPI_Comm group ); + const MPI_Comm communicator ); ~PVTUWriter(); diff --git a/src/TNL/Meshes/Writers/PVTUWriter.hpp b/src/TNL/Meshes/Writers/PVTUWriter.hpp index affee65a2..03b1e9aee 100644 --- a/src/TNL/Meshes/Writers/PVTUWriter.hpp +++ b/src/TNL/Meshes/Writers/PVTUWriter.hpp @@ -139,12 +139,12 @@ PVTUWriter< Mesh >::addPiece( const String& mainFileName, template< typename Mesh > std::string PVTUWriter< Mesh >::addPiece( const String& mainFileName, - const MPI_Comm group ) + const MPI_Comm communicator ) { std::string source; - for( int i = 0; i < MPI::GetSize( group ); i++ ) { + for( int i = 0; i < MPI::GetSize( communicator ); i++ ) { const std::string s = addPiece( mainFileName, i ); - if( i == MPI::GetRank( group ) ) + if( i == MPI::GetRank( communicator ) ) source = s; } return source; diff --git a/src/TNL/Solvers/Linear/BICGStabL_impl.h b/src/TNL/Solvers/Linear/BICGStabL_impl.h index 7c4549854..9eeb0700a 100644 --- a/src/TNL/Solvers/Linear/BICGStabL_impl.h +++ b/src/TNL/Solvers/Linear/BICGStabL_impl.h @@ -50,7 +50,7 @@ solve( ConstVectorViewType b, VectorViewType x ) RealType alpha, beta, gamma, rho_0, rho_1, omega, b_norm; // initial binding to M_tmp sets the correct local range, global size and - // communication group for distributed views + // communicator for distributed views VectorViewType r_0( M_tmp ), r_j( M_tmp ), r_i( M_tmp ), u_0( M_tmp ), Au( M_tmp ), u( M_tmp ); r_0.bind( R.getData(), size ); u_0.bind( U.getData(), size ); diff --git a/src/TNL/Solvers/Linear/GMRES_impl.h b/src/TNL/Solvers/Linear/GMRES_impl.h index 36d925f26..2dc716290 100644 --- a/src/TNL/Solvers/Linear/GMRES_impl.h +++ 
b/src/TNL/Solvers/Linear/GMRES_impl.h @@ -183,7 +183,7 @@ GMRES< Matrix >:: orthogonalize_CGS( const int m, const RealType normb, const RealType beta ) { // initial binding to _M_tmp sets the correct local range, global size and - // communication group for distributed views + // communicator for distributed views VectorViewType v_i( _M_tmp.getView() ); // VectorViewType v_k( _M_tmp.getView() ); @@ -279,7 +279,7 @@ GMRES< Matrix >:: orthogonalize_MGS( const int m, const RealType normb, const RealType beta ) { // initial binding to _M_tmp sets the correct local range, global size and - // communication group for distributed views + // communicator for distributed views VectorViewType v_i( _M_tmp.getView() ); VectorViewType v_k( _M_tmp.getView() ); @@ -353,7 +353,7 @@ GMRES< Matrix >:: orthogonalize_CWY( const int m, const RealType normb, const RealType beta ) { // initial binding to _M_tmp sets the correct local range, global size and - // communication group for distributed views + // communicator for distributed views VectorViewType v_i( _M_tmp.getView() ); VectorViewType y_i( _M_tmp.getView() ); @@ -510,7 +510,7 @@ hauseholder_generate( const int i, norm_yi_squared = 2 * (normz * normz + std::fabs( y_ii ) * normz); } // no-op if the problem is not distributed - MPI::Bcast( &norm_yi_squared, 1, 0, Traits::getCommunicationGroup( *this->matrix ) ); + MPI::Bcast( &norm_yi_squared, 1, 0, Traits::getCommunicator( *this->matrix ) ); // XXX: normalization is slower, but more stable // y_i *= 1.0 / std::sqrt( norm_yi_squared ); @@ -534,7 +534,7 @@ hauseholder_generate( const int i, i, aux ); // no-op if the problem is not distributed - MPI::Allreduce( aux, i, MPI_SUM, Traits::getCommunicationGroup( *this->matrix ) ); + MPI::Allreduce( aux, i, MPI_SUM, Traits::getCommunicator( *this->matrix ) ); // [T_i]_{0..i-1} = - T_{i-1} * t_i * aux for( int k = 0; k < i; k++ ) { @@ -559,7 +559,7 @@ hauseholder_apply_trunc( HostView out, HostView YL_i( &YL[ i * (restarting_max + 1) ], 
restarting_max + 1 ); Algorithms::MultiDeviceMemoryOperations< Devices::Host, DeviceType >::copy( YL_i.getData(), Traits::getLocalView( y_i ).getData(), YL_i.getSize() ); // no-op if the problem is not distributed - MPI::Bcast( YL_i.getData(), YL_i.getSize(), 0, Traits::getCommunicationGroup( *this->matrix ) ); + MPI::Bcast( YL_i.getData(), YL_i.getSize(), 0, Traits::getCommunicator( *this->matrix ) ); // NOTE: aux = t_i * (y_i, z) = 1 since t_i = 2 / ||y_i||^2 and // (y_i, z) = ||z_trunc||^2 + |z_i| ||z_trunc|| = ||y_i||^2 / 2 @@ -579,7 +579,7 @@ hauseholder_apply_trunc( HostView out, } // no-op if the problem is not distributed - MPI::Bcast( out.getData(), i + 1, 0, Traits::getCommunicationGroup( *this->matrix ) ); + MPI::Bcast( out.getData(), i + 1, 0, Traits::getCommunicator( *this->matrix ) ); } template< typename Matrix > @@ -634,7 +634,7 @@ hauseholder_cwy_transposed( VectorViewType z, i + 1, aux ); // no-op if the problem is not distributed - MPI::Allreduce( aux, i + 1, MPI_SUM, Traits::getCommunicationGroup( *this->matrix ) ); + MPI::Allreduce( aux, i + 1, MPI_SUM, Traits::getCommunicator( *this->matrix ) ); // aux = T_i^T * aux // Note that T_i^T is lower triangular, so we can overwrite the aux vector with the result in place diff --git a/src/TNL/Solvers/Linear/Traits.h b/src/TNL/Solvers/Linear/Traits.h index d98b78294..adeebf853 100644 --- a/src/TNL/Solvers/Linear/Traits.h +++ b/src/TNL/Solvers/Linear/Traits.h @@ -49,7 +49,7 @@ struct Traits static ConstLocalViewType getConstLocalView( ConstVectorViewType v ) { return v; } static LocalViewType getLocalView( VectorViewType v ) { return v; } - static MPI_Comm getCommunicationGroup( const Matrix& m ) { return MPI::AllGroup(); } + static MPI_Comm getCommunicator( const Matrix& m ) { return MPI_COMM_WORLD; } static void startSynchronization( VectorViewType v ) {} static void waitForSynchronization( VectorViewType v ) {} }; @@ -88,7 +88,7 @@ struct Traits< Matrices::DistributedMatrix< Matrix > > static 
ConstLocalViewType getConstLocalView( ConstVectorViewType v ) { return v.getConstLocalView(); } static LocalViewType getLocalView( VectorViewType v ) { return v.getLocalView(); } - static MPI_Comm getCommunicationGroup( const Matrices::DistributedMatrix< Matrix >& m ) { return m.getCommunicationGroup(); } + static MPI_Comm getCommunicator( const Matrices::DistributedMatrix< Matrix >& m ) { return m.getCommunicator(); } static void startSynchronization( VectorViewType v ) { v.startSynchronization(); } static void waitForSynchronization( VectorViewType v ) { v.waitForSynchronization(); } }; diff --git a/src/TNL/Solvers/ODE/Merson_impl.h b/src/TNL/Solvers/ODE/Merson_impl.h index 247318f33..76398123c 100644 --- a/src/TNL/Solvers/ODE/Merson_impl.h +++ b/src/TNL/Solvers/ODE/Merson_impl.h @@ -156,7 +156,7 @@ bool Merson< Problem, SolverMonitor >::solve( DofVectorPointer& _u ) { const RealType localError = max( currentTau / 3.0 * abs( 0.2 * k1 -0.9 * k3 + 0.8 * k4 -0.1 * k5 ) ); - MPI::Allreduce( &localError, &error, 1, MPI_MAX, MPI::AllGroup() ); + MPI::Allreduce( &localError, &error, 1, MPI_MAX, MPI_COMM_WORLD ); } if( adaptivity == 0.0 || error < adaptivity ) diff --git a/src/TNL/String.h b/src/TNL/String.h index b0fb6644c..bb4f02524 100644 --- a/src/TNL/String.h +++ b/src/TNL/String.h @@ -25,20 +25,20 @@ namespace TNL { * \brief Class for managing strings. * * The following example shows common use of String. - * + * * \par Example * \include StringExample.cpp * \par Output * \include StringExample.out - * + * * In addition to methods of this class, check the following related functions: - * + * * \ref convertToString - * + * * \ref operator+ - * + * * \ref mpiSend - * + * * \ref mpiReceive */ class String @@ -54,7 +54,7 @@ class String NoSkip, ///< Do not skip empty characters SkipEmpty ///< Skip empty characters. }; - + /** * \brief Default constructor. * @@ -91,7 +91,7 @@ class String * \brief Inherited constructors. 
*/ using std::string::string; - + /** * \brief Inherited assignment operators. */ @@ -165,12 +165,12 @@ class String * Appends character \e str to this string. */ String& operator+=( char str ); - + /** * \brief This function concatenates strings and returns a newly constructed string object. */ String operator+( char str ) const; - + /** * \brief This function checks whether the given string is equal to \e str. * @@ -180,7 +180,7 @@ class String /** * \brief This function overloads \ref operator!=. - * + * * It returns \e true when the given string is NOT equal to \e str. Otherwise it returns \e true. */ bool operator!=( char str ) const; @@ -203,14 +203,14 @@ class String /** * \brief This function overloads \ref operator==. - * + * * It returns \e true when the given string is equal to \e str. Otherwise it returns \e false. */ bool operator==( const char* str ) const; /** * \brief This function overloads \ref operator!=. - * + * * It returns \e true when the given string is NOT equal to \e str. Otherwise it returns \e true. */ bool operator!=( const char* str ) const; @@ -230,17 +230,17 @@ class String * \brief This function concatenates C strings \e str and returns a newly constructed string object. */ String operator+( const std::string& str ) const; - + /** * \brief This function overloads \ref operator==. - * + * * It returns \e true when the given string is equal to \e str. Otherwise it returns \e false. */ bool operator==( const std::string& str ) const; /** * \brief This function overloads \ref operator!=. - * + * * It returns \e true when the given string is NOT equal to \e str. Otherwise it returns \e true. */ bool operator!=( const std::string& str ) const; @@ -270,7 +270,7 @@ class String /** * \brief This function overloads \ref operator!=. - * + * * It returns \e true when the given string is NOT equal to \e str. Otherwise it returns \e true. 
 */ bool operator!=( const String& str ) const; @@ -296,7 +296,7 @@ class String * It replaces \e pattern in this string with a string \e replaceWith. * If parameter \e count is defined, the function makes replacement only count occurrences, * of the given pattern. If \e count is zero, all pattern occurrences are replaced. - * + * * @param pattern to be replaced. * @param replaceWith string the \e pattern will be replaced with. * @param count number of occurrences to be replaced. All occurrences are replaced if \e count is zero.. @@ -314,13 +314,13 @@ class String * \brief Trims/strips this string. * * Removes all 'spaces' from given string except for single 'spaces' between words. - * + * * @param strip can be used to change the character to be removed. - * + * * \par Example * \include StringExampleStrip.cpp * \par Output - * \include StringExampleStrip.out + * \include StringExampleStrip.out */ String strip( char strip = ' ' ) const; @@ -332,14 +332,14 @@ class String * anywhere in the given string, this function returns a single-element list * containing given sting. If \e skipEmpty equals \e SkipEmpty no empty substrings are * inserted into the resulting container. - * + * * @param separator is a character separating substrings in given string. - * @param skipEmpty - * + * @param skipEmpty + * * \par Example * \include StringExampleSplit.cpp * \par Output - * \include StringExampleSplit.out + * \include StringExampleSplit.out */ std::vector< String > split( const char separator = ' ', SplitSkip skipEmpty = SplitSkip::NoSkip ) const; @@ -371,8 +371,8 @@ String operator+( const std::string& string1, const String& string2 ); /** * \brief Converts \e value of type \e T to a String. - * - * \tparam T can be any type fir which operator << is defined. + * + * \tparam T can be any type for which operator << is defined. 
*/ template< typename T > String convertToString( const T& value ) @@ -384,7 +384,7 @@ String convertToString( const T& value ) /** * \brief Specialization of function \ref convertToString for boolean. - * + * * The boolean type is converted to 'true' or 'false'. */ template<> inline String convertToString( const bool& b ) @@ -397,21 +397,21 @@ template<> inline String convertToString( const bool& b ) /** * \brief Sends the string to the target MPI process. - * + * * @param str string to be sent * @param target target MPI process ID * @param tag MPI tag - * @param mpi_comm MPI communication group + * @param mpi_comm MPI communicator */ void mpiSend( const String& str, int target, int tag = 0, MPI_Comm mpi_comm = MPI_COMM_WORLD ); /** * \brief Receives a string from the target MPI process. - * + * * @param str says where the received string is to be saved to * @param source source MPI process ID * @param tag MPI tag - * @param mpi_comm MPI communication group + * @param mpi_comm MPI communicator */ void mpiReceive( String& str, int source, int tag = 0, MPI_Comm mpi_comm = MPI_COMM_WORLD ); diff --git a/src/TNL/TypeTraits.h b/src/TNL/TypeTraits.h index 3a199e1b2..fdb04340f 100644 --- a/src/TNL/TypeTraits.h +++ b/src/TNL/TypeTraits.h @@ -241,16 +241,16 @@ struct IsViewType {}; /** - * \brief Type trait for checking if T has getCommunicationGroup method. + * \brief Type trait for checking if T has getCommunicator method. 
*/ template< typename T > -class HasGetCommunicationGroupMethod +class HasGetCommunicatorMethod { private: typedef char YesType[1]; typedef char NoType[2]; - template< typename C > static YesType& test( decltype(std::declval< C >().getCommunicationGroup()) ); + template< typename C > static YesType& test( decltype(std::declval< C >().getCommunicator()) ); template< typename C > static NoType& test(...); public: diff --git a/src/Tools/tnl-game-of-life.cpp b/src/Tools/tnl-game-of-life.cpp index f864a6df1..8292d5483 100644 --- a/src/Tools/tnl-game-of-life.cpp +++ b/src/Tools/tnl-game-of-life.cpp @@ -139,7 +139,7 @@ bool runGameOfLife( const Mesh& mesh ) } } Index max_count; - TNL::MPI::Allreduce( &count, &max_count, 1, MPI_MAX, mesh.getCommunicationGroup() ); + TNL::MPI::Allreduce( &count, &max_count, 1, MPI_MAX, mesh.getCommunicator() ); std::cout << "Rank " << TNL::MPI::GetRank() << ": count=" << count << ", max_count=" << max_count << std::endl; // FIXME: this is not reliable Index reference_cell = 0; @@ -206,7 +206,7 @@ bool runGameOfLife( const Mesh& mesh ) if( mesh.getGhostLevels() > 0 ) pvtu.template writePCellData< std::uint8_t >( Meshes::VTK::ghostArrayName() ); pvtu.template writePCellData< typename VectorType::RealType >( "function values" ); - const std::string subfilePath = pvtu.addPiece( mainFilePath, mesh.getCommunicationGroup() ); + const std::string subfilePath = pvtu.addPiece( mainFilePath, mesh.getCommunicator() ); // create a .vtu file for local data std::ofstream subfile( subfilePath ); @@ -283,7 +283,7 @@ bool runGameOfLife( const Mesh& mesh ) // check if finished const bool done = max( f_in ) == 0 || iteration > max_iter || f_in == f_out; - TNL::MPI::Allreduce( &done, &all_done, 1, MPI_LAND, mesh.getCommunicationGroup() ); + TNL::MPI::Allreduce( &done, &all_done, 1, MPI_LAND, mesh.getCommunicator() ); } while( all_done == false ); diff --git a/src/Tools/tnl-test-distributed-mesh.h b/src/Tools/tnl-test-distributed-mesh.h index 
833a5d8ac..65e4c6348 100644 --- a/src/Tools/tnl-test-distributed-mesh.h +++ b/src/Tools/tnl-test-distributed-mesh.h @@ -266,7 +266,7 @@ bool testPropagationOverFaces( const Mesh& mesh, int max_iterations ) pvtu.template writePCellData< std::uint8_t >( Meshes::VTK::ghostArrayName() ); pvtu.template writePCellData< Real >( "function values" ); pvtu.template writePCellData< Real >( "test values" ); - const std::string subfilePath = pvtu.addPiece( mainFilePath, mesh.getCommunicationGroup() ); + const std::string subfilePath = pvtu.addPiece( mainFilePath, mesh.getCommunicator() ); // create a .vtu file for local data using Writer = Meshes::Writers::VTUWriter< LocalMesh >; @@ -391,7 +391,7 @@ bool testPropagationOverFaces( const Mesh& mesh, int max_iterations ) // check if finished const bool done = sum( f_K.getData() ) == prev_sum || iteration > max_iterations; - TNL::MPI::Allreduce( &done, &all_done, 1, MPI_LAND, mesh.getCommunicationGroup() ); + TNL::MPI::Allreduce( &done, &all_done, 1, MPI_LAND, mesh.getCommunicator() ); } while( all_done == false ); diff --git a/src/UnitTests/Algorithms/distributedScanTest.h b/src/UnitTests/Algorithms/distributedScanTest.h index 102f49dc6..1ba5277d4 100644 --- a/src/UnitTests/Algorithms/distributedScanTest.h +++ b/src/UnitTests/Algorithms/distributedScanTest.h @@ -22,7 +22,7 @@ using namespace TNL::MPI; * * - Number of processes is not limited. * - Global size is hardcoded as 97 to force non-uniform distribution. - * - Communication group is hardcoded as AllGroup -- it may be changed as needed. + * - Communicator is hardcoded as MPI_COMM_WORLD -- it may be changed as needed. 
 */ template< typename DistributedArray > class DistributedScanTest @@ -40,15 +40,15 @@ protected: using Synchronizer = typename Partitioner< IndexType >::template ArraySynchronizer< DeviceType >; using HostSynchronizer = typename Partitioner< IndexType >::template ArraySynchronizer< Devices::Sequential >; - const MPI_Comm group = AllGroup(); + const MPI_Comm communicator = MPI_COMM_WORLD; DistributedArrayType a, b, c; DistributedArrayView a_view, b_view, c_view; DistributedVectorView av_view, bv_view, cv_view; HostDistributedArrayType array_host, input_host, expected_host; - const int rank = GetRank(group); - const int nproc = GetSize(group); + const int rank = GetRank(communicator); + const int nproc = GetSize(communicator); // should be small enough to have fast tests, but large enough to test // scan with multiple CUDA grids @@ -64,15 +64,15 @@ protected: { resetWorkingArrays(); input_host = a; - input_host.setSynchronizer( std::make_shared<HostSynchronizer>( a.getLocalRange(), ghosts / 2, group ) ); + input_host.setSynchronizer( std::make_shared<HostSynchronizer>( a.getLocalRange(), ghosts / 2, communicator ) ); expected_host = input_host; } void resetWorkingArrays() { - localRange = Partitioner< IndexType >::splitRange( globalSize, group ); - a.setDistribution( localRange, ghosts, globalSize, group ); - a.setSynchronizer( std::make_shared<Synchronizer>( localRange, ghosts / 2, group ) ); + localRange = Partitioner< IndexType >::splitRange( globalSize, communicator ); + a.setDistribution( localRange, ghosts, globalSize, communicator ); + a.setSynchronizer( std::make_shared<Synchronizer>( localRange, ghosts / 2, communicator ) ); a.setValue( -1 ); c = b = a; @@ -633,8 +633,8 @@ TYPED_TEST( DistributedScanTest, distributedInplaceExclusiveScan_linear_sequence TYPED_TEST( DistributedScanTest, multiplication ) { - this->localRange = Partitioner< typename TestFixture::IndexType >::splitRange( 10, this->group ); - this->input_host.setDistribution( this->localRange, 0, 10, this->group ); + this->localRange = Partitioner< typename 
TestFixture::IndexType >::splitRange( 10, this->communicator ); + this->input_host.setDistribution( this->localRange, 0, 10, this->communicator ); this->input_host.setValue( 2 ); this->expected_host = this->input_host; @@ -713,8 +713,8 @@ TYPED_TEST( DistributedScanTest, empty_range ) { using IndexType = typename TestFixture::IndexType; - this->localRange = Partitioner< typename TestFixture::IndexType >::splitRange( 42, this->group ); - this->input_host.setDistribution( this->localRange, 0, 42, this->group ); + this->localRange = Partitioner< typename TestFixture::IndexType >::splitRange( 42, this->communicator ); + this->input_host.setDistribution( this->localRange, 0, 42, this->communicator ); this->input_host.setValue( 1 ); this->expected_host = this->input_host; diff --git a/src/UnitTests/Containers/DistributedArrayTest.h b/src/UnitTests/Containers/DistributedArrayTest.h index bc0edb445..2675d5ad9 100644 --- a/src/UnitTests/Containers/DistributedArrayTest.h +++ b/src/UnitTests/Containers/DistributedArrayTest.h @@ -23,7 +23,7 @@ using namespace TNL::MPI; * * - Number of processes is not limited. * - Global size is hardcoded as 97 to force non-uniform distribution. - * - Communication group is hardcoded as AllGroup -- it may be changed as needed. + * - Communicator is hardcoded as MPI_COMM_WORLD -- it may be changed as needed. */ template< typename DistributedArray > class DistributedArrayTest @@ -39,12 +39,12 @@ protected: const int globalSize = 97; // prime number to force non-uniform distribution - const MPI_Comm group = AllGroup(); + const MPI_Comm communicator = MPI_COMM_WORLD; DistributedArrayType distributedArray; - const int rank = GetRank(group); - const int nproc = GetSize(group); + const int rank = GetRank(communicator); + const int nproc = GetSize(communicator); // some arbitrary even value (but must be 0 if not distributed) const int ghosts = (nproc > 1) ? 
4 : 0; @@ -52,15 +52,15 @@ protected: DistributedArrayTest() { using LocalRangeType = typename DistributedArray::LocalRangeType; - const LocalRangeType localRange = Partitioner< IndexType >::splitRange( globalSize, group ); - distributedArray.setDistribution( localRange, ghosts, globalSize, group ); + const LocalRangeType localRange = Partitioner< IndexType >::splitRange( globalSize, communicator ); + distributedArray.setDistribution( localRange, ghosts, globalSize, communicator ); using Synchronizer = typename Partitioner< IndexType >::template ArraySynchronizer< DeviceType >; - distributedArray.setSynchronizer( std::make_shared<Synchronizer>( localRange, ghosts / 2, group ) ); + distributedArray.setSynchronizer( std::make_shared<Synchronizer>( localRange, ghosts / 2, communicator ) ); EXPECT_EQ( distributedArray.getLocalRange(), localRange ); EXPECT_EQ( distributedArray.getGhosts(), ghosts ); - EXPECT_EQ( distributedArray.getCommunicationGroup(), group ); + EXPECT_EQ( distributedArray.getCommunicator(), communicator ); } }; @@ -87,7 +87,7 @@ TYPED_TEST( DistributedArrayTest, checkSumOfLocalSizes ) { const int localSize = this->distributedArray.getLocalView().getSize(); int sumOfLocalSizes = 0; - Allreduce( &localSize, &sumOfLocalSizes, 1, MPI_SUM, this->group ); + Allreduce( &localSize, &sumOfLocalSizes, 1, MPI_SUM, this->communicator ); EXPECT_EQ( sumOfLocalSizes, this->globalSize ); EXPECT_EQ( this->distributedArray.getSize(), this->globalSize ); } diff --git a/src/UnitTests/Containers/VectorBinaryOperationsTest.h b/src/UnitTests/Containers/VectorBinaryOperationsTest.h index 341418f85..eff28be44 100644 --- a/src/UnitTests/Containers/VectorBinaryOperationsTest.h +++ b/src/UnitTests/Containers/VectorBinaryOperationsTest.h @@ -65,10 +65,10 @@ protected: using LeftVector = DistributedVector< LeftReal, typename Left::DeviceType, typename Left::IndexType >; using RightVector = DistributedVector< RightReal, typename Right::DeviceType, typename Right::IndexType >; - const MPI_Comm group = 
AllGroup(); + const MPI_Comm communicator = MPI_COMM_WORLD; - const int rank = GetRank(group); - const int nproc = GetSize(group); + const int rank = GetRank(communicator); + const int nproc = GetSize(communicator); // some arbitrary value (but must be 0 if not distributed) const int ghosts = (nproc > 1) ? 4 : 0; @@ -97,14 +97,14 @@ protected: #ifdef DISTRIBUTED_VECTOR using LocalRangeType = typename LeftVector::LocalRangeType; using Synchronizer = typename Partitioner< typename Left::IndexType >::template ArraySynchronizer< typename Left::DeviceType >; - const LocalRangeType localRange = Partitioner< typename Left::IndexType >::splitRange( size, group ); + const LocalRangeType localRange = Partitioner< typename Left::IndexType >::splitRange( size, communicator ); - _L1.setDistribution( localRange, ghosts, size, group ); - _L2.setDistribution( localRange, ghosts, size, group ); - _R1.setDistribution( localRange, ghosts, size, group ); - _R2.setDistribution( localRange, ghosts, size, group ); + _L1.setDistribution( localRange, ghosts, size, communicator ); + _L2.setDistribution( localRange, ghosts, size, communicator ); + _R1.setDistribution( localRange, ghosts, size, communicator ); + _R2.setDistribution( localRange, ghosts, size, communicator ); - auto synchronizer = std::make_shared( localRange, ghosts / 2, group ); + auto synchronizer = std::make_shared( localRange, ghosts / 2, communicator ); _L1.setSynchronizer( synchronizer ); _L2.setSynchronizer( synchronizer ); _R1.setSynchronizer( synchronizer ); diff --git a/src/UnitTests/Containers/VectorUnaryOperationsTest.h b/src/UnitTests/Containers/VectorUnaryOperationsTest.h index eb3c65633..c1552cc28 100644 --- a/src/UnitTests/Containers/VectorUnaryOperationsTest.h +++ b/src/UnitTests/Containers/VectorUnaryOperationsTest.h @@ -56,10 +56,10 @@ protected: template< typename Real > using Vector = DistributedVector< Real, typename VectorOrView::DeviceType, typename VectorOrView::IndexType >; - const MPI_Comm group = 
AllGroup(); + const MPI_Comm communicator = MPI_COMM_WORLD; - const int rank = GetRank(group); - const int nproc = GetSize(group); + const int rank = GetRank(communicator); + const int nproc = GetSize(communicator); // some arbitrary even value (but must be 0 if not distributed) const int ghosts = (nproc > 1) ? 4 : 0; @@ -183,14 +183,14 @@ TYPED_TEST_SUITE( VectorUnaryOperationsTest, VectorTypes ); using VectorType = typename TestFixture::VectorType; \ using VectorOrView = typename TestFixture::VectorOrView; \ using LocalRangeType = typename VectorOrView::LocalRangeType; \ - const LocalRangeType localRange = Partitioner< typename VectorOrView::IndexType >::splitRange( size, this->group ); \ + const LocalRangeType localRange = Partitioner< typename VectorOrView::IndexType >::splitRange( size, this->communicator ); \ using Synchronizer = typename Partitioner< typename VectorOrView::IndexType >::template ArraySynchronizer< typename VectorOrView::DeviceType >; \ \ VectorType _V1, _V2; \ - _V1.setDistribution( localRange, this->ghosts, size, this->group ); \ - _V2.setDistribution( localRange, this->ghosts, size, this->group ); \ + _V1.setDistribution( localRange, this->ghosts, size, this->communicator ); \ + _V2.setDistribution( localRange, this->ghosts, size, this->communicator ); \ \ - auto _synchronizer = std::make_shared( localRange, this->ghosts / 2, this->group ); \ + auto _synchronizer = std::make_shared( localRange, this->ghosts / 2, this->communicator ); \ _V1.setSynchronizer( _synchronizer ); \ _V2.setSynchronizer( _synchronizer ); \ \ @@ -207,13 +207,13 @@ TYPED_TEST_SUITE( VectorUnaryOperationsTest, VectorTypes ); using HostVector = typename VectorType::template Self< RealType, Devices::Host >; \ using HostExpectedVector = typename ExpectedVector::template Self< typename ExpectedVector::RealType, Devices::Host >; \ using LocalRangeType = typename VectorOrView::LocalRangeType; \ - const LocalRangeType localRange = Partitioner< typename VectorOrView::IndexType 
>::splitRange( size, this->group ); \ + const LocalRangeType localRange = Partitioner< typename VectorOrView::IndexType >::splitRange( size, this->communicator ); \ using Synchronizer = typename Partitioner< typename VectorOrView::IndexType >::template ArraySynchronizer< typename VectorOrView::DeviceType >; \ \ HostVector _V1h; \ HostExpectedVector expected_h; \ - _V1h.setDistribution( localRange, this->ghosts, size, this->group ); \ - expected_h.setDistribution( localRange, this->ghosts, size, this->group ); \ + _V1h.setDistribution( localRange, this->ghosts, size, this->communicator ); \ + expected_h.setDistribution( localRange, this->ghosts, size, this->communicator ); \ \ const double h = (double) (end - begin) / size; \ for( int i = localRange.getBegin(); i < localRange.getEnd(); i++ ) \ @@ -229,7 +229,7 @@ TYPED_TEST_SUITE( VectorUnaryOperationsTest, VectorTypes ); VectorOrView V1( _V1 ); \ ExpectedVector expected; expected = expected_h; \ \ - auto _synchronizer = std::make_shared( localRange, this->ghosts / 2, this->group ); \ + auto _synchronizer = std::make_shared( localRange, this->ghosts / 2, this->communicator ); \ _V1.setSynchronizer( _synchronizer ); \ expected.setSynchronizer( _synchronizer ); \ expected.startSynchronization(); \ diff --git a/src/UnitTests/Containers/VectorVerticalOperationsTest.h b/src/UnitTests/Containers/VectorVerticalOperationsTest.h index b201f563d..facbdf9a9 100644 --- a/src/UnitTests/Containers/VectorVerticalOperationsTest.h +++ b/src/UnitTests/Containers/VectorVerticalOperationsTest.h @@ -57,10 +57,10 @@ protected: template< typename Real > using Vector = DistributedVector< Real, typename VectorOrView::DeviceType, typename VectorOrView::IndexType >; - const MPI_Comm group = AllGroup(); + const MPI_Comm communicator = MPI_COMM_WORLD; - const int rank = GetRank(group); - const int nproc = GetSize(group); + const int rank = GetRank(communicator); + const int nproc = GetSize(communicator); // some arbitrary value (but must be 0 
if not distributed) const int ghosts = (nproc > 1) ? 4 : 0; @@ -85,9 +85,9 @@ protected: #ifdef DISTRIBUTED_VECTOR using LocalRangeType = typename VectorOrView::LocalRangeType; using Synchronizer = typename Partitioner< typename VectorOrView::IndexType >::template ArraySynchronizer< typename VectorOrView::DeviceType >; - const LocalRangeType localRange = Partitioner< typename VectorOrView::IndexType >::splitRange( size, group ); - _V1.setDistribution( localRange, ghosts, size, group ); - _V1.setSynchronizer( std::make_shared( localRange, ghosts / 2, group ) ); + const LocalRangeType localRange = Partitioner< typename VectorOrView::IndexType >::splitRange( size, communicator ); + _V1.setDistribution( localRange, ghosts, size, communicator ); + _V1.setSynchronizer( std::make_shared( localRange, ghosts / 2, communicator ) ); #else _V1.setSize( size ); #endif diff --git a/src/UnitTests/Containers/ndarray/DistributedNDArrayOverlaps_1D_test.h b/src/UnitTests/Containers/ndarray/DistributedNDArrayOverlaps_1D_test.h index 10422b094..a6f3a6b83 100644 --- a/src/UnitTests/Containers/ndarray/DistributedNDArrayOverlaps_1D_test.h +++ b/src/UnitTests/Containers/ndarray/DistributedNDArrayOverlaps_1D_test.h @@ -23,7 +23,7 @@ using namespace TNL::Containers; * * - Number of processes is not limited. * - Global size is hardcoded as 97 to force non-uniform distribution. - * - Communication group is hardcoded as AllGroup -- it may be changed as needed. + * - Communicator is hardcoded as MPI_COMM_WORLD -- it may be changed as needed. 
*/ template< typename DistributedNDArray > class DistributedNDArrayOverlaps_1D_test @@ -42,23 +42,23 @@ protected: const int globalSize = 97; // prime number to force non-uniform distribution const int overlaps = __ndarray_impl::get< 0 >( typename DistributedNDArray::OverlapsType{} ); - const MPI_Comm group = TNL::MPI::AllGroup(); + const MPI_Comm communicator = MPI_COMM_WORLD; DistributedNDArrayType distributedNDArray; - const int rank = TNL::MPI::GetRank(group); - const int nproc = TNL::MPI::GetSize(group); + const int rank = TNL::MPI::GetRank(communicator); + const int nproc = TNL::MPI::GetSize(communicator); DistributedNDArrayOverlaps_1D_test() { using LocalRangeType = typename DistributedNDArray::LocalRangeType; - const LocalRangeType localRange = Partitioner< IndexType >::splitRange( globalSize, group ); + const LocalRangeType localRange = Partitioner< IndexType >::splitRange( globalSize, communicator ); distributedNDArray.setSizes( globalSize ); - distributedNDArray.template setDistribution< 0 >( localRange.getBegin(), localRange.getEnd(), group ); + distributedNDArray.template setDistribution< 0 >( localRange.getBegin(), localRange.getEnd(), communicator ); distributedNDArray.allocate(); EXPECT_EQ( distributedNDArray.template getLocalRange< 0 >(), localRange ); - EXPECT_EQ( distributedNDArray.getCommunicationGroup(), group ); + EXPECT_EQ( distributedNDArray.getCommunicator(), communicator ); } }; @@ -88,7 +88,7 @@ TYPED_TEST( DistributedNDArrayOverlaps_1D_test, checkSumOfLocalSizes ) const auto localRange = this->distributedNDArray.template getLocalRange< 0 >(); const int localSize = localRange.getEnd() - localRange.getBegin(); int sumOfLocalSizes = 0; - TNL::MPI::Allreduce( &localSize, &sumOfLocalSizes, 1, MPI_SUM, this->group ); + TNL::MPI::Allreduce( &localSize, &sumOfLocalSizes, 1, MPI_SUM, this->communicator ); EXPECT_EQ( sumOfLocalSizes, this->globalSize ); EXPECT_EQ( this->distributedNDArray.template getSize< 0 >(), this->globalSize ); diff --git 
a/src/UnitTests/Containers/ndarray/DistributedNDArrayOverlaps_semi1D_test.h b/src/UnitTests/Containers/ndarray/DistributedNDArrayOverlaps_semi1D_test.h index 1801f64ba..0aac8756d 100644 --- a/src/UnitTests/Containers/ndarray/DistributedNDArrayOverlaps_semi1D_test.h +++ b/src/UnitTests/Containers/ndarray/DistributedNDArrayOverlaps_semi1D_test.h @@ -25,7 +25,7 @@ static constexpr int Q = 9; * * - Number of processes is not limited. * - Global size is hardcoded as 97 to force non-uniform distribution. - * - Communication group is hardcoded as AllGroup -- it may be changed as needed. + * - Communicator is hardcoded as MPI_COMM_WORLD -- it may be changed as needed. */ template< typename DistributedNDArray > class DistributedNDArrayOverlaps_semi1D_test @@ -44,23 +44,23 @@ protected: const int globalSize = 97; // prime number to force non-uniform distribution const int overlaps = __ndarray_impl::get< 1 >( typename DistributedNDArray::OverlapsType{} ); - const MPI_Comm group = TNL::MPI::AllGroup(); + const MPI_Comm communicator = MPI_COMM_WORLD; DistributedNDArrayType distributedNDArray; - const int rank = TNL::MPI::GetRank(group); - const int nproc = TNL::MPI::GetSize(group); + const int rank = TNL::MPI::GetRank(communicator); + const int nproc = TNL::MPI::GetSize(communicator); DistributedNDArrayOverlaps_semi1D_test() { using LocalRangeType = typename DistributedNDArray::LocalRangeType; - const LocalRangeType localRange = Partitioner< IndexType >::splitRange( globalSize, group ); + const LocalRangeType localRange = Partitioner< IndexType >::splitRange( globalSize, communicator ); distributedNDArray.setSizes( 0, globalSize, globalSize / 2 ); - distributedNDArray.template setDistribution< 1 >( localRange.getBegin(), localRange.getEnd(), group ); + distributedNDArray.template setDistribution< 1 >( localRange.getBegin(), localRange.getEnd(), communicator ); distributedNDArray.allocate(); EXPECT_EQ( distributedNDArray.template getLocalRange< 1 >(), localRange ); - EXPECT_EQ( 
distributedNDArray.getCommunicationGroup(), group ); + EXPECT_EQ( distributedNDArray.getCommunicator(), communicator ); } }; @@ -90,7 +90,7 @@ TYPED_TEST( DistributedNDArrayOverlaps_semi1D_test, checkSumOfLocalSizes ) const auto localRange = this->distributedNDArray.template getLocalRange< 1 >(); const int localSize = localRange.getEnd() - localRange.getBegin(); int sumOfLocalSizes = 0; - TNL::MPI::Allreduce( &localSize, &sumOfLocalSizes, 1, MPI_SUM, this->group ); + TNL::MPI::Allreduce( &localSize, &sumOfLocalSizes, 1, MPI_SUM, this->communicator ); EXPECT_EQ( sumOfLocalSizes, this->globalSize ); EXPECT_EQ( this->distributedNDArray.template getSize< 1 >(), this->globalSize ); diff --git a/src/UnitTests/Containers/ndarray/DistributedNDArray_1D_test.h b/src/UnitTests/Containers/ndarray/DistributedNDArray_1D_test.h index e55192971..9f3a6225e 100644 --- a/src/UnitTests/Containers/ndarray/DistributedNDArray_1D_test.h +++ b/src/UnitTests/Containers/ndarray/DistributedNDArray_1D_test.h @@ -22,7 +22,7 @@ using namespace TNL::Containers; * * - Number of processes is not limited. * - Global size is hardcoded as 97 to force non-uniform distribution. - * - Communication group is hardcoded as AllGroup -- it may be changed as needed. + * - Communicator is hardcoded as MPI_COMM_WORLD -- it may be changed as needed. 
*/ template< typename DistributedNDArray > class DistributedNDArray_1D_test @@ -40,23 +40,23 @@ protected: const int globalSize = 97; // prime number to force non-uniform distribution - const MPI_Comm group = TNL::MPI::AllGroup(); + const MPI_Comm communicator = MPI_COMM_WORLD; DistributedNDArrayType distributedNDArray; - const int rank = TNL::MPI::GetRank(group); - const int nproc = TNL::MPI::GetSize(group); + const int rank = TNL::MPI::GetRank(communicator); + const int nproc = TNL::MPI::GetSize(communicator); DistributedNDArray_1D_test() { using LocalRangeType = typename DistributedNDArray::LocalRangeType; - const LocalRangeType localRange = Partitioner< IndexType >::splitRange( globalSize, group ); + const LocalRangeType localRange = Partitioner< IndexType >::splitRange( globalSize, communicator ); distributedNDArray.setSizes( globalSize ); - distributedNDArray.template setDistribution< 0 >( localRange.getBegin(), localRange.getEnd(), group ); + distributedNDArray.template setDistribution< 0 >( localRange.getBegin(), localRange.getEnd(), communicator ); distributedNDArray.allocate(); EXPECT_EQ( distributedNDArray.template getLocalRange< 0 >(), localRange ); - EXPECT_EQ( distributedNDArray.getCommunicationGroup(), group ); + EXPECT_EQ( distributedNDArray.getCommunicator(), communicator ); } }; @@ -82,7 +82,7 @@ TYPED_TEST( DistributedNDArray_1D_test, checkSumOfLocalSizes ) const auto localRange = this->distributedNDArray.template getLocalRange< 0 >(); const int localSize = localRange.getEnd() - localRange.getBegin(); int sumOfLocalSizes = 0; - TNL::MPI::Allreduce( &localSize, &sumOfLocalSizes, 1, MPI_SUM, this->group ); + TNL::MPI::Allreduce( &localSize, &sumOfLocalSizes, 1, MPI_SUM, this->communicator ); EXPECT_EQ( sumOfLocalSizes, this->globalSize ); EXPECT_EQ( this->distributedNDArray.template getSize< 0 >(), this->globalSize ); } diff --git a/src/UnitTests/Containers/ndarray/DistributedNDArray_semi1D_test.h 
b/src/UnitTests/Containers/ndarray/DistributedNDArray_semi1D_test.h index e6ad0df75..986bbe7b4 100644 --- a/src/UnitTests/Containers/ndarray/DistributedNDArray_semi1D_test.h +++ b/src/UnitTests/Containers/ndarray/DistributedNDArray_semi1D_test.h @@ -24,7 +24,7 @@ static constexpr int Q = 9; * * - Number of processes is not limited. * - Global size is hardcoded as 97 to force non-uniform distribution. - * - Communication group is hardcoded as AllGroup -- it may be changed as needed. + * - Communicator is hardcoded as MPI_COMM_WORLD -- it may be changed as needed. */ template< typename DistributedNDArray > class DistributedNDArray_semi1D_test @@ -42,23 +42,23 @@ protected: const int globalSize = 97; // prime number to force non-uniform distribution - const MPI_Comm group = TNL::MPI::AllGroup(); + const MPI_Comm communicator = MPI_COMM_WORLD; DistributedNDArrayType distributedNDArray; - const int rank = TNL::MPI::GetRank(group); - const int nproc = TNL::MPI::GetSize(group); + const int rank = TNL::MPI::GetRank(communicator); + const int nproc = TNL::MPI::GetSize(communicator); DistributedNDArray_semi1D_test() { using LocalRangeType = typename DistributedNDArray::LocalRangeType; - const LocalRangeType localRange = Partitioner< IndexType >::splitRange( globalSize, group ); + const LocalRangeType localRange = Partitioner< IndexType >::splitRange( globalSize, communicator ); distributedNDArray.setSizes( 0, globalSize, globalSize / 2 ); - distributedNDArray.template setDistribution< 1 >( localRange.getBegin(), localRange.getEnd(), group ); + distributedNDArray.template setDistribution< 1 >( localRange.getBegin(), localRange.getEnd(), communicator ); distributedNDArray.allocate(); EXPECT_EQ( distributedNDArray.template getLocalRange< 1 >(), localRange ); - EXPECT_EQ( distributedNDArray.getCommunicationGroup(), group ); + EXPECT_EQ( distributedNDArray.getCommunicator(), communicator ); } }; @@ -84,7 +84,7 @@ TYPED_TEST( DistributedNDArray_semi1D_test, checkSumOfLocalSizes ) 
const auto localRange = this->distributedNDArray.template getLocalRange< 1 >(); const int localSize = localRange.getEnd() - localRange.getBegin(); int sumOfLocalSizes = 0; - TNL::MPI::Allreduce( &localSize, &sumOfLocalSizes, 1, MPI_SUM, this->group ); + TNL::MPI::Allreduce( &localSize, &sumOfLocalSizes, 1, MPI_SUM, this->communicator ); EXPECT_EQ( sumOfLocalSizes, this->globalSize ); EXPECT_EQ( this->distributedNDArray.template getSize< 1 >(), this->globalSize ); } diff --git a/src/UnitTests/Matrices/DistributedMatrixTest.h b/src/UnitTests/Matrices/DistributedMatrixTest.h index b5298cc24..ddce21e9b 100644 --- a/src/UnitTests/Matrices/DistributedMatrixTest.h +++ b/src/UnitTests/Matrices/DistributedMatrixTest.h @@ -55,7 +55,7 @@ void setMatrix( Matrix& matrix, const RowCapacities& rowCapacities ) * * - Number of processes is not limited. * - Global size is hardcoded as 97 to force non-uniform distribution. - * - Communication group is hardcoded as AllGroup -- it may be changed as needed. + * - Communicator is hardcoded as MPI_COMM_WORLD -- it may be changed as needed. * - Matrix format is hardcoded as CSR. 
*/ template< typename DistributedMatrix > @@ -74,10 +74,10 @@ protected: const int globalSize = 97; // prime number to force non-uniform distribution - const MPI_Comm group = AllGroup(); + const MPI_Comm communicator = MPI_COMM_WORLD; - const int rank = GetRank(group); - const int nproc = GetSize(group); + const int rank = GetRank(communicator); + const int nproc = GetSize(communicator); DistributedMatrixType matrix; @@ -86,12 +86,12 @@ protected: DistributedMatrixTest() { using LocalRangeType = typename DistributedMatrix::LocalRangeType; - const LocalRangeType localRange = Containers::Partitioner< IndexType >::splitRange( globalSize, group ); - matrix.setDistribution( localRange, globalSize, globalSize, group ); - rowCapacities.setDistribution( localRange, 0, globalSize, group ); + const LocalRangeType localRange = Containers::Partitioner< IndexType >::splitRange( globalSize, communicator ); + matrix.setDistribution( localRange, globalSize, globalSize, communicator ); + rowCapacities.setDistribution( localRange, 0, globalSize, communicator ); EXPECT_EQ( matrix.getLocalRowRange(), localRange ); - EXPECT_EQ( matrix.getCommunicationGroup(), group ); + EXPECT_EQ( matrix.getCommunicator(), communicator ); setLinearSequence( rowCapacities, 1 ); } @@ -112,7 +112,7 @@ TYPED_TEST( DistributedMatrixTest, checkSumOfLocalSizes ) { const int localSize = this->matrix.getLocalMatrix().getRows(); int sumOfLocalSizes = 0; - Allreduce( &localSize, &sumOfLocalSizes, 1, MPI_SUM, this->group ); + Allreduce( &localSize, &sumOfLocalSizes, 1, MPI_SUM, this->communicator ); EXPECT_EQ( sumOfLocalSizes, this->globalSize ); EXPECT_EQ( this->matrix.getRows(), this->globalSize ); } @@ -212,7 +212,7 @@ TYPED_TEST( DistributedMatrixTest, vectorProduct_globalInput ) GlobalVector inVector( this->globalSize ); inVector.setValue( 1 ); - DistributedVector outVector( this->matrix.getLocalRowRange(), 0, this->globalSize, this->matrix.getCommunicationGroup() ); + DistributedVector outVector( 
this->matrix.getLocalRowRange(), 0, this->globalSize, this->matrix.getCommunicator() ); this->matrix.vectorProduct( inVector, outVector ); EXPECT_EQ( outVector, this->rowCapacities ) @@ -227,9 +227,9 @@ TYPED_TEST( DistributedMatrixTest, vectorProduct_distributedInput ) this->matrix.setRowCapacities( this->rowCapacities ); setMatrix( this->matrix, this->rowCapacities ); - DistributedVector inVector( this->matrix.getLocalRowRange(), 0, this->globalSize, this->matrix.getCommunicationGroup() ); + DistributedVector inVector( this->matrix.getLocalRowRange(), 0, this->globalSize, this->matrix.getCommunicator() ); inVector.setValue( 1 ); - DistributedVector outVector( this->matrix.getLocalRowRange(), 0, this->globalSize, this->matrix.getCommunicationGroup() ); + DistributedVector outVector( this->matrix.getLocalRowRange(), 0, this->globalSize, this->matrix.getCommunicator() ); this->matrix.vectorProduct( inVector, outVector ); EXPECT_EQ( outVector, this->rowCapacities ) diff --git a/src/UnitTests/Meshes/DistributedMeshes/CutDistributedMeshFunctionTest.cpp b/src/UnitTests/Meshes/DistributedMeshes/CutDistributedMeshFunctionTest.cpp index 205b53482..55fb33e10 100644 --- a/src/UnitTests/Meshes/DistributedMeshes/CutDistributedMeshFunctionTest.cpp +++ b/src/UnitTests/Meshes/DistributedMeshes/CutDistributedMeshFunctionTest.cpp @@ -609,8 +609,8 @@ TEST(CutDistributedMeshFunction, 3D_2_Save) // DistributedGridIO,MpiIO> ::save(TEST_FILE_NAME, cutMeshFunction ); //save globalgrid for debug render - MPI_Comm group=cutDistributedGrid.getCommunicationGroup(); - if(TNL::MPI::GetRank(group)==0) + MPI_Comm communicator=cutDistributedGrid.getCommunicator(); + if(TNL::MPI::GetRank(communicator)==0) { // FIXME: save was removed from Grid (but this is just for debugging...) 
// File meshFile; diff --git a/src/UnitTests/Meshes/DistributedMeshes/DistributedMeshTest.h b/src/UnitTests/Meshes/DistributedMeshes/DistributedMeshTest.h index f09f22287..18ed6b4bf 100644 --- a/src/UnitTests/Meshes/DistributedMeshes/DistributedMeshTest.h +++ b/src/UnitTests/Meshes/DistributedMeshes/DistributedMeshTest.h @@ -50,11 +50,11 @@ struct GridDistributor< TNL::Meshes::Grid< 2, Real, Device, Index > > GridDistributor() = delete; - GridDistributor( CoordinatesType rank_sizes, MPI_Comm group ) - : rank(TNL::MPI::GetRank(group)), - nproc(TNL::MPI::GetSize(group)), + GridDistributor( CoordinatesType rank_sizes, MPI_Comm communicator ) + : rank(TNL::MPI::GetRank(communicator)), + nproc(TNL::MPI::GetSize(communicator)), rank_sizes(rank_sizes), - group(group) + communicator(communicator) {} void decompose( const GridType& grid, @@ -258,8 +258,8 @@ struct GridDistributor< TNL::Meshes::Grid< 2, Real, Device, Index > > cells_indices[ pair.second ] = cell_new_global_indices.at( pair.first ); } - // set the communication group - mesh.setCommunicationGroup( group ); + // set the communicator + mesh.setCommunicator( communicator ); if( overlap > 0 ) { // distribute faces @@ -324,7 +324,7 @@ struct GridDistributor< TNL::Meshes::Grid< 2, Real, Device, Index > > // input parameters int rank, nproc; CoordinatesType rank_sizes; - MPI_Comm group; + MPI_Comm communicator; // output attributes (byproduct of the decomposition, useful for testing) CoordinatesType rank_coordinates, local_size, vert_begin, vert_end, cell_begin, cell_end; Index verticesCount, cellsCount, localVerticesCount, localCellsCount; @@ -337,7 +337,7 @@ void validateMesh( const Mesh& mesh, const Distributor& distributor, int ghostLe using Device = typename Mesh::DeviceType; // check basic interface - EXPECT_EQ( mesh.getCommunicationGroup(), TNL::MPI::AllGroup() ); + EXPECT_EQ( mesh.getCommunicator(), MPI_COMM_WORLD ); EXPECT_EQ( mesh.getGhostLevels(), ghostLevels ); if( ghostLevels > 0 ) { EXPECT_EQ( 
mesh.template getGlobalIndices< 0 >().getSize(), mesh.getLocalMesh().template getEntitiesCount< 0 >() ); @@ -396,10 +396,10 @@ void validateMesh( const Mesh& mesh, const Distributor& distributor, int ghostLe cell_sendbuf.setValue( distributor.localCellsCount ); TNL::MPI::Alltoall( vert_sendbuf.getData(), 1, vert_offsets.getData(), 1, - distributor.group ); + distributor.communicator ); TNL::MPI::Alltoall( cell_sendbuf.getData(), 1, cell_offsets.getData(), 1, - distributor.group ); + distributor.communicator ); } vert_offsets.setElement( distributor.nproc, 0 ); cell_offsets.setElement( distributor.nproc, 0 ); @@ -702,7 +702,7 @@ TEST( DistributedMeshTest, 2D_ghostLevel0 ) const int nproc = TNL::MPI::GetSize(); grid.setDimensions( nproc, nproc ); Mesh mesh; - GridDistributor< GridType > distributor( std::sqrt(nproc), TNL::MPI::AllGroup() ); + GridDistributor< GridType > distributor( std::sqrt(nproc), MPI_COMM_WORLD ); const int ghostLevels = 0; distributor.decompose( grid, mesh, ghostLevels ); validateMesh( mesh, distributor, ghostLevels ); @@ -719,7 +719,7 @@ TEST( DistributedMeshTest, 2D_ghostLevel1 ) const int nproc = TNL::MPI::GetSize(); grid.setDimensions( nproc, nproc ); Mesh mesh; - GridDistributor< GridType > distributor( std::sqrt(nproc), TNL::MPI::AllGroup() ); + GridDistributor< GridType > distributor( std::sqrt(nproc), MPI_COMM_WORLD ); const int ghostLevels = 1; distributor.decompose( grid, mesh, ghostLevels ); validateMesh( mesh, distributor, ghostLevels ); @@ -737,7 +737,7 @@ TEST( DistributedMeshTest, 2D_ghostLevel2 ) const int nproc = TNL::MPI::GetSize(); grid.setDimensions( nproc, nproc ); Mesh mesh; - GridDistributor< GridType > distributor( std::sqrt(nproc), TNL::MPI::AllGroup() ); + GridDistributor< GridType > distributor( std::sqrt(nproc), MPI_COMM_WORLD ); const int ghostLevels = 2; distributor.decompose( grid, mesh, ghostLevels ); validateMesh( mesh, distributor, ghostLevels ); @@ -755,7 +755,7 @@ TEST( DistributedMeshTest, PVTUWriterReader ) 
const int nproc = TNL::MPI::GetSize(); grid.setDimensions( nproc, nproc ); Mesh mesh; - GridDistributor< GridType > distributor( std::sqrt(nproc), TNL::MPI::AllGroup() ); + GridDistributor< GridType > distributor( std::sqrt(nproc), MPI_COMM_WORLD ); const int ghostLevels = 2; distributor.decompose( grid, mesh, ghostLevels ); @@ -776,7 +776,7 @@ TEST( DistributedMeshTest, PVTUWriterReader ) pvtu.template writePCellData< std::uint8_t >( Meshes::VTK::ghostArrayName() ); pvtu.template writePCellData< typename Mesh::GlobalIndexType >( "GlobalIndex" ); } - subfilePath = pvtu.addPiece( mainFilePath, mesh.getCommunicationGroup() ); + subfilePath = pvtu.addPiece( mainFilePath, mesh.getCommunicator() ); // create a .vtu file for local data using Writer = Meshes::Writers::VTUWriter< LocalMesh >; -- GitLab From 8c2688a6cdf72f8af696a25a5917f674c271f54d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Klinkovsk=C3=BD?= Date: Thu, 2 Sep 2021 22:01:26 +0200 Subject: [PATCH 05/10] MPI wrappers: added missing default communicator --- src/TNL/MPI/Wrappers.h | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/TNL/MPI/Wrappers.h b/src/TNL/MPI/Wrappers.h index dfaf4e4f4..8a4cb9bdb 100644 --- a/src/TNL/MPI/Wrappers.h +++ b/src/TNL/MPI/Wrappers.h @@ -296,7 +296,7 @@ void Allreduce( const T* data, T* reduced_data, int count, const MPI_Op& op, - MPI_Comm communicator) + MPI_Comm communicator = MPI_COMM_WORLD ) { TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Allreduce cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI @@ -313,7 +313,7 @@ template< typename T > void Allreduce( T* data, int count, const MPI_Op& op, - MPI_Comm communicator) + MPI_Comm communicator = MPI_COMM_WORLD ) { TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Allreduce cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI @@ -329,7 +329,7 @@ void Reduce( const T* data, int count, const MPI_Op& op, int root, - MPI_Comm communicator) + MPI_Comm communicator = MPI_COMM_WORLD ) { 
TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Reduce cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI @@ -340,7 +340,10 @@ void Reduce( const T* data, } template< typename T > -void Bcast( T* data, int count, int root, MPI_Comm communicator) +void Bcast( T* data, + int count, + int root, + MPI_Comm communicator = MPI_COMM_WORLD ) { TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Bcast cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI @@ -354,7 +357,7 @@ void Alltoall( const T* sendData, int sendCount, T* receiveData, int receiveCount, - MPI_Comm communicator ) + MPI_Comm communicator = MPI_COMM_WORLD ) { TNL_ASSERT_NE( communicator, MPI_COMM_NULL, "Alltoall cannot be called with MPI_COMM_NULL" ); #ifdef HAVE_MPI -- GitLab From 4883203c574645391289b27fdfdc5a0456ee19e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Klinkovsk=C3=BD?= Date: Thu, 2 Sep 2021 13:09:34 +0200 Subject: [PATCH 06/10] Refactored enable_if_type into the main TypeTraits.h file --- src/TNL/Containers/Expressions/TypeTraits.h | 3 --- .../DistributedMeshes/DistributedMeshSynchronizer.h | 2 +- src/TNL/Meshes/Writers/VerticesPerEntity.h | 9 +-------- src/TNL/TypeTraits.h | 6 ++++++ 4 files changed, 8 insertions(+), 12 deletions(-) diff --git a/src/TNL/Containers/Expressions/TypeTraits.h b/src/TNL/Containers/Expressions/TypeTraits.h index 9af3ef818..933ec54c8 100644 --- a/src/TNL/Containers/Expressions/TypeTraits.h +++ b/src/TNL/Containers/Expressions/TypeTraits.h @@ -92,9 +92,6 @@ using EnableIfDistributedBinaryExpression_t = std::enable_if_t< // helper trait class for recursively turning expression template classes into compatible vectors -template -struct enable_if_type { typedef R type; }; - template< typename R, typename Enable = void > struct RemoveExpressionTemplate { diff --git a/src/TNL/Meshes/DistributedMeshes/DistributedMeshSynchronizer.h b/src/TNL/Meshes/DistributedMeshes/DistributedMeshSynchronizer.h index 4a75d8ee0..63b302840 100644 --- 
a/src/TNL/Meshes/DistributedMeshes/DistributedMeshSynchronizer.h +++ b/src/TNL/Meshes/DistributedMeshes/DistributedMeshSynchronizer.h @@ -28,7 +28,7 @@ struct HasMeshType {}; template< typename T > -struct HasMeshType< T, typename Containers::Expressions::enable_if_type< typename T::MeshType >::type > +struct HasMeshType< T, typename enable_if_type< typename T::MeshType >::type > : public std::true_type {}; diff --git a/src/TNL/Meshes/Writers/VerticesPerEntity.h b/src/TNL/Meshes/Writers/VerticesPerEntity.h index 5cefb37c7..5ae5e356e 100644 --- a/src/TNL/Meshes/Writers/VerticesPerEntity.h +++ b/src/TNL/Meshes/Writers/VerticesPerEntity.h @@ -12,8 +12,7 @@ #pragma once -#include - +#include #include namespace TNL { @@ -22,12 +21,6 @@ namespace Writers { namespace details { -template< typename T, typename R = void > -struct enable_if_type -{ - using type = R; -}; - template< typename T, typename Enable = void > struct has_entity_topology : std::false_type {}; diff --git a/src/TNL/TypeTraits.h b/src/TNL/TypeTraits.h index fdb04340f..f3d1b88c0 100644 --- a/src/TNL/TypeTraits.h +++ b/src/TNL/TypeTraits.h @@ -15,6 +15,12 @@ namespace TNL { +template< typename T, typename R = void > +struct enable_if_type +{ + using type = R; +}; + /** * \brief Type trait for checking if T has getArrayData method. 
*/ -- GitLab From 9d33be3e9bc399dd79409f0aab06c2aff7701b04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Klinkovsk=C3=BD?= Date: Thu, 2 Sep 2021 13:33:30 +0200 Subject: [PATCH 07/10] Refactored IsViewType to work even for types that do not have a ViewType member type --- src/TNL/TypeTraits.h | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/TNL/TypeTraits.h b/src/TNL/TypeTraits.h index f3d1b88c0..66179440b 100644 --- a/src/TNL/TypeTraits.h +++ b/src/TNL/TypeTraits.h @@ -242,9 +242,17 @@ struct IsStaticArrayType */ template< typename T > struct IsViewType -: public std::integral_constant< bool, - std::is_same< typename std::decay_t::ViewType, T >::value > -{}; +{ +private: + template< typename C > static constexpr auto test(C) + -> std::integral_constant< bool, + std::is_same< typename C::ViewType, C >::value + >; + static constexpr std::false_type test(...); + +public: + static constexpr bool value = decltype( test(std::decay_t{}) )::value; +}; /** * \brief Type trait for checking if T has getCommunicator method. -- GitLab From e059f1a144aa79cc5fd40211383a0dbebc7bae61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Klinkovsk=C3=BD?= Date: Thu, 2 Sep 2021 14:54:28 +0200 Subject: [PATCH 08/10] Replaced send/receive for Array(,View) and mpiSend/mpiReceive for String with a general implementation in the MPI namespace Also added analogous functions: MPI::sendrecv, MPI::bcast. 
--- src/TNL/Containers/Array.h | 7 -- src/TNL/Containers/Array.hpp | 21 ----- src/TNL/Containers/ArrayView.h | 5 -- src/TNL/Containers/ArrayView.hpp | 12 --- src/TNL/MPI/Print.h | 10 +-- src/TNL/MPI/Utils.h | 123 ++++++++++++++++++++++++++ src/TNL/String.h | 45 +++------- src/TNL/String.hpp | 57 +++---------- src/UnitTests/CMakeLists.txt | 1 + src/UnitTests/MPI/CMakeLists.txt | 14 +++ src/UnitTests/MPI/MPIUtilsTest.cpp | 133 +++++++++++++++++++++++++++++ 11 files changed, 298 insertions(+), 130 deletions(-) create mode 100644 src/UnitTests/MPI/CMakeLists.txt create mode 100644 src/UnitTests/MPI/MPIUtilsTest.cpp diff --git a/src/TNL/Containers/Array.h b/src/TNL/Containers/Array.h index 8e76ea9f3..bccc09650 100644 --- a/src/TNL/Containers/Array.h +++ b/src/TNL/Containers/Array.h @@ -793,13 +793,6 @@ File& operator>>( File& file, Array< Value, Device, Index, Allocator >& array ); template< typename Value, typename Device, typename Index, typename Allocator > File& operator>>( File&& file, Array< Value, Device, Index, Allocator >& array ); -template< typename Value, typename Device, typename Index, typename Allocator > -void send( const Array< Value, Device, Index, Allocator >& array, int dest, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD ); - -template< typename Value, typename Device, typename Index, typename Allocator > -void receive( Array< Value, Device, Index, Allocator >& array, int src, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD ); - - } // namespace Containers } // namespace TNL diff --git a/src/TNL/Containers/Array.hpp b/src/TNL/Containers/Array.hpp index e01566e50..180e94980 100644 --- a/src/TNL/Containers/Array.hpp +++ b/src/TNL/Containers/Array.hpp @@ -845,26 +845,5 @@ File& operator>>( File&& file, Array< Value, Device, Index, Allocator >& array ) return f >> array; } -template< typename Value, typename Device, typename Index, typename Allocator > -void send( const Array< Value, Device, Index, Allocator >& array, int dest, int tag, MPI_Comm comm ) -{ - 
send( array.getConstView(), dest, tag, comm ); -} - -template< typename Value, typename Device, typename Index, typename Allocator > -void receive( Array< Value, Device, Index, Allocator >& array, int src, int tag, MPI_Comm comm ) -{ -#ifdef HAVE_MPI - TNL_ASSERT_TRUE( false, "Does not work" ); - MPI_Status status; - Index size; - MPI_Recv( ( void* ) size, 1, MPI::getDataType< Index >(), src, tag, comm, &status ); - std::cerr << "Size = " << size << std::endl; - array.setSize( size ); - MPI_Recv( ( void* ) array.getData(), size * sizeof( Value ), MPI_BYTE, src, tag, comm, &status ); -#endif -} - - } // namespace Containers } // namespace TNL diff --git a/src/TNL/Containers/ArrayView.h b/src/TNL/Containers/ArrayView.h index ad032491d..8431bd96a 100644 --- a/src/TNL/Containers/ArrayView.h +++ b/src/TNL/Containers/ArrayView.h @@ -18,7 +18,6 @@ #include #include #include -#include namespace TNL { namespace Containers { @@ -596,10 +595,6 @@ File& operator>>( File& file, ArrayView< Value, Device, Index > view ); template< typename Value, typename Device, typename Index > File& operator>>( File&& file, ArrayView< Value, Device, Index > view ); -template< typename Value, typename Device, typename Index > -void send( const ArrayView< Value, Device, Index >& view, int dest, int tag = 0, MPI_Comm comm = MPI_COMM_WORLD ); - - } // namespace Containers } // namespace TNL diff --git a/src/TNL/Containers/ArrayView.hpp b/src/TNL/Containers/ArrayView.hpp index 7771f7dc6..402c920e0 100644 --- a/src/TNL/Containers/ArrayView.hpp +++ b/src/TNL/Containers/ArrayView.hpp @@ -472,17 +472,5 @@ File& operator>>( File&& file, ArrayView< Value, Device, Index > view ) return f >> view; } -template< typename Value, typename Device, typename Index > -void send( const ArrayView< Value, Device, Index >& view, int dest, int tag, MPI_Comm comm ) -{ -#ifdef HAVE_MPI - TNL_ASSERT_TRUE( false, "Does not work" ); - auto size = view.getSize(); - MPI_Send( ( const void* ) size, 1, MPI::getDataType< Index 
>(), dest, tag, comm ); - MPI_Send( ( const void* ) view.getData(), view.getSize() * sizeof( Value ), MPI_BYTE, dest, tag, comm ); -#endif -} - - } // namespace Containers } // namespace TNL diff --git a/src/TNL/MPI/Print.h b/src/TNL/MPI/Print.h index 5cd4819a2..631c3aaaf 100644 --- a/src/TNL/MPI/Print.h +++ b/src/TNL/MPI/Print.h @@ -14,7 +14,7 @@ #include #include -#include +#include #ifdef HAVE_MPI #define TNL_MPI_PRINT( message ) \ @@ -28,7 +28,7 @@ else __tnl_mpi_print_stream_ << "Node " << TNL::MPI::GetRank() << " of " << TNL::MPI::GetSize() << " : " \ << message << std::endl; \ TNL::String __tnl_mpi_print_string_( __tnl_mpi_print_stream_.str() ); \ - mpiSend( __tnl_mpi_print_string_, 0, std::numeric_limits< int >::max() ); \ + TNL::MPI::send( __tnl_mpi_print_string_, 0, std::numeric_limits< int >::max() ); \ } \ else \ { \ @@ -36,7 +36,7 @@ else for( int __tnl_mpi_print_j = 1; __tnl_mpi_print_j < TNL::MPI::GetSize(); __tnl_mpi_print_j++ ) \ { \ TNL::String __tnl_mpi_print_string_; \ - mpiReceive( __tnl_mpi_print_string_, __tnl_mpi_print_j, std::numeric_limits< int >::max() ); \ + TNL::MPI::recv( __tnl_mpi_print_string_, __tnl_mpi_print_j, std::numeric_limits< int >::max() ); \ std::cerr << __tnl_mpi_print_string_; \ } \ } \ @@ -79,7 +79,7 @@ else __tnl_mpi_print_stream_ << "Node " << TNL::MPI::GetRank() << " of " << TNL::MPI::GetSize() << " : " \ << message << std::endl; \ TNL::String __tnl_mpi_print_string_( __tnl_mpi_print_stream_.str() ); \ - mpiSend( __tnl_mpi_print_string_, 0, std::numeric_limits< int >::max() ); \ + TNL::MPI::send( __tnl_mpi_print_string_, 0, std::numeric_limits< int >::max() ); \ } \ } \ else \ @@ -93,7 +93,7 @@ else if( __tnl_mpi_print_cond ) \ { \ TNL::String __tnl_mpi_print_string_; \ - mpiReceive( __tnl_mpi_print_string_, __tnl_mpi_print_j, std::numeric_limits< int >::max() ); \ + TNL::MPI::recv( __tnl_mpi_print_string_, __tnl_mpi_print_j, std::numeric_limits< int >::max() ); \ std::cerr << __tnl_mpi_print_string_; \ } \ } \ diff 
--git a/src/TNL/MPI/Utils.h b/src/TNL/MPI/Utils.h index 2cc848124..85fbaae1c 100644 --- a/src/TNL/MPI/Utils.h +++ b/src/TNL/MPI/Utils.h @@ -11,6 +11,7 @@ #pragma once #include +#include #include "Wrappers.h" @@ -97,5 +98,127 @@ T reduce( T value, const MPI_Op& op, MPI_Comm communicator = MPI_COMM_WORLD ) return value; } +/** + * \brief Send data from an array (or array view or a string) to a different + * rank. + * + * The destination rank must call \ref recv with a corresponding data structure + * to receive the data. + */ +template< typename Array > +void send( const Array& array, int dest, int tag, MPI_Comm communicator = MPI_COMM_WORLD ) +{ + const auto size = array.getSize(); + MPI::Send( &size, 1, dest, tag, communicator ); + MPI::Send( array.getData(), array.getSize(), dest, tag, communicator ); +} + +/** + * \brief Receive data into an array (or a string) from a different rank. + * + * The data must be coming from a rank that called \ref send on a corresponding + * data structure. + */ +template< typename Array > +std::enable_if_t< ! IsViewType< Array >::value > +recv( Array& array, int src, int tag, MPI_Comm communicator = MPI_COMM_WORLD ) +{ + using Index = decltype(array.getSize()); + Index size; + MPI::Recv( &size, 1, src, tag, communicator ); + array.setSize( size ); + MPI::Recv( array.getData(), size, src, tag, communicator ); +} + +/** + * \brief Receive data into an array view from a different rank. + * + * The data must be coming from a rank that called \ref send on a corresponding + * data structure. + * + * Since views are not resizable, the size of the incoming data must match the + * array view size, otherwise \ref std::runtime_error is thrown. 
+ */ +template< typename Array > +std::enable_if_t< IsViewType< Array >::value > +recv( Array& view, int src, int tag, MPI_Comm communicator = MPI_COMM_WORLD ) +{ + using Index = decltype(view.getSize()); + Index size; + MPI::Recv( &size, 1, src, tag, communicator ); + if( size != view.getSize() ) + throw std::runtime_error( "MPI::recv error: The received size (" + std::to_string(size) + ") does not match " + "the array view size (" + std::to_string(view.getSize()) + ")" ); + MPI::Recv( view.getData(), size, src, tag, communicator ); +} + +/** + * \brief Send and receive data from/into an array (or a string) to/from a + * different rank. + */ +template< typename SendArray, typename RecvArray > +std::enable_if_t< ! IsViewType< RecvArray >::value > +sendrecv( const SendArray& sendArray, + int dest, + int sendTag, + RecvArray& recvArray, + int src, + int recvTag, + MPI_Comm communicator = MPI_COMM_WORLD ) +{ + using SendIndex = decltype(sendArray.getSize()); + using RecvIndex = decltype(recvArray.getSize()); + + const SendIndex sendSize = sendArray.getSize(); + RecvIndex recvSize; + MPI::Sendrecv( &sendSize, 1, dest, sendTag, &recvSize, 1, src, recvTag, communicator ); + recvArray.setSize( recvSize ); + MPI::Sendrecv( sendArray.getData(), sendArray.getSize(), dest, sendTag, recvArray.getData(), recvArray.getSize(), src, recvTag, communicator ); +} + +/** + * \brief Send and receive data from an array (or an array view) into an array + * view to/from a different rank. + * + * Since views are not resizable, the size of the incoming data must match the + * array view size, otherwise \ref std::runtime_error is thrown. 
+ */ +template< typename SendArray, typename RecvArray > +std::enable_if_t< IsViewType< RecvArray >::value > +sendrecv( const SendArray& sendArray, + int dest, + int sendTag, + RecvArray& recvArray, + int src, + int recvTag, + MPI_Comm communicator = MPI_COMM_WORLD ) +{ + using SendIndex = decltype(sendArray.getSize()); + using RecvIndex = decltype(recvArray.getSize()); + + const SendIndex sendSize = sendArray.getSize(); + RecvIndex recvSize; + MPI::Sendrecv( &sendSize, 1, dest, sendTag, &recvSize, 1, src, recvTag, communicator ); + if( recvSize != recvArray.getSize() ) + throw std::runtime_error( "MPI::sendrecv error: The received size (" + std::to_string(recvSize) + ") does not match " + "the array view size (" + std::to_string(recvArray.getSize()) + ")" ); + MPI::Sendrecv( sendArray.getData(), sendArray.getSize(), dest, sendTag, recvArray.getData(), recvArray.getSize(), src, recvTag, communicator ); +} + +/** + * \brief Broadcast an array (or a string). + */ +template< typename Array > +std::enable_if_t< ! IsViewType< Array >::value > +bcast( Array& array, + int root, + MPI_Comm communicator = MPI_COMM_WORLD ) +{ + auto size = array.getSize(); + MPI::Bcast( &size, 1, root, communicator ); + array.setSize( size ); + MPI::Bcast( array.getData(), size, root, communicator ); +} + } // namespace MPI } // namespace TNL diff --git a/src/TNL/String.h b/src/TNL/String.h index bb4f02524..3e1e7ff69 100644 --- a/src/TNL/String.h +++ b/src/TNL/String.h @@ -15,10 +15,6 @@ #include #include -#ifdef HAVE_MPI -#include -#endif - namespace TNL { /** @@ -36,10 +32,6 @@ namespace TNL { * \ref convertToString * * \ref operator+ - * - * \ref mpiSend - * - * \ref mpiReceive */ class String : public std::string @@ -138,6 +130,16 @@ class String */ const char* getString() const; + /** + * \brief Returns pointer to data. Alias of \ref std::string::data. + */ + const char* getData() const; + + /** + * \brief Returns pointer to data. Alias of \ref std::string::data. 
+ */ + char* getData(); + /** * \brief Operator for accessing particular chars of the string. * @@ -393,33 +395,6 @@ template<> inline String convertToString( const bool& b ) return "false"; } -#ifdef HAVE_MPI - -/** - * \brief Sends the string to the target MPI process. - * - * @param str string to be sent - * @param target target MPI process ID - * @param tag MPI tag - * @param mpi_comm MPI communicator - */ -void mpiSend( const String& str, int target, int tag = 0, MPI_Comm mpi_comm = MPI_COMM_WORLD ); - -/** - * \brief Receives a string from the target MPI process. - * - * @param str says where the received string is to be saved to - * @param source source MPI process ID - * @param tag MPI tag - * @param mpi_comm MPI communicator - */ -void mpiReceive( String& str, int source, int tag = 0, MPI_Comm mpi_comm = MPI_COMM_WORLD ); - -//! Broadcast to other nodes in MPI cluster -// void MPIBcast( String& str, int root, MPI_Comm mpi_comm = MPI_COMM_WORLD ); - -#endif - } // namespace TNL #include diff --git a/src/TNL/String.hpp b/src/TNL/String.hpp index b55c92f89..4ab788f91 100644 --- a/src/TNL/String.hpp +++ b/src/TNL/String.hpp @@ -1,5 +1,5 @@ /*************************************************************************** - String_impl.h - description + String.hpp - description ------------------- begin : 2004/04/10 16:36 copyright : (C) 2004 by Tomas Oberhuber @@ -13,9 +13,6 @@ #include #include #include -#ifdef HAVE_MPI - #include -#endif namespace TNL { @@ -45,6 +42,17 @@ inline const char* String::getString() const return this->c_str(); } +inline const char* String::getData() const +{ + return data(); +} + +inline char* String::getData() +{ + // NOTE: std::string::data is non-const only since C++17 + return const_cast< char* >( data() ); +} + inline const char& String::operator[]( int i ) const { TNL_ASSERT_GE( i, 0, "Element index must be non-negative." 
); @@ -254,45 +262,4 @@ inline String operator+( const std::string& string1, const String& string2 ) return String( string1 ) + string2; } -#ifdef HAVE_MPI -inline void mpiSend( const String& str, int target, int tag, MPI_Comm mpi_comm ) -{ - int size = str.getSize(); - MPI_Send( &size, 1, MPI_INT, target, tag, mpi_comm ); - MPI_Send( const_cast< void* >( ( const void* ) str.getString() ), str.length(), MPI_CHAR, target, tag, mpi_comm ); -} - -inline void mpiReceive( String& str, int source, int tag, MPI_Comm mpi_comm ) -{ - int size; - MPI_Status status; - MPI_Recv( &size, 1, MPI_INT, source, tag, mpi_comm, &status ); - str.setSize( size ); - MPI_Recv( const_cast< void* >( ( const void* ) str.data() ), size, MPI_CHAR, source, tag, mpi_comm, &status ); -} - -/* -inline void String :: MPIBcast( int root, MPI_Comm comm ) -{ - int iproc; - MPI_Comm_rank( MPI_COMM_WORLD, &iproc ); - TNL_ASSERT( string, ); - int len = strlen( string ); - MPI_Bcast( &len, 1, MPI_INT, root, comm ); - if( iproc != root ) - { - if( length < len ) - { - delete[] string; - length = STRING_PAGE * ( len / STRING_PAGE + 1 ); - string = new char[ length ]; - } - } - - MPI_Bcast( string, len + 1, MPI_CHAR, root, comm ); -} -*/ -#endif - - } // namespace TNL diff --git a/src/UnitTests/CMakeLists.txt b/src/UnitTests/CMakeLists.txt index 03c521cec..e2ea5edee 100644 --- a/src/UnitTests/CMakeLists.txt +++ b/src/UnitTests/CMakeLists.txt @@ -4,6 +4,7 @@ ADD_SUBDIRECTORY( Containers ) ADD_SUBDIRECTORY( Functions ) # Matrices are included from src/CMakeLists.txt ADD_SUBDIRECTORY( Meshes ) +ADD_SUBDIRECTORY( MPI ) ADD_SUBDIRECTORY( Pointers ) set( CPP_TESTS AssertTest diff --git a/src/UnitTests/MPI/CMakeLists.txt b/src/UnitTests/MPI/CMakeLists.txt new file mode 100644 index 000000000..5ed7534bd --- /dev/null +++ b/src/UnitTests/MPI/CMakeLists.txt @@ -0,0 +1,14 @@ +set( CPP_TESTS MPIUtilsTest ) + +if( ${BUILD_MPI} ) +foreach( target IN ITEMS ${CPP_TESTS} ) + add_executable( ${target} ${target}.cpp ) + 
target_compile_options( ${target} PRIVATE ${CXX_TESTS_FLAGS} ) + target_link_libraries( ${target} ${GTEST_BOTH_LIBRARIES} ) + + foreach( np IN ITEMS 2 3 4 ) + set( mpirun_parameters -np ${np} -H localhost:${np} ) + add_test( NAME ${target}_np${np} COMMAND mpirun ${mpirun_parameters} "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${target}${CMAKE_EXECUTABLE_SUFFIX}") + endforeach() +endforeach() +endif() diff --git a/src/UnitTests/MPI/MPIUtilsTest.cpp b/src/UnitTests/MPI/MPIUtilsTest.cpp new file mode 100644 index 000000000..aedc2078b --- /dev/null +++ b/src/UnitTests/MPI/MPIUtilsTest.cpp @@ -0,0 +1,133 @@ +#ifdef HAVE_GTEST +#include + +#include +#include +#include + +#include "../Containers/VectorHelperFunctions.h" + +using namespace TNL; +using namespace TNL::Containers; +using namespace TNL::MPI; + +template< typename T1, typename T2 > +struct Pair +{ + using Left = T1; + using Right = T2; +}; + +template< typename Pair > +class ArrayCommunicationTest +: public ::testing::Test +{ +protected: + using SrcArrayType = typename Pair::Left; + using DestArrayType = typename Pair::Right; + using ValueType = std::decay_t< decltype(SrcArrayType{}[0]) >; + + const MPI_Comm communicator = MPI_COMM_WORLD; + + // source array or view + SrcArrayType srcArray; + // source array + DestArrayType _src; + // destination array + DestArrayType destArray; + + const int rank = GetRank(communicator); + const int nproc = GetSize(communicator); + + ArrayCommunicationTest() + { + _src.setSize( rank ); + for( int i = 0; i < rank; i++ ) + _src[ i ] = ValueType( rank ); + bindOrAssign( srcArray, _src ); + EXPECT_EQ( srcArray.getSize(), rank ); + } +}; + +// types for which ArrayCommunicationTest is instantiated +using ArrayTypes = ::testing::Types< + Pair< Array< int >, Array< int > >, + Pair< ArrayView< int >, Array< int > >, + Pair< String, String > +>; + +TYPED_TEST_SUITE( ArrayCommunicationTest, ArrayTypes ); + + +TYPED_TEST( ArrayCommunicationTest, send_recv ) +{ + using DestArrayType = typename 
TestFixture::DestArrayType; + using ValueType = typename TestFixture::ValueType; + + const int src = (this->rank - 1 + this->nproc) % this->nproc; + const int dest = (this->rank + 1 + this->nproc) % this->nproc; + + // NOTE: condition avoids a deadlock due to blocking communication + if( this->rank % 2 ) { + send( this->srcArray, dest, 0, this->communicator ); + recv( this->destArray, src, 0, this->communicator ); + } + else { + recv( this->destArray, src, 0, this->communicator ); + send( this->srcArray, dest, 0, this->communicator ); + } + + EXPECT_EQ( this->destArray.getSize(), src ); + DestArrayType expected; + expected.setSize( src ); + for( int i = 0; i < src; i++ ) + expected[ i ] = ValueType( src ); + EXPECT_EQ( this->destArray, expected ); +} + +TYPED_TEST( ArrayCommunicationTest, sendrecv ) +{ + using DestArrayType = typename TestFixture::DestArrayType; + using ValueType = typename TestFixture::ValueType; + + const int src = (this->rank - 1 + this->nproc) % this->nproc; + const int dest = (this->rank + 1 + this->nproc) % this->nproc; + + sendrecv( this->srcArray, dest, 0, this->destArray, src, 0, this->communicator ); + + EXPECT_EQ( this->destArray.getSize(), src ); + DestArrayType expected; + expected.setSize( src ); + for( int i = 0; i < src; i++ ) + expected[ i ] = ValueType( src ); + EXPECT_EQ( this->destArray, expected ); +} + +TYPED_TEST( ArrayCommunicationTest, bcast ) +{ + using DestArrayType = typename TestFixture::DestArrayType; + using ValueType = typename TestFixture::ValueType; + + for( int root = 0; root < this->nproc; root++ ) { + // reset the array on rank + this->destArray.setSize( this->rank ); + for( int i = 0; i < this->rank; i++ ) + this->destArray[ i ] = ValueType( this->rank ); + + // broadcast the dest array (bcast does not make sense for views) + bcast( this->destArray, root, this->communicator ); + + EXPECT_EQ( this->destArray.getSize(), root ); + DestArrayType expected; + expected.setSize( root ); + for( int i = 0; i < root; i++ 
) + expected[ i ] = ValueType( root ); + EXPECT_EQ( this->destArray, expected ); + + Barrier( this->communicator ); + } +} + +#endif // HAVE_GTEST + +#include "../main_mpi.h" -- GitLab From c8e6c5d5d6daad12afecf4a261c6931765418003 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Klinkovsk=C3=BD?= Date: Fri, 3 Sep 2021 11:06:38 +0200 Subject: [PATCH 09/10] Removed print method and operator<< for DistributedArray and DistributedArrayView It does not make sense to print a distributed array like this, because it may contain ghost elements. Users should examine the local array view manually. --- src/TNL/Containers/DistributedArray.h | 10 -------- src/TNL/Containers/DistributedArrayView.h | 11 --------- src/TNL/Containers/DistributedArrayView.hpp | 27 --------------------- 3 files changed, 48 deletions(-) diff --git a/src/TNL/Containers/DistributedArray.h b/src/TNL/Containers/DistributedArray.h index 07b681f27..ad4ac7987 100644 --- a/src/TNL/Containers/DistributedArray.h +++ b/src/TNL/Containers/DistributedArray.h @@ -270,16 +270,6 @@ private: static void setSynchronizerHelper( ViewType& view, const Array& array ) {} }; -template< typename Value, - typename Device, - typename Index, - typename Allocator > -std::ostream& operator<<( std::ostream& str, const DistributedArray< Value, Device, Index, Allocator >& array ) -{ - return array.getConstView().print( str ); -} - - } // namespace Containers } // namespace TNL diff --git a/src/TNL/Containers/DistributedArrayView.h b/src/TNL/Containers/DistributedArrayView.h index 25398d6d9..0e69218ff 100644 --- a/src/TNL/Containers/DistributedArrayView.h +++ b/src/TNL/Containers/DistributedArrayView.h @@ -230,7 +230,6 @@ public: template< typename Function > void forElements( IndexType begin, IndexType end, Function&& f ) const; - std::ostream& print( std::ostream& str ) const; protected: LocalRangeType localRange; IndexType ghosts = 0; @@ -242,16 +241,6 @@ protected: int valuesPerElement = 1; }; - -template< typename Value, - 
typename Device = Devices::Host, - typename Index = int > -std::ostream& operator<<( std::ostream& str, const DistributedArrayView< Value, Device, Index >& view ) -{ - return view.print( str ); -} - - } // namespace Containers } // namespace TNL diff --git a/src/TNL/Containers/DistributedArrayView.hpp b/src/TNL/Containers/DistributedArrayView.hpp index c3f0d02b5..34686dcbe 100644 --- a/src/TNL/Containers/DistributedArrayView.hpp +++ b/src/TNL/Containers/DistributedArrayView.hpp @@ -471,32 +471,5 @@ forElements( IndexType begin, IndexType end, Function&& f ) const } - -template< typename Value, - typename Device, - typename Index > -std::ostream& -DistributedArrayView< Value, Device, Index >:: -print( std::ostream& str ) const -{ - // The following does not work properly - /*if( MPI::GetRank( communicator ) == 0 ) - { - str << "[ "; - for( IndexType i = 0; i < localData.getSize(); i++ ) - str << ", " << localData.getElement( i ); - for( int proc = 1; proc < MPI::GetSize( communicator ); proc++ ) - { - Array< std::remove_const_t< Value >, Device, Index > localArray; - receive( localArray, proc, 0, communicator ); - for( IndexType i = 0; i < localArray.getSize(); i++ ) - str << ", " << localArray.getElement( i ); - } - str << " ]"; - } - else send( this->localData, 0, 0, this->communicator );*/ - return str; -} - } // namespace Containers } // namespace TNL -- GitLab From a8d7afcf2d34f73f5d6eb0ba7cd0379b424b032d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Klinkovsk=C3=BD?= Date: Fri, 3 Sep 2021 11:11:55 +0200 Subject: [PATCH 10/10] Fixed forElements method in DistributedArray and DistributedArrayView --- src/TNL/Containers/DistributedArray.h | 109 +++++++++----------- src/TNL/Containers/DistributedArray.hpp | 6 +- src/TNL/Containers/DistributedArrayView.h | 106 +++++++++---------- src/TNL/Containers/DistributedArrayView.hpp | 22 ++-- 4 files changed, 109 insertions(+), 134 deletions(-) diff --git a/src/TNL/Containers/DistributedArray.h 
b/src/TNL/Containers/DistributedArray.h index ad4ac7987..0840ffa1c 100644 --- a/src/TNL/Containers/DistributedArray.h +++ b/src/TNL/Containers/DistributedArray.h @@ -193,68 +193,53 @@ public: template< typename Array > bool operator!=( const Array& array ) const; - /** - * \brief Process the lambda function \e f for each array element in interval [ \e begin, \e end). - * - * The lambda function is supposed to be declared as - * - * ``` - * f( IndexType elementIdx, ValueType& elementValue ) - * ``` - * - * where - * - * - \e elementIdx is an index of the array element being currently processed - * - \e elementValue is a value of the array element being currently processed - * - * This is performed at the same place where the array is allocated, - * i.e. it is efficient even on GPU. - * - * \param begin The beginning of the array elements interval. - * \param end The end of the array elements interval. - * \param f The lambda function to be processed. - * - * \par Example - * \include Containers/ArrayExample_forElements.cpp - * \par Output - * \include ArrayExample_forElements.out - * - */ - template< typename Function > - void forElements( IndexType begin, IndexType end, Function&& f ); - - /** - * \brief Process the lambda function \e f for each array element in interval [ \e begin, \e end) for constant instances of the array. - * - * The lambda function is supposed to be declared as - * - * ``` - * f( IndexType elementIdx, ValueType& elementValue ) - * ``` - * - * where - * - * - \e elementIdx is an index of the array element being currently processed - * - \e elementValue is a value of the array element being currently processed - * - * This is performed at the same place where the array is allocated, - * i.e. it is efficient even on GPU. - * - * \param begin The beginning of the array elements interval. - * \param end The end of the array elements interval. - * \param f The lambda function to be processed. 
- * - * \par Example - * \include Containers/ArrayExample_forElements.cpp - * \par Output - * \include ArrayExample_forElements.out - * - */ - template< typename Function > - void forElements( IndexType begin, IndexType end, Function&& f ) const; - - - // TODO: serialization (save, load) + /** + * \brief Process the lambda function \e f for each array element in interval [ \e begin, \e end). + * + * The lambda function is supposed to be declared as + * + * ``` + * f( IndexType elementIdx, ValueType& elementValue ) + * ``` + * + * where + * + * - \e elementIdx is an index of the array element being currently processed + * - \e elementValue is a value of the array element being currently processed + * + * This is performed at the same place where the array is allocated, + * i.e. it is efficient even on GPU. + * + * \param begin The beginning of the array elements interval. + * \param end The end of the array elements interval. + * \param f The lambda function to be processed. + */ + template< typename Function > + void forElements( IndexType begin, IndexType end, Function&& f ); + + /** + * \brief Process the lambda function \e f for each array element in interval [ \e begin, \e end) for constant instances of the array. + * + * The lambda function is supposed to be declared as + * + * ``` + * f( IndexType elementIdx, ValueType& elementValue ) + * ``` + * + * where + * + * - \e elementIdx is an index of the array element being currently processed + * - \e elementValue is a value of the array element being currently processed + * + * This is performed at the same place where the array is allocated, + * i.e. it is efficient even on GPU. + * + * \param begin The beginning of the array elements interval. + * \param end The end of the array elements interval. + * \param f The lambda function to be processed. 
+ */ + template< typename Function > + void forElements( IndexType begin, IndexType end, Function&& f ) const; protected: ViewType view; diff --git a/src/TNL/Containers/DistributedArray.hpp b/src/TNL/Containers/DistributedArray.hpp index b09443c65..769a4fba3 100644 --- a/src/TNL/Containers/DistributedArray.hpp +++ b/src/TNL/Containers/DistributedArray.hpp @@ -14,8 +14,6 @@ #include "DistributedArray.h" -#include - namespace TNL { namespace Containers { @@ -458,7 +456,7 @@ void DistributedArray< Value, Device, Index, Allocator >:: forElements( IndexType begin, IndexType end, Function&& f ) { - this->view.forElements( begin, end, f ); + view.forElements( begin, end, f ); } template< typename Value, @@ -470,7 +468,7 @@ void DistributedArray< Value, Device, Index, Allocator >:: forElements( IndexType begin, IndexType end, Function&& f ) const { - this->view.forElements( begin, end, f ); + view.forElements( begin, end, f ); } } // namespace Containers diff --git a/src/TNL/Containers/DistributedArrayView.h b/src/TNL/Containers/DistributedArrayView.h index 0e69218ff..55d34b5ec 100644 --- a/src/TNL/Containers/DistributedArrayView.h +++ b/src/TNL/Containers/DistributedArrayView.h @@ -170,65 +170,53 @@ public: template< typename Array > bool operator!=( const Array& array ) const; - /** - * \brief Process the lambda function \e f for each array element in interval [ \e begin, \e end). - * - * The lambda function is supposed to be declared as - * - * ``` - * f( IndexType elementIdx, ValueType& elementValue ) - * ``` - * - * where - * - * - \e elementIdx is an index of the array element being currently processed - * - \e elementValue is a value of the array element being currently processed - * - * This is performed at the same place where the array is allocated, - * i.e. it is efficient even on GPU. - * - * \param begin The beginning of the array elements interval. - * \param end The end of the array elements interval. - * \param f The lambda function to be processed. 
- * - * \par Example - * \include Containers/ArrayExample_forElements.cpp - * \par Output - * \include ArrayExample_forElements.out - * - */ - template< typename Function > - void forElements( IndexType begin, IndexType end, Function&& f ); - - /** - * \brief Process the lambda function \e f for each array element in interval [ \e begin, \e end) for constant instances of the array. - * - * The lambda function is supposed to be declared as - * - * ``` - * f( IndexType elementIdx, ValueType& elementValue ) - * ``` - * - * where - * - * - \e elementIdx is an index of the array element being currently processed - * - \e elementValue is a value of the array element being currently processed - * - * This is performed at the same place where the array is allocated, - * i.e. it is efficient even on GPU. - * - * \param begin The beginning of the array elements interval. - * \param end The end of the array elements interval. - * \param f The lambda function to be processed. - * - * \par Example - * \include Containers/ArrayExample_forElements.cpp - * \par Output - * \include ArrayExample_forElements.out - * - */ - template< typename Function > - void forElements( IndexType begin, IndexType end, Function&& f ) const; + /** + * \brief Process the lambda function \e f for each array element in interval [ \e begin, \e end). + * + * The lambda function is supposed to be declared as + * + * ``` + * f( IndexType elementIdx, ValueType& elementValue ) + * ``` + * + * where + * + * - \e elementIdx is an index of the array element being currently processed + * - \e elementValue is a value of the array element being currently processed + * + * This is performed at the same place where the array is allocated, + * i.e. it is efficient even on GPU. + * + * \param begin The beginning of the array elements interval. + * \param end The end of the array elements interval. + * \param f The lambda function to be processed. 
+ */ + template< typename Function > + void forElements( IndexType begin, IndexType end, Function&& f ); + + /** + * \brief Process the lambda function \e f for each array element in interval [ \e begin, \e end) for constant instances of the array. + * + * The lambda function is supposed to be declared as + * + * ``` + * f( IndexType elementIdx, const ValueType& elementValue ) + * ``` + * + * where + * + * - \e elementIdx is an index of the array element being currently processed + * - \e elementValue is a value of the array element being currently processed + * + * This is performed at the same place where the array is allocated, + * i.e. it is efficient even on GPU. + * + * \param begin The beginning of the array elements interval. + * \param end The end of the array elements interval. + * \param f The lambda function to be processed. + */ + template< typename Function > + void forElements( IndexType begin, IndexType end, Function&& f ) const; protected: LocalRangeType localRange; diff --git a/src/TNL/Containers/DistributedArrayView.hpp b/src/TNL/Containers/DistributedArrayView.hpp index 34686dcbe..312acd6bb 100644 --- a/src/TNL/Containers/DistributedArrayView.hpp +++ b/src/TNL/Containers/DistributedArrayView.hpp @@ -14,6 +14,8 @@ #include "DistributedArrayView.h" +#include + namespace TNL { namespace Containers { @@ -449,15 +451,12 @@ void DistributedArrayView< Value, Device, Index >:: forElements( IndexType begin, IndexType end, Function&& f ) { - IndexType localBegin = max( begin, localRange.getBegin() ); - IndexType localEnd = min( end, localRange.getEnd() ); - auto local_f = [=] __cuda_callable__ ( const IndexType& idx, ValueType& value ) mutable { - f( idx + localRange.getBegin(), value ); + const IndexType localBegin = localRange.getLocalIndex( max( begin, localRange.getBegin() ) ); + const IndexType localEnd = localRange.getLocalIndex( min( end, localRange.getEnd() ) ); + auto local_f = [=] __cuda_callable__ ( IndexType idx, ValueType& value ) mutable { + 
f( localRange.getGlobalIndex( idx, value ) ); }; - this->localData.forElements( localBegin - localRange.getBegin(), - localEnd - localRange.getBegin(), - local_f ); - + localData.forElements( localBegin, localEnd, local_f ); } template< typename Value, @@ -468,7 +467,12 @@ void DistributedArrayView< Value, Device, Index >:: forElements( IndexType begin, IndexType end, Function&& f ) const { - + const IndexType localBegin = localRange.getLocalIndex( max( begin, localRange.getBegin() ) ); + const IndexType localEnd = localRange.getLocalIndex( min( end, localRange.getEnd() ) ); + auto local_f = [=] __cuda_callable__ ( IndexType idx, const ValueType& value ) { + f( localRange.getGlobalIndex( idx, value ) ); + }; + localData.forElements( localBegin, localEnd, local_f ); } } // namespace Containers -- GitLab