Commit c39ce1ce authored by Jakub Klinkovský

Various small fixes

parent 520414ca
1 merge request: !57 Small fixes
@@ -24,8 +24,8 @@ else
          std::stringstream __tnl_mpi_print_stream_; \
          __tnl_mpi_print_stream_ << "Node " << TNL::Communicators::MpiCommunicator::GetRank() << " of " \
                                  << TNL::Communicators::MpiCommunicator::GetSize() << " : " << message << std::endl; \
-         TNL::String __tnl_mpi_print_string_( __tnl_mpi_print_stream_.str().c_str() ); \
+         TNL::String __tnl_mpi_print_string_( __tnl_mpi_print_stream_.str() ); \
          mpiSend( __tnl_mpi_print_string_, 0, std::numeric_limits< int >::max() ); \
       } \
       else \
       { \
@@ -35,7 +35,7 @@ else
               __tnl_mpi_print_j++ ) \
          { \
             TNL::String __tnl_mpi_print_string_; \
             mpiReceive( __tnl_mpi_print_string_, __tnl_mpi_print_j, std::numeric_limits< int >::max() ); \
             std::cerr << __tnl_mpi_print_string_; \
          } \
       } \
@@ -77,8 +77,8 @@ else
          std::stringstream __tnl_mpi_print_stream_; \
          __tnl_mpi_print_stream_ << "Node " << TNL::Communicators::MpiCommunicator::GetRank() << " of " \
                                  << TNL::Communicators::MpiCommunicator::GetSize() << " : " << message << std::endl; \
-         TNL::String __tnl_mpi_print_string_( __tnl_mpi_print_stream_.str().c_str() ); \
-         mpiSsend( __tnl_mpi_print_string_, 0, std::numeric_limits< int >::max() ); \
+         TNL::String __tnl_mpi_print_string_( __tnl_mpi_print_stream_.str() ); \
+         mpiSend( __tnl_mpi_print_string_, 0, std::numeric_limits< int >::max() ); \
       } \
    } \
    else \
@@ -94,7 +94,7 @@ else
             if( __tnl_mpi_print_cond ) \
             { \
                TNL::String __tnl_mpi_print_string_; \
                mpiReceive( __tnl_mpi_print_string_, __tnl_mpi_print_j, std::numeric_limits< int >::max() ); \
                std::cerr << __tnl_mpi_print_string_; \
             } \
          } \
@@ -103,4 +103,4 @@ else
 #else
    #define TNL_MPI_PRINT_COND( condition, message ) \
       std::cerr << message << std::endl;
-#endif
\ No newline at end of file
+#endif
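The hunks above (apparently from TNL's MPIPrint header, which the third diff below includes) drop a redundant `.c_str()` conversion, since `TNL::String` can be constructed from `std::string` directly, and switch the conditional variant from `mpiSsend` to `mpiSend`, matching the unconditional macro. For orientation, here is a minimal sketch of the ordered-print pattern these macros implement, written against plain MPI rather than TNL's `mpiSend`/`mpiReceive` wrappers; the helper name `ordered_print` is hypothetical:

   #include <mpi.h>
   #include <iostream>
   #include <sstream>
   #include <string>

   // Hypothetical helper: rank 0 prints its own line first, then receives and
   // prints one line from every other rank, in rank order.
   void ordered_print( const std::string& message )
   {
      int rank, size;
      MPI_Comm_rank( MPI_COMM_WORLD, &rank );
      MPI_Comm_size( MPI_COMM_WORLD, &size );

      std::stringstream stream;
      stream << "Node " << rank << " of " << size << " : " << message << "\n";
      const std::string line = stream.str();

      if( rank > 0 ) {
         // non-root ranks send their formatted line to rank 0
         MPI_Send( line.c_str(), (int) line.size(), MPI_CHAR, 0, 0, MPI_COMM_WORLD );
      }
      else {
         std::cerr << line;
         for( int j = 1; j < size; j++ ) {
            // probe for the message length, then receive and print it
            MPI_Status status;
            MPI_Probe( j, 0, MPI_COMM_WORLD, &status );
            int count;
            MPI_Get_count( &status, MPI_CHAR, &count );
            std::string buffer( count, '\0' );
            MPI_Recv( &buffer[0], count, MPI_CHAR, j, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
            std::cerr << buffer;
         }
      }
   }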
@@ -153,12 +153,14 @@ public:
       updateCommunicationPattern( localMatrix, group );

       // prepare buffers
-      commRequests.clear();
       globalBuffer.init( Partitioner::getOffset( localMatrix.getColumns(), rank, nproc ),
                          inVector.getConstLocalView(),
                          localMatrix.getColumns() - Partitioner::getOffset( localMatrix.getColumns(), rank, nproc ) - inVector.getConstLocalView().getSize() );
       const auto globalBufferView = globalBuffer.getConstView();

+      // buffer for asynchronous communication requests
+      std::vector< typename CommunicatorType::Request > commRequests;
+
       // send our data to all processes that need it
       for( int i = 0; i < commPatternStarts.getRows(); i++ ) {
          if( i == rank )
@@ -231,21 +233,17 @@ public:
       commPatternEnds.reset();
       localOnlySpan.first = localOnlySpan.second = 0;
       globalBuffer.reset();
-      commRequests.clear();
    }

 protected:
    // communication pattern
-   Matrices::Dense< IndexType, Devices::Host, int, true, Allocators::Host< IndexType > > commPatternStarts, commPatternEnds;
+   Matrices::Dense< IndexType, Devices::Host, int > commPatternStarts, commPatternEnds;

    // span of rows with only block-diagonal entries
    std::pair< IndexType, IndexType > localOnlySpan;

    // global buffer for non-local elements of the vector
    __DistributedSpMV_impl::ThreePartVector< RealType, DeviceType, IndexType > globalBuffer;
-
-   // buffer for asynchronous communication requests
-   std::vector< typename CommunicatorType::Request > commRequests;
 };

 } // namespace Matrices
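These two hunks, from the distributed SpMV class in namespace TNL::Matrices, move `commRequests` from a long-lived class member to a vector local to the multiplication routine, so each call creates, uses, and completes its own requests and no `clear()` bookkeeping is needed in `reset()`; the `commPatternStarts`/`commPatternEnds` members also drop the trailing template arguments (presumably the defaults). A minimal sketch of the resulting request lifecycle, using plain MPI rather than TNL's `CommunicatorType` wrappers and with hypothetical function and buffer names:

   #include <mpi.h>
   #include <vector>

   // Hypothetical all-to-all exchange: every rank sends its local block to all
   // other ranks and receives theirs. The request vector lives on the stack,
   // so every request started here is also completed here.
   void exchange( const double* sendBuffer, double* recvBuffer,
                  int count, int rank, int nproc )
   {
      // buffer for asynchronous communication requests -- no longer a class
      // member that reset() and every call had to clear
      std::vector< MPI_Request > commRequests;
      commRequests.reserve( 2 * (nproc - 1) );

      for( int i = 0; i < nproc; i++ ) {
         if( i == rank )
            continue;
         commRequests.emplace_back();
         MPI_Isend( sendBuffer, count, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, &commRequests.back() );
         commRequests.emplace_back();
         MPI_Irecv( recvBuffer + i * count, count, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, &commRequests.back() );
      }

      // wait for completion of every send and receive started by this call
      MPI_Waitall( (int) commRequests.size(), commRequests.data(), MPI_STATUSES_IGNORE );
   }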
@@ -15,6 +15,7 @@
 #include <TNL/Meshes/DistributedMeshes/BufferEntitiesHelper.h>
 #include <TNL/Meshes/DistributedMeshes/Directions.h>
 #include <TNL/Communicators/MPIPrint.h>
+#include <TNL/Pointers/SharedPointer.h>

 namespace TNL {
 namespace Functions{
@@ -18,11 +18,6 @@
 #include <TNL/Meshes/GridDetails/Traverser_Grid2D.h>
 #include <TNL/Meshes/GridDetails/Traverser_Grid3D.h>

-#ifdef USE_MPI
-   #include <TNL/Meshes/DistributedGridSynchronizer.h>
-#endif
-
 namespace TNL {
 namespace Solvers {
 namespace PDE {