Periodic BC in distributed grid

Merged: Jakub Klinkovský requested to merge periodic-bc into develop
All threads resolved!
1 file changed: +6 −6
@@ -89,7 +89,7 @@ class MpiCommunicator
 #ifdef HAVE_MPI
 config.addEntry< bool >( "redirect-mpi-output", "Only process with rank 0 prints to console. Other processes are redirected to files.", true );
 config.addEntry< bool >( "mpi-gdb-debug", "Wait for GDB to attach the master MPI process.", false );
-config.addEntry< int >( "mpi-process-to-attach", "Number of the MPI process to be attached by GDB.", 0 );
+config.addEntry< int >( "mpi-process-to-attach", "Number of the MPI process to be attached by GDB. Set -1 for all processes.", 0 );
 #endif
 }
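Side note on the changed entry above: assuming TNL's usual convention that config entries become command-line options of the same name, the new "-1 for all processes" value would presumably be used roughly as below; the executable name and exact flag syntax are illustrative assumptions only, not something shown in this diff.

    mpirun -np 4 ./tnl-solver --mpi-gdb-debug true --mpi-process-to-attach -1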
@@ -276,13 +276,13 @@ class MpiCommunicator
 }
 template <typename T>
-static Request ISend( const T* data, int count, int dest, CommunicationGroup group)
+static Request ISend( const T* data, int count, int dest, int tag, CommunicationGroup group)
 {
 #ifdef HAVE_MPI
 TNL_ASSERT_TRUE(IsInitialized(), "Fatal Error - MPI communicator is not initialized");
 TNL_ASSERT_NE(group, NullGroup, "ISend cannot be called with NullGroup");
 Request req;
-MPI_Isend((const void*) data, count, MPIDataType(data) , dest, 0, group, &req);
+MPI_Isend( const_cast< void* >( ( const void* ) data ), count, MPIDataType(data) , dest, tag, group, &req);
 return req;
 #else
 throw Exceptions::MPISupportMissing();
@@ -290,13 +290,13 @@ class MpiCommunicator
 }
 template <typename T>
-static Request IRecv( T* data, int count, int src, CommunicationGroup group)
+static Request IRecv( T* data, int count, int src, int tag, CommunicationGroup group)
 {
 #ifdef HAVE_MPI
 TNL_ASSERT_TRUE(IsInitialized(), "Fatal Error - MPI communicator is not initialized");
 TNL_ASSERT_NE(group, NullGroup, "IRecv cannot be called with NullGroup");
 Request req;
-MPI_Irecv((void*) data, count, MPIDataType(data) , src, 0, group, &req);
+MPI_Irecv((void*) data, count, MPIDataType(data) , src, tag, group, &req);
 return req;
 #else
 throw Exceptions::MPISupportMissing();
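Side note on the new tag argument in ISend and IRecv above: concurrent exchanges can now be told apart by tag, which matters for periodic boundaries where a rank's left and right neighbour may be the same process. A minimal usage sketch, assuming TNL's MpiCommunicator as the Communicator type and a WaitAll wrapper; buffer and parameter names are illustrative, not taken from this MR.

    // illustrative helper: exchange left/right ghost layers; distinct tags keep
    // the two messages apart even when leftNeighbor == rightNeighbor
    template< typename Communicator, typename Real >
    void exchangeGhosts( const Real* sendLeft, const Real* sendRight,
                         Real* recvLeft, Real* recvRight, int count,
                         int leftNeighbor, int rightNeighbor,
                         typename Communicator::CommunicationGroup group )
    {
       const int leftTag = 0, rightTag = 1;
       typename Communicator::Request reqs[ 4 ];
       reqs[ 0 ] = Communicator::ISend( sendLeft,  count, leftNeighbor,  leftTag,  group );
       reqs[ 1 ] = Communicator::ISend( sendRight, count, rightNeighbor, rightTag, group );
       // a message posted with leftTag arrives from the right neighbour, and vice versa
       reqs[ 2 ] = Communicator::IRecv( recvRight, count, rightNeighbor, leftTag,  group );
       reqs[ 3 ] = Communicator::IRecv( recvLeft,  count, leftNeighbor,  rightTag, group );
       Communicator::WaitAll( reqs, 4 );
    }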
@@ -334,7 +334,7 @@ class MpiCommunicator
 {
 #ifdef HAVE_MPI
 TNL_ASSERT_NE(group, NullGroup, "Allreduce cannot be called with NullGroup");
-MPI_Allreduce( (const void*) data, (void*) reduced_data,count,MPIDataType(data),op,group);
+MPI_Allreduce( const_cast< void* >( ( void* ) data ), (void*) reduced_data,count,MPIDataType(data),op,group);
 #else
 throw Exceptions::MPISupportMissing();
 #endif
@@ -366,7 +366,7 @@ class MpiCommunicator
 {
 #ifdef HAVE_MPI
 TNL_ASSERT_NE(group, NullGroup, "Reduce cannot be called with NullGroup");
-MPI_Reduce( (const void*) data, (void*) reduced_data,count,MPIDataType(data),op,root,group);
+MPI_Reduce( const_cast< void* >( ( void*) data ), (void*) reduced_data,count,MPIDataType(data),op,root,group);
 #else
 throw Exceptions::MPISupportMissing();
 #endif
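Side note: the const_cast additions throughout this merge request all follow the same pattern. MPI implementations that only provide the MPI-2 C bindings declare the read-only buffers of these calls as plain void* (the const qualifiers were only added in MPI-3.0), so a const T* cannot be passed to them directly. A minimal sketch of the pattern on plain MPI, outside of TNL; the function and parameter names are illustrative.

    #include <mpi.h>

    // pass a read-only buffer to an MPI-2 style binding that expects void*
    template< typename T >
    void sendConstBuffer( const T* data, int count, MPI_Datatype type,
                          int dest, int tag, MPI_Comm comm )
    {
       // the inner cast obtains a const void*, const_cast then drops the qualifier
       MPI_Send( const_cast< void* >( static_cast< const void* >( data ) ),
                 count, type, dest, tag, comm );
    }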
@@ -386,7 +386,7 @@ class MpiCommunicator
 #ifdef HAVE_MPI
 TNL_ASSERT_NE(group, NullGroup, "SendReceive cannot be called with NullGroup");
 MPI_Status status;
-MPI_Sendrecv( ( const void* ) sendData,
+MPI_Sendrecv( const_cast< void* >( ( void* ) sendData ),
 sendCount,
 MPIDataType( sendData ),
 destination,
@@ -412,7 +412,7 @@ class MpiCommunicator
 {
 #ifdef HAVE_MPI
 TNL_ASSERT_NE(group, NullGroup, "SendReceive cannot be called with NullGroup");
-MPI_Alltoall( ( const void* ) sendData,
+MPI_Alltoall( const_cast< void* >( ( void* ) sendData ),
 sendCount,
 MPIDataType( sendData ),
 ( void* ) receiveData,
@@ -433,7 +433,7 @@ class MpiCommunicator
 }
 }
-static void CreateNewGroup(bool meToo,int myRank, CommunicationGroup &oldGroup, CommunicationGroup &newGroup)
+static void CreateNewGroup( bool meToo, int myRank, CommunicationGroup &oldGroup, CommunicationGroup &newGroup )
 {
 #ifdef HAVE_MPI
 if(meToo)