Commit 09827410 authored by Vít Hanousek's avatar Vít Hanousek
Browse files

- Remove most of the old MPI support.

- Add Template abstraction to MPIIsend and MPIIRecv
- Distributed Grid Synchronizer is now independent of the DOF datatype (limited to basic C types, i.e. types known to MPI)
- Distributed Grid Synchronizer now uses WaitAll function.
parent 65360c31
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -12,7 +12,6 @@

#include <TNL/Containers/List.h>
#include <TNL/Config/ConfigDescription.h>
#include <TNL/mpi-supp.h>
#include <TNL/param-types.h>
//#include <TNL/Debugging/StackBacktrace.h>

+45 −53
Original line number Diff line number Diff line
@@ -103,35 +103,34 @@ private:
        }

        //async send
        MPI::Request leftsendreq;
        MPI::Request rightsendreq;
        MPI::Request leftrcvreq;
        MPI::Request rightrcvreq;
        MPI::Request req[4];

        //send everithing, recieve everything 
        if(left!=-1)
        {
            leftsendreq=MPI::COMM_WORLD.Isend((void*) leftsendbuf, size, MPI::DOUBLE , left, 0);
            leftrcvreq=MPI::COMM_WORLD.Irecv((void*) leftrcvbuf, size, MPI::DOUBLE, left, 0);
            req[0]=TNLMPI::ISend(leftsendbuf, size, left);
            req[2]=TNLMPI::IRecv(leftrcvbuf, size, left);
        }
        if(right!=-1)
        else
        {
            rightsendreq=MPI::COMM_WORLD.Isend((void*) rightsendbuf, size, MPI::DOUBLE , right, 0);
            rightrcvreq=MPI::COMM_WORLD.Irecv((void*) rightrcvbuf, size, MPI::DOUBLE, right, 0);
            req[0]=MPI::REQUEST_NULL;
            req[2]=MPI::REQUEST_NULL;
        }        

        //wait until send is done
        if(left!=-1)
        if(right!=-1)
        {
            leftrcvreq.Wait();
            leftsendreq.Wait();
            req[1]=TNLMPI::ISend(rightsendbuf, size, right);
            req[3]=TNLMPI::IRecv(rightrcvbuf, size, right);
        }
        if(right!=-1)
        else
        {
            rightrcvreq.Wait();
            rightsendreq.Wait();
            req[1]=MPI::REQUEST_NULL;
            req[3]=MPI::REQUEST_NULL;
        }

        //wait until send and recv is done
        MPI::Request::Waitall(4, req);

        //copy data form rcv buffers
        if(left!=-1)
        {
@@ -260,28 +259,25 @@ class DistributedGridSynchronizer<DistributedGridType,MeshFunctionType,2>
            overlap,localsize,
            neighbor);
	
        //async send
        MPI::Request sendreq[8];
        MPI::Request rcvreq[8];
        //async send and rcv
        MPI::Request req[16];
		                
        //send everithing, recieve everything 
        for(int i=0;i<8;i++)	
           if(neighbor[i]!=-1)
           {
               sendreq[i]=MPI::COMM_WORLD.Isend((void*) sendbuffs[i], sizes[i], MPI::DOUBLE , neighbor[i], 0);
               rcvreq[i]=MPI::COMM_WORLD.Irecv((void*) rcvbuffs[i], sizes[i], MPI::DOUBLE, neighbor[i], 0);
               req[i]=TNLMPI::ISend(sendbuffs[i], sizes[i], neighbor[i]);
               req[8+i]=TNLMPI::IRecv(rcvbuffs[i], sizes[i], neighbor[i]);
           }

        //wait until send is done
        for(int i=0;i<8;i++)
        {
           if(neighbor[i]!=-1)
		   else
      	   {
               sendreq[i].Wait();
               rcvreq[i].Wait();
           }       
               req[i]=MPI::REQUEST_NULL;
               req[8+i]=MPI::REQUEST_NULL;
           }

        //wait until send is done
        MPI::Request::Waitall(16, req);
        
        //copy data form rcv buffers
        CopyBuffers(meshfunction, rcvbuffs, false,
            leftDst, rightDst, upDst, downDst,
@@ -450,29 +446,26 @@ class DistributedGridSynchronizer<DistributedGridType,MeshFunctionType,3>
            overlap, localsize,
            neighbor);
        
        //async send
        MPI::Request sendreq[26];
        MPI::Request rcvreq[26];
        //async send and rcv
        MPI::Request req[52];
		                
        //send everithing, recieve everything 
        for(int i=0;i<26;i++)	
           if(neighbor[i]!=-1)
           {
                        sendreq[i]=MPI::COMM_WORLD.Isend((void*) sendbuffs[i], sizes[i], MPI::DOUBLE , neighbor[i], 0);
                        rcvreq[i]=MPI::COMM_WORLD.Irecv((void*) rcvbuffs[i], sizes[i], MPI::DOUBLE, neighbor[i], 0);
               req[i]=TNLMPI::ISend(sendbuffs[i], sizes[i], neighbor[i]);
               req[26+i]=TNLMPI::IRecv(rcvbuffs[i], sizes[i], neighbor[i]);
           }

        //wait until send is done
        for(int i=0;i<26;i++)
        {
                if(neighbor[i]!=-1)
		   else
      	   {
                        sendreq[i].Wait();
                        rcvreq[i].Wait();
                }       
               req[i]=MPI::REQUEST_NULL;
               req[26+i]=MPI::REQUEST_NULL;
           }

        //wait until send is done
        MPI::Request::Waitall(52, req);

        //copy data form rcv buffers
               //fill send buffers
        CopyBuffers(meshfunction, rcvbuffs, false,
            westDst, eastDst, nordDst, southDst, bottomDst, topDst,
            xcenter, ycenter, zcenter,
@@ -545,7 +538,6 @@ class DistributedGridSynchronizer<DistributedGridType,MeshFunctionType,3>
            BufferEntities(meshfunction,buffers[TopWest],west,ycenter,top,shortDim.x(),longDim.y(),shortDim.z(),toBuffer);
        if(neighbor[TopEast]!=-1)
            BufferEntities(meshfunction,buffers[TopEast],east,ycenter,top,shortDim.x(),longDim.y(),shortDim.z(),toBuffer);   
        
        //YZ
        if(neighbor[BottomNord]!=-1)
            BufferEntities(meshfunction,buffers[BottomNord],xcenter,nord,bottom,longDim.x(),shortDim.y(),shortDim.z(),toBuffer);
+2 −2
Original line number Diff line number Diff line
@@ -12,8 +12,6 @@

#include <iostream>
#include <sstream>
#include <TNL/mpi-supp.h>


namespace TNL {

@@ -22,6 +20,8 @@ namespace Containers {
   template< class T > class List;
}

class String;

template< typename T >
String convertToString( const T& value );

+0 −68
Original line number Diff line number Diff line
/***************************************************************************
                          mpi-supp.cpp  -  description
                             -------------------
    begin                : 2007/06/21
    copyright            : (C) 2007 by Tomas Oberhuber
    email                : tomas.oberhuber@fjfi.cvut.cz
 ***************************************************************************/

/* See Copyright Notice in tnl/Copyright */

#include <TNL/mpi-supp.h>

namespace TNL {

// Initialize the MPI runtime from the program's command-line arguments.
// Compiled without USE_MPI this is a no-op, so serial builds can call it freely.
void MPIInit( int* argc, char** argv[] )
{
#ifdef USE_MPI
   MPI_Init( argc, argv );
#else
   // Explicitly mark the parameters as used to avoid unused-parameter warnings.
   (void) argc;
   (void) argv;
#endif
}

// Tear down the MPI runtime; a no-op in builds without MPI support,
// mirroring MPIInit above.
void MPIFinalize()
{
#ifdef USE_MPI
   MPI_Finalize();
#endif
}

// Report whether this binary was compiled with MPI support enabled.
// The answer is fixed at build time by the USE_MPI macro.
bool HaveMPI()
{
   bool compiledWithMpi = false;
#ifdef USE_MPI
   compiledWithMpi = true;
#endif
   return compiledWithMpi;
}

// Return the rank of the calling process within the given communicator.
// Without MPI support the process is always rank 0.
int MPIGetRank( MPI_Comm comm )
{
#ifdef USE_MPI
   int rank;
   // Bug fix: query the communicator that was passed in, not MPI_COMM_WORLD —
   // the original ignored the `comm` parameter entirely.
   MPI_Comm_rank( comm, &rank );
   return rank;
#else
   (void) comm;
   return 0;
#endif
}

// Return the number of processes in the given communicator.
// Without MPI support a program is a single serial process, so the size is 1
// (consistent with MPIGetRank returning rank 0); the previous fallback of 0
// would make loops over ranks silently do nothing and risks division by zero.
int MPIGetSize( MPI_Comm comm )
{
#ifdef USE_MPI
   int size;
   MPI_Comm_size( comm, &size );
   return size;
#else
   (void) comm;
   return 1;
#endif
}


// Block until every process in the communicator has reached this call.
// Without MPI support there is only one process, so nothing to wait for.
void MPIBarrier( MPI_Comm comm )
{
#ifdef USE_MPI
   MPI_Barrier( comm );
#else
   (void) comm;
#endif
}

} // namespace TNL
+48 −146
Original line number Diff line number Diff line
@@ -20,167 +20,69 @@
   typedef int MPI_Op;
   #define MPI_COMM_WORLD  0
   #define MPI_MAX 0
   #define MPI_MIN 0
   #define MPI_SUM 0
#endif

namespace TNL {
   
class String;

/*#ifdef USE_MPI
inline MPI_Datatype MPIDataType( const signed char ) { return MPI_CHAR; };
inline MPI_Datatype MPIDataType( const signed short int ) { return MPI_SHORT; };
inline MPI_Datatype MPIDataType( const signed int ) { return MPI_INT; };
inline MPI_Datatype MPIDataType( const signed long int ) { return MPI_LONG; };
inline MPI_Datatype MPIDataType( const unsigned char ) { return MPI_UNSIGNED_CHAR; };
inline MPI_Datatype MPIDataType( const unsigned short int ) { return MPI_UNSIGNED_SHORT; };
inline MPI_Datatype MPIDataType( const unsigned int ) { return MPI_UNSIGNED; };
inline MPI_Datatype MPIDataType( const unsigned long int ) { return MPI_UNSIGNED_LONG; };
inline MPI_Datatype MPIDataType( const float& ) { return MPI_FLOAT; };
inline MPI_Datatype MPIDataType( const double& ) { return MPI_DOUBLE; };
inline MPI_Datatype MPIDataType( const long double& ) { return MPI_LONG_DOUBLE; };
#endif
*/
/*
void MPIInit( int* argc, char** argv[] );

void MPIFinalize();

bool HaveMPI();

int MPIGetRank( MPI_Comm comm = MPI_COMM_WORLD );

int MPIGetSize( MPI_Comm comm = MPI_COMM_WORLD );

void MPIBarrier( MPI_Comm comm = MPI_COMM_WORLD );

#ifdef USE_MPI
template< class T > void MPISend( const T& data,
                                  int count,
                                  int dest,
                                  MPI_Comm comm = MPI_COMM_WORLD )
{
   MPI_Send( &data, count, MPIDataType( data ), dest, 0, comm );
};
#else
template< class T > void MPISend( const T&,
                                  int,
                                  int,
                                  MPI_Comm  )
{
};
#endif

#ifdef USE_MPI
template< class T > void MPIRecv( T& data,
                                  int count,
                                  int src,
                                  MPI_Comm comm = MPI_COMM_WORLD )
{
   MPI_Status stat;
   MPI_Recv( data, count, MPIDataType( data ), src, 0, comm, &stat );
};
#else
template< class T > void MPIRecv( T&,
                                  int,
                                  int,
                                  MPI_Comm = MPI_COMM_WORLD )
{};
#endif

#ifdef USE_MPI
template< class T > void MPIBcast( T& data,
                                   int count,
                                   int root,
                                   MPI_Comm comm = MPI_COMM_WORLD )
{
   MPI_Bcast( &data, count, MPIDataType( data ), root, comm );
};

inline void MPIBcast( String& data, intstd::cout, int root, MPI_Comm comm = MPI_COMM_WORLD )
{
   std::cerr << "Call method MPIBcast of mString instead of function MPIBcast( mString&, ... ) " << std::endl;
   abort();
}
#else
template< class T > void MPIBcast( T&,
                                   int,
                                   int,
                                   MPI_Comm = MPI_COMM_WORLD )
{
}
#endif

#ifdef USE_MPI
template< typename T > void MPIReduce( T& data,
                                       T& reduced_data,
                                       int count,
                                       MPI_Op op,
                                       int root,
                                       MPI_Comm comm )
{
   MPI_Reduce( &data,
               &reduced_data,
               count,
               MPIDataType( data ),
               op,
               root,
               comm );
};
#else
template< typename T > void MPIReduce( T& data,
template< typename T > 
void MPIAllreduce( T& data,
                 T& reduced_data,
                 int,
                 MPI_Op,
                                       int,
                 MPI_Comm )
{
    reduced_data = data;
};
#endif

#ifdef USE_MPI
template< typename T > void MPIAllreduce( T& data,
                                          T& reduced_data,
                                          int count,
                                          MPI_Op op,
                                          MPI_Comm comm )
{
   MPI_Allreduce( &data,
                  &reduced_data,
                  count,
                  MPIDataType( data ),
                  op,
                  comm );
};
#else*/
#ifndef USE_MPI
template< typename T > void MPIAllreduce( T& data,
template< typename T >
void MPIReduce( T& data,
                T& reduced_data,
                int,
                MPI_Op,
                int,
                MPI_Comm )
{
   reduced_data = data;
};

template< typename T > void MPIReduce( T& data,
                                       T& reduced_data,
template< typename T > 
void MPIBcast(  T&,
                int,
                                       MPI_Op,
                int,
                                       MPI_Comm )
                MPI_Comm = MPI_COMM_WORLD )
{
   reduced_data = data;
};
   
template< class T > void MPIBcast( T&,
                                   int,
                                   int,
                                   MPI_Comm = MPI_COMM_WORLD )
#endif

namespace TNL {
    namespace TNLMPI{

#ifdef USE_MPI
        
    // Map a C type to the corresponding MPI datatype constant via overload
    // resolution; used by ISend/IRecv to pick the datatype from the buffer's
    // element type. Only basic C types known to MPI are supported — there is
    // deliberately no generic fallback, so an unsupported type fails to compile.
    inline MPI_Datatype MPIDataType( const signed char ) { return MPI_CHAR; };
    inline MPI_Datatype MPIDataType( const signed short int ) { return MPI_SHORT; };
    inline MPI_Datatype MPIDataType( const signed int ) { return MPI_INT; };
    inline MPI_Datatype MPIDataType( const signed long int ) { return MPI_LONG; };
    inline MPI_Datatype MPIDataType( const unsigned char ) { return MPI_UNSIGNED_CHAR; };
    inline MPI_Datatype MPIDataType( const unsigned short int ) { return MPI_UNSIGNED_SHORT; };
    inline MPI_Datatype MPIDataType( const unsigned int ) { return MPI_UNSIGNED; };
    inline MPI_Datatype MPIDataType( const unsigned long int ) { return MPI_UNSIGNED_LONG; };
    inline MPI_Datatype MPIDataType( const float ) { return MPI_FLOAT; };
    inline MPI_Datatype MPIDataType( const double ) { return MPI_DOUBLE; };
    inline MPI_Datatype MPIDataType( const long double ) { return MPI_LONG_DOUBLE; };
    
    // Start a non-blocking send of `count` elements of T from `data` to rank
    // `dest` on MPI::COMM_WORLD (tag 0). The datatype is deduced from T via
    // MPIDataType. The caller must keep the buffer alive until the returned
    // request completes (e.g. via Wait/Waitall).
    template <typename T>
    MPI::Request ISend( const T *data, int count, int dest)
    {
            return MPI::COMM_WORLD.Isend((void*) data, count, MPIDataType(*data) , dest, 0);
    }     

    // Start a non-blocking receive of `count` elements of T into `data` from
    // rank `src` on MPI::COMM_WORLD (tag 0).
    // NOTE(review): the parameter is declared `const T*` but Irecv writes into
    // the buffer — the const is cast away via (void*). The signature should
    // arguably be `T*`; confirm no caller relies on passing a const buffer.
    template <typename T>
    MPI::Request IRecv( const T *data, int count, int src)
    {
            return MPI::COMM_WORLD.Irecv((void*) data, count, MPIDataType(*data) , src, 0);
    }     

#endif

}//namespace MPI
} // namespace TNL