Commit de921733 authored by Tomáš Oberhuber's avatar Tomáš Oberhuber
Browse files

Code refactoring.

Added MPI info logs.
parent 69e3cfa4
Loading
Loading
Loading
Loading
+81 −77
Original line number Diff line number Diff line
@@ -10,14 +10,14 @@

#pragma once



#ifdef HAVE_MPI

#include <mpi.h>
#include <TNL/String.h>
#include <iostream>
#include <fstream>
#include <mpi.h>
#include <TNL/String.h>
#include <TNL/Logger.h>


namespace TNL {
namespace Communicators {
@@ -39,25 +39,19 @@ namespace Communicators {
      inline static MPI_Datatype MPIDataType( const double* ) { return MPI_DOUBLE; };
      inline static MPI_Datatype MPIDataType( const long double* ) { return MPI_LONG_DOUBLE; };
   
        public:
           typedef MPI::Request Request;
        static MPI::Request NullRequest;
        static std::streambuf *psbuf;
        static std::streambuf *backup;
        static std::ofstream filestr;
        
        // Can be called even before Init(): reports that MPI support is
        // compiled in (this branch is only built when HAVE_MPI is defined).
        static bool isAvailable()
        {
            return true;
        }
   public:

        // Can be called only after Init(): a run is considered distributed
        // when more than one MPI process participates.
      static bool isDistributed()
      {
         return GetSize()>1;
      };

        typedef MPI::Request Request;
        static MPI::Request NullRequest;
        static std::streambuf *psbuf;
        static std::streambuf *backup;
        static std::ofstream filestr;

      static void Init(int argc, char **argv,bool redirect=false)
      {
@@ -179,6 +173,16 @@ namespace Communicators {
        {
             MPI::COMM_WORLD.Reduce((void*) &data, (void*) &reduced_data,count,MPIDataType(data),op,root);
        };*/

      // Append MPI setup information (number of processes) to the solver
      // prolog; a serial (non-distributed) run writes nothing.
      static void writeProlog( Logger& logger )
      {
         if( ! isDistributed() )
            return;
         logger.writeParameter( "MPI processes:", GetSize() );
      }
   

    };
    
    MPI::Request MpiCommunicator::NullRequest;
+97 −97
Original line number Diff line number Diff line
@@ -10,6 +10,8 @@

#pragma once

#include <TNL/Logger.h>

namespace TNL {
namespace Communicators {
        
@@ -22,11 +24,6 @@ namespace Communicators {
      typedef int Request;
      static Request NullRequest;

        // The trivial (no-MPI) communicator is always available; safe to
        // query before Init().
        static bool isAvailable() { return true; }

      static void Init(int argc, char **argv, bool redirect=false)
      {
          NullRequest=-1;
@@ -107,8 +104,11 @@ namespace Communicators {
      {
           MPI::COMM_WORLD.Reduce((void*) &data, (void*) &reduced_data,count,MPIDataType(data),op,root);
      };*/

      // Nothing to report in the prolog for a non-distributed run.
      static void writeProlog( Logger& logger ) {}
};


  int NoDistrCommunicator::NullRequest;

} // namespace Communicators
+12 −6
Original line number Diff line number Diff line
@@ -10,18 +10,23 @@

#pragma once

#include <iostream>
#include <mpi.h>

//#ifdef MPIIO
#include <TNL/Communicators/MpiCommunicator.h>
//#endif

#include <TNL/File.h>
#include <TNL/Meshes/DistributedMeshes/DistributedMesh.h>
#include <TNL/Meshes/DistributedMeshes/CopyEntitiesHelper.h>
#include <TNL/Functions/MeshFunction.h>


#include <iostream>
#include <mpi.h>

#ifdef MPIIO
#include <TNL/Communicators/MpiCommunicator.h>
#endif




namespace TNL {
namespace Meshes {   
@@ -195,7 +200,8 @@ class DistributedGridIO<MeshFunctionType,MpiIO>

       int headerSize=0;

       if(Communicators::MpiCommunicator::GetRank()==0)
       using Comm = typename TNL::Communicators::MpiCommunicator;
       if(Comm::GetRank()==0)
       {
            headerSize=writeMeshFunctionHeader(file,meshFunction,dataCount);
       }
+12 −5
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
#pragma once

#include <TNL/Meshes/Grid.h>
#include <TNL/Logger.h>

namespace TNL {
namespace Meshes { 
@@ -29,13 +30,14 @@ class DistributedMesh<Grid< 1, RealType, Device, Index >>

      static constexpr int getMeshDimension() { return 1; };    

     
      DistributedMesh()
      : isSet(false ){};

      const CoordinatesType& getDomainDecomposition()
      {
         isSet=false;
      };
         return this->rank;
      }
      
      // Compute everything: the whole domain decomposition for this subdomain.
      template<typename CommunicatorType>
      void setGlobalGrid(GridType globalGrid, CoordinatesType overlap, int *distribution=NULL)
      {
@@ -197,6 +199,11 @@ class DistributedMesh<Grid< 1, RealType, Device, Index >>
         return this->localBegin;
      }
      
      // Append this 1D distributed grid's subdomain layout to the solver
      // prolog.
      void writeProlog( Logger& logger )
      {
         const auto& decomposition = this->getDomainDecomposition();
         logger.writeParameter( "Domain decomposition:", decomposition );
      }
       
       
    private : 

+225 −219
Original line number Diff line number Diff line
@@ -33,9 +33,12 @@ class DistributedMesh<Grid< 2, RealType, Device, Index >>
     
   public:
      DistributedMesh()
      : isSet( false ) {};
       
      const CoordinatesType getDomainDecomposition()
      {
            isSet=false;
       };
         return this->domainDecomposition;
      }

      template< typename CommunicatorType >
      void setGlobalGrid( GridType &globalGrid,
@@ -51,7 +54,6 @@ class DistributedMesh<Grid< 2, RealType, Device, Index >>
           
         Dimensions= GridType::getMeshDimension();
         spaceSteps=globalGrid.getSpaceSteps();
           //Detect MPI and number of process
         distributed=false;
           
         if( CommunicatorType::IsInitialized() )
@@ -67,11 +69,10 @@ class DistributedMesh<Grid< 2, RealType, Device, Index >>
           
         if( !distributed )
         {
               //Without MPI
               processesCoordinates[0]=0;
               processesCoordinates[1]=0;
               procsdistr[0]=1;
               procsdistr[1]=1;
            subdomainCoordinates[0]=0;
            subdomainCoordinates[1]=0;
            domainDecomposition[0]=1;
            domainDecomposition[1]=1;
            localOrigin=globalGrid.getOrigin();
            localGridSize=globalGrid.getDimensions();
            localSize=globalGrid.getDimensions();
@@ -84,66 +85,68 @@ class DistributedMesh<Grid< 2, RealType, Device, Index >>
         }
         else
         {
               //With MPI
               //compute node distribution
            int dims[ 2 ];
            if(distribution!=NULL)
            {
                  procsdistr[0]=distribution[0];
                  procsdistr[1]=distribution[1];
               dims[0]=distribution[0];
               dims[1]=distribution[1];
            }
            else
            {
                  procsdistr[0]=0;
                  procsdistr[1]=0;
               dims[0]=0;
               dims[1]=0;
            }
               CommunicatorType::DimsCreate(nproc, 2, procsdistr);

               processesCoordinates[0]=rank%procsdistr[0];
               processesCoordinates[1]=rank/procsdistr[0];        
            CommunicatorType::DimsCreate( nproc, 2, dims );
            domainDecomposition[ 0 ] = dims[ 0 ];
            domainDecomposition[ 1 ] = dims[ 1 ];

            subdomainCoordinates[ 0 ] = rank % domainDecomposition[ 0 ];
            subdomainCoordinates[ 1 ] = rank / domainDecomposition[ 0 ];        

            //compute local mesh size
            globalSize=globalGrid.getDimensions();              
               numberOfLarger[0]=globalGrid.getDimensions().x()%procsdistr[0];
               numberOfLarger[1]=globalGrid.getDimensions().y()%procsdistr[1];
            numberOfLarger[0]=globalGrid.getDimensions().x()%domainDecomposition[0];
            numberOfLarger[1]=globalGrid.getDimensions().y()%domainDecomposition[1];

               localSize.x()=(globalGrid.getDimensions().x()/procsdistr[0]);
               localSize.y()=(globalGrid.getDimensions().y()/procsdistr[1]);
            localSize.x()=(globalGrid.getDimensions().x()/domainDecomposition[0]);
            localSize.y()=(globalGrid.getDimensions().y()/domainDecomposition[1]);

               if(numberOfLarger[0]>processesCoordinates[0])
            if(numberOfLarger[0]>subdomainCoordinates[0])
                 localSize.x()+=1;               
               if(numberOfLarger[1]>processesCoordinates[1])
            if(numberOfLarger[1]>subdomainCoordinates[1])
                localSize.y()+=1;

               if(numberOfLarger[0]>processesCoordinates[0])
                   globalBegin.x()=processesCoordinates[0]*localSize.x();
            if(numberOfLarger[0]>subdomainCoordinates[0])
                globalBegin.x()=subdomainCoordinates[0]*localSize.x();
            else
                   globalBegin.x()=numberOfLarger[0]*(localSize.x()+1)+(processesCoordinates[0]-numberOfLarger[0])*localSize.x();
                globalBegin.x()=numberOfLarger[0]*(localSize.x()+1)+(subdomainCoordinates[0]-numberOfLarger[0])*localSize.x();

               if(numberOfLarger[1]>processesCoordinates[1])
                   globalBegin.y()=processesCoordinates[1]*localSize.y();
            if(numberOfLarger[1]>subdomainCoordinates[1])
                globalBegin.y()=subdomainCoordinates[1]*localSize.y();

            else
                   globalBegin.y()=numberOfLarger[1]*(localSize.y()+1)+(processesCoordinates[1]-numberOfLarger[1])*localSize.y();
                globalBegin.y()=numberOfLarger[1]*(localSize.y()+1)+(subdomainCoordinates[1]-numberOfLarger[1])*localSize.y();

            localOrigin=globalGrid.getOrigin()+TNL::Containers::tnlDotProduct(globalGrid.getSpaceSteps(),globalBegin-overlap);

            // Determine the ranks of the neighboring subdomains.
               if(processesCoordinates[0]>0)
                   neighbors[Left]=getRankOfProcCoord(processesCoordinates[0]-1,processesCoordinates[1]);
               if(processesCoordinates[0]<procsdistr[0]-1)
                   neighbors[Right]=getRankOfProcCoord(processesCoordinates[0]+1,processesCoordinates[1]);
               if(processesCoordinates[1]>0)
                   neighbors[Up]=getRankOfProcCoord(processesCoordinates[0],processesCoordinates[1]-1);
               if(processesCoordinates[1]<procsdistr[1]-1)
                   neighbors[Down]=getRankOfProcCoord(processesCoordinates[0],processesCoordinates[1]+1);
               if(processesCoordinates[0]>0 && processesCoordinates[1]>0)
                   neighbors[UpLeft]=getRankOfProcCoord(processesCoordinates[0]-1,processesCoordinates[1]-1);
               if(processesCoordinates[0]>0 && processesCoordinates[1]<procsdistr[1]-1)
                   neighbors[DownLeft]=getRankOfProcCoord(processesCoordinates[0]-1,processesCoordinates[1]+1);
               if(processesCoordinates[0]<procsdistr[0]-1 && processesCoordinates[1]>0)
                   neighbors[UpRight]=getRankOfProcCoord(processesCoordinates[0]+1,processesCoordinates[1]-1);
               if(processesCoordinates[0]<procsdistr[0]-1 && processesCoordinates[1]<procsdistr[1]-1)
                   neighbors[DownRight]=getRankOfProcCoord(processesCoordinates[0]+1,processesCoordinates[1]+1);
            if(subdomainCoordinates[0]>0)
                neighbors[Left]=getRankOfProcCoord(subdomainCoordinates[0]-1,subdomainCoordinates[1]);
            if(subdomainCoordinates[0]<domainDecomposition[0]-1)
                neighbors[Right]=getRankOfProcCoord(subdomainCoordinates[0]+1,subdomainCoordinates[1]);
            if(subdomainCoordinates[1]>0)
                neighbors[Up]=getRankOfProcCoord(subdomainCoordinates[0],subdomainCoordinates[1]-1);
            if(subdomainCoordinates[1]<domainDecomposition[1]-1)
                neighbors[Down]=getRankOfProcCoord(subdomainCoordinates[0],subdomainCoordinates[1]+1);
            if(subdomainCoordinates[0]>0 && subdomainCoordinates[1]>0)
                neighbors[UpLeft]=getRankOfProcCoord(subdomainCoordinates[0]-1,subdomainCoordinates[1]-1);
            if(subdomainCoordinates[0]>0 && subdomainCoordinates[1]<domainDecomposition[1]-1)
                neighbors[DownLeft]=getRankOfProcCoord(subdomainCoordinates[0]-1,subdomainCoordinates[1]+1);
            if(subdomainCoordinates[0]<domainDecomposition[0]-1 && subdomainCoordinates[1]>0)
                neighbors[UpRight]=getRankOfProcCoord(subdomainCoordinates[0]+1,subdomainCoordinates[1]-1);
            if(subdomainCoordinates[0]<domainDecomposition[0]-1 && subdomainCoordinates[1]<domainDecomposition[1]-1)
                neighbors[DownRight]=getRankOfProcCoord(subdomainCoordinates[0]+1,subdomainCoordinates[1]+1);

            localBegin=overlap;

@@ -185,12 +188,12 @@ class DistributedMesh<Grid< 2, RealType, Device, Index >>
       
      String printProcessCoords()
      {
           return convertToString(processesCoordinates[0])+String("-")+convertToString(processesCoordinates[1]);
         return convertToString(subdomainCoordinates[0])+String("-")+convertToString(subdomainCoordinates[1]);
      };

      String printProcessDistr()
      {
           return convertToString(procsdistr[0])+String("-")+convertToString(procsdistr[1]);
         return convertToString(domainDecomposition[0])+String("-")+convertToString(domainDecomposition[1]);
      };  
       
      bool isDistributed()
@@ -237,11 +240,17 @@ class DistributedMesh<Grid< 2, RealType, Device, Index >>
         return this->localBegin;
      }
       
      // Append this 2D distributed grid's subdomain layout to the solver
      // prolog.
      void writeProlog( Logger& logger )
      {
         logger.writeParameter( "Domain decomposition:",
                                this->getDomainDecomposition() );
      }
       
        
   private : 
       
      int getRankOfProcCoord(int x, int y)
      {
            return y*procsdistr[0]+x;
         return y*domainDecomposition[0]+x;
      }
        
      PointType spaceSteps;
@@ -260,15 +269,12 @@ class DistributedMesh<Grid< 2, RealType, Device, Index >>
      int rank;
      int nproc;
        
        int procsdistr[2];
        CoordinatesType processesCoordinates;
      CoordinatesType domainDecomposition;
      CoordinatesType subdomainCoordinates;
      int numberOfLarger[2];
        
      int neighbors[8];

      bool isSet;
        

};

} // namespace DistributedMeshes
Loading