Commit 053bc398 authored by Yury Hayeu's avatar Yury Hayeu
Browse files

Implement heat equation solver

parent 8a23431f
Loading
Loading
Loading
Loading
+8 −1
Original line number Diff line number Diff line
@@ -12,7 +12,11 @@ if (${BUILD_CUDA})
   STRING(REGEX REPLACE "DIMENSION_VALUE" ${DIMENSION} TEMPLATE_CONTENT "${TEMPLATE_CONTENT}")
   STRING(REGEX REPLACE "KERNEL_VALUE" "\"../${KERNEL_HEADER}\"" TEMPLATE_CONTENT "${TEMPLATE_CONTENT}")

   FILE(READ ${SOURCE_FILE} SOURCE_FILE_CONTENT)

   if ( NOT "${SOURCE_FILE_CONTENT}" STREQUAL "${TEMPLATE_CONTENT}" )
      FILE(WRITE ${SOURCE_FILE} "${TEMPLATE_CONTENT}")
   endif()

   SET(EXECUTABLE_NAME "${PREFIX}_${DIMENSION}_${MODULE_NAME}_${TEMPLATE_NAME}")

@@ -63,3 +67,6 @@ GENERATE_CUDA_EXECUTABLE("ImageConvolution" 2 "templates/main_image_solver.h" "k
GENERATE_CUDA_EXECUTABLE("ImageConvolution" 2 "templates/main_image_solver.h" "kernels/sharedData.h")
GENERATE_CUDA_EXECUTABLE("ImageConvolution" 2 "templates/main_image_solver.h" "kernels/sharedKernel.h")
GENERATE_CUDA_EXECUTABLE("ImageConvolution" 2 "templates/main_image_solver.h" "kernels/sharedDataAndKernel.h")

GENERATE_CUDA_EXECUTABLE("HeatEquation" 2 "templates/main_heat_equation_solver.h" "kernels/naive.h")
GENERATE_CUDA_EXECUTABLE("HeatEquation" 2 "templates/main_heat_equation_solver.h" "kernels/sharedDataAndKernel.h")
+6 −6
Original line number Diff line number Diff line
@@ -39,7 +39,7 @@ public:
   using ConvolutionLauncher = Convolution< Dimension, Device >;

   static void
   exec( const Vector& dimensions, const Vector& kernelSize, ConstDataStore& input, DataStore& result, ConstDataStore& kernel )
   exec( const Vector& dimensions, const Vector& kernelSize, ConstDataStore& input, DataStore& result, ConstDataStore& kernel, int boundaryValue = 1 )
   {
      auto fetchData = [ = ] __cuda_callable__( Index i )
      {
@@ -48,7 +48,7 @@ public:

      auto fetchBoundary = [ = ] __cuda_callable__( Index i )
      {
         return 1;
         return boundaryValue;
      };

      auto fetchKernel = [ = ] __cuda_callable__( Index i )
@@ -88,7 +88,7 @@ public:
   using ConvolutionLauncher = Convolution< Dimension, Device >;

   static void
   exec( const Vector& dimensions, const Vector& kernelSize, ConstDataStore& input, DataStore& result, ConstDataStore& kernel )
   exec( const Vector& dimensions, const Vector& kernelSize, ConstDataStore& input, DataStore& result, ConstDataStore& kernel, int boundaryValue = 1 )
   {
      auto fetchData = [ = ] __cuda_callable__( Index i, Index j )
      {
@@ -99,7 +99,7 @@ public:

      auto fetchBoundary = [ = ] __cuda_callable__( Index i, Index j )
      {
         return 1;
         return boundaryValue;
      };

      auto fetchKernel = [ = ] __cuda_callable__( Index i, Index j )
@@ -143,7 +143,7 @@ public:
   using ConvolutionLauncher = Convolution< Dimension, Device >;

   static void
   exec( const Vector& dimensions, const Vector& kernelSize, ConstDataStore& input, DataStore& result, ConstDataStore& kernel )
   exec( const Vector& dimensions, const Vector& kernelSize, ConstDataStore& input, DataStore& result, ConstDataStore& kernel, int boundaryValue = 1 )
   {
      auto fetchData = [ = ] __cuda_callable__( Index i, Index j, Index k )
      {
@@ -154,7 +154,7 @@ public:

      auto fetchBoundary = [ = ] __cuda_callable__( Index i, Index j, Index k )
      {
         return 1;
         return boundaryValue;
      };

      auto fetchKernel = [ = ] __cuda_callable__( Index i, Index j, Index k )
+189 −0
Original line number Diff line number Diff line

#pragma once

#include "Solver.h"
#include "DummyTask.h"

#include <TNL/Algorithms/ParallelFor.h>
#include <TNL/Timer.h>

// Parameter-name keys shared by makeInputConfig() and start(). Declared const:
// they are only ever read, and mutable globals in a header invite accidental
// modification from any including translation unit.
static const std::vector< TNL::String > dimensionIds = { "grid-x-size", "grid-y-size" };
static const std::vector< TNL::String > kernelSizeIds = { "kernel-x-size", "kernel-y-size" };
static const std::vector< TNL::String > domainIds = { "domain-x-size", "domain-y-size" };
static const std::string sigmaKey = "sigma";
// NOTE(review): timestepKey is not referenced anywhere in this header — confirm
// whether a time-stepping entry was meant to be registered in makeInputConfig().
static const std::string timestepKey = "timeStep";
static const std::string finalTimeKey = "finalTime";
static const std::string outputFilenamePrefix = "outputFilenamePrefix";

template< typename Real = double >
class HeatEquationSolver : public Solver< 2, TNL::Devices::Cuda >
{
public:
   constexpr static int Dimension = 2;
   using Device = TNL::Devices::Cuda;

   using Base = Solver< Dimension, Device >;
   using Vector = TNL::Containers::StaticVector< Dimension, int >;
   using Point = TNL::Containers::StaticVector< Dimension, Real >;
   using DataStore = TNL::Containers::Vector< Real, Device, int >;
   using HostDataStore = TNL::Containers::Vector< Real, TNL::Devices::Host, int >;

   /// Entry point: reads grid/kernel/domain parameters, builds the Gaussian
   /// initial condition, writes it to "<prefix>_initial.txt", advances the
   /// solution to the final time by convolving with the heat kernel and
   /// writes the result to "<prefix>_final.txt".
   virtual void
   start( const TNL::Config::ParameterContainer& parameters ) const override
   {
      int gridXSize = parameters.getParameter< int >( dimensionIds[ 0 ] );
      int gridYSize = parameters.getParameter< int >( dimensionIds[ 1 ] );

      int kernelXSize = parameters.getParameter< int >( kernelSizeIds[ 0 ] );
      int kernelYSize = parameters.getParameter< int >( kernelSizeIds[ 1 ] );

      Real xDomainSize = parameters.getParameter< Real >( domainIds[ 0 ] );
      Real yDomainSize = parameters.getParameter< Real >( domainIds[ 1 ] );

      // Space steps: one cell per grid node across the whole domain.
      Real hx = xDomainSize / (Real) gridXSize;
      Real hy = yDomainSize / (Real) gridYSize;

      Point domain = { xDomainSize, yDomainSize };
      Point spaceSteps = { hx, hy };

      Vector dimensions = { gridXSize, gridYSize };
      Vector kernelSize = { kernelXSize, kernelYSize };

      DataStore function = prepareFunction( parameters, dimensions, domain, spaceSteps );

      auto filenamePrefix = parameters.getParameter< TNL::String >( outputFilenamePrefix );
      auto initialFilename = filenamePrefix + "_initial.txt";

      if( ! writeGNUPlot( initialFilename, dimensions, spaceSteps, domain, function.getConstView() ) ) {
         std::cerr << "Failed to write file " << initialFilename << std::endl;
         return;
      }

      DataStore result;

      result.setLike( function );
      result = 0;

      auto finalTime = parameters.getParameter< Real >( finalTimeKey );

      convolve( dimensions, domain, spaceSteps, kernelSize, function.getConstView(), result.getView(), finalTime );

      auto finalFilename = filenamePrefix + "_final.txt";

      if( ! writeGNUPlot( finalFilename, dimensions, spaceSteps, domain, result.getConstView() ) ) {
         std::cerr << "Failed to write file " << finalFilename << std::endl;
         return;
      }
   }

   /// Registers all command-line parameters of this solver on top of the
   /// entries provided by the base Solver.
   virtual TNL::Config::ConfigDescription
   makeInputConfig() const override
   {
      TNL::Config::ConfigDescription config = Base::makeInputConfig();

      config.addDelimiter( "Grid settings:" );
      config.addEntry< int >( dimensionIds[ 0 ], "Grid size along x-axis.", 100 );
      config.addEntry< int >( dimensionIds[ 1 ], "Grid size along y-axis.", 100 );

      config.addDelimiter( "Kernel settings:" );
      config.addEntry< int >( kernelSizeIds[ 0 ], "Kernel size along x-axis.", 3 );
      config.addEntry< int >( kernelSizeIds[ 1 ], "Kernel size along y-axis.", 3 );

      config.addDelimiter( "Problem settings:" );
      config.addEntry< TNL::String >( outputFilenamePrefix, "The prefix in name of the output file", "data" );

      config.addEntry< Real >( domainIds[ 0 ], "Domain size along x-axis.", 4.0 );
      config.addEntry< Real >( domainIds[ 1 ], "Domain size along y-axis.", 4.0 );

      config.addEntry< Real >( sigmaKey, "Sigma in exponential initial condition.", 0.5 );

      config.addEntry< Real >( finalTimeKey, "Final time of the simulation.", 0.12 );

      return config;
   }

   /// Builds the initial condition exp( -sigma * (x^2 + y^2) ) — a Gaussian
   /// bump centered in the domain — evaluated at every grid node on the device.
   DataStore
   prepareFunction( const TNL::Config::ParameterContainer& parameters,
                    const Vector& dimensions,
                    const Point& domain,
                    const Point& spaceSteps ) const
   {
      DataStore function;

      function.resize( dimensions.x() * dimensions.y() );

      auto functionView = function.getView();

      auto sigma = parameters.getParameter< Real >( sigmaKey );

      // mutable: the device lambda writes through the captured view.
      auto init = [ = ] __cuda_callable__( int i, int j ) mutable
      {
         auto index = j * dimensions.x() + i;

         // Physical coordinates with the origin shifted to the domain center.
         auto x = i * spaceSteps.x() - domain.x() / 2.;
         auto y = j * spaceSteps.y() - domain.y() / 2.;

         // Fix: the exponent must be negative so the profile decays away from
         // the center; without the minus sign the "Gaussian" blows up toward
         // the domain boundary for the (positive) default sigma.
         functionView[ index ] = exp( -sigma * ( x * x + y * y ) );
      };

      TNL::Algorithms::ParallelFor2D< Device >::exec( 0, 0, dimensions.x(), dimensions.y(), init );

      return function;
   }

   /// Advances the solution to the given time by convolving the input with a
   /// sampled heat kernel G(x, y, t) = 1/(4*pi*t) * exp( -(x^2+y^2)/(4t) ).
   /// `domain` is kept for interface stability; the kernel is centered on its
   /// own midpoint, not on the domain.
   void
   convolve( const Vector& dimensions,
             const Point& domain,
             const Point& spaceSteps,
             const Vector& kernelSize,
             typename DataStore::ConstViewType input,
             typename DataStore::ViewType result,
             const Real time ) const
   {
      // The heat-kernel weights diverge as t -> 0; refuse non-positive times
      // instead of dividing by zero below.
      if( time <= 0 ) {
         std::cerr << "convolve: time must be positive, got " << time << std::endl;
         return;
      }

      HostDataStore kernel;

      kernel.resize( kernelSize.x() * kernelSize.y() );

      // Fix: sample the kernel at offsets from the KERNEL center. The original
      // used `i * hx - domain.x() / 2.`, i.e. offsets from the domain center,
      // which for a small (3x3) kernel evaluates G roughly 2 units off-center
      // where it is vanishingly small. Also weight each sample by the cell
      // area hx*hy so the discrete sum approximates the continuous integral
      // (the kernel then sums to ~1 when it fits in the stencil).
      const Real xCenter = ( kernelSize.x() - 1 ) / 2.;
      const Real yCenter = ( kernelSize.y() - 1 ) / 2.;
      const Real cellArea = spaceSteps.x() * spaceSteps.y();

      for( int j = 0; j < kernelSize.y(); j++ ) {
         for( int i = 0; i < kernelSize.x(); i++ ) {
            int index = i + j * kernelSize.x();

            auto x = ( i - xCenter ) * spaceSteps.x();
            auto y = ( j - yCenter ) * spaceSteps.y();

            kernel[ index ] = cellArea * ( 1. / ( 4. * M_PI * time ) ) * exp( -( x * x + y * y ) / ( 4. * time ) );
         }
      }

      DataStore kernelDevice( kernel );

      auto kernelView = kernelDevice.getConstView();

      // Boundary value 0: cells outside the grid contribute nothing.
      DummyTask< int, Real, Dimension, Device >::exec( dimensions, kernelSize, input, result, kernelView, 0 );
   }

   /// Dumps the map as "x y value" triples (one grid node per line) suitable
   /// for gnuplot's splot. Returns false if the file cannot be opened or the
   /// stream failed while writing.
   bool
   writeGNUPlot( const std::string& filename,
                 const Vector& dimensions,
                 const Point& spaceSteps,
                 const Point& domain,
                 const typename DataStore::ConstViewType& map ) const
   {
      std::ofstream out( filename, std::ios::out );

      if( ! out.is_open() )
         return false;

      for( int j = 0; j < dimensions.y(); j++ )
         for( int i = 0; i < dimensions.x(); i++ )
            out << i * spaceSteps.x() - domain.x() / 2. << " "
                << j * spaceSteps.y() - domain.y() / 2. << " "
                << map.getElement( j * dimensions.x() + i ) << std::endl;

      return out.good();
   }
};
+32 −46
Original line number Diff line number Diff line
@@ -13,7 +13,7 @@
#include <TNL/Images/RegionOfInterest.h>

static std::vector< TNL::String > dimensionIds = { "x-dimension", "y-dimension", "z-dimension" };
static std::vector< TNL::String > kernelSizeIds = { "x-kernel-size", "y-kernel-size", "z-kernel-size" };
static std::vector< TNL::String > kernelSizeIds = { "kernel-x-size", "kernel-y-size", "kernel-z-size" };
static std::vector< TNL::String > kernels = { "identity",        "gauss3x3",      "gauss5x5",
                                              "sobelHorizontal", "sobelVertical", "edgeDetection" };

@@ -49,6 +49,30 @@ public:
         return;
   }

   virtual TNL::Config::ConfigDescription
   makeInputConfig() const override
   {
      TNL::Config::ConfigDescription config = Base::makeInputConfig();

      config.addDelimiter( "Image settings:" );

      config.addEntry< TNL::String >( "input", "PNG image" );
      config.addEntry< TNL::String >( "output", "PNG image" );
      config.addEntry< TNL::String >( "kernel", "A kernel to apply", kernels[ 0 ] );

      for( const auto& kernel : kernels )
         config.addEntryEnum( kernel );

      config.addDelimiter( "Roi settings:" );

      config.addEntry< int >( "roi-top", "Top (smaller number) line of the region of interest.", -1 );
      config.addEntry< int >( "roi-bottom", "Bottom (larger number) line of the region of interest.", -1 );
      config.addEntry< int >( "roi-left", "Left (smaller number) column of the region of interest.", -1 );
      config.addEntry< int >( "roi-right", "Right (larger number) column of the region of interest.", -1 );

      return config;
   }

   template< typename Image >
   bool
   readImage( const TNL::Config::ParameterContainer& parameters,
@@ -164,17 +188,13 @@ public:
      if( kernel == "identity" ) {
         kernelDimension = { 3, 3 };

         return { 0, 0, 0,
                  0, 1, 0,
                  0, 0, 0 };
         return { 0, 0, 0, 0, 1, 0, 0, 0, 0 };
      }

      if( kernel == "gauss3x3" ) {
         kernelDimension = { 3, 3 };

         HostDataStore kernel = { 1, 2, 1,
                                  2, 4, 2,
                                  1, 2, 1 };
         HostDataStore kernel = { 1, 2, 1, 2, 4, 2, 1, 2, 1 };

         kernel /= 16;

@@ -184,11 +204,7 @@ public:
      if( kernel == "gauss5x5" ) {
         kernelDimension = { 5, 5 };

        HostDataStore kernel = { 1, 4, 7, 4, 1,
                                 4, 16, 26, 16, 4,
                                 7, 26, 41, 26, 7,
                                 4, 16, 26, 16, 4,
                                 1, 4, 7, 4, 1 };
         HostDataStore kernel = { 1, 4, 7, 4, 1, 4, 16, 26, 16, 4, 7, 26, 41, 26, 7, 4, 16, 26, 16, 4, 1, 4, 7, 4, 1 };

         kernel /= 273;

@@ -198,25 +214,19 @@ public:
      if( kernel == "sobelHorizontal" ) {
         kernelDimension = { 3, 3 };

         return { 1, 2, 1,
                  0, 0, 0,
                  -1, -2, -1 };
         return { 1, 2, 1, 0, 0, 0, -1, -2, -1 };
      }

      if( kernel == "sobelVertical" ) {
         kernelDimension = { 3, 3 };

         return { 1, 0, -1,
                  2, 0, -2,
                  1, 0, -1 };
         return { 1, 0, -1, 2, 0, -2, 1, 0, -1 };
      }

      if( kernel == "edgeDetection" ) {
         kernelDimension = { 3, 3 };

         return { -1, -1, -1,
                  -1, 8, -1,
                  -1, -1, -1 };
         return { -1, -1, -1, -1, 8, -1, -1, -1, -1 };
      }

      std::cout << "Unknown kernel " << kernel << ". Exit" << std::endl;
@@ -232,28 +242,4 @@ public:
   {
      DummyTask< int, float, Dimension, Device >::exec( imageDimension, kernelDimension, image, result, kernel );
   }

   virtual TNL::Config::ConfigDescription
   makeInputConfig() const override
   {
      TNL::Config::ConfigDescription config = Base::makeInputConfig();

      config.addDelimiter( "Image settings:" );

      config.addEntry< TNL::String >( "input", "PNG image" );
      config.addEntry< TNL::String >( "output", "PNG image" );
      config.addEntry< TNL::String >( "kernel", "A kernel to apply", kernels[ 0 ] );

      for( const auto& kernel : kernels )
         config.addEntryEnum( kernel);

      config.addDelimiter( "Roi settings:" );

      config.addEntry< int >( "roi-top", "Top (smaller number) line of the region of interest.", -1 );
      config.addEntry< int >( "roi-bottom", "Bottom (larger number) line of the region of interest.", -1 );
      config.addEntry< int >( "roi-left", "Left (smaller number) column of the region of interest.", -1 );
      config.addEntry< int >( "roi-right", "Right (larger number) column of the region of interest.", -1 );

      return config;
   }
};
+26 −0
Original line number Diff line number Diff line

#define KERNEL KERNEL_VALUE
#define DIMENSION DIMENSION_VALUE

#include KERNEL
#include "../support/HeatEquationSolver.h"

#include <TNL/Config/parseCommandLine.h>

// Concrete solver instantiation with the default Real type (double).
using TaskSolver = HeatEquationSolver<>;

// Parses the command line against the solver's config description and runs
// the heat-equation solver. Returns EXIT_FAILURE on a malformed command line.
int main( int argc, char* argv[] )
{
   TaskSolver solver;

   auto config = solver.makeInputConfig();

   TNL::Config::ParameterContainer parameters;

   if( ! parseCommandLine( argc, argv, config, parameters ) )
      return EXIT_FAILURE;

   solver.solve( parameters );

   // Consistency: pair EXIT_FAILURE above with EXIT_SUCCESS instead of raw 0.
   return EXIT_SUCCESS;
}