Commit 6737734b authored by Jakub Klinkovský
Browse files

Merge branch 'JK/memory-helpers' into 'develop'

Replacing deprecated memory helpers

See merge request !70
parents 569f089b 3abe3c55
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -77,7 +77,7 @@ struct Cuda
   {
#ifdef HAVE_CUDA
      TNL_CHECK_CUDA_DEVICE;
      cudaFree( ptr );
      cudaFree( (void*) ptr );
      TNL_CHECK_CUDA_DEVICE;
#else
      throw Exceptions::CudaSupportMissing();
+1 −1
Original line number Diff line number Diff line
@@ -84,7 +84,7 @@ struct CudaHost
   {
#ifdef HAVE_CUDA
      TNL_CHECK_CUDA_DEVICE;
      cudaFreeHost( ptr );
      cudaFreeHost( (void*) ptr );
      TNL_CHECK_CUDA_DEVICE;
#else
      throw Exceptions::CudaSupportMissing();
+1 −1
Original line number Diff line number Diff line
@@ -79,7 +79,7 @@ struct CudaManaged
   {
#ifdef HAVE_CUDA
      TNL_CHECK_CUDA_DEVICE;
      cudaFree( ptr );
      cudaFree( (void*) ptr );
      TNL_CHECK_CUDA_DEVICE;
#else
      throw Exceptions::CudaSupportMissing();
+2 −45
Original line number Diff line number Diff line
@@ -20,7 +20,7 @@ namespace TNL {
namespace Cuda {

template< typename ObjectType >
[[deprecated("Allocators and MemoryOperations hould be used instead.")]]
[[deprecated("Allocators::Cuda and MultiDeviceMemoryOperations should be used instead.")]]
ObjectType* passToDevice( const ObjectType& object )
{
#ifdef HAVE_CUDA
@@ -45,40 +45,7 @@ ObjectType* passToDevice( const ObjectType& object )
}

// Copies a single object from device memory back to the host and returns it
// by value. Deprecated: use Allocators::Cuda together with
// MultiDeviceMemoryOperations instead.
//
// \param object  pointer to the object in device (global) memory.
// \return        a host-side copy of the device object.
// \throws Exceptions::CudaSupportMissing when compiled without CUDA support.
template< typename ObjectType >
[[deprecated("Allocators::Cuda and MultiDeviceMemoryOperations should be used instead.")]]
ObjectType passFromDevice( const ObjectType* object )
{
#ifdef HAVE_CUDA
   ObjectType aux;
   // Destination is the address of the local host object; source is the
   // device pointer itself. (The previous code passed `aux` by value and
   // `&object` — the address of the local pointer variable — which copied
   // from host stack memory instead of the device allocation.)
   cudaMemcpy( ( void* ) &aux,
               ( const void* ) object,
               sizeof( ObjectType ),
               cudaMemcpyDeviceToHost );
   TNL_CHECK_CUDA_DEVICE;
   return aux;
#else
   throw Exceptions::CudaSupportMissing();
#endif
}

// Copies a single object from device memory into an existing host object.
// Deprecated: use Allocators::Cuda together with MultiDeviceMemoryOperations
// instead.
//
// \param deviceObject  pointer to the source object in device memory.
// \param hostObject    host object overwritten with the device contents.
// \throws Exceptions::CudaSupportMissing when compiled without CUDA support.
template< typename ObjectType >
[[deprecated("Allocators::Cuda and MultiDeviceMemoryOperations should be used instead.")]]
void passFromDevice( const ObjectType* deviceObject,
                     ObjectType& hostObject )
{
#ifdef HAVE_CUDA
   // cudaMemcpy takes a const void* source — no need to cast away const.
   cudaMemcpy( ( void* ) &hostObject,
               ( const void* ) deviceObject,
               sizeof( ObjectType ),
               cudaMemcpyDeviceToHost );
   TNL_CHECK_CUDA_DEVICE;
#else
   throw Exceptions::CudaSupportMissing();
#endif
}

template< typename ObjectType >
[[deprecated("Allocators and MemoryOperations hould be used instead.")]]
[[deprecated("Allocators::Cuda should be used instead.")]]
void freeFromDevice( ObjectType* deviceObject )
{
#ifdef HAVE_CUDA
@@ -89,15 +56,5 @@ void freeFromDevice( ObjectType* deviceObject )
#endif
}

// Streams a device-resident object to `str` by first copying it to the host
// via passFromDevice. ObjectType must provide operator<< for std::ostream.
//
// NOTE(review): unlike the other helpers in this header, this function is a
// silent no-op (rather than throwing Exceptions::CudaSupportMissing) when
// compiled without HAVE_CUDA — confirm whether that asymmetry is intended.
template< typename ObjectType >
void print( const ObjectType* deviceObject, std::ostream& str = std::cout )
{
#ifdef HAVE_CUDA
   ObjectType hostObject;
   // Device-to-host copy of a single object, then ordinary stream output.
   passFromDevice( deviceObject, hostObject );
   str << hostObject;
#endif
}

} // namespace Cuda
} // namespace TNL
+13 −8
Original line number Diff line number Diff line
@@ -11,7 +11,8 @@
#pragma once

#include <TNL/Devices/Cuda.h>
#include <TNL/Cuda/MemoryHelpers.h>
#include <TNL/Allocators/Cuda.h>
#include <TNL/Algorithms/MultiDeviceMemoryOperations.h>

#include <TNL/Functions/Analytic/Constant.h>
#include <TNL/Functions/Analytic/ExpBump.h>
@@ -139,7 +140,8 @@ setupFunction( const Config::ParameterContainer& parameters,
   }
   if( std::is_same< Device, Devices::Cuda >::value )
   {
      this->function = Cuda::passToDevice( *auxFunction );
      this->function = Allocators::Cuda< FunctionType >{}.allocate( 1 );
      Algorithms::MultiDeviceMemoryOperations< Devices::Cuda, Devices::Host >::copy( (FunctionType*) this->function, (FunctionType*) auxFunction, 1 );
      delete auxFunction;
      TNL_CHECK_CUDA_DEVICE;
   }
@@ -168,7 +170,8 @@ setupOperator( const Config::ParameterContainer& parameters,
   }
   if( std::is_same< Device, Devices::Cuda >::value )
   {
      this->operator_ = Cuda::passToDevice( *auxOperator );
      this->operator_ = Allocators::Cuda< OperatorType >{}.allocate( 1 );
      Algorithms::MultiDeviceMemoryOperations< Devices::Cuda, Devices::Host >::copy( (OperatorType*) this->operator_, (OperatorType*) auxOperator, 1 );
      delete auxOperator;
      TNL_CHECK_CUDA_DEVICE;
   }
@@ -738,7 +741,7 @@ deleteFunction()
   if( std::is_same< Device, Devices::Cuda >::value )
   {
      if( function )
         Cuda::freeFromDevice( ( FunctionType * ) function );
         Allocators::Cuda< FunctionType >{}.deallocate( (FunctionType*) function, 1 );
   }
}

@@ -758,7 +761,7 @@ deleteOperator()
   if( std::is_same< Device, Devices::Cuda >::value )
   {
      if( operator_ )
         Cuda::freeFromDevice( ( OperatorType * ) operator_ );
         Allocators::Cuda< OperatorType >{}.deallocate( (OperatorType*) operator_, 1 );
   }
}

@@ -906,15 +909,17 @@ std::ostream&
TestFunction< FunctionDimension, Real, Device >::
printFunction( std::ostream& str ) const
{
   FunctionType* f = ( FunctionType* ) this->function;
   if( std::is_same< Device, Devices::Host >::value )
   {
      FunctionType* f = ( FunctionType* ) this->function;
      str << *f;
      return str;
   }
   if( std::is_same< Device, Devices::Cuda >::value )
   {
      Cuda::print( f, str );
      FunctionType f;
      Algorithms::MultiDeviceMemoryOperations< Devices::Host, Devices::Cuda >::copy( &f, (FunctionType*) this->function, 1 );
      str << f;
      return str;
   }
}
Loading