Commit 2d93d568 authored by Jakub Klinkovský, committed by Tomáš Oberhuber

Removed TNL::is_same and tweaked asserts in Array and ArrayView

parent 603b5edf
+0 −19
@@ -124,25 +124,6 @@
 #include <TNL/Debugging/StackBacktrace.h>
 
 namespace TNL {
-
-   // This is alternative implementation of is_same because std::is_same
-   // does not work in CUDA device code ("std::integral_constant<bool, (bool)0> ::value").
-   // This can be removed when std::_is_same works well.
-   //
-   template< typename T1, typename T2 >
-   struct is_same
-   {
-      __cuda_callable__
-      static constexpr bool value() { return false; }
-   };
-
-   template< typename T1 >
-   struct is_same< T1, T1 >
-   {
-      __cuda_callable__
-      static constexpr bool value() { return true; }
-   };
-
 /**
  * \brief Internal namespace for helper classes used in the TNL_ASSERT_* macros.
  */
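The removed helper was only a workaround: some nvcc releases failed to resolve the static ::value member of std::integral_constant in device code. The replacement used throughout this commit value-constructs std::is_same and invokes its constexpr operator(), which returns the wrapped boolean without ever naming ::value. A minimal host-side sketch of the new spelling (C++14; the device tags here are stand-ins, not TNL's):

   #include <type_traits>

   // stand-ins for TNL's device tags, for illustration only
   struct Host {};
   struct Cuda {};

   template< typename Device >
   constexpr bool isCuda()
   {
      // equivalent to the removed TNL::is_same< Device, Cuda >::value();
      // std::is_same derives from std::integral_constant, whose constexpr
      // operator() returns the stored boolean, so ::value is never named
      return std::is_same< Device, Cuda >{}();
   }

   static_assert(   isCuda< Cuda >(), "Cuda tag must match" );
   static_assert( ! isCuda< Host >(), "Host tag must not match" );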
+5 −7
@@ -71,7 +71,7 @@ Array( const IndexType& size, const Value& value, const AllocatorType& allocator
 : allocator( allocator )
 {
    this->setSize( size );
-   ( *this ) = value;
+   *this = value;
 }
 
 template< typename Value,
@@ -522,10 +522,9 @@ Array< Value, Device, Index, Allocator >::
 operator[]( const Index& i )
 {
 #ifdef __CUDA_ARCH__
-   TNL_ASSERT_TRUE( ( TNL::is_same< Device, Devices::Cuda >::value() ), "Attempt to access data not allocated on CUDA device from CUDA device." );
+   TNL_ASSERT_TRUE( (std::is_same< Device, Devices::Cuda >{}()), "Attempt to access data not allocated on CUDA device from CUDA device." );
 #else
-   TNL_ASSERT_TRUE( ( TNL::is_same< Device, Devices::Host >::value() || TNL::is_same< Device, Devices::Sequential >::value() ),
-               "Attempt to access data not allocated on the host from the host." );
+   TNL_ASSERT_FALSE( (std::is_same< Device, Devices::Cuda >{}()), "Attempt to access data not allocated on the host from the host." );
 #endif
    TNL_ASSERT_GE( i, (Index) 0, "Element index must be non-negative." );
    TNL_ASSERT_LT( i, this->getSize(), "Element index is out of bounds." );
@@ -542,10 +541,9 @@ Array< Value, Device, Index, Allocator >::
 operator[]( const Index& i ) const
 {
 #ifdef __CUDA_ARCH__
-   TNL_ASSERT_TRUE( ( TNL::is_same< Device, Devices::Cuda >::value() ), "Attempt to access data not allocated on CUDA device from CUDA device." );
+   TNL_ASSERT_TRUE( (std::is_same< Device, Devices::Cuda >{}()), "Attempt to access data not allocated on CUDA device from CUDA device." );
 #else
-   TNL_ASSERT_TRUE( ( TNL::is_same< Device, Devices::Host >::value() || TNL::is_same< Device, Devices::Sequential >::value() ),
-               "Attempt to access data not allocated on the host from the host." );
+   TNL_ASSERT_FALSE( (std::is_same< Device, Devices::Cuda >{}()), "Attempt to access data not allocated on the host from the host." );
 #endif
    TNL_ASSERT_GE( i, (Index) 0, "Element index must be non-negative." );
    TNL_ASSERT_LT( i, this->getSize(), "Element index is out of bounds." );
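Note that beyond the spelling change, the host-side check is now weaker: instead of whitelisting Devices::Host and Devices::Sequential, it merely rejects Devices::Cuda, so any future non-CUDA device tag passes without touching the assert again. A hedged before/after sketch with a plain assert standing in for TNL_ASSERT_* (all names here are illustrative):

   #include <cassert>
   #include <type_traits>

   struct Host {};
   struct Sequential {};
   struct Cuda {};
   struct SomeFutureDevice {};   // hypothetical non-CUDA device tag

   template< typename Device >
   void hostAccessCheck()
   {
      // old whitelist: only Host and Sequential were accepted
      // assert( ( std::is_same< Device, Host >{}() || std::is_same< Device, Sequential >{}() ) );

      // new blacklist: everything except Cuda is accepted
      assert( ! std::is_same< Device, Cuda >{}() );
   }

   int main()
   {
      hostAccessCheck< Host >();             // passes both the old and the new check
      hostAccessCheck< SomeFutureDevice >(); // passes only the new, weaker check
   }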
+4 −6
@@ -253,10 +253,9 @@ Value& ArrayView< Value, Device, Index >::
 operator[]( Index i )
 {
 #ifdef __CUDA_ARCH__
-   TNL_ASSERT_TRUE( ( TNL::is_same< Device, Devices::Cuda >::value() ), "Attempt to access data not allocated on CUDA device from CUDA device." );
+   TNL_ASSERT_TRUE( (std::is_same< Device, Devices::Cuda >{}()), "Attempt to access data not allocated on CUDA device from CUDA device." );
 #else
-   TNL_ASSERT_TRUE( ( TNL::is_same< Device, Devices::Host >::value() || TNL::is_same< Device, Devices::Sequential >::value() ),
-               "Attempt to access data not allocated on the host from the host." );
+   TNL_ASSERT_FALSE( (std::is_same< Device, Devices::Cuda >{}()), "Attempt to access data not allocated on the host from the host." );
 #endif
    TNL_ASSERT_GE( i, 0, "Element index must be non-negative." );
    TNL_ASSERT_LT( i, this->getSize(), "Element index is out of bounds." );
@@ -272,10 +271,9 @@ Value& ArrayView< Value, Device, Index >::
 operator[]( Index i ) const
 {
 #ifdef __CUDA_ARCH__
-   TNL_ASSERT_TRUE( ( TNL::is_same< Device, Devices::Cuda >::value() ), "Attempt to access data not allocated on CUDA device from CUDA device." );
+   TNL_ASSERT_TRUE( (std::is_same< Device, Devices::Cuda >{}()), "Attempt to access data not allocated on CUDA device from CUDA device." );
 #else
-   TNL_ASSERT_TRUE( ( TNL::is_same< Device, Devices::Host >::value() || TNL::is_same< Device, Devices::Sequential >::value() ),
-               "Attempt to access data not allocated on the host from the host." );
+   TNL_ASSERT_FALSE( (std::is_same< Device, Devices::Cuda >{}()), "Attempt to access data not allocated on the host from the host." );
 #endif
    TNL_ASSERT_GE( i, 0, "Element index must be non-negative." );
    TNL_ASSERT_LT( i, this->getSize(), "Element index is out of bounds." );
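For context, a hedged usage sketch of the code paths these asserts guard, assuming TNL's public Containers::Array header, its getView() accessor, and a debug build in which the TNL_ASSERT_* macros are active:

   #include <TNL/Containers/Array.h>

   int main()
   {
      TNL::Containers::Array< int, TNL::Devices::Host > a( 10 );
      auto view = a.getView();
      view[ 0 ] = 42;   // host data accessed from the host: the device assert passes
      // view[ -1 ];    // would trip TNL_ASSERT_GE (negative index)
      // view[ 10 ];    // would trip TNL_ASSERT_LT (index out of bounds)
   }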
+2 −2
File changed. Contains only whitespace changes.

+2 −2
File changed. Contains only whitespace changes.