Commit dc659479 authored by Tomáš Jakubec's avatar Tomáš Jakubec
Browse files

Fix of recursive application of TNL::abs on vector of vectors.

parent b9f94597
Loading
Loading
Loading
Loading
+77 −1
Original line number Diff line number Diff line
@@ -237,7 +237,7 @@ class Array
      virtual String getSerializationTypeVirtual() const;

      /**
       * \brief Method for setting the array size.
       * \brief Method for setting the array size. Current data will be lost.
       *
       * If the array shares data with other arrays, the data is unbound. If the
       * current data is not shared and the current size is the same as the new
@@ -250,6 +250,50 @@ class Array
       */
      void setSize( Index size );

      /**
       * \brief Method for setting the array size with initializing value
       * applied to new elements.
       *
       * If the array shares data with other arrays, the data is unbound. If the
       * current data is not shared and the current size is the same as the new
       * one, nothing happens.
       *
       * If the array size changes, the current data will be deallocated, thus
       * all pointers and views to the array elements will become invalid.
       *
       * \param size The new size of the array.
       */
      void setSize( Index size , const Value& ini);

      /**
       * \brief Method for setting the array size.
       *
       * If the array shares data with other arrays, the data is unbound. If the
       * current data is not shared and the current size is the same as the new
       * one, nothing happens.
       *
       * If the array size changes, the current data will be deallocated, thus
       * all pointers and views to the array elements will become invalid.
       *
       * \param size The new size of the array.
       */
      void resize( Index size );

      /**
       * \brief Method for setting the array size with initializing value
       * applied to new elements.
       *
       * If the array shares data with other arrays, the data is unbound. If the
       * current data is not shared and the current size is the same as the new
       * one, nothing happens.
       *
       * If the array size changes, the current data will be deallocated, thus
       * all pointers and views to the array elements will become invalid.
       *
       * \param size The new size of the array.
       */
      void resize( Index size , const Value& ini);

      /**
       * \brief Returns the current array size.
       *
@@ -453,6 +497,38 @@ class Array
       */
      __cuda_callable__ Value& operator[]( const Index& i );

      /**
       * \brief Accesses the specified member of the \e i-th element of the array.
       *
       * This method can be called only from the device which has direct access
       * to the memory space where the array was allocated. For example, if the
       * array was allocated in the host memory, it can be called only from
       * host, and if the array was allocated in the device memory, it can be
       * called only from device kernels.
       *
       * \param i The index of the element to be accessed.
       * \return Constant reference to the \e i-th element.
       */
      template<unsigned int ... Indexes>
      //__cuda_callable__
      const auto& operator()( const Index& i, std::integer_sequence<unsigned int, Indexes...> ) const;

      /**
       * \brief Accesses the specified member of the \e i-th element of the array.
       *
       * This method can be called only from the device which has direct access
       * to the memory space where the array was allocated. For example, if the
       * array was allocated in the host memory, it can be called only from
       * host, and if the array was allocated in the device memory, it can be
       * called only from device kernels.
       *
       * \param i The index of the element to be accessed.
       * \return Reference to the \e i-th element.
       */
      template<unsigned int ... Indexes>
      //__cuda_callable__
      auto& operator()( const Index& i, std::integer_sequence<unsigned int, Indexes...> );

      /**
       * \brief Accesses the \e i-th element of the array.
       *
+89 −1
Original line number Diff line number Diff line
@@ -232,7 +232,7 @@ void
Array< Value, Device, Index, Allocator >::
setSize( Index size )
{
   TNL_ASSERT_GE( size, (Index) 0, "Array size must be non-negative." );
   TNL_ASSERT_GE( size, Index(0), "Array size must be non-negative." );

   if( this->size == size && allocationPointer && ! referenceCounter )
      return;
@@ -249,6 +249,61 @@ setSize( Index size )
   }
}

template< typename Value,
          typename Device,
          typename Index,
          typename Allocator >
void
Array< Value, Device, Index, Allocator >::
setSize( Index size , const Value& iniVal )
{
   // Remember the old size so only the newly added elements are initialized.
   // NOTE(review): setSize() discards the current data when the size changes,
   // so elements in [0, prevSize) are NOT preserved — confirm this matches
   // the documented "new elements" contract.
   const IndexType prevSize = this->size;

   setSize( size );

   // Guard against an invalid (begin > end) range when the array shrinks;
   // this mirrors the guard in resize( size, iniVal ).
   if( prevSize < size )
      this->setValue( iniVal, prevSize, size );
}


template< typename Value,
          typename Device,
          typename Index,
          typename Allocator >
void
Array< Value, Device, Index, Allocator >::
resize( Index size )
{
   TNL_ASSERT_GE( size, Index(0), "Array size must be non-negative." );

   // Nothing to do if the size is unchanged and we own unshared storage.
   if( this->size == size && allocationPointer && ! referenceCounter )
      return;

   Array< Value, Device, Index, Allocator > newArray;
   newArray.setSize( size );

   // Copy only as many elements as fit in the new storage — copying
   // this->size elements would overrun newArray when the array shrinks.
   const Index copySize = ( size < this->size ) ? size : this->size;
   Algorithms::MemoryOperations< Device >::copy( newArray.getData(), this->data, copySize );

   *this = std::move( newArray );
}

template< typename Value,
          typename Device,
          typename Index,
          typename Allocator >
void
Array< Value, Device, Index, Allocator >::
resize( Index size , const Value& iniVal )
{
   // Keep the old size so we know which elements were appended by resize().
   const IndexType oldSize = this->size;

   resize( size );

   // Initialize only the newly appended elements; elements preserved by
   // resize() keep their previous values.
   if( oldSize < size )
      this->setValue( iniVal, oldSize, size );
}


template< typename Value,
          typename Device,
          typename Index,
@@ -528,6 +583,39 @@ operator[]( const Index& i ) const
   return this->data[ i ];
}


template< typename Value,
          typename Device,
          typename Index,
          typename Allocator >
template< unsigned int ... Indexes>
//__cuda_callable__
auto&
Array< Value, Device, Index, Allocator >::
operator()( const Index& i , std::integer_sequence<unsigned int, Indexes...> )
{
   TNL_ASSERT_GE( i, Index(0), "Element index must be non-negative." );
   TNL_ASSERT_LT( i, this->getSize(), "Element index is out of bounds." );
   // Unqualified get with std::get visible enables ADL, so tuple-like
   // element types providing their own get overload are supported.
   Value& element = this->data[ i ];
   using std::get;
   return get< Indexes... >( element );
}

template< typename Value,
          typename Device,
          typename Index,
          typename Allocator >
template< unsigned int ... Indexes>
//__cuda_callable__
const auto&
Array< Value, Device, Index, Allocator >::
operator()( const Index& i, std::integer_sequence<unsigned int, Indexes...> ) const
{
   TNL_ASSERT_GE( i, Index(0), "Element index must be non-negative." );
   TNL_ASSERT_LT( i, this->getSize(), "Element index is out of bounds." );
   // Unqualified get with std::get visible enables ADL, so tuple-like
   // element types providing their own get overload are supported.
   const Value& element = this->data[ i ];
   using std::get;
   return get< Indexes... >( element );
}

template< typename Value,
          typename Device,
          typename Index,
+1 −1
Original line number Diff line number Diff line
@@ -75,7 +75,7 @@ getView( const IndexType begin, IndexType end )
{
   if( end == 0 )
      end = this->getSize();
   return ViewType( getData() + begin, end - begin );;
   return ViewType( getData() + begin, end - begin );
}

template< typename Value,
+205 −3
Original line number Diff line number Diff line
@@ -16,7 +16,7 @@ namespace TNL {
namespace Containers {
namespace Expressions {

template< typename T1, typename T2 >
template< typename T1, typename T2 > // NOTE(review): why aren't the template arguments on the evaluate function itself? They could be deduced automatically.
struct Addition
{
   __cuda_callable__
@@ -54,6 +54,25 @@ struct Subtraction
   {
      return a - b;
   }

   __cuda_callable__
   static auto evaluate( const T1& a, T2&& b ) -> decltype( a - b )
   {
      return a - std::forward<T2>(b);
   }

   __cuda_callable__
   static auto evaluate( T1&& a, const T2& b ) -> decltype( a - b )
   {
      return std::forward<T1>(a) - b;
   }


   __cuda_callable__
   static auto evaluate( T1&& a, T2&& b ) -> decltype( a - b )
   {
      // Forward b as T2 — forwarding it as T1 was a copy/paste bug that
      // breaks compilation (or miscasts) whenever T1 and T2 differ.
      return std::forward<T1>(a) - std::forward<T2>(b);
   }
};

template< typename T1, typename T2 >
@@ -64,6 +83,25 @@ struct Multiplication
   {
      return a * b;
   }

   __cuda_callable__
   static auto evaluate( const T1& a, T2&& b ) -> decltype( a * b )
   {
      return a * std::forward<T2>(b);
   }

   __cuda_callable__
   static auto evaluate( T1&& a, const T2& b ) -> decltype( a * b )
   {
      return std::forward<T1>(a) * b;
   }


   __cuda_callable__
   static auto evaluate( T1&& a, T2&& b ) -> decltype( a * b )
   {
      // Forward b as T2 — forwarding it as T1 was a copy/paste bug that
      // breaks compilation (or miscasts) whenever T1 and T2 differ.
      return std::forward<T1>(a) * std::forward<T2>(b);
   }
};

template< typename T1, typename T2 >
@@ -74,6 +112,25 @@ struct Division
   {
      return a / b;
   }

   __cuda_callable__
   static auto evaluate( const T1& a, T2&& b ) -> decltype( a / b )
   {
      return a / std::forward<T2>(b);
   }

   __cuda_callable__
   static auto evaluate( T1&& a, const T2& b ) -> decltype( a / b )
   {
      return std::forward<T1>(a) / b;
   }


   __cuda_callable__
   static auto evaluate( T1&& a, T2&& b ) -> decltype( a / b )
   {
      // Forward b as T2 — forwarding it as T1 was a copy/paste bug that
      // breaks compilation (or miscasts) whenever T1 and T2 differ.
      return std::forward<T1>(a) / std::forward<T2>(b);
   }
};

template< typename T1, typename T2 >
@@ -104,16 +161,29 @@ struct Minus
   {
      return -a;
   }

   // NOTE(review): returns T1 by value, unlike the decltype-based overloads
   // elsewhere in this file — presumably to materialize the result instead of
   // returning an expression that references the consumed rvalue; confirm
   // this is intentional.
   __cuda_callable__
   static T1 evaluate( T1&& a )
   {
      return -(std::forward<T1>(a));
   }
};

template< typename T1 >
struct Abs
{
   // Returns a concrete T1 rather than decltype( abs( a ) ): together with
   // the unqualified ADL call below, this stops TNL::abs from recursively
   // wrapping vector-of-vector operands in further expression templates
   // (the bug this commit fixes).
   __cuda_callable__
   static auto evaluate( const T1& a ) -> T1
   {
      // Unqualified call with TNL::abs visible: ADL picks up abs overloads
      // provided by the element type (e.g. nested vectors).
      using TNL::abs;
      return abs( a );
   }
};

template< typename T1, typename T2 >
@@ -124,6 +194,12 @@ struct Pow
   {
      return TNL::pow( a, exp );
   }

   __cuda_callable__
   static auto evaluate( T1&& a, const T2& exp ) -> decltype( TNL::pow( a, exp ) )
   {
      return TNL::pow( std::forward<T1>(a), exp );
   }
};

template< typename T1 >
@@ -134,6 +210,12 @@ struct Exp
   {
      return TNL::exp( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::exp( a ) )
   {
      return TNL::exp( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -144,6 +226,12 @@ struct Sqrt
   {
      return TNL::sqrt( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::sqrt( a ) )
   {
      return TNL::sqrt( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -154,6 +242,12 @@ struct Cbrt
   {
      return TNL::cbrt( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::cbrt( a ) )
   {
      return TNL::cbrt( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -164,6 +258,12 @@ struct Log
   {
      return TNL::log( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::log( a ) )
   {
      return TNL::log( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -174,6 +274,12 @@ struct Log10
   {
      return TNL::log10( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::log10( a ) )
   {
      return TNL::log10( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -184,6 +290,12 @@ struct Log2
   {
      return TNL::log2( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::log2( a ) )
   {
      return TNL::log2( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -194,6 +306,12 @@ struct Sin
   {
      return TNL::sin( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::sin( a ) )
   {
      return TNL::sin( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -204,6 +322,12 @@ struct Cos
   {
      return TNL::cos( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::cos( a ) )
   {
      return TNL::cos( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -214,6 +338,12 @@ struct Tan
   {
      return TNL::tan( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::tan( a ) )
   {
      return TNL::tan( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -224,6 +354,12 @@ struct Asin
   {
      return TNL::asin( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::asin( a ) )
   {
      return TNL::asin( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -234,6 +370,12 @@ struct Acos
   {
      return TNL::acos( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::acos( a ) )
   {
      return TNL::acos( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -244,6 +386,12 @@ struct Atan
   {
      return TNL::atan( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::atan( a ) )
   {
      return TNL::atan( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -254,6 +402,12 @@ struct Sinh
   {
      return TNL::sinh( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::sinh( a ) )
   {
      return TNL::sinh( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -264,6 +418,12 @@ struct Cosh
   {
      return TNL::cosh( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::cosh( a ) )
   {
      return TNL::cosh( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -274,6 +434,12 @@ struct Tanh
   {
      return TNL::tanh( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::tanh( a ) )
   {
      return TNL::tanh( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -284,6 +450,12 @@ struct Asinh
   {
      return TNL::asinh( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::asinh( a ) )
   {
      return TNL::asinh( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -294,6 +466,12 @@ struct Acosh
   {
      return TNL::acosh( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::acosh( a ) )
   {
      return TNL::acosh( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -304,6 +482,12 @@ struct Atanh
   {
      return TNL::atanh( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::atanh( a ) )
   {
      return TNL::atanh( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -314,6 +498,12 @@ struct Floor
   {
      return TNL::floor( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::floor( a ) )
   {
      return TNL::floor( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -324,6 +514,12 @@ struct Ceil
   {
      return TNL::ceil( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::ceil( a ) )
   {
      return TNL::ceil( std::forward<T1>(a) );
   }
};

template< typename T1 >
@@ -334,6 +530,12 @@ struct Sign
   {
      return TNL::sign( a );
   }

   __cuda_callable__
   static auto evaluate( T1&& a ) -> decltype( TNL::sign( a ) )
   {
      return TNL::sign( std::forward<T1>(a) );
   }
};

template< typename ResultType >
+5 −0
Original line number Diff line number Diff line
@@ -407,6 +407,7 @@ dot( const Containers::StaticVector< Size, Real1 >& a, const Containers::StaticV
   return (a, b);
}

namespace Containers {

////
// Abs
@@ -418,6 +419,10 @@ abs( const Containers::StaticVector< Size, Real >& a )
   return Containers::Expressions::StaticUnaryExpressionTemplate< Containers::StaticVector< Size, Real >, Containers::Expressions::Abs >( a );
}

}
using Containers::abs;


////
// Power
template< int Size, typename Real, typename ExpType >
Loading