/***************************************************************************
                          ArrayOperationsHost.hpp  -  description
                             -------------------
        begin                : Jul 16, 2013
        copyright            : (C) 2013 by Tomas Oberhuber
        email                : tomas.oberhuber@fjfi.cvut.cz
     ***************************************************************************/
    
    /* See Copyright Notice in tnl/Copyright */
    
    
#pragma once

#include <algorithm>   // std::copy, std::equal
#include <functional>  // std::logical_and, std::logical_or

#include <TNL/ParallelFor.h>
#include <TNL/Containers/Algorithms/ArrayOperations.h>
#include <TNL/Containers/Algorithms/Reduction.h>
    
    namespace Containers {
    
    
template< typename Element >
void
ArrayOperations< Devices::Host >::
setElement( Element* data,
            const Element& value )
{
   TNL_ASSERT_TRUE( data, "Attempted to set data through a nullptr." );
   *data = value;
}
    
template< typename Element >
Element
ArrayOperations< Devices::Host >::
getElement( const Element* data )
{
   TNL_ASSERT_TRUE( data, "Attempted to get data through a nullptr." );
   return *data;
}
    
template< typename Element, typename Index >
void
ArrayOperations< Devices::Host >::
set( Element* data,
     const Element& value,
     const Index size )
{
   if( size == 0 ) return;
   TNL_ASSERT_TRUE( data, "Attempted to set data through a nullptr." );
   auto kernel = [data, value]( Index i )
   {
      data[ i ] = value;
   };
   ParallelFor< Devices::Host >::exec( (Index) 0, size, kernel );
}
    
template< typename DestinationElement,
          typename SourceElement,
          typename Index >
void
ArrayOperations< Devices::Host >::
copy( DestinationElement* destination,
      const SourceElement* source,
      const Index size )
{
   if( size == 0 ) return;
   TNL_ASSERT_TRUE( destination, "Attempted to copy data to a nullptr." );
   TNL_ASSERT_TRUE( source, "Attempted to copy data from a nullptr." );

   // our ParallelFor version is faster than std::copy iff we use more than 1 thread
   if( Devices::Host::isOMPEnabled() && Devices::Host::getMaxThreadsCount() > 1 ) {
      auto kernel = [destination, source]( Index i )
      {
         destination[ i ] = source[ i ];
      };
      ParallelFor< Devices::Host >::exec( (Index) 0, size, kernel );
   }
   else {
      // std::copy usually uses std::memcpy for TriviallyCopyable types
      std::copy( source, source + size, destination );
   }
}
    
template< typename DestinationElement,
          typename Index,
          typename SourceIterator >
void
ArrayOperations< Devices::Host >::
copyFromIterator( DestinationElement* destination,
                  Index destinationSize,
                  SourceIterator first,
                  SourceIterator last )
{
   // delegate to the generic implementation in ArrayOperations< void >
   ArrayOperations< void >::copyFromIterator( destination, destinationSize, first, last );
}
    
template< typename DestinationElement,
          typename SourceElement,
          typename Index >
bool
ArrayOperations< Devices::Host >::
compare( const DestinationElement* destination,
         const SourceElement* source,
         const Index size )
{
   if( size == 0 ) return true;
   TNL_ASSERT_TRUE( destination, "Attempted to compare data through a nullptr." );
   TNL_ASSERT_TRUE( source, "Attempted to compare data through a nullptr." );

   if( Devices::Host::isOMPEnabled() && Devices::Host::getMaxThreadsCount() > 1 ) {
      // parallel reduction: logical AND over element-wise equality
      auto fetch = [destination, source] ( Index i ) -> bool { return destination[ i ] == source[ i ]; };
      return Reduction< Devices::Host >::reduce( size, std::logical_and<>{}, fetch, true );
   }
   else {
      // sequential algorithm can return as soon as it finds a mismatch
      return std::equal( source, source + size, destination );
   }
}
    
template< typename Element,
          typename Index >
bool
ArrayOperations< Devices::Host >::
containsValue( const Element* data,
               const Index size,
               const Element& value )
{
   if( size == 0 ) return false;
   TNL_ASSERT_TRUE( data, "Attempted to check data through a nullptr." );
   TNL_ASSERT_GE( size, 0, "" );

   if( Devices::Host::isOMPEnabled() && Devices::Host::getMaxThreadsCount() > 1 ) {
      // parallel reduction: logical OR over element-wise matches
      auto fetch = [=] ( Index i ) -> bool { return data[ i ] == value; };
      return Reduction< Devices::Host >::reduce( size, std::logical_or<>{}, fetch, false );
   }
   else {
      // sequential algorithm can return as soon as it finds a match
      return ArrayOperations< void >::containsValue( data, size, value );
   }
}
    
template< typename Element,
          typename Index >
bool
ArrayOperations< Devices::Host >::
containsOnlyValue( const Element* data,
                   const Index size,
                   const Element& value )
{
   if( size == 0 ) return false;
   TNL_ASSERT_TRUE( data, "Attempted to check data through a nullptr." );
   TNL_ASSERT_GE( size, 0, "" );

   if( Devices::Host::isOMPEnabled() && Devices::Host::getMaxThreadsCount() > 1 ) {
      // parallel reduction: logical AND over element-wise matches
      auto fetch = [data, value] ( Index i ) -> bool { return data[ i ] == value; };
      return Reduction< Devices::Host >::reduce( size, std::logical_and<>{}, fetch, true );
   }
   else {
      // sequential algorithm can return as soon as it finds a mismatch
      return ArrayOperations< void >::containsOnlyValue( data, size, value );
   }
}
    
    } // namespace Containers
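
/*
   Usage sketch (illustrative only, not part of the original header): the host
   specialization above might be exercised roughly as follows. It assumes the
   call site sees the same namespace and device types as this file; the array
   sizes and values are arbitrary examples.

      const int size = 1000;
      double* a = new double[ size ];
      double* b = new double[ size ];

      // fill a with a constant value and duplicate it into b
      ArrayOperations< Devices::Host >::set( a, 3.14, size );
      ArrayOperations< Devices::Host >::copy( b, a, size );

      // element-wise queries
      bool equal  = ArrayOperations< Devices::Host >::compare( a, b, size );                   // true
      bool hasPi  = ArrayOperations< Devices::Host >::containsValue( a, size, 3.14 );          // true
      bool onlyPi = ArrayOperations< Devices::Host >::containsOnlyValue( a, size, 3.14 );      // true

      delete[] a;
      delete[] b;
*/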