Commit dbfa5d11 authored by Jakub Klinkovský

Serialization in TNL::File: File::save and File::load are specialized by Allocator instead of Device
parent 399f9627
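In short, the third template parameter of File::save and File::load changes from a device tag to the allocator type that was used to allocate the buffer, with Allocators::Host< Type > as the default. A minimal sketch of the new call style for a plain host buffer (the file name is illustrative):

```cpp
#include <TNL/File.h>
#include <TNL/Allocators/Host.h>

using namespace TNL;

int main()
{
   const int size = 3;
   double data[ size ] = { 1.0, 2.0, 3.0 };

   File file;
   file.open( "example.tnl", std::ios_base::out | std::ios_base::trunc );
   // before this commit: file.save< double, double, Devices::Host >( data, size );
   // after this commit the allocator type is passed instead (and may be omitted,
   // since it defaults to Allocators::Host< double > here):
   file.save< double, double, Allocators::Host< double > >( data, size );
   file.close();
}
```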
......@@ -17,7 +17,7 @@ int main()
*/
File file;
file.open( "file-example-cuda-test-file.tnl", std::ios_base::out | std::ios_base::trunc );
file.save< double, double, Devices::Host >( doubleArray, size );
file.save< double, double, Allocators::Host< double > >( doubleArray, size );
file.close();
/***
......@@ -31,7 +31,7 @@ int main()
* Read array from the file to device
*/
file.open( "file-example-cuda-test-file.tnl", std::ios_base::in );
file.load< double, double, Devices::Cuda >( deviceArray, size );
file.load< double, double, Allocators::Cuda< double > >( deviceArray, size );
file.close();
/***
......
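Pieced together, the updated example follows this pattern; a condensed sketch, assuming CUDA is available and that Allocators::Cuda follows the usual allocate/deallocate allocator interface:

```cpp
#include <TNL/File.h>
#include <TNL/Allocators/Host.h>
#include <TNL/Allocators/Cuda.h>

using namespace TNL;

int main()
{
   const int size = 3;
   double doubleArray[ size ] = { 3.14, 2.72, 1.41 };

   // write the host buffer to a file
   File file;
   file.open( "file-example-cuda-test-file.tnl", std::ios_base::out | std::ios_base::trunc );
   file.save< double, double, Allocators::Host< double > >( doubleArray, size );
   file.close();

   // allocate a buffer on the GPU and load the data straight into it;
   // the Allocators::Cuda specialization of load_impl handles the host-to-device copy
   Allocators::Cuda< double > cudaAllocator;
   double* deviceArray = cudaAllocator.allocate( size );

   file.open( "file-example-cuda-test-file.tnl", std::ios_base::in );
   file.load< double, double, Allocators::Cuda< double > >( deviceArray, size );
   file.close();

   cudaAllocator.deallocate( deviceArray, size );
}
```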
......@@ -18,21 +18,21 @@ int main()
*/
File file;
file.open( "test-file.tnl", std::ios_base::out | std::ios_base::trunc );
file.save< double, float, Devices::Host >( doubleArray, size );
file.save< double, float >( doubleArray, size );
file.close();
/***
* Load the array of floats from the file.
*/
file.open( "test-file.tnl", std::ios_base::in );
file.load< float, float, Devices::Host >( floatArray, size );
file.load< float, float >( floatArray, size );
file.close();
/***
* Load the array of floats from the file and convert them to integers.
*/
file.open( "test-file.tnl", std::ios_base::in );
file.load< int, float, Devices::Host >( intArray, size );
file.load< int, float >( intArray, size );
file.close();
/***
......
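Since the host allocator is the default, host-only code like this example needs no third template argument at all; only the value types for the buffer and for the file are given. A condensed sketch of the conversion round trip (values approximate, assuming the per-element conversion is an ordinary cast):

```cpp
#include <TNL/File.h>

using namespace TNL;

int main()
{
   double doubleArray[ 1 ] = { 3.14159265358979 };
   float  floatArray[ 1 ];
   int    intArray[ 1 ];

   File file;
   file.open( "test-file.tnl", std::ios_base::out | std::ios_base::trunc );
   file.save< double, float >( doubleArray, 1 );   // stored in the file as 4-byte floats
   file.close();

   file.open( "test-file.tnl", std::ios_base::in );
   file.load< float, float >( floatArray, 1 );     // floatArray[ 0 ] is approximately 3.1415927f
   file.close();

   file.open( "test-file.tnl", std::ios_base::in );
   file.load< int, float >( intArray, 1 );         // intArray[ 0 ] == 3
   file.close();
}
```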
......@@ -228,13 +228,13 @@ class Array
/**
* \brief Returns a \ref String representation of the array type in C++ style,
* where device is always \ref Devices::Host.
* with a placeholder in place of \e Device and \e Allocator.
*/
static String getSerializationType();
/**
* \brief Returns a \ref String representation of the array type in C++ style,
* where device is always \ref Devices::Host.
* with a placeholder in place of \e Device and \e Allocator.
*/
virtual String getSerializationTypeVirtual() const;
......
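The effect of the documentation change above: getSerializationType() now returns a device- and allocator-agnostic string. A small sketch of what it evaluates to (assuming "double" and "int" are the serialized names of the value and index types):

```cpp
#include <iostream>
#include <TNL/Containers/Array.h>
#include <TNL/Devices/Host.h>

using namespace TNL;

int main()
{
   using HostArray = Containers::Array< double, Devices::Host, int >;
   std::cout << HostArray::getSerializationType() << std::endl;
   // prints the same string for any device and allocator:
   //   Containers::Array< double, [any_device], int, [any_allocator] >
}
```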
......@@ -759,7 +759,7 @@ std::ostream& operator<<( std::ostream& str, const Array< Value, Device, Index,
template< typename Value, typename Device, typename Index, typename Allocator >
File& operator<<( File& file, const Array< Value, Device, Index, Allocator >& array )
{
using IO = detail::ArrayIO< Value, Device, Index >;
using IO = detail::ArrayIO< Value, Index, Allocator >;
saveObjectType( file, IO::getSerializationType() );
const Index size = array.getSize();
file.save( &size );
......@@ -778,7 +778,7 @@ File& operator<<( File&& file, const Array< Value, Device, Index, Allocator >& a
template< typename Value, typename Device, typename Index, typename Allocator >
File& operator>>( File& file, Array< Value, Device, Index, Allocator >& array )
{
using IO = detail::ArrayIO< Value, Device, Index >;
using IO = detail::ArrayIO< Value, Index, Allocator >;
const String type = getObjectType( file );
if( type != IO::getSerializationType() )
throw Exceptions::FileDeserializationError( file.getFileName(), "object type does not match (expected " + IO::getSerializationType() + ", found " + type + ")." );
......
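Because the serialized type string no longer encodes the device or the allocator, a file written from one device can be deserialized into an array on another without tripping the type check in operator>>. A rough sketch (assuming the HAVE_CUDA guard used elsewhere in TNL):

```cpp
#include <TNL/File.h>
#include <TNL/Containers/Array.h>

using namespace TNL;

int main()
{
   Containers::Array< double, Devices::Host > hostArray( 10 );
   hostArray.setValue( 1.0 );

   File file;
   file.open( "array.tnl", std::ios_base::out | std::ios_base::trunc );
   file << hostArray;    // writes the placeholder type string, the size and the data
   file.close();

#ifdef HAVE_CUDA
   // the type check in operator>> compares against the same placeholder string,
   // so the file written above can be loaded into a CUDA array
   Containers::Array< double, Devices::Cuda > deviceArray;
   file.open( "array.tnl", std::ios_base::in );
   file >> deviceArray;
   file.close();
#endif
}
```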
......@@ -19,6 +19,7 @@
#include <TNL/Algorithms/MultiDeviceMemoryOperations.h>
#include <TNL/Containers/detail/ArrayIO.h>
#include <TNL/Containers/detail/ArrayAssignment.h>
#include <TNL/Allocators/Default.h>
#include "ArrayView.h"
......@@ -383,7 +384,7 @@ load( const String& fileName )
template< typename Value, typename Device, typename Index >
File& operator<<( File& file, const ArrayView< Value, Device, Index > view )
{
using IO = detail::ArrayIO< Value, Device, Index >;
using IO = detail::ArrayIO< Value, Index, typename Allocators::Default< Device >::template Allocator< Value > >;
saveObjectType( file, IO::getSerializationType() );
const Index size = view.getSize();
file.save( &size );
......@@ -402,7 +403,7 @@ File& operator<<( File&& file, const ArrayView< Value, Device, Index > view )
template< typename Value, typename Device, typename Index >
File& operator>>( File& file, ArrayView< Value, Device, Index > view )
{
using IO = detail::ArrayIO< Value, Device, Index >;
using IO = detail::ArrayIO< Value, Index, typename Allocators::Default< Device >::template Allocator< Value > >;
const String type = getObjectType( file );
if( type != IO::getSerializationType() )
throw Exceptions::FileDeserializationError( file.getFileName(), "object type does not match (expected " + IO::getSerializationType() + ", found " + type + ")." );
......
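An ArrayView carries no allocator of its own, so the operators derive one from the view's device through Allocators::Default. A compile-time sketch of that mapping (assuming Allocators::Default maps Devices::Host to Allocators::Host and Devices::Cuda to Allocators::Cuda):

```cpp
#include <type_traits>
#include <TNL/Devices/Host.h>
#include <TNL/Devices/Cuda.h>
#include <TNL/Allocators/Default.h>
#include <TNL/Allocators/Host.h>
#include <TNL/Allocators/Cuda.h>

using namespace TNL;

// the allocator type used by the ArrayView serialization operators above
template< typename Device, typename Value >
using ViewAllocator = typename Allocators::Default< Device >::template Allocator< Value >;

static_assert( std::is_same< ViewAllocator< Devices::Host, double >,
                             Allocators::Host< double > >::value,
               "host views serialize through the host allocator" );
static_assert( std::is_same< ViewAllocator< Devices::Cuda, double >,
                             Allocators::Cuda< double > >::value,
               "CUDA views serialize through the CUDA allocator" );

int main() {}
```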
......@@ -277,14 +277,14 @@ void StaticArray< Size, Value >::setValue( const ValueType& val )
template< int Size, typename Value >
bool StaticArray< Size, Value >::save( File& file ) const
{
file.save< Value, Value, Devices::Host >( data, Size );
file.save( data, Size );
return true;
}
template< int Size, typename Value >
bool StaticArray< Size, Value >::load( File& file)
{
file.load< Value, Value, Devices::Host >( data, Size );
file.load( data, Size );
return true;
}
......
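The explicit template arguments in StaticArray::save and StaticArray::load could be dropped because the defaults already cover this case: Type is deduced from the pointer, the target/source type defaults to Type, and the allocator defaults to Allocators::Host< Value >, which matches StaticArray's in-object (host) storage. A minimal usage sketch:

```cpp
#include <TNL/File.h>
#include <TNL/Containers/StaticArray.h>

using namespace TNL;

int main()
{
   Containers::StaticArray< 3, double > a;
   a[ 0 ] = 1.0; a[ 1 ] = 2.0; a[ 2 ] = 3.0;

   File file;
   file.open( "static-array.tnl", std::ios_base::out | std::ios_base::trunc );
   a.save( file );   // internally calls file.save( data, Size ) with all defaults
   file.close();

   Containers::StaticArray< 3, double > b;
   file.open( "static-array.tnl", std::ios_base::in );
   b.load( file );
   file.close();
}
```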
......@@ -14,29 +14,29 @@
#include <TNL/Object.h>
#include <TNL/File.h>
#include <TNL/TypeInfo.h>
namespace TNL {
namespace Containers {
namespace detail {
template< typename Value,
typename Device,
typename Index,
typename Allocator,
bool Elementwise = std::is_base_of< Object, Value >::value >
struct ArrayIO
{};
template< typename Value,
typename Device,
typename Index >
struct ArrayIO< Value, Device, Index, true >
typename Index,
typename Allocator >
struct ArrayIO< Value, Index, Allocator, true >
{
static String getSerializationType()
{
return String( "Containers::Array< " ) +
TNL::getSerializationType< Value >() + ", " +
TNL::getSerializationType< Devices::Host >() + ", " +
TNL::getSerializationType< Index >() + " >";
TNL::getSerializationType< Value >() + ", [any_device], " +
TNL::getSerializationType< Index >() + ", [any_allocator] >";
}
static void save( File& file,
......@@ -73,16 +73,15 @@ struct ArrayIO< Value, Device, Index, true >
};
template< typename Value,
typename Device,
typename Index >
struct ArrayIO< Value, Device, Index, false >
typename Index,
typename Allocator >
struct ArrayIO< Value, Index, Allocator, false >
{
static String getSerializationType()
{
return String( "Containers::Array< " ) +
TNL::getSerializationType< Value >() + ", " +
TNL::getSerializationType< Devices::Host >() + ", " +
TNL::getSerializationType< Index >() + " >";
TNL::getSerializationType< Value >() + ", [any_device], " +
TNL::getSerializationType< Index >() + ", [any_allocator] >";
}
static void save( File& file,
......@@ -93,7 +92,7 @@ struct ArrayIO< Value, Device, Index, false >
return;
try
{
file.save< Value, Value, Device >( data, elements );
file.save< Value, Value, Allocator >( data, elements );
}
catch(...)
{
......@@ -109,7 +108,7 @@ struct ArrayIO< Value, Device, Index, false >
return;
try
{
file.load< Value, Value, Device >( data, elements );
file.load< Value, Value, Allocator >( data, elements );
}
catch(...)
{
......
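Only the explicit parameter list of ArrayIO changed (from < Value, Device, Index > to < Value, Index, Allocator >); the trailing boolean still picks element-wise I/O for Object-derived values and bulk I/O for everything else. A compile-time sketch of how an instantiation resolves (assuming plain arithmetic types are not derived from TNL::Object):

```cpp
#include <iostream>
#include <TNL/Containers/detail/ArrayIO.h>
#include <TNL/Allocators/Host.h>

using namespace TNL;
using namespace TNL::Containers;

int main()
{
   // double is not derived from TNL::Object, so this resolves to the bulk
   // specialization ArrayIO< Value, Index, Allocator, false >, whose save/load
   // forward to file.save< Value, Value, Allocator > and file.load< Value, Value, Allocator >
   using IO = detail::ArrayIO< double, int, Allocators::Host< double > >;
   std::cout << IO::getSerializationType() << std::endl;
   // expected: Containers::Array< double, [any_device], int, [any_allocator] >
}
```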
......@@ -14,8 +14,8 @@
#include <type_traits>
#include <TNL/String.h>
#include <TNL/Devices/Host.h>
#include <TNL/Devices/Cuda.h>
#include <TNL/Allocators/Host.h>
#include <TNL/Allocators/Cuda.h>
namespace TNL {
......@@ -85,9 +85,9 @@ class File
/**
* \brief Method for loading data from the file.
*
* The data will be stored in \e buffer allocated on device given by the
* \e Device parameter. The data type of the buffer is given by the
* template parameter \e Type. The second template parameter
The data will be stored in \e buffer, which was allocated using an
* allocator of type \e Allocator. The data type of the buffer is given
* by the template parameter \e Type. The second template parameter
* \e SourceType defines the type of data in the source file. If both
* types are different, on-the-fly conversion takes place during the
* data loading.
......@@ -96,31 +96,31 @@ class File
*
* \tparam Type type of data to be loaded to the \e buffer.
* \tparam SourceType type of data stored on the file,
* \tparam Device device where the data are stored after reading. For example \ref Devices::Host or \ref Devices::Cuda.
* \tparam Allocator type of the allocator which was used to allocate \e buffer.
* \param buffer Pointer in memory where the elements are loaded and stored after reading.
* \param elements number of elements to be loaded from the file.
*
*
* The following example shows how to load data directly to GPU.
*
*
* \par Example
* \include FileExampleCuda.cpp
* \par Output
* \include FileExampleCuda.out
* The following example shows how to do on-the-fly data conversion.
*
*
* \par Example
* \include FileExampleSaveAndLoad.cpp
* \par Output
* \include FileExampleSaveAndLoad.out
*/
template< typename Type, typename SourceType = Type, typename Device = Devices::Host >
template< typename Type, typename SourceType = Type, typename Allocator = Allocators::Host< Type > >
void load( Type* buffer, std::streamsize elements = 1 );
/**
* \brief Method for saving data to the file.
*
* The data from the \e buffer (with type \e Type) allocated on the device
* \e Device will be saved into the file. \e TargetType defines as what
The data from the \e buffer (with type \e Type), which was allocated
using an allocator of type \e Allocator, will be saved into the file.
\e TargetType defines as what
* data type the buffer shall be saved. If the type is different from the
* data type, on-the-fly data type conversion takes place during the data
* saving.
......@@ -129,40 +129,44 @@ class File
*
* \tparam Type type of data in the \e buffer.
* \tparam TargetType tells as what type data the buffer shall be saved.
* \tparam Device device from where the data are loaded before writing into file. For example \ref Devices::Host or \ref Devices::Cuda.
* \tparam Allocator type of the allocator which was used to allocate \e buffer.
* \tparam Index type of index by which the elements are indexed.
* \param buffer buffer that is going to be saved to the file.
* \param elements number of elements saved to the file.
*
* See \ref File::load for examples.
*/
template< typename Type, typename TargetType = Type, typename Device = Devices::Host >
template< typename Type, typename TargetType = Type, typename Allocator = Allocators::Host< Type > >
void save( const Type* buffer, std::streamsize elements = 1 );
protected:
// implementation for all allocators which allocate data accessible from host
template< typename Type,
typename SourceType,
typename Device,
typename = typename std::enable_if< std::is_same< Device, Devices::Host >::value >::type >
typename Allocator,
typename = std::enable_if_t< ! std::is_same< Allocator, Allocators::Cuda< Type > >::value > >
void load_impl( Type* buffer, std::streamsize elements );
// implementation for \ref Allocators::Cuda
template< typename Type,
typename SourceType,
typename Device,
typename = typename std::enable_if< std::is_same< Device, Devices::Cuda >::value >::type,
typename Allocator,
typename = std::enable_if_t< std::is_same< Allocator, Allocators::Cuda< Type > >::value >,
typename = void >
void load_impl( Type* buffer, std::streamsize elements );
// implementation for all allocators which allocate data accessible from host
template< typename Type,
typename TargetType,
typename Device,
typename = typename std::enable_if< std::is_same< Device, Devices::Host >::value >::type >
typename Allocator,
typename = std::enable_if_t< ! std::is_same< Allocator, Allocators::Cuda< Type > >::value > >
void save_impl( const Type* buffer, std::streamsize elements );
// implementation for \ref Allocators::Cuda
template< typename Type,
typename TargetType,
typename Device,
typename = typename std::enable_if< std::is_same< Device, Devices::Cuda >::value >::type,
typename Allocator,
typename = std::enable_if_t< std::is_same< Allocator, Allocators::Cuda< Type > >::value >,
typename = void >
void save_impl( const Type* buffer, std::streamsize elements );
......
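The two load_impl/save_impl overloads rely on the usual enable_if trick: the CUDA variant carries an extra dummy `typename = void` parameter so the two declarations stay distinct, and for any given Allocator exactly one of the enable_if conditions holds. A stripped-down, self-contained sketch of the same dispatch pattern (types and names here are illustrative, not TNL's):

```cpp
#include <iostream>
#include <type_traits>

template< typename T > struct HostAlloc { using value_type = T; };
template< typename T > struct CudaAlloc { using value_type = T; };

// chosen for every allocator whose memory is directly accessible from the host
template< typename Type,
          typename Allocator,
          typename = std::enable_if_t< ! std::is_same< Allocator, CudaAlloc< Type > >::value > >
void load_impl( Type* buffer, std::streamsize elements )
{
   std::cout << "host path: read " << elements << " elements straight into the buffer\n";
}

// chosen only for the CUDA allocator; the extra dummy parameter keeps the two
// overloads distinct, exactly like the declarations in File.h
template< typename Type,
          typename Allocator,
          typename = std::enable_if_t< std::is_same< Allocator, CudaAlloc< Type > >::value >,
          typename = void >
void load_impl( Type* buffer, std::streamsize elements )
{
   std::cout << "CUDA path: read into a host buffer, then copy to device memory\n";
}

int main()
{
   double buffer[ 4 ] = {};
   load_impl< double, HostAlloc< double > >( buffer, 4 );
   load_impl< double, CudaAlloc< double > >( buffer, 4 );
}
```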
......@@ -80,21 +80,23 @@ inline void File::close()
template< typename Type,
typename SourceType,
typename Device >
typename Allocator >
void File::load( Type* buffer, std::streamsize elements )
{
static_assert( std::is_same< Type, typename Allocator::value_type >::value,
"Allocator::value_type must be the same as Type." );
TNL_ASSERT_GE( elements, 0, "Number of elements to load must be non-negative." );
if( ! elements )
return;
load_impl< Type, SourceType, Device >( buffer, elements );
load_impl< Type, SourceType, Allocator >( buffer, elements );
}
// Host
// Host allocators
template< typename Type,
typename SourceType,
typename Device,
typename Allocator,
typename >
void File::load_impl( Type* buffer, std::streamsize elements )
{
......@@ -117,10 +119,10 @@ void File::load_impl( Type* buffer, std::streamsize elements )
}
}
// Cuda
// Allocators::Cuda
template< typename Type,
typename SourceType,
typename Device,
typename Allocator,
typename, typename >
void File::load_impl( Type* buffer, std::streamsize elements )
{
......@@ -171,21 +173,23 @@ void File::load_impl( Type* buffer, std::streamsize elements )
template< typename Type,
typename TargetType,
typename Device >
typename Allocator >
void File::save( const Type* buffer, std::streamsize elements )
{
static_assert( std::is_same< Type, typename Allocator::value_type >::value,
"Allocator::value_type must be the same as Type." );
TNL_ASSERT_GE( elements, 0, "Number of elements to save must be non-negative." );
if( ! elements )
return;
save_impl< Type, TargetType, Device >( buffer, elements );
save_impl< Type, TargetType, Allocator >( buffer, elements );
}
// Host
// Host allocators
template< typename Type,
typename TargetType,
typename Device,
typename Allocator,
typename >
void File::save_impl( const Type* buffer, std::streamsize elements )
{
......@@ -209,10 +213,10 @@ void File::save_impl( const Type* buffer, std::streamsize elements )
}
}
// Cuda
// Allocators::Cuda
template< typename Type,
typename TargetType,
typename Device,
typename Allocator,
typename, typename >
void File::save_impl( const Type* buffer, std::streamsize elements )
{
......
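The new static_assert in File::save and File::load turns the most likely misuse into a compile error: passing an allocator whose value_type does not match the buffer's element type. A small hypothetical sketch (the commented-out line is the misuse, not code from the commit):

```cpp
#include <TNL/File.h>
#include <TNL/Allocators/Host.h>

using namespace TNL;

int main()
{
   double data[ 3 ] = { 1.0, 2.0, 3.0 };

   File file;
   file.open( "check.tnl", std::ios_base::out | std::ios_base::trunc );

   // OK: Allocator::value_type (double) matches Type (double)
   file.save< double, float, Allocators::Host< double > >( data, 3 );

   // would not compile: Allocator::value_type (float) differs from Type (double),
   // so the static_assert in File::save fires
   // file.save< double, float, Allocators::Host< float > >( data, 3 );

   file.close();
}
```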
......@@ -61,15 +61,15 @@ TEST( FileTest, WriteAndReadWithConversion )
int intData[ 3 ];
File file;
ASSERT_NO_THROW( file.open( TEST_FILE_NAME, std::ios_base::out | std::ios_base::trunc ) );
file.save< double, float, Devices::Host >( doubleData, 3 );
file.save< double, float >( doubleData, 3 );
ASSERT_NO_THROW( file.close() );
ASSERT_NO_THROW( file.open( TEST_FILE_NAME, std::ios_base::in ) );
file.load< float, float, Devices::Host >( floatData, 3 );
file.load< float, float >( floatData, 3 );
ASSERT_NO_THROW( file.close() );
ASSERT_NO_THROW( file.open( TEST_FILE_NAME, std::ios_base::in ) );
file.load< int, float, Devices::Host >( intData, 3 );
file.load< int, float >( intData, 3 );
ASSERT_NO_THROW( file.close() );
EXPECT_NEAR( floatData[ 0 ], 3.14159, 0.0001 );
......@@ -112,9 +112,9 @@ TEST( FileTest, WriteAndReadCUDA )
File file;
ASSERT_NO_THROW( file.open( String( TEST_FILE_NAME ), std::ios_base::out ) );
file.save< int, int, Devices::Cuda >( cudaIntData );
file.save< float, float, Devices::Cuda >( cudaFloatData, 3 );
file.save< const double, double, Devices::Cuda >( cudaConstDoubleData );
file.save< int, int, Allocators::Cuda<int> >( cudaIntData );
file.save< float, float, Allocators::Cuda<float> >( cudaFloatData, 3 );
file.save< const double, double, Allocators::Cuda<const double> >( cudaConstDoubleData );
ASSERT_NO_THROW( file.close() );
ASSERT_NO_THROW( file.open( String( TEST_FILE_NAME ), std::ios_base::in ) );
......@@ -127,9 +127,9 @@ TEST( FileTest, WriteAndReadCUDA )
cudaMalloc( ( void** ) &newCudaIntData, sizeof( int ) );
cudaMalloc( ( void** ) &newCudaFloatData, 3 * sizeof( float ) );
cudaMalloc( ( void** ) &newCudaDoubleData, sizeof( double ) );
file.load< int, int, Devices::Cuda >( newCudaIntData, 1 );
file.load< float, float, Devices::Cuda >( newCudaFloatData, 3 );
file.load< double, double, Devices::Cuda >( newCudaDoubleData, 1 );
file.load< int, int, Allocators::Cuda<int> >( newCudaIntData, 1 );
file.load< float, float, Allocators::Cuda<float> >( newCudaFloatData, 3 );
file.load< double, double, Allocators::Cuda<double> >( newCudaDoubleData, 1 );
cudaMemcpy( &newIntData,
newCudaIntData,
sizeof( int ),
......@@ -172,15 +172,15 @@ TEST( FileTest, WriteAndReadCUDAWithConversion )
File file;
ASSERT_NO_THROW( file.open( String( TEST_FILE_NAME ), std::ios_base::out | std::ios_base::trunc ) );
file.save< double, float, Devices::Cuda >( cudaConstDoubleData, 3 );
file.save< double, float, Allocators::Cuda<double> >( cudaConstDoubleData, 3 );
ASSERT_NO_THROW( file.close() );
ASSERT_NO_THROW( file.open( String( TEST_FILE_NAME ), std::ios_base::in ) );
file.load< float, float, Devices::Cuda >( cudaFloatData, 3 );
file.load< float, float, Allocators::Cuda<float> >( cudaFloatData, 3 );
ASSERT_NO_THROW( file.close() );
ASSERT_NO_THROW( file.open( String( TEST_FILE_NAME ), std::ios_base::in ) );
file.load< int, float, Devices::Cuda >( cudaIntData, 3 );
file.load< int, float, Allocators::Cuda<int> >( cudaIntData, 3 );
ASSERT_NO_THROW( file.close() );
cudaMemcpy( floatData,
......