diff --git a/Documentation/Tutorials/Pointers/UniquePointerExample.cpp b/Documentation/Tutorials/Pointers/UniquePointerExample.cpp new file mode 100644 index 0000000000000000000000000000000000000000..55eb9e9c3ce52ca60888611a93bf0fe8b41ed81b --- /dev/null +++ b/Documentation/Tutorials/Pointers/UniquePointerExample.cpp @@ -0,0 +1,41 @@ +#include <iostream> +#include <cstdlib> +#include <TNL/Containers/Array.h> +#include <TNL/Pointers/UniquePointer.h> + + +using namespace TNL; + +using ArrayHost = Containers::Array< int, Devices::Host >; +using ArrayCuda = Containers::Array< int, Devices::Cuda >; + +__global__ void checkArray( const ArrayCuda* ptr ) +{ + printf( "Array size is: %d\n", ptr->getSize() ); + for( int i = 0; i < ptr->getSize(); i++ ) + printf( "a[ %d ] = %d \n", i, ( *ptr )[ i ] ); +} + +int main( int argc, char* argv[] ) +{ + + /*** + * Make unique pointer on array on CPU and manipulate the + * array via the pointer. + */ + Pointers::UniquePointer< ArrayHost > array_host_ptr( 10 ); + *array_host_ptr = 1; + std::cout << "Array = " << *array_host_ptr << std::endl; + + /*** + * Let's do the same in CUDA + */ +#ifdef HAVE_CUDA + Pointers::UniquePointer< ArrayCuda > array_cuda_ptr( 10 ); + array_cuda_ptr.modifyData< Devices::Host >() = 1; + //Pointers::synchronizeSmartPointersOnDevice< Devices::Cuda >(); + //checkArray<<< 1, 1 >>>( &array_cuda_ptr.getData< Devices::Cuda >() ); +#endif + return EXIT_SUCCESS; +} + diff --git a/Documentation/Tutorials/Pointers/UniquePointerExample.cu b/Documentation/Tutorials/Pointers/UniquePointerExample.cu new file mode 120000 index 0000000000000000000000000000000000000000..a7c9828d5b35a010795073ea20ccf4e54b000d24 --- /dev/null +++ b/Documentation/Tutorials/Pointers/UniquePointerExample.cu @@ -0,0 +1 @@ +UniquePointerExample.cpp \ No newline at end of file diff --git a/Documentation/Tutorials/Pointers/tutorial_Pointers.md b/Documentation/Tutorials/Pointers/tutorial_Pointers.md new file mode 100644 index 
0000000000000000000000000000000000000000..8e4106ad307d38a18c73a60926b46f828d36ebbf --- /dev/null +++ b/Documentation/Tutorials/Pointers/tutorial_Pointers.md @@ -0,0 +1,37 @@ +\page tutorial_Pointers Cross-device pointers tutorial + +## Introduction + +Smart pointers in TNL are motivated by the smart pointers in the STL library. In addition, they work across different devices and so they make data management easier. + +## Table of Contents +1. [Unique pointers](#unique_pointers) +2. [Shared pointers](#shared_pointers) +3. [Device pointers](#device_pointers) + + +## Unique pointers <a name="unique_pointers"></a> + +Similar to the STL smart pointer `std::unique_ptr`, `UniquePointer` is a smart pointer managing a certain dynamically allocated object. The object is automatically deallocated when the pointer goes out of scope. The definition of `UniquePointer` reads as: + +``` +template< typename Object, typename Device = typename Object::DeviceType > +class UniquePointer; +``` + +It takes two template parameters: + +1. `Object` is a type of object managed by the pointer. +2. `Device` is a device where the object is to be allocated. + +If the device type is `Devices::Host`, `UniquePointer` behaves as a usual unique smart pointer. If the device is different, `Devices::Cuda` for example, the unique pointer creates an image of the object even in the host memory. It means that one can manipulate the object on the host. All smart pointers are registered in a special register using which they can be easily synchronised before calling a CUDA kernel. This means that all modified images of the objects in the memory are transferred to the GPU. See the following example: + +\include UniquePointerExample.cpp + +The result looks as follows: + +\include UniquePointerExample.out + +## Shared pointers <a name="shared_pointers"></a> + +## Device pointers <a name="device_pointers"></a>