diff --git a/src/UnitTests/Matrices/CMakeLists.txt b/src/UnitTests/Matrices/CMakeLists.txt
index ef639fc440f71af822eb5822209fe802bb590d1b..a4b06708e6db2af107bb3b211918eeff2dfb93ad 100644
--- a/src/UnitTests/Matrices/CMakeLists.txt
+++ b/src/UnitTests/Matrices/CMakeLists.txt
@@ -13,6 +13,14 @@ set( COMMON_TESTS
             SparseMatrixTest_SlicedEllpack
             SparseMatrixTest_ChunkedEllpack
             SparseMatrixTest_BiEllpack
+            SparseMatrixVectorProductTest_CSRScalar
+            SparseMatrixVectorProductTest_CSRVector
+            SparseMatrixVectorProductTest_CSRHybrid
+            SparseMatrixVectorProductTest_CSRAdaptive
+            SparseMatrixVectorProductTest_Ellpack
+            SparseMatrixVectorProductTest_SlicedEllpack
+            SparseMatrixVectorProductTest_ChunkedEllpack
+            SparseMatrixVectorProductTest_BiEllpack
             SparseMatrixCopyTest
             BinarySparseMatrixTest_CSR
             BinarySparseMatrixTest_Ellpack
diff --git a/src/UnitTests/Matrices/SparseMatrixTest.h b/src/UnitTests/Matrices/SparseMatrixTest.h
index 1ae0fda8a0115d1e49e24c32dffdfd5c9c222a5a..68d7bedb025a1a389417b8eed89d87ded531f9bd 100644
--- a/src/UnitTests/Matrices/SparseMatrixTest.h
+++ b/src/UnitTests/Matrices/SparseMatrixTest.h
@@ -88,13 +88,6 @@ TYPED_TEST( MatrixTest, addElementTest )
     test_AddElement< MatrixType >();
 }
 
-TYPED_TEST( MatrixTest, vectorProductTest )
-{
-    using MatrixType = typename TestFixture::MatrixType;
-
-    test_VectorProduct< MatrixType >();
-}
-
 TYPED_TEST( MatrixTest, forElements )
 {
     using MatrixType = typename TestFixture::MatrixType;
diff --git a/src/UnitTests/Matrices/SparseMatrixTest.hpp b/src/UnitTests/Matrices/SparseMatrixTest.hpp
index f906adfbf1285f72f16a51e58de4bcd4b4222fb8..cca22d85740e03bdb0887e1181c9842c9748ad01 100644
--- a/src/UnitTests/Matrices/SparseMatrixTest.hpp
+++ b/src/UnitTests/Matrices/SparseMatrixTest.hpp
@@ -1026,371 +1026,6 @@ void test_AddElement()
    EXPECT_EQ( m.getElement( 5, 4 ), 20 );
 }
 
-template< typename Matrix >
-void test_VectorProduct()
-{
-   using RealType = typename Matrix::RealType;
-   using DeviceType = typename Matrix::DeviceType;
-   using IndexType = typename Matrix::IndexType;
-   using VectorType = TNL::Containers::Vector< RealType, DeviceType, IndexType >;
-
-   /*
-    * Sets up the following 4x4 sparse matrix:
-    *
-    *    /  1  0  0  0 \
-    *    |  0  2  0  3 |
-    *    |  0  4  0  0 |
-    *    \  0  0  5  0 /
-    */
-
-   const IndexType m_rows_1 = 4;
-   const IndexType m_cols_1 = 4;
-
-   Matrix m_1;
-   m_1.reset();
-   m_1.setDimensions( m_rows_1, m_cols_1 );
-   typename Matrix::RowsCapacitiesType rowLengths_1{ 1, 2, 1, 1 };
-   m_1.setRowCapacities( rowLengths_1 );
-
-   RealType value_1 = 1;
-   m_1.setElement( 0, 0, value_1++ );      // 0th row
-
-   m_1.setElement( 1, 1, value_1++ );      // 1st row
-   m_1.setElement( 1, 3, value_1++ );
-
-   m_1.setElement( 2, 1, value_1++ );      // 2nd row
-
-   m_1.setElement( 3, 2, value_1++ );      // 3rd row
-
-   VectorType inVector_1;
-   inVector_1.setSize( m_cols_1 );
-   for( IndexType i = 0; i < inVector_1.getSize(); i++ )
-       inVector_1.setElement( i, 2 );
-
-   VectorType outVector_1;
-   outVector_1.setSize( m_rows_1 );
-   for( IndexType j = 0; j < outVector_1.getSize(); j++ )
-       outVector_1.setElement( j, 0 );
-
-   m_1.vectorProduct( inVector_1, outVector_1 );
-   EXPECT_EQ( outVector_1.getElement( 0 ),  2 );
-   EXPECT_EQ( outVector_1.getElement( 1 ), 10 );
-   EXPECT_EQ( outVector_1.getElement( 2 ),  8 );
-   EXPECT_EQ( outVector_1.getElement( 3 ), 10 );
-
-   /*
-    * Sets up the following 4x4 sparse matrix:
-    *
-    *    /  1  2  3  0 \
-    *    |  0  0  0  4 |
-    *    |  5  6  7  0 |
-    *    \  0  8  0  0 /
-    */
-
-   const IndexType m_rows_2 = 4;
-   const IndexType m_cols_2 = 4;
-
-   Matrix m_2( m_rows_2, m_cols_2 );
-   typename Matrix::RowsCapacitiesType rowLengths_2{ 3, 1, 3, 1 };
-   m_2.setRowCapacities( rowLengths_2 );
-
-   RealType value_2 = 1;
-   for( IndexType i = 0; i < 3; i++ )      // 0th row
-      m_2.setElement( 0, i, value_2++ );
-
-   m_2.setElement( 1, 3, value_2++ );      // 1st row
-
-   for( IndexType i = 0; i < 3; i++ )      // 2nd row
-      m_2.setElement( 2, i, value_2++ );
-
-   for( IndexType i = 1; i < 2; i++ )      // 3rd row
-      m_2.setElement( 3, i, value_2++ );
-
-   VectorType inVector_2;
-   inVector_2.setSize( m_cols_2 );
-   for( IndexType i = 0; i < inVector_2.getSize(); i++ )
-      inVector_2.setElement( i, 2 );
-
-   VectorType outVector_2;
-   outVector_2.setSize( m_rows_2 );
-   for( IndexType j = 0; j < outVector_2.getSize(); j++ )
-      outVector_2.setElement( j, 0 );
-
-   m_2.vectorProduct( inVector_2, outVector_2 );
-
-   EXPECT_EQ( outVector_2.getElement( 0 ), 12 );
-   EXPECT_EQ( outVector_2.getElement( 1 ),  8 );
-   EXPECT_EQ( outVector_2.getElement( 2 ), 36 );
-   EXPECT_EQ( outVector_2.getElement( 3 ), 16 );
-
-   /*
-    * Sets up the following 4x4 sparse matrix:
-    *
-    *    /  1  2  3  0 \
-    *    |  0  4  5  6 |
-    *    |  7  8  9  0 |
-    *    \  0 10 11 12 /
-    */
-
-   const IndexType m_rows_3 = 4;
-   const IndexType m_cols_3 = 4;
-
-   Matrix m_3( m_rows_3, m_cols_3 );
-   typename Matrix::RowsCapacitiesType rowLengths_3{ 3, 3, 3, 3 };
-   m_3.setRowCapacities( rowLengths_3 );
-
-   RealType value_3 = 1;
-   for( IndexType i = 0; i < 3; i++ )          // 0th row
-      m_3.setElement( 0, i, value_3++ );
-
-   for( IndexType i = 1; i < 4; i++ )
-      m_3.setElement( 1, i, value_3++ );      // 1st row
-
-   for( IndexType i = 0; i < 3; i++ )          // 2nd row
-      m_3.setElement( 2, i, value_3++ );
-
-   for( IndexType i = 1; i < 4; i++ )          // 3rd row
-      m_3.setElement( 3, i, value_3++ );
-
-   VectorType inVector_3;
-   inVector_3.setSize( m_cols_3 );
-   for( IndexType i = 0; i < inVector_3.getSize(); i++ )
-      inVector_3.setElement( i, 2 );
-
-   VectorType outVector_3;
-   outVector_3.setSize( m_rows_3 );
-   for( IndexType j = 0; j < outVector_3.getSize(); j++ )
-      outVector_3.setElement( j, 0 );
-
-   m_3.vectorProduct( inVector_3, outVector_3 );
-
-   EXPECT_EQ( outVector_3.getElement( 0 ), 12 );
-   EXPECT_EQ( outVector_3.getElement( 1 ), 30 );
-   EXPECT_EQ( outVector_3.getElement( 2 ), 48 );
-   EXPECT_EQ( outVector_3.getElement( 3 ), 66 );
-
-   /*
-    * Sets up the following 8x8 sparse matrix:
-    *
-    *    /  1  2  3  0  0  4  0  0 \
-    *    |  0  5  6  7  8  0  0  0 |
-    *    |  9 10 11 12 13  0  0  0 |
-    *    |  0 14 15 16 17  0  0  0 |
-    *    |  0  0 18 19 20 21  0  0 |
-    *    |  0  0  0 22 23 24 25  0 |
-    *    | 26 27 28 29 30  0  0  0 |
-    *    \ 31 32 33 34 35  0  0  0 /
-    */
-
-   const IndexType m_rows_4 = 8;
-   const IndexType m_cols_4 = 8;
-
-   Matrix m_4( m_rows_4, m_cols_4 );
-   typename Matrix::RowsCapacitiesType rowLengths_4{ 4, 4, 5, 4, 4, 4, 5, 5 };
-   m_4.setRowCapacities( rowLengths_4 );
-
-   RealType value_4 = 1;
-   for( IndexType i = 0; i < 3; i++ )       // 0th row
-      m_4.setElement( 0, i, value_4++ );
-
-   m_4.setElement( 0, 5, value_4++ );
-
-   for( IndexType i = 1; i < 5; i++ )       // 1st row
-      m_4.setElement( 1, i, value_4++ );
-
-   for( IndexType i = 0; i < 5; i++ )       // 2nd row
-      m_4.setElement( 2, i, value_4++ );
-
-   for( IndexType i = 1; i < 5; i++ )       // 3rd row
-      m_4.setElement( 3, i, value_4++ );
-
-   for( IndexType i = 2; i < 6; i++ )       // 4th row
-      m_4.setElement( 4, i, value_4++ );
-
-   for( IndexType i = 3; i < 7; i++ )       // 5th row
-      m_4.setElement( 5, i, value_4++ );
-
-   for( IndexType i = 0; i < 5; i++ )       // 6th row
-      m_4.setElement( 6, i, value_4++ );
-
-   for( IndexType i = 0; i < 5; i++ )       // 7th row
-      m_4.setElement( 7, i, value_4++ );
-
-   VectorType inVector_4;
-   inVector_4.setSize( m_cols_4 );
-   for( IndexType i = 0; i < inVector_4.getSize(); i++ )
-      inVector_4.setElement( i, 2 );
-
-   VectorType outVector_4;
-   outVector_4.setSize( m_rows_4 );
-   for( IndexType j = 0; j < outVector_4.getSize(); j++ )
-      outVector_4.setElement( j, 0 );
-
-   m_4.vectorProduct( inVector_4, outVector_4 );
-
-   EXPECT_EQ( outVector_4.getElement( 0 ),  20 );
-   EXPECT_EQ( outVector_4.getElement( 1 ),  52 );
-   EXPECT_EQ( outVector_4.getElement( 2 ), 110 );
-   EXPECT_EQ( outVector_4.getElement( 3 ), 124 );
-   EXPECT_EQ( outVector_4.getElement( 4 ), 156 );
-   EXPECT_EQ( outVector_4.getElement( 5 ), 188 );
-   EXPECT_EQ( outVector_4.getElement( 6 ), 280 );
-   EXPECT_EQ( outVector_4.getElement( 7 ), 330 );
-
-   /*
-    * Sets up the following 8x8 sparse matrix:
-    *
-    *    /  1  2  3  0  4  5  0  1 \   6
-    *    |  0  6  0  7  0  0  0  1 |   3
-    *    |  0  8  9  0 10  0  0  1 |   4
-    *    |  0 11 12 13 14  0  0  1 |   5
-    *    |  0 15  0  0  0  0  0  1 |   2
-    *    |  0 16 17 18 19 20 21  1 |   7
-    *    | 22 23 24 25 26 27 28  1 |   8
-    *    \ 29 30 31 32 33 34 35 36 /   8
-    */
-
-   const IndexType m_rows_5 = 8;
-   const IndexType m_cols_5 = 8;
-
-   Matrix m_5( m_rows_5, m_cols_5 );
-   typename Matrix::RowsCapacitiesType rowLengths_5{ 6, 3, 4, 5, 2, 7, 8, 8 };
-   m_5.setRowCapacities( rowLengths_5 );
-
-   RealType value_5 = 1;
-   for( IndexType i = 0; i < 3; i++ )   // 0th row
-      m_5.setElement( 0, i, value_5++ );
-
-   m_5.setElement( 0, 4, value_5++ );           // 0th row
-   m_5.setElement( 0, 5, value_5++ );
-
-   m_5.setElement( 1, 1, value_5++ );           // 1st row
-   m_5.setElement( 1, 3, value_5++ );
-
-   for( IndexType i = 1; i < 3; i++ )            // 2nd row
-      m_5.setElement( 2, i, value_5++ );
-
-   m_5.setElement( 2, 4, value_5++ );           // 2nd row
-
-   for( IndexType i = 1; i < 5; i++ )            // 3rd row
-      m_5.setElement( 3, i, value_5++ );
-
-   m_5.setElement( 4, 1, value_5++ );           // 4th row
-
-   for( IndexType i = 1; i < 7; i++ )            // 5th row
-      m_5.setElement( 5, i, value_5++ );
-
-   for( IndexType i = 0; i < 7; i++ )            // 6th row
-      m_5.setElement( 6, i, value_5++ );
-
-   for( IndexType i = 0; i < 8; i++ )            // 7th row
-      m_5.setElement( 7, i, value_5++ );
-
-   for( IndexType i = 0; i < 7; i++ )            // 1s at the end of rows
-      m_5.setElement( i, 7, 1);
-
-   VectorType inVector_5;
-   inVector_5.setSize( m_cols_5 );
-   for( IndexType i = 0; i < inVector_5.getSize(); i++ )
-       inVector_5.setElement( i, 2 );
-
-   VectorType outVector_5;
-   outVector_5.setSize( m_rows_5 );
-   for( IndexType j = 0; j < outVector_5.getSize(); j++ )
-       outVector_5.setElement( j, 0 );
-
-   m_5.vectorProduct( inVector_5, outVector_5 );
-
-   EXPECT_EQ( outVector_5.getElement( 0 ),  32 );
-   EXPECT_EQ( outVector_5.getElement( 1 ),  28 );
-   EXPECT_EQ( outVector_5.getElement( 2 ),  56 );
-   EXPECT_EQ( outVector_5.getElement( 3 ), 102 );
-   EXPECT_EQ( outVector_5.getElement( 4 ),  32 );
-   EXPECT_EQ( outVector_5.getElement( 5 ), 224 );
-   EXPECT_EQ( outVector_5.getElement( 6 ), 352 );
-   EXPECT_EQ( outVector_5.getElement( 7 ), 520 );
-
-   /////
-   // Large test
-   const IndexType size( 1051 );
-   //for( int size = 1; size < 1000; size++ )
-   {
-      //std::cerr << " size = " << size << std::endl;
-      // Test with large diagonal matrix
-      Matrix m1( size, size );
-      TNL::Containers::Vector< IndexType, DeviceType, IndexType > rowCapacities( size );
-      rowCapacities.forAllElements( [] __cuda_callable__ ( IndexType i, IndexType& value ) { value = 1; } );
-      m1.setRowCapacities( rowCapacities );
-      auto f1 = [=] __cuda_callable__ ( IndexType row, IndexType localIdx, IndexType& column, RealType& value, bool& compute ) {
-         if( localIdx == 0  )
-         {
-            value = row + 1;
-            column = row;
-         }
-      };
-      m1.forAllElements( f1 );
-      // check that the matrix was initialized
-      m1.getCompressedRowLengths( rowCapacities );
-      EXPECT_EQ( rowCapacities, 1 );
-
-      TNL::Containers::Vector< double, DeviceType, IndexType > in( size, 1.0 ), out( size, 0.0 );
-      m1.vectorProduct( in, out );
-      //std::cerr << out << std::endl;
-      for( IndexType i = 0; i < size; i++ )
-         EXPECT_EQ( out.getElement( i ), i + 1 );
-
-      // Test with large triangular matrix
-      const int rows( size ), columns( size );
-      Matrix m2( rows, columns );
-      rowCapacities.setSize( rows );
-      rowCapacities.forAllElements( [=] __cuda_callable__ ( IndexType i, IndexType& value ) { value = i + 1; } );
-      m2.setRowCapacities( rowCapacities );
-      auto f2 = [=] __cuda_callable__ ( IndexType row, IndexType localIdx, IndexType& column, RealType& value, bool& compute ) {
-         if( localIdx <= row )
-         {
-            value = localIdx + 1;
-            column = localIdx;
-         }
-      };
-      m2.forAllElements( f2 );
-      // check that the matrix was initialized
-      TNL::Containers::Vector< IndexType, DeviceType, IndexType > rowLengths( rows );
-      m2.getCompressedRowLengths( rowLengths );
-      EXPECT_EQ( rowLengths, rowCapacities );
-
-      out.setSize( rows );
-      out = 0.0;
-      m2.vectorProduct( in, out );
-      for( IndexType i = 0; i < rows; i++ )
-         EXPECT_EQ( out.getElement( i ), ( i + 1 ) * ( i + 2 ) / 2 );
-   }
-
-   /**
-    * Long row test
-    */
-   using MatrixSegmentsType = typename Matrix::SegmentsType;
-   constexpr TNL::Algorithms::Segments::ElementsOrganization organization = MatrixSegmentsType::getOrganization();
-   using ChunkedEllpackView_ = TNL::Algorithms::Segments::ChunkedEllpackView< DeviceType, IndexType, organization >;
-   if( ! std::is_same< typename Matrix::SegmentsViewType, ChunkedEllpackView_ >::value )
-   {
-      // TODO: Fix ChunkedEllpack for this test - seems that it allocates too much memory
-      const int columns = 3000;
-      const int rows = 1;
-      Matrix m3( rows, columns );
-      TNL::Containers::Vector< IndexType, DeviceType, IndexType > rowsCapacities( rows );
-      rowsCapacities = columns;
-      m3.setRowCapacities( rowsCapacities );
-      auto f = [] __cuda_callable__ ( IndexType row, IndexType localIdx, IndexType& column, RealType& value, bool& compute ) {
-         column = localIdx;
-         value = localIdx + 1;
-      };
-      m3.forAllElements( f );
-      TNL::Containers::Vector< double, DeviceType, IndexType > in( columns, 1.0 ), out( rows, 0.0 );
-      m3.vectorProduct( in, out );
-      EXPECT_EQ( out.getElement( 0 ), ( double ) columns * ( double ) (columns + 1 ) / 2.0 );
-   }
-}
-
 template< typename Matrix >
 void test_ForElements()
 {
diff --git a/src/UnitTests/Matrices/SparseMatrixTest_CSRHybrid.cpp b/src/UnitTests/Matrices/SparseMatrixTest_CSRHybrid.cpp
index 214ed2ca7c6990dd03932cfc862f82dc1633f865..5aef16abbe2cd0775892555cad0dcc2f65364744 100644
--- a/src/UnitTests/Matrices/SparseMatrixTest_CSRHybrid.cpp
+++ b/src/UnitTests/Matrices/SparseMatrixTest_CSRHybrid.cpp
@@ -1,5 +1,5 @@
 /***************************************************************************
-                          SparseMatrixTest_CSRHybrid.cpp -  description
+                          SparseMatrixVectorProductTest_CSRHybrid.cpp -  description
                              -------------------
     begin                : Jan 23, 2021
     copyright            : (C) 2021 by Tomas Oberhuber et al.
@@ -8,4 +8,4 @@
 
 /* See Copyright Notice in tnl/Copyright */
 
-#include "SparseMatrixTest_CSRHybrid.h"
+#include "SparseMatrixVectorProductTest_CSRHybrid.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest.h b/src/UnitTests/Matrices/SparseMatrixVectorProductTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..5e3ac36dae8d31e0442515ceef5279ceee531d14
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest.h
@@ -0,0 +1,43 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest.h -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#pragma once
+
+#include <TNL/Containers/Vector.h>
+#include <TNL/Containers/VectorView.h>
+#include <TNL/Math.h>
+#include <TNL/Algorithms/ParallelFor.h>
+#include <iostream>
+#include <sstream>
+
+#include "SparseMatrixVectorProductTest.hpp"
+
+#ifdef HAVE_GTEST
+#include <gtest/gtest.h>
+
+// test fixture for typed tests
+template< typename Matrix >
+class MatrixTest : public ::testing::Test
+{
+protected:
+   using MatrixType = Matrix;
+};
+
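+// Note: each test file that includes this header is expected to define MatrixTypes (a ::testing::Types list) beforehand.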
+TYPED_TEST_SUITE( MatrixTest, MatrixTypes );
+
+TYPED_TEST( MatrixTest, vectorProductTest )
+{
+    using MatrixType = typename TestFixture::MatrixType;
+
+    test_VectorProduct< MatrixType >();
+}
+
+#endif
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest.hpp b/src/UnitTests/Matrices/SparseMatrixVectorProductTest.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d49356042d9c7e521e966d21969bd70170f1bf39
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest.hpp
@@ -0,0 +1,398 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest.hpp -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#pragma once
+
+#include <functional>
+#include <TNL/Containers/Vector.h>
+#include <TNL/Containers/VectorView.h>
+#include <TNL/Math.h>
+#include <TNL/Algorithms/ParallelFor.h>
+#include <iostream>
+#include <sstream>
+
+// Needed only to detect ChunkedEllpack, which is skipped in the long-row part of the vectorProduct test below
+#include <TNL/Algorithms/Segments/ChunkedEllpackView.h>
+
+#ifdef HAVE_GTEST
+#include <gtest/gtest.h>
+
+template< typename Matrix >
+void test_VectorProduct()
+{
+   using RealType = typename Matrix::RealType;
+   using DeviceType = typename Matrix::DeviceType;
+   using IndexType = typename Matrix::IndexType;
+   using VectorType = TNL::Containers::Vector< RealType, DeviceType, IndexType >;
+
+   /*
+    * Sets up the following 4x4 sparse matrix:
+    *
+    *    /  1  0  0  0 \
+    *    |  0  2  0  3 |
+    *    |  0  4  0  0 |
+    *    \  0  0  5  0 /
+    */
+
+   const IndexType m_rows_1 = 4;
+   const IndexType m_cols_1 = 4;
+
+   Matrix m_1;
+   m_1.reset();
+   m_1.setDimensions( m_rows_1, m_cols_1 );
+   typename Matrix::RowsCapacitiesType rowLengths_1{ 1, 2, 1, 1 };
+   m_1.setRowCapacities( rowLengths_1 );
+
+   RealType value_1 = 1;
+   m_1.setElement( 0, 0, value_1++ );      // 0th row
+
+   m_1.setElement( 1, 1, value_1++ );      // 1st row
+   m_1.setElement( 1, 3, value_1++ );
+
+   m_1.setElement( 2, 1, value_1++ );      // 2nd row
+
+   m_1.setElement( 3, 2, value_1++ );      // 3rd row
+
+   VectorType inVector_1;
+   inVector_1.setSize( m_cols_1 );
+   for( IndexType i = 0; i < inVector_1.getSize(); i++ )
+       inVector_1.setElement( i, 2 );
+
+   VectorType outVector_1;
+   outVector_1.setSize( m_rows_1 );
+   for( IndexType j = 0; j < outVector_1.getSize(); j++ )
+       outVector_1.setElement( j, 0 );
+
+   m_1.vectorProduct( inVector_1, outVector_1 );
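+   // With an input vector of all 2s, each output entry is twice the corresponding row sum: 2*1, 2*(2+3), 2*4 and 2*5.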
+   EXPECT_EQ( outVector_1.getElement( 0 ),  2 );
+   EXPECT_EQ( outVector_1.getElement( 1 ), 10 );
+   EXPECT_EQ( outVector_1.getElement( 2 ),  8 );
+   EXPECT_EQ( outVector_1.getElement( 3 ), 10 );
+
+   /*
+    * Sets up the following 4x4 sparse matrix:
+    *
+    *    /  1  2  3  0 \
+    *    |  0  0  0  4 |
+    *    |  5  6  7  0 |
+    *    \  0  8  0  0 /
+    */
+
+   const IndexType m_rows_2 = 4;
+   const IndexType m_cols_2 = 4;
+
+   Matrix m_2( m_rows_2, m_cols_2 );
+   typename Matrix::RowsCapacitiesType rowLengths_2{ 3, 1, 3, 1 };
+   m_2.setRowCapacities( rowLengths_2 );
+
+   RealType value_2 = 1;
+   for( IndexType i = 0; i < 3; i++ )      // 0th row
+      m_2.setElement( 0, i, value_2++ );
+
+   m_2.setElement( 1, 3, value_2++ );      // 1st row
+
+   for( IndexType i = 0; i < 3; i++ )      // 2nd row
+      m_2.setElement( 2, i, value_2++ );
+
+   for( IndexType i = 1; i < 2; i++ )      // 3rd row
+      m_2.setElement( 3, i, value_2++ );
+
+   VectorType inVector_2;
+   inVector_2.setSize( m_cols_2 );
+   for( IndexType i = 0; i < inVector_2.getSize(); i++ )
+      inVector_2.setElement( i, 2 );
+
+   VectorType outVector_2;
+   outVector_2.setSize( m_rows_2 );
+   for( IndexType j = 0; j < outVector_2.getSize(); j++ )
+      outVector_2.setElement( j, 0 );
+
+   m_2.vectorProduct( inVector_2, outVector_2 );
+
+   EXPECT_EQ( outVector_2.getElement( 0 ), 12 );
+   EXPECT_EQ( outVector_2.getElement( 1 ),  8 );
+   EXPECT_EQ( outVector_2.getElement( 2 ), 36 );
+   EXPECT_EQ( outVector_2.getElement( 3 ), 16 );
+
+   /*
+    * Sets up the following 4x4 sparse matrix:
+    *
+    *    /  1  2  3  0 \
+    *    |  0  4  5  6 |
+    *    |  7  8  9  0 |
+    *    \  0 10 11 12 /
+    */
+
+   const IndexType m_rows_3 = 4;
+   const IndexType m_cols_3 = 4;
+
+   Matrix m_3( m_rows_3, m_cols_3 );
+   typename Matrix::RowsCapacitiesType rowLengths_3{ 3, 3, 3, 3 };
+   m_3.setRowCapacities( rowLengths_3 );
+
+   RealType value_3 = 1;
+   for( IndexType i = 0; i < 3; i++ )          // 0th row
+      m_3.setElement( 0, i, value_3++ );
+
+   for( IndexType i = 1; i < 4; i++ )
+      m_3.setElement( 1, i, value_3++ );      // 1st row
+
+   for( IndexType i = 0; i < 3; i++ )          // 2nd row
+      m_3.setElement( 2, i, value_3++ );
+
+   for( IndexType i = 1; i < 4; i++ )          // 3rd row
+      m_3.setElement( 3, i, value_3++ );
+
+   VectorType inVector_3;
+   inVector_3.setSize( m_cols_3 );
+   for( IndexType i = 0; i < inVector_3.getSize(); i++ )
+      inVector_3.setElement( i, 2 );
+
+   VectorType outVector_3;
+   outVector_3.setSize( m_rows_3 );
+   for( IndexType j = 0; j < outVector_3.getSize(); j++ )
+      outVector_3.setElement( j, 0 );
+
+   m_3.vectorProduct( inVector_3, outVector_3 );
+
+   EXPECT_EQ( outVector_3.getElement( 0 ), 12 );
+   EXPECT_EQ( outVector_3.getElement( 1 ), 30 );
+   EXPECT_EQ( outVector_3.getElement( 2 ), 48 );
+   EXPECT_EQ( outVector_3.getElement( 3 ), 66 );
+
+   /*
+    * Sets up the following 8x8 sparse matrix:
+    *
+    *    /  1  2  3  0  0  4  0  0 \
+    *    |  0  5  6  7  8  0  0  0 |
+    *    |  9 10 11 12 13  0  0  0 |
+    *    |  0 14 15 16 17  0  0  0 |
+    *    |  0  0 18 19 20 21  0  0 |
+    *    |  0  0  0 22 23 24 25  0 |
+    *    | 26 27 28 29 30  0  0  0 |
+    *    \ 31 32 33 34 35  0  0  0 /
+    */
+
+   const IndexType m_rows_4 = 8;
+   const IndexType m_cols_4 = 8;
+
+   Matrix m_4( m_rows_4, m_cols_4 );
+   typename Matrix::RowsCapacitiesType rowLengths_4{ 4, 4, 5, 4, 4, 4, 5, 5 };
+   m_4.setRowCapacities( rowLengths_4 );
+
+   RealType value_4 = 1;
+   for( IndexType i = 0; i < 3; i++ )       // 0th row
+      m_4.setElement( 0, i, value_4++ );
+
+   m_4.setElement( 0, 5, value_4++ );
+
+   for( IndexType i = 1; i < 5; i++ )       // 1st row
+      m_4.setElement( 1, i, value_4++ );
+
+   for( IndexType i = 0; i < 5; i++ )       // 2nd row
+      m_4.setElement( 2, i, value_4++ );
+
+   for( IndexType i = 1; i < 5; i++ )       // 3rd row
+      m_4.setElement( 3, i, value_4++ );
+
+   for( IndexType i = 2; i < 6; i++ )       // 4th row
+      m_4.setElement( 4, i, value_4++ );
+
+   for( IndexType i = 3; i < 7; i++ )       // 5th row
+      m_4.setElement( 5, i, value_4++ );
+
+   for( IndexType i = 0; i < 5; i++ )       // 6th row
+      m_4.setElement( 6, i, value_4++ );
+
+   for( IndexType i = 0; i < 5; i++ )       // 7th row
+      m_4.setElement( 7, i, value_4++ );
+
+   VectorType inVector_4;
+   inVector_4.setSize( m_cols_4 );
+   for( IndexType i = 0; i < inVector_4.getSize(); i++ )
+      inVector_4.setElement( i, 2 );
+
+   VectorType outVector_4;
+   outVector_4.setSize( m_rows_4 );
+   for( IndexType j = 0; j < outVector_4.getSize(); j++ )
+      outVector_4.setElement( j, 0 );
+
+   m_4.vectorProduct( inVector_4, outVector_4 );
+
+   EXPECT_EQ( outVector_4.getElement( 0 ),  20 );
+   EXPECT_EQ( outVector_4.getElement( 1 ),  52 );
+   EXPECT_EQ( outVector_4.getElement( 2 ), 110 );
+   EXPECT_EQ( outVector_4.getElement( 3 ), 124 );
+   EXPECT_EQ( outVector_4.getElement( 4 ), 156 );
+   EXPECT_EQ( outVector_4.getElement( 5 ), 188 );
+   EXPECT_EQ( outVector_4.getElement( 6 ), 280 );
+   EXPECT_EQ( outVector_4.getElement( 7 ), 330 );
+
+   /*
+    * Sets up the following 8x8 sparse matrix:
+    *
+    *    /  1  2  3  0  4  5  0  1 \   6
+    *    |  0  6  0  7  0  0  0  1 |   3
+    *    |  0  8  9  0 10  0  0  1 |   4
+    *    |  0 11 12 13 14  0  0  1 |   5
+    *    |  0 15  0  0  0  0  0  1 |   2
+    *    |  0 16 17 18 19 20 21  1 |   7
+    *    | 22 23 24 25 26 27 28  1 |   8
+    *    \ 29 30 31 32 33 34 35 36 /   8
+    */
+
+   const IndexType m_rows_5 = 8;
+   const IndexType m_cols_5 = 8;
+
+   Matrix m_5( m_rows_5, m_cols_5 );
+   typename Matrix::RowsCapacitiesType rowLengths_5{ 6, 3, 4, 5, 2, 7, 8, 8 };
+   m_5.setRowCapacities( rowLengths_5 );
+
+   RealType value_5 = 1;
+   for( IndexType i = 0; i < 3; i++ )   // 0th row
+      m_5.setElement( 0, i, value_5++ );
+
+   m_5.setElement( 0, 4, value_5++ );           // 0th row
+   m_5.setElement( 0, 5, value_5++ );
+
+   m_5.setElement( 1, 1, value_5++ );           // 1st row
+   m_5.setElement( 1, 3, value_5++ );
+
+   for( IndexType i = 1; i < 3; i++ )            // 2nd row
+      m_5.setElement( 2, i, value_5++ );
+
+   m_5.setElement( 2, 4, value_5++ );           // 2nd row
+
+   for( IndexType i = 1; i < 5; i++ )            // 3rd row
+      m_5.setElement( 3, i, value_5++ );
+
+   m_5.setElement( 4, 1, value_5++ );           // 4th row
+
+   for( IndexType i = 1; i < 7; i++ )            // 5th row
+      m_5.setElement( 5, i, value_5++ );
+
+   for( IndexType i = 0; i < 7; i++ )            // 6th row
+      m_5.setElement( 6, i, value_5++ );
+
+   for( IndexType i = 0; i < 8; i++ )            // 7th row
+      m_5.setElement( 7, i, value_5++ );
+
+   for( IndexType i = 0; i < 7; i++ )            // 1s at the end of rows
+      m_5.setElement( i, 7, 1 );
+
+   VectorType inVector_5;
+   inVector_5.setSize( m_cols_5 );
+   for( IndexType i = 0; i < inVector_5.getSize(); i++ )
+       inVector_5.setElement( i, 2 );
+
+   VectorType outVector_5;
+   outVector_5.setSize( m_rows_5 );
+   for( IndexType j = 0; j < outVector_5.getSize(); j++ )
+       outVector_5.setElement( j, 0 );
+
+   m_5.vectorProduct( inVector_5, outVector_5 );
+
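+   // Again twice the row sums (trailing 1s included): e.g. row 0 gives 2*(1+2+3+4+5+1) = 32 and row 7 gives 2*(29+30+...+36) = 520.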
+   EXPECT_EQ( outVector_5.getElement( 0 ),  32 );
+   EXPECT_EQ( outVector_5.getElement( 1 ),  28 );
+   EXPECT_EQ( outVector_5.getElement( 2 ),  56 );
+   EXPECT_EQ( outVector_5.getElement( 3 ), 102 );
+   EXPECT_EQ( outVector_5.getElement( 4 ),  32 );
+   EXPECT_EQ( outVector_5.getElement( 5 ), 224 );
+   EXPECT_EQ( outVector_5.getElement( 6 ), 352 );
+   EXPECT_EQ( outVector_5.getElement( 7 ), 520 );
+
+   /////
+   // Large test
+   const IndexType size( 1051 );
+   //for( int size = 1; size < 1000; size++ )
+   {
+      //std::cerr << " size = " << size << std::endl;
+      // Test with large diagonal matrix
+      Matrix m1( size, size );
+      TNL::Containers::Vector< IndexType, DeviceType, IndexType > rowCapacities( size );
+      rowCapacities.forAllElements( [] __cuda_callable__ ( IndexType i, IndexType& value ) { value = 1; } );
+      m1.setRowCapacities( rowCapacities );
+      auto f1 = [=] __cuda_callable__ ( IndexType row, IndexType localIdx, IndexType& column, RealType& value, bool& compute ) {
+         if( localIdx == 0  )
+         {
+            value = row + 1;
+            column = row;
+         }
+      };
+      m1.forAllElements( f1 );
+      // check that the matrix was initialized
+      m1.getCompressedRowLengths( rowCapacities );
+      EXPECT_EQ( rowCapacities, 1 );
+
+      TNL::Containers::Vector< double, DeviceType, IndexType > in( size, 1.0 ), out( size, 0.0 );
+      m1.vectorProduct( in, out );
+      //std::cerr << out << std::endl;
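+      // Row i holds only the diagonal entry i + 1, so multiplying by a vector of ones reproduces it.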
+      for( IndexType i = 0; i < size; i++ )
+         EXPECT_EQ( out.getElement( i ), i + 1 );
+
+      // Test with large triangular matrix
+      const int rows( size ), columns( size );
+      Matrix m2( rows, columns );
+      rowCapacities.setSize( rows );
+      rowCapacities.forAllElements( [=] __cuda_callable__ ( IndexType i, IndexType& value ) { value = i + 1; } );
+      m2.setRowCapacities( rowCapacities );
+      auto f2 = [=] __cuda_callable__ ( IndexType row, IndexType localIdx, IndexType& column, RealType& value, bool& compute ) {
+         if( localIdx <= row )
+         {
+            value = localIdx + 1;
+            column = localIdx;
+         }
+      };
+      m2.forAllElements( f2 );
+      // check that the matrix was initialized
+      TNL::Containers::Vector< IndexType, DeviceType, IndexType > rowLengths( rows );
+      m2.getCompressedRowLengths( rowLengths );
+      EXPECT_EQ( rowLengths, rowCapacities );
+
+      out.setSize( rows );
+      out = 0.0;
+      m2.vectorProduct( in, out );
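+      // Row i stores the values 1, 2, ..., i + 1, so with an all-ones input the result is the triangular number ( i + 1 ) * ( i + 2 ) / 2.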
+      for( IndexType i = 0; i < rows; i++ )
+         EXPECT_EQ( out.getElement( i ), ( i + 1 ) * ( i + 2 ) / 2 );
+   }
+
+   /**
+    * Long row test
+    */
+   using MatrixSegmentsType = typename Matrix::SegmentsType;
+   constexpr TNL::Algorithms::Segments::ElementsOrganization organization = MatrixSegmentsType::getOrganization();
+   using ChunkedEllpackView_ = TNL::Algorithms::Segments::ChunkedEllpackView< DeviceType, IndexType, organization >;
+   if( ! std::is_same< typename Matrix::SegmentsViewType, ChunkedEllpackView_ >::value )
+   {
+      // TODO: Fix ChunkedEllpack for this test - it seems to allocate too much memory
+      const int columns = 3000;
+      const int rows = 1;
+      Matrix m3( rows, columns );
+      TNL::Containers::Vector< IndexType, DeviceType, IndexType > rowsCapacities( rows );
+      rowsCapacities = columns;
+      m3.setRowCapacities( rowsCapacities );
+      auto f = [] __cuda_callable__ ( IndexType row, IndexType localIdx, IndexType& column, RealType& value, bool& compute ) {
+         column = localIdx;
+         value = localIdx + 1;
+      };
+      m3.forAllElements( f );
+      TNL::Containers::Vector< double, DeviceType, IndexType > in( columns, 1.0 ), out( rows, 0.0 );
+      m3.vectorProduct( in, out );
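+      // The single row stores 1, 2, ..., columns, so the product with an all-ones vector equals columns * ( columns + 1 ) / 2.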
+      EXPECT_EQ( out.getElement( 0 ), ( double ) columns * ( double ) (columns + 1 ) / 2.0 );
+   }
+}
+
+
+#endif
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_BiEllpack.cpp b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_BiEllpack.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..319ed66053e11bee70bc0d5c98daec464c3a24a8
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_BiEllpack.cpp
@@ -0,0 +1,11 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_BiEllpack.cpp -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include "SparseMatrixVectorProductTest_BiEllpack.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_BiEllpack.cu b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_BiEllpack.cu
new file mode 120000
index 0000000000000000000000000000000000000000..f34bb20d59c6f9af52efe54c20550e1f98df051f
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_BiEllpack.cu
@@ -0,0 +1 @@
+SparseMatrixVectorProductTest_BiEllpack.cpp
\ No newline at end of file
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_BiEllpack.h b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_BiEllpack.h
new file mode 100644
index 0000000000000000000000000000000000000000..abdc11ca2fce134d4b03af4be67316ca93442779
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_BiEllpack.h
@@ -0,0 +1,58 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_BiEllpack.h -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include <iostream>
+#include <TNL/Algorithms/Segments/BiEllpack.h>
+#include <TNL/Matrices/SparseMatrix.h>
+
+#ifdef HAVE_GTEST
+#include <gtest/gtest.h>
+
+const char* saveAndLoadFileName = "test_SparseMatrixTest_BiEllpack_segments";
+
+////
+// Row-major format is used for the host system
+template< typename Device, typename Index, typename IndexAllocator >
+using RowMajorBiEllpack = TNL::Algorithms::Segments::BiEllpack< Device, Index, IndexAllocator, TNL::Algorithms::Segments::RowMajorOrder >;
+
+////
+// Column-major format is used for GPUs
+template< typename Device, typename Index, typename IndexAllocator >
+using ColumnMajorBiEllpack = TNL::Algorithms::Segments::BiEllpack< Device, Index, IndexAllocator, TNL::Algorithms::Segments::ColumnMajorOrder >;
+
+// types for which MatrixTest is instantiated
+using MatrixTypes = ::testing::Types
+<
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, ColumnMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorBiEllpack >
+#ifdef HAVE_CUDA
+   ,TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, RowMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorBiEllpack >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorBiEllpack >
+#endif
+>;
+
+#endif
+
+#include "SparseMatrixVectorProductTest.h"
+#include "../main.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRAdaptive.cpp b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRAdaptive.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..bafa050d0a0b0e41b7e4d90a717f2cfffd6a947e
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRAdaptive.cpp
@@ -0,0 +1,11 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_CSRAdaptive.cpp -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include "SparseMatrixVectorProductTest_CSRAdaptive.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRAdaptive.cu b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRAdaptive.cu
new file mode 120000
index 0000000000000000000000000000000000000000..28919f7457dd6d473c4b887c050069f221dec37c
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRAdaptive.cu
@@ -0,0 +1 @@
+SparseMatrixVectorProductTest_CSRAdaptive.cpp
\ No newline at end of file
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRAdaptive.h b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRAdaptive.h
new file mode 100644
index 0000000000000000000000000000000000000000..93a0d79fb55f9789a2a4309704483a7f5d2e569e
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRAdaptive.h
@@ -0,0 +1,46 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_CSRAdaptive.h -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include <iostream>
+#include <TNL/Algorithms/Segments/CSR.h>
+#include <TNL/Matrices/SparseMatrix.h>
+
+#ifdef HAVE_GTEST
+#include <gtest/gtest.h>
+
+const char* saveAndLoadFileName = "test_SparseMatrixTest_CSRAdaptive_segments";
+
+// types for which MatrixTest is instantiated
+using MatrixTypes = ::testing::Types
+<
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >
+#ifdef HAVE_CUDA
+   ,TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRAdaptive >
+#endif
+>;
+
+#endif
+
+#include "SparseMatrixVectorProductTest.h"
+#include "../main.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRHybrid.cpp b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRHybrid.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a6795b4e1c59e5a022f5a9876d8485fc27ee9627
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRHybrid.cpp
@@ -0,0 +1,11 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_CSRHybrid.cpp -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include "SparseMatrixVectorProductTest_CSRHybrid.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRHybrid.cu b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRHybrid.cu
new file mode 120000
index 0000000000000000000000000000000000000000..4c81adef3778e1dacc5ce48ead8f5d76d5fbeba9
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRHybrid.cu
@@ -0,0 +1 @@
+SparseMatrixVectorProductTest_CSRHybrid.cpp
\ No newline at end of file
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRHybrid.h b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRHybrid.h
new file mode 100644
index 0000000000000000000000000000000000000000..99b5e440301188adb5d816d13c3d941132e67b5d
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRHybrid.h
@@ -0,0 +1,46 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_CSRHybrid.h -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include <iostream>
+#include <TNL/Algorithms/Segments/CSR.h>
+#include <TNL/Matrices/SparseMatrix.h>
+
+#ifdef HAVE_GTEST
+#include <gtest/gtest.h>
+
+const char* saveAndLoadFileName = "test_SparseMatrixTest_CSRHybrid_segments";
+
+// types for which MatrixTest is instantiated
+using MatrixTypes = ::testing::Types
+<
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >
+#ifdef HAVE_CUDA
+   ,TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRHybrid >
+#endif
+>;
+
+#endif
+
+#include "SparseMatrixVectorProductTest.h"
+#include "../main.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRScalar.cpp b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRScalar.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..bfa16c02b91c7cddb03a63763e994ade31bcb0f8
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRScalar.cpp
@@ -0,0 +1,11 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_CSRScalar.cpp -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include "SparseMatrixVectorProductTest_CSRScalar.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRScalar.cu b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRScalar.cu
new file mode 120000
index 0000000000000000000000000000000000000000..024a31f15c0372c1d968c8182eb93439305556d7
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRScalar.cu
@@ -0,0 +1 @@
+SparseMatrixVectorProductTest_CSRScalar.cpp
\ No newline at end of file
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRScalar.h b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRScalar.h
new file mode 100644
index 0000000000000000000000000000000000000000..b9586f66e74f77dfa3f929829604e17d304910ca
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRScalar.h
@@ -0,0 +1,46 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_CSRScalar.h -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include <iostream>
+#include <TNL/Algorithms/Segments/CSR.h>
+#include <TNL/Matrices/SparseMatrix.h>
+
+#ifdef HAVE_GTEST
+#include <gtest/gtest.h>
+
+const char* saveAndLoadFileName = "test_SparseMatrixTest_CSRScalar_segments";
+
+// types for which MatrixTest is instantiated
+using MatrixTypes = ::testing::Types
+<
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >
+#ifdef HAVE_CUDA
+   ,TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRScalar >
+#endif
+>;
+
+#endif
+
+#include "SparseMatrixVectorProductTest.h"
+#include "../main.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRVector.cpp b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRVector.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..68075da024e79ccc6cd47911e5de0232a8cc04e0
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRVector.cpp
@@ -0,0 +1,11 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_CSRVector.cpp -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include "SparseMatrixVectorProductTest_CSRVector.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRVector.cu b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRVector.cu
new file mode 120000
index 0000000000000000000000000000000000000000..91409a4b4800e3f759aba3d19994ed2344fe99e4
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRVector.cu
@@ -0,0 +1 @@
+SparseMatrixVectorProductTest_CSRVector.cpp
\ No newline at end of file
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRVector.h b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRVector.h
new file mode 100644
index 0000000000000000000000000000000000000000..0afe07e82ad9afff7246937220f7dcc28bca983f
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_CSRVector.h
@@ -0,0 +1,46 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_CSRVector.h -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include <iostream>
+#include <TNL/Algorithms/Segments/CSR.h>
+#include <TNL/Matrices/SparseMatrix.h>
+
+#ifdef HAVE_GTEST
+#include <gtest/gtest.h>
+
+const char* saveAndLoadFileName = "test_SparseMatrixTest_CSRVector_segments";
+
+// types for which MatrixTest is instantiated
+using MatrixTypes = ::testing::Types
+<
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >
+#ifdef HAVE_CUDA
+   ,TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, TNL::Algorithms::Segments::CSRVector >
+#endif
+>;
+
+#endif
+
+#include "SparseMatrixVectorProductTest.h"
+#include "../main.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_ChunkedEllpack.cpp b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_ChunkedEllpack.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1586d819192e5764b173d7d22566258f20eaf6b9
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_ChunkedEllpack.cpp
@@ -0,0 +1,11 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_ChunkedEllpack.cpp -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include "SparseMatrixVectorProductTest_ChunkedEllpack.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_ChunkedEllpack.cu b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_ChunkedEllpack.cu
new file mode 120000
index 0000000000000000000000000000000000000000..dea4491d64f98fd38c82e6cfa3b1b7b4d488ea05
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_ChunkedEllpack.cu
@@ -0,0 +1 @@
+SparseMatrixVectorProductTest_ChunkedEllpack.cpp
\ No newline at end of file
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_ChunkedEllpack.h b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_ChunkedEllpack.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2cb049f6a7611ebeda132a1810f126f3686add1
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_ChunkedEllpack.h
@@ -0,0 +1,57 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_ChunkedEllpack.h -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include <iostream>
+#include <TNL/Algorithms/Segments/ChunkedEllpack.h>
+#include <TNL/Matrices/SparseMatrix.h>
+
+#ifdef HAVE_GTEST
+#include <gtest/gtest.h>
+
+const char* saveAndLoadFileName = "test_SparseMatrixVectorProductTest_ChunkedEllpack_segments";
+
+////
+// Row-major format is used for the host system
+template< typename Device, typename Index, typename IndexAllocator >
+using RowMajorChunkedEllpack = TNL::Algorithms::Segments::ChunkedEllpack< Device, Index, IndexAllocator, TNL::Algorithms::Segments::RowMajorOrder >;
+
+////
+// Column-major format is used for GPUs
+template< typename Device, typename Index, typename IndexAllocator >
+using ColumnMajorChunkedEllpack = TNL::Algorithms::Segments::ChunkedEllpack< Device, Index, IndexAllocator, TNL::Algorithms::Segments::ColumnMajorOrder >;
+
+// types for which MatrixTest is instantiated
+using MatrixTypes = ::testing::Types
+<
+     TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, ColumnMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorChunkedEllpack >
+#ifdef HAVE_CUDA
+    ,TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorChunkedEllpack >
+    ,TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorChunkedEllpack >
+#endif
+>;
+
+#endif
+
+#include "SparseMatrixVectorProductTest.h"
+#include "../main.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_Ellpack.cpp b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_Ellpack.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9e2446c38652670607fbf4192434c0b8ace98619
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_Ellpack.cpp
@@ -0,0 +1,11 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_Ellpack.cpp -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include "SparseMatrixVectorProductTest_Ellpack.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_Ellpack.cu b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_Ellpack.cu
new file mode 120000
index 0000000000000000000000000000000000000000..d30bd03b85d9fcd0761ae96054d8f85adc1f9b44
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_Ellpack.cu
@@ -0,0 +1 @@
+SparseMatrixVectorProductTest_Ellpack.cpp
\ No newline at end of file
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_Ellpack.h b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_Ellpack.h
new file mode 100644
index 0000000000000000000000000000000000000000..abb4213ca2efb4912c8dab33bbec4d7d3d006665
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_Ellpack.h
@@ -0,0 +1,56 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_Ellpack.h -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include <iostream>
+#include <TNL/Algorithms/Segments/Ellpack.h>
+#include <TNL/Matrices/SparseMatrix.h>
+
+#ifdef HAVE_GTEST
+#include <gtest/gtest.h>
+
+const char* saveAndLoadFileName = "test_SparseMatrixVectorProductTest_Ellpack_segments";
+
+////
+// Row-major format is used for the host system
+template< typename Device, typename Index, typename IndexAllocator >
+using RowMajorEllpack = TNL::Algorithms::Segments::Ellpack< Device, Index, IndexAllocator, TNL::Algorithms::Segments::RowMajorOrder, 32 >;
+
+////
+// Column-major format is used for GPUs
+template< typename Device, typename Index, typename IndexAllocator >
+using ColumnMajorEllpack = TNL::Algorithms::Segments::Ellpack< Device, Index, IndexAllocator, TNL::Algorithms::Segments::ColumnMajorOrder, 32 >;
+
+// types for which MatrixTest is instantiated
+using MatrixTypes = ::testing::Types
+<
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorEllpack >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorEllpack >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorEllpack >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorEllpack >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorEllpack >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorEllpack >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorEllpack >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorEllpack >
+#ifdef HAVE_CUDA
+   ,TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorEllpack >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorEllpack >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorEllpack >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorEllpack >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorEllpack >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorEllpack >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorEllpack >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorEllpack >
+#endif
+>;
+
+#endif
+
+#include "SparseMatrixVectorProductTest.h"
+#include "../main.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_SlicedEllpack.cpp b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_SlicedEllpack.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0afb094fda217ecea4181438a9376d41a9be5b60
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_SlicedEllpack.cpp
@@ -0,0 +1,11 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_SlicedEllpack.cpp -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include "SparseMatrixVectorProductTest_SlicedEllpack.h"
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_SlicedEllpack.cu b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_SlicedEllpack.cu
new file mode 120000
index 0000000000000000000000000000000000000000..6c3448930f3a12f8dac48f5638153db3883f94c6
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_SlicedEllpack.cu
@@ -0,0 +1 @@
+SparseMatrixVectorProductTest_SlicedEllpack.cpp
\ No newline at end of file
diff --git a/src/UnitTests/Matrices/SparseMatrixVectorProductTest_SlicedEllpack.h b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_SlicedEllpack.h
new file mode 100644
index 0000000000000000000000000000000000000000..5efa70d45eaf65e5ce46865838274f3ad883b8df
--- /dev/null
+++ b/src/UnitTests/Matrices/SparseMatrixVectorProductTest_SlicedEllpack.h
@@ -0,0 +1,57 @@
+/***************************************************************************
+                          SparseMatrixVectorProductTest_SlicedEllpack.h -  description
+                             -------------------
+    begin                : Mar 30, 2021
+    copyright            : (C) 2021 by Tomas Oberhuber et al.
+    email                : tomas.oberhuber@fjfi.cvut.cz
+ ***************************************************************************/
+
+/* See Copyright Notice in tnl/Copyright */
+
+#include <iostream>
+#include <TNL/Algorithms/Segments/SlicedEllpack.h>
+#include <TNL/Matrices/SparseMatrix.h>
+#include <TNL/Matrices/MatrixType.h>
+
+#ifdef HAVE_GTEST
+#include <gtest/gtest.h>
+
+const char* saveAndLoadFileName = "test_SparseMatrixVectorProductTest_SlicedEllpack_segments";
+
+////
+// Row-major format is used for the host system
+template< typename Device, typename Index, typename IndexAllocator >
+using RowMajorSlicedEllpack = TNL::Algorithms::Segments::SlicedEllpack< Device, Index, IndexAllocator, TNL::Algorithms::Segments::RowMajorOrder, 32 >;
+
+////
+// Column-major format is used for GPUs
+template< typename Device, typename Index, typename IndexAllocator >
+using ColumnMajorSlicedEllpack = TNL::Algorithms::Segments::SlicedEllpack< Device, Index, IndexAllocator, TNL::Algorithms::Segments::ColumnMajorOrder, 32 >;
+
+// types for which MatrixTest is instantiated
+using MatrixTypes = ::testing::Types
+<
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, int,   TNL::Matrices::GeneralMatrix, RowMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Host, long,  TNL::Matrices::GeneralMatrix, RowMajorSlicedEllpack >
+#ifdef HAVE_CUDA
+   ,TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, int,   TNL::Matrices::GeneralMatrix, ColumnMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< int,     TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< long,    TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< float,   TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorSlicedEllpack >,
+    TNL::Matrices::SparseMatrix< double,  TNL::Devices::Cuda, long,  TNL::Matrices::GeneralMatrix, ColumnMajorSlicedEllpack >
+#endif
+>;
+
+#endif
+
+#include "SparseMatrixVectorProductTest.h"
+#include "../main.h"