diff --git a/src/TNL/Matrices/Dense.hpp b/src/TNL/Matrices/Dense.hpp
index 5504f6408ff02465f48673966a77d7b9427f6d48..21ae1bce6991a067653f63b66c76183632dc5fe0 100644
--- a/src/TNL/Matrices/Dense.hpp
+++ b/src/TNL/Matrices/Dense.hpp
@@ -990,6 +990,7 @@ operator=( const Dense< Real_, Device_, Index_, RowMajorOrder_, RealAllocator_ >
          }
       }
    }
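+   // rebind the cached view so it references the freshly assigned data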
+   this->view = this->getView();
    return *this;
 }
 
diff --git a/src/TNL/Matrices/SparseMatrix.hpp b/src/TNL/Matrices/SparseMatrix.hpp
index c94506084f18d8d8d0bbe58ddc3c3029877de055..14495ad3d5fca5c7018bd67d352ad02f36104bd8 100644
--- a/src/TNL/Matrices/SparseMatrix.hpp
+++ b/src/TNL/Matrices/SparseMatrix.hpp
@@ -542,6 +542,7 @@ operator=( const SparseMatrix& matrix )
    Matrix< Real, Device, Index >::operator=( matrix );
    this->columnIndexes = matrix.columnIndexes;
    this->segments = matrix.segments;
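+   // refresh the cached view after the values, column indexes and segments have been copied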
+   this->view = this->getView();
    return *this;
 }
 
@@ -581,7 +582,7 @@ operator=( const Dense< Real_, Device_, Index_, RowMajorOrder, RealAllocator_ >&
    if( std::is_same< DeviceType, RHSDeviceType >::value )
    {
       const auto segments_view = this->segments.getView();
-      auto f = [=] __cuda_callable__ ( RHSIndexType rowIdx, RHSIndexType columnIdx, RHSIndexType globalIndex, const RHSRealType& value, bool& compute ) mutable {
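+      // callback invoked for each element of the dense source with ( rowIdx, localIdx, columnIdx, value, compute )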
+      auto f = [=] __cuda_callable__ ( RHSIndexType rowIdx, RHSIndexType localIdx, RHSIndexType columnIdx, const RHSRealType& value, bool& compute ) mutable {
          if( value != 0.0 )
          {
             IndexType thisGlobalIdx = segments_view.getGlobalIndex( rowIdx, rowLocalIndexes_view[ rowIdx ]++ );
@@ -650,6 +651,7 @@ operator=( const Dense< Real_, Device_, Index_, RowMajorOrder, RealAllocator_ >&
       }
       //std::cerr << "This matrix = " << std::endl << *this << std::endl;
    }
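+   // keep the cached view in sync with the rebuilt matrix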
+   this->view = this->getView();
    return *this;
 
 }
@@ -684,7 +686,7 @@ operator=( const RHSMatrix& matrix )
    auto values_view = this->values.getView();
    columns_view = paddingIndex;
 
-   if( std::is_same< DeviceType, RHSDeviceType >::value )
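+   // NOTE: the direct same-device path is disabled for now, so the buffered copy below serves both same- and cross-device sources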
+   /*if( std::is_same< DeviceType, RHSDeviceType >::value )
    {
       const auto segments_view = this->segments.getView();
       auto f = [=] __cuda_callable__ ( RHSIndexType rowIdx, RHSIndexType localIdx, RHSIndexType columnIndex, const RHSRealType& value, bool& compute ) mutable {
@@ -697,7 +699,7 @@ operator=( const RHSMatrix& matrix )
       };
       matrix.forAllRows( f );
    }
-   else
+   else*/
    {
       const IndexType maxRowLength = max( rowLengths );
       const IndexType bufferRowsCount( 128 );
@@ -747,11 +749,11 @@ operator=( const RHSMatrix& matrix )
                value = thisValuesBuffer_view[ bufferIdx ];
             }
          };
-         //this->forRows( baseRow, lastRow, f2 );
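+         // write the buffered data into the rows of this matrix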
+         this->forRows( baseRow, lastRow, f2 );
          baseRow += bufferRowsCount;
       }
-      //std::cerr << "This matrix = " << std::endl << *this << std::endl;
    }
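+   // rebind the cached view once the copy has finished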
+   this->view = this->getView();
    return *this;
 }
 
diff --git a/src/TNL/Matrices/SparseMatrixView.hpp b/src/TNL/Matrices/SparseMatrixView.hpp
index 055a1d60e61a4696ba16d642d37725f1157479c3..965a51b8b6811ede34c7e957bf3299e12a1104b7 100644
--- a/src/TNL/Matrices/SparseMatrixView.hpp
+++ b/src/TNL/Matrices/SparseMatrixView.hpp
@@ -415,7 +415,7 @@ forRows( IndexType first, IndexType last, Function& function ) const
    const auto values_view = this->values.getConstView();
    const IndexType paddingIndex_ = this->getPaddingIndex();
    auto f = [=] __cuda_callable__ ( IndexType rowIdx, IndexType localIdx, IndexType globalIdx, bool& compute ) mutable -> bool {
-      function( rowIdx, localIdx, columns_view[ globalIdx ], globalIdx, compute );
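+      // hand the column index and the element value to the user's function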
+      function( rowIdx, localIdx, columns_view[ globalIdx ], values_view[ globalIdx ], compute );
       return true;
    };
    this->segments.forSegments( first, last, f );
@@ -435,7 +435,7 @@ forRows( IndexType first, IndexType last, Function& function )
    auto values_view = this->values.getView();
    const IndexType paddingIndex_ = this->getPaddingIndex();
    auto f = [=] __cuda_callable__ ( IndexType rowIdx, IndexType localIdx, IndexType globalIdx, bool& compute ) mutable {
-      function( rowIdx, localIdx, columns_view[ globalIdx ], globalIdx, compute );
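+      // hand the column index and a modifiable reference to the element value to the user's function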
+      function( rowIdx, localIdx, columns_view[ globalIdx ], values_view[ globalIdx ], compute );
    };
    this->segments.forSegments( first, last, f );
 }
diff --git a/src/UnitTests/Matrices/SparseMatrixCopyTest.h b/src/UnitTests/Matrices/SparseMatrixCopyTest.h
index 8677443b296dc8933ed886834208c315ec8736ea..e9898bb393c21baf0a04c0f7e8462ac613ef81d6 100644
--- a/src/UnitTests/Matrices/SparseMatrixCopyTest.h
+++ b/src/UnitTests/Matrices/SparseMatrixCopyTest.h
@@ -40,7 +40,7 @@ using SE_host  = TNL::Matrices::SparseMatrix< int, TNL::Devices::Host, int, TNL:
 using SE_cuda  = TNL::Matrices::SparseMatrix< int, TNL::Devices::Cuda, int, TNL::Matrices::GeneralMatrix, SlicedEllpackSegments >;
 
 
-#ifdef HAVE_GTEST 
+#ifdef HAVE_GTEST
 #include <gtest/gtest.h>
 
 /*
@@ -99,7 +99,7 @@ void setupUnevenRowSizeMatrix( Matrix& m )
 
     m.setElement( 7, 0, value++ );   // 7th row
 
-    for( int i = 0; i < cols - 1; i++ )  // 8th row 
+    for( int i = 0; i < cols - 1; i++ )  // 8th row
         m.setElement( 8, i, value++ );
 
     m.setElement( 9, 5, value++ );   // 9th row
@@ -159,21 +159,21 @@ void checkUnevenRowSizeMatrix( Matrix& m )
    EXPECT_EQ( m.getElement( 6, 3 ),  0 );
    EXPECT_EQ( m.getElement( 6, 4 ),  0 );
    EXPECT_EQ( m.getElement( 6, 5 ),  0 );
-   
+
    EXPECT_EQ( m.getElement( 7, 0 ), 22 );
    EXPECT_EQ( m.getElement( 7, 1 ),  0 );
    EXPECT_EQ( m.getElement( 7, 2 ),  0 );
    EXPECT_EQ( m.getElement( 7, 3 ),  0 );
    EXPECT_EQ( m.getElement( 7, 4 ),  0 );
    EXPECT_EQ( m.getElement( 7, 5 ),  0 );
-   
+
    EXPECT_EQ( m.getElement( 8, 0 ), 23 );
    EXPECT_EQ( m.getElement( 8, 1 ), 24 );
    EXPECT_EQ( m.getElement( 8, 2 ), 25 );
    EXPECT_EQ( m.getElement( 8, 3 ), 26 );
    EXPECT_EQ( m.getElement( 8, 4 ), 27 );
    EXPECT_EQ( m.getElement( 8, 5 ),  0 );
-   
+
    EXPECT_EQ( m.getElement( 9, 0 ),  0 );
    EXPECT_EQ( m.getElement( 9, 1 ),  0 );
    EXPECT_EQ( m.getElement( 9, 2 ),  0 );
@@ -206,7 +206,7 @@ void setupAntiTriDiagMatrix( Matrix& m )
     rowLengths.setElement( 0, 4);
     rowLengths.setElement( 1,  4 );
     m.setCompressedRowLengths( rowLengths );
-    
+
     int value = 1;
     for( int i = 0; i < rows; i++ )
         for( int j = cols - 1; j > 2; j-- )
@@ -397,39 +397,37 @@ void testCopyAssignment()
 template< typename Matrix1, typename Matrix2 >
 void testConversion()
 {
-    
    {
         SCOPED_TRACE("Tri Diagonal Matrix");
-        
+
         Matrix1 triDiag1;
         setupTriDiagMatrix( triDiag1 );
         checkTriDiagMatrix( triDiag1 );
-        
+
         Matrix2 triDiag2;
-        //TNL::Matrices::copySparseMatrix( triDiag2, triDiag1 );
         triDiag2 = triDiag1;
         checkTriDiagMatrix( triDiag2 );
    }
-   
+
    {
         SCOPED_TRACE("Anti Tri Diagonal Matrix");
-                
+
         Matrix1 antiTriDiag1;
         setupAntiTriDiagMatrix( antiTriDiag1 );
         checkAntiTriDiagMatrix( antiTriDiag1 );
-        
+
         Matrix2 antiTriDiag2;
         //TNL::Matrices::copySparseMatrix( antiTriDiag2, antiTriDiag1 );
         antiTriDiag2 = antiTriDiag1;
         checkAntiTriDiagMatrix( antiTriDiag2 );
    }
-   
+
    {
         SCOPED_TRACE("Uneven Row Size Matrix");
         Matrix1 unevenRowSize1;
         setupUnevenRowSizeMatrix( unevenRowSize1 );
         checkUnevenRowSizeMatrix( unevenRowSize1 );
-        
+
         Matrix2 unevenRowSize2;
         //TNL::Matrices::copySparseMatrix( unevenRowSize2, unevenRowSize1 );
         unevenRowSize2 = unevenRowSize1;