Diff summary — 3 files changed, 1 line each. In every `test_RowsReduction()` the
unqualified `abs( value )` inside the `max_fetch` lambda is replaced by the
qualified `TNL::abs( value )` (the unqualified call resolves to the C integer
`abs`, truncating floating-point values; `TNL::abs` is the device-callable
overload for `RealType`).

src/UnitTests/Matrices/BinarySparseMatrixTest.hpp (+1 −1)
@@ -965,7 +965,7 @@ void test_RowsReduction()
    TNL::Containers::Vector< ComputeRealType, DeviceType, IndexType > rowSums( rows );
    auto rowSums_view = rowSums.getView();
    auto max_fetch = [] __cuda_callable__ ( IndexType row, IndexType column, const RealType& value ) -> IndexType {
-      return abs( value );
+      return TNL::abs( value );
    };
    auto max_keep = [=] __cuda_callable__ ( const IndexType rowIdx, const IndexType value ) mutable {
       rowSums_view[ rowIdx ] = value;

src/UnitTests/Matrices/SparseMatrixTest.hpp (+1 −1)
@@ -1390,7 +1390,7 @@ void test_RowsReduction()
    TNL::Containers::Vector< RealType, DeviceType, IndexType > rowSums( rows );
    auto rowSums_view = rowSums.getView();
    auto max_fetch = [] __cuda_callable__ ( IndexType row, IndexType column, const RealType& value ) -> IndexType {
-      return abs( value );
+      return TNL::abs( value );
    };
    auto max_keep = [=] __cuda_callable__ ( const IndexType rowIdx, const IndexType value ) mutable {
       rowSums_view[ rowIdx ] = value;

src/UnitTests/Matrices/SymmetricSparseMatrixTest.hpp (+1 −1)
(change is inside a /* commented-out */ region of test_RowsReduction)
@@ -917,7 +917,7 @@ void test_RowsReduction()
    /*TNL::Containers::Vector< RealType, DeviceType, IndexType > rowSums( m_5.getRows() );
    auto rowSums_view = rowSums.getView();
    auto max_fetch = [] __cuda_callable__ ( IndexType row, IndexType column, IndexType globalIdx, const RealType& value ) -> IndexType {
-      return abs( value );
+      return TNL::abs( value );
    };
    auto max_reduce = [] __cuda_callable__ ( IndexType& aux, const IndexType a ) {
       aux += a;
src/UnitTests/Matrices/BinarySparseMatrixTest.hpp (+1 −1)
@@ -965,7 +965,7 @@ void test_RowsReduction()
    TNL::Containers::Vector< ComputeRealType, DeviceType, IndexType > rowSums( rows );
    auto rowSums_view = rowSums.getView();
    auto max_fetch = [] __cuda_callable__ ( IndexType row, IndexType column, const RealType& value ) -> IndexType {
-      return abs( value );
+      return TNL::abs( value );
    };
    auto max_keep = [=] __cuda_callable__ ( const IndexType rowIdx, const IndexType value ) mutable {
       rowSums_view[ rowIdx ] = value;
src/UnitTests/Matrices/SparseMatrixTest.hpp (+1 −1)
@@ -1390,7 +1390,7 @@ void test_RowsReduction()
    TNL::Containers::Vector< RealType, DeviceType, IndexType > rowSums( rows );
    auto rowSums_view = rowSums.getView();
    auto max_fetch = [] __cuda_callable__ ( IndexType row, IndexType column, const RealType& value ) -> IndexType {
-      return abs( value );
+      return TNL::abs( value );
    };
    auto max_keep = [=] __cuda_callable__ ( const IndexType rowIdx, const IndexType value ) mutable {
       rowSums_view[ rowIdx ] = value;
src/UnitTests/Matrices/SymmetricSparseMatrixTest.hpp (+1 −1)
(change is inside a /* commented-out */ region of test_RowsReduction)
@@ -917,7 +917,7 @@ void test_RowsReduction()
    /*TNL::Containers::Vector< RealType, DeviceType, IndexType > rowSums( m_5.getRows() );
    auto rowSums_view = rowSums.getView();
    auto max_fetch = [] __cuda_callable__ ( IndexType row, IndexType column, IndexType globalIdx, const RealType& value ) -> IndexType {
-      return abs( value );
+      return TNL::abs( value );
    };
    auto max_reduce = [] __cuda_callable__ ( IndexType& aux, const IndexType a ) {
       aux += a;