Commit 53c48f25 authored by Tomáš Oberhuber

Changing order of parameters of Algorithms::Reduction::reduce(withArgument) from (reduce,fetch) to (fetch,reduce).
parent bf7b251a
1 merge request: !89 To/matrices adaptive csr
Showing 21 additions and 21 deletions
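
For orientation, here is a minimal sketch of the new call order, distilled from the examples changed below. The sum example, the Host device choice, and the include paths are illustrative assumptions, not part of this commit:

#include <TNL/Containers/Vector.h>   // include paths assumed for this TNL era
#include <TNL/Algorithms/Reduction.h>

using Device = TNL::Devices::Host;   // hypothetical device choice

double sum( const TNL::Containers::Vector< double, Device >& v )
{
   auto view = v.getConstView();
   // fetch maps an index to the value entering the reduction
   auto fetch = [=] __cuda_callable__ ( int i ) { return view[ i ]; };
   // reduce folds two partial results into one
   auto reduce = [] __cuda_callable__ ( const double& a, const double& b ) { return a + b; };
   // New argument order: ( begin, end, fetch, reduce, identity ); reduce and fetch used to be swapped.
   return TNL::Algorithms::Reduction< Device >::reduce( 0, view.getSize(), fetch, reduce, 0.0 );
}
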
@@ -36,7 +36,7 @@ void getRowExample()
 /***
  * Compute the matrix trace.
  */
-int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix->getRows(), std::plus<>{}, fetch, 0 );
+int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix->getRows(), fetch, std::plus<>{}, 0 );
 std::cout << "Matrix trace is " << trace << "." << std::endl;
 }
@@ -29,7 +29,7 @@ void getRowExample()
 return row.getElement( rowIdx );
 };
-int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix.getRows(), std::plus<>{}, fetch, 0 );
+int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix.getRows(), fetch, std::plus<>{}, 0 );
 std::cout << "Matrix trace is " << trace << "." << std::endl;
 }
@@ -41,7 +41,7 @@ void getRowExample()
 /***
  * Compute the matrix trace.
  */
-int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix->getRows(), std::plus<>{}, fetch, 0 );
+int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix->getRows(), fetch, std::plus<>{}, 0 );
 std::cout << "Matrix reads as: " << std::endl << *matrix << std::endl;
 std::cout << "Matrix trace is: " << trace << "." << std::endl;
 }
@@ -13,7 +13,7 @@ void getRowExample()
 using MatrixType = TNL::Matrices::MultidiagonalMatrix< double, Device >;
 MatrixType matrix (
 matrixSize, // number of matrix columns
-diagonalsOffsets,
+diagonalsOffsets,
 { { 0.0, 0.0, 1.0 }, // matrix elements
 { 0.0, 2.0, 1.0 },
 { 3.0, 2.0, 1.0 },
@@ -32,7 +32,7 @@ void getRowExample()
 /***
  * Compute the matrix trace.
  */
-int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix.getRows(), std::plus<>{}, fetch, 0 );
+int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix.getRows(), fetch, std::plus<>{}, 0 );
 std::cout << "Matrix reads as: " << std::endl << matrix << std::endl;
 std::cout << "Matrix trace is: " << trace << "." << std::endl;
 }
@@ -36,7 +36,7 @@ void getRowExample()
 /***
  * Compute the matrix trace.
  */
-int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix->getRows(), std::plus<>{}, fetch, 0 );
+int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix->getRows(), fetch, std::plus<>{}, 0 );
 std::cout << "Matrix trace is " << trace << "." << std::endl;
 }
@@ -28,7 +28,7 @@ void getRowExample()
 /***
  * Compute the matrix trace.
  */
-int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix.getRows(), std::plus<>{}, fetch, 0 );
+int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix.getRows(), fetch, std::plus<>{}, 0 );
 std::cout << "Matrix trace is " << trace << "." << std::endl;
 }
@@ -40,7 +40,7 @@ void getRowExample()
 /***
  * Compute the matrix trace.
  */
-int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix->getRows(), std::plus<>{}, fetch, 0 );
+int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, matrix->getRows(), fetch, std::plus<>{}, 0 );
 std::cout << "Matrix reads as: " << std::endl << *matrix << std::endl;
 std::cout << "Matrix trace is: " << trace << "." << std::endl;
 }
@@ -30,7 +30,7 @@ void getRowExample()
 /***
  * Compute the matrix trace.
  */
-int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, view.getRows(), std::plus<>{}, fetch, 0 );
+int trace = TNL::Algorithms::Reduction< Device >::reduce( 0, view.getRows(), fetch, std::plus<>{}, 0 );
 std::cout << "Matrix reads as: " << std::endl << matrix << std::endl;
 std::cout << "Matrix trace is: " << trace << "." << std::endl;
 }
@@ -6,5 +6,5 @@ void scalarProduct( double* v1, double* v2, double* product, const int size )
 }
 auto reduce = [] __cuda_callable__ ( const double& a, const double& b ) {
 return a + b; };
-TNL::Algorithms::Reduction< Device >::reduce( 0, size, reduce, fetch, 0.0 );
+TNL::Algorithms::Reduction< Device >::reduce( 0, size, fetch, reduce, 0.0 );
 }
\ No newline at end of file
@@ -8,5 +8,5 @@ void scalarProduct( double* u1, double* u2,
 }
 auto reduce = [] __cuda_callable__ ( const double& a, const double& b ) {
 return a + b; };
-TNL::Algorithms::Reduction< Device >::reduce( 0, size, reduce, fetch, 0.0 );
+TNL::Algorithms::Reduction< Device >::reduce( 0, size, fetch, reduce, 0.0 );
 }
\ No newline at end of file
@@ -22,7 +22,7 @@ bool comparison( const Vector< double, Device >& u, const Vector< double, Device
  * Reduce performs logical AND on intermediate results obtained by fetch.
  */
 auto reduce = [] __cuda_callable__ ( const bool& a, const bool& b ) { return a && b; };
-return Reduction< Device >::reduce( 0, v_view.getSize(), reduce, fetch, true );
+return Reduction< Device >::reduce( 0, v_view.getSize(), fetch, reduce, true );
 }
 int main( int argc, char* argv[] )
@@ -14,7 +14,7 @@ double mapReduce( Vector< double, Device >& u )
 auto fetch = [=] __cuda_callable__ ( int i )->double {
 return u_view[ i ] > 0 ? u_view[ i ] : 0.0; };
 auto reduce = [] __cuda_callable__ ( const double& a, const double& b ) { return a + b; };
-return Reduction< Device >::reduce( 0, u_view.getSize(), reduce, fetch, 0.0 );
+return Reduction< Device >::reduce( 0, u_view.getSize(), fetch, reduce, 0.0 );
 }
 int main( int argc, char* argv[] )
@@ -16,7 +16,7 @@ double mapReduce( Vector< double, Device >& u )
 if( i % 2 == 0 ) return u_view[ i ];
 return 0.0; };
 auto reduce = [] __cuda_callable__ ( const double& a, const double& b ) { return a + b; };
-return Reduction< Device >::reduce( 0, u_view.getSize(), reduce, fetch, 0.0 );
+return Reduction< Device >::reduce( 0, u_view.getSize(), fetch, reduce, 0.0 );
 }
 int main( int argc, char* argv[] )
@@ -15,7 +15,7 @@ double mapReduce( Vector< double, Device >& u )
 auto fetch = [=] __cuda_callable__ ( int i )->double {
 return u_view[ 2 * i ]; };
 auto reduce = [] __cuda_callable__ ( const double& a, const double& b ) { return a + b; };
-return Reduction< Device >::reduce( 0, u_view.getSize() / 2, reduce, fetch, 0.0 );
+return Reduction< Device >::reduce( 0, u_view.getSize() / 2, fetch, reduce, 0.0 );
 }
 int main( int argc, char* argv[] )
@@ -13,7 +13,7 @@ double maximumNorm( const Vector< double, Device >& v )
 auto view = v.getConstView();
 auto fetch = [=] __cuda_callable__ ( int i ) { return abs( view[ i ] ); };
 auto reduce = [] __cuda_callable__ ( const double& a, const double& b ) { return max( a, b ); };
-return Reduction< Device >::reduce( 0, view.getSize(), reduce, fetch, 0.0 );
+return Reduction< Device >::reduce( 0, view.getSize(), fetch, reduce, 0.0 );
 }
 int main( int argc, char* argv[] )
@@ -17,7 +17,7 @@ double product( const Vector< double, Device >& v )
 /***
  * Since we compute the product of all elements, the reduction must be initialized with 1.0, not 0.0.
  */
-return Reduction< Device >::reduce( 0, view.getSize(), reduce, fetch, 1.0 );
+return Reduction< Device >::reduce( 0, view.getSize(), fetch, reduce, 1.0 );
 }
 int main( int argc, char* argv[] )
@@ -22,7 +22,7 @@ maximumNorm( const Vector< double, Device >& v )
 else if( a == b && bIdx < aIdx )
 aIdx = bIdx;
 };
-return Reduction< Device >::reduceWithArgument( 0, view.getSize(), reduction, fetch, std::numeric_limits< double >::max() );
+return Reduction< Device >::reduceWithArgument( 0, view.getSize(), fetch, reduction, std::numeric_limits< double >::max() );
 }
 int main( int argc, char* argv[] )
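
The reduceWithArgument variant is reordered the same way: the fetch lambda now precedes the four-parameter reduction lambda. Below is a hypothetical sketch under the new order, locating the minimum element and the smallest index where it occurs; the function name, the (value, value, index, index) lambda parameter order, and the returned pair are assumptions based on TNL's tutorial examples and should be checked against your TNL version:

#include <limits>
#include <TNL/Containers/Vector.h>
#include <TNL/Algorithms/Reduction.h>

using Device = TNL::Devices::Host;
using TNL::Algorithms::Reduction;

auto minimumWithIndex( const TNL::Containers::Vector< double, Device >& v )
{
   auto view = v.getConstView();
   auto fetch = [=] __cuda_callable__ ( int i ) { return view[ i ]; };
   // The running pair ( a, aIdx ) is folded with the incoming pair ( b, bIdx ).
   auto reduction = [] __cuda_callable__ ( double& a, const double& b, int& aIdx, const int& bIdx ) {
      if( b < a ) { a = b; aIdx = bIdx; }
      else if( a == b && bIdx < aIdx ) aIdx = bIdx;   // tie-break: keep the smaller index
   };
   // New order: fetch before reduction; max() is the identity element for a minimum search.
   // The result is assumed to hold the reduced value together with its index.
   return Reduction< Device >::reduceWithArgument( 0, view.getSize(), fetch, reduction,
                                                   std::numeric_limits< double >::max() );
}
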
@@ -18,7 +18,7 @@ double scalarProduct( const Vector< double, Device >& u, const Vector< double, D
  */
 auto fetch = [=] __cuda_callable__ ( int i ) { return u_view[ i ] * v_view[ i ]; };
 auto reduce = [] __cuda_callable__ ( const double& a, const double& b ) { return a + b; };
-return Reduction< Device >::reduce( 0, v_view.getSize(), reduce, fetch, 0.0 );
+return Reduction< Device >::reduce( 0, v_view.getSize(), fetch, reduce, 0.0 );
 }
 int main( int argc, char* argv[] )
@@ -30,7 +30,7 @@ double sum( const Vector< double, Device >& v )
  * lambdas defined above and finally the value of the idempotent element, zero in this case, which serves
  * to initialize the reduction.
  */
-return Reduction< Device >::reduce( 0, view.getSize(), reduce, fetch, 0.0 );
+return Reduction< Device >::reduce( 0, view.getSize(), fetch, reduce, 0.0 );
 }
 int main( int argc, char* argv[] )
@@ -17,7 +17,7 @@ double updateAndResidue( Vector< double, Device >& u, const Vector< double, Devi
 u_view[ i ] += tau * add;
 return add * add; };
 auto reduce = [] __cuda_callable__ ( const double& a, const double& b ) { return a + b; };
-return sqrt( Reduction< Device >::reduce( 0, u_view.getSize(), reduce, fetch, 0.0 ) );
+return sqrt( Reduction< Device >::reduce( 0, u_view.getSize(), fetch, reduce, 0.0 ) );
 }
 int main( int argc, char* argv[] )