Skip to content
Snippets Groups Projects
Commit 9d61ceac authored by Tomáš Oberhuber's avatar Tomáš Oberhuber
Browse files

Writing tutorial on iterative linear solvers.

parent f32bd54e
No related branches found
No related tags found
1 merge request!116Documentation for linear solvers and preconditioners
Showing
with 204 additions and 9 deletions
......@@ -2,6 +2,7 @@ ADD_SUBDIRECTORY( Algorithms )
ADD_SUBDIRECTORY( Containers )
ADD_SUBDIRECTORY( Pointers )
ADD_SUBDIRECTORY( Matrices )
ADD_SUBDIRECTORY( Solvers )
set( COMMON_EXAMPLES )
......
add_subdirectory( Linear )
# List of example sources shared by the host and CUDA builds.
set( COMMON_EXAMPLES
IterativeLinearSolverExample
)
# Build each example and run it, capturing stdout into a .out snippet
# that the documentation later embeds via \include.
if( BUILD_CUDA )
foreach( target IN ITEMS ${COMMON_EXAMPLES} )
# CUDA build compiles the .cu variant of the example.
cuda_add_executable( ${target}-cuda ${target}.cu OPTIONS )
add_custom_command( COMMAND ${target}-cuda > ${TNL_DOCUMENTATION_OUTPUT_SNIPPETS_PATH}/${target}.out OUTPUT ${target}.out )
set( CUDA_OUTPUTS ${CUDA_OUTPUTS} ${target}.out )
endforeach()
else()
foreach( target IN ITEMS ${COMMON_EXAMPLES} )
# Host-only build compiles the .cpp variant of the example.
add_executable( ${target} ${target}.cpp )
add_custom_command( COMMAND ${target} > ${TNL_DOCUMENTATION_OUTPUT_SNIPPETS_PATH}/${target}.out OUTPUT ${target}.out )
set( HOST_OUTPUTS ${HOST_OUTPUTS} ${target}.out )
endforeach()
endif()
# Aggregate target so "make all" regenerates the documentation snippets.
IF( BUILD_CUDA )
ADD_CUSTOM_TARGET( RunLinearSolversExamples-cuda ALL DEPENDS ${CUDA_OUTPUTS} )
ELSE()
ADD_CUSTOM_TARGET( RunLinearSolversExamples ALL DEPENDS ${HOST_OUTPUTS} )
ENDIF()
#include <iostream>
#include <memory>
#include <TNL/Algorithms/ParallelFor.h>
#include <TNL/Matrices/SparseMatrix.h>
#include <TNL/Devices/Host.h>
#include <TNL/Devices/Cuda.h>
#include <TNL/Solvers/Linear/GMRES.h>
template< typename Device >
void iterativeLinearSolverExample()
{
   /***
    * Demonstrates solving a linear system A x = b with the GMRES iterative
    * solver, on either the host or a CUDA device depending on `Device`.
    *
    * Set the following tridiagonal matrix (dots represent zero elements):
    *
    * /  2.5 -1    .    .    .  \
    * | -1    2.5 -1    .    .  |
    * |  .   -1    2.5 -1    .  |
    * |  .    .   -1    2.5 -1  |
    * \  .    .    .   -1    2.5 /
    */
   using MatrixType = TNL::Matrices::SparseMatrix< double, Device >;
   using Vector = TNL::Containers::Vector< double, Device >;
   const int size( 5 );
   // The solver takes the matrix via a shared pointer, so allocate it that way.
   auto matrix_ptr = std::make_shared< MatrixType >();
   matrix_ptr->setDimensions( size, size );
   // Boundary rows hold 2 nonzeros, interior rows hold 3.
   matrix_ptr->setRowCapacities( Vector( { 2, 3, 3, 3, 2 } ) );

   // Row-filling lambda; __cuda_callable__ so it can run on the GPU as well.
   // The first setElement argument is the local (in-row) index, the second
   // the column index, the third the value.
   auto f = [=] __cuda_callable__ ( typename MatrixType::RowView& row ) mutable {
      const int rowIdx = row.getRowIndex();
      if( rowIdx == 0 )
      {
         row.setElement( 0, rowIdx,    2.5 ); // diagonal element
         row.setElement( 1, rowIdx+1, -1.0 ); // element above the diagonal
      }
      else if( rowIdx == size - 1 )
      {
         row.setElement( 0, rowIdx-1, -1.0 ); // element below the diagonal
         row.setElement( 1, rowIdx,    2.5 ); // diagonal element
      }
      else
      {
         row.setElement( 0, rowIdx-1, -1.0 ); // element below the diagonal
         row.setElement( 1, rowIdx,    2.5 ); // diagonal element
         row.setElement( 2, rowIdx+1, -1.0 ); // element above the diagonal
      }
   };

   /***
    * Set the matrix elements.
    */
   matrix_ptr->forAllRows( f );
   std::cout << *matrix_ptr << std::endl;

   /***
    * Set the right-hand side vector as b = A * x for the known solution
    * x = (1, ..., 1), so the solver's result can be checked by eye.
    */
   Vector x( size, 1.0 );
   Vector b( size );
   matrix_ptr->vectorProduct( x, b );
   std::cout << "Vector b = " << b << std::endl;

   /***
    * Solve the linear system.
    */
   using LinearSolver = TNL::Solvers::Linear::GMRES< MatrixType >;
   LinearSolver solver;
   solver.setMatrix( matrix_ptr );
   x = 0.0; // start the iterations from the zero initial guess
   solver.solve( b, x );
   std::cout << "Vector x = " << x << std::endl;
}
int main( int argc, char* argv[] )
{
   // Run the example on the CPU first.
   std::cout << "Solving linear system on host: " << std::endl;
   iterativeLinearSolverExample< TNL::Devices::Host >();

#ifdef HAVE_CUDA
   // Repeat the same computation on the GPU when CUDA support is compiled in.
   std::cout << "Solving linear system on CUDA device: " << std::endl;
   iterativeLinearSolverExample< TNL::Devices::Cuda >();
#endif
   return 0;
}
IterativeLinearSolverExample.cpp
\ No newline at end of file
......@@ -5,3 +5,4 @@ add_subdirectory( ReductionAndScan )
add_subdirectory( Pointers )
add_subdirectory( Matrices )
add_subdirectory( Meshes )
add_subdirectory( Solvers )
add_subdirectory( Linear )
# Build the Arrays tutorial examples and capture each program's stdout into a
# .out snippet embedded by the documentation.
# NOTE(review): everything below is guarded by BUILD_CUDA, so no tutorial
# targets are produced for host-only builds in this chunk — confirm whether a
# host branch exists elsewhere in the file.
IF( BUILD_CUDA )
CUDA_ADD_EXECUTABLE( ArrayAllocation ArrayAllocation.cu )
ADD_CUSTOM_COMMAND( COMMAND ArrayAllocation > ${TNL_DOCUMENTATION_OUTPUT_SNIPPETS_PATH}/ArrayAllocation.out OUTPUT ArrayAllocation.out )
CUDA_ADD_EXECUTABLE( ArrayIO ArrayIO.cu )
ADD_CUSTOM_COMMAND( COMMAND ArrayIO > ${TNL_DOCUMENTATION_OUTPUT_SNIPPETS_PATH}/ArrayIO.out OUTPUT ArrayIO.out )
CUDA_ADD_EXECUTABLE( ArrayView-1 ArrayView-1.cu )
ADD_CUSTOM_COMMAND( COMMAND ArrayView-1 > ${TNL_DOCUMENTATION_OUTPUT_SNIPPETS_PATH}/ArrayView-1.out OUTPUT ArrayView-1.out )
CUDA_ADD_EXECUTABLE( ArrayView-2 ArrayView-2.cu )
ADD_CUSTOM_COMMAND( COMMAND ArrayView-2 > ${TNL_DOCUMENTATION_OUTPUT_SNIPPETS_PATH}/ArrayView-2.out OUTPUT ArrayView-2.out )
CUDA_ADD_EXECUTABLE( ArrayViewForElements ArrayViewForElements.cu )
ADD_CUSTOM_COMMAND( COMMAND ArrayViewForElements > ${TNL_DOCUMENTATION_OUTPUT_SNIPPETS_PATH}/ArrayViewForElements.out OUTPUT ArrayViewForElements.out )
CUDA_ADD_EXECUTABLE( contains contains.cu )
ADD_CUSTOM_COMMAND( COMMAND contains > ${TNL_DOCUMENTATION_OUTPUT_SNIPPETS_PATH}/contains.out OUTPUT contains.out )
CUDA_ADD_EXECUTABLE( ElementsAccessing-1 ElementsAccessing-1.cu )
ADD_CUSTOM_COMMAND( COMMAND ElementsAccessing-1 > ${TNL_DOCUMENTATION_OUTPUT_SNIPPETS_PATH}/ElementsAccessing-1.out OUTPUT ElementsAccessing-1.out )
CUDA_ADD_EXECUTABLE( ElementsAccessing-2 ElementsAccessing-2.cu )
ADD_CUSTOM_COMMAND( COMMAND ElementsAccessing-2 > ${TNL_DOCUMENTATION_OUTPUT_SNIPPETS_PATH}/ElementsAccessing-2.out OUTPUT ElementsAccessing-2.out )
# StaticArrayExample is plain C++ (no .cu variant), hence ADD_EXECUTABLE.
ADD_EXECUTABLE( StaticArrayExample StaticArrayExample.cpp )
ADD_CUSTOM_COMMAND( COMMAND StaticArrayExample > ${TNL_DOCUMENTATION_OUTPUT_SNIPPETS_PATH}/StaticArrayExample.out OUTPUT StaticArrayExample.out )
ENDIF()
# Aggregate target that forces regeneration of all snippets during "make all".
IF( BUILD_CUDA )
ADD_CUSTOM_TARGET( TutorialsArrays-cuda ALL DEPENDS
ArrayAllocation.out
ArrayIO.out
ArrayView-1.out
ArrayView-2.out
contains.out
ElementsAccessing-1.out
ElementsAccessing-2.out
ArrayViewForElements.out
StaticArrayExample.out )
ENDIF()
\page tutorial_Linear_solvers Linear solvers tutorial
[TOC]
# Introduction
Solvers of linear systems are one of the most important algorithms in scientific computations. TNL offers the following iterative methods:
1. Stationary methods
1. [Jacobi method](https://en.wikipedia.org/wiki/Jacobi_method) (\ref TNL::Solvers::Linear::Jacobi)
2. [Successive-overrelaxation method, SOR](https://en.wikipedia.org/wiki/Successive_over-relaxation) (\ref TNL::Solvers::Linear::SOR) - CPU only currently
2. Krylov subspace methods
1. [Conjugate gradient method, CG](https://en.wikipedia.org/wiki/Conjugate_gradient_method) (\ref TNL::Solvers::Linear::CG)
2. [Biconjugate gradient stabilized method, BICGStab](https://en.wikipedia.org/wiki/Biconjugate_gradient_stabilized_method) (\ref TNL::Solvers::Linear::BICGStab)
3. [Biconjugate gradient stabilized method, BICGStab(l)](https://dspace.library.uu.nl/bitstream/handle/1874/16827/sleijpen_93_bicgstab.pdf) (\ref TNL::Solvers::Linear::BICGStabL)
4. [Transpose-free quasi-minimal residual method, TFQMR](https://second.wiki/wiki/algoritmo_tfqmr) (\ref TNL::Solvers::Linear::TFQMR)
5. [Generalized minimal residual method, GMRES](https://en.wikipedia.org/wiki/Generalized_minimal_residual_method) (\ref TNL::Solvers::Linear::GMRES) with various methods of orthogonalization
1. Classical Gram-Schmidt, CGS
2. Classical Gram-Schmidt with reorthogonalization, CGSR
3. Modified Gram-Schmidt, MGS
4. Modified Gram-Schmidt with reorthogonalization, MGSR
5. Compact WY form of the Householder reflections, CWY
The Krylov subspace methods can be combined with the following preconditioners:
1. Jacobi
2. ILU - CPU only currently
# Iterative solvers of linear systems
All iterative solvers for linear systems can be found in the namespace \ref TNL::Solvers::Linear. The following example shows the use of the iterative solvers:
\includelineno Solvers/Linear/IterativeLinearSolverExample.cpp
The result looks as follows:
\include IterativeLinearSolverExample.out
\ No newline at end of file
......@@ -10,5 +10,6 @@
6. [Sorting](tutorial_Sorting.html)
7. [Cross-device pointers](tutorial_Pointers.html)
8. [Matrices](tutorial_Matrices.html)
9. [Segments aka sparse formats](tutorial_Segments.html)
10. [Unstructured meshes](tutorial_Meshes.html)
9. [Linear solvers](tutorial_Linear_solvers.html)
10. [Segments aka sparse formats](tutorial_Segments.html)
11. [Unstructured meshes](tutorial_Meshes.html)
......@@ -31,7 +31,7 @@ configSetup( Config::ConfigDescription& config,
const String& prefix )
{
LinearSolver< Matrix >::configSetup( config, prefix );
config.addEntry< String >( prefix + "gmres-variant", "Algorithm to be used for reorthogonalization.", "CWY" );
config.addEntry< String >( prefix + "gmres-variant", "Algorithm used for the orthogonalization.", "MGSR" );
config.addEntryEnum( "CGS" );
config.addEntryEnum( "CGSR" );
config.addEntryEnum( "MGS" );
......
......@@ -146,6 +146,11 @@ class LinearSolver
* \param x vector for the solution of the linear system.
* \return true if the solver converged.
* \return false if the solver did not converge.
*
* \par Example
* \include Solvers/Linear/IterativeLinearSolverExample.cpp
* \par Output
* \include IterativeLinearSolverExample.out
*/
virtual bool solve( ConstVectorViewType b, VectorViewType x ) = 0;
......
......@@ -17,7 +17,7 @@ namespace TNL {
namespace Linear {
/**
* \brief Iterative solver of linear systems based on the Successive-overrelaxation (SOR) method.
* \brief Iterative solver of linear systems based on the Successive-overrelaxation (SOR) or Gauss-Seidel method.
*
* See (Wikipedia)[https://en.wikipedia.org/wiki/Successive_over-relaxation] for more details.
*
......
......@@ -24,13 +24,20 @@ namespace TNL {
*
* ## Stationary methods
* 1. Jacobi method - \ref TNL::Solvers::Linear::Jacobi
* 2. Successive-overrelaxation (SOR) method - \ref TNL::Solvers::Linear::SOR
* 2. Successive-overrelaxation method, SOR - \ref TNL::Solvers::Linear::SOR
*
* ## Krylov subspace methods
* 1. Conjugate gradient (CG) method - \ref TNL::Solvers::Linear::CG
* 2. Biconjugate gradient stabilised (BICGStab) method - \ref TNL::Solvers::Linear::BICGStab
* 3. Generalized minimal residual (GMRES) method - \ref TNL::Solvers::Linear::GMRES
* 4. Transpose-free quasi-minimal residual (TFQMR) method - \ref TNL::Solvers::Linear::TFQMR
* 1. Conjugate gradient method, CG - \ref TNL::Solvers::Linear::CG
* 2. Biconjugate gradient stabilized method, BICGStab - \ref TNL::Solvers::Linear::BICGStab
* 3. BICGStab(l) method - \ref TNL::Solvers::Linear::BICGStabL
* 4. Transpose-free quasi-minimal residual method, TFQMR - \ref TNL::Solvers::Linear::TFQMR
* 5. Generalized minimal residual method, GMRES - \ref TNL::Solvers::Linear::GMRES with various methods of orthogonalization
* 1. [Classical Gram-Schmidt, CGS](https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process)
* 2. Classical Gram-Schmidt with reorthogonalization, CGSR
* 3. Modified Gram-Schmidt, MGS
* 4. Modified Gram-Schmidt with reorthogonalization, MGSR
* 5. Compact WY form of the Householder reflections, CWY
*
*/
namespace Linear {
} // namespace Linear
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment