diff --git a/tests/benchmarks/array-operations.h b/tests/benchmarks/array-operations.h
index 02d206cfe28eadbfcfb620dd8b8bed46cac35bbc..c9d99e8f42ae2b72d9067f4ab70b3edff07c495d 100644
--- a/tests/benchmarks/array-operations.h
+++ b/tests/benchmarks/array-operations.h
@@ -24,12 +24,16 @@ benchmarkArrayOperations( Benchmark & benchmark,
 
     HostArray hostArray, hostArray2;
     CudaArray deviceArray, deviceArray2;
-    hostArray.setSize( size );
-    if( ! deviceArray.setSize( size ) )
-        return false;
-    hostArray2.setLike( hostArray );
-    if( ! deviceArray2.setLike( deviceArray ) )
+    if( ! hostArray.setSize( size ) ||
+        ! hostArray2.setSize( size ) ||
+        ! deviceArray.setSize( size ) ||
+        ! deviceArray2.setSize( size ) )
+    {
+        const char* msg = "error: allocation of arrays failed";
+        cerr << msg << endl;
+        benchmark.addErrorMessage( msg );
         return false;
+    }
 
     Real resultHost, resultDevice;
 
diff --git a/tests/benchmarks/benchmarks.h b/tests/benchmarks/benchmarks.h
index 0d3a219d554f5e429f732b0747a60f2429b7801f..97fa5a4f1c44e09d5f1bbbdc1265aeddf11ebf7a 100644
--- a/tests/benchmarks/benchmarks.h
+++ b/tests/benchmarks/benchmarks.h
@@ -174,11 +174,52 @@ public:
         }
     }
 
+    void
+    writeErrorMessage( const char* msg,
+                       const int & colspan = 1 )
+    {
+        // reset the header indent and write the names of the metadata columns
+        header_indent = "!";
+        log << endl;
+        for( auto & it : metadataColumns ) {
+            log << header_indent << " " << it.first << endl;
+        }
+
+        // make sure there is a header column for the message
+        if( horizontalGroups.size() == 0 )
+            horizontalGroups.push_back( {"", 1} );
+
+        // drop exhausted groups and write the remaining spanning columns
+        while( horizontalGroups.size() > 0 && horizontalGroups.back().second <= 0 ) {
+            horizontalGroups.pop_back();
+            header_indent.pop_back();
+        }
+        for( int i = 0; i < horizontalGroups.size(); i++ ) {
+            if( horizontalGroups[ i ].second > 0 ) {
+                log << header_indent << " " << horizontalGroups[ i ].first << endl;
+                header_indent += "!";
+            }
+        }
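+        // subtract the columns spanned by the message from the innermost group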
+        if( horizontalGroups.size() > 0 ) {
+            horizontalGroups.back().second -= colspan;
+            header_indent.pop_back();
+        }
+
+        // write the values of the metadata columns and the error message,
+        // each element on a separate line
+        for( auto & it : metadataColumns ) {
+            log << it.second << endl;
+        }
+        log << msg << endl;
+    }
+
     void
     closeTable()
     {
+        log << endl;
         header_indent = body_indent = "";
         header_changed = true;
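+        // reset the horizontal groups so that the next table starts with a fresh header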
+        horizontalGroups.clear();
     }
 
     bool save( std::ostream & logFile )
@@ -186,7 +227,7 @@ public:
         closeTable();
         logFile << log.str();
         if( logFile.good() ) {
-            log.str() ="";
+            log.str( "" );
             return true;
         }
         return false;
@@ -361,6 +402,17 @@ public:
         return this->baseTime;
     }
 
+    // Adds an error message to the log. Should be called in places where the
+    // "time" method could not be called (e.g. due to failed allocation).
+    void
+    addErrorMessage( const char* msg,
+                     const int & numberOfComputations = 1 )
+    {
+        // each computation has 3 subcolumns
+        const int colspan = 3 * numberOfComputations;
+        writeErrorMessage( msg, colspan );
+    }
+
     using Logging::save;
 
 protected:
diff --git a/tests/benchmarks/share/tnl-log-to-html.py b/tests/benchmarks/share/tnl-log-to-html.py
index 256a66ea00a99d9de3e5dc8b38f16b0f77e7e4fb..6a2c4c6315a7490150e54be6be078febd5518490 100644
--- a/tests/benchmarks/share/tnl-log-to-html.py
+++ b/tests/benchmarks/share/tnl-log-to-html.py
@@ -253,7 +253,7 @@ class logToHtmlConvertor:
         elements = [line.strip() for line in body]
 
         if len(elements) != len(leafColumns):
-            raise Exception("Error in the table format: header has {} leaf columns, but the corresponding row has {} elements.".format(len(leafColumns), len(row)))
+            raise Exception("Error in the table format: header has {} leaf columns, but the corresponding row has {} elements.".format(len(leafColumns), len(elements)))
 
         row = collections.OrderedDict()
         for element, column in zip(elements, leafColumns):
diff --git a/tests/benchmarks/tnl-cuda-benchmarks.h b/tests/benchmarks/tnl-cuda-benchmarks.h
index 259f130fd000620be5003da62f078ab71dca4a9c..d46626f6ef0c849f8f28fe6721e646b29546de3c 100644
--- a/tests/benchmarks/tnl-cuda-benchmarks.h
+++ b/tests/benchmarks/tnl-cuda-benchmarks.h
@@ -90,8 +90,8 @@ void setCudaTestMatrix( Matrix& matrix,
       if( gridIdx == cudaGrids - 1 )
          cudaGridSize.x = cudaBlocks % tnlCuda::getMaxGridSize();
       setCudaTestMatrixKernel< Matrix >
-       <<< cudaGridSize, cudaBlockSize >>>
-       ( kernel_matrix, elementsPerRow, gridIdx );
+         <<< cudaGridSize, cudaBlockSize >>>
+         ( kernel_matrix, elementsPerRow, gridIdx );
       checkCudaDevice;
    }
    tnlCuda::freeFromDevice( kernel_matrix );
@@ -119,6 +119,11 @@ benchmarkSpMV( Benchmark & benchmark,
    HostVector hostVector, hostVector2;
    CudaVector deviceVector, deviceVector2;
 
+   // create the benchmark group before the allocations so that error messages are logged under its header
+   tnlList< tnlString > parsedType;
+   parseObjectType( HostMatrix::getType(), parsedType );
+   benchmark.createHorizontalGroup( parsedType[ 0 ], 2 );
+
    if( ! hostRowLengths.setSize( size ) ||
        ! deviceRowLengths.setSize( size ) ||
        ! hostMatrix.setDimensions( size, size ) ||
@@ -128,7 +133,9 @@ benchmarkSpMV( Benchmark & benchmark,
        ! deviceVector.setSize( size ) ||
        ! deviceVector2.setSize( size ) )
    {
-      cerr << "Unable to allocate all matrices and vectors for the SpMV benchmark." << endl;
+      const char* msg = "error: allocation of matrices and vectors failed";
+      cerr << msg << endl;
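+      // the error message spans both computations (CPU and GPU)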
+      benchmark.addErrorMessage( msg, 2 );
       return false;
    }
 
@@ -137,19 +144,19 @@ benchmarkSpMV( Benchmark & benchmark,
 
    if( ! hostMatrix.setCompressedRowsLengths( hostRowLengths ) )
    {
-      cerr << "Unable to allocate host matrix elements." << endl;
+      const char* msg = "error: allocation of host matrix failed";
+      cerr << msg << endl;
+      benchmark.addErrorMessage( msg, 2 );
       return false;
    }
    if( ! deviceMatrix.setCompressedRowsLengths( deviceRowLengths ) )
    {
-      cerr << "Unable to allocate device matrix elements." << endl;
+      const char* msg = "error: allocation of device matrix failed";
+      cerr << msg << endl;
+      benchmark.addErrorMessage( msg, 2 );
       return false;
    }
 
-   tnlList< tnlString > parsedType;
-   parseObjectType( HostMatrix::getType(), parsedType );
-   benchmark.createHorizontalGroup( parsedType[ 0 ], 2 );
-
    const int elements = setHostTestMatrix< HostMatrix >( hostMatrix, elementsPerRow );
    setCudaTestMatrix< DeviceMatrix >( deviceMatrix, elementsPerRow );
    const double datasetSize = loops * elements * ( 2 * sizeof( Real ) + sizeof( int ) ) / oneGB;
diff --git a/tests/benchmarks/vector-operations.h b/tests/benchmarks/vector-operations.h
index 5d15af0ff13b2fb4d64f69e58b8fc6135cd7c9c6..8552fdbc1d2a72d5a9dcc42e6c04a7eccba27a21 100644
--- a/tests/benchmarks/vector-operations.h
+++ b/tests/benchmarks/vector-operations.h
@@ -28,12 +28,16 @@ benchmarkVectorOperations( Benchmark & benchmark,
 
     HostVector hostVector, hostVector2;
     CudaVector deviceVector, deviceVector2;
-    hostVector.setSize( size );
-    if( ! deviceVector.setSize( size ) )
-        return false;
-    hostVector2.setLike( hostVector );
-    if( ! deviceVector2.setLike( deviceVector ) )
+    if( ! hostVector.setSize( size ) ||
+        ! hostVector2.setSize( size ) ||
+        ! deviceVector.setSize( size ) ||
+        ! deviceVector2.setSize( size ) )
+    {
+        const char* msg = "error: allocation of vectors failed";
+        cerr << msg << endl;
+        benchmark.addErrorMessage( msg );
         return false;
+    }
 
     Real resultHost, resultDevice;