Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
T
tnl-dev
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Deploy
Releases
Model registry
Analyze
Value stream analytics
Contributor analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
This is an archived project. Repository and other project resources are read-only.
Show more breadcrumbs
TNL
tnl-dev
Commits
a72c076d
There was an error fetching the commit references. Please try again later.
Commit
a72c076d
authored
5 years ago
by
Tomáš Oberhuber
Browse files
Options
Downloads
Patches
Plain Diff
Fixed symmetric sparse matrix with unit tests.
parent
cda43f31
No related branches found
No related tags found
1 merge request
!48
Segments
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
src/TNL/Matrices/SparseMatrixView.hpp
+27
-9
27 additions, 9 deletions
src/TNL/Matrices/SparseMatrixView.hpp
src/UnitTests/Matrices/SymmetricSparseMatrixTest.hpp
+24
-29
24 additions, 29 deletions
src/UnitTests/Matrices/SymmetricSparseMatrixTest.hpp
with
51 additions
and
38 deletions
src/TNL/Matrices/SparseMatrixView.hpp
+
27
−
9
View file @
a72c076d
...
...
@@ -13,7 +13,7 @@
#include
<functional>
#include
<TNL/Matrices/SparseMatrixView.h>
#include
<TNL/Algorithms/Reduction.h>
#include
<TNL/A
tomic
.h>
#include
<TNL/A
lgorithms/AtomicOperations
.h>
namespace
TNL
{
namespace
Matrices
{
...
...
@@ -382,16 +382,24 @@ vectorProduct( const InVector& inVector,
const
IndexType
paddingIndex
=
this
->
getPaddingIndex
();
if
(
isSymmetric
()
)
outVector
*=
outVectorMultiplicator
;
auto
fetch
=
[
=
]
__cuda_callable__
(
IndexType
row
,
IndexType
localIdx
,
IndexType
globalIdx
,
bool
&
compute
)
->
RealType
{
auto
fetch
=
[
=
]
__cuda_callable__
(
IndexType
row
,
IndexType
localIdx
,
IndexType
globalIdx
,
bool
&
compute
)
mutable
->
RealType
{
const
IndexType
column
=
columnIndexesView
[
globalIdx
];
compute
=
(
column
!=
paddingIndex
);
if
(
!
compute
)
return
0.0
;
if
(
isSymmetric
()
)
if
(
isSymmetric
()
&&
column
<
row
)
{
TNL_ASSERT_TRUE
(
false
,
""
);
//Atomic< RealType, DeviceType > atomic;
//if( isBinary() )
if
(
isBinary
()
)
Algorithms
::
AtomicOperations
<
DeviceType
>::
add
(
outVectorView
[
column
],
matrixMultiplicator
*
inVectorView
[
row
]
);
else
{
//std::cerr << outVectorView << std::endl;
Algorithms
::
AtomicOperations
<
DeviceType
>::
add
(
outVectorView
[
column
],
matrixMultiplicator
*
valuesView
[
globalIdx
]
*
inVectorView
[
row
]
);
//outVectorView[ column ] += matrixMultiplicator * valuesView[ globalIdx ] * inVectorView[ row ];
//std::cerr << "Symmetric add to out vector row " << column << " value " << valuesView[ globalIdx ] << " * " << inVectorView[ row ] <<
// " --> " << outVectorView[ column ] << std::endl;
}
}
if
(
isBinary
()
)
return
inVectorView
[
column
];
...
...
@@ -401,10 +409,20 @@ vectorProduct( const InVector& inVector,
sum
+=
value
;
};
auto
keeper
=
[
=
]
__cuda_callable__
(
IndexType
row
,
const
RealType
&
value
)
mutable
{
if
(
outVectorMultiplicator
==
0.0
)
outVectorView
[
row
]
=
matrixMultiplicator
*
value
;
if
(
isSymmetric
()
)
{
//std::cerr << outVectorView << std::endl;
//std::cerr << "Adding " << matrixMultiplicator * value << " to result vector " << outVectorView[ row ];
outVectorView
[
row
]
+=
matrixMultiplicator
*
value
;
//std::cerr << " ---> " << outVectorView[ row ] << std::endl;
}
else
outVectorView
[
row
]
=
outVectorMultiplicator
*
outVectorView
[
row
]
+
matrixMultiplicator
*
value
;
{
if
(
outVectorMultiplicator
==
0.0
)
outVectorView
[
row
]
=
matrixMultiplicator
*
value
;
else
outVectorView
[
row
]
=
outVectorMultiplicator
*
outVectorView
[
row
]
+
matrixMultiplicator
*
value
;
}
};
this
->
segments
.
segmentsReduction
(
0
,
this
->
getRows
(),
fetch
,
reduction
,
keeper
,
(
RealType
)
0.0
);
...
...
This diff is collapsed.
Click to expand it.
src/UnitTests/Matrices/SymmetricSparseMatrixTest.hpp
+
24
−
29
View file @
a72c076d
...
...
@@ -12,6 +12,7 @@
#include
<TNL/Containers/VectorView.h>
#include
<TNL/Math.h>
#include
<TNL/Algorithms/ParallelFor.h>
#include
<TNL/Algorithms/AtomicOperations.h>
#include
<iostream>
#include
<sstream>
...
...
@@ -734,10 +735,10 @@ void test_VectorProduct()
const
IndexType
m_cols_2
=
4
;
Matrix
m_2
(
m_rows_2
,
m_cols_2
,
{
{
0
,
0
,
1
},
{
0
,
1
,
2
},
{
0
,
2
,
3
},
{
1
,
0
,
2
},
{
1
,
2
,
6
},
{
1
,
3
,
8
},
{
0
,
0
,
1
},
{
1
,
0
,
2
},
{
2
,
0
,
3
},
{
2
,
1
,
6
},
{
2
,
2
,
7
},
{
3
,
2
,
8
},
{
3
,
3
,
9
}
}
);
{
3
,
1
,
8
},
{
3
,
3
,
9
}
}
);
VectorType
inVector_2
(
m_cols_2
,
2
);
VectorType
outVector_2
(
m_rows_2
,
0
);
...
...
@@ -835,9 +836,9 @@ void test_VectorProduct()
Matrix
m_5
(
m_rows_5
,
m_cols_5
,{
{
0
,
0
,
1
},
{
1
,
1
,
2
,
},
{
2
,
2
,
3
},
{
2
,
3
,
4
},
{
2
,
4
,
6
},
{
2
,
5
,
9
},
{
3
,
2
,
4
},
{
3
,
3
,
5
},
{
3
,
4
,
7
},
{
3
,
5
,
10
},
{
4
,
2
,
6
},
{
4
,
3
,
7
},
{
4
,
4
,
8
},
{
4
,
5
,
11
},
{
2
,
2
,
3
},
{
3
,
2
,
4
},
{
3
,
3
,
5
},
{
4
,
2
,
6
},
{
4
,
3
,
7
},
{
4
,
4
,
8
},
{
5
,
2
,
9
},
{
5
,
3
,
10
},
{
5
,
4
,
11
},
{
5
,
5
,
12
},
{
6
,
6
,
13
},
{
7
,
7
,
14
}
...
...
@@ -883,9 +884,9 @@ void test_RowsReduction()
Matrix
m_5
(
m_rows_5
,
m_cols_5
,{
{
0
,
0
,
1
},
{
1
,
1
,
2
,
},
{
2
,
2
,
3
},
{
2
,
3
,
4
},
{
2
,
4
,
6
},
{
2
,
5
,
9
},
{
3
,
2
,
4
},
{
3
,
3
,
5
},
{
3
,
4
,
7
},
{
3
,
5
,
10
},
{
4
,
2
,
6
},
{
4
,
3
,
7
},
{
4
,
4
,
8
},
{
4
,
5
,
11
},
{
2
,
2
,
3
},
{
3
,
2
,
4
},
{
3
,
3
,
5
},
{
4
,
2
,
6
},
{
4
,
3
,
7
},
{
4
,
4
,
8
},
{
5
,
2
,
9
},
{
5
,
3
,
10
},
{
5
,
4
,
11
},
{
5
,
5
,
12
},
{
6
,
6
,
13
},
{
7
,
7
,
14
}
...
...
@@ -896,24 +897,28 @@ void test_RowsReduction()
typename
Matrix
::
RowsCapacitiesType
rowLengths
(
m_rows_5
);
typename
Matrix
::
RowsCapacitiesType
rowLengths_true
(
{
1
,
1
,
4
,
4
,
4
,
4
,
1
,
1
}
);
auto
rowLengths_view
=
rowLengths
.
getView
();
auto
fetch
=
[]
__cuda_callable__
(
IndexType
row
,
IndexType
column
,
IndexType
globalIdx
,
const
RealType
&
value
)
->
IndexType
{
rowLengths_view
=
0
;
auto
fetch
=
[
=
]
__cuda_callable__
(
IndexType
row
,
IndexType
column
,
IndexType
globalIdx
,
const
RealType
&
value
)
mutable
->
IndexType
{
if
(
value
!=
0.0
&&
row
!=
column
)
TNL
::
Algorithms
::
AtomicOperations
<
DeviceType
>::
add
(
rowLengths_view
[
column
],
(
IndexType
)
1
);
return
(
value
!=
0.0
);
};
auto
reduce
=
[]
__cuda_callable__
(
IndexType
&
aux
,
const
IndexType
a
)
{
aux
+=
a
;
};
auto
keep
=
[
=
]
__cuda_callable__
(
const
IndexType
rowIdx
,
const
IndexType
value
)
mutable
{
rowLengths_view
[
rowIdx
]
=
value
;
rowLengths_view
[
rowIdx
]
+
=
value
;
};
m_5
.
allRowsReduction
(
fetch
,
reduce
,
keep
,
0
);
EXPECT_EQ
(
rowLengths_true
,
rowLengths
);
m_5
.
getCompressedRowLengths
(
rowLengths
);
EXPECT_EQ
(
rowLengths_true
,
rowLengths
);
typename
Matrix
::
RowsCapacitiesType
rowLengths_symmetric
(
{
1
,
1
,
1
,
2
,
3
,
4
,
1
,
1
}
);
EXPECT_EQ
(
rowLengths_symmetric
,
rowLengths
);
////
// Compute max norm
TNL
::
Containers
::
Vector
<
RealType
,
DeviceType
,
IndexType
>
rowSums
(
m_5
.
getRows
()
);
/*
TNL::Containers::Vector< RealType, DeviceType, IndexType > rowSums( m_5.getRows() );
auto rowSums_view = rowSums.getView();
auto max_fetch = [] __cuda_callable__ ( IndexType row, IndexType column, IndexType globalIdx, const RealType& value ) -> IndexType {
return abs( value );
...
...
@@ -926,7 +931,7 @@ void test_RowsReduction()
};
m_5.allRowsReduction( max_fetch, max_reduce, max_keep, 0 );
const RealType maxNorm = TNL::max( rowSums );
EXPECT_EQ
(
maxNorm
,
260
)
;
// 29+30+31+32+33+34+35+36
EXPECT_EQ( maxNorm, 260 ) ; // 29+30+31+32+33+34+35+36
*/
}
template
<
typename
Matrix
>
...
...
@@ -1018,7 +1023,7 @@ void test_SaveAndLoad( const char* filename )
{
2
,
1
,
4
},
{
2
,
2
,
5
},
{
3
,
2
,
6
},
{
3
,
3
,
7
},
{
4
,
3
,
8
},
{
4
,
4
,
9
},
{
5
,
5
,
10
}
}
);
{
5
,
4
,
10
}
}
);
// Check the set elements
EXPECT_EQ
(
savedMatrix
.
getElement
(
0
,
0
),
1
);
...
...
@@ -1060,9 +1065,6 @@ void test_SaveAndLoad( const char* filename )
ASSERT_NO_THROW
(
savedMatrix
.
save
(
filename
)
);
Matrix
loadedMatrix
;
//typename Matrix::CompressedRowLengthsVector rowLengths2( m_rows );
//rowLengths2 = 3;
//loadedMatrix.setCompressedRowLengths( rowLengths2 );
ASSERT_NO_THROW
(
loadedMatrix
.
load
(
filename
)
);
...
...
@@ -1071,42 +1073,36 @@ void test_SaveAndLoad( const char* filename )
EXPECT_EQ
(
savedMatrix
.
getElement
(
0
,
2
),
loadedMatrix
.
getElement
(
0
,
2
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
0
,
3
),
loadedMatrix
.
getElement
(
0
,
3
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
0
,
4
),
loadedMatrix
.
getElement
(
0
,
4
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
0
,
5
),
loadedMatrix
.
getElement
(
0
,
5
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
1
,
0
),
loadedMatrix
.
getElement
(
1
,
0
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
1
,
1
),
loadedMatrix
.
getElement
(
1
,
1
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
1
,
2
),
loadedMatrix
.
getElement
(
1
,
2
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
1
,
3
),
loadedMatrix
.
getElement
(
1
,
3
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
1
,
4
),
loadedMatrix
.
getElement
(
1
,
4
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
1
,
5
),
loadedMatrix
.
getElement
(
1
,
5
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
2
,
0
),
loadedMatrix
.
getElement
(
2
,
0
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
2
,
1
),
loadedMatrix
.
getElement
(
2
,
1
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
2
,
2
),
loadedMatrix
.
getElement
(
2
,
2
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
2
,
3
),
loadedMatrix
.
getElement
(
2
,
3
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
2
,
4
),
loadedMatrix
.
getElement
(
2
,
4
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
2
,
5
),
loadedMatrix
.
getElement
(
2
,
5
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
3
,
0
),
loadedMatrix
.
getElement
(
3
,
0
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
3
,
1
),
loadedMatrix
.
getElement
(
3
,
1
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
3
,
2
),
loadedMatrix
.
getElement
(
3
,
2
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
3
,
3
),
loadedMatrix
.
getElement
(
3
,
3
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
3
,
4
),
loadedMatrix
.
getElement
(
3
,
4
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
3
,
5
),
loadedMatrix
.
getElement
(
3
,
5
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
4
,
0
),
loadedMatrix
.
getElement
(
4
,
0
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
4
,
1
),
loadedMatrix
.
getElement
(
4
,
1
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
4
,
2
),
loadedMatrix
.
getElement
(
4
,
2
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
4
,
3
),
loadedMatrix
.
getElement
(
4
,
3
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
4
,
4
),
loadedMatrix
.
getElement
(
4
,
4
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
4
,
5
),
loadedMatrix
.
getElement
(
4
,
5
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
5
,
0
),
loadedMatrix
.
getElement
(
5
,
0
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
5
,
1
),
loadedMatrix
.
getElement
(
5
,
1
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
5
,
2
),
loadedMatrix
.
getElement
(
5
,
2
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
5
,
3
),
loadedMatrix
.
getElement
(
5
,
3
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
5
,
4
),
loadedMatrix
.
getElement
(
5
,
4
)
);
EXPECT_EQ
(
savedMatrix
.
getElement
(
5
,
5
),
loadedMatrix
.
getElement
(
5
,
5
)
);
EXPECT_EQ
(
std
::
remove
(
filename
),
0
);
}
...
...
@@ -1130,11 +1126,10 @@ void test_Print()
const
IndexType
m_cols
=
4
;
Matrix
m
(
m_rows
,
m_cols
,
{
{
0
,
0
,
4
},
{
0
,
1
,
1
},
{
1
,
0
,
1
},
{
1
,
1
,
4
},
{
1
,
2
,
1
},
{
2
,
1
,
1
},
{
2
,
2
,
4
},
{
2
,
3
,
1
},
{
3
,
2
,
1
},
{
3
,
3
,
4
},
{
3
,
4
,
1
},
{
4
,
3
,
1
},
{
4
,
4
,
4
}
{
0
,
0
,
4
},
{
1
,
0
,
1
},
{
1
,
1
,
4
},
{
2
,
1
,
1
},
{
2
,
2
,
4
},
{
3
,
2
,
1
},
{
3
,
3
,
4
}
}
);
std
::
stringstream
printed
;
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment