Commit 4261895b authored by Jakub Klinkovský
Browse files

Update tests

parent f36eebf2
Loading
Loading
Loading
Loading
Compare 2f631bb8 to ff151d28
Original line number Diff line number Diff line
Subproject commit 2f631bb8087a0355d2b23a75a28d936ce237659d
Subproject commit ff151d2833d48aa630564859cd1401a93a2bb333
+0 −6
Original line number Diff line number Diff line
# CMakeLists.txt
#
#     Author: Fabian Meyer
# Created On: 12 Jul 2019

include_directories(
    ${CMAKE_CURRENT_LIST_DIR}
    ${CATCH2_INCLUDE_DIR}
@@ -10,7 +5,6 @@ include_directories(

set(TEST_SRC
    "main.cpp"

    "gdcpp.cpp"
)

test/assert/eigen_require.h

deleted 100644 → 0
+0 −29
Original line number Diff line number Diff line
/* eigen_require.h
 *
 *  Created on: 27 Jun 2019
 *      Author: Fabian Meyer
 *
 * Catch2 assertion helpers that compare two Eigen matrices element-wise.
 *
 * NOTE(review): both macros evaluate their arguments multiple times; pass
 * only side-effect-free expressions. They also assume a type named `Index`
 * (e.g. gdc::Index or Eigen::Index) is in scope at the expansion site —
 * confirm against the including test files.
 */

#ifndef GDC_EIGEN_REQUIRE_H_
#define GDC_EIGEN_REQUIRE_H_

#include <Eigen/Geometry>
#include <catch2/catch.hpp>

/* Require that a and b have identical dimensions and that each element of
 * b lies within +/- eps of the corresponding element of a.
 * Parameters are parenthesized so expression arguments (e.g. `m1 + m2`)
 * bind correctly inside the expansion. */
#define REQUIRE_MATRIX_APPROX(a, b, eps) do {                                 \
        REQUIRE((a).cols() == (b).cols());                                    \
        REQUIRE((a).rows() == (b).rows());                                    \
        for(Index _c = 0; _c < (a).cols(); ++_c)                              \
            for(Index _r = 0; _r < (a).rows(); ++_r)                          \
                REQUIRE(Approx((a)(_r, _c)).margin(eps) == (b)(_r, _c));      \
    } while(0)

/* Require that a and b have identical dimensions and exactly equal
 * elements (bitwise float comparison — prefer the APPROX variant for
 * computed results). */
#define REQUIRE_MATRIX(a, b) do {                                             \
        REQUIRE((a).cols() == (b).cols());                                    \
        REQUIRE((a).rows() == (b).rows());                                    \
        for(Index _c = 0; _c < (a).cols(); ++_c)                              \
            for(Index _r = 0; _r < (a).rows(); ++_r)                          \
                REQUIRE((a)(_r, _c) == (b)(_r, _c));                          \
    } while(0)

#endif // GDC_EIGEN_REQUIRE_H_
+18 −0
Original line number Diff line number Diff line
/* tnl_require.h
 *
 * Catch2 assertion helpers that compare two TNL vectors element-wise
 * via getSize() and operator[].
 *
 * NOTE(review): both macros evaluate their arguments multiple times; pass
 * only side-effect-free expressions. They also assume a type named `Index`
 * (e.g. gdc::Index) is in scope at the expansion site — confirm against
 * the including test files.
 */
#ifndef GDC_TNL_REQUIRE_H_
#define GDC_TNL_REQUIRE_H_

#include <catch2/catch.hpp>

/* Require that a and b have identical sizes and that each element of b
 * lies within +/- eps of the corresponding element of a.
 * Parameters are parenthesized so expression arguments bind correctly
 * inside the expansion. */
#define REQUIRE_VECTOR_APPROX(a, b, eps) do {                                 \
        REQUIRE((a).getSize() == (b).getSize());                              \
        for(Index _i = 0; _i < (a).getSize(); ++_i)                           \
            REQUIRE(Approx((a)[_i]).margin(eps) == (b)[_i]);                  \
    } while(0)

/* Require that a and b have identical sizes and exactly equal elements
 * (bitwise float comparison — prefer the APPROX variant for computed
 * results). */
#define REQUIRE_VECTOR(a, b) do {                                             \
        REQUIRE((a).getSize() == (b).getSize());                              \
        for(Index _i = 0; _i < (a).getSize(); ++_i)                           \
            REQUIRE((a)[_i] == (b)[_i]);                                      \
    } while(0)

#endif // GDC_TNL_REQUIRE_H_
+31 −58
Original line number Diff line number Diff line
/* gdcpp.cpp
 *
 *     Author: Fabian Meyer
 * Created On: 12 Jul 2019
 */

#include "assert/eigen_require.h"
#include "assert/tnl_require.h"
#include <gdcpp.h>

using namespace gdc;
@@ -12,7 +6,7 @@ using namespace gdc;
template<typename Scalar>
struct Paraboloid
{
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
    using Vector = TNL::Containers::Vector<float, TNL::Devices::Host, gdc::Index>;
    Scalar operator()(const Vector &state, Vector &)
    {
        return state(0) * state(0) + state(1) * state(1);
@@ -22,7 +16,7 @@ struct Paraboloid
template<typename Scalar>
struct Rosenbrock
{
    typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
    using Vector = TNL::Containers::Vector<float, TNL::Devices::Host, gdc::Index>;
    Scalar operator()(const Vector &state, Vector &)
    {
        Scalar delta1 = 1 - state(0);
@@ -32,7 +26,7 @@ struct Rosenbrock
    }
};

typedef Eigen::Matrix<float, Eigen::Dynamic, 1> Vector;
using Vector = TNL::Containers::Vector<float, TNL::Devices::Host, gdc::Index>;

TEST_CASE("gradient_descent")
{
@@ -45,17 +39,14 @@ TEST_CASE("gradient_descent")
            GradientDescent<float,
                Paraboloid<float>,
                ConstantStepSize<float>,
                NoCallback<float>,
                ForwardDifferences<float>> optimizer;
            optimizer.setMaxIterations(100);

            Vector xval(2);
            xval << 2, 2;
            Vector xvalExp(2);
            xvalExp << 0, 0;
            Vector xval = {2, 2};
            Vector xvalExp = {0, 0};

            auto result = optimizer.minimize(xval);
            REQUIRE_MATRIX_APPROX(xvalExp, result.xval, eps);
            REQUIRE_VECTOR_APPROX(xvalExp, result.xval, eps);
        }

        SECTION("backward differences")
@@ -63,17 +54,14 @@ TEST_CASE("gradient_descent")
            GradientDescent<float,
                Paraboloid<float>,
                ConstantStepSize<float>,
                NoCallback<float>,
                BackwardDifferences<float>> optimizer;
            optimizer.setMaxIterations(100);

            Vector xval(2);
            xval << 2, 2;
            Vector xvalExp(2);
            xvalExp << 0, 0;
            Vector xval = {2, 2};
            Vector xvalExp = {0, 0};

            auto result = optimizer.minimize(xval);
            REQUIRE_MATRIX_APPROX(xvalExp, result.xval, eps);
            REQUIRE_VECTOR_APPROX(xvalExp, result.xval, eps);
        }

        SECTION("central differences")
@@ -81,17 +69,14 @@ TEST_CASE("gradient_descent")
            GradientDescent<float,
                Paraboloid<float>,
                ConstantStepSize<float>,
                NoCallback<float>,
                CentralDifferences<float>> optimizer;
            optimizer.setMaxIterations(100);

            Vector xval(2);
            xval << 2, 2;
            Vector xvalExp(2);
            xvalExp << 0, 0;
            Vector xval = {2, 2};
            Vector xvalExp = {0, 0};

            auto result = optimizer.minimize(xval);
            REQUIRE_MATRIX_APPROX(xvalExp, result.xval, eps);
            REQUIRE_VECTOR_APPROX(xvalExp, result.xval, eps);
        }

        SECTION("constant step size")
@@ -101,13 +86,11 @@ TEST_CASE("gradient_descent")
                ConstantStepSize<float>> optimizer;
            optimizer.setMaxIterations(100);

            Vector xval(2);
            xval << 2, 2;
            Vector xvalExp(2);
            xvalExp << 0, 0;
            Vector xval = {2, 2};
            Vector xvalExp = {0, 0};

            auto result = optimizer.minimize(xval);
            REQUIRE_MATRIX_APPROX(xvalExp, result.xval, eps);
            REQUIRE_VECTOR_APPROX(xvalExp, result.xval, eps);
        }

        SECTION("Barzilai-Borwein step")
@@ -117,13 +100,11 @@ TEST_CASE("gradient_descent")
                BarzilaiBorwein<float>> optimizer;
            optimizer.setMaxIterations(100);

            Vector xval(2);
            xval << 2, 2;
            Vector xvalExp(2);
            xvalExp << 0, 0;
            Vector xval = {2, 2};
            Vector xvalExp = {0, 0};

            auto result = optimizer.minimize(xval);
            REQUIRE_MATRIX_APPROX(xvalExp, result.xval, eps);
            REQUIRE_VECTOR_APPROX(xvalExp, result.xval, eps);
        }

        SECTION("Wolfe linesearch")
@@ -133,13 +114,11 @@ TEST_CASE("gradient_descent")
                WolfeBacktracking<float>> optimizer;
            optimizer.setMaxIterations(100);

            Vector xval(2);
            xval << 2, 2;
            Vector xvalExp(2);
            xvalExp << 0, 0;
            Vector xval = {2, 2};
            Vector xvalExp = {0, 0};

            auto result = optimizer.minimize(xval);
            REQUIRE_MATRIX_APPROX(xvalExp, result.xval, eps);
            REQUIRE_VECTOR_APPROX(xvalExp, result.xval, eps);
        }

        SECTION("Armijo linesearch")
@@ -149,13 +128,11 @@ TEST_CASE("gradient_descent")
                ArmijoBacktracking<float>> optimizer;
            optimizer.setMaxIterations(100);

            Vector xval(2);
            xval << 2, 2;
            Vector xvalExp(2);
            xvalExp << 0, 0;
            Vector xval = {2, 2};
            Vector xvalExp = {0, 0};

            auto result = optimizer.minimize(xval);
            REQUIRE_MATRIX_APPROX(xvalExp, result.xval, eps);
            REQUIRE_VECTOR_APPROX(xvalExp, result.xval, eps);
        }

        SECTION("Decrease linesearch")
@@ -165,13 +142,11 @@ TEST_CASE("gradient_descent")
                DecreaseBacktracking<float>> optimizer;
            optimizer.setMaxIterations(100);

            Vector xval(2);
            xval << 2, 2;
            Vector xvalExp(2);
            xvalExp << 0, 0;
            Vector xval = {2, 2};
            Vector xvalExp = {0, 0};

            auto result = optimizer.minimize(xval);
            REQUIRE_MATRIX_APPROX(xvalExp, result.xval, eps);
            REQUIRE_VECTOR_APPROX(xvalExp, result.xval, eps);
        }
    }

@@ -181,12 +156,10 @@ TEST_CASE("gradient_descent")
            WolfeBacktracking<float>> optimizer;
        optimizer.setMaxIterations(3000);
        optimizer.setMomentum(0.9);
        Vector xval(2);
        xval << -0.5, 0.5;
        Vector xvalExp(2);
        xvalExp << 1, 1;
        Vector xval = {-0.5, 0.5};
        Vector xvalExp = {1, 1};

        auto result = optimizer.minimize(xval);
        REQUIRE_MATRIX_APPROX(xvalExp, result.xval, eps);
        REQUIRE_VECTOR_APPROX(xvalExp, result.xval, eps);
    }
}
Loading