Commit 7d476bb5 authored by Jakub Klinkovský's avatar Jakub Klinkovský
Browse files

Added example project files

parent fd93113a
Loading
Loading
Loading
Loading

.gitignore

0 → 100644
+3 −0
Original line number Diff line number Diff line
# Ignore the built example binaries (e.g. "example-cuda" produced by make)...
example*
# ...but re-include the source files, which would otherwise match "example*".
!example*.cpp
!example*.cu

Makefile

0 → 100644
+30 −0
Original line number Diff line number Diff line
# Compilers, flags and tunables live in config.mk so users can edit them
# without touching the build rules below.
include config.mk

# Every top-level *.cpp / *.cu file becomes one executable of the same name.
SOURCES := $(wildcard *.cpp)
TARGETS := $(SOURCES:%.cpp=%)
CUDA_SOURCES := $(wildcard *.cu)
CUDA_TARGETS := $(CUDA_SOURCES:%.cu=%)

## targets definitions follow
.PHONY: all host cuda
all: host cuda
host: $(TARGETS)
cuda: $(CUDA_TARGETS)

.PHONY: clean
clean:
	rm -f *.d *.o *.cuo $(TARGETS) $(CUDA_TARGETS)

# link host executables with the host compiler
$(TARGETS): % : %.o
	$(CXX) $(LDFLAGS) -o $@ $< $(LDLIBS)

# use .cuo instead of .cu.o to avoid problems with the implicit rules: https://stackoverflow.com/q/62967939
# (and use the host compiler for linking CUDA, nvcc does not understand that .cuo is an object file)
$(CUDA_TARGETS): % : %.cuo
	$(CXX) $(CUDA_LDFLAGS) -o $@ $< $(CUDA_LDLIBS)

# compile host objects; -MMD -MP emits a %.d file with the header dependencies
# (-MP adds phony targets so deleting a header does not break the build)
$(SOURCES:%.cpp=%.o): %.o: %.cpp
	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -MMD -MP -c -o $@ $<

# NOTE: no -MMD here — nvcc's support for it depends on the CUDA toolkit
# version, so CUDA objects are rebuilt only when the .cu file itself changes.
$(CUDA_SOURCES:%.cu=%.cuo): %.cuo : %.cu
	$(CUDA_CXX) $(CUDA_CPPFLAGS) $(CUDA_CXXFLAGS) -c -o $@ $<

# pull in the auto-generated header dependencies ("clean" removes these);
# the leading "-" silences the warning on the first build when none exist yet
-include $(SOURCES:%.cpp=%.d)
+3 −1
Original line number Diff line number Diff line
# Example Project

A basic example showing how to use TNL in a C++/CUDA project built with a Makefile.

See the [TNL documentation](https://mmg-gitlab.fjfi.cvut.cz/doc/tnl/) for details.

config.mk

0 → 100644
+49 −0
Original line number Diff line number Diff line
# configure the include path(s) according to your TNL installation
TNL_INCLUDE_DIRS := -I ~/.local/include

# user-tunable switches ("yes" / "no")
WITH_OPENMP := yes
WITH_DEBUG := no

# If TNL is installed on your system, the CUDA architecture can be detected
# automatically by tnl-cuda-arch. This is done if CUDA_ARCH is set to "auto".
# Otherwise, CUDA_ARCH has to be set manually to the desired CUDA architecture
# number, e.g. 60, 61, etc.
CUDA_ARCH := auto

# compilers
CXX := g++
CUDA_CXX := nvcc

# host compiler flags
CXXFLAGS := -std=c++14 $(TNL_INCLUDE_DIRS)
ifeq ($(WITH_DEBUG),yes)
    CXXFLAGS += -O0 -g
else
    CXXFLAGS += -O3 -DNDEBUG
endif

# CUDA compiler flags
CUDA_CXXFLAGS := -std=c++14 --expt-relaxed-constexpr --expt-extended-lambda $(TNL_INCLUDE_DIRS)
CUDA_CXXFLAGS += -DHAVE_CUDA
# honour WITH_DEBUG for the CUDA build as well, mirroring the host flags
# (-G enables device-code debug info in nvcc)
ifeq ($(WITH_DEBUG),yes)
    CUDA_CXXFLAGS += -O0 -g -G
else
    CUDA_CXXFLAGS += -O3 -DNDEBUG
endif
ifeq ($(CUDA_ARCH),auto)
    CUDA_CXXFLAGS += $(shell tnl-cuda-arch)
else
    CUDA_CXXFLAGS += -gencode arch=compute_$(CUDA_ARCH),code=sm_$(CUDA_ARCH)
endif

# determine path to the CUDA toolkit installation
# (autodetection is attempted, set it manually if it fails)
CUDA_PATH ?= $(abspath $(dir $(shell command -v nvcc))/..)
#$(info Detected CUDA_PATH: $(CUDA_PATH))

# flags for linking CUDA with the host compiler
CUDA_LDFLAGS := -L $(CUDA_PATH)/lib64
CUDA_LDLIBS := -lcudart -ldl -lrt

# enable OpenMP
ifeq ($(WITH_OPENMP),yes)
    CXXFLAGS += -fopenmp -DHAVE_OPENMP
    LDLIBS += -lgomp
    CUDA_CXXFLAGS += -Xcompiler -fopenmp -DHAVE_OPENMP
    CUDA_LDLIBS += -lgomp
endif

example-cuda.cu

0 → 100644
+9 −0
Original line number Diff line number Diff line
#include <cstdlib>    // EXIT_SUCCESS (was only available transitively before)
#include <iostream>

#include <TNL/Containers/Array.h>

// Minimal TNL/CUDA example: construct a device-side array and print it.
// argc/argv are unused but kept for the conventional main() signature.
int main( int argc, char* argv[] )
{
    // array allocated for TNL::Devices::Cuda, initialized from the host
    // with the elements {1, 2, 3}
    TNL::Containers::Array< int, TNL::Devices::Cuda > device_array{ 1, 2, 3 };
    // Array provides operator<<, so the contents can be streamed directly
    std::cout << "device_array = " << device_array << std::endl;
    return EXIT_SUCCESS;
}
Loading