Mirror of https://github.com/NVIDIA/cuda-samples.git
Synced 2025-11-04 15:47:50 +08:00

Remove simpleTemplates_nvrtc

parent 769a225af3
commit a461e61485
@@ -9,7 +9,7 @@
    * `cppIntegration` demonstrating calling between .cu and .cpp files (reason: obsolete)
    * `cppOverload` demonstrating C++ function overloading (reason: obsolete)
    * `simpleSeparateCompilation` demonstrating NVCC compilation to a static library (reason: trivial)
    *
    * `simpleTemplates_nvrtc` demonstrating NVRTC usage for `simpleTemplates` sample (reason: redundant)

### CUDA 12.5

@@ -32,8 +32,7 @@ add_subdirectory(simplePitchLinearTexture)
add_subdirectory(simplePrintf)
add_subdirectory(simpleStreams)
add_subdirectory(simpleSurfaceWrite)
#add_subdirectory(simpleTemplates)
#add_subdirectory(simpleTemplates_nvrtc)
add_subdirectory(simpleTemplates)
#add_subdirectory(simpleTexture)
#add_subdirectory(simpleTexture3D)
#add_subdirectory(simpleTextureDrv)

@@ -1,18 +0,0 @@
{
    "configurations": [
        {
            "name": "Linux",
            "includePath": [
                "${workspaceFolder}/**",
                "${workspaceFolder}/../../../Common"
            ],
            "defines": [],
            "compilerPath": "/usr/local/cuda/bin/nvcc",
            "cStandard": "gnu17",
            "cppStandard": "gnu++14",
            "intelliSenseMode": "linux-gcc-x64",
            "configurationProvider": "ms-vscode.makefile-tools"
        }
    ],
    "version": 4
}
@@ -1,7 +0,0 @@
{
    "recommendations": [
        "nvidia.nsight-vscode-edition",
        "ms-vscode.cpptools",
        "ms-vscode.makefile-tools"
    ]
}
@@ -1,10 +0,0 @@
{
    "configurations": [
        {
            "name": "CUDA C++: Launch",
            "type": "cuda-gdb",
            "request": "launch",
            "program": "${workspaceFolder}/simpleTemplates_nvrtc"
        }
    ]
}
@@ -1,15 +0,0 @@
{
    "version": "2.0.0",
    "tasks": [
        {
            "label": "sample",
            "type": "shell",
            "command": "make dbg=1",
            "problemMatcher": ["$nvcc"],
            "group": {
                "kind": "build",
                "isDefault": true
            }
        }
    ]
}
@@ -1,409 +0,0 @@
################################################################################
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#  * Neither the name of NVIDIA CORPORATION nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
#
# Makefile project only supported on Mac OS X and Linux Platforms)
#
################################################################################

# Location of the CUDA Toolkit
CUDA_PATH ?= /usr/local/cuda

##############################
# start deprecated interface #
##############################
ifeq ($(x86_64),1)
    $(info WARNING - x86_64 variable has been deprecated)
    $(info WARNING - please use TARGET_ARCH=x86_64 instead)
    TARGET_ARCH ?= x86_64
endif
ifeq ($(ARMv7),1)
    $(info WARNING - ARMv7 variable has been deprecated)
    $(info WARNING - please use TARGET_ARCH=armv7l instead)
    TARGET_ARCH ?= armv7l
endif
ifeq ($(aarch64),1)
    $(info WARNING - aarch64 variable has been deprecated)
    $(info WARNING - please use TARGET_ARCH=aarch64 instead)
    TARGET_ARCH ?= aarch64
endif
ifeq ($(ppc64le),1)
    $(info WARNING - ppc64le variable has been deprecated)
    $(info WARNING - please use TARGET_ARCH=ppc64le instead)
    TARGET_ARCH ?= ppc64le
endif
ifneq ($(GCC),)
    $(info WARNING - GCC variable has been deprecated)
    $(info WARNING - please use HOST_COMPILER=$(GCC) instead)
    HOST_COMPILER ?= $(GCC)
endif
ifneq ($(abi),)
    $(error ERROR - abi variable has been removed)
endif
############################
# end deprecated interface #
############################

# architecture
HOST_ARCH   := $(shell uname -m)
TARGET_ARCH ?= $(HOST_ARCH)
ifneq (,$(filter $(TARGET_ARCH),x86_64 aarch64 sbsa ppc64le armv7l))
    ifneq ($(TARGET_ARCH),$(HOST_ARCH))
        ifneq (,$(filter $(TARGET_ARCH),x86_64 aarch64 sbsa ppc64le))
            TARGET_SIZE := 64
        else ifneq (,$(filter $(TARGET_ARCH),armv7l))
            TARGET_SIZE := 32
        endif
    else
        TARGET_SIZE := $(shell getconf LONG_BIT)
    endif
else
    $(error ERROR - unsupported value $(TARGET_ARCH) for TARGET_ARCH!)
endif

# sbsa and aarch64 systems look similar. Need to differentiate them at host level for now.
ifeq ($(HOST_ARCH),aarch64)
    ifeq ($(CUDA_PATH)/targets/sbsa-linux,$(shell ls -1d $(CUDA_PATH)/targets/sbsa-linux 2>/dev/null))
        HOST_ARCH := sbsa
        TARGET_ARCH := sbsa
    endif
endif

ifneq ($(TARGET_ARCH),$(HOST_ARCH))
    ifeq (,$(filter $(HOST_ARCH)-$(TARGET_ARCH),aarch64-armv7l x86_64-armv7l x86_64-aarch64 x86_64-sbsa x86_64-ppc64le))
        $(error ERROR - cross compiling from $(HOST_ARCH) to $(TARGET_ARCH) is not supported!)
    endif
endif

# When on native aarch64 system with userspace of 32-bit, change TARGET_ARCH to armv7l
ifeq ($(HOST_ARCH)-$(TARGET_ARCH)-$(TARGET_SIZE),aarch64-aarch64-32)
    TARGET_ARCH = armv7l
endif

# operating system
HOST_OS   := $(shell uname -s 2>/dev/null | tr "[:upper:]" "[:lower:]")
TARGET_OS ?= $(HOST_OS)
ifeq (,$(filter $(TARGET_OS),linux darwin qnx android))
    $(error ERROR - unsupported value $(TARGET_OS) for TARGET_OS!)
endif

# host compiler
ifdef HOST_COMPILER
 CUSTOM_HOST_COMPILER = 1
endif

ifeq ($(TARGET_OS),darwin)
    ifeq ($(shell expr `xcodebuild -version | grep -i xcode | awk '{print $$2}' | cut -d'.' -f1` \>= 5),1)
        HOST_COMPILER ?= clang++
    endif
else ifneq ($(TARGET_ARCH),$(HOST_ARCH))
    ifeq ($(HOST_ARCH)-$(TARGET_ARCH),x86_64-armv7l)
        ifeq ($(TARGET_OS),linux)
            HOST_COMPILER ?= arm-linux-gnueabihf-g++
        else ifeq ($(TARGET_OS),qnx)
            ifeq ($(QNX_HOST),)
                $(error ERROR - QNX_HOST must be passed to the QNX host toolchain)
            endif
            ifeq ($(QNX_TARGET),)
                $(error ERROR - QNX_TARGET must be passed to the QNX target toolchain)
            endif
            export QNX_HOST
            export QNX_TARGET
            HOST_COMPILER ?= $(QNX_HOST)/usr/bin/arm-unknown-nto-qnx6.6.0eabi-g++
        else ifeq ($(TARGET_OS),android)
            HOST_COMPILER ?= arm-linux-androideabi-g++
        endif
    else ifeq ($(TARGET_ARCH),aarch64)
        ifeq ($(TARGET_OS), linux)
            HOST_COMPILER ?= aarch64-linux-gnu-g++
        else ifeq ($(TARGET_OS),qnx)
            ifeq ($(QNX_HOST),)
                $(error ERROR - QNX_HOST must be passed to the QNX host toolchain)
            endif
            ifeq ($(QNX_TARGET),)
                $(error ERROR - QNX_TARGET must be passed to the QNX target toolchain)
            endif
            export QNX_HOST
            export QNX_TARGET
            HOST_COMPILER ?= $(QNX_HOST)/usr/bin/q++
        else ifeq ($(TARGET_OS), android)
            HOST_COMPILER ?= aarch64-linux-android-clang++
        endif
    else ifeq ($(TARGET_ARCH),sbsa)
        HOST_COMPILER ?= aarch64-linux-gnu-g++
    else ifeq ($(TARGET_ARCH),ppc64le)
        HOST_COMPILER ?= powerpc64le-linux-gnu-g++
    endif
endif
HOST_COMPILER ?= g++
NVCC          := $(CUDA_PATH)/bin/nvcc -ccbin $(HOST_COMPILER)

# internal flags
NVCCFLAGS   := -m${TARGET_SIZE}
CCFLAGS     :=
LDFLAGS     :=

# build flags

# Link flag for customized HOST_COMPILER with gcc realpath
GCC_PATH := $(shell which gcc)
ifeq ($(CUSTOM_HOST_COMPILER),1)
    ifneq ($(filter /%,$(HOST_COMPILER)),)
        ifneq ($(findstring gcc,$(HOST_COMPILER)),)
            ifneq ($(GCC_PATH),$(HOST_COMPILER))
                LDFLAGS += -lstdc++
            endif
        endif
    endif
endif

ifeq ($(TARGET_OS),darwin)
    LDFLAGS += -rpath $(CUDA_PATH)/lib
    CCFLAGS += -arch $(HOST_ARCH)
else ifeq ($(HOST_ARCH)-$(TARGET_ARCH)-$(TARGET_OS),x86_64-armv7l-linux)
    LDFLAGS += --dynamic-linker=/lib/ld-linux-armhf.so.3
    CCFLAGS += -mfloat-abi=hard
else ifeq ($(TARGET_OS),android)
    LDFLAGS += -pie
    CCFLAGS += -fpie -fpic -fexceptions
endif

ifneq ($(TARGET_ARCH),$(HOST_ARCH))
    ifeq ($(TARGET_ARCH)-$(TARGET_OS),armv7l-linux)
        ifneq ($(TARGET_FS),)
            GCCVERSIONLTEQ46 := $(shell expr `$(HOST_COMPILER) -dumpversion` \<= 4.6)
            ifeq ($(GCCVERSIONLTEQ46),1)
                CCFLAGS += --sysroot=$(TARGET_FS)
            endif
            LDFLAGS += --sysroot=$(TARGET_FS)
            LDFLAGS += -rpath-link=$(TARGET_FS)/lib
            LDFLAGS += -rpath-link=$(TARGET_FS)/usr/lib
            LDFLAGS += -rpath-link=$(TARGET_FS)/usr/lib/arm-linux-gnueabihf
        endif
    endif
    ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-linux)
        ifneq ($(TARGET_FS),)
            GCCVERSIONLTEQ46 := $(shell expr `$(HOST_COMPILER) -dumpversion` \<= 4.6)
            ifeq ($(GCCVERSIONLTEQ46),1)
                CCFLAGS += --sysroot=$(TARGET_FS)
            endif
            LDFLAGS += --sysroot=$(TARGET_FS)
            LDFLAGS += -rpath-link=$(TARGET_FS)/lib -L$(TARGET_FS)/lib
            LDFLAGS += -rpath-link=$(TARGET_FS)/lib/aarch64-linux-gnu -L$(TARGET_FS)/lib/aarch64-linux-gnu
            LDFLAGS += -rpath-link=$(TARGET_FS)/usr/lib -L$(TARGET_FS)/usr/lib
            LDFLAGS += -rpath-link=$(TARGET_FS)/usr/lib/aarch64-linux-gnu -L$(TARGET_FS)/usr/lib/aarch64-linux-gnu
            LDFLAGS += --unresolved-symbols=ignore-in-shared-libs
            CCFLAGS += -isystem=$(TARGET_FS)/usr/include -I$(TARGET_FS)/usr/include -I$(TARGET_FS)/usr/include/libdrm
            CCFLAGS += -isystem=$(TARGET_FS)/usr/include/aarch64-linux-gnu -I$(TARGET_FS)/usr/include/aarch64-linux-gnu
        endif
    endif
    ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-qnx)
        NVCCFLAGS += -D_QNX_SOURCE
        NVCCFLAGS += --qpp-config 8.3.0,gcc_ntoaarch64le
        CCFLAGS += -DWIN_INTERFACE_CUSTOM -I/usr/include/aarch64-qnx-gnu
        LDFLAGS += -lsocket
        LDFLAGS += -L/usr/lib/aarch64-qnx-gnu
        CCFLAGS += "-Wl\,-rpath-link\,/usr/lib/aarch64-qnx-gnu"
        ifdef TARGET_OVERRIDE
            LDFLAGS += -lslog2
        endif

        ifneq ($(TARGET_FS),)
            LDFLAGS += -L$(TARGET_FS)/usr/lib
            CCFLAGS += "-Wl\,-rpath-link\,$(TARGET_FS)/usr/lib"
            LDFLAGS += -L$(TARGET_FS)/usr/libnvidia
            CCFLAGS += "-Wl\,-rpath-link\,$(TARGET_FS)/usr/libnvidia"
            CCFLAGS += -I$(TARGET_FS)/../include
        endif
    endif
endif

ifdef TARGET_OVERRIDE # cuda toolkit targets override
    NVCCFLAGS += -target-dir $(TARGET_OVERRIDE)
endif

# Install directory of different arch
CUDA_INSTALL_TARGET_DIR :=
ifeq ($(TARGET_ARCH)-$(TARGET_OS),armv7l-linux)
    CUDA_INSTALL_TARGET_DIR = targets/armv7-linux-gnueabihf/
else ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-linux)
    CUDA_INSTALL_TARGET_DIR = targets/aarch64-linux/
else ifeq ($(TARGET_ARCH)-$(TARGET_OS),sbsa-linux)
    CUDA_INSTALL_TARGET_DIR = targets/sbsa-linux/
else ifeq ($(TARGET_ARCH)-$(TARGET_OS),armv7l-android)
    CUDA_INSTALL_TARGET_DIR = targets/armv7-linux-androideabi/
else ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-android)
    CUDA_INSTALL_TARGET_DIR = targets/aarch64-linux-androideabi/
else ifeq ($(TARGET_ARCH)-$(TARGET_OS),armv7l-qnx)
    CUDA_INSTALL_TARGET_DIR = targets/ARMv7-linux-QNX/
else ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-qnx)
    CUDA_INSTALL_TARGET_DIR = targets/aarch64-qnx/
else ifeq ($(TARGET_ARCH),ppc64le)
    CUDA_INSTALL_TARGET_DIR = targets/ppc64le-linux/
endif

# Debug build flags
ifeq ($(dbg),1)
      NVCCFLAGS += -g -G
      BUILD_TYPE := debug
else
      BUILD_TYPE := release
endif

ALL_CCFLAGS :=
ALL_CCFLAGS += $(NVCCFLAGS)
ALL_CCFLAGS += $(EXTRA_NVCCFLAGS)
ALL_CCFLAGS += $(addprefix -Xcompiler ,$(CCFLAGS))
ALL_CCFLAGS += $(addprefix -Xcompiler ,$(EXTRA_CCFLAGS))

UBUNTU = $(shell lsb_release -i -s 2>/dev/null | grep -i ubuntu)

SAMPLE_ENABLED := 1

# This sample is not supported on ARMv7
ifeq ($(TARGET_ARCH),armv7l)
  $(info >>> WARNING - simpleTemplates_nvrtc is not supported on ARMv7 - waiving sample <<<)
  SAMPLE_ENABLED := 0
endif

ALL_LDFLAGS :=
ALL_LDFLAGS += $(ALL_CCFLAGS)
ALL_LDFLAGS += $(addprefix -Xlinker ,$(LDFLAGS))
ALL_LDFLAGS += $(addprefix -Xlinker ,$(EXTRA_LDFLAGS))

# Common includes and paths for CUDA
INCLUDES  := -I../../../Common
LIBRARIES :=

################################################################################

# libNVRTC specific libraries
ifeq ($(TARGET_OS),darwin)
 LDFLAGS += -L$(CUDA_PATH)/lib -F/Library/Frameworks -framework CUDA
endif

ifeq ($(TARGET_OS),darwin)
  ALL_LDFLAGS += -Xcompiler -F/Library/Frameworks -Xlinker -framework -Xlinker CUDA
else
  ifeq ($(TARGET_ARCH),x86_64)
    CUDA_SEARCH_PATH ?= $(CUDA_PATH)/lib64/stubs
    CUDA_SEARCH_PATH += $(CUDA_PATH)/lib/stubs
    CUDA_SEARCH_PATH += $(CUDA_PATH)/targets/x86_64-linux/lib/stubs
  endif

  ifeq ($(TARGET_ARCH)-$(TARGET_OS),armv7l-linux)
    CUDA_SEARCH_PATH ?= $(CUDA_PATH)/targets/armv7-linux-gnueabihf/lib/stubs
  endif

  ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-linux)
    CUDA_SEARCH_PATH ?= $(CUDA_PATH)/targets/aarch64-linux/lib/stubs
  endif

  ifeq ($(TARGET_ARCH)-$(TARGET_OS),sbsa-linux)
    CUDA_SEARCH_PATH ?= $(CUDA_PATH)/targets/sbsa-linux/lib/stubs
  endif

  ifeq ($(TARGET_ARCH)-$(TARGET_OS),armv7l-android)
    CUDA_SEARCH_PATH ?= $(CUDA_PATH)/targets/armv7-linux-androideabi/lib/stubs
  endif

  ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-android)
    CUDA_SEARCH_PATH ?= $(CUDA_PATH)/targets/aarch64-linux-androideabi/lib/stubs
  endif

  ifeq ($(TARGET_ARCH)-$(TARGET_OS),armv7l-qnx)
    CUDA_SEARCH_PATH ?= $(CUDA_PATH)/targets/ARMv7-linux-QNX/lib/stubs
  endif

  ifeq ($(TARGET_ARCH)-$(TARGET_OS),aarch64-qnx)
    CUDA_SEARCH_PATH ?= $(CUDA_PATH)/targets/aarch64-qnx/lib/stubs
    ifdef TARGET_OVERRIDE
        CUDA_SEARCH_PATH := $(CUDA_PATH)/targets/$(TARGET_OVERRIDE)/lib/stubs
    endif
  endif

  ifeq ($(TARGET_ARCH),ppc64le)
    CUDA_SEARCH_PATH ?= $(CUDA_PATH)/targets/ppc64le-linux/lib/stubs
  endif

  ifeq ($(HOST_ARCH),ppc64le)
    CUDA_SEARCH_PATH += $(CUDA_PATH)/lib64/stubs
  endif

  CUDALIB ?= $(shell find -L $(CUDA_SEARCH_PATH) -maxdepth 1 -name libcuda.so 2> /dev/null)
  ifeq ("$(CUDALIB)","")
    $(info >>> WARNING - libcuda.so not found, CUDA Driver is not installed.  Please re-install the driver. <<<)
    SAMPLE_ENABLED := 0
  else
    CUDALIB := $(shell echo $(CUDALIB) | sed "s/ .*//" | sed "s/\/libcuda.so//" )
    LIBRARIES += -L$(CUDALIB) -lcuda
  endif
endif

ALL_CCFLAGS += --threads 0 --std=c++11

INCLUDES += -I$(CUDA_PATH)/include

LIBRARIES += -lnvrtc

ifeq ($(SAMPLE_ENABLED),0)
EXEC ?= @echo "[@]"
endif

################################################################################

# Target rules
all: build

build: simpleTemplates_nvrtc

check.deps:
ifeq ($(SAMPLE_ENABLED),0)
	@echo "Sample will be waived due to the above missing dependencies"
else
	@echo "Sample is ready - all dependencies have been met"
endif

simpleTemplates.o:simpleTemplates.cpp
	$(EXEC) $(NVCC) $(INCLUDES) $(ALL_CCFLAGS) $(GENCODE_FLAGS) -o $@ -c $<

simpleTemplates_nvrtc: simpleTemplates.o
	$(EXEC) $(NVCC) $(ALL_LDFLAGS) $(GENCODE_FLAGS) -o $@ $+ $(LIBRARIES)
	$(EXEC) mkdir -p ../../../bin/$(TARGET_ARCH)/$(TARGET_OS)/$(BUILD_TYPE)
	$(EXEC) cp $@ ../../../bin/$(TARGET_ARCH)/$(TARGET_OS)/$(BUILD_TYPE)

run: build
	$(EXEC) ./simpleTemplates_nvrtc

testrun: build

clean:
	rm -f simpleTemplates_nvrtc simpleTemplates.o
	rm -rf ../../../bin/$(TARGET_ARCH)/$(TARGET_OS)/$(BUILD_TYPE)/simpleTemplates_nvrtc

clobber: clean
@@ -1,74 +0,0 @@
# simpleTemplates_nvrtc - Simple Templates with libNVRTC

## Description

This sample is a templatized version of the template project. It also shows how to correctly templatize dynamically allocated shared memory arrays.

## Key Concepts

C++ Templates, Runtime Compilation

## Supported SM Architectures

[SM 5.0 ](https://developer.nvidia.com/cuda-gpus)  [SM 5.2 ](https://developer.nvidia.com/cuda-gpus)  [SM 5.3 ](https://developer.nvidia.com/cuda-gpus)  [SM 6.0 ](https://developer.nvidia.com/cuda-gpus)  [SM 6.1 ](https://developer.nvidia.com/cuda-gpus)  [SM 7.0 ](https://developer.nvidia.com/cuda-gpus)  [SM 7.2 ](https://developer.nvidia.com/cuda-gpus)  [SM 7.5 ](https://developer.nvidia.com/cuda-gpus)  [SM 8.0 ](https://developer.nvidia.com/cuda-gpus)  [SM 8.6 ](https://developer.nvidia.com/cuda-gpus)  [SM 8.7 ](https://developer.nvidia.com/cuda-gpus)  [SM 8.9 ](https://developer.nvidia.com/cuda-gpus)  [SM 9.0 ](https://developer.nvidia.com/cuda-gpus)

## Supported OSes

Linux, Windows, QNX

## Supported CPU Architecture

x86_64, ppc64le, aarch64

## CUDA APIs involved

### [CUDA Driver API](http://docs.nvidia.com/cuda/cuda-driver-api/index.html)
cuMemcpyDtoH, cuLaunchKernel, cuMemcpyHtoD, cuCtxSynchronize, cuMemAlloc, cuMemFree, cuModuleGetFunction

## Dependencies needed to build/run
[NVRTC](../../../README.md#nvrtc)

## Prerequisites

Download and install the [CUDA Toolkit 12.5](https://developer.nvidia.com/cuda-downloads) for your corresponding platform.
Make sure the dependencies mentioned in [Dependencies]() section above are installed.

## Build and Run

### Windows
The Windows samples are built using the Visual Studio IDE. Solution files (.sln) are provided for each supported version of Visual Studio, using the format:
```
*_vs<version>.sln - for Visual Studio <version>
```
Each individual sample has its own set of solution files in its directory:

To build/examine all the samples at once, the complete solution files should be used. To build/examine a single sample, the individual sample solution files should be used.
> **Note:** Some samples require that the Microsoft DirectX SDK (June 2010 or newer) be installed and that the VC++ directory paths are properly set up (**Tools > Options...**). Check DirectX Dependencies section for details."

### Linux
The Linux samples are built using makefiles. To use the makefiles, change the current directory to the sample directory you wish to build, and run make:
```
$ cd <sample_dir>
$ make
```
The samples makefiles can take advantage of certain options:
*  **TARGET_ARCH=<arch>** - cross-compile targeting a specific architecture. Allowed architectures are x86_64, ppc64le, aarch64.
    By default, TARGET_ARCH is set to HOST_ARCH. On a x86_64 machine, not setting TARGET_ARCH is the equivalent of setting TARGET_ARCH=x86_64.<br/>
`$ make TARGET_ARCH=x86_64` <br/> `$ make TARGET_ARCH=ppc64le` <br/> `$ make TARGET_ARCH=aarch64` <br/>
    See [here](http://docs.nvidia.com/cuda/cuda-samples/index.html#cross-samples) for more details.
*   **dbg=1** - build with debug symbols
    ```
    $ make dbg=1
    ```
*   **SMS="A B ..."** - override the SM architectures for which the sample will be built, where `"A B ..."` is a space-delimited list of SM architectures. For example, to generate SASS for SM 50 and SM 60, use `SMS="50 60"`.
    ```
    $ make SMS="50 60"
    ```

*  **HOST_COMPILER=<host_compiler>** - override the default g++ host compiler. See the [Linux Installation Guide](http://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#system-requirements) for a list of supported host compilers.
```
    $ make HOST_COMPILER=g++
```

## References (for more details)

@@ -1,177 +0,0 @@
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of NVIDIA CORPORATION nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SHAREDMEM_H_
#define _SHAREDMEM_H_

//****************************************************************************
// Because dynamically sized shared memory arrays are declared "extern",
// we can't templatize them directly.  To get around this, we declare a
// simple wrapper struct that will declare the extern array with a different
// name depending on the type.  This avoids compiler errors about duplicate
// definitions.
//

// To use dynamically allocated shared memory in a templatized __global__ or
// __device__ function, just replace code like this:
//

//
//  template<class T>
//  __global__ void
//  foo( T* g_idata, T* g_odata)
//  {
//      // Shared mem size is determined by the host app at run time
//      extern __shared__  T sdata[];
//      ...
//      doStuff(sdata);
//      ...
//   }

//
//   With this
//  template<class T>
//  __global__ void
//  foo( T* g_idata, T* g_odata)
//  {
//      // Shared mem size is determined by the host app at run time
//      SharedMemory<T> smem;
//      T* sdata = smem.getPointer();
//      ...
//      doStuff(sdata);
//      ...
//   }
//****************************************************************************

// This is the un-specialized struct.  Note that we prevent instantiation of
// this
// struct by putting an undefined symbol in the function body so it won't
// compile.

template <typename T>
struct SharedMemory {
  // Ensure that we won't compile any un-specialized types
  __device__ T *getPointer() {
    extern __device__ void error(void);
    error();
    return NULL;
  }
};

// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long, ulong, bool, float, and double
// One could also specialize it for user-defined types.

template <>
struct SharedMemory<int> {
  __device__ int *getPointer() {
    extern __shared__ int s_int[];
    return s_int;
  }
};

template <>
struct SharedMemory<unsigned int> {
  __device__ unsigned int *getPointer() {
    extern __shared__ unsigned int s_uint[];
    return s_uint;
  }
};

template <>
struct SharedMemory<char> {
  __device__ char *getPointer() {
    extern __shared__ char s_char[];
    return s_char;
  }
};

template <>
struct SharedMemory<unsigned char> {
  __device__ unsigned char *getPointer() {
    extern __shared__ unsigned char s_uchar[];
    return s_uchar;
  }
};

template <>
struct SharedMemory<short> {
  __device__ short *getPointer() {
    extern __shared__ short s_short[];
    return s_short;
  }
};

template <>
struct SharedMemory<unsigned short> {
  __device__ unsigned short *getPointer() {
    extern __shared__ unsigned short s_ushort[];
    return s_ushort;
  }
};

template <>
struct SharedMemory<long> {
  __device__ long *getPointer() {
    extern __shared__ long s_long[];
    return s_long;
  }
};

template <>
struct SharedMemory<unsigned long> {
  __device__ unsigned long *getPointer() {
    extern __shared__ unsigned long s_ulong[];
    return s_ulong;
  }
};

template <>
struct SharedMemory<bool> {
  __device__ bool *getPointer() {
    extern __shared__ bool s_bool[];
    return s_bool;
  }
};

template <>
struct SharedMemory<float> {
  __device__ float *getPointer() {
    extern __shared__ float s_float[];
    return s_float;
  }
};

template <>
struct SharedMemory<double> {
  __device__ double *getPointer() {
    extern __shared__ double s_double[];
    return s_double;
  }
};

#endif  //_SHAREDMEM_H_
@@ -1,272 +0,0 @@
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of NVIDIA CORPORATION nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* This sample is a templatized version of the template project.
* It also shows how to correctly templatize dynamically allocated shared
* memory arrays.
* Host code.
*/

// System includes
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <math.h>

// CUDA runtime
#include <cuda_runtime.h>

// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <nvrtc_helper.h>

#ifndef MAX
#define MAX(a, b) (a > b ? a : b)
#endif

int g_TotalFailures = 0;

////////////////////////////////////////////////////////////////////////////////
// declaration, forward

template <class T>
void runTest(int argc, char **argv, int len);

template <class T>
void computeGold(T *reference, T *idata, const unsigned int len) {
  const T T_len = static_cast<T>(len);

  for (unsigned int i = 0; i < len; ++i) {
    reference[i] = idata[i] * T_len;
  }
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////

int main(int argc, char **argv) {
  printf("> runTest<float,32>\n");

  runTest<float>(argc, argv, 32);

  printf("> runTest<int,64>\n");

  runTest<int>(argc, argv, 64);

  printf("\n[simpleTemplates_nvrtc] -> Test Results: %d Failures\n",
         g_TotalFailures);

  exit(g_TotalFailures == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}

// To completely templatize runTest (below) with cutil, we need to use
// template specialization to wrap up CUTIL's array comparison and file writing
// functions for different types.

// Here's the generic wrapper for cutCompare*
template <class T>
class ArrayComparator {
 public:
  bool compare(const T *reference, T *data, unsigned int len) {
    fprintf(stderr,
            "Error: no comparison function implemented for this type\n");
    return false;
  }
};

// Here's the specialization for ints:
template <>
class ArrayComparator<int> {
 public:
  bool compare(const int *reference, int *data, unsigned int len) {
    return compareData(reference, data, len, 0.15f, 0.0f);
  }
};

// Here's the specialization for floats:
template <>
class ArrayComparator<float> {
 public:
  bool compare(const float *reference, float *data, unsigned int len) {
    return compareData(reference, data, len, 0.15f, 0.15f);
  }
};

// Here's the generic wrapper for cutWriteFile*
template <class T>
class ArrayFileWriter {
 public:
  bool write(const char *filename, T *data, unsigned int len, float epsilon) {
    fprintf(stderr,
            "Error: no file write function implemented for this type\n");
    return false;
  }
};

// Here's the specialization for ints:
template <>
class ArrayFileWriter<int> {
 public:
  bool write(const char *filename, int *data, unsigned int len, float epsilon) {
    return sdkWriteFile(filename, data, len, epsilon, false);
  }
};

// Here's the specialization for floats:
template <>
class ArrayFileWriter<float> {
 public:
  bool write(const char *filename, float *data, unsigned int len,
             float epsilon) {
    return sdkWriteFile(filename, data, len, epsilon, false);
  }
};

template <typename T>
CUfunction getKernel(CUmodule in);

template <>
CUfunction getKernel<int>(CUmodule in) {
  CUfunction kernel_addr;
  checkCudaErrors(cuModuleGetFunction(&kernel_addr, in, "testInt"));

  return kernel_addr;
}

template <>
CUfunction getKernel<float>(CUmodule in) {
  CUfunction kernel_addr;
  checkCudaErrors(cuModuleGetFunction(&kernel_addr, in, "testFloat"));

  return kernel_addr;
}

////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////

static bool moduleLoaded = false;
CUmodule module;
char *cubin, *kernel_file;
size_t cubinSize;

template <class T>
void runTest(int argc, char **argv, int len) {
  if (!moduleLoaded) {
    kernel_file = sdkFindFilePath("simpleTemplates_kernel.cu", argv[0]);
    compileFileToCUBIN(kernel_file, argc, argv, &cubin, &cubinSize, 0);
    module = loadCUBIN(cubin, argc, argv);
    moduleLoaded = true;
  }

  // create and start timer
  StopWatchInterface *timer = NULL;
  sdkCreateTimer(&timer);

  // start the timer
  sdkStartTimer(&timer);

  unsigned int num_threads = len;
  unsigned int mem_size = sizeof(float) * num_threads;

  // allocate host memory
  T *h_idata = (T *)malloc(mem_size);

  // initialize the memory
  for (unsigned int i = 0; i < num_threads; ++i) {
    h_idata[i] = (T)i;
  }

  // allocate device memory
  CUdeviceptr d_idata;
  checkCudaErrors(cuMemAlloc(&d_idata, mem_size));

  // copy host memory to device
  checkCudaErrors(cuMemcpyHtoD(d_idata, h_idata, mem_size));

  // allocate device memory for result
  CUdeviceptr d_odata;
  checkCudaErrors(cuMemAlloc(&d_odata, mem_size));

  // setup execution parameters
  dim3 grid(1, 1, 1);
  dim3 threads(num_threads, 1, 1);

  // execute the kernel
  CUfunction kernel_addr = getKernel<T>(module);

  void *arr[] = {(void *)&d_idata, (void *)&d_odata};
  checkCudaErrors(
      cuLaunchKernel(kernel_addr, grid.x, grid.y, grid.z, /* grid dim */
                     threads.x, threads.y, threads.z,     /* block dim */
                     mem_size, 0, /* shared mem, stream */
                     &arr[0],     /* arguments */
                     0));

  // check if kernel execution generated and error
  checkCudaErrors(cuCtxSynchronize());

  // allocate mem for the result on host side
  T *h_odata = (T *)malloc(mem_size);

  // copy result from device to host
  checkCudaErrors(cuMemcpyDtoH(h_odata, d_odata, sizeof(T) * num_threads));

  sdkStopTimer(&timer);
  printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
  sdkDeleteTimer(&timer);

  // compute reference solution
  T *reference = (T *)malloc(mem_size);

  computeGold<T>(reference, h_idata, num_threads);

  ArrayComparator<T> comparator;
  ArrayFileWriter<T> writer;

  // check result
  if (checkCmdLineFlag(argc, (const char **)argv, "regression")) {
    // write file for regression test
    writer.write("./data/regression.dat", h_odata, num_threads, 0.0f);
  } else {
    // custom output handling when no regression test running
    // in this case check if the result is equivalent to the expected solution
    bool res = comparator.compare(reference, h_odata, num_threads);

    printf("Compare %s\n\n", (1 == res) ? "OK" : "MISMATCH");

    g_TotalFailures += (1 != res);
  }

  // cleanup memory
  free(h_idata);
  free(h_odata);
  free(reference);
  checkCudaErrors(cuMemFree(d_idata));
  checkCudaErrors(cuMemFree(d_odata));
}
@@ -1,70 +0,0 @@
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of NVIDIA CORPORATION nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

// includes, kernels
#include "sharedmem.cuh"

////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata  input data in global memory
//! @param g_odata  output data in global memory
////////////////////////////////////////////////////////////////////////////////

template <class T>
__device__ void testKernel(T *g_idata, T *g_odata) {
  // Shared mem size is determined by the host app at run time
  SharedMemory<T> smem;

  T *sdata = smem.getPointer();

  // access thread id
  const unsigned int tid = threadIdx.x;

  // access number of threads in this block
  const unsigned int num_threads = blockDim.x;

  // read in input data from global memory
  sdata[tid] = g_idata[tid];

  __syncthreads();

  // perform some computations
  sdata[tid] = (T)num_threads * sdata[tid];

  __syncthreads();

  // write data to global memory
  g_odata[tid] = sdata[tid];
}

extern "C" __global__ void testFloat(float *p1, float *p2) {
  testKernel<float>(p1, p2);
}

extern "C" __global__ void testInt(int *p1, int *p2) {
  testKernel<int>(p1, p2);
}