// Mirror of https://github.com/NVIDIA/cuda-samples.git (synced 2024-12-01)
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of NVIDIA CORPORATION nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

// includes, kernels
#include "sharedmem.cuh"

////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
template <class T>
|
|
__device__ void testKernel(T *g_idata, T *g_odata) {
|
|
// Shared mem size is determined by the host app at run time
|
|
SharedMemory<T> smem;
|
|
|
|
T *sdata = smem.getPointer();
|
|
|
|
// access thread id
|
|
const unsigned int tid = threadIdx.x;
|
|
|
|
// access number of threads in this block
|
|
const unsigned int num_threads = blockDim.x;
|
|
|
|
// read in input data from global memory
|
|
sdata[tid] = g_idata[tid];
|
|
|
|
__syncthreads();
|
|
|
|
// perform some computations
|
|
sdata[tid] = (T)num_threads * sdata[tid];
|
|
|
|
__syncthreads();
|
|
|
|
// write data to global memory
|
|
g_odata[tid] = sdata[tid];
|
|
}
|
|
|
|
extern "C" __global__ void testFloat(float *p1, float *p2) {
|
|
testKernel<float>(p1, p2);
|
|
}
|
|
|
|
extern "C" __global__ void testInt(int *p1, int *p2) {
|
|
testKernel<int>(p1, p2);
|
|
}
|