Mirror of https://github.com/NVIDIA/cuda-samples.git, synced 2024-11-24 17:49:18 +08:00
update sample conjugateGradientMultiDeviceCG to remove use of deprecated function cudaLaunchCooperativeKernelMultiDevice()
parent ba04faaf73, commit 3342d604fe
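
In summary: the deprecated cudaLaunchCooperativeKernelMultiDevice() call is replaced by one cudaLaunchCooperativeKernel() call per participating device, and the kernel's cg::multi_grid_group synchronization is replaced by a new PeerGroup class that implements an arrive/release barrier in portable pinned host memory. Below is a minimal, self-contained sketch of the new launch pattern, assuming every visible device supports cooperative launch; the helloKernel name, grid dimensions, and printf-based error handling are illustrative only and not part of the sample:

    #include <cstdio>
    #include <cuda_runtime.h>

    // Trivial kernel standing in for multiGpuConjugateGradient.
    __global__ void helloKernel(int deviceRank) {
      if (blockIdx.x == 0 && threadIdx.x == 0) {
        printf("grid running on device rank %d\n", deviceRank);
      }
    }

    int main() {
      int numDevices = 0;
      cudaGetDeviceCount(&numDevices);

      // One cooperative launch per device replaces the single deprecated
      // cudaLaunchCooperativeKernelMultiDevice() call. Kernel arguments are
      // copied at launch time, so reusing &rank across iterations is safe.
      for (int rank = 0; rank < numDevices; rank++) {
        cudaSetDevice(rank);
        void *args[] = {&rank};
        cudaError_t err = cudaLaunchCooperativeKernel(
            (void *)helloKernel, dim3(1), dim3(32), args, 0 /*sharedMem*/,
            0 /*stream*/);
        if (err != cudaSuccess) {
          printf("launch on device %d failed: %s\n", rank,
                 cudaGetErrorString(err));
        }
      }

      // Wait for every grid to finish.
      for (int rank = 0; rank < numDevices; rank++) {
        cudaSetDevice(rank);
        cudaDeviceSynchronize();
      }
      return 0;
    }
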
@@ -27,8 +27,7 @@
 /*
  * This sample implements a conjugate gradient solver on multiple GPU using
- * Multi Device Cooperative Groups, also uses Unified Memory optimized using
- * prefetching and usage hints.
+ * Unified Memory optimized prefetching and usage hints.
  *
  */
 
@@ -62,8 +61,8 @@ __device__ double grid_dot_result = 0.0;
 /* genTridiag: generate a random tridiagonal symmetric matrix */
 void genTridiag(int *I, int *J, float *val, int N, int nz) {
   I[0] = 0, J[0] = 0, J[1] = 1;
-  val[0] = static_cast<float>(rand()) / RAND_MAX + 10.0f;
-  val[1] = static_cast<float>(rand()) / RAND_MAX;
+  val[0] = (float)rand() / RAND_MAX + 10.0f;
+  val[1] = (float)rand() / RAND_MAX;
   int start;
 
   for (int i = 1; i < N; i++) {
@@ -82,10 +81,10 @@ void genTridiag(int *I, int *J, float *val, int N, int nz) {
     }
 
     val[start] = val[start - 1];
-    val[start + 1] = static_cast<float>(rand()) / RAND_MAX + 10.0f;
+    val[start + 1] = (float)rand() / RAND_MAX + 10.0f;
 
     if (i < N - 1) {
-      val[start + 2] = static_cast<float>(rand()) / RAND_MAX;
+      val[start + 2] = (float)rand() / RAND_MAX;
     }
   }
 
@@ -112,8 +111,8 @@ void cpuSpMV(int *I, int *J, float *val, int nnz, int num_rows, float alpha,
   return;
 }
 
-double dotProduct(float *vecA, float *vecB, int size) {
-  double result = 0.0;
+float dotProduct(float *vecA, float *vecB, int size) {
+  float result = 0.0;
 
   for (int i = 0; i < size; i++) {
     result = result + (vecA[i] * vecB[i]);
@@ -176,11 +175,90 @@ void cpuConjugateGrad(int *I, int *J, float *val, float *x, float *Ax, float *p,
   }
 }
 
+// Data filled on CPU needed for MultiGPU operations.
+struct MultiDeviceData {
+  unsigned char *hostMemoryArrivedList;
+  unsigned int numDevices;
+  unsigned int deviceRank;
+};
+
+// Class used for coordination of multiple devices.
+class PeerGroup {
+  const MultiDeviceData &data;
+  const cg::grid_group &grid;
+
+  __device__ unsigned char load_arrived(unsigned char *arrived) const {
+#if __CUDA_ARCH__ < 700
+    return *(volatile unsigned char *)arrived;
+#else
+    unsigned int result;
+    asm volatile("ld.acquire.sys.global.u8 %0, [%1];"
+                 : "=r"(result)
+                 : "l"(arrived)
+                 : "memory");
+    return result;
+#endif
+  }
+
+  __device__ void store_arrived(unsigned char *arrived,
+                                unsigned char val) const {
+#if __CUDA_ARCH__ < 700
+    *(volatile unsigned char *)arrived = val;
+#else
+    unsigned int reg_val = val;
+    asm volatile(
+        "st.release.sys.global.u8 [%1], %0;" ::"r"(reg_val), "l"(arrived)
+        : "memory");
+
+    // Avoids compiler warnings from unused variable val.
+    (void)(reg_val = reg_val);
+#endif
+  }
+
+ public:
+  __device__ PeerGroup(const MultiDeviceData &data, const cg::grid_group &grid)
+      : data(data), grid(grid){};
+
+  __device__ unsigned int size() const { return data.numDevices * grid.size(); }
+
+  __device__ unsigned int thread_rank() const {
+    return data.deviceRank * grid.size() + grid.thread_rank();
+  }
+
+  __device__ void sync() const {
+    grid.sync();
+
+    // One thread from each grid participates in the sync.
+    if (grid.thread_rank() == 0) {
+      if (data.deviceRank == 0) {
+        // Leader grid waits for others to join and then releases them.
+        // Other GPUs can arrive in any order, so the leader has to wait for
+        // all others.
+        for (int i = 0; i < data.numDevices - 1; i++) {
+          while (load_arrived(&data.hostMemoryArrivedList[i]) == 0)
+            ;
+        }
+        for (int i = 0; i < data.numDevices - 1; i++) {
+          store_arrived(&data.hostMemoryArrivedList[i], 0);
+        }
+        __threadfence_system();
+      } else {
+        // Other grids note their arrival and wait to be released.
+        store_arrived(&data.hostMemoryArrivedList[data.deviceRank - 1], 1);
+        while (load_arrived(&data.hostMemoryArrivedList[data.deviceRank - 1]) ==
+               1)
+          ;
+      }
+    }
+
+    grid.sync();
+  }
+};
+
 __device__ void gpuSpMV(int *I, int *J, float *val, int nnz, int num_rows,
                         float alpha, float *inputVecX, float *outputVecY,
-                        cg::thread_block &cta,
-                        const cg::multi_grid_group &multi_grid) {
-  for (int i = multi_grid.thread_rank(); i < num_rows; i += multi_grid.size()) {
+                        const PeerGroup &peer_group) {
+  for (int i = peer_group.thread_rank(); i < num_rows; i += peer_group.size()) {
     int row_elem = I[i];
     int next_row_elem = I[i + 1];
     int num_elems_this_row = next_row_elem - row_elem;
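Note on the PeerGroup class added above: thread_rank() linearizes threads across all participating GPUs (global rank = deviceRank * grid.size() + grid.thread_rank()), so the grid-stride loops in the device functions below keep exactly the work partitioning they had under multi_grid. sync() brackets a host-memory barrier between two grid.sync() calls: thread 0 of each non-leader grid sets its byte in hostMemoryArrivedList and spins until the leader clears it, while thread 0 of the leader grid (deviceRank 0) waits for every byte, clears them all, and issues __threadfence_system(). On sm_70 and newer the flag accesses compile to acquire/release PTX (ld.acquire.sys / st.release.sys); older architectures fall back to volatile loads and stores.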
@@ -195,21 +273,21 @@ __device__ void gpuSpMV(int *I, int *J, float *val, int nnz, int num_rows,
 }
 
 __device__ void gpuSaxpy(float *x, float *y, float a, int size,
-                         const cg::multi_grid_group &multi_grid) {
-  for (int i = multi_grid.thread_rank(); i < size; i += multi_grid.size()) {
+                         const PeerGroup &peer_group) {
+  for (int i = peer_group.thread_rank(); i < size; i += peer_group.size()) {
     y[i] = a * x[i] + y[i];
   }
 }
 
 __device__ void gpuDotProduct(float *vecA, float *vecB, int size,
                               const cg::thread_block &cta,
-                              const cg::multi_grid_group &multi_grid) {
+                              const PeerGroup &peer_group) {
   extern __shared__ double tmp[];
 
   double temp_sum = 0.0;
 
-  for (int i = multi_grid.thread_rank(); i < size; i += multi_grid.size()) {
-    temp_sum += static_cast<double>(vecA[i] * vecB[i]);
+  for (int i = peer_group.thread_rank(); i < size; i += peer_group.size()) {
+    temp_sum += (double)(vecA[i] * vecB[i]);
   }
 
   cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
@@ -235,26 +313,26 @@ __device__ void gpuDotProduct(float *vecA, float *vecB, int size,
 }
 
 __device__ void gpuCopyVector(float *srcA, float *destB, int size,
-                              const cg::multi_grid_group &multi_grid) {
-  for (int i = multi_grid.thread_rank(); i < size; i += multi_grid.size()) {
+                              const PeerGroup &peer_group) {
+  for (int i = peer_group.thread_rank(); i < size; i += peer_group.size()) {
     destB[i] = srcA[i];
   }
 }
 
 __device__ void gpuScaleVectorAndSaxpy(float *x, float *y, float a, float scale,
-                                       int size,
-                                       const cg::multi_grid_group &multi_grid) {
-  for (int i = multi_grid.thread_rank(); i < size; i += multi_grid.size()) {
+                                       int size, const PeerGroup &peer_group) {
+  for (int i = peer_group.thread_rank(); i < size; i += peer_group.size()) {
     y[i] = a * x[i] + scale * y[i];
   }
 }
 
 extern "C" __global__ void multiGpuConjugateGradient(
     int *I, int *J, float *val, float *x, float *Ax, float *p, float *r,
-    double *dot_result, int nnz, int N, float tol) {
+    double *dot_result, int nnz, int N, float tol,
+    MultiDeviceData multi_device_data) {
   cg::thread_block cta = cg::this_thread_block();
   cg::grid_group grid = cg::this_grid();
-  cg::multi_grid_group multi_grid = cg::this_multi_grid();
+  PeerGroup peer_group(multi_device_data, grid);
 
   const int max_iter = 10000;
 
@@ -262,22 +340,22 @@ extern "C" __global__ void multiGpuConjugateGradient(
   float alpham1 = -1.0;
   float r0 = 0.0, r1, b, a, na;
 
-  for (int i = multi_grid.thread_rank(); i < N; i += multi_grid.size()) {
+  for (int i = peer_group.thread_rank(); i < N; i += peer_group.size()) {
     r[i] = 1.0;
     x[i] = 0.0;
   }
 
   cg::sync(grid);
 
-  gpuSpMV(I, J, val, nnz, N, alpha, x, Ax, cta, multi_grid);
+  gpuSpMV(I, J, val, nnz, N, alpha, x, Ax, peer_group);
 
   cg::sync(grid);
 
-  gpuSaxpy(Ax, r, alpham1, N, multi_grid);
+  gpuSaxpy(Ax, r, alpham1, N, peer_group);
 
   cg::sync(grid);
 
-  gpuDotProduct(r, r, N, cta, multi_grid);
+  gpuDotProduct(r, r, N, cta, peer_group);
 
   cg::sync(grid);
 
@@ -285,7 +363,7 @@ extern "C" __global__ void multiGpuConjugateGradient(
     atomicAdd_system(dot_result, grid_dot_result);
     grid_dot_result = 0.0;
   }
-  cg::sync(multi_grid);
+  peer_group.sync();
 
   r1 = *dot_result;
 
@@ -293,21 +371,21 @@ extern "C" __global__ void multiGpuConjugateGradient(
   while (r1 > tol * tol && k <= max_iter) {
     if (k > 1) {
       b = r1 / r0;
-      gpuScaleVectorAndSaxpy(r, p, alpha, b, N, multi_grid);
+      gpuScaleVectorAndSaxpy(r, p, alpha, b, N, peer_group);
     } else {
-      gpuCopyVector(r, p, N, multi_grid);
+      gpuCopyVector(r, p, N, peer_group);
     }
 
-    cg::sync(multi_grid);
+    peer_group.sync();
 
-    gpuSpMV(I, J, val, nnz, N, alpha, p, Ax, cta, multi_grid);
+    gpuSpMV(I, J, val, nnz, N, alpha, p, Ax, peer_group);
 
-    if (multi_grid.thread_rank() == 0) {
+    if (peer_group.thread_rank() == 0) {
       *dot_result = 0.0;
     }
-    cg::sync(multi_grid);
+    peer_group.sync();
 
-    gpuDotProduct(p, Ax, N, cta, multi_grid);
+    gpuDotProduct(p, Ax, N, cta, peer_group);
 
     cg::sync(grid);
 
@@ -315,26 +393,27 @@ extern "C" __global__ void multiGpuConjugateGradient(
       atomicAdd_system(dot_result, grid_dot_result);
       grid_dot_result = 0.0;
     }
-    cg::sync(multi_grid);
+    peer_group.sync();
 
    a = r1 / *dot_result;
 
-    gpuSaxpy(p, x, a, N, multi_grid);
+    gpuSaxpy(p, x, a, N, peer_group);
 
    na = -a;
 
-    gpuSaxpy(Ax, r, na, N, multi_grid);
+    gpuSaxpy(Ax, r, na, N, peer_group);
 
    r0 = r1;
 
-    cg::sync(multi_grid);
-    if (multi_grid.thread_rank() == 0) {
+    peer_group.sync();
+
+    if (peer_group.thread_rank() == 0) {
      *dot_result = 0.0;
    }
 
-    cg::sync(multi_grid);
+    peer_group.sync();
 
-    gpuDotProduct(r, r, N, cta, multi_grid);
+    gpuDotProduct(r, r, N, cta, peer_group);
 
    cg::sync(grid);
 
@@ -342,7 +421,7 @@ extern "C" __global__ void multiGpuConjugateGradient(
       atomicAdd_system(dot_result, grid_dot_result);
       grid_dot_result = 0.0;
     }
-    cg::sync(multi_grid);
+    peer_group.sync();
 
     r1 = *dot_result;
     k++;
@@ -361,8 +440,7 @@ std::multimap<std::pair<int, int>, int> getIdenticalGPUs() {
     checkCudaErrors(cudaGetDeviceProperties(&deviceProp, i));
 
     // Filter unsupported devices
-    if (deviceProp.cooperativeMultiDeviceLaunch &&
-        deviceProp.concurrentManagedAccess) {
+    if (deviceProp.cooperativeLaunch && deviceProp.concurrentManagedAccess) {
       identicalGpus.emplace(std::make_pair(deviceProp.major, deviceProp.minor),
                             i);
     }
@@ -406,24 +484,24 @@ int main(int argc, char **argv) {
 
   if (distance(bestFit) < kNumGpusRequired) {
     printf(
-        "No Two or more GPUs with same architecture capable of "
-        "cooperativeMultiDeviceLaunch & concurrentManagedAccess found. "
+        "No two or more GPUs with same architecture capable of "
+        "concurrentManagedAccess found. "
        "\nWaiving the sample\n");
     exit(EXIT_WAIVED);
   }
 
   std::set<int> bestFitDeviceIds;
 
-  // check & select peer-to-peer access capable GPU devices as enabling p2p
-  // access between participating
-  // GPUs gives better performance for multi_grid sync.
+  // Check & select peer-to-peer access capable GPU devices as enabling p2p
+  // access between participating GPUs gives better performance.
   for (auto itr = bestFit.first; itr != bestFit.second; itr++) {
     int deviceId = itr->second;
     checkCudaErrors(cudaSetDevice(deviceId));
 
-    std::for_each(itr, bestFit.second, [&deviceId, &bestFitDeviceIds,
-                                        &kNumGpusRequired](
-                                           decltype(*itr) mapPair) {
+    std::for_each(
+        itr, bestFit.second,
+        [&deviceId, &bestFitDeviceIds,
+         &kNumGpusRequired](decltype(*itr) mapPair) {
       if (deviceId != mapPair.second) {
         int access = 0;
         checkCudaErrors(
@@ -434,7 +512,8 @@ int main(int argc, char **argv) {
           bestFitDeviceIds.emplace(deviceId);
           bestFitDeviceIds.emplace(mapPair.second);
         } else {
-          printf("Ignoring device %i (max devices exceeded)\n", mapPair.second);
+          printf("Ignoring device %i (max devices exceeded)\n",
+                 mapPair.second);
         }
       }
     });
@@ -451,8 +530,7 @@ int main(int argc, char **argv) {
   }
 
   // if bestFitDeviceIds.size() == 0 it means the GPUs in system are not p2p
-  // capable,
-  // hence we add it without p2p capability check.
+  // capable, hence we add it without p2p capability check.
   if (!bestFitDeviceIds.size()) {
     printf("Devices involved are not p2p capable.. selecting %zu of them\n",
            kNumGpusRequired);
@@ -469,8 +547,7 @@ int main(int argc, char **argv) {
     });
   } else {
     // perform cudaDeviceEnablePeerAccess in both directions for all
-    // participating devices of a cudaLaunchCooperativeKernelMultiDevice call
-    // this gives better performance for multi_grid sync.
+    // participating devices.
     for (auto p1_itr = bestFitDeviceIds.begin();
          p1_itr != bestFitDeviceIds.end(); p1_itr++) {
       checkCudaErrors(cudaSetDevice(*p1_itr));
@@ -488,14 +565,11 @@ int main(int argc, char **argv) {
   N = 10485760 * 2;
   nz = (N - 2) * 3 + 4;
 
-  checkCudaErrors(
-      cudaMallocManaged(reinterpret_cast<void **>(&I), sizeof(int) * (N + 1)));
-  checkCudaErrors(
-      cudaMallocManaged(reinterpret_cast<void **>(&J), sizeof(int) * nz));
-  checkCudaErrors(
-      cudaMallocManaged(reinterpret_cast<void **>(&val), sizeof(float) * nz));
+  checkCudaErrors(cudaMallocManaged((void **)&I, sizeof(int) * (N + 1)));
+  checkCudaErrors(cudaMallocManaged((void **)&J, sizeof(int) * nz));
+  checkCudaErrors(cudaMallocManaged((void **)&val, sizeof(float) * nz));
 
-  float *val_cpu = reinterpret_cast<float *>(malloc(sizeof(float) * nz));
+  float *val_cpu = (float *)malloc(sizeof(float) * nz);
 
   genTridiag(I, J, val_cpu, N, nz);
 
@@ -507,22 +581,17 @@ int main(int argc, char **argv) {
   checkCudaErrors(
       cudaMemAdvise(val, sizeof(float) * nz, cudaMemAdviseSetReadMostly, 0));
 
-  checkCudaErrors(
-      cudaMallocManaged(reinterpret_cast<void **>(&x), sizeof(float) * N));
+  checkCudaErrors(cudaMallocManaged((void **)&x, sizeof(float) * N));
 
   double *dot_result;
-  checkCudaErrors(cudaMallocManaged(reinterpret_cast<void **>(&dot_result),
-                                    sizeof(double)));
+  checkCudaErrors(cudaMallocManaged((void **)&dot_result, sizeof(double)));
 
-  checkCudaErrors(cudaMemset(dot_result, 0.0, sizeof(double)));
+  checkCudaErrors(cudaMemset(dot_result, 0, sizeof(double)));
 
   // temp memory for ConjugateGradient
-  checkCudaErrors(
-      cudaMallocManaged(reinterpret_cast<void **>(&r), N * sizeof(float)));
-  checkCudaErrors(
-      cudaMallocManaged(reinterpret_cast<void **>(&p), N * sizeof(float)));
-  checkCudaErrors(
-      cudaMallocManaged(reinterpret_cast<void **>(&Ax), N * sizeof(float)));
+  checkCudaErrors(cudaMallocManaged((void **)&r, N * sizeof(float)));
+  checkCudaErrors(cudaMallocManaged((void **)&p, N * sizeof(float)));
+  checkCudaErrors(cudaMallocManaged((void **)&Ax, N * sizeof(float)));
 
   std::cout << "\nRunning on GPUs = " << kNumGpusRequired << std::endl;
   cudaStream_t nStreams[kNumGpusRequired];
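Note: the cudaMemset change in the hunk above is a small correctness cleanup as well: the value parameter of cudaMemset() is an int, so the 0.0 double literal was being implicitly converted; the 0 literal says what actually happens.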
@@ -616,10 +685,10 @@ int main(int argc, char **argv) {
   }
 
 #if ENABLE_CPU_DEBUG_CODE
-  float *Ax_cpu = reinterpret_cast<float *>(malloc(sizeof(float) * N));
-  float *r_cpu = reinterpret_cast<float *>(malloc(sizeof(float) * N));
-  float *p_cpu = reinterpret_cast<float *>(malloc(sizeof(float) * N));
-  float *x_cpu = reinterpret_cast<float *>(malloc(sizeof(float) * N));
+  float *Ax_cpu = (float *)malloc(sizeof(float) * N);
+  float *r_cpu = (float *)malloc(sizeof(float) * N);
+  float *p_cpu = (float *)malloc(sizeof(float) * N);
+  float *x_cpu = (float *)malloc(sizeof(float) * N);
 
   for (int i = 0; i < N; i++) {
     r_cpu[i] = 1.0;
@@ -631,28 +700,37 @@ int main(int argc, char **argv) {
       numSms * numBlocksPerSm * THREADS_PER_BLOCK, numBlocksPerSm);
   dim3 dimGrid(numSms * numBlocksPerSm, 1, 1),
       dimBlock(THREADS_PER_BLOCK, 1, 1);
 
+  // Structure used for cross-grid synchronization.
+  MultiDeviceData multi_device_data;
+  checkCudaErrors(cudaHostAlloc(
+      &multi_device_data.hostMemoryArrivedList,
+      (kNumGpusRequired - 1) * sizeof(*multi_device_data.hostMemoryArrivedList),
+      cudaHostAllocPortable));
+  memset(multi_device_data.hostMemoryArrivedList, 0,
+         (kNumGpusRequired - 1) *
+             sizeof(*multi_device_data.hostMemoryArrivedList));
+  multi_device_data.numDevices = kNumGpusRequired;
+  multi_device_data.deviceRank = 0;
+
   void *kernelArgs[] = {
       (void *)&I, (void *)&J, (void *)&val, (void *)&x,
       (void *)&Ax, (void *)&p, (void *)&r, (void *)&dot_result,
-      (void *)&nz, (void *)&N, (void *)&tol,
+      (void *)&nz, (void *)&N, (void *)&tol, (void *)&multi_device_data,
   };
-  cudaLaunchParams *launchParamsList =
-      (cudaLaunchParams *)malloc(sizeof(cudaLaunchParams) * kNumGpusRequired);
-  for (int i = 0; i < kNumGpusRequired; i++) {
-    launchParamsList[i].func = (void *)multiGpuConjugateGradient;
-    launchParamsList[i].gridDim = dimGrid;
-    launchParamsList[i].blockDim = dimBlock;
-    launchParamsList[i].sharedMem = sMemSize;
-    launchParamsList[i].stream = nStreams[i];
-    launchParamsList[i].args = kernelArgs;
-  }
 
   printf("Launching kernel\n");
 
-  checkCudaErrors(cudaLaunchCooperativeKernelMultiDevice(
-      launchParamsList, kNumGpusRequired,
-      cudaCooperativeLaunchMultiDeviceNoPreSync |
-          cudaCooperativeLaunchMultiDeviceNoPostSync));
+  deviceId = bestFitDeviceIds.begin();
+  device_count = 0;
+  while (deviceId != bestFitDeviceIds.end()) {
+    checkCudaErrors(cudaSetDevice(*deviceId));
+    checkCudaErrors(cudaLaunchCooperativeKernel(
+        (void *)multiGpuConjugateGradient, dimGrid, dimBlock, kernelArgs,
+        sMemSize, nStreams[device_count++]));
+    multi_device_data.deviceRank++;
+    deviceId++;
+  }
 
   checkCudaErrors(cudaMemPrefetchAsync(x, sizeof(float) * N, cudaCpuDeviceId));
   checkCudaErrors(
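Note on the launch loop above: kernel arguments are copied at each cudaLaunchCooperativeKernel() call, so incrementing multi_device_data.deviceRank between launches hands every grid a unique rank while they all share the same hostMemoryArrivedList. The pre/post-sync control that the deprecated API exposed through its flags is no longer needed here: the old code already opted out via cudaCooperativeLaunchMultiDeviceNoPreSync | cudaCooperativeLaunchMultiDeviceNoPostSync, and inter-GPU synchronization now happens explicitly in PeerGroup::sync() inside the kernel. A standalone sketch of this launch pattern appears after the commit header at the top.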
@@ -690,6 +768,7 @@ int main(int argc, char **argv) {
     }
   }
 
+  checkCudaErrors(cudaFreeHost(multi_device_data.hostMemoryArrivedList));
   checkCudaErrors(cudaFree(I));
   checkCudaErrors(cudaFree(J));
   checkCudaErrors(cudaFree(val));