How to use CUDA programming to calculate and process the correct number

Collapse
X
 
  • Time
  • Show
Clear All
new posts
  • vokoyo
    New Member
    • Apr 2018
    • 7

    How to use CUDA programming to calculate and process the correct number

    Bandwidth test - test memory bandwidth.

    This is especially important for PCIe throughput: different motherboards provide different PCIe capabilities.

    The CUDA adapter's performance depends on the capability of the PCIe bus, which can be the performance bottleneck.

    In the following programming drills, the number of clock cycles required for the computation and the memory bandwidth utilised have to be computed.

    (1) parallelization in the programs - using 256 threads

    (2) improving the memory access modes

    (3) testing the parallelization by using 512/1024

    (4) utilizing BLOCKS in the computation

    (5) utilizing shared memory

    (6) improving the computation performance by using a Treesum algorithm

    (7) resolving the memory bank conflict issue encountered when applying the Treesum algorithm with shared memory
  • vokoyo
    New Member
    • Apr 2018
    • 7

    #2
    My own coding sample, however, has some errors -

    #include <stdio.h>
    #include <stdlib.h>
    #include <cuda_runtime.h>
    #include <time.h>
    #include <math.h>

    /* Entry point: generate two random n x n matrices, multiply them on the
       GPU (timed) and on the CPU, compare the results, and report GFLOPS.
       Fixes: the original never freed the four host buffers and did not
       check the malloc results. */
    int main()
    {
        float *a, *b, *c, *d;
        int n = 1000;

        /* Bail out when no CUDA-capable device is available. */
        if (!InitCUDA())
            return 0;

        a = (float*)malloc(sizeof(float) * n * n);
        b = (float*)malloc(sizeof(float) * n * n);
        c = (float*)malloc(sizeof(float) * n * n);
        d = (float*)malloc(sizeof(float) * n * n);
        if (!a || !b || !c || !d)
        {
            fprintf(stderr, "malloc failed\n");
            free(a); free(b); free(c); free(d);
            return 1;
        }

        srand(0);
        matgen(a, n, n);
        matgen(b, n, n);

        /* GPU multiply (returns elapsed host clock ticks), CPU reference,
           then element-wise comparison. */
        clock_t time = matmultCUDA(a, n, b, n, c, n, n);
        matmult(a, n, b, n, d, n, n);
        compare_mat(c, n, d, n, n);

        double sec = (double)time / CLOCKS_PER_SEC;
        /* An n x n matrix multiply performs 2*n^3 floating-point operations. */
        printf("Time used: %.2f (%.2lf GFLOPS)\n", sec, 2.0 * n * n * n / (sec * 1E9));

        free(a); free(b); free(c); free(d);
        return 0;
    }
    /* Fill the n x n matrix a (row stride lda floats) with random values in
       roughly [0, 1): a coarse rand()/RAND_MAX part plus a second term that
       adds sub-1/RAND_MAX resolution.
       Fix: the original divided by (RAND_MAX * RAND_MAX), which overflows
       int (undefined behavior); compute the product in floating point. */
    void matgen(float* a, int lda, int n)
    {
        int i, j;
        for (i = 0; i < n; i++)
        {
            for (j = 0; j < n; j++)
            {
                a[i * lda + j] = (float)rand() / RAND_MAX
                    + (float)rand() / ((float)RAND_MAX * (float)RAND_MAX);
            }
        }
    }

    /* CPU reference multiply: c = a * b for n x n matrices with row strides
       lda/ldb/ldc (in floats). Accumulates each dot product in double to
       reduce rounding error before storing the float result.
       Fix: the original was missing the function's closing brace. */
    void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
    {
        int i, j, k;

        for (i = 0; i < n; i++)
        {
            for (j = 0; j < n; j++)
            {
                double t = 0;
                for (k = 0; k < n; k++)
                {
                    t += a[i * lda + k] * b[k * ldb + j];
                }
                c[i * ldc + j] = (float)t;
            }
        }
    }

    /* Print the maximum and average relative error between matrices a and b
       (n x n, row strides lda/ldb). Entries where b is zero are skipped to
       avoid division by zero; the average is still taken over n*n entries.
       Fix: repaired extraction garbling ("con st", "inti, j"). */
    void compare_mat(const float* a, int lda, const float* b, int ldb, int n)
    {
        float max_err = 0;
        float average_err = 0;
        int i, j;
        for (i = 0; i < n; i++)
        {
            for (j = 0; j < n; j++)
            {
                if (b[i * ldb + j] != 0)
                {
                    float err = fabs((a[i * lda + j] - b[i * ldb + j]) / b[i * ldb + j]);
                    if (max_err < err) max_err = err;
                    average_err += err;
                }
            }
        }
        printf("Max error: %g Average error:%g\n", max_err, average_err / (n * n));
    }

    #define NUM_THREADS 256

    /* Host wrapper: copy a and b to the device, run the tiled matMultCUDA
       kernel to compute c = a * b, copy c back, and return the elapsed host
       clock() ticks (includes allocation and transfer time).
       cudaMemcpy2D converts between the host row stride (lda/ldb/ldc floats)
       and the packed n-float device rows.
       Fix: the tiled kernel indexes threadIdx.x/y and blockIdx.x/y, so it
       must be launched with a 2-D BLOCK_SIZE x BLOCK_SIZE configuration; the
       original 1-D <<<blocks * n, NUM_THREADS>>> launch leaves threadIdx.y
       at 0 for every thread. Assumes n is a multiple of BLOCK_SIZE (the
       kernel has no bounds checks) -- TODO confirm. */
    clock_t matmultCUDA(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
    {
        float *ac, *bc, *cc;
        clock_t start, end;

        start = clock();
        cudaMalloc((void**)&ac, sizeof(float) * n * n);
        cudaMalloc((void**)&bc, sizeof(float) * n * n);
        cudaMalloc((void**)&cc, sizeof(float) * n * n);

        cudaMemcpy2D(ac, sizeof(float) * n, a, sizeof(float) * lda, sizeof(float) * n, n, cudaMemcpyHostToDevice);
        cudaMemcpy2D(bc, sizeof(float) * n, b, sizeof(float) * ldb, sizeof(float) * n, n, cudaMemcpyHostToDevice);

        dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
        dim3 blocks((n + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
        matMultCUDA<<<blocks, threads>>>(ac, n, bc, n, cc, n, n);

        cudaMemcpy2D(c, sizeof(float) * ldc, cc, sizeof(float) * n, sizeof(float) * n, n, cudaMemcpyDeviceToHost);

        cudaFree(ac);
        cudaFree(bc);
        cudaFree(cc);

        end = clock();
        return end - start;
    }

    /* Tiled matrix-multiply kernel with Kahan compensated summation.
       Each BLOCK_SIZE x BLOCK_SIZE thread block computes one output tile of
       c = a * b, staging tiles of a and b in shared memory.
       Preconditions (no bounds checks): launched with BLOCK_SIZE x BLOCK_SIZE
       threads and n a multiple of BLOCK_SIZE -- TODO confirm.
       Fix: repaired extraction garbling ("constinttidc", "con st", spaces
       inside "__syncthreads()"). */
    __global__ static void matMultCUDA(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
    {
        __shared__ float matA[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float matB[BLOCK_SIZE][BLOCK_SIZE];
        const int tidc = threadIdx.x;
        const int tidr = threadIdx.y;
        const int bidc = blockIdx.x * BLOCK_SIZE;
        const int bidr = blockIdx.y * BLOCK_SIZE;
        int i, j;

        float results = 0;
        float comp = 0;

        for (j = 0; j < n; j += BLOCK_SIZE)
        {
            /* Stage one tile of a and one tile of b in shared memory. */
            matA[tidr][tidc] = a[(tidr + bidr) * lda + tidc + j];
            matB[tidr][tidc] = b[(tidr + j) * ldb + tidc + bidc];

            /* Tiles must be fully written before any thread reads them. */
            __syncthreads();

            for (i = 0; i < BLOCK_SIZE; i++)
            {
                /* Kahan summation: comp carries the accumulated rounding
                   error so long dot products stay accurate in float. */
                float t;
                comp -= matA[tidr][i] * matB[i][tidc];
                t = results - comp;
                comp = (t - results) + comp;
                results = t;
            }

            /* Do not overwrite the tiles while others still read them. */
            __syncthreads();
        }

        c[(tidr + bidr) * ldc + tidc + bidc] = results;
    }

    Comment

    • vokoyo
      New Member
      • Apr 2018
      • 7

      #3
      I get some errors for my coding sample as below -



      #include <stdio.h>
      #include <stdlib.h>
      #include <cuda_runtime.h>
      #include <time.h>
      #include <math.h>

      #define DATA_SIZE 1048576   /* number of input elements (1M ints) */
      #define BLOCK_NUM 32        /* blocks per grid                    */
      #define THREAD_NUM 256      /* threads per block                  */

      /* Host-side input buffer; its elements are squared and summed on the GPU. */
      int data[DATA_SIZE];

      /* Find the first CUDA device with compute capability >= 1.0, print its
         properties, and make it the current device.
         Returns false when no device (or no usable device) is present.
         Fixes: the original's `break` jumped past `cudaSetDevice(i)` so the
         device was never selected when one was found (and cudaSetDevice ran
         with a bogus index on every non-matching iteration); it also called
         cudaGetDeviceProperties twice and printed a size_t with "%1u". */
      bool InitCUDA()
      {
          int count;

          cudaGetDeviceCount(&count);
          if (count == 0)
          {
              fprintf(stderr, "There is no device.\n");
              return false;
          }

          int i;
          for (i = 0; i < count; i++)
          {
              cudaDeviceProp prop;
              if (cudaGetDeviceProperties(&prop, i) == cudaSuccess)
              {
                  if (prop.major >= 1)
                  {
                      printf("Device Name: %s\n", prop.name);
                      printf("Total global mem: %lu bytes\n", (unsigned long)prop.totalGlobalMem);
                      printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
                      printf("Clock rate: %.2f GHz\n", prop.clockRate * 1e-6f);
                      printf("\n");
                      break;
                  }
              }
          }

          /* Loop ran off the end: no device satisfied major >= 1. */
          if (i == count)
          {
              fprintf(stderr, "There is no device supporting CUDA.\n");
              return false;
          }

          cudaSetDevice(i);
          return true;
      }

      /* Kernel (version 1, single block): each of the THREAD_NUM threads sums
         the squares of one contiguous DATA_SIZE/THREAD_NUM slice of num[]
         into result[tid]. Thread 0 records the elapsed GPU clock in *time.
         NOTE(review): contiguous per-thread slices make adjacent threads read
         addresses DATA_SIZE/THREAD_NUM apart, so global loads are not
         coalesced; version 2 below fixes this with a strided loop. */
      __global__ static void sumOfSquares(int *num, int* result, clock_t* time)
      {
          const int tid = threadIdx.x;
          const int size = DATA_SIZE / THREAD_NUM;
          int sum = 0;
          int i;
          clock_t start;
          if (tid == 0) start = clock();
          for (i = tid * size; i < (tid + 1) * size; i++)
          {
              sum += num[i] * num[i];
          }

          result[tid] = sum;
          if (tid == 0) *time = clock() - start;
      }

      /* Host driver for the single-block kernel: upload data, launch, fetch
         the THREAD_NUM partial sums and the GPU clock count, total them on
         the host, and verify against a CPU loop.
         Fixes: repaired extraction garbling; the CPU check loop accumulated
         into `sum` (an int array -- would not compile) instead of
         `final_sum`; clock_t printed through "%d" without a cast. */
      int* gpudata, *result;
      clock_t* time;
      cudaMalloc((void**)&gpudata, sizeof(int) * DATA_SIZE);
      cudaMalloc((void**)&result, sizeof(int) * THREAD_NUM);
      cudaMalloc((void**)&time, sizeof(clock_t));
      cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);

      sumOfSquares<<<1, THREAD_NUM, 0>>>(gpudata, result, time);

      int sum[THREAD_NUM];
      clock_t time_used;
      cudaMemcpy(sum, result, sizeof(int) * THREAD_NUM, cudaMemcpyDeviceToHost);
      cudaMemcpy(&time_used, time, sizeof(clock_t), cudaMemcpyDeviceToHost);
      cudaFree(gpudata);
      cudaFree(result);
      cudaFree(time);

      int final_sum = 0;
      for (int i = 0; i < THREAD_NUM; i++)
      {
          final_sum += sum[i];
      }

      printf("sum: %d time: %d\n", final_sum, (int)time_used);

      /* CPU reference so the GPU result can be checked by eye. */
      final_sum = 0;
      for (int i = 0; i < DATA_SIZE; i++)
      {
          final_sum += data[i] * data[i];
      }
      printf("sum (CPU): %d\n", final_sum);

      /* Kernel (version 2, improved memory access): thread tid strides
         through num[] by THREAD_NUM, so adjacent threads read adjacent
         elements and global loads coalesce. Partial sums go to result[tid];
         thread 0 records the elapsed GPU clock in *time. */
      __global__ static void sumOfSquares(int *num, int* result, clock_t* time)
      {
          const int tid = threadIdx.x;
          int sum = 0;
          int i;
          clock_t start;
          if (tid == 0) start = clock();
          for (i = tid; i < DATA_SIZE; i += THREAD_NUM)
          {
              sum += num[i] * num[i];
          }

          result[tid] = sum;
          if (tid == 0) *time = clock() - start;
      }

      /* Kernel (version 3, multiple blocks): BLOCK_NUM x THREAD_NUM threads
         stride through num[] together (grid-stride style). Each thread writes
         its partial sum to result[bid * THREAD_NUM + tid]; thread 0 of each
         block stores the block's start clock in time[bid] and its end clock
         in time[bid + BLOCK_NUM]. */
      __global__ static void sumOfSquares(int *num, int* result, clock_t* time)
      {
          const int tid = threadIdx.x;
          const int bid = blockIdx.x;
          int sum = 0;
          int i;
          if (tid == 0) time[bid] = clock();
          for (i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM)
          {
              sum += num[i] * num[i];
          }

          result[bid * THREAD_NUM + tid] = sum;
          if (tid == 0) time[bid + BLOCK_NUM] = clock();
      }

      /* Host driver for the multi-block kernel: upload, launch, fetch the
         BLOCK_NUM*THREAD_NUM partial sums plus the per-block start/end
         clocks, total on the host, and report the widest clock span.
         Fixes: repaired garbling ("intfinal_sum", "inti"); passed the arrays
         (not their addresses with mismatched types) to cudaMemcpy; cast the
         clock_t difference for "%d". */
      int* gpudata, *result;
      clock_t* time;
      cudaMalloc((void**)&gpudata, sizeof(int) * DATA_SIZE);
      cudaMalloc((void**)&result, sizeof(int) * THREAD_NUM * BLOCK_NUM);
      cudaMalloc((void**)&time, sizeof(clock_t) * BLOCK_NUM * 2);
      cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);

      sumOfSquares<<<BLOCK_NUM, THREAD_NUM, 0>>>(gpudata, result, time);

      int sum[THREAD_NUM * BLOCK_NUM];
      clock_t time_used[BLOCK_NUM * 2];
      cudaMemcpy(sum, result, sizeof(int) * THREAD_NUM * BLOCK_NUM, cudaMemcpyDeviceToHost);
      cudaMemcpy(time_used, time, sizeof(clock_t) * BLOCK_NUM * 2, cudaMemcpyDeviceToHost);
      cudaFree(gpudata);
      cudaFree(result);
      cudaFree(time);

      int final_sum = 0;
      for (int i = 0; i < THREAD_NUM * BLOCK_NUM; i++)
      {
          final_sum += sum[i];
      }

      /* Earliest start and latest end across all blocks bound the true
         elapsed GPU time. */
      clock_t min_start, max_end;
      min_start = time_used[0];
      max_end = time_used[BLOCK_NUM];
      for (int i = 1; i < BLOCK_NUM; i++)
      {
          if (min_start > time_used[i]) min_start = time_used[i];
          if (max_end < time_used[i + BLOCK_NUM]) max_end = time_used[i + BLOCK_NUM];
      }

      printf("sum: %d time: %d\n", final_sum, (int)(max_end - min_start));

      /* Kernel (version 4, shared memory): each thread accumulates its
         partial sum in shared[tid]; after a barrier, thread 0 serially folds
         all THREAD_NUM partials into result[bid]. Requires the launch to
         supply THREAD_NUM * sizeof(int) bytes of dynamic shared memory.
         NOTE(review): accumulating directly into shared[tid] in the loop
         re-reads shared memory each iteration; a register accumulator
         written once at the end would be cheaper. */
      __global__ static void sumOfSquares(int *num, int* result, clock_t* time)
      {
          extern __shared__ int shared[];
          const int tid = threadIdx.x;
          const int bid = blockIdx.x;

          int i;
          if (tid == 0) time[bid] = clock();
          shared[tid] = 0;

          for (i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM)
          {
              shared[tid] += num[i] * num[i];
          }

          /* All partial sums must be visible before thread 0 reads them. */
          __syncthreads();
          if (tid == 0)
          {
              for (i = 1; i < THREAD_NUM; i++)
              {
                  shared[0] += shared[i];
              }
              result[bid] = shared[0];
          }

          if (tid == 0) time[bid + BLOCK_NUM] = clock();
      }

      /* Host driver for the shared-memory kernel: only BLOCK_NUM partial
         sums come back (one per block). The third launch argument reserves
         THREAD_NUM ints of dynamic shared memory for the in-block reduction.
         Fix: repaired extraction garbling and cudaMemcpy argument types. */
      int* gpudata, *result;
      clock_t* time;
      cudaMalloc((void**)&gpudata, sizeof(int) * DATA_SIZE);
      cudaMalloc((void**)&result, sizeof(int) * BLOCK_NUM);
      cudaMalloc((void**)&time, sizeof(clock_t) * BLOCK_NUM * 2);
      cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);

      sumOfSquares<<<BLOCK_NUM, THREAD_NUM, THREAD_NUM * sizeof(int)>>>(gpudata, result, time);

      int sum[BLOCK_NUM];
      clock_t time_used[BLOCK_NUM * 2];
      cudaMemcpy(sum, result, sizeof(int) * BLOCK_NUM, cudaMemcpyDeviceToHost);
      cudaMemcpy(time_used, time, sizeof(clock_t) * BLOCK_NUM * 2, cudaMemcpyDeviceToHost);
      cudaFree(gpudata);
      cudaFree(result);
      cudaFree(time);

      int final_sum = 0;
      for (int i = 0; i < BLOCK_NUM; i++)
      {
          final_sum += sum[i];
      }

      /* Kernel (version 5, tree sum): per-thread partials in shared memory
         followed by an interleaved-addressing tree reduction. Launch with
         BLOCK_NUM blocks, THREAD_NUM threads, and THREAD_NUM * sizeof(int)
         bytes of dynamic shared memory.
         NOTE(review): the (tid & mask) interleaved scheme causes
         shared-memory bank conflicts; the sequential-addressing loop shown
         after this kernel avoids them.
         Fix: repaired extraction garbling ("in t*num", "into ffset"). */
      __global__ static void sumOfSquares(int *num, int* result, clock_t* time)
      {
          extern __shared__ int shared[];
          const int tid = threadIdx.x;
          const int bid = blockIdx.x;

          int i;
          int offset = 1, mask = 1;
          if (tid == 0) time[bid] = clock();
          shared[tid] = 0;
          for (i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM)
          {
              shared[tid] += num[i] * num[i];
          }

          __syncthreads();
          /* Tree sum: at each step, threads whose index has the low `mask`
             bits clear add in their partner `offset` slots away. The barrier
             is outside the divergent if, so all threads reach it. */
          while (offset < THREAD_NUM)
          {
              if ((tid & mask) == 0)
              {
                  shared[tid] += shared[tid + offset];
              }
              offset += offset;
              mask = offset + mask;
              __syncthreads();
          }

          if (tid == 0)
          {
              result[bid] = shared[0];
              time[bid + BLOCK_NUM] = clock();
          }
      }

      /* NOTE(review): the fragments below are alternative reduction bodies
         meant to replace the (tid & mask) while-loop inside the kernel
         above; as pasted they sit at file scope and will not compile. */

      /* Sequential-addressing tree sum: active threads always touch
         contiguous shared[] slots, which avoids the bank conflicts of the
         interleaved scheme. Assumes shared[] already holds the per-thread
         partials and a __syncthreads() was issued before entry -- TODO
         confirm against the enclosing kernel. */
      offset = THREAD_NUM / 2;
      while (offset > 0)
      {
      if (tid< offset)
      {
      shared[tid] += shared[tid + offset];
      }
      offset >>= 1;
      __syncthreads() ;
      }

      /* Fully unrolled variant of the same reduction; the hard-coded step
         sizes (128, 64, ..., 1) assume THREAD_NUM == 256 -- TODO confirm.
         Each step keeps __syncthreads() outside the divergent if, so every
         thread in the block reaches the barrier. */
      if (tid < 128)
      {
      shared[tid] += shared[tid + 128];
      }
      __syncthreads() ;

      if (tid < 64)
      {
      shared[tid] += shared[tid + 64];
      }
      __syncthreads() ;

      if (tid < 32)
      {
      shared[tid] += shared[tid + 32];
      }
      __syncthreads() ;

      if (tid < 16)
      {
      shared[tid] += shared[tid + 16];
      }
      __syncthreads() ;

      if (tid < 8)
      {
      shared[tid] += shared[tid + 8];
      }
      __syncthreads() ;

      if (tid < 4)
      {
      shared[tid] += shared[tid + 4];
      }
      __syncthreads() ;

      if (tid < 2)
      {
      shared[tid] += shared[tid + 2];
      }
      __syncthreads() ;

      if (tid < 1)
      {
      shared[tid] += shared[tid + 1];
      }
      __syncthreads() ;

      Comment

      Working...