| author | Determinant <[email protected]> | 2015-05-18 19:29:37 +0800 |
|---|---|---|
| committer | Determinant <[email protected]> | 2015-05-18 19:29:37 +0800 |
| commit | 23fd2694723ab3f2203e6cd040c5e6633cb989c7 (patch) | |
| tree | 407e5b1f2826df831336fbb8f1bfd69f8ca6e7c5 /matrix/cukernel.cu | |
| parent | 0f953414dfdbd7abb7b867ce0c3f9390551c1083 (diff) | |
add rowsum for cumatrix
Diffstat (limited to 'matrix/cukernel.cu')
-rw-r--r-- | matrix/cukernel.cu | 62 |
1 file changed, 55 insertions, 7 deletions
```diff
diff --git a/matrix/cukernel.cu b/matrix/cukernel.cu
index 91e7e35..d6d7997 100644
--- a/matrix/cukernel.cu
+++ b/matrix/cukernel.cu
@@ -1,5 +1,9 @@
+#include <assert.h>
 #include "generic/matrix.h"
+#include <stdio.h>
+#include "cuda.h"
 #define CUDA_THREADS_N 16
+#define CUDA_THREADS_NN (16 * 16)
 #define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
 __global__ void sigmoid(const float *a, float *b,
                         int nrow, int ncol, int stride) {
@@ -11,11 +15,55 @@ __global__ void sigmoid(const float *a, float *b,
     b[idx] = 1.0 / (1.0 + exp(-a[idx]));
 }
 
-extern "C" void cuda_sigmoid(const Matrix *a, Matrix *b) {
-    dim3 threadsPerBlock(CUDA_THREADS_N,
-                         CUDA_THREADS_N);
-    dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
-                   CEIL_DIV(b->nrow, threadsPerBlock.y));
-    sigmoid<<<numBlocks, threadsPerBlock>>>(a->data.f, b->data.f, b->nrow, b->ncol,
-                                            b->stride / sizeof(float));
+__global__ void block_sum(const float *input, float *output,
+                          const int istride, const int ostride,
+                          const int n) {
+    extern __shared__ float arr[];
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    arr[threadIdx.x] = j < n ? input[j + istride * blockIdx.y] : 0;
+    __syncthreads();
+    for (int offset = blockDim.x >> 1; offset; offset >>= 1)
+    {
+        if (threadIdx.x < offset)
+            arr[threadIdx.x] += arr[threadIdx.x + offset];
+        __syncthreads();
+    }
+    if (threadIdx.x == 0)
+    {
+        /* printf("bx: %d by: %d arr: %f\n", blockIdx.x, blockIdx.y, arr[0]); */
+        output[blockIdx.x + ostride * blockIdx.y] = arr[0];
+    }
+}
+
+extern "C" {
+    void cuda_sigmoid(const Matrix *a, Matrix *b) {
+        dim3 threadsPerBlock(CUDA_THREADS_N,
+                             CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+                       CEIL_DIV(b->nrow, threadsPerBlock.y));
+        sigmoid<<<numBlocks, threadsPerBlock>>>(a->data.f, b->data.f, b->nrow, b->ncol,
+                                                b->stride / sizeof(float));
+    }
+
+    void cuda_rowsum(const Matrix *a, Matrix *b) {
+        dim3 block(CUDA_THREADS_NN, 1);
+        int ncol = a->ncol;
+        int blocks_per_row = CEIL_DIV(ncol, block.x);
+        dim3 grid(blocks_per_row, a->nrow);
+        float *res;
+        size_t stride;
+        cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(float), a->nrow);
+        block_sum<<<grid, block, block.x * sizeof(float)>>> \
+            (a->data.f, res,
+             a->stride / sizeof(float), stride / sizeof(float),
+             ncol);
+        ncol = blocks_per_row;
+        assert(ncol <= block.x);
+        grid.x = 1;
+        block_sum<<<grid, block, block.x * sizeof(float)>>> \
+            (res, b->data.f,
+             stride / sizeof(float), b->stride / sizeof(float),
+             ncol);
+        cudaFree(res);
+    }
 }
```
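For context on the new `cuda_rowsum` path: it sums each row in two passes of the same `block_sum` kernel. The first pass produces one partial sum per 256-column block of each row into a pitched temporary buffer; the second pass (guarded by `assert(ncol <= block.x)`) reduces those partials with a single block per row. The sketch below is not part of the commit; it is a minimal standalone test harness (the names `row_block_sum` and `main` and the test sizes are illustrative assumptions) that reuses the same shared-memory tree-reduction pattern and checks the GPU row sums against a CPU reference.

```cuda
/* Standalone sketch of the two-pass row-sum reduction; assumes dense,
 * contiguous NROW x NCOL storage rather than the repo's Matrix struct. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
#define THREADS 256   /* must be a power of two for the halving reduction */

__global__ void row_block_sum(const float *in, float *out,
                              int istride, int ostride, int n) {
    extern __shared__ float arr[];
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    /* each block loads up to blockDim.x elements of row blockIdx.y */
    arr[threadIdx.x] = j < n ? in[j + istride * blockIdx.y] : 0.0f;
    __syncthreads();
    /* shared-memory tree reduction; requires blockDim.x to be a power of two */
    for (int offset = blockDim.x >> 1; offset; offset >>= 1) {
        if (threadIdx.x < offset)
            arr[threadIdx.x] += arr[threadIdx.x + offset];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        out[blockIdx.x + ostride * blockIdx.y] = arr[0];
}

int main(void) {
    const int nrow = 7, ncol = 1000;          /* arbitrary test size */
    const int nblk = CEIL_DIV(ncol, THREADS); /* partial sums per row */
    assert(nblk <= THREADS);                  /* second pass fits in one block */

    float *h = (float *)malloc(nrow * ncol * sizeof(float));
    for (int i = 0; i < nrow * ncol; i++)
        h[i] = (float)(i % 10);

    float *d_in, *d_mid, *d_out;
    cudaMalloc(&d_in, nrow * ncol * sizeof(float));
    cudaMalloc(&d_mid, nrow * nblk * sizeof(float));
    cudaMalloc(&d_out, nrow * sizeof(float));
    cudaMemcpy(d_in, h, nrow * ncol * sizeof(float), cudaMemcpyHostToDevice);

    /* pass 1: one partial sum per (block, row) */
    row_block_sum<<<dim3(nblk, nrow), THREADS, THREADS * sizeof(float)>>>
        (d_in, d_mid, ncol, nblk, ncol);
    /* pass 2: reduce each row's partial sums with a single block */
    row_block_sum<<<dim3(1, nrow), THREADS, THREADS * sizeof(float)>>>
        (d_mid, d_out, nblk, 1, nblk);

    float out[nrow];
    cudaMemcpy(out, d_out, nrow * sizeof(float), cudaMemcpyDeviceToHost);

    for (int r = 0; r < nrow; r++) {
        float ref = 0.0f;
        for (int c = 0; c < ncol; c++)
            ref += h[r * ncol + c];
        printf("row %d: gpu %.1f cpu %.1f\n", r, out[r], ref);
    }

    cudaFree(d_in); cudaFree(d_mid); cudaFree(d_out); free(h);
    return 0;
}
```

Two notes on the design as committed: the halving loop (`offset >>= 1`) only produces a complete sum when the block size is a power of two, which is why `CUDA_THREADS_NN` is defined as `(16 * 16)` = 256 rather than an arbitrary count; and the `assert(ncol <= block.x)` after the first pass guarantees that one 256-thread block can absorb all of a row's partial sums, which bounds the supported width to 256 * 256 columns unless a third pass is added.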