author     Determinant <[email protected]>    2015-05-18 19:29:37 +0800
committer  Determinant <[email protected]>    2015-05-18 19:29:37 +0800
commit     23fd2694723ab3f2203e6cd040c5e6633cb989c7
tree       407e5b1f2826df831336fbb8f1bfd69f8ca6e7c5
parent     0f953414dfdbd7abb7b867ce0c3f9390551c1083
add rowsum for cumatrix
-rw-r--r--   Makefile                 |  4
-rw-r--r--   cumatrix_example.lua     | 29
-rw-r--r--   matrix/cukernel.cu       | 62
-rw-r--r--   matrix/cukernel.h        |  1
-rw-r--r--   matrix/cumatrix.c        | 16
-rw-r--r--   matrix/generic/matrix.h  |  2
-rw-r--r--   matrix/matrix.c          |  3
7 files changed, 81 insertions, 36 deletions
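
The new rowsum path reduces each row in two passes of the block_sum kernel further down in this patch: the first pass folds every row into CEIL_DIV(ncol, 256) partial sums, and the second pass folds those partials into a single value per row. As a plain-C reference for what the GPU code is meant to compute (rowsum_ref and elem_stride are illustrative names, not part of this patch), assuming the same byte-stride layout as the Matrix struct:

    #include <stddef.h>

    /* Hypothetical CPU reference: sum each row of a pitched, row-major
     * float matrix whose stride is given in bytes, matching the Matrix
     * struct convention the kernels use. */
    static void rowsum_ref(const float *data, size_t stride,
                           int nrow, int ncol, float *out /* nrow sums */)
    {
        size_t elem_stride = stride / sizeof(float);
        for (int i = 0; i < nrow; i++) {
            float s = 0.0f;
            for (int j = 0; j < ncol; j++)
                s += data[i * elem_stride + j];
            out[i] = s;
        }
    }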
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,7 @@ CUDA_BASE := /usr/local/cuda-6.5
 CUDA_INCLUDE := -I $(CUDA_BASE)/include/
 INCLUDE += $(CUDA_INCLUDE)
 LDFLAGS := -L$(CUDA_BASE)/lib64/ -Wl,-rpath=$(CUDA_BASE)/lib64/ -lcudart -lcublas
-CFLAGS :=
+CFLAGS := -Wall -Wextra
 OBJ_DIR := build/objs
 LUA_DIR := build/lua
 NVCC := $(CUDA_BASE)/bin/nvcc
@@ -30,7 +30,7 @@ $(OBJ_DIR)/%.o: %.c
 $(OBJ_DIR)/matrix/%.o: matrix/%.c
 	gcc -c -o $@ $< $(INCLUDE) -fPIC $(CFLAGS)
 $(OBJ_DIR)/matrix/cukernel.o: matrix/cukernel.cu
-	$(NVCC) -c -o $@ $< -Xcompiler -fPIC $(INCLUDE) $(CFLAGS)
+	$(NVCC) -c -o $@ $< -Xcompiler -fPIC $(INCLUDE)
 $(LUA_DIR)/%.lua: %.lua
 	cp $< $@
 $(OBJ_DIR)/luaT.o:
diff --git a/cumatrix_example.lua b/cumatrix_example.lua
index ce11eea..88b5912 100644
--- a/cumatrix_example.lua
+++ b/cumatrix_example.lua
@@ -1,28 +1,15 @@
-m = 2
-n = 3
+m = 600
+n = 600
 t = nerv.FloatCuMatrix(m, n)
 t2 = nerv.FloatCuMatrix(m, n)
-print(t)
+-- print(t)
 a = t[1]
 for i = 0, m - 1 do
+    tt = t[i]
+    tt2 = t2[i]
     for j = 0, n - 1 do
-        t[i][j] = i + j
-        t2[i][j] = t[i][j]
+        tt[j] = i + j
+        tt2[j] = t[i][j]
     end
 end
-print(a)
-print(t)
-print(t2)
-print(t + t2)
-d = nerv.FloatCuMatrix(3, 3)
-for i = 0, 2 do
-    for j = 0, 2 do
-        d[i][j] = 0
-    end
-end
-d[0][0] = 1
-d[1][1] = 2
-d[2][2] = 3
-print(d)
-print(t * d)
-print(t:sigmoid())
+-- print(t:rowsum())
diff --git a/matrix/cukernel.cu b/matrix/cukernel.cu
index 91e7e35..d6d7997 100644
--- a/matrix/cukernel.cu
+++ b/matrix/cukernel.cu
@@ -1,5 +1,9 @@
+#include <assert.h>
 #include "generic/matrix.h"
+#include <stdio.h>
+#include "cuda.h"
 #define CUDA_THREADS_N 16
+#define CUDA_THREADS_NN (16 * 16)
 #define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
 __global__ void sigmoid(const float *a, float *b,
                         int nrow, int ncol, int stride) {
@@ -11,11 +15,55 @@ __global__ void sigmoid(const float *a, float *b,
     b[idx] = 1.0 / (1.0 + exp(-a[idx]));
 }
 
-extern "C" void cuda_sigmoid(const Matrix *a, Matrix *b) {
-    dim3 threadsPerBlock(CUDA_THREADS_N,
-                         CUDA_THREADS_N);
-    dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
-                   CEIL_DIV(b->nrow, threadsPerBlock.y));
-    sigmoid<<<numBlocks, threadsPerBlock>>>(a->data.f, b->data.f, b->nrow, b->ncol,
-                                            b->stride / sizeof(float));
+__global__ void block_sum(const float *input, float *output,
+                          const int istride, const int ostride,
+                          const int n) {
+    extern __shared__ float arr[];
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    arr[threadIdx.x] = j < n ? input[j + istride * blockIdx.y] : 0;
+    __syncthreads();
+    for (int offset = blockDim.x >> 1; offset; offset >>= 1)
+    {
+        if (threadIdx.x < offset)
+            arr[threadIdx.x] += arr[threadIdx.x + offset];
+        __syncthreads();
+    }
+    if (threadIdx.x == 0)
+    {
+        /* printf("bx: %d by: %d arr: %f\n", blockIdx.x, blockIdx.y, arr[0]); */
+        output[blockIdx.x + ostride * blockIdx.y] = arr[0];
+    }
+}
+
+extern "C" {
+    void cuda_sigmoid(const Matrix *a, Matrix *b) {
+        dim3 threadsPerBlock(CUDA_THREADS_N,
+                             CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+                       CEIL_DIV(b->nrow, threadsPerBlock.y));
+        sigmoid<<<numBlocks, threadsPerBlock>>>(a->data.f, b->data.f, b->nrow, b->ncol,
+                                                b->stride / sizeof(float));
+    }
+
+    void cuda_rowsum(const Matrix *a, Matrix *b) {
+        dim3 block(CUDA_THREADS_NN, 1);
+        int ncol = a->ncol;
+        int blocks_per_row = CEIL_DIV(ncol, block.x);
+        dim3 grid(blocks_per_row, a->nrow);
+        float *res;
+        size_t stride;
+        cudaMallocPitch(&res, &stride, blocks_per_row * sizeof(float), a->nrow);
+        block_sum<<<grid, block, block.x * sizeof(float)>>> \
+            (a->data.f, res,
+             a->stride / sizeof(float), stride / sizeof(float),
+             ncol);
+        ncol = blocks_per_row;
+        assert(ncol <= block.x);
+        grid.x = 1;
+        block_sum<<<grid, block, block.x * sizeof(float)>>> \
+            (res, b->data.f,
+             stride / sizeof(float), b->stride / sizeof(float),
+             ncol);
+        cudaFree(res);
+    }
 }
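
block_sum above is a shared-memory tree reduction: each block loads up to blockDim.x elements of one row, halves the number of active threads at every step, and thread 0 writes the block's partial sum. The halving loop assumes blockDim.x is a power of two (true for CUDA_THREADS_NN = 256), and the second launch in cuda_rowsum relies on blocks_per_row not exceeding block.x, which the assert guards. A standalone sketch of the same two-pass launch pattern on a contiguous matrix, with error checks added; this is an illustration only, not code from the patch, and every name in it is made up:

    /* Sketch: two-pass row sum on a contiguous nrow x ncol matrix. */
    #include <cstdio>
    #include <cuda_runtime.h>

    #define THREADS 256
    #define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))

    __global__ void row_block_sum(const float *in, float *out,
                                  int istride, int ostride, int n) {
        extern __shared__ float arr[];
        int j = blockIdx.x * blockDim.x + threadIdx.x;
        arr[threadIdx.x] = j < n ? in[j + istride * blockIdx.y] : 0.0f;
        __syncthreads();
        for (int off = blockDim.x >> 1; off; off >>= 1) { /* power-of-two blockDim */
            if (threadIdx.x < off)
                arr[threadIdx.x] += arr[threadIdx.x + off];
            __syncthreads();
        }
        if (threadIdx.x == 0)
            out[blockIdx.x + ostride * blockIdx.y] = arr[0];
    }

    int main() {
        const int nrow = 4, ncol = 1000;
        const int bpr = CEIL_DIV(ncol, THREADS); /* blocks per row, must be <= THREADS */
        float *h = new float[nrow * ncol];
        for (int i = 0; i < nrow * ncol; i++) h[i] = 1.0f; /* every row sums to ncol */

        float *d_in, *d_part, *d_out;
        cudaMalloc(&d_in, nrow * ncol * sizeof(float));
        cudaMalloc(&d_part, nrow * bpr * sizeof(float));
        cudaMalloc(&d_out, nrow * sizeof(float));
        cudaMemcpy(d_in, h, nrow * ncol * sizeof(float), cudaMemcpyHostToDevice);

        dim3 block(THREADS, 1);
        /* pass 1: one partial sum per (block, row) */
        row_block_sum<<<dim3(bpr, nrow), block, THREADS * sizeof(float)>>>
            (d_in, d_part, ncol, bpr, ncol);
        /* pass 2: reduce the bpr partials of each row to a single value */
        row_block_sum<<<dim3(1, nrow), block, THREADS * sizeof(float)>>>
            (d_part, d_out, bpr, 1, bpr);
        cudaDeviceSynchronize();
        printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));

        float out[nrow];
        cudaMemcpy(out, d_out, nrow * sizeof(float), cudaMemcpyDeviceToHost);
        for (int i = 0; i < nrow; i++)
            printf("row %d sum = %g (expected %d)\n", i, out[i], ncol);

        cudaFree(d_in); cudaFree(d_part); cudaFree(d_out);
        delete[] h;
        return 0;
    }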
diff --git a/matrix/cukernel.h b/matrix/cukernel.h
index 5b9e3a6..f86a69b 100644
--- a/matrix/cukernel.h
+++ b/matrix/cukernel.h
@@ -1,4 +1,5 @@
 #ifndef NERV_CUKERNEL_H
 #define NERV_CUKERNEL_H
 void cuda_sigmoid(const Matrix *a, Matrix *b);
+void cuda_rowsum(const Matrix *a, Matrix *b);
 #endif
diff --git a/matrix/cumatrix.c b/matrix/cumatrix.c
index 7759ca1..49b7fbf 100644
--- a/matrix/cumatrix.c
+++ b/matrix/cumatrix.c
@@ -9,6 +9,7 @@
 #include "generic/matrix.h"
 #include "cukernel.h"
 #include "cuda.h"
+#include "cuda_runtime.h"
 #include "driver_types.h"
 #include "cublas_v2.h"
 
@@ -65,10 +66,19 @@ static int nerv_float_matrix_(sigmoid)(lua_State *L) {
     return 1;
 }
 
+static int nerv_float_matrix_(rowsum)(lua_State *L) {
+    Matrix *a = luaT_checkudata(L, 1, nerv_float_matrix_(tname));
+    Matrix *b = nerv_float_matrix_(new_)(a->nrow, 1);
+    cuda_rowsum(a, b);
+    luaT_pushudata(L, b, nerv_float_matrix_(tname));
+    return 1;
+}
+
 static const luaL_Reg nerv_float_matrix_(extra_methods)[] = {
     {"__add__", nerv_float_matrix_(add)},
     {"__mul__", nerv_float_matrix_(mul)},
     {"sigmoid", nerv_float_matrix_(sigmoid)},
+    {"rowsum", nerv_float_matrix_(rowsum)},
     {NULL, NULL}
 };
 
@@ -77,13 +87,13 @@ static void cuda_float_init(lua_State *L) {
     cublasCreate(&cublas_handle);
 }
 
-static cuda_float_array_free(float *ptr) {
+static void cuda_float_array_free(float *ptr) {
     cudaFree(ptr);
 }
 
-static cuda_float_array_alloc(float **dptr, long *stride,
+static void cuda_float_array_alloc(float **dptr, size_t *stride,
                               long width, long height) {
-    cudaMallocPitch(dptr, stride, width, height);
+    cudaMallocPitch((void **)dptr, stride, width, height);
 }
 
 static float cuda_float_array_read(float *data, int idx) {
diff --git a/matrix/generic/matrix.h b/matrix/generic/matrix.h
index 655ff3d..264859b 100644
--- a/matrix/generic/matrix.h
+++ b/matrix/generic/matrix.h
@@ -2,7 +2,7 @@
 #define NERV_GENERIC_MATRIX_H
 
 typedef struct Matrix {
-    long stride;              /* size of a row */
+    size_t stride;            /* size of a row */
     long ncol, nrow, nmax;    /* dimension of the matrix */
     union {
         float *f;
diff --git a/matrix/matrix.c b/matrix/matrix.c
index ef311d6..b392f56 100644
--- a/matrix/matrix.c
+++ b/matrix/matrix.c
@@ -10,7 +10,7 @@
 
 const char *nerv_float_matrix_(tname) = "nerv.FloatMatrix";
 
-static void host_float_array_alloc(float **dptr, long *stride,
+static void host_float_array_alloc(float **dptr, size_t *stride,
                                    long width, long height) {
     *dptr = (float *)malloc(width * height);
     *stride = width;
@@ -29,7 +29,6 @@ int nerv_float_matrix_(set_elem)(lua_State *L) {
     Matrix *self = luaT_checkudata(L, 1, nerv_float_matrix_(tname));
     int idx = luaL_checkinteger(L, 2);
     float v = luaL_checknumber(L, 3);
-    long upper = self->nrow * self->ncol;
     if (idx < 0 || idx >= self->nmax)
         nerv_error(L, "index must be within range [0, %d)", self->nmax);
     self->data.f[idx] = v;
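
Several of the remaining hunks exist because the stride field moves from long to size_t: cudaMallocPitch reports its pitch through a size_t*, and that pitch is a byte count, which is why the kernel launches divide it by sizeof(float) before indexing. A minimal standalone sketch of that convention (illustrative only, not code from this repository):

    #include <stdio.h>
    #include <cuda_runtime.h>

    int main() {
        float *d = NULL;
        size_t pitch = 0;   /* bytes per row; may exceed the requested width */
        int nrow = 600, ncol = 600;
        cudaError_t err = cudaMallocPitch((void **)&d, &pitch,
                                          ncol * sizeof(float), nrow);
        if (err != cudaSuccess) {
            fprintf(stderr, "cudaMallocPitch: %s\n", cudaGetErrorString(err));
            return 1;
        }
        /* element (r, c) lives at d[r * (pitch / sizeof(float)) + c] */
        printf("requested %zu bytes per row, pitch %zu bytes (%zu floats)\n",
               ncol * sizeof(float), pitch, pitch / sizeof(float));
        cudaFree(d);
        return 0;
    }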