author     Determinant <[email protected]>  2015-05-28 16:14:21 +0800
committer  Determinant <[email protected]>  2015-05-28 16:14:21 +0800
commit     e4dedc2992149d245ea65132131253072d3276b8
tree       31055890c6b662df9091dd7272556a69f216de28
parent     c13115662e739b434f1071eb623a41a39d8b4985
add mul_elem and log_elem; add dimension checking in cumatrix methods
-rw-r--r--  Makefile                       |  3
-rw-r--r--  examples/cumatrix_example.lua  | 10
-rw-r--r--  matrix/cukernel.h              |  2
-rw-r--r--  matrix/generic/cukernel.cu     | 43
-rw-r--r--  matrix/generic/cumatrix.c      | 27

5 files changed, 79 insertions(+), 6 deletions(-)
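The two new operations are elementwise maps, and the Lua bindings put the destination first: c:mul_elem(a, b) stores the elementwise product of a and b into c, while b:log_elem(a) stores the elementwise natural logarithm of a into b. As a reference for the semantics only, here is a plain host-side sketch on unpadded row-major arrays (illustrative, not code from this commit; the _ref names are hypothetical):

    /* Reference semantics only (not from this commit): c = a .* b and
     * b = log(a), elementwise, on unpadded row-major arrays. */
    #include <math.h>

    static void mul_elem_ref(const float *a, const float *b, float *c,
                             int nrow, int ncol) {
        for (int i = 0; i < nrow; i++)
            for (int j = 0; j < ncol; j++)
                c[i * ncol + j] = a[i * ncol + j] * b[i * ncol + j];
    }

    static void log_elem_ref(const float *a, float *b, int nrow, int ncol) {
        for (int i = 0; i < nrow; i++)
            for (int j = 0; j < ncol; j++)
                b[i * ncol + j] = logf(a[i * ncol + j]);
    }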
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -41,8 +41,9 @@ $(OBJ_DIR)/luaT.o:
 $(LIBS): $(OBJS)
 	gcc -shared -o $@ $(OBJS) $(LDFLAGS)
 
-$(OBJ_DIR)/matrix/cumatrix.o: matrix/generic/cumatrix.c matrix/generic/matrix.c
+$(OBJ_DIR)/matrix/cumatrix.o: matrix/generic/cumatrix.c matrix/generic/matrix.c matrix/generic/cukernel.cu
 $(OBJ_DIR)/matrix/mmatrix.o: matrix/generic/mmatrix.c matrix/generic/matrix.c
+$(OBJ_DIR)/matrix/cukernel.o: matrix/generic/cukernel.cu
 
 clean:
 	-rm -rf $(OBJ_DIR)
diff --git a/examples/cumatrix_example.lua b/examples/cumatrix_example.lua
index 084dcca..544fc7f 100644
--- a/examples/cumatrix_example.lua
+++ b/examples/cumatrix_example.lua
@@ -1,5 +1,5 @@
-m = 10
-n = 10
+m = 4
+n = 4
 fm = nerv.CuMatrixFloat(m, n)
 dm = nerv.CuMatrixDouble(m, n)
 for i = 0, m - 1 do
@@ -23,3 +23,9 @@ print(fs + fs)
 print(ds + ds)
 print(fs - fs)
 print(ds - ds)
+
+a = fs:create()
+a:mul_elem(fs, fs)
+print(a)
+a:log_elem(fs)
+print(a)
diff --git a/matrix/cukernel.h b/matrix/cukernel.h
index b2b6cb2..232699d 100644
--- a/matrix/cukernel.h
+++ b/matrix/cukernel.h
@@ -1,4 +1,6 @@
 #ifdef NERV_GENERIC_CUKERNEL
+void cudak_(cuda_mul_elem)(const Matrix *a, const Matrix *b, Matrix *c);
+void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b);
 void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b);
 void cudak_(cuda_sigmoid_grad)(const Matrix *output, const Matrix *err, Matrix *nerr);
 void cudak_(cuda_rowsum)(const Matrix *a, Matrix *b);
diff --git a/matrix/generic/cukernel.cu b/matrix/generic/cukernel.cu
index 517393e..0e3d3cf 100644
--- a/matrix/generic/cukernel.cu
+++ b/matrix/generic/cukernel.cu
@@ -6,6 +6,27 @@
 #define CUDA_THREADS_N 16
 #define CUDA_THREADS_NN (16 * 16)
 #define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
+__global__ void cudak_(log_elem)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+                                 int nrow, int ncol, int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + i * stride;
+    b[idx] = log(a[idx]);
+}
+
+__global__ void cudak_(mul_elem)(const MATRIX_ELEM *a, const MATRIX_ELEM *b,
+                                 MATRIX_ELEM *c,
+                                 int nrow, int ncol, int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + i * stride;
+    c[idx] = a[idx] * b[idx];
+}
+
 __global__ void cudak_(sigmoid)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
                                 int nrow, int ncol, int stride) {
     int j = blockIdx.x * blockDim.x + threadIdx.x;
@@ -136,6 +157,28 @@ __global__ void cudak_(fill)(MATRIX_ELEM *a,
 extern "C" {
 #include "../cukernel.h"
+    void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b) {
+        dim3 threadsPerBlock(CUDA_THREADS_N,
+                             CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+                       CEIL_DIV(b->nrow, threadsPerBlock.y));
+        cudak_(log_elem)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+             b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM));
+    }
+
+    void cudak_(cuda_mul_elem)(const Matrix *a, const Matrix *b,
+                               Matrix *c) {
+        dim3 threadsPerBlock(CUDA_THREADS_N,
+                             CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+                       CEIL_DIV(b->nrow, threadsPerBlock.y));
+        cudak_(mul_elem)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+             MATRIX_ELEM_PTR(c),
+             b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM));
+    }
+
     void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b) {
         dim3 threadsPerBlock(CUDA_THREADS_N,
                              CUDA_THREADS_N);
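A note on the indexing above: each kernel computes idx = j + i * stride, where stride is the row pitch measured in elements, and the host wrappers pass b->stride / sizeof(MATRIX_ELEM) because the matrix records its pitch in bytes. The following self-contained CUDA sketch (illustrative only, not part of the commit) exercises the same one-thread-per-element launch geometry and pitch handling against cudaMallocPitch memory:

    #include <cstdio>
    #include <cuda_runtime.h>

    #define CUDA_THREADS_N 16
    #define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))

    /* One thread per element over a 2D grid; stride is the row pitch in
     * elements, so rows may carry padding at the end. */
    __global__ void mul_elem(const float *a, const float *b, float *c,
                             int nrow, int ncol, int stride) {
        int j = blockIdx.x * blockDim.x + threadIdx.x;
        int i = blockIdx.y * blockDim.y + threadIdx.y;
        if (i >= nrow || j >= ncol) return;
        long idx = j + (long)i * stride;
        c[idx] = a[idx] * b[idx];
    }

    int main(void) {
        const int nrow = 4, ncol = 4;
        float h[nrow][ncol], out[nrow][ncol];
        for (int i = 0; i < nrow; i++)
            for (int j = 0; j < ncol; j++)
                h[i][j] = i + j + 1;

        /* pitched rows: pitch >= ncol * sizeof(float); identical for all
         * three allocations since the requested widths are the same */
        float *a, *b, *c;
        size_t pitch;
        cudaMallocPitch((void **)&a, &pitch, ncol * sizeof(float), nrow);
        cudaMallocPitch((void **)&b, &pitch, ncol * sizeof(float), nrow);
        cudaMallocPitch((void **)&c, &pitch, ncol * sizeof(float), nrow);
        cudaMemcpy2D(a, pitch, h, ncol * sizeof(float),
                     ncol * sizeof(float), nrow, cudaMemcpyHostToDevice);
        cudaMemcpy2D(b, pitch, h, ncol * sizeof(float),
                     ncol * sizeof(float), nrow, cudaMemcpyHostToDevice);

        dim3 block(CUDA_THREADS_N, CUDA_THREADS_N);
        dim3 grid(CEIL_DIV(ncol, block.x), CEIL_DIV(nrow, block.y));
        /* byte pitch -> element stride, as in the commit's wrappers */
        mul_elem<<<grid, block>>>(a, b, c, nrow, ncol,
                                  (int)(pitch / sizeof(float)));

        cudaMemcpy2D(out, ncol * sizeof(float), c, pitch,
                     ncol * sizeof(float), nrow, cudaMemcpyDeviceToHost);
        for (int i = 0; i < nrow; i++, puts(""))
            for (int j = 0; j < ncol; j++)
                printf("%6.1f", out[i][j]);
        cudaFree(a); cudaFree(b); cudaFree(c);
        return 0;
    }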
diff --git a/matrix/generic/cumatrix.c b/matrix/generic/cumatrix.c
index 2deb7a3..ed64bbf 100644
--- a/matrix/generic/cumatrix.c
+++ b/matrix/generic/cumatrix.c
@@ -48,6 +48,7 @@ static int nerv_matrix_(add)(lua_State *L) {
     MATRIX_ELEM alpha = luaL_checknumber(L, 4); /* alpha */
     MATRIX_ELEM beta = luaL_checknumber(L, 5); /* beta */
     CHECK_SAME_DIMENSION(a, b);
+    CHECK_SAME_DIMENSION(a, c);
     nerv_matrix_(add_)(a, b, c, alpha, beta);
     return 0;
 }
@@ -118,6 +119,7 @@ static int nerv_matrix_(softmax)(lua_State *L) {
     Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname));
     Matrix *max = nerv_matrix_(new_)(a->nrow, 1);
     Matrix *dno = nerv_matrix_(new_)(a->nrow, 1);
+    CHECK_SAME_DIMENSION(a, b);
     cudak_(cuda_rowmax)(a, max);
     cudak_(cuda_softmax_denominator)(a, max, dno);
     cudak_(cuda_softmax_final)(a, max, dno, b);
@@ -230,25 +232,44 @@ static int nerv_matrix_(trans)(lua_State *L) {
     return 1;
 }
 
+static int nerv_matrix_(mul_elem)(lua_State *L) {
+    Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
+    Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname));
+    Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname));
+    CHECK_SAME_DIMENSION(a, b);
+    CHECK_SAME_DIMENSION(a, c);
+    cudak_(cuda_mul_elem)(a, b, c);
+    return 0;
+}
+
+static int nerv_matrix_(log_elem)(lua_State *L) {
+    Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
+    Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname));
+    CHECK_SAME_DIMENSION(a, b);
+    cudak_(cuda_log_elem)(a, b);
+    return 0;
+}
 
 static const luaL_Reg nerv_matrix_(extra_methods)[] = {
     {"create", nerv_matrix_(create)},
-    {"softmax", nerv_matrix_(softmax)},
     {"colsum", nerv_matrix_(colsum)},
     {"rowsum", nerv_matrix_(rowsum)},
     {"rowmax", nerv_matrix_(rowmax)},
+    {"trans", nerv_matrix_(trans)},
+    /* in-place calc */
     {"copy_fromh", nerv_matrix_(copy_fromh)},
     {"copy_fromd", nerv_matrix_(copy_fromd)},
     {"copy_toh", nerv_matrix_(copy_toh)},
     {"copy_tod", nerv_matrix_(copy_tod)},
-    {"trans", nerv_matrix_(trans)},
-    /* in-place calc */
     {"add", nerv_matrix_(add)},
     {"mul", nerv_matrix_(mul)},
     {"add_row", nerv_matrix_(add_row)},
     {"fill", nerv_matrix_(fill)},
     {"sigmoid", nerv_matrix_(sigmoid)},
     {"sigmoid_grad", nerv_matrix_(sigmoid_grad)},
+    {"softmax", nerv_matrix_(softmax)},
+    {"mul_elem", nerv_matrix_(mul_elem)},
+    {"log_elem", nerv_matrix_(log_elem)},
     {NULL, NULL}
 };
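CHECK_SAME_DIMENSION, used by all of the new checks, is defined elsewhere in the tree; the bindings only rely on it comparing row and column counts and raising a Lua error on mismatch. A hypothetical sketch of such a guard (the real macro may differ):

    /* Hypothetical sketch, not nerv's actual definition: assumes Matrix
     * exposes nrow/ncol and the enclosing lua_CFunction has its state
     * in a variable named L. */
    #include <lauxlib.h>

    #define CHECK_SAME_DIMENSION(a, b) \
        do { \
            if ((a)->nrow != (b)->nrow || (a)->ncol != (b)->ncol) \
                return luaL_error(L, "matrices are of different dimension"); \
        } while (0)

With guards of this kind in place, a call like a:mul_elem(fs, fs) in the updated example fails fast with a Lua error instead of launching a kernel over mismatched shapes.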