From e4dedc2992149d245ea65132131253072d3276b8 Mon Sep 17 00:00:00 2001
From: Determinant <ted.sybil@gmail.com>
Date: Thu, 28 May 2015 16:14:21 +0800
Subject: add mul_elem and log_elem; add dimension checking in cumatrix methods

---
 matrix/generic/cukernel.cu | 43 +++++++++++++++++++++++++++++++++++++++++++
 matrix/generic/cumatrix.c  | 27 ++++++++++++++++++++++++---
 2 files changed, 67 insertions(+), 3 deletions(-)

(limited to 'matrix/generic')

diff --git a/matrix/generic/cukernel.cu b/matrix/generic/cukernel.cu
index 517393e..0e3d3cf 100644
--- a/matrix/generic/cukernel.cu
+++ b/matrix/generic/cukernel.cu
@@ -6,6 +6,27 @@
 #define CUDA_THREADS_N 16
 #define CUDA_THREADS_NN (16 * 16)
 #define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
+__global__ void cudak_(log_elem)(const MATRIX_ELEM *a, MATRIX_ELEM *b, /* b[i][j] = log(a[i][j]); one thread per element, 2D launch */
+                                int nrow, int ncol, int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + (long)i * stride; /* widen before multiply: i * stride in plain int overflows for large matrices, defeating the long idx */
+    b[idx] = log(a[idx]);
+}
+
+__global__ void cudak_(mul_elem)(const MATRIX_ELEM *a, const MATRIX_ELEM *b, /* c[i][j] = a[i][j] * b[i][j]; one thread per element */
+                                MATRIX_ELEM *c,
+                                int nrow, int ncol, int stride) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    long idx;
+    if (i >= nrow || j >= ncol) return;
+    idx = j + (long)i * stride; /* widen before multiply to avoid 32-bit overflow of i * stride */
+    c[idx] = a[idx] * b[idx];
+}
+
 __global__ void cudak_(sigmoid)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
                         int nrow, int ncol, int stride) {
     int j = blockIdx.x * blockDim.x + threadIdx.x;
@@ -136,6 +157,28 @@ __global__ void cudak_(fill)(MATRIX_ELEM *a,
 
 extern "C" {
 #include "../cukernel.h"
+    void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b) { /* host wrapper: b = log(a) element-wise; grid sized from b's extent */
+        dim3 threadsPerBlock(CUDA_THREADS_N,
+                CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+                CEIL_DIV(b->nrow, threadsPerBlock.y));
+        cudak_(log_elem)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+             b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM)); /* NOTE(review): single stride taken from b is used for a too — assumes a->stride == b->stride; confirm */
+    }
+
+    void cudak_(cuda_mul_elem)(const Matrix *a, const Matrix *b, /* host wrapper: c = a .* b element-wise; grid sized from b's extent */
+                                Matrix *c) {
+        dim3 threadsPerBlock(CUDA_THREADS_N,
+                CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+                CEIL_DIV(b->nrow, threadsPerBlock.y));
+        cudak_(mul_elem)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+             MATRIX_ELEM_PTR(c),
+             b->nrow, b->ncol, b->stride / sizeof(MATRIX_ELEM)); /* NOTE(review): b's stride used for all three matrices — assumes a, b, c share it; confirm */
+    }
+
     void cudak_(cuda_sigmoid)(const Matrix *a, Matrix *b) {
         dim3 threadsPerBlock(CUDA_THREADS_N,
                 CUDA_THREADS_N);
diff --git a/matrix/generic/cumatrix.c b/matrix/generic/cumatrix.c
index 2deb7a3..ed64bbf 100644
--- a/matrix/generic/cumatrix.c
+++ b/matrix/generic/cumatrix.c
@@ -48,6 +48,7 @@ static int nerv_matrix_(add)(lua_State *L) {
     MATRIX_ELEM alpha = luaL_checknumber(L, 4); /* alpha */
     MATRIX_ELEM beta = luaL_checknumber(L, 5); /* alpha */
     CHECK_SAME_DIMENSION(a, b);
+    CHECK_SAME_DIMENSION(a, c);
     nerv_matrix_(add_)(a, b, c, alpha, beta);
     return 0;
 }
@@ -118,6 +119,7 @@ static int nerv_matrix_(softmax)(lua_State *L) {
     Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname));
     Matrix *max = nerv_matrix_(new_)(a->nrow, 1);
     Matrix *dno = nerv_matrix_(new_)(a->nrow, 1);
+    CHECK_SAME_DIMENSION(a, b);
     cudak_(cuda_rowmax)(a, max);
     cudak_(cuda_softmax_denominator)(a, max, dno);
     cudak_(cuda_softmax_final)(a, max, dno, b);
@@ -230,25 +232,44 @@ static int nerv_matrix_(trans)(lua_State *L) {
     return 1;
 }
 
+static int nerv_matrix_(mul_elem)(lua_State *L) { /* Lua method c:mul_elem(a, b): element-wise c = a .* b; stack: 1=c (dest), 2=a, 3=b */
+    Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
+    Matrix *b = luaT_checkudata(L, 3, nerv_matrix_(tname));
+    Matrix *c = luaT_checkudata(L, 1, nerv_matrix_(tname));
+    CHECK_SAME_DIMENSION(a, b); /* presumably raises a Lua error on mismatch — macro defined elsewhere; verify */
+    CHECK_SAME_DIMENSION(a, c);
+    cudak_(cuda_mul_elem)(a, b, c);
+    return 0;
+}
+
+static int nerv_matrix_(log_elem)(lua_State *L) { /* Lua method b:log_elem(a): element-wise b = log(a); stack: 1=b (dest), 2=a */
+    Matrix *a = luaT_checkudata(L, 2, nerv_matrix_(tname));
+    Matrix *b = luaT_checkudata(L, 1, nerv_matrix_(tname));
+    CHECK_SAME_DIMENSION(a, b); /* presumably raises a Lua error on mismatch — macro defined elsewhere; verify */
+    cudak_(cuda_log_elem)(a, b);
+    return 0;
+}
 
 static const luaL_Reg nerv_matrix_(extra_methods)[] = {
     {"create", nerv_matrix_(create)},
-    {"softmax", nerv_matrix_(softmax)},
     {"colsum", nerv_matrix_(colsum)},
     {"rowsum", nerv_matrix_(rowsum)},
     {"rowmax", nerv_matrix_(rowmax)},
+    {"trans", nerv_matrix_(trans)},
+    /* in-place calc */
     {"copy_fromh", nerv_matrix_(copy_fromh)},
     {"copy_fromd", nerv_matrix_(copy_fromd)},
     {"copy_toh", nerv_matrix_(copy_toh)},
     {"copy_tod", nerv_matrix_(copy_tod)},
-    {"trans", nerv_matrix_(trans)},
-    /* in-place calc */
     {"add", nerv_matrix_(add)},
     {"mul", nerv_matrix_(mul)},
     {"add_row", nerv_matrix_(add_row)},
     {"fill", nerv_matrix_(fill)},
     {"sigmoid", nerv_matrix_(sigmoid)},
     {"sigmoid_grad", nerv_matrix_(sigmoid_grad)},
+    {"softmax", nerv_matrix_(softmax)},
+    {"mul_elem", nerv_matrix_(mul_elem)},
+    {"log_elem", nerv_matrix_(log_elem)},
     {NULL, NULL}
 };
 
-- 
cgit v1.2.3-70-g09d2