author     txh18 <cloudygooseg@gmail.com>    2016-01-20 14:26:54 +0800
committer  txh18 <cloudygooseg@gmail.com>    2016-01-20 14:26:54 +0800
commit     20e0c009ab387107ed1a492dd42b0253ca19ed37 (patch)
tree       c0319067604733a8f7e07282ad328cb00f58feeb
parent     37dec2610c92d03813c4e91ed58791ab60da6646 (diff)
implement the prefixsum_row (row-wise prefix sum) operation for matrices
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu  50
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.c    8
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.h    1
-rw-r--r--  nerv/matrix/generic/cumatrix.c       10
4 files changed, 69 insertions, 0 deletions
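
The new prefixsum_row method fills each row of the destination with running (inclusive) sums of the corresponding source row. For reference, a minimal sequential C sketch of that recurrence (illustrative only; the helper name is made up and not part of the patch):

/* dst[i][j] = src[i][0] + ... + src[i][j]; strides are in elements */
static void prefixsum_row_ref(const float *src, float *dst,
                              int nrow, int ncol, int stride) {
    for (int i = 0; i < nrow; i++) {
        float acc = 0.0f;
        for (int j = 0; j < ncol; j++) {
            acc += src[j + i * stride];
            dst[j + i * stride] = acc;
        }
    }
}

The CUDA code below computes the same result with roughly log2(ncol) parallel passes of prefixsum_row_reduce: each pass adds the element `offset` columns to the left and doubles `offset`, ping-ponging between two temporary buffers before the last pass writes into the destination.
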
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index 2b696d5..995059c 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -376,6 +376,22 @@ __global__ void cudak_(copy_rows_by_colidx)(const MATRIX_ELEM *a, MATRIX_ELEM *b
b[j + i * stride] = a[j + k * stride];
}
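+/* One step of a Hillis-Steele scan along each row: the element at column j
+ * adds the element `offset` columns to its left, or is copied unchanged when
+ * j < offset. Repeating with offset = 1, 2, 4, ... yields an inclusive
+ * row-wise prefix sum. */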
+__global__ void cudak_(prefixsum_row_reduce)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+ int nrow, int ncol, int stride_a, int stride_b, int offset) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ long idx_a, idx_b;
+ if (i >= nrow || j >= ncol) return;
+ idx_b = j + i * stride_b;
+ idx_a = j + i * stride_a;
+    // add the element `offset` columns to the left; columns with j < offset
+    // have no left neighbour at that distance and are copied unchanged
+ if (j >= offset)
+ b[idx_b] = a[idx_a] + a[idx_a - offset];
+ else
+ b[idx_b] = a[idx_a];
+}
+
+
extern "C" {
#include "../cukernel.h"
@@ -737,6 +753,40 @@ extern "C" {
cudaStreamSynchronize(0);
}
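+    /* Row-wise inclusive prefix sum of a into b: apply prefixsum_row_reduce
+     * with a doubling offset, ping-ponging between two pitched scratch
+     * buffers, and let the final pass write directly into b. */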
+ void cudak_(cuda_prefixsum_row)(const Matrix *a, Matrix *b) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+ CEIL_DIV(b->nrow, threadsPerBlock.y));
+
+ MATRIX_ELEM *tmp[2];
+ size_t tmp_stride[2];
+ cudaMallocPitch(tmp, tmp_stride + 0, a->ncol * sizeof(MATRIX_ELEM), a->nrow);
+ cudaMallocPitch(tmp + 1, tmp_stride + 1, a->ncol * sizeof(MATRIX_ELEM), a->nrow);
+
+ int offset = 1;
+ cudak_(prefixsum_row_reduce)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(a), tmp[0], b->nrow, b->ncol,
+ a->stride / sizeof(MATRIX_ELEM), tmp_stride[0] / sizeof(MATRIX_ELEM), offset);
+ int pin = 0, pout = 1;
+
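+        /* ping-pong between the two temporaries, doubling the offset each
+         * pass until it exceeds ncol / 2 */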
+        for (offset = 2; offset <= a->ncol / 2; offset *= 2) {
+ cudak_(prefixsum_row_reduce)<<<numBlocks, threadsPerBlock>>> \
+ (tmp[pin], tmp[pout], b->nrow, b->ncol,
+ tmp_stride[pin] / sizeof(MATRIX_ELEM), tmp_stride[pout] / sizeof(MATRIX_ELEM), offset);
+ pin = 1 - pin;
+ pout = 1 - pout;
+ }
+
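+        /* final pass writes the result into b; if offset already spans the
+         * whole row this is just a copy */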
+ cudak_(prefixsum_row_reduce)<<<numBlocks, threadsPerBlock>>> \
+ (tmp[pin], MATRIX_ELEM_PTR(b), b->nrow, b->ncol,
+ tmp_stride[pin] / sizeof(MATRIX_ELEM), b->stride / sizeof(MATRIX_ELEM), offset);
+
+ cudaFree(tmp[0]);
+ cudaFree(tmp[1]);
+
+ cudaStreamSynchronize(0);
+ }
+
void cudak_(cuda_decompress)(const Matrix *a, Matrix *b) {
dim3 threadsPerBlock(1, CUDA_THREADS_NN);
dim3 numBlocks(1, CEIL_DIV(a->nrow, threadsPerBlock.y));
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 7582725..dafadb2 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -486,6 +486,14 @@ void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
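+/* fill a with the row-wise inclusive prefix sums of b:
+ * a[i][j] = b[i][0] + b[i][1] + ... + b[i][j] */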
+void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b, Status *status) {
+ CHECK_SAME_DIMENSION(a, b, status);
+ PROFILE_START
+ cudak_(cuda_prefixsum_row)(b, a);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
static void cuda_matrix_(free)(MATRIX_ELEM *ptr, Status *status) {
CUDA_SAFE_SYNC_CALL(cudaFree(ptr), status);
NERV_SET_STATUS(status, NERV_NORMAL, 0);
diff --git a/nerv/lib/matrix/generic/cumatrix.h b/nerv/lib/matrix/generic/cumatrix.h
index e82dccd..41e9e35 100644
--- a/nerv/lib/matrix/generic/cumatrix.h
+++ b/nerv/lib/matrix/generic/cumatrix.h
@@ -61,3 +61,4 @@ void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
Status *status);
void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
Status *status);
+void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b, Status *status);
diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c
index edd7b0a..29fa7b1 100644
--- a/nerv/matrix/generic/cumatrix.c
+++ b/nerv/matrix/generic/cumatrix.c
@@ -52,6 +52,15 @@ static int nerv_matrix_(lua_sigmoid)(lua_State *L) {
return 0;
}
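+/* Lua: a:prefixsum_row(b) fills matrix a with the row-wise inclusive prefix
+ * sums of matrix b */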
+static int nerv_matrix_(lua_prefixsum_row)(lua_State *L) {
+ Status status;
+ Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+ Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
+ nerv_matrix_(prefixsum_row)(a, b, &status);
+ NERV_LUA_CHECK_STATUS(L, status);
+ return 0;
+}
+
static int nerv_matrix_(lua_sigmoid_grad)(lua_State *L) {
Status status;
Matrix *nerr = luaT_checkudata(L, 1, nerv_matrix_(tname));
@@ -407,6 +416,7 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"rearrange_frm", nerv_matrix_(lua_rearrange_frm)},
{"scale_rows_by_row", nerv_matrix_(lua_scale_rows_by_row)},
{"scale_rows_by_col", nerv_matrix_(lua_scale_rows_by_col)},
+ {"prefixsum_row", nerv_matrix_(lua_prefixsum_row)},
#ifdef __NERV_FUTURE_CUDA_7
{"update_select_rows_by_rowidx", nerv_matrix_(lua_update_select_rows_by_rowidx)},
{"update_select_rows_by_colidx", nerv_matrix_(lua_update_select_rows_by_colidx)},