 nerv/examples/lmptb/lmptb/layer/select_linear.lua |  2 +-
 nerv/lib/matrix/generic/cukernel.cu               | 25 +++++++++++++++++++++++++
 nerv/lib/matrix/generic/cumatrix.c                | 18 ++++++++++++++++++
 nerv/lib/matrix/generic/cumatrix.h                |  2 ++
 nerv/matrix/generic/cumatrix.c                    | 13 +++++++++++++
 5 files changed, 59 insertions(+), 1 deletion(-)
diff --git a/nerv/examples/lmptb/lmptb/layer/select_linear.lua b/nerv/examples/lmptb/lmptb/layer/select_linear.lua
index 4214533..3eba31e 100644
--- a/nerv/examples/lmptb/lmptb/layer/select_linear.lua
+++ b/nerv/examples/lmptb/lmptb/layer/select_linear.lua
@@ -49,7 +49,7 @@ function SL:propagate(input, output)
-- output[1][i]:fill(0)
-- end
--end
- output[1]:copy_rows_fromd_by_idx(self.ltp.trans, input[1]:trans())
+ output[1]:copy_rows_fromd_by_colidx(self.ltp.trans, input[1])
end
function SL:back_propagate(bp_err, next_bp_err, input, output)
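At the layer level the only change is that input[1], a single column of word ids (one per batch row), is now handed to the copy untouched; the device-side transpose that copy_rows_fromd_by_idx required is gone. The difference is purely in how the two kernels step through the index matrix, sketched below in plain C. This is illustrative code, not part of the commit; the old kernel's row-vector layout is inferred from its launcher, which passes no index stride.

#include <math.h>

/* copy_rows_fromd_by_idx: indices form one contiguous row, so a column of
 * word ids has to be transposed before the call */
static inline int idx_from_row(const float *idx, int i)
{
    return (int) lrintf(idx[i]);
}

/* copy_rows_fromd_by_colidx: indices form one column; consecutive ids sit
 * idx_stride elements apart, so the column can be used as-is */
static inline int idx_from_col(const float *idx, int i, int idx_stride)
{
    return (int) lrintf(idx[i * idx_stride]);
}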
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index 9244783..2b696d5 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -363,6 +363,20 @@ __global__ void cudak_(copy_rows_by_idx)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
b[j + i * stride] = a[j + k * stride];
}
+__global__ void cudak_(copy_rows_by_colidx)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+ const MATRIX_ELEM *idx,
+ int nrow, int ncol, int a_nrow, int stride, int idx_stride) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ if (i >= nrow || j >= ncol) return;
+ int k = lrintf(idx[i * idx_stride]);
+ if (k < 0 || k >= a_nrow) {
+ printf("error in kernel copy_rows_by_colidx k(%d) out of range\n", k);
+ }
+ b[j + i * stride] = a[j + k * stride];
+}
+
+
extern "C" {
#include "../cukernel.h"
void cudak_(cuda_log_elem)(const Matrix *a, Matrix *b) {
@@ -744,5 +758,16 @@ extern "C" {
b->nrow, b->ncol, a->nrow, b->stride / sizeof(MATRIX_ELEM));
cudaStreamSynchronize(0);
}
+
+ void cudak_(cuda_copy_rows_by_colidx)(const Matrix *a, Matrix *b,
+ const Matrix *idx, int idx_begin) {
+ dim3 threadsPerBlock(CUDA_THREADS_NN, 1);
+ dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x), b->nrow);
+ cudak_(copy_rows_by_colidx)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+ MATRIX_ELEM_PTR(idx) + idx_begin,
+ b->nrow, b->ncol, a->nrow, b->stride / sizeof(MATRIX_ELEM), idx->stride / sizeof(MATRIX_ELEM));
+ cudaStreamSynchronize(0);
+ }
}
#endif
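For reference, the new kernel is a row gather driven by a column of floating-point indices: destination row i receives the source row selected by lrintf of the i-th index. Below is a minimal host-side sketch of the same access pattern over plain row-major C arrays; it is illustrative only and not part of the commit (nerv's Matrix struct, padded strides and Status handling are omitted, and where the kernel only prints a warning for an out-of-range index, this sketch skips the row).

#include <math.h>
#include <stdio.h>

/* b[i][:] = a[idx[i]][:], indices read down a column with step idx_stride */
static void copy_rows_by_colidx_ref(const float *a, float *b, const float *idx,
                                    int nrow, int ncol, int a_nrow,
                                    int stride, int idx_stride)
{
    for (int i = 0; i < nrow; i++) {
        int k = (int) lrintf(idx[i * idx_stride]); /* index for destination row i */
        if (k < 0 || k >= a_nrow) {
            fprintf(stderr, "copy_rows_by_colidx: index %d out of range\n", k);
            continue;
        }
        for (int j = 0; j < ncol; j++)
            b[j + i * stride] = a[j + k * stride];
    }
}

int main(void)
{
    /* gather rows {2, 0, 1} of a 3x4 source matrix */
    float src[3][4] = { {0, 1, 2, 3}, {10, 11, 12, 13}, {20, 21, 22, 23} };
    float dst[3][4] = { {0} };
    float ids[3]    = { 2.0f, 0.0f, 1.0f };   /* a 3x1 index column, stride 1 */

    copy_rows_by_colidx_ref(&src[0][0], &dst[0][0], ids, 3, 4, 3, 4, 1);

    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 4; j++)
            printf("%6.1f", dst[i][j]);
        printf("\n");
    }
    return 0;   /* prints rows 2, 0 and 1 of src */
}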
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 31d6b06..7582725 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -393,6 +393,24 @@ void nerv_matrix_(copy_rows_fromd_by_idx)(Matrix *a, const Matrix *b,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+void nerv_matrix_(copy_rows_fromd_by_colidx)(Matrix *a, const Matrix *b,
+ const Matrix *idx, int b_begin, Status *status) {
+ long nrow = a->nrow;
+ if (!(0 <= b_begin && b_begin + nrow <= idx->nrow))
+ NERV_EXIT_STATUS(status, MAT_INVALID_COPY_INTERVAL, 0);
+ if (idx->ncol != 1)
+ NERV_EXIT_STATUS(status, MAT_IDX_VECTOR_EXP, 0);
+ if (a->ncol != b->ncol) {
+ printf("%d %d\n", a->ncol, b->ncol);
+ NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
+ }
+ PROFILE_START
+ cudak_(cuda_copy_rows_by_colidx)(b, a, idx, b_begin);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
+
#ifdef __NERV_FUTURE_CUDA_7
void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status) {
long nrow = a->nrow;
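The checks above encode the shape contract of the new method. Note that at this level a is the destination and b the source, while the kernel's parameter names are the other way around, which is why the launcher call swaps them. A small sketch of the contract as plain assertions follows; the helper name and raw-dimension signature are hypothetical, for illustration only.

#include <assert.h>

/* Shape contract of copy_rows_fromd_by_colidx:
 *   dst : nrow x ncol        one gathered row per index
 *   src : src_nrow x ncol    rows being gathered
 *   idx : idx_nrow x 1       a single column of row indices into src
 * Rows b_begin .. b_begin + nrow - 1 of idx are consumed. */
static void check_colidx_shapes(long dst_nrow, long dst_ncol, long src_ncol,
                                long idx_nrow, long idx_ncol, int b_begin)
{
    assert(idx_ncol == 1);                                   /* MAT_IDX_VECTOR_EXP */
    assert(0 <= b_begin && b_begin + dst_nrow <= idx_nrow);  /* MAT_INVALID_COPY_INTERVAL */
    assert(dst_ncol == src_ncol);                            /* MAT_MISMATCH_DIM */
}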
diff --git a/nerv/lib/matrix/generic/cumatrix.h b/nerv/lib/matrix/generic/cumatrix.h
index 560311e..e82dccd 100644
--- a/nerv/lib/matrix/generic/cumatrix.h
+++ b/nerv/lib/matrix/generic/cumatrix.h
@@ -45,6 +45,8 @@ void nerv_matrix_(copy_rows_fromh_by_idx)(Matrix *a, const Matrix *b,
const Matrix *idx, int b_begin, Status *status);
void nerv_matrix_(copy_rows_fromd_by_idx)(Matrix *a, const Matrix *b,
const Matrix *idx, int b_begin, Status *status);
+void nerv_matrix_(copy_rows_fromd_by_colidx)(Matrix *a, const Matrix *b,
+ const Matrix *idx, int b_begin, Status *status);
#ifdef __NERV_FUTURE_CUDA_7
void nerv_matrix_(update_select_rows_by_rowidx)(Matrix *c, const Matrix *a, const Matrix *idx, double alpha, double beta, Status *status);
diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c
index 95a0132..edd7b0a 100644
--- a/nerv/matrix/generic/cumatrix.c
+++ b/nerv/matrix/generic/cumatrix.c
@@ -292,6 +292,18 @@ static int nerv_matrix_(lua_copy_rows_fromd_by_idx)(lua_State *L) {
return 0;
}
+static int nerv_matrix_(lua_copy_rows_fromd_by_colidx)(lua_State *L) {
+ Status status;
+ Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+ const Matrix *b = luaT_checkudata(L, 2, nerv_matrix_(tname));
+ const Matrix *idx = luaT_checkudata(L, 3, nerv_matrix_(tname));
+ long nrow = a->nrow;
+ int idx_begin = lua_gettop(L) > 3 ? luaL_checkinteger(L, 4) : 0;
+ nerv_matrix_(copy_rows_fromd_by_colidx)(a, b, idx, idx_begin, &status);
+ NERV_LUA_CHECK_STATUS(L, status);
+ return 0;
+}
+
static int nerv_matrix_(lua_expand_frm)(lua_State *L) {
Status status;
Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
@@ -390,6 +402,7 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"thres_mask", nerv_matrix_(lua_thres_mask)},
{"copy_rows_fromh_by_idx", nerv_matrix_(lua_copy_rows_fromh_by_idx)},
{"copy_rows_fromd_by_idx", nerv_matrix_(lua_copy_rows_fromd_by_idx)},
+ {"copy_rows_fromd_by_colidx", nerv_matrix_(lua_copy_rows_fromd_by_colidx)},
{"expand_frm", nerv_matrix_(lua_expand_frm)},
{"rearrange_frm", nerv_matrix_(lua_rearrange_frm)},
{"scale_rows_by_row", nerv_matrix_(lua_scale_rows_by_row)},