-rw-r--r--  nerv/layer/lstm.lua                  11
-rw-r--r--  nerv/layer/lstm_gate.lua              7
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu  18
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.c    9
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.h    1
-rw-r--r--  nerv/lib/matrix/generic/mmatrix.c    16
-rw-r--r--  nerv/lib/matrix/generic/mmatrix.h     1
-rw-r--r--  nerv/matrix/generic/cumatrix.c        1
-rw-r--r--  nerv/matrix/generic/matrix.c          8
-rw-r--r--  nerv/matrix/generic/mmatrix.c         1
10 files changed, 69 insertions(+), 4 deletions(-)
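
This change adds an in-place diagonalize operation to both the host (MMatrix) and CUDA (CuMatrix) matrix backends, exposes it to Lua, and uses it in nerv.LSTMGateLayer: each gate input can now be flagged in a new param_type list, and any transform flagged 'D' is re-projected onto its diagonal after initialization and after every update. In the LSTM layer the flag is set on the third input of each gate, presumably the cell state, so the peephole connections stay diagonal.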
diff --git a/nerv/layer/lstm.lua b/nerv/layer/lstm.lua
index 500bd87..b0cfe08 100644
--- a/nerv/layer/lstm.lua
+++ b/nerv/layer/lstm.lua
@@ -19,7 +19,7 @@ function LSTMLayer:__init(id, global_conf, layer_conf)
return self.id .. '.' .. str
end
local din1, din2, din3 = self.dim_in[1], self.dim_in[2], self.dim_in[3]
- local dout1, dout2, dout3 = self.dim_out[1], self.dim_out[2], self.dim_out[3]
+ local dout1, dout2 = self.dim_out[1], self.dim_out[2]
local layers = {
["nerv.CombinerLayer"] = {
[ap("inputXDup")] = {{}, {dim_in = {din1},
@@ -49,11 +49,14 @@ function LSTMLayer:__init(id, global_conf, layer_conf)
},
["nerv.LSTMGateLayer"] = {
[ap("forgetGateL")] = {{}, {dim_in = {din1, din2, din3},
- dim_out = {din3}, pr = pr}},
+ dim_out = {din3}, pr = pr,
+ param_type = {'N', 'N', 'D'}}},
[ap("inputGateL")] = {{}, {dim_in = {din1, din2, din3},
- dim_out = {din3}, pr = pr}},
+ dim_out = {din3}, pr = pr,
+ param_type = {'N', 'N', 'D'}}},
[ap("outputGateL")] = {{}, {dim_in = {din1, din2, din3},
- dim_out = {din3}, pr = pr}},
+ dim_out = {din3}, pr = pr,
+ param_type = {'N', 'N', 'D'}}},
},
["nerv.ElemMulLayer"] = {
diff --git a/nerv/layer/lstm_gate.lua b/nerv/layer/lstm_gate.lua
index 1963eba..8785b4f 100644
--- a/nerv/layer/lstm_gate.lua
+++ b/nerv/layer/lstm_gate.lua
@@ -5,12 +5,16 @@ function LSTMGateLayer:__init(id, global_conf, layer_conf)
self.id = id
self.dim_in = layer_conf.dim_in
self.dim_out = layer_conf.dim_out
+ self.param_type = layer_conf.param_type
self.gconf = global_conf
for i = 1, #self.dim_in do
self["ltp" .. i] = self:find_param("ltp" .. i, layer_conf, global_conf,
nerv.LinearTransParam,
{self.dim_in[i], self.dim_out[1]})
+ if self.param_type[i] == 'D' then
+ self["ltp" .. i].trans:diagonalize()
+ end
end
self.bp = self:find_param("bp", layer_conf, global_conf,
nerv.BiasParam, {1, self.dim_out[1]})
@@ -64,6 +68,9 @@ function LSTMGateLayer:update(bp_err, input, output)
self.err_bakm:sigmoid_grad(bp_err[1], output[1])
for i = 1, #self.dim_in do
self["ltp" .. i]:update_by_err_input(self.err_bakm, input[i])
+ if self.param_type[i] == 'D' then
+ self["ltp" .. i].trans:diagonalize()
+ end
end
self.bp:update_by_gradient(self.err_bakm:colsum())
end
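
For reference, a standalone sketch of the new gate interface; the id, gconf, and dimensions below are hypothetical (and a populated global_conf/parameter repo is assumed), only the param_type convention comes from the diff above:

    -- 'N' leaves the corresponding linear transform dense;
    -- 'D' zeroes its off-diagonal entries after init and after each update
    local gate = nerv.LSTMGateLayer("gate", gconf,
        {dim_in  = {429, 1024, 1024},     -- x(t), h(t-1), c(t-1)
         dim_out = {1024},
         param_type = {'N', 'N', 'D'}})   -- keep the c(t-1) transform diagonal

Note that 'D' only makes sense for a square transform, i.e. dim_in[i] must equal dim_out[1], since diagonalize rejects non-square matrices.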
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index 51e3b6a..311a6ce 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -250,6 +250,14 @@ __global__ void cudak_(fill)(MATRIX_ELEM *a,
a[j + i * stride] = val;
}
+__global__ void cudak_(diagonalize)(MATRIX_ELEM *a,
+ int nrow, int ncol, int stride) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ if (i >= nrow || j >= ncol || i == j) return;
+ a[j + i * stride] = 0;
+}
+
__global__ void cudak_(clip)(MATRIX_ELEM *a,
int nrow, int ncol, int stride, double val_1, double val_2) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
@@ -679,6 +687,16 @@ extern "C" {
cudaStreamSynchronize(0);
}
+ void cudak_(cuda_diagonalize)(Matrix *a) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
+ CEIL_DIV(a->nrow, threadsPerBlock.y));
+ cudak_(diagonalize)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(a), a->nrow, a->ncol,
+ a->stride / sizeof(MATRIX_ELEM));
+ cudaStreamSynchronize(0);
+ }
+
void cudak_(cuda_clip)(Matrix *a, double val_1, double val_2) {
dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 7b70607..1c74866 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -494,6 +494,15 @@ void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b, Status *status) {
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+void nerv_matrix_(diagonalize)(Matrix *a, Status *status) {
+ if (a->nrow != a->ncol)
+ NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
+ PROFILE_START
+ cudak_(cuda_diagonalize)(a);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
static void cuda_matrix_(free)(MATRIX_ELEM *ptr, Status *status) {
CUDA_SAFE_SYNC_CALL(cudaFree(ptr), status);
NERV_SET_STATUS(status, NERV_NORMAL, 0);
diff --git a/nerv/lib/matrix/generic/cumatrix.h b/nerv/lib/matrix/generic/cumatrix.h
index f3c2df8..48d1f13 100644
--- a/nerv/lib/matrix/generic/cumatrix.h
+++ b/nerv/lib/matrix/generic/cumatrix.h
@@ -25,6 +25,7 @@ void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
Status *status);
void nerv_matrix_(clip)(Matrix *self, double val_1, double val_2, Status *status);
void nerv_matrix_(fill)(Matrix *self, double val, Status *status);
+void nerv_matrix_(diagonalize)(Matrix *self, Status *status);
void nerv_matrix_(copy_fromd)(Matrix *a, const Matrix *b,
int a_begin, int b_begin, int b_end,
Status *status);
diff --git a/nerv/lib/matrix/generic/mmatrix.c b/nerv/lib/matrix/generic/mmatrix.c
index fa1dc5f..3dabe0e 100644
--- a/nerv/lib/matrix/generic/mmatrix.c
+++ b/nerv/lib/matrix/generic/mmatrix.c
@@ -265,6 +265,22 @@ void nerv_matrix_(fill)(Matrix *self, double val, Status *status) {
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+void nerv_matrix_(diagonalize)(Matrix *self, Status *status) {
+ if (self->nrow != self->ncol)
+ NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
+ int i, j;
+ size_t astride = self->stride;
+ MATRIX_ELEM *arow = MATRIX_ELEM_PTR(self);
+ for (i = 0; i < self->nrow; i++)
+ {
+ for (j = 0; j < self->ncol; j++)
+ if (i != j)
+ arow[j] = 0;
+ arow = MATRIX_NEXT_ROW_PTR(arow, astride);
+ }
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
void nerv_matrix_(sigmoid)(Matrix *a, const Matrix *b, Status *status) {
CHECK_SAME_DIMENSION(a, b, status);
int i, j;
diff --git a/nerv/lib/matrix/generic/mmatrix.h b/nerv/lib/matrix/generic/mmatrix.h
index c54c4e5..2cbca47 100644
--- a/nerv/lib/matrix/generic/mmatrix.h
+++ b/nerv/lib/matrix/generic/mmatrix.h
@@ -23,6 +23,7 @@ void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
Status *status);
void nerv_matrix_(clip)(Matrix *self, double val_1, double val_2, Status *status);
void nerv_matrix_(fill)(Matrix *self, double val, Status *status);
+void nerv_matrix_(diagonalize)(Matrix *self, Status *status);
void nerv_matrix_(copy_fromh)(Matrix *a, const Matrix *b,
int a_begin, int b_begin, int b_end,
Status *status);
diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c
index b706c21..f8b8038 100644
--- a/nerv/matrix/generic/cumatrix.c
+++ b/nerv/matrix/generic/cumatrix.c
@@ -240,6 +240,7 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"scale_rows_by_row", nerv_matrix_(lua_scale_rows_by_row)},
{"scale_rows_by_col", nerv_matrix_(lua_scale_rows_by_col)},
{"prefixsum_row", nerv_matrix_(lua_prefixsum_row)},
+ {"diagonalize", nerv_matrix_(lua_diagonalize)},
#ifdef __NERV_FUTURE_CUDA_7
{"update_select_rows_by_rowidx", nerv_matrix_(lua_update_select_rows_by_rowidx)},
{"update_select_rows_by_colidx", nerv_matrix_(lua_update_select_rows_by_colidx)},
diff --git a/nerv/matrix/generic/matrix.c b/nerv/matrix/generic/matrix.c
index c1da774..3162ffb 100644
--- a/nerv/matrix/generic/matrix.c
+++ b/nerv/matrix/generic/matrix.c
@@ -338,4 +338,12 @@ static int nerv_matrix_(lua_scale_rows_by_row)(lua_State *L) {
return 0;
}
+static int nerv_matrix_(lua_diagonalize)(lua_State *L) {
+ Status status;
+ Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+ nerv_matrix_(diagonalize)(a, &status);
+ NERV_LUA_CHECK_STATUS(L, status);
+ return 0;
+}
+
#endif
diff --git a/nerv/matrix/generic/mmatrix.c b/nerv/matrix/generic/mmatrix.c
index 93562d0..1665eff 100644
--- a/nerv/matrix/generic/mmatrix.c
+++ b/nerv/matrix/generic/mmatrix.c
@@ -107,6 +107,7 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"add_row", nerv_matrix_(lua_add_row)},
{"clip", nerv_matrix_(lua_clip)},
{"fill", nerv_matrix_(lua_fill)},
+ {"diagonalize", nerv_matrix_(lua_diagonalize)},
{"sigmoid", nerv_matrix_(lua_sigmoid)},
{"sigmoid_grad", nerv_matrix_(lua_sigmoid_grad)},
{"softmax", nerv_matrix_(lua_softmax)},