From e599ae7524e0e21e2266f2419555865388bded40 Mon Sep 17 00:00:00 2001
From: cloudygoose
Date: Wed, 8 Jul 2015 08:17:17 +0800
Subject: ...

---
 nerv/Makefile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/nerv/Makefile b/nerv/Makefile
index 224cc8a..7b75522 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -36,8 +36,8 @@ LUA_LIBS := matrix/init.lua io/init.lua init.lua \
 	io/sgd_buffer.lua
 
 INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
-#CUDA_BASE := /usr/local/cuda-6.5
-CUDA_BASE := /usr/local/cuda-5.0
+CUDA_BASE := /usr/local/cuda-6.5
+#CUDA_BASE := /usr/local/cuda-5.0
 CUDA_INCLUDE := -I $(CUDA_BASE)/include/
 INCLUDE += $(CUDA_INCLUDE)
 
-- 
cgit v1.2.3

From 72acf24e248cca7d69658d02939f99d57d02e9a9 Mon Sep 17 00:00:00 2001
From: cloudygoose
Date: Wed, 8 Jul 2015 08:54:01 +0800
Subject: added matrix:clip, TODO:testing

---
 nerv/lib/matrix/cukernel.h          |  1 +
 nerv/lib/matrix/generic/cukernel.cu | 22 ++++++++++++++++++++++
 nerv/lib/matrix/generic/cumatrix.c  |  7 +++++++
 nerv/lib/matrix/generic/cumatrix.h  |  1 +
 nerv/matrix/generic/cukernel.cu     | 21 +++++++++++++++++++++
 nerv/matrix/generic/cumatrix.c      | 11 +++++++++++
 6 files changed, 63 insertions(+)

diff --git a/nerv/lib/matrix/cukernel.h b/nerv/lib/matrix/cukernel.h
index 8a1494f..7bb4c2c 100644
--- a/nerv/lib/matrix/cukernel.h
+++ b/nerv/lib/matrix/cukernel.h
@@ -12,6 +12,7 @@ void cudak_(cuda_softmax_denominator)(const Matrix *a, const Matrix *max, Matri
 void cudak_(cuda_softmax_final)(const Matrix *a, const Matrix *max, const Matrix *deno, Matrix *b);
 void cudak_(cuda_add_row)(const Matrix *a, Matrix *b, double beta);
 void cudak_(cuda_fill)(Matrix *a, double val);
+void cudak_(cuda_clip)(Matrix *a, double val_1, double val_2);
 void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context);
 void cudak_(cuda_rearrange_frm)(const Matrix *a, Matrix *b, int step);
 void cudak_(cuda_scale_rows_by_row)(const Matrix *a, Matrix *b);
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index 6111193..e337798 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -213,6 +213,18 @@ __global__ void cudak_(fill)(MATRIX_ELEM *a,
     a[j + i * stride] = val;
 }
 
+__global__ void cudak_(clip)(MATRIX_ELEM *a,
+                        int nrow, int ncol, int stride, double val_1, double val_2) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    if (i >= nrow || j >= ncol) return;
+    if (a[j + i * stride] > val_2)
+        a[j + i * stride] = val_2;
+    else
+        if (a[j + i * stride] < val_1)
+            a[j + i * stride] = val_1;
+}
+
 __global__ void cudak_(expand_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
                                    int nrow, int ncol,
                                    int enrow, int encol,
@@ -510,6 +522,16 @@ extern "C" {
         cudaStreamSynchronize(0);
     }
 
+    void cudak_(cuda_clip)(Matrix *a, double val_1, double val_2) {
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
+                CEIL_DIV(a->nrow, threadsPerBlock.y));
+        cudak_(clip)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), a->nrow, a->ncol,
+             a->stride / sizeof(MATRIX_ELEM), val_1, val_2);
+        cudaStreamSynchronize(0);
+    }
+
     void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context) {
         dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
         dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 772b78d..40a0030 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -189,6 +189,13 @@ void nerv_matrix_(fill)(Matrix *self, double val, Status *status) {
     NERV_SET_STATUS(status, NERV_NORMAL, 0);
 }
 
+void nerv_matrix_(clip)(Matrix *self, double val_1, double val_2, Status *status) {
+    PROFILE_START
+    cudak_(cuda_clip)(self, val_1, val_2);
+    PROFILE_STOP
+    NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
 void nerv_matrix_(copy_fromd)(Matrix *a, const Matrix *b,
                               int a_begin, int b_begin, int b_end,
                               Status *status) {
diff --git a/nerv/lib/matrix/generic/cumatrix.h b/nerv/lib/matrix/generic/cumatrix.h
index 5cfe9d5..3f1f8a3 100644
--- a/nerv/lib/matrix/generic/cumatrix.h
+++ b/nerv/lib/matrix/generic/cumatrix.h
@@ -20,6 +20,7 @@ void nerv_matrix_(rowmax_idx)(Matrix *a, Matrix **b, Matrix **idx,
                               Status *status);
 void nerv_matrix_(add_row)(Matrix *b, const Matrix *a, double beta,
                            Status *status);
+void nerv_matrix_(clip)(Matrix *self, double val_1, double val_2, Status *status);
 void nerv_matrix_(fill)(Matrix *self, double val, Status *status);
 void nerv_matrix_(copy_fromd)(Matrix *a, const Matrix *b,
                               int a_begin, int b_begin, int b_end,
diff --git a/nerv/matrix/generic/cukernel.cu b/nerv/matrix/generic/cukernel.cu
index d6c8adc..2ae5e62 100644
--- a/nerv/matrix/generic/cukernel.cu
+++ b/nerv/matrix/generic/cukernel.cu
@@ -213,6 +213,17 @@ __global__ void cudak_(fill)(MATRIX_ELEM *a,
     a[j + i * stride] = val;
 }
 
+__global__ void cudak_(clip)(MATRIX_ELEM *a,
+                        int nrow, int ncol, int stride, double val_1, double val_2) {
+    int j = blockIdx.x * blockDim.x + threadIdx.x;
+    int i = blockIdx.y * blockDim.y + threadIdx.y;
+    if (i >= nrow || j >= ncol) return;
+    if (a[j + i * stride] > val_2)
+        a[j + i * stride] = val_2;
+    else if (a[j + i * stride] < val_1)
+        a[j + i * stride] = val_1;
+}
+
 __global__ void cudak_(expand_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
                                    int nrow, int ncol,
                                    int enrow, int encol,
@@ -510,6 +521,16 @@ extern "C" {
         cudaStreamSynchronize(0);
     }
 
+    void cudak_(cuda_clip)(Matrix *a, double val_1, double val_2) {
+        dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+        dim3 numBlocks(CEIL_DIV(a->ncol, threadsPerBlock.x),
+                CEIL_DIV(a->nrow, threadsPerBlock.y));
+        cudak_(clip)<<<numBlocks, threadsPerBlock>>> \
+            (MATRIX_ELEM_PTR(a), a->nrow, a->ncol,
+             a->stride / sizeof(MATRIX_ELEM), val_1, val_2);
+        cudaStreamSynchronize(0);
+    }
+
     void cudak_(cuda_expand_frm)(const Matrix *a, Matrix *b, int context) {
         dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
        dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c
index 311b503..4bdf5f0 100644
--- a/nerv/matrix/generic/cumatrix.c
+++ b/nerv/matrix/generic/cumatrix.c
@@ -149,6 +149,16 @@ static int nerv_matrix_(lua_fill)(lua_State *L) {
     return 0;
 }
 
+static int nerv_matrix_(lua_clip)(lua_State *L) {
+    Status status;
+    Matrix *self = luaT_checkudata(L, 1, nerv_matrix_(tname));
+    double val_1 = luaL_checknumber(L, 2);
+    double val_2 = luaL_checknumber(L, 3);
+    nerv_matrix_(clip)(self, val_1, val_2, &status);
+    NERV_LUA_CHECK_STATUS(L, status);
+    return 0;
+}
+
 static int nerv_matrix_(lua_copy_fromd)(lua_State *L) {
     Status status;
     Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
@@ -294,6 +304,7 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
     {"add", nerv_matrix_(lua_add)},
     {"mul", nerv_matrix_(lua_mul)},
     {"add_row", nerv_matrix_(lua_add_row)},
+    {"clip", nerv_matrix_(lua_clip)},
     {"fill", nerv_matrix_(lua_fill)},
     {"sigmoid", nerv_matrix_(lua_sigmoid)},
     {"sigmoid_grad", nerv_matrix_(lua_sigmoid_grad)},
-- 
cgit v1.2.3

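The commit above wires the new kernel into the Lua API: clip becomes a method on CUDA matrices that clamps every element into [val_1, val_2] in place, with val_1 the lower bound and val_2 the upper bound. A minimal usage sketch, not part of the patch (it assumes the nerv runtime is loaded with CUDA support and uses the usual nerv.CuMatrixFloat constructor; the size and values are illustrative):

    local m = nerv.CuMatrixFloat(2, 3)  -- 2x3 single-precision matrix on the GPU
    m:fill(20)                          -- every entry is now 20
    m:clip(-10, 10)                     -- in place: entries below -10 become -10, entries above 10 become 10
    -- after the call every entry of m equals 10
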
From 882541b311ae9f18c916609a74708cb29bc1abdd Mon Sep 17 00:00:00 2001
From: cloudygoose
Date: Fri, 10 Jul 2015 11:09:42 +0800
Subject: added affine_recurrent layer

---
 .gitignore                      |  1 +
 nerv/Makefile                   |  4 +-
 nerv/layer/affine_recurrent.lua | 83 +++++++++++++++++++++++++++++++++++++++++
 nerv/layer/init.lua             |  1 +
 4 files changed, 87 insertions(+), 2 deletions(-)
 create mode 100644 nerv/layer/affine_recurrent.lua

diff --git a/.gitignore b/.gitignore
index 24f2f11..5c87b71 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 *.o
 install/
+build/
 *.swp
 *.swo
diff --git a/nerv/Makefile b/nerv/Makefile
index 7b75522..022e2fb 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -31,9 +31,9 @@ OBJS := $(CORE_OBJS) $(NERV_OBJS) $(LUAT_OBJS)
 LIBS := $(INST_LIBDIR)/libnerv.so $(LIB_PATH)/libnervcore.so $(LIB_PATH)/libluaT.so
 LUA_LIBS := matrix/init.lua io/init.lua init.lua \
 	layer/init.lua layer/affine.lua layer/sigmoid.lua layer/softmax_ce.lua \
-	layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua \
+	layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua layer/affine_recurrent.lua\
 	nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/layer_dag.lua \
-	io/sgd_buffer.lua
+	io/sgd_buffer.lua
 
 INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
 CUDA_BASE := /usr/local/cuda-6.5
diff --git a/nerv/layer/affine_recurrent.lua b/nerv/layer/affine_recurrent.lua
new file mode 100644
index 0000000..5afdaa1
--- /dev/null
+++ b/nerv/layer/affine_recurrent.lua
@@ -0,0 +1,83 @@
+local Recurrent = nerv.class('nerv.AffineRecurrentLayer', 'nerv.Layer')
+
+--id: string
+--global_conf: table
+--layer_conf: table
+--Get Parameters
+function Recurrent:__init(id, global_conf, layer_conf)
+    self.id = id
+    self.dim_in = layer_conf.dim_in
+    self.dim_out = layer_conf.dim_out
+    self.gconf = global_conf
+
+    self.bp = layer_conf.bp
+    self.ltp_hh = layer_conf.ltp_hh --from hidden to hidden
+
+    self:check_dim_len(2, 1)
+    self.direct_update = layer_conf.direct_update
+end
+
+--Check parameter
+function Recurrent:init(batch_size)
+    if (self.ltp_hh.trans:ncol() ~= self.bp.trans:ncol()) then
+        nerv.error("mismatching dimensions of ltp and bp")
+    end
+    if (self.dim_in[1] ~= self.ltp_hh.trans:nrow() or
+        self.dim_in[2] ~= self.ltp_hh.trans:nrow()) then
+        nerv.error("mismatching dimensions of ltp and input")
+    end
+    if (self.dim_out[1] ~= self.bp.trans:ncol()) then
+        nerv.error("mismatching dimensions of bp and output")
+    end
+
+    self.ltp_hh_grad = self.ltp_hh.trans:create()
+    self.ltp_hh:train_init()
+    self.bp:train_init()
+end
+
+function Recurrent:update(bp_err, input, output)
+    if (self.direct_update == true) then
+        local ltp_hh = self.ltp_hh.trans
+        local bp = self.bp.trans
+        local gconf = self.gconf
+        -- momentum gain
+        local mmt_gain = 1.0 / (1.0 - gconf.momentum);
+        local n = input[1]:nrow() * mmt_gain
+        -- update corrections (accumulated errors)
+        self.ltp_hh.correction:mul(input[2], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
+        self.bp.correction:add(self.bp.correction, bp_err[1]:colsum(), gconf.momentum, 1.0)
+        -- perform update
+        ltp_hh:add(ltp_hh, self.ltp_hh.correction, 1.0, -gconf.lrate / n)
+        bp:add(bp, self.bp.correction, 1.0, -gconf.lrate / n)
+        -- weight decay
+        ltp_hh:add(ltp_hh, ltp_hh, 1.0, -gconf.lrate * gconf.wcost)
+    else
+        self.ltp_hh_grad:mul(input[2], bp_err[1], 1.0, 0.0, 'T', 'N')
+        self.ltp_hh:update(self.ltp_hh_grad)
+        self.bp:update(bp_err[1]:colsum())
+    end
+end
+
+function Recurrent:propagate(input, output)
+    output[1]:copy_fromd(input[1])
+    output[1]:mul(input[2], self.ltp_hh.trans, 1.0, 1.0, 'N', 'N')
+    output[1]:add_row(self.bp.trans, 1.0)
+end
+
+function Recurrent:back_propagate(bp_err, next_bp_err, input, output)
+    next_bp_err[1]:copy_fromd(bp_err[1])
+    next_bp_err[2]:mul(bp_err[1], self.ltp_hh.trans, 1.0, 0.0, 'N', 'T')
+    --[[
+    for i = 0, next_bp_err[2]:nrow() - 1 do
+        for j = 0, next_bp_err[2]:ncol() - 1 do
+            if (next_bp_err[2][i][j] > 10) then next_bp_err[2][i][j] = 10 end
+            if (next_bp_err[2][i][j] < -10) then next_bp_err[2][i][j] = -10 end
+        end
+    end
+    ]]--
+    next_bp_err[2]:clip(-10, 10)
+end
+
+function Recurrent:get_params()
+    return nerv.ParamRepo({self.ltp_hh, self.bp})
+end
diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua
index 3c55a94..7172f99 100644
--- a/nerv/layer/init.lua
+++ b/nerv/layer/init.lua
@@ -77,3 +77,4 @@ nerv.include('bias.lua')
 nerv.include('window.lua')
 nerv.include('mse.lua')
 nerv.include('combiner.lua')
+nerv.include('affine_recurrent.lua')
-- 
cgit v1.2.3

From 1972c47c4b78e26a1e57f5001fe030c37d360a49 Mon Sep 17 00:00:00 2001
From: cloudygoose
Date: Fri, 10 Jul 2015 11:16:21 +0800
Subject: added clip to layer_conf

---
 nerv/layer/affine_recurrent.lua | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/nerv/layer/affine_recurrent.lua b/nerv/layer/affine_recurrent.lua
index 5afdaa1..59d259c 100644
--- a/nerv/layer/affine_recurrent.lua
+++ b/nerv/layer/affine_recurrent.lua
@@ -15,6 +15,8 @@ function Recurrent:__init(id, global_conf, layer_conf)
 
     self:check_dim_len(2, 1)
     self.direct_update = layer_conf.direct_update
+
+    self.clip = layer_conf.clip --clip error in back_propagate
 end
 
 --Check parameter
@@ -75,7 +77,9 @@ function Recurrent:back_propagate(bp_err, next_bp_err, input, output)
         end
     end
     ]]--
-    next_bp_err[2]:clip(-10, 10)
+    if (self.clip ~= nil) then
+        next_bp_err[2]:clip(-self.clip, self.clip)
+    end
 end
 
 function Recurrent:get_params()
-- 
cgit v1.2.3

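Taken together, the last two commits expose the matrix-level clipping at the layer level: AffineRecurrentLayer takes two inputs (the activation from the layer below and the previous hidden state) and one output, applies the hidden-to-hidden transform ltp_hh plus the bias bp, and, when layer_conf.clip is set, bounds the error it passes back to the recurrent input; when clip is omitted the layer no longer clips at all, which is the behaviour change of the final commit. A configuration sketch, not part of the patch (the names are illustrative; it assumes ltp_hh and bp parameters of matching dimensions and a gconf carrying lrate, momentum and wcost):

    local hidden = 300                      -- illustrative hidden size
    local rec = nerv.AffineRecurrentLayer('recurrent1', gconf,
        {
            dim_in = {hidden, hidden},      -- {input from the layer below, previous hidden state}
            dim_out = {hidden},
            ltp_hh = ltp_hh,                -- hidden-to-hidden nerv.LinearTransParam, hidden x hidden
            bp = bp,                        -- nerv.BiasParam, 1 x hidden
            direct_update = true,
            clip = 10                       -- back-propagated errors are clamped to [-10, 10]
        })

Note that Recurrent:init requires both input dimensions to equal the number of rows of ltp_hh, and the output dimension to equal the number of columns of bp, which is why a single hidden size is used throughout the sketch.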