Diffstat (limited to 'nerv/examples/lmptb/lmptb')
 nerv/examples/lmptb/lmptb/layer/affine_recurrent.lua         | 93 -------------
 nerv/examples/lmptb/lmptb/layer/affine_recurrent_plusvec.lua | 74 ++++++++++++
 nerv/examples/lmptb/lmptb/layer/init.lua                     |  2 +-
 nerv/examples/lmptb/lmptb/layer/select_linear.lua            |  2 +-
4 files changed, 76 insertions, 95 deletions
diff --git a/nerv/examples/lmptb/lmptb/layer/affine_recurrent.lua b/nerv/examples/lmptb/lmptb/layer/affine_recurrent.lua
deleted file mode 100644
index 0a762f0..0000000
--- a/nerv/examples/lmptb/lmptb/layer/affine_recurrent.lua
+++ /dev/null
@@ -1,93 +0,0 @@
-local Recurrent = nerv.class('nerv.AffineRecurrentLayer', 'nerv.Layer')
-
---id: string
---global_conf: table
---layer_conf: table
---Get Parameters
-function Recurrent:__init(id, global_conf, layer_conf)
-    self.id = id
-    self.dim_in = layer_conf.dim_in
-    self.dim_out = layer_conf.dim_out
-    self.gconf = global_conf
-
-    self.bp = layer_conf.bp
-    self.ltp_ih = layer_conf.ltp_ih --from input to hidden
-    self.ltp_hh = layer_conf.ltp_hh --from hidden to hidden
-
-    self:check_dim_len(2, 1)
-    self.direct_update = layer_conf.direct_update
-end
-
---Check parameter
-function Recurrent:init(batch_size)
-    if (self.ltp_ih.trans:ncol() ~= self.bp.trans:ncol() or
-        self.ltp_hh.trans:ncol() ~= self.bp.trans:ncol()) then
-        nerv.error("mismatching dimensions of ltp and bp")
-    end
-    if (self.dim_in[1] ~= self.ltp_ih.trans:nrow() or
-        self.dim_in[2] ~= self.ltp_hh.trans:nrow()) then
-        nerv.error("mismatching dimensions of ltp and input")
-    end
-    if (self.dim_out[1] ~= self.bp.trans:ncol()) then
-        nerv.error("mismatching dimensions of bp and output")
-    end
-
-    self.ltp_ih_grad = self.ltp_ih.trans:create()
-    self.ltp_hh_grad = self.ltp_hh.trans:create()
-    self.ltp_ih:train_init()
-    self.ltp_hh:train_init()
-    self.bp:train_init()
-end
-
-function Recurrent:update(bp_err, input, output)
-    if (self.direct_update == true) then
-        local ltp_ih = self.ltp_ih.trans
-        local ltp_hh = self.ltp_hh.trans
-        local bp = self.bp.trans
-        local ltc_ih = self.ltc_ih
-        local ltc_hh = self.ltc_hh
-        local bc = self.bc
-        local gconf = self.gconf
-        -- momentum gain
-        local mmt_gain = 1.0 / (1.0 - gconf.momentum);
-        local n = input[1]:nrow() * mmt_gain
-        -- update corrections (accumulated errors)
-        self.ltp_ih.correction:mul(input[1], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
-        self.ltc_hh.correction:mul(input[2], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
-        self.bp.correction:add(bc, bp_err[1]:colsum(), gconf.momentum, 1.0)
-        -- perform update
-        ltp_ih:add(ltp_ih, self.ltp_ih.correction, 1.0, -gconf.lrate / n)
-        ltp_hh:add(ltp_hh, self.ltp_hh.correction, 1.0, -gconf.lrate / n)
-        bp:add(bp, self.bp.correction, 1.0, -gconf.lrate / n)
-        -- weight decay
-        ltp_ih:add(ltp_ih, ltp_ih, 1.0, -gconf.lrate * gconf.wcost)
-        ltp_hh:add(ltp_hh, ltp_hh, 1.0, -gconf.lrate * gconf.wcost)
-    else
-        self.ltp_ih_grad:mul(input[1], bp_err[1], 1.0, 0.0, 'T', 'N')
-        self.ltp_ih:update(self.ltp_ih_grad)
-        self.ltp_hh_grad:mul(input[2], bp_err[1], 1.0, 0.0, 'T', 'N')
-        self.ltp_hh:update(self.ltp_hh_grad)
-        self.bp:update(bp_err[1]:colsum())
-    end
-end
-
-function Recurrent:propagate(input, output)
-    output[1]:mul(input[1], self.ltp_ih.trans, 1.0, 0.0, 'N', 'N')
-    output[1]:mul(input[2], self.ltp_hh.trans, 1.0, 1.0, 'N', 'N')
-    output[1]:add_row(self.bp.trans, 1.0)
-end
-
-function Recurrent:back_propagate(bp_err, next_bp_err, input, output)
-    next_bp_err[1]:mul(bp_err[1], self.ltp_ih.trans, 1.0, 0.0, 'N', 'T')
-    next_bp_err[2]:mul(bp_err[1], self.ltp_hh.trans, 1.0, 0.0, 'N', 'T')
-    for i = 0, next_bp_err[2]:nrow() - 1 do
-        for j = 0, next_bp_err[2]:ncol() - 1 do
-            if (next_bp_err[2][i][j] > 10) then next_bp_err[2][i][j] = 10 end
-            if (next_bp_err[2][i][j] < -10) then next_bp_err[2][i][j] = -10 end
-        end
-    end
-end
-
-function Recurrent:get_params()
-    return {self.ltp_ih, self.ltp_hh, self.bp}
-end
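Note: the direct_update branch of the deleted layer above is a plain momentum-SGD step with L2 weight decay. A minimal sketch in ordinary Lua, without the nerv matrix API (the function name and arguments are hypothetical; only the gconf fields the layer actually reads are assumed):

-- Sketch only: the update performed by the deleted direct_update branch.
-- W is a weight matrix (table of rows), grad its gradient (input^T * bp_err),
-- corr the persistent momentum buffer kept alongside the parameter.
local function momentum_step(W, grad, corr, gconf, batch_rows)
    local mmt_gain = 1.0 / (1.0 - gconf.momentum)
    local n = batch_rows * mmt_gain                  -- effective step scaling used above
    for i = 1, #W do
        for j = 1, #W[i] do
            corr[i][j] = gconf.momentum * corr[i][j] + grad[i][j]   -- accumulate correction
            W[i][j] = W[i][j] - (gconf.lrate / n) * corr[i][j]      -- gradient step
            W[i][j] = W[i][j] - gconf.lrate * gconf.wcost * W[i][j] -- weight decay
        end
    end
end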
diff --git a/nerv/examples/lmptb/lmptb/layer/affine_recurrent_plusvec.lua b/nerv/examples/lmptb/lmptb/layer/affine_recurrent_plusvec.lua
new file mode 100644
index 0000000..5606a09
--- /dev/null
+++ b/nerv/examples/lmptb/lmptb/layer/affine_recurrent_plusvec.lua
@@ -0,0 +1,74 @@
+local RecurrentV = nerv.class('nerv.AffineRecurrentPlusVecLayer', 'nerv.Layer')
+
+--id: string
+--global_conf: table
+--layer_conf: table
+--Get Parameters
+function RecurrentV:__init(id, global_conf, layer_conf)
+    self.id = id
+    self.dim_in = layer_conf.dim_in
+    self.dim_out = layer_conf.dim_out
+    self.gconf = global_conf
+
+    self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]}) --layer_conf.bp
+    self.ltp_hh = self:find_param("ltp_hh", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[2], self.dim_out[1]}) --layer_conf.ltp_hh --from hidden to hidden
+
+    self:check_dim_len(2, 1)
+    self.direct_update = layer_conf.direct_update
+
+    self.clip = layer_conf.clip --clip error in back_propagate
+end
+
+--Check parameter
+function RecurrentV:init(batch_size)
+    if (self.ltp_hh.trans:ncol() ~= self.bp.trans:ncol()) then
+        nerv.error("mismatching dimensions of ltp and bp")
+    end
+    if (self.dim_in[1] ~= self.ltp_hh.trans:nrow() or
+        self.dim_in[2] ~= self.ltp_hh.trans:nrow()) then
+        nerv.error("mismatching dimensions of ltp and input")
+    end
+    if (self.dim_out[1] ~= self.bp.trans:ncol()) then
+        nerv.error("mismatching dimensions of bp and output")
+    end
+
+    self.ltp_hh_grad = self.ltp_hh.trans:create()
+    self.ltp_hh:train_init()
+    self.bp:train_init()
+end
+
+function RecurrentV:batch_resize(batch_size)
+    -- do nothing
+end
+
+function RecurrentV:update(bp_err, input, output)
+    --self.ltp_hh_grad:mul(input[2], bp_err[1], 1.0, 0.0, 'T', 'N')
+    self.ltp_hh:update_by_err_input(bp_err[1], input[2])
+    self.bp:update_by_gradient(bp_err[1]:colsum())
+end
+
+function RecurrentV:propagate(input, output)
+    output[1]:copy_fromd(input[1])
+    output[1]:mul(input[2], self.ltp_hh.trans, 1.0, 1.0, 'N', 'N')
+    output[1]:add_row(self.bp.trans, 1.0)
+end
+
+function RecurrentV:back_propagate(bp_err, next_bp_err, input, output)
+    next_bp_err[1]:copy_fromd(bp_err[1])
+    next_bp_err[2]:mul(bp_err[1], self.ltp_hh.trans, 1.0, 0.0, 'N', 'T')
+    --[[
+    for i = 0, next_bp_err[2]:nrow() - 1 do
+        for j = 0, next_bp_err[2]:ncol() - 1 do
+            if (next_bp_err[2][i][j] > 10) then next_bp_err[2][i][j] = 10 end
+            if (next_bp_err[2][i][j] < -10) then next_bp_err[2][i][j] = -10 end
+        end
+    end
+    ]]--
+    if (self.clip ~= nil) then
+        next_bp_err[2]:clip(- self.clip, self.clip)
+    end
+end
+
+function RecurrentV:get_params()
+    return nerv.ParamRepo({self.ltp_hh, self.bp})
+end
diff --git a/nerv/examples/lmptb/lmptb/layer/init.lua b/nerv/examples/lmptb/lmptb/layer/init.lua
index ff29126..ae2887c 100644
--- a/nerv/examples/lmptb/lmptb/layer/init.lua
+++ b/nerv/examples/lmptb/lmptb/layer/init.lua
@@ -1,5 +1,5 @@
 require 'lmptb.layer.select_linear'
---require 'lmptb.layer.affine_recurrent'
+require 'lmptb.layer.affine_recurrent_plusvec'
 require 'lmptb.layer.lm_affine_recurrent'
diff --git a/nerv/examples/lmptb/lmptb/layer/select_linear.lua b/nerv/examples/lmptb/lmptb/layer/select_linear.lua
index 3eba31e..f07eb2f 100644
--- a/nerv/examples/lmptb/lmptb/layer/select_linear.lua
+++ b/nerv/examples/lmptb/lmptb/layer/select_linear.lua
@@ -38,7 +38,7 @@ function SL:update(bp_err, input, output)
     --I tried the update_select_rows kernel which uses atomicAdd, but it generates unreproducable result
     self.ltp.trans:update_select_rows_by_colidx(bp_err[1], input[1], - self.gconf.lrate / self.gconf.batch_size, 0)
-    self.ltp.trans:add(self.ltp.trans, self.ltp.trans, 1.0, - self.gconf.lrate * self.gconf.wcost / self.gconf.batch_size)
+    self.ltp.trans:add(self.ltp.trans, self.ltp.trans, 1.0, - self.gconf.lrate * self.gconf.wcost)
 end
 
 function SL:propagate(input, output)
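Note: per its propagate above, the new AffineRecurrentPlusVecLayer computes output[1] = input[1] + input[2] * ltp_hh + bp and, when clip is set, bounds the error fed back through the recurrent link. A minimal construction sketch, assuming a hypothetical hidden_size and layer id and an existing gconf table; bp and ltp_hh are located via find_param from layer_conf/global_conf rather than passed in directly:

-- Sketch only: declaring the new layer with the fields its constructor reads.
local hidden_size = 200  -- hypothetical
local rec = nerv.AffineRecurrentPlusVecLayer("recurrentL1", gconf, {
    dim_in = {hidden_size, hidden_size},  -- {pass-through vector, previous hidden state}
    dim_out = {hidden_size},
    clip = 10,                -- optional: bound on next_bp_err[2] in back_propagate
    direct_update = false,
})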