Diffstat (limited to 'nerv/layer')
-rw-r--r--  nerv/layer/affine.lua           | 72
-rw-r--r--  nerv/layer/affine_recurrent.lua | 35
2 files changed, 72 insertions, 35 deletions
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 015ec3f..3ba9408 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -17,22 +17,49 @@ function MatrixParam:train_init()
     self.correction:fill(0)
 end
 
-function MatrixParam:update(gradient)
+function MatrixParam:update_by_gradient(gradient)
     local gconf = self.gconf
-    self.correction:add(self.correction, gradient, gconf.momentum, 1.0)
-    -- momentum gain
-    local mmt_gain = 1.0 / (1.0 - gconf.momentum);
-    local n = self.gconf.batch_size * mmt_gain
-    -- perform update
-    self.trans:add(self.trans, self.correction, 1.0, -gconf.lrate / n)
+    if gconf.momentum > 0 then
+        self.correction:add(self.correction, gradient, gconf.momentum, 1.0)
+        -- momentum gain
+        local mmt_gain = 1.0 / (1.0 - gconf.momentum)
+        local n = self.gconf.batch_size * mmt_gain
+        -- perform update
+        self.trans:add(self.trans, self.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+    else
+        self.trans:add(self.trans, gradient, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
+    end
+end
+
+function MatrixParam:update_by_err_input(err, input)
+    local gconf = self.gconf
+    if gconf.momentum > 0 then
+        self.correction:mul(input, err, 1.0, gconf.momentum, 'T', 'N')
+        -- momentum gain
+        local mmt_gain = 1.0 / (1.0 - gconf.momentum)
+        local n = self.gconf.batch_size * mmt_gain
+        -- perform update
+        self.trans:add(self.trans, self.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+    else
+        self.trans:mul(input, err, - gconf.lrate / gconf.batch_size, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, 'T', 'N')
+    end
 end
 
+--[[ --these updates are the same
 function LinearTransParam:update(gradient)
     MatrixParam.update(self, gradient)
-    local gconf = self.gconf
+    -- local gconf = self.gconf
+    -- weight decay(put into MatrixParam:update)
+    -- self.trans:add(self.trans, self.trans, 1.0, -gconf.lrate * gconf.wcost / gconf.batch_size)
+end
+
+function BiasParam:update(gradient)
+    MatrixParam.update(self, gradient)
+    --local gconf = self.gconf
     -- weight decay
-    self.trans:add(self.trans, self.trans, 1.0, -gconf.lrate * gconf.wcost)
+    -- self.trans:add(self.trans, self.trans, 1.0, -gconf.lrate * gconf.wcost / gconf.batch_size)
 end
+]]--
 
 function AffineLayer:__init(id, global_conf, layer_conf)
     self.id = id
@@ -65,18 +92,25 @@ function AffineLayer:batch_resize(batch_size)
 end
 
 function AffineLayer:update(bp_err, input, output)
-    if self.direct_update then
-        self.ltp.correction:mul(input[1], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
-        -- momentum gain
-        local mmt_gain = 1.0 / (1.0 - gconf.momentum);
-        local n = self.gconf.batch_size * mmt_gain
-        -- perform update
-        self.ltp.trans:add(self.ltp.trans, self.ltp.correction, 1.0, -gconf.lrate / n)
+    if self.direct_update == true then
+        local gconf = self.gconf
+        if gconf.momentum > 0 then
+            self.ltp.correction:mul(input[1], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
+            self.bp.correction:add(self.bp.correction, bp_err[1]:colsum(), gconf.momentum, 1)
+            -- momentum gain
+            local mmt_gain = 1.0 / (1.0 - gconf.momentum)
+            local n = self.gconf.batch_size * mmt_gain
+            -- perform update
+            self.ltp.trans:add(self.ltp.trans, self.ltp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+            self.bp.trans:add(self.bp.trans, self.bp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+        else
+            self.ltp.trans:mul(input[1], bp_err[1], - gconf.lrate / gconf.batch_size, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, 'T', 'N')
+            self.bp.trans:add(self.bp.trans, bp_err[1]:colsum(), 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
+        end
     else
-        self.ltp_grad:mul(input[1], bp_err[1], 1.0, 0.0, 'T', 'N')
-        self.ltp:update(self.ltp_grad)
+        self.ltp:update_by_err_input(bp_err[1], input[1])
+        self.bp:update_by_gradient(bp_err[1]:colsum())
     end
-    self.bp:update(bp_err[1]:colsum())
 end
 
 function AffineLayer:propagate(input, output)
diff --git a/nerv/layer/affine_recurrent.lua b/nerv/layer/affine_recurrent.lua
index 92d98e2..da189e0 100644
--- a/nerv/layer/affine_recurrent.lua
+++ b/nerv/layer/affine_recurrent.lua
@@ -42,25 +42,28 @@ function Recurrent:batch_resize(batch_size)
 end
 
 function Recurrent:update(bp_err, input, output)
-    if (self.direct_update == true) then
+    if self.direct_update == true then
         local ltp_hh = self.ltp_hh.trans
         local bp = self.bp.trans
         local gconf = self.gconf
-        -- momentum gain
-        local mmt_gain = 1.0 / (1.0 - gconf.momentum);
-        local n = input[1]:nrow() * mmt_gain
-        -- update corrections (accumulated errors)
-        self.ltp_hh.correction:mul(input[2], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
-        self.bp.correction:add(bc, bp_err[1]:colsum(), gconf.momentum, 1.0)
-        -- perform update
-        ltp_hh:add(ltp_hh, self.ltp_hh.correction, 1.0, -gconf.lrate / n)
-        bp:add(bp, self.bp.correction, 1.0, -gconf.lrate / n)
-        -- weight decay
-        ltp_hh:add(ltp_hh, ltp_hh, 1.0, -gconf.lrate * gconf.wcost)
+        if (gconf.momentum > 0) then
+            -- momentum gain
+            local mmt_gain = 1.0 / (1.0 - gconf.momentum)
+            local n = input[1]:nrow() * mmt_gain
+            -- update corrections (accumulated errors)
+            self.ltp_hh.correction:mul(input[2], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
+            self.bp.correction:add(self.bp.correction, bp_err[1]:colsum(), gconf.momentum, 1.0)
+            -- perform update and weight decay
+            ltp_hh:add(ltp_hh, self.ltp_hh.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+            bp:add(bp, self.bp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+        else
+            ltp_hh:mul(input[2], bp_err[1], - gconf.lrate / gconf.batch_size, 1.0 - gconf.wcost * gconf.lrate / gconf.batch_size, 'T', 'N')
+            bp:add(bp, bp_err[1]:colsum(), 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
+        end
     else
-        self.ltp_hh_grad:mul(input[2], bp_err[1], 1.0, 0.0, 'T', 'N')
-        self.ltp_hh:update(self.ltp_hh_grad)
-        self.bp:update(bp_err[1]:colsum())
+        --self.ltp_hh_grad:mul(input[2], bp_err[1], 1.0, 0.0, 'T', 'N')
+        self.ltp_hh:update_by_err_input(bp_err[1], input[2])
+        self.bp:update_by_gradient(bp_err[1]:colsum())
     end
 end
 
@@ -82,7 +85,7 @@ function Recurrent:back_propagate(bp_err, next_bp_err, input, output)
     end
     ]]--
     if (self.clip ~= nil) then
-        next_bp_err[2]:clip(-self.clip, self.clip)
+        next_bp_err[2]:clip(- self.clip, self.clip)
     end
 end
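
Note on the update rule: both branches of the new code apply the same step, differing only in whether a momentum buffer (self.correction) is kept. The sketch below is illustrative only, not part of the patch: it writes out the scalar form of the update for a single weight, assuming the gconf fields lrate, wcost, momentum and batch_size used in the diff; the function name sgd_step is hypothetical.

    -- Illustrative sketch of the update performed by
    -- MatrixParam:update_by_gradient / update_by_err_input above.
    -- w    : one weight
    -- corr : its momentum buffer (self.correction)
    -- grad : the gradient accumulated over the mini-batch
    local function sgd_step(w, corr, grad, gconf)
        -- L2 weight decay is folded into the scaling of the old weight
        local decay = 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size
        if gconf.momentum > 0 then
            corr = gconf.momentum * corr + grad                   -- correction:add(...)
            local n = gconf.batch_size / (1.0 - gconf.momentum)   -- batch_size * mmt_gain
            w = decay * w - (gconf.lrate / n) * corr              -- trans:add(...)
        else
            w = decay * w - (gconf.lrate / gconf.batch_size) * grad
        end
        return w, corr
    end

In the matrix code the same step is done in place: the fused mul/add calls form the gradient as input^T * err ('T', 'N') and scale the existing weights by the decay factor in the same call, which is why the separate weight-decay line from the old code disappears.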