 nerv/layer/affine.lua           | 30 +++++++++++++++---------------
 nerv/layer/affine_recurrent.lua | 14 +++++++-------
 2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index c24af16..c5084c4 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -19,29 +19,29 @@ end
 
 function MatrixParam:update(gradient)
     local gconf = self.gconf
-    if (gconf.momentum > 0) then
+    if gconf.momentum > 0 then
         self.correction:add(self.correction, gradient, gconf.momentum, 1.0)
         -- momentum gain
-        local mmt_gain = 1.0 / (1.0 - gconf.momentum);
+        local mmt_gain = 1.0 / (1.0 - gconf.momentum)
         local n = self.gconf.batch_size * mmt_gain
         -- perform update
-        self.trans:add(self.trans, self.correction, 1.0-gconf.lrate*gconf.wcost/gconf.batch_size, -gconf.lrate/n)
+        self.trans:add(self.trans, self.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
     else
-        self.trans:add(self.trans, gradient, 1.0-gconf.lrate*gconf.wcost/gconf.batch_size, -gconf.lrate/gconf.batch_size)
+        self.trans:add(self.trans, gradient, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
     end
 end
 
 function MatrixParam:updateEI(err, input)
     local gconf = self.gconf
-    if (gconf.momentum > 0) then
+    if gconf.momentum > 0 then
         self.correction:mul(input, err, 1.0, gconf.momentum, 'T', 'N')
         -- momentum gain
-        local mmt_gain = 1.0 / (1.0 - gconf.momentum);
+        local mmt_gain = 1.0 / (1.0 - gconf.momentum)
         local n = self.gconf.batch_size * mmt_gain
         -- perform update
-        self.trans:add(self.trans, self.correction, 1.0-gconf.lrate*gconf.wcost/gconf.batch_size, -gconf.lrate/n)
+        self.trans:add(self.trans, self.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
     else
-        self.trans:mul(input, err, -gconf.lrate/gconf.batch_size, 1.0-gconf.lrate*gconf.wcost/gconf.batch_size, 'T', 'N')
+        self.trans:mul(input, err, - gconf.lrate / gconf.batch_size, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, 'T', 'N')
     end
 end
 
@@ -92,20 +92,20 @@ function AffineLayer:batch_resize(batch_size)
 end
 
 function AffineLayer:update(bp_err, input, output)
-    if (self.direct_update == true) then
+    if self.direct_update == true then
        local gconf = self.gconf
-        if (gconf.momentum > 0) then
+        if gconf.momentum > 0 then
            self.ltp.correction:mul(input[1], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
            self.bp.correction:add(self.bp.correction, bp_err[1]:colsum(), gconf.momentum, 1)
            -- momentum gain
-            local mmt_gain = 1.0 / (1.0 - gconf.momentum);
+            local mmt_gain = 1.0 / (1.0 - gconf.momentum)
            local n = self.gconf.batch_size * mmt_gain
            -- perform update
-            self.ltp.trans:add(self.ltp.trans, self.ltp.correction, 1.0-gconf.lrate*gconf.wcost/gconf.batch_size, -gconf.lrate / n)
-            self.bp.trans:add(self.bp.trans, self.bp.correction, 1.0-gconf.lrate*gconf.wcost/gconf.batch_size, -gconf.lrate / n)
+            self.ltp.trans:add(self.ltp.trans, self.ltp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+            self.bp.trans:add(self.bp.trans, self.bp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
        else
-            self.ltp.trans:mul(input[1], bp_err[1], -gconf.lrate / gconf.batch_size, 1.0-gconf.lrate*gconf.wcost/gconf.batch_size, 'T', 'N')
-            self.bp.trans:add(self.bp.trans, bp_err[1]:colsum(), 1.0-gconf.lrate*gconf.wcost/gconf.batch_size, -gconf.lrate / gconf.batch_size)
+            self.ltp.trans:mul(input[1], bp_err[1], - gconf.lrate / gconf.batch_size, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, 'T', 'N')
+            self.bp.trans:add(self.bp.trans, bp_err[1]:colsum(), 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
        end
     else
        self.ltp:updateEI(bp_err[1], input[1])

diff --git a/nerv/layer/affine_recurrent.lua b/nerv/layer/affine_recurrent.lua
index b465e95..7f9c3f5 100644
--- a/nerv/layer/affine_recurrent.lua
+++ b/nerv/layer/affine_recurrent.lua
@@ -42,23 +42,23 @@ function Recurrent:batch_resize(batch_size)
 end
 
 function Recurrent:update(bp_err, input, output)
-    if (self.direct_update == true) then
+    if self.direct_update == true then
        local ltp_hh = self.ltp_hh.trans
        local bp = self.bp.trans
        local gconf = self.gconf
        if (gconf.momentum > 0) then
            -- momentum gain
-            local mmt_gain = 1.0 / (1.0 - gconf.momentum);
+            local mmt_gain = 1.0 / (1.0 - gconf.momentum)
            local n = input[1]:nrow() * mmt_gain
            -- update corrections (accumulated errors)
            self.ltp_hh.correction:mul(input[2], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
            self.bp.correction:add(self.bp.correction, bp_err[1]:colsum(), gconf.momentum, 1.0)
            -- perform update and weight decay
-            ltp_hh:add(ltp_hh, self.ltp_hh.correction, 1.0-gconf.lrate*gconf.wcost/gconf.batch_size, -gconf.lrate/n)
-            bp:add(bp, self.bp.correction, 1.0-gconf.lrate*gconf.wcost/gconf.batch_size, -gconf.lrate/n)
+            ltp_hh:add(ltp_hh, self.ltp_hh.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+            bp:add(bp, self.bp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
        else
-            ltp_hh:mul(input[2], bp_err[1], -gconf.lrate/gconf.batch_size, 1.0-gconf.wcost*gconf.lrate/gconf.batch_size, 'T', 'N')
-            bp:add(bp, bp_err[1]:colsum(), 1.0-gconf.lrate*gconf.wcost/gconf.batch_size, -gconf.lrate/gconf.batch_size)
+            ltp_hh:mul(input[2], bp_err[1], - gconf.lrate / gconf.batch_size, 1.0 - gconf.wcost * gconf.lrate / gconf.batch_size, 'T', 'N')
+            bp:add(bp, bp_err[1]:colsum(), 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
        end
     else
        --self.ltp_hh_grad:mul(input[2], bp_err[1], 1.0, 0.0, 'T', 'N')
@@ -85,7 +85,7 @@ function Recurrent:back_propagate(bp_err, next_bp_err, input, output)
    end
    ]]--
    if (self.clip ~= nil) then
-       next_bp_err[2]:clip(-self.clip, self.clip)
+       next_bp_err[2]:clip(- self.clip, self.clip)
    end
 end
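Both files apply the same momentum-SGD update with L2 weight decay; the hunks above only normalize its spacing. As a plain-Lua sketch of what that rule computes, assuming nerv's c:add(a, b, alpha, beta) evaluates c = alpha*a + beta*b and c:mul(a, b, alpha, beta, 'T', 'N') evaluates c = alpha * a^T * b + beta * c (the helper name sgd_step and its scalar arguments are illustrative, not part of nerv):

-- Scalar sketch of the update performed on each weight w (illustrative only).
-- decay folds the L2 penalty (wcost) into the multiplicative factor on w.
local function sgd_step(w, grad, corr, gconf)
    local decay = 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size
    if gconf.momentum > 0 then
        -- corresponds to correction:add(correction, grad, momentum, 1.0)
        corr = gconf.momentum * corr + grad
        -- momentum gain: scale the step down by 1 / (1 - momentum) so the
        -- effective learning rate matches the momentum-free branch
        local n = gconf.batch_size * (1.0 / (1.0 - gconf.momentum))
        -- corresponds to trans:add(trans, correction, decay, -lrate/n)
        w = decay * w - (gconf.lrate / n) * corr
    else
        -- corresponds to trans:add(trans, grad, decay, -lrate/batch_size)
        w = decay * w - (gconf.lrate / gconf.batch_size) * grad
    end
    return w, corr
end

The recurrent layer's direct-update branch is the same rule, except that n is derived from input[1]:nrow() rather than gconf.batch_size.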
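The final hunk of affine_recurrent.lua likewise only re-spaces the gradient-clipping call. Assuming nerv's m:clip(lo, hi) bounds every element of m to [lo, hi], a hypothetical per-element equivalent is:

-- Hypothetical scalar form of next_bp_err[2]:clip(-self.clip, self.clip),
-- saturating the back-propagated error to keep recurrent gradients bounded.
local function clip(x, c)
    return math.max(-c, math.min(c, x))
end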