| author | txh18 <[email protected]> | 2015-11-16 23:21:43 +0800 |
|---|---|---|
| committer | txh18 <[email protected]> | 2015-11-16 23:21:43 +0800 |
| commit | cbcce5ecc2864872b411eebbd307fa0f9a7e9dc0 (patch) | |
| tree | 8670676196b9c8a70d88ba17aa9b4f6f50309a62 | |
| parent | a766983d167c5eb700ff9aaf0ba7e1c4e97a9cf3 (diff) | |
change updateEI to update_by_err_input
-rw-r--r-- | nerv/layer/affine.lua | 8
-rw-r--r-- | nerv/layer/affine_recurrent.lua | 4

2 files changed, 6 insertions, 6 deletions
```diff
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index c5084c4..3ba9408 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -17,7 +17,7 @@ function MatrixParam:train_init()
     self.correction:fill(0)
 end
 
-function MatrixParam:update(gradient)
+function MatrixParam:update_by_gradient(gradient)
     local gconf = self.gconf
     if gconf.momentum > 0 then
         self.correction:add(self.correction, gradient, gconf.momentum, 1.0)
@@ -31,7 +31,7 @@ function MatrixParam:update(gradient)
     end
 end
 
-function MatrixParam:updateEI(err, input)
+function MatrixParam:update_by_err_input(err, input)
     local gconf = self.gconf
     if gconf.momentum > 0 then
         self.correction:mul(input, err, 1.0, gconf.momentum, 'T', 'N')
@@ -108,8 +108,8 @@ function AffineLayer:update(bp_err, input, output)
             self.bp.trans:add(self.bp.trans, bp_err[1]:colsum(), 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
         end
     else
-        self.ltp:updateEI(bp_err[1], input[1])
-        self.bp:update(bp_err[1]:colsum())
+        self.ltp:update_by_err_input(bp_err[1], input[1])
+        self.bp:update_by_gradient(bp_err[1]:colsum())
     end
 end
diff --git a/nerv/layer/affine_recurrent.lua b/nerv/layer/affine_recurrent.lua
index 7f9c3f5..da189e0 100644
--- a/nerv/layer/affine_recurrent.lua
+++ b/nerv/layer/affine_recurrent.lua
@@ -62,8 +62,8 @@ function Recurrent:update(bp_err, input, output)
         end
     else
         --self.ltp_hh_grad:mul(input[2], bp_err[1], 1.0, 0.0, 'T', 'N')
-        self.ltp_hh:updateEI(bp_err[1], input[2])
-        self.bp:update(bp_err[1]:colsum())
+        self.ltp_hh:update_by_err_input(bp_err[1], input[2])
+        self.bp:update_by_gradient(bp_err[1]:colsum())
     end
 end
```
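The commit only renames the two MatrixParam entry points; behavior is unchanged. Below is a minimal usage sketch (not part of the commit; `example_update` and its `layer` argument are hypothetical) of how a caller distinguishes the two methods after the rename, mirroring the `AffineLayer:update` call sites in the diff.

```lua
-- Hypothetical helper, assuming the MatrixParam API shown in the diff above.
-- `layer.ltp` is the weight param, `layer.bp` the bias param, as in AffineLayer.
function example_update(layer, bp_err, input)
    -- update_by_err_input: the param derives its own gradient from the
    -- back-propagated error and the layer input (roughly input^T * err)
    layer.ltp:update_by_err_input(bp_err[1], input[1])
    -- update_by_gradient: the caller passes a precomputed gradient, here the
    -- column sum of the error, which is the bias gradient
    layer.bp:update_by_gradient(bp_err[1]:colsum())
end
```

The new names make the distinction explicit at the call site: one method expects a ready-made gradient, the other expects the raw error and input from which the gradient is computed internally.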