Diffstat (limited to 'nerv/examples/lmptb/rnn/layers/gate_fff.lua')
-rw-r--r-- | nerv/examples/lmptb/rnn/layers/gate_fff.lua | 67
1 files changed, 67 insertions, 0 deletions
diff --git a/nerv/examples/lmptb/rnn/layers/gate_fff.lua b/nerv/examples/lmptb/rnn/layers/gate_fff.lua
new file mode 100644
index 0000000..74e19ce
--- /dev/null
+++ b/nerv/examples/lmptb/rnn/layers/gate_fff.lua
@@ -0,0 +1,67 @@
+local GateFFFLayer = nerv.class('nerv.GateFFFLayer', 'nerv.Layer')
+
+function GateFFFLayer:__init(id, global_conf, layer_conf)
+    self.id = id
+    self.ltp = layer_conf.ltp
+    self.bp = layer_conf.bp
+    self.dim_in = layer_conf.dim_in
+    self.dim_out = layer_conf.dim_out
+    self.gconf = global_conf
+    self:check_dim_len(1, 1) -- exactly one input and one output
+end
+
+function GateFFFLayer:init(batch_size)
+    if self.ltp.trans:ncol() ~= self.bp.trans:ncol() then
+        nerv.error("mismatching dimensions of linear transform and bias parameter")
+    end
+    if self.dim_in[1] ~= self.ltp.trans:nrow() then
+        nerv.error("mismatching dimensions of linear transform parameter and input")
+    end
+    if self.dim_out[1] ~= self.ltp.trans:ncol() then
+        nerv.error("mismatching dimensions of linear transform parameter and output")
+    end
+    self.ltp_grad = self.ltp.trans:create()
+    self.ltp:train_init()
+    self.bp:train_init()
+end
+
+function GateFFFLayer:batch_resize(batch_size)
+    -- do nothing
+end
+
+function GateFFFLayer:update(bp_err, input, output)
+    if self.direct_update == true then
+        local gconf = self.gconf
+        if gconf.momentum > 0 then
+            self.ltp.correction:mul(input[1], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
+            self.bp.correction:add(self.bp.correction, bp_err[1]:colsum(), gconf.momentum, 1)
+            -- momentum gain
+            local mmt_gain = 1.0 / (1.0 - gconf.momentum)
+            local n = self.gconf.batch_size * mmt_gain
+            -- perform update
+            self.ltp.trans:add(self.ltp.trans, self.ltp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+            self.bp.trans:add(self.bp.trans, self.bp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+        else
+            self.ltp.trans:mul(input[1], bp_err[1], - gconf.lrate / gconf.batch_size, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, 'T', 'N')
+            self.bp.trans:add(self.bp.trans, bp_err[1]:colsum(), 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
+        end
+    else
+        self.ltp:update_by_err_input(bp_err[1], input[1])
+        self.bp:update_by_gradient(bp_err[1]:colsum())
+    end
+end
+
+function GateFFFLayer:propagate(input, output)
+    -- apply linear transform
+    output[1]:mul(input[1], self.ltp.trans, 1.0, 0.0, 'N', 'N')
+    -- add bias
+    output[1]:add_row(self.bp.trans, 1.0)
+end
+
+function GateFFFLayer:back_propagate(bp_err, next_bp_err, input, output)
+    next_bp_err[1]:mul(bp_err[1], self.ltp.trans, 1.0, 0.0, 'N', 'T')
+end
+
+function GateFFFLayer:get_params()
+    return nerv.ParamRepo({self.ltp, self.bp})
+end
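
For context, a minimal usage sketch follows, based only on the constructor, init, and propagate signatures visible in the diff above. It is not part of the commit: the names `gconf`, `ltp`, `bp`, `input_mat`, and `output_mat` are placeholders for objects prepared elsewhere (e.g. by the surrounding lmptb scripts), the 400/200 dimensions are arbitrary, and direct `nerv.GateFFFLayer(...)` construction is assumed to behave like other nerv layers.

-- Hypothetical usage sketch (assumptions noted above).
local layer = nerv.GateFFFLayer("gate_fff_example", gconf,
                                {ltp = ltp, bp = bp,
                                 dim_in = {400}, dim_out = {200}})
layer:init(gconf.batch_size)      -- validates parameter/IO dimensions
-- forward pass: output = input * ltp.trans, then bias row added
layer:propagate({input_mat}, {output_mat})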