author:    txh18 <[email protected]>  2015-11-23 14:24:54 +0800
committer: txh18 <[email protected]>  2015-11-23 14:24:54 +0800
commit:    47215f8aed55fe2912391c69cc70b90f85a776a5 (patch)
tree:      d58c60e1790b10b6500fa64f9ea1141e84d147dc /nerv
parent:    e7a45e14d75959a3d4095ac34158a8abc3e995cf (diff)
implementing GateFFF layer
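The patch generalizes the gate from one input to three: the single `ltp` weight read from `layer_conf` is replaced by three linear transforms (`ltp1`, `ltp2`, `ltp3`) plus a bias, all resolved through `find_param`, and `propagate` now applies a sigmoid to the summed transforms, so the gate computes `g = sigmoid(x1 * W1 + x2 * W2 + x3 * W3 + b)`. Correspondingly, `back_propagate` first scales the incoming error by the sigmoid derivative (`sigmoid_grad`, buffered in `err_bakm`) and then maps it back through each of the three transforms.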
Diffstat (limited to 'nerv')
-rw-r--r-- | nerv/examples/lmptb/rnn/layers/gate_fff.lua | 65
1 file changed, 33 insertions(+), 32 deletions(-)
```diff
diff --git a/nerv/examples/lmptb/rnn/layers/gate_fff.lua b/nerv/examples/lmptb/rnn/layers/gate_fff.lua
index 6a588fc..1010639 100644
--- a/nerv/examples/lmptb/rnn/layers/gate_fff.lua
+++ b/nerv/examples/lmptb/rnn/layers/gate_fff.lua
@@ -2,64 +2,65 @@
 local GateFFFLayer = nerv.class('nerv.GateFFFLayer', 'nerv.Layer')
 
 function GateFFFLayer:__init(id, global_conf, layer_conf)
     self.id = id
-    self.ltp = layer_conf.ltp
-    self.bp = layer_conf.bp
     self.dim_in = layer_conf.dim_in
     self.dim_out = layer_conf.dim_out
     self.gconf = global_conf
-    self:check_dim_len(1, 1) -- exactly one input and one output
+
+    self.ltp1 = self:find_param("ltp1", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[1], self.dim_out[1]}) -- layer_conf.ltp
+    self.ltp2 = self:find_param("ltp2", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[2], self.dim_out[1]}) -- layer_conf.ltp
+    self.ltp3 = self:find_param("ltp3", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[3], self.dim_out[1]}) -- layer_conf.ltp
+    self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]}) -- layer_conf.bp
+
+    self:check_dim_len(3, 1) -- three inputs and one output
 end
 
 function GateFFFLayer:init(batch_size)
-    if self.ltp.trans:ncol() ~= self.bp.trans:ncol() then
+    if self.ltp1.trans:ncol() ~= self.bp.trans:ncol() or
+       self.ltp2.trans:ncol() ~= self.bp.trans:ncol() or
+       self.ltp3.trans:ncol() ~= self.bp.trans:ncol() then
         nerv.error("mismatching dimensions of linear transform and bias parameter")
     end
-    if self.dim_in[1] ~= self.ltp.trans:nrow() then
+    if self.dim_in[1] ~= self.ltp1.trans:nrow() or
+       self.dim_in[2] ~= self.ltp2.trans:nrow() or
+       self.dim_in[3] ~= self.ltp3.trans:nrow() then
         nerv.error("mismatching dimensions of linear transform parameter and input")
     end
-    if self.dim_out[1] ~= self.ltp.trans:ncol() then
+    if self.dim_out[1] ~= self.ltp1.trans:ncol() then
         nerv.error("mismatching dimensions of linear transform parameter and output")
     end
-    self.ltp_grad = self.ltp.trans:create()
-    self.ltp:train_init()
+    self.ltp1:train_init()
+    self.ltp2:train_init()
+    self.ltp3:train_init()
     self.bp:train_init()
+    self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1])
 end
 
 function GateFFFLayer:batch_resize(batch_size)
-    -- do nothing
-end
-
-function GateFFFLayer:update(bp_err, input, output)
-    if self.direct_update == true then
-        local gconf = self.gconf
-        if gconf.momentum > 0 then
-            self.ltp.correction:mul(input[1], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
-            self.bp.correction:add(self.bp.correction, bp_err[1]:colsum(), gconf.momentum, 1)
-            -- momentum gain
-            local mmt_gain = 1.0 / (1.0 - gconf.momentum)
-            local n = self.gconf.batch_size * mmt_gain
-            -- perform update
-            self.ltp.trans:add(self.ltp.trans, self.ltp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size,
-                               gconf.lrate / n)
-            self.bp.trans:add(self.bp.trans, self.bp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size,
-                              gconf.lrate / n)
-        else
-            self.ltp.trans:mul(input[1], bp_err[1], - gconf.lrate / gconf.batch_size,
-                               1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, 'T', 'N')
-            self.bp.trans:add(self.bp.trans, bp_err[1]:colsum(), 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size,
-                              - gconf.lrate / gconf.batch_size)
-        end
-    else
-        self.ltp:update_by_err_input(bp_err[1], input[1])
-        self.bp:update_by_gradient(bp_err[1]:colsum())
+    if self.err_bakm:nrow() ~= batch_size then
+        self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1])
     end
 end
 
 function GateFFFLayer:propagate(input, output)
     -- apply linear transform
-    output[1]:mul(input[1], self.ltp.trans, 1.0, 0.0, 'N', 'N')
+    output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
+    output[1]:mul(input[2], self.ltp2.trans, 1.0, 1.0, 'N', 'N')
+    output[1]:mul(input[3], self.ltp3.trans, 1.0, 1.0, 'N', 'N')
     -- add bias
     output[1]:add_row(self.bp.trans, 1.0)
+    output[1]:sigmoid(output[1])
 end
 
 function GateFFFLayer:back_propagate(bp_err, next_bp_err, input, output)
-    next_bp_err[1]:mul(bp_err[1], self.ltp.trans, 1.0, 0.0, 'N', 'T')
+    self.err_bakm:sigmoid_grad(bp_err[1], output[1])
+    next_bp_err[1]:mul(self.err_bakm, self.ltp1.trans, 1.0, 0.0, 'N', 'T')
+    next_bp_err[2]:mul(self.err_bakm, self.ltp2.trans, 1.0, 0.0, 'N', 'T')
+    next_bp_err[3]:mul(self.err_bakm, self.ltp3.trans, 1.0, 0.0, 'N', 'T')
+end
+
+function GateFFFLayer:update(bp_err, input, output)
+    self.err_bakm:sigmoid_grad(bp_err[1], output[1])
+    self.ltp1:update_by_err_input(self.err_bakm, input[1])
+    self.ltp2:update_by_err_input(self.err_bakm, input[2])
+    self.ltp3:update_by_err_input(self.err_bakm, input[3])
+    self.bp:update_by_gradient(self.err_bakm:colsum())
 end
 
 function GateFFFLayer:get_params()
```
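As a sanity check on the math above, here is a minimal plain-Lua sketch of what the new `propagate`/`back_propagate` pair computes, with scalars standing in for the batch matrices; the variable names and numbers here are illustrative only, not part of nerv:

```lua
-- Scalar model of the three-input sigmoid gate in this patch.
local function sigmoid(z) return 1 / (1 + math.exp(-z)) end

-- forward pass: g = sigmoid(x1*w1 + x2*w2 + x3*w3 + b)
-- (mirrors the three output[1]:mul calls, add_row, and sigmoid)
local x1, x2, x3 = 0.5, -1.0, 2.0          -- inputs (illustrative)
local w1, w2, w3, b = 0.1, 0.4, -0.2, 0.05 -- parameters (illustrative)
local g = sigmoid(x1 * w1 + x2 * w2 + x3 * w3 + b)

-- backward pass: scale the incoming error by the sigmoid derivative
-- g * (1 - g), as err_bakm:sigmoid_grad(bp_err[1], output[1]) does,
-- then push it back through each weight (the three next_bp_err:mul calls)
local bp_err = 1.0
local err_bakm = bp_err * g * (1 - g)
local e1, e2, e3 = err_bakm * w1, err_bakm * w2, err_bakm * w3
print(g, e1, e2, e3)
```

Instantiating the real layer would follow the `nerv.Layer` convention visible in the constructor, presumably something like `nerv.GateFFFLayer("gate1", gconf, {dim_in = {d1, d2, d3}, dim_out = {d}})`, with `gconf.cumat_type` supplying the matrix constructor used for the `err_bakm` buffer.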