Diffstat (limited to 'nerv/examples/lmptb/tnn/layers/gate_fff.lua')
-rw-r--r-- | nerv/examples/lmptb/tnn/layers/gate_fff.lua | 71
1 file changed, 71 insertions, 0 deletions
diff --git a/nerv/examples/lmptb/tnn/layers/gate_fff.lua b/nerv/examples/lmptb/tnn/layers/gate_fff.lua
new file mode 100644
index 0000000..751dde1
--- /dev/null
+++ b/nerv/examples/lmptb/tnn/layers/gate_fff.lua
@@ -0,0 +1,71 @@
+local GateFFFLayer = nerv.class('nerv.GateFFFLayer', 'nerv.Layer')
+
+function GateFFFLayer:__init(id, global_conf, layer_conf)
+    self.id = id
+    self.dim_in = layer_conf.dim_in
+    self.dim_out = layer_conf.dim_out
+    self.gconf = global_conf
+
+    self.ltp1 = self:find_param("ltp1", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[1], self.dim_out[1]}) --layer_conf.ltp
+    self.ltp2 = self:find_param("ltp2", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[2], self.dim_out[1]}) --layer_conf.ltp
+    self.ltp3 = self:find_param("ltp3", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[3], self.dim_out[1]}) --layer_conf.ltp
+    self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]}) --layer_conf.bp
+
+    self:check_dim_len(3, 1) -- three inputs, exactly one output
+end
+
+function GateFFFLayer:init(batch_size)
+    if self.ltp1.trans:ncol() ~= self.bp.trans:ncol() or
+            self.ltp2.trans:ncol() ~= self.bp.trans:ncol() or
+            self.ltp3.trans:ncol() ~= self.bp.trans:ncol() then
+        nerv.error("mismatching dimensions of linear transform and bias parameter")
+    end
+    if self.dim_in[1] ~= self.ltp1.trans:nrow() or
+            self.dim_in[2] ~= self.ltp2.trans:nrow() or
+            self.dim_in[3] ~= self.ltp3.trans:nrow() then
+        nerv.error("mismatching dimensions of linear transform parameter and input")
+    end
+    if self.dim_out[1] ~= self.ltp1.trans:ncol() then
+        nerv.error("mismatching dimensions of linear transform parameter and output")
+    end
+    self.ltp1:train_init()
+    self.ltp2:train_init()
+    self.ltp3:train_init()
+    self.bp:train_init()
+    self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1])
+end
+
+function GateFFFLayer:batch_resize(batch_size)
+    if self.err_bakm:nrow() ~= batch_size then
+        self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1])
+    end
+end
+
+function GateFFFLayer:propagate(input, output)
+    -- apply the three linear transforms, accumulating into output[1]
+    output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
+    output[1]:mul(input[2], self.ltp2.trans, 1.0, 1.0, 'N', 'N')
+    output[1]:mul(input[3], self.ltp3.trans, 1.0, 1.0, 'N', 'N')
+    -- add bias and squash
+    output[1]:add_row(self.bp.trans, 1.0)
+    output[1]:sigmoid(output[1])
+end
+
+function GateFFFLayer:back_propagate(bp_err, next_bp_err, input, output)
+    self.err_bakm:sigmoid_grad(bp_err[1], output[1])
+    next_bp_err[1]:mul(self.err_bakm, self.ltp1.trans, 1.0, 0.0, 'N', 'T')
+    next_bp_err[2]:mul(self.err_bakm, self.ltp2.trans, 1.0, 0.0, 'N', 'T')
+    next_bp_err[3]:mul(self.err_bakm, self.ltp3.trans, 1.0, 0.0, 'N', 'T')
+end
+
+function GateFFFLayer:update(bp_err, input, output)
+    self.err_bakm:sigmoid_grad(bp_err[1], output[1])
+    self.ltp1:update_by_err_input(self.err_bakm, input[1])
+    self.ltp2:update_by_err_input(self.err_bakm, input[2])
+    self.ltp3:update_by_err_input(self.err_bakm, input[3])
+    self.bp:update_by_gradient(self.err_bakm:colsum())
+end
+
+function GateFFFLayer:get_params()
+    return nerv.ParamRepo({self.ltp1, self.ltp2, self.ltp3, self.bp})
+end
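The new layer computes a standard sigmoid gate over three input streams: output[1] = sigmoid(input[1] * ltp1 + input[2] * ltp2 + input[3] * ltp3 + bp), i.e. the gate activation of an LSTM-style cell with three incoming connections. Below is a minimal driver sketch, not part of this commit: the gconf fields, dimensions, and batch size are illustrative assumptions, and in lmptb the layer would normally be constructed and wired by the TNN graph rather than by hand.

    require 'nerv'

    -- assumed global_conf: the layer only reads gconf.cumat_type directly;
    -- whatever find_param needs to auto-generate absent parameters
    -- (e.g. a param repo) is also assumed to be reachable from here
    local gconf = {
        cumat_type = nerv.CuMatrixFloat,
        paramRepo  = nerv.ParamRepo(), -- assumption, not shown in this diff
    }
    local layer = nerv.GateFFFLayer("gate1", gconf,
        {dim_in = {10, 20, 20}, dim_out = {20}})

    local batch_size = 8
    layer:init(batch_size)

    -- one matrix per input port, one for the single output port
    local input = {gconf.cumat_type(batch_size, 10),
                   gconf.cumat_type(batch_size, 20),
                   gconf.cumat_type(batch_size, 20)}
    local output = {gconf.cumat_type(batch_size, 20)}

    -- output[1] = sigmoid(x1*W1 + x2*W2 + x3*W3 + b)
    layer:propagate(input, output)

Note that err_bakm, allocated in init and reallocated in batch_resize, holds the sigmoid gradient of the incoming error; back_propagate and update each recompute it because the training loop invokes the two methods independently.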