author    txh18 <[email protected]>  2015-11-24 22:06:45 +0800
committer txh18 <[email protected]>  2015-11-24 22:06:45 +0800
commit    8e590ba284bfee414659f1845e175b41cac05d45
tree      a812e760e3631263c18144c7c6bb4f7a332732af
parent    914a026734db6608e04987e9fcec9c82612e8673
let affine support multiple inputs
-rw-r--r--  nerv/examples/lmptb/tnn/init.lua            |  1
-rw-r--r--  nerv/examples/lmptb/tnn/layers/elem_mul.lua | 38
-rw-r--r--  nerv/examples/lmptb/tnn/layersT/lstm.lua    | 56
-rw-r--r--  nerv/layer/affine.lua                       | 36
4 files changed, 123 insertions, 8 deletions
diff --git a/nerv/examples/lmptb/tnn/init.lua b/nerv/examples/lmptb/tnn/init.lua
index a069527..a7a377e 100644
--- a/nerv/examples/lmptb/tnn/init.lua
+++ b/nerv/examples/lmptb/tnn/init.lua
@@ -43,5 +43,6 @@ end
 nerv.include('tnn.lua')
 nerv.include('layersT/softmax_ce_t.lua')
+nerv.include('layers/elem_mul.lua')
 nerv.include('layers/gate_fff.lua')
 nerv.include('layer_dag_t.lua')
diff --git a/nerv/examples/lmptb/tnn/layers/elem_mul.lua b/nerv/examples/lmptb/tnn/layers/elem_mul.lua
new file mode 100644
index 0000000..c809d3e
--- /dev/null
+++ b/nerv/examples/lmptb/tnn/layers/elem_mul.lua
@@ -0,0 +1,38 @@
+local ElemMulLayer = nerv.class('nerv.ElemMulLayer', 'nerv.Layer')
+
+function ElemMulLayer:__init(id, global_conf, layer_conf)
+    self.id = id
+    self.dim_in = layer_conf.dim_in
+    self.dim_out = layer_conf.dim_out
+    self.gconf = global_conf
+
+    self:check_dim_len(2, 1) -- Element-multiply input[1] and input[2]
+end
+
+function ElemMulLayer:init(batch_size)
+    if self.dim_in[1] ~= self.dim_in[2] or
+            self.dim_in[1] ~= self.dim_out[1] then
+        nerv.error("dim_in and dim_out mismatch for ElemMulLayer")
+    end
+end
+
+function ElemMulLayer:batch_resize(batch_size)
+    --do nothing
+end
+
+function ElemMulLayer:propagate(input, output)
+    output[1]:mul_elem(input[1], input[2])
+end
+
+function ElemMulLayer:back_propagate(bp_err, next_bp_err, input, output)
+    next_bp_err[1]:mul_elem(bp_err[1], input[2])
+    next_bp_err[2]:mul_elem(bp_err[1], input[1])
+end
+
+function ElemMulLayer:update(bp_err, input, output)
+    --do nothing
+end
+
+function ElemMulLayer:get_params()
+    return nerv.ParamRepo({})
+end
diff --git a/nerv/examples/lmptb/tnn/layersT/lstm.lua b/nerv/examples/lmptb/tnn/layersT/lstm.lua
new file mode 100644
index 0000000..0da1f38
--- /dev/null
+++ b/nerv/examples/lmptb/tnn/layersT/lstm.lua
@@ -0,0 +1,56 @@
+local LSTMLayerT = nerv.class('nerv.LSTMLayerT', 'nerv.LayerT')
+
+function LSTMLayerT:__init(id, global_conf, layer_conf)
+    self.id = id
+    self.dim_in = layer_conf.dim_in
+    self.dim_out = layer_conf.dim_out
+    self.gconf = global_conf
+
+    --prepare a DAGLayerT to hold the lstm structure
+    local paramRepo = nerv.ParamRepo()
+    local layers = {
+        ["nerv.IndRecurrentLayer"] = {
+            ["recurrentL1"] = recurrentLconfig,
+        }}
+
+    self:check_dim_len(1, 1) -- exactly one input and one output
+end
+
+function LSTMLayerT:init(batch_size)
+    if self.ltp.trans:ncol() ~= self.bp.trans:ncol() then
+        nerv.error("mismatching dimensions of linear transform and bias paramter")
+    end
+    if self.dim_in[1] ~= self.ltp.trans:nrow() then
+        nerv.error("mismatching dimensions of linear transform parameter and input")
+    end
+    if self.dim_out[1] ~= self.ltp.trans:ncol() then
+        nerv.error("mismatching dimensions of linear transform parameter and output")
+    end
+    self.ltp_grad = self.ltp.trans:create()
+    self.ltp:train_init()
+    self.bp:train_init()
+end
+
+function LSTMLayerT:batch_resize(batch_size)
+    -- do nothing
+end
+
+function AffineLayer:update(bp_err, input, output)
+    self.ltp:update_by_err_input(bp_err[1], input[1])
+    self.bp:update_by_gradient(bp_err[1]:colsum())
+end
+
+function AffineLayer:propagate(input, output)
+    -- apply linear transform
+    output[1]:mul(input[1], self.ltp.trans, 1.0, 0.0, 'N', 'N')
+    -- add bias
+    output[1]:add_row(self.bp.trans, 1.0)
+end
+
+function AffineLayer:back_propagate(bp_err, next_bp_err, input, output)
+    next_bp_err[1]:mul(bp_err[1], self.ltp.trans, 1.0, 0.0, 'N', 'T')
+end
+
+function AffineLayer:get_params()
+    return nerv.ParamRepo({self.ltp, self.bp})
+end
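The new ElemMulLayer above boils down to a simple rule: propagate computes the element-wise product of its two inputs, and back_propagate routes the incoming error to each input scaled element-wise by the other input (since d(a .* b)/da = b). Below is a minimal pure-Lua sketch of that rule; plain tables stand in for nerv matrices and all values are illustrative, not taken from this commit.

-- Sketch only: mirrors ElemMulLayer's propagate/back_propagate on Lua tables.
local a   = {2, 3, 4}      -- input[1]
local b   = {5, 6, 7}      -- input[2]
local err = {1, 1, 1}      -- bp_err[1] arriving from the layer above

local y, grad_a, grad_b = {}, {}, {}
for i = 1, #a do
    y[i]      = a[i] * b[i]    -- propagate:      output[1] = input[1] .* input[2]
    grad_a[i] = err[i] * b[i]  -- back_propagate: next_bp_err[1] = bp_err[1] .* input[2]
    grad_b[i] = err[i] * a[i]  -- back_propagate: next_bp_err[2] = bp_err[1] .* input[1]
end

There are no trainable parameters in this layer, which is why update is a no-op and get_params returns an empty ParamRepo.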
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index e24a0c6..d56fcb8 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -64,25 +64,35 @@ function AffineLayer:__init(id, global_conf, layer_conf)
     self.dim_in = layer_conf.dim_in
     self.dim_out = layer_conf.dim_out
     self.ltp = self:find_param("ltp", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[1], self.dim_out[1]}) --layer_conf.ltp
-    self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]})--layer_conf.bp
+    for i = 2, #self.dim_in do
+        self["ltp" .. i] = self:find_param("ltp" .. i, layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[i], self.dim_out[1]})
+    end
+    self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]}) --layer_conf.bp
     self.gconf = global_conf
-    self:check_dim_len(1, 1) -- exactly one input and one output
-    -- self.direct_update = layer_conf.direct_update or global_conf.direct_update
+    self:check_dim_len(-1, 1) -- exactly one output, allow multiple inputs
 end
 
 function AffineLayer:init(batch_size)
     if self.ltp.trans:ncol() ~= self.bp.trans:ncol() then
         nerv.error("mismatching dimensions of linear transform and bias paramter")
     end
+    self.bp:train_init()
     if self.dim_in[1] ~= self.ltp.trans:nrow() then
         nerv.error("mismatching dimensions of linear transform parameter and input")
     end
     if self.dim_out[1] ~= self.ltp.trans:ncol() then
         nerv.error("mismatching dimensions of linear transform parameter and output")
     end
-    self.ltp_grad = self.ltp.trans:create()
     self.ltp:train_init()
-    self.bp:train_init()
+    for i = 2, #self.dim_in do
+        if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
+            nerv.error("mismatching dimensions of linear transform parameter and input")
+        end
+        if self.dim_out[1] ~= self["ltp" .. i].trans:ncol() then
+            nerv.error("mismatching dimensions of linear transform parameter and output")
+        end
+        self["ltp" .. i]:train_init()
+    end
 end
 
 function AffineLayer:batch_resize(batch_size)
@@ -91,20 +101,30 @@ end
 
 function AffineLayer:update(bp_err, input, output)
     self.ltp:update_by_err_input(bp_err[1], input[1])
+    for i = 2, #self.dim_in do
+        self["ltp" .. i]:update_by_err_input(bp_err[1], input[i])
+    end
     self.bp:update_by_gradient(bp_err[1]:colsum())
 end
 
 function AffineLayer:propagate(input, output)
-    -- apply linear transform
     output[1]:mul(input[1], self.ltp.trans, 1.0, 0.0, 'N', 'N')
-    -- add bias
+    for i = 2, #self.dim_in do
+        output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
+    end
     output[1]:add_row(self.bp.trans, 1.0)
 end
 
 function AffineLayer:back_propagate(bp_err, next_bp_err, input, output)
     next_bp_err[1]:mul(bp_err[1], self.ltp.trans, 1.0, 0.0, 'N', 'T')
+    for i = 2, #self.dim_in do
+        next_bp_err[i]:mul(bp_err[1], self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
+    end
 end
 
 function AffineLayer:get_params()
-    return nerv.ParamRepo({self.ltp, self.bp})
+    local pr = nerv.ParamRepo({self.ltp, self.bp})
+    for i = 2, #self.dim_in do
+        pr:add(self["ltp" .. i].id, self["ltp" .. i])
+    end
 end
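The affine.lua change above generalizes the layer from y = input[1] * ltp + bp to y = input[1] * ltp + input[2] * ltp2 + ... + bp, with one LinearTransParam per extra input stream, and back_propagate hands bp_err[1] * ltp_i^T back to stream i. Below is a minimal pure-Lua sketch of that rule; plain tables stand in for nerv matrices, and the sizes and values are illustrative assumptions, not taken from this commit.

-- Sketch only: mirrors the multi-input propagate/back_propagate on Lua tables.
local function matmul(A, B)                  -- naive product of row-major matrices
    local C = {}
    for i = 1, #A do
        C[i] = {}
        for j = 1, #B[1] do
            local s = 0
            for k = 1, #B do s = s + A[i][k] * B[k][j] end
            C[i][j] = s
        end
    end
    return C
end

local function transpose(A)
    local T = {}
    for j = 1, #A[1] do
        T[j] = {}
        for i = 1, #A do T[j][i] = A[i][j] end
    end
    return T
end

-- two input streams (batch size 1) feeding one 3-dimensional output
local input = { {{1, 2}}, {{3}} }                         -- 1x2 and 1x1 rows
local ltp   = { {{0.1, 0.2, 0.3}, {0.4, 0.5, 0.6}},       -- "ltp"  : 2x3
                {{1.0, -1.0, 0.5}} }                      -- "ltp2" : 1x3
local bp    = {{0.01, 0.02, 0.03}}                        -- bias row, 1x3

-- propagate: accumulate every stream into output[1], then add the bias row
local output = matmul(input[1], ltp[1])
for i = 2, #input do
    local part = matmul(input[i], ltp[i])
    for r = 1, #output do
        for c = 1, #output[r] do output[r][c] = output[r][c] + part[r][c] end
    end
end
for r = 1, #output do
    for c = 1, #output[r] do output[r][c] = output[r][c] + bp[1][c] end
end

-- back_propagate: each stream receives bp_err[1] * ltp_i^T
local bp_err      = {{1, 1, 1}}
local next_bp_err = {}
for i = 1, #input do
    next_bp_err[i] = matmul(bp_err, transpose(ltp[i]))
end

In update, each ltp_i is adjusted from the same bp_err[1] paired with its own input[i], the gradient of the shared output with respect to that stream's weights, which is why the commit simply loops i = 2, #self.dim_in over the same update_by_err_input call.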