Diffstat (limited to 'nerv/layer')
-rw-r--r--   nerv/layer/affine.lua       75
-rw-r--r--   nerv/layer/init.lua          2
-rw-r--r--   nerv/layer/lstm.lua         55
-rw-r--r--   nerv/layer/lstm_gate.lua    97
-rw-r--r--   nerv/layer/lstmp.lua        61
-rw-r--r--   nerv/layer/projection.lua   70
-rw-r--r--   nerv/layer/rnn.lua          15
7 files changed, 131 insertions, 244 deletions
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 16250fd..b68cf3d 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -48,6 +48,10 @@ function MatrixParam:_update(alpha, beta)
     -- momentum gain
     local mmt_gain = 1.0 / (1.0 - gconf.momentum)
     local n = gconf.batch_size * mmt_gain
+    -- clip gradient
+    if gconf.clip then
+        self.correction_acc:clip(-gconf.clip, gconf.clip)
+    end
     -- perform update
     if gconf.momentum > 0 then
         self.correction:add(self.correction, self.correction_acc, gconf.momentum, 1.0)
@@ -87,7 +91,11 @@ local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
 -- @param global_conf see `self.gconf` of `nerv.Layer.__init`
 -- @param layer_conf a table providing with settings dedicated for the layer,
 -- for `layer_conf` fields that are shared by all layers, see
--- `nerv.Layer.__init`. The affine layer requires parameters to be bound, the
+-- `nerv.Layer.__init`. These fields can be specified:
+-- * `activation`: the type of the activation function layer, also known as \sigma in \sigma(Wx + b). The activation function layer must guarantee that it does not use the parameter `input` in its `back_propagate` function. Defaults to none (no activation function).
+-- * `no_bias`: a boolean value indicating whether to use the bias parameter. Defaults to false.
+-- * `param_type`: a string table of the same length as `dim_in`, indicating the parameter type for every input: 'D' for a diagonal weight matrix, 'N' for a normal weight matrix. Defaults to 'N' for every input.
+-- The affine layer requires parameters to be bound, the
 -- following parameter names will be looked up while binding:
 --
 -- * `ltp`: the linear transformation parameter, also known as the weight matrix, W in Wx + b
@@ -96,6 +104,11 @@ local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
 
 function AffineLayer:__init(id, global_conf, layer_conf)
     nerv.Layer.__init(self, id, global_conf, layer_conf)
     self:check_dim_len(-1, 1) -- exactly one output, allow multiple inputs
+    self.param_type = layer_conf.param_type or table.vector(#self.dim_in, 'N')
+    if layer_conf.activation then
+        self.activation = layer_conf.activation('', global_conf, {dim_in = {self.dim_out[1]}, dim_out = {self.dim_out[1]}})
+    end
+    self.no_bias = layer_conf.no_bias
     self:bind_params()
 end
@@ -108,24 +121,29 @@ function AffineLayer:bind_params()
         self["ltp" .. i] = self:find_param(pid_list, lconf, self.gconf,
                                             nerv.LinearTransParam,
                                             {self.dim_in[i], self.dim_out[1]})
+        if self.param_type[i] == 'D' then
+            self['ltp' .. i].trans:diagonalize()
+        end
         local no_update = lconf["no_update_ltp" .. i]
         if (no_update ~= nil) and no_update or lconf.no_update_all then
             self["ltp" .. i].no_update = true
         end
     end
     self.ltp = self.ltp1 -- alias of ltp1
-    self.bp = self:find_param("bp", lconf, self.gconf,
-                                nerv.BiasParam,
-                                {1, self.dim_out[1]},
-                                nerv.Param.gen_zero)
-    local no_update = lconf["no_update_bp"]
-    if (no_update ~= nil) and no_update or lconf.no_update_all then
-        self.bp.no_update = true
+    if not self.no_bias then
+        self.bp = self:find_param("bp", lconf, self.gconf,
+                                    nerv.BiasParam,
+                                    {1, self.dim_out[1]},
+                                    nerv.Param.gen_zero)
+        local no_update = lconf["no_update_bp"]
+        if (no_update ~= nil) and no_update or lconf.no_update_all then
+            self.bp.no_update = true
+        end
     end
 end
 
 function AffineLayer:init(batch_size)
-    if self.dim_out[1] ~= self.bp.trans:ncol() then
+    if not self.no_bias and self.dim_out[1] ~= self.bp.trans:ncol() then
         nerv.error("mismatching dimensions of linear transform and bias paramter")
     end
     for i = 1, #self.dim_in do
@@ -137,7 +155,13 @@ function AffineLayer:init(batch_size)
         end
         self["ltp" .. i]:train_init()
     end
-    self.bp:train_init()
+    if not self.no_bias then
+        self.bp:train_init()
+    end
+    if self.activation then
+        self.bak_mat = self.mat_type(batch_size, self.dim_out[1])
+        self.bak_mat:fill(0)
+    end
 end
 
 function AffineLayer:batch_resize(batch_size)
@@ -147,26 +171,43 @@ end
 function AffineLayer:update()
     for i = 1, #self.dim_in do
         self["ltp" .. i]:update_by_err_input()
+        if self.param_type[i] == 'D' then
+            self['ltp' .. i].trans:diagonalize()
+        end
+    end
+    if not self.no_bias then
+        self.bp:update_by_gradient()
     end
-    self.bp:update_by_gradient()
 end
 
 function AffineLayer:propagate(input, output)
+    local result = self.activation and self.bak_mat or output[1]
     -- apply linear transform
-    output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
+    result:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
     for i = 2, #self.dim_in do
-        output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
+        result:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
     end
     -- add bias
-    output[1]:add_row(self.bp.trans, 1.0)
+    if not self.no_bias then
+        result:add_row(self.bp.trans, 1.0)
+    end
+    if self.activation then
+        self.activation:propagate({result}, output)
+    end
 end
 
 function AffineLayer:back_propagate(bp_err, next_bp_err, input, output)
+    local result = self.activation and self.bak_mat or bp_err[1]
+    if self.activation then
+        self.activation:back_propagate(bp_err, {result}, {result}, output)
+    end
     for i = 1, #self.dim_in do
-        next_bp_err[i]:mul(bp_err[1], self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
-        self["ltp" .. i]:back_propagate_by_err_input(bp_err[1], input[i])
+        next_bp_err[i]:mul(result, self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
+        self["ltp" .. i]:back_propagate_by_err_input(result, input[i])
+    end
+    if not self.no_bias then
+        self.bp:back_propagate_by_gradient(result:colsum())
     end
-    self.bp:back_propagate_by_gradient(bp_err[1]:colsum())
 end
 
 function AffineLayer:get_params()
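The hunks above fold the old gate/projection behaviour into nerv.AffineLayer (optional activation, optional bias, per-input parameter type) and add optional gradient clipping to MatrixParam:_update. A minimal configuration sketch of these options, assuming the usual NERV global_conf and ParamRepo setup; the ids, dimensions and the clip value below are made up for illustration:

    -- gradient clipping: with gconf.clip set, the accumulated correction is
    -- clipped to [-clip, clip] before every update (see MatrixParam:_update above)
    local gconf = {momentum = 0.9, batch_size = 256,
                   clip = 5}          -- plus whatever other fields your setup already uses

    -- an affine layer with a built-in sigmoid, no bias parameter, and a
    -- diagonal weight matrix for its second input
    local affine = nerv.AffineLayer('affine0', gconf,
                                    {dim_in = {429, 1024}, dim_out = {1024},
                                     activation = nerv.SigmoidLayer,
                                     no_bias = true,
                                     param_type = {'N', 'D'},
                                     pr = param_repo})   -- param_repo: an existing nerv.ParamRepo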
diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua
index d175d02..054784b 100644
--- a/nerv/layer/init.lua
+++ b/nerv/layer/init.lua
@@ -272,13 +272,11 @@ nerv.include('combiner.lua')
 nerv.include('softmax.lua')
 nerv.include('elem_mul.lua')
 nerv.include('lstm.lua')
-nerv.include('lstm_gate.lua')
 nerv.include('dropout.lua')
 nerv.include('gru.lua')
 nerv.include('rnn.lua')
 nerv.include('duplicate.lua')
 nerv.include('identity.lua')
-nerv.include('projection.lua')
 nerv.include('lstmp.lua')
 nerv.include('relu.lua')
diff --git a/nerv/layer/lstm.lua b/nerv/layer/lstm.lua
index 3de3453..5d73ad2 100644
--- a/nerv/layer/lstm.lua
+++ b/nerv/layer/lstm.lua
@@ -2,9 +2,12 @@ local LSTMLayer = nerv.class('nerv.LSTMLayer', 'nerv.GraphLayer')
 
 function LSTMLayer:__init(id, global_conf, layer_conf)
     nerv.Layer.__init(self, id, global_conf, layer_conf)
-    self:check_dim_len(1, 1)
+    self:check_dim_len(-1, 1)
+    if #self.dim_in == 0 then
+        nerv.error('LSTM layer %s has no input', self.id)
+    end
 
-    local din = layer_conf.dim_in[1]
+    local din = layer_conf.dim_in
     local dout = layer_conf.dim_out[1]
 
     local pr = layer_conf.pr
@@ -17,48 +20,51 @@ function LSTMLayer:__init(id, global_conf, layer_conf)
             mainCombine = {dim_in = {dout, dout}, dim_out = {dout}, lambda = {1, 1}},
         },
         ['nerv.DuplicateLayer'] = {
-            inputDup = {dim_in = {din}, dim_out = {din, din, din, din}},
             outputDup = {dim_in = {dout}, dim_out = {dout, dout, dout, dout, dout}},
             cellDup = {dim_in = {dout}, dim_out = {dout, dout, dout, dout, dout}},
         },
         ['nerv.AffineLayer'] = {
-            mainAffine = {dim_in = {din, dout}, dim_out = {dout}, pr = pr},
+            mainAffine = {dim_in = table.connect({dout}, din), dim_out = {dout}, pr = pr},
+            forgetGate = {dim_in = table.connect({dout, dout}, din), dim_out = {dout},
+                param_type = table.connect({'N', 'D'}, table.vector(#din, 'N')), pr = pr, activation = nerv.SigmoidLayer},
+            inputGate = {dim_in = table.connect({dout, dout}, din), dim_out = {dout},
+                param_type = table.connect({'N', 'D'}, table.vector(#din, 'N')), pr = pr, activation = nerv.SigmoidLayer},
+            outputGate = {dim_in = table.connect({dout, dout}, din), dim_out = {dout},
+                param_type = table.connect({'N', 'D'}, table.vector(#din, 'N')), pr = pr, activation = nerv.SigmoidLayer},
         },
         ['nerv.TanhLayer'] = {
             mainTanh = {dim_in = {dout}, dim_out = {dout}},
             outputTanh = {dim_in = {dout}, dim_out = {dout}},
         },
-        ['nerv.LSTMGateLayer'] = {
-            forgetGate = {dim_in = {din, dout, dout}, dim_out = {dout}, param_type = {'N', 'N', 'D'}, pr = pr},
-            inputGate = {dim_in = {din, dout, dout}, dim_out = {dout}, param_type = {'N', 'N', 'D'}, pr = pr},
-            outputGate = {dim_in = {din, dout, dout}, dim_out = {dout}, param_type = {'N', 'N', 'D'}, pr = pr},
-        },
         ['nerv.ElemMulLayer'] = {
             inputGateMul = {dim_in = {dout, dout}, dim_out = {dout}},
             forgetGateMul = {dim_in = {dout, dout}, dim_out = {dout}},
             outputGateMul = {dim_in = {dout, dout}, dim_out = {dout}},
         },
     }
+    for i = 1, #din do
+        layers['nerv.DuplicateLayer']['inputDup' .. i] = {dim_in = {din[i]}, dim_out = {din[i], din[i], din[i], din[i]}}
+    end
 
     local connections = {
         -- lstm input
-        {'<input>[1]', 'inputDup[1]', 0},
+        --{'<input>[1 .. n]', 'inputDup(1 .. n)[1]', 0},
 
         -- input gate
-        {'inputDup[1]', 'inputGate[1]', 0},
-        {'outputDup[1]', 'inputGate[2]', 1},
-        {'cellDup[1]', 'inputGate[3]', 1},
+        {'outputDup[1]', 'inputGate[1]', 1},
+        {'cellDup[1]', 'inputGate[2]', 1},
+        --{'inputDup(1 .. n)[1]', 'inputGate[3 .. n + 2]', 0},
 
         -- forget gate
-        {'inputDup[2]', 'forgetGate[1]', 0},
-        {'outputDup[2]', 'forgetGate[2]', 1},
-        {'cellDup[2]', 'forgetGate[3]', 1},
+        {'outputDup[2]', 'forgetGate[1]', 1},
+        {'cellDup[2]', 'forgetGate[2]', 1},
+        --{'inputDup(1 .. n)[2]', 'forgetGate[3 .. n + 2]', 0},
 
         -- lstm cell
         {'forgetGate[1]', 'forgetGateMul[1]', 0},
         {'cellDup[3]', 'forgetGateMul[2]', 1},
-        {'inputDup[3]', 'mainAffine[1]', 0},
-        {'outputDup[3]', 'mainAffine[2]', 1},
+        {'outputDup[3]', 'mainAffine[1]', 1},
+        --{'inputDup(1 .. n)[3]', 'mainAffine[2 .. n + 1]', 0},
         {'mainAffine[1]', 'mainTanh[1]', 0},
         {'inputGate[1]', 'inputGateMul[1]', 0},
         {'mainTanh[1]', 'inputGateMul[2]', 0},
@@ -67,9 +73,9 @@ function LSTMLayer:__init(id, global_conf, layer_conf)
         {'mainCombine[1]', 'cellDup[1]', 0},
 
         -- forget gate
-        {'inputDup[4]', 'outputGate[1]', 0},
-        {'outputDup[4]', 'outputGate[2]', 1},
-        {'cellDup[4]', 'outputGate[3]', 0},
+        {'outputDup[4]', 'outputGate[1]', 1},
+        {'cellDup[4]', 'outputGate[2]', 0},
+        --{'inputDup(1 .. n)[4]', 'outputGate[2 .. n + 1]', 0},
 
         -- lstm output
         {'cellDup[5]', 'outputTanh[1]', 0},
@@ -78,6 +84,13 @@ function LSTMLayer:__init(id, global_conf, layer_conf)
         {'outputGateMul[1]', 'outputDup[1]', 0},
         {'outputDup[5]', '<output>[1]', 0},
     }
+    for i = 1, #din do
+        table.insert(connections, {'<input>[' .. i .. ']', 'inputDup' .. i .. '[1]', 0})
+        table.insert(connections, {'inputDup' .. i .. '[1]', 'inputGate[' .. (i + 2) .. ']', 0})
+        table.insert(connections, {'inputDup' .. i .. '[2]', 'forgetGate[' .. (i + 2) .. ']', 0})
+        table.insert(connections, {'inputDup' .. i .. '[3]', 'mainAffine[' .. (i + 1) .. ']', 0})
+        table.insert(connections, {'inputDup' .. i .. '[4]', 'outputGate[' .. (i + 2) .. ']', 0})
+    end
 
     self:add_prefix(layers, connections)
     local layer_repo = nerv.LayerRepo(layers, pr, global_conf)
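With check_dim_len(-1, 1) and the generated inputDup1 .. inputDupN duplicators, the LSTM layer now accepts any number of input streams. A usage sketch under that assumption; the id and dimensions are illustrative:

    -- an LSTM cell fed by two input streams; dim_in may list any number of
    -- inputs, while dim_out is still a single output of the cell size
    local lstm = nerv.LSTMLayer('lstm0', gconf,
                                {dim_in = {440, 120}, dim_out = {1024},
                                 pr = param_repo})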
diff --git a/nerv/layer/lstm_gate.lua b/nerv/layer/lstm_gate.lua
deleted file mode 100644
index 39a3ff7..0000000
--- a/nerv/layer/lstm_gate.lua
+++ /dev/null
@@ -1,97 +0,0 @@
-local LSTMGateLayer = nerv.class('nerv.LSTMGateLayer', 'nerv.Layer')
--- NOTE: this is a full matrix gate
-
-function LSTMGateLayer:__init(id, global_conf, layer_conf)
-    nerv.Layer.__init(self, id, global_conf, layer_conf)
-    self.param_type = layer_conf.param_type
-    self:check_dim_len(-1, 1) --accept multiple inputs
-    self:bind_params()
-end
-
-function LSTMGateLayer:bind_params()
-    local lconf = self.lconf
-    lconf.no_update_ltp1 = lconf.no_update_ltp1 or lconf.no_update_ltp
-    for i = 1, #self.dim_in do
-        local pid = "ltp" .. i
-        local pid_list = i == 1 and {pid, "ltp"} or pid
-        self["ltp" .. i] = self:find_param(pid_list, lconf, self.gconf,
-                                            nerv.LinearTransParam,
-                                            {self.dim_in[i], self.dim_out[1]})
-        if self.param_type[i] == 'D' then
-            self["ltp" .. i].trans:diagonalize()
-        end
-        local no_update = lconf["no_update_ltp" .. i]
-        if (no_update ~= nil) and no_update or lconf.no_update_all then
-            self["ltp" .. i].no_update = true
-        end
-    end
-    self.ltp = self.ltp1 -- alias of ltp1
-    self.bp = self:find_param("bp", lconf, self.gconf,
-                                nerv.BiasParam, {1, self.dim_out[1]},
-                                nerv.Param.gen_zero)
-    local no_update = lconf["no_update_bp"]
-    if (no_update ~= nil) and no_update or lconf.no_update_all then
-        self.bp.no_update = true
-    end
-end
-
-function LSTMGateLayer:init(batch_size)
-    if self.dim_out[1] ~= self.bp.trans:ncol() then
-        nerv.error("mismatching dimensions of linear transform and bias paramter")
-    end
-    for i = 1, #self.dim_in do
-        if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
-            nerv.error("mismatching dimensions of linear transform parameter and input")
-        end
-        if self.dim_out[1] ~= self["ltp" .. i].trans:ncol() then
-            nerv.error("mismatching dimensions of linear transform parameter and output")
-        end
-        self["ltp" .. i]:train_init()
-    end
-    self.bp:train_init()
-    self.err_bakm = self.mat_type(batch_size, self.dim_out[1])
-end
-
-function LSTMGateLayer:batch_resize(batch_size)
-    if self.err_m:nrow() ~= batch_size then
-        self.err_bakm = self.mat_type(batch_size, self.dim_out[1])
-    end
-end
-
-function LSTMGateLayer:propagate(input, output)
-    -- apply linear transform
-    output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
-    for i = 2, #self.dim_in do
-        output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
-    end
-    -- add bias
-    output[1]:add_row(self.bp.trans, 1.0)
-    output[1]:sigmoid(output[1])
-end
-
-function LSTMGateLayer:back_propagate(bp_err, next_bp_err, input, output)
-    self.err_bakm:sigmoid_grad(bp_err[1], output[1])
-    for i = 1, #self.dim_in do
-        next_bp_err[i]:mul(self.err_bakm, self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
-        self["ltp" .. i]:back_propagate_by_err_input(self.err_bakm, input[i])
-    end
-    self.bp:back_propagate_by_gradient(self.err_bakm:colsum())
-end
-
-function LSTMGateLayer:update()
-    for i = 1, #self.dim_in do
-        self["ltp" .. i]:update_by_err_input()
-        if self.param_type[i] == 'D' then
-            self["ltp" .. i].trans:diagonalize()
-        end
-    end
-    self.bp:update_by_gradient()
-end
-
-function LSTMGateLayer:get_params()
-    local pr = nerv.ParamRepo({self.bp}, self.loc_type)
-    for i = 1, #self.dim_in do
-        pr:add(self["ltp" .. i])
-    end
-    return pr
-end
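The deleted LSTMGateLayer hard-coded the sigmoid and the diagonal peephole weight; the same behaviour is now obtained from the extended nerv.AffineLayer, as the gate entries in lstm.lua above already show. A standalone sketch of an equivalent gate, with illustrative dimensions (the second input plays the role of the diagonal peephole connection):

    local gate = nerv.AffineLayer('forget_gate', gconf,
                                  {dim_in = {512, 512, 256}, dim_out = {512},
                                   param_type = {'N', 'D', 'N'},   -- 'D': diagonal peephole weight
                                   activation = nerv.SigmoidLayer,
                                   pr = param_repo})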
diff --git a/nerv/layer/lstmp.lua b/nerv/layer/lstmp.lua
index bbb2091..49c9516 100644
--- a/nerv/layer/lstmp.lua
+++ b/nerv/layer/lstmp.lua
@@ -2,9 +2,12 @@ local LSTMPLayer = nerv.class('nerv.LSTMPLayer', 'nerv.GraphLayer')
 
 function LSTMPLayer:__init(id, global_conf, layer_conf)
     nerv.Layer.__init(self, id, global_conf, layer_conf)
-    self:check_dim_len(1, 1)
+    self:check_dim_len(-1, 1)
+    if #self.dim_in == 0 then
+        nerv.error('LSTMP layer %s has no input', self.id)
+    end
 
-    local din = layer_conf.dim_in[1]
+    local din = layer_conf.dim_in
     local dcell = layer_conf.cell_dim
     local dout = layer_conf.dim_out[1]
@@ -18,51 +21,52 @@ function LSTMPLayer:__init(id, global_conf, layer_conf)
             mainCombine = {dim_in = {dcell, dcell}, dim_out = {dcell}, lambda = {1, 1}},
         },
         ['nerv.DuplicateLayer'] = {
-            inputDup = {dim_in = {din}, dim_out = {din, din, din, din}},
             outputDup = {dim_in = {dout}, dim_out = {dout, dout, dout, dout, dout}},
             cellDup = {dim_in = {dcell}, dim_out = {dcell, dcell, dcell, dcell, dcell}},
         },
         ['nerv.AffineLayer'] = {
-            mainAffine = {dim_in = {din, dout}, dim_out = {dcell}, pr = pr},
+            mainAffine = {dim_in = table.connect({dout}, din), dim_out = {dcell}, pr = pr},
+            forgetGate = {dim_in = table.connect({dout, dcell}, din), dim_out = {dcell},
+                param_type = table.connect({'N', 'D'}, table.vector(#din, 'N')), pr = pr, activation = nerv.SigmoidLayer},
+            inputGate = {dim_in = table.connect({dout, dcell}, din), dim_out = {dcell},
+                param_type = table.connect({'N', 'D'}, table.vector(#din, 'N')), pr = pr, activation = nerv.SigmoidLayer},
+            outputGate = {dim_in = table.connect({dout, dcell}, din), dim_out = {dcell},
+                param_type = table.connect({'N', 'D'}, table.vector(#din, 'N')), pr = pr, activation = nerv.SigmoidLayer},
+            projection = {dim_in = {dcell}, dim_out = {dout}, pr = pr, no_bias = true},
         },
         ['nerv.TanhLayer'] = {
             mainTanh = {dim_in = {dcell}, dim_out = {dcell}},
             outputTanh = {dim_in = {dcell}, dim_out = {dcell}},
         },
-        ['nerv.LSTMGateLayer'] = {
-            forgetGate = {dim_in = {din, dout, dcell}, dim_out = {dcell}, param_type = {'N', 'N', 'D'}, pr = pr},
-            inputGate = {dim_in = {din, dout, dcell}, dim_out = {dcell}, param_type = {'N', 'N', 'D'}, pr = pr},
-            outputGate = {dim_in = {din, dout, dcell}, dim_out = {dcell}, param_type = {'N', 'N', 'D'}, pr = pr},
-        },
         ['nerv.ElemMulLayer'] = {
             inputGateMul = {dim_in = {dcell, dcell}, dim_out = {dcell}},
             forgetGateMul = {dim_in = {dcell, dcell}, dim_out = {dcell}},
             outputGateMul = {dim_in = {dcell, dcell}, dim_out = {dcell}},
         },
-        ['nerv.ProjectionLayer'] = {
-            projection = {dim_in = {dcell}, dim_out = {dout}, pr = pr},
-        },
     }
-
+    for i = 1, #din do
+        layers['nerv.DuplicateLayer']['inputDup' .. i] = {dim_in = {din[i]}, dim_out = {din[i], din[i], din[i], din[i]}}
+    end
+
     local connections = {
         -- lstm input
-        {'<input>[1]', 'inputDup[1]', 0},
+        --{'<input>[1 .. n]', 'inputDup(1 .. n)[1]', 0},
 
         -- input gate
-        {'inputDup[1]', 'inputGate[1]', 0},
-        {'outputDup[1]', 'inputGate[2]', 1},
-        {'cellDup[1]', 'inputGate[3]', 1},
+        {'outputDup[1]', 'inputGate[1]', 1},
+        {'cellDup[1]', 'inputGate[2]', 1},
+        --{'inputDup(1 .. n)[1]', 'inputGate[3 .. n + 2]', 0},
 
         -- forget gate
-        {'inputDup[2]', 'forgetGate[1]', 0},
-        {'outputDup[2]', 'forgetGate[2]', 1},
-        {'cellDup[2]', 'forgetGate[3]', 1},
+        {'outputDup[2]', 'forgetGate[1]', 1},
+        {'cellDup[2]', 'forgetGate[2]', 1},
+        --{'inputDup(1 .. n)[2]', 'forgetGate[3 .. n + 2]', 0},
 
         -- lstm cell
         {'forgetGate[1]', 'forgetGateMul[1]', 0},
         {'cellDup[3]', 'forgetGateMul[2]', 1},
-        {'inputDup[3]', 'mainAffine[1]', 0},
-        {'outputDup[3]', 'mainAffine[2]', 1},
+        {'outputDup[3]', 'mainAffine[1]', 1},
+        --{'inputDup(1 .. n)[3]', 'mainAffine[2 .. n + 1]', 0},
         {'mainAffine[1]', 'mainTanh[1]', 0},
         {'inputGate[1]', 'inputGateMul[1]', 0},
         {'mainTanh[1]', 'inputGateMul[2]', 0},
@@ -71,9 +75,9 @@ function LSTMPLayer:__init(id, global_conf, layer_conf)
         {'mainCombine[1]', 'cellDup[1]', 0},
 
         -- forget gate
-        {'inputDup[4]', 'outputGate[1]', 0},
-        {'outputDup[4]', 'outputGate[2]', 1},
-        {'cellDup[4]', 'outputGate[3]', 0},
+        {'outputDup[4]', 'outputGate[1]', 1},
+        {'cellDup[4]', 'outputGate[2]', 0},
+        --{'inputDup(1 .. n)[4]', 'outputGate[2 .. n + 1]', 0},
 
         -- lstm output
         {'cellDup[5]', 'outputTanh[1]', 0},
@@ -83,6 +87,13 @@ function LSTMPLayer:__init(id, global_conf, layer_conf)
         {'projection[1]', 'outputDup[1]', 0},
         {'outputDup[5]', '<output>[1]', 0},
     }
+    for i = 1, #din do
+        table.insert(connections, {'<input>[' .. i .. ']', 'inputDup' .. i .. '[1]', 0})
+        table.insert(connections, {'inputDup' .. i .. '[1]', 'inputGate[' .. (i + 2) .. ']', 0})
+        table.insert(connections, {'inputDup' .. i .. '[2]', 'forgetGate[' .. (i + 2) .. ']', 0})
+        table.insert(connections, {'inputDup' .. i .. '[3]', 'mainAffine[' .. (i + 1) .. ']', 0})
+        table.insert(connections, {'inputDup' .. i .. '[4]', 'outputGate[' .. (i + 2) .. ']', 0})
+    end
 
     self:add_prefix(layers, connections)
     local layer_repo = nerv.LayerRepo(layers, pr, global_conf)
diff --git a/nerv/layer/projection.lua b/nerv/layer/projection.lua
deleted file mode 100644
index 077125b..0000000
--- a/nerv/layer/projection.lua
+++ /dev/null
@@ -1,70 +0,0 @@
-local ProjectionLayer = nerv.class('nerv.ProjectionLayer', 'nerv.Layer')
-
---- The constructor.
-function ProjectionLayer:__init(id, global_conf, layer_conf)
-    nerv.Layer.__init(self, id, global_conf, layer_conf)
-    self:check_dim_len(-1, 1) -- exactly one output, allow multiple inputs
-    self:bind_params()
-end
-
-function ProjectionLayer:bind_params()
-    local lconf = self.lconf
-    lconf.no_update_ltp1 = lconf.no_update_ltp1 or lconf.no_update_ltp
-    for i = 1, #self.dim_in do
-        local pid = "ltp" .. i
-        local pid_list = i == 1 and {pid, "ltp"} or pid
-        self["ltp" .. i] = self:find_param(pid_list, lconf, self.gconf,
-                                            nerv.LinearTransParam,
-                                            {self.dim_in[i], self.dim_out[1]})
-        local no_update = lconf["no_update_ltp" .. i]
-        if (no_update ~= nil) and no_update or lconf.no_update_all then
-            self["ltp" .. i].no_update = true
-        end
-    end
-    self.ltp = self.ltp1 -- alias of ltp1
-end
-
-function ProjectionLayer:init(batch_size)
-    for i = 1, #self.dim_in do
-        if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
-            nerv.error("mismatching dimensions of linear transform parameter and input")
-        end
-        if self.dim_out[1] ~= self["ltp" .. i].trans:ncol() then
-            nerv.error("mismatching dimensions of linear transform parameter and output")
-        end
-        self["ltp" .. i]:train_init()
-    end
-end
-
-function ProjectionLayer:batch_resize(batch_size)
-    -- do nothing
-end
-
-function ProjectionLayer:update()
-    for i = 1, #self.dim_in do
-        self["ltp" .. i]:update_by_err_input()
-    end
-end
-
-function ProjectionLayer:propagate(input, output)
-    -- apply linear transform
-    output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
-    for i = 2, #self.dim_in do
-        output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
-    end
-end
-
-function ProjectionLayer:back_propagate(bp_err, next_bp_err, input, output)
-    for i = 1, #self.dim_in do
-        next_bp_err[i]:mul(bp_err[1], self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
-        self["ltp" .. i]:back_propagate_by_err_input(bp_err[1], input[i])
-    end
-end
-
-function ProjectionLayer:get_params()
-    local pr = nerv.ParamRepo({self.ltp1}, self.loc_type)
-    for i = 2, #self.dim_in do
-        pr:add(self["ltp" .. i])
-    end
-    return pr
-end
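Similarly, the deleted ProjectionLayer is just a bias-free linear map, which the lstmp.lua change above now expresses as an affine layer with no_bias = true. A standalone sketch with made-up dimensions:

    local proj = nerv.AffineLayer('projection', gconf,
                                  {dim_in = {1024}, dim_out = {512},
                                   no_bias = true, pr = param_repo})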
diff --git a/nerv/layer/rnn.lua b/nerv/layer/rnn.lua
index fd6e753..63e0b55 100644
--- a/nerv/layer/rnn.lua
+++ b/nerv/layer/rnn.lua
@@ -4,12 +4,7 @@ function RNNLayer:__init(id, global_conf, layer_conf)
     nerv.Layer.__init(self, id, global_conf, layer_conf)
     self:check_dim_len(-1, 1)
     if #self.dim_in == 0 then
-        nerv.error('RNN Layer %s has no input', self.id)
-    end
-
-    self.activation = layer_conf.activation
-    if self.activation == nil then
-        self.activation = 'nerv.SigmoidLayer'
+        nerv.error('RNN layer %s has no input', self.id)
     end
 
     local din = layer_conf.dim_in
@@ -22,10 +17,7 @@ function RNNLayer:__init(id, global_conf, layer_conf)
 
     local layers = {
         ['nerv.AffineLayer'] = {
-            main = {dim_in = table.connect({dout}, din), dim_out = {dout}, pr = pr},
-        },
-        [self.activation] = {
-            activation = {dim_in = {dout}, dim_out = {dout}},
+            main = {dim_in = table.connect({dout}, din), dim_out = {dout}, pr = pr, activation = layer_conf.activation},
         },
         ['nerv.DuplicateLayer'] = {
             duplicate = {dim_in = {dout}, dim_out = {dout, dout}},
@@ -33,8 +25,7 @@ function RNNLayer:__init(id, global_conf, layer_conf)
     }
 
     local connections = {
-        {'main[1]', 'activation[1]', 0},
-        {'activation[1]', 'duplicate[1]', 0},
+        {'main[1]', 'duplicate[1]', 0},
         {'duplicate[1]', 'main[1]', 1},
         {'duplicate[2]', '<output>[1]', 0},
    }
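Since the RNN layer now forwards layer_conf.activation straight into its affine layer, the old implicit nerv.SigmoidLayer default is gone; as far as this diff shows, leaving activation unset yields a purely linear recurrence, so the nonlinearity should be given explicitly. A usage sketch, with an illustrative id and dimensions:

    local rnn = nerv.RNNLayer('rnn0', gconf,
                              {dim_in = {256}, dim_out = {512},
                               activation = nerv.TanhLayer,   -- pick the nonlinearity explicitly
                               pr = param_repo})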