Diffstat (limited to 'nerv/layer/affine.lua')
-rw-r--r-- | nerv/layer/affine.lua | 110
1 files changed, 56 insertions, 54 deletions
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 3ba9408..566e9bc 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -5,7 +5,7 @@ local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
 
 function MatrixParam:read(handle)
     self.trans = self.gconf.cumat_type.new_from_host(
-                    nerv.MMatrixFloat.load(handle))
+                    self.gconf.mmat_type.load(handle))
 end
 
 function MatrixParam:write(handle)
@@ -17,74 +17,82 @@ function MatrixParam:train_init()
     self.correction:fill(0)
 end
 
-function MatrixParam:update_by_gradient(gradient)
+function MatrixParam:_update_by_gradient(gradient, alpha, beta)
     local gconf = self.gconf
+    -- momentum gain
+    local mmt_gain = 1.0 / (1.0 - gconf.momentum)
+    local n = gconf.batch_size * mmt_gain
+    -- perform update
     if gconf.momentum > 0 then
         self.correction:add(self.correction, gradient, gconf.momentum, 1.0)
-        -- momentum gain
-        local mmt_gain = 1.0 / (1.0 - gconf.momentum)
-        local n = self.gconf.batch_size * mmt_gain
-        -- perform update
-        self.trans:add(self.trans, self.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+        self.trans:add(self.trans, self.correction, alpha, -gconf.lrate / n * beta)
     else
-        self.trans:add(self.trans, gradient, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
+        self.trans:add(self.trans, gradient, alpha, -gconf.lrate / n * beta)
     end
 end
 
-function MatrixParam:update_by_err_input(err, input)
+function MatrixParam:_update_by_err_input(err, input, alpha, beta)
     local gconf = self.gconf
+    -- momentum gain
+    local mmt_gain = 1.0 / (1.0 - gconf.momentum)
+    local n = gconf.batch_size * mmt_gain
+    -- perform update
     if gconf.momentum > 0 then
         self.correction:mul(input, err, 1.0, gconf.momentum, 'T', 'N')
-        -- momentum gain
-        local mmt_gain = 1.0 / (1.0 - gconf.momentum)
-        local n = self.gconf.batch_size * mmt_gain
-        -- perform update
-        self.trans:add(self.trans, self.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
+        self.trans:add(self.trans, self.correction, alpha, -gconf.lrate / n * beta)
     else
-        self.trans:mul(input, err, - gconf.lrate / gconf.batch_size, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, 'T', 'N')
+        self.trans:mul(input, err, -gconf.lrate / n * beta, alpha, 'T', 'N')
     end
 end
 
---[[ --these updates are the same
-function LinearTransParam:update(gradient)
-    MatrixParam.update(self, gradient)
-    -- local gconf = self.gconf
-    -- weight decay(put into MatrixParam:update)
-    -- self.trans:add(self.trans, self.trans, 1.0, -gconf.lrate * gconf.wcost / gconf.batch_size)
+function MatrixParam:update_by_gradient(gradient)
+    self:_update_by_gradient(gradient, 1.0, 1.0)
 end
 
-function BiasParam:update(gradient)
-    MatrixParam.update(self, gradient)
-    --local gconf = self.gconf
-    -- weight decay
-    -- self.trans:add(self.trans, self.trans, 1.0, -gconf.lrate * gconf.wcost / gconf.batch_size)
+function MatrixParam:update_by_err_input(err, input)
+    self:_update_by_err_input(err, input, 1.0, 1.0)
+end
+
+function LinearTransParam:update_by_err_input(err, input)
+    local gconf = self.gconf
+    local l2 = 1 - gconf.lrate * gconf.wcost
+    self:_update_by_err_input(err, input, l2, l2)
 end
-]]--
 
 function AffineLayer:__init(id, global_conf, layer_conf)
     self.id = id
-    self.ltp = layer_conf.ltp
-    self.bp = layer_conf.bp
     self.dim_in = layer_conf.dim_in
     self.dim_out = layer_conf.dim_out
+    self.ltp = self:find_param("ltp", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[1], self.dim_out[1]}) --layer_conf.ltp
+    for i = 2, #self.dim_in do
+        self["ltp" .. i] = self:find_param("ltp" .. i, layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[i], self.dim_out[1]})
+    end
+    self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]}) --layer_conf.bp
     self.gconf = global_conf
-    self:check_dim_len(1, 1) -- exactly one input and one output
-    self.direct_update = layer_conf.direct_update or global_conf.direct_update
+    self:check_dim_len(-1, 1) -- exactly one output, allow multiple inputs
 end
 
 function AffineLayer:init(batch_size)
     if self.ltp.trans:ncol() ~= self.bp.trans:ncol() then
         nerv.error("mismatching dimensions of linear transform and bias paramter")
     end
+    self.bp:train_init()
     if self.dim_in[1] ~= self.ltp.trans:nrow() then
         nerv.error("mismatching dimensions of linear transform parameter and input")
     end
     if self.dim_out[1] ~= self.ltp.trans:ncol() then
         nerv.error("mismatching dimensions of linear transform parameter and output")
     end
-    self.ltp_grad = self.ltp.trans:create()
     self.ltp:train_init()
-    self.bp:train_init()
+    for i = 2, #self.dim_in do
+        if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
+            nerv.error("mismatching dimensions of linear transform parameter and input")
+        end
+        if self.dim_out[1] ~= self["ltp" .. i].trans:ncol() then
+            nerv.error("mismatching dimensions of linear transform parameter and output")
+        end
+        self["ltp" .. i]:train_init()
+    end
 end
 
 function AffineLayer:batch_resize(batch_size)
@@ -92,38 +100,32 @@ function AffineLayer:batch_resize(batch_size)
 end
 
 function AffineLayer:update(bp_err, input, output)
-    if self.direct_update == true then
-        local gconf = self.gconf
-        if gconf.momentum > 0 then
-            self.ltp.correction:mul(input[1], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
-            self.bp.correction:add(self.bp.correction, bp_err[1]:colsum(), gconf.momentum, 1)
-            -- momentum gain
-            local mmt_gain = 1.0 / (1.0 - gconf.momentum)
-            local n = self.gconf.batch_size * mmt_gain
-            -- perform update
-            self.ltp.trans:add(self.ltp.trans, self.ltp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
-            self.bp.trans:add(self.bp.trans, self.bp.correction, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / n)
-        else
-            self.ltp.trans:mul(input[1], bp_err[1], - gconf.lrate / gconf.batch_size, 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, 'T', 'N')
-            self.bp.trans:add(self.bp.trans, bp_err[1]:colsum(), 1.0 - gconf.lrate * gconf.wcost / gconf.batch_size, - gconf.lrate / gconf.batch_size)
-        end
-    else
-        self.ltp:update_by_err_input(bp_err[1], input[1])
-        self.bp:update_by_gradient(bp_err[1]:colsum())
+    self.ltp:update_by_err_input(bp_err[1], input[1])
+    for i = 2, #self.dim_in do
+        self["ltp" .. i]:update_by_err_input(bp_err[1], input[i])
     end
+    self.bp:update_by_gradient(bp_err[1]:colsum())
 end
 
 function AffineLayer:propagate(input, output)
-    -- apply linear transform
     output[1]:mul(input[1], self.ltp.trans, 1.0, 0.0, 'N', 'N')
-    -- add bias
+    for i = 2, #self.dim_in do
+        output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
+    end
     output[1]:add_row(self.bp.trans, 1.0)
 end
 
 function AffineLayer:back_propagate(bp_err, next_bp_err, input, output)
     next_bp_err[1]:mul(bp_err[1], self.ltp.trans, 1.0, 0.0, 'N', 'T')
+    for i = 2, #self.dim_in do
+        next_bp_err[i]:mul(bp_err[1], self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
+    end
 end
 
 function AffineLayer:get_params()
-    return nerv.ParamRepo({self.ltp, self.bp})
+    local pr = nerv.ParamRepo({self.ltp, self.bp})
+    for i = 2, #self.dim_in do
+        pr:add(self["ltp" .. i].id, self["ltp" .. i])
+    end
+    return pr
 end
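The substantive change in the update path is that L2 weight decay now enters only through the new alpha/beta arguments of the shared _update_by_* helpers: LinearTransParam passes l2 = 1 - gconf.lrate * gconf.wcost for both, while the plain MatrixParam wrappers used for the bias pass 1.0, so biases are no longer decayed and the decay factor no longer divides by batch_size. Below is a minimal scalar sketch of that rule, assuming only the gconf fields visible in the diff (lrate, wcost, momentum, batch_size); the helper sgd_step is hypothetical and not part of nerv.

    -- hypothetical scalar analogue of MatrixParam:_update_by_err_input (not nerv API);
    -- w is one weight, grad its gradient, corr the momentum accumulator
    local function sgd_step(w, corr, grad, gconf, alpha, beta)
        local mmt_gain = 1.0 / (1.0 - gconf.momentum)
        local n = gconf.batch_size * mmt_gain
        if gconf.momentum > 0 then
            corr = grad + gconf.momentum * corr            -- accumulate momentum
            w = alpha * w - gconf.lrate / n * beta * corr  -- decay via alpha, step via beta
        else
            w = alpha * w - gconf.lrate / n * beta * grad
        end
        return w, corr
    end

    -- MatrixParam:update_by_err_input corresponds to sgd_step(..., 1.0, 1.0);
    -- LinearTransParam:update_by_err_input to sgd_step(..., l2, l2) with
    -- l2 = 1 - gconf.lrate * gconf.wcost, so only linear transforms get weight decay.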