author:    Determinant <[email protected]>  2016-02-15 15:04:13 +0800
committer: Determinant <[email protected]>  2016-02-15 15:04:13 +0800
commit:    3362020a6bc43766a92882abe6d127c8bb98a628 (patch)
tree:      cad93eb88c2813694c0ae4ca4ecb9873a719ad85 /nerv/layer/affine.lua
parent:    dcad8a3f80fc55ca93984d981f9b829d2e4ea728 (diff)
try a basic merge
Diffstat (limited to 'nerv/layer/affine.lua')
-rw-r--r--  nerv/layer/affine.lua | 56
1 file changed, 35 insertions(+), 21 deletions(-)
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 566e9bc..0d4f7dd 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -1,15 +1,28 @@
+--- Parameter and layer classes related to linear transform.
+
 local MatrixParam = nerv.class('nerv.MatrixParam', 'nerv.Param')
 local LinearTransParam = nerv.class('nerv.LinearTransParam', 'nerv.MatrixParam')
 local BiasParam = nerv.class('nerv.BiasParam', 'nerv.MatrixParam')
 local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
 
+--- A parameter that consists of a single matrix
+-- @type nerv.MatrixParam
+
+--- Read from a file handle.
+-- @param handle the file handle
 function MatrixParam:read(handle)
-    self.trans = self.gconf.cumat_type.new_from_host(
-        self.gconf.mmat_type.load(handle))
+    self.trans = self.gconf.mmat_type.load(handle)
+    if not self.gconf.use_cpu then
+        self.trans = self.gconf.cumat_type.new_from_host(self.trans)
+    end
 end
 
 function MatrixParam:write(handle)
-    self.trans:new_to_host():save(handle)
+    local trans = self.trans
+    if not self.gconf.use_cpu then
+        trans = self.trans:new_to_host()
+    end
+    trans:save(handle)
 end
 
 function MatrixParam:train_init()
@@ -59,15 +72,23 @@ function LinearTransParam:update_by_err_input(err, input)
     self:_update_by_err_input(err, input, l2, l2)
 end
 
+--- A fully-connected linear transform layer.
+-- @type nerv.AffineLayer
+
+--- The constructor.
 function AffineLayer:__init(id, global_conf, layer_conf)
     self.id = id
     self.dim_in = layer_conf.dim_in
     self.dim_out = layer_conf.dim_out
-    self.ltp = self:find_param("ltp", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[1], self.dim_out[1]}) --layer_conf.ltp
-    for i = 2, #self.dim_in do
-        self["ltp" .. i] = self:find_param("ltp" .. i, layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[i], self.dim_out[1]})
+    for i = 1, #self.dim_in do
+        self["ltp" .. i] = self:find_param("ltp" .. i, layer_conf, global_conf,
+                                           nerv.LinearTransParam,
+                                           {self.dim_in[i], self.dim_out[1]})
     end
-    self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]}) --layer_conf.bp
+    self.ltp = self.ltp1 -- alias of ltp1
+    self.bp = self:find_param("bp", layer_conf, global_conf,
+                              nerv.BiasParam,
+                              {1, self.dim_out[1]})
     self.gconf = global_conf
     self:check_dim_len(-1, 1) -- exactly one output, allow multiple inputs
 end
@@ -76,15 +97,7 @@ function AffineLayer:init(batch_size)
     if self.ltp.trans:ncol() ~= self.bp.trans:ncol() then
         nerv.error("mismatching dimensions of linear transform and bias paramter")
     end
-    self.bp:train_init()
-    if self.dim_in[1] ~= self.ltp.trans:nrow() then
-        nerv.error("mismatching dimensions of linear transform parameter and input")
-    end
-    if self.dim_out[1] ~= self.ltp.trans:ncol() then
-        nerv.error("mismatching dimensions of linear transform parameter and output")
-    end
-    self.ltp:train_init()
-    for i = 2, #self.dim_in do
+    for i = 1, #self.dim_in do
         if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
             nerv.error("mismatching dimensions of linear transform parameter and input")
         end
@@ -93,6 +106,7 @@
         end
         self["ltp" .. i]:train_init()
     end
+    self.bp:train_init()
 end
 
 function AffineLayer:batch_resize(batch_size)
@@ -100,24 +114,24 @@
 end
 
 function AffineLayer:update(bp_err, input, output)
-    self.ltp:update_by_err_input(bp_err[1], input[1])
-    for i = 2, #self.dim_in do
+    for i = 1, #self.dim_in do
         self["ltp" .. i]:update_by_err_input(bp_err[1], input[i])
     end
     self.bp:update_by_gradient(bp_err[1]:colsum())
 end
 
 function AffineLayer:propagate(input, output)
-    output[1]:mul(input[1], self.ltp.trans, 1.0, 0.0, 'N', 'N')
+    -- apply linear transform
+    output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
     for i = 2, #self.dim_in do
         output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
     end
+    -- add bias
     output[1]:add_row(self.bp.trans, 1.0)
 end
 
 function AffineLayer:back_propagate(bp_err, next_bp_err, input, output)
-    next_bp_err[1]:mul(bp_err[1], self.ltp.trans, 1.0, 0.0, 'N', 'T')
-    for i = 2, #self.dim_in do
+    for i = 1, #self.dim_in do
         next_bp_err[i]:mul(bp_err[1], self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
     end
 end
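The patch generalizes nerv.AffineLayer to a uniform multi-input scheme: every input i gets its own LinearTransParam named ltp<i> (with self.ltp kept as an alias of ltp1), and propagation computes output = input[1] * ltp1 + input[2] * ltp2 + ... + bp. The sketch below is not part of the patch; it only illustrates how a two-input layer might be declared under the new naming, assuming the construction convention visible in the constructor above (id, global_conf, layer_conf) and a pre-existing gconf table carrying mmat_type/cumat_type/use_cpu. The dimensions and identifiers are hypothetical.

```lua
-- Hypothetical usage sketch (not from the patch): a two-input affine layer.
-- find_param() is expected to look up or create the parameters "ltp1",
-- "ltp2" and "bp" with shapes derived from dim_in/dim_out below.
local affine = nerv.AffineLayer("affine0", gconf,
                                {dim_in = {429, 100},  -- two input streams
                                 dim_out = {2048}})    -- single output
affine:init(batch_size)

-- propagate/back_propagate then iterate over all inputs:
--   output[1]      = input[1] * ltp1 + input[2] * ltp2 + bp   (bias added row-wise)
--   next_bp_err[i] = bp_err[1] * ltp<i>^T
```

A practical effect of the new use_cpu branches in MatrixParam:read/write appears to be that the same parameter file can be loaded either into host matrices (mmat_type) or device matrices (cumat_type) without changing the on-disk format, since writing always saves a host-side copy.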