Diffstat (limited to 'nerv/layer/affine.lua')
-rw-r--r-- nerv/layer/affine.lua | 75
1 file changed, 58 insertions(+), 17 deletions(-)
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 16250fd..b68cf3d 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -48,6 +48,10 @@ function MatrixParam:_update(alpha, beta)
-- momentum gain
local mmt_gain = 1.0 / (1.0 - gconf.momentum)
local n = gconf.batch_size * mmt_gain
+ -- clip gradient
+ if gconf.clip then
+ self.correction_acc:clip(-gconf.clip, gconf.clip)
+ end
-- perform update
if gconf.momentum > 0 then
self.correction:add(self.correction, self.correction_acc, gconf.momentum, 1.0)
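
The hunk above adds optional gradient clipping to MatrixParam:_update: when gconf.clip is set, the accumulated correction is clamped element-wise to [-clip, clip] before the momentum and learning-rate scaling are applied. A minimal sketch of a global_conf that enables this path is shown below; only fields already visible in the hunk are used, and the concrete values are illustrative assumptions.

-- illustrative global_conf sketch; the values are assumptions, not part of the patch
local gconf = {
    batch_size = 256,  -- used above as n = batch_size * mmt_gain
    momentum   = 0.9,  -- mmt_gain = 1.0 / (1.0 - momentum)
    clip       = 5.0,  -- new: clamp the accumulated gradient to [-5.0, 5.0]
}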
@@ -87,7 +91,11 @@ local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
-- @param global_conf see `self.gconf` of `nerv.Layer.__init`
-- @param layer_conf a table providing with settings dedicated for the layer,
-- for `layer_conf` fields that are shared by all layers, see
--- `nerv.Layer.__init`. The affine layer requires parameters to be bound, the
+-- `nerv.Layer.__init`. The following fields can be specified:
+-- * `activation`: the type of the activation function layer, i.e. \sigma in \sigma(Wx + b). The activation function layer must guarantee that it does not use the parameter `input` in its `back_propagate` function. Defaults to none (no activation function).
+-- * `no_bias`: a boolean value; when true, the layer omits the bias parameter. Defaults to false.
+-- * `param_type`: a table of strings of the same length as `dim_in`, giving the parameter type for each input: 'D' for a diagonal weight matrix, 'N' for a normal weight matrix. Defaults to 'N' for every input.
+-- The affine layer requires parameters to be bound, the
-- following parameter names will be looked up while binding:
--
-- * `ltp`: the linear transformation parameter, also known as the weight matrix, W in Wx + b
@@ -96,6 +104,11 @@ local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
function AffineLayer:__init(id, global_conf, layer_conf)
nerv.Layer.__init(self, id, global_conf, layer_conf)
self:check_dim_len(-1, 1) -- exactly one output, allow multiple inputs
+ self.param_type = layer_conf.param_type or table.vector(#self.dim_in, 'N')
+ if layer_conf.activation then
+ self.activation = layer_conf.activation('', global_conf, {dim_in = {self.dim_out[1]}, dim_out = {self.dim_out[1]}})
+ end
+ self.no_bias = layer_conf.no_bias
self:bind_params()
end
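
Together with the documentation hunk above, the constructor now accepts three optional fields: activation (a layer class instantiated over dim_out[1]), no_bias, and param_type. A hedged construction sketch follows; the layer id, the dimensions and the use of nerv.SigmoidLayer as the activation class are assumptions for illustration only.

-- illustrative construction; the id, dims and sigmoid choice are assumptions
local affine = nerv.AffineLayer('affine1', gconf, {
    dim_in = {429, 429}, dim_out = {2048},
    activation = nerv.SigmoidLayer, -- \sigma in \sigma(Wx + b); assumed class name
    no_bias = false,                -- keep the bias parameter bp
    param_type = {'N', 'D'},        -- constrain the second input's weights to be diagonal
})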
@@ -108,24 +121,29 @@ function AffineLayer:bind_params()
self["ltp" .. i] = self:find_param(pid_list, lconf, self.gconf,
nerv.LinearTransParam,
{self.dim_in[i], self.dim_out[1]})
+ if self.param_type[i] == 'D' then
+ self['ltp' .. i].trans:diagonalize()
+ end
local no_update = lconf["no_update_ltp" .. i]
if (no_update ~= nil) and no_update or lconf.no_update_all then
self["ltp" .. i].no_update = true
end
end
self.ltp = self.ltp1 -- alias of ltp1
- self.bp = self:find_param("bp", lconf, self.gconf,
- nerv.BiasParam,
- {1, self.dim_out[1]},
- nerv.Param.gen_zero)
- local no_update = lconf["no_update_bp"]
- if (no_update ~= nil) and no_update or lconf.no_update_all then
- self.bp.no_update = true
+ if not self.no_bias then
+ self.bp = self:find_param("bp", lconf, self.gconf,
+ nerv.BiasParam,
+ {1, self.dim_out[1]},
+ nerv.Param.gen_zero)
+ local no_update = lconf["no_update_bp"]
+ if (no_update ~= nil) and no_update or lconf.no_update_all then
+ self.bp.no_update = true
+ end
end
end
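
For every input whose param_type is 'D', bind_params calls trans:diagonalize() right after the parameter is found, and update() below repeats the call after each gradient step, so the diagonal constraint is maintained throughout training. As a rough mental model only (plain Lua over nested tables, not the nerv matrix API), diagonalizing a weight matrix amounts to zeroing its off-diagonal entries:

-- plain-Lua sketch of the assumed effect of diagonalize(); the real method
-- operates on nerv's matrix type, not on nested tables
local function diagonalize(w)
    for r = 1, #w do
        for c = 1, #w[r] do
            if r ~= c then w[r][c] = 0 end -- keep only the diagonal entries
        end
    end
    return w
end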
function AffineLayer:init(batch_size)
- if self.dim_out[1] ~= self.bp.trans:ncol() then
+ if not self.no_bias and self.dim_out[1] ~= self.bp.trans:ncol() then
nerv.error("mismatching dimensions of linear transform and bias paramter")
end
for i = 1, #self.dim_in do
@@ -137,7 +155,13 @@ function AffineLayer:init(batch_size)
end
self["ltp" .. i]:train_init()
end
- self.bp:train_init()
+ if not self.no_bias then
+ self.bp:train_init()
+ end
+ if self.activation then
+ self.bak_mat = self.mat_type(batch_size, self.dim_out[1])
+ self.bak_mat:fill(0)
+ end
end
function AffineLayer:batch_resize(batch_size)
@@ -147,26 +171,43 @@ end
function AffineLayer:update()
for i = 1, #self.dim_in do
self["ltp" .. i]:update_by_err_input()
+ if self.param_type[i] == 'D' then
+ self['ltp' .. i].trans:diagonalize()
+ end
+ end
+ if not self.no_bias then
+ self.bp:update_by_gradient()
end
- self.bp:update_by_gradient()
end
function AffineLayer:propagate(input, output)
+ local result = self.activation and self.bak_mat or output[1]
-- apply linear transform
- output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
+ result:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
for i = 2, #self.dim_in do
- output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
+ result:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
end
-- add bias
- output[1]:add_row(self.bp.trans, 1.0)
+ if not self.no_bias then
+ result:add_row(self.bp.trans, 1.0)
+ end
+ if self.activation then
+ self.activation:propagate({result}, output)
+ end
end
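
When an activation is bound, propagate writes the affine result into the scratch buffer self.bak_mat (allocated in init above) instead of output[1], and then lets the activation layer fill output[1]. In the notation of the doc comment, with z and y as symbols introduced here for clarity:

z = \sum_i x_i W_i + b   (kept in self.bak_mat)
y = \sigma(z)            (written to output[1])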
function AffineLayer:back_propagate(bp_err, next_bp_err, input, output)
+ local result = self.activation and self.bak_mat or bp_err[1]
+ if self.activation then
+ self.activation:back_propagate(bp_err, {result}, {result}, output)
+ end
for i = 1, #self.dim_in do
- next_bp_err[i]:mul(bp_err[1], self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
- self["ltp" .. i]:back_propagate_by_err_input(bp_err[1], input[i])
+ next_bp_err[i]:mul(result, self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
+ self["ltp" .. i]:back_propagate_by_err_input(result, input[i])
+ end
+ if not self.no_bias then
+ self.bp:back_propagate_by_gradient(result:colsum())
end
- self.bp:back_propagate_by_gradient(bp_err[1]:colsum())
end
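
back_propagate mirrors this: the activation's back_propagate is called with the same buffer as both its input and next_bp_err arguments, which is why the doc comment requires it not to use input; afterwards self.bak_mat holds the error with respect to the pre-activation and drives all three gradient paths. With \delta denoting that buffer and L the loss (symbols introduced here, assuming an element-wise activation):

\delta = \sigma'(z) \odot \partial L/\partial y
\partial L/\partial x_i = \delta W_i^T     (next_bp_err[i])
\partial L/\partial W_i = x_i^T \delta     (accumulated by back_propagate_by_err_input)
\partial L/\partial b   = colsum(\delta)   (passed to back_propagate_by_gradient)

Without an activation, \delta is simply bp_err[1].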
function AffineLayer:get_params()