author    Qi Liu <[email protected]>  2016-05-09 20:51:10 +0800
committer Qi Liu <[email protected]>  2016-05-09 20:51:10 +0800
commit    03439902dbd339cfbbc684b6fcc6b1810fa02ede (patch)
tree      abf9c3cab15105e342def200e02a8a27ca4013f3 /nerv
parent    89a3fa93d571f446bcd1fa69ddd35257d975c239 (diff)
fix bug in affine.lua
Diffstat (limited to 'nerv')
-rw-r--r--  nerv/layer/affine.lua  24
-rw-r--r--  nerv/nn/network.lua     1
2 files changed, 13 insertions, 12 deletions
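The first hunk below documents the `layer_conf` fields this patch touches (`activation`, `no_bias`, `param_type`). As a rough, hypothetical usage sketch (not part of the commit; it assumes a populated `gconf` table, an available `nerv.SigmoidLayer` class, and made-up dimensions), an affine layer exercising all three fields could be constructed like this:

    local affine = nerv.AffineLayer('affine1', gconf, {
        dim_in = {2048, 429},            -- two inputs, giving two weight matrices ltp1 and ltp2
        dim_out = {2048},
        param_type = {'D', 'N'},         -- diagonal weight for input 1, normal weight for input 2
        no_bias = false,                 -- keep the bias parameter bp
        activation = nerv.SigmoidLayer,  -- a class; __init instantiates it as \sigma in \sigma(Wx + b)
    })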
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 8b4751c..1ac4681 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -88,7 +88,7 @@ local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
-- @param layer_conf a table providing with settings dedicated for the layer,
-- for `layer_conf` fields that are shared by all layers, see
-- `nerv.Layer.__init`. These fields can be specified:
--- * `activation`: the type of the activation function layer, also known as \sigma in \sigma(Wx + b). Default value none (no activation function).
+-- * `activation`: the type of the activation function layer, also known as \sigma in \sigma(Wx + b). The activation function layer must guarantee that it does not use the parameter `input` in its `back_propagate` function. Default value none (no activation function).
-- * `no_bias`: a bool value; when true, the bias parameter is not used. Default value false.
-- * `param_type`: a string table of the same length as `dim_in`, indicating the parameter type for every input: 'D' for a diagonal weight matrix, 'N' for a normal weight matrix. Default 'N' for every input.
-- The affine layer requires parameters to be bound, the
@@ -99,11 +99,12 @@ local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
function AffineLayer:__init(id, global_conf, layer_conf)
nerv.Layer.__init(self, id, global_conf, layer_conf)
- self.param_type = layer_conf.param_type or table.vector(#self.dim_in, 'N')
self:check_dim_len(-1, 1) -- exactly one output, allow multiple inputs
+ self.param_type = layer_conf.param_type or table.vector(#self.dim_in, 'N')
if layer_conf.activation then
self.activation = layer_conf.activation('', global_conf, {dim_in = {self.dim_out[1]}, dim_out = {self.dim_out[1]}})
end
+ self.no_bias = layer_conf.no_bias
self:bind_params()
end
@@ -138,7 +139,7 @@ function AffineLayer:bind_params()
end
function AffineLayer:init(batch_size)
- if self.dim_out[1] ~= self.bp.trans:ncol() then
+ if not self.no_bias and self.dim_out[1] ~= self.bp.trans:ncol() then
nerv.error("mismatching dimensions of linear transform and bias paramter")
end
for i = 1, #self.dim_in do
@@ -154,10 +155,8 @@ function AffineLayer:init(batch_size)
self.bp:train_init()
end
if self.activation then
- self.act_bak = self.mat_type(batch_size, self.dim_out[1])
- self.act_bak:fill(0)
- self.err_bak = self.mat_type(batch_size, self.dim_out[1])
- self.err_bak:fill(0)
+ self.bak_mat = self.mat_type(batch_size, self.dim_out[1])
+ self.bak_mat:fill(0)
end
end
@@ -168,6 +167,9 @@ end
function AffineLayer:update()
for i = 1, #self.dim_in do
self["ltp" .. i]:update_by_err_input()
+ if self.param_type[i] == 'D' then
+ self['ltp' .. i].trans:diagonalize()
+ end
end
if not self.no_bias then
self.bp:update_by_gradient()
@@ -175,7 +177,7 @@ function AffineLayer:update()
end
function AffineLayer:propagate(input, output)
- local result = self.activation and self.act_bak or output[1]
+ local result = self.activation and self.bak_mat or output[1]
-- apply linear transform
result:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
for i = 2, #self.dim_in do
@@ -186,15 +188,15 @@ function AffineLayer:propagate(input, output)
result:add_row(self.bp.trans, 1.0)
end
if self.activation then
- self.activation:propagate({self.act_bak}, output)
+ self.activation:propagate({result}, output)
end
end
function AffineLayer:back_propagate(bp_err, next_bp_err, input, output)
+ local result = self.activation and self.bak_mat or bp_err[1]
if self.activation then
- self.activation:back_propagate(bp_err, {self.err_bak}, {self.act_bak}, output)
+ self.activation:back_propagate(bp_err, {result}, {result}, output)
end
- local result = self.activation and self.err_bak or bp_err[1]
for i = 1, #self.dim_in do
next_bp_err[i]:mul(result, self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
self["ltp" .. i]:back_propagate_by_err_input(result, input[i])
diff --git a/nerv/nn/network.lua b/nerv/nn/network.lua
index bf69ccc..d0d5462 100644
--- a/nerv/nn/network.lua
+++ b/nerv/nn/network.lua
@@ -416,7 +416,6 @@ function network:make_initial_store()
local dim_in, dim_out = self.layers[i]:get_dim()
for j = 1, #dim_in do
if self.input[t][i][j] == nil then
- print(t,i,j,self.layers[i].id)
nerv.error('input reference dangling')
end
if self.err_output[t][i][j] == nil then