author      Qi Liu <[email protected]>    2016-05-09 17:57:30 +0800
committer   Qi Liu <[email protected]>    2016-05-09 17:57:30 +0800
commit      89a3fa93d571f446bcd1fa69ddd35257d975c239 (patch)
tree        42d80fe75fe3af265bceca3c6593c027b0b87dcf
parent      d88a57f4852c50a2678de950ee650ed9b6a895f0 (diff)
merge affine & lstm_gate & projection layer
-rw-r--r--  nerv/Makefile                 4
-rw-r--r--  nerv/examples/ptb/main.lua    2
-rw-r--r--  nerv/layer/affine.lua        67
-rw-r--r--  nerv/layer/init.lua           2
-rw-r--r--  nerv/layer/lstm.lua           8
-rw-r--r--  nerv/layer/lstm_gate.lua     97
-rw-r--r--  nerv/layer/lstmp.lua         12
-rw-r--r--  nerv/layer/projection.lua    70
8 files changed, 61 insertions, 201 deletions
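This commit merges nerv.LSTMGateLayer and nerv.ProjectionLayer into nerv.AffineLayer, which now accepts optional `activation`, `no_bias` and `param_type` entries in its layer_conf. As a minimal sketch (not part of the patch itself), a gate that was previously declared under ['nerv.LSTMGateLayer'] is now declared in layer_repo style under ['nerv.AffineLayer'], with din/dout and the shared parameter repo `pr` following the conventions of the lstm.lua hunk below:

    ['nerv.AffineLayer'] = {
        -- sigmoid gate with a diagonal weight on the peephole (third) input;
        -- previously declared under ['nerv.LSTMGateLayer']
        forgetGate = {dim_in = {din, dout, dout}, dim_out = {dout},
                      param_type = {'N', 'N', 'D'}, pr = pr,
                      activation = nerv.SigmoidLayer},
    },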
diff --git a/nerv/Makefile b/nerv/Makefile
index 0d9934a..a3d0f34 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -40,9 +40,9 @@ OBJS := $(CORE_OBJS) $(NERV_OBJS) $(LUAT_OBJS)
LIBS := $(INST_LIBDIR)/libnerv.so $(LIB_PATH)/libnervcore.so $(LIB_PATH)/libluaT.so
LUA_LIBS := matrix/init.lua io/init.lua init.lua \
layer/init.lua layer/affine.lua layer/sigmoid.lua layer/tanh.lua layer/softmax_ce.lua layer/softmax.lua \
- layer/lstmp.lua layer/projection.lua layer/relu.lua\
+ layer/lstmp.lua layer/relu.lua\
layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua \
- layer/elem_mul.lua layer/lstm.lua layer/lstm_gate.lua layer/dropout.lua layer/gru.lua \
+ layer/elem_mul.lua layer/lstm.lua layer/dropout.lua layer/gru.lua \
layer/graph.lua layer/rnn.lua layer/duplicate.lua layer/identity.lua \
nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/network.lua nn/trainer.lua\
io/frm_buffer.lua io/seq_buffer.lua
diff --git a/nerv/examples/ptb/main.lua b/nerv/examples/ptb/main.lua
index 5d1a326..7191da0 100644
--- a/nerv/examples/ptb/main.lua
+++ b/nerv/examples/ptb/main.lua
@@ -105,7 +105,7 @@ function trainer:epoch_preprocess(dataset, do_train)
total_frame = 0
end
-function trainer:mini_batch_middleprocess(cnt, info)
+function trainer:mini_batch_inprocess(cnt, info)
for t = 1, gconf.chunk_size do
local tmp = info.output[1][t]:new_to_host()
for i = 1, gconf.batch_size do
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 16250fd..8b4751c 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -87,7 +87,11 @@ local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
-- @param global_conf see `self.gconf` of `nerv.Layer.__init`
-- @param layer_conf a table providing with settings dedicated for the layer,
-- for `layer_conf` fields that are shared by all layers, see
--- `nerv.Layer.__init`. The affine layer requires parameters to be bound, the
+-- `nerv.Layer.__init`. The following fields can be specified:
+-- * `activation`: the class of the activation function layer, i.e. \sigma in \sigma(Wx + b). Defaults to none (no activation function).
+-- * `no_bias`: a boolean indicating whether the bias parameter is omitted. Defaults to false (bias enabled).
+-- * `param_type`: a table of strings of the same length as `dim_in`, giving the parameter type for each input: 'D' for a diagonal weight matrix, 'N' for a normal (dense) weight matrix. Defaults to 'N' for every input.
+-- The affine layer requires parameters to be bound, the
-- following parameter names will be looked up while binding:
--
-- * `ltp`: the linear transformation parameter, also known as the weight matrix, W in Wx + b
@@ -95,7 +99,11 @@ local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
function AffineLayer:__init(id, global_conf, layer_conf)
nerv.Layer.__init(self, id, global_conf, layer_conf)
+ self.param_type = layer_conf.param_type or table.vector(#self.dim_in, 'N')
self:check_dim_len(-1, 1) -- exactly one output, allow multiple inputs
+ if layer_conf.activation then
+ self.activation = layer_conf.activation('', global_conf, {dim_in = {self.dim_out[1]}, dim_out = {self.dim_out[1]}})
+ end
self:bind_params()
end
@@ -108,19 +116,24 @@ function AffineLayer:bind_params()
self["ltp" .. i] = self:find_param(pid_list, lconf, self.gconf,
nerv.LinearTransParam,
{self.dim_in[i], self.dim_out[1]})
+ if self.param_type[i] == 'D' then
+ self['ltp' .. i].trans:diagonalize()
+ end
local no_update = lconf["no_update_ltp" .. i]
if (no_update ~= nil) and no_update or lconf.no_update_all then
self["ltp" .. i].no_update = true
end
end
self.ltp = self.ltp1 -- alias of ltp1
- self.bp = self:find_param("bp", lconf, self.gconf,
- nerv.BiasParam,
- {1, self.dim_out[1]},
- nerv.Param.gen_zero)
- local no_update = lconf["no_update_bp"]
- if (no_update ~= nil) and no_update or lconf.no_update_all then
- self.bp.no_update = true
+ if not self.no_bias then
+ self.bp = self:find_param("bp", lconf, self.gconf,
+ nerv.BiasParam,
+ {1, self.dim_out[1]},
+ nerv.Param.gen_zero)
+ local no_update = lconf["no_update_bp"]
+ if (no_update ~= nil) and no_update or lconf.no_update_all then
+ self.bp.no_update = true
+ end
end
end
@@ -137,7 +150,15 @@ function AffineLayer:init(batch_size)
end
self["ltp" .. i]:train_init()
end
- self.bp:train_init()
+ if not self.no_bias then
+ self.bp:train_init()
+ end
+ if self.activation then
+ self.act_bak = self.mat_type(batch_size, self.dim_out[1])
+ self.act_bak:fill(0)
+ self.err_bak = self.mat_type(batch_size, self.dim_out[1])
+ self.err_bak:fill(0)
+ end
end
function AffineLayer:batch_resize(batch_size)
@@ -148,25 +169,39 @@ function AffineLayer:update()
for i = 1, #self.dim_in do
self["ltp" .. i]:update_by_err_input()
end
- self.bp:update_by_gradient()
+ if not self.no_bias then
+ self.bp:update_by_gradient()
+ end
end
function AffineLayer:propagate(input, output)
+ local result = self.activation and self.act_bak or output[1]
-- apply linear transform
- output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
+ result:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
for i = 2, #self.dim_in do
- output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
+ result:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
end
-- add bias
- output[1]:add_row(self.bp.trans, 1.0)
+ if not self.no_bias then
+ result:add_row(self.bp.trans, 1.0)
+ end
+ if self.activation then
+ self.activation:propagate({self.act_bak}, output)
+ end
end
function AffineLayer:back_propagate(bp_err, next_bp_err, input, output)
+ if self.activation then
+ self.activation:back_propagate(bp_err, {self.err_bak}, {self.act_bak}, output)
+ end
+ local result = self.activation and self.err_bak or bp_err[1]
for i = 1, #self.dim_in do
- next_bp_err[i]:mul(bp_err[1], self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
- self["ltp" .. i]:back_propagate_by_err_input(bp_err[1], input[i])
+ next_bp_err[i]:mul(result, self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
+ self["ltp" .. i]:back_propagate_by_err_input(result, input[i])
+ end
+ if not self.no_bias then
+ self.bp:back_propagate_by_gradient(result:colsum())
end
- self.bp:back_propagate_by_gradient(bp_err[1]:colsum())
end
function AffineLayer:get_params()
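For reference, a hedged sketch of constructing the extended layer directly, using only the layer_conf fields documented in the hunk above; `gconf`, the ids and the dimensions are illustrative assumptions, not part of this commit:

    -- assumes `gconf` is the usual global_conf table; ids and dims are made up
    local gate = nerv.AffineLayer('gate_example', gconf,
                                  {dim_in = {512, 128}, dim_out = {128},
                                   param_type = {'N', 'D'},      -- keep the second weight diagonal
                                   activation = nerv.SigmoidLayer})
    local proj = nerv.AffineLayer('proj_example', gconf,
                                  {dim_in = {512}, dim_out = {128},
                                   no_bias = true})              -- no bp parameter is bound

Note that 'D' only makes sense for a square weight, i.e. when the corresponding dim_in entry equals dim_out[1], since the bound LinearTransParam is diagonalized in place.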
diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua
index d175d02..054784b 100644
--- a/nerv/layer/init.lua
+++ b/nerv/layer/init.lua
@@ -272,13 +272,11 @@ nerv.include('combiner.lua')
nerv.include('softmax.lua')
nerv.include('elem_mul.lua')
nerv.include('lstm.lua')
-nerv.include('lstm_gate.lua')
nerv.include('dropout.lua')
nerv.include('gru.lua')
nerv.include('rnn.lua')
nerv.include('duplicate.lua')
nerv.include('identity.lua')
-nerv.include('projection.lua')
nerv.include('lstmp.lua')
nerv.include('relu.lua')
diff --git a/nerv/layer/lstm.lua b/nerv/layer/lstm.lua
index 3de3453..e568ee8 100644
--- a/nerv/layer/lstm.lua
+++ b/nerv/layer/lstm.lua
@@ -23,16 +23,14 @@ function LSTMLayer:__init(id, global_conf, layer_conf)
},
['nerv.AffineLayer'] = {
mainAffine = {dim_in = {din, dout}, dim_out = {dout}, pr = pr},
+ forgetGate = {dim_in = {din, dout, dout}, dim_out = {dout}, param_type = {'N', 'N', 'D'}, pr = pr, activation = nerv.SigmoidLayer},
+ inputGate = {dim_in = {din, dout, dout}, dim_out = {dout}, param_type = {'N', 'N', 'D'}, pr = pr, activation = nerv.SigmoidLayer},
+ outputGate = {dim_in = {din, dout, dout}, dim_out = {dout}, param_type = {'N', 'N', 'D'}, pr = pr, activation = nerv.SigmoidLayer},
},
['nerv.TanhLayer'] = {
mainTanh = {dim_in = {dout}, dim_out = {dout}},
outputTanh = {dim_in = {dout}, dim_out = {dout}},
},
- ['nerv.LSTMGateLayer'] = {
- forgetGate = {dim_in = {din, dout, dout}, dim_out = {dout}, param_type = {'N', 'N', 'D'}, pr = pr},
- inputGate = {dim_in = {din, dout, dout}, dim_out = {dout}, param_type = {'N', 'N', 'D'}, pr = pr},
- outputGate = {dim_in = {din, dout, dout}, dim_out = {dout}, param_type = {'N', 'N', 'D'}, pr = pr},
- },
['nerv.ElemMulLayer'] = {
inputGateMul = {dim_in = {dout, dout}, dim_out = {dout}},
forgetGateMul = {dim_in = {dout, dout}, dim_out = {dout}},
diff --git a/nerv/layer/lstm_gate.lua b/nerv/layer/lstm_gate.lua
deleted file mode 100644
index 39a3ff7..0000000
--- a/nerv/layer/lstm_gate.lua
+++ /dev/null
@@ -1,97 +0,0 @@
-local LSTMGateLayer = nerv.class('nerv.LSTMGateLayer', 'nerv.Layer')
--- NOTE: this is a full matrix gate
-
-function LSTMGateLayer:__init(id, global_conf, layer_conf)
- nerv.Layer.__init(self, id, global_conf, layer_conf)
- self.param_type = layer_conf.param_type
- self:check_dim_len(-1, 1) --accept multiple inputs
- self:bind_params()
-end
-
-function LSTMGateLayer:bind_params()
- local lconf = self.lconf
- lconf.no_update_ltp1 = lconf.no_update_ltp1 or lconf.no_update_ltp
- for i = 1, #self.dim_in do
- local pid = "ltp" .. i
- local pid_list = i == 1 and {pid, "ltp"} or pid
- self["ltp" .. i] = self:find_param(pid_list, lconf, self.gconf,
- nerv.LinearTransParam,
- {self.dim_in[i], self.dim_out[1]})
- if self.param_type[i] == 'D' then
- self["ltp" .. i].trans:diagonalize()
- end
- local no_update = lconf["no_update_ltp" .. i]
- if (no_update ~= nil) and no_update or lconf.no_update_all then
- self["ltp" .. i].no_update = true
- end
- end
- self.ltp = self.ltp1 -- alias of ltp1
- self.bp = self:find_param("bp", lconf, self.gconf,
- nerv.BiasParam, {1, self.dim_out[1]},
- nerv.Param.gen_zero)
- local no_update = lconf["no_update_bp"]
- if (no_update ~= nil) and no_update or lconf.no_update_all then
- self.bp.no_update = true
- end
-end
-
-function LSTMGateLayer:init(batch_size)
- if self.dim_out[1] ~= self.bp.trans:ncol() then
- nerv.error("mismatching dimensions of linear transform and bias paramter")
- end
- for i = 1, #self.dim_in do
- if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
- nerv.error("mismatching dimensions of linear transform parameter and input")
- end
- if self.dim_out[1] ~= self["ltp" .. i].trans:ncol() then
- nerv.error("mismatching dimensions of linear transform parameter and output")
- end
- self["ltp" .. i]:train_init()
- end
- self.bp:train_init()
- self.err_bakm = self.mat_type(batch_size, self.dim_out[1])
-end
-
-function LSTMGateLayer:batch_resize(batch_size)
- if self.err_m:nrow() ~= batch_size then
- self.err_bakm = self.mat_type(batch_size, self.dim_out[1])
- end
-end
-
-function LSTMGateLayer:propagate(input, output)
- -- apply linear transform
- output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
- for i = 2, #self.dim_in do
- output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
- end
- -- add bias
- output[1]:add_row(self.bp.trans, 1.0)
- output[1]:sigmoid(output[1])
-end
-
-function LSTMGateLayer:back_propagate(bp_err, next_bp_err, input, output)
- self.err_bakm:sigmoid_grad(bp_err[1], output[1])
- for i = 1, #self.dim_in do
- next_bp_err[i]:mul(self.err_bakm, self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
- self["ltp" .. i]:back_propagate_by_err_input(self.err_bakm, input[i])
- end
- self.bp:back_propagate_by_gradient(self.err_bakm:colsum())
-end
-
-function LSTMGateLayer:update()
- for i = 1, #self.dim_in do
- self["ltp" .. i]:update_by_err_input()
- if self.param_type[i] == 'D' then
- self["ltp" .. i].trans:diagonalize()
- end
- end
- self.bp:update_by_gradient()
-end
-
-function LSTMGateLayer:get_params()
- local pr = nerv.ParamRepo({self.bp}, self.loc_type)
- for i = 1, #self.dim_in do
- pr:add(self["ltp" .. i])
- end
- return pr
-end
diff --git a/nerv/layer/lstmp.lua b/nerv/layer/lstmp.lua
index bbb2091..dc30797 100644
--- a/nerv/layer/lstmp.lua
+++ b/nerv/layer/lstmp.lua
@@ -24,24 +24,20 @@ function LSTMPLayer:__init(id, global_conf, layer_conf)
},
['nerv.AffineLayer'] = {
mainAffine = {dim_in = {din, dout}, dim_out = {dcell}, pr = pr},
+ forgetGate = {dim_in = {din, dout, dcell}, dim_out = {dcell}, param_type = {'N', 'N', 'D'}, pr = pr, activation = nerv.SigmoidLayer},
+ inputGate = {dim_in = {din, dout, dcell}, dim_out = {dcell}, param_type = {'N', 'N', 'D'}, pr = pr, activation = nerv.SigmoidLayer},
+ outputGate = {dim_in = {din, dout, dcell}, dim_out = {dcell}, param_type = {'N', 'N', 'D'}, pr = pr, activation = nerv.SigmoidLayer},
+ projection = {dim_in = {dcell}, dim_out = {dout}, pr = pr, no_bias = true},
},
['nerv.TanhLayer'] = {
mainTanh = {dim_in = {dcell}, dim_out = {dcell}},
outputTanh = {dim_in = {dcell}, dim_out = {dcell}},
},
- ['nerv.LSTMGateLayer'] = {
- forgetGate = {dim_in = {din, dout, dcell}, dim_out = {dcell}, param_type = {'N', 'N', 'D'}, pr = pr},
- inputGate = {dim_in = {din, dout, dcell}, dim_out = {dcell}, param_type = {'N', 'N', 'D'}, pr = pr},
- outputGate = {dim_in = {din, dout, dcell}, dim_out = {dcell}, param_type = {'N', 'N', 'D'}, pr = pr},
- },
['nerv.ElemMulLayer'] = {
inputGateMul = {dim_in = {dcell, dcell}, dim_out = {dcell}},
forgetGateMul = {dim_in = {dcell, dcell}, dim_out = {dcell}},
outputGateMul = {dim_in = {dcell, dcell}, dim_out = {dcell}},
},
- ['nerv.ProjectionLayer'] = {
- projection = {dim_in = {dcell}, dim_out = {dout}, pr = pr},
- },
}
local connections = {
diff --git a/nerv/layer/projection.lua b/nerv/layer/projection.lua
deleted file mode 100644
index 077125b..0000000
--- a/nerv/layer/projection.lua
+++ /dev/null
@@ -1,70 +0,0 @@
-local ProjectionLayer = nerv.class('nerv.ProjectionLayer', 'nerv.Layer')
-
---- The constructor.
-function ProjectionLayer:__init(id, global_conf, layer_conf)
- nerv.Layer.__init(self, id, global_conf, layer_conf)
- self:check_dim_len(-1, 1) -- exactly one output, allow multiple inputs
- self:bind_params()
-end
-
-function ProjectionLayer:bind_params()
- local lconf = self.lconf
- lconf.no_update_ltp1 = lconf.no_update_ltp1 or lconf.no_update_ltp
- for i = 1, #self.dim_in do
- local pid = "ltp" .. i
- local pid_list = i == 1 and {pid, "ltp"} or pid
- self["ltp" .. i] = self:find_param(pid_list, lconf, self.gconf,
- nerv.LinearTransParam,
- {self.dim_in[i], self.dim_out[1]})
- local no_update = lconf["no_update_ltp" .. i]
- if (no_update ~= nil) and no_update or lconf.no_update_all then
- self["ltp" .. i].no_update = true
- end
- end
- self.ltp = self.ltp1 -- alias of ltp1
-end
-
-function ProjectionLayer:init(batch_size)
- for i = 1, #self.dim_in do
- if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
- nerv.error("mismatching dimensions of linear transform parameter and input")
- end
- if self.dim_out[1] ~= self["ltp" .. i].trans:ncol() then
- nerv.error("mismatching dimensions of linear transform parameter and output")
- end
- self["ltp" .. i]:train_init()
- end
-end
-
-function ProjectionLayer:batch_resize(batch_size)
- -- do nothing
-end
-
-function ProjectionLayer:update()
- for i = 1, #self.dim_in do
- self["ltp" .. i]:update_by_err_input()
- end
-end
-
-function ProjectionLayer:propagate(input, output)
- -- apply linear transform
- output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
- for i = 2, #self.dim_in do
- output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
- end
-end
-
-function ProjectionLayer:back_propagate(bp_err, next_bp_err, input, output)
- for i = 1, #self.dim_in do
- next_bp_err[i]:mul(bp_err[1], self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
- self["ltp" .. i]:back_propagate_by_err_input(bp_err[1], input[i])
- end
-end
-
-function ProjectionLayer:get_params()
- local pr = nerv.ParamRepo({self.ltp1}, self.loc_type)
- for i = 2, #self.dim_in do
- pr:add(self["ltp" .. i])
- end
- return pr
-end
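The deleted ProjectionLayer amounts to an affine layer without a bias term; with this commit the same behaviour comes from nerv.AffineLayer with `no_bias = true`, which skips binding, initialising and updating `bp` (see the affine.lua hunks above). The replacement declaration, as used in the lstmp.lua hunk, is simply:

    -- bias-free linear map, replacing nerv.ProjectionLayer
    projection = {dim_in = {dcell}, dim_out = {dout}, pr = pr, no_bias = true},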