author     Qi Liu <[email protected]>    2016-05-12 17:41:21 +0800
committer  Qi Liu <[email protected]>    2016-05-12 17:41:21 +0800
commit     c0fdb7ee2966546023410bb03e62dee0cf64e0e1 (patch)
tree       cd90688b8aee2991a78f971e1bc4e0e9596e357b
parent     d88a57f4852c50a2678de950ee650ed9b6a895f0 (diff)
parent     10916f721a945a5edd052ab93027413fd3c01f65 (diff)
Merge branch 'master' into 'master' beta-1.2
Multiple changes:
1. Merge affine & lstm_gate & projection layers
2. Change clip behavior
3. seq_buffer supports sequence-level shuffle
4. LSTM & LSTMP layers support multiple inputs

See merge request !4
-rw-r--r--  nerv/Makefile                 4
-rw-r--r--  nerv/examples/ptb/main.lua    2
-rw-r--r--  nerv/examples/trainer.lua     1
-rw-r--r--  nerv/io/seq_buffer.lua       92
-rw-r--r--  nerv/layer/affine.lua        75
-rw-r--r--  nerv/layer/init.lua           2
-rw-r--r--  nerv/layer/lstm.lua          55
-rw-r--r--  nerv/layer/lstm_gate.lua     97
-rw-r--r--  nerv/layer/lstmp.lua         61
-rw-r--r--  nerv/layer/projection.lua    70
-rw-r--r--  nerv/layer/rnn.lua           15
-rw-r--r--  nerv/nn/network.lua          11
-rw-r--r--  nerv/nn/trainer.lua           5
13 files changed, 216 insertions, 274 deletions
diff --git a/nerv/Makefile b/nerv/Makefile
index 0d9934a..a3d0f34 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -40,9 +40,9 @@ OBJS := $(CORE_OBJS) $(NERV_OBJS) $(LUAT_OBJS)
LIBS := $(INST_LIBDIR)/libnerv.so $(LIB_PATH)/libnervcore.so $(LIB_PATH)/libluaT.so
LUA_LIBS := matrix/init.lua io/init.lua init.lua \
layer/init.lua layer/affine.lua layer/sigmoid.lua layer/tanh.lua layer/softmax_ce.lua layer/softmax.lua \
- layer/lstmp.lua layer/projection.lua layer/relu.lua\
+ layer/lstmp.lua layer/relu.lua\
layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua \
- layer/elem_mul.lua layer/lstm.lua layer/lstm_gate.lua layer/dropout.lua layer/gru.lua \
+ layer/elem_mul.lua layer/lstm.lua layer/dropout.lua layer/gru.lua \
layer/graph.lua layer/rnn.lua layer/duplicate.lua layer/identity.lua \
nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/network.lua nn/trainer.lua\
io/frm_buffer.lua io/seq_buffer.lua
diff --git a/nerv/examples/ptb/main.lua b/nerv/examples/ptb/main.lua
index 5d1a326..7191da0 100644
--- a/nerv/examples/ptb/main.lua
+++ b/nerv/examples/ptb/main.lua
@@ -105,7 +105,7 @@ function trainer:epoch_preprocess(dataset, do_train)
total_frame = 0
end
-function trainer:mini_batch_middleprocess(cnt, info)
+function trainer:mini_batch_inprocess(cnt, info)
for t = 1, gconf.chunk_size do
local tmp = info.output[1][t]:new_to_host()
for i = 1, gconf.batch_size do
diff --git a/nerv/examples/trainer.lua b/nerv/examples/trainer.lua
index f6c7a5a..caed2e2 100644
--- a/nerv/examples/trainer.lua
+++ b/nerv/examples/trainer.lua
@@ -80,6 +80,7 @@ local trainer_defaults = {
local options = make_options(trainer_defaults)
local extra_opt_spec = {
+ {"clip", nil, "number"},
{"resume-from", nil, "string"},
{"help", "h", "boolean", default = false, desc = "show this help information"},
{"dir", nil, "string", desc = "specify the working directory"},
diff --git a/nerv/io/seq_buffer.lua b/nerv/io/seq_buffer.lua
index 8cde1b3..5c60f64 100644
--- a/nerv/io/seq_buffer.lua
+++ b/nerv/io/seq_buffer.lua
@@ -28,6 +28,9 @@ local SeqBuffer = nerv.class('nerv.SeqBuffer', 'nerv.DataBuffer')
-- * `batch_size`: the number of rows for each batch matrix
-- * `chunk_size`: the length of the BPTT context (number of batch
-- matrices to provide upon each invocation of `get_data()`)
+-- * `buffer_size`: the number of frames to be buffered and shuffled at once (the
+-- shuffle is at the sequence level, not the frame level)
+-- * `randomize`: if true, shuffle the buffer after it is filled
-- * `nn_act_default`: the default value to fill into the "holes" (non-data
-- frames)
@@ -36,6 +39,8 @@ function SeqBuffer:__init(global_conf, buffer_conf)
self.batch_size = buffer_conf.batch_size
self.chunk_size = buffer_conf.chunk_size
+ self.buffer_size = buffer_conf.buffer_size
+ self.randomize = buffer_conf.randomize
self.readers = {}
for _, v in ipairs(buffer_conf.readers) do
table.insert(self.readers, v.reader)
@@ -49,6 +54,11 @@ function SeqBuffer:__init(global_conf, buffer_conf)
self.queue = {}
self.head = 1
self.tail = 0
+ self.offset = 1
+ self.buffer = {}
+ self.length = {}
+ self.index = {}
+ self.complete = false
end
function SeqBuffer:new_mini_batch()
@@ -62,25 +72,77 @@ function SeqBuffer:new_mini_batch()
return res
end
+local function random_shuffle(a)
+ for i = #a, 2, -1 do
+ local j = math.random(i)
+ a[i], a[j] = a[j], a[i]
+ end
+end
+
+function SeqBuffer:fill_buffer()
+ if self.complete then
+ return false
+ end
+ local t = os.clock()
+ self.buffer = {}
+ self.length = {}
+ local size = 0
+ while size < self.buffer_size do
+ local drow = nil
+ local data = {}
+ for i = 1, #self.readers do
+ local tmp = self.readers[i]:get_data()
+ if tmp == nil then
+ self.complete = true
+ break
+ end
+ for id, d in pairs(tmp) do
+ if drow == nil then
+ drow = d:nrow()
+ elseif d:nrow() ~= drow then
+ nerv.error('readers provides with inconsistent rows of data')
+ end
+ data[id] = d
+ end
+ end
+ if self.complete then
+ break
+ end
+ size = size + drow
+ table.insert(self.buffer, data)
+ table.insert(self.length, drow)
+ end
+ self.index = {}
+ for i = 1, #self.buffer do
+ self.index[i] = i
+ end
+ if self.randomize then
+ random_shuffle(self.index)
+ end
+ self.offset = 1
+ collectgarbage('collect')
+ nerv.info('%.3fs to fill the buffer', os.clock() - t)
+ return #self.buffer > 0
+end
+
+function SeqBuffer:get_buffered_data()
+ if self.offset > #self.buffer then
+ if not self:fill_buffer() then
+ return nil
+ end
+ end
+ local id = self.index[self.offset]
+ self.offset = self.offset + 1
+ return self.buffer[id], self.length[id]
+end
+
function SeqBuffer:saturate(batch)
if self.queue[self.head] ~= nil and self.queue[self.head].seq_length[batch] ~= 0 then
return true
end
- local data = {}
- local drow = nil
- for i = 1, #self.readers do
- local tmp = self.readers[i]:get_data()
- if tmp == nil then
- return false
- end
- for id, d in pairs(tmp) do
- if drow == nil then
- drow = d:nrow()
- elseif d:nrow() ~= drow then
- nerv.error('readers provides with inconsistent rows of data')
- end
- data[id] = d
- end
+ local data, drow = self:get_buffered_data()
+ if data == nil then
+ return false
end
local offset = 0
local head = self.head
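
A minimal sketch of constructing the extended `nerv.SeqBuffer` directly; the field names come from the code above, while the reader object and the numeric values are assumptions:

    local buffer = nerv.SeqBuffer(gconf, {
        batch_size = gconf.batch_size,
        chunk_size = gconf.chunk_size,
        buffer_size = 81920,              -- frames read in before each shuffle
        randomize = true,                 -- shuffle whole sequences, not frames
        readers = {{reader = my_reader}}, -- each entry wraps a reader with get_data()
        nn_act_default = 0,               -- filler for non-data frames
    })
    local batch = buffer:get_data()       -- fetch the next chunk of batch matrices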
diff --git a/nerv/layer/affine.lua b/nerv/layer/affine.lua
index 16250fd..b68cf3d 100644
--- a/nerv/layer/affine.lua
+++ b/nerv/layer/affine.lua
@@ -48,6 +48,10 @@ function MatrixParam:_update(alpha, beta)
-- momentum gain
local mmt_gain = 1.0 / (1.0 - gconf.momentum)
local n = gconf.batch_size * mmt_gain
+ -- clip gradient
+ if gconf.clip then
+ self.correction_acc:clip(-gconf.clip, gconf.clip)
+ end
-- perform update
if gconf.momentum > 0 then
self.correction:add(self.correction, self.correction_acc, gconf.momentum, 1.0)
@@ -87,7 +91,11 @@ local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
-- @param global_conf see `self.gconf` of `nerv.Layer.__init`
-- @param layer_conf a table providing with settings dedicated for the layer,
-- for `layer_conf` fields that are shared by all layers, see
--- `nerv.Layer.__init`. The affine layer requires parameters to be bound, the
+-- `nerv.Layer.__init`. These fields can be specified:
+-- * `activation`: the type of the activation function layer, also known as \sigma in \sigma(Wx + b). The activation function layer must guarantee that it does not use the parameter `input` in its `back_propagate` function. Defaults to none (no activation function).
+-- * `no_bias`: a boolean value; if true, the layer has no bias parameter. Defaults to false.
+-- * `param_type`: a table of strings with the same length as `dim_in`, giving the parameter type for each input: 'D' for a diagonal weight matrix, 'N' for a normal weight matrix. Defaults to 'N' for every input.
+-- The affine layer requires parameters to be bound, the
-- following parameter names will be looked up while binding:
--
-- * `ltp`: the linear transformation parameter, also known as the weight matrix, W in Wx + b
@@ -96,6 +104,11 @@ local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
function AffineLayer:__init(id, global_conf, layer_conf)
nerv.Layer.__init(self, id, global_conf, layer_conf)
self:check_dim_len(-1, 1) -- exactly one output, allow multiple inputs
+ self.param_type = layer_conf.param_type or table.vector(#self.dim_in, 'N')
+ if layer_conf.activation then
+ self.activation = layer_conf.activation('', global_conf, {dim_in = {self.dim_out[1]}, dim_out = {self.dim_out[1]}})
+ end
+ self.no_bias = layer_conf.no_bias
self:bind_params()
end
@@ -108,24 +121,29 @@ function AffineLayer:bind_params()
self["ltp" .. i] = self:find_param(pid_list, lconf, self.gconf,
nerv.LinearTransParam,
{self.dim_in[i], self.dim_out[1]})
+ if self.param_type[i] == 'D' then
+ self['ltp' .. i].trans:diagonalize()
+ end
local no_update = lconf["no_update_ltp" .. i]
if (no_update ~= nil) and no_update or lconf.no_update_all then
self["ltp" .. i].no_update = true
end
end
self.ltp = self.ltp1 -- alias of ltp1
- self.bp = self:find_param("bp", lconf, self.gconf,
- nerv.BiasParam,
- {1, self.dim_out[1]},
- nerv.Param.gen_zero)
- local no_update = lconf["no_update_bp"]
- if (no_update ~= nil) and no_update or lconf.no_update_all then
- self.bp.no_update = true
+ if not self.no_bias then
+ self.bp = self:find_param("bp", lconf, self.gconf,
+ nerv.BiasParam,
+ {1, self.dim_out[1]},
+ nerv.Param.gen_zero)
+ local no_update = lconf["no_update_bp"]
+ if (no_update ~= nil) and no_update or lconf.no_update_all then
+ self.bp.no_update = true
+ end
end
end
function AffineLayer:init(batch_size)
- if self.dim_out[1] ~= self.bp.trans:ncol() then
+ if not self.no_bias and self.dim_out[1] ~= self.bp.trans:ncol() then
nerv.error("mismatching dimensions of linear transform and bias paramter")
end
for i = 1, #self.dim_in do
@@ -137,7 +155,13 @@ function AffineLayer:init(batch_size)
end
self["ltp" .. i]:train_init()
end
- self.bp:train_init()
+ if not self.no_bias then
+ self.bp:train_init()
+ end
+ if self.activation then
+ self.bak_mat = self.mat_type(batch_size, self.dim_out[1])
+ self.bak_mat:fill(0)
+ end
end
function AffineLayer:batch_resize(batch_size)
@@ -147,26 +171,43 @@ end
function AffineLayer:update()
for i = 1, #self.dim_in do
self["ltp" .. i]:update_by_err_input()
+ if self.param_type[i] == 'D' then
+ self['ltp' .. i].trans:diagonalize()
+ end
+ end
+ if not self.no_bias then
+ self.bp:update_by_gradient()
end
- self.bp:update_by_gradient()
end
function AffineLayer:propagate(input, output)
+ local result = self.activation and self.bak_mat or output[1]
-- apply linear transform
- output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
+ result:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
for i = 2, #self.dim_in do
- output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
+ result:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
end
-- add bias
- output[1]:add_row(self.bp.trans, 1.0)
+ if not self.no_bias then
+ result:add_row(self.bp.trans, 1.0)
+ end
+ if self.activation then
+ self.activation:propagate({result}, output)
+ end
end
function AffineLayer:back_propagate(bp_err, next_bp_err, input, output)
+ local result = self.activation and self.bak_mat or bp_err[1]
+ if self.activation then
+ self.activation:back_propagate(bp_err, {result}, {result}, output)
+ end
for i = 1, #self.dim_in do
- next_bp_err[i]:mul(bp_err[1], self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
- self["ltp" .. i]:back_propagate_by_err_input(bp_err[1], input[i])
+ next_bp_err[i]:mul(result, self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
+ self["ltp" .. i]:back_propagate_by_err_input(result, input[i])
+ end
+ if not self.no_bias then
+ self.bp:back_propagate_by_gradient(result:colsum())
end
- self.bp:back_propagate_by_gradient(bp_err[1]:colsum())
end
function AffineLayer:get_params()
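
The removed `nerv.LSTMGateLayer` and `nerv.ProjectionLayer` are now covered by these options. A hedged sketch of `nerv.LayerRepo` entries; the layer ids, dimensions and the `pr`/`gconf` objects are illustrative, the field names follow the constructor above:

    local layers = {
        ['nerv.AffineLayer'] = {
            -- gate-style affine: fused sigmoid, diagonal weight for the second input
            gate = {dim_in = {429, 512}, dim_out = {512},
                    param_type = {'N', 'D'}, activation = nerv.SigmoidLayer, pr = pr},
            -- projection-style affine: plain linear transform without a bias
            proj = {dim_in = {2048}, dim_out = {512}, no_bias = true, pr = pr},
        },
    }
    local repo = nerv.LayerRepo(layers, pr, gconf)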
diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua
index d175d02..054784b 100644
--- a/nerv/layer/init.lua
+++ b/nerv/layer/init.lua
@@ -272,13 +272,11 @@ nerv.include('combiner.lua')
nerv.include('softmax.lua')
nerv.include('elem_mul.lua')
nerv.include('lstm.lua')
-nerv.include('lstm_gate.lua')
nerv.include('dropout.lua')
nerv.include('gru.lua')
nerv.include('rnn.lua')
nerv.include('duplicate.lua')
nerv.include('identity.lua')
-nerv.include('projection.lua')
nerv.include('lstmp.lua')
nerv.include('relu.lua')
diff --git a/nerv/layer/lstm.lua b/nerv/layer/lstm.lua
index 3de3453..5d73ad2 100644
--- a/nerv/layer/lstm.lua
+++ b/nerv/layer/lstm.lua
@@ -2,9 +2,12 @@ local LSTMLayer = nerv.class('nerv.LSTMLayer', 'nerv.GraphLayer')
function LSTMLayer:__init(id, global_conf, layer_conf)
nerv.Layer.__init(self, id, global_conf, layer_conf)
- self:check_dim_len(1, 1)
+ self:check_dim_len(-1, 1)
+ if #self.dim_in == 0 then
+ nerv.error('LSTM layer %s has no input', self.id)
+ end
- local din = layer_conf.dim_in[1]
+ local din = layer_conf.dim_in
local dout = layer_conf.dim_out[1]
local pr = layer_conf.pr
@@ -17,48 +20,51 @@ function LSTMLayer:__init(id, global_conf, layer_conf)
mainCombine = {dim_in = {dout, dout}, dim_out = {dout}, lambda = {1, 1}},
},
['nerv.DuplicateLayer'] = {
- inputDup = {dim_in = {din}, dim_out = {din, din, din, din}},
outputDup = {dim_in = {dout}, dim_out = {dout, dout, dout, dout, dout}},
cellDup = {dim_in = {dout}, dim_out = {dout, dout, dout, dout, dout}},
},
['nerv.AffineLayer'] = {
- mainAffine = {dim_in = {din, dout}, dim_out = {dout}, pr = pr},
+ mainAffine = {dim_in = table.connect({dout}, din), dim_out = {dout}, pr = pr},
+ forgetGate = {dim_in = table.connect({dout, dout}, din), dim_out = {dout},
+ param_type = table.connect({'N', 'D'}, table.vector(#din, 'N')), pr = pr, activation = nerv.SigmoidLayer},
+ inputGate = {dim_in = table.connect({dout, dout}, din), dim_out = {dout},
+ param_type = table.connect({'N', 'D'}, table.vector(#din, 'N')), pr = pr, activation = nerv.SigmoidLayer},
+ outputGate = {dim_in = table.connect({dout, dout}, din), dim_out = {dout},
+ param_type = table.connect({'N', 'D'}, table.vector(#din, 'N')), pr = pr, activation = nerv.SigmoidLayer},
},
['nerv.TanhLayer'] = {
mainTanh = {dim_in = {dout}, dim_out = {dout}},
outputTanh = {dim_in = {dout}, dim_out = {dout}},
},
- ['nerv.LSTMGateLayer'] = {
- forgetGate = {dim_in = {din, dout, dout}, dim_out = {dout}, param_type = {'N', 'N', 'D'}, pr = pr},
- inputGate = {dim_in = {din, dout, dout}, dim_out = {dout}, param_type = {'N', 'N', 'D'}, pr = pr},
- outputGate = {dim_in = {din, dout, dout}, dim_out = {dout}, param_type = {'N', 'N', 'D'}, pr = pr},
- },
['nerv.ElemMulLayer'] = {
inputGateMul = {dim_in = {dout, dout}, dim_out = {dout}},
forgetGateMul = {dim_in = {dout, dout}, dim_out = {dout}},
outputGateMul = {dim_in = {dout, dout}, dim_out = {dout}},
},
}
+ for i = 1, #din do
+ layers['nerv.DuplicateLayer']['inputDup' .. i] = {dim_in = {din[i]}, dim_out = {din[i], din[i], din[i], din[i]}}
+ end
local connections = {
-- lstm input
- {'<input>[1]', 'inputDup[1]', 0},
+ --{'<input>[1 .. n]', 'inputDup(1 .. n)[1]', 0},
-- input gate
- {'inputDup[1]', 'inputGate[1]', 0},
- {'outputDup[1]', 'inputGate[2]', 1},
- {'cellDup[1]', 'inputGate[3]', 1},
+ {'outputDup[1]', 'inputGate[1]', 1},
+ {'cellDup[1]', 'inputGate[2]', 1},
+ --{'inputDup(1 .. n)[1]', 'inputGate[3 .. n + 2]', 0},
-- forget gate
- {'inputDup[2]', 'forgetGate[1]', 0},
- {'outputDup[2]', 'forgetGate[2]', 1},
- {'cellDup[2]', 'forgetGate[3]', 1},
+ {'outputDup[2]', 'forgetGate[1]', 1},
+ {'cellDup[2]', 'forgetGate[2]', 1},
+ --{'inputDup(1 .. n)[2]', 'forgetGate[3 .. n + 2]', 0},
-- lstm cell
{'forgetGate[1]', 'forgetGateMul[1]', 0},
{'cellDup[3]', 'forgetGateMul[2]', 1},
- {'inputDup[3]', 'mainAffine[1]', 0},
- {'outputDup[3]', 'mainAffine[2]', 1},
+ {'outputDup[3]', 'mainAffine[1]', 1},
+ --{'inputDup(1 .. n)[3]', 'mainAffine[2 .. n + 1]', 0},
{'mainAffine[1]', 'mainTanh[1]', 0},
{'inputGate[1]', 'inputGateMul[1]', 0},
{'mainTanh[1]', 'inputGateMul[2]', 0},
@@ -67,9 +73,9 @@ function LSTMLayer:__init(id, global_conf, layer_conf)
{'mainCombine[1]', 'cellDup[1]', 0},
-- forget gate
- {'inputDup[4]', 'outputGate[1]', 0},
- {'outputDup[4]', 'outputGate[2]', 1},
- {'cellDup[4]', 'outputGate[3]', 0},
+ {'outputDup[4]', 'outputGate[1]', 1},
+ {'cellDup[4]', 'outputGate[2]', 0},
+ --{'inputDup(1 .. n)[4]', 'outputGate[2 .. n + 1]', 0},
-- lstm output
{'cellDup[5]', 'outputTanh[1]', 0},
@@ -78,6 +84,13 @@ function LSTMLayer:__init(id, global_conf, layer_conf)
{'outputGateMul[1]', 'outputDup[1]', 0},
{'outputDup[5]', '<output>[1]', 0},
}
+ for i = 1, #din do
+ table.insert(connections, {'<input>[' .. i .. ']', 'inputDup' .. i .. '[1]', 0})
+ table.insert(connections, {'inputDup' .. i .. '[1]', 'inputGate[' .. (i + 2) .. ']', 0})
+ table.insert(connections, {'inputDup' .. i .. '[2]', 'forgetGate[' .. (i + 2) .. ']', 0})
+ table.insert(connections, {'inputDup' .. i .. '[3]', 'mainAffine[' .. (i + 1) .. ']', 0})
+ table.insert(connections, {'inputDup' .. i .. '[4]', 'outputGate[' .. (i + 2) .. ']', 0})
+ end
self:add_prefix(layers, connections)
local layer_repo = nerv.LayerRepo(layers, pr, global_conf)
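
With `check_dim_len(-1, 1)` the LSTM layer now accepts any number of inputs; each input i gets its own `inputDup` layer and an extra port on the gates and the main affine. A hedged sketch of declaring such a layer (ids and dimensions are illustrative):

    local layers = {
        ['nerv.LSTMLayer'] = {
            -- two input streams feeding one recurrent output of width 512
            lstm = {dim_in = {429, 128}, dim_out = {512}, pr = pr},
        },
    }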
diff --git a/nerv/layer/lstm_gate.lua b/nerv/layer/lstm_gate.lua
deleted file mode 100644
index 39a3ff7..0000000
--- a/nerv/layer/lstm_gate.lua
+++ /dev/null
@@ -1,97 +0,0 @@
-local LSTMGateLayer = nerv.class('nerv.LSTMGateLayer', 'nerv.Layer')
--- NOTE: this is a full matrix gate
-
-function LSTMGateLayer:__init(id, global_conf, layer_conf)
- nerv.Layer.__init(self, id, global_conf, layer_conf)
- self.param_type = layer_conf.param_type
- self:check_dim_len(-1, 1) --accept multiple inputs
- self:bind_params()
-end
-
-function LSTMGateLayer:bind_params()
- local lconf = self.lconf
- lconf.no_update_ltp1 = lconf.no_update_ltp1 or lconf.no_update_ltp
- for i = 1, #self.dim_in do
- local pid = "ltp" .. i
- local pid_list = i == 1 and {pid, "ltp"} or pid
- self["ltp" .. i] = self:find_param(pid_list, lconf, self.gconf,
- nerv.LinearTransParam,
- {self.dim_in[i], self.dim_out[1]})
- if self.param_type[i] == 'D' then
- self["ltp" .. i].trans:diagonalize()
- end
- local no_update = lconf["no_update_ltp" .. i]
- if (no_update ~= nil) and no_update or lconf.no_update_all then
- self["ltp" .. i].no_update = true
- end
- end
- self.ltp = self.ltp1 -- alias of ltp1
- self.bp = self:find_param("bp", lconf, self.gconf,
- nerv.BiasParam, {1, self.dim_out[1]},
- nerv.Param.gen_zero)
- local no_update = lconf["no_update_bp"]
- if (no_update ~= nil) and no_update or lconf.no_update_all then
- self.bp.no_update = true
- end
-end
-
-function LSTMGateLayer:init(batch_size)
- if self.dim_out[1] ~= self.bp.trans:ncol() then
- nerv.error("mismatching dimensions of linear transform and bias paramter")
- end
- for i = 1, #self.dim_in do
- if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
- nerv.error("mismatching dimensions of linear transform parameter and input")
- end
- if self.dim_out[1] ~= self["ltp" .. i].trans:ncol() then
- nerv.error("mismatching dimensions of linear transform parameter and output")
- end
- self["ltp" .. i]:train_init()
- end
- self.bp:train_init()
- self.err_bakm = self.mat_type(batch_size, self.dim_out[1])
-end
-
-function LSTMGateLayer:batch_resize(batch_size)
- if self.err_m:nrow() ~= batch_size then
- self.err_bakm = self.mat_type(batch_size, self.dim_out[1])
- end
-end
-
-function LSTMGateLayer:propagate(input, output)
- -- apply linear transform
- output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
- for i = 2, #self.dim_in do
- output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
- end
- -- add bias
- output[1]:add_row(self.bp.trans, 1.0)
- output[1]:sigmoid(output[1])
-end
-
-function LSTMGateLayer:back_propagate(bp_err, next_bp_err, input, output)
- self.err_bakm:sigmoid_grad(bp_err[1], output[1])
- for i = 1, #self.dim_in do
- next_bp_err[i]:mul(self.err_bakm, self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
- self["ltp" .. i]:back_propagate_by_err_input(self.err_bakm, input[i])
- end
- self.bp:back_propagate_by_gradient(self.err_bakm:colsum())
-end
-
-function LSTMGateLayer:update()
- for i = 1, #self.dim_in do
- self["ltp" .. i]:update_by_err_input()
- if self.param_type[i] == 'D' then
- self["ltp" .. i].trans:diagonalize()
- end
- end
- self.bp:update_by_gradient()
-end
-
-function LSTMGateLayer:get_params()
- local pr = nerv.ParamRepo({self.bp}, self.loc_type)
- for i = 1, #self.dim_in do
- pr:add(self["ltp" .. i])
- end
- return pr
-end
diff --git a/nerv/layer/lstmp.lua b/nerv/layer/lstmp.lua
index bbb2091..49c9516 100644
--- a/nerv/layer/lstmp.lua
+++ b/nerv/layer/lstmp.lua
@@ -2,9 +2,12 @@ local LSTMPLayer = nerv.class('nerv.LSTMPLayer', 'nerv.GraphLayer')
function LSTMPLayer:__init(id, global_conf, layer_conf)
nerv.Layer.__init(self, id, global_conf, layer_conf)
- self:check_dim_len(1, 1)
+ self:check_dim_len(-1, 1)
+ if #self.dim_in == 0 then
+ nerv.error('LSTMP layer %s has no input', self.id)
+ end
- local din = layer_conf.dim_in[1]
+ local din = layer_conf.dim_in
local dcell = layer_conf.cell_dim
local dout = layer_conf.dim_out[1]
@@ -18,51 +21,52 @@ function LSTMPLayer:__init(id, global_conf, layer_conf)
mainCombine = {dim_in = {dcell, dcell}, dim_out = {dcell}, lambda = {1, 1}},
},
['nerv.DuplicateLayer'] = {
- inputDup = {dim_in = {din}, dim_out = {din, din, din, din}},
outputDup = {dim_in = {dout}, dim_out = {dout, dout, dout, dout, dout}},
cellDup = {dim_in = {dcell}, dim_out = {dcell, dcell, dcell, dcell, dcell}},
},
['nerv.AffineLayer'] = {
- mainAffine = {dim_in = {din, dout}, dim_out = {dcell}, pr = pr},
+ mainAffine = {dim_in = table.connect({dout}, din), dim_out = {dcell}, pr = pr},
+ forgetGate = {dim_in = table.connect({dout, dcell}, din), dim_out = {dcell},
+ param_type = table.connect({'N', 'D'}, table.vector(#din, 'N')), pr = pr, activation = nerv.SigmoidLayer},
+ inputGate = {dim_in = table.connect({dout, dcell}, din), dim_out = {dcell},
+ param_type = table.connect({'N', 'D'}, table.vector(#din, 'N')), pr = pr, activation = nerv.SigmoidLayer},
+ outputGate = {dim_in = table.connect({dout, dcell}, din), dim_out = {dcell},
+ param_type = table.connect({'N', 'D'}, table.vector(#din, 'N')), pr = pr, activation = nerv.SigmoidLayer},
+ projection = {dim_in = {dcell}, dim_out = {dout}, pr = pr, no_bias = true},
},
['nerv.TanhLayer'] = {
mainTanh = {dim_in = {dcell}, dim_out = {dcell}},
outputTanh = {dim_in = {dcell}, dim_out = {dcell}},
},
- ['nerv.LSTMGateLayer'] = {
- forgetGate = {dim_in = {din, dout, dcell}, dim_out = {dcell}, param_type = {'N', 'N', 'D'}, pr = pr},
- inputGate = {dim_in = {din, dout, dcell}, dim_out = {dcell}, param_type = {'N', 'N', 'D'}, pr = pr},
- outputGate = {dim_in = {din, dout, dcell}, dim_out = {dcell}, param_type = {'N', 'N', 'D'}, pr = pr},
- },
['nerv.ElemMulLayer'] = {
inputGateMul = {dim_in = {dcell, dcell}, dim_out = {dcell}},
forgetGateMul = {dim_in = {dcell, dcell}, dim_out = {dcell}},
outputGateMul = {dim_in = {dcell, dcell}, dim_out = {dcell}},
},
- ['nerv.ProjectionLayer'] = {
- projection = {dim_in = {dcell}, dim_out = {dout}, pr = pr},
- },
}
-
+ for i = 1, #din do
+ layers['nerv.DuplicateLayer']['inputDup' .. i] = {dim_in = {din[i]}, dim_out = {din[i], din[i], din[i], din[i]}}
+ end
+
local connections = {
-- lstm input
- {'<input>[1]', 'inputDup[1]', 0},
+ --{'<input>[1 .. n]', 'inputDup(1 .. n)[1]', 0},
-- input gate
- {'inputDup[1]', 'inputGate[1]', 0},
- {'outputDup[1]', 'inputGate[2]', 1},
- {'cellDup[1]', 'inputGate[3]', 1},
+ {'outputDup[1]', 'inputGate[1]', 1},
+ {'cellDup[1]', 'inputGate[2]', 1},
+ --{'inputDup(1 .. n)[1]', 'inputGate[3 .. n + 2]', 0},
-- forget gate
- {'inputDup[2]', 'forgetGate[1]', 0},
- {'outputDup[2]', 'forgetGate[2]', 1},
- {'cellDup[2]', 'forgetGate[3]', 1},
+ {'outputDup[2]', 'forgetGate[1]', 1},
+ {'cellDup[2]', 'forgetGate[2]', 1},
+ --{'inputDup(1 .. n)[2]', 'forgetGate[3 .. n + 2]', 0},
-- lstm cell
{'forgetGate[1]', 'forgetGateMul[1]', 0},
{'cellDup[3]', 'forgetGateMul[2]', 1},
- {'inputDup[3]', 'mainAffine[1]', 0},
- {'outputDup[3]', 'mainAffine[2]', 1},
+ {'outputDup[3]', 'mainAffine[1]', 1},
+ --{'inputDup(1 .. n)[3]', 'mainAffine[2 .. n + 1]', 0},
{'mainAffine[1]', 'mainTanh[1]', 0},
{'inputGate[1]', 'inputGateMul[1]', 0},
{'mainTanh[1]', 'inputGateMul[2]', 0},
@@ -71,9 +75,9 @@ function LSTMPLayer:__init(id, global_conf, layer_conf)
{'mainCombine[1]', 'cellDup[1]', 0},
-- forget gate
- {'inputDup[4]', 'outputGate[1]', 0},
- {'outputDup[4]', 'outputGate[2]', 1},
- {'cellDup[4]', 'outputGate[3]', 0},
+ {'outputDup[4]', 'outputGate[1]', 1},
+ {'cellDup[4]', 'outputGate[2]', 0},
+ --{'inputDup(1 .. n)[4]', 'outputGate[2 .. n + 1]', 0},
-- lstm output
{'cellDup[5]', 'outputTanh[1]', 0},
@@ -83,6 +87,13 @@ function LSTMPLayer:__init(id, global_conf, layer_conf)
{'projection[1]', 'outputDup[1]', 0},
{'outputDup[5]', '<output>[1]', 0},
}
+ for i = 1, #din do
+ table.insert(connections, {'<input>[' .. i .. ']', 'inputDup' .. i .. '[1]', 0})
+ table.insert(connections, {'inputDup' .. i .. '[1]', 'inputGate[' .. (i + 2) .. ']', 0})
+ table.insert(connections, {'inputDup' .. i .. '[2]', 'forgetGate[' .. (i + 2) .. ']', 0})
+ table.insert(connections, {'inputDup' .. i .. '[3]', 'mainAffine[' .. (i + 1) .. ']', 0})
+ table.insert(connections, {'inputDup' .. i .. '[4]', 'outputGate[' .. (i + 2) .. ']', 0})
+ end
self:add_prefix(layers, connections)
local layer_repo = nerv.LayerRepo(layers, pr, global_conf)
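
The LSTMP variant keeps the same multi-input wiring and now builds its projection from a bias-free `nerv.AffineLayer`; `cell_dim` sets the cell width that the projection maps down to `dim_out`. A hedged sketch with illustrative values:

    local layers = {
        ['nerv.LSTMPLayer'] = {
            lstmp = {dim_in = {429, 128}, dim_out = {512}, cell_dim = 2048, pr = pr},
        },
    }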
diff --git a/nerv/layer/projection.lua b/nerv/layer/projection.lua
deleted file mode 100644
index 077125b..0000000
--- a/nerv/layer/projection.lua
+++ /dev/null
@@ -1,70 +0,0 @@
-local ProjectionLayer = nerv.class('nerv.ProjectionLayer', 'nerv.Layer')
-
---- The constructor.
-function ProjectionLayer:__init(id, global_conf, layer_conf)
- nerv.Layer.__init(self, id, global_conf, layer_conf)
- self:check_dim_len(-1, 1) -- exactly one output, allow multiple inputs
- self:bind_params()
-end
-
-function ProjectionLayer:bind_params()
- local lconf = self.lconf
- lconf.no_update_ltp1 = lconf.no_update_ltp1 or lconf.no_update_ltp
- for i = 1, #self.dim_in do
- local pid = "ltp" .. i
- local pid_list = i == 1 and {pid, "ltp"} or pid
- self["ltp" .. i] = self:find_param(pid_list, lconf, self.gconf,
- nerv.LinearTransParam,
- {self.dim_in[i], self.dim_out[1]})
- local no_update = lconf["no_update_ltp" .. i]
- if (no_update ~= nil) and no_update or lconf.no_update_all then
- self["ltp" .. i].no_update = true
- end
- end
- self.ltp = self.ltp1 -- alias of ltp1
-end
-
-function ProjectionLayer:init(batch_size)
- for i = 1, #self.dim_in do
- if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
- nerv.error("mismatching dimensions of linear transform parameter and input")
- end
- if self.dim_out[1] ~= self["ltp" .. i].trans:ncol() then
- nerv.error("mismatching dimensions of linear transform parameter and output")
- end
- self["ltp" .. i]:train_init()
- end
-end
-
-function ProjectionLayer:batch_resize(batch_size)
- -- do nothing
-end
-
-function ProjectionLayer:update()
- for i = 1, #self.dim_in do
- self["ltp" .. i]:update_by_err_input()
- end
-end
-
-function ProjectionLayer:propagate(input, output)
- -- apply linear transform
- output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
- for i = 2, #self.dim_in do
- output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
- end
-end
-
-function ProjectionLayer:back_propagate(bp_err, next_bp_err, input, output)
- for i = 1, #self.dim_in do
- next_bp_err[i]:mul(bp_err[1], self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
- self["ltp" .. i]:back_propagate_by_err_input(bp_err[1], input[i])
- end
-end
-
-function ProjectionLayer:get_params()
- local pr = nerv.ParamRepo({self.ltp1}, self.loc_type)
- for i = 2, #self.dim_in do
- pr:add(self["ltp" .. i])
- end
- return pr
-end
diff --git a/nerv/layer/rnn.lua b/nerv/layer/rnn.lua
index fd6e753..63e0b55 100644
--- a/nerv/layer/rnn.lua
+++ b/nerv/layer/rnn.lua
@@ -4,12 +4,7 @@ function RNNLayer:__init(id, global_conf, layer_conf)
nerv.Layer.__init(self, id, global_conf, layer_conf)
self:check_dim_len(-1, 1)
if #self.dim_in == 0 then
- nerv.error('RNN Layer %s has no input', self.id)
- end
-
- self.activation = layer_conf.activation
- if self.activation == nil then
- self.activation = 'nerv.SigmoidLayer'
+ nerv.error('RNN layer %s has no input', self.id)
end
local din = layer_conf.dim_in
@@ -22,10 +17,7 @@ function RNNLayer:__init(id, global_conf, layer_conf)
local layers = {
['nerv.AffineLayer'] = {
- main = {dim_in = table.connect({dout}, din), dim_out = {dout}, pr = pr},
- },
- [self.activation] = {
- activation = {dim_in = {dout}, dim_out = {dout}},
+ main = {dim_in = table.connect({dout}, din), dim_out = {dout}, pr = pr, activation = layer_conf.activation},
},
['nerv.DuplicateLayer'] = {
duplicate = {dim_in = {dout}, dim_out = {dout, dout}},
@@ -33,8 +25,7 @@ function RNNLayer:__init(id, global_conf, layer_conf)
}
local connections = {
- {'main[1]', 'activation[1]', 0},
- {'activation[1]', 'duplicate[1]', 0},
+ {'main[1]', 'duplicate[1]', 0},
{'duplicate[1]', 'main[1]', 1},
{'duplicate[2]', '<output>[1]', 0},
}
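
The RNN layer now hands `layer_conf.activation` to the fused affine instead of instantiating a separate activation layer, so the option is given as a layer class rather than a class-name string. A hedged sketch with illustrative values:

    local layers = {
        ['nerv.RNNLayer'] = {
            rnn = {dim_in = {429}, dim_out = {512}, activation = nerv.SigmoidLayer, pr = pr},
        },
    }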
diff --git a/nerv/nn/network.lua b/nerv/nn/network.lua
index bf69ccc..358b100 100644
--- a/nerv/nn/network.lua
+++ b/nerv/nn/network.lua
@@ -33,8 +33,6 @@ local network = nerv.class('nerv.Network')
--
-- * `network`: a `nerv.Layer` instance describing the structure of the network
-- to be compiled
--- * `clip`: a `number` value indicating the cliping threshold (i.e. preserve
--- the values within [-clip, +clip])
-- * `nn_act_default`: a `number` value indicating the value used for filling
-- "holes" in activation values of a batch matrix (0 by default)
@@ -49,7 +47,6 @@ function network:__init(id, global_conf, network_conf)
else
self.mat_type = self.gconf.cumat_type
end
- self.clip = network_conf.clip
self.nn_act_default = network_conf.nn_act_default
if self.nn_act_default == nil then
self.nn_act_default = 0
@@ -416,7 +413,6 @@ function network:make_initial_store()
local dim_in, dim_out = self.layers[i]:get_dim()
for j = 1, #dim_in do
if self.input[t][i][j] == nil then
- print(t,i,j,self.layers[i].id)
nerv.error('input reference dangling')
end
if self.err_output[t][i][j] == nil then
@@ -661,13 +657,6 @@ function network:back_propagate()
local t, id = self.queue[i].chunk, self.queue[i].id
if t <= self.max_length then
self.layers[id]:back_propagate(self.err_input[t][id], self.err_output[t][id], self.input[t][id], self.output[t][id], t)
- -- gradient clip
- if self.clip ~= nil then
- local dim_in, _ = self.layers[id]:get_dim()
- for j = 1, #dim_in do
- self.err_output[t][id][j]:clip(-self.clip, self.clip)
- end
- end
end
-- flush border gradient
if self.flush[t][id].timestamp == self.timestamp then
diff --git a/nerv/nn/trainer.lua b/nerv/nn/trainer.lua
index 44390ea..a17b36c 100644
--- a/nerv/nn/trainer.lua
+++ b/nerv/nn/trainer.lua
@@ -25,7 +25,7 @@ function trainer:__init(gconf)
self.input_order = self:get_input_order()
self.network = nerv.Network('network', gconf,
{network = graph,
- clip = gconf.clip})
+ nn_act_default = gconf.nn_act_default})
local network = self.network
network:init(gconf.batch_size, gconf.chunk_size)
@@ -77,9 +77,12 @@ function trainer:make_buffer(readers)
})
else
return nerv.SeqBuffer(gconf, {
+ buffer_size = gconf.buffer_size,
batch_size = gconf.batch_size,
chunk_size = gconf.chunk_size,
+ randomize = gconf.randomize,
readers = readers,
+ nn_act_default = gconf.nn_act_default,
})
end
end
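
A hedged sketch of the extra `gconf` fields a training config would now set for the sequence-level buffer; only the field names are taken from the call above, the values are illustrative:

    gconf.buffer_size = 81920      -- frames buffered per sequence-level shuffle
    gconf.randomize = true         -- shuffle the buffered sequences
    gconf.nn_act_default = 0       -- filler value for non-data frames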