author    Determinant <[email protected]>  2016-03-16 17:56:40 +0800
committer Determinant <[email protected]>  2016-03-16 17:56:40 +0800
commit    00211587988024e34370e7ec2ee1cb1c8632f116 (patch)
tree      d4fc3a4fc20f2d5908624b3f6587ecd57966d719
parent    a5a4d2735b595fc9fadc9c7e91198786d3c0e078 (diff)
parent    289ac7f4b6e88b935da5c891e1efcf91fc047403 (diff)
Merge branch 'liuq901-master' into seq-buffer
-rw-r--r--  nerv/Makefile                                                 |   2
-rw-r--r--  nerv/examples/asr_trainer.lua                                 |  71
-rw-r--r--  nerv/examples/network_debug/config.lua                        |  10
-rw-r--r--  nerv/examples/network_debug/main.lua                          |  21
-rw-r--r--  nerv/examples/network_debug/network.lua                       | 120
-rw-r--r--  nerv/examples/network_debug/reader.lua                        |  76
-rw-r--r--  nerv/examples/swb_baseline.lua                                |   1
-rw-r--r--  nerv/examples/swb_baseline2.lua                               |   8
-rw-r--r--  nerv/examples/timit_baseline2.lua                             |   6
-rw-r--r--  nerv/init.lua                                                 |   8
-rw-r--r--  nerv/io/frm_buffer.lua (renamed from nerv/io/sgd_buffer.lua)  |  14
-rw-r--r--  nerv/io/init.lua                                              |   2
-rw-r--r--  nerv/io/seq_buffer.lua                                        | 108
-rw-r--r--  nerv/layer/duplicate.lua                                      |   5
-rw-r--r--  nerv/layer/rnn.lua                                            |   2
-rw-r--r--  nerv/layer/softmax_ce.lua                                     |   4
-rw-r--r--  nerv/lib/matrix/cumatrix.c                                    |   6
-rw-r--r--  nerv/lib/matrix/generic/cukernel.cu                           |  22
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.c                            |  14
-rw-r--r--  nerv/lib/matrix/generic/cumatrix.h                            |   2
-rw-r--r--  nerv/lib/matrix/generic/mmatrix.c                             |  23
-rw-r--r--  nerv/lib/matrix/generic/mmatrix.h                             |   2
-rw-r--r--  nerv/lib/matrix/mmatrix.c                                     |   6
-rw-r--r--  nerv/matrix/generic/cumatrix.c                                |   1
-rw-r--r--  nerv/matrix/generic/matrix.c                                  |  12
-rw-r--r--  nerv/matrix/generic/mmatrix.c                                 |   1
-rw-r--r--  nerv/nn/network.lua                                           | 293
27 files changed, 557 insertions, 283 deletions
diff --git a/nerv/Makefile b/nerv/Makefile
index 68465a1..a5e4f66 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -44,7 +44,7 @@ LUA_LIBS := matrix/init.lua io/init.lua init.lua \
layer/elem_mul.lua layer/lstm.lua layer/lstm_gate.lua layer/dropout.lua layer/gru.lua \
layer/graph.lua layer/rnn.lua layer/duplicate.lua layer/identity.lua \
nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/network.lua \
- io/sgd_buffer.lua io/seq_buffer.lua
+ io/frm_buffer.lua io/seq_buffer.lua
INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
CUDA_INCLUDE := -I $(CUDA_BASE)/include/
diff --git a/nerv/examples/asr_trainer.lua b/nerv/examples/asr_trainer.lua
index 6bdf57c..645f1ef 100644
--- a/nerv/examples/asr_trainer.lua
+++ b/nerv/examples/asr_trainer.lua
@@ -22,9 +22,9 @@ local function build_trainer(ifname)
local input_order = get_input_order()
network = nerv.Network("nt", gconf, {network = network})
- network:init(gconf.batch_size, 1)
+ network:init(gconf.batch_size, gconf.chunk_size)
global_transf = nerv.Network("gt", gconf, {network = global_transf})
- global_transf:init(gconf.batch_size, 1)
+ global_transf:init(gconf.batch_size, gconf.chunk_size)
local iterative_trainer = function (prefix, scp_file, bp, rebind_param_repo)
-- rebind the params if necessary
@@ -39,11 +39,17 @@ local function build_trainer(ifname)
local buffer = make_buffer(make_readers(scp_file, layer_repo))
-- initialize the network
gconf.cnt = 0
- err_input = {mat_type(gconf.batch_size, 1)}
- err_input[1]:fill(1)
+ local err_input = {{}}
+ local output = {{}}
+ for i = 1, gconf.chunk_size do
+ local mini_batch = mat_type(gconf.batch_size, 1)
+ mini_batch:fill(1)
+ table.insert(err_input[1], mini_batch)
+ table.insert(output[1], mat_type(gconf.batch_size, 1))
+ end
network:epoch_init()
global_transf:epoch_init()
- for data in buffer.get_data, buffer do
+ for d in buffer.get_data, buffer do
-- print stat periodically
gconf.cnt = gconf.cnt + 1
if gconf.cnt == 1000 then
@@ -54,35 +60,39 @@ local function build_trainer(ifname)
-- break
end
local input = {}
+ local err_output = {}
-- if gconf.cnt == 1000 then break end
for i, e in ipairs(input_order) do
local id = e.id
- if data[id] == nil then
+ if d.data[id] == nil then
nerv.error("input data %s not found", id)
end
- local transformed
+ local transformed = {}
+ local err_output_i = {}
if e.global_transf then
- transformed = nerv.speech_utils.global_transf(data[id],
- global_transf,
- gconf.frm_ext or 0, 0,
- gconf)
+ for _, mini_batch in ipairs(d.data[id]) do
+ table.insert(transformed,
+ nerv.speech_utils.global_transf(mini_batch,
+ global_transf,
+ gconf.frm_ext or 0, 0,
+ gconf))
+ end
else
- transformed = data[id]
+ transformed = d.data[id]
+ end
+ for _, mini_batch in ipairs(transformed) do
+ table.insert(err_output_i, mini_batch:create())
end
+ table.insert(err_output, err_output_i)
table.insert(input, transformed)
end
- local output = {mat_type(gconf.batch_size, 1)}
- err_output = {}
- for i = 1, #input do
- table.insert(err_output, input[i]:create())
- end
- network:mini_batch_init({seq_length = table.vector(gconf.batch_size, 1),
- new_seq = {},
+ network:mini_batch_init({seq_length = d.seq_length,
+ new_seq = d.new_seq,
do_train = bp,
- input = {input},
- output = {output},
- err_input = {err_input},
- err_output = {err_output}})
+ input = input,
+ output = output,
+ err_input = err_input,
+ err_output = err_output})
network:propagate()
if bp then
network:back_propagate()
@@ -111,19 +121,21 @@ end
local function check_and_add_defaults(spec, opts)
local function get_opt_val(k)
- return opts[string.gsub(k, '_', '-')].val
+ local k = string.gsub(k, '_', '-')
+ return opts[k].val, opts[k].specified
end
local opt_v = get_opt_val("resume_from")
if opt_v then
+ nerv.info("resuming from previous training state")
gconf = dofile(opt_v)
else
for k, v in pairs(spec) do
- local opt_v = get_opt_val(k)
- if opt_v ~= nil then
+ local opt_v, specified = get_opt_val(k)
+ if (not specified) and gconf[k] ~= nil then
+ nerv.info("using setting in network config file: %s = %s", k, gconf[k])
+ elseif opt_v ~= nil then
+ nerv.info("using setting in options: %s = %s", k, opt_v)
gconf[k] = opt_v
- elseif gconf[k] ~= nil then
- elseif v ~= nil then
- gconf[k] = v
end
end
end
@@ -168,6 +180,7 @@ end
local trainer_defaults = {
lrate = 0.8,
batch_size = 256,
+ chunk_size = 1,
buffer_size = 81920,
wcost = 1e-6,
momentum = 0.9,
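
Note on the option handling above: with the new 'specified' flag an option given explicitly on the command line wins, an option absent from the command line falls back to whatever the network config file already set in gconf, and only then does the built-in default from trainer_defaults apply. A hedged illustration for chunk_size (the command-line flag name is hypothetical):

-- hedged illustration of the resulting precedence
-- trainer_defaults:        chunk_size = 1      -- built-in default
-- network config (gconf):  chunk_size = 4      -- overrides the default when the flag is absent
-- command line:            --chunk-size=8      -- overrides both (specified = true)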
diff --git a/nerv/examples/network_debug/config.lua b/nerv/examples/network_debug/config.lua
index e20d5a9..0429e9a 100644
--- a/nerv/examples/network_debug/config.lua
+++ b/nerv/examples/network_debug/config.lua
@@ -35,6 +35,10 @@ function get_layers(global_conf)
['nerv.SoftmaxCELayer'] = {
softmax = {dim_in = {global_conf.vocab_size, global_conf.vocab_size}, dim_out = {1}, compressed = true},
},
+ ['nerv.DuplicateLayer'] = {
+ dup1 = {dim_in = {1}, dim_out = {1}},
+ dup2 = {dim_in = {1}, dim_out = {1}},
+ },
}
for i = 1, global_conf.layer_num do
layers['nerv.LSTMLayer']['lstm' .. i] = {dim_in = {global_conf.hidden_size}, dim_out = {global_conf.hidden_size}, pr = pr}
@@ -45,12 +49,14 @@ end
function get_connections(global_conf)
local connections = {
- {'<input>[1]', 'select[1]', 0},
+ {'<input>[1]', 'dup1[1]', 0},
+ {'dup1[1]', 'select[1]', 0},
{'select[1]', 'lstm1[1]', 0},
{'dropout' .. global_conf.layer_num .. '[1]', 'output[1]', 0},
{'output[1]', 'softmax[1]', 0},
{'<input>[2]', 'softmax[2]', 0},
- {'softmax[1]', '<output>[1]', 0},
+ {'softmax[1]', 'dup2[1]', 0},
+ {'dup2[1]', '<output>[1]', 0},
}
for i = 1, global_conf.layer_num do
table.insert(connections, {'lstm' .. i .. '[1]', 'dropout' .. i .. '[1]', 0})
diff --git a/nerv/examples/network_debug/main.lua b/nerv/examples/network_debug/main.lua
index 790c404..bbcdb6c 100644
--- a/nerv/examples/network_debug/main.lua
+++ b/nerv/examples/network_debug/main.lua
@@ -6,35 +6,26 @@ nerv.include(arg[1])
local global_conf = get_global_conf()
local timer = global_conf.timer
-timer:tic('IO')
-
local data_path = 'examples/lmptb/PTBdata/'
-local train_reader = nerv.Reader(data_path .. 'vocab', data_path .. 'ptb.train.txt.adds')
-local val_reader = nerv.Reader(data_path .. 'vocab', data_path .. 'ptb.valid.txt.adds')
-
-local train_data = train_reader:get_all_batch(global_conf)
-local val_data = val_reader:get_all_batch(global_conf)
local layers = get_layers(global_conf)
local connections = get_connections(global_conf)
-local NN = nerv.NN(global_conf, train_data, val_data, layers, connections)
-
-timer:toc('IO')
-timer:check('IO')
-io.flush()
+local NN = nerv.NN(global_conf, layers, connections)
timer:tic('global')
local best_cv = 1e10
for i = 1, global_conf.max_iter do
timer:tic('Epoch' .. i)
- local train_ppl, val_ppl = NN:epoch()
+ local train_reader = nerv.Reader(data_path .. 'vocab', data_path .. 'ptb.train.txt.adds')
+ local val_reader = nerv.Reader(data_path .. 'vocab', data_path .. 'ptb.valid.txt.adds')
+ local train_ppl, val_ppl = NN:epoch(train_reader, val_reader)
+ nerv.printf('Epoch %d: %f %f %f\n', i, global_conf.lrate, train_ppl, val_ppl)
if val_ppl < best_cv then
best_cv = val_ppl
else
global_conf.lrate = global_conf.lrate / 2.0
end
- nerv.printf('Epoch %d: %f %f %f\n', i, global_conf.lrate, train_ppl, val_ppl)
timer:toc('Epoch' .. i)
timer:check('Epoch' .. i)
io.flush()
@@ -43,3 +34,5 @@ timer:toc('global')
timer:check('global')
timer:check('network')
timer:check('gc')
+timer:check('IO')
+global_conf.cumat_type.print_profile()
diff --git a/nerv/examples/network_debug/network.lua b/nerv/examples/network_debug/network.lua
index 5518e27..386c3b0 100644
--- a/nerv/examples/network_debug/network.lua
+++ b/nerv/examples/network_debug/network.lua
@@ -2,11 +2,17 @@ nerv.include('select_linear.lua')
local nn = nerv.class('nerv.NN')
-function nn:__init(global_conf, train_data, val_data, layers, connections)
+function nn:__init(global_conf, layers, connections)
self.gconf = global_conf
self.network = self:get_network(layers, connections)
- self.train_data = self:get_data(train_data)
- self.val_data = self:get_data(val_data)
+
+ self.output = {}
+ self.err_output = {}
+ for i = 1, self.gconf.chunk_size do
+ self.output[i] = {self.gconf.cumat_type(self.gconf.batch_size, 1)}
+ self.err_output[i] = {self.gconf.cumat_type(self.gconf.batch_size, 1)}
+ self.err_output[i][2] = self.gconf.cumat_type(self.gconf.batch_size, 1)
+ end
end
function nn:get_network(layers, connections)
@@ -20,79 +26,67 @@ function nn:get_network(layers, connections)
return network
end
-function nn:get_data(data)
- local err_output = {}
- local softmax_output = {}
- local output = {}
- for i = 1, self.gconf.chunk_size do
- err_output[i] = self.gconf.cumat_type(self.gconf.batch_size, 1)
- softmax_output[i] = self.gconf.cumat_type(self.gconf.batch_size, self.gconf.vocab_size)
- output[i] = self.gconf.cumat_type(self.gconf.batch_size, 1)
- end
- local ret = {}
- for i = 1, #data do
- ret[i] = {}
- ret[i].input = {}
- ret[i].output = {}
- ret[i].err_input = {}
- ret[i].err_output = {}
- for t = 1, self.gconf.chunk_size do
- ret[i].input[t] = {}
- ret[i].output[t] = {}
- ret[i].err_input[t] = {}
- ret[i].err_output[t] = {}
- ret[i].input[t][1] = data[i].input[t]
- ret[i].input[t][2] = data[i].output[t]
- ret[i].output[t][1] = output[t]
- local err_input = self.gconf.mmat_type(self.gconf.batch_size, 1)
- for j = 1, self.gconf.batch_size do
- if t <= data[i].seq_len[j] then
- err_input[j - 1][0] = 1
- else
- err_input[j - 1][0] = 0
+function nn:process(data, do_train, reader)
+ local timer = self.gconf.timer
+ local buffer = nerv.SeqBuffer(self.gconf, {
+ batch_size = self.gconf.batch_size, chunk_size = self.gconf.chunk_size,
+ readers = {reader},
+ })
+ local total_err = 0
+ local total_frame = 0
+ self.network:epoch_init()
+ while true do
+ timer:tic('IO')
+ data = buffer:get_data()
+ if data == nil then
+ break
+ end
+ local err_input = {}
+ if do_train then
+ for t = 1, self.gconf.chunk_size do
+ local tmp = self.gconf.mmat_type(self.gconf.batch_size, 1)
+ for i = 1, self.gconf.batch_size do
+ if t <= data.seq_length[i] then
+ tmp[i - 1][0] = 1
+ else
+ tmp[i - 1][0] = 0
+ end
end
+ err_input[t] = {self.gconf.cumat_type.new_from_host(tmp)}
end
- ret[i].err_input[t][1] = self.gconf.cumat_type.new_from_host(err_input)
- ret[i].err_output[t][1] = err_output[t]
- ret[i].err_output[t][2] = softmax_output[t]
end
- ret[i].seq_length = data[i].seq_len
- ret[i].new_seq = {}
- for j = 1, self.gconf.batch_size do
- if data[i].seq_start[j] then
- table.insert(ret[i].new_seq, j)
- end
+ local info = {input = {}, output = self.output, err_input = err_input, do_train = do_train,
+ err_output = self.err_output, seq_length = data.seq_length, new_seq = data.new_seq}
+ for t = 1, self.gconf.chunk_size do
+ info.input[t] = {data.data['input'][t]}
+ info.input[t][2] = data.data['label'][t]
end
- end
- return ret
-end
+ timer:toc('IO')
-function nn:process(data, do_train)
- local timer = self.gconf.timer
- local total_err = 0
- local total_frame = 0
- self.network:epoch_init()
- for id = 1, #data do
- data[id].do_train = do_train
timer:tic('network')
- self.network:mini_batch_init(data[id])
+ self.network:mini_batch_init(info)
self.network:propagate()
timer:toc('network')
+
+ timer:tic('IO')
for t = 1, self.gconf.chunk_size do
- local tmp = data[id].output[t][1]:new_to_host()
+ local tmp = info.output[t][1]:new_to_host()
for i = 1, self.gconf.batch_size do
- if t <= data[id].seq_length[i] then
- total_err = total_err + math.log10(math.exp(tmp[i - 1][0]))
- total_frame = total_frame + 1
- end
+ total_err = total_err + math.log10(math.exp(tmp[i - 1][0]))
end
end
+ for i = 1, self.gconf.batch_size do
+ total_frame = total_frame + info.seq_length[i]
+ end
+ timer:toc('IO')
+
+ timer:tic('network')
if do_train then
- timer:tic('network')
self.network:back_propagate()
self.network:update()
- timer:toc('network')
end
+ timer:toc('network')
+
timer:tic('gc')
collectgarbage('collect')
timer:toc('gc')
@@ -100,11 +94,11 @@ function nn:process(data, do_train)
return math.pow(10, - total_err / total_frame)
end
-function nn:epoch()
- local train_error = self:process(self.train_data, true)
+function nn:epoch(train_reader, val_reader)
+ local train_error = self:process(self.train_data, true, train_reader)
local tmp = self.gconf.dropout_rate
self.gconf.dropout_rate = 0
- local val_error = self:process(self.val_data, false)
+ local val_error = self:process(self.val_data, false, val_reader)
self.gconf.dropout_rate = tmp
return train_error, val_error
end
diff --git a/nerv/examples/network_debug/reader.lua b/nerv/examples/network_debug/reader.lua
index b10baaf..76a78cf 100644
--- a/nerv/examples/network_debug/reader.lua
+++ b/nerv/examples/network_debug/reader.lua
@@ -3,6 +3,7 @@ local Reader = nerv.class('nerv.Reader')
function Reader:__init(vocab_file, input_file)
self:get_vocab(vocab_file)
self:get_seq(input_file)
+ self.offset = 1
end
function Reader:get_vocab(vocab_file)
@@ -32,6 +33,7 @@ function Reader:get_seq(input_file)
local f = io.open(input_file, 'r')
self.seq = {}
while true do
+ -- for i = 1, 26 do
local seq = f:read()
if seq == nil then
break
@@ -47,67 +49,19 @@ function Reader:get_seq(input_file)
end
end
-function Reader:get_in_out(id, pos)
- return self.seq[id][pos], self.seq[id][pos + 1], pos + 1 == #self.seq[id]
-end
-
-function Reader:get_all_batch(global_conf)
- local data = {}
- local pos = {}
- local offset = 1
- for i = 1, global_conf.batch_size do
- pos[i] = nil
+function Reader:get_data()
+ if self.offset > #self.seq then
+ return nil
end
- while true do
- -- for i = 1, 26 do
- local input = {}
- local output = {}
- for i = 1, global_conf.chunk_size do
- input[i] = global_conf.mmat_type(global_conf.batch_size, 1)
- input[i]:fill(global_conf.nn_act_default)
- output[i] = global_conf.mmat_type(global_conf.batch_size, 1)
- output[i]:fill(global_conf.nn_act_default)
- end
- local seq_start = {}
- local seq_end = {}
- local seq_len = {}
- for i = 1, global_conf.batch_size do
- seq_start[i] = false
- seq_end[i] = false
- seq_len[i] = 0
- end
- local has_new = false
- for i = 1, global_conf.batch_size do
- if pos[i] == nil then
- if offset < #self.seq then
- seq_start[i] = true
- pos[i] = {offset, 1}
- offset = offset + 1
- end
- end
- if pos[i] ~= nil then
- has_new = true
- for j = 1, global_conf.chunk_size do
- local final
- input[j][i-1][0], output[j][i-1][0], final = self:get_in_out(pos[i][1], pos[i][2])
- seq_len[i] = j
- if final then
- seq_end[i] = true
- pos[i] = nil
- break
- end
- pos[i][2] = pos[i][2] + 1
- end
- end
- end
- if not has_new then
- break
- end
- for i = 1, global_conf.chunk_size do
- input[i] = global_conf.cumat_type.new_from_host(input[i])
- output[i] = global_conf.cumat_type.new_from_host(output[i])
- end
- table.insert(data, {input = input, output = output, seq_start = seq_start, seq_end = seq_end, seq_len = seq_len})
+ local tmp = self.seq[self.offset]
+ local res = {
+ input = nerv.MMatrixFloat(#tmp - 1, 1),
+ label = nerv.MMatrixFloat(#tmp - 1, 1),
+ }
+ for i = 1, #tmp - 1 do
+ res.input[i - 1][0] = tmp[i]
+ res.label[i - 1][0] = tmp[i + 1]
end
- return data
+ self.offset = self.offset + 1
+ return res
end
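
A minimal usage sketch of the rewritten reader, which now streams one sentence per call instead of pre-building every batch (the data path follows main.lua above; the field layout is read off the diff):

local reader = nerv.Reader(data_path .. 'vocab', data_path .. 'ptb.train.txt.adds')
local item = reader:get_data()
-- item.input : nerv.MMatrixFloat of shape (len - 1) x 1, word ids w_1 .. w_{n-1}
-- item.label : nerv.MMatrixFloat of shape (len - 1) x 1, word ids w_2 .. w_n
-- each further call returns the next sentence; nil once all sentences are consumed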
diff --git a/nerv/examples/swb_baseline.lua b/nerv/examples/swb_baseline.lua
index 0ce8468..ece4d44 100644
--- a/nerv/examples/swb_baseline.lua
+++ b/nerv/examples/swb_baseline.lua
@@ -171,6 +171,7 @@ function make_buffer(readers)
{
buffer_size = gconf.buffer_size,
batch_size = gconf.batch_size,
+ chunk_size = gconf.chunk_size,
randomize = gconf.randomize,
readers = readers,
use_gpu = true
diff --git a/nerv/examples/swb_baseline2.lua b/nerv/examples/swb_baseline2.lua
index 6796f6f..38cfb9a 100644
--- a/nerv/examples/swb_baseline2.lua
+++ b/nerv/examples/swb_baseline2.lua
@@ -6,7 +6,8 @@ gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
cv_scp = "/speechlab/users/mfy43/swb50/train_cv.scp",
htk_conf = "/speechlab/users/mfy43/swb50/plp_0_d_a.conf",
initialized_param = {"/speechlab/users/mfy43/swb50/swb_init.nerv",
- "/speechlab/users/mfy43/swb50/swb_global_transf.nerv"}}
+ "/speechlab/users/mfy43/swb50/swb_global_transf.nerv"},
+ chunk_size = 1}
function make_layer_repo(param_repo)
local layer_repo = nerv.LayerRepo(
@@ -145,7 +146,7 @@ end
function make_readers(scp_file, layer_repo)
return {
- {reader = nerv.TNetReader(gconf,
+ {reader = nerv.HTKReader(gconf,
{
id = "main_scp",
scp_file = scp_file,
@@ -166,10 +167,11 @@ function make_readers(scp_file, layer_repo)
end
function make_buffer(readers)
- return nerv.SGDBuffer(gconf,
+ return nerv.FrmBuffer(gconf,
{
buffer_size = gconf.buffer_size,
batch_size = gconf.batch_size,
+ chunk_size = gconf.chunk_size,
randomize = gconf.randomize,
readers = readers,
use_gpu = true
diff --git a/nerv/examples/timit_baseline2.lua b/nerv/examples/timit_baseline2.lua
index b1c1e66..658aa2e 100644
--- a/nerv/examples/timit_baseline2.lua
+++ b/nerv/examples/timit_baseline2.lua
@@ -8,7 +8,8 @@ gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_output.nerv",
"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_trans.nerv"},
-- params in nnet_trans.nerv are included in the trained model
- decode_param = {"/speechlab/users/mfy43/timit/s5/nerv_20160311205342/nnet_init_20160311211609_iter_13_lr0.013437_tr72.572_cv58.709.nerv"}}
+ decode_param = {"/speechlab/users/mfy43/timit/s5/nerv_20160311205342/nnet_init_20160311211609_iter_13_lr0.013437_tr72.572_cv58.709.nerv"},
+ chunk_size = 1}
function make_layer_repo(param_repo)
local layer_repo = nerv.LayerRepo(
@@ -176,10 +177,11 @@ function make_decode_readers(scp_file, layer_repo)
end
function make_buffer(readers)
- return nerv.SGDBuffer(gconf,
+ return nerv.FrmBuffer(gconf,
{
buffer_size = gconf.buffer_size,
batch_size = gconf.batch_size,
+ chunk_size = gconf.chunk_size,
randomize = gconf.randomize,
readers = readers,
use_gpu = true
diff --git a/nerv/init.lua b/nerv/init.lua
index ff944b8..439a83e 100644
--- a/nerv/init.lua
+++ b/nerv/init.lua
@@ -109,7 +109,7 @@ function table.val_to_str(v)
(("number" == type(v) or
"string" == type(v) or
"boolean" == type(v)) and tostring(v)) or
- nil -- failed to serialize
+ "" -- failed to serialize
end
end
@@ -226,7 +226,8 @@ function nerv.parse_args(argv, options, unordered)
local opt_type = v[3]
local opt_meta = {type = opt_type,
desc = v.desc or "",
- val = v.default}
+ val = v.default,
+ specified = false}
if opt_short ~= nil then
if type(opt_short) ~= "string" or #opt_short ~= 1 then err() end
if opt_type ~= "boolean" then
@@ -246,6 +247,7 @@ function nerv.parse_args(argv, options, unordered)
for c in k:gmatch"." do
if sopts[c] then
sopts[c].val = true
+ sopts[c].specified = true
else
nerv.error("invalid option -%s", c)
end
@@ -262,6 +264,7 @@ function nerv.parse_args(argv, options, unordered)
k, opts[k].type)
else
opts[k].val = true
+ opts[k].specified = true
end
else
local k, v = token:match(opt_with_val_exp)
@@ -269,6 +272,7 @@ function nerv.parse_args(argv, options, unordered)
if opts[k] == nil then
nerv.error("invalid option %s", token)
end
+ opts[k].specified = true
if opts[k].type == "boolean" then
if v == "yes" then
opts[k].val = true
diff --git a/nerv/io/sgd_buffer.lua b/nerv/io/frm_buffer.lua
index d78f6d1..9761f16 100644
--- a/nerv/io/sgd_buffer.lua
+++ b/nerv/io/frm_buffer.lua
@@ -1,6 +1,6 @@
-local SGDBuffer = nerv.class("nerv.SGDBuffer", "nerv.DataBuffer")
+local FrmBuffer = nerv.class("nerv.FrmBuffer", "nerv.DataBuffer")
-function SGDBuffer:__init(global_conf, buffer_conf)
+function FrmBuffer:__init(global_conf, buffer_conf)
self.gconf = global_conf
self.batch_size = buffer_conf.batch_size
self.buffer_size = math.floor(buffer_conf.buffer_size /
@@ -57,7 +57,7 @@ function SGDBuffer:__init(global_conf, buffer_conf)
end
end
-function SGDBuffer:saturate()
+function FrmBuffer:saturate()
local buffer_size = self.buffer_size
self.head = 0
self.tail = buffer_size
@@ -116,7 +116,7 @@ function SGDBuffer:saturate()
return self.tail >= self.batch_size
end
-function SGDBuffer:get_data()
+function FrmBuffer:get_data()
local batch_size = self.batch_size
if self.head >= self.tail then -- buffer is empty
local t = os.clock()
@@ -132,7 +132,9 @@ function SGDBuffer:get_data()
return nil -- the remaining data cannot build a batch
end
actual_batch_size = math.min(batch_size, self.tail - self.head)
- local res = {}
+ local res = {seq_length = table.vector(gconf.batch_size, 1),
+ new_seq = {},
+ data = {}}
for i, reader in ipairs(self.readers) do
for id, buff in pairs(reader.buffs) do
local batch = self.output_mat_type(actual_batch_size, buff.width)
@@ -141,7 +143,7 @@ function SGDBuffer:get_data()
else
self.copy_from(batch, buff.data, self.head, self.head + actual_batch_size)
end
- res[id] = batch
+ res.data[id] = {batch}
end
end
self.head = self.head + actual_batch_size
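
In effect the renamed frame-level buffer now wraps each batch in the same envelope that SeqBuffer produces, so the trainer can consume either buffer uniformly. A hedged sketch of one item returned by FrmBuffer:get_data() (field names taken from the diff):

-- res.seq_length = table.vector(batch_size, 1)  -- every stream contributes a single frame
-- res.new_seq    = {}                           -- frame-level data has no sequence starts
-- res.data[id]   = {batch}                      -- a one-step chunk per input id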
diff --git a/nerv/io/init.lua b/nerv/io/init.lua
index c36d850..d3ba27c 100644
--- a/nerv/io/init.lua
+++ b/nerv/io/init.lua
@@ -56,5 +56,5 @@ function DataBuffer:get_data()
nerv.error_method_not_implemented()
end
-nerv.include('sgd_buffer.lua')
+nerv.include('frm_buffer.lua')
nerv.include('seq_buffer.lua')
diff --git a/nerv/io/seq_buffer.lua b/nerv/io/seq_buffer.lua
index e69de29..029e7b8 100644
--- a/nerv/io/seq_buffer.lua
+++ b/nerv/io/seq_buffer.lua
@@ -0,0 +1,108 @@
+local SeqBuffer = nerv.class('nerv.SeqBuffer', 'nerv.DataBuffer')
+
+function SeqBuffer:__init(global_conf, buffer_conf)
+ self.gconf = global_conf
+
+ self.batch_size = buffer_conf.batch_size
+ self.chunk_size = buffer_conf.chunk_size
+ self.readers = {}
+ for _, v in ipairs(buffer_conf.readers) do
+ table.insert(self.readers, v.reader)
+ end
+ self.nn_act_default = buffer_conf.nn_act_default
+ if self.nn_act_default == nil then
+ self.nn_act_default = 0
+ end
+
+ self.mat_type = self.gconf.mmat_type
+ self.queue = {}
+ self.head = 1
+ self.tail = 0
+end
+
+function SeqBuffer:new_mini_batch()
+ local res = {}
+ res.data = {}
+ res.new_seq = {}
+ res.seq_length = {}
+ for i = 1, self.batch_size do
+ res.seq_length[i] = 0
+ end
+ return res
+end
+
+function SeqBuffer:saturate(batch)
+ if self.queue[self.head] ~= nil and self.queue[self.head].seq_length[batch] ~= 0 then
+ return true
+ end
+ local data = {}
+ local drow = nil
+ for i = 1, #self.readers do
+ local tmp = self.readers[i]:get_data()
+ if tmp == nil then
+ return false
+ end
+ for id, d in pairs(tmp) do
+ if drow == nil then
+ drow = d:nrow()
+ elseif d:nrow() ~= drow then
+                nerv.error('readers provide inconsistent rows of data')
+ end
+ data[id] = d
+ end
+ end
+ local offset = 0
+ local head = self.head
+ while offset < drow do
+ local last = math.min(offset + self.chunk_size, drow)
+ if head > self.tail then
+ self.tail = self.tail + 1
+ self.queue[self.tail] = self:new_mini_batch()
+ end
+ self.queue[head].seq_length[batch] = last - offset
+ if offset == 0 then
+ table.insert(self.queue[head].new_seq, batch)
+ end
+ local mini_batch = self.queue[head].data
+ for id, d in pairs(data) do
+ if mini_batch[id] == nil then
+ mini_batch[id] = {}
+ end
+ local tmp = mini_batch[id]
+ for i = offset + 1, last do
+ local chunk = i - offset
+ if tmp[chunk] == nil then
+ tmp[chunk] = self.mat_type(self.batch_size, d:ncol())
+ tmp[chunk]:fill(self.nn_act_default)
+ end
+ tmp[chunk]:copy_from(d, i - 1, i, batch - 1)
+ end
+ end
+ head = head + 1
+ offset = last
+ end
+ return true
+end
+
+function SeqBuffer:get_data()
+ local has_data = false
+ for i = 1, self.batch_size do
+ if self:saturate(i) then
+ has_data = true
+ end
+ end
+ if not has_data then
+ return nil
+ end
+ local res = self.queue[self.head]
+ self.queue[self.head] = nil
+ self.head = self.head + 1
+ if not self.gconf.use_cpu then
+ for id, d in pairs(res.data) do
+ for i = 1, #d do
+ d[i] = self.gconf.cumat_type.new_from_host(d[i])
+ end
+ end
+ end
+ return res
+end
diff --git a/nerv/layer/duplicate.lua b/nerv/layer/duplicate.lua
index 137472b..2621cdf 100644
--- a/nerv/layer/duplicate.lua
+++ b/nerv/layer/duplicate.lua
@@ -20,10 +20,7 @@ function DuplicateLayer:batch_resize()
end
function DuplicateLayer:propagate(input, output)
- for i = 1, #self.dim_out do
- output[i]:copy_from(input[1])
- -- FIXME: use reference copy to speed up
- end
+ -- do nothing, use reference copy in nn/network.lua
end
function DuplicateLayer:back_propagate(bp_err, next_bp_err)
diff --git a/nerv/layer/rnn.lua b/nerv/layer/rnn.lua
index 0b5ccaa..333be9e 100644
--- a/nerv/layer/rnn.lua
+++ b/nerv/layer/rnn.lua
@@ -20,7 +20,7 @@ function RNNLayer:__init(id, global_conf, layer_conf)
['nerv.AffineLayer'] = {
main = {dim_in = {din, dout}, dim_out = {dout}, pr = pr},
},
- [layers.activation] = {
+ [layer_conf.activation] = {
activation = {dim_in = {dout}, dim_out = {dout}},
},
['nerv.DuplicateLayer'] = {
diff --git a/nerv/layer/softmax_ce.lua b/nerv/layer/softmax_ce.lua
index 7b4a80c..acd4ee6 100644
--- a/nerv/layer/softmax_ce.lua
+++ b/nerv/layer/softmax_ce.lua
@@ -61,14 +61,16 @@ function SoftmaxCELayer:propagate(input, output, t)
end
ce:mul_elem(ce, label)
ce = ce:rowsum()
+ ce:set_values_by_mask(self.gconf.mask[t], 0)
if output[1] ~= nil then
output[1]:copy_from(ce)
end
-- add total ce
self.total_ce = self.total_ce - ce:colsum()[0][0]
- self.total_frames = self.total_frames + softmax:nrow()
+ self.total_frames = self.total_frames + self.gconf.mask[t]:colsum()[0][0]
-- TODO: add colsame for uncompressed label
if self.compressed then
+ classified:set_values_by_mask(self.gconf.mask[t], -1)
self.total_correct = self.total_correct + classified:colsame(input[2])[0][0]
end
end
diff --git a/nerv/lib/matrix/cumatrix.c b/nerv/lib/matrix/cumatrix.c
index aec4d60..43448bf 100644
--- a/nerv/lib/matrix/cumatrix.c
+++ b/nerv/lib/matrix/cumatrix.c
@@ -9,15 +9,19 @@
void nerv_cuda_context_print_profile(CuContext *context) {
HashMap *profile = context->profile;
size_t i;
+ float tmp, tot = 0;
fprintf(stderr, "*** [nerv cumatrix profile] **\n");
for (i = 0; i < profile->size; i++)
{
HashNode *ptr;
for (ptr = profile->bucket[i]; ptr; ptr = ptr->next)
{
- fprintf(stderr, "%s:\t%.6f\n", ptr->key, *(float *)ptr->val);
+ tmp = *(float *)ptr->val;
+ fprintf(stderr, "%s:\t%.6f\n", ptr->key, tmp);
+ tot += tmp;
}
}
+ fprintf(stderr, "Total time:\t%.6f\n", tot);
}
void nerv_cuda_context_clear_profile(CuContext *context) {
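
With the change above the profile dump ends with an aggregate line; a hedged example of the resulting output (numbers are illustrative):

*** [nerv cumatrix profile] **
mul:        1.234567
fill:       0.012345
Total time: 1.246912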
diff --git a/nerv/lib/matrix/generic/cukernel.cu b/nerv/lib/matrix/generic/cukernel.cu
index fc630ad..cf9d213 100644
--- a/nerv/lib/matrix/generic/cukernel.cu
+++ b/nerv/lib/matrix/generic/cukernel.cu
@@ -328,6 +328,15 @@ __global__ void cudak_(rearrange_frm)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
b[j + i * stride] = a[j / step + (j % step) * orig_dim + i * stride];
}
+__global__ void cudak_(set_values_by_mask)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
+ int nrow, int ncol,
+ int astride, int bstride, double val) {
+ int j = blockIdx.x * blockDim.x + threadIdx.x;
+ int i = blockIdx.y * blockDim.y + threadIdx.y;
+ if (i >= nrow || j >= ncol || a[i * astride] != 0.0) return;
+ b[j + i * bstride] = val;
+}
+
__global__ void cudak_(scale_rows_by_col)(const MATRIX_ELEM *a, MATRIX_ELEM *b,
int nrow, int ncol,
int astride, int bstride) {
@@ -772,6 +781,19 @@ extern "C" {
cudaStreamSynchronize(0);
}
+ void cudak_(cuda_set_values_by_mask)(const Matrix *a, Matrix *b, double val) {
+ dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
+ dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
+ CEIL_DIV(b->nrow, threadsPerBlock.y));
+ cudak_(set_values_by_mask)<<<numBlocks, threadsPerBlock>>> \
+ (MATRIX_ELEM_PTR(a), MATRIX_ELEM_PTR(b),
+ b->nrow, b->ncol,
+ a->stride / sizeof(MATRIX_ELEM),
+ b->stride / sizeof(MATRIX_ELEM),
+ val);
+ cudaStreamSynchronize(0);
+ }
+
void cudak_(cuda_scale_rows_by_row)(const Matrix *a, Matrix *b) {
dim3 threadsPerBlock(CUDA_THREADS_N, CUDA_THREADS_N);
dim3 numBlocks(CEIL_DIV(b->ncol, threadsPerBlock.x),
diff --git a/nerv/lib/matrix/generic/cumatrix.c b/nerv/lib/matrix/generic/cumatrix.c
index 6d84663..bc5f285 100644
--- a/nerv/lib/matrix/generic/cumatrix.c
+++ b/nerv/lib/matrix/generic/cumatrix.c
@@ -515,7 +515,7 @@ void nerv_matrix_(prefixsum_row)(Matrix *a, const Matrix *b,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
-void nerv_matrix_(diagonalize)(Matrix *a, CuContext * context, Status *status) {
+void nerv_matrix_(diagonalize)(Matrix *a, CuContext *context, Status *status) {
if (a->nrow != a->ncol)
NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
PROFILE_START
@@ -524,6 +524,18 @@ void nerv_matrix_(diagonalize)(Matrix *a, CuContext * context, Status *status) {
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+void nerv_matrix_(set_values_by_mask)(Matrix *a, const Matrix *b, double val,
+ CuContext *context, Status *status) {
+ if (a->nrow != b->nrow)
+ NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
+ if (b->ncol != 1)
+ NERV_EXIT_STATUS(status, MAT_COL_VECTOR_EXP, 0);
+ PROFILE_START
+ cudak_(cuda_set_values_by_mask)(b, a, val);
+ PROFILE_STOP
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
static void cuda_matrix_(free)(MATRIX_ELEM *ptr, CuContext *context, Status *status) {
CUDA_SAFE_SYNC_CALL(cudaFree(ptr), status);
NERV_SET_STATUS(status, NERV_NORMAL, 0);
diff --git a/nerv/lib/matrix/generic/cumatrix.h b/nerv/lib/matrix/generic/cumatrix.h
index de3a09e..79bfc76 100644
--- a/nerv/lib/matrix/generic/cumatrix.h
+++ b/nerv/lib/matrix/generic/cumatrix.h
@@ -35,6 +35,8 @@ void nerv_matrix_(fill)(Matrix *self, double val,
CuContext *context, Status *status);
void nerv_matrix_(diagonalize)(Matrix *self,
CuContext *context, Status *status);
+void nerv_matrix_(set_values_by_mask)(Matrix *self, Matrix *mask, double val,
+ CuContext *context, Status *status);
void nerv_matrix_(copy_fromd)(Matrix *a, const Matrix *b,
int a_begin, int b_begin, int b_end,
CuContext *context, Status *status);
diff --git a/nerv/lib/matrix/generic/mmatrix.c b/nerv/lib/matrix/generic/mmatrix.c
index badddbd..e356de7 100644
--- a/nerv/lib/matrix/generic/mmatrix.c
+++ b/nerv/lib/matrix/generic/mmatrix.c
@@ -507,6 +507,29 @@ void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
NERV_SET_STATUS(status, NERV_NORMAL, 0);
}
+void nerv_matrix_(set_values_by_mask)(Matrix *a, const Matrix *b, double val,
+ MContext *context, Status *status) {
+ if (a->nrow != b->nrow)
+ NERV_EXIT_STATUS(status, MAT_MISMATCH_DIM, 0);
+ if (b->ncol != 1)
+ NERV_EXIT_STATUS(status, MAT_COL_VECTOR_EXP, 0);
+ int i, j;
+ size_t astride = a->stride, bstride = b->stride;
+ MATRIX_ELEM *arow = MATRIX_ELEM_PTR(a),
+ *brow = MATRIX_ELEM_PTR(b);
+ for (i = 0; i < a->nrow; i++)
+ {
+ if (brow[0] == 0.0)
+ {
+ for (j = 0; j < a->ncol; j++)
+ arow[j] = val;
+ }
+ arow = MATRIX_NEXT_ROW_PTR(arow, astride);
+ brow = MATRIX_NEXT_ROW_PTR(brow, bstride);
+ }
+ NERV_SET_STATUS(status, NERV_NORMAL, 0);
+}
+
static void host_matrix_(free)(MATRIX_ELEM *ptr, MContext *context, Status *status) {
free(ptr);
NERV_SET_STATUS(status, NERV_NORMAL, 0);
diff --git a/nerv/lib/matrix/generic/mmatrix.h b/nerv/lib/matrix/generic/mmatrix.h
index 6d17c99..41c39f6 100644
--- a/nerv/lib/matrix/generic/mmatrix.h
+++ b/nerv/lib/matrix/generic/mmatrix.h
@@ -48,6 +48,8 @@ void nerv_matrix_(expand_frm)(Matrix *a, const Matrix *b,
int cont, MContext *context, Status *status);
void nerv_matrix_(rearrange_frm)(Matrix *a, const Matrix *b,
int step, MContext *context, Status *status);
+void nerv_matrix_(set_values_by_mask)(Matrix *a, const Matrix *b, double val,
+ MContext *context, Status *status);
void nerv_matrix_(scale_rows_by_col)(Matrix *a, const Matrix *b,
MContext *context, Status *status);
void nerv_matrix_(scale_rows_by_row)(Matrix *a, const Matrix *b,
diff --git a/nerv/lib/matrix/mmatrix.c b/nerv/lib/matrix/mmatrix.c
index e40b160..006735d 100644
--- a/nerv/lib/matrix/mmatrix.c
+++ b/nerv/lib/matrix/mmatrix.c
@@ -8,15 +8,19 @@
void nerv_host_context_print_profile(MContext *context) {
HashMap *profile = context->profile;
size_t i;
+ float tmp, tot = 0;
fprintf(stderr, "*** [nerv mmatrix profile] **\n");
for (i = 0; i < profile->size; i++)
{
HashNode *ptr;
for (ptr = profile->bucket[i]; ptr; ptr = ptr->next)
{
- fprintf(stderr, "%s:\t%.6f\n", ptr->key, *(float *)ptr->val);
+ tmp = *(float *)ptr->val;
+ fprintf(stderr, "%s:\t%.6f\n", ptr->key, tmp);
+ tot += tmp;
}
}
+ fprintf(stderr, "Total time:\t%.6f\n", tot);
}
void nerv_host_context_clear_profile(MContext *context) {
diff --git a/nerv/matrix/generic/cumatrix.c b/nerv/matrix/generic/cumatrix.c
index 0c90d39..9577fd5 100644
--- a/nerv/matrix/generic/cumatrix.c
+++ b/nerv/matrix/generic/cumatrix.c
@@ -268,6 +268,7 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"scale_rows_by_col", nerv_matrix_(lua_scale_rows_by_col)},
{"prefixsum_row", nerv_matrix_(lua_prefixsum_row)},
{"diagonalize", nerv_matrix_(lua_diagonalize)},
+ {"set_values_by_mask", nerv_matrix_(lua_set_values_by_mask)},
#ifdef __NERV_FUTURE_CUDA_7
{"update_select_rows_by_rowidx", nerv_matrix_(lua_update_select_rows_by_rowidx)},
{"update_select_rows_by_colidx", nerv_matrix_(lua_update_select_rows_by_colidx)},
diff --git a/nerv/matrix/generic/matrix.c b/nerv/matrix/generic/matrix.c
index fe07585..3e91933 100644
--- a/nerv/matrix/generic/matrix.c
+++ b/nerv/matrix/generic/matrix.c
@@ -395,4 +395,16 @@ static int nerv_matrix_(lua_diagonalize)(lua_State *L) {
return 0;
}
+static int nerv_matrix_(lua_set_values_by_mask)(lua_State *L) {
+ Status status;
+ MATRIX_CONTEXT *context;
+ MATRIX_GET_CONTEXT(L, 4);
+ Matrix *a = luaT_checkudata(L, 1, nerv_matrix_(tname));
+ Matrix *mask = luaT_checkudata(L, 2, nerv_matrix_(tname));
+ double val = luaL_checknumber(L, 3);
+ nerv_matrix_(set_values_by_mask)(a, mask, val, context, &status);
+ NERV_LUA_CHECK_STATUS(L, status);
+ return 0;
+}
+
#endif
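
A hedged sketch of how the new Lua binding is meant to be called, mirroring its use in softmax_ce.lua above (matrix sizes are illustrative):

-- rows whose mask entry is 0 are overwritten with val; rows with a
-- non-zero mask entry are left untouched
local m    = gconf.cumat_type(4, 3)   -- 4 x 3 activations
local mask = gconf.cumat_type(4, 1)   -- column vector, one entry per row of m
m:set_values_by_mask(mask, 0)         -- zero the rows that belong to padded frames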
diff --git a/nerv/matrix/generic/mmatrix.c b/nerv/matrix/generic/mmatrix.c
index a5e5969..de1eaa3 100644
--- a/nerv/matrix/generic/mmatrix.c
+++ b/nerv/matrix/generic/mmatrix.c
@@ -117,6 +117,7 @@ static const luaL_Reg nerv_matrix_(extra_methods)[] = {
{"clip", nerv_matrix_(lua_clip)},
{"fill", nerv_matrix_(lua_fill)},
{"diagonalize", nerv_matrix_(lua_diagonalize)},
+ {"set_values_by_mask", nerv_matrix_(lua_set_values_by_mask)},
{"sigmoid", nerv_matrix_(lua_sigmoid)},
{"sigmoid_grad", nerv_matrix_(lua_sigmoid_grad)},
{"softmax", nerv_matrix_(lua_softmax)},
diff --git a/nerv/nn/network.lua b/nerv/nn/network.lua
index 2cb83ce..7e2af4e 100644
--- a/nerv/nn/network.lua
+++ b/nerv/nn/network.lua
@@ -16,6 +16,7 @@ function network:__init(id, global_conf, network_conf)
if self.nn_act_default == nil then
self.nn_act_default = 0
end
+
self.layers = {}
self.input_conn = {}
self.output_conn = {}
@@ -26,7 +27,17 @@ function network:__init(id, global_conf, network_conf)
if self.input_conn[id][port] ~= nil then
nerv.error('duplicate edge')
end
- self.input_conn[id][port] = {0, i, time}
+ if nerv.is_type(self.layers[id], 'nerv.DuplicateLayer') then
+ local tmp = nerv.IdentityLayer('', self.gconf, {dim_in = {self.dim_in[i]}, dim_out = {self.dim_in[i]}})
+ table.insert(self.layers, tmp)
+ local new_id = #self.layers
+ self.input_conn[new_id] = {{0, i, time}}
+ self.output_conn[new_id] = {{id, port, 0}}
+ self.input_conn[id][port] = {new_id, 1, 0}
+ self.socket.inputs[i] = {new_id, 1, time}
+ else
+ self.input_conn[id][port] = {0, i, time}
+ end
end
for i = 1, #self.dim_out do
local edge = self.socket.outputs[i]
@@ -34,18 +45,53 @@ function network:__init(id, global_conf, network_conf)
if self.output_conn[id][port] ~= nil then
nerv.error('duplicate edge')
end
- self.output_conn[id][port] = {0, i, time}
+ if nerv.is_type(self.layers[id], 'nerv.DuplicateLayer') then
+ local tmp = nerv.IdentityLayer('', self.gconf, {dim_in = {self.dim_out[i]}, dim_out = {self.dim_out[i]}})
+ table.insert(self.layers, tmp)
+ local new_id = #self.layers
+ self.input_conn[new_id] = {{id, port, 0}}
+ self.output_conn[new_id] = {{0, i, time}}
+ self.output_conn[id][port] = {new_id, 1, 0}
+ self.socket.outputs[i] = {new_id, 1, time}
+ else
+ self.output_conn[id][port] = {0, i, time}
+ end
end
+
self.delay = 0
for i = 1, #self.layers do
local dim_in, _ = self.layers[i]:get_dim()
for j = 1, #dim_in do
+ if self.input_conn[i][j] == nil then
+ nerv.error('dangling input')
+ end
local time = self.input_conn[i][j][3]
if math.abs(time) > self.delay then
self.delay = math.abs(time)
end
end
end
+
+ self.input_edge = {}
+ self.output_edge = {}
+ for t = -self.delay, self.delay do
+ self.input_edge[t] = {}
+ self.output_edge[t] = {}
+ end
+ for i = 1, #self.layers do
+ local dim_in, dim_out = self.layers[i]:get_dim()
+ for j = 1, #dim_in do
+ local time = self.input_conn[i][j][3]
+ table.insert(self.input_edge[time], {i, j})
+ end
+ for j = 1, #dim_out do
+ if self.output_conn[i][j] == nil then
+ nerv.error('dangling output')
+ end
+ local time = self.output_conn[i][j][3]
+ table.insert(self.output_edge[time], {i, j})
+ end
+ end
end
function network:compile(layer)
@@ -112,11 +158,22 @@ function network:init(batch_size, chunk_size)
self:make_initial_store()
collectgarbage('collect')
+
+ self.flush = {}
+ self.gconf.mask = {}
+ for t = 1, self.chunk_size do
+ self.flush[t] = {}
+ self.gconf.mask[t] = self.mat_type(self.batch_size, 1)
+ end
end
function network:epoch_init()
+ self.timestamp = 0
for i = 1, #self.layers do
self.layers[i]:init(self.batch_size, self.chunk_size)
+ for t = 1, self.chunk_size do
+ self.flush[t][i] = {timestamp = 0, input = {}, output = {}}
+ end
end
end
@@ -134,12 +191,10 @@ function network:topsort()
for i = 1, #self.layers do
local _, dim_out = self.layers[i]:get_dim()
for j = 1, #dim_out do
- if self.output_conn[i][j] ~= nil then
- local edge = self.output_conn[i][j]
- local id, time = edge[1], edge[3] + t
- if time >= 1 and time <= self.chunk_size and id ~= 0 then
- degree[time][id] = degree[time][id] + 1
- end
+ local edge = self.output_conn[i][j]
+ local id, time = edge[1], edge[3] + t
+ if time >= 1 and time <= self.chunk_size and id ~= 0 then
+ degree[time][id] = degree[time][id] + 1
end
end
end
@@ -161,15 +216,13 @@ function network:topsort()
l = l + 1
local _, dim_out = self.layers[i]:get_dim()
for j = 1, #dim_out do
- if self.output_conn[i][j] ~= nil then
- local edge = self.output_conn[i][j]
- local id, time = edge[1], edge[3] + t
- if time >= 1 and time <= self.chunk_size and id ~= 0 then
- degree[time][id] = degree[time][id] - 1
- if degree[time][id] == 0 then
- r = r + 1
- self.queue[r] = {chunk = time, id = id}
- end
+ local edge = self.output_conn[i][j]
+ local id, time = edge[1], edge[3] + t
+ if time >= 1 and time <= self.chunk_size and id ~= 0 then
+ degree[time][id] = degree[time][id] - 1
+ if degree[time][id] == 0 then
+ r = r + 1
+ self.queue[r] = {chunk = time, id = id}
end
end
end
@@ -197,22 +250,26 @@ function network:make_initial_store()
err_memory[t][i][j] = self.mat_type(self.batch_size, dim_in[j])
err_memory[t][i][j]:fill(0)
end
- for j = 1, #dim_out do
- memory[t][i][j] = self.mat_type(self.batch_size, dim_out[j])
- memory[t][i][j]:fill(self.nn_act_default)
+ if t < 1 or t > self.chunk_size or not nerv.is_type(self.layers[i], 'nerv.DuplicateLayer') then
+ for j = 1, #dim_out do
+ memory[t][i][j] = self.mat_type(self.batch_size, dim_out[j])
+ memory[t][i][j]:fill(self.nn_act_default)
+ end
end
end
- -- memory[t][0] stores network input
- memory[t][0] = {}
- for j = 1, #self.dim_in do
- memory[t][0][j] = self.mat_type(self.batch_size, self.dim_in[j])
- memory[t][0][j]:fill(self.nn_act_default)
- end
- -- err_memory[t][0] stores network err_input
- err_memory[t][0] = {}
- for j = 1, #self.dim_out do
- err_memory[t][0][j] = self.mat_type(self.batch_size, self.dim_out[j])
- err_memory[t][0][j]:fill(0)
+ if t < 1 or t > self.chunk_size then
+ -- memory[t][0] stores network input
+ memory[t][0] = {}
+ for j = 1, #self.dim_in do
+ memory[t][0][j] = self.mat_type(self.batch_size, self.dim_in[j])
+ memory[t][0][j]:fill(self.nn_act_default)
+ end
+ -- err_memory[t][0] stores network err_input
+ err_memory[t][0] = {}
+ for j = 1, #self.dim_out do
+ err_memory[t][0][j] = self.mat_type(self.batch_size, self.dim_out[j])
+ err_memory[t][0][j]:fill(0)
+ end
end
end
@@ -255,6 +312,28 @@ function network:make_initial_store()
end
end
+ -- reference copy for duplicate layer
+ for i = 1, #self.queue do
+ local t, id = self.queue[i].chunk, self.queue[i].id
+ if nerv.is_type(self.layers[id], 'nerv.DuplicateLayer') then
+ local _, dim_out = self.layers[id]:get_dim()
+ for j = 1, #dim_out do
+ if self.output[t][id][j] ~= nil then
+ nerv.error('duplicate output reference not nil')
+ end
+ self.output[t][id][j] = self.input[t][id][1]
+ local edge = self.output_conn[id][j]
+ local to, port, time = edge[1], edge[2], edge[3] + t
+ if time >= 1 and time <= self.chunk_size then
+ if self.input[time][to][port] ~= nil then
+ nerv.error('duplicate input reference not nil')
+ end
+ self.input[time][to][port] = self.output[t][id][j]
+ end
+ end
+ end
+ end
+
-- check dangling reference
for t = 1, self.chunk_size do
for i = 1, #self.dim_in do
@@ -291,6 +370,7 @@ function network:make_initial_store()
local dim_in, dim_out = self.layers[i]:get_dim()
for j = 1, #dim_in do
if self.input[t][i][j] == nil then
+ print(t,i,j,self.layers[i].id)
nerv.error('input reference dangling')
end
if self.err_output[t][i][j] == nil then
@@ -314,9 +394,14 @@ function network:make_initial_store()
self.legacy[t] = {}
for i = 1, #self.layers do
self.legacy[t][i] = {}
- local _, dim_out = self.layers[i]:get_dim()
- for j = 1, #dim_out do
- self.legacy[t][i][j] = memory[t][i][j]
+ end
+ end
+ for d = 1, self.delay do
+ for t = 1 - d, 0 do
+ for i = 1, #self.output_edge[d] do
+ local edge = self.output_edge[d][i]
+ local id, port = edge[1], edge[2]
+ self.legacy[t][id][port] = memory[t][id][port]
end
end
end
@@ -328,7 +413,7 @@ function network:set_input(input)
local edge = self.socket.inputs[i]
local id, port, time = edge[1], edge[2], edge[3]
if t + time >= 1 and t + time <= self.chunk_size then
- self.input[t + time][id][port] = input[t][i]
+ self.input[t + time][id][port] = input[i][t]
end
end
end
@@ -340,7 +425,7 @@ function network:set_output(output)
local edge = self.socket.outputs[i]
local id, port, time = edge[1], edge[2], edge[3]
if t - time >= 1 and t - time <= self.chunk_size then
- self.output[t - time][id][port] = output[t][i]
+ self.output[t - time][id][port] = output[i][t]
end
end
end
@@ -352,7 +437,7 @@ function network:set_err_input(err_input)
local edge = self.socket.outputs[i]
local id, port, time = edge[1], edge[2], edge[3]
if t - time >= 1 and t - time <= self.chunk_size then
- self.err_input[t - time][id][port] = err_input[t][i]
+ self.err_input[t - time][id][port] = err_input[i][t]
end
end
end
@@ -364,7 +449,7 @@ function network:set_err_output(err_output)
local edge = self.socket.inputs[i]
local id, port, time = edge[1], edge[2], edge[3]
if t + time >= 1 and t + time <= self.chunk_size then
- self.err_output[t + time][id][port] = err_output[t][i]
+ self.err_output[t + time][id][port] = err_output[i][t]
end
end
end
@@ -383,59 +468,87 @@ function network:mini_batch_init(info)
self.info = info
self:set_input(self.info.input)
self:set_output(self.info.output)
+ if self.info.do_train then
+ self:set_err_input(self.info.err_input)
+ self:set_err_output(self.info.err_output)
+ end
+
+ -- calculate mask
+ for t = 1, self.chunk_size do
+ local tmp = self.gconf.mmat_type(self.batch_size, 1)
+ for i = 1, self.batch_size do
+ if t <= self.info.seq_length[i] then
+ tmp[i - 1][0] = 1
+ else
+ tmp[i - 1][0] = 0
+ end
+ end
+ self.gconf.mask[t]:copy_fromh(tmp)
+ end
-- calculate border
self.max_length = 0
- self.border = {}
- for i = 1, self.chunk_size do
- self.border[i] = {}
- end
+ self.timestamp = self.timestamp + 1
for i = 1, self.batch_size do
if self.info.seq_length[i] > self.max_length then
self.max_length = self.info.seq_length[i]
end
- for t = 1, self.delay do
- local chunk = self.info.seq_length[i] + t
- if chunk > self.chunk_size then
- break
+ local border = self.info.seq_length[i]
+ for d = 1, self.delay do
+ for t = border + 1, border + d do
+ if t > self.chunk_size then
+ break
+ end
+ for j = 1, #self.output_edge[-d] do
+ local edge = self.output_edge[-d][j]
+ local id, port = edge[1], edge[2]
+ local flush = self.flush[t][id]
+ if flush.timestamp ~= self.timestamp then
+ flush.timestamp = self.timestamp
+ flush.input = {}
+ flush.output = {}
+ end
+ table.insert(flush.output, {port, i})
+ end
+ end
+ if self.info.do_train then
+ for t = border, border - d + 1, -1 do
+ if t < 1 then
+ break
+ end
+ for j = 1, #self.input_edge[-d] do
+ local edge = self.input_edge[-d][j]
+ local id, port = edge[1], edge[2]
+ local flush = self.flush[t][id]
+ if flush.timestamp ~= self.timestamp then
+ flush.timestamp = self.timestamp
+ flush.input = {}
+ flush.output = {}
+ end
+ table.insert(flush.input, {port, i})
+ end
+ end
end
- table.insert(self.border[chunk], i)
end
end
-- copy legacy
- for t = 1 - self.delay, 0 do
- for i = 1, #self.layers do
- local _, dim_out = self.layers[i]:get_dim()
- for j = 1, #dim_out do
- if t + self.chunk_size >= 1 and self.output_conn[i][j][1] ~= 0 then
- self.legacy[t][i][j]:copy_from(self.output[t + self.chunk_size][i][j])
+ for d = 1, self.delay do
+ for t = 1 - d, 0 do
+ for i = 1, #self.output_edge[d] do
+ local edge = self.output_edge[d][i]
+ local id, port = edge[1], edge[2]
+ if t + self.chunk_size >= 1 and self.output_conn[id][port][1] ~= 0 then
+ self.legacy[t][id][port]:copy_from(self.output[t + self.chunk_size][id][port])
end
- for k = 1, #self.info.new_seq do
- local batch = self.info.new_seq[k]
- self.legacy[t][i][j][batch - 1]:fill(self.nn_act_default)
+ for j = 1, #self.info.new_seq do
+ local batch = self.info.new_seq[j]
+ self.legacy[t][id][port][batch - 1]:fill(self.nn_act_default)
end
end
end
end
- if self.info.do_train then
- self:set_err_input(self.info.err_input)
- self:set_err_output(self.info.err_output)
-
- -- flush border gradient
- for t = self.max_length + 1, self.max_length + self.delay do
- if t > self.chunk_size then
- break
- end
- for i = 1, #self.layers do
- local dim_in, _ = self.layers[i]:get_dim()
- for j = 1, #dim_in do
- self.err_output[t][i][j]:fill(0)
- end
- end
- end
- end
end
function network:propagate()
@@ -445,11 +558,11 @@ function network:propagate()
self.layers[id]:propagate(self.input[t][id], self.output[t][id], t)
end
-- flush border activation
- for j = 1, #self.border[t] do
- local batch = self.border[t][j]
- local _, dim_out = self.layers[id]:get_dim()
- for k = 1, #dim_out do
- self.output[t][id][k][batch - 1]:fill(self.nn_act_default)
+ if self.flush[t][id].timestamp == self.timestamp then
+ for j = 1, #self.flush[t][id].output do
+ local border = self.flush[t][id].output[j]
+ local port, batch = border[1], border[2]
+ self.output[t][id][port][batch - 1]:fill(self.nn_act_default)
end
end
end
@@ -459,15 +572,8 @@ function network:back_propagate()
for i = #self.queue, 1, -1 do
local t, id = self.queue[i].chunk, self.queue[i].id
if t <= self.max_length then
- -- flush border gradient
- for j = 1, #self.border[t] do
- local batch = self.border[t][j]
- local _, dim_out = self.layers[id]:get_dim()
- for k = 1, #dim_out do
- self.err_input[t][id][k][batch - 1]:fill(0)
- end
- end
self.layers[id]:back_propagate(self.err_input[t][id], self.err_output[t][id], self.input[t][id], self.output[t][id], t)
+ -- gradient clip
if self.clip ~= nil then
local dim_in, _ = self.layers[id]:get_dim()
for j = 1, #dim_in do
@@ -475,14 +581,21 @@ function network:back_propagate()
end
end
end
+ -- flush border gradient
+ if self.flush[t][id].timestamp == self.timestamp then
+ for j = 1, #self.flush[t][id].input do
+ local border = self.flush[t][id].input[j]
+ local port, batch = border[1], border[2]
+ self.err_output[t][id][port][batch - 1]:fill(0)
+ end
+ end
end
end
function network:update()
- for i = 1, #self.queue do
- local t, id = self.queue[i].chunk, self.queue[i].id
- if t <= self.max_length then
- self.layers[id]:update(self.err_input[t][id], self.input[t][id], self.output[t][id], t)
+ for t = 1, self.max_length do
+ for i = 1, #self.layers do
+ self.layers[i]:update(self.err_input[t][i], self.input[t][i], self.output[t][i], t)
end
end
end
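
Putting the pieces together, a hedged sketch of one mini-batch step with the reworked network (graph, d, input, output, err_input and err_output stand for the structures built as in asr_trainer.lua above):

local net = nerv.Network("nt", gconf, {network = graph})
net:init(gconf.batch_size, gconf.chunk_size)
net:epoch_init()
net:mini_batch_init({seq_length = d.seq_length,  -- per-stream lengths in this chunk
                     new_seq    = d.new_seq,     -- streams starting a new sequence
                     do_train   = true,
                     input      = input,         -- input[stream][t] is a batch_size x dim matrix
                     output     = output,
                     err_input  = err_input,
                     err_output = err_output})
net:propagate()
net:back_propagate()
net:update()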