aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDeterminant <ted.sybil@gmail.com>2016-03-16 17:53:39 +0800
committerDeterminant <ted.sybil@gmail.com>2016-03-16 17:53:39 +0800
commit289ac7f4b6e88b935da5c891e1efcf91fc047403 (patch)
treed4fc3a4fc20f2d5908624b3f6587ecd57966d719
parent07fc1e2794027d44c255e1062c4491346b101a08 (diff)
merge seq_buffer and change asr_trainer.lua accordingly
-rw-r--r--Makefile5
-rw-r--r--nerv/Makefile2
-rw-r--r--nerv/examples/asr_trainer.lua71
-rw-r--r--nerv/examples/swb_baseline.lua1
-rw-r--r--nerv/examples/swb_baseline2.lua8
-rw-r--r--nerv/examples/timit_baseline2.lua6
-rw-r--r--nerv/init.lua8
-rw-r--r--nerv/io/frm_buffer.lua (renamed from nerv/io/sgd_buffer.lua)14
-rw-r--r--nerv/io/init.lua2
-rw-r--r--nerv/io/seq_buffer.lua7
-rw-r--r--nerv/layer/rnn.lua2
-rw-r--r--nerv/nn/network.lua8
12 files changed, 80 insertions, 54 deletions
diff --git a/Makefile b/Makefile
index e98302b..728d14b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: all clean install luajit luarocks speech submodule
+.PHONY: all clean install luajit luarocks speech
############## EDIT THESE LINES #####################
SHELL := /bin/bash
PREFIX := $(CURDIR)/install/
@@ -26,8 +26,7 @@ export BLAS_LDFLAGS
nerv-clean speech-clean speech/speech_utils-clean speech/htk_io-clean speech/kaldi_io-clean speech/kaldi_decode-clean \
Penlight
-all: nerv
-submodule: luajit luarocks Penlight
+all: luajit luarocks Penlight nerv
luajit:
PREFIX=$(PREFIX) ./tools/build_luajit.sh
luarocks:
diff --git a/nerv/Makefile b/nerv/Makefile
index 68465a1..a5e4f66 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -44,7 +44,7 @@ LUA_LIBS := matrix/init.lua io/init.lua init.lua \
layer/elem_mul.lua layer/lstm.lua layer/lstm_gate.lua layer/dropout.lua layer/gru.lua \
layer/graph.lua layer/rnn.lua layer/duplicate.lua layer/identity.lua \
nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/network.lua \
- io/sgd_buffer.lua io/seq_buffer.lua
+ io/frm_buffer.lua io/seq_buffer.lua
INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
CUDA_INCLUDE := -I $(CUDA_BASE)/include/
diff --git a/nerv/examples/asr_trainer.lua b/nerv/examples/asr_trainer.lua
index 6bdf57c..645f1ef 100644
--- a/nerv/examples/asr_trainer.lua
+++ b/nerv/examples/asr_trainer.lua
@@ -22,9 +22,9 @@ local function build_trainer(ifname)
local input_order = get_input_order()
network = nerv.Network("nt", gconf, {network = network})
- network:init(gconf.batch_size, 1)
+ network:init(gconf.batch_size, gconf.chunk_size)
global_transf = nerv.Network("gt", gconf, {network = global_transf})
- global_transf:init(gconf.batch_size, 1)
+ global_transf:init(gconf.batch_size, gconf.chunk_size)
local iterative_trainer = function (prefix, scp_file, bp, rebind_param_repo)
-- rebind the params if necessary
@@ -39,11 +39,17 @@ local function build_trainer(ifname)
local buffer = make_buffer(make_readers(scp_file, layer_repo))
-- initialize the network
gconf.cnt = 0
- err_input = {mat_type(gconf.batch_size, 1)}
- err_input[1]:fill(1)
+ local err_input = {{}}
+ local output = {{}}
+ for i = 1, gconf.chunk_size do
+ local mini_batch = mat_type(gconf.batch_size, 1)
+ mini_batch:fill(1)
+ table.insert(err_input[1], mini_batch)
+ table.insert(output[1], mat_type(gconf.batch_size, 1))
+ end
network:epoch_init()
global_transf:epoch_init()
- for data in buffer.get_data, buffer do
+ for d in buffer.get_data, buffer do
-- print stat periodically
gconf.cnt = gconf.cnt + 1
if gconf.cnt == 1000 then
@@ -54,35 +60,39 @@ local function build_trainer(ifname)
-- break
end
local input = {}
+ local err_output = {}
-- if gconf.cnt == 1000 then break end
for i, e in ipairs(input_order) do
local id = e.id
- if data[id] == nil then
+ if d.data[id] == nil then
nerv.error("input data %s not found", id)
end
- local transformed
+ local transformed = {}
+ local err_output_i = {}
if e.global_transf then
- transformed = nerv.speech_utils.global_transf(data[id],
- global_transf,
- gconf.frm_ext or 0, 0,
- gconf)
+ for _, mini_batch in ipairs(d.data[id]) do
+ table.insert(transformed,
+ nerv.speech_utils.global_transf(mini_batch,
+ global_transf,
+ gconf.frm_ext or 0, 0,
+ gconf))
+ end
else
- transformed = data[id]
+ transformed = d.data[id]
+ end
+ for _, mini_batch in ipairs(transformed) do
+ table.insert(err_output_i, mini_batch:create())
end
+ table.insert(err_output, err_output_i)
table.insert(input, transformed)
end
- local output = {mat_type(gconf.batch_size, 1)}
- err_output = {}
- for i = 1, #input do
- table.insert(err_output, input[i]:create())
- end
- network:mini_batch_init({seq_length = table.vector(gconf.batch_size, 1),
- new_seq = {},
+ network:mini_batch_init({seq_length = d.seq_length,
+ new_seq = d.new_seq,
do_train = bp,
- input = {input},
- output = {output},
- err_input = {err_input},
- err_output = {err_output}})
+ input = input,
+ output = output,
+ err_input = err_input,
+ err_output = err_output})
network:propagate()
if bp then
network:back_propagate()
@@ -111,19 +121,21 @@ end
local function check_and_add_defaults(spec, opts)
local function get_opt_val(k)
- return opts[string.gsub(k, '_', '-')].val
+ local k = string.gsub(k, '_', '-')
+ return opts[k].val, opts[k].specified
end
local opt_v = get_opt_val("resume_from")
if opt_v then
+ nerv.info("resuming from previous training state")
gconf = dofile(opt_v)
else
for k, v in pairs(spec) do
- local opt_v = get_opt_val(k)
- if opt_v ~= nil then
+ local opt_v, specified = get_opt_val(k)
+ if (not specified) and gconf[k] ~= nil then
+ nerv.info("using setting in network config file: %s = %s", k, gconf[k])
+ elseif opt_v ~= nil then
+ nerv.info("using setting in options: %s = %s", k, opt_v)
gconf[k] = opt_v
- elseif gconf[k] ~= nil then
- elseif v ~= nil then
- gconf[k] = v
end
end
end
@@ -168,6 +180,7 @@ end
local trainer_defaults = {
lrate = 0.8,
batch_size = 256,
+ chunk_size = 1,
buffer_size = 81920,
wcost = 1e-6,
momentum = 0.9,
diff --git a/nerv/examples/swb_baseline.lua b/nerv/examples/swb_baseline.lua
index 0ce8468..ece4d44 100644
--- a/nerv/examples/swb_baseline.lua
+++ b/nerv/examples/swb_baseline.lua
@@ -171,6 +171,7 @@ function make_buffer(readers)
{
buffer_size = gconf.buffer_size,
batch_size = gconf.batch_size,
+ chunk_size = gconf.chunk_size,
randomize = gconf.randomize,
readers = readers,
use_gpu = true
diff --git a/nerv/examples/swb_baseline2.lua b/nerv/examples/swb_baseline2.lua
index 6796f6f..38cfb9a 100644
--- a/nerv/examples/swb_baseline2.lua
+++ b/nerv/examples/swb_baseline2.lua
@@ -6,7 +6,8 @@ gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
cv_scp = "/speechlab/users/mfy43/swb50/train_cv.scp",
htk_conf = "/speechlab/users/mfy43/swb50/plp_0_d_a.conf",
initialized_param = {"/speechlab/users/mfy43/swb50/swb_init.nerv",
- "/speechlab/users/mfy43/swb50/swb_global_transf.nerv"}}
+ "/speechlab/users/mfy43/swb50/swb_global_transf.nerv"},
+ chunk_size = 1}
function make_layer_repo(param_repo)
local layer_repo = nerv.LayerRepo(
@@ -145,7 +146,7 @@ end
function make_readers(scp_file, layer_repo)
return {
- {reader = nerv.TNetReader(gconf,
+ {reader = nerv.HTKReader(gconf,
{
id = "main_scp",
scp_file = scp_file,
@@ -166,10 +167,11 @@ function make_readers(scp_file, layer_repo)
end
function make_buffer(readers)
- return nerv.SGDBuffer(gconf,
+ return nerv.FrmBuffer(gconf,
{
buffer_size = gconf.buffer_size,
batch_size = gconf.batch_size,
+ chunk_size = gconf.chunk_size,
randomize = gconf.randomize,
readers = readers,
use_gpu = true
diff --git a/nerv/examples/timit_baseline2.lua b/nerv/examples/timit_baseline2.lua
index b1c1e66..658aa2e 100644
--- a/nerv/examples/timit_baseline2.lua
+++ b/nerv/examples/timit_baseline2.lua
@@ -8,7 +8,8 @@ gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_output.nerv",
"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_trans.nerv"},
-- params in nnet_trans.nerv are included in the trained model
- decode_param = {"/speechlab/users/mfy43/timit/s5/nerv_20160311205342/nnet_init_20160311211609_iter_13_lr0.013437_tr72.572_cv58.709.nerv"}}
+ decode_param = {"/speechlab/users/mfy43/timit/s5/nerv_20160311205342/nnet_init_20160311211609_iter_13_lr0.013437_tr72.572_cv58.709.nerv"},
+ chunk_size = 1}
function make_layer_repo(param_repo)
local layer_repo = nerv.LayerRepo(
@@ -176,10 +177,11 @@ function make_decode_readers(scp_file, layer_repo)
end
function make_buffer(readers)
- return nerv.SGDBuffer(gconf,
+ return nerv.FrmBuffer(gconf,
{
buffer_size = gconf.buffer_size,
batch_size = gconf.batch_size,
+ chunk_size = gconf.chunk_size,
randomize = gconf.randomize,
readers = readers,
use_gpu = true
diff --git a/nerv/init.lua b/nerv/init.lua
index ff944b8..439a83e 100644
--- a/nerv/init.lua
+++ b/nerv/init.lua
@@ -109,7 +109,7 @@ function table.val_to_str(v)
(("number" == type(v) or
"string" == type(v) or
"boolean" == type(v)) and tostring(v)) or
- nil -- failed to serialize
+ "" -- failed to serialize
end
end
@@ -226,7 +226,8 @@ function nerv.parse_args(argv, options, unordered)
local opt_type = v[3]
local opt_meta = {type = opt_type,
desc = v.desc or "",
- val = v.default}
+ val = v.default,
+ specified = false}
if opt_short ~= nil then
if type(opt_short) ~= "string" or #opt_short ~= 1 then err() end
if opt_type ~= "boolean" then
@@ -246,6 +247,7 @@ function nerv.parse_args(argv, options, unordered)
for c in k:gmatch"." do
if sopts[c] then
sopts[c].val = true
+ sopts[c].specified = true
else
nerv.error("invalid option -%s", c)
end
@@ -262,6 +264,7 @@ function nerv.parse_args(argv, options, unordered)
k, opts[k].type)
else
opts[k].val = true
+ opts[k].specified = true
end
else
local k, v = token:match(opt_with_val_exp)
@@ -269,6 +272,7 @@ function nerv.parse_args(argv, options, unordered)
if opts[k] == nil then
nerv.error("invalid option %s", token)
end
+ opts[k].specified = true
if opts[k].type == "boolean" then
if v == "yes" then
opts[k].val = true
diff --git a/nerv/io/sgd_buffer.lua b/nerv/io/frm_buffer.lua
index d78f6d1..9761f16 100644
--- a/nerv/io/sgd_buffer.lua
+++ b/nerv/io/frm_buffer.lua
@@ -1,6 +1,6 @@
-local SGDBuffer = nerv.class("nerv.SGDBuffer", "nerv.DataBuffer")
+local FrmBuffer = nerv.class("nerv.FrmBuffer", "nerv.DataBuffer")
-function SGDBuffer:__init(global_conf, buffer_conf)
+function FrmBuffer:__init(global_conf, buffer_conf)
self.gconf = global_conf
self.batch_size = buffer_conf.batch_size
self.buffer_size = math.floor(buffer_conf.buffer_size /
@@ -57,7 +57,7 @@ function SGDBuffer:__init(global_conf, buffer_conf)
end
end
-function SGDBuffer:saturate()
+function FrmBuffer:saturate()
local buffer_size = self.buffer_size
self.head = 0
self.tail = buffer_size
@@ -116,7 +116,7 @@ function SGDBuffer:saturate()
return self.tail >= self.batch_size
end
-function SGDBuffer:get_data()
+function FrmBuffer:get_data()
local batch_size = self.batch_size
if self.head >= self.tail then -- buffer is empty
local t = os.clock()
@@ -132,7 +132,9 @@ function SGDBuffer:get_data()
return nil -- the remaining data cannot build a batch
end
actual_batch_size = math.min(batch_size, self.tail - self.head)
- local res = {}
+ local res = {seq_length = table.vector(gconf.batch_size, 1),
+ new_seq = {},
+ data = {}}
for i, reader in ipairs(self.readers) do
for id, buff in pairs(reader.buffs) do
local batch = self.output_mat_type(actual_batch_size, buff.width)
@@ -141,7 +143,7 @@ function SGDBuffer:get_data()
else
self.copy_from(batch, buff.data, self.head, self.head + actual_batch_size)
end
- res[id] = batch
+ res.data[id] = {batch}
end
end
self.head = self.head + actual_batch_size
diff --git a/nerv/io/init.lua b/nerv/io/init.lua
index c36d850..d3ba27c 100644
--- a/nerv/io/init.lua
+++ b/nerv/io/init.lua
@@ -56,5 +56,5 @@ function DataBuffer:get_data()
nerv.error_method_not_implemented()
end
-nerv.include('sgd_buffer.lua')
+nerv.include('frm_buffer.lua')
nerv.include('seq_buffer.lua')
diff --git a/nerv/io/seq_buffer.lua b/nerv/io/seq_buffer.lua
index ad1b3f7..029e7b8 100644
--- a/nerv/io/seq_buffer.lua
+++ b/nerv/io/seq_buffer.lua
@@ -5,7 +5,10 @@ function SeqBuffer:__init(global_conf, buffer_conf)
self.batch_size = buffer_conf.batch_size
self.chunk_size = buffer_conf.chunk_size
- self.readers = buffer_conf.readers
+ self.readers = {}
+ for _, v in ipairs(buffer_conf.readers) do
+ table.insert(self.readers, v.reader)
+ end
self.nn_act_default = buffer_conf.nn_act_default
if self.nn_act_default == nil then
self.nn_act_default = 0
@@ -29,7 +32,7 @@ function SeqBuffer:new_mini_batch()
end
function SeqBuffer:saturate(batch)
- if self.queue[self.head] ~= nil and self.queue[self.head].seq_length[batch] ~= 0 then
+ if self.queue[self.head] ~= nil and self.queue[self.head].seq_length[batch] ~= 0 then
return true
end
local data = {}
diff --git a/nerv/layer/rnn.lua b/nerv/layer/rnn.lua
index 0b5ccaa..333be9e 100644
--- a/nerv/layer/rnn.lua
+++ b/nerv/layer/rnn.lua
@@ -20,7 +20,7 @@ function RNNLayer:__init(id, global_conf, layer_conf)
['nerv.AffineLayer'] = {
main = {dim_in = {din, dout}, dim_out = {dout}, pr = pr},
},
- [layers.activation] = {
+ [layer_conf.activation] = {
activation = {dim_in = {dout}, dim_out = {dout}},
},
['nerv.DuplicateLayer'] = {
diff --git a/nerv/nn/network.lua b/nerv/nn/network.lua
index 6f7fe10..7e2af4e 100644
--- a/nerv/nn/network.lua
+++ b/nerv/nn/network.lua
@@ -413,7 +413,7 @@ function network:set_input(input)
local edge = self.socket.inputs[i]
local id, port, time = edge[1], edge[2], edge[3]
if t + time >= 1 and t + time <= self.chunk_size then
- self.input[t + time][id][port] = input[t][i]
+ self.input[t + time][id][port] = input[i][t]
end
end
end
@@ -425,7 +425,7 @@ function network:set_output(output)
local edge = self.socket.outputs[i]
local id, port, time = edge[1], edge[2], edge[3]
if t - time >= 1 and t - time <= self.chunk_size then
- self.output[t - time][id][port] = output[t][i]
+ self.output[t - time][id][port] = output[i][t]
end
end
end
@@ -437,7 +437,7 @@ function network:set_err_input(err_input)
local edge = self.socket.outputs[i]
local id, port, time = edge[1], edge[2], edge[3]
if t - time >= 1 and t - time <= self.chunk_size then
- self.err_input[t - time][id][port] = err_input[t][i]
+ self.err_input[t - time][id][port] = err_input[i][t]
end
end
end
@@ -449,7 +449,7 @@ function network:set_err_output(err_output)
local edge = self.socket.inputs[i]
local id, port, time = edge[1], edge[2], edge[3]
if t + time >= 1 and t + time <= self.chunk_size then
- self.err_output[t + time][id][port] = err_output[t][i]
+ self.err_output[t + time][id][port] = err_output[i][t]
end
end
end