From 289ac7f4b6e88b935da5c891e1efcf91fc047403 Mon Sep 17 00:00:00 2001
From: Determinant
Date: Wed, 16 Mar 2016 17:53:39 +0800
Subject: merge seq_buffer and change asr_trainer.lua accordingly

---
 nerv/Makefile                     |   2 +-
 nerv/examples/asr_trainer.lua     |  71 ++++++++++--------
 nerv/examples/swb_baseline.lua    |   1 +
 nerv/examples/swb_baseline2.lua   |   8 +-
 nerv/examples/timit_baseline2.lua |   6 +-
 nerv/init.lua                     |   8 +-
 nerv/io/frm_buffer.lua            | 151 ++++++++++++++++++++++++++++++++++++++
 nerv/io/init.lua                  |   2 +-
 nerv/io/seq_buffer.lua            |   7 +-
 nerv/io/sgd_buffer.lua            | 149 -------------------------------------
 nerv/layer/rnn.lua                |   2 +-
 nerv/nn/network.lua               |   8 +-
 12 files changed, 221 insertions(+), 194 deletions(-)
 create mode 100644 nerv/io/frm_buffer.lua
 delete mode 100644 nerv/io/sgd_buffer.lua

diff --git a/nerv/Makefile b/nerv/Makefile
index 68465a1..a5e4f66 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -44,7 +44,7 @@ LUA_LIBS := matrix/init.lua io/init.lua init.lua \
         layer/elem_mul.lua layer/lstm.lua layer/lstm_gate.lua layer/dropout.lua layer/gru.lua \
         layer/graph.lua layer/rnn.lua layer/duplicate.lua layer/identity.lua \
         nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/network.lua \
-        io/sgd_buffer.lua io/seq_buffer.lua
+        io/frm_buffer.lua io/seq_buffer.lua
 
 INCLUDE := -I $(LUA_INCDIR) -DLUA_USE_APICHECK
 CUDA_INCLUDE := -I $(CUDA_BASE)/include/
diff --git a/nerv/examples/asr_trainer.lua b/nerv/examples/asr_trainer.lua
index 6bdf57c..645f1ef 100644
--- a/nerv/examples/asr_trainer.lua
+++ b/nerv/examples/asr_trainer.lua
@@ -22,9 +22,9 @@ local function build_trainer(ifname)
     local input_order = get_input_order()
 
     network = nerv.Network("nt", gconf, {network = network})
-    network:init(gconf.batch_size, 1)
+    network:init(gconf.batch_size, gconf.chunk_size)
     global_transf = nerv.Network("gt", gconf, {network = global_transf})
-    global_transf:init(gconf.batch_size, 1)
+    global_transf:init(gconf.batch_size, gconf.chunk_size)
 
     local iterative_trainer = function (prefix, scp_file, bp, rebind_param_repo)
         -- rebind the params if necessary
@@ -39,11 +39,17 @@ local function build_trainer(ifname)
         local buffer = make_buffer(make_readers(scp_file, layer_repo))
         -- initialize the network
         gconf.cnt = 0
-        err_input = {mat_type(gconf.batch_size, 1)}
-        err_input[1]:fill(1)
+        local err_input = {{}}
+        local output = {{}}
+        for i = 1, gconf.chunk_size do
+            local mini_batch = mat_type(gconf.batch_size, 1)
+            mini_batch:fill(1)
+            table.insert(err_input[1], mini_batch)
+            table.insert(output[1], mat_type(gconf.batch_size, 1))
+        end
         network:epoch_init()
         global_transf:epoch_init()
-        for data in buffer.get_data, buffer do
+        for d in buffer.get_data, buffer do
             -- prine stat periodically
             gconf.cnt = gconf.cnt + 1
             if gconf.cnt == 1000 then
@@ -54,35 +60,39 @@ local function build_trainer(ifname)
                 -- break
             end
             local input = {}
+            local err_output = {}
             -- if gconf.cnt == 1000 then break end
             for i, e in ipairs(input_order) do
                 local id = e.id
-                if data[id] == nil then
+                if d.data[id] == nil then
                     nerv.error("input data %s not found", id)
                 end
-                local transformed
+                local transformed = {}
+                local err_output_i = {}
                 if e.global_transf then
-                    transformed = nerv.speech_utils.global_transf(data[id],
-                                                                  global_transf,
-                                                                  gconf.frm_ext or 0, 0,
-                                                                  gconf)
+                    for _, mini_batch in ipairs(d.data[id]) do
+                        table.insert(transformed,
+                                     nerv.speech_utils.global_transf(mini_batch,
+                                                                     global_transf,
+                                                                     gconf.frm_ext or 0, 0,
+                                                                     gconf))
+                    end
                 else
-                    transformed = data[id]
+                    transformed = d.data[id]
+                end
+                for _, mini_batch in ipairs(transformed) do
+                    table.insert(err_output_i, mini_batch:create())
                 end
+                table.insert(err_output, err_output_i)
                 table.insert(input, transformed)
             end
-            local output = {mat_type(gconf.batch_size, 1)}
-            err_output = {}
-            for i = 1, #input do
-                table.insert(err_output, input[i]:create())
-            end
-            network:mini_batch_init({seq_length = table.vector(gconf.batch_size, 1),
-                                     new_seq = {},
+            network:mini_batch_init({seq_length = d.seq_length,
+                                     new_seq = d.new_seq,
                                      do_train = bp,
-                                     input = {input},
-                                     output = {output},
-                                     err_input = {err_input},
-                                     err_output = {err_output}})
+                                     input = input,
+                                     output = output,
+                                     err_input = err_input,
+                                     err_output = err_output})
             network:propagate()
             if bp then
                 network:back_propagate()
@@ -111,19 +121,21 @@ end
 
 local function check_and_add_defaults(spec, opts)
     local function get_opt_val(k)
-        return opts[string.gsub(k, '_', '-')].val
+        local k = string.gsub(k, '_', '-')
+        return opts[k].val, opts[k].specified
     end
     local opt_v = get_opt_val("resume_from")
     if opt_v then
+        nerv.info("resuming from previous training state")
         gconf = dofile(opt_v)
     else
         for k, v in pairs(spec) do
-            local opt_v = get_opt_val(k)
-            if opt_v ~= nil then
+            local opt_v, specified = get_opt_val(k)
+            if (not specified) and gconf[k] ~= nil then
+                nerv.info("using setting in network config file: %s = %s", k, gconf[k])
+            elseif opt_v ~= nil then
+                nerv.info("using setting in options: %s = %s", k, opt_v)
                 gconf[k] = opt_v
-            elseif gconf[k] ~= nil then
-            elseif v ~= nil then
-                gconf[k] = v
             end
         end
     end
@@ -168,6 +180,7 @@ end
 local trainer_defaults = {
     lrate = 0.8,
     batch_size = 256,
+    chunk_size = 1,
     buffer_size = 81920,
     wcost = 1e-6,
     momentum = 0.9,
diff --git a/nerv/examples/swb_baseline.lua b/nerv/examples/swb_baseline.lua
index 0ce8468..ece4d44 100644
--- a/nerv/examples/swb_baseline.lua
+++ b/nerv/examples/swb_baseline.lua
@@ -171,6 +171,7 @@ function make_buffer(readers)
                        {
                            buffer_size = gconf.buffer_size,
                            batch_size = gconf.batch_size,
+                           chunk_size = gconf.chunk_size,
                            randomize = gconf.randomize,
                            readers = readers,
                            use_gpu = true
diff --git a/nerv/examples/swb_baseline2.lua b/nerv/examples/swb_baseline2.lua
index 6796f6f..38cfb9a 100644
--- a/nerv/examples/swb_baseline2.lua
+++ b/nerv/examples/swb_baseline2.lua
@@ -6,7 +6,8 @@ gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
         cv_scp = "/speechlab/users/mfy43/swb50/train_cv.scp",
         htk_conf = "/speechlab/users/mfy43/swb50/plp_0_d_a.conf",
         initialized_param = {"/speechlab/users/mfy43/swb50/swb_init.nerv",
-                             "/speechlab/users/mfy43/swb50/swb_global_transf.nerv"}}
+                             "/speechlab/users/mfy43/swb50/swb_global_transf.nerv"},
+        chunk_size = 1}
 
 function make_layer_repo(param_repo)
     local layer_repo = nerv.LayerRepo(
@@ -145,7 +146,7 @@ end
 
 function make_readers(scp_file, layer_repo)
     return {
-            {reader = nerv.TNetReader(gconf,
+            {reader = nerv.HTKReader(gconf,
                 {
                     id = "main_scp",
                     scp_file = scp_file,
@@ -166,10 +167,11 @@ end
 
 function make_buffer(readers)
-    return nerv.SGDBuffer(gconf,
+    return nerv.FrmBuffer(gconf,
                           {
                               buffer_size = gconf.buffer_size,
                               batch_size = gconf.batch_size,
+                              chunk_size = gconf.chunk_size,
                               randomize = gconf.randomize,
                               readers = readers,
                               use_gpu = true
diff --git a/nerv/examples/timit_baseline2.lua b/nerv/examples/timit_baseline2.lua
index b1c1e66..658aa2e 100644
--- a/nerv/examples/timit_baseline2.lua
+++ b/nerv/examples/timit_baseline2.lua
@@ -8,7 +8,8 @@ gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
                              "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_output.nerv",
"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_trans.nerv"}, -- params in nnet_trans.nerv are included in the trained model - decode_param = {"/speechlab/users/mfy43/timit/s5/nerv_20160311205342/nnet_init_20160311211609_iter_13_lr0.013437_tr72.572_cv58.709.nerv"}} + decode_param = {"/speechlab/users/mfy43/timit/s5/nerv_20160311205342/nnet_init_20160311211609_iter_13_lr0.013437_tr72.572_cv58.709.nerv"}, + chunk_size = 1} function make_layer_repo(param_repo) local layer_repo = nerv.LayerRepo( @@ -176,10 +177,11 @@ function make_decode_readers(scp_file, layer_repo) end function make_buffer(readers) - return nerv.SGDBuffer(gconf, + return nerv.FrmBuffer(gconf, { buffer_size = gconf.buffer_size, batch_size = gconf.batch_size, + chunk_size = gconf.chunk_size, randomize = gconf.randomize, readers = readers, use_gpu = true diff --git a/nerv/init.lua b/nerv/init.lua index ff944b8..439a83e 100644 --- a/nerv/init.lua +++ b/nerv/init.lua @@ -109,7 +109,7 @@ function table.val_to_str(v) (("number" == type(v) or "string" == type(v) or "boolean" == type(v)) and tostring(v)) or - nil -- failed to serialize + "" -- failed to serialize end end @@ -226,7 +226,8 @@ function nerv.parse_args(argv, options, unordered) local opt_type = v[3] local opt_meta = {type = opt_type, desc = v.desc or "", - val = v.default} + val = v.default, + specified = false} if opt_short ~= nil then if type(opt_short) ~= "string" or #opt_short ~= 1 then err() end if opt_type ~= "boolean" then @@ -246,6 +247,7 @@ function nerv.parse_args(argv, options, unordered) for c in k:gmatch"." do if sopts[c] then sopts[c].val = true + sopts[c].specified = true else nerv.error("invalid option -%s", c) end @@ -262,6 +264,7 @@ function nerv.parse_args(argv, options, unordered) k, opts[k].type) else opts[k].val = true + opts[k].specified = true end else local k, v = token:match(opt_with_val_exp) @@ -269,6 +272,7 @@ function nerv.parse_args(argv, options, unordered) if opts[k] == nil then nerv.error("invalid option %s", token) end + opts[k].specified = true if opts[k].type == "boolean" then if v == "yes" then opts[k].val = true diff --git a/nerv/io/frm_buffer.lua b/nerv/io/frm_buffer.lua new file mode 100644 index 0000000..9761f16 --- /dev/null +++ b/nerv/io/frm_buffer.lua @@ -0,0 +1,151 @@ +local FrmBuffer = nerv.class("nerv.FrmBuffer", "nerv.DataBuffer") + +function FrmBuffer:__init(global_conf, buffer_conf) + self.gconf = global_conf + self.batch_size = buffer_conf.batch_size + self.buffer_size = math.floor(buffer_conf.buffer_size / + self.batch_size) * self.batch_size + self.randomize = buffer_conf.randomize + self.consume = buffer_conf.consume + local cumat_type = global_conf.cumat_type + if self.gconf.use_cpu then + self.output_mat_type = self.gconf.mmat_type + else + self.output_mat_type = self.gconf.cumat_type + end + if buffer_conf.use_gpu then + self.mat_type = cumat_type + if self.gconf.use_cpu then + -- gpu buffer -> cpu training + nerv.error("not implemeted") + else + -- gpu buffer -> gpu training + self.copy_rows_from_by_idx = cumat_type.copy_rows_fromd_by_idx + self.copy_from = cumat_type.copy_fromd + end + self.perm_gen = function (x) + return cumat_type.new_from_host(nerv.MMatrixFloat.perm_gen(x)) + end + else + self.mat_type = global_conf.mmat_type + if self.gconf.use_cpu then + -- cpu buffer -> cpu training + self.copy_rows_from_by_idx = gconf.mmat_type.copy_rows_fromh_by_idx + self.copy_from = gconf.mmat_type.copy_fromh + else + -- cpu buffer -> gpu training + self.copy_rows_from_by_idx = cumat_type.copy_rows_fromh_by_idx 
+            self.copy_from = cumat_type.copy_fromh
+        end
+        self.perm_gen = nerv.MMatrixFloat.perm_gen
+    end
+    self.copy_from_reader = self.mat_type.copy_fromh
+    self.head = 0
+    self.tail = 0
+    self.readers = {}
+    for i, reader_spec in ipairs(buffer_conf.readers) do
+        local buffs = {}
+        for id, width in pairs(reader_spec.data) do
+            buffs[id] = {data = self.mat_type(self.buffer_size, width),
+                         leftover = nil,
+                         width = width}
+        end
+        table.insert(self.readers, {buffs = buffs,
+                                    reader = reader_spec.reader,
+                                    tail = 0,
+                                    has_leftover = false})
+    end
+end
+
+function FrmBuffer:saturate()
+    local buffer_size = self.buffer_size
+    self.head = 0
+    self.tail = buffer_size
+    for i, reader in ipairs(self.readers) do
+        reader.tail = 0
+        if reader.has_leftover then
+            local lrow
+            for id, buff in pairs(reader.buffs) do
+                lrow = buff.leftover:nrow()
+                if lrow > buffer_size then
+                    nerv.error("buffer size is too small to contain leftovers")
+                end
+                buff.data:copy_from(buff.leftover, 0, lrow)
+                buff.leftover = nil
+            end
+            nerv.info("buffer leftover: %d\n", lrow)
+            reader.tail = lrow
+            reader.has_leftover = false
+        end
+        while reader.tail < buffer_size do
+            local data = reader.reader:get_data()
+            if data == nil then
+                break
+            end
+            local drow = nil
+            for id, d in pairs(data) do
+                if drow == nil then
+                    drow = d:nrow()
+                elseif d:nrow() ~= drow then
+                    nerv.error("reader provides inconsistent rows of data")
+                end
+            end
+            local remain = buffer_size - reader.tail
+            if drow > remain then
+                for id, buff in pairs(reader.buffs) do
+                    local d = data[id]
+                    if d == nil then
+                        nerv.error("reader does not provide data for %s", id)
+                    end
+                    buff.leftover = self.mat_type(drow - remain,
+                                                  buff.width)
+                    self.copy_from_reader(buff.leftover, d, remain, drow)
+                end
+                drow = remain
+                reader.has_leftover = true
+            end
+            for id, buff in pairs(reader.buffs) do
+                self.copy_from_reader(buff.data, data[id], 0, drow, reader.tail)
+            end
+            reader.tail = reader.tail + drow
+        end
+        self.tail = math.min(self.tail, reader.tail)
+    end
+    self.rand_map = self.perm_gen(self.tail) -- generate shuffled index
+    collectgarbage("collect")
+    return self.tail >= self.batch_size
+end
+
+function FrmBuffer:get_data()
+    local batch_size = self.batch_size
+    if self.head >= self.tail then -- buffer is empty
+        local t = os.clock()
+        if (not self:saturate()) and (not self.consume) then
+            return nil -- the remaining data cannot build a batch
+        end
+        if self.tail == self.head then
+            return nil -- nothing left
+        end
+        nerv.info("%.3fs to fill the buffer", os.clock() - t)
+    end
+    if self.head + batch_size > self.tail and (not self.consume) then
+        return nil -- the remaining data cannot build a batch
+    end
+    local actual_batch_size = math.min(batch_size, self.tail - self.head)
+    local res = {seq_length = table.vector(gconf.batch_size, 1),
+                 new_seq = {},
+                 data = {}}
+    for i, reader in ipairs(self.readers) do
+        for id, buff in pairs(reader.buffs) do
+            local batch = self.output_mat_type(actual_batch_size, buff.width)
+            if self.randomize then
+                self.copy_rows_from_by_idx(batch, buff.data, self.rand_map, self.head)
+            else
+                self.copy_from(batch, buff.data, self.head, self.head + actual_batch_size)
+            end
+            res.data[id] = {batch}
+        end
+    end
+    self.head = self.head + actual_batch_size
+    return res
+end
diff --git a/nerv/io/init.lua b/nerv/io/init.lua
index c36d850..d3ba27c 100644
--- a/nerv/io/init.lua
+++ b/nerv/io/init.lua
@@ -56,5 +56,5 @@ function DataBuffer:get_data()
     nerv.error_method_not_implemented()
 end
 
-nerv.include('sgd_buffer.lua')
+nerv.include('frm_buffer.lua')
 nerv.include('seq_buffer.lua')
diff --git a/nerv/io/seq_buffer.lua b/nerv/io/seq_buffer.lua
index ad1b3f7..029e7b8 100644
--- a/nerv/io/seq_buffer.lua
+++ b/nerv/io/seq_buffer.lua
@@ -5,7 +5,10 @@ function SeqBuffer:__init(global_conf, buffer_conf)
     self.batch_size = buffer_conf.batch_size
     self.chunk_size = buffer_conf.chunk_size
-    self.readers = buffer_conf.readers
+    self.readers = {}
+    for _, v in ipairs(buffer_conf.readers) do
+        table.insert(self.readers, v.reader)
+    end
     self.nn_act_default = buffer_conf.nn_act_default
     if self.nn_act_default == nil then
         self.nn_act_default = 0
@@ -29,7 +32,7 @@ function SeqBuffer:new_mini_batch()
 end
 
 function SeqBuffer:saturate(batch)
-    if self.queue[self.head] ~= nil and self.queue[self.head].seq_length[batch] ~= 0 then
+    if self.queue[self.head] ~= nil and self.queue[self.head].seq_length[batch] ~= 0 then
         return true
     end
     local data = {}
diff --git a/nerv/io/sgd_buffer.lua b/nerv/io/sgd_buffer.lua
deleted file mode 100644
index d78f6d1..0000000
--- a/nerv/io/sgd_buffer.lua
+++ /dev/null
@@ -1,149 +0,0 @@
-local SGDBuffer = nerv.class("nerv.SGDBuffer", "nerv.DataBuffer")
-
-function SGDBuffer:__init(global_conf, buffer_conf)
-    self.gconf = global_conf
-    self.batch_size = buffer_conf.batch_size
-    self.buffer_size = math.floor(buffer_conf.buffer_size /
-                                  self.batch_size) * self.batch_size
-    self.randomize = buffer_conf.randomize
-    self.consume = buffer_conf.consume
-    local cumat_type = global_conf.cumat_type
-    if self.gconf.use_cpu then
-        self.output_mat_type = self.gconf.mmat_type
-    else
-        self.output_mat_type = self.gconf.cumat_type
-    end
-    if buffer_conf.use_gpu then
-        self.mat_type = cumat_type
-        if self.gconf.use_cpu then
-            -- gpu buffer -> cpu training
-            nerv.error("not implemeted")
-        else
-            -- gpu buffer -> gpu training
-            self.copy_rows_from_by_idx = cumat_type.copy_rows_fromd_by_idx
-            self.copy_from = cumat_type.copy_fromd
-        end
-        self.perm_gen = function (x)
-            return cumat_type.new_from_host(nerv.MMatrixFloat.perm_gen(x))
-        end
-    else
-        self.mat_type = global_conf.mmat_type
-        if self.gconf.use_cpu then
-            -- cpu buffer -> cpu training
-            self.copy_rows_from_by_idx = gconf.mmat_type.copy_rows_fromh_by_idx
-            self.copy_from = gconf.mmat_type.copy_fromh
-        else
-            -- cpu buffer -> gpu training
-            self.copy_rows_from_by_idx = cumat_type.copy_rows_fromh_by_idx
-            self.copy_from = cumat_type.copy_fromh
-        end
-        self.perm_gen = nerv.MMatrixFloat.perm_gen
-    end
-    self.copy_from_reader = self.mat_type.copy_fromh
-    self.head = 0
-    self.tail = 0
-    self.readers = {}
-    for i, reader_spec in ipairs(buffer_conf.readers) do
-        local buffs = {}
-        for id, width in pairs(reader_spec.data) do
-            buffs[id] = {data = self.mat_type(self.buffer_size, width),
-                         leftover = nil,
-                         width = width}
-        end
-        table.insert(self.readers, {buffs = buffs,
-                                    reader = reader_spec.reader,
-                                    tail = 0,
-                                    has_leftover = false})
-    end
-end
-
-function SGDBuffer:saturate()
-    local buffer_size = self.buffer_size
-    self.head = 0
-    self.tail = buffer_size
-    for i, reader in ipairs(self.readers) do
-        reader.tail = 0
-        if reader.has_leftover then
-            local lrow
-            for id, buff in pairs(reader.buffs) do
-                lrow = buff.leftover:nrow()
-                if lrow > buffer_size then
-                    nerv.error("buffer size is too small to contain leftovers")
-                end
-                buff.data:copy_from(buff.leftover, 0, lrow)
-                buff.leftover = nil
-            end
-            nerv.info("buffer leftover: %d\n", lrow)
-            reader.tail = lrow
-            reader.has_leftover = false
-        end
-        while reader.tail < buffer_size do
-            local data = reader.reader:get_data()
-            if data == nil then
-                break
-            end
-            local drow = nil
-            for id, d in pairs(data) do
-                if drow == nil then
-                    drow = d:nrow()
-                elseif d:nrow() ~= drow then
-                    nerv.error("reader provides with inconsistent rows of data")
-                end
-            end
-            local remain = buffer_size - reader.tail
-            if drow > remain then
-                for id, buff in pairs(reader.buffs) do
-                    local d = data[id]
-                    if d == nil then
-                        nerv.error("reader does not provide data for %s", id)
-                    end
-                    buff.leftover = self.mat_type(drow - remain,
-                                                  buff.width)
-                    self.copy_from_reader(buff.leftover, d, remain, drow)
-                end
-                drow = remain
-                reader.has_leftover = true
-            end
-            for id, buff in pairs(reader.buffs) do
-                self.copy_from_reader(buff.data, data[id], 0, drow, reader.tail)
-            end
-            reader.tail = reader.tail + drow
-        end
-        self.tail = math.min(self.tail, reader.tail)
-    end
-    self.rand_map = self.perm_gen(self.tail) -- generate shuffled index
-    collectgarbage("collect")
-    return self.tail >= self.batch_size
-end
-
-function SGDBuffer:get_data()
-    local batch_size = self.batch_size
-    if self.head >= self.tail then -- buffer is empty
-        local t = os.clock()
-        if (not self:saturate()) and (not self.consume) then
-            return nil -- the remaining data cannot build a batch
-        end
-        if self.tail == self.head then
-            return nil -- nothing left
-        end
-        nerv.info("%.3fs to fill the buffer", os.clock() - t)
-    end
-    if self.head + batch_size > self.tail and (not self.consume) then
-        return nil -- the remaining data cannot build a batch
-    end
-    actual_batch_size = math.min(batch_size, self.tail - self.head)
-    local res = {}
-    for i, reader in ipairs(self.readers) do
-        for id, buff in pairs(reader.buffs) do
-            local batch = self.output_mat_type(actual_batch_size, buff.width)
-            if self.randomize then
-                self.copy_rows_from_by_idx(batch, buff.data, self.rand_map, self.head)
-            else
-                self.copy_from(batch, buff.data, self.head, self.head + actual_batch_size)
-            end
-            res[id] = batch
-        end
-    end
-    self.head = self.head + actual_batch_size
-    return res
-end
diff --git a/nerv/layer/rnn.lua b/nerv/layer/rnn.lua
index 0b5ccaa..333be9e 100644
--- a/nerv/layer/rnn.lua
+++ b/nerv/layer/rnn.lua
@@ -20,7 +20,7 @@ function RNNLayer:__init(id, global_conf, layer_conf)
         ['nerv.AffineLayer'] = {
             main = {dim_in = {din, dout}, dim_out = {dout}, pr = pr},
         },
-        [layers.activation] = {
+        [layer_conf.activation] = {
             activation = {dim_in = {dout}, dim_out = {dout}},
         },
         ['nerv.DuplicateLayer'] = {
diff --git a/nerv/nn/network.lua b/nerv/nn/network.lua
index 6f7fe10..7e2af4e 100644
--- a/nerv/nn/network.lua
+++ b/nerv/nn/network.lua
@@ -413,7 +413,7 @@ function network:set_input(input)
             local edge = self.socket.inputs[i]
             local id, port, time = edge[1], edge[2], edge[3]
             if t + time >= 1 and t + time <= self.chunk_size then
-                self.input[t + time][id][port] = input[t][i]
+                self.input[t + time][id][port] = input[i][t]
             end
         end
     end
@@ -425,7 +425,7 @@ function network:set_output(output)
             local edge = self.socket.outputs[i]
             local id, port, time = edge[1], edge[2], edge[3]
             if t - time >= 1 and t - time <= self.chunk_size then
-                self.output[t - time][id][port] = output[t][i]
+                self.output[t - time][id][port] = output[i][t]
             end
         end
     end
@@ -437,7 +437,7 @@ function network:set_err_input(err_input)
            local edge = self.socket.outputs[i]
             local id, port, time = edge[1], edge[2], edge[3]
             if t - time >= 1 and t - time <= self.chunk_size then
-                self.err_input[t - time][id][port] = err_input[t][i]
+                self.err_input[t - time][id][port] = err_input[i][t]
             end
         end
     end
@@ -449,7 +449,7 @@ function network:set_err_output(err_output)
             local edge = self.socket.inputs[i]
             local id, port, time = edge[1], edge[2], edge[3]
             if t + time >= 1 and t + time <= self.chunk_size then
-                self.err_output[t + time][id][port] = err_output[t][i]
+                self.err_output[t + time][id][port] = err_output[i][t]
             end
         end
     end
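
Notes on the buffer contract after this patch (illustrative sketch, not part
of the diff). get_data() on both FrmBuffer and SeqBuffer now yields a table
per call instead of a bare id -> matrix map; the reader id "main_scp" below
is an example name borrowed from the baseline configs:

    for d in buffer.get_data, buffer do
        -- d.seq_length[b] : sequence length for batch slot b (FrmBuffer
        --                   always reports 1, since it shuffles single frames)
        -- d.new_seq       : batch slots that start a new sequence this chunk
        --                   (always empty for FrmBuffer)
        -- d.data[id][t]   : batch_size x width matrix of stream id at time
        --                   step t (FrmBuffer yields exactly one step)
        for t = 1, #d.data["main_scp"] do
            local frames = d.data["main_scp"][t]
            -- feed frames into the network here
        end
    end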
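
Correspondingly, network:set_input/set_output/set_err_input/set_err_output
now index caller-supplied tables port-major: input[i][t] is the matrix for
port i at time step t, where the old code read input[t][i]. asr_trainer.lua
builds its cross-entropy error signal in this layout; a condensed version of
the loop added above, assuming a single output port of width 1:

    local err_input = {{}}                  -- one output port
    for t = 1, gconf.chunk_size do
        local e = mat_type(gconf.batch_size, 1)
        e:fill(1)                           -- constant outer gradient
        err_input[1][t] = e                 -- port 1, time step t
    end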
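
Configs that construct their own buffer should pass the new chunk_size field
through (asr_trainer.lua defaults it to 1 via trainer_defaults); the pattern
now used by the swb/timit baselines:

    gconf.chunk_size = 1                 -- frame-level training: one step per chunk
    local buffer = nerv.FrmBuffer(gconf, -- renamed from nerv.SGDBuffer
                                  {
                                      buffer_size = gconf.buffer_size,
                                      batch_size = gconf.batch_size,
                                      chunk_size = gconf.chunk_size,
                                      randomize = gconf.randomize,
                                      readers = readers,
                                      use_gpu = true
                                  })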