-rw-r--r--  examples/asr_trainer.lua     |  42
-rw-r--r--  examples/swb_baseline.lua    |  87
-rw-r--r--  examples/test_dnn_layers.lua |   4
-rw-r--r--  examples/test_nn_lib.lua     |  18
-rw-r--r--  io/sgd_buffer.lua            |   2
-rw-r--r--  layer/affine.lua             |  75
-rw-r--r--  layer/bias.lua               |   2
-rw-r--r--  layer/combiner.lua           |  26
-rw-r--r--  layer/init.lua               |  12
-rw-r--r--  layer/mse.lua                |  28
-rw-r--r--  layer/sigmoid.lua            |   4
-rw-r--r--  layer/softmax_ce.lua         |  21
-rw-r--r--  layer/window.lua             |   2
-rw-r--r--  nerv.lua                     |  37
-rw-r--r--  nn/layer_dag.lua             |  23
-rw-r--r--  nn/layer_repo.lua            |   4
-rw-r--r--  nn/param_repo.lua            |  70
m---------  speech                       |   0
18 files changed, 284 insertions, 173 deletions
diff --git a/examples/asr_trainer.lua b/examples/asr_trainer.lua
index 05d770f..a5727be 100644
--- a/examples/asr_trainer.lua
+++ b/examples/asr_trainer.lua
@@ -1,50 +1,58 @@
 function build_trainer(ifname)
-    local param_repo = make_param_repo(ifname)
+    local param_repo = nerv.ParamRepo()
+    param_repo:import(ifname, nil, gconf)
     local sublayer_repo = make_sublayer_repo(param_repo)
     local layer_repo = make_layer_repo(sublayer_repo, param_repo)
     local crit = get_criterion_layer(sublayer_repo)
     local network = get_network(layer_repo)
+    local input_order = get_input_order()
     local iterative_trainer = function (prefix, scp_file, bp)
         gconf.randomize = bp
         -- build buffer
-        local buffer = make_buffer(make_reader(scp_file, layer_repo))
+        local buffer = make_buffer(make_readers(scp_file, layer_repo))
         -- initialize the network
         network:init(gconf.batch_size)
         gconf.cnt = 0
+        err_input = {nerv.CuMatrixFloat(256, 1)}
+        err_input[1]:fill(1)
         for data in buffer.get_data, buffer do
             -- prine stat periodically
             gconf.cnt = gconf.cnt + 1
             if gconf.cnt == 1000 then
-                print_stat(crit)
+                print_stat(sublayer_repo)
+                nerv.CuMatrix.print_profile()
+                nerv.CuMatrix.clear_profile()
                 gconf.cnt = 0
+                -- break
             end
+            local input = {}
             -- if gconf.cnt == 100 then break end
-
-            input = {data.main_scp, data.phone_state}
-            output = {}
-            err_input = {}
+            for i, id in ipairs(input_order) do
+                if data[id] == nil then
+                    nerv.error("input data %s not found", id)
+                end
+                table.insert(input, data[id])
+            end
+            local output = {nerv.CuMatrixFloat(256, 1)}
             err_output = {input[1]:create()}
             network:propagate(input, output)
             if bp then
-                network:back_propagate(err_output, err_input, input, output)
+                network:back_propagate(err_input, err_output, input, output)
                 network:update(err_input, input, output)
            end
            -- collect garbage in-time to save GPU memory
            collectgarbage("collect")
        end
-        print_stat(crit)
+        print_stat(sublayer_repo)
         nerv.CuMatrix.print_profile()
+        nerv.CuMatrix.clear_profile()
         if (not bp) and prefix ~= nil then
             nerv.info("writing back...")
             local fname = string.format("%s_cv%.3f.nerv",
-                            prefix, get_accuracy(crit))
-            cf = nerv.ChunkFile(fname, "w")
-            for i, p in ipairs(network:get_params()) do
-                cf:write_chunk(p)
-            end
-            cf:close()
+                            prefix, get_accuracy(sublayer_repo))
+            network:get_params():export(fname, nil)
         end
-        return get_accuracy(crit)
+        return get_accuracy(sublayer_repo)
     end
     return iterative_trainer
 end
@@ -73,7 +81,7 @@ for i = 1, max_iter do
     local accu_new = trainer(
         string.format("%s_%s_iter_%d_lr%f_tr%.3f",
             string.gsub(
-                (string.gsub(pf0, "(.*/)(.*)", "%2")),
+                (string.gsub(pf0[1], "(.*/)(.*)", "%2")),
                 "(.*)%..*", "%1"),
             os.date("%Y%m%d%H%M%S"),
             i, gconf.lrate,
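Note: the trainer loop above now assembles the network input from the buffer's data table in the order given by get_input_order(), and drives backpropagation with a constant error input of ones using the new back_propagate(bp_err, next_bp_err, input, output) argument order. The sketch below restates that calling convention outside the trainer; it assumes gconf.batch_size matches the hard-coded 256 of CuMatrixFloat(256, 1) and that input has already been gathered from a buffer.

-- Minimal sketch of the trainer's forward/backward step (assumptions noted above).
local err_input = {nerv.CuMatrixFloat(gconf.batch_size, 1)}
err_input[1]:fill(1)                      -- unit error fed into the criterion output
local output = {nerv.CuMatrixFloat(gconf.batch_size, 1)}
local err_output = {input[1]:create()}    -- same shape as the first network input
network:propagate(input, output)
network:back_propagate(err_input, err_output, input, output)  -- note the new argument order
network:update(err_input, input, output)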
["nerv.SoftmaxCELayer"] = { - criterion = {{}, {dim_in = {3001, 1}, dim_out = {}, compressed = true}} + ce_crit = {{}, {dim_in = {3001, 1}, dim_out = {1}, compressed = true}} } }, param_repo, gconf) end @@ -82,7 +78,7 @@ function make_layer_repo(sublayer_repo, param_repo) } }}, main = {{}, { - dim_in = {429, 1}, dim_out = {}, + dim_in = {429, 1}, dim_out = {1}, sub_layers = sublayer_repo, connections = { ["<input>[1]"] = "affine0[1]", @@ -100,8 +96,9 @@ function make_layer_repo(sublayer_repo, param_repo) ["sigmoid5[1]"] = "affine6[1]", ["affine6[1]"] = "sigmoid6[1]", ["sigmoid6[1]"] = "affine7[1]", - ["affine7[1]"] = "criterion[1]", - ["<input>[2]"] = "criterion[2]" + ["affine7[1]"] = "ce_crit[1]", + ["<input>[2]"] = "ce_crit[2]", + ["ce_crit[1]"] = "<output>[1]" } }} } @@ -109,55 +106,61 @@ function make_layer_repo(sublayer_repo, param_repo) end function get_criterion_layer(sublayer_repo) - return sublayer_repo:get_layer("criterion") + return sublayer_repo:get_layer("ce_crit") end function get_network(layer_repo) return layer_repo:get_layer("main") end -function make_reader(scp_file, layer_repo) - return nerv.TNetReader(gconf, - { - id = "main_scp", - scp_file = scp_file, - conf_file = gconf.htk_conf, - frm_ext = gconf.frm_ext, - mlfs = { - phone_state = { - file = "/slfs1/users/mfy43/swb_ivec/ref.mlf", - format = "map", - format_arg = "/slfs1/users/mfy43/swb_ivec/dict", - dir = "*/", - ext = "lab" - } - }, - global_transf = layer_repo:get_layer("global_transf") - }) +function make_readers(scp_file, layer_repo) + return { + {reader = nerv.TNetReader(gconf, + { + id = "main_scp", + scp_file = scp_file, + conf_file = gconf.htk_conf, + frm_ext = gconf.frm_ext, + mlfs = { + phone_state = { + file = "/slfs1/users/mfy43/swb_ivec/ref.mlf", + format = "map", + format_arg = "/slfs1/users/mfy43/swb_ivec/dict", + dir = "*/", + ext = "lab" + } + }, + global_transf = layer_repo:get_layer("global_transf") + }), + data = {main_scp = 429, phone_state = 1}} + } end -function make_buffer(reader, buffer) +function make_buffer(readers) return nerv.SGDBuffer(gconf, { buffer_size = gconf.buffer_size, randomize = gconf.randomize, - readers = { - { reader = reader, - data = {main_scp = 429, phone_state = 1}} - } + readers = readers }) end -function get_accuracy(crit) - return crit.total_correct / crit.total_frames * 100 +function get_input_order() + return {"main_scp", "phone_state"} +end + +function get_accuracy(sublayer_repo) + local ce_crit = sublayer_repo:get_layer("ce_crit") + return ce_crit.total_correct / ce_crit.total_frames * 100 end -function print_stat(crit) +function print_stat(sublayer_repo) + local ce_crit = sublayer_repo:get_layer("ce_crit") nerv.info("*** training stat begin ***") - nerv.utils.printf("cross entropy:\t\t%.8f\n", crit.total_ce) - nerv.utils.printf("correct:\t\t%d\n", crit.total_correct) - nerv.utils.printf("frames:\t\t\t%d\n", crit.total_frames) - nerv.utils.printf("err/frm:\t\t%.8f\n", crit.total_ce / crit.total_frames) - nerv.utils.printf("accuracy:\t\t%.3f%%\n", get_accuracy(crit)) + nerv.printf("cross entropy:\t\t%.8f\n", ce_crit.total_ce) + nerv.printf("correct:\t\t%d\n", ce_crit.total_correct) + nerv.printf("frames:\t\t\t%d\n", ce_crit.total_frames) + nerv.printf("err/frm:\t\t%.8f\n", ce_crit.total_ce / ce_crit.total_frames) + nerv.printf("accuracy:\t\t%.3f%%\n", get_accuracy(sublayer_repo)) nerv.info("*** training stat end ***") end diff --git a/examples/test_dnn_layers.lua b/examples/test_dnn_layers.lua index bf81f7b..64c0dec 100644 --- a/examples/test_dnn_layers.lua +++ 
diff --git a/examples/test_dnn_layers.lua b/examples/test_dnn_layers.lua
index bf81f7b..64c0dec 100644
--- a/examples/test_dnn_layers.lua
+++ b/examples/test_dnn_layers.lua
@@ -69,8 +69,8 @@ for i = 0, 3 do
     print(err_output1[1])
     print("err_output2")
     print(err_output2[1])
-    nerv.utils.printf("cross entropy: %.8f\n", sm.total_ce)
-    nerv.utils.printf("frames: %.8f\n", sm.total_frames)
+    nerv.printf("cross entropy: %.8f\n", sm.total_ce)
+    nerv.printf("frames: %.8f\n", sm.total_frames)
 end
 print("linear")
 print(af.ltp.trans)
diff --git a/examples/test_nn_lib.lua b/examples/test_nn_lib.lua
index 6fdbd67..5444810 100644
--- a/examples/test_nn_lib.lua
+++ b/examples/test_nn_lib.lua
@@ -144,17 +144,17 @@ for data in buffer.get_data, buffer do
     main:back_propagate(err_output, err_input, input, output)
     main:update(err_input, input, output)
---    nerv.utils.printf("cross entropy: %.8f\n", sm.total_ce)
---    nerv.utils.printf("correct: %d\n", sm.total_correct)
---    nerv.utils.printf("frames: %d\n", sm.total_frames)
---    nerv.utils.printf("err/frm: %.8f\n", sm.total_ce / sm.total_frames)
---    nerv.utils.printf("accuracy: %.8f\n", sm.total_correct / sm.total_frames)
+--    nerv.printf("cross entropy: %.8f\n", sm.total_ce)
+--    nerv.printf("correct: %d\n", sm.total_correct)
+--    nerv.printf("frames: %d\n", sm.total_frames)
+--    nerv.printf("err/frm: %.8f\n", sm.total_ce / sm.total_frames)
+--    nerv.printf("accuracy: %.8f\n", sm.total_correct / sm.total_frames)
     collectgarbage("collect")
 end
 
-nerv.utils.printf("cross entropy: %.8f\n", sm.total_ce)
-nerv.utils.printf("correct: %d\n", sm.total_correct)
-nerv.utils.printf("accuracy: %.3f%%\n", sm.total_correct / sm.total_frames * 100)
-nerv.utils.printf("writing back...\n")
+nerv.printf("cross entropy: %.8f\n", sm.total_ce)
+nerv.printf("correct: %d\n", sm.total_correct)
+nerv.printf("accuracy: %.3f%%\n", sm.total_correct / sm.total_frames * 100)
+nerv.printf("writing back...\n")
 cf = nerv.ChunkFile("output.nerv", "w")
 for i, p in ipairs(main:get_params()) do
     print(p)
diff --git a/io/sgd_buffer.lua b/io/sgd_buffer.lua
index 381b863..f4f7dfe 100644
--- a/io/sgd_buffer.lua
+++ b/io/sgd_buffer.lua
@@ -41,7 +41,7 @@ function SGDBuffer:saturate()
             buff.data:copy_from(buff.leftover, 0, lrow)
             buff.leftover = nil
         end
-        nerv.utils.printf("leftover: %d\n", lrow)
+        nerv.printf("leftover: %d\n", lrow)
         reader.tail = lrow
         reader.has_leftover = false
     end
diff --git a/layer/affine.lua b/layer/affine.lua
index 2cd7acb..00cbcfb 100644
--- a/layer/affine.lua
+++ b/layer/affine.lua
@@ -3,13 +3,35 @@ local LinearTransParam = nerv.class('nerv.LinearTransParam', 'nerv.MatrixParam')
 local BiasParam = nerv.class('nerv.BiasParam', 'nerv.MatrixParam')
 local AffineLayer = nerv.class('nerv.AffineLayer', 'nerv.Layer')
 
-function MatrixParam:read(pcdata)
+function MatrixParam:read(handle)
     self.trans = self.gconf.cumat_type.new_from_host(
-        nerv.MMatrixFloat.load(pcdata))
+        nerv.MMatrixFloat.load(handle))
 end
 
-function MatrixParam:write(pfhandle)
-    self.trans:new_to_host():save(pfhandle)
+function MatrixParam:write(handle)
+    self.trans:new_to_host():save(handle)
+end
+
+function MatrixParam:train_init()
+    self.correction = self.trans:create()
+    self.correction:fill(0)
+end
+
+function MatrixParam:update(gradient)
+    local gconf = self.gconf
+    self.correction:add(self.correction, gradient, gconf.momentum, 1.0)
+    -- momentum gain
+    local mmt_gain = 1.0 / (1.0 - gconf.momentum);
+    local n = self.gconf.batch_size * mmt_gain
+    -- perform update
+    self.trans:add(self.trans, self.correction, 1.0, -gconf.lrate / n)
+end
+
+function LinearTransParam:update(gradient)
+    MatrixParam.update(self, gradient)
+    local gconf = self.gconf
+    -- weight decay
+    self.trans:add(self.trans, self.trans, 1.0, -gconf.lrate * gconf.wcost)
 end
 
 function AffineLayer:__init(id, global_conf, layer_conf)
@@ -20,9 +42,10 @@ function AffineLayer:__init(id, global_conf, layer_conf)
     self.dim_in = layer_conf.dim_in
     self.dim_out = layer_conf.dim_out
     self.gconf = global_conf
     self:check_dim_len(1, 1) -- exactly one input and one output
+    self.direct_update = layer_conf.direct_update
 end
 
-function AffineLayer:init()
+function AffineLayer:init(batch_size)
     if self.ltp.trans:ncol() ~= self.bp.trans:ncol() then
         nerv.error("mismatching dimensions of linear transform and bias paramter")
     end
@@ -32,32 +55,24 @@
     if self.dim_out[1] ~= self.ltp.trans:ncol() then
         nerv.error("mismatching dimensions of linear transform parameter and output")
     end
-
-    -- linear transform correction
-    self.ltc = self.ltp.trans:create()
-    self.ltc:fill(0)
-    -- bias correction
-    self.bc = self.bp.trans:create()
-    self.bc:fill(0)
+    self.ltp_grad = self.ltp.trans:create()
+    self.ltp:train_init()
+    self.bp:train_init()
 end
 
 function AffineLayer:update(bp_err, input, output)
-    local ltp = self.ltp.trans
-    local bp = self.bp.trans
-    local ltc = self.ltc
-    local bc = self.bc
-    local gconf = self.gconf
-    -- momentum gain
-    local mmt_gain = 1.0 / (1.0 - gconf.momentum);
-    local n = input[1]:nrow() * mmt_gain
-    -- update corrections (accumulated errors)
-    ltc:mul(input[1], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
-    bc:add(bc, bp_err[1]:colsum(), gconf.momentum, 1.0)
-    -- perform update
-    ltp:add(ltp, ltc, 1.0, -gconf.lrate / n)
-    bp:add(bp, bc, 1.0, -gconf.lrate / n)
-    -- weight decay
-    ltp:add(ltp, ltp, 1.0, -gconf.lrate * gconf.wcost)
+    if self.direct_update then
+        self.ltp.correction:mul(input[1], bp_err[1], 1.0, gconf.momentum, 'T', 'N')
+        -- momentum gain
+        local mmt_gain = 1.0 / (1.0 - gconf.momentum);
+        local n = self.gconf.batch_size * mmt_gain
+        -- perform update
+        self.ltp.trans:add(self.ltp.trans, self.ltp.correction, 1.0, -gconf.lrate / n)
+    else
+        self.ltp_grad:mul(input[1], bp_err[1], 1.0, 0.0, 'T', 'N')
+        self.ltp:update(self.ltp_grad)
+    end
+    self.bp:update(bp_err[1]:colsum())
 end
 
 function AffineLayer:propagate(input, output)
@@ -67,10 +82,10 @@
     output[1]:add_row(self.bp.trans, 1.0)
 end
 
-function AffineLayer:back_propagate(next_bp_err, bp_err, input, output)
+function AffineLayer:back_propagate(bp_err, next_bp_err, input, output)
     next_bp_err[1]:mul(bp_err[1], self.ltp.trans, 1.0, 0.0, 'N', 'T')
 end
 
 function AffineLayer:get_params()
-    return {self.ltp, self.bp}
+    return nerv.ParamRepo({self.ltp, self.bp})
 end
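Note: the new MatrixParam:update above implements momentum SGD, with weight decay applied only by LinearTransParam:update. The arithmetic below just spells out the effective step size under the gconf values from swb_baseline.lua, assuming batch_size = 256 (consistent with the hard-coded CuMatrixFloat(256, 1) in asr_trainer.lua):

-- correction = momentum * correction + gradient            (MatrixParam:update)
-- trans      = trans - (lrate / n) * correction,  n = batch_size / (1 - momentum)
-- trans      = trans - lrate * wcost * trans               (LinearTransParam:update only)
local lrate, momentum, wcost, batch_size = 0.8, 0.9, 1e-6, 256
local mmt_gain = 1.0 / (1.0 - momentum)      -- 10
local n = batch_size * mmt_gain              -- 2560
print(lrate / n)                             -- 3.125e-04, scaling applied to the accumulated correction
print(lrate * wcost)                         -- 8e-07, per-update weight-decay shrinkage on the weights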
diff --git a/layer/bias.lua b/layer/bias.lua
index 8cd326b..c99274d 100644
--- a/layer/bias.lua
+++ b/layer/bias.lua
@@ -24,5 +24,5 @@ function BiasLayer:propagate(input, output)
 end
 
 function BiasLayer:get_params()
-    return {self.bias}
+    return nerv.ParamRepo({self.bias})
 end
diff --git a/layer/combiner.lua b/layer/combiner.lua
index 75e47e2..7bd7617 100644
--- a/layer/combiner.lua
+++ b/layer/combiner.lua
@@ -7,9 +7,15 @@ function CombinerLayer:__init(id, global_conf, layer_conf)
     self.dim_out = layer_conf.dim_out
     self.gconf = global_conf
     self:check_dim_len(#self.lambda, -1)
+    if #self.dim_in < 1 then
+        nerv.error("no input specified")
+    end
+    if #self.dim_out < 1 then
+        nerv.error("no output specified")
+    end
 end
 
-function CombinerLayer:init()
+function CombinerLayer:init(batch_size)
     local dim = self.dim_in[1]
     for i = 2, #self.dim_in do
         if self.dim_in[i] ~= dim then
@@ -21,6 +27,7 @@ function CombinerLayer:init()
             nerv.error("mismatching dimensions of inputs/outputs")
         end
     end
+    self.sum = self.gconf.cumat_type(batch_size, dim)
 end
 
 function CombinerLayer:update(bp_err, input, output)
 end
 
 function CombinerLayer:propagate(input, output)
@@ -32,24 +39,21 @@ function CombinerLayer:propagate(input, output)
         output[1]:add(output[1], input[i], 1.0, self.lambda[i])
     end
     for i = 2, #self.dim_out do
-        output[i]:copy_fromd(output[1])
+        output[i]:copy_fromd(output[1])
     end
 end
 
-function CombinerLayer:back_propagate(next_bp_err, bp_err, input, output)
-    local sum = bp_err[1]:create()
-    sum:fill(0)
-    for i = 1, #self.dim_out do
+function CombinerLayer:back_propagate(bp_err, next_bp_err, input, output)
+    local sum = self.sum
+    sum:copy_fromd(bp_err[1])
+    for i = 2, #self.dim_out do
         sum:add(sum, bp_err[i], 1.0, 1.0)
     end
     for i = 1, #self.dim_in do
-        local scale = nerv.CuMatrixFloat(sum:nrow(), 1)
-        scale:fill(self.lambda[i])
-        next_bp_err[i]:copy_fromd(sum)
-        next_bp_err[i]:scale_rows_by_col(scale)
+        next_bp_err[i]:add(next_bp_err[i], sum, 0.0, self.lambda[i])
     end
 end
 
 function CombinerLayer:get_params()
-    return {}
+    return nerv.ParamRepo({})
 end
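Note: CombinerLayer:back_propagate now reuses the preallocated self.sum and writes each next_bp_err[i] with a single add (alpha = 0.0 overwrites in place, beta = lambda[i] scales), instead of allocating a scale column per call. A standalone sketch of the same computation, assuming CuMatrix operands of matching shape:

-- The gradient each input receives is the lambda-weighted sum of all output error signals.
local function combiner_backprop(bp_err, next_bp_err, lambda, sum)
    sum:copy_fromd(bp_err[1])
    for i = 2, #bp_err do
        sum:add(sum, bp_err[i], 1.0, 1.0)
    end
    for i = 1, #next_bp_err do
        next_bp_err[i]:add(next_bp_err[i], sum, 0.0, lambda[i])
    end
end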
diff --git a/layer/init.lua b/layer/init.lua
index 169427d..e39af94 100644
--- a/layer/init.lua
+++ b/layer/init.lua
@@ -15,11 +15,15 @@ function Param:set_info(info)
     self.info = info
 end
 
-function Param:read(pfhandle)
+function Param:read(handle)
     nerv.error_method_not_implemented()
 end
 
-function Param:write(pfhandle)
+function Param:write(handle)
+    nerv.error_method_not_implemented()
+end
+
+function Param:update(gradient)
     nerv.error_method_not_implemented()
 end
 
@@ -29,7 +33,7 @@ function Layer:__init(id, global_conf, layer_conf)
     nerv.error_method_not_implemented()
 end
 
-function Layer:init()
+function Layer:init(batch_size)
     nerv.error_method_not_implemented()
 end
 
@@ -41,7 +45,7 @@ function Layer:propagate(input, output)
     nerv.error_method_not_implemented()
 end
 
-function Layer:back_propagate(next_bp_err, bp_err, input, output)
+function Layer:back_propagate(bp_err, next_bp_err, input, output)
     nerv.error_method_not_implemented()
 end
diff --git a/layer/mse.lua b/layer/mse.lua
index da5b24d..9a97add 100644
--- a/layer/mse.lua
+++ b/layer/mse.lua
@@ -8,12 +8,16 @@ function MSELayer:__init(id, global_conf, layer_conf)
     self:check_dim_len(2, -1)
 end
 
-function MSELayer:init()
+function MSELayer:init(batch_size)
     if self.dim_in[1] ~= self.dim_in[2] then
         nerv.error("mismatching dimensions of previous network output and labels")
     end
+    self.scale = 1 / self.dim_in[1]
     self.total_mse = 0.0
     self.total_frames = 0
+    self.mse = self.gconf.cumat_type(batch_size, self.dim_in[1])
+    self.mse_sum = self.gconf.cumat_type(batch_size, 1)
+    self.diff = self.mse:create()
 end
 
 function MSELayer:update(bp_err, input, output)
@@ -21,32 +25,28 @@
 end
 
 function MSELayer:propagate(input, output)
-    local mse = input[1]:create()
+    local mse = self.mse
+    local mse_sum = self.mse_sum
     mse:add(input[1], input[2], 1.0, -1.0)
-    self.diff = mse:create()
     self.diff:copy_fromd(mse)
     mse:mul_elem(mse, mse)
-    mse = mse:rowsum(mse)
-    local scale = nerv.CuMatrixFloat(mse:nrow(), 1)
-    scale:fill(1 / input[1]:ncol())
-    mse:scale_rows_by_col(scale)
+    mse_sum:add(mse_sum, mse:rowsum(mse), 0.0, self.scale)
     if output[1] ~= nil then
-        output[1]:copy_fromd(mse)
+        output[1]:copy_fromd(mse_sum)
     end
-    self.total_mse = self.total_mse + mse:colsum()[0]
-    self.total_frames = self.total_frames + mse:nrow()
+    self.total_mse = self.total_mse + mse_sum:colsum()[0]
+    self.total_frames = self.total_frames + mse_sum:nrow()
 end
 
 -- NOTE: must call propagate before back_propagate
-function MSELayer:back_propagate(next_bp_err, bp_err, input, output)
+function MSELayer:back_propagate(bp_err, next_bp_err, input, output)
     local nbe = next_bp_err[1]
-    nbe:copy_fromd(self.diff)
-    self.diff = nil
+    nbe:add(nbe, self.diff, 0.0, 2 * self.scale)
     if bp_err[1] ~= nil then
         nbe:scale_rows_by_col(bp_err[1])
     end
 end
 
 function MSELayer:get_params()
-    return {}
+    return nerv.ParamRepo({})
 end
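Note: MSELayer now caches scale = 1 / dim_in[1] and diff = input[1] - input[2], so propagate reports the per-frame mean squared error and back_propagate returns its exact derivative 2 * scale * diff (which is why propagate must run first). A plain-Lua numeric illustration of those two formulas, with values chosen for the example only:

local x, y, dim = {1.0, 2.0}, {0.5, 2.5}, 2
local scale = 1 / dim
local mse = scale * ((x[1] - y[1])^2 + (x[2] - y[2])^2)  -- 0.25: what mse_sum holds for this frame
local grad1 = 2 * scale * (x[1] - y[1])                  -- 0.5: what back_propagate emits for x[1]
print(mse, grad1)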
diff --git a/layer/sigmoid.lua b/layer/sigmoid.lua
index dd10fb9..dfd09eb 100644
--- a/layer/sigmoid.lua
+++ b/layer/sigmoid.lua
@@ -22,10 +22,10 @@ function SigmoidLayer:propagate(input, output)
     output[1]:sigmoid(input[1])
 end
 
-function SigmoidLayer:back_propagate(next_bp_err, bp_err, input, output)
+function SigmoidLayer:back_propagate(bp_err, next_bp_err, input, output)
     next_bp_err[1]:sigmoid_grad(bp_err[1], output[1])
 end
 
 function SigmoidLayer:get_params()
-    return {}
+    return nerv.ParamRepo({})
 end
diff --git a/layer/softmax_ce.lua b/layer/softmax_ce.lua
index 7888540..daf891e 100644
--- a/layer/softmax_ce.lua
+++ b/layer/softmax_ce.lua
@@ -12,13 +12,15 @@ function SoftmaxCELayer:__init(id, global_conf, layer_conf)
     self:check_dim_len(2, -1) -- two inputs: nn output and label
 end
 
-function SoftmaxCELayer:init()
+function SoftmaxCELayer:init(batch_size)
     if not self.compressed and (self.dim_in[1] ~= self.dim_in[2]) then
         nerv.error("mismatching dimensions of previous network output and labels")
     end
     self.total_ce = 0.0
     self.total_correct = 0
     self.total_frames = 0
+    self.softmax = self.gconf.cumat_type(batch_size, self.dim_in[1])
+    self.ce = self.softmax:create()
 end
 
 function SoftmaxCELayer:update(bp_err, input, output)
@@ -26,12 +28,11 @@
 end
 
 function SoftmaxCELayer:propagate(input, output)
-    local soutput = input[1]:create() -- temporary value for calc softmax
-    self.soutput = soutput
-    local classified = soutput:softmax(input[1])
-    local ce = soutput:create()
-    ce:log_elem(soutput)
+    local softmax = self.softmax
+    local ce = self.ce
+    local classified = softmax:softmax(input[1])
     local label = input[2]
+    ce:log_elem(softmax)
     if self.compressed then
         label = label:decompress(input[1]:ncol())
     end
@@ -42,26 +43,26 @@
     end
     -- add total ce
     self.total_ce = self.total_ce - ce:colsum()[0]
-    self.total_frames = self.total_frames + soutput:nrow()
+    self.total_frames = self.total_frames + softmax:nrow()
     -- TODO: add colsame for uncompressed label
     if self.compressed then
         self.total_correct = self.total_correct + classified:colsame(input[2])[0]
     end
 end
 
-function SoftmaxCELayer:back_propagate(next_bp_err, bp_err, input, output)
+function SoftmaxCELayer:back_propagate(bp_err, next_bp_err, input, output)
     -- softmax output - label
     local label = input[2]
     if self.compressed then
         label = label:decompress(input[1]:ncol())
     end
     local nbe = next_bp_err[1]
-    nbe:add(self.soutput, label, 1.0, -1.0)
+    nbe:add(self.softmax, label, 1.0, -1.0)
     if bp_err[1] ~= nil then
         nbe:scale_rows_by_col(bp_err[1])
     end
 end
 
 function SoftmaxCELayer:get_params()
-    return {}
+    return nerv.ParamRepo({})
 end
diff --git a/layer/window.lua b/layer/window.lua
index 3a093f4..4e9a3b1 100644
--- a/layer/window.lua
+++ b/layer/window.lua
@@ -24,5 +24,5 @@ function WindowLayer:propagate(input, output)
 end
 
 function WindowLayer:get_params()
-    return {self.window}
+    return nerv.ParamRepo({self.window})
 end
diff --git a/nerv.lua b/nerv.lua
--- a/nerv.lua
+++ b/nerv.lua
@@ -1,20 +1,35 @@
 require 'libnerv'
-nerv.utils = require 'pl.utils'
 
 function nerv.error(fmt, ...)
-    error(nerv.utils.printf("[nerv] internal error: " .. fmt .. "\n", ...))
+    error(nerv.printf("[nerv] internal error: " .. fmt .. "\n", ...))
 end
 
 function nerv.error_method_not_implemented()
     nerv.error("method not implemented");
 end
 
+function nerv.printf(fmt, ...)
+    io.write(string.format(fmt, ...))
+end
+
+function nerv.mesg_with_timestamp(fmt, ...)
+    nerv.printf(
+        string.format("(%s)[nerv] info: %s\n",
+                      os.date("%H:%M:%S %F"), fmt), ...)
+end
+
 function nerv.info(fmt, ...)
-    nerv.utils.printf(
+    nerv.printf(
         string.format("(%s)[nerv] info: %s\n",
                       os.date("%H:%M:%S %F"), fmt), ...)
 end
 
+function nerv.warning(fmt, ...)
+    nerv.printf(
+        string.format("(%s)[nerv] warning: %s\n",
+                      os.date("%H:%M:%S %F"), fmt), ...)
+end
+
 -- Torch C API wrapper
 
 function nerv.class(tname, parenttname)
@@ -77,8 +92,20 @@ function table.tostring(tbl)
     return "{" .. table.concat(result, ",") .. "}"
 end
 
-function nerv.get_type(typename)
-    return assert(loadstring("return " .. typename))()
+function nerv.get_type(tname)
+    return assert(loadstring("return " .. tname))()
+end
+
+function nerv.is_type(obj, tname)
+    local mt0 = nerv.getmetatable(tname)
+    local mt = getmetatable(obj)
+    while mt do
+        if mt == mt0 then
+            return true
+        end
+        mt = getmetatable(mt)
+    end
+    return false
 end
 
 require 'matrix.init'
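Note: nerv.is_type walks the metatable chain installed by nerv.class, so an object also matches every ancestor class. A sketch under the class hierarchy used in this commit (LinearTransParam derives from MatrixParam, which derives from Param, per layer/affine.lua and layer/init.lua); ltp here is assumed to be a LinearTransParam read from a chunk file:

print(nerv.is_type(ltp, "nerv.LinearTransParam"))  -- true
print(nerv.is_type(ltp, "nerv.MatrixParam"))       -- true, via the parent metatable
print(nerv.is_type(ltp, "nerv.Param"))             -- true; this is the check ParamRepo:import relies on
print(nerv.is_type(ltp, "nerv.ParamRepo"))         -- false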
"\n", ...)) + error(nerv.printf("[nerv] internal error: " .. fmt .. "\n", ...)) end function nerv.error_method_not_implemented() nerv.error("method not implemented"); end +function nerv.printf(fmt, ...) + io.write(string.format(fmt, ...)) +end + +function nerv.mesg_with_timestamp(fmt, ...) + nerv.printf( + string.format("(%s)[nerv] info: %s\n", + os.date("%H:%M:%S %F"), fmt), ...) +end + function nerv.info(fmt, ...) - nerv.utils.printf( + nerv.printf( string.format("(%s)[nerv] info: %s\n", os.date("%H:%M:%S %F"), fmt), ...) end +function nerv.warning(fmt, ...) + nerv.printf( + string.format("(%s)[nerv] warning: %s\n", + os.date("%H:%M:%S %F"), fmt), ...) +end + -- Torch C API wrapper function nerv.class(tname, parenttname) @@ -77,8 +92,20 @@ function table.tostring(tbl) return "{" .. table.concat(result, ",") .. "}" end -function nerv.get_type(typename) - return assert(loadstring("return " .. typename))() +function nerv.get_type(tname) + return assert(loadstring("return " .. tname))() +end + +function nerv.is_type(obj, tname) + local mt0 = nerv.getmetatable(tname) + local mt = getmetatable(obj) + while mt do + if mt == mt0 then + return true + end + mt = getmetatable(mt) + end + return false end require 'matrix.init' diff --git a/nn/layer_dag.lua b/nn/layer_dag.lua index 2dda7c9..8e30216 100644 --- a/nn/layer_dag.lua +++ b/nn/layer_dag.lua @@ -85,13 +85,14 @@ function DAGLayer:__init(id, global_conf, layer_conf) end end + -- topology sort local queue = {} local l = 1 local r = 1 for id, ref in pairs(layers) do if ref.in_deg == 0 then table.insert(queue, ref) - nerv.utils.printf("adding source layer: %s\n", id) + nerv.info("adding source layer: %s", id) r = r + 1 end end @@ -111,13 +112,13 @@ function DAGLayer:__init(id, global_conf, layer_conf) end end for i = 1, #queue do - nerv.utils.printf("queued layer: %s\n", queue[i].layer.id) + nerv.info("enqueued layer: %s", queue[i].layer.id) end for id, ref in pairs(layers) do -- check wether the graph is connected if ref.visited == false then - nerv.utils.printf("warning: layer %s is ignored\n", id) + nerv.warning("layer %s is ignored", id) end end @@ -131,7 +132,7 @@ function DAGLayer:__init(id, global_conf, layer_conf) self.gconf = global_conf end -function DAGLayer:init(batch_size) -- topology sort +function DAGLayer:init(batch_size) for i, conn in ipairs(self.parsed_conn) do local _, output_dim local ref_from, port_from, ref_to, port_to @@ -160,7 +161,7 @@ function DAGLayer:init(batch_size) -- topology sort end end -- initialize sub layers - ref.layer:init() + ref.layer:init(batch_size) end for i = 1, #self.dim_in do if self.inputs[i] == nil then @@ -227,7 +228,7 @@ function DAGLayer:propagate(input, output) end end -function DAGLayer:back_propagate(next_bp_err, bp_err, input, output) +function DAGLayer:back_propagate(bp_err, next_bp_err, input, output) self:set_err_outputs(next_bp_err) self:set_err_inputs(bp_err) self:set_inputs(input) @@ -235,16 +236,14 @@ function DAGLayer:back_propagate(next_bp_err, bp_err, input, output) for i = #self.queue, 1, -1 do local ref = self.queue[i] -- print(ref.layer.id) - ref.layer:back_propagate(ref.err_outputs, ref.err_inputs, ref.inputs, ref.outputs) + ref.layer:back_propagate(ref.err_inputs, ref.err_outputs, ref.inputs, ref.outputs) end end function DAGLayer:get_params() - local res = {} + local param_repos = {} for id, ref in pairs(self.queue) do - for i, p in ipairs(ref.layer:get_params()) do - table.insert(res, p) - end + table.insert(param_repos, ref.layer:get_params()) end - return res + return 
diff --git a/speech b/speech
-Subproject aee0d372e6b06a217f24bea5c88962b97e0ca0e
+Subproject 701181d625cfcc59d264db97e5270aa14f1f5d3