-rw-r--r--  nerv/examples/lmptb/lstmlm_ptb_main.lua                                        376
-rw-r--r--  nerv/examples/lmptb/rnnlm_ptb_main.lua (renamed from nerv/examples/lmptb/tnn_ptb_main.lua)    0
-rw-r--r--  nerv/examples/lmptb/tnn/layersT/lstm_t.lua                                      40
3 files changed, 396 insertions(+), 20 deletions(-)
diff --git a/nerv/examples/lmptb/lstmlm_ptb_main.lua b/nerv/examples/lmptb/lstmlm_ptb_main.lua
new file mode 100644
index 0000000..d3f38a2
--- /dev/null
+++ b/nerv/examples/lmptb/lstmlm_ptb_main.lua
@@ -0,0 +1,376 @@
+require 'lmptb.lmvocab'
+require 'lmptb.lmfeeder'
+require 'lmptb.lmutil'
+require 'lmptb.layer.init'
+require 'tnn.init'
+require 'lmptb.lmseqreader'
+require 'lm_trainer'
+
+--[[global function rename]]--
+local printf = nerv.printf
+local LMTrainer = nerv.LMTrainer
+--[[global function rename ends]]--
+
+--global_conf: table
+--iter: int, the index of the parameter file to load (-1 for the first time)
+--Returns: nil (the loaded parameters are stored in global_conf.paramRepo)
+function prepare_parameters(global_conf, iter)
+ printf("%s preparing parameters...\n", global_conf.sche_log_pre)
+
+ global_conf.paramRepo = nerv.ParamRepo()
+ local paramRepo = global_conf.paramRepo
+
+ if iter == -1 then --first time
+ printf("%s first time, prepare some pre-set parameters, and leaving other parameters to auto-generation...\n", global_conf.sche_log_pre)
+ local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+ f:close()
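+ --All the pre-set parameters below are currently commented out, so only an
+ --empty param.0 file is written here; every parameter is auto-generated by the
+ --layers on the first load and then exported back to param.0 (see below).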
+ --[[
+ ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf)
+ ltp_ih.trans = global_conf.cumat_type(global_conf.vocab:size(), global_conf.hidden_size) --index 0 is for zero, others correspond to vocab index(starting from 1)
+ ltp_ih.trans:generate(global_conf.param_random)
+
+ ltp_hh = nerv.LinearTransParam("ltp_hh", global_conf)
+ ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size)
+ ltp_hh.trans:generate(global_conf.param_random)
+
+ --ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
+ --ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
+ --ltp_ho.trans:generate(global_conf.param_random)
+
+ bp_h = nerv.BiasParam("bp_h", global_conf)
+ bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size)
+ bp_h.trans:generate(global_conf.param_random)
+
+ --bp_o = nerv.BiasParam("bp_o", global_conf)
+ --bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
+ --bp_o.trans:generate(global_conf.param_random)
+
+ local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+ f:write_chunk(ltp_ih)
+ f:write_chunk(ltp_hh)
+ --f:write_chunk(ltp_ho)
+ f:write_chunk(bp_h)
+ --f:write_chunk(bp_o)
+ f:close()
+ ]]--
+ return nil
+ end
+
+ printf("%s loading parameter from file %s...\n", global_conf.sche_log_pre, global_conf.param_fn .. '.' .. tostring(iter))
+ paramRepo:import({global_conf.param_fn .. '.' .. tostring(iter)}, nil, global_conf)
+
+ printf("%s preparing parameters end.\n", global_conf.sche_log_pre)
+
+ return nil
+end
+
+--global_conf: table
+--Returns: nerv.LayerRepo
+function prepare_layers(global_conf)
+ printf("%s preparing layers...\n", global_conf.sche_log_pre)
+
+ local pr = global_conf.paramRepo
+
+ local du = false
+
+ --local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["break_id"] = global_conf.vocab:get_sen_entry().id, ["independent"] = global_conf.independent, ["clip"] = 10}}
+ local recurrentLconfig = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10, ["direct_update"] = du, ["pr"] = pr}}
+
+ local layers = {
+ ["nerv.AffineRecurrentLayer"] = {
+ ["recurrentL1"] = recurrentLconfig,
+ },
+
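+ --LSTMLayerT takes three inputs {x, h, c} and produces two outputs {h, c}
+ --(cf. check_dim_len(3, 2) in tnn/layersT/lstm_t.lua)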
+ ["nerv.LSTMLayerT"] = {
+ ["lstmL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}},
+ },
+
+ ["nerv.SelectLinearLayer"] = {
+ ["selectL1"] = {{}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab, ["pr"] = pr}},
+ },
+
+ ["nerv.SigmoidLayer"] = {
+ ["sigmoidL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
+ },
+
+ ["nerv.CombinerLayer"] = {
+ ["combinerL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}}
+ },
+
+ ["nerv.AffineLayer"] = {
+ ["outputL"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du, ["pr"] = pr}},
+ },
+
+ ["nerv.SoftmaxCELayerT"] = {
+ ["softmaxL"] = {{}, {["dim_in"] = {global_conf.vocab:size(), global_conf.vocab:size()}, ["dim_out"] = {1}}},
+ },
+ }
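+ --Note: lstmL1 is declared above, but the connections in prepare_tnn() below
+ --still go through recurrentL1/sigmoidL1 (the simple-RNN path); wiring the
+ --LSTM into the graph is presumably left to a later change.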
+
+ --[[ --we do not need those in the new rnn framework
+ printf("%s adding %d bptt layers...\n", global_conf.sche_log_pre, global_conf.bptt)
+ for i = 1, global_conf.bptt do
+ layers["nerv.IndRecurrentLayer"]["recurrentL" .. (i + 1)] = recurrentLconfig
+ layers["nerv.SigmoidLayer"]["sigmoidL" .. (i + 1)] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
+ layers["nerv.SelectLinearLayer"]["selectL" .. (i + 1)] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}}
+ end
+ --]]
+
+ local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
+ printf("%s preparing layers end.\n", global_conf.sche_log_pre)
+ return layerRepo
+end
+
+--global_conf: table
+--layerRepo: nerv.LayerRepo
+--Returns: a nerv.TNN
+function prepare_tnn(global_conf, layerRepo)
+ printf("%s Generate and initing TNN ...\n", global_conf.sche_log_pre)
+
+ --input: input_w, input_w, ... input_w_now, last_activation
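+ --Each connection is {src_port, dst_port, time_shift}: a shift of 1 feeds the
+ --output to the next time step, forming the recurrent loop
+ --(combinerL1[1] -> recurrentL1[2]). <input>[1] carries the word index and
+ --<input>[2] the target distribution for the CE softmax.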
+ local connections_t = {
+ {"<input>[1]", "selectL1[1]", 0},
+ {"selectL1[1]", "recurrentL1[1]", 0},
+ {"recurrentL1[1]", "sigmoidL1[1]", 0},
+ {"sigmoidL1[1]", "combinerL1[1]", 0},
+ {"combinerL1[1]", "recurrentL1[2]", 1},
+ {"combinerL1[2]", "outputL[1]", 0},
+ {"outputL[1]", "softmaxL[1]", 0},
+ {"<input>[2]", "softmaxL[2]", 0},
+ {"softmaxL[1]", "<output>[1]", 0}
+ }
+
+ --[[
+ printf("%s printing DAG connections:\n", global_conf.sche_log_pre)
+ for key, value in pairs(connections_t) do
+ printf("\t%s->%s\n", key, value)
+ end
+ ]]--
+
+ local tnn = nerv.TNN("TNN", global_conf, {["dim_in"] = {1, global_conf.vocab:size()}, ["dim_out"] = {1}, ["sub_layers"] = layerRepo,
+ ["connections"] = connections_t,
+ })
+
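+ --batch_size sequences are processed in parallel; chunk_size is presumably the
+ --number of unrolled time steps per forward/backward pass (the BPTT length)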
+ tnn:init(global_conf.batch_size, global_conf.chunk_size)
+
+ printf("%s Initing TNN end.\n", global_conf.sche_log_pre)
+ return tnn
+end
+
+function load_net(global_conf, next_iter)
+ prepare_parameters(global_conf, next_iter)
+ local layerRepo = prepare_layers(global_conf)
+ local tnn = prepare_tnn(global_conf, layerRepo)
+ return tnn
+end
+
+local train_fn, valid_fn, test_fn
+global_conf = {}
+local set = arg[1] --"test"
+
+if (set == "ptb") then
+
+data_dir = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/PTBdata'
+train_fn = data_dir .. '/ptb.train.txt.adds'
+valid_fn = data_dir .. '/ptb.valid.txt.adds'
+test_fn = data_dir .. '/ptb.test.txt.adds'
+vocab_fn = data_dir .. '/vocab'
+
+global_conf = {
+ lrate = 1, wcost = 1e-6, momentum = 0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 400, --set to 400 for a stable good test PPL
+ chunk_size = 15,
+ batch_size = 10,
+ max_iter = 35,
+ decay_iter = 15,
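+ --uniform initialization in [-0.1, 0.1)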
+ param_random = function() return (math.random() / 5 - 0.1) end,
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 40000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/nerv/play/ptbEXP/tnn_test'
+}
+
+elseif (set == "msr_sc") then
+
+data_dir = '/home/slhome/txh18/workspace/sentenceCompletion/DATA_PV2'
+train_fn = data_dir .. '/normed_all.sf.len60.adds.train'
+valid_fn = data_dir .. '/normed_all.sf.len60.adds.dev'
+test_fn = data_dir .. '/answer_normed.adds'
+vocab_fn = data_dir .. '/normed_all.choose.vocab30000.addqvocab'
+
+global_conf = {
+ lrate = 1, wcost = 1e-6, momentum = 0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ chunk_size = 15,
+ batch_size = 10,
+ max_iter = 30,
+ decay_iter = 10,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 400000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test'
+}
+
+else
+
+valid_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+vocab_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+
+global_conf = {
+ lrate = 1, wcost = 1e-5, momentum = 0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 20,
+ chunk_size = 2,
+ batch_size = 10,
+ max_iter = 3,
+ decay_iter = 1, --required by the LR schedule below; without it, "iter > decay_iter" fails on nil
+ param_random = function() return (math.random() / 5 - 0.1) end,
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 10, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/nerv/play/testEXP/tnn_lstmlm_test'
+}
+
+end
+
+lr_half = false --cannot be local; it may be set via loadstring(arg[2])
+start_iter = -1
+ppl_last = 100000
+if (arg[2] ~= nil) then
+ printf("%s applying arg[2](%s)...\n", global_conf.sche_log_pre, arg[2])
+ loadstring(arg[2])()
+ nerv.LMUtil.wait(0.5)
+else
+ printf("%s not user setting, all default...\n", global_conf.sche_log_pre)
+end
+
+global_conf.work_dir = global_conf.work_dir_base .. 'h' .. global_conf.hidden_size .. 'ch' .. global_conf.chunk_size .. 'ba' .. global_conf.batch_size .. 'slr' .. global_conf.lrate .. 'wc' .. global_conf.wcost
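+--e.g. with the ptb defaults above, work_dir becomes something like
+--".../play/ptbEXP/tnn_testh400ch15ba10slr1wc1e-06"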
+global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
+global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
+global_conf.param_fn = global_conf.work_dir .. "/params"
+
+----------------printing options---------------------------------
+printf("%s printing global_conf...\n", global_conf.sche_log_pre)
+for id, value in pairs(global_conf) do
+ print(id, value)
+end
+nerv.LMUtil.wait(2)
+printf("%s printing training scheduling options...\n", global_conf.sche_log_pre)
+print("lr_half", lr_half)
+print("start_iter", start_iter)
+print("ppl_last", ppl_last)
+printf("%s printing training scheduling end.\n", global_conf.sche_log_pre)
+nerv.LMUtil.wait(2)
+------------------printing options end------------------------------
+
+math.randomseed(1)
+
+printf("%s creating work_dir...\n", global_conf.sche_log_pre)
+os.execute("mkdir -p "..global_conf.work_dir)
+os.execute("cp " .. global_conf.train_fn .. " " .. global_conf.train_fn_shuf)
+
+local vocab = nerv.LMVocab()
+global_conf["vocab"] = vocab
+printf("%s building vocab...\n", global_conf.sche_log_pre)
+global_conf.vocab:build_file(global_conf.vocab_fn, false)
+ppl_rec = {}
+
+if start_iter == -1 then
+ prepare_parameters(global_conf, -1) --write pre-generated params to the param.0 file
+end
+
+if start_iter == -1 or start_iter == 0 then
+ print("===INITIAL VALIDATION===")
+ local tnn = load_net(global_conf, 0)
+ global_conf.paramRepo = tnn:get_params() --get the auto-generated params
+ global_conf.paramRepo:export(global_conf.param_fn .. '.0', nil) --some parameters are auto-generated, so save them to the param.0 file again
+ local result = LMTrainer.lm_process_file(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ nerv.LMUtil.wait(1)
+ ppl_rec[0] = {}
+ ppl_rec[0].valid = result:ppl_all("rnn")
+ ppl_last = ppl_rec[0].valid
+ ppl_rec[0].train = 0
+ ppl_rec[0].test = 0
+ ppl_rec[0].lr = 0
+
+ start_iter = 1
+
+ print()
+end
+
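+--Training schedule: each iteration trains on the shuffled file, peeks at the
+--test set, then validates. Past decay_iter, the learning rate is multiplied by
+--0.6 whenever the validation PPL improves by less than a factor of 1.0003 (and
+--lr_half then latches so decay continues). Parameters are saved only when the
+--validation PPL improves; otherwise the previous iteration's file is copied
+--forward.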
+local final_iter
+for iter = start_iter, global_conf.max_iter, 1 do
+ final_iter = iter --for final testing
+ global_conf.sche_log_pre = "[SCHEDULER ITER"..iter.." LR"..global_conf.lrate.."]:"
+ tnn = load_net(global_conf, iter - 1)
+ printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate)
+ result = LMTrainer.lm_process_file(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
+ ppl_rec[iter] = {}
+ ppl_rec[iter].train = result:ppl_all("rnn")
+ --shuffling training file
+ printf("%s shuffling training file\n", global_conf.sche_log_pre)
+ os.execute('cp ' .. global_conf.train_fn_shuf .. ' ' .. global_conf.train_fn_shuf_bak)
+ os.execute('cat ' .. global_conf.train_fn_shuf_bak .. ' | sort -R --random-source=/dev/zero > ' .. global_conf.train_fn_shuf)
+ printf("===PEEK ON TEST %d===\n", iter)
+ result = LMTrainer.lm_process_file(global_conf, global_conf.test_fn, tnn, false) --false update!
+ ppl_rec[iter].test = result:ppl_all("rnn")
+ printf("===VALIDATION %d===\n", iter)
+ result = LMTrainer.lm_process_file(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ ppl_rec[iter].valid = result:ppl_all("rnn")
+ ppl_rec[iter].lr = global_conf.lrate
+ if ((ppl_last / ppl_rec[iter].valid < 1.0003 or lr_half == true) and iter > global_conf.decay_iter) then
+ global_conf.lrate = (global_conf.lrate * 0.6)
+ end
+ if ppl_rec[iter].valid < ppl_last then
+ printf("%s PPL improves, saving net to file %s.%d...\n", global_conf.sche_log_pre, global_conf.param_fn, iter)
+ global_conf.paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
+ else
+ printf("%s PPL did not improve, rejected, copying param file of last iter...\n", global_conf.sche_log_pre)
+ os.execute('cp ' .. global_conf.param_fn..'.'..tostring(iter - 1) .. ' ' .. global_conf.param_fn..'.'..tostring(iter))
+ end
+ if ppl_last / ppl_rec[iter].valid < 1.0003 or lr_half == true then
+ lr_half = true
+ end
+ if ppl_rec[iter].valid < ppl_last then
+ ppl_last = ppl_rec[iter].valid
+ end
+ printf("\n")
+ nerv.LMUtil.wait(2)
+end
+printf("===VALIDATION PPL record===\n")
+for i, _ in pairs(ppl_rec) do
+ printf("<ITER%d LR%.5f train:%.3f valid:%.3f test:%.3f> \n", i, ppl_rec[i].lr, ppl_rec[i].train, ppl_rec[i].valid, ppl_rec[i].test)
+end
+printf("\n")
+printf("===FINAL TEST===\n")
+global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
+tnn = load_net(global_conf, final_iter)
+LMTrainer.lm_process_file(global_conf, global_conf.test_fn, tnn, false) --false update!
+
diff --git a/nerv/examples/lmptb/tnn_ptb_main.lua b/nerv/examples/lmptb/rnnlm_ptb_main.lua
index 16024a8..16024a8 100644
--- a/nerv/examples/lmptb/tnn_ptb_main.lua
+++ b/nerv/examples/lmptb/rnnlm_ptb_main.lua
diff --git a/nerv/examples/lmptb/tnn/layersT/lstm_t.lua b/nerv/examples/lmptb/tnn/layersT/lstm_t.lua
index 4ec2e54..d7d8a20 100644
--- a/nerv/examples/lmptb/tnn/layersT/lstm_t.lua
+++ b/nerv/examples/lmptb/tnn/layersT/lstm_t.lua
@@ -19,10 +19,12 @@ function LSTMLayerT:__init(id, global_conf, layer_conf)
local layers = {
["nerv.CombinerLayer"] = {
- [ap("inputXDup")] = {{}, {["dim_in"] = {self.dim_in[1]}, ["dim_out"] = {self.dim_in[1], self.dim_in[1], self.dim_in[1]}}},
- [ap("inputHDup")] = {{}, {["dim_in"] = {self.dim_in[2]}, ["dim_out"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2]}}},
+ [ap("inputXDup")] = {{}, {["dim_in"] = {self.dim_in[1]},
+ ["dim_out"] = {self.dim_in[1], self.dim_in[1], self.dim_in[1]}, ["lambda"] = {1}}},
+ [ap("inputHDup")] = {{}, {["dim_in"] = {self.dim_in[2]},
+ ["dim_out"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2]}, ["lambda"] = {1}}},
[ap("inputCDup")] = {{}, {["dim_in"] = {self.dim_in[3]},
- ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3], self.dim_in[3]}}},
+ ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3], self.dim_in[3]}, ["lambda"] = {1}}},
[ap("mainCDup")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3], self.dim_in[3]},
["lambda"] = {1, 1}}},
},
@@ -76,39 +78,37 @@ function LSTMLayerT:__init(id, global_conf, layer_conf)
[ap("forgetGMul[1]")] = ap("mainCDup[2]"),
[ap("mainCDup[2]")] = "<output>[2]",
- }
+ [ap("mainCDup[1]")] = ap("outputTanhL[1]"),
+ [ap("outputTanhL[1]")] = "<output>[1]",
+ }
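+ -- wrap the internal sub-graph in a DAGLayerT; init/propagate/back_propagate/
+ -- update below simply delegate to it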
+ self.dagL = nerv.DAGLayerT(self.id, global_conf,
+ {["dim_in"] = self.dim_in, ["dim_out"] = self.dim_out, ["sub_layers"] = layerRepo,
+ ["connections"] = connections_t})
+
self:check_dim_len(3, 2) -- x, h, c and h, c
end
-function LSTMLayerT:init(batch_size)
- if self.ltp.trans:ncol() ~= self.bp.trans:ncol() then
- nerv.error("mismatching dimensions of linear transform and bias paramter")
- end
- if self.dim_in[1] ~= self.ltp.trans:nrow() then
- nerv.error("mismatching dimensions of linear transform parameter and input")
- end
- if self.dim_out[1] ~= self.ltp.trans:ncol() then
- nerv.error("mismatching dimensions of linear transform parameter and output")
- end
- self.ltp_grad = self.ltp.trans:create()
- self.ltp:train_init()
- self.bp:train_init()
+function LSTMLayerT:init(batch_size, chunk_size)
+ self.dagL:init(batch_size, chunk_size)
end
-function LSTMLayerT:batch_resize(batch_size)
- -- do nothing
+function LSTMLayerT:batch_resize(batch_size, chunk_size)
+ self.dagL:batch_resize(batch_size, chunk_size)
end
function LSTMLayerT:update(bp_err, input, output)
+ self.dagL:update(bp_err, input, output)
end
function LSTMLayerT:propagate(input, output)
+ self.dagL:propagate(input, output)
end
function LSTMLayerT:back_propagate(bp_err, next_bp_err, input, output)
+ self.dagL:back_propagate(bp_err, next_bp_err, input, output)
end
function LSTMLayerT:get_params()
- return nerv.ParamRepo({self.ltp, self.bp})
+ return self.dagL:get_params()
end