-rw-r--r--  nerv/examples/lmptb/bilstmlm_ptb_main.lua   517
-rw-r--r--  nerv/examples/lmptb/lm_trainer.lua          182
-rw-r--r--  nerv/examples/lmptb/lmptb/lmseqreader.lua     2
-rw-r--r--  nerv/examples/lmptb/lmptb/lstm_t_v2.lua     123
-rw-r--r--  nerv/examples/lmptb/lstmlm_ptb_main.lua     127
-rw-r--r--  nerv/examples/lmptb/m-tests/some-text-chn     5
-rw-r--r--  nerv/layer/gate_fff.lua                      56
-rw-r--r--  nerv/lib/io/chunk_file.c                      2
-rw-r--r--  nerv/tnn/layer_dag_t.lua                      2
-rw-r--r--  nerv/tnn/layersT/lstm_t.lua                  13
-rw-r--r--  nerv/tnn/sutil.lua                            1
-rw-r--r--  nerv/tnn/tnn.lua                             37
12 files changed, 973 insertions, 94 deletions
diff --git a/nerv/examples/lmptb/bilstmlm_ptb_main.lua b/nerv/examples/lmptb/bilstmlm_ptb_main.lua
new file mode 100644
index 0000000..0472588
--- /dev/null
+++ b/nerv/examples/lmptb/bilstmlm_ptb_main.lua
@@ -0,0 +1,517 @@
+require 'lmptb.lmvocab'
+require 'lmptb.lmfeeder'
+require 'lmptb.lmutil'
+require 'lmptb.layer.init'
+--require 'tnn.init'
+require 'lmptb.lmseqreader'
+require 'lm_trainer'
+
+--[[global function rename]]--
+--local printf = nerv.printf
+local LMTrainer = nerv.LMTrainer
+--[[global function rename ends]]--
+
+--global_conf: table
+--first_time: bool
+--Returns: a ParamRepo
+function prepare_parameters(global_conf, iter)
+ nerv.printf("%s preparing parameters...\n", global_conf.sche_log_pre)
+
+ global_conf.paramRepo = nerv.ParamRepo()
+ local paramRepo = global_conf.paramRepo
+
+ if iter == -1 then --first time
+        nerv.printf("%s first time, preparing some pre-set parameters and leaving other parameters to auto-generation...\n", global_conf.sche_log_pre)
+ local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+ f:close()
+ --[[
+ ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf)
+ ltp_ih.trans = global_conf.cumat_type(global_conf.vocab:size(), global_conf.hidden_size) --index 0 is for zero, others correspond to vocab index(starting from 1)
+ ltp_ih.trans:generate(global_conf.param_random)
+
+ ltp_hh = nerv.LinearTransParam("ltp_hh", global_conf)
+ ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size)
+ ltp_hh.trans:generate(global_conf.param_random)
+
+ --ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
+ --ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
+ --ltp_ho.trans:generate(global_conf.param_random)
+
+ bp_h = nerv.BiasParam("bp_h", global_conf)
+ bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size)
+ bp_h.trans:generate(global_conf.param_random)
+
+ --bp_o = nerv.BiasParam("bp_o", global_conf)
+ --bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
+ --bp_o.trans:generate(global_conf.param_random)
+
+ local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+ f:write_chunk(ltp_ih)
+ f:write_chunk(ltp_hh)
+ --f:write_chunk(ltp_ho)
+ f:write_chunk(bp_h)
+ --f:write_chunk(bp_o)
+ f:close()
+ ]]--
+ return nil
+ end
+
+ nerv.printf("%s loading parameter from file %s...\n", global_conf.sche_log_pre, global_conf.param_fn .. '.' .. tostring(iter))
+ paramRepo:import({global_conf.param_fn .. '.' .. tostring(iter)}, nil, global_conf)
+
+ nerv.printf("%s preparing parameters end.\n", global_conf.sche_log_pre)
+
+ return nil
+end
+
+--global_conf: table
+--Returns: nerv.LayerRepo
+function prepare_layers(global_conf)
+ nerv.printf("%s preparing layers...\n", global_conf.sche_log_pre)
+
+ local pr = global_conf.paramRepo
+
+ local du = false
+
+ local layers = {
+ ["nerv.LSTMLayerT"] = {
+ ["lstmFL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}},
+ ["lstmRL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}},
+ },
+
+ ["nerv.DropoutLayerT"] = {
+ ["dropoutL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}},
+ },
+
+ ["nerv.SelectLinearLayer"] = {
+ ["selectL1"] = {{}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab, ["pr"] = pr}},
+ },
+
+ ["nerv.CombinerLayer"] = {
+ ["combinerXL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+ ["combinerHFL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+ ["combinerHRL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+ },
+
+ ["nerv.AffineLayer"] = {
+ ["biAffineL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["pr"] = pr, ["lambda"] = {1, 1}}},
+ ["outputL"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du, ["pr"] = pr}},
+ },
+
+ ["nerv.TanhLayer"] = {
+ ["biTanhL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}},
+ },
+
+ ["nerv.SoftmaxCELayerT"] = {
+ ["softmaxL"] = {{}, {["dim_in"] = {global_conf.vocab:size(), global_conf.vocab:size()}, ["dim_out"] = {1}}},
+ },
+ }
+
+ if global_conf.layer_num > 1 then
+        nerv.error("this script currently does not support more than one layer")
+ end
+ --[[
+ for l = 2, global_conf.layer_num do
+ layers["nerv.DropoutLayerT"]["dropoutL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
+ layers["nerv.LSTMLayerT"]["lstmL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}}
+ layers["nerv.CombinerLayer"]["combinerL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}}
+ end
+ ]]--
+
+ local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
+ nerv.printf("%s preparing layers end.\n", global_conf.sche_log_pre)
+ return layerRepo
+end
+
+--global_conf: table
+--layerRepo: nerv.LayerRepo
+--Returns: a nerv.TNN
+function prepare_tnn(global_conf, layerRepo)
+    nerv.printf("%s Generating and initing TNN...\n", global_conf.sche_log_pre)
+
+ --input: input_w, input_w, ... input_w_now, last_activation
+ local connections_t = {
+ {"<input>[1]", "selectL1[1]", 0},
+
+ --{"selectL1[1]", "recurrentL1[1]", 0},
+ --{"recurrentL1[1]", "sigmoidL1[1]", 0},
+ --{"sigmoidL1[1]", "combinerL1[1]", 0},
+ --{"combinerL1[1]", "recurrentL1[2]", 1},
+
+ {"selectL1[1]", "combinerXL1[1]", 0},
+ {"combinerXL1[1]", "lstmFL1[1]", 0},
+ {"lstmFL1[1]", "combinerHFL1[1]", 0},
+ {"combinerHFL1[1]", "lstmFL1[2]", 1},
+ {"lstmFL1[2]", "lstmFL1[3]", 1},
+ {"combinerXL1[2]", "lstmRL1[1]", 0},
+ {"lstmRL1[1]", "combinerHRL1[1]", 0},
+ {"combinerHRL1[1]", "lstmRL1[2]", -1},
+ {"lstmRL1[2]", "lstmRL1[3]", -1},
+ {"combinerHFL1[2]", "biAffineL1[1]", 0},
+ {"combinerHRL1[2]", "biAffineL1[2]", 0},
+ {"biAffineL1[1]", "biTanhL1[1]", 0},
+ {"biTanhL1[1]", "dropoutL1[1]", 0},
+
+ {"dropoutL"..global_conf.layer_num.."[1]", "outputL[1]", 0},
+ {"outputL[1]", "softmaxL[1]", 0},
+ {"<input>[2]", "softmaxL[2]", 0},
+ {"softmaxL[1]", "<output>[1]", 0}
+ }
+
+ --[[
+ for l = 2, global_conf.layer_num do
+ table.insert(connections_t, {"dropoutL"..(l-1).."[1]", "lstmL"..l.."[1]", 0})
+ table.insert(connections_t, {"lstmL"..l.."[2]", "lstmL"..l.."[3]", 1})
+ table.insert(connections_t, {"lstmL"..l.."[1]", "combinerL"..l.."[1]", 0})
+ table.insert(connections_t, {"combinerL"..l.."[1]", "lstmL"..l.."[2]", 1})
+ table.insert(connections_t, {"combinerL"..l.."[2]", "dropoutL"..l.."[1]", 0})
+ end
+ ]]--
+
+ --[[
+ printf("%s printing DAG connections:\n", global_conf.sche_log_pre)
+ for key, value in pairs(connections_t) do
+ printf("\t%s->%s\n", key, value)
+ end
+ ]]--
+
+ local tnn = nerv.TNN("TNN", global_conf, {["dim_in"] = {1, global_conf.vocab:size()},
+ ["dim_out"] = {1}, ["sub_layers"] = layerRepo,
+ ["connections"] = connections_t, ["clip_t"] = global_conf.clip_t,
+ })
+
+ tnn:init(global_conf.batch_size, global_conf.chunk_size)
+
+ nerv.printf("%s Initing TNN end.\n", global_conf.sche_log_pre)
+ return tnn
+end
+
+function load_net(global_conf, next_iter)
+ prepare_parameters(global_conf, next_iter)
+ local layerRepo = prepare_layers(global_conf)
+ local tnn = prepare_tnn(global_conf, layerRepo)
+ return tnn
+end
+
+local train_fn, valid_fn, test_fn
+global_conf = {}
+local set = arg[1] --"test"
+
+if (set == "ptb") then
+
+root_dir = '/home/slhome/txh18/workspace'
+data_dir = root_dir .. '/ptb/DATA'
+train_fn = data_dir .. '/ptb.train.txt.adds'
+valid_fn = data_dir .. '/ptb.valid.txt.adds'
+test_fn = data_dir .. '/ptb.test.txt.adds'
+vocab_fn = data_dir .. '/vocab'
+
+qdata_dir = root_dir .. '/ptb/questionGen/gen'
+
+global_conf = {
+ lrate = 0.015, wcost = 1e-5, momentum = 0, clip_t = 5,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 90,
+ batch_size = 20,
+ max_iter = 35,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ max_sen_len = 90,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 40000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/ptb/EXP-nerv/bilstmlm_v1.0'
+}
+
+elseif (set == "msr_sc") then
+
+data_dir = '/home/slhome/txh18/workspace/sentenceCompletion/DATA_PV2'
+train_fn = data_dir .. '/normed_all.sf.len60.adds.train'
+valid_fn = data_dir .. '/normed_all.sf.len60.adds.dev'
+test_fn = data_dir .. '/answer_normed.adds'
+vocab_fn = data_dir .. '/normed_all.choose.vocab30000.addqvocab'
+
+global_conf = {
+ lrate = 1, wcost = 1e-6, momentum = 0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 15,
+ batch_size = 10,
+ max_iter = 30,
+ decay_iter = 10,
+ lr_decay = 1.003,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 400000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test'
+}
+
+elseif (set == "twitter") then
+
+root_dir = '/home/slhome/txh18/workspace'
+data_dir = root_dir .. '/twitter_new/DATA'
+train_fn = data_dir .. '/twitter.choose.adds'
+valid_fn = data_dir .. '/twitter.valid.adds'
+test_fn = data_dir .. '/comm.test.choose-ppl.adds'
+vocab_fn = data_dir .. '/twitter.choose.train.vocab'
+
+--qdata_dir = root_dir .. '/ptb/questionGen/gen'
+
+global_conf = {
+ lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 15,
+ batch_size = 20,
+ max_iter = 35,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ max_sen_len = 90,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 40000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = root_dir .. '/twitter_new/EXP-nerv/bilstmlm_v1.0'
+}
+
+else
+
+valid_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+vocab_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+
+global_conf = {
+ lrate = 0.01, wcost = 1e-5, momentum = 0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 20,
+ layer_num = 1,
+ chunk_size = 20,
+ batch_size = 10,
+ max_iter = 2,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ max_sen_len = 80,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ vocab_fn = vocab_fn,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 10, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = '/home/slhome/txh18/workspace/nerv/play/testEXP/tnn_bilstmlm_test'
+}
+
+end
+
+lr_half = false --can not be local, to be set by loadstring
+start_iter = -1
+start_lr = global_conf.lrate
+ppl_last = 100000
+commands_str = "train:test"
+commands = {}
+test_iter = -1
+
+--for testout(question)
+q_file = "/home/slhome/txh18/workspace/ptb/questionGen/gen/ptb.test.txt.q10rs1_Msss.adds"
+
+if arg[2] ~= nil then
+ nerv.printf("%s applying arg[2](%s)...\n", global_conf.sche_log_pre, arg[2])
+ loadstring(arg[2])()
+ nerv.LMUtil.wait(0.5)
+else
+ nerv.printf("%s no user setting, all default...\n", global_conf.sche_log_pre)
+end
+
+global_conf.work_dir = global_conf.work_dir_base .. 'h' .. global_conf.hidden_size .. 'l' .. global_conf.layer_num .. 'ch' .. global_conf.chunk_size .. 'ba' .. global_conf.batch_size .. 'slr' .. global_conf.lrate .. 'wc' .. global_conf.wcost .. 'dr' .. global_conf.dropout_str
+global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
+global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
+global_conf.param_fn = global_conf.work_dir .. "/params"
+global_conf.dropout_list = nerv.SUtil.parse_schedule(global_conf.dropout_str)
+global_conf.log_fn = global_conf.work_dir .. '/log_lstm_tnn_' .. commands_str ..os.date("_TT%m_%d_%X",os.time())
+global_conf.log_fn, _ = string.gsub(global_conf.log_fn, ':', '-')
+commands = nerv.SUtil.parse_commands_set(commands_str)
+
+global_conf.lrate = start_lr --starting lr can be set by user(arg[2])
+
+nerv.printf("%s creating work_dir(%s)...\n", global_conf.sche_log_pre, global_conf.work_dir)
+nerv.LMUtil.wait(2)
+os.execute("mkdir -p "..global_conf.work_dir)
+os.execute("cp " .. global_conf.train_fn .. " " .. global_conf.train_fn_shuf)
+
+--redirecting log outputs!
+nerv.SUtil.log_redirect(global_conf.log_fn)
+nerv.LMUtil.wait(2)
+
+----------------printing options---------------------------------
+nerv.printf("%s printing global_conf...\n", global_conf.sche_log_pre)
+for id, value in pairs(global_conf) do
+ nerv.printf("%s:\t%s\n", id, tostring(value))
+end
+nerv.LMUtil.wait(2)
+
+nerv.printf("%s printing training scheduling options...\n", global_conf.sche_log_pre)
+nerv.printf("lr_half:\t%s\n", tostring(lr_half))
+nerv.printf("start_iter:\t%s\n", tostring(start_iter))
+nerv.printf("ppl_last:\t%s\n", tostring(ppl_last))
+nerv.printf("commands_str:\t%s\n", commands_str)
+nerv.printf("test_iter:\t%s\n", tostring(test_iter))
+nerv.printf("%s printing training scheduling end.\n", global_conf.sche_log_pre)
+nerv.LMUtil.wait(2)
+------------------printing options end------------------------------
+
+math.randomseed(1)
+
+local vocab = nerv.LMVocab()
+global_conf["vocab"] = vocab
+nerv.printf("%s building vocab...\n", global_conf.sche_log_pre)
+global_conf.vocab:build_file(global_conf.vocab_fn, false)
+ppl_rec = {}
+
+local final_iter = -1
+if commands["train"] == 1 then
+ if start_iter == -1 then
+ prepare_parameters(global_conf, -1) --write pre_generated params to param.0 file
+ end
+
+ if start_iter == -1 or start_iter == 0 then
+ nerv.printf("===INITIAL VALIDATION===\n")
+ local tnn = load_net(global_conf, 0)
+        global_conf.paramRepo = tnn:get_params() --get auto-generated params
+ global_conf.paramRepo:export(global_conf.param_fn .. '.0', nil) --some parameters are auto-generated, saved again to param.0 file
+ global_conf.dropout_rate = 0
+ local result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ nerv.LMUtil.wait(1)
+ ppl_rec[0] = {}
+ ppl_rec[0].valid = result:ppl_all("birnn")
+ ppl_last = ppl_rec[0].valid
+ ppl_rec[0].train = 0
+ ppl_rec[0].test = 0
+ ppl_rec[0].lr = 0
+
+ start_iter = 1
+
+ nerv.printf("\n")
+ end
+
+ for iter = start_iter, global_conf.max_iter, 1 do
+ final_iter = iter --for final testing
+ global_conf.sche_log_pre = "[SCHEDULER ITER"..iter.." LR"..global_conf.lrate.."]:"
+ tnn = load_net(global_conf, iter - 1)
+ nerv.printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate)
+ global_conf.dropout_rate = nerv.SUtil.sche_get(global_conf.dropout_list, iter)
+ result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
+ global_conf.dropout_rate = 0
+ ppl_rec[iter] = {}
+ ppl_rec[iter].train = result:ppl_all("birnn")
+ --shuffling training file
+ nerv.printf("%s shuffling training file\n", global_conf.sche_log_pre)
+ os.execute('cp ' .. global_conf.train_fn_shuf .. ' ' .. global_conf.train_fn_shuf_bak)
+ os.execute('cat ' .. global_conf.train_fn_shuf_bak .. ' | sort -R --random-source=/dev/zero > ' .. global_conf.train_fn_shuf)
+ nerv.printf("===PEEK ON TEST %d===\n", iter)
+ result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.test_fn, tnn, false) --false update!
+ ppl_rec[iter].test = result:ppl_all("birnn")
+ nerv.printf("===VALIDATION %d===\n", iter)
+ result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ ppl_rec[iter].valid = result:ppl_all("birnn")
+ ppl_rec[iter].lr = global_conf.lrate
+ if ((ppl_last / ppl_rec[iter].valid < global_conf.lr_decay or lr_half == true) and iter > global_conf.decay_iter) then
+ global_conf.lrate = (global_conf.lrate * 0.6)
+ end
+ if ppl_rec[iter].valid < ppl_last then
+ nerv.printf("%s PPL improves, saving net to file %s.%d...\n", global_conf.sche_log_pre, global_conf.param_fn, iter)
+ global_conf.paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
+ else
+ nerv.printf("%s PPL did not improve, rejected, copying param file of last iter...\n", global_conf.sche_log_pre)
+ os.execute('cp ' .. global_conf.param_fn..'.'..tostring(iter - 1) .. ' ' .. global_conf.param_fn..'.'..tostring(iter))
+ end
+ if ppl_last / ppl_rec[iter].valid < global_conf.lr_decay or lr_half == true then
+ lr_half = true
+ end
+ if ppl_rec[iter].valid < ppl_last then
+ ppl_last = ppl_rec[iter].valid
+ end
+ nerv.printf("\n")
+ nerv.LMUtil.wait(2)
+ end
+ nerv.info("saving final nn to param.final")
+ os.execute('cp ' .. global_conf.param_fn .. '.' .. tostring(final_iter) .. ' ' .. global_conf.param_fn .. '.final')
+
+ nerv.printf("===VALIDATION PPL record===\n")
+ for i, _ in pairs(ppl_rec) do
+ nerv.printf("<ITER%d LR%.5f train:%.3f valid:%.3f test:%.3f> \n", i, ppl_rec[i].lr, ppl_rec[i].train, ppl_rec[i].valid, ppl_rec[i].test)
+ end
+ nerv.printf("\n")
+end --if commands["train"]
+
+if commands["test"] == 1 then
+ nerv.printf("===FINAL TEST===\n")
+ global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
+ if final_iter ~= -1 and test_iter == -1 then
+ test_iter = final_iter
+ end
+ if test_iter == -1 then
+ test_iter = "final"
+ end
+ tnn = load_net(global_conf, test_iter)
+ global_conf.dropout_rate = 0
+ LMTrainer.lm_process_file_birnn(global_conf, global_conf.test_fn, tnn, false) --false update!
+end --if commands["test"]
+
+if commands["testout"] == 1 then
+ nerv.printf("===TEST OUT===\n")
+ nerv.printf("q_file:\t%s\n", q_file)
+ local q_fn = q_file --qdata_dir .. '/' .. q_file
+ global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
+ if final_iter ~= -1 and test_iter == -1 then
+ test_iter = final_iter
+ end
+ if test_iter == -1 then
+ test_iter = "final"
+ end
+ tnn = load_net(global_conf, test_iter)
+ global_conf.dropout_rate = 0
+ LMTrainer.lm_process_file_birnn(global_conf, q_fn, tnn, false,
+ {["one_sen_report"] = true}) --false update!
+end --if commands["testout"]
+
+
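Note on the wiring in bilstmlm_ptb_main.lua above: the bidirectional structure is carried entirely by the time-shift field (the third element) of each connection triple. The forward chain lstmFL1 receives its own hidden and cell state with a delay of +1 (from the previous time step), while the reverse chain lstmRL1 uses -1 (from the next time step), and biAffineL1 merges the two directions before the output layer. A minimal sketch of just that pattern, extracted from connections_t above for illustration (not part of the patch):

    -- each triple is {source_port, destination_port, time_shift}
    local bidir_core = {
        {"selectL1[1]",     "combinerXL1[1]",  0}, -- shared word embedding feeds both directions
        -- forward LSTM: recurrent inputs come from the previous time step (+1)
        {"combinerXL1[1]",  "lstmFL1[1]",      0},
        {"lstmFL1[1]",      "combinerHFL1[1]", 0},
        {"combinerHFL1[1]", "lstmFL1[2]",      1},
        {"lstmFL1[2]",      "lstmFL1[3]",      1},
        -- reverse LSTM: recurrent inputs come from the next time step (-1)
        {"combinerXL1[2]",  "lstmRL1[1]",      0},
        {"lstmRL1[1]",      "combinerHRL1[1]", 0},
        {"combinerHRL1[1]", "lstmRL1[2]",     -1},
        {"lstmRL1[2]",      "lstmRL1[3]",     -1},
        -- the two directions are merged before the softmax output
        {"combinerHFL1[2]", "biAffineL1[1]",   0},
        {"combinerHRL1[2]", "biAffineL1[2]",   0},
    }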
diff --git a/nerv/examples/lmptb/lm_trainer.lua b/nerv/examples/lmptb/lm_trainer.lua
index 9ef4794..3b8b5c3 100644
--- a/nerv/examples/lmptb/lm_trainer.lua
+++ b/nerv/examples/lmptb/lm_trainer.lua
@@ -17,9 +17,30 @@ function nerv.BiasParam:update_by_gradient(gradient)
end
--Returns: LMResult
-function LMTrainer.lm_process_file_rnn(global_conf, fn, tnn, do_train)
- local reader = nerv.LMSeqReader(global_conf, global_conf.batch_size, global_conf.chunk_size, global_conf.vocab)
+function LMTrainer.lm_process_file_rnn(global_conf, fn, tnn, do_train, p_conf)
+ if p_conf == nil then
+ p_conf = {}
+ end
+ local reader
+ local r_conf = {}
+ local chunk_size, batch_size
+ if p_conf.one_sen_report == true then --report log prob one by one sentence
+ if do_train == true then
+ nerv.warning("LMTrainer.lm_process_file_rnn: warning, one_sen_report is true while do_train is also true, strange")
+ end
+ nerv.printf("lm_process_file_rnn: one_sen report mode, set batch_size to 1 and chunk_size to max_sen_len(%d)\n",
+ global_conf.max_sen_len)
+ batch_size = 1
+ chunk_size = global_conf.max_sen_len
+ r_conf["se_mode"] = true
+ else
+ batch_size = global_conf.batch_size
+ chunk_size = global_conf.chunk_size
+ end
+
+ reader = nerv.LMSeqReader(global_conf, batch_size, chunk_size, global_conf.vocab, r_conf)
reader:open_file(fn)
+
local result = nerv.LMResult(global_conf, global_conf.vocab)
result:init("rnn")
if global_conf.dropout_rate ~= nil then
@@ -27,11 +48,13 @@ function LMTrainer.lm_process_file_rnn(global_conf, fn, tnn, do_train)
end
global_conf.timer:flush()
+ tnn:init(batch_size, chunk_size)
tnn:flush_all() --caution: will also flush the inputs from the reader!
local next_log_wcn = global_conf.log_w_num
- local neto_bakm = global_conf.mmat_type(global_conf.batch_size, 1) --space backup matrix for network output
-
+ local neto_bakm = global_conf.mmat_type(batch_size, 1) --space backup matrix for network output
+
+ nerv.info("LMTrainer.lm_process_file_rnn: begin processing...")
while (1) do
global_conf.timer:tic('most_out_loop_lmprocessfile')
@@ -42,9 +65,9 @@ function LMTrainer.lm_process_file_rnn(global_conf, fn, tnn, do_train)
break
end
- for t = 1, global_conf.chunk_size do
+ for t = 1, chunk_size do
tnn.err_inputs_m[t][1]:fill(1)
- for i = 1, global_conf.batch_size do
+ for i = 1, batch_size do
if bit.band(feeds.flags_now[t][i], nerv.TNN.FC.HAS_LABEL) == 0 then
tnn.err_inputs_m[t][1][i - 1][0] = 0
end
@@ -70,15 +93,26 @@ function LMTrainer.lm_process_file_rnn(global_conf, fn, tnn, do_train)
end
global_conf.timer:tic('tnn_afterprocess')
- for t = 1, global_conf.chunk_size, 1 do
+ local sen_logp = {}
+ for t = 1, chunk_size, 1 do
tnn.outputs_m[t][1]:copy_toh(neto_bakm)
- for i = 1, global_conf.batch_size, 1 do
+ for i = 1, batch_size, 1 do
if (feeds.labels_s[t][i] ~= global_conf.vocab.null_token) then
--result:add("rnn", feeds.labels_s[t][i], math.exp(tnn.outputs_m[t][1][i - 1][0]))
result:add("rnn", feeds.labels_s[t][i], math.exp(neto_bakm[i - 1][0]))
+ if sen_logp[i] == nil then
+ sen_logp[i] = 0
+ end
+ sen_logp[i] = sen_logp[i] + neto_bakm[i - 1][0]
end
end
end
+ if p_conf.one_sen_report == true then
+ for i = 1, batch_size do
+ nerv.printf("LMTrainer.lm_process_file_rnn: one_sen_report_output, %f\n", sen_logp[i])
+ end
+ end
+
tnn:move_right_to_nextmb({0}) --only copy for time 0
global_conf.timer:toc('tnn_afterprocess')
@@ -102,7 +136,6 @@ function LMTrainer.lm_process_file_rnn(global_conf, fn, tnn, do_train)
end
]]--
-
collectgarbage("collect")
--break --debug
@@ -115,4 +148,135 @@ function LMTrainer.lm_process_file_rnn(global_conf, fn, tnn, do_train)
return result
end
+--Returns: LMResult
+function LMTrainer.lm_process_file_birnn(global_conf, fn, tnn, do_train, p_conf)
+ if p_conf == nil then
+ p_conf = {}
+ end
+ local reader
+ local chunk_size, batch_size
+ local r_conf = {["se_mode"] = true}
+ if p_conf.one_sen_report == true then --report log prob one by one sentence
+ if do_train == true then
+ nerv.warning("LMTrainer.lm_process_file_birnn: warning, one_sen_report is true while do_train is also true, strange")
+ end
+ nerv.printf("lm_process_file_birnn: one_sen report mode, set batch_size to 1 and chunk_size to max_sen_len(%d)\n",
+ global_conf.max_sen_len)
+ batch_size = 1
+ chunk_size = global_conf.max_sen_len
+ else
+ batch_size = global_conf.batch_size
+ chunk_size = global_conf.chunk_size
+ end
+
+ reader = nerv.LMSeqReader(global_conf, batch_size, chunk_size, global_conf.vocab, r_conf)
+ reader:open_file(fn)
+
+ local result = nerv.LMResult(global_conf, global_conf.vocab)
+ result:init("birnn")
+ if global_conf.dropout_rate ~= nil then
+ nerv.info("LMTrainer.lm_process_file_birnn: dropout_rate is %f", global_conf.dropout_rate)
+ end
+
+ global_conf.timer:flush()
+ tnn:init(batch_size, chunk_size)
+ tnn:flush_all() --caution: will also flush the inputs from the reader!
+
+ local next_log_wcn = global_conf.log_w_num
+ local neto_bakm = global_conf.mmat_type(batch_size, 1) --space backup matrix for network output
+
+ nerv.info("LMTrainer.lm_process_file_birnn: begin processing...")
+
+ while (1) do
+ global_conf.timer:tic('most_out_loop_lmprocessfile')
+
+ local r, feeds
+ global_conf.timer:tic('tnn_beforeprocess')
+ r, feeds = tnn:getfeed_from_reader(reader)
+ if r == false then
+ break
+ end
+
+ for t = 1, chunk_size do
+ tnn.err_inputs_m[t][1]:fill(1)
+ for i = 1, batch_size do
+ if bit.band(feeds.flags_now[t][i], nerv.TNN.FC.HAS_LABEL) == 0 then
+ tnn.err_inputs_m[t][1][i - 1][0] = 0
+ end
+ end
+ end
+ global_conf.timer:toc('tnn_beforeprocess')
+
+ --[[
+ for j = 1, global_conf.chunk_size, 1 do
+ for i = 1, global_conf.batch_size, 1 do
+ printf("%s[L(%s)] ", feeds.inputs_s[j][i], feeds.labels_s[j][i]) --vocab:get_word_str(input[i][j]).id
+ end
+ printf("\n")
+ end
+ printf("\n")
+ ]]--
+
+ tnn:net_propagate()
+
+ if do_train == true then
+ tnn:net_backpropagate(false)
+ tnn:net_backpropagate(true)
+ end
+
+ global_conf.timer:tic('tnn_afterprocess')
+ local sen_logp = {}
+ for t = 1, chunk_size, 1 do
+ tnn.outputs_m[t][1]:copy_toh(neto_bakm)
+ for i = 1, batch_size, 1 do
+ if (feeds.labels_s[t][i] ~= global_conf.vocab.null_token) then
+ result:add("birnn", feeds.labels_s[t][i], math.exp(neto_bakm[i - 1][0]))
+ if sen_logp[i] == nil then
+ sen_logp[i] = 0
+ end
+ sen_logp[i] = sen_logp[i] + neto_bakm[i - 1][0]
+ end
+ end
+ end
+ if p_conf.one_sen_report == true then
+ for i = 1, batch_size do
+ nerv.printf("LMTrainer.lm_process_file_birnn: one_sen_report_output, %f\n", sen_logp[i])
+ end
+ end
+
+ --tnn:move_right_to_nextmb({0}) --do not need history for bi directional model
+ global_conf.timer:toc('tnn_afterprocess')
+
+ global_conf.timer:toc('most_out_loop_lmprocessfile')
+
+ --print log
+ if result["birnn"].cn_w > next_log_wcn then
+ next_log_wcn = next_log_wcn + global_conf.log_w_num
+ nerv.printf("%s %d words processed %s.\n", global_conf.sche_log_pre, result["birnn"].cn_w, os.date())
+ nerv.printf("\t%s log prob per sample :%f.\n", global_conf.sche_log_pre, result:logp_sample("birnn"))
+ for key, value in pairs(global_conf.timer.rec) do
+ nerv.printf("\t [global_conf.timer]: time spent on %s:%.5f clock time\n", key, value)
+ end
+ global_conf.timer:flush()
+ nerv.LMUtil.wait(0.1)
+ end
+
+ --[[
+ for t = 1, global_conf.chunk_size do
+ print(tnn.outputs_m[t][1])
+ end
+ ]]--
+
+ collectgarbage("collect")
+
+ --break --debug
+ end
+
+ nerv.printf("%s Displaying result:\n", global_conf.sche_log_pre)
+ nerv.printf("%s %s\n", global_conf.sche_log_pre, result:status("birnn"))
+ nerv.printf("%s Doing on %s end.\n", global_conf.sche_log_pre, fn)
+
+ return result
+end
+
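Usage note for the new trainer entry: lm_process_file_birnn mirrors lm_process_file_rnn but always opens the reader in se_mode and never carries history across minibatches; with one_sen_report it switches to batch_size = 1 and chunk_size = max_sen_len and prints a per-sentence log-probability. A minimal calling sketch, assuming global_conf, tnn and q_fn have been prepared as in the main scripts:

    require 'lm_trainer'

    -- evaluation pass over the validation set (do_train = false, no parameter update)
    local result = nerv.LMTrainer.lm_process_file_birnn(global_conf, global_conf.valid_fn, tnn, false)
    nerv.printf("valid PPL: %f\n", result:ppl_all("birnn"))

    -- per-sentence log-probability report, e.g. for a question file
    nerv.LMTrainer.lm_process_file_birnn(global_conf, q_fn, tnn, false, {["one_sen_report"] = true})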
diff --git a/nerv/examples/lmptb/lmptb/lmseqreader.lua b/nerv/examples/lmptb/lmptb/lmseqreader.lua
index 40471d5..ed791d2 100644
--- a/nerv/examples/lmptb/lmptb/lmseqreader.lua
+++ b/nerv/examples/lmptb/lmptb/lmseqreader.lua
@@ -179,7 +179,7 @@ function LMReader:get_batch(feeds)
if got_new == false then
nerv.info("lmseqreader file ends, printing stats...")
- print("al_sen_start:", self.stat.al_sen_start)
+ nerv.printf("al_sen_start:%s\n", tostring(self.stat.al_sen_start))
return false
else
diff --git a/nerv/examples/lmptb/lmptb/lstm_t_v2.lua b/nerv/examples/lmptb/lmptb/lstm_t_v2.lua
new file mode 100644
index 0000000..dc2fe45
--- /dev/null
+++ b/nerv/examples/lmptb/lmptb/lstm_t_v2.lua
@@ -0,0 +1,123 @@
+local LSTMLayerT = nerv.class('nerv.LSTMLayerTv2', 'nerv.LayerT')
+--a version of LSTM that only feeds h into the gates
+
+function LSTMLayerT:__init(id, global_conf, layer_conf)
+ --input1:x input2:h input3:c
+ self.id = id
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self.gconf = global_conf
+
+ --prepare a DAGLayerT to hold the lstm structure
+ local pr = layer_conf.pr
+ if pr == nil then
+ pr = nerv.ParamRepo()
+ end
+
+ local function ap(str)
+ return self.id .. '.' .. str
+ end
+
+ local layers = {
+ ["nerv.CombinerLayer"] = {
+ [ap("inputXDup")] = {{}, {["dim_in"] = {self.dim_in[1]},
+ ["dim_out"] = {self.dim_in[1], self.dim_in[1], self.dim_in[1], self.dim_in[1]}, ["lambda"] = {1}}},
+ [ap("inputHDup")] = {{}, {["dim_in"] = {self.dim_in[2]},
+ ["dim_out"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2], self.dim_in[2]}, ["lambda"] = {1}}},
+ [ap("inputCDup")] = {{}, {["dim_in"] = {self.dim_in[3]},
+ ["dim_out"] = {self.dim_in[3]}, ["lambda"] = {1}}},
+ [ap("mainCDup")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]},
+ ["dim_out"] = {self.dim_in[3], self.dim_in[3]}, ["lambda"] = {1, 1}}},
+ },
+ ["nerv.AffineLayer"] = {
+ [ap("mainAffineL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
+ ["dim_out"] = {self.dim_out[1]}, ["pr"] = pr}},
+ },
+ ["nerv.TanhLayer"] = {
+ [ap("mainTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
+ [ap("outputTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
+ },
+ ["nerv.GateFLayer"] = {
+ [ap("forgetGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
+ ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
+ [ap("inputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
+ ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
+ [ap("outputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
+ ["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
+
+ },
+ ["nerv.ElemMulLayer"] = {
+ [ap("inputGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
+ [ap("forgetGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
+ [ap("outputGMulL")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3]}}},
+ },
+ }
+
+ local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
+
+ local connections_t = {
+ ["<input>[1]"] = ap("inputXDup[1]"),
+ ["<input>[2]"] = ap("inputHDup[1]"),
+ ["<input>[3]"] = ap("inputCDup[1]"),
+
+ [ap("inputXDup[1]")] = ap("mainAffineL[1]"),
+ [ap("inputHDup[1]")] = ap("mainAffineL[2]"),
+
+ [ap("mainAffineL[1]")] = ap("mainTanhL[1]"),
+
+ [ap("inputXDup[2]")] = ap("inputGateL[1]"),
+ [ap("inputHDup[2]")] = ap("inputGateL[2]"),
+
+ [ap("inputXDup[3]")] = ap("forgetGateL[1]"),
+ [ap("inputHDup[3]")] = ap("forgetGateL[2]"),
+
+ [ap("mainTanhL[1]")] = ap("inputGMulL[1]"),
+ [ap("inputGateL[1]")] = ap("inputGMulL[2]"),
+
+ [ap("inputCDup[1]")] = ap("forgetGMulL[1]"),
+ [ap("forgetGateL[1]")] = ap("forgetGMulL[2]"),
+
+ [ap("inputGMulL[1]")] = ap("mainCDup[1]"),
+ [ap("forgetGMulL[1]")] = ap("mainCDup[2]"),
+
+ [ap("inputXDup[4]")] = ap("outputGateL[1]"),
+ [ap("inputHDup[4]")] = ap("outputGateL[2]"),
+
+ [ap("mainCDup[2]")] = "<output>[2]",
+ [ap("mainCDup[1]")] = ap("outputTanhL[1]"),
+
+ [ap("outputTanhL[1]")] = ap("outputGMulL[1]"),
+ [ap("outputGateL[1]")] = ap("outputGMulL[2]"),
+
+ [ap("outputGMulL[1]")] = "<output>[1]",
+ }
+ self.dagL = nerv.DAGLayerT(self.id, global_conf,
+ {["dim_in"] = self.dim_in, ["dim_out"] = self.dim_out, ["sub_layers"] = layerRepo,
+ ["connections"] = connections_t})
+
+ self:check_dim_len(3, 2) -- x, h, c and h, c
+end
+
+function LSTMLayerT:init(batch_size, chunk_size)
+ self.dagL:init(batch_size, chunk_size)
+end
+
+function LSTMLayerT:batch_resize(batch_size, chunk_size)
+ self.dagL:batch_resize(batch_size, chunk_size)
+end
+
+function LSTMLayerT:update(bp_err, input, output, t)
+ self.dagL:update(bp_err, input, output, t)
+end
+
+function LSTMLayerT:propagate(input, output, t)
+ self.dagL:propagate(input, output, t)
+end
+
+function LSTMLayerT:back_propagate(bp_err, next_bp_err, input, output, t)
+ self.dagL:back_propagate(bp_err, next_bp_err, input, output, t)
+end
+
+function LSTMLayerT:get_params()
+ return self.dagL:get_params()
+end
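The new LSTMLayerTv2 keeps the same external interface as nerv.LSTMLayerT (inputs x, h, c; outputs h, c) but feeds only x and h into the main affine transform and the three gates, so it has no cell-to-gate connections. A minimal construction sketch, assuming a global_conf as in the scripts above; the layer id "lstmV2L1" is just an example name:

    local h  = global_conf.hidden_size
    local pr = nerv.ParamRepo() -- missing parameters are auto-generated via find_param
    -- dim_in = {x, h(t-1), c(t-1)}, dim_out = {h(t), c(t)}
    local lstm_v2 = nerv.LSTMLayerTv2("lstmV2L1", global_conf,
        {["dim_in"] = {h, h, h}, ["dim_out"] = {h, h}, ["pr"] = pr})
    lstm_v2:init(global_conf.batch_size, global_conf.chunk_size)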
diff --git a/nerv/examples/lmptb/lstmlm_ptb_main.lua b/nerv/examples/lmptb/lstmlm_ptb_main.lua
index 681c308..6e3fab9 100644
--- a/nerv/examples/lmptb/lstmlm_ptb_main.lua
+++ b/nerv/examples/lmptb/lstmlm_ptb_main.lua
@@ -77,10 +77,6 @@ function prepare_layers(global_conf)
--local recurrentLconfig = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10, ["direct_update"] = du, ["pr"] = pr}}
local layers = {
- --["nerv.AffineRecurrentLayer"] = {
- -- ["recurrentL1"] = recurrentLconfig,
- --},
-
["nerv.LSTMLayerT"] = {
["lstmL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}},
},
@@ -93,10 +89,6 @@ function prepare_layers(global_conf)
["selectL1"] = {{}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab, ["pr"] = pr}},
},
- --["nerv.SigmoidLayer"] = {
- -- ["sigmoidL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
- --},
-
["nerv.CombinerLayer"] = {
["combinerL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
},
@@ -195,23 +187,26 @@ local set = arg[1] --"test"
if (set == "ptb") then
-data_dir = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/PTBdata'
+root_dir = '/home/slhome/txh18/workspace'
+data_dir = root_dir .. '/ptb/DATA'
train_fn = data_dir .. '/ptb.train.txt.adds'
valid_fn = data_dir .. '/ptb.valid.txt.adds'
test_fn = data_dir .. '/ptb.test.txt.adds'
vocab_fn = data_dir .. '/vocab'
+qdata_dir = root_dir .. '/ptb/questionGen/gen'
+
global_conf = {
- lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 2,
+ lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5,
cumat_type = nerv.CuMatrixFloat,
mmat_type = nerv.MMatrixFloat,
nn_act_default = 0,
- hidden_size = 650,
- layer_num = 2,
+ hidden_size = 300,
+ layer_num = 1,
chunk_size = 15,
batch_size = 20,
- max_iter = 45,
+ max_iter = 35,
lr_decay = 1.003,
decay_iter = 10,
param_random = function() return (math.random() / 5 - 0.1) end,
@@ -221,10 +216,11 @@ global_conf = {
valid_fn = valid_fn,
test_fn = test_fn,
vocab_fn = vocab_fn,
+ max_sen_len = 90,
sche_log_pre = "[SCHEDULER]:",
log_w_num = 40000, --give a message when log_w_num words have been processed
timer = nerv.Timer(),
- work_dir_base = '/home/slhome/txh18/workspace/nerv/play/ptbEXP/tnn_lstm_test'
+ work_dir_base = '/home/slhome/txh18/workspace/ptb/EXP-nerv/lstmlm_v1.0'
}
elseif (set == "msr_sc") then
@@ -261,12 +257,50 @@ global_conf = {
work_dir_base = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test'
}
+elseif (set == "twitter") then
+
+root_dir = '/home/slhome/txh18/workspace'
+data_dir = root_dir .. '/twitter_new/DATA'
+train_fn = data_dir .. '/twitter.choose.adds'
+valid_fn = data_dir .. '/twitter.valid.adds'
+test_fn = data_dir .. '/comm.test.choose-ppl.adds'
+vocab_fn = data_dir .. '/twitter.choose.train.vocab'
+
+--qdata_dir = root_dir .. '/ptb/questionGen/gen'
+
+global_conf = {
+ lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ nn_act_default = 0,
+
+ hidden_size = 300,
+ layer_num = 1,
+ chunk_size = 15,
+ batch_size = 20,
+ max_iter = 35,
+ lr_decay = 1.003,
+ decay_iter = 10,
+ param_random = function() return (math.random() / 5 - 0.1) end,
+ dropout_str = "0",
+
+ train_fn = train_fn,
+ valid_fn = valid_fn,
+ test_fn = test_fn,
+ vocab_fn = vocab_fn,
+ max_sen_len = 90,
+ sche_log_pre = "[SCHEDULER]:",
+ log_w_num = 40000, --give a message when log_w_num words have been processed
+ timer = nerv.Timer(),
+ work_dir_base = root_dir .. '/twitter_new/EXP-nerv/lstmlm_v1.0'
+}
+
else
-valid_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
-train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
-test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
-vocab_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+valid_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+vocab_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
global_conf = {
lrate = 0.01, wcost = 1e-5, momentum = 0,
@@ -285,6 +319,7 @@ global_conf = {
train_fn = train_fn,
valid_fn = valid_fn,
test_fn = test_fn,
+ max_sen_len = 80,
lr_decay = 1.003,
decay_iter = 10,
vocab_fn = vocab_fn,
@@ -296,12 +331,15 @@ global_conf = {
end
-local lr_half = false --can not be local, to be set by loadstring
-local start_iter = -1
-local ppl_last = 100000
-local commands_str = "train:test"
-local commands = {}
-local test_iter = -1
+lr_half = false --can not be local, to be set by loadstring
+start_iter = -1
+start_lr = global_conf.lrate
+ppl_last = 100000
+commands_str = "train:test"
+commands = {}
+test_iter = -1
+--for testout(question)
+q_file = "/home/slhome/txh18/workspace/ptb/questionGen/gen/ptb.test.txt.q10rs1_Msss.adds"
if arg[2] ~= nil then
nerv.printf("%s applying arg[2](%s)...\n", global_conf.sche_log_pre, arg[2])
@@ -311,21 +349,25 @@ else
nerv.printf("%s no user setting, all default...\n", global_conf.sche_log_pre)
end
-global_conf.work_dir = global_conf.work_dir_base .. 'h' .. global_conf.hidden_size .. 'l' .. global_conf.layer_num --.. 'ch' .. global_conf.chunk_size .. 'ba' .. global_conf.batch_size .. 'slr' .. global_conf.lrate .. 'wc' .. global_conf.wcost
+global_conf.work_dir = global_conf.work_dir_base .. 'h' .. global_conf.hidden_size .. 'l' .. global_conf.layer_num .. 'ch' .. global_conf.chunk_size .. 'ba' .. global_conf.batch_size .. 'slr' .. global_conf.lrate .. 'wc' .. global_conf.wcost .. 'dr' .. global_conf.dropout_str
global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
global_conf.param_fn = global_conf.work_dir .. "/params"
global_conf.dropout_list = nerv.SUtil.parse_schedule(global_conf.dropout_str)
-global_conf.log_fn = global_conf.work_dir .. '/lstm_tnn_' .. commands_str .. '_log'
+global_conf.log_fn = global_conf.work_dir .. '/log_lstm_tnn_' .. commands_str ..os.date("_TT%m_%d_%X",os.time())
+global_conf.log_fn, _ = string.gsub(global_conf.log_fn, ':', '-')
commands = nerv.SUtil.parse_commands_set(commands_str)
-nerv.printf("%s creating work_dir...\n", global_conf.sche_log_pre)
-nerv.LMUtil.wait(1)
+global_conf.lrate = start_lr
+
+nerv.printf("%s creating work_dir(%s)...\n", global_conf.sche_log_pre, global_conf.work_dir)
+nerv.LMUtil.wait(2)
os.execute("mkdir -p "..global_conf.work_dir)
os.execute("cp " .. global_conf.train_fn .. " " .. global_conf.train_fn_shuf)
--redirecting log outputs!
nerv.SUtil.log_redirect(global_conf.log_fn)
+nerv.LMUtil.wait(2)
----------------printing options---------------------------------
nerv.printf("%s printing global_conf...\n", global_conf.sche_log_pre)
@@ -335,11 +377,11 @@ end
nerv.LMUtil.wait(2)
nerv.printf("%s printing training scheduling options...\n", global_conf.sche_log_pre)
-nerv.printf("lr_half:%s\n", tostring(lr_half))
-nerv.printf("start_iter:%s\n", tostring(start_iter))
-nerv.printf("ppl_last:%s\n", tostring(ppl_last))
-nerv.printf("commds_str:%s\n", commands_str)
-nerv.printf("test_iter:%s\n", tostring(test_iter))
+nerv.printf("lr_half:\t%s\n", tostring(lr_half))
+nerv.printf("start_iter:\t%s\n", tostring(start_iter))
+nerv.printf("ppl_last:\t%s\n", tostring(ppl_last))
+nerv.printf("commands_str:\t%s\n", commands_str)
+nerv.printf("test_iter:\t%s\n", tostring(test_iter))
nerv.printf("%s printing training scheduling end.\n", global_conf.sche_log_pre)
nerv.LMUtil.wait(2)
------------------printing options end------------------------------
@@ -441,3 +483,22 @@ if commands["test"] == 1 then
global_conf.dropout_rate = 0
LMTrainer.lm_process_file_rnn(global_conf, global_conf.test_fn, tnn, false) --false update!
end --if commands["test"]
+
+if commands["testout"] == 1 then
+ nerv.printf("===TEST OUT===\n")
+ nerv.printf("q_file:\t%s\n", q_file)
+ local q_fn = q_file --qdata_dir .. '/' .. q_file
+ global_conf.sche_log_pre = "[SCHEDULER TESTOUT]:"
+ if final_iter ~= -1 and test_iter == -1 then
+ test_iter = final_iter
+ end
+ if test_iter == -1 then
+ test_iter = "final"
+ end
+ tnn = load_net(global_conf, test_iter)
+ global_conf.dropout_rate = 0
+ LMTrainer.lm_process_file_rnn(global_conf, q_fn, tnn, false,
+ {["one_sen_report"] = true}) --false update!
+end --if commands["testout"]
+
+
diff --git a/nerv/examples/lmptb/m-tests/some-text-chn b/nerv/examples/lmptb/m-tests/some-text-chn
new file mode 100644
index 0000000..da474ce
--- /dev/null
+++ b/nerv/examples/lmptb/m-tests/some-text-chn
@@ -0,0 +1,5 @@
+</s> 你好 我 是 一个 人 </s>
+</s> 想 一起 玩 吗 </s>
+</s> 一个 人 很 好 玩 </s>
+</s> 不 想 一个 人 玩 </s>
+</s> 不 想 一个 人 玩 </s>
diff --git a/nerv/layer/gate_fff.lua b/nerv/layer/gate_fff.lua
index 751dde1..6082e27 100644
--- a/nerv/layer/gate_fff.lua
+++ b/nerv/layer/gate_fff.lua
@@ -1,36 +1,33 @@
-local GateFFFLayer = nerv.class('nerv.GateFFFLayer', 'nerv.Layer')
+local GateFFFLayer = nerv.class('nerv.GateFLayer', 'nerv.Layer') --Full matrix gate
function GateFFFLayer:__init(id, global_conf, layer_conf)
self.id = id
self.dim_in = layer_conf.dim_in
self.dim_out = layer_conf.dim_out
self.gconf = global_conf
-
- self.ltp1 = self:find_param("ltp1", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[1], self.dim_out[1]}) --layer_conf.ltp
- self.ltp2 = self:find_param("ltp2", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[2], self.dim_out[1]}) --layer_conf.ltp
- self.ltp3 = self:find_param("ltp3", layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[3], self.dim_out[1]}) --layer_conf.ltp
+
+ for i = 1, #self.dim_in do
+ self["ltp" .. i] = self:find_param("ltp" .. i, layer_conf, global_conf, nerv.LinearTransParam, {self.dim_in[i], self.dim_out[1]}) --layer_conf.ltp
+ end
self.bp = self:find_param("bp", layer_conf, global_conf, nerv.BiasParam, {1, self.dim_out[1]})--layer_conf.bp
- self:check_dim_len(3, 1) -- exactly one input and one output
+ self:check_dim_len(-1, 1) --accept multiple inputs
end
function GateFFFLayer:init(batch_size)
- if self.ltp1.trans:ncol() ~= self.bp.trans:ncol() or
- self.ltp2.trans:ncol() ~= self.bp.trans:ncol() or
- self.ltp3.trans:ncol() ~= self.bp.trans:ncol() then
- nerv.error("mismatching dimensions of linear transform and bias paramter")
- end
- if self.dim_in[1] ~= self.ltp1.trans:nrow() or
- self.dim_in[2] ~= self.ltp2.trans:nrow() or
- self.dim_in[3] ~= self.ltp3.trans:nrow() then
- nerv.error("mismatching dimensions of linear transform parameter and input")
+ for i = 1, #self.dim_in do
+ if self["ltp" .. i].trans:ncol() ~= self.bp.trans:ncol() then
+            nerv.error("mismatching dimensions of linear transform and bias parameter")
+ end
+ if self.dim_in[i] ~= self["ltp" .. i].trans:nrow() then
+ nerv.error("mismatching dimensions of linear transform parameter and input")
+ end
+ self["ltp"..i]:train_init()
end
+
if self.dim_out[1] ~= self.ltp1.trans:ncol() then
nerv.error("mismatching dimensions of linear transform parameter and output")
end
- self.ltp1:train_init()
- self.ltp2:train_init()
- self.ltp3:train_init()
self.bp:train_init()
self.err_bakm = self.gconf.cumat_type(batch_size, self.dim_out[1])
end
@@ -44,8 +41,9 @@ end
function GateFFFLayer:propagate(input, output)
-- apply linear transform
output[1]:mul(input[1], self.ltp1.trans, 1.0, 0.0, 'N', 'N')
- output[1]:mul(input[2], self.ltp2.trans, 1.0, 1.0, 'N', 'N')
- output[1]:mul(input[3], self.ltp3.trans, 1.0, 1.0, 'N', 'N')
+ for i = 2, #self.dim_in do
+ output[1]:mul(input[i], self["ltp" .. i].trans, 1.0, 1.0, 'N', 'N')
+ end
-- add bias
output[1]:add_row(self.bp.trans, 1.0)
output[1]:sigmoid(output[1])
@@ -53,19 +51,23 @@ end
function GateFFFLayer:back_propagate(bp_err, next_bp_err, input, output)
self.err_bakm:sigmoid_grad(bp_err[1], output[1])
- next_bp_err[1]:mul(self.err_bakm, self.ltp1.trans, 1.0, 0.0, 'N', 'T')
- next_bp_err[2]:mul(self.err_bakm, self.ltp2.trans, 1.0, 0.0, 'N', 'T')
- next_bp_err[3]:mul(self.err_bakm, self.ltp3.trans, 1.0, 0.0, 'N', 'T')
+ for i = 1, #self.dim_in do
+ next_bp_err[i]:mul(self.err_bakm, self["ltp" .. i].trans, 1.0, 0.0, 'N', 'T')
+ end
end
function GateFFFLayer:update(bp_err, input, output)
self.err_bakm:sigmoid_grad(bp_err[1], output[1])
- self.ltp1:update_by_err_input(self.err_bakm, input[1])
- self.ltp2:update_by_err_input(self.err_bakm, input[2])
- self.ltp3:update_by_err_input(self.err_bakm, input[3])
+ for i = 1, #self.dim_in do
+ self["ltp" .. i]:update_by_err_input(self.err_bakm, input[i])
+ end
self.bp:update_by_gradient(self.err_bakm:colsum())
end
function GateFFFLayer:get_params()
- return nerv.ParamRepo({self.ltp1, self.ltp2, self.ltp3, self.bp})
+ local pr = nerv.ParamRepo({self.bp})
+ for i = 1, #self.dim_in do
+ pr:add(self["ltp" .. i].id, self["ltp" .. i])
+ end
+ return pr
end
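With this change the gate layer (now registered as nerv.GateFLayer) no longer hard-codes three inputs: it builds one LinearTransParam per entry of dim_in, sums the transformed inputs plus a bias, and applies a sigmoid, and check_dim_len(-1, 1) accepts any number of inputs. A hedged construction sketch (layer ids here are just example names); the two-input form is what lstm_t_v2.lua uses, the three-input form matches the gates in lstm_t.lua:

    local h  = global_conf.hidden_size
    local pr = nerv.ParamRepo()
    -- gate over x and h only (as in lmptb/lstm_t_v2.lua)
    local gate_xh = nerv.GateFLayer("forgetGateL", global_conf,
        {["dim_in"] = {h, h}, ["dim_out"] = {h}, ["pr"] = pr})
    -- gate over x, h and c (as in tnn/layersT/lstm_t.lua)
    local gate_xhc = nerv.GateFLayer("inputGateL", global_conf,
        {["dim_in"] = {h, h, h}, ["dim_out"] = {h}, ["pr"] = pr})
    gate_xh:init(global_conf.batch_size)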
diff --git a/nerv/lib/io/chunk_file.c b/nerv/lib/io/chunk_file.c
index 4e00b0b..71db820 100644
--- a/nerv/lib/io/chunk_file.c
+++ b/nerv/lib/io/chunk_file.c
@@ -112,7 +112,7 @@ static ChunkFile *open_read(const char *fn, Status *status) {
for (i = 0;; offset += chunk_len, i++)
{
ChunkInfo *cip;
- fprintf(stderr, "reading chunk %d from %d\n", i, (int)offset);
+ fprintf(stdout, "reading chunk %d from %d\n", i, (int)offset);
/* skip to the begining of chunk i */
if (fseeko(fp, offset, SEEK_SET) != 0)
{
diff --git a/nerv/tnn/layer_dag_t.lua b/nerv/tnn/layer_dag_t.lua
index e3a9316..b651f4e 100644
--- a/nerv/tnn/layer_dag_t.lua
+++ b/nerv/tnn/layer_dag_t.lua
@@ -142,7 +142,7 @@ function DAGLayerT:__init(id, global_conf, layer_conf)
end
function DAGLayerT:init(batch_size, chunk_size)
- nerv.info("initing DAGLayerT %s...\n", self.id)
+ nerv.info("initing DAGLayerT %s...", self.id)
if chunk_size == nil then
chunk_size = 1
nerv.info("(Initing DAGLayerT) chunk_size is nil, setting it to default 1\n")
diff --git a/nerv/tnn/layersT/lstm_t.lua b/nerv/tnn/layersT/lstm_t.lua
index ded6058..04d0600 100644
--- a/nerv/tnn/layersT/lstm_t.lua
+++ b/nerv/tnn/layersT/lstm_t.lua
@@ -24,19 +24,19 @@ function LSTMLayerT:__init(id, global_conf, layer_conf)
[ap("inputHDup")] = {{}, {["dim_in"] = {self.dim_in[2]},
["dim_out"] = {self.dim_in[2], self.dim_in[2], self.dim_in[2], self.dim_in[2]}, ["lambda"] = {1}}},
[ap("inputCDup")] = {{}, {["dim_in"] = {self.dim_in[3]},
- ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3], self.dim_in[3]}, ["lambda"] = {1}}},
+ ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3]}, ["lambda"] = {1}}},
[ap("mainCDup")] = {{}, {["dim_in"] = {self.dim_in[3], self.dim_in[3]}, ["dim_out"] = {self.dim_in[3], self.dim_in[3], self.dim_in[3]},
["lambda"] = {1, 1}}},
},
["nerv.AffineLayer"] = {
- [ap("mainAffineL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
+ [ap("mainAffineL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2]},
["dim_out"] = {self.dim_out[1]}, ["pr"] = pr}},
},
["nerv.TanhLayer"] = {
[ap("mainTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
[ap("outputTanhL")] = {{}, {["dim_in"] = {self.dim_out[1]}, ["dim_out"] = {self.dim_out[1]}}},
},
- ["nerv.GateFFFLayer"] = {
+ ["nerv.GateFLayer"] = {
[ap("forgetGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
["dim_out"] = {self.dim_in[3]}, ["pr"] = pr}},
[ap("inputGateL")] = {{}, {["dim_in"] = {self.dim_in[1], self.dim_in[2], self.dim_in[3]},
@@ -61,21 +61,20 @@ function LSTMLayerT:__init(id, global_conf, layer_conf)
[ap("inputXDup[1]")] = ap("mainAffineL[1]"),
[ap("inputHDup[1]")] = ap("mainAffineL[2]"),
- [ap("inputCDup[1]")] = ap("mainAffineL[3]"),
[ap("mainAffineL[1]")] = ap("mainTanhL[1]"),
[ap("inputXDup[2]")] = ap("inputGateL[1]"),
[ap("inputHDup[2]")] = ap("inputGateL[2]"),
- [ap("inputCDup[2]")] = ap("inputGateL[3]"),
+ [ap("inputCDup[1]")] = ap("inputGateL[3]"),
[ap("inputXDup[3]")] = ap("forgetGateL[1]"),
[ap("inputHDup[3]")] = ap("forgetGateL[2]"),
- [ap("inputCDup[3]")] = ap("forgetGateL[3]"),
+ [ap("inputCDup[2]")] = ap("forgetGateL[3]"),
[ap("mainTanhL[1]")] = ap("inputGMulL[1]"),
[ap("inputGateL[1]")] = ap("inputGMulL[2]"),
- [ap("inputCDup[4]")] = ap("forgetGMulL[1]"),
+ [ap("inputCDup[3]")] = ap("forgetGMulL[1]"),
[ap("forgetGateL[1]")] = ap("forgetGMulL[2]"),
[ap("inputGMulL[1]")] = ap("mainCDup[1]"),
diff --git a/nerv/tnn/sutil.lua b/nerv/tnn/sutil.lua
index 78f88c0..6a968b7 100644
--- a/nerv/tnn/sutil.lua
+++ b/nerv/tnn/sutil.lua
@@ -70,6 +70,7 @@ function Util.log_redirect(fn)
function (fmt, ...)
io.write(nerv.sprintf(fmt, ...))
nerv.log_fh:write(nerv.sprintf(fmt, ...))
+ nerv.log_fh:flush()
end
nerv.error =
function (fmt, ...)
diff --git a/nerv/tnn/tnn.lua b/nerv/tnn/tnn.lua
index 56c9dc0..cf02123 100644
--- a/nerv/tnn/tnn.lua
+++ b/nerv/tnn/tnn.lua
@@ -59,12 +59,12 @@ nerv.TNN.FC.HAS_INPUT = 1
nerv.TNN.FC.HAS_LABEL = 2
nerv.TNN.FC.SEQ_NORM = bit.bor(nerv.TNN.FC.HAS_INPUT, nerv.TNN.FC.HAS_LABEL) --This instance have both input and label
-function TNN.make_initial_store(st, p, dim, batch_size, chunk_size, global_conf, st_c, p_c, t_c)
- --Return a table of matrix storage from time (1-chunk_size)..(2*chunk_size)
+function TNN.make_initial_store(st, p, dim, batch_size, chunk_size, extend_t, global_conf, st_c, p_c, t_c)
+ --Return a table of matrix storage from time (1-extend_t)..(chunk_size+extend_t)
if (type(st) ~= "table") then
nerv.error("st should be a table")
end
- for i = 1 - chunk_size - 1, chunk_size * 2 + 1 do --intentionally allocated more time, should be [1-chunk_size, chunk_size*2]
+ for i = 1 - extend_t - 1, chunk_size + extend_t + 1 do --intentionally allocated more time
if (st[i] == nil) then
st[i] = {}
end
@@ -97,6 +97,13 @@ function TNN:__init(id, global_conf, layer_conf)
if self.clip_t > 0 then
nerv.info("tnn(%s) will clip gradient across time with %f...", id, self.clip_t)
end
+
+ self.extend_t = layer_conf.extend_t --TNN will allocate storage of time for 1-extend_t .. chunk_size+extend_t
+ if self.extend_t == nil then
+ self.extend_t = 5
+ end
+    nerv.info("tnn(%s) will extend storage %d time steps beyond the MB border...", id, self.extend_t)
+
local layers = {}
local inputs_p = {} --map:port of the TNN to layer ref and port
local outputs_p = {}
@@ -171,11 +178,11 @@ function TNN:init(batch_size, chunk_size)
nerv.error("layer %s has a zero dim port", ref_from.layer.id)
end
- print("TNN initing storage", ref_from.layer.id, "->", ref_to.layer.id)
+ nerv.info("TNN initing storage %s->%s", ref_from.layer.id, ref_to.layer.id)
ref_to.inputs_matbak_p[port_to] = self.gconf.cumat_type(batch_size, dim)
- self.make_initial_store(ref_from.outputs_m, port_from, dim, batch_size, chunk_size, self.gconf, ref_to.inputs_m, port_to, time)
+ self.make_initial_store(ref_from.outputs_m, port_from, dim, batch_size, chunk_size, self.extend_t, self.gconf, ref_to.inputs_m, port_to, time)
ref_from.err_inputs_matbak_p[port_from] = self.gconf.cumat_type(batch_size, dim)
- self.make_initial_store(ref_from.err_inputs_m, port_from, dim, batch_size, chunk_size, self.gconf, ref_to.err_outputs_m, port_to, time)
+ self.make_initial_store(ref_from.err_inputs_m, port_from, dim, batch_size, chunk_size, self.extend_t, self.gconf, ref_to.err_outputs_m, port_to, time)
end
@@ -184,8 +191,8 @@ function TNN:init(batch_size, chunk_size)
for i = 1, #self.dim_out do --Init storage for output ports
local ref = self.outputs_p[i].ref
local p = self.outputs_p[i].port
- self.make_initial_store(ref.outputs_m, p, self.dim_out[i], batch_size, chunk_size, self.gconf, self.outputs_m, i, 0)
- self.make_initial_store(ref.err_inputs_m, p, self.dim_out[i], batch_size, chunk_size, self.gconf, self.err_inputs_m, i, 0)
+ self.make_initial_store(ref.outputs_m, p, self.dim_out[i], batch_size, chunk_size, self.extend_t, self.gconf, self.outputs_m, i, 0)
+ self.make_initial_store(ref.err_inputs_m, p, self.dim_out[i], batch_size, chunk_size, self.extend_t, self.gconf, self.err_inputs_m, i, 0)
end
self.inputs_m = {}
@@ -193,8 +200,8 @@ function TNN:init(batch_size, chunk_size)
for i = 1, #self.dim_in do --Init storage for input ports
local ref = self.inputs_p[i].ref
local p = self.inputs_p[i].port
- self.make_initial_store(ref.inputs_m, p, self.dim_in[i], batch_size, chunk_size, self.gconf, self.inputs_m, i, 0)
- self.make_initial_store(ref.err_outputs_m, p, self.dim_in[i], batch_size, chunk_size, self.gconf, self.err_outputs_m, i, 0)
+ self.make_initial_store(ref.inputs_m, p, self.dim_in[i], batch_size, chunk_size, self.extend_t, self.gconf, self.inputs_m, i, 0)
+ self.make_initial_store(ref.err_outputs_m, p, self.dim_in[i], batch_size, chunk_size, self.extend_t, self.gconf, self.err_outputs_m, i, 0)
end
for id, ref in pairs(self.layers) do --Calling init for child layers
@@ -260,7 +267,7 @@ function TNN:flush_all() --flush all history and activation
local _, ref
for _, ref in pairs(self.layers) do
for i = 1, #ref.dim_in do
- for t = 1 - self.chunk_size, self.chunk_size * 2 do
+ for t = 1 - self.extend_t, self.chunk_size + self.extend_t do
ref.inputs_m[t][i]:fill(self.gconf.nn_act_default)
if (ref.inputs_b[t] == nil) then
ref.inputs_b[t] = {}
@@ -274,7 +281,7 @@ function TNN:flush_all() --flush all history and activation
end
end
for i = 1, #ref.dim_out do
- for t = 1 - self.chunk_size, self.chunk_size * 2 do
+ for t = 1 - self.extend_t, self.chunk_size + self.extend_t do
ref.outputs_m[t][i]:fill(self.gconf.nn_act_default)
if (ref.outputs_b[t] == nil) then
ref.outputs_b[t] = {}
@@ -302,13 +309,13 @@ end
function TNN:move_right_to_nextmb(list_t) --move output history activations of 1..chunk_size to 1-chunk_size..0
if list_t == nil then
list_t = {}
- for i = 1, self.chunk_size do
- list_t[i] = i - self.chunk_size
+ for i = self.extend_t, 1, -1 do
+ list_t[i] = 1 - i
end
end
for i = 1, #list_t do
t = list_t[i]
- if t < 1 - self.chunk_size or t > 0 then
+ if t < 1 - self.extend_t or t > 0 then
nerv.error("MB move range error")
end
for id, ref in pairs(self.layers) do
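The tnn.lua changes above replace the fixed storage window of 1-chunk_size .. 2*chunk_size with a configurable extend_t (default 5), so the TNN only allocates and flushes extend_t extra time steps on each side of the chunk, and move_right_to_nextmb by default copies only the last extend_t output steps. A sketch of passing it explicitly through layer_conf, assuming layerRepo and connections_t are built as in prepare_tnn above:

    local tnn = nerv.TNN("TNN", global_conf, {
        ["dim_in"]      = {1, global_conf.vocab:size()},
        ["dim_out"]     = {1},
        ["sub_layers"]  = layerRepo,
        ["connections"] = connections_t,
        ["clip_t"]      = global_conf.clip_t,
        ["extend_t"]    = 5, -- storage covers time 1-extend_t .. chunk_size+extend_t
    })
    tnn:init(global_conf.batch_size, global_conf.chunk_size)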