require 'lmptb.lmvocab'
require 'lmptb.lmfeeder'
require 'lmptb.lmutil'
require 'lmptb.layer.init'
--require 'tnn.init'
require 'lmptb.lmseqreader'
require 'lm_trainer'
--[[global function rename]]--
--local printf = nerv.printf
local LMTrainer = nerv.LMTrainer
--[[global function rename ends]]--
--global_conf: table
--iter: int, the iteration whose parameters should be loaded (-1 means first time)
--Returns: nil (the ParamRepo is stored in global_conf.paramRepo)
function prepare_parameters(global_conf, iter)
nerv.printf("%s preparing parameters...\n", global_conf.sche_log_pre)
global_conf.paramRepo = nerv.ParamRepo()
local paramRepo = global_conf.paramRepo
if iter == -1 then --first time
nerv.printf("%s first time, prepare some pre-set parameters, and leaving other parameters to auto-generation...\n", global_conf.sche_log_pre)
local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
f:close()
--[[
ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf)
ltp_ih.trans = global_conf.cumat_type(global_conf.vocab:size(), global_conf.hidden_size) --index 0 is for zero, others correspond to vocab index(starting from 1)
ltp_ih.trans:generate(global_conf.param_random)
ltp_hh = nerv.LinearTransParam("ltp_hh", global_conf)
ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size)
ltp_hh.trans:generate(global_conf.param_random)
--ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
--ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
--ltp_ho.trans:generate(global_conf.param_random)
bp_h = nerv.BiasParam("bp_h", global_conf)
bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size)
bp_h.trans:generate(global_conf.param_random)
--bp_o = nerv.BiasParam("bp_o", global_conf)
--bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
--bp_o.trans:generate(global_conf.param_random)
local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
f:write_chunk(ltp_ih)
f:write_chunk(ltp_hh)
--f:write_chunk(ltp_ho)
f:write_chunk(bp_h)
--f:write_chunk(bp_o)
f:close()
]]--
return nil
end
nerv.printf("%s loading parameter from file %s...\n", global_conf.sche_log_pre, global_conf.param_fn .. '.' .. tostring(iter))
paramRepo:import({global_conf.param_fn .. '.' .. tostring(iter)}, nil, global_conf)
nerv.printf("%s preparing parameters end.\n", global_conf.sche_log_pre)
return nil
end
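
--A note on the parameter file layout (inferred from the calls above and the
--training loop below): the parameters of iteration i live in
--global_conf.param_fn .. '.' .. i ("params.0", "params.1", ...), and the best
--network is finally copied to "params.final".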
--global_conf: table
--Returns: nerv.LayerRepo
function prepare_layers(global_conf)
nerv.printf("%s preparing layers...\n", global_conf.sche_log_pre)
local pr = global_conf.paramRepo
local du = false
--local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["break_id"] = global_conf.vocab:get_sen_entry().id, ["independent"] = global_conf.independent, ["clip"] = 10}}
--local recurrentLconfig = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10, ["direct_update"] = du, ["pr"] = pr}}
local layers = {
["nerv.GRULayerT"] = {
["gruL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["pr"] = pr}},
},
["nerv.DropoutLayerT"] = {
["dropoutL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}},
},
["nerv.SelectLinearLayer"] = {
["selectL1"] = {{}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab, ["pr"] = pr}},
},
["nerv.CombinerLayer"] = {
["combinerL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
},
["nerv.AffineLayer"] = {
["outputL"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du, ["pr"] = pr}},
},
["nerv.SoftmaxCELayerT"] = {
["softmaxL"] = {{}, {["dim_in"] = {global_conf.vocab:size(), global_conf.vocab:size()}, ["dim_out"] = {1}}},
},
}
for l = 2, global_conf.layer_num do
layers["nerv.DropoutLayerT"]["dropoutL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
layers["nerv.GRULayerT"]["gruL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["pr"] = pr}}
layers["nerv.CombinerLayer"]["combinerL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}}
end
    --[[ --we do not need those in the new tnn framework
    printf("%s adding %d bptt layers...\n", global_conf.sche_log_pre, global_conf.bptt)
    for i = 1, global_conf.bptt do
        layers["nerv.IndRecurrentLayer"]["recurrentL" .. (i + 1)] = recurrentLconfig
        layers["nerv.SigmoidLayer"]["sigmoidL" .. (i + 1)] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
        layers["nerv.SelectLinearLayer"]["selectL" .. (i + 1)] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}}
    end
    --]]

    local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
    nerv.printf("%s preparing layers end.\n", global_conf.sche_log_pre)
    return layerRepo
end
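
--For reference, the per-frame network built from this repo (a sketch of the
--connections wired up in prepare_tnn below, not executed code): each word id
--goes through selectL1 (word embedding), then for each layer l:
--gruL<l> -> combinerL<l>, where one combiner copy feeds the next frame's GRU
--and the other feeds dropoutL<l>; the top dropout output goes through outputL
--(affine) into softmaxL together with the target label from <input>[2].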
--global_conf: table
--layerRepo: nerv.LayerRepo
--Returns: a nerv.TNN
function prepare_tnn(global_conf, layerRepo)
nerv.printf("%s Generate and initing TNN ...\n", global_conf.sche_log_pre)
--input: input_w, input_w, ... input_w_now, last_activation
local connections_t = {
{"<input>[1]", "selectL1[1]", 0},
--{"selectL1[1]", "recurrentL1[1]", 0},
--{"recurrentL1[1]", "sigmoidL1[1]", 0},
--{"sigmoidL1[1]", "combinerL1[1]", 0},
--{"combinerL1[1]", "recurrentL1[2]", 1},
{"selectL1[1]", "gruL1[1]", 0},
{"gruL1[1]", "combinerL1[1]", 0},
{"combinerL1[1]", "gruL1[2]", 1},
{"combinerL1[2]", "dropoutL1[1]", 0},
{"dropoutL"..global_conf.layer_num.."[1]", "outputL[1]", 0},
{"outputL[1]", "softmaxL[1]", 0},
{"<input>[2]", "softmaxL[2]", 0},
{"softmaxL[1]", "<output>[1]", 0}
}
for l = 2, global_conf.layer_num do
table.insert(connections_t, {"dropoutL"..(l-1).."[1]", "gruL"..l.."[1]", 0})
table.insert(connections_t, {"gruL"..l.."[1]", "combinerL"..l.."[1]", 0})
table.insert(connections_t, {"combinerL"..l.."[1]", "gruL"..l.."[2]", 1})
table.insert(connections_t, {"combinerL"..l.."[2]", "dropoutL"..l.."[1]", 0})
end
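
    --For example, with global_conf.layer_num = 2 the loop above appends:
    --  {"dropoutL1[1]", "gruL2[1]", 0}    (stack the second GRU on the first)
    --  {"gruL2[1]", "combinerL2[1]", 0}
    --  {"combinerL2[1]", "gruL2[2]", 1}   (the recurrent connection)
    --The third field of each connection is the time shift, so a value of 1
    --feeds this frame's output to the next frame's input.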
    --[[
    printf("%s printing DAG connections:\n", global_conf.sche_log_pre)
    for key, value in pairs(connections_t) do
        printf("\t%s->%s\n", key, value)
    end
    ]]--

    local tnn = nerv.TNN("TNN", global_conf, {["dim_in"] = {1, global_conf.vocab:size()},
            ["dim_out"] = {1}, ["sub_layers"] = layerRepo,
            ["connections"] = connections_t, ["clip_t"] = global_conf.clip_t,
        })

    tnn:init(global_conf.batch_size, global_conf.chunk_size)

    nerv.printf("%s initializing TNN end.\n", global_conf.sche_log_pre)
    return tnn
end
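
--global_conf: table
--next_iter: int, the iteration whose saved parameters should be loaded
--Returns: nerv.TNN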
function load_net(global_conf, next_iter)
    prepare_parameters(global_conf, next_iter)
    local layerRepo = prepare_layers(global_conf)
    local tnn = prepare_tnn(global_conf, layerRepo)
    return tnn
end
local train_fn, valid_fn, test_fn
global_conf = {}
local set = arg[1] --data set selector: "ptb", "msr_sc", "twitter", or anything else for the small test config
if (set == "ptb") then
    root_dir = '/home/slhome/txh18/workspace'
    data_dir = root_dir .. '/ptb/DATA'
    train_fn = data_dir .. '/ptb.train.txt.adds'
    valid_fn = data_dir .. '/ptb.valid.txt.adds'
    test_fn = data_dir .. '/ptb.test.txt.adds'
    vocab_fn = data_dir .. '/vocab'
    qdata_dir = root_dir .. '/ptb/questionGen/gen'

    global_conf = {
        lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5,
        cumat_type = nerv.CuMatrixFloat,
        mmat_type = nerv.MMatrixFloat,
        nn_act_default = 0,

        hidden_size = 300,
        layer_num = 1,
        chunk_size = 15,
        batch_size = 32,
        max_iter = 35,
        lr_decay = 1.003,
        decay_iter = 10,
        param_random = function() return (math.random() / 5 - 0.1) end,
        dropout_str = "0.5",

        train_fn = train_fn,
        valid_fn = valid_fn,
        test_fn = test_fn,
        vocab_fn = vocab_fn,
        max_sen_len = 90,
        sche_log_pre = "[SCHEDULER]:",
        log_w_num = 40000, --log a progress message every log_w_num processed words
        timer = nerv.Timer(),
        work_dir_base = root_dir .. '/ptb/EXP-nerv/grulm_v1.0'
    }
elseif (set == "msr_sc") then
    data_dir = '/home/slhome/txh18/workspace/sentenceCompletion/DATA_PV2'
    train_fn = data_dir .. '/normed_all.sf.len60.adds.train'
    valid_fn = data_dir .. '/normed_all.sf.len60.adds.dev'
    test_fn = data_dir .. '/answer_normed.adds'
    vocab_fn = data_dir .. '/normed_all.choose.vocab30000.addqvocab'

    global_conf = {
        lrate = 1, wcost = 1e-6, momentum = 0,
        cumat_type = nerv.CuMatrixFloat,
        mmat_type = nerv.MMatrixFloat,
        nn_act_default = 0,

        hidden_size = 300,
        layer_num = 1,
        chunk_size = 15,
        batch_size = 10,
        max_iter = 30,
        decay_iter = 10,
        lr_decay = 1.003,
        param_random = function() return (math.random() / 5 - 0.1) end,
        dropout_str = "0",

        train_fn = train_fn,
        valid_fn = valid_fn,
        test_fn = test_fn,
        vocab_fn = vocab_fn,
        sche_log_pre = "[SCHEDULER]:",
        log_w_num = 400000, --log a progress message every log_w_num processed words
        timer = nerv.Timer(),
        work_dir_base = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test'
    }
elseif (set == "twitter") then
    root_dir = '/home/slhome/txh18/workspace'
    data_dir = root_dir .. '/twitter_new/DATA'
    train_fn = data_dir .. '/twitter.choose2.adds'
    valid_fn = data_dir .. '/twitter.valid.adds'
    test_fn = data_dir .. '/comm.test.choose-ppl.adds'
    vocab_fn = data_dir .. '/vocab.25000'
    --qdata_dir = root_dir .. '/ptb/questionGen/gen'

    global_conf = {
        lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5,
        cumat_type = nerv.CuMatrixFloat,
        mmat_type = nerv.MMatrixFloat,
        nn_act_default = 0,

        hidden_size = 300,
        layer_num = 1,
        chunk_size = 15,
        batch_size = 32,
        max_iter = 30,
        lr_decay = 1.003,
        decay_iter = 10,
        param_random = function() return (math.random() / 5 - 0.1) end,
        dropout_str = "0.5",

        train_fn = train_fn,
        valid_fn = valid_fn,
        test_fn = test_fn,
        vocab_fn = vocab_fn,
        max_sen_len = 32,
        sche_log_pre = "[SCHEDULER]:",
        log_w_num = 40000, --log a progress message every log_w_num processed words
        timer = nerv.Timer(),
        work_dir_base = root_dir .. '/twitter_new/EXP-nerv/grulm_v1.0'
    }
else
    valid_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
    train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
    test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
    vocab_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'

    global_conf = {
        lrate = 0.01, wcost = 1e-5, momentum = 0,
        cumat_type = nerv.CuMatrixFloat,
        mmat_type = nerv.MMatrixFloat,
        nn_act_default = 0,

        hidden_size = 20,
        layer_num = 1,
        chunk_size = 2,
        batch_size = 10,
        max_iter = 3,
        param_random = function() return (math.random() / 5 - 0.1) end,
        dropout_str = "0",

        train_fn = train_fn,
        valid_fn = valid_fn,
        test_fn = test_fn,
        max_sen_len = 80,
        lr_decay = 1.003,
        decay_iter = 10,
        vocab_fn = vocab_fn,
        sche_log_pre = "[SCHEDULER]:",
        log_w_num = 10, --log a progress message every log_w_num processed words
        timer = nerv.Timer(),
        work_dir_base = '/home/slhome/txh18/workspace/nerv/play/testEXP/tnn_lstmlm_test'
    }
end
lr_half = false --cannot be local: these scheduling globals may be overridden by loadstring(arg[2]) below
start_iter = -1
start_lr = nil
ppl_last = 100000
commands_str = "train:test"
commands = {}
test_iter = -1
--for the testout (question) command
q_file = "/home/slhome/txh18/workspace/ptb/questionGen/gen/ptb.test.txt.q10rs1_Msss.adds"
if arg[2] ~= nil then
nerv.printf("%s applying arg[2](%s)...\n", global_conf.sche_log_pre, arg[2])
loadstring(arg[2])()
nerv.LMUtil.wait(0.5)
else
nerv.printf("%s no user setting, all default...\n", global_conf.sche_log_pre)
end
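
--For example (hypothetical invocations): passing "start_iter = 3" as arg[2]
--resumes training from the parameters saved at iteration 2, and passing
--"commands_str = 'test'" runs only the final test; any of the scheduling
--globals above (lr_half, start_iter, start_lr, ppl_last, commands_str,
--test_iter) can be overridden this way.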
global_conf.work_dir = global_conf.work_dir_base .. 'h' .. global_conf.hidden_size .. 'l' .. global_conf.layer_num .. 'ch' .. global_conf.chunk_size .. 'ba' .. global_conf.batch_size .. 'slr' .. global_conf.lrate .. 'wc' .. global_conf.wcost .. 'dr' .. global_conf.dropout_str
global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
global_conf.param_fn = global_conf.work_dir .. "/params"
global_conf.dropout_list = nerv.SUtil.parse_schedule(global_conf.dropout_str)
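--dropout_str is parsed by nerv.SUtil.parse_schedule into a per-iteration
--dropout schedule; nerv.SUtil.sche_get (used in the training loop below)
--then picks the value for the current iteration.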
global_conf.log_fn = global_conf.work_dir .. '/log_lstm_tnn_' .. commands_str .. os.date("_TT%m_%d_%X", os.time())
global_conf.log_fn, _ = string.gsub(global_conf.log_fn, ':', '-')
commands = nerv.SUtil.parse_commands_set(commands_str)
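--commands_str is a ':'-separated list; after parsing, the enabled commands
--are tested as commands["train"] == 1, commands["test"] == 1, and
--commands["testout"] == 1 (see the blocks at the end of this script).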
if start_lr ~= nil then
    global_conf.lrate = start_lr
end
nerv.printf("%s creating work_dir(%s)...\n", global_conf.sche_log_pre, global_conf.work_dir)
nerv.LMUtil.wait(2)
os.execute("mkdir -p "..global_conf.work_dir)
os.execute("cp " .. global_conf.train_fn .. " " .. global_conf.train_fn_shuf)
--redirecting log outputs!
nerv.SUtil.log_redirect(global_conf.log_fn)
nerv.LMUtil.wait(2)
----------------printing options---------------------------------
nerv.printf("%s printing global_conf...\n", global_conf.sche_log_pre)
for id, value in pairs(global_conf) do
    nerv.printf("%s:\t%s\n", id, tostring(value))
end
nerv.LMUtil.wait(2)
nerv.printf("%s printing training scheduling options...\n", global_conf.sche_log_pre)
nerv.printf("lr_half:\t%s\n", tostring(lr_half))
nerv.printf("start_iter:\t%s\n", tostring(start_iter))
nerv.printf("ppl_last:\t%s\n", tostring(ppl_last))
nerv.printf("commands_str:\t%s\n", commands_str)
nerv.printf("test_iter:\t%s\n", tostring(test_iter))
nerv.printf("%s printing training scheduling end.\n", global_conf.sche_log_pre)
nerv.LMUtil.wait(2)
------------------printing options end------------------------------
math.randomseed(1)
local vocab = nerv.LMVocab()
global_conf["vocab"] = vocab
nerv.printf("%s building vocab...\n", global_conf.sche_log_pre)
global_conf.vocab:build_file(global_conf.vocab_fn, false)
ppl_rec = {}
local final_iter = -1
if commands["train"] == 1 then
    if start_iter == -1 then
        prepare_parameters(global_conf, -1) --write the pre-generated params to the param.0 file
    end

    if start_iter == -1 or start_iter == 0 then
        nerv.printf("===INITIAL VALIDATION===\n")
        local tnn = load_net(global_conf, 0)
        global_conf.paramRepo = tnn:get_params() --get the auto-generated params
        global_conf.paramRepo:export(global_conf.param_fn .. '.0', nil) --some parameters are auto-generated, so save them again to the param.0 file
        global_conf.dropout_rate = 0
        local result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.valid_fn, tnn, false) --false: do not update
        nerv.LMUtil.wait(1)
        ppl_rec[0] = {}
        ppl_rec[0].valid = result:ppl_all("rnn")
        ppl_last = ppl_rec[0].valid
        ppl_rec[0].train = 0
        ppl_rec[0].test = 0
        ppl_rec[0].lr = 0
        start_iter = 1
        nerv.printf("\n")
    end
    for iter = start_iter, global_conf.max_iter, 1 do
        final_iter = iter --for final testing
        global_conf.sche_log_pre = "[SCHEDULER ITER" .. iter .. " LR" .. global_conf.lrate .. "]:"
        tnn = load_net(global_conf, iter - 1)
        nerv.printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate)
        global_conf.dropout_rate = nerv.SUtil.sche_get(global_conf.dropout_list, iter)
        result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.train_fn_shuf, tnn, true) --true: update parameters
        global_conf.dropout_rate = 0
        ppl_rec[iter] = {}
        ppl_rec[iter].train = result:ppl_all("rnn")

        --shuffle the training file
        nerv.printf("%s shuffling training file\n", global_conf.sche_log_pre)
        os.execute('cp ' .. global_conf.train_fn_shuf .. ' ' .. global_conf.train_fn_shuf_bak)
        os.execute('cat ' .. global_conf.train_fn_shuf_bak .. ' | sort -R --random-source=/dev/zero > ' .. global_conf.train_fn_shuf)

        nerv.printf("===PEEK ON TEST %d===\n", iter)
        result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.test_fn, tnn, false) --false: do not update
        ppl_rec[iter].test = result:ppl_all("rnn")

        nerv.printf("===VALIDATION %d===\n", iter)
        result = LMTrainer.lm_process_file_rnn(global_conf, global_conf.valid_fn, tnn, false) --false: do not update
        ppl_rec[iter].valid = result:ppl_all("rnn")
        ppl_rec[iter].lr = global_conf.lrate
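
        --Learning-rate schedule (as implemented below): once the relative
        --improvement in validation PPL falls under lr_decay (and we are past
        --decay_iter), the rate is multiplied by 0.6; lr_half then stays true,
        --so the rate keeps decaying on every following iteration. An iteration
        --whose validation PPL does not improve is rejected and its parameters
        --are replaced by the previous iteration's.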
        if ((ppl_last / ppl_rec[iter].valid < global_conf.lr_decay or lr_half == true) and iter > global_conf.decay_iter) then
            global_conf.lrate = (global_conf.lrate * 0.6)
        end
        if ppl_rec[iter].valid < ppl_last then
            nerv.printf("%s PPL improves, saving net to file %s.%d...\n", global_conf.sche_log_pre, global_conf.param_fn, iter)
            global_conf.paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
        else
            nerv.printf("%s PPL did not improve, rejected, copying param file of last iter...\n", global_conf.sche_log_pre)
            os.execute('cp ' .. global_conf.param_fn .. '.' .. tostring(iter - 1) .. ' ' .. global_conf.param_fn .. '.' .. tostring(iter))
        end
        if ppl_last / ppl_rec[iter].valid < global_conf.lr_decay or lr_half == true then
            lr_half = true
        end
        if ppl_rec[iter].valid < ppl_last then
            ppl_last = ppl_rec[iter].valid
        end
        nerv.printf("\n")
        nerv.LMUtil.wait(2)
    end
nerv.info("saving final nn to param.final")
os.execute('cp ' .. global_conf.param_fn .. '.' .. tostring(final_iter) .. ' ' .. global_conf.param_fn .. '.final')
nerv.printf("===VALIDATION PPL record===\n")
for i, _ in pairs(ppl_rec) do
nerv.printf("<ITER%d LR%.5f train:%.3f valid:%.3f test:%.3f> \n", i, ppl_rec[i].lr, ppl_rec[i].train, ppl_rec[i].valid, ppl_rec[i].test)
end
nerv.printf("\n")
end --if commands["train"]
if commands["test"] == 1 then
nerv.printf("===FINAL TEST===\n")
global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
    if final_iter ~= -1 and test_iter == -1 then
        test_iter = final_iter
    end
    if test_iter == -1 then
        test_iter = "final"
    end
    tnn = load_net(global_conf, test_iter)
    global_conf.dropout_rate = 0
    LMTrainer.lm_process_file_rnn(global_conf, global_conf.test_fn, tnn, false) --false: do not update
end --if commands["test"]
if commands["testout"] == 1 then
nerv.printf("===TEST OUT===\n")
nerv.printf("q_file:\t%s\n", q_file)
local q_fn = q_file --qdata_dir .. '/' .. q_file
global_conf.sche_log_pre = "[SCHEDULER TESTOUT]:"
if final_iter ~= -1 and test_iter == -1 then
test_iter = final_iter
end
if test_iter == -1 then
test_iter = "final"
end
tnn = load_net(global_conf, test_iter)
global_conf.dropout_rate = 0
LMTrainer.lm_process_file_rnn(global_conf, q_fn, tnn, false,
{["one_sen_report"] = true}) --false update!
end --if commands["testout"]