require 'lmptb.lmvocab'
require 'lmptb.lmfeeder'
require 'lmptb.lmutil'
require 'lmptb.layer.init'
--require 'tnn.init'
require 'lmptb.lmseqreader'
require 'lm_trainer'
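--This script trains an LSTM language model on the TNN framework: prepare_parameters()
--loads (or initializes) a ParamRepo, prepare_layers() declares the layer stack in a LayerRepo,
--and prepare_tnn() wires those layers into a TNN with time-shifted recurrent connections.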
--[[global function rename]]--
--local printf = nerv.printf
local LMTrainer = nerv.LMTrainer
--[[global function rename ends]]--
--global_conf: table
--iter: int, index of the saved parameter file to load, or -1 for the first run
--Returns: nil (the loaded nerv.ParamRepo is stored in global_conf.paramRepo)
function prepare_parameters(global_conf, iter)
nerv.printf("%s preparing parameters...\n", global_conf.sche_log_pre)
global_conf.paramRepo = nerv.ParamRepo()
local paramRepo = global_conf.paramRepo
if iter == -1 then --first time
nerv.printf("%s first time, prepare some pre-set parameters, and leaving other parameters to auto-generation...\n", global_conf.sche_log_pre)
local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
f:close()
        --[[
        ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf)
        ltp_ih.trans = global_conf.cumat_type(global_conf.vocab:size(), global_conf.hidden_size) --index 0 is for zero, others correspond to vocab index(starting from 1)
        ltp_ih.trans:generate(global_conf.param_random)
        ltp_hh = nerv.LinearTransParam("ltp_hh", global_conf)
        ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size)
        ltp_hh.trans:generate(global_conf.param_random)
        --ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
        --ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
        --ltp_ho.trans:generate(global_conf.param_random)
        bp_h = nerv.BiasParam("bp_h", global_conf)
        bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size)
        bp_h.trans:generate(global_conf.param_random)
        --bp_o = nerv.BiasParam("bp_o", global_conf)
        --bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
        --bp_o.trans:generate(global_conf.param_random)
        local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
        f:write_chunk(ltp_ih)
        f:write_chunk(ltp_hh)
        --f:write_chunk(ltp_ho)
        f:write_chunk(bp_h)
        --f:write_chunk(bp_o)
        f:close()
        ]]--
        return nil
    end

    nerv.printf("%s loading parameters from file %s...\n", global_conf.sche_log_pre, global_conf.param_fn .. '.' .. tostring(iter))
    paramRepo:import({global_conf.param_fn .. '.' .. tostring(iter)}, nil, global_conf)
    nerv.printf("%s preparing parameters end.\n", global_conf.sche_log_pre)

    return nil
end
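--A minimal usage sketch (assuming global_conf.param_fn, global_conf.sche_log_pre etc. have
--already been filled in by the surrounding training schedule): the first call uses iter == -1
--to create an empty initial parameter file, later calls reload the file saved for iteration `iter`.
--[[
prepare_parameters(global_conf, -1) --first run: only creates global_conf.param_fn .. '.0'
prepare_parameters(global_conf, 3)  --reload the parameters stored in global_conf.param_fn .. '.3'
]]--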
--global_conf: table
--Returns: nerv.LayerRepo
function prepare_layers(global_conf)
nerv.printf("%s preparing layers...\n", global_conf.sche_log_pre)
local pr = global_conf.paramRepo
local du = false
--local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["break_id"] = global_conf.vocab:get_sen_entry().id, ["independent"] = global_conf.independent, ["clip"] = 10}}
--local recurrentLconfig = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10, ["direct_update"] = du, ["pr"] = pr}}
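    --Each layer spec below is {param_bindings, layer_config}: the first table can pin named
    --parameters from the ParamRepo (see the commented selectL spec further down that binds
    --["ltp"] = "ltp_ih"), while an empty table plus ["pr"] = pr lets the layer look its
    --parameters up in the repo, or auto-generate them when they are missing.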
    local layers = {
        ["nerv.LSTMLayerT"] = {
            ["lstmL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}},
        },
        ["nerv.DropoutLayerT"] = {
            ["dropoutL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}},
        },
        ["nerv.SelectLinearLayer"] = {
            ["selectL1"] = {{}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab, ["pr"] = pr}},
        },
        ["nerv.CombinerLayer"] = {
            ["combinerL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
        },
        ["nerv.AffineLayer"] = {
            ["outputL"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du, ["pr"] = pr}},
        },
        ["nerv.SoftmaxCELayerT"] = {
            ["softmaxL"] = {{}, {["dim_in"] = {global_conf.vocab:size(), global_conf.vocab:size()}, ["dim_out"] = {1}}},
        },
    }

    for l = 2, global_conf.layer_num do
        layers["nerv.DropoutLayerT"]["dropoutL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
        layers["nerv.LSTMLayerT"]["lstmL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}}
        layers["nerv.CombinerLayer"]["combinerL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}}
    end
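    --Resulting per-timestep stack (wired together in prepare_tnn below):
    --  selectL1 (word embedding lookup) -> lstmL_l -> combinerL_l (duplicates the hidden state
    --  for the recurrent link and for the next layer) -> dropoutL_l -> ... -> outputL (affine
    --  projection to vocab size) -> softmaxL (softmax + cross-entropy against the target word)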
    --[[ --we do not need those in the new tnn framework
    printf("%s adding %d bptt layers...\n", global_conf.sche_log_pre, global_conf.bptt)
    for i = 1, global_conf.bptt do
        layers["nerv.IndRecurrentLayer"]["recurrentL" .. (i + 1)] = recurrentLconfig
        layers["nerv.SigmoidLayer"]["sigmoidL" .. (i + 1)] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
        layers["nerv.SelectLinearLayer"]["selectL" .. (i + 1)] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}}
    end
    --]]
    local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
    nerv.printf("%s preparing layers end.\n", global_conf.sche_log_pre)
    return layerRepo
end
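--Sketch of how the repo is consumed afterwards (assuming the LayerRepo:get_layer accessor
--available in this version of nerv): individual layers can be fetched by the ids declared above.
--[[
local layerRepo = prepare_layers(global_conf)
local softmax_layer = layerRepo:get_layer("softmaxL") --the nerv.SoftmaxCELayerT instance
]]--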
--global_conf: table
--layerRepo: nerv.LayerRepo
--Returns: a nerv.TNN
function prepare_tnn(global_conf, layerRepo)
nerv.printf("%s Generate and initing TNN ...\n", global_conf.sche_log_pre)
--input: input_w, input_w, ... input_w_now, last_activation
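    --Each connection below is {source_port, destination_port, time_shift}: a shift of 0 wires
    --layers within the same time step, while a shift of 1 feeds the value produced at time t
    --into the destination at time t + 1, forming the LSTM hidden- and cell-state recurrences.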
    local connections_t = {
        {"<input>[1]", "selectL1[1]", 0},
        --{"selectL1[1]", "recurrentL1[1]", 0},
        --{"recurrentL1[1]", "sigmoidL1[1]", 0},
        --{"sigmoidL1[1]", "combinerL1[1]", 0},
        --{"combinerL1[1]", "recurrentL1[2]", 1},
        {"selectL1[1]", "lstmL1[1]", 0},
        {"lstmL1[2]", "lstmL1[3]", 1},
        {"lstmL1[1]", "combinerL1[1]", 0},
        {"combinerL1[1]", "lstmL1[2]", 1},
        {"combinerL1[2]", "dropoutL1[1]", 0},
        {"dropoutL"..global_conf.layer_num.."[1]", "outputL[1]", 0},
        {"outputL[1]", "softmaxL[1]", 0},
        {"<input>[2]", "softmaxL[2]", 0},
        {"softmaxL[1]", "