From c56722702c099a6d4b3ea1599836e6226bdccc46 Mon Sep 17 00:00:00 2001 From: txh18 Date: Thu, 12 Nov 2015 17:13:35 +0800 Subject: cleaning files... --- nerv/examples/lmptb/tnn_ptb_main.lua | 284 +++++++++++++++++++++++++++++++++++ 1 file changed, 284 insertions(+) create mode 100644 nerv/examples/lmptb/tnn_ptb_main.lua (limited to 'nerv/examples/lmptb/tnn_ptb_main.lua') diff --git a/nerv/examples/lmptb/tnn_ptb_main.lua b/nerv/examples/lmptb/tnn_ptb_main.lua new file mode 100644 index 0000000..803ae68 --- /dev/null +++ b/nerv/examples/lmptb/tnn_ptb_main.lua @@ -0,0 +1,284 @@ +require 'lmptb.lmvocab' +require 'lmptb.lmfeeder' +require 'lmptb.lmutil' +require 'lmptb.layer.init' +require 'rnn.init' +require 'lmptb.lmseqreader' +require 'lm_trainer' + +--[[global function rename]]-- +local printf = nerv.printf +local LMTrainer = nerv.LMTrainer +--[[global function rename ends]]-- + +--global_conf: table +--first_time: bool +--Returns: a ParamRepo +function prepare_parameters(global_conf, first_time) + printf("%s preparing parameters...\n", global_conf.sche_log_pre) + + if (first_time) then + ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf) + ltp_ih.trans = global_conf.cumat_type(global_conf.vocab:size(), global_conf.hidden_size) --index 0 is for zero, others correspond to vocab index(starting from 1) + ltp_ih.trans:generate(global_conf.param_random) + + ltp_hh = nerv.LinearTransParam("ltp_hh", global_conf) + ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size) + ltp_hh.trans:generate(global_conf.param_random) + + ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf) + ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size()) + ltp_ho.trans:generate(global_conf.param_random) + + bp_h = nerv.BiasParam("bp_h", global_conf) + bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size) + bp_h.trans:generate(global_conf.param_random) + + bp_o = nerv.BiasParam("bp_o", global_conf) + bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size()) + bp_o.trans:generate(global_conf.param_random) + + local f = nerv.ChunkFile(global_conf.param_fn, 'w') + f:write_chunk(ltp_ih) + f:write_chunk(ltp_hh) + f:write_chunk(ltp_ho) + f:write_chunk(bp_h) + f:write_chunk(bp_o) + f:close() + end + + local paramRepo = nerv.ParamRepo() + paramRepo:import({global_conf.param_fn}, nil, global_conf) + + printf("%s preparing parameters end.\n", global_conf.sche_log_pre) + + return paramRepo +end + +--global_conf: table +--Returns: nerv.LayerRepo +function prepare_layers(global_conf, paramRepo) + printf("%s preparing layers...\n", global_conf.sche_log_pre) + + --local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["break_id"] = global_conf.vocab:get_sen_entry().id, ["independent"] = global_conf.independent, ["clip"] = 10}} + local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10}} + + local layers = { + ["nerv.AffineRecurrentLayer"] = { + ["recurrentL1"] = recurrentLconfig, + }, + + ["nerv.SelectLinearLayer"] = { + ["selectL1"] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}}, + }, + + ["nerv.SigmoidLayer"] = { + ["sigmoidL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}} + }, + + ["nerv.CombinerLayer"] = { + 
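+ --combinerL1 duplicates the hidden state: per the connections built in prepare_tnn,
+ --copy [1] is fed back into recurrentL1 at the next time step (time shift 1),
+ --copy [2] continues on to the output and softmax layers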
["combinerL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}} + }, + + ["nerv.AffineLayer"] = { + ["outputL"] = {{["ltp"] = "ltp_ho", ["bp"] = "bp_o"}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}}}, + }, + + ["nerv.SoftmaxCELayerT"] = { + ["softmaxL"] = {{}, {["dim_in"] = {global_conf.vocab:size(), global_conf.vocab:size()}, ["dim_out"] = {1}}}, + }, + } + + --[[ --we do not need those in the new rnn framework + printf("%s adding %d bptt layers...\n", global_conf.sche_log_pre, global_conf.bptt) + for i = 1, global_conf.bptt do + layers["nerv.IndRecurrentLayer"]["recurrentL" .. (i + 1)] = recurrentLconfig + layers["nerv.SigmoidLayer"]["sigmoidL" .. (i + 1)] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}} + layers["nerv.SelectLinearLayer"]["selectL" .. (i + 1)] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}} + end + --]] + + local layerRepo = nerv.LayerRepo(layers, paramRepo, global_conf) + printf("%s preparing layers end.\n", global_conf.sche_log_pre) + return layerRepo +end + +--global_conf: table +--layerRepo: nerv.LayerRepo +--Returns: a nerv.TNN +function prepare_tnn(global_conf, layerRepo) + printf("%s Generate and initing TNN ...\n", global_conf.sche_log_pre) + + --input: input_w, input_w, ... input_w_now, last_activation + local connections_t = { + {"[1]", "selectL1[1]", 0}, + {"selectL1[1]", "recurrentL1[1]", 0}, + {"recurrentL1[1]", "sigmoidL1[1]", 0}, + {"sigmoidL1[1]", "combinerL1[1]", 0}, + {"combinerL1[1]", "recurrentL1[2]", 1}, + {"combinerL1[2]", "outputL[1]", 0}, + {"outputL[1]", "softmaxL[1]", 0}, + {"[2]", "softmaxL[2]", 0}, + {"softmaxL[1]", "[1]", 0} + } + + --[[ + printf("%s printing DAG connections:\n", global_conf.sche_log_pre) + for key, value in pairs(connections_t) do + printf("\t%s->%s\n", key, value) + end + ]]-- + + local tnn = nerv.TNN("TNN", global_conf, {["dim_in"] = {1, global_conf.vocab:size()}, ["dim_out"] = {1}, ["sub_layers"] = layerRepo, + ["connections"] = connections_t, + }) + + tnn:init(global_conf.batch_size, global_conf.chunk_size) + + printf("%s Initing TNN end.\n", global_conf.sche_log_pre) + return tnn +end + +function load_net(global_conf) + local paramRepo = prepare_parameters(global_conf, false) + local layerRepo = prepare_layers(global_conf, paramRepo) + local tnn = prepare_tnn(global_conf, layerRepo) + return tnn, paramRepo +end + +local train_fn, valid_fn, test_fn, global_conf +local set = arg[1] --"test" + +if (set == "ptb") then + +data_dir = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/PTBdata' +train_fn = data_dir .. '/ptb.train.txt.adds' +valid_fn = data_dir .. '/ptb.valid.txt.adds' +test_fn = data_dir .. 
'/ptb.test.txt.adds' + +global_conf = { + lrate = 1, wcost = 1e-6, momentum = 0, + cumat_type = nerv.CuMatrixFloat, + mmat_type = nerv.MMatrixFloat, + nn_act_default = 0, + + hidden_size = 300, + chunk_size = 15, + batch_size = 10, + max_iter = 30, + param_random = function() return (math.random() / 5 - 0.1) end, + + train_fn = train_fn, + valid_fn = valid_fn, + test_fn = test_fn, + sche_log_pre = "[SCHEDULER]:", + log_w_num = 40000, --give a message when log_w_num words have been processed + timer = nerv.Timer() +} + +else + +valid_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text' +train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text' +test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text' + +global_conf = { + lrate = 1, wcost = 1e-6, momentum = 0, + cumat_type = nerv.CuMatrixFloat, + mmat_type = nerv.CuMatrixFloat, + nn_act_default = 0, + + hidden_size = 20, + chunk_size = 2, + batch_size = 3, + max_iter = 3, + param_random = function() return (math.random() / 5 - 0.1) end, + + train_fn = train_fn, + valid_fn = valid_fn, + test_fn = test_fn, + sche_log_pre = "[SCHEDULER]:", + log_w_num = 10, --give a message when log_w_num words have been processed + timer = nerv.Timer() +} + +end + +global_conf.work_dir = '/home/slhome/txh18/workspace/nerv/play/dagL_test' +global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf' +global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak' +global_conf.param_fn = global_conf.work_dir .. "/params" + +printf("%s printing global_conf\n", global_conf.sche_log_pre) +for id, value in pairs(global_conf) do + print(id, value) +end +nerv.LMUtil.wait(2) + +printf("%s creating work_dir...\n", global_conf.sche_log_pre) +os.execute("mkdir -p "..global_conf.work_dir) +os.execute("cp " .. global_conf.train_fn .. " " .. global_conf.train_fn_shuf) + +local vocab = nerv.LMVocab() +global_conf["vocab"] = vocab +global_conf.vocab:build_file(global_conf.train_fn, false) + +prepare_parameters(global_conf, true) --randomly generate parameters + +print("===INITIAL VALIDATION===") +local tnn, paramRepo = load_net(global_conf) +local result = LMTrainer.lm_process_file(global_conf, global_conf.valid_fn, tnn, false) --false update! +nerv.LMUtil.wait(3) +ppl_rec = {} +lr_rec = {} +ppl_rec[0] = {} +ppl_rec[0].valid = result:ppl_net("rnn") +ppl_last = ppl_rec[0].valid +ppl_rec[0].train = 0 +ppl_rec[0].test = 0 +lr_rec[0] = 0 +print() +local lr_half = false +for iter = 1, global_conf.max_iter, 1 do + tnn, paramRepo = load_net(global_conf) + printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate) + global_conf.sche_log_pre = "[SCHEDULER ITER"..iter.." LR"..global_conf.lrate.."]:" + result = LMTrainer.lm_process_file(global_conf, global_conf.train_fn_shuf, tnn, true) --true update! + ppl_rec[iter] = {} + ppl_rec[iter].train = result:ppl_net("rnn") + --shuffling training file + printf("%s shuffling training file\n", global_conf.sche_log_pre) + os.execute('cp ' .. global_conf.train_fn_shuf .. ' ' .. global_conf.train_fn_shuf_bak) + os.execute('cat ' .. global_conf.train_fn_shuf_bak .. ' | sort -R --random-source=/dev/zero > ' .. global_conf.train_fn_shuf) + printf("===PEEK ON TEST %d===\n", iter) + result = LMTrainer.lm_process_file(global_conf, global_conf.test_fn, tnn, false) --false update! 
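+ --bookkeeping: ppl_rec[iter] stores the train/valid/test perplexities of this iteration, lr_rec[iter] the learning rate used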
+ ppl_rec[iter].test = result:ppl_net("rnn")
+ printf("===VALIDATION %d===\n", iter)
+ result = LMTrainer.lm_process_file(global_conf, global_conf.valid_fn, tnn, false) --false update!
+ ppl_rec[iter].valid = result:ppl_net("rnn")
+ lr_rec[iter] = global_conf.lrate
+ --decay schedule: once the validation PPL stops improving, lrate is scaled by 0.6 on every following iteration
+ if (ppl_last / ppl_rec[iter].valid < 1.0003 or lr_half == true) then
+ global_conf.lrate = (global_conf.lrate * 0.6)
+ lr_half = true
+ end
+ if (ppl_rec[iter].valid < ppl_last) then
+ printf("%s saving net to file %s...\n", global_conf.sche_log_pre, global_conf.param_fn)
+ paramRepo:export(global_conf.param_fn, nil)
+ ppl_last = ppl_rec[iter].valid
+ else
+ printf("%s PPL did not improve, rejected...\n", global_conf.sche_log_pre)
+ if (lr_half == true) then
+ printf("%s LR is already halving, end training...\n", global_conf.sche_log_pre)
+ end
+ end
+ printf("\n")
+ nerv.LMUtil.wait(2)
+end
+printf("===VALIDATION PPL record===\n")
+for i = 0, #ppl_rec do printf("<ITER%d LR%.5f train:%.3f valid:%.3f test:%.3f> \n", i, lr_rec[i], ppl_rec[i].train, ppl_rec[i].valid, ppl_rec[i].test) end
+printf("\n")
+printf("===FINAL TEST===\n")
+global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
+tnn, paramRepo = load_net(global_conf)
+LMTrainer.lm_process_file(global_conf, global_conf.test_fn, tnn, false) --false update!
+
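
A note on the connection format used by prepare_tnn above: each entry of connections_t is a {source_port, destination_port, time_shift} triple. A shift of 0 wires two layers within the same frame, while the single shift of 1 (combinerL1[1] -> recurrentL1[2]) feeds the hidden state forward to the next frame, which is what makes the network recurrent. The sketch below is illustrative only: plain Lua with no nerv dependency, where the <input>/<label>/<output> labels stand in for the bare "[1]"/"[2]" ports of the TNN itself and the unroll helper is invented for this example.

    -- mirrors connections_t in prepare_tnn: {source_port, destination_port, time_shift}
    local connections_t = {
        {"<input>[1]",     "selectL1[1]",    0},
        {"selectL1[1]",    "recurrentL1[1]", 0},
        {"recurrentL1[1]", "sigmoidL1[1]",   0},
        {"sigmoidL1[1]",   "combinerL1[1]",  0},
        {"combinerL1[1]",  "recurrentL1[2]", 1}, -- the recurrent link: crosses into the next frame
        {"combinerL1[2]",  "outputL[1]",     0},
        {"outputL[1]",     "softmaxL[1]",    0},
        {"<label>[2]",     "softmaxL[2]",    0},
        {"softmaxL[1]",    "<output>[1]",    0},
    }

    -- print how the triples unroll over a chunk of chunk_size frames
    local function unroll(conns, chunk_size)
        for t = 1, chunk_size do
            for _, c in ipairs(conns) do
                local src, dst, shift = c[1], c[2], c[3]
                print(string.format("frame %d: %-15s -> %-15s @ frame %d", t, src, dst, t + shift))
            end
        end
    end

    unroll(connections_t, 3)

Running unroll(connections_t, 3) lists every link per frame; the only destination that lands in a later frame is recurrentL1[2], and its frame-4 target at t = 3 corresponds to the hidden state that has to be carried over into the next chunk.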