From 902e547326311bab9d6494cebbc1e5e2f14a018b Mon Sep 17 00:00:00 2001
From: txh18
Date: Sat, 5 Dec 2015 17:47:30 +0800
Subject: added twitter, added bilstmlm script, todo: test bilstmlm

---
 nerv/examples/lmptb/bilstmlm_ptb_main.lua | 516 ++++++++++++++++++++++++++++++
 nerv/examples/lmptb/lm_trainer.lua        | 132 +++++++-
 nerv/examples/lmptb/lstmlm_ptb_main.lua   |  47 ++-
 nerv/examples/lmptb/m-tests/some-text-chn |   5 +
 4 files changed, 695 insertions(+), 5 deletions(-)
 create mode 100644 nerv/examples/lmptb/bilstmlm_ptb_main.lua
 create mode 100644 nerv/examples/lmptb/m-tests/some-text-chn

diff --git a/nerv/examples/lmptb/bilstmlm_ptb_main.lua b/nerv/examples/lmptb/bilstmlm_ptb_main.lua
new file mode 100644
index 0000000..48bc636
--- /dev/null
+++ b/nerv/examples/lmptb/bilstmlm_ptb_main.lua
@@ -0,0 +1,516 @@
+require 'lmptb.lmvocab'
+require 'lmptb.lmfeeder'
+require 'lmptb.lmutil'
+require 'lmptb.layer.init'
+--require 'tnn.init'
+require 'lmptb.lmseqreader'
+require 'lm_trainer'
+
+--[[global function rename]]--
+--local printf = nerv.printf
+local LMTrainer = nerv.LMTrainer
+--[[global function rename ends]]--
+
+--global_conf: table
+--iter: int, -1 means the first time (no parameter file to load yet)
+--Returns: nil; the loaded parameters are kept in global_conf.paramRepo
+function prepare_parameters(global_conf, iter)
+    nerv.printf("%s preparing parameters...\n", global_conf.sche_log_pre)
+
+    global_conf.paramRepo = nerv.ParamRepo()
+    local paramRepo = global_conf.paramRepo
+
+    if iter == -1 then --first time
+        nerv.printf("%s first time, preparing some pre-set parameters and leaving other parameters to auto-generation...\n", global_conf.sche_log_pre)
+        local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+        f:close()
+        --[[
+        ltp_ih = nerv.LinearTransParam("ltp_ih", global_conf)
+        ltp_ih.trans = global_conf.cumat_type(global_conf.vocab:size(), global_conf.hidden_size) --index 0 is for zero, others correspond to vocab index(starting from 1)
+        ltp_ih.trans:generate(global_conf.param_random)
+
+        ltp_hh = nerv.LinearTransParam("ltp_hh", global_conf)
+        ltp_hh.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.hidden_size)
+        ltp_hh.trans:generate(global_conf.param_random)
+
+        --ltp_ho = nerv.LinearTransParam("ltp_ho", global_conf)
+        --ltp_ho.trans = global_conf.cumat_type(global_conf.hidden_size, global_conf.vocab:size())
+        --ltp_ho.trans:generate(global_conf.param_random)
+
+        bp_h = nerv.BiasParam("bp_h", global_conf)
+        bp_h.trans = global_conf.cumat_type(1, global_conf.hidden_size)
+        bp_h.trans:generate(global_conf.param_random)
+
+        --bp_o = nerv.BiasParam("bp_o", global_conf)
+        --bp_o.trans = global_conf.cumat_type(1, global_conf.vocab:size())
+        --bp_o.trans:generate(global_conf.param_random)
+
+        local f = nerv.ChunkFile(global_conf.param_fn .. '.0', 'w')
+        f:write_chunk(ltp_ih)
+        f:write_chunk(ltp_hh)
+        --f:write_chunk(ltp_ho)
+        f:write_chunk(bp_h)
+        --f:write_chunk(bp_o)
+        f:close()
+        ]]--
+        return nil
+    end
+
+    nerv.printf("%s loading parameters from file %s...\n", global_conf.sche_log_pre, global_conf.param_fn .. '.' .. tostring(iter))
+    paramRepo:import({global_conf.param_fn .. '.' .. 
tostring(iter)}, nil, global_conf)
+
+    nerv.printf("%s preparing parameters end.\n", global_conf.sche_log_pre)
+
+    return nil
+end
+
+--global_conf: table
+--Returns: nerv.LayerRepo
+function prepare_layers(global_conf)
+    nerv.printf("%s preparing layers...\n", global_conf.sche_log_pre)
+
+    local pr = global_conf.paramRepo
+
+    local du = false
+
+    --local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["break_id"] = global_conf.vocab:get_sen_entry().id, ["independent"] = global_conf.independent, ["clip"] = 10}}
+    --local recurrentLconfig = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10, ["direct_update"] = du, ["pr"] = pr}}
+
+    local layers = {
+        ["nerv.LSTMLayerT"] = {
+            ["lstmFL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}},
+            ["lstmRL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}},
+        },
+
+        ["nerv.DropoutLayerT"] = {
+            ["dropoutL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}},
+        },
+
+        ["nerv.SelectLinearLayer"] = {
+            ["selectL1"] = {{}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab, ["pr"] = pr}},
+        },
+
+        ["nerv.CombinerLayer"] = {
+            ["combinerXL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+            ["combinerHFL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+            ["combinerHRL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
+        },
+
+        ["nerv.AffineLayer"] = {
+            ["biAffineL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["lambda"] = {1, 1}}},
+            ["outputL"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du, ["pr"] = pr}},
+        },
+
+        ["nerv.TanhLayer"] = {
+            ["biTanhL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}},
+        },
+
+        ["nerv.SoftmaxCELayerT"] = {
+            ["softmaxL"] = {{}, {["dim_in"] = {global_conf.vocab:size(), global_conf.vocab:size()}, ["dim_out"] = {1}}},
+        },
+    }
+
+    if global_conf.layer_num > 1 then
+        nerv.error("this script currently does not support more than one layer")
+    end
+    --[[
+    for l = 2, global_conf.layer_num do
+        layers["nerv.DropoutLayerT"]["dropoutL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
+        layers["nerv.LSTMLayerT"]["lstmL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["pr"] = pr}}
+        layers["nerv.CombinerLayer"]["combinerL" .. 
l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}} + end + ]]-- + + local layerRepo = nerv.LayerRepo(layers, pr, global_conf) + nerv.printf("%s preparing layers end.\n", global_conf.sche_log_pre) + return layerRepo +end + +--global_conf: table +--layerRepo: nerv.LayerRepo +--Returns: a nerv.TNN +function prepare_tnn(global_conf, layerRepo) + nerv.printf("%s Generate and initing TNN ...\n", global_conf.sche_log_pre) + + --input: input_w, input_w, ... input_w_now, last_activation + local connections_t = { + {"[1]", "selectL1[1]", 0}, + + --{"selectL1[1]", "recurrentL1[1]", 0}, + --{"recurrentL1[1]", "sigmoidL1[1]", 0}, + --{"sigmoidL1[1]", "combinerL1[1]", 0}, + --{"combinerL1[1]", "recurrentL1[2]", 1}, + + {"selectL1[1]", "combinerXL1[1]", 0}, + {"combinerXL1[1]", "lstmFL1[1]", 0}, + {"lstmFL1[1]", "combinerHFL1[1]", 0}, + {"combinerHFL1[1]", "lstmFL1[2]", 1}, + {"lstmFL1[2]", "lstmFL1[3]", 1}, + {"combinerXL1[2]", "lstmRL1[1]", 0}, + {"lstmRL1[1]", "combinerHRL1[1]", 0}, + {"combinerHRL1[1]", "lstmRL1[2]", -1}, + {"lstmRL1[2]", "lstmRL1[3]", -1}, + {"combinerHFL1[2]", "biAffineL1[1]", 0}, + {"combinerHRL1[2]", "biAffineL1[2]", 0}, + {"biAffineL1[1]", "biTanhL1[1]", 0}, + {"biTanhL1[1]", "dropoutL1[1]", 0}, + + {"dropoutL"..global_conf.layer_num.."[1]", "outputL[1]", 0}, + {"outputL[1]", "softmaxL[1]", 0}, + {"[2]", "softmaxL[2]", 0}, + {"softmaxL[1]", "[1]", 0} + } + + --[[ + for l = 2, global_conf.layer_num do + table.insert(connections_t, {"dropoutL"..(l-1).."[1]", "lstmL"..l.."[1]", 0}) + table.insert(connections_t, {"lstmL"..l.."[2]", "lstmL"..l.."[3]", 1}) + table.insert(connections_t, {"lstmL"..l.."[1]", "combinerL"..l.."[1]", 0}) + table.insert(connections_t, {"combinerL"..l.."[1]", "lstmL"..l.."[2]", 1}) + table.insert(connections_t, {"combinerL"..l.."[2]", "dropoutL"..l.."[1]", 0}) + end + ]]-- + + --[[ + printf("%s printing DAG connections:\n", global_conf.sche_log_pre) + for key, value in pairs(connections_t) do + printf("\t%s->%s\n", key, value) + end + ]]-- + + local tnn = nerv.TNN("TNN", global_conf, {["dim_in"] = {1, global_conf.vocab:size()}, + ["dim_out"] = {1}, ["sub_layers"] = layerRepo, + ["connections"] = connections_t, ["clip_t"] = global_conf.clip_t, + }) + + tnn:init(global_conf.batch_size, global_conf.chunk_size) + + nerv.printf("%s Initing TNN end.\n", global_conf.sche_log_pre) + return tnn +end + +function load_net(global_conf, next_iter) + prepare_parameters(global_conf, next_iter) + local layerRepo = prepare_layers(global_conf) + local tnn = prepare_tnn(global_conf, layerRepo) + return tnn +end + +local train_fn, valid_fn, test_fn +global_conf = {} +local set = arg[1] --"test" + +if (set == "ptb") then + +root_dir = '/home/slhome/txh18/workspace' +data_dir = root_dir .. '/ptb/DATA' +train_fn = data_dir .. '/ptb.train.txt.adds' +valid_fn = data_dir .. '/ptb.valid.txt.adds' +test_fn = data_dir .. '/ptb.test.txt.adds' +vocab_fn = data_dir .. '/vocab' + +qdata_dir = root_dir .. 
'/ptb/questionGen/gen' + +global_conf = { + lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5, + cumat_type = nerv.CuMatrixFloat, + mmat_type = nerv.MMatrixFloat, + nn_act_default = 0, + + hidden_size = 300, + layer_num = 1, + chunk_size = 90, + batch_size = 20, + max_iter = 35, + lr_decay = 1.003, + decay_iter = 10, + param_random = function() return (math.random() / 5 - 0.1) end, + dropout_str = "0.5", + + train_fn = train_fn, + valid_fn = valid_fn, + test_fn = test_fn, + vocab_fn = vocab_fn, + max_sen_len = 90, + sche_log_pre = "[SCHEDULER]:", + log_w_num = 40000, --give a message when log_w_num words have been processed + timer = nerv.Timer(), + work_dir_base = '/home/slhome/txh18/workspace/ptb/EXP-nerv/bilstmlm_v1.0' +} + +elseif (set == "msr_sc") then + +data_dir = '/home/slhome/txh18/workspace/sentenceCompletion/DATA_PV2' +train_fn = data_dir .. '/normed_all.sf.len60.adds.train' +valid_fn = data_dir .. '/normed_all.sf.len60.adds.dev' +test_fn = data_dir .. '/answer_normed.adds' +vocab_fn = data_dir .. '/normed_all.choose.vocab30000.addqvocab' + +global_conf = { + lrate = 1, wcost = 1e-6, momentum = 0, + cumat_type = nerv.CuMatrixFloat, + mmat_type = nerv.MMatrixFloat, + nn_act_default = 0, + + hidden_size = 300, + layer_num = 1, + chunk_size = 15, + batch_size = 10, + max_iter = 30, + decay_iter = 10, + lr_decay = 1.003, + param_random = function() return (math.random() / 5 - 0.1) end, + dropout_str = "0", + + train_fn = train_fn, + valid_fn = valid_fn, + test_fn = test_fn, + vocab_fn = vocab_fn, + sche_log_pre = "[SCHEDULER]:", + log_w_num = 400000, --give a message when log_w_num words have been processed + timer = nerv.Timer(), + work_dir_base = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test' +} + +elseif (set == "twitter") then + +root_dir = '/home/slhome/txh18/workspace' +data_dir = root_dir .. '/twitter_new/DATA' +train_fn = data_dir .. '/twitter.choose.adds' +valid_fn = data_dir .. '/twitter.valid.adds' +test_fn = data_dir .. '/comm.test.choose-ppl.adds' +vocab_fn = data_dir .. '/twitter.choose.train.vocab' + +--qdata_dir = root_dir .. '/ptb/questionGen/gen' + +global_conf = { + lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5, + cumat_type = nerv.CuMatrixFloat, + mmat_type = nerv.MMatrixFloat, + nn_act_default = 0, + + hidden_size = 300, + layer_num = 1, + chunk_size = 15, + batch_size = 20, + max_iter = 35, + lr_decay = 1.003, + decay_iter = 10, + param_random = function() return (math.random() / 5 - 0.1) end, + dropout_str = "0", + + train_fn = train_fn, + valid_fn = valid_fn, + test_fn = test_fn, + vocab_fn = vocab_fn, + max_sen_len = 90, + sche_log_pre = "[SCHEDULER]:", + log_w_num = 40000, --give a message when log_w_num words have been processed + timer = nerv.Timer(), + work_dir_base = root_dir .. 
'/twitter_new/EXP-nerv/bilstmlm_v1.0'
+}
+
+else
+
+valid_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+vocab_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+
+global_conf = {
+    lrate = 0.01, wcost = 1e-5, momentum = 0,
+    cumat_type = nerv.CuMatrixFloat,
+    mmat_type = nerv.MMatrixFloat,
+    nn_act_default = 0,
+
+    hidden_size = 20,
+    layer_num = 1,
+    chunk_size = 2,
+    batch_size = 10,
+    max_iter = 3,
+    param_random = function() return (math.random() / 5 - 0.1) end,
+    dropout_str = "0",
+
+    train_fn = train_fn,
+    valid_fn = valid_fn,
+    test_fn = test_fn,
+    max_sen_len = 80,
+    lr_decay = 1.003,
+    decay_iter = 10,
+    vocab_fn = vocab_fn,
+    sche_log_pre = "[SCHEDULER]:",
+    log_w_num = 10, --give a message when log_w_num words have been processed
+    timer = nerv.Timer(),
+    work_dir_base = '/home/slhome/txh18/workspace/nerv/play/testEXP/tnn_bilstmlm_test'
+}
+
+end
+
+lr_half = false --global, not local: these scheduling options may be overridden by loadstring(arg[2]) below
+start_iter = -1
+ppl_last = 100000
+commands_str = "train:test"
+commands = {}
+test_iter = -1
+
+--for testout (the question set)
+q_file = "ptb.test.txt.q10rs1_Msss.adds"
+
+if arg[2] ~= nil then
+    nerv.printf("%s applying arg[2](%s)...\n", global_conf.sche_log_pre, arg[2])
+    loadstring(arg[2])()
+    nerv.LMUtil.wait(0.5)
+else
+    nerv.printf("%s no user setting, all default...\n", global_conf.sche_log_pre)
+end
+
+global_conf.work_dir = global_conf.work_dir_base .. 'h' .. global_conf.hidden_size .. 'l' .. global_conf.layer_num .. 'ch' .. global_conf.chunk_size .. 'ba' .. global_conf.batch_size .. 'slr' .. global_conf.lrate .. 'wc' .. global_conf.wcost .. 'dr' .. global_conf.dropout_str
+global_conf.train_fn_shuf = global_conf.work_dir .. '/train_fn_shuf'
+global_conf.train_fn_shuf_bak = global_conf.train_fn_shuf .. '_bak'
+global_conf.param_fn = global_conf.work_dir .. "/params"
+global_conf.dropout_list = nerv.SUtil.parse_schedule(global_conf.dropout_str)
+global_conf.log_fn = global_conf.work_dir .. '/log_lstm_tnn_' .. commands_str .. os.date("_TT%m_%d_%X", os.time())
+global_conf.log_fn, _ = string.gsub(global_conf.log_fn, ':', '-')
+commands = nerv.SUtil.parse_commands_set(commands_str)
+
+nerv.printf("%s creating work_dir(%s)...\n", global_conf.sche_log_pre, global_conf.work_dir)
+nerv.LMUtil.wait(2)
+os.execute("mkdir -p "..global_conf.work_dir)
+os.execute("cp " .. global_conf.train_fn .. " " .. global_conf.train_fn_shuf)
+
+--redirecting log outputs!
+nerv.SUtil.log_redirect(global_conf.log_fn)
+nerv.LMUtil.wait(2)
+
+----------------printing options---------------------------------
+nerv.printf("%s printing global_conf...\n", global_conf.sche_log_pre)
+for id, value in pairs(global_conf) do
+    nerv.printf("%s:\t%s\n", id, tostring(value))
+end
+nerv.LMUtil.wait(2)
+
+nerv.printf("%s printing training scheduling options...\n", global_conf.sche_log_pre)
+nerv.printf("lr_half:\t%s\n", tostring(lr_half))
+nerv.printf("start_iter:\t%s\n", tostring(start_iter))
+nerv.printf("ppl_last:\t%s\n", tostring(ppl_last))
+nerv.printf("commands_str:\t%s\n", commands_str)
+nerv.printf("test_iter:\t%s\n", tostring(test_iter))
+nerv.printf("%s printing training scheduling end.\n", global_conf.sche_log_pre)
+nerv.LMUtil.wait(2)
+------------------printing options end------------------------------
+
+math.randomseed(1)
+
+local vocab = nerv.LMVocab()
+global_conf["vocab"] = vocab
+nerv.printf("%s building vocab...\n", global_conf.sche_log_pre)
+global_conf.vocab:build_file(global_conf.vocab_fn, false)
+ppl_rec = {}
+
+local final_iter = -1
+if commands["train"] == 1 then
+    if start_iter == -1 then
+        prepare_parameters(global_conf, -1) --write pre-generated params to the param.0 file
+    end
+
+    if start_iter == -1 or start_iter == 0 then
+        nerv.printf("===INITIAL VALIDATION===\n")
+        local tnn = load_net(global_conf, 0)
+        global_conf.paramRepo = tnn:get_params() --get auto-generated params
+        global_conf.paramRepo:export(global_conf.param_fn .. '.0', nil) --some parameters are auto-generated, save them again to the param.0 file
+        global_conf.dropout_rate = 0
+        local result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
+        nerv.LMUtil.wait(1)
+        ppl_rec[0] = {}
+        ppl_rec[0].valid = result:ppl_all("birnn")
+        ppl_last = ppl_rec[0].valid
+        ppl_rec[0].train = 0
+        ppl_rec[0].test = 0
+        ppl_rec[0].lr = 0
+
+        start_iter = 1
+
+        nerv.printf("\n")
+    end
+
+    for iter = start_iter, global_conf.max_iter, 1 do
+        final_iter = iter --for final testing
+        global_conf.sche_log_pre = "[SCHEDULER ITER"..iter.." LR"..global_conf.lrate.."]:"
+        tnn = load_net(global_conf, iter - 1)
+        nerv.printf("===ITERATION %d LR %f===\n", iter, global_conf.lrate)
+        global_conf.dropout_rate = nerv.SUtil.sche_get(global_conf.dropout_list, iter)
+        result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.train_fn_shuf, tnn, true) --true update!
+        global_conf.dropout_rate = 0
+        ppl_rec[iter] = {}
+        ppl_rec[iter].train = result:ppl_all("birnn")
+        --shuffling training file
+        nerv.printf("%s shuffling training file\n", global_conf.sche_log_pre)
+        os.execute('cp ' .. global_conf.train_fn_shuf .. ' ' .. global_conf.train_fn_shuf_bak)
+        os.execute('cat ' .. global_conf.train_fn_shuf_bak .. ' | sort -R --random-source=/dev/zero > ' .. global_conf.train_fn_shuf)
+        nerv.printf("===PEEK ON TEST %d===\n", iter)
+        result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.test_fn, tnn, false) --false update!
+        ppl_rec[iter].test = result:ppl_all("birnn")
+        nerv.printf("===VALIDATION %d===\n", iter)
+        result = LMTrainer.lm_process_file_birnn(global_conf, global_conf.valid_fn, tnn, false) --false update!
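+        --Learning-rate schedule applied below: after decay_iter iterations,
+        --whenever validation PPL fails to improve by a factor of lr_decay
+        --(or halving has already started), lrate is multiplied by 0.6, and
+        --lr_half latches so the decay continues in every later iteration.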
+        ppl_rec[iter].valid = result:ppl_all("birnn")
+        ppl_rec[iter].lr = global_conf.lrate
+        if ((ppl_last / ppl_rec[iter].valid < global_conf.lr_decay or lr_half == true) and iter > global_conf.decay_iter) then
+            global_conf.lrate = (global_conf.lrate * 0.6)
+        end
+        if ppl_rec[iter].valid < ppl_last then
+            nerv.printf("%s PPL improves, saving net to file %s.%d...\n", global_conf.sche_log_pre, global_conf.param_fn, iter)
+            global_conf.paramRepo:export(global_conf.param_fn .. '.' .. tostring(iter), nil)
+        else
+            nerv.printf("%s PPL did not improve, rejected, copying param file of last iter...\n", global_conf.sche_log_pre)
+            os.execute('cp ' .. global_conf.param_fn..'.'..tostring(iter - 1) .. ' ' .. global_conf.param_fn..'.'..tostring(iter))
+        end
+        if ppl_last / ppl_rec[iter].valid < global_conf.lr_decay or lr_half == true then
+            lr_half = true
+        end
+        if ppl_rec[iter].valid < ppl_last then
+            ppl_last = ppl_rec[iter].valid
+        end
+        nerv.printf("\n")
+        nerv.LMUtil.wait(2)
+    end
+    nerv.info("saving final nn to param.final")
+    os.execute('cp ' .. global_conf.param_fn .. '.' .. tostring(final_iter) .. ' ' .. global_conf.param_fn .. '.final')
+
+    nerv.printf("===VALIDATION PPL record===\n")
+    for i, _ in pairs(ppl_rec) do
+        nerv.printf("<ITER%d LR%.5f train:%.3f valid:%.3f test:%.3f> \n", i, ppl_rec[i].lr, ppl_rec[i].train, ppl_rec[i].valid, ppl_rec[i].test)
+    end
+    nerv.printf("\n")
+end --if commands["train"]
+
+if commands["test"] == 1 then
+    nerv.printf("===FINAL TEST===\n")
+    global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
+    if final_iter ~= -1 and test_iter == -1 then
+        test_iter = final_iter
+    end
+    if test_iter == -1 then
+        test_iter = "final"
+    end
+    tnn = load_net(global_conf, test_iter)
+    global_conf.dropout_rate = 0
+    LMTrainer.lm_process_file_birnn(global_conf, global_conf.test_fn, tnn, false) --false update!
+end --if commands["test"]
+
+if commands["testout"] == 1 then
+    nerv.printf("===TEST OUT===\n")
+    nerv.printf("q_file:\t%s\n", q_file)
+    local q_fn = qdata_dir .. q_file
+    global_conf.sche_log_pre = "[SCHEDULER FINAL_TEST]:"
+    if final_iter ~= -1 and test_iter == -1 then
+        test_iter = final_iter
+    end
+    if test_iter == -1 then
+        test_iter = "final"
+    end
+    tnn = load_net(global_conf, test_iter)
+    global_conf.dropout_rate = 0
+    LMTrainer.lm_process_file_birnn(global_conf, q_fn, tnn, false) --false update!
+end --if commands["testout"]
+

diff --git a/nerv/examples/lmptb/lm_trainer.lua b/nerv/examples/lmptb/lm_trainer.lua
index 3c7078e..6bd06bb 100644
--- a/nerv/examples/lmptb/lm_trainer.lua
+++ b/nerv/examples/lmptb/lm_trainer.lua
@@ -22,6 +22,7 @@ function LMTrainer.lm_process_file_rnn(global_conf, fn, tnn, do_train, p_conf)
         p_conf = {}
     end
     local reader
+    local r_conf = {}
     local chunk_size, batch_size
     if p_conf.one_sen_report == true then --report log prob one by one sentence
         if do_train == true then
@@ -31,12 +32,13 @@ function LMTrainer.lm_process_file_rnn(global_conf, fn, tnn, do_train, p_conf)
                     global_conf.max_sen_len)
         batch_size = 1
         chunk_size = global_conf.max_sen_len
+        r_conf["se_mode"] = true
     else
         batch_size = global_conf.batch_size
         chunk_size = global_conf.chunk_size
     end
 
-    reader = nerv.LMSeqReader(global_conf, batch_size, chunk_size, global_conf.vocab)
+    reader = nerv.LMSeqReader(global_conf, batch_size, chunk_size, global_conf.vocab, r_conf)
     reader:open_file(fn)
 
     local result = nerv.LMResult(global_conf, global_conf.vocab)
@@ -144,4 +146,132 @@ function LMTrainer.lm_process_file_rnn(global_conf, fn, tnn, do_train, p_conf)
     return result
 end
 
+--Returns: LMResult
+function LMTrainer.lm_process_file_birnn(global_conf, fn, tnn, do_train, p_conf)
+    if p_conf == nil then
+        p_conf = {}
+    end
+    local reader
+    local chunk_size, batch_size
+    local r_conf = {["se_mode"] = true}
+    if p_conf.one_sen_report == true then --report log prob one by one sentence
+        if do_train == true then
+            nerv.warning("LMTrainer.lm_process_file_birnn: warning, one_sen_report is true while do_train is also true, which is unusual")
+        end
+        nerv.printf("lm_process_file_birnn: one_sen report mode, set batch_size to 1 and chunk_size to max_sen_len(%d)\n",
+                    global_conf.max_sen_len)
+        batch_size = 1
+        chunk_size = global_conf.max_sen_len
+    else
+        batch_size = global_conf.batch_size
+        chunk_size = global_conf.chunk_size
+    end
+
+    reader = nerv.LMSeqReader(global_conf, batch_size, chunk_size, global_conf.vocab, r_conf)
+    reader:open_file(fn)
+
+    local result = nerv.LMResult(global_conf, global_conf.vocab)
+    result:init("birnn")
+    if global_conf.dropout_rate ~= nil then
+        nerv.info("LMTrainer.lm_process_file_birnn: dropout_rate is %f", global_conf.dropout_rate)
+    end
+
+    global_conf.timer:flush()
+    tnn:flush_all() --caution: will also flush the inputs from the reader!
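+    --Masking note for the main loop below: error inputs are filled with 1 and
+    --then zeroed for frames whose flags lack HAS_LABEL (padding beyond a
+    --sentence end in se_mode), so padded frames contribute no gradient;
+    --log-probs are likewise only accumulated for frames with real labels.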
+ + local next_log_wcn = global_conf.log_w_num + local neto_bakm = global_conf.mmat_type(batch_size, 1) --space backup matrix for network output + + while (1) do + global_conf.timer:tic('most_out_loop_lmprocessfile') + + local r, feeds + global_conf.timer:tic('tnn_beforeprocess') + r, feeds = tnn:getfeed_from_reader(reader) + if r == false then + break + end + + for t = 1, chunk_size do + tnn.err_inputs_m[t][1]:fill(1) + for i = 1, batch_size do + if bit.band(feeds.flags_now[t][i], nerv.TNN.FC.HAS_LABEL) == 0 then + tnn.err_inputs_m[t][1][i - 1][0] = 0 + end + end + end + global_conf.timer:toc('tnn_beforeprocess') + + --[[ + for j = 1, global_conf.chunk_size, 1 do + for i = 1, global_conf.batch_size, 1 do + printf("%s[L(%s)] ", feeds.inputs_s[j][i], feeds.labels_s[j][i]) --vocab:get_word_str(input[i][j]).id + end + printf("\n") + end + printf("\n") + ]]-- + + tnn:net_propagate() + + if do_train == true then + tnn:net_backpropagate(false) + tnn:net_backpropagate(true) + end + + global_conf.timer:tic('tnn_afterprocess') + local sen_logp = {} + for t = 1, chunk_size, 1 do + tnn.outputs_m[t][1]:copy_toh(neto_bakm) + for i = 1, batch_size, 1 do + if (feeds.labels_s[t][i] ~= global_conf.vocab.null_token) then + result:add("birnn", feeds.labels_s[t][i], math.exp(neto_bakm[i - 1][0])) + if sen_logp[i] == nil then + sen_logp[i] = 0 + end + sen_logp[i] = sen_logp[i] + neto_bakm[i - 1][0] + end + end + end + if p_conf.one_sen_report == true then + for i = 1, batch_size do + nerv.printf("LMTrainer.lm_process_file_birnn: one_sen_report, %f\n", sen_logp[i]) + end + end + + --tnn:move_right_to_nextmb({0}) --do not need history for bi directional model + global_conf.timer:toc('tnn_afterprocess') + + global_conf.timer:toc('most_out_loop_lmprocessfile') + + --print log + if result["birnn"].cn_w > next_log_wcn then + next_log_wcn = next_log_wcn + global_conf.log_w_num + nerv.printf("%s %d words processed %s.\n", global_conf.sche_log_pre, result["birnn"].cn_w, os.date()) + nerv.printf("\t%s log prob per sample :%f.\n", global_conf.sche_log_pre, result:logp_sample("birnn")) + for key, value in pairs(global_conf.timer.rec) do + nerv.printf("\t [global_conf.timer]: time spent on %s:%.5f clock time\n", key, value) + end + global_conf.timer:flush() + nerv.LMUtil.wait(0.1) + end + + --[[ + for t = 1, global_conf.chunk_size do + print(tnn.outputs_m[t][1]) + end + ]]-- + + collectgarbage("collect") + + --break --debug + end + + nerv.printf("%s Displaying result:\n", global_conf.sche_log_pre) + nerv.printf("%s %s\n", global_conf.sche_log_pre, result:status("birnn")) + nerv.printf("%s Doing on %s end.\n", global_conf.sche_log_pre, fn) + + return result +end + diff --git a/nerv/examples/lmptb/lstmlm_ptb_main.lua b/nerv/examples/lmptb/lstmlm_ptb_main.lua index a2dcbba..887993d 100644 --- a/nerv/examples/lmptb/lstmlm_ptb_main.lua +++ b/nerv/examples/lmptb/lstmlm_ptb_main.lua @@ -257,12 +257,50 @@ global_conf = { work_dir_base = '/home/slhome/txh18/workspace/sentenceCompletion/EXP-Nerv/rnnlm_test' } +elseif (set == "twitter") then + +root_dir = '/home/slhome/txh18/workspace' +data_dir = root_dir .. '/twitter_new/DATA' +train_fn = data_dir .. '/twitter.choose.adds' +valid_fn = data_dir .. '/twitter.valid.adds' +test_fn = data_dir .. '/comm.test.choose-ppl.adds' +vocab_fn = data_dir .. '/twitter.choose.train.vocab' + +--qdata_dir = root_dir .. 
'/ptb/questionGen/gen'
+
+global_conf = {
+    lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5,
+    cumat_type = nerv.CuMatrixFloat,
+    mmat_type = nerv.MMatrixFloat,
+    nn_act_default = 0,
+
+    hidden_size = 300,
+    layer_num = 1,
+    chunk_size = 15,
+    batch_size = 20,
+    max_iter = 35,
+    lr_decay = 1.003,
+    decay_iter = 10,
+    param_random = function() return (math.random() / 5 - 0.1) end,
+    dropout_str = "0",
+
+    train_fn = train_fn,
+    valid_fn = valid_fn,
+    test_fn = test_fn,
+    vocab_fn = vocab_fn,
+    max_sen_len = 90,
+    sche_log_pre = "[SCHEDULER]:",
+    log_w_num = 40000, --give a message when log_w_num words have been processed
+    timer = nerv.Timer(),
+    work_dir_base = root_dir .. '/twitter_new/EXP-nerv/lstmlm_v1.0'
+}
+
 else
 
-valid_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
-train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
-test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
-vocab_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text'
+valid_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+train_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+test_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
+vocab_fn = '/home/slhome/txh18/workspace/nerv/nerv/nerv/examples/lmptb/m-tests/some-text-chn'
 
 global_conf = {
     lrate = 0.01, wcost = 1e-5, momentum = 0,
@@ -281,6 +319,7 @@ global_conf = {
     train_fn = train_fn,
     valid_fn = valid_fn,
     test_fn = test_fn,
+    max_sen_len = 80,
     lr_decay = 1.003,
     decay_iter = 10,
     vocab_fn = vocab_fn,

diff --git a/nerv/examples/lmptb/m-tests/some-text-chn b/nerv/examples/lmptb/m-tests/some-text-chn
new file mode 100644
index 0000000..da474ce
--- /dev/null
+++ b/nerv/examples/lmptb/m-tests/some-text-chn
@@ -0,0 +1,5 @@
+<s> 你好 我 是 一个 人 </s>
+<s> 想 一起 玩 吗 </s>
+<s> 一个 人 很 好 玩 </s>
+<s> 不 想 一个 人 玩 </s>
+<s> 不 想 一个 人 玩 </s>
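
For reference, here is a toy, nerv-independent Lua sketch of the bidirectional
unrolling that connections_t in bilstmlm_ptb_main.lua encodes (bidir_unroll, f
and g are illustrative names, not nerv APIs): the forward recurrence reads the
previous frame (time shift 1, like lstmFL1), the reverse recurrence reads the
next frame (time shift -1, like lstmRL1), and the per-frame states of both
directions are merged the way biAffineL1 does.

    --toy sketch only; assumes nothing from nerv
    local function bidir_unroll(xs, f, g)
        local T = #xs
        local fwd, rev, out = {}, {}, {}
        fwd[0] = 0              --zero initial states, cf. nn_act_default
        rev[T + 1] = 0
        for t = 1, T do         --forward pass, cf. lstmFL1 with shift 1
            fwd[t] = f(xs[t], fwd[t - 1])
        end
        for t = T, 1, -1 do     --reverse pass, cf. lstmRL1 with shift -1
            rev[t] = g(xs[t], rev[t + 1])
        end
        for t = 1, T do         --merge both directions, cf. biAffineL1
            out[t] = fwd[t] + rev[t]
        end
        return out
    end

    --each output frame then sees the whole sentence from both sides:
    local acc = function(x, h) return x + h end
    print(table.concat(bidir_unroll({1, 2, 3}, acc, acc), " ")) --prints "7 8 9"

The scripts themselves take two command-line arguments: arg[1] picks a data set
("ptb", "msr_sc" or "twitter"), and the optional arg[2] is a chunk of Lua code
that loadstring() executes to override the scheduling globals, e.g.
bilstmlm_ptb_main.lua ptb 'lrate = 0.1 commands_str = "train:test"'.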