-- nerv/examples/lmptb/sample_grulm_ptb_main.lua

require 'lmptb.lmvocab'
require 'lmptb.lmfeeder'
require 'lmptb.lmutil'
require 'lmptb.layer.init'
--require 'tnn.init'
require 'lmptb.lmseqreader'
require 'lm_trainer'
require 'lm_sampler'

--[[global function rename]]--
--local printf = nerv.printf
local LMTrainer = nerv.LMTrainer
--[[global function rename ends]]--

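--global_conf: table
--fn: string, path of the parameter file to import
--Returns: nil (the imported parameters are kept in global_conf.paramRepo)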
function prepare_parameters(global_conf, fn)
    nerv.printf("%s preparing parameters...\n", global_conf.sche_log_pre) 
    
    global_conf.paramRepo = nerv.ParamRepo()
    local paramRepo = global_conf.paramRepo

    nerv.printf("%s loading parameter from file %s...\n", global_conf.sche_log_pre, fn) 
    paramRepo:import({fn}, nil, global_conf)

    nerv.printf("%s preparing parameters end.\n", global_conf.sche_log_pre)

    return nil
end

--global_conf: table
--Returns: nerv.LayerRepo
function prepare_layers(global_conf)
    nerv.printf("%s preparing layers...\n", global_conf.sche_log_pre)
    
    local pr = global_conf.paramRepo

    local du = false

    --local recurrentLconfig = {{["bp"] = "bp_h", ["ltp_hh"] = "ltp_hh"}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["break_id"] = global_conf.vocab:get_sen_entry().id, ["independent"] = global_conf.independent, ["clip"] = 10}}
    --local recurrentLconfig = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["clip"] = 10, ["direct_update"] = du, ["pr"] = pr}}

    local layers = {
        ["nerv.GRULayerT"] = {
            ["gruL1"] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["pr"] = pr}}, 
        },
        
        ["nerv.DropoutLayerT"] = {
            ["dropoutL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}, 
        },

        ["nerv.SelectLinearLayer"] = {
            ["selectL1"] = {{}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}, ["vocab"] = global_conf.vocab, ["pr"] = pr}},
        },
        
        ["nerv.CombinerLayer"] = {
            ["combinerL1"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}},
        },

        ["nerv.AffineLayer"] = {
            ["outputL"] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.vocab:size()}, ["direct_update"] = du, ["pr"] = pr}},
        },

        ["nerv.SoftmaxCELayerT"] = {
            ["softmaxL"] = {{}, {["dim_in"] = {global_conf.vocab:size(), global_conf.vocab:size()}, ["dim_out"] = {1}}},
        },
    }
   
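    --stack additional GRU + dropout + combiner layers when layer_num > 1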
    for l = 2, global_conf.layer_num do 
        layers["nerv.DropoutLayerT"]["dropoutL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
        layers["nerv.GRULayerT"]["gruL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size, global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}, ["pr"] = pr}}
        layers["nerv.CombinerLayer"]["combinerL" .. l] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size, global_conf.hidden_size}, ["lambda"] = {1}}}
    end
    --[[ --we do not need those in the new tnn framework
    printf("%s adding %d bptt layers...\n", global_conf.sche_log_pre, global_conf.bptt)
    for i = 1, global_conf.bptt do
        layers["nerv.IndRecurrentLayer"]["recurrentL" .. (i + 1)] = recurrentLconfig 
        layers["nerv.SigmoidLayer"]["sigmoidL" .. (i + 1)] = {{}, {["dim_in"] = {global_conf.hidden_size}, ["dim_out"] = {global_conf.hidden_size}}}
        layers["nerv.SelectLinearLayer"]["selectL" .. (i + 1)] = {{["ltp"] = "ltp_ih"}, {["dim_in"] = {1}, ["dim_out"] = {global_conf.hidden_size}}}
    end
    --]]

    local layerRepo = nerv.LayerRepo(layers, pr, global_conf)
    nerv.printf("%s preparing layers end.\n", global_conf.sche_log_pre)
    return layerRepo
end

--global_conf: table
--layerRepo: nerv.LayerRepo
--Returns: a nerv.TNN
function prepare_tnn(global_conf, layerRepo)
    nerv.printf("%s Generate and initing TNN ...\n", global_conf.sche_log_pre)

    --inputs: [1] the current word index, [2] the reference label fed to the softmax CE layer
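    --each connection is {source "layer[port]", destination "layer[port]", time shift};
    --a shift of 1 (e.g. combinerL1[1] -> gruL1[2]) delivers the value to the next
    --time step, forming the recurrent link of the GRU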
    local connections_t = {
        {"<input>[1]", "selectL1[1]", 0},
        
        --{"selectL1[1]", "recurrentL1[1]", 0},  
        --{"recurrentL1[1]", "sigmoidL1[1]", 0},
        --{"sigmoidL1[1]", "combinerL1[1]", 0},
        --{"combinerL1[1]", "recurrentL1[2]", 1},
        
        {"selectL1[1]", "gruL1[1]", 0},
        {"gruL1[1]", "combinerL1[1]", 0},
        {"combinerL1[1]", "gruL1[2]", 1},
        {"combinerL1[2]", "dropoutL1[1]", 0},
        
        {"dropoutL"..global_conf.layer_num.."[1]", "outputL[1]", 0},
        {"outputL[1]", "softmaxL[1]", 0},
        {"<input>[2]", "softmaxL[2]", 0},
        {"softmaxL[1]", "<output>[1]", 0}
    }

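    --wire the additional layers: the dropout output of layer l-1 feeds gruL of layer l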
    for l = 2, global_conf.layer_num do
        table.insert(connections_t, {"dropoutL"..(l-1).."[1]", "gruL"..l.."[1]", 0})
        table.insert(connections_t, {"gruL"..l.."[1]", "combinerL"..l.."[1]", 0})
        table.insert(connections_t, {"combinerL"..l.."[1]", "gruL"..l.."[2]", 1})
        table.insert(connections_t, {"combinerL"..l.."[2]", "dropoutL"..l.."[1]", 0})
    end

    --[[
    printf("%s printing DAG connections:\n", global_conf.sche_log_pre)
    for key, value in pairs(connections_t) do
        printf("\t%s->%s\n", key, value)
    end
    ]]--

    local tnn = nerv.TNN("TNN", global_conf, {["dim_in"] = {1, global_conf.vocab:size()}, 
            ["dim_out"] = {1}, ["sub_layers"] = layerRepo,
            ["connections"] = connections_t, ["clip_t"] = global_conf.clip_t,
        })

    tnn:init(global_conf.batch_size, global_conf.chunk_size)

    nerv.printf("%s Initing TNN end.\n", global_conf.sche_log_pre)
    return tnn
end

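--global_conf: table
--layerRepo: nerv.LayerRepo
--Returns: a nerv.DAGLayerT covering a single time step (intended for step-by-step sampling)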
function prepare_dagL(global_conf, layerRepo)
    nerv.printf("%s Generate and initing dagL ...\n", global_conf.sche_log_pre)

    --inputs: [1] the current word index, [2] the previous hidden activation
    --outputs: [1] the output-layer (pre-softmax) activation, [2] the new hidden activation
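    --for this single-step DAG the connections are a plain source -> destination map
    --(no time-shift field as in the TNN above); the hidden state is carried across
    --steps by the caller, presumably by feeding output [2] back into input [2]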
    local connections_t = {
        ["<input>[1]"] = "selectL1[1]",
       
        ["selectL1[1]"] = "gruL1[1]",
        ["gruL1[1]"] = "combinerL1[1]",
        ["<input>[2]"] = "gruL1[2]",
        --{"combinerL1[2]", "dropoutL1[1]", 0},
        
        ["combinerL" .. global_conf.layer_num .. "[1]"] = "outputL[1]",
        ["outputL[1]"] = "<output>[1]",     
        ["combinerL1[2]"] = "<output>[2]",
    }
    
    if global_conf.layer_num > 1 then
        nerv.error("multiple layer is currently not supported(not hard to implement though)")
    end
    --[[
    for l = 2, global_conf.layer_num do
        table.insert(connections_t, {"dropoutL"..(l-1).."[1]", "gruL"..l.."[1]", 0})
        table.insert(connections_t, {"gruL"..l.."[1]", "combinerL"..l.."[1]", 0})
        table.insert(connections_t, {"combinerL"..l.."[1]", "gruL"..l.."[2]", 1})
        table.insert(connections_t, {"combinerL"..l.."[2]", "dropoutL"..l.."[1]", 0})
    end
    ]]--

    --[[
    printf("%s printing DAG connections:\n", global_conf.sche_log_pre)
    for key, value in pairs(connections_t) do
        printf("\t%s->%s\n", key, value)
    end
    ]]--

    local dagL = nerv.DAGLayerT("dagL", global_conf, {["dim_in"] = {1, global_conf.hidden_size}, 
            ["dim_out"] = {global_conf.vocab:size(), global_conf.hidden_size}, ["sub_layers"] = layerRepo,
            ["connections"] = connections_t
        })

    dagL:init(global_conf.batch_size)

    nerv.printf("%s Initing DAGL end.\n", global_conf.sche_log_pre)
    return dagL
end

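--global_conf: table
--fn: string, parameter file
--Returns: a nerv.TNN built from the imported parameters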
function load_net_tnn(global_conf, fn)
    prepare_parameters(global_conf, fn)
    local layerRepo = prepare_layers(global_conf)
    local tnn = prepare_tnn(global_conf, layerRepo)
    return tnn
end

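--global_conf: table
--fn: string, parameter file
--Returns: a nerv.DAGLayerT built from the imported parameters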
function load_net_dagL(global_conf, fn)
    prepare_parameters(global_conf, fn)
    local layerRepo = prepare_layers(global_conf)
    local dagL = prepare_dagL(global_conf, layerRepo)
    return dagL
end

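--main part of the script: arg[1] selects which data set configuration to use (e.g. "ptb")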
local train_fn, valid_fn, test_fn
global_conf = {}
local set = arg[1] --"test"

root_dir = '/home/slhome/txh18/workspace'

if (set == "ptb") then

data_dir = root_dir .. '/ptb/DATA'
train_fn = data_dir .. '/ptb.train.txt.adds'
valid_fn = data_dir .. '/ptb.valid.txt.adds'
test_fn = data_dir .. '/ptb.test.txt.adds'
vocab_fn = data_dir .. '/vocab'

qdata_dir = root_dir .. '/ptb/questionGen/gen'

global_conf = {
    lrate = 0.15, wcost = 1e-5, momentum = 0, clip_t = 5,
    cumat_type = nerv.CuMatrixFloat,
    mmat_type = nerv.MMatrixFloat,
    nn_act_default = 0, 

    hidden_size = 300,
    layer_num = 1,
    chunk_size = 15,
    batch_size = 32, 
    max_iter = 35,
    lr_decay = 1.003,
    decay_iter = 10,
    param_random = function() return (math.random() / 5 - 0.1) end,
    dropout_str = "0.5",

    train_fn =