-- nerv/examples/timit_baseline2.lua
require 'kaldi_io'
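-- Global configuration: SGD hyperparameters (lrate, wcost, momentum), frame
-- context extension (frm_ext = 5 frames on each side, so the 440-dim network
-- input presumably corresponds to 11 spliced 40-dim feature frames), Kaldi
-- rspecifiers for the training and cross-validation feature sets, the initial
-- parameter files used for training, and a trained model used for decoding.
-- This script is meant to be loaded as a config by NERV's ASR driver scripts
-- (e.g. nerv/examples/asr_trainer.lua), which call the hook functions below.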
gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
        tr_scp = "ark:/speechlab/tools/KALDI/kaldi-master/src/featbin/copy-feats " ..
                    "scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/train.scp ark:- |",
        cv_scp = "ark:/speechlab/tools/KALDI/kaldi-master/src/featbin/copy-feats " ..
                    "scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/cv.scp ark:- |",
        initialized_param = {"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_init.nerv",
                            "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_output.nerv",
                            "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_trans.nerv"},
        decode_param = {"/speechlab/users/mfy43/timit/nnet_init_20160229015745_iter_13_lr0.013437_tr72.434_cv58.729.nerv",
                        "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_trans.nerv"}}

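-- Build the layer repository: an input transform (per-dimension bias and
-- window, i.e. shift and scale of the spliced features), six 1024-unit sigmoid
-- hidden layers, a 1959-dim output layer (presumably the number of tied-state
-- pdf targets), a softmax + cross-entropy criterion for training, and a plain
-- softmax for decoding.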
function make_layer_repo(param_repo)
    local layer_repo = nerv.LayerRepo(
    {
        -- global transf
        ["nerv.BiasLayer"] =
        {
            blayer1 = {{bias = "bias0"}, {dim_in = {440}, dim_out = {440}}}
        },
        ["nerv.WindowLayer"] =
        {
            wlayer1 = {{window = "window0"}, {dim_in = {440}, dim_out = {440}}}
        },
        -- biased linearities (affine layers)
        ["nerv.AffineLayer"] =
        {
            affine0 = {{ltp = "affine0_ltp", bp = "affine0_bp"},
                       {dim_in = {440}, dim_out = {1024}}},
            affine1 = {{ltp = "affine1_ltp", bp = "affine1_bp"},
                       {dim_in = {1024}, dim_out = {1024}}},
            affine2 = {{ltp = "affine2_ltp", bp = "affine2_bp"},
                       {dim_in = {1024}, dim_out = {1024}}},
            affine3 = {{ltp = "affine3_ltp", bp = "affine3_bp"},
                       {dim_in = {1024}, dim_out = {1024}}},
            affine4 = {{ltp = "affine4_ltp", bp = "affine4_bp"},
                       {dim_in = {1024}, dim_out = {1024}}},
            affine5 = {{ltp = "affine5_ltp", bp = "affine5_bp"},
                       {dim_in = {1024}, dim_out = {1024}}},
            affine6 = {{ltp = "affine6_ltp", bp = "affine6_bp"},
                       {dim_in = {1024}, dim_out = {1959}}}
        },
        ["nerv.SigmoidLayer"] =
        {
            sigmoid0 = {{}, {dim_in = {1024}, dim_out = {1024}}},
            sigmoid1 = {{}, {dim_in = {1024}, dim_out = {1024}}},
            sigmoid2 = {{}, {dim_in = {1024}, dim_out = {1024}}},
            sigmoid3 = {{}, {dim_in = {1024}, dim_out = {1024}}},
            sigmoid4 = {{}, {dim_in = {1024}, dim_out = {1024}}},
            sigmoid5 = {{}, {dim_in = {1024}, dim_out = {1024}}}
        },
        ["nerv.SoftmaxCELayer"] = -- softmax + ce criterion layer for finetune output
        {
            ce_crit = {{}, {dim_in = {1959, 1}, dim_out = {1}, compressed = true}}
        },
        ["nerv.SoftmaxLayer"] = -- softmax for decode output
        {
            softmax = {{}, {dim_in = {1959}, dim_out = {1959}}}
        }
    }, param_repo, gconf)

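    -- Wire the primitive layers into DAG sub-networks: "global_transf" applies
    -- the bias/window transform to the input features, while "main" is the
    -- affine/sigmoid stack ending in the 1959-dim linear output.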
    layer_repo:add_layers(
    {
        ["nerv.DAGLayer"] =
        {
            global_transf = {{}, {
                dim_in = {440}, dim_out = {440},
                sub_layers = layer_repo,
                connections = {
                    ["<input>[1]"] = "blayer1[1]",
                    ["blayer1[1]"] = "wlayer1[1]",
                    ["wlayer1[1]"] = "<output>[1]"
                }
            }},
            main = {{}, {
                dim_in = {440}, dim_out = {1959},
                sub_layers = layer_repo,
                connections = {
                    ["<input>[1]"] = "affine0[1]",
                    ["affine0[1]"] = "sigmoid0[1]",
                    ["sigmoid0[1]"] = "affine1[1]",
                    ["affine1[1]"] = "sigmoid1[1]",
                    ["sigmoid1[1]"] = "affine2[1]",
                    ["affine2[1]"] = "sigmoid2[1]",
                    ["sigmoid2[1]"] = "affine3[1]",
                    ["affine3[1]"] = "sigmoid3[1]",
                    ["sigmoid3[1]"] = "affine4[1]",
                    ["affine4[1]"] = "sigmoid4[1]",
                    ["sigmoid4[1]"] = "affine5[1]",
                    ["affine5[1]"] = "sigmoid5[1]",
                    ["sigmoid5[1]"] = "affine6[1]",
                    ["affine6[1]"] = "<output>[1]"
                }
            }}
        }
    }, param_repo, gconf)

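    -- Wrap "main" into the two top-level graphs: "ce_output" feeds the network
    -- output together with the frame labels (second input) into the softmax +
    -- cross-entropy criterion for training, while "softmax_output" appends a
    -- softmax to produce posteriors at decode time.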
    layer_repo:add_layers(
    {
        ["nerv.DAGLayer"] =
        {
            ce_output = {{}, {
                dim_in = {440, 1}, dim_out = {1},
                sub_layers = layer_repo,
                connections = {
                    ["<input>[1]"] = "main[1]",
                    ["main[1]"] = "ce_crit[1]",
                    ["<input>[2]"] = "ce_crit[2]",
                    ["ce_crit[1]"] = "<output>[1]"
                }
            }},
            softmax_output = {{}, {
                dim_in = {440}, dim_out = {1959},
                sub_layers = layer_repo,
                connections = {
                    ["<input>[1]"] = "main[1]",
                    ["main[1]"] = "softmax[1]",
                    ["softmax[1]"] = "<output>[1]"
                }
            }}
        }
    }, param_repo, gconf)

    return layer_repo
end

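-- Hook functions returning the graphs defined above: the training driver uses
-- the cross-entropy graph, the decoder uses the softmax graph, and the global
-- input transform is applied to the features as requested by get_input_order()
-- further below.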
function get_network(layer_repo)
    return layer_repo:get_layer("ce_output")
end

function get_decode_network(layer_repo)
    return layer_repo:get_layer("softmax_output")
end

function get_global_transf(layer_repo)
    return layer_repo:get_layer("global_transf")
end

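-- Reader for training/CV data: Kaldi features from the given scp, with
-- per-frame targets obtained by mapping the tri3 alignments to pdf ids
-- (ali-to-pdf | ali-to-post). The data table gives the width of each stream:
-- 440-dim features and 1-dim labels. Note that gconf.htk_conf is never set in
-- this config, so conf_file is simply nil here; the layer_repo argument is
-- unused but presumably part of the expected hook signature.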
function make_readers(scp_file, layer_repo)
    return {
                {reader = nerv.KaldiReader(gconf,
                    {
                        id = "main_scp",
                        feature_rspecifier = scp_file,
                        conf_file = gconf.htk_conf,
                        frm_ext = gconf.frm_ext,
                        mlfs = {
                            phone_state = {
                                targets_rspecifier = "ark:/speechlab/tools/KALDI/kaldi-master/src/bin/ali-to-pdf " ..
                                                        "/speechlab/users/mfy43/timit/s5/exp/tri3_ali/final.mdl " ..
                                                        "\"ark:gunzip -c /speechlab/users/mfy43/timit/s5/exp/tri3_ali/ali.*.gz |\" " ..
                                                        "ark:- | " ..
                                                    "/speechlab/tools/KALDI/kaldi-master/src/bin/ali-to-post " ..
                                                        "ark:- ark:- |",
                                format = "map"
                            }
                        }
                    }),
                data = {main_scp = 440, phone_state = 1}}
            }
end

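-- Reader for decoding: the same features, but with no target streams
-- (mlfs = {}) and need_key = true so that utterance keys are preserved for
-- writing posteriors. The phone_state entry in the data table appears to be a
-- leftover from the training reader, since no such stream is produced here.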
function make_decode_readers(scp_file, layer_repo)
    return {
                {reader = nerv.KaldiReader(gconf,
                    {
                        id = "main_scp",
                        feature_rspecifier = scp_file,
                        conf_file = gconf.htk_conf,
                        frm_ext = gconf.frm_ext,
                        mlfs = {},
                        need_key = true
                    }),
                data = {main_scp = 440, phone_state = 1}}
            }
end

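-- Frame-level buffer that batches (and optionally shuffles) the reader output
-- on the GPU; buffer_size, batch_size and randomize are not set in this file
-- and are presumably filled into gconf by the training driver.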
function make_buffer(readers)
    return nerv.SGDBuffer(gconf,
        {
            buffer_size = gconf.buffer_size,
            batch_size = gconf.batch_size,
            randomize = gconf.randomize,
            readers = readers,
            use_gpu = true
        })
end

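-- Input order consumed by the driver: the feature stream is passed through the
-- global transform (global_transf = true) before entering the network, while
-- the label stream is fed to the criterion as-is.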
function get_input_order()
    return {{id = "main_scp", global_transf = true},
            {id = "phone_state"}}
end

function get_decode_input_order()
    return {{id = "main_scp", global_transf = true}}
end

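-- Training statistics accumulated by the SoftmaxCELayer (total cross-entropy,
-- correct frames and total frames), reported below as frame accuracy.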
function get_accuracy(layer_repo)
    local ce_crit = layer_repo:get_layer("ce_crit")
    return ce_crit.total_correct / ce_crit.total_frames * 100
end

function print_stat(layer_repo)
    local ce_crit = layer_repo:get_layer("ce_crit")
    nerv.info("*** training stat begin ***")
    nerv.printf("cross entropy:\t\t%.8f\n", ce_crit.total_ce)
    nerv.printf("correct:\t\t%d\n", ce_crit.total_correct)
    nerv.printf("frames:\t\t\t%d\n", ce_crit.total_frames)
    nerv.printf("err/frm:\t\t%.8f\n", ce_crit.total_ce / ce_crit.total_frames)
    nerv.printf("accuracy:\t\t%.3f%%\n", get_accuracy(layer_repo))
    nerv.info("*** training stat end ***")
end