-- nerv/examples/swb_baseline2.lua
require 'htk_io'
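-- global configuration: training hyperparameters (learning rate, weight cost,
-- momentum), frame-context settings, data lists (SCP files), the HTK feature
-- config and the initial parameter files to load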
gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
        rearrange = true, -- keep the context frame order consistent with old results (deprecated)
        frm_trim = 5, -- trim the first and last 5 frames, as TNet does (deprecated)
        tr_scp = "/speechlab/users/mfy43/swb50/train_bp.scp",
        cv_scp = "/speechlab/users/mfy43/swb50/train_cv.scp",
        htk_conf = "/speechlab/users/mfy43/swb50/plp_0_d_a.conf",
        initialized_param = {"/speechlab/users/mfy43/swb50/swb_init.nerv",
                            "/speechlab/users/mfy43/swb50/swb_global_transf.nerv"},
        chunk_size = 1}
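-- A minimal usage sketch (assumed framework convention, not part of this
-- config): a training script would load the files listed in initialized_param
-- into a parameter repository and pass it to make_layer_repo below, roughly:
--     local param_repo = nerv.ParamRepo()                       -- assumed constructor
--     param_repo:import(gconf.initialized_param, nil, gconf)    -- assumed API
--     local layer_repo = make_layer_repo(param_repo)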

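-- Build the layer repository: primitive layers are declared first (bias/window
-- pairs for the global transform, affine + sigmoid hidden layers, softmax/CE
-- outputs), then graph layers wire them into networks.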
function make_layer_repo(param_repo)
    local layer_repo = nerv.LayerRepo(
    {
        -- global transf
        ["nerv.BiasLayer"] =
        {
            blayer1 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias0"}},
            blayer2 = {dim_in = {429}, dim_out = {429}, params = {bias = "bias1"}}
        },
        ["nerv.WindowLayer"] =
        {
            wlayer1 = {dim_in = {429}, dim_out = {429}, params = {window = "window0"}},
            wlayer2 = {dim_in = {429}, dim_out = {429}, params = {window = "window1"}}
        },
        -- biased linearity
        ["nerv.AffineLayer"] =
        {
            affine0 = {dim_in = {429}, dim_out = {2048},
                        params = {ltp = "affine0_ltp", bp = "affine0_bp"}},
            affine1 = {dim_in = {2048}, dim_out = {2048},
                        params = {ltp = "affine1_ltp", bp = "affine1_bp"}},
            affine2 = {dim_in = {2048}, dim_out = {2048},
                        params = {ltp = "affine2_ltp", bp = "affine2_bp"}},
            affine3 = {dim_in = {2048}, dim_out = {2048},
                        params = {ltp = "affine3_ltp", bp = "affine3_bp"}},
            affine4 = {dim_in = {2048}, dim_out = {2048},
                        params = {ltp = "affine4_ltp", bp = "affine4_bp"}},
            affine5 = {dim_in = {2048}, dim_out = {2048},
                        params = {ltp = "affine5_ltp", bp = "affine5_bp"}},
            affine6 = {dim_in = {2048}, dim_out = {2048},
                        params = {ltp = "affine6_ltp", bp = "affine6_bp"}},
            affine7 = {dim_in = {2048}, dim_out = {3001},
                        params = {ltp = "affine7_ltp", bp = "affine7_bp"}}
        },
        ["nerv.SigmoidLayer"] =
        {
            sigmoid0 = {dim_in = {2048}, dim_out = {2048}},
            sigmoid1 = {dim_in = {2048}, dim_out = {2048}},
            sigmoid2 = {dim_in = {2048}, dim_out = {2048}},
            sigmoid3 = {dim_in = {2048}, dim_out = {2048}},
            sigmoid4 = {dim_in = {2048}, dim_out = {2048}},
            sigmoid5 = {dim_in = {2048}, dim_out = {2048}},
            sigmoid6 = {dim_in = {2048}, dim_out = {2048}}
        },
        ["nerv.SoftmaxCELayer"] = -- softmax + ce criterion layer for finetune output
        {
            ce_crit = {dim_in = {3001, 1}, dim_out = {1}, compressed = true}
        },
        ["nerv.SoftmaxLayer"] = -- softmax for decode output
        {
            softmax = {dim_in = {3001}, dim_out = {3001}}
        }
    }, param_repo, gconf)

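    -- graph layers: "global_transf" chains the bias/window pairs into the
    -- feature transform; "main" stacks affine0..affine7 with sigmoids in between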
    layer_repo:add_layers(
    {
        ["nerv.GraphLayer"] =
        {
            global_transf = {
                dim_in = {429}, dim_out = {429},
                layer_repo = layer_repo,
                connections = {
                    {"<input>[1]", "blayer1[1]", 0},
                    {"blayer1[1]", "wlayer1[1]", 0},
                    {"wlayer1[1]", "blayer2[1]", 0},
                    {"blayer2[1]", "wlayer2[1]", 0},
                    {"wlayer2[1]", "<output>[1]", 0}
                }
            },
            main = {
                dim_in = {429}, dim_out = {3001},
                layer_repo = layer_repo,
                connections = {
                    {"<input>[1]", "affine0[1]", 0},
                    {"affine0[1]", "sigmoid0[1]", 0},
                    {"sigmoid0[1]", "affine1[1]", 0},
                    {"affine1[1]", "sigmoid1[1]", 0},
                    {"sigmoid1[1]", "affine2[1]", 0},
                    {"affine2[1]", "sigmoid2[1]", 0},
                    {"sigmoid2[1]", "affine3[1]", 0},
                    {"affine3[1]", "sigmoid3[1]", 0},
                    {"sigmoid3[1]", "affine4[1]", 0},
                    {"affine4[1]", "sigmoid4[1]", 0},
                    {"sigmoid4[1]", "affine5[1]", 0},
                    {"affine5[1]", "sigmoid5[1]", 0},
                    {"sigmoid5[1]", "affine6[1]", 0},
                    {"affine6[1]", "sigmoid6[1]", 0},
                    {"sigmoid6[1]", "affine7[1]", 0},
                    {"affine7[1]", "<output>[1]", 0}
                }
            }
        }
    }, param_repo, gconf)

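    -- task-specific wrappers around "main": "ce_output" appends the softmax+CE
    -- criterion for training, "softmax_output" appends a plain softmax for decoding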
    layer_repo:add_layers(
    {
        ["nerv.GraphLayer"] =
        {
            ce_output = {
                dim_in = {429, 1}, dim_out = {1},
                layer_repo = layer_repo,
                connections = {
                    {"<input>[1]", "main[1]", 0},
                    {"main[1]", "ce_crit[1]", 0},
                    {"<input>[2]", "ce_crit[2]", 0},
                    {"ce_crit[1]", "<output>[1]", 0}
                }
            },
            softmax_output = {
                dim_in = {429}, dim_out = {3001},
                layer_repo = layer_repo,
                connections = {
                    {"<input>[1]", "main[1]", 0},
                    {"main[1]", "softmax[1]", 0},
                    {"softmax[1]", "<output>[1]", 0}
                }
            }
        }
    }, param_repo, gconf)

    return layer_repo
end