require 'htk_io'
-- Global training configuration table read by the NERV toolkit
-- (intentionally global: the framework and make_layer_repo below expect `gconf`).
gconf = {
    -- optimizer hyper-parameters
    lrate = 0.8,
    wcost = 1e-6,
    momentum = 0.9,

    -- frame context window: 5 frames of context on each side
    frm_ext = 5,
    rearrange = true, -- just to make the context order consistent with old TNet results, deprecated
    frm_trim = 5,     -- trim the first and last 5 frames, TNet just does this, deprecated
    chunk_size = 1,

    -- training / cross-validation utterance lists (HTK .scp files)
    tr_scp = "/speechlab/users/mfy43/swb50/train_bp.scp",
    cv_scp = "/speechlab/users/mfy43/swb50/train_cv.scp",

    -- frame-level alignment labels (HTK master label file + mapping dict)
    ali = {
        file = "/speechlab/users/mfy43/swb50/ref.mlf",
        format = "map",
        format_arg = "/speechlab/users/mfy43/swb50/dict",
        dir = "*/",
        ext = "lab",
    },

    htk_conf = "/speechlab/users/mfy43/swb50/plp_0_d_a.conf",

    -- pre-trained parameter files to load at startup
    initialized_param = {
        "/speechlab/users/mfy43/swb50/swb_init.nerv",
        "/speechlab/users/mfy43/swb50/swb_global_transf.nerv",
    },
}
-- Network dimensions shared by the layer definitions below.
local input_size = 429   -- dimension of each input feature vector (size of dim_in for the first affine layer)
local output_size = 3001 -- dimension of the final output layer (affine7 / softmax / CE targets)
local hidden_size = 2048 -- width of the hidden affine and sigmoid layers (affine1..6, sigmoid0..6)
-- Alias the toolkit's Trainer class so methods (e.g. make_layer_repo) can be attached to it.
local trainer = nerv.Trainer
function trainer:make_layer_repo(param_repo)
local layer_repo = nerv.LayerRepo(
{
-- global transf
["nerv.BiasLayer"] =
{
blayer1 = {dim_in = {input_size}, dim_out = {input_size}, params = {bias = "bias0"}},
blayer2 = {dim_in = {input_size}, dim_out = {input_size}, params = {bias = "bias1"}}
},
["nerv.WindowLayer"] =
{
wlayer1 = {dim_in = {input_size}, dim_out = {input_size}, params = {window = "window0"}},
wlayer2 = {dim_in = {input_size}, dim_out = {input_size}, params = {window = "window1"}}
},
-- biased linearity
["nerv.AffineLayer"] =
{
affine0 = {dim_in = {input_size}, dim_out = {hidden_size},
params = {ltp = "affine0_ltp", bp = "affine0_bp"}},
affine1 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine1_ltp", bp = "affine1_bp"}},
affine2 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine2_ltp", bp = "affine2_bp"}},
affine3 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine3_ltp", bp = "affine3_bp"}},
affine4 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine4_ltp", bp = "affine4_bp"}},
affine5 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine5_ltp", bp = "affine5_bp"}},
affine6 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine6_ltp", bp = "affine6_bp"}},
affine7 = {dim_in = {hidden_size}, dim_out = {output_size},
params = {ltp = "affine7_ltp", bp = "affine7_bp"}}
},
["nerv.SigmoidLayer"] =
{
sigmoid0 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
sigmoid1 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
sigmoid2 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
sigmoid3 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
sigmoid4 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
sigmoid5 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
sigmoid6 = {dim_in = {hidden_size}, dim_out = {hidden_size}}
},
["nerv.SoftmaxCELayer"] = -- softmax + ce criterion layer for finetune output
{
ce_crit = {dim_in = {output_size, 1}, dim_out = {1}, compressed = true}
},
["nerv.SoftmaxLayer"] = -- softmax for decode output
{
softmax = {dim_in = {output_size}, dim_out = {output_size}}
}
}, param_repo, gconf)
layer_repo:add_layers(
{
["nerv.GraphLayer"] =
{
global_transf = {
dim_in = {input_size}, dim_out = {input_size},
layer_repo = layer_repo,
connections = {
{"[1]", "blayer1[1]", 0},
{"blayer1[1]", "wlayer1[1]", 0},
{"wlayer1[1]", "blayer2[1]", 0},
{"blayer2[1]", "wlayer2[1]", 0},
{"wlayer2[1]", "