-- NERV DNN training/decoding configuration for TIMIT, using Kaldi feature
-- pipes and tri3 forced alignments.  The methods defined later in this file
-- are attached directly onto nerv.Trainer.
require 'kaldi_io'
-- Global hyper-parameter / data-source table.  NOTE(review): gconf is
-- intentionally global — the NERV framework presumably looks it up by
-- name; confirm before making it local.
gconf = {lrate = 0.8, -- SGD learning rate
wcost = 1e-6, -- L2 weight-decay coefficient
momentum = 0.9,
frm_ext = 5, -- frames of context spliced on each side of the center frame
chunk_size = 1,
-- training features: Kaldi pipe that copies the train scp into an archive
tr_scp = "ark:/speechlab/tools/KALDI/kaldi-master/src/featbin/copy-feats " ..
"scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/train.scp ark:- |",
-- cross-validation features, same pipe shape as tr_scp
cv_scp = "ark:/speechlab/tools/KALDI/kaldi-master/src/featbin/copy-feats " ..
"scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/cv.scp ark:- |",
-- supervision targets: gzipped alignments -> pdf ids -> posteriors
ali = {targets_rspecifier = "ark:/speechlab/tools/KALDI/kaldi-master/src/bin/ali-to-pdf " ..
"/speechlab/users/mfy43/timit/s5/exp/tri3_ali/final.mdl " ..
"\"ark:gunzip -c /speechlab/users/mfy43/timit/s5/exp/tri3_ali/ali.*.gz |\" " ..
"ark:- | " ..
"/speechlab/tools/KALDI/kaldi-master/src/bin/ali-to-post " ..
"ark:- ark:- |"},
-- parameter files loaded to initialize training
initialized_param = {"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_init.nerv",
"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_output.nerv",
"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_dnn/nnet_trans.nerv"},
-- params in nnet_trans.nerv are included in the trained model
-- trained model used for decoding
decode_param = {"/speechlab/users/mfy43/timit/s5/nerv_2016-05-06_17:40:54/2016-05-06_19:44:43_iter_20_lr0.012500_tr0.867_cv1.464.nerv"}
}
local input_size = 440 -- presumably 40-dim features x 11 spliced frames (2*frm_ext+1) — TODO confirm
local output_size = 1959 -- presumably number of pdf (tied-state) targets in final.mdl — TODO confirm
local hidden_size = 1024 -- width of every hidden layer
local trainer = nerv.Trainer -- methods below are added onto this class table
--- Build the layer repository for both training and decoding.
-- Primitive layers are created first, then composed into graph layers.
-- The two add_layers calls must stay separate: the second batch of graphs
-- ("ce_output", "softmax_output") references graphs defined in the first
-- ("global_transf", "main") by name.
-- @param param_repo parameter repository holding the loaded .nerv files
-- @return the populated nerv.LayerRepo (training net: "ce_output",
--         decoding net: "softmax_output")
function trainer:make_layer_repo(param_repo)
local layer_repo = nerv.LayerRepo(
{
-- global transf
-- fixed input normalization: bias + window params, excluded from updates
["nerv.BiasLayer"] =
{
blayer1 = {dim_in = {input_size}, dim_out = {input_size}, params = {bias = "bias0"}, no_update_all = true}
},
["nerv.WindowLayer"] =
{
wlayer1 = {dim_in = {input_size}, dim_out = {input_size}, params = {window = "window0"}, no_update_all = true}
},
-- biased linearity
-- affine stack 440 -> 1024 (x6) -> 1959; parameter names must match the
-- entries inside the files listed in gconf.initialized_param
["nerv.AffineLayer"] =
{
affine0 = {dim_in = {input_size}, dim_out = {hidden_size},
params = {ltp = "affine0_ltp", bp = "affine0_bp"}},
affine1 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine1_ltp", bp = "affine1_bp"}},
affine2 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine2_ltp", bp = "affine2_bp"}},
affine3 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine3_ltp", bp = "affine3_bp"}},
affine4 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine4_ltp", bp = "affine4_bp"}},
affine5 = {dim_in = {hidden_size}, dim_out = {hidden_size},
params = {ltp = "affine5_ltp", bp = "affine5_bp"}},
affine6 = {dim_in = {hidden_size}, dim_out = {output_size},
params = {ltp = "affine6_ltp", bp = "affine6_bp"}}
},
-- element-wise nonlinearity placed after each hidden affine layer
["nerv.SigmoidLayer"] =
{
sigmoid0 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
sigmoid1 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
sigmoid2 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
sigmoid3 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
sigmoid4 = {dim_in = {hidden_size}, dim_out = {hidden_size}},
sigmoid5 = {dim_in = {hidden_size}, dim_out = {hidden_size}}
},
["nerv.SoftmaxCELayer"] = -- softmax + ce criterion layer for finetune output
{
ce_crit = {dim_in = {output_size, 1}, dim_out = {1}, compressed = true}
},
["nerv.SoftmaxLayer"] = -- softmax for decode output
{
softmax = {dim_in = {output_size}, dim_out = {output_size}}
}
}, param_repo, gconf)
-- composite graphs: the fixed preprocessing chain and the DNN body.
-- Connection triples are {source_port, dest_port, n}; the third element is
-- presumably a frame delay (0 = same frame) — TODO confirm.
layer_repo:add_layers(
{
["nerv.GraphLayer"] =
{
global_transf = {
dim_in = {input_size}, dim_out = {input_size},
layer_repo = layer_repo,
connections = {
{"<input>[1]", "blayer1[1]", 0},
{"blayer1[1]", "wlayer1[1]", 0},
{"wlayer1[1]", "<output>[1]", 0}
}
},
main = {
dim_in = {input_size}, dim_out = {output_size},
layer_repo = layer_repo,
connections = {
{"<input>[1]", "affine0[1]", 0},
{"affine0[1]", "sigmoid0[1]", 0},
{"sigmoid0[1]", "affine1[1]", 0},
{"affine1[1]", "sigmoid1[1]", 0},
{"sigmoid1[1]", "affine2[1]", 0},
{"affine2[1]", "sigmoid2[1]", 0},
{"sigmoid2[1]", "affine3[1]", 0},
{"affine3[1]", "sigmoid3[1]", 0},
{"sigmoid3[1]", "affine4[1]", 0},
{"affine4[1]", "sigmoid4[1]", 0},
{"sigmoid4[1]", "affine5[1]", 0},
{"affine5[1]", "sigmoid5[1]", 0},
{"sigmoid5[1]", "affine6[1]", 0},
{"affine6[1]", "<output>[1]", 0}
}
}
}
}, param_repo, gconf)
-- end-to-end graphs built on top of global_transf and main:
-- ce_output feeds targets (input port 2) into the CE criterion for
-- training; softmax_output yields class posteriors for decoding.
layer_repo:add_layers(
{
["nerv.GraphLayer"] =
{
ce_output = {
dim_in = {input_size, 1}, dim_out = {1},
layer_repo = layer_repo,
connections = {
{"<input>[1]", "global_transf[1]", 0},
{"global_transf[1]", "main[1]", 0},
{"main[1]", "ce_crit[1]", 0},
{"<input>[2]", "ce_crit[2]", 0},
{"ce_crit[1]", "<output>[1]", 0}
}
},
softmax_output = {
dim_in = {input_size}, dim_out = {output_size},
layer_repo = layer_repo,
connections = {
{"<input>[1]", "global_transf[1]", 0},
{"global_transf[1]", "main[1]", 0},
{"main[1]", "softmax[1]", 0},
{"softmax[1]", "<output>[1]", 0}
}
}
}
}, param_repo, gconf)
return layer_repo
end
--- Return the training network (DNN topped with the CE criterion graph).
-- @param layer_repo the repository built by make_layer_repo
function trainer:get_network(layer_repo)
    local net = layer_repo:get_layer("ce_output")
    return net
end
--- Build the data reader list for a dataset split.
-- @param dataset either 'train' or 'validate'; anything else raises
--        nerv.error('no such dataset')
-- @return a one-element list pairing a KaldiReader with its data widths
function trainer:get_readers(dataset)
    -- per-split {feature rspecifier, alignment spec}; a split-specific
    -- alignment (tr_ali / cv_ali) takes precedence over the shared gconf.ali
    local split_spec = {
        train = {gconf.tr_scp, gconf.tr_ali or gconf.ali},
        validate = {gconf.cv_scp, gconf.cv_ali or gconf.ali}
    }
    local spec = split_spec[dataset]
    if spec == nil then
        nerv.error('no such dataset')
    end
    local scp, ali = spec[1], spec[2]
    local reader = nerv.KaldiReader(gconf,
        {
            id = "main_scp",
            feature_rspecifier = scp,
            frm_ext = gconf.frm_ext,
            mlfs = {
                phone_state = ali
            }
        })
    return {{reader = reader,
             data = {main_scp = input_size, phone_state = 1}}}
end
--- Order in which the reader's streams are consumed during training.
function trainer:get_input_order()
    local order = {}
    order[1] = "main_scp"
    order[2] = "phone_state"
    return order
end
--- Return the decoding network (DNN topped with the plain softmax graph).
-- @param layer_repo the repository built by make_layer_repo
function trainer:get_decode_network(layer_repo)
    local net = layer_repo:get_layer("softmax_output")
    return net
end
--- Build the reader list for decoding (features only, no alignments).
-- @param scp_file Kaldi rspecifier for the features to decode
-- @return a one-element list pairing a KaldiReader with its data widths
function trainer:make_decode_readers(scp_file)
    local reader_conf = {
        id = "main_scp",
        feature_rspecifier = scp_file,
        frm_ext = gconf.frm_ext,
        mlfs = {} -- no supervision streams at decode time
    }
    local reader = nerv.KaldiReader(gconf, reader_conf)
    return {{reader = reader,
             data = {main_scp = input_size, phone_state = 1}}}
end
--- Order in which the reader's streams are consumed during decoding.
function trainer:get_decode_input_order()
    local order = {"main_scp"}
    return order
end
--- Current training error: accumulated cross-entropy averaged per frame.
function trainer:get_error()
    local crit = self.layer_repo:get_layer("ce_crit")
    return crit.total_ce / crit.total_frames
end
--- Hook invoked after every mini-batch; periodically dumps training stats.
-- The reporting period was hard-coded to 1000; it is now configurable via
-- gconf.stat_interval while defaulting to 1000, so existing configs keep
-- the old behavior unchanged.
-- @param cnt mini-batch counter supplied by the training loop
-- @param info per-batch info table (unused here)
function trainer:mini_batch_afterprocess(cnt, info)
    local interval = gconf.stat_interval or 1000
    if cnt % interval == 0 then
        self:epoch_afterprocess()
    end
end
--- Print the accumulated cross-entropy statistics gathered by ce_crit.
function trainer:epoch_afterprocess()
    local crit = self.layer_repo:get_layer("ce_crit")
    local frames = crit.total_frames
    local correct = crit.total_correct
    nerv.info("*** training stat begin ***")
    nerv.printf("cross entropy:\t\t%.8f\n", crit.total_ce)
    nerv.printf("correct:\t\t%d\n", correct)
    nerv.printf("frames:\t\t\t%d\n", frames)
    nerv.printf("err/frm:\t\t%.8f\n", crit.total_ce / frames)
    nerv.printf("accuracy:\t\t%.3f%%\n", correct / frames * 100)
    nerv.info("*** training stat end ***")
end