Diffstat (limited to 'nerv/examples/timit_baseline2.lua')
-rw-r--r--  nerv/examples/timit_baseline2.lua  212
1 file changed, 212 insertions, 0 deletions
diff --git a/nerv/examples/timit_baseline2.lua b/nerv/examples/timit_baseline2.lua
new file mode 100644
index 0000000..d783c3d
--- /dev/null
+++ b/nerv/examples/timit_baseline2.lua
@@ -0,0 +1,212 @@
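+-- NERV training scheme for the TIMIT baseline: this file only defines the
+-- global configuration table (gconf) and a set of hook functions
+-- (make_layer_repo, get_network, make_readers, make_buffer, get_input_order,
+-- get_accuracy, print_stat, ...); nothing here runs by itself.  It is
+-- presumably loaded by a generic driver script (e.g. asr_trainer.lua in the
+-- same examples directory), which calls these hooks to build the network,
+-- the readers and the buffer and to run the training or decoding loop.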
+require 'kaldi_io'
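+-- global configuration: training hyperparameters, the Kaldi rspecifiers that
+-- deliver the training / cross-validation features, and the parameter files
+-- used to initialize the network (initialized_param) or to restore a trained
+-- model for decoding (decode_param).  frm_ext = 5 presumably splices 5 frames
+-- of context on each side of the current frame, i.e. 11 frames in total,
+-- which matches the 440-dim network input if the base features are 40-dim.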
+gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9, frm_ext = 5,
+         tr_scp = "ark:/speechlab/tools/KALDI/kaldi-master/src/featbin/copy-feats " ..
+                  "scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/train.scp ark:- |",
+         cv_scp = "ark:/speechlab/tools/KALDI/kaldi-master/src/featbin/copy-feats " ..
+                  "scp:/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/cv.scp ark:- |",
+         initialized_param = {"/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_init.nerv",
+                              "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_output.nerv",
+                              "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_trans.nerv"},
+         decode_param = {"/speechlab/users/mfy43/timit/nnet_init_20160229015745_iter_13_lr0.013437_tr72.434_cv58.729.nerv",
+                         "/speechlab/users/mfy43/timit/s5/exp/dnn4_nerv_prepare/nnet_trans.nerv"}}
+
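+-- build the layer repository: first the primitive layers (the bias/window
+-- input transform, seven affine layers interleaved with six sigmoids forming
+-- a 440 -> 6x1024 -> 1959 DNN, and the softmax/CE output layers), then the
+-- graph layers that wire them together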
+function make_layer_repo(param_repo)
+    local layer_repo = nerv.LayerRepo(
+        {
+            -- global transf
+            ["nerv.BiasLayer"] =
+            {
+                blayer1 = {dim_in = {440}, dim_out = {440}, params = {bias = "bias0"}}
+            },
+            ["nerv.WindowLayer"] =
+            {
+                wlayer1 = {dim_in = {440}, dim_out = {440}, params = {window = "window0"}}
+            },
+            -- biased linearity
+            ["nerv.AffineLayer"] =
+            {
+                affine0 = {dim_in = {440}, dim_out = {1024},
+                           params = {ltp = "affine0_ltp", bp = "affine0_bp"}},
+                affine1 = {dim_in = {1024}, dim_out = {1024},
+                           params = {ltp = "affine1_ltp", bp = "affine1_bp"}},
+                affine2 = {dim_in = {1024}, dim_out = {1024},
+                           params = {ltp = "affine2_ltp", bp = "affine2_bp"}},
+                affine3 = {dim_in = {1024}, dim_out = {1024},
+                           params = {ltp = "affine3_ltp", bp = "affine3_bp"}},
+                affine4 = {dim_in = {1024}, dim_out = {1024},
+                           params = {ltp = "affine4_ltp", bp = "affine4_bp"}},
+                affine5 = {dim_in = {1024}, dim_out = {1024},
+                           params = {ltp = "affine5_ltp", bp = "affine5_bp"}},
+                affine6 = {dim_in = {1024}, dim_out = {1959},
+                           params = {ltp = "affine6_ltp", bp = "affine6_bp"}}
+            },
+            ["nerv.SigmoidLayer"] =
+            {
+                sigmoid0 = {dim_in = {1024}, dim_out = {1024}},
+                sigmoid1 = {dim_in = {1024}, dim_out = {1024}},
+                sigmoid2 = {dim_in = {1024}, dim_out = {1024}},
+                sigmoid3 = {dim_in = {1024}, dim_out = {1024}},
+                sigmoid4 = {dim_in = {1024}, dim_out = {1024}},
+                sigmoid5 = {dim_in = {1024}, dim_out = {1024}}
+            },
+            ["nerv.SoftmaxCELayer"] = -- softmax + ce criterion layer for finetune output
+            {
+                ce_crit = {dim_in = {1959, 1}, dim_out = {1}, compressed = true}
+            },
+            ["nerv.SoftmaxLayer"] = -- softmax for decode output
+            {
+                softmax = {dim_in = {1959}, dim_out = {1959}}
+            }
+        }, param_repo, gconf)
+
+    layer_repo:add_layers(
+        {
+            ["nerv.GraphLayer"] =
+            {
+                global_transf = {
+                    dim_in = {440}, dim_out = {440},
+                    layer_repo = layer_repo,
+                    connections = {
+                        {"<input>[1]", "blayer1[1]", 0},
+                        {"blayer1[1]", "wlayer1[1]", 0},
+                        {"wlayer1[1]", "<output>[1]", 0}
+                    }
+                },
+                main = {
+                    dim_in = {440}, dim_out = {1959},
+                    layer_repo = layer_repo,
+                    connections = {
+                        {"<input>[1]", "affine0[1]", 0},
+                        {"affine0[1]", "sigmoid0[1]", 0},
+                        {"sigmoid0[1]", "affine1[1]", 0},
+                        {"affine1[1]", "sigmoid1[1]", 0},
+                        {"sigmoid1[1]", "affine2[1]", 0},
+                        {"affine2[1]", "sigmoid2[1]", 0},
+                        {"sigmoid2[1]", "affine3[1]", 0},
+                        {"affine3[1]", "sigmoid3[1]", 0},
+                        {"sigmoid3[1]", "affine4[1]", 0},
+                        {"affine4[1]", "sigmoid4[1]", 0},
+                        {"sigmoid4[1]", "affine5[1]", 0},
+                        {"affine5[1]", "sigmoid5[1]", 0},
+                        {"sigmoid5[1]", "affine6[1]", 0},
+                        {"affine6[1]", "<output>[1]", 0}
+                    }
+                }
+            }
+        }, param_repo, gconf)
+
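+    -- wrap "main" into the two task-specific networks: "ce_output" appends
+    -- the softmax+CE criterion for training, "softmax_output" appends a
+    -- plain softmax for decoding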
+    layer_repo:add_layers(
+        {
+            ["nerv.GraphLayer"] =
+            {
+                ce_output = {
+                    dim_in = {440, 1}, dim_out = {1},
+                    layer_repo = layer_repo,
+                    connections = {
+                        {"<input>[1]", "main[1]", 0},
+                        {"main[1]", "ce_crit[1]", 0},
+                        {"<input>[2]", "ce_crit[2]", 0},
+                        {"ce_crit[1]", "<output>[1]", 0}
+                    }
+                },
+                softmax_output = {
+                    dim_in = {440}, dim_out = {1959},
+                    layer_repo = layer_repo,
+                    connections = {
+                        {"<input>[1]", "main[1]", 0},
+                        {"main[1]", "softmax[1]", 0},
+                        {"softmax[1]", "<output>[1]", 0}
+                    }
+                }
+            }
+        }, param_repo, gconf)
+
+    return layer_repo
+end
+
+function get_network(layer_repo)
+    return layer_repo:get_layer("ce_output")
+end
+
+function get_decode_network(layer_repo)
+    return layer_repo:get_layer("softmax_output")
+end
+
+function get_global_transf(layer_repo)
+    return layer_repo:get_layer("global_transf")
+end
+
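+-- training / cross-validation reader: features come from the Kaldi rspecifier
+-- passed in as scp_file, while the frame-level targets ("phone_state") are
+-- produced on the fly by piping the tri3_ali alignments through ali-to-pdf
+-- and ali-to-post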
+function make_readers(scp_file, layer_repo)
+    return {
+        {reader = nerv.KaldiReader(gconf,
+            {
+                id = "main_scp",
+                feature_rspecifier = scp_file,
+                conf_file = gconf.htk_conf,
+                frm_ext = gconf.frm_ext,
+                mlfs = {
+                    phone_state = {
+                        targets_rspecifier = "ark:/speechlab/tools/KALDI/kaldi-master/src/bin/ali-to-pdf " ..
+                                             "/speechlab/users/mfy43/timit/s5/exp/tri3_ali/final.mdl " ..
+                                             "\"ark:gunzip -c /speechlab/users/mfy43/timit/s5/exp/tri3_ali/ali.*.gz |\" " ..
+                                             "ark:- | " ..
+                                             "/speechlab/tools/KALDI/kaldi-master/src/bin/ali-to-post " ..
+                                             "ark:- ark:- |",
+                        format = "map"
+                    }
+                }
+            }),
+         data = {main_scp = 440, phone_state = 1}}
+    }
+end
+
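+-- decoding reader: no label streams are attached (mlfs = {}), and
+-- need_key = true keeps the utterance keys, presumably so that the network
+-- outputs can be matched back to their utterances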
+function make_decode_readers(scp_file, layer_repo)
+    return {
+        {reader = nerv.KaldiReader(gconf,
+            {
+                id = "main_scp",
+                feature_rspecifier = scp_file,
+                conf_file = gconf.htk_conf,
+                frm_ext = gconf.frm_ext,
+                mlfs = {},
+                need_key = true
+            }),
+         data = {main_scp = 440, phone_state = 1}}
+    }
+end
+
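+-- mini-batch buffer; note that buffer_size, batch_size and randomize are not
+-- set in gconf above, so they are presumably supplied or defaulted by the
+-- calling driver before this function is invoked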
+function make_buffer(readers)
+    return nerv.SGDBuffer(gconf,
+        {
+            buffer_size = gconf.buffer_size,
+            batch_size = gconf.batch_size,
+            randomize = gconf.randomize,
+            readers = readers,
+            use_gpu = true
+        })
+end
+
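+-- order in which the reader streams are fed to the network; the feature
+-- stream is marked to be passed through the global_transf graph layer first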
+function get_input_order()
+    return {{id = "main_scp", global_transf = true},
+            {id = "phone_state"}}
+end
+
+function get_decode_input_order()
+    return {{id = "main_scp", global_transf = true}}
+end
+
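+-- training statistics are read back from the CE criterion layer, which
+-- accumulates total_ce, total_correct and total_frames as data is propagated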
+function get_accuracy(layer_repo)
+    local ce_crit = layer_repo:get_layer("ce_crit")
+    return ce_crit.total_correct / ce_crit.total_frames * 100
+end
+
+function print_stat(layer_repo)
+    local ce_crit = layer_repo:get_layer("ce_crit")
+    nerv.info("*** training stat begin ***")
+    nerv.printf("cross entropy:\t\t%.8f\n", ce_crit.total_ce)
+    nerv.printf("correct:\t\t%d\n", ce_crit.total_correct)
+    nerv.printf("frames:\t\t\t%d\n", ce_crit.total_frames)
+    nerv.printf("err/frm:\t\t%.8f\n", ce_crit.total_ce / ce_crit.total_frames)
+    nerv.printf("accuracy:\t\t%.3f%%\n", get_accuracy(layer_repo))
+    nerv.info("*** training stat end ***")
+end