author    Yimmon Zhuang <[email protected]>  2015-10-08 22:27:58 +0800
committer Yimmon Zhuang <[email protected]>  2015-10-08 22:27:58 +0800
commit    7975592b94d65b6f356093694a76201de62a7a6a
tree      cf1eb9e8726cb016166129c51a3b8078cd9c78fd
parent    37286a08b40f68b544983d8dde4a77ac0b488397
MMI support

Add an MMI sequence-training criterion layer (nerv.MMILayer, backed by
nerv.KaldiMMI from libkaldiseq), an example CHiME3 MMI training script,
and rename the MPE example seq_chime.lua to mpe_chime3.lua.
-rw-r--r--  nerv/Makefile                                                            3
-rw-r--r--  nerv/examples/mmi_chime3.lua                                           182
-rw-r--r--  nerv/examples/mpe_chime3.lua (renamed from nerv/examples/seq_chime.lua)  0
-rw-r--r--  nerv/layer/init.lua                                                      1
-rw-r--r--  nerv/layer/mmi.lua                                                      50
5 files changed, 235 insertions(+), 1 deletion(-)
diff --git a/nerv/Makefile b/nerv/Makefile
index b874a94..ce178a0 100644
--- a/nerv/Makefile
+++ b/nerv/Makefile
@@ -31,7 +31,8 @@ OBJS := $(CORE_OBJS) $(NERV_OBJS) $(LUAT_OBJS)
LIBS := $(INST_LIBDIR)/libnerv.so $(LIB_PATH)/libnervcore.so $(LIB_PATH)/libluaT.so
LUA_LIBS := matrix/init.lua io/init.lua init.lua \
layer/init.lua layer/affine.lua layer/sigmoid.lua layer/softmax_ce.lua layer/softmax.lua \
- layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua layer/affine_recurrent.lua layer/mpe.lua \
+ layer/window.lua layer/bias.lua layer/combiner.lua layer/mse.lua layer/affine_recurrent.lua \
+ layer/mpe.lua layer/mmi.lua \
nn/init.lua nn/layer_repo.lua nn/param_repo.lua nn/layer_dag.lua \
io/sgd_buffer.lua
diff --git a/nerv/examples/mmi_chime3.lua b/nerv/examples/mmi_chime3.lua
new file mode 100644
index 0000000..a7ad268
--- /dev/null
+++ b/nerv/examples/mmi_chime3.lua
@@ -0,0 +1,182 @@
+require 'kaldi_io'
+gconf = {lrate = 0.00001, wcost = 0, momentum = 0.0,
+ cumat_type = nerv.CuMatrixFloat,
+ mmat_type = nerv.MMatrixFloat,
+ frm_ext = 5,
+ tr_scp = "ark,o:/slfs6/users/ymz09/kaldi/src/featbin/copy-feats scp:/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_mmi/train.scp ark:- |",
+ initialized_param = {"/slfs6/users/ymz09/nerv-project/nerv/nerv-speech/kaldi_seq/test/chime3_init_mmi.nerv",
+ "/slfs6/users/ymz09/nerv-project/nerv/nerv-speech/kaldi_seq/test/chime3_global_transf_mmi.nerv"},
+ debug = false}
+
+function make_layer_repo(param_repo)
+ local layer_repo = nerv.LayerRepo(
+ {
+ -- global transf
+ ["nerv.BiasLayer"] =
+ {
+ blayer1 = {{bias = "bias1"}, {dim_in = {440}, dim_out = {440}}},
+ blayer2 = {{bias = "bias2"}, {dim_in = {440}, dim_out = {440}}}
+ },
+ ["nerv.WindowLayer"] =
+ {
+ wlayer1 = {{window = "window1"}, {dim_in = {440}, dim_out = {440}}},
+ wlayer2 = {{window = "window2"}, {dim_in = {440}, dim_out = {440}}}
+ },
+ -- biased linearity
+ ["nerv.AffineLayer"] =
+ {
+ affine0 = {{ltp = "affine0_ltp", bp = "affine0_bp"},
+ {dim_in = {440}, dim_out = {2048}}},
+ affine1 = {{ltp = "affine1_ltp", bp = "affine1_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine2 = {{ltp = "affine2_ltp", bp = "affine2_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine3 = {{ltp = "affine3_ltp", bp = "affine3_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine4 = {{ltp = "affine4_ltp", bp = "affine4_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine5 = {{ltp = "affine5_ltp", bp = "affine5_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine6 = {{ltp = "affine6_ltp", bp = "affine6_bp"},
+ {dim_in = {2048}, dim_out = {2048}}},
+ affine7 = {{ltp = "affine7_ltp", bp = "affine7_bp"},
+ {dim_in = {2048}, dim_out = {2011}}}
+ },
+ ["nerv.SigmoidLayer"] =
+ {
+ sigmoid0 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid1 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid2 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid3 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid4 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid5 = {{}, {dim_in = {2048}, dim_out = {2048}}},
+ sigmoid6 = {{}, {dim_in = {2048}, dim_out = {2048}}}
+ },
+ ["nerv.MMILayer"] =
+ {
+ mmi_crit = {{}, {dim_in = {2011, -1}, dim_out = {1},
+ cmd = {
+ arg = "--class-frame-counts=/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced/ali_train_pdf.counts --acoustic-scale=0.1 --lm-scale=1.0 --learn-rate=0.00001 --drop-frames=true --verbose=1",
+ mdl = "/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_ali/final.mdl",
+ lat = "scp:/slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_denlats/lat.scp",
+ ali = "ark:gunzip -c /slfs5/users/ymz09/chime/baseline/ASR/exp/tri4a_dnn_tr05_multi_enhanced_ali/ali.*.gz |"
+ }
+ }
+ }
+ },
+ ["nerv.SoftmaxLayer"] = -- softmax for decode output
+ {
+ softmax = {{}, {dim_in = {2011}, dim_out = {2011}}}
+ }
+ }, param_repo, gconf)
+
+ layer_repo:add_layers(
+ {
+ ["nerv.DAGLayer"] =
+ {
+ global_transf = {{}, {
+ dim_in = {440}, dim_out = {440},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "blayer1[1]",
+ ["blayer1[1]"] = "wlayer1[1]",
+ ["wlayer1[1]"] = "blayer2[1]",
+ ["blayer2[1]"] = "wlayer2[1]",
+ ["wlayer2[1]"] = "<output>[1]"
+ }
+ }},
+ main = {{}, {
+ dim_in = {440}, dim_out = {2011},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "affine0[1]",
+ ["affine0[1]"] = "sigmoid0[1]",
+ ["sigmoid0[1]"] = "affine1[1]",
+ ["affine1[1]"] = "sigmoid1[1]",
+ ["sigmoid1[1]"] = "affine2[1]",
+ ["affine2[1]"] = "sigmoid2[1]",
+ ["sigmoid2[1]"] = "affine3[1]",
+ ["affine3[1]"] = "sigmoid3[1]",
+ ["sigmoid3[1]"] = "affine4[1]",
+ ["affine4[1]"] = "sigmoid4[1]",
+ ["sigmoid4[1]"] = "affine5[1]",
+ ["affine5[1]"] = "sigmoid5[1]",
+ ["sigmoid5[1]"] = "affine6[1]",
+ ["affine6[1]"] = "sigmoid6[1]",
+ ["sigmoid6[1]"] = "affine7[1]",
+ ["affine7[1]"] = "<output>[1]"
+ }
+ }}
+ }
+ }, param_repo, gconf)
+
+ layer_repo:add_layers(
+ {
+ ["nerv.DAGLayer"] =
+ {
+ mmi_output = {{}, {
+ dim_in = {440, -1}, dim_out = {1},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "main[1]",
+ ["main[1]"] = "mmi_crit[1]",
+ ["<input>[2]"] = "mmi_crit[2]",
+ ["mmi_crit[1]"] = "<output>[1]"
+ }
+ }},
+ softmax_output = {{}, {
+ dim_in = {440}, dim_out = {2011},
+ sub_layers = layer_repo,
+ connections = {
+ ["<input>[1]"] = "main[1]",
+ ["main[1]"] = "softmax[1]",
+ ["softmax[1]"] = "<output>[1]"
+ }
+ }}
+ }
+ }, param_repo, gconf)
+
+ return layer_repo
+end
+
+function get_network(layer_repo)
+ return layer_repo:get_layer("mmi_output")
+end
+
+function get_decode_network(layer_repo)
+ return layer_repo:get_layer("softmax_output")
+end
+
+function get_global_transf(layer_repo)
+ return layer_repo:get_layer("global_transf")
+end
+
+function make_readers(feature_rspecifier, layer_repo)
+ return {
+ {reader = nerv.KaldiReader(gconf,
+ {
+ id = "main_scp",
+ feature_rspecifier = feature_rspecifier,
+ frm_ext = gconf.frm_ext,
+ global_transf = layer_repo:get_layer("global_transf"),
+ mlfs = {}
+ })
+ }
+ }
+end
+
+function get_input_order()
+ return {{id = "main_scp", global_transf = true},
+ {id = "key"}}
+end
+
+function get_accuracy(layer_repo)
+    -- frame accuracy is not tracked during MMI sequence training
+    return 0
+end
+
+function print_stat(layer_repo)
+ local mmi_crit = layer_repo:get_layer("mmi_crit")
+ nerv.info("*** training stat begin ***")
+ nerv.printf("frames:\t\t\t%d\n", mmi_crit.total_frames)
+ nerv.info("*** training stat end ***")
+end
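
Note: the functions defined above (make_layer_repo, get_network,
get_decode_network, get_global_transf, make_readers, get_input_order,
get_accuracy, print_stat) form the callback interface consumed by the
sequence-training driver in nerv-speech/kaldi_seq. A minimal sketch of
how a driver would wire them together follows. The per-utterance loop
is only outlined in comments, since the reader protocol is outside
this commit; ParamRepo:import follows the pattern used by nerv's other
example trainers and is an assumption here.

    -- sketch: wiring up the callbacks from mmi_chime3.lua (assumed driver)
    dofile("mmi_chime3.lua")

    -- load the pre-trained parameter files listed in gconf
    local param_repo = nerv.ParamRepo()
    param_repo:import(gconf.initialized_param, nil, gconf)

    -- build all layers, then fetch the entry-point networks
    local layer_repo = make_layer_repo(param_repo)
    local train_net  = get_network(layer_repo)         -- DAG ending in mmi_crit
    local decode_net = get_decode_network(layer_repo)  -- DAG ending in softmax
    local readers    = make_readers(gconf.tr_scp, layer_repo)

    -- per utterance, the driver roughly does (nerv.DAGLayer methods):
    --   local ok = train_net:propagate(input, output)  -- MMILayer runs check()
    --   if ok then
    --       train_net:back_propagate(bp_err, next_bp_err, input, output)
    --       train_net:update(bp_err, input, output)
    --   end
    print_stat(layer_repo)
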
diff --git a/nerv/examples/seq_chime.lua b/nerv/examples/mpe_chime3.lua
index be723ca..be723ca 100644
--- a/nerv/examples/seq_chime.lua
+++ b/nerv/examples/mpe_chime3.lua
diff --git a/nerv/layer/init.lua b/nerv/layer/init.lua
index b74422f..25dfebb 100644
--- a/nerv/layer/init.lua
+++ b/nerv/layer/init.lua
@@ -80,3 +80,4 @@ nerv.include('combiner.lua')
nerv.include('affine_recurrent.lua')
nerv.include('softmax.lua')
nerv.include('mpe.lua')
+nerv.include('mmi.lua')
diff --git a/nerv/layer/mmi.lua b/nerv/layer/mmi.lua
new file mode 100644
index 0000000..ecc7f48
--- /dev/null
+++ b/nerv/layer/mmi.lua
@@ -0,0 +1,50 @@
+require 'libkaldiseq'
+local MMILayer = nerv.class("nerv.MMILayer", "nerv.Layer")
+
+function MMILayer:__init(id, global_conf, layer_conf)
+ self.id = id
+ self.gconf = global_conf
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self.arg = layer_conf.cmd.arg
+ self.mdl = layer_conf.cmd.mdl
+ self.lat = layer_conf.cmd.lat
+ self.ali = layer_conf.cmd.ali
+ self:check_dim_len(2, -1) -- two inputs: nn output and utt key
+end
+
+function MMILayer:init(batch_size)
+ self.total_frames = 0
+ self.kaldi_mmi = nerv.KaldiMMI(self.arg, self.mdl, self.lat, self.ali)
+ if self.kaldi_mmi == nil then
+        nerv.error("valid kaldi arguments are expected: %s %s %s %s",
+                   self.arg, self.mdl, self.lat, self.ali)
+ end
+end
+
+function MMILayer:batch_resize(batch_size)
+ -- do nothing
+end
+
+function MMILayer:update(bp_err, input, output)
+ -- no params, therefore do nothing
+end
+
+function MMILayer:propagate(input, output)
+    -- input[1]: nn output matrix, input[2]: utterance key
+    self.valid = self.kaldi_mmi:check(input[1], input[2])
+    return self.valid
+end
+
+function MMILayer:back_propagate(bp_err, next_bp_err, input, output)
+ if self.valid ~= true then
+        nerv.error("kaldi sequence training back_propagate failed")
+ end
+ local mmat = input[1]:new_to_host()
+ next_bp_err[1]:copy_fromh(self.kaldi_mmi:calc_diff(mmat, input[2]))
+ self.total_frames = self.total_frames + self.kaldi_mmi:get_num_frames()
+end
+
+function MMILayer:get_params()
+ return nerv.ParamRepo({})
+end