author    Ted Yin <ted.sybil@gmail.com>  2015-10-12 09:26:53 +0800
committer Ted Yin <ted.sybil@gmail.com>  2015-10-12 09:26:53 +0800
commit    0dba4c998fcccb4bae29582b7d8be94de476dd0b
tree      b8529d4f0c2ea0a91ee4b7a4b21a14c0616fc081
parent    7acd14eca701deaffb2d16262528da37ee23263a
parent    e39fb231f64ddc8b79a6eb5434f529aadb3165fe
Merge pull request #6 from yimmon/master
add kaldi_seq
-rw-r--r--  kaldi_io/Makefile                        |   2
-rw-r--r--  kaldi_io/example/swb_baseline.lua        |   3
-rw-r--r--  kaldi_io/example/swb_baseline_basic.lua  | 157
-rw-r--r--  kaldi_io/init.lua                        |   1
-rw-r--r--  kaldi_io/kaldi.mk                        |  70
-rw-r--r--  kaldi_seq/.valgrind                      |   0
-rw-r--r--  kaldi_seq/Makefile                       |  47
-rw-r--r--  kaldi_seq/init.c                         |   8
-rw-r--r--  kaldi_seq/init.lua                       |   2
-rw-r--r--  kaldi_seq/kaldi_seq-scm-1.rockspec       |  36
-rw-r--r--  kaldi_seq/layer/mmi.lua                  |  50
-rw-r--r--  kaldi_seq/layer/mpe.lua                  |  52
-rw-r--r--  kaldi_seq/src/init.c                     | 131
-rw-r--r--  kaldi_seq/src/kaldi_mmi.cpp              | 427
-rw-r--r--  kaldi_seq/src/kaldi_mmi.h                |  20
-rw-r--r--  kaldi_seq/src/kaldi_mpe.cpp              | 411
-rw-r--r--  kaldi_seq/src/kaldi_mpe.h                |  21
-rw-r--r--  kaldi_seq/tools/net_kaldi2nerv.cpp       |  85
-rw-r--r--  kaldi_seq/tools/transf_kaldi2nerv.cpp    | 106
19 files changed, 1400 insertions(+), 229 deletions(-)
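
The merge adds MPE and MMI sequence-training criteria as ordinary NERV layers (kaldi_seq/layer/mpe.lua and mmi.lua below). A minimal sketch of how one of them might be declared in a layer repo, following the conventions of the swb_baseline examples — the layer id, dimensions, option string, and Kaldi files/rspecifiers here are illustrative, not part of this patch:

    ["nerv.MPELayer"] =
    {
        mpe_crit = {{}, {dim_in = {3001, 1}, dim_out = {1},
                         -- cmd fields are consumed by MPELayer:__init
                         cmd = {arg = "--acoustic-scale=0.1", -- extra trainer options
                                mdl = "final.mdl",            -- transition model
                                lat = "scp:denlats.scp",      -- den-lattice rspecifier
                                ali = "ark:ali.ark"}}}        -- alignment rspecifier
    }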
diff --git a/kaldi_io/Makefile b/kaldi_io/Makefile
index 1066fc5..7b0c0bd 100644
--- a/kaldi_io/Makefile
+++ b/kaldi_io/Makefile
@@ -1,5 +1,5 @@
# Change KDIR to `kaldi-trunk' path (Kaldi must be compiled with --shared)
-KDIR := /home/stuymf/kaldi-trunk/
+KDIR := /slfs6/users/ymz09/kaldi/
SHELL := /bin/bash
BUILD_DIR := $(CURDIR)/build
diff --git a/kaldi_io/example/swb_baseline.lua b/kaldi_io/example/swb_baseline.lua
index 8b1e122..3ef6c65 100644
--- a/kaldi_io/example/swb_baseline.lua
+++ b/kaldi_io/example/swb_baseline.lua
@@ -173,7 +173,8 @@ function make_buffer(readers)
end
function get_input_order()
- return {"main_scp", "phone_state"}
+ return {{id = "main_scp", global_transf = true},
+ {id = "phone_state"}}
end
function get_accuracy(layer_repo)
diff --git a/kaldi_io/example/swb_baseline_basic.lua b/kaldi_io/example/swb_baseline_basic.lua
deleted file mode 100644
index e6c8145..0000000
--- a/kaldi_io/example/swb_baseline_basic.lua
+++ /dev/null
@@ -1,157 +0,0 @@
-require 'kaldi_io'
-gconf = {lrate = 0.8, wcost = 1e-6, momentum = 0.9,
- cumat_type = nerv.CuMatrixFloat,
- mmat_type = nerv.MMatrixFloat,
- frm_ext = 5,
- tr_rspecifier = "ark:/slfs6/users/ymz09/kaldi/src/featbin/copy-feats scp:/slfs6/users/ymz09/swb_ivec/train_bp.scp ark:- |",
- cv_rspecifier = "ark:/slfs6/users/ymz09/kaldi/src/featbin/copy-feats scp:/slfs6/users/ymz09/swb_ivec/train_cv.scp ark:- |",
- initialized_param = {"/slfs6/users/ymz09/swb_ivec/swb_init.nerv",
- "/slfs6/users/ymz09/swb_ivec/swb_global_transf.nerv"},
- debug = false}
-
-function make_sublayer_repo(param_repo)
- return nerv.LayerRepo(
- {
- -- global transf
- ["nerv.BiasLayer"] =
- {
- blayer1 = {{bias = "bias1"}, {dim_in = {429}, dim_out = {429}}},
- blayer2 = {{bias = "bias2"}, {dim_in = {429}, dim_out = {429}}}
- },
- ["nerv.WindowLayer"] =
- {
- wlayer1 = {{window = "window1"}, {dim_in = {429}, dim_out = {429}}},
- wlayer2 = {{window = "window2"}, {dim_in = {429}, dim_out = {429}}}
- },
- -- biased linearity
- ["nerv.AffineLayer"] =
- {
- affine0 = {{ltp = "affine0_ltp", bp = "affine0_bp"},
- {dim_in = {429}, dim_out = {2048}}},
- affine1 = {{ltp = "affine1_ltp", bp = "affine1_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine2 = {{ltp = "affine2_ltp", bp = "affine2_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine3 = {{ltp = "affine3_ltp", bp = "affine3_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine4 = {{ltp = "affine4_ltp", bp = "affine4_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine5 = {{ltp = "affine5_ltp", bp = "affine5_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine6 = {{ltp = "affine6_ltp", bp = "affine6_bp"},
- {dim_in = {2048}, dim_out = {2048}}},
- affine7 = {{ltp = "affine7_ltp", bp = "affine7_bp"},
- {dim_in = {2048}, dim_out = {3001}}}
- },
- ["nerv.SigmoidLayer"] =
- {
- sigmoid0 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid1 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid2 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid3 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid4 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid5 = {{}, {dim_in = {2048}, dim_out = {2048}}},
- sigmoid6 = {{}, {dim_in = {2048}, dim_out = {2048}}}
- },
- ["nerv.SoftmaxCELayer"] =
- {
- ce_crit = {{}, {dim_in = {3001, 1}, dim_out = {1}, compressed = true}}
- }
- }, param_repo, gconf)
-end
-
-function make_layer_repo(sublayer_repo, param_repo)
- return nerv.LayerRepo(
- {
- ["nerv.DAGLayer"] =
- {
- global_transf = {{}, {
- dim_in = {429}, dim_out = {429},
- sub_layers = sublayer_repo,
- connections = {
- ["<input>[1]"] = "blayer1[1]",
- ["blayer1[1]"] = "wlayer1[1]",
- ["wlayer1[1]"] = "blayer2[1]",
- ["blayer2[1]"] = "wlayer2[1]",
- ["wlayer2[1]"] = "<output>[1]"
- }
- }},
- main = {{}, {
- dim_in = {429, 1}, dim_out = {1},
- sub_layers = sublayer_repo,
- connections = {
- ["<input>[1]"] = "affine0[1]",
- ["affine0[1]"] = "sigmoid0[1]",
- ["sigmoid0[1]"] = "affine1[1]",
- ["affine1[1]"] = "sigmoid1[1]",
- ["sigmoid1[1]"] = "affine2[1]",
- ["affine2[1]"] = "sigmoid2[1]",
- ["sigmoid2[1]"] = "affine3[1]",
- ["affine3[1]"] = "sigmoid3[1]",
- ["sigmoid3[1]"] = "affine4[1]",
- ["affine4[1]"] = "sigmoid4[1]",
- ["sigmoid4[1]"] = "affine5[1]",
- ["affine5[1]"] = "sigmoid5[1]",
- ["sigmoid5[1]"] = "affine6[1]",
- ["affine6[1]"] = "sigmoid6[1]",
- ["sigmoid6[1]"] = "affine7[1]",
- ["affine7[1]"] = "ce_crit[1]",
- ["<input>[2]"] = "ce_crit[2]",
- ["ce_crit[1]"] = "<output>[1]"
- }
- }}
- }
- }, param_repo, gconf)
-end
-
-function get_network(layer_repo)
- return layer_repo:get_layer("main")
-end
-
-function make_readers(feature_rspecifier, layer_repo)
- return {
- {reader = nerv.KaldiReader(gconf,
- {
- id = "main_scp",
- feature_rspecifier = feature_rspecifier,
- frm_ext = gconf.frm_ext,
- mlfs = {
- phone_state = {
- targets_rspecifier = "ark:/slfs6/users/ymz09/kaldi/src/bin/ali-to-pdf /slfs6/users/ymz09/swb_ivec/final.mdl \"ark:gunzip -c /slfs6/users/ymz09/swb_ivec/ali.*.gz |\" ark:- | /slfs6/users/ymz09/kaldi/src/bin/ali-to-post ark:- ark:- |",
- format = "map"
- }
- },
- global_transf = layer_repo:get_layer("global_transf")
- }),
- data = {main_scp = 429, phone_state = 1}}
- }
-end
-
-function make_buffer(readers)
- return nerv.SGDBuffer(gconf,
- {
- buffer_size = gconf.buffer_size,
- randomize = gconf.randomize,
- readers = readers
- })
-end
-
-function get_input_order()
- return {"main_scp", "phone_state"}
-end
-
-function get_accuracy(sublayer_repo)
- local ce_crit = sublayer_repo:get_layer("ce_crit")
- return ce_crit.total_correct / ce_crit.total_frames * 100
-end
-
-function print_stat(sublayer_repo)
- local ce_crit = sublayer_repo:get_layer("ce_crit")
- nerv.info("*** training stat begin ***")
- nerv.printf("cross entropy:\t\t%.8f\n", ce_crit.total_ce)
- nerv.printf("correct:\t\t%d\n", ce_crit.total_correct)
- nerv.printf("frames:\t\t\t%d\n", ce_crit.total_frames)
- nerv.printf("err/frm:\t\t%.8f\n", ce_crit.total_ce / ce_crit.total_frames)
- nerv.printf("accuracy:\t\t%.3f%%\n", get_accuracy(sublayer_repo))
- nerv.info("*** training stat end ***")
-end
diff --git a/kaldi_io/init.lua b/kaldi_io/init.lua
index 3fc5b10..b7e6da8 100644
--- a/kaldi_io/init.lua
+++ b/kaldi_io/init.lua
@@ -66,6 +66,7 @@ function KaldiReader:get_data()
rearranged:copy_toh(feat_utter)
end
res[self.feat_id] = feat_utter
+ res["key"] = self.feat_repo:key()
-- add corresponding labels
for id, repo in pairs(self.lab_repo) do
local lab_utter = repo:get_utter(self.feat_repo,
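This one-line addition is what ties kaldi_io to the new kaldi_seq package: every utterance read now also carries its Kaldi key, which the MMI/MPE layers need to fetch the matching lattice and alignment. An illustrative sketch of the consumer side (the reader id is taken from the example config above; not part of the patch):

    local data = reader:get_data()
    local feats = data["main_scp"]  -- feature matrix for the utterance
    local key   = data["key"]       -- utterance key added by this commit;
                                    -- reaches the seq layers as input[2]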
diff --git a/kaldi_io/kaldi.mk b/kaldi_io/kaldi.mk
deleted file mode 100644
index 4a397f0..0000000
--- a/kaldi_io/kaldi.mk
+++ /dev/null
@@ -1,70 +0,0 @@
-# This file was generated using the following command:
-# ./configure
-
-# Rules that enable valgrind debugging ("make valgrind")
-
-valgrind: .valgrind
-
-.valgrind:
- echo -n > valgrind.out
- for x in $(TESTFILES); do echo $$x>>valgrind.out; valgrind ./$$x >/dev/null 2>> valgrind.out; done
- ! ( grep 'ERROR SUMMARY' valgrind.out | grep -v '0 errors' )
- ! ( grep 'definitely lost' valgrind.out | grep -v -w 0 )
- rm valgrind.out
- touch .valgrind
-
-
-CONFIGURE_VERSION := 2
-OPENFSTLIBS = -L/slwork/users/wd007/src/kaldi/tools/openfst/lib -lfst
-OPENFSTLDFLAGS = -Wl,-rpath=/slwork/users/wd007/src/kaldi/tools/openfst/lib
-FSTROOT = /slwork/users/wd007/src/kaldi/tools/openfst
-ATLASINC = /slwork/users/wd007/src/kaldi/tools/ATLAS/include
-ATLASLIBS = -L/usr/lib -llapack -lcblas -latlas -lf77blas
-# You have to make sure ATLASLIBS is set...
-
-ifndef FSTROOT
-$(error FSTROOT not defined.)
-endif
-
-ifndef ATLASINC
-$(error ATLASINC not defined.)
-endif
-
-ifndef ATLASLIBS
-$(error ATLASLIBS not defined.)
-endif
-
-
-CXXFLAGS = -msse -msse2 -Wall -I.. \
- -fPIC \
- -DKALDI_DOUBLEPRECISION=0 -DHAVE_POSIX_MEMALIGN \
- -Wno-sign-compare -Wno-unused-local-typedefs -Winit-self \
- -DHAVE_EXECINFO_H=1 -rdynamic -DHAVE_CXXABI_H \
- -DHAVE_ATLAS -I$(ATLASINC) \
- -I$(FSTROOT)/include \
- $(EXTRA_CXXFLAGS) \
- -g # -O0 -DKALDI_PARANOID
-
-ifeq ($(KALDI_FLAVOR), dynamic)
-CXXFLAGS += -fPIC
-endif
-
-LDFLAGS = -rdynamic $(OPENFSTLDFLAGS)
-LDLIBS = $(EXTRA_LDLIBS) $(OPENFSTLIBS) $(ATLASLIBS) -lm -lpthread -ldl
-CC = g++
-CXX = g++
-AR = ar
-AS = as
-RANLIB = ranlib
-
-#Next section enables CUDA for compilation
-CUDA = true
-CUDATKDIR = /usr/local/cuda
-
-CUDA_INCLUDE= -I$(CUDATKDIR)/include
-CUDA_FLAGS = -g -Xcompiler -fPIC --verbose --machine 64 -DHAVE_CUDA
-
-CXXFLAGS += -DHAVE_CUDA -I$(CUDATKDIR)/include
-CUDA_LDFLAGS += -L$(CUDATKDIR)/lib64 -Wl,-rpath,$(CUDATKDIR)/lib64
-CUDA_LDLIBS += -lcublas -lcudart #LDLIBS : The libs are loaded later than static libs in implicit rule
-
diff --git a/kaldi_seq/.valgrind b/kaldi_seq/.valgrind
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/kaldi_seq/.valgrind
diff --git a/kaldi_seq/Makefile b/kaldi_seq/Makefile
new file mode 100644
index 0000000..e76eea8
--- /dev/null
+++ b/kaldi_seq/Makefile
@@ -0,0 +1,47 @@
+# Change KDIR to `kaldi-trunk' path (Kaldi must be compiled with --shared)
+KDIR := /slfs6/users/ymz09/kaldi/
+
+SHELL := /bin/bash
+BUILD_DIR := $(CURDIR)/build
+INC_PATH := $(LUA_BINDIR)/../include/
+OBJS := init.o src/kaldi_mpe.o src/kaldi_mmi.o src/init.o
+LIBS := libkaldiseq.so
+LUA_LIBS := init.lua layer/mpe.lua layer/mmi.lua
+INCLUDE := -I $(LUA_INCDIR) -I $(INC_PATH) -DLUA_USE_APICHECK
+
+SUBDIR := src layer
+OBJ_DIR := $(BUILD_DIR)/objs
+LUA_DIR := $(INST_LUADIR)/kaldi_seq
+KALDIINCLUDE := -I $(KDIR)/tools/ATLAS/include/ -I $(KDIR)/tools/openfst/include/ -I $(KDIR)/src/
+
+OBJS := $(addprefix $(OBJ_DIR)/,$(OBJS))
+LIBS := $(addprefix $(INST_LIBDIR)/,$(LIBS))
+OBJ_SUBDIR := $(addprefix $(OBJ_DIR)/,$(SUBDIR))
+LUA_SUBDIR := $(addprefix $(LUA_DIR)/,$(SUBDIR))
+LUA_LIBS := $(addprefix $(LUA_DIR)/,$(LUA_LIBS))
+LIB_PATH := $(LUA_BINDIR)/../lib
+
+build: $(OBJ_DIR) $(OBJ_SUBDIR) $(OBJS)
+install: $(LUA_DIR) $(LUA_SUBDIR) $(LUA_LIBS) $(LIBS)
+
+include $(KDIR)/src/kaldi.mk
+
+KL1 := -rdynamic -Wl,-rpath=$(KDIR)/tools/openfst/lib -L/usr/local/cuda/lib64 -Wl,-rpath,/usr/local/cuda/lib64 -Wl,-rpath=$(KDIR)/src/lib -L. -L$(KDIR)/src/nnet/ -L$(KDIR)/src/cudamatrix/ -L$(KDIR)/src/lat/ -L$(KDIR)/src/hmm/ -L$(KDIR)/src/tree/ -L$(KDIR)/src/matrix/ -L$(KDIR)/src/util/ -L$(KDIR)/src/base/ $(KDIR)/src/nnet//libkaldi-nnet.so $(KDIR)/src/cudamatrix//libkaldi-cudamatrix.so $(KDIR)/src/lat//libkaldi-lat.so $(KDIR)/src/hmm//libkaldi-hmm.so $(KDIR)/src/tree//libkaldi-tree.so $(KDIR)/src/matrix//libkaldi-matrix.so $(KDIR)/src/util//libkaldi-util.so $(KDIR)/src/base//libkaldi-base.so -L$(KDIR)/tools/openfst/lib -lfst /usr/lib/liblapack.so /usr/lib/libcblas.so /usr/lib/libatlas.so /usr/lib/libf77blas.so -lm -lpthread -ldl -lcublas -lcudart -lkaldi-nnet -lkaldi-cudamatrix -lkaldi-lat -lkaldi-hmm -lkaldi-tree -lkaldi-matrix -lkaldi-util -lkaldi-base
+
+KL2 := -msse -msse2 -Wall -pthread -DKALDI_DOUBLEPRECISION=0 -DHAVE_POSIX_MEMALIGN -Wno-sign-compare -Wno-unused-local-typedefs -Winit-self -DHAVE_EXECINFO_H=1 -rdynamic -DHAVE_CXXABI_H -DHAVE_ATLAS -I$(KDIR)/tools/ATLAS/include -I$(KDIR)/tools/openfst/include -Wno-sign-compare -g -fPIC -I/usr/local/cuda/include -L/usr/local/cuda/lib64 -DKALDI_NO_EXPF
+
+$(OBJ_DIR) $(LUA_DIR) $(OBJ_SUBDIR) $(LUA_SUBDIR):
+ -mkdir -p $@
+$(LUA_DIR)/%.lua: %.lua
+ cp $< $@
+$(LIBS): $(OBJ_DIR)/src/kaldi_mpe.o $(OBJ_DIR)/src/kaldi_mmi.o $(OBJ_DIR)/init.o $(OBJ_DIR)/src/init.o
+ gcc -shared -fPIC -o $@ $(OBJ_DIR)/src/kaldi_mpe.o $(OBJ_DIR)/src/kaldi_mmi.o $(OBJ_DIR)/init.o $(OBJ_DIR)/src/init.o -lstdc++ -Wl,-rpath=$(LIB_PATH) -L$(LIB_PATH) -lnervcore -lluaT $(KL1)
+$(OBJ_DIR)/src/kaldi_mpe.o: src/kaldi_mpe.cpp
+ g++ -o $@ -c $< $(KALDIINCLUDE) -g -fPIC $(INCLUDE) $(KL2)
+$(OBJ_DIR)/src/kaldi_mmi.o: src/kaldi_mmi.cpp
+ g++ -o $@ -c $< $(KALDIINCLUDE) -g -fPIC $(INCLUDE) $(KL2)
+$(OBJ_DIR)/%.o: %.c
+ gcc -o $@ -c $< -g $(INCLUDE) -fPIC
+clean:
+ -rm $(OBJ_DIR)/src/*.o
+
diff --git a/kaldi_seq/init.c b/kaldi_seq/init.c
new file mode 100644
index 0000000..ed89473
--- /dev/null
+++ b/kaldi_seq/init.c
@@ -0,0 +1,8 @@
+#include "../nerv/common.h"
+#include <stdio.h>
+
+extern void kaldi_seq_init(lua_State *L);
+int luaopen_libkaldiseq(lua_State *L) {
+ kaldi_seq_init(L);
+ return 1;
+}
diff --git a/kaldi_seq/init.lua b/kaldi_seq/init.lua
new file mode 100644
index 0000000..39f4cb3
--- /dev/null
+++ b/kaldi_seq/init.lua
@@ -0,0 +1,2 @@
+nerv.include('layer/mpe.lua')
+nerv.include('layer/mmi.lua')
diff --git a/kaldi_seq/kaldi_seq-scm-1.rockspec b/kaldi_seq/kaldi_seq-scm-1.rockspec
new file mode 100644
index 0000000..41e34f0
--- /dev/null
+++ b/kaldi_seq/kaldi_seq-scm-1.rockspec
@@ -0,0 +1,36 @@
+package = "kaldi_seq"
+version = "scm-1"
+source = {
+ url = "https://github.com/Nerv-SJTU/nerv-speech.git"
+}
+description = {
+ summary = "Kaldi sequence training support for Nerv",
+ detailed = [[
+ ]],
+ homepage = "https://github.com/Nerv-SJTU/nerv-speech",
+ license = "BSD"
+}
+dependencies = {
+ "nerv >= scm-1",
+ "lua >= 5.1"
+}
+build = {
+ type = "make",
+ build_variables = {
+ CFLAGS="$(CFLAGS)",
+ LIBFLAG="$(LIBFLAG)",
+ LUA_LIBDIR="$(LUA_LIBDIR)",
+ LUA_BINDIR="$(LUA_BINDIR)",
+ LUA_INCDIR="$(LUA_INCDIR)",
+ INST_PREFIX="$(PREFIX)",
+ LUA="$(LUA)",
+ },
+ install_variables = {
+ LUA_BINDIR="$(LUA_BINDIR)",
+ INST_PREFIX="$(PREFIX)",
+ INST_BINDIR="$(BINDIR)",
+ INST_LIBDIR="$(LIBDIR)",
+ INST_LUADIR="$(LUADIR)",
+ INST_CONFDIR="$(CONFDIR)",
+ },
+}
diff --git a/kaldi_seq/layer/mmi.lua b/kaldi_seq/layer/mmi.lua
new file mode 100644
index 0000000..ecc7f48
--- /dev/null
+++ b/kaldi_seq/layer/mmi.lua
@@ -0,0 +1,50 @@
+require 'libkaldiseq'
+local MMILayer = nerv.class("nerv.MMILayer", "nerv.Layer")
+
+function MMILayer:__init(id, global_conf, layer_conf)
+ self.id = id
+ self.gconf = global_conf
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self.arg = layer_conf.cmd.arg
+ self.mdl = layer_conf.cmd.mdl
+ self.lat = layer_conf.cmd.lat
+ self.ali = layer_conf.cmd.ali
+ self:check_dim_len(2, -1) -- two inputs: nn output and utt key
+end
+
+function MMILayer:init(batch_size)
+ self.total_frames = 0
+ self.kaldi_mmi = nerv.KaldiMMI(self.arg, self.mdl, self.lat, self.ali)
+ if self.kaldi_mmi == nil then
+ nerv.error("kaldi arguments is expected: %s %s %s %s", self.arg,
+ self.mdl, self.lat, self.ali)
+ end
+end
+
+function MMILayer:batch_resize(batch_size)
+ -- do nothing
+end
+
+function MMILayer:update(bp_err, input, output)
+ -- no params, therefore do nothing
+end
+
+function MMILayer:propagate(input, output)
+ self.valid = false
+ self.valid = self.kaldi_mmi:check(input[1], input[2])
+ return self.valid
+end
+
+function MMILayer:back_propagate(bp_err, next_bp_err, input, output)
+ if self.valid ~= true then
+ nerv.error("kaldi sequence training back_propagate fail")
+ end
+ local mmat = input[1]:new_to_host()
+ next_bp_err[1]:copy_fromh(self.kaldi_mmi:calc_diff(mmat, input[2]))
+ self.total_frames = self.total_frames + self.kaldi_mmi:get_num_frames()
+end
+
+function MMILayer:get_params()
+ return nerv.ParamRepo({})
+end
diff --git a/kaldi_seq/layer/mpe.lua b/kaldi_seq/layer/mpe.lua
new file mode 100644
index 0000000..ec8a8f3
--- /dev/null
+++ b/kaldi_seq/layer/mpe.lua
@@ -0,0 +1,52 @@
+require 'libkaldiseq'
+local MPELayer = nerv.class("nerv.MPELayer", "nerv.Layer")
+
+function MPELayer:__init(id, global_conf, layer_conf)
+ self.id = id
+ self.gconf = global_conf
+ self.dim_in = layer_conf.dim_in
+ self.dim_out = layer_conf.dim_out
+ self.arg = layer_conf.cmd.arg
+ self.mdl = layer_conf.cmd.mdl
+ self.lat = layer_conf.cmd.lat
+ self.ali = layer_conf.cmd.ali
+ self:check_dim_len(2, -1) -- two inputs: nn output and utt key
+end
+
+function MPELayer:init(batch_size)
+ self.total_correct = 0
+ self.total_frames = 0
+ self.kaldi_mpe = nerv.KaldiMPE(self.arg, self.mdl, self.lat, self.ali)
+ if self.kaldi_mpe == nil then
+ nerv.error("kaldi arguments is expected: %s %s %s %s", self.arg,
+ self.mdl, self.lat, self.ali)
+ end
+end
+
+function MPELayer:batch_resize(batch_size)
+ -- do nothing
+end
+
+function MPELayer:update(bp_err, input, output)
+ -- no params, therefore do nothing
+end
+
+function MPELayer:propagate(input, output)
+ self.valid = false
+ self.valid = self.kaldi_mpe:check(input[1], input[2])
+ return self.valid
+end
+
+function MPELayer:back_propagate(bp_err, next_bp_err, input, output)
+ if self.valid ~= true then
+ nerv.error("kaldi sequence training back_propagate fail")
+ end
+ local mmat = input[1]:new_to_host()
+ next_bp_err[1]:copy_fromh(self.kaldi_mpe:calc_diff(mmat, input[2]))
+ self.total_frames = self.total_frames + self.kaldi_mpe:get_num_frames()
+ self.total_correct = self.total_correct + self.kaldi_mpe:get_utt_frame_acc()
+end
+
+function MPELayer:get_params()
+ return nerv.ParamRepo({})
+end
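
Note that, unlike MMILayer, MPELayer also accumulates total_correct via get_utt_frame_acc(), so a frame-accuracy statistic can be reported in the style of print_stat() from the kaldi_io examples. A hypothetical sketch (the layer id is illustrative):

    local mpe_crit = layer_repo:get_layer("mpe_crit")  -- illustrative id
    nerv.printf("frame acc:\t\t%.3f%%\n",
                mpe_crit.total_correct / mpe_crit.total_frames * 100)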
diff --git a/kaldi_seq/src/init.c b/kaldi_seq/src/init.c
new file mode 100644
index 0000000..9b38056
--- /dev/null
+++ b/kaldi_seq/src/init.c
@@ -0,0 +1,131 @@
+#include "nerv/common.h"
+#include "kaldi_mpe.h"
+#include "kaldi_mmi.h"
+#include <stdio.h>
+
+const char *nerv_kaldi_mpe_tname = "nerv.KaldiMPE";
+const char *nerv_kaldi_mmi_tname = "nerv.KaldiMMI";
+const char *nerv_matrix_cuda_float_tname = "nerv.CuMatrixFloat";
+const char *nerv_matrix_host_float_tname = "nerv.MMatrixFloat";
+
+static int mpe_new(lua_State *L) {
+ const char *arg = luaL_checkstring(L, 1);
+ const char *mdl = luaL_checkstring(L, 2);
+ const char *lat = luaL_checkstring(L, 3);
+ const char *ali = luaL_checkstring(L, 4);
+ KaldiMPE *mpe = new_KaldiMPE(arg, mdl, lat, ali);
+ luaT_pushudata(L, mpe, nerv_kaldi_mpe_tname);
+ return 1;
+}
+
+static int mpe_destroy(lua_State *L) {
+ KaldiMPE *mpe = luaT_checkudata(L, 1, nerv_kaldi_mpe_tname);
+ destroy_KaldiMPE(mpe);
+ return 0;
+}
+
+static int mpe_check(lua_State *L) {
+ KaldiMPE *mpe = luaT_checkudata(L, 1, nerv_kaldi_mpe_tname);
+ const Matrix *cumat = luaT_checkudata(L, 2, nerv_matrix_cuda_float_tname);
+ const char *utt = luaL_checkstring(L, 3);
+
+ lua_pushboolean(L, check_mpe(mpe, cumat, utt));
+ return 1;
+}
+
+static int mpe_calc_diff(lua_State *L) {
+ KaldiMPE *mpe = luaT_checkudata(L, 1, nerv_kaldi_mpe_tname);
+ Matrix *mat = luaT_checkudata(L, 2, nerv_matrix_host_float_tname);
+ const char *utt = luaL_checkstring(L, 3);
+
+ Matrix *diff = calc_diff_mpe(mpe, mat, utt);
+ luaT_pushudata(L, diff, nerv_matrix_host_float_tname);
+ return 1;
+}
+
+static int mpe_get_num_frames(lua_State *L) {
+ KaldiMPE *mpe = luaT_checkudata(L, 1, nerv_kaldi_mpe_tname);
+ lua_pushnumber(L, get_num_frames_mpe(mpe));
+ return 1;
+}
+
+static int mpe_get_utt_frame_acc(lua_State *L) {
+ KaldiMPE *mpe = luaT_checkudata(L, 1, nerv_kaldi_mpe_tname);
+ lua_pushnumber(L, get_utt_frame_acc_mpe(mpe));
+ return 1;
+}
+
+static const luaL_Reg mpe_methods[] = {
+ {"check", mpe_check},
+ {"calc_diff", mpe_calc_diff},
+ {"get_num_frames", mpe_get_num_frames},
+ {"get_utt_frame_acc", mpe_get_utt_frame_acc},
+ {NULL, NULL}
+};
+
+static void mpe_init(lua_State *L) {
+ luaT_newmetatable(L, nerv_kaldi_mpe_tname, NULL,
+ mpe_new, mpe_destroy, NULL);
+ luaL_register(L, NULL, mpe_methods);
+ lua_pop(L, 1);
+}
+
+static int mmi_new(lua_State *L) {
+ const char *arg = luaL_checkstring(L, 1);
+ const char *mdl = luaL_checkstring(L, 2);
+ const char *lat = luaL_checkstring(L, 3);
+ const char *ali = luaL_checkstring(L, 4);
+ KaldiMMI *mmi = new_KaldiMMI(arg, mdl, lat, ali);
+ luaT_pushudata(L, mmi, nerv_kaldi_mmi_tname);
+ return 1;
+}
+
+static int mmi_destroy(lua_State *L) {
+ KaldiMMI *mmi = luaT_checkudata(L, 1, nerv_kaldi_mmi_tname);
+ destroy_KaldiMMI(mmi);
+ return 0;
+}
+
+static int mmi_check(lua_State *L) {
+ KaldiMMI *mmi = luaT_checkudata(L, 1, nerv_kaldi_mmi_tname);
+ const Matrix *cumat = luaT_checkudata(L, 2, nerv_matrix_cuda_float_tname);
+ const char *utt = luaL_checkstring(L, 3);
+
+ lua_pushboolean(L, check_mmi(mmi, cumat, utt));
+ return 1;
+}
+
+static int mmi_calc_diff(lua_State *L) {
+ KaldiMMI *mmi = luaT_checkudata(L, 1, nerv_kaldi_mmi_tname);
+ Matrix *mat = luaT_checkudata(L, 2, nerv_matrix_host_float_tname);
+ const char *utt = luaL_checkstring(L, 3);
+
+ Matrix *diff = calc_diff_mmi(mmi, mat, utt);
+ luaT_pushudata(L, diff, nerv_matrix_host_float_tname);
+ return 1;
+}
+
+static int mmi_get_num_frames(lua_State *L) {
+ KaldiMMI *mmi = luaT_checkudata(L, 1, nerv_kaldi_mmi_tname);
+ lua_pushnumber(L, get_num_frames_mmi(mmi));
+ return 1;
+}
+
+static const luaL_Reg mmi_methods[] = {
+ {"check", mmi_check},
+ {"calc_diff", mmi_calc_diff},
+ {"get_num_frames", mmi_get_num_frames},
+ {NULL, NULL}
+};
+
+static void mmi_init(lua_State *L) {
+ luaT_newmetatable(L, nerv_kaldi_mmi_tname, NULL,
+ mmi_new, mmi_destroy, NULL);
+ luaL_register(L, NULL, mmi_methods);
+ lua_pop(L, 1);
+}
+
+void kaldi_seq_init(lua_State *L) {
+ mpe_init(L);
+ mmi_init(L);
+}
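
For reference, the two metatables registered above expose the Lua surface that mmi.lua/mpe.lua wrap; a hypothetical call sequence (variable names are illustrative):

    local mmi = nerv.KaldiMMI(arg, mdl, lat, ali)      -- mmi_new
    if mmi:check(cumat, utt_key) then                  -- CuMatrixFloat + key
        local diff = mmi:calc_diff(host_mat, utt_key)  -- MMatrixFloat in/out
        frames = frames + mmi:get_num_frames()
    end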
diff --git a/kaldi_seq/src/kaldi_mmi.cpp b/kaldi_seq/src/kaldi_mmi.cpp
new file mode 100644
index 0000000..ea9b4f1
--- /dev/null
+++ b/kaldi_seq/src/kaldi_mmi.cpp
@@ -0,0 +1,427 @@
+#include <string>
+#include "base/kaldi-common.h"
+#include "util/common-utils.h"
+#include "tree/context-dep.h"
+#include "hmm/transition-model.h"
+#include "fstext/fstext-lib.h"
+#include "decoder/faster-decoder.h"
+#include "decoder/decodable-matrix.h"
+#include "lat/kaldi-lattice.h"
+#include "lat/lattice-functions.h"
+
+#include "nnet/nnet-trnopts.h"
+#include "nnet/nnet-component.h"
+#include "nnet/nnet-activation.h"
+#include "nnet/nnet-nnet.h"
+#include "nnet/nnet-pdf-prior.h"
+#include "nnet/nnet-utils.h"
+#include "base/timer.h"
+#include "cudamatrix/cu-device.h"
+
+#include <iomanip>
+
+typedef kaldi::BaseFloat BaseFloat;
+typedef struct Matrix NervMatrix;
+
+namespace kaldi{
+ namespace nnet1{
+ void LatticeAcousticRescore(const kaldi::Matrix<BaseFloat> &log_like,
+ const TransitionModel &trans_model,
+ const std::vector<int32> &state_times,
+ Lattice *lat);
+ }
+}
+
+extern "C" {
+#include "kaldi_mmi.h"
+#include "string.h"
+#include "assert.h"
+#include "nerv/common.h"
+
+ extern NervMatrix *nerv_matrix_host_float_create(long nrow, long ncol, Status *status);
+ extern void nerv_matrix_host_float_copy_fromd(NervMatrix *mat, const NervMatrix *cumat, int, int, int, Status *);
+ using namespace kaldi;
+ using namespace kaldi::nnet1;
+ typedef kaldi::int32 int32;
+
+ struct KaldiMMI {
+ TransitionModel *trans_model;
+ RandomAccessLatticeReader *den_lat_reader;
+ RandomAccessInt32VectorReader *ref_ali_reader;
+
+ Lattice den_lat;
+ vector<int32> state_times;
+
+ PdfPriorOptions *prior_opts;
+ PdfPrior *log_prior;
+
+ std::vector<int32> ref_ali;
+
+ Timer *time;
+ double time_now;
+
+ int32 num_done, num_no_ref_ali, num_no_den_lat, num_other_error;
+ int32 num_frm_drop;
+
+ kaldi::int64 total_frames;
+ double lat_like; // total likelihood of the lattice
+ double lat_ac_like; // acoustic likelihood weighted by posterior.
+ double total_mmi_obj, mmi_obj;
+ double total_post_on_ali, post_on_ali;
+
+ int32 num_frames;
+
+ bool binary;
+ BaseFloat acoustic_scale, lm_scale, old_acoustic_scale;
+ kaldi::int32 max_frames;
+ bool drop_frames;
+ std::string use_gpu;
+ };
+
+ KaldiMMI * new_KaldiMMI(const char* arg, const char* mdl, const char* lat, const char* ali)
+ {
+ KaldiMMI * mmi = new KaldiMMI;
+
+ const char *usage =
+ "Perform one iteration of DNN-MMI training by stochastic "
+ "gradient descent.\n"
+ "The network weights are updated on each utterance.\n"
+ "Usage: nnet-train-mmi-sequential [options] <model-in> <transition-model-in> "
+ "<feature-rspecifier> <den-lat-rspecifier> <ali-rspecifier> [<model-out>]\n"
+ "e.g.: \n"
+ " nnet-train-mmi-sequential nnet.init trans.mdl scp:train.scp scp:denlats.scp ark:train.ali "
+ "nnet.iter1\n";
+
+ ParseOptions po(usage);
+
+ NnetTrainOptions trn_opts; trn_opts.learn_rate=0.00001;
+ trn_opts.Register(&po);
+
+ mmi->binary = true;
+ po.Register("binary", &(mmi->binary), "Write output in binary mode");
+
+ std::string feature_transform;
+ po.Register("feature-transform", &feature_transform,
+ "Feature transform in Nnet format");
+
+ mmi->prior_opts = new PdfPriorOptions;
+ PdfPriorOptions &prior_opts = *(mmi->prior_opts);
+ prior_opts.Register(&po);
+
+ mmi->acoustic_scale = 1.0,
+ mmi->lm_scale = 1.0,
+ mmi->old_acoustic_scale = 0.0;
+ po.Register("acoustic-scale", &(mmi->acoustic_scale),
+ "Scaling factor for acoustic likelihoods");
+ po.Register("lm-scale", &(mmi->lm_scale),
+ "Scaling factor for \"graph costs\" (including LM costs)");
+ po.Register("old-acoustic-scale", &(mmi->old_acoustic_scale),
+ "Add in the scores in the input lattices with this scale, rather "
+ "than discarding them.");
+ mmi->max_frames = 6000; // Allow segments maximum of one minute by default
+ po.Register("max-frames",&(mmi->max_frames), "Maximum number of frames a segment can have to be processed");
+
+ mmi->drop_frames = true;
+ po.Register("drop-frames", &(mmi->drop_frames),
+ "Drop frames, where is zero den-posterior under numerator path "
+ "(ie. path not in lattice)");
+
+ mmi->use_gpu=std::string("yes");
+ po.Register("use-gpu", &(mmi->use_gpu), "yes|no|optional, only has effect if compiled with CUDA");
+
+ int narg = 0;
+ char args[64][1024];
+ char *token;
+ char *saveptr = NULL;
+ char tmpstr[1024];
+
+ strcpy(tmpstr, arg);
+ strcpy(args[0], "nnet-train-mmi-sequential");
+ for(narg = 1, token = strtok_r(tmpstr, " ", &saveptr); token; token = strtok_r(NULL, " ", &saveptr))
+ strcpy(args[narg++], token);
+ strcpy(args[narg++], "0.nnet");
+ strcpy(args[narg++], mdl);
+ strcpy(args[narg++], "feat");
+ strcpy(args[narg++], lat);
+ strcpy(args[narg++], ali);
+ strcpy(args[narg++], "1.nnet");
+
+ char **argsv = new char*[narg];
+ for(int _i = 0; _i < narg; _i++)
+ argsv[_i] = args[_i];
+
+ po.Read(narg, argsv);
+ delete [] argsv;
+
+ if (po.NumArgs() != 6) {
+ po.PrintUsage();
+ exit(1);
+ }
+
+ std::string transition_model_filename = po.GetArg(2),
+ den_lat_rspecifier = po.GetArg(4),
+ ref_ali_rspecifier = po.GetArg(5);
+
+ // Select the GPU
+#if HAVE_CUDA == 1
+ CuDevice::Instantiate().SelectGpuId(mmi->use_gpu);
+#endif
+
+ // Read the class-frame-counts, compute priors
+ mmi->log_prior = new PdfPrior(prior_opts);
+
+ // Read transition model
+ mmi->trans_model = new TransitionModel;
+ ReadKaldiObject(transition_model_filename, mmi->trans_model);
+
+ mmi->den_lat_reader = new RandomAccessLatticeReader(den_lat_rspecifier);
+ mmi->ref_ali_reader = new RandomAccessInt32VectorReader(ref_ali_rspecifier);
+
+ if (mmi->drop_frames) {
+ KALDI_LOG << "--drop-frames=true :"
+ " we will zero gradient for frames with total den/num mismatch."
+ " The mismatch is likely to be caused by missing correct path "
+ " from den-lattice due wrong annotation or search error."
+ " Leaving such frames out stabilizes the training.";
+ }
+
+ mmi->time = new Timer;
+ mmi->time_now = 0;
+ mmi->num_done =0;
+ mmi->num_no_ref_ali = 0;
+ mmi->num_no_den_lat = 0;
+ mmi->num_other_error = 0;
+ mmi->total_frames = 0;
+ mmi->num_frm_drop = 0;
+
+ m